Diffstat (limited to 'arch/parisc')
-rw-r--r-- arch/parisc/Kconfig | 286
-rw-r--r-- arch/parisc/Kconfig.debug | 29
-rw-r--r-- arch/parisc/Makefile | 133
-rw-r--r-- arch/parisc/configs/712_defconfig | 205
-rw-r--r-- arch/parisc/configs/a500_defconfig | 201
-rw-r--r-- arch/parisc/configs/b180_defconfig | 107
-rw-r--r-- arch/parisc/configs/c3000_defconfig | 176
-rw-r--r-- arch/parisc/configs/default_defconfig | 233
-rw-r--r-- arch/parisc/defpalo.conf | 21
-rw-r--r-- arch/parisc/hpux/Makefile | 5
-rw-r--r-- arch/parisc/hpux/entry_hpux.S | 546
-rw-r--r-- arch/parisc/hpux/fs.c | 206
-rw-r--r-- arch/parisc/hpux/gate.S | 107
-rw-r--r-- arch/parisc/hpux/ioctl.c | 72
-rw-r--r-- arch/parisc/hpux/sys_hpux.c | 970
-rw-r--r-- arch/parisc/hpux/wrappers.S | 250
-rw-r--r-- arch/parisc/include/asm/Kbuild | 3
-rw-r--r-- arch/parisc/include/asm/agp.h | 20
-rw-r--r-- arch/parisc/include/asm/asm-offsets.h | 1
-rw-r--r-- arch/parisc/include/asm/asmregs.h | 183
-rw-r--r-- arch/parisc/include/asm/assembly.h | 520
-rw-r--r-- arch/parisc/include/asm/atomic.h | 347
-rw-r--r-- arch/parisc/include/asm/auxvec.h | 4
-rw-r--r-- arch/parisc/include/asm/bitops.h | 237
-rw-r--r-- arch/parisc/include/asm/bitsperlong.h | 20
-rw-r--r-- arch/parisc/include/asm/bug.h | 92
-rw-r--r-- arch/parisc/include/asm/bugs.h | 19
-rw-r--r-- arch/parisc/include/asm/byteorder.h | 6
-rw-r--r-- arch/parisc/include/asm/cache.h | 60
-rw-r--r-- arch/parisc/include/asm/cacheflush.h | 161
-rw-r--r-- arch/parisc/include/asm/checksum.h | 210
-rw-r--r-- arch/parisc/include/asm/compat.h | 166
-rw-r--r-- arch/parisc/include/asm/compat_rt_sigframe.h | 50
-rw-r--r-- arch/parisc/include/asm/compat_signal.h | 2
-rw-r--r-- arch/parisc/include/asm/compat_ucontext.h | 17
-rw-r--r-- arch/parisc/include/asm/cputime.h | 6
-rw-r--r-- arch/parisc/include/asm/current.h | 15
-rw-r--r-- arch/parisc/include/asm/delay.h | 43
-rw-r--r-- arch/parisc/include/asm/device.h | 7
-rw-r--r-- arch/parisc/include/asm/div64.h | 1
-rw-r--r-- arch/parisc/include/asm/dma-mapping.h | 241
-rw-r--r-- arch/parisc/include/asm/dma.h | 185
-rw-r--r-- arch/parisc/include/asm/eisa_bus.h | 23
-rw-r--r-- arch/parisc/include/asm/eisa_eeprom.h | 153
-rw-r--r-- arch/parisc/include/asm/elf.h | 351
-rw-r--r-- arch/parisc/include/asm/emergency-restart.h | 6
-rw-r--r-- arch/parisc/include/asm/errno.h | 127
-rw-r--r-- arch/parisc/include/asm/fb.h | 19
-rw-r--r-- arch/parisc/include/asm/fcntl.h | 40
-rw-r--r-- arch/parisc/include/asm/fixmap.h | 30
-rw-r--r-- arch/parisc/include/asm/floppy.h | 271
-rw-r--r-- arch/parisc/include/asm/ftrace.h | 39
-rw-r--r-- arch/parisc/include/asm/futex.h | 131
-rw-r--r-- arch/parisc/include/asm/grfioctl.h | 113
-rw-r--r-- arch/parisc/include/asm/hardirq.h | 11
-rw-r--r-- arch/parisc/include/asm/hardware.h | 127
-rw-r--r-- arch/parisc/include/asm/hw_irq.h | 8
-rw-r--r-- arch/parisc/include/asm/ide.h | 57
-rw-r--r-- arch/parisc/include/asm/io.h | 320
-rw-r--r-- arch/parisc/include/asm/ioctl.h | 44
-rw-r--r-- arch/parisc/include/asm/ioctls.h | 92
-rw-r--r-- arch/parisc/include/asm/ipcbuf.h | 27
-rw-r--r-- arch/parisc/include/asm/irq.h | 52
-rw-r--r-- arch/parisc/include/asm/irq_regs.h | 1
-rw-r--r-- arch/parisc/include/asm/irqflags.h | 46
-rw-r--r-- arch/parisc/include/asm/kdebug.h | 1
-rw-r--r-- arch/parisc/include/asm/kmap_types.h | 12
-rw-r--r-- arch/parisc/include/asm/led.h | 42
-rw-r--r-- arch/parisc/include/asm/linkage.h | 31
-rw-r--r-- arch/parisc/include/asm/local.h | 1
-rw-r--r-- arch/parisc/include/asm/local64.h | 1
-rw-r--r-- arch/parisc/include/asm/machdep.h | 16
-rw-r--r-- arch/parisc/include/asm/mc146818rtc.h | 9
-rw-r--r-- arch/parisc/include/asm/mckinley.h | 9
-rw-r--r-- arch/parisc/include/asm/mman.h | 69
-rw-r--r-- arch/parisc/include/asm/mmu.h | 7
-rw-r--r-- arch/parisc/include/asm/mmu_context.h | 80
-rw-r--r-- arch/parisc/include/asm/mmzone.h | 66
-rw-r--r-- arch/parisc/include/asm/module.h | 34
-rw-r--r-- arch/parisc/include/asm/msgbuf.h | 37
-rw-r--r-- arch/parisc/include/asm/mutex.h | 9
-rw-r--r-- arch/parisc/include/asm/page.h | 164
-rw-r--r-- arch/parisc/include/asm/param.h | 1
-rw-r--r-- arch/parisc/include/asm/parisc-device.h | 64
-rw-r--r-- arch/parisc/include/asm/parport.h | 18
-rw-r--r-- arch/parisc/include/asm/pci.h | 266
-rw-r--r-- arch/parisc/include/asm/pdc.h | 767
-rw-r--r-- arch/parisc/include/asm/pdc_chassis.h | 381
-rw-r--r-- arch/parisc/include/asm/pdcpat.h | 308
-rw-r--r-- arch/parisc/include/asm/percpu.h | 7
-rw-r--r-- arch/parisc/include/asm/perf.h | 74
-rw-r--r-- arch/parisc/include/asm/perf_event.h | 6
-rw-r--r-- arch/parisc/include/asm/pgalloc.h | 149
-rw-r--r-- arch/parisc/include/asm/pgtable.h | 512
-rw-r--r-- arch/parisc/include/asm/poll.h | 1
-rw-r--r-- arch/parisc/include/asm/posix_types.h | 128
-rw-r--r-- arch/parisc/include/asm/prefetch.h | 44
-rw-r--r-- arch/parisc/include/asm/processor.h | 360
-rw-r--r-- arch/parisc/include/asm/psw.h | 62
-rw-r--r-- arch/parisc/include/asm/ptrace.h | 64
-rw-r--r-- arch/parisc/include/asm/real.h | 5
-rw-r--r-- arch/parisc/include/asm/resource.h | 7
-rw-r--r-- arch/parisc/include/asm/ropes.h | 322
-rw-r--r-- arch/parisc/include/asm/rt_sigframe.h | 23
-rw-r--r-- arch/parisc/include/asm/rtc.h | 131
-rw-r--r-- arch/parisc/include/asm/runway.h | 12
-rw-r--r-- arch/parisc/include/asm/scatterlist.h | 10
-rw-r--r-- arch/parisc/include/asm/sections.h | 12
-rw-r--r-- arch/parisc/include/asm/segment.h | 6
-rw-r--r-- arch/parisc/include/asm/sembuf.h | 29
-rw-r--r-- arch/parisc/include/asm/serial.h | 10
-rw-r--r-- arch/parisc/include/asm/setup.h | 6
-rw-r--r-- arch/parisc/include/asm/shmbuf.h | 58
-rw-r--r-- arch/parisc/include/asm/shmparam.h | 8
-rw-r--r-- arch/parisc/include/asm/sigcontext.h | 20
-rw-r--r-- arch/parisc/include/asm/siginfo.h | 9
-rw-r--r-- arch/parisc/include/asm/signal.h | 153
-rw-r--r-- arch/parisc/include/asm/smp.h | 55
-rw-r--r-- arch/parisc/include/asm/socket.h | 69
-rw-r--r-- arch/parisc/include/asm/sockios.h | 13
-rw-r--r-- arch/parisc/include/asm/spinlock.h | 197
-rw-r--r-- arch/parisc/include/asm/spinlock_types.h | 21
-rw-r--r-- arch/parisc/include/asm/stat.h | 100
-rw-r--r-- arch/parisc/include/asm/statfs.h | 7
-rw-r--r-- arch/parisc/include/asm/string.h | 10
-rw-r--r-- arch/parisc/include/asm/superio.h | 85
-rw-r--r-- arch/parisc/include/asm/swab.h | 66
-rw-r--r-- arch/parisc/include/asm/syscall.h | 40
-rw-r--r-- arch/parisc/include/asm/system.h | 165
-rw-r--r-- arch/parisc/include/asm/termbits.h | 201
-rw-r--r-- arch/parisc/include/asm/termios.h | 90
-rw-r--r-- arch/parisc/include/asm/thread_info.h | 82
-rw-r--r-- arch/parisc/include/asm/timex.h | 20
-rw-r--r-- arch/parisc/include/asm/tlb.h | 27
-rw-r--r-- arch/parisc/include/asm/tlbflush.h | 83
-rw-r--r-- arch/parisc/include/asm/topology.h | 6
-rw-r--r-- arch/parisc/include/asm/traps.h | 16
-rw-r--r-- arch/parisc/include/asm/types.h | 12
-rw-r--r-- arch/parisc/include/asm/uaccess.h | 270
-rw-r--r-- arch/parisc/include/asm/ucontext.h | 12
-rw-r--r-- arch/parisc/include/asm/unaligned.h | 16
-rw-r--r-- arch/parisc/include/asm/unistd.h | 1012
-rw-r--r-- arch/parisc/include/asm/unwind.h | 79
-rw-r--r-- arch/parisc/include/asm/user.h | 5
-rw-r--r-- arch/parisc/include/asm/vga.h | 6
-rw-r--r-- arch/parisc/include/asm/xor.h | 1
-rw-r--r-- arch/parisc/install.sh | 38
-rw-r--r-- arch/parisc/kernel/.gitignore | 1
-rw-r--r-- arch/parisc/kernel/Makefile | 35
-rw-r--r-- arch/parisc/kernel/asm-offsets.c | 301
-rw-r--r-- arch/parisc/kernel/binfmt_elf32.c | 103
-rw-r--r-- arch/parisc/kernel/cache.c | 515
-rw-r--r-- arch/parisc/kernel/drivers.c | 922
-rw-r--r-- arch/parisc/kernel/entry.S | 2381
-rw-r--r-- arch/parisc/kernel/firmware.c | 1495
-rw-r--r-- arch/parisc/kernel/ftrace.c | 185
-rw-r--r-- arch/parisc/kernel/hardware.c | 1383
-rw-r--r-- arch/parisc/kernel/head.S | 358
-rw-r--r-- arch/parisc/kernel/hpmc.S | 305
-rw-r--r-- arch/parisc/kernel/init_task.c | 70
-rw-r--r-- arch/parisc/kernel/inventory.c | 619
-rw-r--r-- arch/parisc/kernel/irq.c | 423
-rw-r--r-- arch/parisc/kernel/module.c | 958
-rw-r--r-- arch/parisc/kernel/pa7300lc.c | 49
-rw-r--r-- arch/parisc/kernel/pacache.S | 1044
-rw-r--r-- arch/parisc/kernel/parisc_ksyms.c | 162
-rw-r--r-- arch/parisc/kernel/pci-dma.c | 595
-rw-r--r-- arch/parisc/kernel/pci.c | 330
-rw-r--r-- arch/parisc/kernel/pdc_chassis.c | 301
-rw-r--r-- arch/parisc/kernel/pdc_cons.c | 286
-rw-r--r-- arch/parisc/kernel/perf.c | 851
-rw-r--r-- arch/parisc/kernel/perf_asm.S | 1692
-rw-r--r-- arch/parisc/kernel/perf_images.h | 3138
-rw-r--r-- arch/parisc/kernel/process.c | 406
-rw-r--r-- arch/parisc/kernel/processor.c | 422
-rw-r--r-- arch/parisc/kernel/ptrace.c | 285
-rw-r--r-- arch/parisc/kernel/real2.S | 304
-rw-r--r-- arch/parisc/kernel/setup.c | 394
-rw-r--r-- arch/parisc/kernel/signal.c | 653
-rw-r--r-- arch/parisc/kernel/signal32.c | 520
-rw-r--r-- arch/parisc/kernel/signal32.h | 166
-rw-r--r-- arch/parisc/kernel/smp.c | 468
-rw-r--r-- arch/parisc/kernel/stacktrace.c | 63
-rw-r--r-- arch/parisc/kernel/sys32.h | 48
-rw-r--r-- arch/parisc/kernel/sys_parisc.c | 236
-rw-r--r-- arch/parisc/kernel/sys_parisc32.c | 238
-rw-r--r-- arch/parisc/kernel/syscall.S | 688
-rw-r--r-- arch/parisc/kernel/syscall_table.S | 438
-rw-r--r-- arch/parisc/kernel/time.c | 276
-rw-r--r-- arch/parisc/kernel/topology.c | 37
-rw-r--r-- arch/parisc/kernel/traps.c | 885
-rw-r--r-- arch/parisc/kernel/unaligned.c | 754
-rw-r--r-- arch/parisc/kernel/unwind.c | 444
-rw-r--r-- arch/parisc/kernel/vmlinux.lds.S | 178
-rw-r--r-- arch/parisc/lib/Makefile | 7
-rw-r--r-- arch/parisc/lib/bitops.c | 83
-rw-r--r-- arch/parisc/lib/checksum.c | 149
-rw-r--r-- arch/parisc/lib/fixup.S | 92
-rw-r--r-- arch/parisc/lib/io.c | 488
-rw-r--r-- arch/parisc/lib/iomap.c | 486
-rw-r--r-- arch/parisc/lib/lusercopy.S | 180
-rw-r--r-- arch/parisc/lib/memcpy.c | 506
-rw-r--r-- arch/parisc/lib/memset.c | 91
-rw-r--r-- arch/parisc/math-emu/Makefile | 19
-rw-r--r-- arch/parisc/math-emu/README | 11
-rw-r--r-- arch/parisc/math-emu/cnv_float.h | 377
-rw-r--r-- arch/parisc/math-emu/dbl_float.h | 847
-rw-r--r-- arch/parisc/math-emu/decode_exc.c | 370
-rw-r--r-- arch/parisc/math-emu/denormal.c | 135
-rw-r--r-- arch/parisc/math-emu/dfadd.c | 524
-rw-r--r-- arch/parisc/math-emu/dfcmp.c | 181
-rw-r--r-- arch/parisc/math-emu/dfdiv.c | 400
-rw-r--r-- arch/parisc/math-emu/dfmpy.c | 394
-rw-r--r-- arch/parisc/math-emu/dfrem.c | 297
-rw-r--r-- arch/parisc/math-emu/dfsqrt.c | 195
-rw-r--r-- arch/parisc/math-emu/dfsub.c | 526
-rw-r--r-- arch/parisc/math-emu/driver.c | 128
-rw-r--r-- arch/parisc/math-emu/fcnvff.c | 309
-rw-r--r-- arch/parisc/math-emu/fcnvfu.c | 536
-rw-r--r-- arch/parisc/math-emu/fcnvfut.c | 332
-rw-r--r-- arch/parisc/math-emu/fcnvfx.c | 501
-rw-r--r-- arch/parisc/math-emu/fcnvfxt.c | 328
-rw-r--r-- arch/parisc/math-emu/fcnvuf.c | 318
-rw-r--r-- arch/parisc/math-emu/fcnvxf.c | 386
-rw-r--r-- arch/parisc/math-emu/float.h | 582
-rw-r--r-- arch/parisc/math-emu/fmpyfadd.c | 2655
-rw-r--r-- arch/parisc/math-emu/fpbits.h | 65
-rw-r--r-- arch/parisc/math-emu/fpu.h | 76
-rw-r--r-- arch/parisc/math-emu/fpudispatch.c | 1442
-rw-r--r-- arch/parisc/math-emu/frnd.c | 252
-rw-r--r-- arch/parisc/math-emu/hppa.h | 42
-rw-r--r-- arch/parisc/math-emu/math-emu.h | 27
-rw-r--r-- arch/parisc/math-emu/sfadd.c | 518
-rw-r--r-- arch/parisc/math-emu/sfcmp.c | 155
-rw-r--r-- arch/parisc/math-emu/sfdiv.c | 392
-rw-r--r-- arch/parisc/math-emu/sfmpy.c | 380
-rw-r--r-- arch/parisc/math-emu/sfrem.c | 290
-rw-r--r-- arch/parisc/math-emu/sfsqrt.c | 187
-rw-r--r-- arch/parisc/math-emu/sfsub.c | 521
-rw-r--r-- arch/parisc/math-emu/sgl_float.h | 486
-rw-r--r-- arch/parisc/mm/Makefile | 5
-rw-r--r-- arch/parisc/mm/fault.c | 270
-rw-r--r-- arch/parisc/mm/init.c | 1109
-rw-r--r-- arch/parisc/mm/ioremap.c | 99
-rw-r--r-- arch/parisc/nm | 6
-rw-r--r-- arch/parisc/oprofile/Makefile | 9
-rw-r--r-- arch/parisc/oprofile/init.c | 23
247 files changed, 60868 insertions, 0 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
new file mode 100644
index 00000000..65adc86a
--- /dev/null
+++ b/arch/parisc/Kconfig
@@ -0,0 +1,286 @@
+config PARISC
+ def_bool y
+ select HAVE_IDE
+ select HAVE_OPROFILE
+ select HAVE_FUNCTION_TRACER if 64BIT
+ select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
+ select HAVE_FUNCTION_TRACE_MCOUNT_TEST if 64BIT
+ select RTC_CLASS
+ select RTC_DRV_GENERIC
+ select INIT_ALL_POSSIBLE
+ select BUG
+ select HAVE_IRQ_WORK
+ select HAVE_PERF_EVENTS
+ select GENERIC_ATOMIC64 if !64BIT
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_IRQ_PROBE
+ select IRQ_PER_CPU
+
+ help
+ The PA-RISC microprocessor is designed by Hewlett-Packard and used
+ in many of their workstations & servers (HP9000 700 and 800 series,
+ and later HP3000 series). The PA-RISC Linux project home page is
+ at <http://www.parisc-linux.org/>.
+
+config MMU
+ def_bool y
+
+config STACK_GROWSUP
+ def_bool y
+
+config GENERIC_LOCKBREAK
+ bool
+ default y
+ depends on SMP && PREEMPT
+
+config RWSEM_GENERIC_SPINLOCK
+ def_bool y
+
+config RWSEM_XCHGADD_ALGORITHM
+ bool
+
+config ARCH_HAS_ILOG2_U32
+ bool
+ default n
+
+config ARCH_HAS_ILOG2_U64
+ bool
+ default n
+
+config GENERIC_BUG
+ bool
+ default y
+ depends on BUG
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
+config GENERIC_CALIBRATE_DELAY
+ bool
+ default y
+
+config TIME_LOW_RES
+ bool
+ depends on SMP
+ default y
+
+config HAVE_LATENCYTOP_SUPPORT
+ def_bool y
+
+# unless you want to implement ACPI on PA-RISC ... ;-)
+config PM
+ bool
+
+config STACKTRACE_SUPPORT
+ def_bool y
+
+config NEED_DMA_MAP_STATE
+ def_bool y
+
+config NEED_SG_DMA_LENGTH
+ def_bool y
+
+config ISA_DMA_API
+ bool
+
+config ARCH_MAY_HAVE_PC_FDC
+ bool
+ depends on BROKEN
+ default y
+
+source "init/Kconfig"
+
+source "kernel/Kconfig.freezer"
+
+
+menu "Processor type and features"
+
+choice
+ prompt "Processor type"
+ default PA7000
+
+config PA7000
+ bool "PA7000/PA7100"
+ ---help---
+ This is the processor type of your CPU. This information is
+ used for optimizing purposes. In order to compile a kernel
+ that can run on all 32-bit PA CPUs (albeit not optimally fast),
+ you can specify "PA7000" here.
+
+ Specifying "PA8000" here will allow you to select a 64-bit kernel
+ which is required on some machines.
+
+config PA7100LC
+ bool "PA7100LC"
+ help
+ Select this option for the PCX-L processor, as used in the
+ 712, 715/64, 715/80, 715/100, 715/100XC, 725/100, 743, 748,
+ D200, D210, D300, D310 and E-class
+
+config PA7200
+ bool "PA7200"
+ help
+ Select this option for the PCX-T' processor, as used in the
+ C100, C110, J100, J110, J210XC, D250, D260, D350, D360,
+ K100, K200, K210, K220, K400, K410 and K420
+
+config PA7300LC
+ bool "PA7300LC"
+ help
+ Select this option for the PCX-L2 processor, as used in the
+ 744, A180, B132L, B160L, B180L, C132L, C160L, C180L,
+ D220, D230, D320 and D330.
+
+config PA8X00
+ bool "PA8000 and up"
+ help
+ Select this option for PCX-U to PCX-W2 processors.
+
+endchoice
+
+# Define implied options from the CPU selection here
+
+config PA20
+ def_bool y
+ depends on PA8X00
+
+config PA11
+ def_bool y
+ depends on PA7000 || PA7100LC || PA7200 || PA7300LC
+
+config PREFETCH
+ def_bool y
+ depends on PA8X00 || PA7200
+
+config 64BIT
+ bool "64-bit kernel"
+ depends on PA8X00
+ help
+ Enable this if you want to support 64bit kernel on PA-RISC platform.
+
+ At the moment, only people willing to use more than 2GB of RAM,
+ or having a 64bit-only capable PA-RISC machine should say Y here.
+
+ Since there is no 64bit userland on PA-RISC, there is no point to
+ enable this option otherwise. The 64bit kernel is significantly bigger
+ and slower than the 32bit one.
+
+choice
+ prompt "Kernel page size"
+ default PARISC_PAGE_SIZE_4KB if !64BIT
+ default PARISC_PAGE_SIZE_4KB if 64BIT
+# default PARISC_PAGE_SIZE_16KB if 64BIT
+
+config PARISC_PAGE_SIZE_4KB
+ bool "4KB"
+ help
+ This lets you select the page size of the kernel. For best
+ performance, a page size of 16KB is recommended. For best
+ compatibility with 32bit applications, a page size of 4KB should be
+ selected (the vast majority of 32bit binaries work perfectly fine
+ with a larger page size).
+
+ 4KB For best 32bit compatibility
+ 16KB For best performance
+ 64KB For best performance, might give more overhead.
+
+ If you don't know what to do, choose 4KB.
+
+config PARISC_PAGE_SIZE_16KB
+ bool "16KB (EXPERIMENTAL)"
+ depends on PA8X00 && EXPERIMENTAL
+
+config PARISC_PAGE_SIZE_64KB
+ bool "64KB (EXPERIMENTAL)"
+ depends on PA8X00 && EXPERIMENTAL
+
+endchoice
+
+config SMP
+ bool "Symmetric multi-processing support"
+ select USE_GENERIC_SMP_HELPERS
+ ---help---
+ This enables support for systems with more than one CPU. If you have
+ a system with only one CPU, like most personal computers, say N. If
+ you have a system with more than one CPU, say Y.
+
+ If you say N here, the kernel will run on single and multiprocessor
+ machines, but will use only one CPU of a multiprocessor machine. If
+ you say Y here, the kernel will run on many, but not all,
+ singleprocessor machines. On a singleprocessor machine, the kernel
+ will run faster if you say N here.
+
+ See also <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO
+ available at <http://www.tldp.org/docs.html#howto>.
+
+ If you don't know what to do here, say N.
+
+config HOTPLUG_CPU
+ bool
+ default y if SMP
+ select HOTPLUG
+
+config ARCH_SELECT_MEMORY_MODEL
+ def_bool y
+ depends on 64BIT
+
+config ARCH_DISCONTIGMEM_ENABLE
+ def_bool y
+ depends on 64BIT
+
+config ARCH_FLATMEM_ENABLE
+ def_bool y
+
+config ARCH_DISCONTIGMEM_DEFAULT
+ def_bool y
+ depends on ARCH_DISCONTIGMEM_ENABLE
+
+config NODES_SHIFT
+ int
+ default "3"
+ depends on NEED_MULTIPLE_NODES
+
+source "kernel/Kconfig.preempt"
+source "kernel/Kconfig.hz"
+source "mm/Kconfig"
+
+config COMPAT
+ def_bool y
+ depends on 64BIT
+
+config HPUX
+ bool "Support for HP-UX binaries"
+ depends on !64BIT
+
+config NR_CPUS
+ int "Maximum number of CPUs (2-32)"
+ range 2 32
+ depends on SMP
+ default "32"
+
+endmenu
+
+
+source "drivers/parisc/Kconfig"
+
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "arch/parisc/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
new file mode 100644
index 00000000..7305ac8f
--- /dev/null
+++ b/arch/parisc/Kconfig.debug
@@ -0,0 +1,29 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ depends on DEBUG_KERNEL
+ help
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const
+ data. This option may have a slight performance impact because a
+ portion of the kernel code won't be covered by a TLB anymore.
+ If in doubt, say "N".
+
+config DEBUG_STRICT_USER_COPY_CHECKS
+ bool "Strict copy size checks"
+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
+ ---help---
+ Enabling this option turns a certain set of sanity checks for user
+ copy operations into compile time failures.
+
+ The copy_from_user() etc checks are there to help test if there
+ are sufficient security checks on the length argument of
+ the copy operation, by having gcc prove that the argument is
+ within bounds.
+
+ If unsure, or if you run an older (pre 4.4) gcc, say N.
+
+endmenu
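
The DEBUG_STRICT_USER_COPY_CHECKS help above describes promoting unprovable user-copy lengths to build errors. A minimal C sketch of the pattern the check targets; this is hypothetical driver-style code written for illustration, not part of the patch:

/* Hypothetical example: with the strict checks enabled, gcc must be able
 * to prove that 'len' cannot exceed sizeof(buf). */
#include <linux/uaccess.h>
#include <linux/errno.h>

static int read_user_blob(const void __user *src, size_t len)
{
	char buf[64];

	if (len > sizeof(buf))			/* clamp the length first ...   */
		return -EINVAL;
	if (copy_from_user(buf, src, len))	/* ... so the bound is provable */
		return -EFAULT;
	return 0;
}
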
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
new file mode 100644
index 00000000..55cca1da
--- /dev/null
+++ b/arch/parisc/Makefile
@@ -0,0 +1,133 @@
+#
+# parisc/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to do have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+# Portions Copyright (C) 1999 The Puffin Group
+#
+# Modified for PA-RISC Linux by Paul Lahaie, Alex deVries,
+# Mike Shaver, Helge Deller and Martin K. Petersen
+#
+
+KBUILD_DEFCONFIG := default_defconfig
+
+NM = sh $(srctree)/arch/parisc/nm
+CHECKFLAGS += -D__hppa__=1
+
+MACHINE := $(shell uname -m)
+ifeq ($(MACHINE),parisc*)
+NATIVE := 1
+endif
+
+ifdef CONFIG_64BIT
+UTS_MACHINE := parisc64
+CHECKFLAGS += -D__LP64__=1 -m64
+WIDTH := 64
+CROSS_COMPILE := hppa64-linux-gnu-
+else # 32-bit
+WIDTH :=
+endif
+
+# attempt to help out folks who are cross-compiling
+ifeq ($(NATIVE),1)
+CROSS_COMPILE := hppa$(WIDTH)-linux-
+endif
+
+OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
+
+cflags-y := -pipe
+
+# These flags should be implied by an hppa-linux configuration, but they
+# are not in gcc 3.2.
+cflags-y += -mno-space-regs -mfast-indirect-calls
+
+# Currently we save and restore fpregs on all kernel entry/interruption paths.
+# If that gets optimized, we might need to disable the use of fpregs in the
+# kernel.
+cflags-y += -mdisable-fpregs
+
+# Without this, "ld -r" results in .text sections that are too big
+# (> 0x40000) for branches to reach stubs.
+ifndef CONFIG_FUNCTION_TRACER
+ cflags-y += -ffunction-sections
+endif
+
+# select which processor to optimise for
+cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100
+cflags-$(CONFIG_PA7200) += -march=1.1 -mschedule=7200
+cflags-$(CONFIG_PA7100LC) += -march=1.1 -mschedule=7100LC
+cflags-$(CONFIG_PA7300LC) += -march=1.1 -mschedule=7300
+cflags-$(CONFIG_PA8X00) += -march=2.0 -mschedule=8000
+
+head-y := arch/parisc/kernel/head.o
+
+KBUILD_CFLAGS += $(cflags-y)
+
+kernel-y := mm/ kernel/ math-emu/ kernel/init_task.o
+kernel-$(CONFIG_HPUX) += hpux/
+
+core-y += $(addprefix arch/parisc/, $(kernel-y))
+libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name`
+
+drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/
+
+PALO := $(shell if (which palo 2>&1); then : ; \
+ elif [ -x /sbin/palo ]; then echo /sbin/palo; \
+ fi)
+
+PALOCONF := $(shell if [ -f $(src)/palo.conf ]; then echo $(src)/palo.conf; \
+ else echo $(obj)/palo.conf; \
+ fi)
+
+palo: vmlinux
+ @if test ! -x "$(PALO)"; then \
+ echo 'ERROR: Please install palo first (apt-get install palo)';\
+ echo 'or build it from source and install it somewhere in your $$PATH';\
+ false; \
+ fi
+ @if test ! -f "$(PALOCONF)"; then \
+ cp $(src)/arch/parisc/defpalo.conf $(obj)/palo.conf; \
+ echo 'A generic palo config file ($(obj)/palo.conf) has been created for you.'; \
+ echo 'You should check it and re-run "make palo".'; \
+ echo 'WARNING: the "lifimage" file is now placed in this directory by default!'; \
+ false; \
+ fi
+ $(PALO) -f $(PALOCONF)
+
+# Shorthands for known targets not supported by parisc, use vmlinux as default
+Image zImage bzImage: vmlinux
+
+kernel_install: vmlinux
+ sh $(src)/arch/parisc/install.sh \
+ $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
+
+install: kernel_install modules_install
+
+CLEAN_FILES += lifimage
+MRPROPER_FILES += palo.conf
+
+define archhelp
+ @echo '* vmlinux - Uncompressed kernel image (./vmlinux)'
+ @echo ' palo - Bootable image (./lifimage)'
+ @echo ' install - Install kernel using'
+ @echo ' (your) ~/bin/$(INSTALLKERNEL) or'
+ @echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
+ @echo ' copy to $$(INSTALL_PATH)'
+endef
+
+# we require gcc 3.3 or above to compile the kernel
+archprepare: checkbin
+checkbin:
+ @if test "$(call cc-version)" -lt "0303"; then \
+ echo -n "Sorry, GCC v3.3 or above is required to build " ; \
+ echo "the kernel." ; \
+ false ; \
+ fi
diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
new file mode 100644
index 00000000..0f90569b
--- /dev/null
+++ b/arch/parisc/configs/712_defconfig
@@ -0,0 +1,205 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PA7100LC=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_GSC_LASI=y
+# CONFIG_PDC_CHASSIS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=m
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+CONFIG_IP_NF_QUEUE=m
+CONFIG_LLC2=m
+CONFIG_NET_PKTGEN=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_ATA_OVER_ETH=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_LASI700=y
+CONFIG_SCSI_DEBUG=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+CONFIG_TUN=m
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_LASI_82596=y
+CONFIG_PPP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+# CONFIG_KEYBOARD_HIL_OLD is not set
+CONFIG_MOUSE_SERIAL=m
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=17
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_MUX is not set
+CONFIG_PDC_CONSOLE=y
+CONFIG_LEGACY_PTY_COUNT=64
+CONFIG_PRINTER=m
+CONFIG_PPDEV=m
+# CONFIG_HW_RANDOM is not set
+CONFIG_RAW_DRIVER=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=128
+CONFIG_DUMMY_CONSOLE_ROWS=48
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_HARMONY=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V4=y
+CONFIG_RPCSEC_GSS_SPKM3=m
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DEBUG_RODATA=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_LIBCRC32C=m
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
new file mode 100644
index 00000000..b647b182
--- /dev/null
+++ b/arch/parisc/configs/a500_defconfig
@@ -0,0 +1,201 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PA8X00=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=8
+# CONFIG_GSC is not set
+CONFIG_PCI=y
+CONFIG_PCI_LBA=y
+CONFIG_PCCARD=m
+# CONFIG_PCMCIA_LOAD_CIS is not set
+CONFIG_YENTA=m
+CONFIG_PD6729=m
+CONFIG_I82092=m
+# CONFIG_SUPERIO is not set
+# CONFIG_CHASSIS_LCD_LED is not set
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+# CONFIG_INET_LRO is not set
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP_DCCP=m
+# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_LLC2=m
+CONFIG_NET_PKTGEN=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_QLOGIC_1280=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+CONFIG_FUSION_FC=m
+CONFIG_FUSION_CTL=m
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+CONFIG_TUN=m
+CONFIG_NET_ETHERNET=y
+CONFIG_NET_VENDOR_3COM=y
+CONFIG_VORTEX=m
+CONFIG_TYPHOON=m
+CONFIG_NET_TULIP=y
+CONFIG_DE2104X=m
+CONFIG_TULIP=y
+CONFIG_TULIP_MMIO=y
+CONFIG_PCMCIA_XIRCOM=m
+CONFIG_HP100=m
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=m
+CONFIG_E100=m
+CONFIG_ACENIC=m
+CONFIG_ACENIC_OMIT_TIGON_I=y
+CONFIG_E1000=m
+CONFIG_TIGON3=m
+CONFIG_NET_PCMCIA=y
+CONFIG_PCMCIA_3C589=m
+CONFIG_PCMCIA_3C574=m
+CONFIG_PCMCIA_SMC91C92=m
+CONFIG_PCMCIA_XIRC2PS=m
+CONFIG_PPP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_CS=m
+CONFIG_SERIAL_8250_NR_UARTS=17
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_PDC_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_RAW_DRIVER=y
+# CONFIG_HWMON is not set
+CONFIG_AGP=y
+CONFIG_AGP_PARISC=y
+# CONFIG_STI_CONSOLE is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V4=y
+CONFIG_RPCSEC_GSS_SPKM3=m
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_HEADERS_CHECK=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_BLOWFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_LIBCRC32C=m
diff --git a/arch/parisc/configs/b180_defconfig b/arch/parisc/configs/b180_defconfig
new file mode 100644
index 00000000..e289f5bf
--- /dev/null
+++ b/arch/parisc/configs/b180_defconfig
@@ -0,0 +1,107 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODVERSIONS=y
+CONFIG_PA7100LC=y
+CONFIG_HPPB=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_EISA=y
+CONFIG_ISA=y
+CONFIG_PCI=y
+CONFIG_GSC_DINO=y
+# CONFIG_PDC_CHASSIS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_LASI700=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
+CONFIG_SCSI_ZALON=y
+CONFIG_SCSI_NCR53C8XX_SYNC=40
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_LASI_82596=y
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+CONFIG_PPP=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_HIL_OLD is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=13
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_PRINTER=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_HARMONY=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_SMB_FS=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_HEADERS_CHECK=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_SECURITY=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
new file mode 100644
index 00000000..311ca367
--- /dev/null
+++ b/arch/parisc/configs/c3000_defconfig
@@ -0,0 +1,176 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PA8X00=y
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_GSC is not set
+CONFIG_PCI=y
+CONFIG_PCI_LBA=y
+# CONFIG_PDC_CHASSIS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_DEBUG=y
+CONFIG_IP_NF_QUEUE=m
+CONFIG_NET_PKTGEN=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_NS87415=y
+CONFIG_BLK_DEV_SIIMAGE=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
+CONFIG_SCSI_DEBUG=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+CONFIG_FUSION_CTL=m
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+CONFIG_TUN=m
+CONFIG_NET_ETHERNET=y
+CONFIG_NET_TULIP=y
+CONFIG_DE2104X=m
+CONFIG_TULIP=y
+CONFIG_TULIP_MMIO=y
+CONFIG_NET_PCI=y
+CONFIG_E100=m
+CONFIG_ACENIC=m
+CONFIG_E1000=m
+CONFIG_TIGON3=m
+CONFIG_PPP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1600
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1200
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_SERIO=m
+CONFIG_SERIO_LIBPS2=m
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=13
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_RAW_DRIVER=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_AD1889=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_PRINTER=m
+CONFIG_USB_STORAGE=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_XFS_FS=m
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_HEADERS_CHECK=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DEBUG_RODATA=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_LIBCRC32C=m
diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig
new file mode 100644
index 00000000..dfe88f6c
--- /dev/null
+++ b/arch/parisc/configs/default_defconfig
@@ -0,0 +1,233 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PA7100LC=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_EISA=y
+CONFIG_PCI=y
+CONFIG_GSC_DINO=y
+CONFIG_PCI_LBA=y
+CONFIG_PCCARD=y
+CONFIG_YENTA=y
+CONFIG_PD6729=y
+CONFIG_I82092=y
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=m
+CONFIG_IPV6=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_LLC2=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_PC_PCMCIA=m
+CONFIG_PARPORT_1284=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECS=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_NS87415=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_LASI700=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_ZALON=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_MD_RAID10=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+CONFIG_TUN=m
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_LASI_82596=y
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+CONFIG_NET_PCI=y
+CONFIG_ACENIC=y
+CONFIG_TIGON3=y
+CONFIG_NET_PCMCIA=y
+CONFIG_PPP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+# CONFIG_KEYBOARD_HIL_OLD is not set
+CONFIG_MOUSE_SERIAL=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_CS=y
+CONFIG_SERIAL_8250_NR_UARTS=17
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_LEGACY_PTY_COUNT=64
+CONFIG_PRINTER=m
+CONFIG_PPDEV=m
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=128
+CONFIG_DUMMY_CONSOLE_ROWS=48
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_AD1889=y
+CONFIG_SND_HARMONY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_USB=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V4=y
+CONFIG_RPCSEC_GSS_SPKM3=m
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_HEADERS_CHECK=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_KEYS=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_LIBCRC32C=m
diff --git a/arch/parisc/defpalo.conf b/arch/parisc/defpalo.conf
new file mode 100644
index 00000000..4e1ae25b
--- /dev/null
+++ b/arch/parisc/defpalo.conf
@@ -0,0 +1,21 @@
+# This a generic Palo configuration file. For more information about how
+# it works try 'palo -?'.
+#
+# Most people using 'make palo' want a bootable file, usable for
+# network or tape booting for example.
+--init-tape=lifimage
+--recoverykernel=vmlinux
+
+########## Pick your ROOT here! ##########
+# You need at least one 'root='!
+#
+# If you want a root ramdisk, use the next 2 lines
+# (Edit the ramdisk image name!!!!)
+--ramdisk=ram-disk-image-file
+--commandline=0/vmlinux HOME=/ root=/dev/ram initrd=0/ramdisk
+
+# If you want NFS root, use the following command line (Edit the HOSTNAME!!!)
+#--commandline=0/vmlinux HOME=/ root=/dev/nfs nfsroot=HOSTNAME ip=bootp
+
+# If you have root on a disk partition, use this (Edit the partition name!!!)
+#--commandline=0/vmlinux HOME=/ root=/dev/sda1
diff --git a/arch/parisc/hpux/Makefile b/arch/parisc/hpux/Makefile
new file mode 100644
index 00000000..1048fb69
--- /dev/null
+++ b/arch/parisc/hpux/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for HPUX emulation
+#
+
+obj-y := entry_hpux.o gate.o wrappers.o fs.o ioctl.o sys_hpux.o
diff --git a/arch/parisc/hpux/entry_hpux.S b/arch/parisc/hpux/entry_hpux.S
new file mode 100644
index 00000000..d15a4135
--- /dev/null
+++ b/arch/parisc/hpux/entry_hpux.S
@@ -0,0 +1,546 @@
+/* syscall table for HPUX specific syscalls
+ *
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ * Copyright (C) 1999 Matthew Wilcox <willy at debian . org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/unistd.h>
+#include <asm/assembly.h>
+#include <linux/sys.h>
+#include <linux/linkage.h>
+
+#define ENTRY_NAME(_name_) ASM_ULONG_INSN _name_
+
+ .section .rodata,"a"
+ .import hpux_unimplemented_wrapper
+ENTRY(hpux_call_table)
+ ENTRY_NAME(sys_ni_syscall) /* 0 */
+ ENTRY_NAME(sys_exit)
+ ENTRY_NAME(hpux_fork_wrapper)
+ ENTRY_NAME(sys_read)
+ ENTRY_NAME(sys_write)
+ ENTRY_NAME(sys_open) /* 5 */
+ ENTRY_NAME(sys_close)
+ ENTRY_NAME(hpux_wait)
+ ENTRY_NAME(sys_creat)
+ ENTRY_NAME(sys_link)
+ ENTRY_NAME(sys_unlink) /* 10 */
+ ENTRY_NAME(hpux_execv_wrapper)
+ ENTRY_NAME(sys_chdir)
+ ENTRY_NAME(sys_time)
+ ENTRY_NAME(sys_mknod)
+ ENTRY_NAME(sys_chmod) /* 15 */
+ ENTRY_NAME(sys_chown)
+ ENTRY_NAME(hpux_brk)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_lseek)
+ ENTRY_NAME(sys_getpid) /* 20 */
+ ENTRY_NAME(hpux_mount)
+ ENTRY_NAME(sys_oldumount)
+ ENTRY_NAME(sys_setuid)
+ ENTRY_NAME(sys_getuid)
+ ENTRY_NAME(sys_stime) /* 25 */
+ ENTRY_NAME(hpux_ptrace)
+ ENTRY_NAME(sys_alarm)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_pause)
+ ENTRY_NAME(sys_utime) /* 30 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_access)
+ ENTRY_NAME(hpux_nice)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 35 */
+ ENTRY_NAME(sys_sync)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_newstat)
+ ENTRY_NAME(hpux_setpgrp3)
+ ENTRY_NAME(sys_newlstat) /* 40 */
+ ENTRY_NAME(sys_dup)
+ ENTRY_NAME(hpux_pipe_wrapper)
+ ENTRY_NAME(sys_times)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 45 */
+ ENTRY_NAME(sys_setgid)
+ ENTRY_NAME(sys_getgid)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 50 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_ioctl)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 55 */
+ ENTRY_NAME(sys_symlink)
+ ENTRY_NAME(hpux_utssys)
+ ENTRY_NAME(sys_readlink)
+ ENTRY_NAME(hpux_execve_wrapper)
+ ENTRY_NAME(sys_umask) /* 60 */
+ ENTRY_NAME(sys_chroot)
+ ENTRY_NAME(sys_fcntl)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 65 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_sbrk)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 70 */
+ ENTRY_NAME(sys_mmap)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 75 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 80 */
+ ENTRY_NAME(sys_getpgid)
+ ENTRY_NAME(sys_setpgid)
+ ENTRY_NAME(sys_setitimer)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 85 */
+ ENTRY_NAME(sys_getitimer)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_dup2) /* 90 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_newfstat)
+ ENTRY_NAME(sys_select)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 95 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 100 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 105 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 110 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 115 */
+ ENTRY_NAME(sys_gettimeofday)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 120 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_fchown)
+ ENTRY_NAME(sys_fchmod)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 125 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_rename)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 130 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_sysconf)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 135 */
+ ENTRY_NAME(sys_mkdir)
+ ENTRY_NAME(sys_rmdir)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 140 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_getrlimit)
+ ENTRY_NAME(sys_setrlimit) /* 145 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 150 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_lockf) /* 155 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 160 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 165 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 170 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 175 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 180 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_sigprocmask) /* 185 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 190 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_getdomainname)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 195 */
+ ENTRY_NAME(hpux_statfs)
+ ENTRY_NAME(hpux_fstatfs)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_waitpid) /* 200 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 205 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 210 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 215 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 220 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 225 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 230 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 235 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 240 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 245 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 250 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 255 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 260 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 265 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 270 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_fchdir)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_accept) /* 275 */
+ ENTRY_NAME(sys_bind)
+ ENTRY_NAME(sys_connect)
+ ENTRY_NAME(sys_getpeername)
+ ENTRY_NAME(sys_getsockname)
+ ENTRY_NAME(sys_getsockopt) /* 280 */
+ ENTRY_NAME(sys_listen)
+ ENTRY_NAME(sys_recv)
+ ENTRY_NAME(sys_recvfrom)
+ ENTRY_NAME(sys_recvmsg)
+ ENTRY_NAME(sys_send) /* 285 */
+ ENTRY_NAME(sys_sendmsg)
+ ENTRY_NAME(sys_sendto)
+ ENTRY_NAME(sys_setsockopt)
+ ENTRY_NAME(sys_shutdown)
+ ENTRY_NAME(sys_socket) /* 290 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 295 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 300 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 305 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 310 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 315 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 320 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 325 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 330 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_lchown)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_sysfs)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 335 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 340 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 345 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 350 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(sys_nanosleep)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 355 */
+ ENTRY_NAME(hpux_getdents)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 360 */
+ ENTRY_NAME(hpux_fstat64)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 365 */
+ ENTRY_NAME(hpux_lstat64)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_stat64)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 370 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 375 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 380 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_setpgrp)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 385 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 390 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 395 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 400 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 405 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 410 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 415 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 420 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 425 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 430 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 435 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 440 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 445 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 450 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 455 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 460 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 465 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 470 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 475 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 480 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 485 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 490 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 495 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 500 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 505 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper) /* 510 */
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+ ENTRY_NAME(hpux_unimplemented_wrapper)
+END(hpux_call_table)
+.end
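The table above is, in effect, an array of code pointers indexed by the HP-UX syscall number: every slot without a Linux emulation points at hpux_unimplemented_wrapper, so a stray call still comes back as -ENOSYS. A minimal C sketch of that layout (the handler names and the slot number used here are made up for illustration, not taken from the table):

#include <errno.h>
#include <stdio.h>

typedef long (*hpux_syscall_fn)(long, long, long);

/* Stand-in for hpux_unimplemented_wrapper: anything not filled in
 * below just reports -ENOSYS. */
static long unimplemented(long a, long b, long c)
{
	(void)a; (void)b; (void)c;
	return -ENOSYS;
}

static long fake_getpid(long a, long b, long c)
{
	(void)a; (void)b; (void)c;
	return 42;			/* placeholder result */
}

/* Every slot defaults to the unimplemented handler; implemented
 * calls are patched in by index (GCC range designators). */
static hpux_syscall_fn call_table[512] = {
	[0 ... 511] = unimplemented,
	[20]        = fake_getpid,	/* hypothetical slot number */
};

int main(void)
{
	printf("slot 20 -> %ld, slot 21 -> %ld\n",
	       call_table[20](0, 0, 0), call_table[21](0, 0, 0));
	return 0;
}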
+
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
new file mode 100644
index 00000000..0dc8543a
--- /dev/null
+++ b/arch/parisc/hpux/fs.c
@@ -0,0 +1,206 @@
+/*
+ * Implements HPUX syscalls.
+ *
+ * Copyright (C) 1999 Matthew Wilcox <willy with parisc-linux.org>
+ * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
+ * Copyright (C) 2000 John Marvin <jsm with parisc-linux.org>
+ * Copyright (C) 2000 Philipp Rumpf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+
+int hpux_execve(struct pt_regs *regs)
+{
+ int error;
+ char *filename;
+
+ filename = getname((const char __user *) regs->gr[26]);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+
+ error = do_execve(filename,
+ (const char __user *const __user *) regs->gr[25],
+ (const char __user *const __user *) regs->gr[24],
+ regs);
+
+ putname(filename);
+
+out:
+ return error;
+}
+
+struct hpux_dirent {
+ loff_t d_off;
+ ino_t d_ino;
+ short d_reclen;
+ short d_namlen;
+ char d_name[1];
+};
+
+struct getdents_callback {
+ struct hpux_dirent __user *current_dir;
+ struct hpux_dirent __user *previous;
+ int count;
+ int error;
+};
+
+#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
+
+static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
+ u64 ino, unsigned d_type)
+{
+ struct hpux_dirent __user * dirent;
+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
+ ino_t d_ino;
+ /* NAME_OFFSET() only does pointer arithmetic on the d_name member,
+  * so the (still uninitialized) dirent pointer is never dereferenced
+  * here; the result is simply the offset of d_name within a record. */
+ int reclen = ALIGN(NAME_OFFSET(dirent) + namlen + 1, sizeof(long));
+
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
+ d_ino = ino;
+ if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
+ buf->error = -EOVERFLOW;
+ return -EOVERFLOW;
+ }
+ dirent = buf->previous;
+ if (dirent)
+ if (put_user(offset, &dirent->d_off))
+ goto Efault;
+ dirent = buf->current_dir;
+ if (put_user(d_ino, &dirent->d_ino) ||
+ put_user(reclen, &dirent->d_reclen) ||
+ put_user(namlen, &dirent->d_namlen) ||
+ copy_to_user(dirent->d_name, name, namlen) ||
+ put_user(0, dirent->d_name + namlen))
+ goto Efault;
+ buf->previous = dirent;
+ buf->current_dir = (void __user *)dirent + reclen;
+ buf->count -= reclen;
+ return 0;
+Efault:
+ buf->error = -EFAULT;
+ return -EFAULT;
+}
+
+#undef NAME_OFFSET
+
+int hpux_getdents(unsigned int fd, struct hpux_dirent __user *dirent, unsigned int count)
+{
+ struct file * file;
+ struct hpux_dirent __user * lastdirent;
+ struct getdents_callback buf;
+ int error = -EBADF;
+
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ buf.current_dir = dirent;
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
+
+ error = vfs_readdir(file, filldir, &buf);
+ if (error >= 0)
+ error = buf.error;
+ lastdirent = buf.previous;
+ if (lastdirent) {
+ if (put_user(file->f_pos, &lastdirent->d_off))
+ error = -EFAULT;
+ else
+ error = count - buf.count;
+ }
+
+ fput(file);
+out:
+ return error;
+}
+
+int hpux_mount(const char *fs, const char *path, int mflag,
+ const char *fstype, const char *dataptr, int datalen)
+{
+ return -ENOSYS;
+}
+
+static int cp_hpux_stat(struct kstat *stat, struct hpux_stat64 __user *statbuf)
+{
+ struct hpux_stat64 tmp;
+
+ /* we probably want a different split here - is hpux 12:20? */
+
+ if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
+ return -EOVERFLOW;
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.st_dev = new_encode_dev(stat->dev);
+ tmp.st_ino = stat->ino;
+ tmp.st_mode = stat->mode;
+ tmp.st_nlink = stat->nlink;
+ tmp.st_uid = stat->uid;
+ tmp.st_gid = stat->gid;
+ tmp.st_rdev = new_encode_dev(stat->rdev);
+ tmp.st_size = stat->size;
+ tmp.st_atime = stat->atime.tv_sec;
+ tmp.st_mtime = stat->mtime.tv_sec;
+ tmp.st_ctime = stat->ctime.tv_sec;
+ tmp.st_blocks = stat->blocks;
+ tmp.st_blksize = stat->blksize;
+ return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
+}
+
+long hpux_stat64(const char __user *filename, struct hpux_stat64 __user *statbuf)
+{
+ struct kstat stat;
+ int error = vfs_stat(filename, &stat);
+
+ if (!error)
+ error = cp_hpux_stat(&stat, statbuf);
+
+ return error;
+}
+
+long hpux_fstat64(unsigned int fd, struct hpux_stat64 __user *statbuf)
+{
+ struct kstat stat;
+ int error = vfs_fstat(fd, &stat);
+
+ if (!error)
+ error = cp_hpux_stat(&stat, statbuf);
+
+ return error;
+}
+
+long hpux_lstat64(const char __user *filename,
+ struct hpux_stat64 __user *statbuf)
+{
+ struct kstat stat;
+ int error = vfs_lstat(filename, &stat);
+
+ if (!error)
+ error = cp_hpux_stat(&stat, statbuf);
+
+ return error;
+}
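hpux_getdents() above packs variable-length hpux_dirent records into the user buffer; d_reclen is the long-aligned distance to the next record, and each record's d_off is back-patched with the following entry's offset (the last one gets the final file position). A userspace-style sketch of walking such a buffer (the record layout is copied from the structure above; the call that fills the buffer is only hinted at, since no HP-UX binary is run here):

#define _GNU_SOURCE		/* loff_t on glibc */
#include <stdio.h>
#include <sys/types.h>

/* Same record layout as the kernel structure above. */
struct hpux_dirent {
	loff_t	d_off;
	ino_t	d_ino;
	short	d_reclen;
	short	d_namlen;
	char	d_name[1];
};

/* Walk 'nbytes' worth of records produced by the getdents path;
 * d_reclen carries us to the next record, exactly as laid out
 * by filldir(). */
static void walk_entries(const char *buf, int nbytes)
{
	int pos = 0;

	while (pos < nbytes) {
		const struct hpux_dirent *d =
			(const struct hpux_dirent *)(buf + pos);

		printf("%lu %.*s\n", (unsigned long)d->d_ino,
		       (int)d->d_namlen, d->d_name);
		pos += d->d_reclen;
	}
}

int main(void)
{
	char buf[256];
	int n = 0;	/* with real data: n = hpux_getdents(fd, buf, sizeof(buf)); */

	walk_entries(buf, n);	/* prints one line per entry when n > 0 */
	return 0;
}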
diff --git a/arch/parisc/hpux/gate.S b/arch/parisc/hpux/gate.S
new file mode 100644
index 00000000..38a1c1b8
--- /dev/null
+++ b/arch/parisc/hpux/gate.S
@@ -0,0 +1,107 @@
+/*
+ *
+ * Linux/PARISC Project (http://www.parisc-linux.org/)
+ *
+ * System call entry code Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
+ * Licensed under the GNU GPL.
+ * thanks to Philipp Rumpf, Mike Shaver and various others
+ * sorry about the wall, puffin..
+ */
+
+#include <asm/assembly.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <linux/linkage.h>
+
+ .level LEVEL
+ .text
+
+ .import hpux_call_table
+ .import hpux_syscall_exit,code
+
+ .align PAGE_SIZE
+ENTRY(hpux_gateway_page)
+ nop
+#ifdef CONFIG_64BIT
+#warning NEEDS WORK for 64-bit
+#endif
+ ldw -64(%r30), %r29 ;! 8th argument
+ ldw -60(%r30), %r19 ;! 7th argument
+ ldw -56(%r30), %r20 ;! 6th argument
+ ldw -52(%r30), %r21 ;! 5th argument
+ gate .+8, %r0 /* become privileged */
+ mtsp %r0,%sr4 /* get kernel space into sr4 */
+ mtsp %r0,%sr5 /* get kernel space into sr5 */
+ mtsp %r0,%sr6 /* get kernel space into sr6 */
+ mfsp %sr7,%r1 /* save user sr7 */
+ mtsp %r1,%sr3 /* and store it in sr3 */
+
+ mtctl %r30,%cr28
+ mfctl %cr30,%r1
+ xor %r1,%r30,%r30 /* ye olde xor trick */
+ xor %r1,%r30,%r1
+ xor %r1,%r30,%r30
+ ldo TASK_SZ_ALGN+FRAME_SIZE(%r30),%r30 /* set up kernel stack */
+
+ /* N.B.: It is critical that we don't set sr7 to 0 until r30
+ * contains a valid kernel stack pointer. It is also
+ * critical that we don't start using the kernel stack
+ * until after sr7 has been set to 0.
+ */
+
+ mtsp %r0,%sr7 /* get kernel space into sr7 */
+ STREG %r1,TASK_PT_GR30-TASK_SZ_ALGN-FRAME_SIZE(%r30) /* save usp */
+ ldo -TASK_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr in %r1 */
+
+ /* Save some registers for sigcontext and potential task
+ switch (see entry.S for the details of which ones are
+ saved/restored). TASK_PT_PSW is zeroed so we can see whether
+ a process is on a syscall or not. For an interrupt the real
+ PSW value is stored. This is needed for gdb and sys_ptrace. */
+ STREG %r0, TASK_PT_PSW(%r1)
+ STREG %r2, TASK_PT_GR2(%r1) /* preserve rp */
+ STREG %r19, TASK_PT_GR19(%r1) /* 7th argument */
+ STREG %r20, TASK_PT_GR20(%r1) /* 6th argument */
+ STREG %r21, TASK_PT_GR21(%r1) /* 5th argument */
+ STREG %r22, TASK_PT_GR22(%r1) /* syscall # */
+ STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */
+ STREG %r24, TASK_PT_GR24(%r1) /* 3rd argument */
+ STREG %r25, TASK_PT_GR25(%r1) /* 2nd argument */
+ STREG %r26, TASK_PT_GR26(%r1) /* 1st argument */
+ STREG %r27, TASK_PT_GR27(%r1) /* user dp */
+ STREG %r28, TASK_PT_GR28(%r1) /* return value 0 */
+ STREG %r28, TASK_PT_ORIG_R28(%r1) /* return value 0 (saved for signals) */
+ STREG %r29, TASK_PT_GR29(%r1) /* 8th argument */
+ STREG %r31, TASK_PT_GR31(%r1) /* preserve syscall return ptr */
+
+ ldo TASK_PT_FR0(%r1), %r27 /* save fpregs from the kernel */
+ save_fp %r27 /* or potential task switch */
+
+ mfctl %cr11, %r27 /* i.e. SAR */
+ STREG %r27, TASK_PT_SAR(%r1)
+
+ loadgp
+
+ stw %r21, -52(%r30) ;! 5th argument
+ stw %r20, -56(%r30) ;! 6th argument
+ stw %r19, -60(%r30) ;! 7th argument
+ stw %r29, -64(%r30) ;! 8th argument
+
+ ldil L%hpux_call_table, %r21
+ ldo R%hpux_call_table(%r21), %r21
+ comiclr,>>= __NR_HPUX_syscalls, %r22, %r0
+ b,n syscall_nosys
+ LDREGX %r22(%r21), %r21
+ ldil L%hpux_syscall_exit,%r2
+ be 0(%sr7,%r21)
+ ldo R%hpux_syscall_exit(%r2),%r2
+
+syscall_nosys:
+ ldil L%hpux_syscall_exit,%r1
+ be R%hpux_syscall_exit(%sr7,%r1)
+ ldo -ENOSYS(%r0),%r28
+ENDPROC(hpux_gateway_page)
+
+ .align PAGE_SIZE
+ENTRY(end_hpux_gateway_page)
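The "ye olde xor trick" in the gateway above exchanges %r1 (the task base just read from %cr30) and %r30 (the user stack pointer) with three xors and no scratch register; afterwards %r30 holds the task base and becomes the kernel stack pointer, while %r1 still carries the user sp that gets saved into TASK_PT_GR30. The same exchange in C, with arbitrary stand-in values:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long r1  = 0x10480000UL;	/* task base (from mfctl %cr30) */
	unsigned long r30 = 0x7f7f1000UL;	/* user stack pointer */

	r30 ^= r1;	/* xor %r1,%r30,%r30 */
	r1  ^= r30;	/* xor %r1,%r30,%r1  */
	r30 ^= r1;	/* xor %r1,%r30,%r30 */

	/* Now r30 holds the task base (ready to become the kernel stack)
	 * and r1 holds the user sp that still has to be saved. */
	assert(r30 == 0x10480000UL && r1 == 0x7f7f1000UL);
	printf("r30=%#lx r1=%#lx\n", r30, r1);
	return 0;
}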
diff --git a/arch/parisc/hpux/ioctl.c b/arch/parisc/hpux/ioctl.c
new file mode 100644
index 00000000..dede4765
--- /dev/null
+++ b/arch/parisc/hpux/ioctl.c
@@ -0,0 +1,72 @@
+/*
+ * Implements some necessary HPUX ioctls.
+ *
+ * Copyright (C) 1999-2002 Matthew Wilcox <willy with parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * Supported ioctls:
+ * TCGETA
+ * TCSETA
+ * TCSETAW
+ * TCSETAF
+ * TCSBRK
+ * TCXONC
+ * TCFLSH
+ * TIOCGWINSZ
+ * TIOCSWINSZ
+ * TIOCGPGRP
+ * TIOCSPGRP
+ */
+
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <asm/errno.h>
+#include <asm/ioctl.h>
+#include <asm/termios.h>
+#include <asm/uaccess.h>
+
+static int hpux_ioctl_t(int fd, unsigned long cmd, unsigned long arg)
+{
+ int result = -EOPNOTSUPP;
+ int nr = _IOC_NR(cmd);
+ switch (nr) {
+ case 106:
+ result = sys_ioctl(fd, TIOCSWINSZ, arg);
+ break;
+ case 107:
+ result = sys_ioctl(fd, TIOCGWINSZ, arg);
+ break;
+ }
+ return result;
+}
+
+int hpux_ioctl(int fd, unsigned long cmd, unsigned long arg)
+{
+ int result = -EOPNOTSUPP;
+ int type = _IOC_TYPE(cmd);
+ switch (type) {
+ case 'T':
+ /* Our structures are now compatible with HPUX's */
+ result = sys_ioctl(fd, cmd, arg);
+ break;
+ case 't':
+ result = hpux_ioctl_t(fd, cmd, arg);
+ break;
+ }
+ return result;
+}
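hpux_ioctl() passes the whole 'T' (termios) group straight through, since the structures match, and for the 't' group remaps only the window-size pair by ioctl number. A small sketch of that decode step; the full command words built with _IOW()/_IOR() here are illustrative, only the 't'/106/107 split is taken from the code above:

#include <linux/ioctl.h>	/* _IOC_TYPE(), _IOC_NR(), _IOR(), _IOW() */
#include <stdio.h>

/* Hypothetical HP-UX-style window-size commands in the 't' group. */
#define HPUX_TIOCSWINSZ	_IOW('t', 106, int)
#define HPUX_TIOCGWINSZ	_IOR('t', 107, int)

static const char *remap(unsigned long cmd)
{
	if (_IOC_TYPE(cmd) == 'T')
		return "pass through unchanged";
	if (_IOC_TYPE(cmd) == 't') {
		switch (_IOC_NR(cmd)) {
		case 106: return "remap to TIOCSWINSZ";
		case 107: return "remap to TIOCGWINSZ";
		}
	}
	return "-EOPNOTSUPP";
}

int main(void)
{
	printf("%s\n", remap(HPUX_TIOCSWINSZ));
	printf("%s\n", remap(HPUX_TIOCGWINSZ));
	return 0;
}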
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
new file mode 100644
index 00000000..6ab9580b
--- /dev/null
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -0,0 +1,970 @@
+/*
+ * Implements HPUX syscalls.
+ *
+ * Copyright (C) 1999 Matthew Wilcox <willy with parisc-linux.org>
+ * Copyright (C) 2000 Philipp Rumpf
+ * Copyright (C) 2000 John Marvin <jsm with parisc-linux.org>
+ * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
+ * Copyright (C) 2001 Nathan Neulinger <nneul at umr.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/capability.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/utsname.h>
+#include <linux/vfs.h>
+#include <linux/vmalloc.h>
+
+#include <asm/errno.h>
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+
+unsigned long hpux_brk(unsigned long addr)
+{
+ /* Sigh. Looks like HP/UX libc relies on kernel bugs. */
+ return sys_brk(addr + PAGE_SIZE);
+}
+
+int hpux_sbrk(void)
+{
+ return -ENOSYS;
+}
+
+/* Random other syscalls */
+
+int hpux_nice(int priority_change)
+{
+ return -ENOSYS;
+}
+
+int hpux_ptrace(void)
+{
+ return -ENOSYS;
+}
+
+int hpux_wait(int __user *stat_loc)
+{
+ return sys_waitpid(-1, stat_loc, 0);
+}
+
+int hpux_setpgrp(void)
+{
+ return sys_setpgid(0,0);
+}
+
+int hpux_setpgrp3(void)
+{
+ return hpux_setpgrp();
+}
+
+#define _SC_CPU_VERSION 10001
+#define _SC_OPEN_MAX 4
+#define CPU_PA_RISC1_1 0x210
+
+int hpux_sysconf(int which)
+{
+ switch (which) {
+ case _SC_CPU_VERSION:
+ return CPU_PA_RISC1_1;
+ case _SC_OPEN_MAX:
+ return INT_MAX;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*****************************************************************************/
+
+#define HPUX_UTSLEN 9
+#define HPUX_SNLEN 15
+
+struct hpux_utsname {
+ char sysname[HPUX_UTSLEN];
+ char nodename[HPUX_UTSLEN];
+ char release[HPUX_UTSLEN];
+ char version[HPUX_UTSLEN];
+ char machine[HPUX_UTSLEN];
+ char idnumber[HPUX_SNLEN];
+} ;
+
+struct hpux_ustat {
+ int32_t f_tfree; /* total free (daddr_t) */
+ u_int32_t f_tinode; /* total inodes free (ino_t) */
+ char f_fname[6]; /* filsys name */
+ char f_fpack[6]; /* filsys pack name */
+ u_int32_t f_blksize; /* filsys block size (int) */
+};
+
+/*
+ * HPUX's utssys() call. It's a collection of miscellaneous functions,
+ * alas, so there's no nice way of splitting them up.
+ */
+
+/* This function is called from hpux_utssys(); HP-UX implements
+ * ustat() as an option to utssys().
+ *
+ * Now, struct ustat on HP-UX is exactly the same as on Linux, except
+ * that it contains one additional field on the end, int32_t f_blksize.
+ * So, we could have written this function to just call the Linux
+ * sys_ustat(), (defined in linux/fs/super.c), and then just
+ * added this additional field to the user's structure. But I figure
+ * if we're gonna be digging through filesystem structures to get
+ * this, we might as well just do the whole enchilada all in one go.
+ *
+ * So, most of this function is almost identical to sys_ustat().
+ * I have placed comments at the few lines changed or added, to
+ * aid in porting forward if and when sys_ustat() is changed from
+ * its form in kernel 2.2.5.
+ */
+static int hpux_ustat(dev_t dev, struct hpux_ustat __user *ubuf)
+{
+ struct super_block *s;
+ struct hpux_ustat tmp; /* Changed to hpux_ustat */
+ struct kstatfs sbuf;
+ int err = -EINVAL;
+
+ s = user_get_super(dev);
+ if (s == NULL)
+ goto out;
+ err = statfs_by_dentry(s->s_root, &sbuf);
+ drop_super(s);
+ if (err)
+ goto out;
+
+ memset(&tmp,0,sizeof(tmp));
+
+ tmp.f_tfree = (int32_t)sbuf.f_bfree;
+ tmp.f_tinode = (u_int32_t)sbuf.f_ffree;
+ tmp.f_blksize = (u_int32_t)sbuf.f_bsize; /* Added this line */
+
+ err = copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
+out:
+ return err;
+}
+
+/*
+ * Wrapper for hpux statfs call. At the moment, just calls the linux native one
+ * and ignores the extra fields at the end of the hpux statfs struct.
+ *
+ */
+
+typedef int32_t hpux_fsid_t[2]; /* file system ID type */
+typedef uint16_t hpux_site_t;
+
+struct hpux_statfs {
+ int32_t f_type; /* type of info, zero for now */
+ int32_t f_bsize; /* fundamental file system block size */
+ int32_t f_blocks; /* total blocks in file system */
+ int32_t f_bfree; /* free block in fs */
+ int32_t f_bavail; /* free blocks avail to non-superuser */
+ int32_t f_files; /* total file nodes in file system */
+ int32_t f_ffree; /* free file nodes in fs */
+ hpux_fsid_t f_fsid; /* file system ID */
+ int32_t f_magic; /* file system magic number */
+ int32_t f_featurebits; /* file system features */
+ int32_t f_spare[4]; /* spare for later */
+ hpux_site_t f_cnode; /* cluster node where mounted */
+ int16_t f_pad;
+};
+
+static int do_statfs_hpux(struct kstatfs *st, struct hpux_statfs __user *p)
+{
+ struct hpux_statfs buf;
+ memset(&buf, 0, sizeof(buf));
+ buf.f_type = st->f_type;
+ buf.f_bsize = st->f_bsize;
+ buf.f_blocks = st->f_blocks;
+ buf.f_bfree = st->f_bfree;
+ buf.f_bavail = st->f_bavail;
+ buf.f_files = st->f_files;
+ buf.f_ffree = st->f_ffree;
+ buf.f_fsid[0] = st->f_fsid.val[0];
+ buf.f_fsid[1] = st->f_fsid.val[1];
+ if (copy_to_user(p, &buf, sizeof(buf)))
+ return -EFAULT;
+ return 0;
+}
+
+/* hpux statfs */
+asmlinkage long hpux_statfs(const char __user *pathname,
+ struct hpux_statfs __user *buf)
+{
+ struct kstatfs st;
+ int error = user_statfs(pathname, &st);
+ if (!error)
+ error = do_statfs_hpux(&st, buf);
+ return error;
+}
+
+asmlinkage long hpux_fstatfs(unsigned int fd, struct hpux_statfs __user * buf)
+{
+ struct kstatfs st;
+ int error = fd_statfs(fd, &st);
+ if (!error)
+ error = do_statfs_hpux(&st, buf);
+ return error;
+}
+
+
+/* This function is called from hpux_utssys(); HP-UX implements
+ * uname() as an option to utssys().
+ *
+ * The form of this function is pretty much copied from sys_olduname(),
+ * defined in linux/arch/i386/kernel/sys_i386.c.
+ */
+/* TODO: Are these put_user calls OK? Should they pass an int?
+ * (I copied it from sys_i386.c like this.)
+ */
+static int hpux_uname(struct hpux_utsname __user *name)
+{
+ int error;
+
+ if (!name)
+ return -EFAULT;
+ if (!access_ok(VERIFY_WRITE,name,sizeof(struct hpux_utsname)))
+ return -EFAULT;
+
+ down_read(&uts_sem);
+
+ error = __copy_to_user(&name->sysname, &utsname()->sysname,
+ HPUX_UTSLEN - 1);
+ error |= __put_user(0, name->sysname + HPUX_UTSLEN - 1);
+ error |= __copy_to_user(&name->nodename, &utsname()->nodename,
+ HPUX_UTSLEN - 1);
+ error |= __put_user(0, name->nodename + HPUX_UTSLEN - 1);
+ error |= __copy_to_user(&name->release, &utsname()->release,
+ HPUX_UTSLEN - 1);
+ error |= __put_user(0, name->release + HPUX_UTSLEN - 1);
+ error |= __copy_to_user(&name->version, &utsname()->version,
+ HPUX_UTSLEN - 1);
+ error |= __put_user(0, name->version + HPUX_UTSLEN - 1);
+ error |= __copy_to_user(&name->machine, &utsname()->machine,
+ HPUX_UTSLEN - 1);
+ error |= __put_user(0, name->machine + HPUX_UTSLEN - 1);
+
+ up_read(&uts_sem);
+
+ /* HP-UX utsname has no domainname field. */
+
+ /* TODO: Implement idnumber!!! */
+#if 0
+ error |= __put_user(0,name->idnumber);
+ error |= __put_user(0,name->idnumber+HPUX_SNLEN-1);
+#endif
+
+ error = error ? -EFAULT : 0;
+
+ return error;
+}
+
+/* Note: HP-UX just uses the old suser() function to check perms
+ * in this system call. We'll use capable(CAP_SYS_ADMIN).
+ */
+int hpux_utssys(char __user *ubuf, int n, int type)
+{
+ int len;
+ int error;
+ switch( type ) {
+ case 0:
+ /* uname(): */
+ return hpux_uname((struct hpux_utsname __user *)ubuf);
+ break ;
+ case 1:
+ /* Obsolete (used to be umask().) */
+ return -EFAULT ;
+ break ;
+ case 2:
+ /* ustat(): */
+ return hpux_ustat(new_decode_dev(n),
+ (struct hpux_ustat __user *)ubuf);
+ break;
+ case 3:
+ /* setuname():
+ *
+ * On linux (unlike HP-UX), utsname.nodename
+ * is the same as the hostname.
+ *
+ * sys_sethostname() is defined in linux/kernel/sys.c.
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ /* Unlike Linux, HP-UX returns an error if n==0: */
+ if ( n <= 0 )
+ return -EINVAL ;
+ /* Unlike Linux, HP-UX truncates it if n is too big: */
+ len = (n <= __NEW_UTS_LEN) ? n : __NEW_UTS_LEN ;
+ return sys_sethostname(ubuf, len);
+ break ;
+ case 4:
+ /* sethostname():
+ *
+ * sys_sethostname() is defined in linux/kernel/sys.c.
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ /* Unlike Linux, HP-UX returns an error if n==0: */
+ if ( n <= 0 )
+ return -EINVAL ;
+ /* Unlike Linux, HP-UX truncates it if n is too big: */
+ len = (n <= __NEW_UTS_LEN) ? n : __NEW_UTS_LEN ;
+ return sys_sethostname(ubuf, len);
+ break ;
+ case 5:
+ /* gethostname():
+ *
+ * sys_gethostname() is defined in linux/kernel/sys.c.
+ */
+ /* Unlike Linux, HP-UX returns an error if n==0: */
+ if ( n <= 0 )
+ return -EINVAL ;
+ return sys_gethostname(ubuf, n);
+ break ;
+ case 6:
+ /* Supposedly called from setuname() in libc.
+ * TODO: When and why is this called?
+ * Is it ever even called?
+ *
+ * This code should look a lot like sys_sethostname(),
+ * defined in linux/kernel/sys.c. If that gets updated,
+ * update this code similarly.
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ /* Unlike Linux, HP-UX returns an error if n==0: */
+ if ( n <= 0 )
+ return -EINVAL ;
+ /* Unlike Linux, HP-UX truncates it if n is too big: */
+ len = (n <= __NEW_UTS_LEN) ? n : __NEW_UTS_LEN ;
+ /**/
+ /* TODO: print a warning about using this? */
+ down_write(&uts_sem);
+ error = -EFAULT;
+ if (!copy_from_user(utsname()->sysname, ubuf, len)) {
+ utsname()->sysname[len] = 0;
+ error = 0;
+ }
+ up_write(&uts_sem);
+ return error;
+ break ;
+ case 7:
+ /* Sets utsname.release, if you're allowed.
+ * Undocumented. Used by swinstall to change the
+ * OS version, during OS updates. Yuck!!!
+ *
+ * This code should look a lot like sys_sethostname()
+ * in linux/kernel/sys.c. If that gets updated, update
+ * this code similarly.
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ /* Unlike Linux, HP-UX returns an error if n==0: */
+ if ( n <= 0 )
+ return -EINVAL ;
+ /* Unlike Linux, HP-UX truncates it if n is too big: */
+ len = (n <= __NEW_UTS_LEN) ? n : __NEW_UTS_LEN ;
+ /**/
+ /* TODO: print a warning about this? */
+ down_write(&uts_sem);
+ error = -EFAULT;
+ if (!copy_from_user(utsname()->release, ubuf, len)) {
+ utsname()->release[len] = 0;
+ error = 0;
+ }
+ up_write(&uts_sem);
+ return error;
+ break ;
+ default:
+ /* This system call returns -EFAULT if given an unknown type.
+ * Why not -EINVAL? I don't know, it's just not what they did.
+ */
+ return -EFAULT ;
+ }
+}
+
+int hpux_getdomainname(char __user *name, int len)
+{
+ int nlen;
+ int err = -EFAULT;
+
+ down_read(&uts_sem);
+
+ nlen = strlen(utsname()->domainname) + 1;
+
+ if (nlen < len)
+ len = nlen;
+ if(len > __NEW_UTS_LEN)
+ goto done;
+ if(copy_to_user(name, utsname()->domainname, len))
+ goto done;
+ err = 0;
+done:
+ up_read(&uts_sem);
+ return err;
+
+}
+
+int hpux_pipe(int *kstack_fildes)
+{
+ return do_pipe_flags(kstack_fildes, 0);
+}
+
+/* lies - says it works, but it really doesn't lock anything */
+int hpux_lockf(int fildes, int function, off_t size)
+{
+ return 0;
+}
+
+int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2)
+{
+ char *fsname = NULL;
+ int len = 0;
+ int fstype;
+
+/* Unimplemented HP-UX syscall emulation. Syscall #334 (sysfs)
+ Args: 1 80057bf4 0 400179f0 0 0 0 */
+ printk(KERN_DEBUG "in hpux_sysfs\n");
+ printk(KERN_DEBUG "hpux_sysfs called with opcode = %d\n", opcode);
+ printk(KERN_DEBUG "hpux_sysfs called with arg1='%lx'\n", arg1);
+
+ if ( opcode == 1 ) { /* GETFSIND */
+ char __user *user_fsname = (char __user *)arg1;
+ len = strlen_user(user_fsname);
+ printk(KERN_DEBUG "len of arg1 = %d\n", len);
+ if (len == 0)
+ return 0;
+ fsname = kmalloc(len, GFP_KERNEL);
+ if (!fsname) {
+ printk(KERN_DEBUG "failed to kmalloc fsname\n");
+ return 0;
+ }
+
+ if (copy_from_user(fsname, user_fsname, len)) {
+ printk(KERN_DEBUG "failed to copy_from_user fsname\n");
+ kfree(fsname);
+ return 0;
+ }
+
+ /* String could be altered by userspace after strlen_user(), so force
+  * a terminator into the last byte; len already counts the trailing NUL. */
+ fsname[len - 1] = '\0';
+
+ printk(KERN_DEBUG "that is '%s' as (char *)\n", fsname);
+ if ( !strcmp(fsname, "hfs") ) {
+ fstype = 0;
+ } else {
+ fstype = 0;
+ }
+
+ kfree(fsname);
+
+ printk(KERN_DEBUG "returning fstype=%d\n", fstype);
+ return fstype; /* something other than default */
+ }
+
+
+ return 0;
+}
+
+
+/* Table of syscall names and handle for unimplemented routines */
+static const char * const syscall_names[] = {
+ "nosys", /* 0 */
+ "exit",
+ "fork",
+ "read",
+ "write",
+ "open", /* 5 */
+ "close",
+ "wait",
+ "creat",
+ "link",
+ "unlink", /* 10 */
+ "execv",
+ "chdir",
+ "time",
+ "mknod",
+ "chmod", /* 15 */
+ "chown",
+ "brk",
+ "lchmod",
+ "lseek",
+ "getpid", /* 20 */
+ "mount",
+ "umount",
+ "setuid",
+ "getuid",
+ "stime", /* 25 */
+ "ptrace",
+ "alarm",
+ NULL,
+ "pause",
+ "utime", /* 30 */
+ "stty",
+ "gtty",
+ "access",
+ "nice",
+ "ftime", /* 35 */
+ "sync",
+ "kill",
+ "stat",
+ "setpgrp3",
+ "lstat", /* 40 */
+ "dup",
+ "pipe",
+ "times",
+ "profil",
+ "ki_call", /* 45 */
+ "setgid",
+ "getgid",
+ NULL,
+ NULL,
+ NULL, /* 50 */
+ "acct",
+ "set_userthreadid",
+ NULL,
+ "ioctl",
+ "reboot", /* 55 */
+ "symlink",
+ "utssys",
+ "readlink",
+ "execve",
+ "umask", /* 60 */
+ "chroot",
+ "fcntl",
+ "ulimit",
+ NULL,
+ NULL, /* 65 */
+ "vfork",
+ NULL,
+ NULL,
+ NULL,
+ NULL, /* 70 */
+ "mmap",
+ NULL,
+ "munmap",
+ "mprotect",
+ "madvise", /* 75 */
+ "vhangup",
+ "swapoff",
+ NULL,
+ "getgroups",
+ "setgroups", /* 80 */
+ "getpgrp2",
+ "setpgid/setpgrp2",
+ "setitimer",
+ "wait3",
+ "swapon", /* 85 */
+ "getitimer",
+ NULL,
+ NULL,
+ NULL,
+ "dup2", /* 90 */
+ NULL,
+ "fstat",
+ "select",
+ NULL,
+ "fsync", /* 95 */
+ "setpriority",
+ NULL,
+ NULL,
+ NULL,
+ "getpriority", /* 100 */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL, /* 105 */
+ NULL,
+ NULL,
+ "sigvector",
+ "sigblock",
+ "sigsetmask", /* 110 */
+ "sigpause",
+ "sigstack",
+ NULL,
+ NULL,
+ NULL, /* 115 */
+ "gettimeofday",
+ "getrusage",
+ NULL,
+ NULL,
+ "readv", /* 120 */
+ "writev",
+ "settimeofday",
+ "fchown",
+ "fchmod",
+ NULL, /* 125 */
+ "setresuid",
+ "setresgid",
+ "rename",
+ "truncate",
+ "ftruncate", /* 130 */
+ NULL,
+ "sysconf",
+ NULL,
+ NULL,
+ NULL, /* 135 */
+ "mkdir",
+ "rmdir",
+ NULL,
+ "sigcleanup",
+ "setcore", /* 140 */
+ NULL,
+ "gethostid",
+ "sethostid",
+ "getrlimit",
+ "setrlimit", /* 145 */
+ NULL,
+ NULL,
+ "quotactl",
+ "get_sysinfo",
+ NULL, /* 150 */
+ "privgrp",
+ "rtprio",
+ "plock",
+ NULL,
+ "lockf", /* 155 */
+ "semget",
+ NULL,
+ "semop",
+ "msgget",
+ NULL, /* 160 */
+ "msgsnd",
+ "msgrcv",
+ "shmget",
+ NULL,
+ "shmat", /* 165 */
+ "shmdt",
+ NULL,
+ "csp/nsp_init",
+ "cluster",
+ "mkrnod", /* 170 */
+ "test",
+ "unsp_open",
+ NULL,
+ "getcontext",
+ "osetcontext", /* 175 */
+ "bigio",
+ "pipenode",
+ "lsync",
+ "getmachineid",
+ "cnodeid/mysite", /* 180 */
+ "cnodes/sitels",
+ "swapclients",
+ "rmtprocess",
+ "dskless_stats",
+ "sigprocmask", /* 185 */
+ "sigpending",
+ "sigsuspend",
+ "sigaction",
+ NULL,
+ "nfssvc", /* 190 */
+ "getfh",
+ "getdomainname",
+ "setdomainname",
+ "async_daemon",
+ "getdirentries", /* 195 */
+ NULL,
+ NULL,
+ "vfsmount",
+ NULL,
+ "waitpid", /* 200 */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL, /* 205 */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL, /* 210 */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL, /* 215 */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL, /* 220 */
+ NULL,
+ NULL,
+ NULL,
+ "sigsetreturn",
+ "sigsetstatemask", /* 225 */
+ "bfactl",
+ "cs",
+ "cds",
+ NULL,
+ "pathconf", /* 230 */
+ "fpathconf",
+ NULL,
+ NULL,
+ "nfs_fcntl",
+ "ogetacl", /* 235 */
+ "ofgetacl",
+ "osetacl",
+ "ofsetacl",
+ "pstat",
+ "getaudid", /* 240 */
+ "setaudid",
+ "getaudproc",
+ "setaudproc",
+ "getevent",
+ "setevent", /* 245 */
+ "audwrite",
+ "audswitch",
+ "audctl",
+ "ogetaccess",
+ "fsctl", /* 250 */
+ "ulconnect",
+ "ulcontrol",
+ "ulcreate",
+ "uldest",
+ "ulrecv", /* 255 */
+ "ulrecvcn",
+ "ulsend",
+ "ulshutdown",
+ "swapfs",
+ "fss", /* 260 */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL, /* 265 */
+ NULL,
+ "tsync",
+ "getnumfds",
+ "poll",
+ "getmsg", /* 270 */
+ "putmsg",
+ "fchdir",
+ "getmount_cnt",
+ "getmount_entry",
+ "accept", /* 275 */
+ "bind",
+ "connect",
+ "getpeername",
+ "getsockname",
+ "getsockopt", /* 280 */
+ "listen",
+ "recv",
+ "recvfrom",
+ "recvmsg",
+ "send", /* 285 */
+ "sendmsg",
+ "sendto",
+ "setsockopt",
+ "shutdown",
+ "socket", /* 290 */
+ "socketpair",
+ "proc_open",
+ "proc_close",
+ "proc_send",
+ "proc_recv", /* 295 */
+ "proc_sendrecv",
+ "proc_syscall",
+ "ipccreate",
+ "ipcname",
+ "ipcnamerase", /* 300 */
+ "ipclookup",
+ "ipcselect",
+ "ipcconnect",
+ "ipcrecvcn",
+ "ipcsend", /* 305 */
+ "ipcrecv",
+ "ipcgetnodename",
+ "ipcsetnodename",
+ "ipccontrol",
+ "ipcshutdown", /* 310 */
+ "ipcdest",
+ "semctl",
+ "msgctl",
+ "shmctl",
+ "mpctl", /* 315 */
+ "exportfs",
+ "getpmsg",
+ "putpmsg",
+ "strioctl",
+ "msync", /* 320 */
+ "msleep",
+ "mwakeup",
+ "msem_init",
+ "msem_remove",
+ "adjtime", /* 325 */
+ "kload",
+ "fattach",
+ "fdetach",
+ "serialize",
+ "statvfs", /* 330 */
+ "fstatvfs",
+ "lchown",
+ "getsid",
+ "sysfs",
+ NULL, /* 335 */
+ NULL,
+ "sched_setparam",
+ "sched_getparam",
+ "sched_setscheduler",
+ "sched_getscheduler", /* 340 */
+ "sched_yield",
+ "sched_get_priority_max",
+ "sched_get_priority_min",
+ "sched_rr_get_interval",
+ "clock_settime", /* 345 */
+ "clock_gettime",
+ "clock_getres",
+ "timer_create",
+ "timer_delete",
+ "timer_settime", /* 350 */
+ "timer_gettime",
+ "timer_getoverrun",
+ "nanosleep",
+ "toolbox",
+ NULL, /* 355 */
+ "getdents",
+ "getcontext",
+ "sysinfo",
+ "fcntl64",
+ "ftruncate64", /* 360 */
+ "fstat64",
+ "getdirentries64",
+ "getrlimit64",
+ "lockf64",
+ "lseek64", /* 365 */
+ "lstat64",
+ "mmap64",
+ "setrlimit64",
+ "stat64",
+ "truncate64", /* 370 */
+ "ulimit64",
+ NULL,
+ NULL,
+ NULL,
+ NULL, /* 375 */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "setcontext", /* 380 */
+ "sigaltstack",
+ "waitid",
+ "setpgrp",
+ "recvmsg2",
+ "sendmsg2", /* 385 */
+ "socket2",
+ "socketpair2",
+ "setregid",
+ "lwp_create",
+ "lwp_terminate", /* 390 */
+ "lwp_wait",
+ "lwp_suspend",
+ "lwp_resume",
+ "lwp_self",
+ "lwp_abort_syscall", /* 395 */
+ "lwp_info",
+ "lwp_kill",
+ "ksleep",
+ "kwakeup",
+ "ksleep_abort", /* 400 */
+ "lwp_proc_info",
+ "lwp_exit",
+ "lwp_continue",
+ "getacl",
+ "fgetacl", /* 405 */
+ "setacl",
+ "fsetacl",
+ "getaccess",
+ "lwp_mutex_init",
+ "lwp_mutex_lock_sys", /* 410 */
+ "lwp_mutex_unlock",
+ "lwp_cond_init",
+ "lwp_cond_signal",
+ "lwp_cond_broadcast",
+ "lwp_cond_wait_sys", /* 415 */
+ "lwp_getscheduler",
+ "lwp_setscheduler",
+ "lwp_getprivate",
+ "lwp_setprivate",
+ "lwp_detach", /* 420 */
+ "mlock",
+ "munlock",
+ "mlockall",
+ "munlockall",
+ "shm_open", /* 425 */
+ "shm_unlink",
+ "sigqueue",
+ "sigwaitinfo",
+ "sigtimedwait",
+ "sigwait", /* 430 */
+ "aio_read",
+ "aio_write",
+ "lio_listio",
+ "aio_error",
+ "aio_return", /* 435 */
+ "aio_cancel",
+ "aio_suspend",
+ "aio_fsync",
+ "mq_open",
+ "mq_unlink", /* 440 */
+ "mq_send",
+ "mq_receive",
+ "mq_notify",
+ "mq_setattr",
+ "mq_getattr", /* 445 */
+ "ksem_open",
+ "ksem_unlink",
+ "ksem_close",
+ "ksem_destroy",
+ "lw_sem_incr", /* 450 */
+ "lw_sem_decr",
+ "lw_sem_read",
+ "mq_close",
+};
+static const int syscall_names_max = 453;
+
+int
+hpux_unimplemented(unsigned long arg1,unsigned long arg2,unsigned long arg3,
+ unsigned long arg4,unsigned long arg5,unsigned long arg6,
+ unsigned long arg7,unsigned long sc_num)
+{
+ /* NOTE: sc_num trashes arg8 for the few syscalls that actually
+ * have a valid 8th argument.
+ */
+ const char *name = NULL;
+ if ( sc_num <= syscall_names_max && sc_num >= 0 ) {
+ name = syscall_names[sc_num];
+ }
+
+ if ( name ) {
+ printk(KERN_DEBUG "Unimplemented HP-UX syscall emulation. Syscall #%lu (%s)\n",
+ sc_num, name);
+ } else {
+ printk(KERN_DEBUG "Unimplemented unknown HP-UX syscall emulation. Syscall #%lu\n",
+ sc_num);
+ }
+
+ printk(KERN_DEBUG " Args: %lx %lx %lx %lx %lx %lx %lx\n",
+ arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+
+ return -ENOSYS;
+}
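hpux_uname() above copies each Linux utsname field into a fixed 9-byte HP-UX field: at most HPUX_UTSLEN - 1 bytes of payload, then a forced NUL in the last byte, so anything longer is silently truncated to 8 characters. A short sketch of that truncation rule (the sample strings are invented):

#include <stdio.h>
#include <string.h>

#define HPUX_UTSLEN 9	/* field size used by hpux_uname() above */

/* Mimic the per-field copy: 8 bytes of payload at most, then a forced
 * terminator in the last slot. Like the kernel's NUL-padded 65-byte
 * utsname buffers, src must provide at least 8 readable bytes. */
static void copy_field(char dst[HPUX_UTSLEN], const char *src)
{
	memcpy(dst, src, HPUX_UTSLEN - 1);
	dst[HPUX_UTSLEN - 1] = '\0';
}

int main(void)
{
	char machine[HPUX_UTSLEN];

	copy_field(machine, "parisc64");	/* exactly 8 chars: fits */
	printf("%s\n", machine);

	copy_field(machine, "9000/785/C3700");	/* invented longer value */
	printf("%s\n", machine);		/* prints "9000/785" */
	return 0;
}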
diff --git a/arch/parisc/hpux/wrappers.S b/arch/parisc/hpux/wrappers.S
new file mode 100644
index 00000000..58c53c87
--- /dev/null
+++ b/arch/parisc/hpux/wrappers.S
@@ -0,0 +1,250 @@
+/*
+ * Linux/PARISC Project (http://www.parisc-linux.org/)
+ *
+ * HP-UX System Call Wrapper routines and System Call Return Path
+ *
+ * Copyright (C) 2000 Hewlett-Packard (John Marvin)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifdef CONFIG_64BIT
+#warning PA64 support needs more work...did first cut
+#endif
+
+#include <asm/asm-offsets.h>
+#include <asm/assembly.h>
+#include <asm/signal.h>
+#include <linux/linkage.h>
+
+ .level LEVEL
+ .text
+
+ /* These should probably go in a header file somewhere.
+ * They are duplicated in kernel/wrappers.S
+ * Possibly we should consider consolidating these
+ * register save/restore macros.
+ */
+ .macro reg_save regs
+#ifdef CONFIG_64BIT
+#warning NEEDS WORK for 64-bit
+#endif
+ STREG %r3, PT_GR3(\regs)
+ STREG %r4, PT_GR4(\regs)
+ STREG %r5, PT_GR5(\regs)
+ STREG %r6, PT_GR6(\regs)
+ STREG %r7, PT_GR7(\regs)
+ STREG %r8, PT_GR8(\regs)
+ STREG %r9, PT_GR9(\regs)
+ STREG %r10,PT_GR10(\regs)
+ STREG %r11,PT_GR11(\regs)
+ STREG %r12,PT_GR12(\regs)
+ STREG %r13,PT_GR13(\regs)
+ STREG %r14,PT_GR14(\regs)
+ STREG %r15,PT_GR15(\regs)
+ STREG %r16,PT_GR16(\regs)
+ STREG %r17,PT_GR17(\regs)
+ STREG %r18,PT_GR18(\regs)
+ .endm
+
+ .macro reg_restore regs
+ LDREG PT_GR3(\regs), %r3
+ LDREG PT_GR4(\regs), %r4
+ LDREG PT_GR5(\regs), %r5
+ LDREG PT_GR6(\regs), %r6
+ LDREG PT_GR7(\regs), %r7
+ LDREG PT_GR8(\regs), %r8
+ LDREG PT_GR9(\regs), %r9
+ LDREG PT_GR10(\regs),%r10
+ LDREG PT_GR11(\regs),%r11
+ LDREG PT_GR12(\regs),%r12
+ LDREG PT_GR13(\regs),%r13
+ LDREG PT_GR14(\regs),%r14
+ LDREG PT_GR15(\regs),%r15
+ LDREG PT_GR16(\regs),%r16
+ LDREG PT_GR17(\regs),%r17
+ LDREG PT_GR18(\regs),%r18
+ .endm
+
+
+ .import sys_fork
+
+ENTRY(hpux_fork_wrapper)
+ ldo TASK_REGS-TASK_SZ_ALGN-64(%r30),%r1 ;! get pt regs
+ ;! pointer in task
+ reg_save %r1
+
+ STREG %r2,-20(%r30)
+ ldo 64(%r30),%r30
+ STREG %r2,PT_GR19(%r1) ;! save for child
+ STREG %r30,PT_GR21(%r1) ;! save for child
+
+ LDREG PT_GR30(%r1),%r25
+ mtctl %r25,%cr29
+ copy %r1,%r24
+ bl sys_clone,%r2
+ ldi SIGCHLD,%r26
+
+ LDREG -84(%r30),%r2
+fork_return:
+ ldo -64(%r30),%r30
+ ldo TASK_REGS-TASK_SZ_ALGN-64(%r30),%r1 ;! get pt regs
+
+ reg_restore %r1
+
+ /*
+ * HP-UX wants pid (child gets parent pid, parent gets child pid)
+ * in r28 and a flag in r29 (r29 == 1 for child, 0 for parent).
+ * Linux fork returns 0 for child, pid for parent. Since the HP-UX
+ * libc stub throws away the parent pid and returns 0 for the child,
+ * we'll just return 0 for the parent pid for now. Only applications
+ * that jump directly to the gateway page (not supported) will
+ * know the difference. We can fix this later if necessary.
+ */
+
+ ldo -1024(%r0),%r1
+ comb,>>=,n %r28,%r1,fork_exit /* just let the syscall exit handle it */
+ or,= %r28,%r0,%r0
+ or,tr %r0,%r0,%r29 /* r28 <> 0, we are parent, set r29 to 0 */
+ ldo 1(%r0),%r29 /* r28 == 0, we are child, set r29 to 1 */
+
+fork_exit:
+ bv %r0(%r2)
+ nop
+ENDPROC(hpux_fork_wrapper)
+
+ /* Set the return value for the child */
+
+ENTRY(hpux_child_return)
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+ bl,n schedule_tail, %r2
+#endif
+
+ LDREG TASK_PT_GR19-TASK_SZ_ALGN-128(%r30),%r2
+ b fork_return
+ copy %r0,%r28
+ENDPROC(hpux_child_return)
+
+ .import hpux_execve
+
+ENTRY(hpux_execv_wrapper)
+ copy %r0,%r24 /* NULL environment */
+
+ENTRY(hpux_execve_wrapper)
+
+ ldo TASK_REGS-TASK_SZ_ALGN-64(%r30),%r1 ;! get pt regs
+
+ /*
+ * Do we need to save/restore r3-r18 here?
+ * I don't think so. Why would the new thread need the old
+ * thread's registers?
+ */
+
+ /* Store arg0, arg1 and arg2 so that hpux_execve will find them */
+
+ STREG %r26,PT_GR26(%r1)
+ STREG %r25,PT_GR25(%r1)
+ STREG %r24,PT_GR24(%r1)
+
+ STREG %r2,-20(%r30)
+ ldo 64(%r30),%r30
+ bl hpux_execve,%r2
+ copy %r1,%arg0
+
+ ldo -64(%r30),%r30
+ LDREG -20(%r30),%r2
+
+ /* If exec succeeded we need to load the args */
+
+ ldo -1024(%r0),%r1
+ comb,>>= %r28,%r1,exec_error
+ copy %r2,%r19
+ ldo -TASK_SZ_ALGN-64(%r30),%r1 ;! get task ptr
+ LDREG TASK_PT_GR26(%r1),%r26
+ LDREG TASK_PT_GR25(%r1),%r25
+ LDREG TASK_PT_GR24(%r1),%r24
+ LDREG TASK_PT_GR23(%r1),%r23
+ copy %r0,%r2 /* Flag to syscall_exit not to clear args */
+
+exec_error:
+ bv %r0(%r19)
+ nop
+ENDPROC(hpux_execv_wrapper)
+
+ .import hpux_pipe
+
+ /* HP-UX expects pipefd's returned in r28 & r29 */
+
+ENTRY(hpux_pipe_wrapper)
+ STREG %r2,-20(%r30)
+ ldo 64(%r30),%r30
+ bl hpux_pipe,%r2
+ ldo -56(%r30),%r26 /* pass local array to hpux_pipe */
+
+
+ ldo -1024(%r0),%r1
+ comb,>>= %r28,%r1,pipe_exit /* let syscall exit handle it */
+ LDREG -84(%r30),%r2
+
+ /* if success, load fd's from stack array */
+
+ LDREG -56(%r30),%r28
+ LDREG -52(%r30),%r29
+
+pipe_exit:
+ bv %r0(%r2)
+ ldo -64(%r30),%r30
+ENDPROC(hpux_pipe_wrapper)
+
+ .import syscall_exit
+
+ENTRY(hpux_syscall_exit)
+ /*
+ *
+ * HP-UX call return conventions:
+ *
+ * if error:
+ * r22 = 1
+ * r28 = errno value
+ * r29 = secondary return value
+ * else
+ * r22 = 0
+ * r28 = return value
+ * r29 = secondary return value
+ *
+ * For now, we'll just check to see if r28 is < (unsigned long)-1024
+ * (to handle addresses > 2 Gb) and if so set r22 to zero. If not,
+ * we'll complement r28 and set r22 to 1. Wrappers will be
+ * needed for syscalls that care about the secondary return value.
+ * The wrapper may also need a way of avoiding the following code,
+ * but we'll deal with that when it becomes necessary.
+ */
+
+ ldo -1024(%r0),%r1
+ comb,<< %r28,%r1,no_error
+ copy %r0,%r22
+ subi 0,%r28,%r28
+ ldo 1(%r0),%r22
+
+no_error:
+ b,n syscall_exit
+ENDPROC(hpux_syscall_exit)
+
+ .import hpux_unimplemented
+
+ENTRY(hpux_unimplemented_wrapper)
+ b hpux_unimplemented
+ STREG %r22,-64(%r30) /* overwrite arg8 with syscall number */
+ENDPROC(hpux_unimplemented_wrapper)
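hpux_syscall_exit above converts the Linux convention (negative errno in the return register) into HP-UX's, with %r22 acting as an error flag and %r28 carrying a positive errno; only values in the top 1024 of the address space are treated as errors, so large pointer results from mmap-like calls are not mistaken for errnos. A C sketch of the same translation (the errno value is passed through numerically unchanged, just as in the assembly; the secondary return value in %r29 is not modeled):

#include <stdio.h>

struct hpux_ret {
	unsigned long r28;	/* return value, or positive errno */
	unsigned long r22;	/* 0 = success, 1 = error */
};

/* Mirror the (unsigned long)-1024 check done in hpux_syscall_exit. */
static struct hpux_ret to_hpux(long linux_ret)
{
	struct hpux_ret r = { .r28 = (unsigned long)linux_ret, .r22 = 0 };

	if ((unsigned long)linux_ret >= (unsigned long)-1024L) {
		r.r28 = (unsigned long)(-linux_ret);	/* make errno positive */
		r.r22 = 1;
	}
	return r;
}

int main(void)
{
	struct hpux_ret ok  = to_hpux(3);	/* e.g. a freshly opened fd */
	struct hpux_ret err = to_hpux(-2);	/* -ENOENT */

	printf("ok:  r22=%lu r28=%lu\n", ok.r22,  ok.r28);
	printf("err: r22=%lu r28=%lu\n", err.r22, err.r28);
	return 0;
}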
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
new file mode 100644
index 00000000..19a434f5
--- /dev/null
+++ b/arch/parisc/include/asm/Kbuild
@@ -0,0 +1,3 @@
+include include/asm-generic/Kbuild.asm
+
+header-y += pdc.h
diff --git a/arch/parisc/include/asm/agp.h b/arch/parisc/include/asm/agp.h
new file mode 100644
index 00000000..d226ffa8
--- /dev/null
+++ b/arch/parisc/include/asm/agp.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_PARISC_AGP_H
+#define _ASM_PARISC_AGP_H
+
+/*
+ * PARISC specific AGP definitions.
+ * Copyright (c) 2006 Kyle McMartin <kyle@parisc-linux.org>
+ *
+ */
+
+#define map_page_into_agp(page) /* nothing */
+#define unmap_page_from_agp(page) /* nothing */
+#define flush_agp_cache() mb()
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
+#endif /* _ASM_PARISC_AGP_H */
diff --git a/arch/parisc/include/asm/asm-offsets.h b/arch/parisc/include/asm/asm-offsets.h
new file mode 100644
index 00000000..d370ee36
--- /dev/null
+++ b/arch/parisc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/parisc/include/asm/asmregs.h b/arch/parisc/include/asm/asmregs.h
new file mode 100644
index 00000000..d93c646e
--- /dev/null
+++ b/arch/parisc/include/asm/asmregs.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _PARISC_ASMREGS_H
+#define _PARISC_ASMREGS_H
+
+;! General Registers
+
+rp: .reg %r2
+arg3: .reg %r23
+arg2: .reg %r24
+arg1: .reg %r25
+arg0: .reg %r26
+dp: .reg %r27
+ret0: .reg %r28
+ret1: .reg %r29
+sl: .reg %r29
+sp: .reg %r30
+
+#if 0
+/* PA20_REVISIT */
+arg7: .reg r19
+arg6: .reg r20
+arg5: .reg r21
+arg4: .reg r22
+gp: .reg r27
+ap: .reg r29
+#endif
+
+
+r0: .reg %r0
+r1: .reg %r1
+r2: .reg %r2
+r3: .reg %r3
+r4: .reg %r4
+r5: .reg %r5
+r6: .reg %r6
+r7: .reg %r7
+r8: .reg %r8
+r9: .reg %r9
+r10: .reg %r10
+r11: .reg %r11
+r12: .reg %r12
+r13: .reg %r13
+r14: .reg %r14
+r15: .reg %r15
+r16: .reg %r16
+r17: .reg %r17
+r18: .reg %r18
+r19: .reg %r19
+r20: .reg %r20
+r21: .reg %r21
+r22: .reg %r22
+r23: .reg %r23
+r24: .reg %r24
+r25: .reg %r25
+r26: .reg %r26
+r27: .reg %r27
+r28: .reg %r28
+r29: .reg %r29
+r30: .reg %r30
+r31: .reg %r31
+
+
+;! Space Registers
+
+sr0: .reg %sr0
+sr1: .reg %sr1
+sr2: .reg %sr2
+sr3: .reg %sr3
+sr4: .reg %sr4
+sr5: .reg %sr5
+sr6: .reg %sr6
+sr7: .reg %sr7
+
+
+;! Floating Point Registers
+
+fr0: .reg %fr0
+fr1: .reg %fr1
+fr2: .reg %fr2
+fr3: .reg %fr3
+fr4: .reg %fr4
+fr5: .reg %fr5
+fr6: .reg %fr6
+fr7: .reg %fr7
+fr8: .reg %fr8
+fr9: .reg %fr9
+fr10: .reg %fr10
+fr11: .reg %fr11
+fr12: .reg %fr12
+fr13: .reg %fr13
+fr14: .reg %fr14
+fr15: .reg %fr15
+fr16: .reg %fr16
+fr17: .reg %fr17
+fr18: .reg %fr18
+fr19: .reg %fr19
+fr20: .reg %fr20
+fr21: .reg %fr21
+fr22: .reg %fr22
+fr23: .reg %fr23
+fr24: .reg %fr24
+fr25: .reg %fr25
+fr26: .reg %fr26
+fr27: .reg %fr27
+fr28: .reg %fr28
+fr29: .reg %fr29
+fr30: .reg %fr30
+fr31: .reg %fr31
+
+
+;! Control Registers
+
+rctr: .reg %cr0
+pidr1: .reg %cr8
+pidr2: .reg %cr9
+ccr: .reg %cr10
+sar: .reg %cr11
+pidr3: .reg %cr12
+pidr4: .reg %cr13
+iva: .reg %cr14
+eiem: .reg %cr15
+itmr: .reg %cr16
+pcsq: .reg %cr17
+pcoq: .reg %cr18
+iir: .reg %cr19
+isr: .reg %cr20
+ior: .reg %cr21
+ipsw: .reg %cr22
+eirr: .reg %cr23
+tr0: .reg %cr24
+tr1: .reg %cr25
+tr2: .reg %cr26
+tr3: .reg %cr27
+tr4: .reg %cr28
+tr5: .reg %cr29
+tr6: .reg %cr30
+tr7: .reg %cr31
+
+
+cr0: .reg %cr0
+cr8: .reg %cr8
+cr9: .reg %cr9
+cr10: .reg %cr10
+cr11: .reg %cr11
+cr12: .reg %cr12
+cr13: .reg %cr13
+cr14: .reg %cr14
+cr15: .reg %cr15
+cr16: .reg %cr16
+cr17: .reg %cr17
+cr18: .reg %cr18
+cr19: .reg %cr19
+cr20: .reg %cr20
+cr21: .reg %cr21
+cr22: .reg %cr22
+cr23: .reg %cr23
+cr24: .reg %cr24
+cr25: .reg %cr25
+cr26: .reg %cr26
+cr27: .reg %cr27
+cr28: .reg %cr28
+cr29: .reg %cr29
+cr30: .reg %cr30
+cr31: .reg %cr31
+
+#endif
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
new file mode 100644
index 00000000..89fb4000
--- /dev/null
+++ b/arch/parisc/include/asm/assembly.h
@@ -0,0 +1,520 @@
+/*
+ * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
+ * Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 1999 SuSE GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _PARISC_ASSEMBLY_H
+#define _PARISC_ASSEMBLY_H
+
+#define CALLEE_FLOAT_FRAME_SIZE 80
+
+#ifdef CONFIG_64BIT
+#define LDREG ldd
+#define STREG std
+#define LDREGX ldd,s
+#define LDREGM ldd,mb
+#define STREGM std,ma
+#define SHRREG shrd
+#define SHLREG shld
+#define ANDCM andcm,*
+#define COND(x) * ## x
+#define RP_OFFSET 16
+#define FRAME_SIZE 128
+#define CALLEE_REG_FRAME_SIZE 144
+#define ASM_ULONG_INSN .dword
+#else /* CONFIG_64BIT */
+#define LDREG ldw
+#define STREG stw
+#define LDREGX ldwx,s
+#define LDREGM ldwm
+#define STREGM stwm
+#define SHRREG shr
+#define SHLREG shlw
+#define ANDCM andcm
+#define COND(x) x
+#define RP_OFFSET 20
+#define FRAME_SIZE 64
+#define CALLEE_REG_FRAME_SIZE 128
+#define ASM_ULONG_INSN .word
+#endif
+
+#define CALLEE_SAVE_FRAME_SIZE (CALLEE_REG_FRAME_SIZE + CALLEE_FLOAT_FRAME_SIZE)
+
+#ifdef CONFIG_PA20
+#define LDCW ldcw,co
+#define BL b,l
+# ifdef CONFIG_64BIT
+# define LEVEL 2.0w
+# else
+# define LEVEL 2.0
+# endif
+#else
+#define LDCW ldcw
+#define BL bl
+#define LEVEL 1.1
+#endif
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_64BIT
+/* the 64-bit pa gnu assembler unfortunately defaults to .level 1.1 or 2.0 so
+ * work around that for now... */
+ .level 2.0w
+#endif
+
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/types.h>
+
+#include <asm/asmregs.h>
+
+ sp = 30
+ gp = 27
+ ipsw = 22
+
+ /*
+ * We provide two versions of each macro to convert from physical
+ * to virtual and vice versa. The "_r1" versions take one argument
+	 * register, but trash r1 to do the conversion. The other
+	 * versions take two arguments: a source and a destination register.
+	 * However, the source and destination registers cannot be
+	 * the same register.
+ */
+
+ .macro tophys grvirt, grphys
+ ldil L%(__PAGE_OFFSET), \grphys
+ sub \grvirt, \grphys, \grphys
+ .endm
+
+ .macro tovirt grphys, grvirt
+ ldil L%(__PAGE_OFFSET), \grvirt
+ add \grphys, \grvirt, \grvirt
+ .endm
+
+ .macro tophys_r1 gr
+ ldil L%(__PAGE_OFFSET), %r1
+ sub \gr, %r1, \gr
+ .endm
+
+ .macro tovirt_r1 gr
+ ldil L%(__PAGE_OFFSET), %r1
+ add \gr, %r1, \gr
+ .endm
+
+ .macro delay value
+ ldil L%\value, 1
+ ldo R%\value(1), 1
+ addib,UV,n -1,1,.
+ addib,NUV,n -1,1,.+8
+ nop
+ .endm
+
+ .macro debug value
+ .endm
+
+
+ /* Shift Left - note the r and t can NOT be the same! */
+ .macro shl r, sa, t
+ dep,z \r, 31-(\sa), 32-(\sa), \t
+ .endm
+
+ /* The PA 2.0 shift left */
+ .macro shlw r, sa, t
+ depw,z \r, 31-(\sa), 32-(\sa), \t
+ .endm
+
+ /* And the PA 2.0W shift left */
+ .macro shld r, sa, t
+ depd,z \r, 63-(\sa), 64-(\sa), \t
+ .endm
+
+ /* Shift Right - note the r and t can NOT be the same! */
+ .macro shr r, sa, t
+ extru \r, 31-(\sa), 32-(\sa), \t
+ .endm
+
+ /* pa20w version of shift right */
+ .macro shrd r, sa, t
+ extrd,u \r, 63-(\sa), 64-(\sa), \t
+ .endm
+
+ /* load 32-bit 'value' into 'reg' compensating for the ldil
+ * sign-extension when running in wide mode.
+ * WARNING!! neither 'value' nor 'reg' can be expressions
+ * containing '.'!!!! */
+ .macro load32 value, reg
+ ldil L%\value, \reg
+ ldo R%\value(\reg), \reg
+ .endm
+
+ .macro loadgp
+#ifdef CONFIG_64BIT
+ ldil L%__gp, %r27
+ ldo R%__gp(%r27), %r27
+#else
+ ldil L%$global$, %r27
+ ldo R%$global$(%r27), %r27
+#endif
+ .endm
+
+#define SAVE_SP(r, where) mfsp r, %r1 ! STREG %r1, where
+#define REST_SP(r, where) LDREG where, %r1 ! mtsp %r1, r
+#define SAVE_CR(r, where) mfctl r, %r1 ! STREG %r1, where
+#define REST_CR(r, where) LDREG where, %r1 ! mtctl %r1, r
+
+ .macro save_general regs
+ STREG %r1, PT_GR1 (\regs)
+ STREG %r2, PT_GR2 (\regs)
+ STREG %r3, PT_GR3 (\regs)
+ STREG %r4, PT_GR4 (\regs)
+ STREG %r5, PT_GR5 (\regs)
+ STREG %r6, PT_GR6 (\regs)
+ STREG %r7, PT_GR7 (\regs)
+ STREG %r8, PT_GR8 (\regs)
+ STREG %r9, PT_GR9 (\regs)
+ STREG %r10, PT_GR10(\regs)
+ STREG %r11, PT_GR11(\regs)
+ STREG %r12, PT_GR12(\regs)
+ STREG %r13, PT_GR13(\regs)
+ STREG %r14, PT_GR14(\regs)
+ STREG %r15, PT_GR15(\regs)
+ STREG %r16, PT_GR16(\regs)
+ STREG %r17, PT_GR17(\regs)
+ STREG %r18, PT_GR18(\regs)
+ STREG %r19, PT_GR19(\regs)
+ STREG %r20, PT_GR20(\regs)
+ STREG %r21, PT_GR21(\regs)
+ STREG %r22, PT_GR22(\regs)
+ STREG %r23, PT_GR23(\regs)
+ STREG %r24, PT_GR24(\regs)
+ STREG %r25, PT_GR25(\regs)
+ /* r26 is saved in get_stack and used to preserve a value across virt_map */
+ STREG %r27, PT_GR27(\regs)
+ STREG %r28, PT_GR28(\regs)
+ /* r29 is saved in get_stack and used to point to saved registers */
+ /* r30 stack pointer saved in get_stack */
+ STREG %r31, PT_GR31(\regs)
+ .endm
+
+ .macro rest_general regs
+ /* r1 used as a temp in rest_stack and is restored there */
+ LDREG PT_GR2 (\regs), %r2
+ LDREG PT_GR3 (\regs), %r3
+ LDREG PT_GR4 (\regs), %r4
+ LDREG PT_GR5 (\regs), %r5
+ LDREG PT_GR6 (\regs), %r6
+ LDREG PT_GR7 (\regs), %r7
+ LDREG PT_GR8 (\regs), %r8
+ LDREG PT_GR9 (\regs), %r9
+ LDREG PT_GR10(\regs), %r10
+ LDREG PT_GR11(\regs), %r11
+ LDREG PT_GR12(\regs), %r12
+ LDREG PT_GR13(\regs), %r13
+ LDREG PT_GR14(\regs), %r14
+ LDREG PT_GR15(\regs), %r15
+ LDREG PT_GR16(\regs), %r16
+ LDREG PT_GR17(\regs), %r17
+ LDREG PT_GR18(\regs), %r18
+ LDREG PT_GR19(\regs), %r19
+ LDREG PT_GR20(\regs), %r20
+ LDREG PT_GR21(\regs), %r21
+ LDREG PT_GR22(\regs), %r22
+ LDREG PT_GR23(\regs), %r23
+ LDREG PT_GR24(\regs), %r24
+ LDREG PT_GR25(\regs), %r25
+ LDREG PT_GR26(\regs), %r26
+ LDREG PT_GR27(\regs), %r27
+ LDREG PT_GR28(\regs), %r28
+ /* r29 points to register save area, and is restored in rest_stack */
+ /* r30 stack pointer restored in rest_stack */
+ LDREG PT_GR31(\regs), %r31
+ .endm
+
+ .macro save_fp regs
+ fstd,ma %fr0, 8(\regs)
+ fstd,ma %fr1, 8(\regs)
+ fstd,ma %fr2, 8(\regs)
+ fstd,ma %fr3, 8(\regs)
+ fstd,ma %fr4, 8(\regs)
+ fstd,ma %fr5, 8(\regs)
+ fstd,ma %fr6, 8(\regs)
+ fstd,ma %fr7, 8(\regs)
+ fstd,ma %fr8, 8(\regs)
+ fstd,ma %fr9, 8(\regs)
+ fstd,ma %fr10, 8(\regs)
+ fstd,ma %fr11, 8(\regs)
+ fstd,ma %fr12, 8(\regs)
+ fstd,ma %fr13, 8(\regs)
+ fstd,ma %fr14, 8(\regs)
+ fstd,ma %fr15, 8(\regs)
+ fstd,ma %fr16, 8(\regs)
+ fstd,ma %fr17, 8(\regs)
+ fstd,ma %fr18, 8(\regs)
+ fstd,ma %fr19, 8(\regs)
+ fstd,ma %fr20, 8(\regs)
+ fstd,ma %fr21, 8(\regs)
+ fstd,ma %fr22, 8(\regs)
+ fstd,ma %fr23, 8(\regs)
+ fstd,ma %fr24, 8(\regs)
+ fstd,ma %fr25, 8(\regs)
+ fstd,ma %fr26, 8(\regs)
+ fstd,ma %fr27, 8(\regs)
+ fstd,ma %fr28, 8(\regs)
+ fstd,ma %fr29, 8(\regs)
+ fstd,ma %fr30, 8(\regs)
+ fstd %fr31, 0(\regs)
+ .endm
+
+ .macro rest_fp regs
+ fldd 0(\regs), %fr31
+ fldd,mb -8(\regs), %fr30
+ fldd,mb -8(\regs), %fr29
+ fldd,mb -8(\regs), %fr28
+ fldd,mb -8(\regs), %fr27
+ fldd,mb -8(\regs), %fr26
+ fldd,mb -8(\regs), %fr25
+ fldd,mb -8(\regs), %fr24
+ fldd,mb -8(\regs), %fr23
+ fldd,mb -8(\regs), %fr22
+ fldd,mb -8(\regs), %fr21
+ fldd,mb -8(\regs), %fr20
+ fldd,mb -8(\regs), %fr19
+ fldd,mb -8(\regs), %fr18
+ fldd,mb -8(\regs), %fr17
+ fldd,mb -8(\regs), %fr16
+ fldd,mb -8(\regs), %fr15
+ fldd,mb -8(\regs), %fr14
+ fldd,mb -8(\regs), %fr13
+ fldd,mb -8(\regs), %fr12
+ fldd,mb -8(\regs), %fr11
+ fldd,mb -8(\regs), %fr10
+ fldd,mb -8(\regs), %fr9
+ fldd,mb -8(\regs), %fr8
+ fldd,mb -8(\regs), %fr7
+ fldd,mb -8(\regs), %fr6
+ fldd,mb -8(\regs), %fr5
+ fldd,mb -8(\regs), %fr4
+ fldd,mb -8(\regs), %fr3
+ fldd,mb -8(\regs), %fr2
+ fldd,mb -8(\regs), %fr1
+ fldd,mb -8(\regs), %fr0
+ .endm
+
+ .macro callee_save_float
+ fstd,ma %fr12, 8(%r30)
+ fstd,ma %fr13, 8(%r30)
+ fstd,ma %fr14, 8(%r30)
+ fstd,ma %fr15, 8(%r30)
+ fstd,ma %fr16, 8(%r30)
+ fstd,ma %fr17, 8(%r30)
+ fstd,ma %fr18, 8(%r30)
+ fstd,ma %fr19, 8(%r30)
+ fstd,ma %fr20, 8(%r30)
+ fstd,ma %fr21, 8(%r30)
+ .endm
+
+ .macro callee_rest_float
+ fldd,mb -8(%r30), %fr21
+ fldd,mb -8(%r30), %fr20
+ fldd,mb -8(%r30), %fr19
+ fldd,mb -8(%r30), %fr18
+ fldd,mb -8(%r30), %fr17
+ fldd,mb -8(%r30), %fr16
+ fldd,mb -8(%r30), %fr15
+ fldd,mb -8(%r30), %fr14
+ fldd,mb -8(%r30), %fr13
+ fldd,mb -8(%r30), %fr12
+ .endm
+
+#ifdef CONFIG_64BIT
+ .macro callee_save
+ std,ma %r3, CALLEE_REG_FRAME_SIZE(%r30)
+ mfctl %cr27, %r3
+ std %r4, -136(%r30)
+ std %r5, -128(%r30)
+ std %r6, -120(%r30)
+ std %r7, -112(%r30)
+ std %r8, -104(%r30)
+ std %r9, -96(%r30)
+ std %r10, -88(%r30)
+ std %r11, -80(%r30)
+ std %r12, -72(%r30)
+ std %r13, -64(%r30)
+ std %r14, -56(%r30)
+ std %r15, -48(%r30)
+ std %r16, -40(%r30)
+ std %r17, -32(%r30)
+ std %r18, -24(%r30)
+ std %r3, -16(%r30)
+ .endm
+
+ .macro callee_rest
+ ldd -16(%r30), %r3
+ ldd -24(%r30), %r18
+ ldd -32(%r30), %r17
+ ldd -40(%r30), %r16
+ ldd -48(%r30), %r15
+ ldd -56(%r30), %r14
+ ldd -64(%r30), %r13
+ ldd -72(%r30), %r12
+ ldd -80(%r30), %r11
+ ldd -88(%r30), %r10
+ ldd -96(%r30), %r9
+ ldd -104(%r30), %r8
+ ldd -112(%r30), %r7
+ ldd -120(%r30), %r6
+ ldd -128(%r30), %r5
+ ldd -136(%r30), %r4
+ mtctl %r3, %cr27
+ ldd,mb -CALLEE_REG_FRAME_SIZE(%r30), %r3
+ .endm
+
+#else /* ! CONFIG_64BIT */
+
+ .macro callee_save
+ stw,ma %r3, CALLEE_REG_FRAME_SIZE(%r30)
+ mfctl %cr27, %r3
+ stw %r4, -124(%r30)
+ stw %r5, -120(%r30)
+ stw %r6, -116(%r30)
+ stw %r7, -112(%r30)
+ stw %r8, -108(%r30)
+ stw %r9, -104(%r30)
+ stw %r10, -100(%r30)
+ stw %r11, -96(%r30)
+ stw %r12, -92(%r30)
+ stw %r13, -88(%r30)
+ stw %r14, -84(%r30)
+ stw %r15, -80(%r30)
+ stw %r16, -76(%r30)
+ stw %r17, -72(%r30)
+ stw %r18, -68(%r30)
+ stw %r3, -64(%r30)
+ .endm
+
+ .macro callee_rest
+ ldw -64(%r30), %r3
+ ldw -68(%r30), %r18
+ ldw -72(%r30), %r17
+ ldw -76(%r30), %r16
+ ldw -80(%r30), %r15
+ ldw -84(%r30), %r14
+ ldw -88(%r30), %r13
+ ldw -92(%r30), %r12
+ ldw -96(%r30), %r11
+ ldw -100(%r30), %r10
+ ldw -104(%r30), %r9
+ ldw -108(%r30), %r8
+ ldw -112(%r30), %r7
+ ldw -116(%r30), %r6
+ ldw -120(%r30), %r5
+ ldw -124(%r30), %r4
+ mtctl %r3, %cr27
+ ldw,mb -CALLEE_REG_FRAME_SIZE(%r30), %r3
+ .endm
+#endif /* ! CONFIG_64BIT */
+
+ .macro save_specials regs
+
+ SAVE_SP (%sr0, PT_SR0 (\regs))
+ SAVE_SP (%sr1, PT_SR1 (\regs))
+ SAVE_SP (%sr2, PT_SR2 (\regs))
+ SAVE_SP (%sr3, PT_SR3 (\regs))
+ SAVE_SP (%sr4, PT_SR4 (\regs))
+ SAVE_SP (%sr5, PT_SR5 (\regs))
+ SAVE_SP (%sr6, PT_SR6 (\regs))
+ SAVE_SP (%sr7, PT_SR7 (\regs))
+
+ SAVE_CR (%cr17, PT_IASQ0(\regs))
+ mtctl %r0, %cr17
+ SAVE_CR (%cr17, PT_IASQ1(\regs))
+
+ SAVE_CR (%cr18, PT_IAOQ0(\regs))
+ mtctl %r0, %cr18
+ SAVE_CR (%cr18, PT_IAOQ1(\regs))
+
+#ifdef CONFIG_64BIT
+	/* cr11 (sar) is a funny one.  5 bits on PA1.1 and 6 bits on PA2.0
+ * For PA2.0 mtsar or mtctl always write 6 bits, but mfctl only
+ * reads 5 bits. Use mfctl,w to read all six bits. Otherwise
+ * we lose the 6th bit on a save/restore over interrupt.
+ */
+ mfctl,w %cr11, %r1
+ STREG %r1, PT_SAR (\regs)
+#else
+ SAVE_CR (%cr11, PT_SAR (\regs))
+#endif
+ SAVE_CR (%cr19, PT_IIR (\regs))
+
+ /*
+ * Code immediately following this macro (in intr_save) relies
+ * on r8 containing ipsw.
+ */
+ mfctl %cr22, %r8
+ STREG %r8, PT_PSW(\regs)
+ .endm
+
+ .macro rest_specials regs
+
+ REST_SP (%sr0, PT_SR0 (\regs))
+ REST_SP (%sr1, PT_SR1 (\regs))
+ REST_SP (%sr2, PT_SR2 (\regs))
+ REST_SP (%sr3, PT_SR3 (\regs))
+ REST_SP (%sr4, PT_SR4 (\regs))
+ REST_SP (%sr5, PT_SR5 (\regs))
+ REST_SP (%sr6, PT_SR6 (\regs))
+ REST_SP (%sr7, PT_SR7 (\regs))
+
+ REST_CR (%cr17, PT_IASQ0(\regs))
+ REST_CR (%cr17, PT_IASQ1(\regs))
+
+ REST_CR (%cr18, PT_IAOQ0(\regs))
+ REST_CR (%cr18, PT_IAOQ1(\regs))
+
+ REST_CR (%cr11, PT_SAR (\regs))
+
+ REST_CR (%cr22, PT_PSW (\regs))
+ .endm
+
+
+ /* First step to create a "relied upon translation"
+ * See PA 2.0 Arch. page F-4 and F-5.
+ *
+ * The ssm was originally necessary due to a "PCxT bug".
+ * But someone decided it needed to be added to the architecture
+ * and this "feature" went into rev3 of PA-RISC 1.1 Arch Manual.
+ * It's been carried forward into PA 2.0 Arch as well. :^(
+ *
+ * "ssm 0,%r0" is a NOP with side effects (prefetch barrier).
+	 * rsm/ssm prevent the ifetch unit from speculatively fetching
+	 * instructions past this line in the code stream.
+	 * A PA 2.0 processor will single-step all insns in the same QUAD (4 insns).
+ */
+ .macro pcxt_ssm_bug
+ rsm PSW_SM_I,%r0
+ nop /* 1 */
+ nop /* 2 */
+ nop /* 3 */
+ nop /* 4 */
+ nop /* 5 */
+ nop /* 6 */
+ nop /* 7 */
+ .endm
+
+#endif /* __ASSEMBLY__ */
+#endif
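
The tophys/tovirt macros above only add or subtract the kernel's direct-map offset; no page tables are consulted, and the "_r1" variants do the same computation in place with r1 as scratch. A minimal C sketch of that address arithmetic, assuming a hypothetical PAGE_OFFSET value purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical direct-map base; the real value comes from <asm/page.h>. */
#define PAGE_OFFSET_EXAMPLE 0x10000000UL

/* What the tophys/tovirt assembler macros compute, expressed in C. */
static uintptr_t demo_tophys(uintptr_t vaddr) { return vaddr - PAGE_OFFSET_EXAMPLE; }
static uintptr_t demo_tovirt(uintptr_t paddr) { return paddr + PAGE_OFFSET_EXAMPLE; }

int main(void)
{
	uintptr_t virt = PAGE_OFFSET_EXAMPLE + 0x2000; /* some direct-mapped address */

	printf("virt %#lx -> phys %#lx -> virt %#lx\n",
	       (unsigned long)virt,
	       (unsigned long)demo_tophys(virt),
	       (unsigned long)demo_tovirt(demo_tophys(virt)));
	return 0;
}
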
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
new file mode 100644
index 00000000..26fd1146
--- /dev/null
+++ b/arch/parisc/include/asm/atomic.h
@@ -0,0 +1,347 @@
+/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
+ */
+
+#ifndef _ASM_PARISC_ATOMIC_H_
+#define _ASM_PARISC_ATOMIC_H_
+
+#include <linux/types.h>
+#include <asm/system.h>
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ *
+ * And probably incredibly slow on parisc. OTOH, we don't
+ * have to write any serious assembly. prumpf
+ */
+
+#ifdef CONFIG_SMP
+#include <asm/spinlock.h>
+#include <asm/cache.h> /* we use L1_CACHE_BYTES */
+
+/* Use an array of spinlocks for our atomic_ts.
+ * Hash function to index into a different SPINLOCK.
+ * Since "a" is usually an address, use one spinlock per cacheline.
+ */
+# define ATOMIC_HASH_SIZE 4
+# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
+
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+
+/* Can't use raw_spin_lock_irq because of #include problems, so
+ * this is the substitute */
+#define _atomic_spin_lock_irqsave(l,f) do { \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
+ local_irq_save(f); \
+ arch_spin_lock(s); \
+} while(0)
+
+#define _atomic_spin_unlock_irqrestore(l,f) do { \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spin_unlock(s); \
+ local_irq_restore(f); \
+} while(0)
+
+
+#else
+# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
+# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
+#endif
+
+/* This should get optimized out since it's never called.
+** Or get a link error if xchg is used "wrong".
+*/
+extern void __xchg_called_with_bad_pointer(void);
+
+
+/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
+extern unsigned long __xchg8(char, char *);
+extern unsigned long __xchg32(int, int *);
+#ifdef CONFIG_64BIT
+extern unsigned long __xchg64(unsigned long, unsigned long *);
+#endif
+
+/* optimizer better get rid of switch since size is a constant */
+static __inline__ unsigned long
+__xchg(unsigned long x, __volatile__ void * ptr, int size)
+{
+ switch(size) {
+#ifdef CONFIG_64BIT
+ case 8: return __xchg64(x,(unsigned long *) ptr);
+#endif
+ case 4: return __xchg32((int) x, (int *) ptr);
+ case 1: return __xchg8((char) x, (char *) ptr);
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+
+/*
+** REVISIT - Abandoned use of LDCW in xchg() for now:
+** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
+** o and while we are at it, could CONFIG_64BIT code use LDCD too?
+**
+** if (__builtin_constant_p(x) && (x == NULL))
+** if (((unsigned long)p & 0xf) == 0)
+** return __ldcw(p);
+*/
+#define xchg(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+/* bug catcher for when unsupported size is used - won't link */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
+extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
+extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);
+
+/* don't worry...optimizer will get rid of most of this */
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
+{
+ switch(size) {
+#ifdef CONFIG_64BIT
+ case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+#endif
+ case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr,o,n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new_, int size)
+{
+ switch (size) {
+#ifdef CONFIG_64BIT
+ case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+#endif
+ case 4: return __cmpxchg_u32(ptr, old, new_);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new_, size);
+ }
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#ifdef CONFIG_64BIT
+#define cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+ })
+#else
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
+/*
+ * Note that we need not lock read accesses - aligned word writes/reads
+ * are atomic, so a reader never sees inconsistent values.
+ */
+
+/* It's possible to reduce all atomic operations to either
+ * __atomic_add_return, atomic_set and atomic_read (the latter
+ * is there only for consistency).
+ */
+
+static __inline__ int __atomic_add_return(int i, atomic_t *v)
+{
+ int ret;
+ unsigned long flags;
+ _atomic_spin_lock_irqsave(v, flags);
+
+ ret = (v->counter += i);
+
+ _atomic_spin_unlock_irqrestore(v, flags);
+ return ret;
+}
+
+static __inline__ void atomic_set(atomic_t *v, int i)
+{
+ unsigned long flags;
+ _atomic_spin_lock_irqsave(v, flags);
+
+ v->counter = i;
+
+ _atomic_spin_unlock_irqrestore(v, flags);
+}
+
+static __inline__ int atomic_read(const atomic_t *v)
+{
+ return (*(volatile int *)&(v)->counter);
+}
+
+/* exported interface */
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#define atomic_add(i,v) ((void)(__atomic_add_return( (i),(v))))
+#define atomic_sub(i,v) ((void)(__atomic_add_return(-(i),(v))))
+#define atomic_inc(v) ((void)(__atomic_add_return( 1,(v))))
+#define atomic_dec(v) ((void)(__atomic_add_return( -1,(v))))
+
+#define atomic_add_return(i,v) (__atomic_add_return( (i),(v)))
+#define atomic_sub_return(i,v) (__atomic_add_return(-(i),(v)))
+#define atomic_inc_return(v) (__atomic_add_return( 1,(v)))
+#define atomic_dec_return(v) (__atomic_add_return( -1,(v)))
+
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
+
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
+
+#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
+
+#ifdef CONFIG_64BIT
+
+#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+
+static __inline__ s64
+__atomic64_add_return(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ unsigned long flags;
+ _atomic_spin_lock_irqsave(v, flags);
+
+ ret = (v->counter += i);
+
+ _atomic_spin_unlock_irqrestore(v, flags);
+ return ret;
+}
+
+static __inline__ void
+atomic64_set(atomic64_t *v, s64 i)
+{
+ unsigned long flags;
+ _atomic_spin_lock_irqsave(v, flags);
+
+ v->counter = i;
+
+ _atomic_spin_unlock_irqrestore(v, flags);
+}
+
+static __inline__ s64
+atomic64_read(const atomic64_t *v)
+{
+ return (*(volatile long *)&(v)->counter);
+}
+
+#define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)(i)),(v))))
+#define atomic64_sub(i,v) ((void)(__atomic64_add_return(-((s64)(i)),(v))))
+#define atomic64_inc(v) ((void)(__atomic64_add_return( 1,(v))))
+#define atomic64_dec(v) ((void)(__atomic64_add_return( -1,(v))))
+
+#define atomic64_add_return(i,v) (__atomic64_add_return( ((s64)(i)),(v)))
+#define atomic64_sub_return(i,v) (__atomic64_add_return(-((s64)(i)),(v)))
+#define atomic64_inc_return(v) (__atomic64_add_return( 1,(v)))
+#define atomic64_dec_return(v) (__atomic64_add_return( -1,(v)))
+
+#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
+#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i),(v)) == 0)
+
+/* exported interface */
+#define atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long c, old;
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic64_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+#else /* CONFIG_64BIT */
+
+#include <asm-generic/atomic64.h>
+
+#endif /* !CONFIG_64BIT */
+
+#include <asm-generic/atomic-long.h>
+
+#endif /* _ASM_PARISC_ATOMIC_H_ */
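
On SMP the atomic_t operations above are built from a small, address-hashed array of spinlocks rather than from load-locked instructions. A freestanding sketch of that scheme, using pthread mutexes as a stand-in for arch_spinlock_t; the hash and the add-and-return mirror ATOMIC_HASH() and __atomic_add_return():

#include <pthread.h>
#include <stdio.h>

#define ATOMIC_HASH_SIZE 4
#define L1_CACHE_BYTES   64 /* assumed PA 2.0 line size */

/* One lock per hash bucket; addresses in the same cacheline share a lock. */
static pthread_mutex_t atomic_hash[ATOMIC_HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static pthread_mutex_t *atomic_hash_lock(const void *a)
{
	return &atomic_hash[((unsigned long)a / L1_CACHE_BYTES)
			    & (ATOMIC_HASH_SIZE - 1)];
}

typedef struct { volatile int counter; } demo_atomic_t;

/* Mirrors __atomic_add_return(): take the hashed lock, update, return. */
static int demo_atomic_add_return(int i, demo_atomic_t *v)
{
	pthread_mutex_t *lock = atomic_hash_lock(v);
	int ret;

	pthread_mutex_lock(lock);
	ret = (v->counter += i);
	pthread_mutex_unlock(lock);
	return ret;
}

int main(void)
{
	demo_atomic_t v = { 0 };

	printf("%d\n", demo_atomic_add_return(5, &v)); /* prints 5 */
	return 0;
}

Because the hash keys on the cacheline of the atomic_t's address, two counters in the same line contend for the same bucket lock, which is exactly the trade-off the comment above describes.
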
diff --git a/arch/parisc/include/asm/auxvec.h b/arch/parisc/include/asm/auxvec.h
new file mode 100644
index 00000000..9c3ac4b8
--- /dev/null
+++ b/arch/parisc/include/asm/auxvec.h
@@ -0,0 +1,4 @@
+#ifndef __ASMPARISC_AUXVEC_H
+#define __ASMPARISC_AUXVEC_H
+
+#endif
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
new file mode 100644
index 00000000..43c516fa
--- /dev/null
+++ b/arch/parisc/include/asm/bitops.h
@@ -0,0 +1,237 @@
+#ifndef _PARISC_BITOPS_H
+#define _PARISC_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+
+/*
+ * HP-PARISC specific bit operations
+ * for a detailed description of the functions, please refer
+ * to include/asm-i386/bitops.h or the kerneldoc
+ */
+
+#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
+
+
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
+
+/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
+ * on use of volatile and __*_bit() (set/clear/change):
+ * *_bit() variants want the use of volatile.
+ * __*_bit() variants are "relaxed" and don't use a spinlock or volatile.
+ */
+
+static __inline__ void set_bit(int nr, volatile unsigned long * addr)
+{
+ unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long flags;
+
+ addr += (nr >> SHIFT_PER_LONG);
+ _atomic_spin_lock_irqsave(addr, flags);
+ *addr |= mask;
+ _atomic_spin_unlock_irqrestore(addr, flags);
+}
+
+static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
+{
+ unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
+ unsigned long flags;
+
+ addr += (nr >> SHIFT_PER_LONG);
+ _atomic_spin_lock_irqsave(addr, flags);
+ *addr &= mask;
+ _atomic_spin_unlock_irqrestore(addr, flags);
+}
+
+static __inline__ void change_bit(int nr, volatile unsigned long * addr)
+{
+ unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long flags;
+
+ addr += (nr >> SHIFT_PER_LONG);
+ _atomic_spin_lock_irqsave(addr, flags);
+ *addr ^= mask;
+ _atomic_spin_unlock_irqrestore(addr, flags);
+}
+
+static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
+{
+ unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long old;
+ unsigned long flags;
+ int set;
+
+ addr += (nr >> SHIFT_PER_LONG);
+ _atomic_spin_lock_irqsave(addr, flags);
+ old = *addr;
+ set = (old & mask) ? 1 : 0;
+ if (!set)
+ *addr = old | mask;
+ _atomic_spin_unlock_irqrestore(addr, flags);
+
+ return set;
+}
+
+static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
+{
+ unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long old;
+ unsigned long flags;
+ int set;
+
+ addr += (nr >> SHIFT_PER_LONG);
+ _atomic_spin_lock_irqsave(addr, flags);
+ old = *addr;
+ set = (old & mask) ? 1 : 0;
+ if (set)
+ *addr = old & ~mask;
+ _atomic_spin_unlock_irqrestore(addr, flags);
+
+ return set;
+}
+
+static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
+{
+ unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+ unsigned long oldbit;
+ unsigned long flags;
+
+ addr += (nr >> SHIFT_PER_LONG);
+ _atomic_spin_lock_irqsave(addr, flags);
+ oldbit = *addr;
+ *addr = oldbit ^ mask;
+ _atomic_spin_unlock_irqrestore(addr, flags);
+
+ return (oldbit & mask) ? 1 : 0;
+}
+
+#include <asm-generic/bitops/non-atomic.h>
+
+#ifdef __KERNEL__
+
+/**
+ * __ffs - find first set bit in word. Returns 0 to "BITS_PER_LONG-1".
+ * @word: The word to search
+ *
+ * The __ffs() return value is undefined if no bit is set.
+ *
+ * 32-bit fast __ffs by LaMont Jones "lamont At hp com".
+ * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
+ * (with help from willy/jejb to get the semantics right)
+ *
+ * This algorithm avoids branches by making use of nullification.
+ * One side effect of the "extr" instructions is that they set the PSW[N] bit.
+ * How PSW[N] (nullify next insn) gets set is determined by the
+ * "condition" field (eg "<>" or "TR" below) in the extr* insn.
+ * Only the 1st and one of either the 2nd or 3rd insn will get executed.
+ * Each set of 3 insns will get executed in 2 cycles on PA8x00 vs 16 or so
+ * cycles for each mispredicted branch.
+ */
+
+static __inline__ unsigned long __ffs(unsigned long x)
+{
+ unsigned long ret;
+
+ __asm__(
+#ifdef CONFIG_64BIT
+ " ldi 63,%1\n"
+ " extrd,u,*<> %0,63,32,%%r0\n"
+ " extrd,u,*TR %0,31,32,%0\n" /* move top 32-bits down */
+ " addi -32,%1,%1\n"
+#else
+ " ldi 31,%1\n"
+#endif
+ " extru,<> %0,31,16,%%r0\n"
+ " extru,TR %0,15,16,%0\n" /* xxxx0000 -> 0000xxxx */
+ " addi -16,%1,%1\n"
+ " extru,<> %0,31,8,%%r0\n"
+ " extru,TR %0,23,8,%0\n" /* 0000xx00 -> 000000xx */
+ " addi -8,%1,%1\n"
+ " extru,<> %0,31,4,%%r0\n"
+ " extru,TR %0,27,4,%0\n" /* 000000x0 -> 0000000x */
+ " addi -4,%1,%1\n"
+ " extru,<> %0,31,2,%%r0\n"
+ " extru,TR %0,29,2,%0\n" /* 0000000y, 1100b -> 0011b */
+ " addi -2,%1,%1\n"
+ " extru,= %0,31,1,%%r0\n" /* check last bit */
+ " addi -1,%1,%1\n"
+ : "+r" (x), "=r" (ret) );
+ return ret;
+}
+
+#include <asm-generic/bitops/ffz.h>
+
+/*
+ * ffs: find first bit set. Returns 1 to BITS_PER_LONG, or 0 if none set.
+ * This is defined the same way as the libc and compiler builtin
+ * ffs routines, and therefore differs in spirit from the above ffz (man ffs).
+ */
+static __inline__ int ffs(int x)
+{
+ return x ? (__ffs((unsigned long)x) + 1) : 0;
+}
+
+/*
+ * fls: find last (most significant) bit set.
+ * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+static __inline__ int fls(int x)
+{
+ int ret;
+ if (!x)
+ return 0;
+
+ __asm__(
+ " ldi 1,%1\n"
+ " extru,<> %0,15,16,%%r0\n"
+ " zdep,TR %0,15,16,%0\n" /* xxxx0000 */
+ " addi 16,%1,%1\n"
+ " extru,<> %0,7,8,%%r0\n"
+ " zdep,TR %0,23,24,%0\n" /* xx000000 */
+ " addi 8,%1,%1\n"
+ " extru,<> %0,3,4,%%r0\n"
+ " zdep,TR %0,27,28,%0\n" /* x0000000 */
+ " addi 4,%1,%1\n"
+ " extru,<> %0,1,2,%%r0\n"
+ " zdep,TR %0,29,30,%0\n" /* y0000000 (y&3 = 0) */
+ " addi 2,%1,%1\n"
+ " extru,= %0,0,1,%%r0\n"
+ " addi 1,%1,%1\n" /* if y & 8, add 1 */
+ : "+r" (x), "=r" (ret) );
+
+ return ret;
+}
+
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/sched.h>
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/bitops/find.h>
+
+#ifdef __KERNEL__
+
+#include <asm-generic/bitops/le.h>
+
+/* shifting by '3' converts a byte offset into a bit offset (8 bits per byte) */
+#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)
+
+#define ext2_set_bit_atomic(l,nr,addr) \
+ test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+#define ext2_clear_bit_atomic(l,nr,addr) \
+ test_and_clear_bit( (nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+
+#endif /* __KERNEL__ */
+
+#endif /* _PARISC_BITOPS_H */
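
The inline assembly in __ffs() above performs a branch-free binary search: each extr tests the low half of the remaining bits and either skips the shift (the bit is in the low half, so the width is subtracted) or shifts the upper half down. The same halving search in portable C, shown for a 32-bit word purely as an illustration of the algorithm, not as the kernel's implementation:

#include <stdio.h>

/* Index of the least significant set bit of a 32-bit word.
 * Undefined if x == 0, matching __ffs() above. */
static unsigned int demo_ffs32(unsigned int x)
{
	unsigned int ret = 31;

	if (x & 0x0000ffffu) { ret -= 16; } else { x >>= 16; }
	if (x & 0x000000ffu) { ret -= 8;  } else { x >>= 8;  }
	if (x & 0x0000000fu) { ret -= 4;  } else { x >>= 4;  }
	if (x & 0x00000003u) { ret -= 2;  } else { x >>= 2;  }
	if (x & 0x00000001u) { ret -= 1;  }

	return ret;
}

int main(void)
{
	/* prints: 0 16 31 */
	printf("%u %u %u\n", demo_ffs32(1), demo_ffs32(0x10000u), demo_ffs32(0x80000000u));
	return 0;
}
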
diff --git a/arch/parisc/include/asm/bitsperlong.h b/arch/parisc/include/asm/bitsperlong.h
new file mode 100644
index 00000000..75196b41
--- /dev/null
+++ b/arch/parisc/include/asm/bitsperlong.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_PARISC_BITSPERLONG_H
+#define __ASM_PARISC_BITSPERLONG_H
+
+/*
+ * using CONFIG_* outside of __KERNEL__ is wrong,
+ * __LP64__ was also removed from headers, so what
+ * is the right approach on parisc?
+ * -arnd
+ */
+#if (defined(__KERNEL__) && defined(CONFIG_64BIT)) || defined (__LP64__)
+#define __BITS_PER_LONG 64
+#define SHIFT_PER_LONG 6
+#else
+#define __BITS_PER_LONG 32
+#define SHIFT_PER_LONG 5
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_PARISC_BITSPERLONG_H */
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
new file mode 100644
index 00000000..72cfdb0c
--- /dev/null
+++ b/arch/parisc/include/asm/bug.h
@@ -0,0 +1,92 @@
+#ifndef _PARISC_BUG_H
+#define _PARISC_BUG_H
+
+/*
+ * Tell the user there is some problem.
+ * The offending file and line are encoded in the __bug_table section.
+ */
+
+#ifdef CONFIG_BUG
+#define HAVE_ARCH_BUG
+#define HAVE_ARCH_WARN_ON
+
+/* the break instruction is used as the BUG() marker. */
+#define PARISC_BUG_BREAK_ASM "break 0x1f, 0x1fff"
+#define PARISC_BUG_BREAK_INSN 0x03ffe01f /* PARISC_BUG_BREAK_ASM */
+
+#if defined(CONFIG_64BIT)
+#define ASM_WORD_INSN ".dword\t"
+#else
+#define ASM_WORD_INSN ".word\t"
+#endif
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define BUG() \
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+ "\t.pushsection __bug_table,\"a\"\n" \
+ "2:\t" ASM_WORD_INSN "1b, %c0\n" \
+ "\t.short %c1, %c2\n" \
+ "\t.org 2b+%c3\n" \
+ "\t.popsection" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (0), "i" (sizeof(struct bug_entry)) ); \
+ unreachable(); \
+ } while(0)
+
+#else
+#define BUG() \
+ do { \
+ asm volatile(PARISC_BUG_BREAK_ASM : : ); \
+ unreachable(); \
+ } while(0)
+#endif
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define __WARN_TAINT(taint) \
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+ "\t.pushsection __bug_table,\"a\"\n" \
+ "2:\t" ASM_WORD_INSN "1b, %c0\n" \
+ "\t.short %c1, %c2\n" \
+ "\t.org 2b+%c3\n" \
+ "\t.popsection" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (BUGFLAG_TAINT(taint)), \
+ "i" (sizeof(struct bug_entry)) ); \
+ } while(0)
+#else
+#define __WARN_TAINT(taint) \
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+ "\t.pushsection __bug_table,\"a\"\n" \
+ "2:\t" ASM_WORD_INSN "1b\n" \
+ "\t.short %c0\n" \
+ "\t.org 2b+%c1\n" \
+ "\t.popsection" \
+ : : "i" (BUGFLAG_TAINT(taint)), \
+ "i" (sizeof(struct bug_entry)) ); \
+ } while(0)
+#endif
+
+
+#define WARN_ON(x) ({ \
+ int __ret_warn_on = !!(x); \
+ if (__builtin_constant_p(__ret_warn_on)) { \
+ if (__ret_warn_on) \
+ __WARN(); \
+ } else { \
+ if (unlikely(__ret_warn_on)) \
+ __WARN(); \
+ } \
+ unlikely(__ret_warn_on); \
+})
+
+#endif
+
+#include <asm-generic/bug.h>
+#endif
+
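BUG() above plants a break instruction and records the file and line in the __bug_table section, so the trap handler can look the faulting site up later. A userspace sketch of the same "collect records in a named section" pattern, using a made-up demo_bug_entry layout that only stands in for the kernel's struct bug_entry (GCC/ELF specific, relies on the linker's __start_/__stop_ section symbols):

#include <stdio.h>

/* Illustrative record; not the kernel's struct bug_entry layout. */
struct demo_bug_entry {
	const char *file;
	unsigned short line;
};

/* Linker-provided bounds of the custom section (GNU ld convention). */
extern const struct demo_bug_entry __start_demo_bug_table[];
extern const struct demo_bug_entry __stop_demo_bug_table[];

#define DEMO_BUG() do {							\
	static const struct demo_bug_entry _entry			\
		__attribute__((section("demo_bug_table"), used)) =	\
		{ __FILE__, __LINE__ };					\
	(void)_entry;							\
} while (0)

static void might_go_wrong(void)
{
	DEMO_BUG();	/* emits a record, analogous to the .pushsection above */
}

int main(void)
{
	const struct demo_bug_entry *e;

	might_go_wrong();
	for (e = __start_demo_bug_table; e < __stop_demo_bug_table; e++)
		printf("bug record: %s:%u\n", e->file, e->line);
	return 0;
}
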
diff --git a/arch/parisc/include/asm/bugs.h b/arch/parisc/include/asm/bugs.h
new file mode 100644
index 00000000..9e628434
--- /dev/null
+++ b/arch/parisc/include/asm/bugs.h
@@ -0,0 +1,19 @@
+/*
+ * include/asm-parisc/bugs.h
+ *
+ * Copyright (C) 1999 Mike Shaver
+ */
+
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ * void check_bugs(void);
+ */
+
+#include <asm/processor.h>
+
+static inline void check_bugs(void)
+{
+// identify_cpu(&boot_cpu_data);
+}
diff --git a/arch/parisc/include/asm/byteorder.h b/arch/parisc/include/asm/byteorder.h
new file mode 100644
index 00000000..58af2c5f
--- /dev/null
+++ b/arch/parisc/include/asm/byteorder.h
@@ -0,0 +1,6 @@
+#ifndef _PARISC_BYTEORDER_H
+#define _PARISC_BYTEORDER_H
+
+#include <linux/byteorder/big_endian.h>
+
+#endif /* _PARISC_BYTEORDER_H */
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
new file mode 100644
index 00000000..47f11c70
--- /dev/null
+++ b/arch/parisc/include/asm/cache.h
@@ -0,0 +1,60 @@
+/*
+ * include/asm-parisc/cache.h
+ */
+
+#ifndef __ARCH_PARISC_CACHE_H
+#define __ARCH_PARISC_CACHE_H
+
+
+/*
+ * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
+ * 32-byte cachelines. The default configuration is not for SMP anyway,
+ * so if you're building for SMP, you should select the appropriate
+ * processor type. There is a potential livelock danger when running
+ * a machine with this value set too small, but it's more probable you'll
+ * just ruin performance.
+ */
+#ifdef CONFIG_PA20
+#define L1_CACHE_BYTES 64
+#define L1_CACHE_SHIFT 6
+#else
+#define L1_CACHE_BYTES 32
+#define L1_CACHE_SHIFT 5
+#endif
+
+#ifndef __ASSEMBLY__
+
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+void parisc_cache_init(void); /* initializes cache-flushing */
+void disable_sr_hashing_asm(int); /* low level support for above */
+void disable_sr_hashing(void); /* turns off space register hashing */
+void free_sid(unsigned long);
+unsigned long alloc_sid(void);
+
+struct seq_file;
+extern void show_cache_info(struct seq_file *m);
+
+extern int split_tlb;
+extern int dcache_stride;
+extern int icache_stride;
+extern struct pdc_cache_info cache_info;
+void parisc_setup_cache_timing(void);
+
+#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" : : "r" (addr));
+#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" : : "r" (addr));
+#define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" : : "r" (addr));
+
+#endif /* ! __ASSEMBLY__ */
+
+/* Classes of processor wrt: disabling space register hashing */
+
+#define SRHASH_PCXST 0 /* pcxs, pcxt, pcxt_ */
+#define SRHASH_PCXL 1 /* pcxl */
+#define SRHASH_PA20 2 /* pcxu, pcxu_, pcxw, pcxw_ */
+
+#endif
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
new file mode 100644
index 00000000..da601dd3
--- /dev/null
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -0,0 +1,161 @@
+#ifndef _PARISC_CACHEFLUSH_H
+#define _PARISC_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/tlbflush.h>
+
+/* The usual comment is "Caches aren't brain-dead on the <architecture>".
+ * Unfortunately, that doesn't apply to PA-RISC. */
+
+/* Internal implementation */
+void flush_data_cache_local(void *); /* flushes local data-cache only */
+void flush_instruction_cache_local(void *); /* flushes local code-cache only */
+#ifdef CONFIG_SMP
+void flush_data_cache(void); /* flushes data-cache only (all processors) */
+void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
+#else
+#define flush_data_cache() flush_data_cache_local(NULL)
+#define flush_instruction_cache() flush_instruction_cache_local(NULL)
+#endif
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+void flush_user_icache_range_asm(unsigned long, unsigned long);
+void flush_kernel_icache_range_asm(unsigned long, unsigned long);
+void flush_user_dcache_range_asm(unsigned long, unsigned long);
+void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void flush_kernel_dcache_page_asm(void *);
+void flush_kernel_icache_page(void *);
+void flush_user_dcache_range(unsigned long, unsigned long);
+void flush_user_icache_range(unsigned long, unsigned long);
+
+/* Cache flush operations */
+
+void flush_cache_all_local(void);
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+void flush_kernel_dcache_page_addr(void *addr);
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+ flush_kernel_dcache_page_addr(page_address(page));
+}
+
+#define flush_kernel_dcache_range(start,size) \
+ flush_kernel_dcache_range_asm((start), (start)+(size));
+/* vmap range flushes and invalidates. Architecturally, we don't need
+ * the invalidate, because the CPU should refuse to speculate once an
+ * area has been flushed, so invalidate is left empty */
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+ unsigned long start = (unsigned long)vaddr;
+
+ flush_kernel_dcache_range_asm(start, start + size);
+}
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+ unsigned long start = (unsigned long)vaddr;
+ void *cursor = vaddr;
+
+ for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
+ struct page *page = vmalloc_to_page(cursor);
+
+ if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+ flush_kernel_dcache_page(page);
+ }
+ flush_kernel_dcache_range_asm(start, start + size);
+}
+
+#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vunmap(start, end) flush_cache_all()
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+extern void flush_dcache_page(struct page *page);
+
+#define flush_dcache_mmap_lock(mapping) \
+ spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) \
+ spin_unlock_irq(&(mapping)->tree_lock)
+
+#define flush_icache_page(vma,page) do { \
+ flush_kernel_dcache_page(page); \
+ flush_kernel_icache_page(page_address(page)); \
+} while (0)
+
+#define flush_icache_range(s,e) do { \
+ flush_kernel_dcache_range_asm(s,e); \
+ flush_kernel_icache_range_asm(s,e); \
+} while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ flush_cache_page(vma, vaddr, page_to_pfn(page)); \
+ memcpy(dst, src, len); \
+ flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ flush_cache_page(vma, vaddr, page_to_pfn(page)); \
+ memcpy(dst, src, len); \
+} while (0)
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
+void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+
+/* defined in pacache.S exported in cache.c used by flush_anon_page */
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
+static inline void
+flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+{
+ if (PageAnon(page)) {
+ flush_tlb_page(vma, vmaddr);
+ flush_dcache_page_asm(page_to_phys(page), vmaddr);
+ }
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
+#ifdef CONFIG_PA8X00
+/* Only pa8800, pa8900 needs this */
+
+#include <asm/kmap_types.h>
+
+#define ARCH_HAS_KMAP
+
+void kunmap_parisc(void *addr);
+
+static inline void *kmap(struct page *page)
+{
+ might_sleep();
+ return page_address(page);
+}
+
+#define kunmap(page) kunmap_parisc(page_address(page))
+
+static inline void *__kmap_atomic(struct page *page)
+{
+ pagefault_disable();
+ return page_address(page);
+}
+
+static inline void __kunmap_atomic(void *addr)
+{
+ kunmap_parisc(addr);
+ pagefault_enable();
+}
+
+#define kmap_atomic_prot(page, prot) kmap_atomic(page)
+#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
+#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
+#endif
+
+#endif /* _PARISC_CACHEFLUSH_H */
+
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
new file mode 100644
index 00000000..c84b2fcb
--- /dev/null
+++ b/arch/parisc/include/asm/checksum.h
@@ -0,0 +1,210 @@
+#ifndef _PARISC_CHECKSUM_H
+#define _PARISC_CHECKSUM_H
+
+#include <linux/in6.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *, int, __wsum);
+
+/*
+ * The same as csum_partial, but copies from src while it checksums.
+ *
+ * Here it is even more important to align src and dst on a 32-bit (or,
+ * even better, a 64-bit) boundary
+ */
+extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum);
+
+/*
+ * this is a new version of the above that records errors it finds in *errp,
+ * but continues and zeros the rest of the buffer.
+ */
+extern __wsum csum_partial_copy_from_user(const void __user *src,
+ void *dst, int len, __wsum sum, int *errp);
+
+/*
+ * Optimized for IP headers, which always checksum on 4 octet boundaries.
+ *
+ * Written by Randolph Chung <tausq@debian.org>, and then mucked with by
+ * LaMont Jones <lamont@debian.org>
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned int sum;
+
+ __asm__ __volatile__ (
+" ldws,ma 4(%1), %0\n"
+" addib,<= -4, %2, 2f\n"
+"\n"
+" ldws 4(%1), %%r20\n"
+" ldws 8(%1), %%r21\n"
+" add %0, %%r20, %0\n"
+" ldws,ma 12(%1), %%r19\n"
+" addc %0, %%r21, %0\n"
+" addc %0, %%r19, %0\n"
+"1: ldws,ma 4(%1), %%r19\n"
+" addib,< 0, %2, 1b\n"
+" addc %0, %%r19, %0\n"
+"\n"
+" extru %0, 31, 16, %%r20\n"
+" extru %0, 15, 16, %%r21\n"
+" addc %%r20, %%r21, %0\n"
+" extru %0, 15, 16, %%r21\n"
+" add %0, %%r21, %0\n"
+" subi -1, %0, %0\n"
+"2:\n"
+ : "=r" (sum), "=r" (iph), "=r" (ihl)
+ : "1" (iph), "2" (ihl)
+ : "r19", "r20", "r21", "memory");
+
+ return (__force __sum16)sum;
+}
+
+/*
+ * Fold a partial checksum
+ */
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+	/* add the two 16-bit halves of sum with the halves swapped;
+	   a possible carry from adding the two 16-bit halves
+	   will carry from the lower half into the upper half,
+	   giving us the correct sum in the upper half. */
+ sum += (sum << 16) + (sum >> 16);
+ return (__force __sum16)(~sum >> 16);
+}
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+ __asm__(
+ " add %1, %0, %0\n"
+ " addc %2, %0, %0\n"
+ " addc %3, %0, %0\n"
+ " addc %%r0, %0, %0\n"
+ : "=r" (sum)
+ : "r" (daddr), "r"(saddr), "r"(proto+len), "0"(sum));
+ return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+static inline __sum16 ip_compute_csum(const void *buf, int len)
+{
+ return csum_fold (csum_partial(buf, len, 0));
+}
+
+
+#define _HAVE_ARCH_IPV6_CSUM
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, unsigned short proto,
+ __wsum sum)
+{
+ __asm__ __volatile__ (
+
+#if BITS_PER_LONG > 32
+
+ /*
+ ** We can execute two loads and two adds per cycle on PA 8000.
+	** But add insns get serialized waiting for the carry bit.
+ ** Try to keep 4 registers with "live" values ahead of the ALU.
+ */
+
+" ldd,ma 8(%1), %%r19\n" /* get 1st saddr word */
+" ldd,ma 8(%2), %%r20\n" /* get 1st daddr word */
+" add %8, %3, %3\n"/* add 16-bit proto + len */
+" add %%r19, %0, %0\n"
+" ldd,ma 8(%1), %%r21\n" /* 2cd saddr */
+" ldd,ma 8(%2), %%r22\n" /* 2cd daddr */
+" add,dc %%r20, %0, %0\n"
+" add,dc %%r21, %0, %0\n"
+" add,dc %%r22, %0, %0\n"
+" add,dc %3, %0, %0\n" /* fold in proto+len | carry bit */
+" extrd,u %0, 31, 32, %%r19\n" /* copy upper half down */
+" depdi 0, 31, 32, %0\n" /* clear upper half */
+" add %%r19, %0, %0\n" /* fold into 32-bits */
+" addc 0, %0, %0\n" /* add carry */
+
+#else
+
+ /*
+ ** For PA 1.x, the insn order doesn't matter as much.
+	** Insn stream is serialized on the carry bit here too: each addc
+	** waits on the result from the previous operation (eg r0 + x).
+ */
+
+" ldw,ma 4(%1), %%r19\n" /* get 1st saddr word */
+" ldw,ma 4(%2), %%r20\n" /* get 1st daddr word */
+" add %8, %3, %3\n" /* add 16-bit proto + len */
+" add %%r19, %0, %0\n"
+" ldw,ma 4(%1), %%r21\n" /* 2cd saddr */
+" addc %%r20, %0, %0\n"
+" ldw,ma 4(%2), %%r22\n" /* 2cd daddr */
+" addc %%r21, %0, %0\n"
+" ldw,ma 4(%1), %%r19\n" /* 3rd saddr */
+" addc %%r22, %0, %0\n"
+" ldw,ma 4(%2), %%r20\n" /* 3rd daddr */
+" addc %%r19, %0, %0\n"
+" ldw,ma 4(%1), %%r21\n" /* 4th saddr */
+" addc %%r20, %0, %0\n"
+" ldw,ma 4(%2), %%r22\n" /* 4th daddr */
+" addc %%r21, %0, %0\n"
+" addc %%r22, %0, %0\n"
+" addc %3, %0, %0\n" /* fold in proto+len, catch carry */
+
+#endif
+ : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
+ : "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
+ : "r19", "r20", "r21", "r22", "memory");
+ return csum_fold(sum);
+}
+
+/*
+ * Copy and checksum to user
+ */
+#define HAVE_CSUM_COPY_USER
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+ void __user *dst,
+ int len, __wsum sum,
+ int *err_ptr)
+{
+ /* code stolen from include/asm-mips64 */
+ sum = csum_partial(src, len, sum);
+
+ if (copy_to_user(dst, src, len)) {
+ *err_ptr = -EFAULT;
+ return (__force __wsum)-1;
+ }
+
+ return sum;
+}
+
+#endif
+
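csum_fold() above compresses the running 32-bit sum into the final 16-bit ones'-complement checksum by adding the swapped halves. The same folding, plus a word-at-a-time partial sum, in portable C and checked against the classic example IPv4 header; this is illustrative only, the in-kernel csum_partial() is the assembly-optimized routine declared above:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit partial sum into the final 16-bit checksum, mirroring
 * csum_fold(): add the swapped halves so the carry lands in the top half. */
static uint16_t demo_csum_fold(uint32_t sum)
{
	sum += (sum << 16) + (sum >> 16);
	return (uint16_t)(~sum >> 16);
}

/* Ones'-complement sum of 16-bit words, folding carries as we go. */
static uint32_t demo_csum_partial(const uint16_t *buf, int words, uint32_t sum)
{
	while (words-- > 0) {
		sum += *buf++;
		sum = (sum & 0xffff) + (sum >> 16);
	}
	return sum;
}

int main(void)
{
	/* A 20-byte IPv4 header with its checksum field zeroed. */
	uint16_t iph[10] = { 0x4500, 0x0073, 0x0000, 0x4000, 0x4011,
			     0x0000, 0xc0a8, 0x0001, 0xc0a8, 0x00c7 };

	/* prints: checksum: 0xb861 */
	printf("checksum: %#06x\n",
	       demo_csum_fold(demo_csum_partial(iph, 10, 0)));
	return 0;
}
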
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
new file mode 100644
index 00000000..efa0b60c
--- /dev/null
+++ b/arch/parisc/include/asm/compat.h
@@ -0,0 +1,166 @@
+#ifndef _ASM_PARISC_COMPAT_H
+#define _ASM_PARISC_COMPAT_H
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+
+#define COMPAT_USER_HZ 100
+#define COMPAT_UTS_MACHINE "parisc\0\0"
+
+typedef u32 compat_size_t;
+typedef s32 compat_ssize_t;
+typedef s32 compat_time_t;
+typedef s32 compat_clock_t;
+typedef s32 compat_pid_t;
+typedef u32 __compat_uid_t;
+typedef u32 __compat_gid_t;
+typedef u32 __compat_uid32_t;
+typedef u32 __compat_gid32_t;
+typedef u16 compat_mode_t;
+typedef u32 compat_ino_t;
+typedef u32 compat_dev_t;
+typedef s32 compat_off_t;
+typedef s64 compat_loff_t;
+typedef u16 compat_nlink_t;
+typedef u16 compat_ipc_pid_t;
+typedef s32 compat_daddr_t;
+typedef u32 compat_caddr_t;
+typedef s32 compat_timer_t;
+
+typedef s32 compat_int_t;
+typedef s32 compat_long_t;
+typedef s64 compat_s64;
+typedef u32 compat_uint_t;
+typedef u32 compat_ulong_t;
+typedef u64 compat_u64;
+
+struct compat_timespec {
+ compat_time_t tv_sec;
+ s32 tv_nsec;
+};
+
+struct compat_timeval {
+ compat_time_t tv_sec;
+ s32 tv_usec;
+};
+
+struct compat_stat {
+ compat_dev_t st_dev; /* dev_t is 32 bits on parisc */
+ compat_ino_t st_ino; /* 32 bits */
+ compat_mode_t st_mode; /* 16 bits */
+ compat_nlink_t st_nlink; /* 16 bits */
+ u16 st_reserved1; /* old st_uid */
+ u16 st_reserved2; /* old st_gid */
+ compat_dev_t st_rdev;
+ compat_off_t st_size;
+ compat_time_t st_atime;
+ u32 st_atime_nsec;
+ compat_time_t st_mtime;
+ u32 st_mtime_nsec;
+ compat_time_t st_ctime;
+ u32 st_ctime_nsec;
+ s32 st_blksize;
+ s32 st_blocks;
+ u32 __unused1; /* ACL stuff */
+ compat_dev_t __unused2; /* network */
+ compat_ino_t __unused3; /* network */
+ u32 __unused4; /* cnodes */
+ u16 __unused5; /* netsite */
+ short st_fstype;
+ compat_dev_t st_realdev;
+ u16 st_basemode;
+ u16 st_spareshort;
+ __compat_uid32_t st_uid;
+ __compat_gid32_t st_gid;
+ u32 st_spare4[3];
+};
+
+struct compat_flock {
+ short l_type;
+ short l_whence;
+ compat_off_t l_start;
+ compat_off_t l_len;
+ compat_pid_t l_pid;
+};
+
+struct compat_flock64 {
+ short l_type;
+ short l_whence;
+ compat_loff_t l_start;
+ compat_loff_t l_len;
+ compat_pid_t l_pid;
+};
+
+struct compat_statfs {
+ s32 f_type;
+ s32 f_bsize;
+ s32 f_blocks;
+ s32 f_bfree;
+ s32 f_bavail;
+ s32 f_files;
+ s32 f_ffree;
+ __kernel_fsid_t f_fsid;
+ s32 f_namelen;
+ s32 f_frsize;
+ s32 f_spare[5];
+};
+
+struct compat_sigcontext {
+ compat_int_t sc_flags;
+ compat_int_t sc_gr[32]; /* PSW in sc_gr[0] */
+ u64 sc_fr[32];
+ compat_int_t sc_iasq[2];
+ compat_int_t sc_iaoq[2];
+ compat_int_t sc_sar; /* cr11 */
+};
+
+#define COMPAT_RLIM_INFINITY 0xffffffff
+
+typedef u32 compat_old_sigset_t; /* at least 32 bits */
+
+#define _COMPAT_NSIG 64
+#define _COMPAT_NSIG_BPW 32
+
+typedef u32 compat_sigset_word;
+
+#define COMPAT_OFF_T_MAX 0x7fffffff
+#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
+
+/*
+ * A pointer passed in from user mode. This should not
+ * be used for syscall parameters, just declare them
+ * as pointers because the syscall entry code will have
+ * appropriately converted them already.
+ */
+typedef u32 compat_uptr_t;
+
+static inline void __user *compat_ptr(compat_uptr_t uptr)
+{
+ return (void __user *)(unsigned long)uptr;
+}
+
+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
+{
+ return (u32)(unsigned long)uptr;
+}
+
+static __inline__ void __user *arch_compat_alloc_user_space(long len)
+{
+ struct pt_regs *regs = &current->thread.regs;
+ return (void __user *)regs->gr[30];
+}
+
+static inline int __is_compat_task(struct task_struct *t)
+{
+ return test_ti_thread_flag(task_thread_info(t), TIF_32BIT);
+}
+
+static inline int is_compat_task(void)
+{
+ return __is_compat_task(current);
+}
+
+#endif /* _ASM_PARISC_COMPAT_H */
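
compat_ptr() and ptr_to_compat() above only widen or narrow the 32-bit user pointer; nothing is translated or remapped. A trivial userspace sketch of that round trip, with plain integer types standing in for the kernel's __user annotation:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t demo_compat_uptr_t;

/* Widen a 32-bit pointer from a compat (32-bit) task to a native pointer. */
static void *demo_compat_ptr(demo_compat_uptr_t uptr)
{
	return (void *)(uintptr_t)uptr;
}

/* Narrow a native pointer back down; only meaningful if it fits in 32 bits. */
static demo_compat_uptr_t demo_ptr_to_compat(void *uptr)
{
	return (demo_compat_uptr_t)(uintptr_t)uptr;
}

int main(void)
{
	demo_compat_uptr_t u = 0x1000u;	/* what a 32-bit task passed in */
	void *p = demo_compat_ptr(u);

	printf("%#x -> %p -> %#x\n",
	       (unsigned)u, p, (unsigned)demo_ptr_to_compat(p));
	return 0;
}
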
diff --git a/arch/parisc/include/asm/compat_rt_sigframe.h b/arch/parisc/include/asm/compat_rt_sigframe.h
new file mode 100644
index 00000000..81bec28b
--- /dev/null
+++ b/arch/parisc/include/asm/compat_rt_sigframe.h
@@ -0,0 +1,50 @@
+#include<linux/compat.h>
+#include<linux/compat_siginfo.h>
+#include<asm/compat_ucontext.h>
+
+#ifndef _ASM_PARISC_COMPAT_RT_SIGFRAME_H
+#define _ASM_PARISC_COMPAT_RT_SIGFRAME_H
+
+/* In a deft move of uber-hackery, we decide to carry the top half of all
+ * 64-bit registers in a non-portable, non-ABI, hidden structure.
+ * Userspace can read the hidden structure if it *wants*, but it is never
+ * guaranteed to be in the same place. In fact, the uc_sigmask from the
+ * ucontext_t structure may push the hidden register file downwards.
+ */
+struct compat_regfile {
+ /* Upper half of all the 64-bit registers that were truncated
+ on a copy to a 32-bit userspace */
+ compat_int_t rf_gr[32];
+ compat_int_t rf_iasq[2];
+ compat_int_t rf_iaoq[2];
+ compat_int_t rf_sar;
+};
+
+#define COMPAT_SIGRETURN_TRAMP 4
+#define COMPAT_SIGRESTARTBLOCK_TRAMP 5
+#define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + COMPAT_SIGRESTARTBLOCK_TRAMP)
+
+struct compat_rt_sigframe {
+	/* XXX: Must match the trampoline size in arch/parisc/kernel/signal.c.
+	        Secondary to that, it must protect the ERESTART_RESTARTBLOCK
+	        trampoline we left on the stack (we were bad and didn't
+	        change sp so we could run really fast). */
+ compat_uint_t tramp[COMPAT_TRAMP_SIZE];
+ compat_siginfo_t info;
+ struct compat_ucontext uc;
+ /* Hidden location of truncated registers, *must* be last. */
+ struct compat_regfile regs;
+};
+
+/*
+ * The 32-bit ABI wants at least 48 bytes for a function call frame:
+ * 16 bytes for arg0-arg3, and 32 bytes for magic (the only part of
+ * which Linux/parisc uses is sp-20 for the saved return pointer...)
+ * Then, the stack pointer must be rounded to a cache line (64 bytes).
+ */
+#define SIGFRAME32 64
+#define FUNCTIONCALLFRAME32 48
+#define PARISC_RT_SIGFRAME_SIZE32 \
+ (((sizeof(struct compat_rt_sigframe) + FUNCTIONCALLFRAME32) + SIGFRAME32) & -SIGFRAME32)
+
+#endif
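
PARISC_RT_SIGFRAME_SIZE32 adds the 48-byte call frame and then uses the (x + align) & -align trick to land on a 64-byte boundary; note that an already-aligned size is bumped by a full extra line. A quick check of that arithmetic, with made-up sizes standing in for sizeof(struct compat_rt_sigframe):

#include <stdio.h>

#define DEMO_SIGFRAME32          64
#define DEMO_FUNCTIONCALLFRAME32 48

/* Same shape as PARISC_RT_SIGFRAME_SIZE32: add the call frame, then
 * round to a 64-byte boundary with (x + align) & -align. */
static unsigned long demo_frame_size(unsigned long sizeof_frame)
{
	return ((sizeof_frame + DEMO_FUNCTIONCALLFRAME32) + DEMO_SIGFRAME32)
		& -DEMO_SIGFRAME32;
}

int main(void)
{
	/* Made-up structure sizes; prints 576, 576, 640 respectively. */
	unsigned long sizes[] = { 500, 512, 528 };
	unsigned long i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("sizeof=%lu -> frame=%lu\n", sizes[i], demo_frame_size(sizes[i]));
	return 0;
}
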
diff --git a/arch/parisc/include/asm/compat_signal.h b/arch/parisc/include/asm/compat_signal.h
new file mode 100644
index 00000000..6ad02c36
--- /dev/null
+++ b/arch/parisc/include/asm/compat_signal.h
@@ -0,0 +1,2 @@
+/* Use generic */
+#include <asm-generic/compat_signal.h>
diff --git a/arch/parisc/include/asm/compat_ucontext.h b/arch/parisc/include/asm/compat_ucontext.h
new file mode 100644
index 00000000..2f7292af
--- /dev/null
+++ b/arch/parisc/include/asm/compat_ucontext.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_PARISC_COMPAT_UCONTEXT_H
+#define _ASM_PARISC_COMPAT_UCONTEXT_H
+
+#include <linux/compat.h>
+
+/* 32-bit ucontext as seen from a 64-bit kernel */
+struct compat_ucontext {
+ compat_uint_t uc_flags;
+ compat_uptr_t uc_link;
+ compat_stack_t uc_stack; /* struct compat_sigaltstack (12 bytes)*/
+ /* FIXME: Pad out to get uc_mcontext to start at an 8-byte aligned boundary */
+ compat_uint_t pad[1];
+ struct compat_sigcontext uc_mcontext;
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif /* !_ASM_PARISC_COMPAT_UCONTEXT_H */
diff --git a/arch/parisc/include/asm/cputime.h b/arch/parisc/include/asm/cputime.h
new file mode 100644
index 00000000..dcdf2fbd
--- /dev/null
+++ b/arch/parisc/include/asm/cputime.h
@@ -0,0 +1,6 @@
+#ifndef __PARISC_CPUTIME_H
+#define __PARISC_CPUTIME_H
+
+#include <asm-generic/cputime.h>
+
+#endif /* __PARISC_CPUTIME_H */
diff --git a/arch/parisc/include/asm/current.h b/arch/parisc/include/asm/current.h
new file mode 100644
index 00000000..0fb9338e
--- /dev/null
+++ b/arch/parisc/include/asm/current.h
@@ -0,0 +1,15 @@
+#ifndef _PARISC_CURRENT_H
+#define _PARISC_CURRENT_H
+
+#include <linux/thread_info.h>
+
+struct task_struct;
+
+static inline struct task_struct * get_current(void)
+{
+ return current_thread_info()->task;
+}
+
+#define current get_current()
+
+#endif /* !(_PARISC_CURRENT_H) */
diff --git a/arch/parisc/include/asm/delay.h b/arch/parisc/include/asm/delay.h
new file mode 100644
index 00000000..7a75e984
--- /dev/null
+++ b/arch/parisc/include/asm/delay.h
@@ -0,0 +1,43 @@
+#ifndef _PARISC_DELAY_H
+#define _PARISC_DELAY_H
+
+#include <asm/system.h> /* for mfctl() */
+#include <asm/processor.h> /* for boot_cpu_data */
+
+
+/*
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines
+ */
+
+static __inline__ void __delay(unsigned long loops) {
+ asm volatile(
+ " .balignl 64,0x34000034\n"
+ " addib,UV -1,%0,.\n"
+ " nop\n"
+ : "=r" (loops) : "0" (loops));
+}
+
+static __inline__ void __cr16_delay(unsigned long clocks) {
+ unsigned long start;
+
+ /*
+ * Note: Due to unsigned math, cr16 rollovers shouldn't be
+ * a problem here. However, on 32 bit, we need to make sure
+ * we don't pass in too big a value. The current default
+ * value of MAX_UDELAY_MS should help prevent this.
+ */
+
+ start = mfctl(16);
+ while ((mfctl(16) - start) < clocks)
+ ;
+}
+
+static __inline__ void __udelay(unsigned long usecs) {
+ __cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL));
+}
+
+#define udelay(n) __udelay(n)
+
+#endif /* defined(_PARISC_DELAY_H) */
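
__udelay() above converts microseconds to CR16 ticks with usecs * (cpu_hz / 1000000), and __cr16_delay() then spins until the counter has advanced that far; because the comparison uses an unsigned difference, a counter rollover during the wait is harmless. A small host-side sketch of both pieces, assuming a hypothetical 440 MHz clock purely for the example:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical CR16 frequency; the real rate comes from boot_cpu_data.cpu_hz. */
#define DEMO_CPU_HZ 440000000UL

/* Microseconds -> timer ticks, as __udelay() computes it. */
static unsigned long demo_usecs_to_ticks(unsigned long usecs)
{
	return usecs * (DEMO_CPU_HZ / 1000000UL);
}

/* Wraparound-safe "has the counter advanced far enough?" test, the same
 * unsigned subtraction used by the __cr16_delay() wait loop. */
static int demo_elapsed(uint32_t start, uint32_t now, uint32_t ticks)
{
	return (uint32_t)(now - start) >= ticks;
}

int main(void)
{
	printf("100us = %lu ticks\n", demo_usecs_to_ticks(100));	/* 44000 */

	/* Counter wrapped from 0xfffffff0 to 0x00000030: 0x40 ticks passed. */
	printf("wrapped elapsed: %s\n",
	       demo_elapsed(0xfffffff0u, 0x00000030u, 0x40) ? "yes" : "no");
	return 0;
}
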
diff --git a/arch/parisc/include/asm/device.h b/arch/parisc/include/asm/device.h
new file mode 100644
index 00000000..d8f9872b
--- /dev/null
+++ b/arch/parisc/include/asm/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/arch/parisc/include/asm/div64.h b/arch/parisc/include/asm/div64.h
new file mode 100644
index 00000000..6cd978ce
--- /dev/null
+++ b/arch/parisc/include/asm/div64.h
@@ -0,0 +1 @@
+#include <asm-generic/div64.h>
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
new file mode 100644
index 00000000..4ef73b09
--- /dev/null
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -0,0 +1,241 @@
+#ifndef _PARISC_DMA_MAPPING_H
+#define _PARISC_DMA_MAPPING_H
+
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/scatterlist.h>
+
+/* See Documentation/PCI/PCI-DMA-mapping.txt */
+struct hppa_dma_ops {
+ int (*dma_supported)(struct device *dev, u64 mask);
+ void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
+ void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
+ void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
+ dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
+ void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
+ int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);
+ void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction);
+ void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
+ void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
+ void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
+ void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
+};
+
+/*
+** We could live without the hppa_dma_ops indirection if we didn't want
+** to support 4 different coherent dma models with one binary (they will
+** someday be loadable modules):
+** I/O MMU consistent method dma_sync behavior
+** ============= ====================== =======================
+**	a) PA-7x00LC	uncacheable host memory		flush/purge
+**	b) U2/Uturn	cacheable host memory		NOP
+**	c) Ike/Astro	cacheable host memory		NOP
+** d) EPIC/SAGA memory on EPIC/SAGA flush/reset DMA channel
+**
+** PA-7[13]00LC processors have a GSC bus interface and no I/O MMU.
+**
+** Systems (eg PCX-T workstations) that don't fall into the above
+** categories will need to modify the needed drivers to perform
+** flush/purge and allocate "regular" cacheable pages for everything.
+*/
+
+#ifdef CONFIG_PA11
+extern struct hppa_dma_ops pcxl_dma_ops;
+extern struct hppa_dma_ops pcx_dma_ops;
+#endif
+
+extern struct hppa_dma_ops *hppa_dma_ops;
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag)
+{
+ return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
+}
+
+static inline void *
+dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag)
+{
+ return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
+}
+
+static inline void
+dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
+}
+
+static inline void
+dma_free_noncoherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
+}
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction direction)
+{
+ return hppa_dma_ops->map_single(dev, ptr, size, direction);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ hppa_dma_ops->unmap_single(dev, dma_addr, size, direction);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ return hppa_dma_ops->map_sg(dev, sg, nents, direction);
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction)
+{
+ hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction);
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction direction)
+{
+ return dma_map_single(dev, (page_address(page) + (offset)), size, direction);
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_unmap_single(dev, dma_address, size, direction);
+}
+
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ if(hppa_dma_ops->dma_sync_single_for_cpu)
+ hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, 0, size, direction);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ if(hppa_dma_ops->dma_sync_single_for_device)
+ hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, 0, size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ if(hppa_dma_ops->dma_sync_single_for_cpu)
+ hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, offset, size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ if(hppa_dma_ops->dma_sync_single_for_device)
+ hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, offset, size, direction);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ if(hppa_dma_ops->dma_sync_sg_for_cpu)
+ hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ if(hppa_dma_ops->dma_sync_sg_for_device)
+ hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction);
+}
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+ return hppa_dma_ops->dma_supported(dev, mask);
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+ if(!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ if(hppa_dma_ops->dma_sync_single_for_cpu)
+ flush_kernel_dcache_range((unsigned long)vaddr, size);
+}
+
+static inline void *
+parisc_walk_tree(struct device *dev)
+{
+ struct device *otherdev;
+ if(likely(dev->platform_data != NULL))
+ return dev->platform_data;
+ /* OK, just traverse the bus to find it */
+ for(otherdev = dev->parent; otherdev;
+ otherdev = otherdev->parent) {
+ if(otherdev->platform_data) {
+ dev->platform_data = otherdev->platform_data;
+ break;
+ }
+ }
+ BUG_ON(!dev->platform_data);
+ return dev->platform_data;
+}
+
+#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu);
+
+
+#ifdef CONFIG_IOMMU_CCIO
+struct parisc_device;
+struct ioc;
+void * ccio_get_iommu(const struct parisc_device *dev);
+int ccio_request_resource(const struct parisc_device *dev,
+ struct resource *res);
+int ccio_allocate_resource(const struct parisc_device *dev,
+ struct resource *res, unsigned long size,
+ unsigned long min, unsigned long max, unsigned long align);
+#else /* !CONFIG_IOMMU_CCIO */
+#define ccio_get_iommu(dev) NULL
+#define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res)
+#define ccio_allocate_resource(dev, res, size, min, max, align) \
+ allocate_resource(&iomem_resource, res, size, min, max, \
+ align, NULL, NULL)
+#endif /* !CONFIG_IOMMU_CCIO */
+
+#ifdef CONFIG_IOMMU_SBA
+struct parisc_device;
+void * sba_get_iommu(struct parisc_device *dev);
+#endif
+
+/* At the moment, we panic on error for IOMMU resource exhaustion */
+#define dma_mapping_error(dev, x) 0
+
+#endif
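
The inline wrappers above simply dispatch through whichever hppa_dma_ops table the platform installed at boot. A hedged sketch of the usual streaming map/unmap pairing a driver would follow (not buildable outside a kernel tree; my_dev, buf and len are hypothetical driver state):

    /* Hedged sketch only: typical streaming-DMA pairing via the wrappers above.
     * my_dev, buf and len are hypothetical; error handling is minimal. */
    dma_addr_t bus_addr;

    bus_addr = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(my_dev, bus_addr))        /* always 0 on parisc, see above */
            return -EIO;
    /* ... hand bus_addr to the device, wait for the transfer to complete ... */
    dma_unmap_single(my_dev, bus_addr, len, DMA_TO_DEVICE);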
diff --git a/arch/parisc/include/asm/dma.h b/arch/parisc/include/asm/dma.h
new file mode 100644
index 00000000..f7a18f96
--- /dev/null
+++ b/arch/parisc/include/asm/dma.h
@@ -0,0 +1,185 @@
+/* asm/dma.h: Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ * (c) Copyright 2000, Grant Grundler
+ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+#include <asm/io.h> /* need byte IO */
+#include <asm/system.h>
+
+#define dma_outb outb
+#define dma_inb inb
+
+/*
+** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
+** (or rather not merge) DMAs into manageable chunks.
+** On parisc, this is more of the software/tuning constraint
+** rather than the HW. I/O MMU allocation algorithms can be
+** faster with smaller sizes (to some degree).
+*/
+#define DMA_CHUNK_SIZE (BITS_PER_LONG*PAGE_SIZE)
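
With the usual 4 KB parisc page size this works out to 32 * 4096 = 128 KB on a 32-bit kernel and 64 * 4096 = 256 KB on a 64-bit kernel; a quick standalone check of that arithmetic (the 4 KB page size is an assumption):

    #include <stdio.h>

    /* Worked example of BITS_PER_LONG * PAGE_SIZE, assuming 4 KB pages. */
    int main(void)
    {
            unsigned long page_size = 4096;

            printf("32-bit: %lu KB\n", 32 * page_size / 1024);  /* 128 KB */
            printf("64-bit: %lu KB\n", 64 * page_size / 1024);  /* 256 KB */
            return 0;
    }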
+
+/* The maximum address that we can perform a DMA transfer to on this platform
+** New dynamic DMA interfaces should obsolete this....
+*/
+#define MAX_DMA_ADDRESS (~0UL)
+
+/*
+** We don't have DMA channels... well V-class does but the
+** Dynamic DMA Mapping interface will support them... right? :^)
+** Note: this is not relevant right now for PA-RISC, but we cannot
+** leave this as undefined because some things (e.g. sound)
+** won't compile :-(
+*/
+#define MAX_DMA_CHANNELS 8
+#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
+#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
+#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+#define DMA_AUTOINIT 0x10
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG 0x08 /* command register (w) */
+#define DMA1_STAT_REG 0x08 /* status register (r) */
+#define DMA1_REQ_REG 0x09 /* request register (w) */
+#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
+#define DMA1_MODE_REG 0x0B /* mode register (w) */
+#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
+#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
+#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
+#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
+#define DMA1_EXT_MODE_REG (0x400 | DMA1_MODE_REG)
+
+#define DMA2_CMD_REG 0xD0 /* command register (w) */
+#define DMA2_STAT_REG 0xD0 /* status register (r) */
+#define DMA2_REQ_REG 0xD2 /* request register (w) */
+#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
+#define DMA2_MODE_REG 0xD6 /* mode register (w) */
+#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
+#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
+#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
+#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
+#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG)
+
+static __inline__ unsigned long claim_dma_lock(void)
+{
+ return 0;
+}
+
+static __inline__ void release_dma_lock(unsigned long flags)
+{
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+ unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
+ : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
+
+ /* using short to get 16-bit wrap around */
+ unsigned short count;
+
+ count = 1 + dma_inb(io_port);
+ count += dma_inb(io_port) << 8;
+
+ return (dmanr<=3)? count : (count<<1);
+}
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+#ifdef CONFIG_SUPERIO
+ if (dmanr<=3)
+ dma_outb(dmanr, DMA1_MASK_REG);
+ else
+ dma_outb(dmanr & 3, DMA2_MASK_REG);
+#endif
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+#ifdef CONFIG_SUPERIO
+ if (dmanr<=3)
+ dma_outb(dmanr | 4, DMA1_MASK_REG);
+ else
+ dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
+#endif
+}
+
+/* reserve a DMA channel */
+#define request_dma(dmanr, device_id) (0)
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while holding the DMA lock ! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
+{
+}
+
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+}
+
+
+/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+}
+
+
+#define free_dma(dmanr)
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* _ASM_DMA_H */
diff --git a/arch/parisc/include/asm/eisa_bus.h b/arch/parisc/include/asm/eisa_bus.h
new file mode 100644
index 00000000..201085f8
--- /dev/null
+++ b/arch/parisc/include/asm/eisa_bus.h
@@ -0,0 +1,23 @@
+/*
+ * eisa_bus.h interface between the eisa BA driver and the bus enumerator
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (c) 2002 Daniel Engstrom <5116@telia.com>
+ *
+ */
+
+#ifndef ASM_EISA_H
+#define ASM_EISA_H
+
+extern void eisa_make_irq_level(int num);
+extern void eisa_make_irq_edge(int num);
+extern int eisa_enumerator(unsigned long eeprom_addr,
+ struct resource *io_parent,
+ struct resource *mem_parent);
+extern int eisa_eeprom_init(unsigned long addr);
+
+#endif
diff --git a/arch/parisc/include/asm/eisa_eeprom.h b/arch/parisc/include/asm/eisa_eeprom.h
new file mode 100644
index 00000000..8ce8b85c
--- /dev/null
+++ b/arch/parisc/include/asm/eisa_eeprom.h
@@ -0,0 +1,153 @@
+/*
+ * eisa_eeprom.h - provide support for EISA adapters in PA-RISC machines
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (c) 2001, 2002 Daniel Engstrom <5116@telia.com>
+ *
+ */
+
+#ifndef ASM_EISA_EEPROM_H
+#define ASM_EISA_EEPROM_H
+
+extern void __iomem *eisa_eeprom_addr;
+
+#define HPEE_MAX_LENGTH 0x2000 /* maximum eeprom length */
+
+#define HPEE_SLOT_INFO(slot) (20+(48*slot))
+
+struct eeprom_header
+{
+
+ u_int32_t num_writes; /* number of writes */
+ u_int8_t flags; /* flags, usage? */
+ u_int8_t ver_maj;
+ u_int8_t ver_min;
+ u_int8_t num_slots; /* number of EISA slots in system */
+ u_int16_t csum; /* checksum, I don't know how to calculate this */
+ u_int8_t pad[10];
+} __attribute__ ((packed));
+
+
+struct eeprom_eisa_slot_info
+{
+ u_int32_t eisa_slot_id;
+ u_int32_t config_data_offset;
+ u_int32_t num_writes;
+ u_int16_t csum;
+ u_int16_t num_functions;
+ u_int16_t config_data_length;
+
+ /* bits 0..3 are the duplicate slot id */
+#define HPEE_SLOT_INFO_EMBEDDED 0x10
+#define HPEE_SLOT_INFO_VIRTUAL 0x20
+#define HPEE_SLOT_INFO_NO_READID 0x40
+#define HPEE_SLOT_INFO_DUPLICATE 0x80
+ u_int8_t slot_info;
+
+#define HPEE_SLOT_FEATURES_ENABLE 0x01
+#define HPEE_SLOT_FEATURES_IOCHK 0x02
+#define HPEE_SLOT_FEATURES_CFG_INCOMPLETE 0x80
+ u_int8_t slot_features;
+
+ u_int8_t ver_min;
+ u_int8_t ver_maj;
+
+#define HPEE_FUNCTION_INFO_HAVE_TYPE 0x01
+#define HPEE_FUNCTION_INFO_HAVE_MEMORY 0x02
+#define HPEE_FUNCTION_INFO_HAVE_IRQ 0x04
+#define HPEE_FUNCTION_INFO_HAVE_DMA 0x08
+#define HPEE_FUNCTION_INFO_HAVE_PORT 0x10
+#define HPEE_FUNCTION_INFO_HAVE_PORT_INIT 0x20
+/* I think there are two slightly different
+ * versions of the function_info field:
+ * one in the fixed header and one optional
+ * in the parsed slot data area */
+#define HPEE_FUNCTION_INFO_HAVE_FUNCTION 0x01
+#define HPEE_FUNCTION_INFO_F_DISABLED 0x80
+#define HPEE_FUNCTION_INFO_CFG_FREE_FORM 0x40
+ u_int8_t function_info;
+
+#define HPEE_FLAG_BOARD_IS_ISA 0x01 /* flag and minor version for isa board */
+ u_int8_t flags;
+ u_int8_t pad[24];
+} __attribute__ ((packed));
+
+
+#define HPEE_MEMORY_MAX_ENT 9
+/* memory descriptor: byte 0 */
+#define HPEE_MEMORY_WRITABLE 0x01
+#define HPEE_MEMORY_CACHABLE 0x02
+#define HPEE_MEMORY_TYPE_MASK 0x18
+#define HPEE_MEMORY_TYPE_SYS 0x00
+#define HPEE_MEMORY_TYPE_EXP 0x08
+#define HPEE_MEMORY_TYPE_VIR 0x10
+#define HPEE_MEMORY_TYPE_OTH 0x18
+#define HPEE_MEMORY_SHARED 0x20
+#define HPEE_MEMORY_MORE 0x80
+
+/* memory descriptor: byte 1 */
+#define HPEE_MEMORY_WIDTH_MASK 0x03
+#define HPEE_MEMORY_WIDTH_BYTE 0x00
+#define HPEE_MEMORY_WIDTH_WORD 0x01
+#define HPEE_MEMORY_WIDTH_DWORD 0x02
+#define HPEE_MEMORY_DECODE_MASK 0x0c
+#define HPEE_MEMORY_DECODE_20BITS 0x00
+#define HPEE_MEMORY_DECODE_24BITS 0x04
+#define HPEE_MEMORY_DECODE_32BITS 0x08
+/* bytes 2 and 3 are a 16-bit LE value
+ * containing the memory size in kilobytes */
+/* bytes 4, 5 and 6 are a 24-bit LE value
+ * containing the memory base address */
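
A small standalone sketch of decoding those little-endian fields; "desc" points at byte 0 of one memory descriptor, and the helper names are made up for illustration:

    #include <stdint.h>

    /* Hypothetical helpers decoding the LE fields described above. */
    static unsigned long hpee_mem_size_kb(const uint8_t *desc)
    {
            return desc[2] | ((unsigned long)desc[3] << 8);      /* 16-bit LE, KB */
    }

    static unsigned long hpee_mem_base(const uint8_t *desc)
    {
            return desc[4] | ((unsigned long)desc[5] << 8) |
                   ((unsigned long)desc[6] << 16);               /* 24-bit LE */
    }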
+
+
+#define HPEE_IRQ_MAX_ENT 7
+/* Interrupt entry: byte 0 */
+#define HPEE_IRQ_CHANNEL_MASK 0xf
+#define HPEE_IRQ_TRIG_LEVEL 0x20
+#define HPEE_IRQ_MORE 0x80
+/* byte 1 seems to be unused */
+
+#define HPEE_DMA_MAX_ENT 4
+
+/* dma entry: byte 0 */
+#define HPEE_DMA_CHANNEL_MASK 7
+#define HPEE_DMA_SIZE_MASK 0xc
+#define HPEE_DMA_SIZE_BYTE 0x0
+#define HPEE_DMA_SIZE_WORD 0x4
+#define HPEE_DMA_SIZE_DWORD 0x8
+#define HPEE_DMA_SHARED 0x40
+#define HPEE_DMA_MORE 0x80
+
+/* dma entry: byte 1 */
+#define HPEE_DMA_TIMING_MASK 0x30
+#define HPEE_DMA_TIMING_ISA 0x0
+#define HPEE_DMA_TIMING_TYPEA 0x10
+#define HPEE_DMA_TIMING_TYPEB 0x20
+#define HPEE_DMA_TIMING_TYPEC 0x30
+
+#define HPEE_PORT_MAX_ENT 20
+/* port entry byte 0 */
+#define HPEE_PORT_SIZE_MASK 0x1f
+#define HPEE_PORT_SHARED 0x40
+#define HPEE_PORT_MORE 0x80
+/* bytes 1 and 2 are a 16-bit LE value
+ * containing the start port number */
+
+#define HPEE_PORT_INIT_MAX_LEN 60 /* in bytes here */
+/* port init entry byte 0 */
+#define HPEE_PORT_INIT_WIDTH_MASK 0x3
+#define HPEE_PORT_INIT_WIDTH_BYTE 0x0
+#define HPEE_PORT_INIT_WIDTH_WORD 0x1
+#define HPEE_PORT_INIT_WIDTH_DWORD 0x2
+#define HPEE_PORT_INIT_MASK 0x4
+#define HPEE_PORT_INIT_MORE 0x80
+
+#define HPEE_SELECTION_MAX_ENT 26
+
+#define HPEE_TYPE_MAX_LEN 80
+
+#endif
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
new file mode 100644
index 00000000..19f6cb1a
--- /dev/null
+++ b/arch/parisc/include/asm/elf.h
@@ -0,0 +1,351 @@
+#ifndef __ASMPARISC_ELF_H
+#define __ASMPARISC_ELF_H
+
+/*
+ * ELF register definitions..
+ */
+
+#include <asm/ptrace.h>
+
+#define EM_PARISC 15
+
+/* HPPA specific definitions. */
+
+/* Legal values for e_flags field of Elf32_Ehdr. */
+
+#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */
+#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */
+#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */
+#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */
+#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch
+ prediction. */
+#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */
+#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */
+
+/* Defined values for `e_flags & EF_PARISC_ARCH' are: */
+
+#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */
+#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */
+#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */
+
+/* Additional section indices. */
+
+#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tentatively declared
+ symbols in ANSI C. */
+#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */
+
+/* Legal values for sh_type field of Elf32_Shdr. */
+
+#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */
+#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */
+#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */
+
+/* Legal values for sh_flags field of Elf32_Shdr. */
+
+#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */
+#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */
+#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */
+
+/* Legal values for ST_TYPE subfield of st_info (symbol type). */
+
+#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */
+
+#define STT_HP_OPAQUE (STT_LOOS + 0x1)
+#define STT_HP_STUB (STT_LOOS + 0x2)
+
+/* HPPA relocs. */
+
+#define R_PARISC_NONE 0 /* No reloc. */
+#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */
+#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */
+#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */
+#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */
+#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */
+#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */
+#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */
+#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */
+#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */
+#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */
+#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */
+#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */
+#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */
+#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */
+#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */
+#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */
+#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */
+#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */
+#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */
+#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */
+#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */
+#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */
+#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */
+#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */
+#define R_PARISC_FPTR64 64 /* 64 bits function address. */
+#define R_PARISC_PLABEL32 65 /* 32 bits function address. */
+#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */
+#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */
+#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */
+#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */
+#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */
+#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */
+#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */
+#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */
+#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */
+#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */
+#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */
+#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */
+#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */
+#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */
+#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */
+#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */
+#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */
+#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */
+#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */
+#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */
+#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */
+#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */
+#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */
+#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */
+#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */
+#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */
+#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */
+#define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */
+#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */
+#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */
+#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */
+#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */
+#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LORESERVE 128
+#define R_PARISC_COPY 128 /* Copy relocation. */
+#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */
+#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */
+#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */
+#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */
+#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */
+#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */
+#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */
+#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */
+#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */
+#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */
+#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_HIRESERVE 255
+
+#define PA_PLABEL_FDESC 0x02 /* bit set if PLABEL points to
+ * a function descriptor, not
+ * an address */
+
+/* The following are PA function descriptors
+ *
+ * addr: the absolute address of the function
+ * gp: either the data pointer (r27) for non-PIC code or
+ * the PLT pointer (r19) for PIC code */
+
+/* Format for the Elf32 Function descriptor */
+typedef struct elf32_fdesc {
+ __u32 addr;
+ __u32 gp;
+} Elf32_Fdesc;
+
+/* Format for the Elf64 Function descriptor */
+typedef struct elf64_fdesc {
+ __u64 dummy[2]; /* FIXME: nothing uses these, why waste
+ * the space */
+ __u64 addr;
+ __u64 gp;
+} Elf64_Fdesc;
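
To make the descriptor idea concrete, a standalone illustration of the 32-bit layout; the addresses are invented example values, and the struct name is chosen only to avoid clashing with the real typedef above:

    #include <stdio.h>

    /* Illustration only: a PA "function pointer" is really a descriptor
     * pairing the code address with the gp value the callee expects. */
    struct fdesc32_example {
            unsigned int addr;      /* absolute address of the function */
            unsigned int gp;        /* data pointer (r27) or PLT pointer (r19) */
    };

    int main(void)
    {
            struct fdesc32_example f = { 0x10001234u, 0x40008000u };

            printf("call target %#x, gp %#x\n", f.addr, f.gp);
            return 0;
    }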
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_64BIT
+#define Elf_Fdesc Elf64_Fdesc
+#else
+#define Elf_Fdesc Elf32_Fdesc
+#endif /*CONFIG_64BIT*/
+
+#endif /*__KERNEL__*/
+
+/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */
+
+#define PT_HP_TLS (PT_LOOS + 0x0)
+#define PT_HP_CORE_NONE (PT_LOOS + 0x1)
+#define PT_HP_CORE_VERSION (PT_LOOS + 0x2)
+#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3)
+#define PT_HP_CORE_COMM (PT_LOOS + 0x4)
+#define PT_HP_CORE_PROC (PT_LOOS + 0x5)
+#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6)
+#define PT_HP_CORE_STACK (PT_LOOS + 0x7)
+#define PT_HP_CORE_SHM (PT_LOOS + 0x8)
+#define PT_HP_CORE_MMF (PT_LOOS + 0x9)
+#define PT_HP_PARALLEL (PT_LOOS + 0x10)
+#define PT_HP_FASTBIND (PT_LOOS + 0x11)
+#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12)
+#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13)
+#define PT_HP_STACK (PT_LOOS + 0x14)
+
+#define PT_PARISC_ARCHEXT 0x70000000
+#define PT_PARISC_UNWIND 0x70000001
+
+/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */
+
+#define PF_PARISC_SBP 0x08000000
+
+#define PF_HP_PAGE_SIZE 0x00100000
+#define PF_HP_FAR_SHARED 0x00200000
+#define PF_HP_NEAR_SHARED 0x00400000
+#define PF_HP_CODE 0x01000000
+#define PF_HP_MODIFY 0x02000000
+#define PF_HP_LAZYSWAP 0x04000000
+#define PF_HP_SBP 0x08000000
+
+/*
+ * The following definitions are those for 32-bit ELF binaries on a 32-bit
+ * kernel and for 64-bit binaries on a 64-bit kernel. To run 32-bit binaries
+ * on a 64-bit kernel, arch/parisc/kernel/binfmt_elf32.c defines these
+ * macros appropriately and then #includes binfmt_elf.c, which then includes
+ * this file.
+ */
+#ifndef ELF_CLASS
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ *
+ * Note that this header file is used by default in fs/binfmt_elf.c. So
+ * the following macros are for the default case. However, for the 64
+ * bit kernel we also support 32 bit parisc binaries. To do that
+ * arch/parisc/kernel/binfmt_elf32.c defines its own set of these
+ * macros, and then it includes fs/binfmt_elf.c to provide an alternate
+ * elf binary handler for 32 bit binaries (on the 64 bit kernel).
+ */
+#ifdef CONFIG_64BIT
+#define ELF_CLASS ELFCLASS64
+#else
+#define ELF_CLASS ELFCLASS32
+#endif
+
+typedef unsigned long elf_greg_t;
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+
+#define ELF_PLATFORM ("PARISC\0")
+
+#define SET_PERSONALITY(ex) \
+ current->personality = PER_LINUX; \
+ current->thread.map_base = DEFAULT_MAP_BASE; \
+ current->thread.task_size = DEFAULT_TASK_SIZE \
+
+/*
+ * Fill in general registers in a core dump. This saves pretty
+ * much the same registers as hp-ux, although in a different order.
+ * Registers marked # below are not currently saved in pt_regs, so
+ * we use their current values here.
+ *
+ * gr0..gr31
+ * sr0..sr7
+ * iaoq0..iaoq1
+ * iasq0..iasq1
+ * cr11 (sar)
+ * cr19 (iir)
+ * cr20 (isr)
+ * cr21 (ior)
+ * # cr22 (ipsw)
+ * # cr0 (recovery counter)
+ * # cr24..cr31 (temporary registers)
+ * # cr8,9,12,13 (protection IDs)
+ * # cr10 (scr/ccr)
+ * # cr15 (ext int enable mask)
+ *
+ */
+
+#define ELF_CORE_COPY_REGS(dst, pt) \
+ memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \
+ memcpy(dst + 0, pt->gr, 32 * sizeof(elf_greg_t)); \
+ memcpy(dst + 32, pt->sr, 8 * sizeof(elf_greg_t)); \
+ memcpy(dst + 40, pt->iaoq, 2 * sizeof(elf_greg_t)); \
+ memcpy(dst + 42, pt->iasq, 2 * sizeof(elf_greg_t)); \
+ dst[44] = pt->sar; dst[45] = pt->iir; \
+ dst[46] = pt->isr; dst[47] = pt->ior; \
+ dst[48] = mfctl(22); dst[49] = mfctl(0); \
+ dst[50] = mfctl(24); dst[51] = mfctl(25); \
+ dst[52] = mfctl(26); dst[53] = mfctl(27); \
+ dst[54] = mfctl(28); dst[55] = mfctl(29); \
+ dst[56] = mfctl(30); dst[57] = mfctl(31); \
+ dst[58] = mfctl( 8); dst[59] = mfctl( 9); \
+ dst[60] = mfctl(12); dst[61] = mfctl(13); \
+ dst[62] = mfctl(10); dst[63] = mfctl(15);
+
+#endif /* ! ELF_CLASS */
+
+#define ELF_NGREG 80 /* We only need 64 at present, but leave space
+ for expansion. */
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+#define ELF_NFPREG 32
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+struct task_struct;
+
+extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
+#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
+
+struct pt_regs; /* forward declaration... */
+
+
+#define elf_check_arch(x) ((x)->e_machine == EM_PARISC && (x)->e_ident[EI_CLASS] == ELF_CLASS)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_DATA ELFDATA2MSB
+#define ELF_ARCH EM_PARISC
+#define ELF_OSABI ELFOSABI_LINUX
+
+/* %r23 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a means for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+ been loaded before the code runs.
+
+ So that we can use the same startup file with static executables,
+ we start programs with a value of 0 to indicate that there is no
+ such function. */
+#define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0
+
+#define ELF_EXEC_PAGESIZE 4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk.
+
+ (2 * TASK_SIZE / 3) turns into something undefined when run through a
+ 32 bit preprocessor and in some cases results in the kernel trying to map
+ ld.so to the kernel virtual base. Use a sane value instead. /Jes
+ */
+
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+
+#define ELF_HWCAP 0
+
+#endif
diff --git a/arch/parisc/include/asm/emergency-restart.h b/arch/parisc/include/asm/emergency-restart.h
new file mode 100644
index 00000000..108d8c48
--- /dev/null
+++ b/arch/parisc/include/asm/emergency-restart.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_EMERGENCY_RESTART_H
+#define _ASM_EMERGENCY_RESTART_H
+
+#include <asm-generic/emergency-restart.h>
+
+#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/parisc/include/asm/errno.h b/arch/parisc/include/asm/errno.h
new file mode 100644
index 00000000..135ad604
--- /dev/null
+++ b/arch/parisc/include/asm/errno.h
@@ -0,0 +1,127 @@
+#ifndef _PARISC_ERRNO_H
+#define _PARISC_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#define ENOMSG 35 /* No message of desired type */
+#define EIDRM 36 /* Identifier removed */
+#define ECHRNG 37 /* Channel number out of range */
+#define EL2NSYNC 38 /* Level 2 not synchronized */
+#define EL3HLT 39 /* Level 3 halted */
+#define EL3RST 40 /* Level 3 reset */
+#define ELNRNG 41 /* Link number out of range */
+#define EUNATCH 42 /* Protocol driver not attached */
+#define ENOCSI 43 /* No CSI structure available */
+#define EL2HLT 44 /* Level 2 halted */
+#define EDEADLK 45 /* Resource deadlock would occur */
+#define EDEADLOCK EDEADLK
+#define ENOLCK 46 /* No record locks available */
+#define EILSEQ 47 /* Illegal byte sequence */
+
+#define ENONET 50 /* Machine is not on the network */
+#define ENODATA 51 /* No data available */
+#define ETIME 52 /* Timer expired */
+#define ENOSR 53 /* Out of streams resources */
+#define ENOSTR 54 /* Device not a stream */
+#define ENOPKG 55 /* Package not installed */
+
+#define ENOLINK 57 /* Link has been severed */
+#define EADV 58 /* Advertise error */
+#define ESRMNT 59 /* Srmount error */
+#define ECOMM 60 /* Communication error on send */
+#define EPROTO 61 /* Protocol error */
+
+#define EMULTIHOP 64 /* Multihop attempted */
+
+#define EDOTDOT 66 /* RFS specific error */
+#define EBADMSG 67 /* Not a data message */
+#define EUSERS 68 /* Too many users */
+#define EDQUOT 69 /* Quota exceeded */
+#define ESTALE 70 /* Stale NFS file handle */
+#define EREMOTE 71 /* Object is remote */
+#define EOVERFLOW 72 /* Value too large for defined data type */
+
+/* these errnos are defined by Linux but not HPUX. */
+
+#define EBADE 160 /* Invalid exchange */
+#define EBADR 161 /* Invalid request descriptor */
+#define EXFULL 162 /* Exchange full */
+#define ENOANO 163 /* No anode */
+#define EBADRQC 164 /* Invalid request code */
+#define EBADSLT 165 /* Invalid slot */
+#define EBFONT 166 /* Bad font file format */
+#define ENOTUNIQ 167 /* Name not unique on network */
+#define EBADFD 168 /* File descriptor in bad state */
+#define EREMCHG 169 /* Remote address changed */
+#define ELIBACC 170 /* Can not access a needed shared library */
+#define ELIBBAD 171 /* Accessing a corrupted shared library */
+#define ELIBSCN 172 /* .lib section in a.out corrupted */
+#define ELIBMAX 173 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 174 /* Cannot exec a shared library directly */
+#define ERESTART 175 /* Interrupted system call should be restarted */
+#define ESTRPIPE 176 /* Streams pipe error */
+#define EUCLEAN 177 /* Structure needs cleaning */
+#define ENOTNAM 178 /* Not a XENIX named type file */
+#define ENAVAIL 179 /* No XENIX semaphores available */
+#define EISNAM 180 /* Is a named type file */
+#define EREMOTEIO 181 /* Remote I/O error */
+#define ENOMEDIUM 182 /* No medium found */
+#define EMEDIUMTYPE 183 /* Wrong medium type */
+#define ENOKEY 184 /* Required key not available */
+#define EKEYEXPIRED 185 /* Key has expired */
+#define EKEYREVOKED 186 /* Key has been revoked */
+#define EKEYREJECTED 187 /* Key was rejected by service */
+
+/* We now return you to your regularly scheduled HPUX. */
+
+#define ENOSYM 215 /* symbol does not exist in executable */
+#define ENOTSOCK 216 /* Socket operation on non-socket */
+#define EDESTADDRREQ 217 /* Destination address required */
+#define EMSGSIZE 218 /* Message too long */
+#define EPROTOTYPE 219 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 220 /* Protocol not available */
+#define EPROTONOSUPPORT 221 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 222 /* Socket type not supported */
+#define EOPNOTSUPP 223 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 224 /* Protocol family not supported */
+#define EAFNOSUPPORT 225 /* Address family not supported by protocol */
+#define EADDRINUSE 226 /* Address already in use */
+#define EADDRNOTAVAIL 227 /* Cannot assign requested address */
+#define ENETDOWN 228 /* Network is down */
+#define ENETUNREACH 229 /* Network is unreachable */
+#define ENETRESET 230 /* Network dropped connection because of reset */
+#define ECONNABORTED 231 /* Software caused connection abort */
+#define ECONNRESET 232 /* Connection reset by peer */
+#define ENOBUFS 233 /* No buffer space available */
+#define EISCONN 234 /* Transport endpoint is already connected */
+#define ENOTCONN 235 /* Transport endpoint is not connected */
+#define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 237 /* Too many references: cannot splice */
+#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
+#define ETIMEDOUT 238 /* Connection timed out */
+#define ECONNREFUSED 239 /* Connection refused */
+#define EREMOTERELEASE 240 /* Remote peer released connection */
+#define EHOSTDOWN 241 /* Host is down */
+#define EHOSTUNREACH 242 /* No route to host */
+
+#define EALREADY 244 /* Operation already in progress */
+#define EINPROGRESS 245 /* Operation now in progress */
+#define EWOULDBLOCK 246 /* Operation would block (Linux returns EAGAIN) */
+#define ENOTEMPTY 247 /* Directory not empty */
+#define ENAMETOOLONG 248 /* File name too long */
+#define ELOOP 249 /* Too many symbolic links encountered */
+#define ENOSYS 251 /* Function not implemented */
+
+#define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */
+#define ECANCELLED 253 /* aio request was canceled before complete (POSIX.4 / HPUX) */
+#define ECANCELED ECANCELLED /* SUSv3 and Solaris want one 'L' */
+
+/* for robust mutexes */
+#define EOWNERDEAD 254 /* Owner died */
+#define ENOTRECOVERABLE 255 /* State not recoverable */
+
+#define ERFKILL 256 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 257 /* Memory page has hardware error */
+
+#endif
diff --git a/arch/parisc/include/asm/fb.h b/arch/parisc/include/asm/fb.h
new file mode 100644
index 00000000..4d503a02
--- /dev/null
+++ b/arch/parisc/include/asm/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/parisc/include/asm/fcntl.h b/arch/parisc/include/asm/fcntl.h
new file mode 100644
index 00000000..0304b92c
--- /dev/null
+++ b/arch/parisc/include/asm/fcntl.h
@@ -0,0 +1,40 @@
+#ifndef _PARISC_FCNTL_H
+#define _PARISC_FCNTL_H
+
+#define O_APPEND 000000010
+#define O_BLKSEEK 000000100 /* HPUX only */
+#define O_CREAT 000000400 /* not fcntl */
+#define O_EXCL 000002000 /* not fcntl */
+#define O_LARGEFILE 000004000
+#define __O_SYNC 000100000
+#define O_SYNC (__O_SYNC|O_DSYNC)
+#define O_NONBLOCK 000200004 /* HPUX has separate NDELAY & NONBLOCK */
+#define O_NOCTTY 000400000 /* not fcntl */
+#define O_DSYNC 001000000 /* HPUX only */
+#define O_RSYNC 002000000 /* HPUX only */
+#define O_NOATIME 004000000
+#define O_CLOEXEC 010000000 /* set close_on_exec */
+
+#define O_DIRECTORY 000010000 /* must be a directory */
+#define O_NOFOLLOW 000000200 /* don't follow links */
+#define O_INVISIBLE 004000000 /* invisible I/O, for DMAPI/XDSM */
+
+#define O_PATH 020000000
+
+#define F_GETLK64 8
+#define F_SETLK64 9
+#define F_SETLKW64 10
+
+#define F_GETOWN 11 /* for sockets. */
+#define F_SETOWN 12 /* for sockets. */
+#define F_SETSIG 13 /* for sockets. */
+#define F_GETSIG 14 /* for sockets. */
+
+/* for posix fcntl() and lockf() */
+#define F_RDLCK 01
+#define F_WRLCK 02
+#define F_UNLCK 03
+
+#include <asm-generic/fcntl.h>
+
+#endif
diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h
new file mode 100644
index 00000000..6fec4d4a
--- /dev/null
+++ b/arch/parisc/include/asm/fixmap.h
@@ -0,0 +1,30 @@
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+/*
+ * This file defines the locations of the fixed mappings on parisc.
+ *
+ * All of the values in this file are machine virtual addresses.
+ *
+ * All of the values in this file must be <4GB (because of assembly
+ * loading restrictions). If you place this region anywhere above
+ * __PAGE_OFFSET, you must adjust the memory map accordingly */
+
+/* The alias region is used in kernel space to do copy/clear to or
+ * from areas congruently mapped with user space. It is 8MB large
+ * and must be 16MB aligned */
+#define TMPALIAS_MAP_START ((__PAGE_OFFSET) - 16*1024*1024)
+/* This is the kernel area for all maps (vmalloc, dma etc.) most
+ * usually, it extends up to TMPALIAS_MAP_START. Virtual addresses
+ * 0..GATEWAY_PAGE_SIZE are reserved for the gateway page */
+#define KERNEL_MAP_START (GATEWAY_PAGE_SIZE)
+#define KERNEL_MAP_END (TMPALIAS_MAP_START)
+
+#ifndef __ASSEMBLY__
+extern void *parisc_vmalloc_start;
+#define PCXL_DMA_MAP_SIZE (8*1024*1024)
+#define VMALLOC_START ((unsigned long)parisc_vmalloc_start)
+#define VMALLOC_END (KERNEL_MAP_END)
+#endif /*__ASSEMBLY__*/
+
+#endif /*_ASM_FIXMAP_H*/
diff --git a/arch/parisc/include/asm/floppy.h b/arch/parisc/include/asm/floppy.h
new file mode 100644
index 00000000..4ca69f55
--- /dev/null
+++ b/arch/parisc/include/asm/floppy.h
@@ -0,0 +1,271 @@
+/* Architecture specific parts of the Floppy driver
+ *
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ * Copyright (C) 2000 Matthew Wilcox (willy a debian . org)
+ * Copyright (C) 2000 Dave Kennedy
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_PARISC_FLOPPY_H
+#define __ASM_PARISC_FLOPPY_H
+
+#include <linux/vmalloc.h>
+
+
+/*
+ * The DMA channel used by the floppy controller cannot access data at
+ * addresses >= 16MB
+ *
+ * Went back to the 1MB limit, as some people had problems with the floppy
+ * driver otherwise. It doesn't matter much for performance anyway, as most
+ * floppy accesses go through the track buffer.
+ */
+#define _CROSS_64KB(a,s,vdma) \
+(!vdma && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
+
+#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
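
A standalone illustration of the boundary test above (for the non-vdma case); the buffer addresses are arbitrary examples:

    #include <stdio.h>

    #define K_64 0x10000UL

    /* Same check as _CROSS_64KB() above, with vdma == 0. */
    static int cross_64kb(unsigned long a, unsigned long s)
    {
            return a / K_64 != (a + s - 1) / K_64;
    }

    int main(void)
    {
            printf("%d\n", cross_64kb(0x1FF00, 512));   /* 1: crosses a 64 KB line */
            printf("%d\n", cross_64kb(0x10000, 512));   /* 0: fits in one segment */
            return 0;
    }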
+
+
+#define SW fd_routine[use_virtual_dma&1]
+#define CSW fd_routine[can_use_virtual_dma & 1]
+
+
+#define fd_inb(port) readb(port)
+#define fd_outb(value, port) writeb(value, port)
+
+#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
+#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
+#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
+#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
+#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
+#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
+
+#define FLOPPY_CAN_FALLBACK_ON_NODMA
+
+static int virtual_dma_count=0;
+static int virtual_dma_residue=0;
+static char *virtual_dma_addr=0;
+static int virtual_dma_mode=0;
+static int doing_pdma=0;
+
+static void floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
+{
+ register unsigned char st;
+
+#undef TRACE_FLPY_INT
+
+#ifdef TRACE_FLPY_INT
+ static int calls=0;
+ static int bytes=0;
+ static int dma_wait=0;
+#endif
+ if (!doing_pdma) {
+ floppy_interrupt(irq, dev_id, regs);
+ return;
+ }
+
+#ifdef TRACE_FLPY_INT
+ if(!calls)
+ bytes = virtual_dma_count;
+#endif
+
+ {
+ register int lcount;
+ register char *lptr = virtual_dma_addr;
+
+ for (lcount = virtual_dma_count; lcount; lcount--) {
+ st = fd_inb(virtual_dma_port+4) & 0xa0 ;
+ if (st != 0xa0)
+ break;
+ if (virtual_dma_mode) {
+ fd_outb(*lptr, virtual_dma_port+5);
+ } else {
+ *lptr = fd_inb(virtual_dma_port+5);
+ }
+ lptr++;
+ }
+ virtual_dma_count = lcount;
+ virtual_dma_addr = lptr;
+ st = fd_inb(virtual_dma_port+4);
+ }
+
+#ifdef TRACE_FLPY_INT
+ calls++;
+#endif
+ if (st == 0x20)
+ return;
+ if (!(st & 0x20)) {
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count = 0;
+#ifdef TRACE_FLPY_INT
+ printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
+ virtual_dma_count, virtual_dma_residue, calls, bytes,
+ dma_wait);
+ calls = 0;
+ dma_wait=0;
+#endif
+ doing_pdma = 0;
+ floppy_interrupt(irq, dev_id, regs);
+ return;
+ }
+#ifdef TRACE_FLPY_INT
+ if (!virtual_dma_count)
+ dma_wait++;
+#endif
+}
+
+static void fd_disable_dma(void)
+{
+ if(! (can_use_virtual_dma & 1))
+ disable_dma(FLOPPY_DMA);
+ doing_pdma = 0;
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+}
+
+static int vdma_request_dma(unsigned int dmanr, const char * device_id)
+{
+ return 0;
+}
+
+static void vdma_nop(unsigned int dummy)
+{
+}
+
+
+static int vdma_get_dma_residue(unsigned int dummy)
+{
+ return virtual_dma_count + virtual_dma_residue;
+}
+
+
+static int fd_request_irq(void)
+{
+ if(can_use_virtual_dma)
+ return request_irq(FLOPPY_IRQ, floppy_hardint,
+ IRQF_DISABLED, "floppy", NULL);
+ else
+ return request_irq(FLOPPY_IRQ, floppy_interrupt,
+ IRQF_DISABLED, "floppy", NULL);
+}
+
+static unsigned long dma_mem_alloc(unsigned long size)
+{
+ return __get_dma_pages(GFP_KERNEL, get_order(size));
+}
+
+
+static unsigned long vdma_mem_alloc(unsigned long size)
+{
+ return (unsigned long) vmalloc(size);
+
+}
+
+#define nodma_mem_alloc(size) vdma_mem_alloc(size)
+
+static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
+{
+ if((unsigned int) addr >= (unsigned int) high_memory)
+ return vfree((void *)addr);
+ else
+ free_pages(addr, get_order(size));
+}
+
+#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
+
+static void _fd_chose_dma_mode(char *addr, unsigned long size)
+{
+ if(can_use_virtual_dma == 2) {
+ if((unsigned int) addr >= (unsigned int) high_memory ||
+ virt_to_bus(addr) >= 0x1000000 ||
+ _CROSS_64KB(addr, size, 0))
+ use_virtual_dma = 1;
+ else
+ use_virtual_dma = 0;
+ } else {
+ use_virtual_dma = can_use_virtual_dma & 1;
+ }
+}
+
+#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
+
+
+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+ doing_pdma = 1;
+ virtual_dma_port = io;
+ virtual_dma_mode = (mode == DMA_MODE_WRITE);
+ virtual_dma_addr = addr;
+ virtual_dma_count = size;
+ virtual_dma_residue = 0;
+ return 0;
+}
+
+static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+#ifdef FLOPPY_SANITY_CHECK
+ if (CROSS_64KB(addr, size)) {
+ printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
+ return -1;
+ }
+#endif
+ /* actual, physical DMA */
+ doing_pdma = 0;
+ clear_dma_ff(FLOPPY_DMA);
+ set_dma_mode(FLOPPY_DMA,mode);
+ set_dma_addr(FLOPPY_DMA,virt_to_bus(addr));
+ set_dma_count(FLOPPY_DMA,size);
+ enable_dma(FLOPPY_DMA);
+ return 0;
+}
+
+static struct fd_routine_l {
+ int (*_request_dma)(unsigned int dmanr, const char * device_id);
+ void (*_free_dma)(unsigned int dmanr);
+ int (*_get_dma_residue)(unsigned int dummy);
+ unsigned long (*_dma_mem_alloc) (unsigned long size);
+ int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
+} fd_routine[] = {
+ {
+ request_dma,
+ free_dma,
+ get_dma_residue,
+ dma_mem_alloc,
+ hard_dma_setup
+ },
+ {
+ vdma_request_dma,
+ vdma_nop,
+ vdma_get_dma_residue,
+ vdma_mem_alloc,
+ vdma_dma_setup
+ }
+};
+
+
+static int FDC1 = 0x3f0; /* Lies. Floppy controller is memory mapped, not io mapped */
+static int FDC2 = -1;
+
+#define FLOPPY0_TYPE 0
+#define FLOPPY1_TYPE 0
+
+#define N_FDC 1
+#define N_DRIVE 8
+
+#define EXTRA_FLOPPY_PARAMS
+
+#endif /* __ASM_PARISC_FLOPPY_H */
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
new file mode 100644
index 00000000..72c0fafa
--- /dev/null
+++ b/arch/parisc/include/asm/ftrace.h
@@ -0,0 +1,39 @@
+#ifndef _ASM_PARISC_FTRACE_H
+#define _ASM_PARISC_FTRACE_H
+
+#ifndef __ASSEMBLY__
+extern void mcount(void);
+
+/*
+ * Stack of return addresses for functions of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+ unsigned long ret;
+ unsigned long func;
+ unsigned long long calltime;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relies on ftrace_return_to_handler.
+ * Defined in entry.S
+ */
+extern void return_to_handler(void);
+
+
+extern unsigned long return_address(unsigned int);
+
+#define HAVE_ARCH_CALLER_ADDR
+
+#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#define CALLER_ADDR1 return_address(1)
+#define CALLER_ADDR2 return_address(2)
+#define CALLER_ADDR3 return_address(3)
+#define CALLER_ADDR4 return_address(4)
+#define CALLER_ADDR5 return_address(5)
+#define CALLER_ADDR6 return_address(6)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PARISC_FTRACE_H */
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
new file mode 100644
index 00000000..2388bdb3
--- /dev/null
+++ b/arch/parisc/include/asm/futex.h
@@ -0,0 +1,131 @@
+#ifndef _ASM_PARISC_FUTEX_H
+#define _ASM_PARISC_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/atomic.h>
+#include <asm/errno.h>
+
+static inline int
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+{
+ unsigned long int flags;
+ u32 val;
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ _atomic_spin_lock_irqsave(uaddr, flags);
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ /* *(int *)UADDR2 = OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret)
+ ret = put_user(oparg, uaddr);
+ break;
+ case FUTEX_OP_ADD:
+ /* *(int *)UADDR2 += OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval + oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
+ case FUTEX_OP_OR:
+ /* *(int *)UADDR2 |= OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval | oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
+ case FUTEX_OP_ANDN:
+ /* *(int *)UADDR2 &= ~OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval & ~oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
+ case FUTEX_OP_XOR:
+ /* *(int *)UADDR2 ^= OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval ^ oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ _atomic_spin_unlock_irqrestore(uaddr, flags);
+
+ pagefault_enable();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+/* Non-atomic version */
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret;
+ u32 val;
+ unsigned long flags;
+
+ /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
+ * our gateway page, and causes no end of trouble...
+ */
+ if (segment_eq(KERNEL_DS, get_fs()) && !uaddr)
+ return -EFAULT;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ /* HPPA has no cmpxchg in hardware and therefore the
+ * best we can do here is use an array of locks. The
+ * lock selected is based on a hash of the userspace
+ * address. This should scale to a couple of CPUs.
+ */
+
+ _atomic_spin_lock_irqsave(uaddr, flags);
+
+ ret = get_user(val, uaddr);
+
+ if (!ret && val == oldval)
+ ret = put_user(newval, uaddr);
+
+ *uval = val;
+
+ _atomic_spin_unlock_irqrestore(uaddr, flags);
+
+ return ret;
+}
+
+#endif /*__KERNEL__*/
+#endif /*_ASM_PARISC_FUTEX_H*/
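
The "array of locks selected by a hash of the user address" scheme mentioned in the comments above can be modelled in isolation as follows; the table size and shift are assumed illustration values, not the kernel's actual parameters:

    #include <stdio.h>

    #define NLOCKS 4        /* assumed; the real parameters live elsewhere */

    /* Map a user address to one of NLOCKS spinlocks (model only). */
    static unsigned int lock_index(unsigned long uaddr)
    {
            return (uaddr >> 4) & (NLOCKS - 1);
    }

    int main(void)
    {
            printf("0x1000 -> lock %u\n", lock_index(0x1000));
            printf("0x1010 -> lock %u\n", lock_index(0x1010));
            return 0;
    }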
diff --git a/arch/parisc/include/asm/grfioctl.h b/arch/parisc/include/asm/grfioctl.h
new file mode 100644
index 00000000..671e0604
--- /dev/null
+++ b/arch/parisc/include/asm/grfioctl.h
@@ -0,0 +1,113 @@
+/* Architecture specific parts of HP's STI (framebuffer) driver.
+ * Structures are HP-UX compatible for XFree86 usage.
+ *
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ * Copyright (C) 2001 Helge Deller (deller a parisc-linux org)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_PARISC_GRFIOCTL_H
+#define __ASM_PARISC_GRFIOCTL_H
+
+/* upper 32 bits of graphics id (HP/UX identifier) */
+
+#define GRFGATOR 8
+#define S9000_ID_S300 9
+#define GRFBOBCAT 9
+#define GRFCATSEYE 9
+#define S9000_ID_98720 10
+#define GRFRBOX 10
+#define S9000_ID_98550 11
+#define GRFFIREEYE 11
+#define S9000_ID_A1096A 12
+#define GRFHYPERION 12
+#define S9000_ID_FRI 13
+#define S9000_ID_98730 14
+#define GRFDAVINCI 14
+#define S9000_ID_98705 0x26C08070 /* Tigershark */
+#define S9000_ID_98736 0x26D148AB
+#define S9000_ID_A1659A 0x26D1482A /* CRX 8 plane color (=ELK) */
+#define S9000_ID_ELK S9000_ID_A1659A
+#define S9000_ID_A1439A 0x26D148EE /* CRX24 = CRX+ (24-plane color) */
+#define S9000_ID_A1924A 0x26D1488C /* GRX gray-scale */
+#define S9000_ID_ELM S9000_ID_A1924A
+#define S9000_ID_98765 0x27480DEF
+#define S9000_ID_ELK_768 0x27482101
+#define S9000_ID_STINGER 0x27A4A402
+#define S9000_ID_TIMBER 0x27F12392 /* Bushmaster (710) Graphics */
+#define S9000_ID_TOMCAT 0x27FCCB6D /* dual-headed ELK (Dual CRX) */
+#define S9000_ID_ARTIST 0x2B4DED6D /* Artist (Gecko/712 & 715) onboard Graphics */
+#define S9000_ID_HCRX 0x2BCB015A /* Hyperdrive/Hyperbowl (A4071A) Graphics */
+#define CRX24_OVERLAY_PLANES 0x920825AA /* Overlay planes on CRX24 */
+
+#define CRT_ID_ELK_1024 S9000_ID_ELK_768 /* Elk 1024x768 CRX */
+#define CRT_ID_ELK_1280 S9000_ID_A1659A /* Elk 1280x1024 CRX */
+#define CRT_ID_ELK_1024DB 0x27849CA5 /* Elk 1024x768 double buffer */
+#define CRT_ID_ELK_GS S9000_ID_A1924A /* Elk 1280x1024 GreyScale */
+#define CRT_ID_CRX24 S9000_ID_A1439A /* Piranha */
+#define CRT_ID_VISUALIZE_EG 0x2D08C0A7 /* Graffiti, A4450A (built-in B132+/B160L) */
+#define CRT_ID_THUNDER 0x2F23E5FC /* Thunder 1 VISUALIZE 48*/
+#define CRT_ID_THUNDER2 0x2F8D570E /* Thunder 2 VISUALIZE 48 XP*/
+#define CRT_ID_HCRX S9000_ID_HCRX /* Hyperdrive HCRX */
+#define CRT_ID_CRX48Z S9000_ID_STINGER /* Stinger */
+#define CRT_ID_DUAL_CRX S9000_ID_TOMCAT /* Tomcat */
+#define CRT_ID_PVRX S9000_ID_98705 /* Tigershark */
+#define CRT_ID_TIMBER S9000_ID_TIMBER /* Timber (710 builtin) */
+#define CRT_ID_TVRX S9000_ID_98765 /* TVRX (gto/falcon) */
+#define CRT_ID_ARTIST S9000_ID_ARTIST /* Artist */
+#define CRT_ID_SUMMIT 0x2FC1066B /* Summit FX2, FX4, FX6 ... */
+#define CRT_ID_LEGO 0x35ACDA30 /* Lego FX5, FX10 ... */
+#define CRT_ID_PINNACLE 0x35ACDA16 /* Pinnacle FXe */
+
+/* structure for ioctl(GCDESCRIBE) */
+
+#define gaddr_t unsigned long /* FIXME: PA2.0 (64bit) portable ? */
+
+struct grf_fbinfo {
+ unsigned int id; /* upper 32 bits of graphics id */
+ unsigned int mapsize; /* mapped size of framebuffer */
+ unsigned int dwidth, dlength;/* x and y sizes */
+ unsigned int width, length; /* total x and total y size */
+ unsigned int xlen; /* x pitch size */
+ unsigned int bpp, bppu; /* bits per pixel and used bpp */
+ unsigned int npl, nplbytes; /* # of planes and bytes per plane */
+ char name[32]; /* name of the device (from ROM) */
+ unsigned int attr; /* attributes */
+ gaddr_t fbbase, regbase;/* framebuffer and register base addr */
+ gaddr_t regions[6]; /* region bases */
+};
+
+#define GCID _IOR('G', 0, int)
+#define GCON _IO('G', 1)
+#define GCOFF _IO('G', 2)
+#define GCAON _IO('G', 3)
+#define GCAOFF _IO('G', 4)
+#define GCMAP _IOWR('G', 5, int)
+#define GCUNMAP _IOWR('G', 6, int)
+#define GCMAP_HPUX _IO('G', 5)
+#define GCUNMAP_HPUX _IO('G', 6)
+#define GCLOCK _IO('G', 7)
+#define GCUNLOCK _IO('G', 8)
+#define GCLOCK_MINIMUM _IO('G', 9)
+#define GCUNLOCK_MINIMUM _IO('G', 10)
+#define GCSTATIC_CMAP _IO('G', 11)
+#define GCVARIABLE_CMAP _IO('G', 12)
+#define GCTERM _IOWR('G',20,int) /* multi-headed Tomcat */
+#define GCDESCRIBE _IOR('G', 21, struct grf_fbinfo)
+#define GCFASTLOCK _IO('G', 26)
+
+#endif /* __ASM_PARISC_GRFIOCTL_H */
+
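
As a rough sketch of how the GCDESCRIBE interface defined above might be used from user space: the device node path below is an assumption (the actual node depends on how the STI framebuffer is exposed on a given system), and the program is illustrative only.

/* Hedged sketch: query framebuffer geometry via GCDESCRIBE.
 * "/dev/graphics" is a hypothetical device node, not a fixed name. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/grfioctl.h>

int main(void)
{
    struct grf_fbinfo info;
    int fd = open("/dev/graphics", O_RDWR);   /* hypothetical node */

    if (fd < 0 || ioctl(fd, GCDESCRIBE, &info) < 0)
        return 1;
    printf("%s: %ux%u visible, %u bpp, id 0x%x\n",
           info.name, info.dwidth, info.dlength, info.bpp, info.id);
    close(fd);
    return 0;
}
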
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
new file mode 100644
index 00000000..0d68184a
--- /dev/null
+++ b/arch/parisc/include/asm/hardirq.h
@@ -0,0 +1,11 @@
+/* hardirq.h: PA-RISC hard IRQ support.
+ *
+ * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
+ */
+
+#ifndef _PARISC_HARDIRQ_H
+#define _PARISC_HARDIRQ_H
+
+#include <asm-generic/hardirq.h>
+
+#endif /* _PARISC_HARDIRQ_H */
diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h
new file mode 100644
index 00000000..4e962683
--- /dev/null
+++ b/arch/parisc/include/asm/hardware.h
@@ -0,0 +1,127 @@
+#ifndef _PARISC_HARDWARE_H
+#define _PARISC_HARDWARE_H
+
+#include <linux/mod_devicetable.h>
+#include <asm/pdc.h>
+
+#define HWTYPE_ANY_ID PA_HWTYPE_ANY_ID
+#define HVERSION_ANY_ID PA_HVERSION_ANY_ID
+#define HVERSION_REV_ANY_ID PA_HVERSION_REV_ANY_ID
+#define SVERSION_ANY_ID PA_SVERSION_ANY_ID
+
+struct hp_hardware {
+ unsigned short hw_type:5; /* HPHW_xxx */
+ unsigned short hversion;
+ unsigned long sversion:28;
+ unsigned short opt;
+ const char name[80]; /* The hardware description */
+};
+
+struct parisc_device;
+
+enum cpu_type {
+ pcx = 0, /* pa7000 pa 1.0 */
+ pcxs = 1, /* pa7000 pa 1.1a */
+ pcxt = 2, /* pa7100 pa 1.1b */
+ pcxt_ = 3, /* pa7200 (t') pa 1.1c */
+ pcxl = 4, /* pa7100lc pa 1.1d */
+ pcxl2 = 5, /* pa7300lc pa 1.1e */
+ pcxu = 6, /* pa8000 pa 2.0 */
+ pcxu_ = 7, /* pa8200 (u+) pa 2.0 */
+ pcxw = 8, /* pa8500 pa 2.0 */
+ pcxw_ = 9, /* pa8600 (w+) pa 2.0 */
+ pcxw2 = 10, /* pa8700 pa 2.0 */
+ mako = 11, /* pa8800 pa 2.0 */
+ mako2 = 12 /* pa8900 pa 2.0 */
+};
+
+extern const char * const cpu_name_version[][2]; /* mapping from enum cpu_type to strings */
+
+struct parisc_driver;
+
+struct io_module {
+ volatile uint32_t nothing; /* reg 0 */
+ volatile uint32_t io_eim;
+ volatile uint32_t io_dc_adata;
+ volatile uint32_t io_ii_cdata;
+ volatile uint32_t io_dma_link; /* reg 4 */
+ volatile uint32_t io_dma_command;
+ volatile uint32_t io_dma_address;
+ volatile uint32_t io_dma_count;
+ volatile uint32_t io_flex; /* reg 8 */
+ volatile uint32_t io_spa_address;
+ volatile uint32_t reserved1[2];
+ volatile uint32_t io_command; /* reg 12 */
+ volatile uint32_t io_status;
+ volatile uint32_t io_control;
+ volatile uint32_t io_data;
+ volatile uint32_t reserved2; /* reg 16 */
+ volatile uint32_t chain_addr;
+ volatile uint32_t sub_mask_clr;
+ volatile uint32_t reserved3[13];
+ volatile uint32_t undefined[480];
+ volatile uint32_t unpriv[512];
+};
+
+struct bc_module {
+ volatile uint32_t unused1[12];
+ volatile uint32_t io_command;
+ volatile uint32_t io_status;
+ volatile uint32_t io_control;
+ volatile uint32_t unused2[1];
+ volatile uint32_t io_err_resp;
+ volatile uint32_t io_err_info;
+ volatile uint32_t io_err_req;
+ volatile uint32_t unused3[11];
+ volatile uint32_t io_io_low;
+ volatile uint32_t io_io_high;
+};
+
+#define HPHW_NPROC 0
+#define HPHW_MEMORY 1
+#define HPHW_B_DMA 2
+#define HPHW_OBSOLETE 3
+#define HPHW_A_DMA 4
+#define HPHW_A_DIRECT 5
+#define HPHW_OTHER 6
+#define HPHW_BCPORT 7
+#define HPHW_CIO 8
+#define HPHW_CONSOLE 9
+#define HPHW_FIO 10
+#define HPHW_BA 11
+#define HPHW_IOA 12
+#define HPHW_BRIDGE 13
+#define HPHW_FABRIC 14
+#define HPHW_MC 15
+#define HPHW_FAULTY 31
+
+
+/* hardware.c: */
+extern const char *parisc_hardware_description(struct parisc_device_id *id);
+extern enum cpu_type parisc_get_cpu_type(unsigned long hversion);
+
+struct pci_dev;
+
+/* drivers.c: */
+extern struct parisc_device *alloc_pa_dev(unsigned long hpa,
+ struct hardware_path *path);
+extern int register_parisc_device(struct parisc_device *dev);
+extern int register_parisc_driver(struct parisc_driver *driver);
+extern int count_parisc_driver(struct parisc_driver *driver);
+extern int unregister_parisc_driver(struct parisc_driver *driver);
+extern void walk_central_bus(void);
+extern const struct parisc_device *find_pa_parent_type(const struct parisc_device *, int);
+extern void print_parisc_devices(void);
+extern char *print_pa_hwpath(struct parisc_device *dev, char *path);
+extern char *print_pci_hwpath(struct pci_dev *dev, char *path);
+extern void get_pci_node_path(struct pci_dev *dev, struct hardware_path *path);
+extern void init_parisc_bus(void);
+extern struct device *hwpath_to_device(struct hardware_path *modpath);
+extern void device_to_hwpath(struct device *dev, struct hardware_path *path);
+
+
+/* inventory.c: */
+extern void do_memory_inventory(void);
+extern void do_device_inventory(void);
+
+#endif /* _PARISC_HARDWARE_H */
diff --git a/arch/parisc/include/asm/hw_irq.h b/arch/parisc/include/asm/hw_irq.h
new file mode 100644
index 00000000..6707f7df
--- /dev/null
+++ b/arch/parisc/include/asm/hw_irq.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_HW_IRQ_H
+#define _ASM_HW_IRQ_H
+
+/*
+ * linux/include/asm/hw_irq.h
+ */
+
+#endif
diff --git a/arch/parisc/include/asm/ide.h b/arch/parisc/include/asm/ide.h
new file mode 100644
index 00000000..81700a23
--- /dev/null
+++ b/arch/parisc/include/asm/ide.h
@@ -0,0 +1,57 @@
+/*
+ * linux/include/asm-parisc/ide.h
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ */
+
+/*
+ * This file contains the PARISC architecture specific IDE code.
+ */
+
+#ifndef __ASM_PARISC_IDE_H
+#define __ASM_PARISC_IDE_H
+
+#ifdef __KERNEL__
+
+/* Generic I/O and MEMIO string operations. */
+
+#define __ide_insw insw
+#define __ide_insl insl
+#define __ide_outsw outsw
+#define __ide_outsl outsl
+
+static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ *(u16 *)addr = __raw_readw(port);
+ addr += 2;
+ }
+}
+
+static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ *(u32 *)addr = __raw_readl(port);
+ addr += 4;
+ }
+}
+
+static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ __raw_writew(*(u16 *)addr, port);
+ addr += 2;
+ }
+}
+
+static __inline__ void __ide_mm_outsl(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ __raw_writel(*(u32 *)addr, port);
+ addr += 4;
+ }
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_PARISC_IDE_H */
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
new file mode 100644
index 00000000..1f6d2ae7
--- /dev/null
+++ b/arch/parisc/include/asm/io.h
@@ -0,0 +1,320 @@
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#include <linux/types.h>
+#include <asm/pgtable.h>
+
+#define virt_to_phys(a) ((unsigned long)__pa(a))
+#define phys_to_virt(a) __va(a)
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+static inline unsigned long isa_bus_to_virt(unsigned long addr) {
+ BUG();
+ return 0;
+}
+
+static inline unsigned long isa_virt_to_bus(void *addr) {
+ BUG();
+ return 0;
+}
+
+/*
+ * Memory mapped I/O
+ *
+ * readX()/writeX() do byteswapping and take an ioremapped address
+ * __raw_readX()/__raw_writeX() don't byteswap and take an ioremapped address.
+ * gsc_*() don't byteswap and operate on physical addresses;
+ * eg dev->hpa or 0xfee00000.
+ */
+
+static inline unsigned char gsc_readb(unsigned long addr)
+{
+ long flags;
+ unsigned char ret;
+
+ __asm__ __volatile__(
+ " rsm 2,%0\n"
+ " ldbx 0(%2),%1\n"
+ " mtsm %0\n"
+ : "=&r" (flags), "=r" (ret) : "r" (addr) );
+
+ return ret;
+}
+
+static inline unsigned short gsc_readw(unsigned long addr)
+{
+ long flags;
+ unsigned short ret;
+
+ __asm__ __volatile__(
+ " rsm 2,%0\n"
+ " ldhx 0(%2),%1\n"
+ " mtsm %0\n"
+ : "=&r" (flags), "=r" (ret) : "r" (addr) );
+
+ return ret;
+}
+
+static inline unsigned int gsc_readl(unsigned long addr)
+{
+ u32 ret;
+
+ __asm__ __volatile__(
+ " ldwax 0(%1),%0\n"
+ : "=r" (ret) : "r" (addr) );
+
+ return ret;
+}
+
+static inline unsigned long long gsc_readq(unsigned long addr)
+{
+ unsigned long long ret;
+
+#ifdef CONFIG_64BIT
+ __asm__ __volatile__(
+ " ldda 0(%1),%0\n"
+ : "=r" (ret) : "r" (addr) );
+#else
+ /* two reads may have side effects.. */
+ ret = ((u64) gsc_readl(addr)) << 32;
+ ret |= gsc_readl(addr+4);
+#endif
+ return ret;
+}
+
+static inline void gsc_writeb(unsigned char val, unsigned long addr)
+{
+ long flags;
+ __asm__ __volatile__(
+ " rsm 2,%0\n"
+ " stbs %1,0(%2)\n"
+ " mtsm %0\n"
+ : "=&r" (flags) : "r" (val), "r" (addr) );
+}
+
+static inline void gsc_writew(unsigned short val, unsigned long addr)
+{
+ long flags;
+ __asm__ __volatile__(
+ " rsm 2,%0\n"
+ " sths %1,0(%2)\n"
+ " mtsm %0\n"
+ : "=&r" (flags) : "r" (val), "r" (addr) );
+}
+
+static inline void gsc_writel(unsigned int val, unsigned long addr)
+{
+ __asm__ __volatile__(
+ " stwas %0,0(%1)\n"
+ : : "r" (val), "r" (addr) );
+}
+
+static inline void gsc_writeq(unsigned long long val, unsigned long addr)
+{
+#ifdef CONFIG_64BIT
+ __asm__ __volatile__(
+ " stda %0,0(%1)\n"
+ : : "r" (val), "r" (addr) );
+#else
+ /* two writes may have side effects.. */
+ gsc_writel(val >> 32, addr);
+ gsc_writel(val, addr+4);
+#endif
+}
+
+/*
+ * The standard PCI ioremap interfaces
+ */
+
+extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+/* Most machines react poorly to I/O-space being cacheable... so make
+ * ioremap() non-caching and define ioremap_nocache() in terms of it.
+ */
+static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size, _PAGE_NO_CACHE);
+}
+#define ioremap_nocache(off, sz) ioremap((off), (sz))
+
+extern void iounmap(const volatile void __iomem *addr);
+
+static inline unsigned char __raw_readb(const volatile void __iomem *addr)
+{
+ return (*(volatile unsigned char __force *) (addr));
+}
+static inline unsigned short __raw_readw(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned short __force *) addr;
+}
+static inline unsigned int __raw_readl(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned int __force *) addr;
+}
+static inline unsigned long long __raw_readq(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned long long __force *) addr;
+}
+
+static inline void __raw_writeb(unsigned char b, volatile void __iomem *addr)
+{
+ *(volatile unsigned char __force *) addr = b;
+}
+static inline void __raw_writew(unsigned short b, volatile void __iomem *addr)
+{
+ *(volatile unsigned short __force *) addr = b;
+}
+static inline void __raw_writel(unsigned int b, volatile void __iomem *addr)
+{
+ *(volatile unsigned int __force *) addr = b;
+}
+static inline void __raw_writeq(unsigned long long b, volatile void __iomem *addr)
+{
+ *(volatile unsigned long long __force *) addr = b;
+}
+
+static inline unsigned char readb(const volatile void __iomem *addr)
+{
+ return __raw_readb(addr);
+}
+static inline unsigned short readw(const volatile void __iomem *addr)
+{
+ return le16_to_cpu(__raw_readw(addr));
+}
+static inline unsigned int readl(const volatile void __iomem *addr)
+{
+ return le32_to_cpu(__raw_readl(addr));
+}
+static inline unsigned long long readq(const volatile void __iomem *addr)
+{
+ return le64_to_cpu(__raw_readq(addr));
+}
+
+static inline void writeb(unsigned char b, volatile void __iomem *addr)
+{
+ __raw_writeb(b, addr);
+}
+static inline void writew(unsigned short w, volatile void __iomem *addr)
+{
+ __raw_writew(cpu_to_le16(w), addr);
+}
+static inline void writel(unsigned int l, volatile void __iomem *addr)
+{
+ __raw_writel(cpu_to_le32(l), addr);
+}
+static inline void writeq(unsigned long long q, volatile void __iomem *addr)
+{
+ __raw_writeq(cpu_to_le64(q), addr);
+}
+
+#define readb readb
+#define readw readw
+#define readl readl
+#define readq readq
+#define writeb writeb
+#define writew writew
+#define writel writel
+#define writeq writeq
+
+#define readb_relaxed(addr) readb(addr)
+#define readw_relaxed(addr) readw(addr)
+#define readl_relaxed(addr) readl(addr)
+#define readq_relaxed(addr) readq(addr)
+
+#define mmiowb() do { } while (0)
+
+void memset_io(volatile void __iomem *addr, unsigned char val, int count);
+void memcpy_fromio(void *dst, const volatile void __iomem *src, int count);
+void memcpy_toio(volatile void __iomem *dst, const void *src, int count);
+
+/* Port-space IO */
+
+#define inb_p inb
+#define inw_p inw
+#define inl_p inl
+#define outb_p outb
+#define outw_p outw
+#define outl_p outl
+
+extern unsigned char eisa_in8(unsigned short port);
+extern unsigned short eisa_in16(unsigned short port);
+extern unsigned int eisa_in32(unsigned short port);
+extern void eisa_out8(unsigned char data, unsigned short port);
+extern void eisa_out16(unsigned short data, unsigned short port);
+extern void eisa_out32(unsigned int data, unsigned short port);
+
+#if defined(CONFIG_PCI)
+extern unsigned char inb(int addr);
+extern unsigned short inw(int addr);
+extern unsigned int inl(int addr);
+
+extern void outb(unsigned char b, int addr);
+extern void outw(unsigned short b, int addr);
+extern void outl(unsigned int b, int addr);
+#elif defined(CONFIG_EISA)
+#define inb eisa_in8
+#define inw eisa_in16
+#define inl eisa_in32
+#define outb eisa_out8
+#define outw eisa_out16
+#define outl eisa_out32
+#else
+static inline char inb(unsigned long addr)
+{
+ BUG();
+ return -1;
+}
+
+static inline short inw(unsigned long addr)
+{
+ BUG();
+ return -1;
+}
+
+static inline int inl(unsigned long addr)
+{
+ BUG();
+ return -1;
+}
+
+#define outb(x, y) BUG()
+#define outw(x, y) BUG()
+#define outl(x, y) BUG()
+#endif
+
+/*
+ * String versions of in/out ops:
+ */
+extern void insb (unsigned long port, void *dst, unsigned long count);
+extern void insw (unsigned long port, void *dst, unsigned long count);
+extern void insl (unsigned long port, void *dst, unsigned long count);
+extern void outsb (unsigned long port, const void *src, unsigned long count);
+extern void outsw (unsigned long port, const void *src, unsigned long count);
+extern void outsl (unsigned long port, const void *src, unsigned long count);
+
+
+/* IO Port space is : BBiiii where BB is HBA number. */
+#define IO_SPACE_LIMIT 0x00ffffff
+
+/* PA machines have an MM I/O space from 0xf0000000-0xffffffff in 32
+ * bit mode and from 0xfffffffff0000000-0xffffffffffffffff in 64 bit
+ * mode (essentially just sign extending).  This macro takes in a 32
+ * bit I/O address (still with the leading f) and outputs the correct
+ * value for either 32 or 64 bit mode */
+#define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL)))
+
+#include <asm-generic/iomap.h>
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p) p
+
+#endif
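
The comment block above distinguishes three accessor families; the following driver-side sketch (the base address, mapping size, and register offsets are invented for illustration) shows the byte-swapping readl()/writel() pair on an ioremap()ed region. The __raw_*() variants would skip the swap, and the gsc_*() accessors take a physical address such as dev->hpa directly.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_CTRL   0x10    /* hypothetical register offsets */
#define DEMO_STATUS 0x14

static int demo_setup(unsigned long phys_base)
{
    void __iomem *regs = ioremap(phys_base, 0x100);

    if (!regs)
        return -ENOMEM;

    writel(1, regs + DEMO_CTRL);            /* byte-swapped (LE) store */
    if (!(readl(regs + DEMO_STATUS) & 1))   /* byte-swapped (LE) load */
        pr_info("demo: device not ready\n");

    iounmap(regs);
    return 0;
}
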
diff --git a/arch/parisc/include/asm/ioctl.h b/arch/parisc/include/asm/ioctl.h
new file mode 100644
index 00000000..ec8efa02
--- /dev/null
+++ b/arch/parisc/include/asm/ioctl.h
@@ -0,0 +1,44 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ * Copyright (C) 1999,2003 Matthew Wilcox < willy at debian . org >
+ * portions from "linux/ioctl.h for Linux" by H.H. Bergman.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#ifndef _ASM_PARISC_IOCTL_H
+#define _ASM_PARISC_IOCTL_H
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the max parameter size to 16kB -1 !
+ */
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE 0U
+#define _IOC_WRITE 2U
+#define _IOC_READ 1U
+
+#include <asm-generic/ioctl.h>
+
+#endif /* _ASM_PARISC_IOCTL_H */
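
A worked instance of the layout described in the comment above, assuming a 32-bit int:

/*   _IOR('T', 16, int)
 *     = (_IOC_READ << 30) | (sizeof(int) << 16) | ('T' << 8) | 16
 *     = 0x40000000        | 0x00040000          | 0x00005400 | 0x10
 *     = 0x40045410
 *
 * The pieces can be recovered with the asm-generic helpers:
 *   _IOC_DIR(0x40045410)  == _IOC_READ  (1 on parisc, see above)
 *   _IOC_TYPE(0x40045410) == 'T'
 *   _IOC_NR(0x40045410)   == 16
 *   _IOC_SIZE(0x40045410) == 4
 */
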
diff --git a/arch/parisc/include/asm/ioctls.h b/arch/parisc/include/asm/ioctls.h
new file mode 100644
index 00000000..054ec06f
--- /dev/null
+++ b/arch/parisc/include/asm/ioctls.h
@@ -0,0 +1,92 @@
+#ifndef __ARCH_PARISC_IOCTLS_H__
+#define __ARCH_PARISC_IOCTLS_H__
+
+#include <asm/ioctl.h>
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define TCGETS _IOR('T', 16, struct termios) /* TCGETATTR */
+#define TCSETS _IOW('T', 17, struct termios) /* TCSETATTR */
+#define TCSETSW _IOW('T', 18, struct termios) /* TCSETATTRD */
+#define TCSETSF _IOW('T', 19, struct termios) /* TCSETATTRF */
+#define TCGETA _IOR('T', 1, struct termio)
+#define TCSETA _IOW('T', 2, struct termio)
+#define TCSETAW _IOW('T', 3, struct termio)
+#define TCSETAF _IOW('T', 4, struct termio)
+#define TCSBRK _IO('T', 5)
+#define TCXONC _IO('T', 6)
+#define TCFLSH _IO('T', 7)
+#define TIOCEXCL 0x540C
+#define TIOCNXCL 0x540D
+#define TIOCSCTTY 0x540E
+#define TIOCGPGRP _IOR('T', 30, int)
+#define TIOCSPGRP _IOW('T', 29, int)
+#define TIOCOUTQ 0x5411
+#define TIOCSTI 0x5412
+#define TIOCGWINSZ 0x5413
+#define TIOCSWINSZ 0x5414
+#define TIOCMGET 0x5415
+#define TIOCMBIS 0x5416
+#define TIOCMBIC 0x5417
+#define TIOCMSET 0x5418
+#define TIOCGSOFTCAR 0x5419
+#define TIOCSSOFTCAR 0x541A
+#define FIONREAD 0x541B
+#define TIOCINQ FIONREAD
+#define TIOCLINUX 0x541C
+#define TIOCCONS 0x541D
+#define TIOCGSERIAL 0x541E
+#define TIOCSSERIAL 0x541F
+#define TIOCPKT 0x5420
+#define FIONBIO 0x5421
+#define TIOCNOTTY 0x5422
+#define TIOCSETD 0x5423
+#define TIOCGETD 0x5424
+#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TIOCSBRK 0x5427 /* BSD compatibility */
+#define TIOCCBRK 0x5428 /* BSD compatibility */
+#define TIOCGSID _IOR('T', 20, int) /* Return the session ID of FD */
+#define TCGETS2 _IOR('T',0x2A, struct termios2)
+#define TCSETS2 _IOW('T',0x2B, struct termios2)
+#define TCSETSW2 _IOW('T',0x2C, struct termios2)
+#define TCSETSF2 _IOW('T',0x2D, struct termios2)
+#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+#define TIOCGDEV _IOR('T',0x32, int) /* Get primary device node of /dev/console */
+#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
+#define TIOCVHANGUP 0x5437
+
+#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
+#define FIOCLEX 0x5451
+#define FIOASYNC 0x5452
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+#define FIOQSIZE 0x5460 /* Get exact space used by quota */
+
+#define TIOCSTART 0x5461
+#define TIOCSTOP 0x5462
+#define TIOCSLTC 0x5462
+
+/* Used for packet mode */
+#define TIOCPKT_DATA 0
+#define TIOCPKT_FLUSHREAD 1
+#define TIOCPKT_FLUSHWRITE 2
+#define TIOCPKT_STOP 4
+#define TIOCPKT_START 8
+#define TIOCPKT_NOSTOP 16
+#define TIOCPKT_DOSTOP 32
+#define TIOCPKT_IOCTL 64
+
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
+#endif /* __ARCH_PARISC_IOCTLS_H__ */
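
For a concrete user-space use of one of these numbers, a window-size query through the usual libc headers might look like the sketch below (stdout is assumed to be a tty):

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
    struct winsize ws;

    if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0)
        printf("%u rows x %u cols\n", ws.ws_row, ws.ws_col);
    return 0;
}
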
diff --git a/arch/parisc/include/asm/ipcbuf.h b/arch/parisc/include/asm/ipcbuf.h
new file mode 100644
index 00000000..bd956c42
--- /dev/null
+++ b/arch/parisc/include/asm/ipcbuf.h
@@ -0,0 +1,27 @@
+#ifndef __PARISC_IPCBUF_H__
+#define __PARISC_IPCBUF_H__
+
+/*
+ * The ipc64_perm structure for PA-RISC is almost identical to
+ * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the kernel.
+ * 'seq' has been changed from long to int so that it's the same size
+ * on 64-bit kernels as on 32-bit ones.
+ */
+
+struct ipc64_perm
+{
+ key_t key;
+ uid_t uid;
+ gid_t gid;
+ uid_t cuid;
+ gid_t cgid;
+ unsigned short int __pad1;
+ mode_t mode;
+ unsigned short int __pad2;
+ unsigned short int seq;
+ unsigned int __pad3;
+ unsigned long long int __unused1;
+ unsigned long long int __unused2;
+};
+
+#endif /* __PARISC_IPCBUF_H__ */
diff --git a/arch/parisc/include/asm/irq.h b/arch/parisc/include/asm/irq.h
new file mode 100644
index 00000000..1073599a
--- /dev/null
+++ b/arch/parisc/include/asm/irq.h
@@ -0,0 +1,52 @@
+/*
+ * include/asm-parisc/irq.h
+ *
+ * Copyright 2005 Matthew Wilcox <matthew@wil.cx>
+ */
+
+#ifndef _ASM_PARISC_IRQ_H
+#define _ASM_PARISC_IRQ_H
+
+#include <linux/cpumask.h>
+#include <asm/types.h>
+
+#define NO_IRQ (-1)
+
+#ifdef CONFIG_GSC
+#define GSC_IRQ_BASE 16
+#define GSC_IRQ_MAX 63
+#define CPU_IRQ_BASE 64
+#else
+#define CPU_IRQ_BASE 16
+#endif
+
+#define TIMER_IRQ (CPU_IRQ_BASE + 0)
+#define IPI_IRQ (CPU_IRQ_BASE + 1)
+#define CPU_IRQ_MAX (CPU_IRQ_BASE + (BITS_PER_LONG - 1))
+
+#define NR_IRQS (CPU_IRQ_MAX + 1)
+
+static __inline__ int irq_canonicalize(int irq)
+{
+ return (irq == 2) ? 9 : irq;
+}
+
+struct irq_chip;
+struct irq_data;
+
+void cpu_ack_irq(struct irq_data *d);
+void cpu_eoi_irq(struct irq_data *d);
+
+extern int txn_alloc_irq(unsigned int nbits);
+extern int txn_claim_irq(int);
+extern unsigned int txn_alloc_data(unsigned int);
+extern unsigned long txn_alloc_addr(unsigned int);
+extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
+
+extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
+extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest);
+
+/* soft power switch support (power.c) */
+extern struct tasklet_struct power_tasklet;
+
+#endif /* _ASM_PARISC_IRQ_H */
diff --git a/arch/parisc/include/asm/irq_regs.h b/arch/parisc/include/asm/irq_regs.h
new file mode 100644
index 00000000..3dd9c0b7
--- /dev/null
+++ b/arch/parisc/include/asm/irq_regs.h
@@ -0,0 +1 @@
+#include <asm-generic/irq_regs.h>
diff --git a/arch/parisc/include/asm/irqflags.h b/arch/parisc/include/asm/irqflags.h
new file mode 100644
index 00000000..34f9cb9b
--- /dev/null
+++ b/arch/parisc/include/asm/irqflags.h
@@ -0,0 +1,46 @@
+#ifndef __PARISC_IRQFLAGS_H
+#define __PARISC_IRQFLAGS_H
+
+#include <linux/types.h>
+#include <asm/psw.h>
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+ asm volatile("ssm 0, %0" : "=r" (flags) : : "memory");
+ return flags;
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ asm volatile("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory");
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ asm volatile("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory");
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+ asm volatile("rsm %1,%0" : "=r" (flags) : "i" (PSW_I) : "memory");
+ return flags;
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ asm volatile("mtsm %0" : : "r" (flags) : "memory");
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+ return (flags & PSW_I) == 0;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#endif /* __PARISC_IRQFLAGS_H */
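
These arch_ helpers sit underneath the generic local_irq_save()/local_irq_restore() macros; a minimal sketch of the usual pattern (the critical-section body is a placeholder):

#include <linux/irqflags.h>

static void demo_critical_section(void)
{
    unsigned long flags;

    local_irq_save(flags);      /* rsm PSW_I; previous PSW kept in flags */
    /* ... code that must not be interrupted on this CPU ... */
    local_irq_restore(flags);   /* mtsm restores the saved PSW */
}
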
diff --git a/arch/parisc/include/asm/kdebug.h b/arch/parisc/include/asm/kdebug.h
new file mode 100644
index 00000000..6ece1b03
--- /dev/null
+++ b/arch/parisc/include/asm/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/arch/parisc/include/asm/kmap_types.h b/arch/parisc/include/asm/kmap_types.h
new file mode 100644
index 00000000..58e91ed0
--- /dev/null
+++ b/arch/parisc/include/asm/kmap_types.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+#define __WITH_KM_FENCE
+#endif
+
+#include <asm-generic/kmap_types.h>
+
+#undef __WITH_KM_FENCE
+
+#endif
diff --git a/arch/parisc/include/asm/led.h b/arch/parisc/include/asm/led.h
new file mode 100644
index 00000000..c3405ab9
--- /dev/null
+++ b/arch/parisc/include/asm/led.h
@@ -0,0 +1,42 @@
+#ifndef LED_H
+#define LED_H
+
+#define LED7 0x80 /* top (or furthest right) LED */
+#define LED6 0x40
+#define LED5 0x20
+#define LED4 0x10
+#define LED3 0x08
+#define LED2 0x04
+#define LED1 0x02
+#define LED0 0x01 /* bottom (or furthest left) LED */
+
+#define LED_LAN_TX LED0 /* for LAN transmit activity */
+#define LED_LAN_RCV LED1 /* for LAN receive activity */
+#define LED_DISK_IO LED2 /* for disk activity */
+#define LED_HEARTBEAT LED3 /* heartbeat */
+
+/* values for pdc_chassis_lcd_info_ret_block.model: */
+#define DISPLAY_MODEL_LCD 0 /* KittyHawk LED or LCD */
+#define DISPLAY_MODEL_NONE 1 /* no LED or LCD */
+#define DISPLAY_MODEL_LASI 2 /* LASI style 8 bit LED */
+#define DISPLAY_MODEL_OLD_ASP 0x7F /* faked: ASP style 8 x 1 bit LED (only very old ASP versions) */
+
+#define LED_CMD_REG_NONE 0 /* NULL == no addr for the cmd register */
+
+/* register_led_driver() */
+int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long data_reg);
+
+/* registers the LED regions for procfs */
+void __init register_led_regions(void);
+
+#ifdef CONFIG_CHASSIS_LCD_LED
+/* writes a string to the LCD display (if possible on this h/w) */
+int lcd_print(const char *str);
+#else
+#define lcd_print(str)
+#endif
+
+/* main LED initialization function (uses PDC) */
+int __init led_init(void);
+
+#endif /* LED_H */
diff --git a/arch/parisc/include/asm/linkage.h b/arch/parisc/include/asm/linkage.h
new file mode 100644
index 00000000..0b19a724
--- /dev/null
+++ b/arch/parisc/include/asm/linkage.h
@@ -0,0 +1,31 @@
+#ifndef __ASM_PARISC_LINKAGE_H
+#define __ASM_PARISC_LINKAGE_H
+
+#ifndef __ALIGN
+#define __ALIGN .align 4
+#define __ALIGN_STR ".align 4"
+#endif
+
+/*
+ * In parisc assembly a semicolon marks a comment while an
+ * exclamation mark is used to separate independent lines.
+ */
+#ifdef __ASSEMBLY__
+
+#define ENTRY(name) \
+ .export name !\
+ ALIGN !\
+name:
+
+#ifdef CONFIG_64BIT
+#define ENDPROC(name) \
+ END(name)
+#else
+#define ENDPROC(name) \
+ .type name, @function !\
+ END(name)
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_PARISC_LINKAGE_H */
diff --git a/arch/parisc/include/asm/local.h b/arch/parisc/include/asm/local.h
new file mode 100644
index 00000000..c11c530f
--- /dev/null
+++ b/arch/parisc/include/asm/local.h
@@ -0,0 +1 @@
+#include <asm-generic/local.h>
diff --git a/arch/parisc/include/asm/local64.h b/arch/parisc/include/asm/local64.h
new file mode 100644
index 00000000..36c93b5c
--- /dev/null
+++ b/arch/parisc/include/asm/local64.h
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/parisc/include/asm/machdep.h b/arch/parisc/include/asm/machdep.h
new file mode 100644
index 00000000..a231c97d
--- /dev/null
+++ b/arch/parisc/include/asm/machdep.h
@@ -0,0 +1,16 @@
+#ifndef _PARISC_MACHDEP_H
+#define _PARISC_MACHDEP_H
+
+#include <linux/notifier.h>
+
+#define MACH_RESTART 1
+#define MACH_HALT 2
+#define MACH_POWER_ON 3
+#define MACH_POWER_OFF 4
+
+extern struct notifier_block *mach_notifier;
+extern void pa7300lc_init(void);
+
+extern void (*cpu_lpmc)(int, struct pt_regs *);
+
+#endif
diff --git a/arch/parisc/include/asm/mc146818rtc.h b/arch/parisc/include/asm/mc146818rtc.h
new file mode 100644
index 00000000..adf41631
--- /dev/null
+++ b/arch/parisc/include/asm/mc146818rtc.h
@@ -0,0 +1,9 @@
+/*
+ * Machine dependent access functions for RTC registers.
+ */
+#ifndef _ASM_MC146818RTC_H
+#define _ASM_MC146818RTC_H
+
+/* empty include file to satisfy the include in genrtc.c */
+
+#endif /* _ASM_MC146818RTC_H */
diff --git a/arch/parisc/include/asm/mckinley.h b/arch/parisc/include/asm/mckinley.h
new file mode 100644
index 00000000..d1ea6f12
--- /dev/null
+++ b/arch/parisc/include/asm/mckinley.h
@@ -0,0 +1,9 @@
+#ifndef ASM_PARISC_MCKINLEY_H
+#define ASM_PARISC_MCKINLEY_H
+#ifdef __KERNEL__
+
+/* declared in arch/parisc/kernel/setup.c */
+extern struct proc_dir_entry * proc_mckinley_root;
+
+#endif /*__KERNEL__*/
+#endif /*ASM_PARISC_MCKINLEY_H*/
diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h
new file mode 100644
index 00000000..f5b7bf5f
--- /dev/null
+++ b/arch/parisc/include/asm/mman.h
@@ -0,0 +1,69 @@
+#ifndef __PARISC_MMAN_H__
+#define __PARISC_MMAN_H__
+
+#define PROT_READ 0x1 /* page can be read */
+#define PROT_WRITE 0x2 /* page can be written */
+#define PROT_EXEC 0x4 /* page can be executed */
+#define PROT_SEM 0x8 /* page may be used for atomic ops */
+#define PROT_NONE 0x0 /* page can not be accessed */
+#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
+#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
+
+#define MAP_SHARED 0x01 /* Share changes */
+#define MAP_PRIVATE 0x02 /* Changes are private */
+#define MAP_TYPE 0x03 /* Mask for type of mapping */
+#define MAP_FIXED 0x04 /* Interpret addr exactly */
+#define MAP_ANONYMOUS 0x10 /* don't use a file */
+
+#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+#define MAP_LOCKED 0x2000 /* pages are locked */
+#define MAP_NORESERVE 0x4000 /* don't check for reservations */
+#define MAP_GROWSDOWN 0x8000 /* stack-like segment */
+#define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
+#define MAP_NONBLOCK 0x20000 /* do not block on IO */
+#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x80000 /* create a huge page mapping */
+
+#define MS_SYNC 1 /* synchronous memory sync */
+#define MS_ASYNC 2 /* sync memory asynchronously */
+#define MS_INVALIDATE 4 /* invalidate the caches */
+
+#define MCL_CURRENT 1 /* lock all current mappings */
+#define MCL_FUTURE 2 /* lock all future mappings */
+
+#define MADV_NORMAL 0 /* no further special treatment */
+#define MADV_RANDOM 1 /* expect random page references */
+#define MADV_SEQUENTIAL 2 /* expect sequential page references */
+#define MADV_WILLNEED 3 /* will need these pages */
+#define MADV_DONTNEED 4 /* don't need these pages */
+#define MADV_SPACEAVAIL 5 /* ensure that resources are reserved */
+#define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */
+#define MADV_VPS_INHERIT 7 /* Inherit parents page size */
+
+/* common/generic parameters */
+#define MADV_REMOVE 9 /* remove these pages & resources */
+#define MADV_DONTFORK 10 /* don't inherit across fork */
+#define MADV_DOFORK 11 /* do inherit across fork */
+
+/* The range 12-64 is reserved for page size specification. */
+#define MADV_4K_PAGES 12 /* Use 4K pages */
+#define MADV_16K_PAGES 14 /* Use 16K pages */
+#define MADV_64K_PAGES 16 /* Use 64K pages */
+#define MADV_256K_PAGES 18 /* Use 256K pages */
+#define MADV_1M_PAGES 20 /* Use 1 Megabyte pages */
+#define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */
+#define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */
+#define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */
+
+#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
+#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
+
+#define MADV_HUGEPAGE 67 /* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE 68 /* Not worth backing with hugepages */
+
+/* compatibility flags */
+#define MAP_FILE 0
+#define MAP_VARIABLE 0
+
+#endif /* __PARISC_MMAN_H__ */
diff --git a/arch/parisc/include/asm/mmu.h b/arch/parisc/include/asm/mmu.h
new file mode 100644
index 00000000..6a310cf8
--- /dev/null
+++ b/arch/parisc/include/asm/mmu.h
@@ -0,0 +1,7 @@
+#ifndef _PARISC_MMU_H_
+#define _PARISC_MMU_H_
+
+/* On parisc, we store the space id here */
+typedef unsigned long mm_context_t;
+
+#endif /* _PARISC_MMU_H_ */
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
new file mode 100644
index 00000000..354b2aca
--- /dev/null
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -0,0 +1,80 @@
+#ifndef __PARISC_MMU_CONTEXT_H
+#define __PARISC_MMU_CONTEXT_H
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm-generic/mm_hooks.h>
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+/* on PA-RISC, we actually have enough contexts to justify an allocator
+ * for them. prumpf */
+
+extern unsigned long alloc_sid(void);
+extern void free_sid(unsigned long);
+
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ BUG_ON(atomic_read(&mm->mm_users) != 1);
+
+ mm->context = alloc_sid();
+ return 0;
+}
+
+static inline void
+destroy_context(struct mm_struct *mm)
+{
+ free_sid(mm->context);
+ mm->context = 0;
+}
+
+static inline unsigned long __space_to_prot(mm_context_t context)
+{
+#if SPACEID_SHIFT == 0
+ return context << 1;
+#else
+ return context >> (SPACEID_SHIFT - 1);
+#endif
+}
+
+static inline void load_context(mm_context_t context)
+{
+ mtsp(context, 3);
+ mtctl(__space_to_prot(context), 8);
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+{
+
+ if (prev != next) {
+ mtctl(__pa(next->pgd), 25);
+ load_context(next->context);
+ }
+}
+
+#define deactivate_mm(tsk,mm) do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+ /*
+ * Activate_mm is our one chance to allocate a space id
+ * for a new mm created in the exec path. There's also
+ * some lazy tlb stuff, which is currently dead code, but
+ * we only allocate a space id if one hasn't been allocated
+ * already, so we should be OK.
+ */
+
+ BUG_ON(next == &init_mm); /* Should never happen */
+
+ if (next->context == 0)
+ next->context = alloc_sid();
+
+ switch_mm(prev,next,current);
+}
+#endif
diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h
new file mode 100644
index 00000000..e67eb9c3
--- /dev/null
+++ b/arch/parisc/include/asm/mmzone.h
@@ -0,0 +1,66 @@
+#ifndef _PARISC_MMZONE_H
+#define _PARISC_MMZONE_H
+
+#ifdef CONFIG_DISCONTIGMEM
+
+#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
+extern int npmem_ranges;
+
+struct node_map_data {
+ pg_data_t pg_data;
+};
+
+extern struct node_map_data node_data[];
+
+#define NODE_DATA(nid) (&node_data[nid].pg_data)
+
+/* We have these possible memory map layouts:
+ * Astro: 0-3.75, 67.75-68, 4-64
+ * zx1: 0-1, 257-260, 4-256
+ * Stretch (N-class): 0-2, 4-32, 34-xxx
+ */
+
+/* Since each 1GB can only belong to one region (node), we can create
+ * an index table for pfn to nid lookup; each entry in pfnnid_map
+ * represents 1GB, and contains the node that the memory belongs to. */
+
+#define PFNNID_SHIFT (30 - PAGE_SHIFT)
+#define PFNNID_MAP_MAX 512 /* support 512GB */
+extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
+
+#ifndef CONFIG_64BIT
+#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
+#else
+/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */
+#define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT))
+#endif
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+ unsigned int i;
+ unsigned char r;
+
+ if (unlikely(pfn_is_io(pfn)))
+ return 0;
+
+ i = pfn >> PFNNID_SHIFT;
+ BUG_ON(i >= sizeof(pfnnid_map) / sizeof(pfnnid_map[0]));
+ r = pfnnid_map[i];
+ BUG_ON(r == 0xff);
+
+ return (int)r;
+}
+
+static inline int pfn_valid(int pfn)
+{
+ int nid = pfn_to_nid(pfn);
+
+ if (nid >= 0)
+ return (pfn < node_end_pfn(nid));
+ return 0;
+}
+
+#else /* !CONFIG_DISCONTIGMEM */
+#define MAX_PHYSMEM_RANGES 1
+#endif
+#endif /* _PARISC_MMZONE_H */
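
To make the 1 GB granularity concrete, a worked example of the lookup above, assuming 4 KB pages:

/*   PAGE_SHIFT = 12, so PFNNID_SHIFT = 30 - 12 = 18, and each
 *   pfnnid_map[] entry covers 2^18 pages * 4 KB = 1 GB of physical
 *   address space.
 *
 *   A page at physical address 0x140000000 (5 GB) has
 *     pfn = 0x140000000 >> 12 = 0x140000
 *     i   = 0x140000    >> 18 = 5
 *   so pfnnid_map[5] holds its node id.
 */
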
diff --git a/arch/parisc/include/asm/module.h b/arch/parisc/include/asm/module.h
new file mode 100644
index 00000000..1f412342
--- /dev/null
+++ b/arch/parisc/include/asm/module.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_PARISC_MODULE_H
+#define _ASM_PARISC_MODULE_H
+/*
+ * This file contains the parisc architecture specific module code.
+ */
+#ifdef CONFIG_64BIT
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Sym Elf64_Sym
+#define Elf_Ehdr Elf64_Ehdr
+#define Elf_Addr Elf64_Addr
+#define Elf_Rela Elf64_Rela
+#else
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Ehdr Elf32_Ehdr
+#define Elf_Addr Elf32_Addr
+#define Elf_Rela Elf32_Rela
+#endif
+
+struct unwind_table;
+
+struct mod_arch_specific
+{
+ unsigned long got_offset, got_count, got_max;
+ unsigned long fdesc_offset, fdesc_count, fdesc_max;
+ struct {
+ unsigned long stub_offset;
+ unsigned int stub_entries;
+ } *section;
+ int unwind_section;
+ struct unwind_table *unwind;
+};
+
+#endif /* _ASM_PARISC_MODULE_H */
diff --git a/arch/parisc/include/asm/msgbuf.h b/arch/parisc/include/asm/msgbuf.h
new file mode 100644
index 00000000..fe88f264
--- /dev/null
+++ b/arch/parisc/include/asm/msgbuf.h
@@ -0,0 +1,37 @@
+#ifndef _PARISC_MSGBUF_H
+#define _PARISC_MSGBUF_H
+
+/*
+ * The msqid64_ds structure for parisc architecture, copied from sparc.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+#ifndef CONFIG_64BIT
+ unsigned int __pad1;
+#endif
+ __kernel_time_t msg_stime; /* last msgsnd time */
+#ifndef CONFIG_64BIT
+ unsigned int __pad2;
+#endif
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+#ifndef CONFIG_64BIT
+ unsigned int __pad3;
+#endif
+ __kernel_time_t msg_ctime; /* last change time */
+ unsigned int msg_cbytes; /* current number of bytes on queue */
+ unsigned int msg_qnum; /* number of messages in queue */
+ unsigned int msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned int __unused1;
+ unsigned int __unused2;
+};
+
+#endif /* _PARISC_MSGBUF_H */
diff --git a/arch/parisc/include/asm/mutex.h b/arch/parisc/include/asm/mutex.h
new file mode 100644
index 00000000..458c1f7f
--- /dev/null
+++ b/arch/parisc/include/asm/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
new file mode 100644
index 00000000..a84cc1f9
--- /dev/null
+++ b/arch/parisc/include/asm/page.h
@@ -0,0 +1,164 @@
+#ifndef _PARISC_PAGE_H
+#define _PARISC_PAGE_H
+
+#include <linux/const.h>
+
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define PAGE_SHIFT 12
+#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
+# define PAGE_SHIFT 14
+#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
+# define PAGE_SHIFT 16
+#else
+# error "unknown default kernel page size"
+#endif
+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+
+#ifndef __ASSEMBLY__
+
+#include <asm/types.h>
+#include <asm/cache.h>
+
+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(to,from) copy_user_page_asm((void *)(to), (void *)(from))
+
+struct page;
+
+void copy_user_page_asm(void *to, void *from);
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+ struct page *pg);
+void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
+
+/*
+ * These are used to make use of C type-checking..
+ */
+#define STRICT_MM_TYPECHECKS
+#ifdef STRICT_MM_TYPECHECKS
+typedef struct { unsigned long pte; } pte_t; /* either 32 or 64bit */
+
+/* NOTE: even on 64 bits, these entries are __u32 because we allocate
+ * the pmd and pgd in ZONE_DMA (i.e. under 4GB) */
+typedef struct { __u32 pmd; } pmd_t;
+typedef struct { __u32 pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+/* These do not work as lvalues, so make sure we don't use them as such. */
+#define pmd_val(x) ((x).pmd + 0)
+#define pgd_val(x) ((x).pgd + 0)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#define __pmd_val_set(x,n) (x).pmd = (n)
+#define __pgd_val_set(x,n) (x).pgd = (n)
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef __u32 pmd_t;
+typedef __u32 pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#define __pmd_val_set(x,n) (x) = (n)
+#define __pgd_val_set(x,n) (x) = (n)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+typedef struct page *pgtable_t;
+
+typedef struct __physmem_range {
+ unsigned long start_pfn;
+ unsigned long pages; /* PAGE_SIZE pages */
+} physmem_range_t;
+
+extern physmem_range_t pmem_ranges[];
+extern int npmem_ranges;
+
+#endif /* !__ASSEMBLY__ */
+
+/* WARNING: The definitions below must match sizeof(pte_t) etc.
+ * exactly.
+ */
+#ifdef CONFIG_64BIT
+#define BITS_PER_PTE_ENTRY 3
+#define BITS_PER_PMD_ENTRY 2
+#define BITS_PER_PGD_ENTRY 2
+#else
+#define BITS_PER_PTE_ENTRY 2
+#define BITS_PER_PMD_ENTRY 2
+#define BITS_PER_PGD_ENTRY BITS_PER_PMD_ENTRY
+#endif
+#define PGD_ENTRY_SIZE (1UL << BITS_PER_PGD_ENTRY)
+#define PMD_ENTRY_SIZE (1UL << BITS_PER_PMD_ENTRY)
+#define PTE_ENTRY_SIZE (1UL << BITS_PER_PTE_ENTRY)
+
+#define LINUX_GATEWAY_SPACE 0
+
+/* This governs the relationship between virtual and physical addresses.
+ * If you alter it, make sure to take care of our various fixed mapping
+ * segments in fixmap.h */
+#ifdef CONFIG_64BIT
+#define __PAGE_OFFSET (0x40000000) /* 1GB */
+#else
+#define __PAGE_OFFSET (0x10000000) /* 256MB */
+#endif
+
+#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+
+/* The size of the gateway page (we leave lots of room for expansion) */
+#define GATEWAY_PAGE_SIZE 0x4000
+
+/* The start of the actual kernel binary---used in vmlinux.lds.S
+ * Leave some space after __PAGE_OFFSET for detecting kernel null
+ * ptr derefs */
+#define KERNEL_BINARY_TEXT_START (__PAGE_OFFSET + 0x100000)
+
+/* These macros don't work for 64-bit C code -- don't allow in C at all */
+#ifdef __ASSEMBLY__
+# define PA(x) ((x)-__PAGE_OFFSET)
+# define VA(x) ((x)+__PAGE_OFFSET)
+#endif
+#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+
+#ifndef CONFIG_DISCONTIGMEM
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#endif /* CONFIG_DISCONTIGMEM */
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */
+#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+#endif
+
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* _PARISC_PAGE_H */
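
A worked example of the linear __pa()/__va() mapping above, for a 32-bit kernel with 4 KB pages (PAGE_OFFSET = 0x10000000, PAGE_SHIFT = 12):

/*   __pa(0x10400000) = 0x10400000 - 0x10000000 = 0x00400000
 *   __va(0x00400000) = 0x00400000 + 0x10000000 = (void *)0x10400000
 *
 *   virt_to_page(0x10400000) uses pfn = 0x00400000 >> 12 = 0x400,
 *   i.e. mem_map entry 1024.
 */
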
diff --git a/arch/parisc/include/asm/param.h b/arch/parisc/include/asm/param.h
new file mode 100644
index 00000000..965d4542
--- /dev/null
+++ b/arch/parisc/include/asm/param.h
@@ -0,0 +1 @@
+#include <asm-generic/param.h>
diff --git a/arch/parisc/include/asm/parisc-device.h b/arch/parisc/include/asm/parisc-device.h
new file mode 100644
index 00000000..9afdad6c
--- /dev/null
+++ b/arch/parisc/include/asm/parisc-device.h
@@ -0,0 +1,64 @@
+#ifndef _ASM_PARISC_PARISC_DEVICE_H_
+#define _ASM_PARISC_PARISC_DEVICE_H_
+
+#include <linux/device.h>
+
+struct parisc_device {
+ struct resource hpa; /* Hard Physical Address */
+ struct parisc_device_id id;
+ struct parisc_driver *driver; /* Driver for this device */
+ char name[80]; /* The hardware description */
+ int irq;
+ int aux_irq; /* Some devices have a second IRQ */
+
+ char hw_path; /* The module number on this bus */
+ unsigned int num_addrs; /* some devices have additional address ranges. */
+ unsigned long *addr; /* which will be stored here */
+
+#ifdef CONFIG_64BIT
+ /* parms for pdc_pat_cell_module() call */
+ unsigned long pcell_loc; /* Physical Cell location */
+ unsigned long mod_index; /* PAT specific - Misc Module info */
+
+ /* generic info returned from pdc_pat_cell_module() */
+ unsigned long mod_info; /* PAT specific - Misc Module info */
+ unsigned long pmod_loc; /* physical Module location */
+#endif
+ u64 dma_mask; /* DMA mask for I/O */
+ struct device dev;
+};
+
+struct parisc_driver {
+ struct parisc_driver *next;
+ char *name;
+ const struct parisc_device_id *id_table;
+ int (*probe) (struct parisc_device *dev); /* New device discovered */
+ int (*remove) (struct parisc_device *dev);
+ struct device_driver drv;
+};
+
+
+#define to_parisc_device(d) container_of(d, struct parisc_device, dev)
+#define to_parisc_driver(d) container_of(d, struct parisc_driver, drv)
+#define parisc_parent(d) to_parisc_device(d->dev.parent)
+
+static inline const char *parisc_pathname(struct parisc_device *d)
+{
+ return dev_name(&d->dev);
+}
+
+static inline void
+parisc_set_drvdata(struct parisc_device *d, void *p)
+{
+ dev_set_drvdata(&d->dev, p);
+}
+
+static inline void *
+parisc_get_drvdata(struct parisc_device *d)
+{
+ return dev_get_drvdata(&d->dev);
+}
+
+extern struct bus_type parisc_bus_type;
+
+#endif /*_ASM_PARISC_PARISC_DEVICE_H_*/
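
A hedged sketch of how these structures combine with register_parisc_driver() declared in hardware.h; the driver name and the sversion value 0x12345 are invented for illustration, while HPHW_FIO and the *_ANY_ID wildcards come from the headers above.

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/parisc-device.h>
#include <asm/hardware.h>

static const struct parisc_device_id demo_ids[] = {
    /* hw_type, hversion_rev, hversion, sversion (sversion is made up) */
    { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x12345 },
    { 0, }
};

static int demo_probe(struct parisc_device *dev)
{
    printk(KERN_INFO "demo: found %s at 0x%lx, irq %d\n",
           dev->name, (unsigned long)dev->hpa.start, dev->irq);
    return 0;
}

static struct parisc_driver demo_driver = {
    .name     = "demo",
    .id_table = demo_ids,
    .probe    = demo_probe,
};

static int __init demo_init(void)
{
    return register_parisc_driver(&demo_driver);
}
module_init(demo_init);
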
diff --git a/arch/parisc/include/asm/parport.h b/arch/parisc/include/asm/parport.h
new file mode 100644
index 00000000..00d9cc3e
--- /dev/null
+++ b/arch/parisc/include/asm/parport.h
@@ -0,0 +1,18 @@
+/*
+ *
+ * parport.h: ia32-compatible parport initialisation
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+#ifndef _ASM_PARPORT_H
+#define _ASM_PARPORT_H 1
+
+
+static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
+{
+ /* nothing ! */
+ return 0;
+}
+
+
+#endif /* !(_ASM_PARPORT_H) */
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h
new file mode 100644
index 00000000..2242a5c6
--- /dev/null
+++ b/arch/parisc/include/asm/pci.h
@@ -0,0 +1,266 @@
+#ifndef __ASM_PARISC_PCI_H
+#define __ASM_PARISC_PCI_H
+
+#include <asm/scatterlist.h>
+
+
+
+/*
+** HP PCI platforms generally support multiple bus adapters.
+** (workstations 1-~4, servers 2-~32)
+**
+** Newer platforms number the busses across PCI bus adapters *sparsely*.
+** E.g. 0, 8, 16, ...
+**
+** Under a PCI bus, most HP platforms support PPBs up to two or three
+** levels deep. See "Bit3" product line.
+*/
+#define PCI_MAX_BUSSES 256
+
+
+/* To be used as: mdelay(pci_post_reset_delay);
+ *
+ * post_reset is the time the kernel should stall to prevent anyone from
+ * accessing the PCI bus once #RESET is de-asserted.
+ * PCI spec somewhere says 1 second but with multi-PCI bus systems,
+ * this makes the boot time much longer than necessary.
+ * 20ms seems to work for all the HP PCI implementations to date.
+ */
+#define pci_post_reset_delay 50
+
+
+/*
+** pci_hba_data (aka H2P_OBJECT in HP/UX)
+**
+** This is the "common" or "base" data structure which HBA drivers
+** (eg Dino or LBA) are required to place at the top of their own
+** platform_data structure. I've heard this called "C inheritance" too.
+**
+** Data needed by pcibios layer belongs here.
+*/
+struct pci_hba_data {
+ void __iomem *base_addr; /* aka Host Physical Address */
+ const struct parisc_device *dev; /* device from PA bus walk */
+ struct pci_bus *hba_bus; /* primary PCI bus below HBA */
+ int hba_num; /* I/O port space access "key" */
+ struct resource bus_num; /* PCI bus numbers */
+ struct resource io_space; /* PIOP */
+ struct resource lmmio_space; /* bus addresses < 4Gb */
+ struct resource elmmio_space; /* additional bus addresses < 4Gb */
+ struct resource gmmio_space; /* bus addresses > 4Gb */
+
+ /* NOTE: Dino code assumes it can use *all* of the lmmio_space,
+ * elmmio_space and gmmio_space as a contiguous array of
+ * resources. This #define represents the array size */
+ #define DINO_MAX_LMMIO_RESOURCES 3
+
+ unsigned long lmmio_space_offset; /* CPU view - PCI view */
+ void * iommu; /* IOMMU this device is under */
+ /* REVISIT - spinlock to protect resources? */
+
+ #define HBA_NAME_SIZE 16
+ char io_name[HBA_NAME_SIZE];
+ char lmmio_name[HBA_NAME_SIZE];
+ char elmmio_name[HBA_NAME_SIZE];
+ char gmmio_name[HBA_NAME_SIZE];
+};
+
+#define HBA_DATA(d) ((struct pci_hba_data *) (d))
+
+/*
+** We support 2^16 I/O ports per HBA. These are set up in the form
+** 0xbbxxxx, where bb is the bus number and xxxx is the I/O port
+** space address.
+*/
+#define HBA_PORT_SPACE_BITS 16
+
+#define HBA_PORT_BASE(h) ((h) << HBA_PORT_SPACE_BITS)
+#define HBA_PORT_SPACE_SIZE (1UL << HBA_PORT_SPACE_BITS)
+
+#define PCI_PORT_HBA(a) ((a) >> HBA_PORT_SPACE_BITS)
+#define PCI_PORT_ADDR(a) ((a) & (HBA_PORT_SPACE_SIZE - 1))
+
+#ifdef CONFIG_64BIT
+#define PCI_F_EXTEND 0xffffffff00000000UL
+#define PCI_IS_LMMIO(hba,a) pci_is_lmmio(hba,a)
+
+/* We need to know if an address is LMMIO or GMMIO.
+ * LMMIO requires mangling; GMMIO must be used as-is.
+ */
+static __inline__ int pci_is_lmmio(struct pci_hba_data *hba, unsigned long a)
+{
+ return(((a) & PCI_F_EXTEND) == PCI_F_EXTEND);
+}
+
+/*
+** Convert between PCI (IO_VIEW) addresses and processor (PA_VIEW) addresses.
+** See pci.c for more conversions used by Generic PCI code.
+**
+** Platform characteristics/firmware guarantee that
+** (1) PA_VIEW - IO_VIEW = lmmio_offset for both LMMIO and ELMMIO
+** (2) PA_VIEW == IO_VIEW for GMMIO
+*/
+#define PCI_BUS_ADDR(hba,a) (PCI_IS_LMMIO(hba,a) \
+ ? ((a) - hba->lmmio_space_offset) /* mangle LMMIO */ \
+ : (a)) /* GMMIO */
+#define PCI_HOST_ADDR(hba,a) (((a) & PCI_F_EXTEND) == 0 \
+ ? (a) + hba->lmmio_space_offset \
+ : (a))
+
+#else /* !CONFIG_64BIT */
+
+#define PCI_BUS_ADDR(hba,a) (a)
+#define PCI_HOST_ADDR(hba,a) (a)
+#define PCI_F_EXTEND 0UL
+#define PCI_IS_LMMIO(hba,a) (1) /* 32-bit doesn't support GMMIO */
+
+#endif /* !CONFIG_64BIT */
+
+/*
+** KLUGE: linux/pci.h include asm/pci.h BEFORE declaring struct pci_bus
+** (This eliminates some of the warnings).
+*/
+struct pci_bus;
+struct pci_dev;
+
+/*
+ * If the PCI device's view of memory is the same as the CPU's view of memory,
+ * PCI_DMA_BUS_IS_PHYS is true. The networking and block device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#ifdef CONFIG_PA20
+/* All PA-2.0 machines have an IOMMU. */
+#define PCI_DMA_BUS_IS_PHYS 0
+#define parisc_has_iommu() do { } while (0)
+#else
+
+#if defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA)
+extern int parisc_bus_is_phys; /* in arch/parisc/kernel/setup.c */
+#define PCI_DMA_BUS_IS_PHYS parisc_bus_is_phys
+#define parisc_has_iommu() do { parisc_bus_is_phys = 0; } while (0)
+#else
+#define PCI_DMA_BUS_IS_PHYS 1
+#define parisc_has_iommu() do { } while (0)
+#endif
+
+#endif /* !CONFIG_PA20 */
+
+
+/*
+** Most PCI devices (eg Tulip, NCR720) also export the same registers
+** to both MMIO and I/O port space. Due to poor performance of I/O Port
+** access under HP PCI bus adapters, strongly recommend the use of MMIO
+** address space.
+**
+** While I'm at it more PA programming notes:
+**
+** 1) MMIO stores (writes) are posted operations. This means the processor
+** gets an "ACK" before the write actually gets to the device. A read
+** to the same device (or typically the bus adapter above it) will
+** force in-flight write transaction(s) out to the targeted device
+** before the read can complete.
+**
+** 2) The Programmed I/O (PIO) data may not always be strongly ordered with
+** respect to DMA on all platforms. Ie PIO data can reach the processor
+** before in-flight DMA reaches memory. Since most SMP PA platforms
+** are I/O coherent, it generally doesn't matter...but sometimes
+** it does.
+**
+** I've helped device driver writers debug both types of problems.
+*/
+struct pci_port_ops {
+ u8 (*inb) (struct pci_hba_data *hba, u16 port);
+ u16 (*inw) (struct pci_hba_data *hba, u16 port);
+ u32 (*inl) (struct pci_hba_data *hba, u16 port);
+ void (*outb) (struct pci_hba_data *hba, u16 port, u8 data);
+ void (*outw) (struct pci_hba_data *hba, u16 port, u16 data);
+ void (*outl) (struct pci_hba_data *hba, u16 port, u32 data);
+};
+
+
+struct pci_bios_ops {
+ void (*init)(void);
+ void (*fixup_bus)(struct pci_bus *bus);
+};
+
+/*
+** Stuff declared in arch/parisc/kernel/pci.c
+*/
+extern struct pci_port_ops *pci_port;
+extern struct pci_bios_ops *pci_bios;
+
+#ifdef CONFIG_PCI
+extern void pcibios_register_hba(struct pci_hba_data *);
+extern void pcibios_set_master(struct pci_dev *);
+#else
+static inline void pcibios_register_hba(struct pci_hba_data *x)
+{
+}
+#endif
+
+/*
+ * pcibios_assign_all_busses() is used in drivers/pci/pci.c:pci_do_scan_bus()
+ * 0 == check if bridge is numbered before re-numbering.
+ * 1 == pci_do_scan_bus() should automatically number all PCI-PCI bridges.
+ *
+ * We *should* set this to zero for "legacy" platforms and one
+ * for PAT platforms.
+ *
+ * But legacy platforms also need to renumber the busses below a Host
+ * Bus controller. Adding a 4-port Tulip card on the first PCI root
+ * bus of a C200 resulted in the secondary bus being numbered as 1.
+ * The second PCI host bus controller's root bus had already been
+ * assigned bus number 1 by firmware and sysfs complained.
+ *
+ * Firmware isn't doing anything wrong here since each controller
+ * is its own PCI domain. It's simpler and easier for us to renumber
+ * the busses rather than treat each Dino as a separate PCI domain.
+ * Eventually, we may want to introduce PCI domains for Superdome or
+ * rp7420/8420 boxes and then revisit this issue.
+ */
+#define pcibios_assign_all_busses() (1)
+
+#define PCIBIOS_MIN_IO 0x10
+#define PCIBIOS_MIN_MEM 0x1000 /* NBPG - but pci/setup-res.c dies */
+
+/* export the pci_ DMA API in terms of the dma_ one */
+#include <asm-generic/pci-dma-compat.h>
+
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+ enum pci_dma_burst_strategy *strat,
+ unsigned long *strategy_parameter)
+{
+ unsigned long cacheline_size;
+ u8 byte;
+
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
+ if (byte == 0)
+ cacheline_size = 1024;
+ else
+ cacheline_size = (int) byte * 4;
+
+ *strat = PCI_DMA_BURST_MULTIPLE;
+ *strategy_parameter = cacheline_size;
+}
+#endif
+
+extern void
+pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+ struct resource *res);
+
+extern void
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+ struct pci_bus_region *region);
+
+static inline void pcibios_penalize_isa_irq(int irq, int active)
+{
+ /* We don't need to penalize isa irq's */
+}
+
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ return channel ? 15 : 14;
+}
+
+#endif /* __ASM_PARISC_PCI_H */
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h
new file mode 100644
index 00000000..4ca510b3
--- /dev/null
+++ b/arch/parisc/include/asm/pdc.h
@@ -0,0 +1,767 @@
+#ifndef _PARISC_PDC_H
+#define _PARISC_PDC_H
+
+/*
+ * PDC return values ...
+ * All PDC calls return a subset of these errors.
+ */
+
+#define PDC_WARN 3 /* Call completed with a warning */
+#define PDC_REQ_ERR_1 2 /* See above */
+#define PDC_REQ_ERR_0 1 /* Call would generate a requestor error */
+#define PDC_OK 0 /* Call completed successfully */
+#define PDC_BAD_PROC -1 /* Called non-existent procedure*/
+#define PDC_BAD_OPTION -2 /* Called with non-existent option */
+#define PDC_ERROR -3 /* Call could not complete without an error */
+#define PDC_NE_MOD -5 /* Module not found */
+#define PDC_NE_CELL_MOD -7 /* Cell module not found */
+#define PDC_INVALID_ARG -10 /* Called with an invalid argument */
+#define PDC_BUS_POW_WARN -12 /* Call could not complete in allowed power budget */
+#define PDC_NOT_NARROW -17 /* Narrow mode not supported */
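+
+/*
+ * Illustrative check of a PDC status word, using one of the wrapper
+ * functions declared later in this header:
+ *
+ *	struct pdc_model model;
+ *
+ *	if (pdc_model_info(&model) != PDC_OK)
+ *		printk(KERN_WARNING "PDC_MODEL_INFO failed\n");
+ */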
+
+/*
+ * PDC entry points...
+ */
+
+#define PDC_POW_FAIL 1 /* perform a power-fail */
+#define PDC_POW_FAIL_PREPARE 0 /* prepare for powerfail */
+
+#define PDC_CHASSIS 2 /* PDC-chassis functions */
+#define PDC_CHASSIS_DISP 0 /* update chassis display */
+#define PDC_CHASSIS_WARN 1 /* return chassis warnings */
+#define PDC_CHASSIS_DISPWARN 2 /* update&return chassis status */
+#define PDC_RETURN_CHASSIS_INFO 128 /* HVERSION dependent: return chassis LED/LCD info */
+
+#define PDC_PIM 3 /* Get PIM data */
+#define PDC_PIM_HPMC 0 /* Transfer HPMC data */
+#define PDC_PIM_RETURN_SIZE 1 /* Get Max buffer needed for PIM*/
+#define PDC_PIM_LPMC 2 /* Transfer LPMC data */
+#define PDC_PIM_SOFT_BOOT 3 /* Transfer Soft Boot data */
+#define PDC_PIM_TOC 4 /* Transfer TOC data */
+
+#define PDC_MODEL 4 /* PDC model information call */
+#define PDC_MODEL_INFO 0 /* returns information */
+#define PDC_MODEL_BOOTID 1 /* set the BOOT_ID */
+#define PDC_MODEL_VERSIONS 2 /* returns cpu-internal versions*/
+#define PDC_MODEL_SYSMODEL 3 /* return system model info */
+#define PDC_MODEL_ENSPEC 4 /* enable specific option */
+#define PDC_MODEL_DISPEC 5 /* disable specific option */
+#define PDC_MODEL_CPU_ID 6 /* returns cpu-id (only newer machines!) */
+#define PDC_MODEL_CAPABILITIES 7 /* returns OS32/OS64-flags */
+/* Values returned by PDC_MODEL_CAPABILITIES, incl. non-equivalent virtual aliasing (NVA) support */
+#define PDC_MODEL_OS64 (1 << 0)
+#define PDC_MODEL_OS32 (1 << 1)
+#define PDC_MODEL_IOPDIR_FDC (1 << 2)
+#define PDC_MODEL_NVA_MASK (3 << 4)
+#define PDC_MODEL_NVA_SUPPORTED (0 << 4)
+#define PDC_MODEL_NVA_SLOW (1 << 4)
+#define PDC_MODEL_NVA_UNSUPPORTED (3 << 4)
+#define PDC_MODEL_GET_BOOT__OP 8 /* returns boot test options */
+#define PDC_MODEL_SET_BOOT__OP 9 /* set boot test options */
+
+#define PA89_INSTRUCTION_SET 0x4 /* capabilities returned */
+#define PA90_INSTRUCTION_SET 0x8
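+
+/*
+ * Illustrative use of the capability word (pdc_model_capabilities() is
+ * declared later in this header); here we only look at the NVA bits:
+ *
+ *	unsigned long cap;
+ *
+ *	if (pdc_model_capabilities(&cap) == PDC_OK &&
+ *	    (cap & PDC_MODEL_NVA_MASK) == PDC_MODEL_NVA_UNSUPPORTED)
+ *		printk(KERN_WARNING "NVA unsupported by PDC\n");
+ */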
+
+#define PDC_CACHE 5 /* return/set cache (& TLB) info*/
+#define PDC_CACHE_INFO 0 /* returns information */
+#define PDC_CACHE_SET_COH 1 /* set coherence state */
+#define PDC_CACHE_RET_SPID 2 /* returns space-ID bits */
+
+#define PDC_HPA 6 /* return HPA of processor */
+#define PDC_HPA_PROCESSOR 0
+#define PDC_HPA_MODULES 1
+
+#define PDC_COPROC 7 /* Co-Processor (usually FP unit(s)) */
+#define PDC_COPROC_CFG 0 /* Co-Processor Cfg (FP unit(s) enabled?) */
+
+#define PDC_IODC 8 /* talk to IODC */
+#define PDC_IODC_READ 0 /* read IODC entry point */
+/* PDC_IODC_RI_ * INDEX parameter of PDC_IODC_READ */
+#define PDC_IODC_RI_DATA_BYTES 0 /* IODC Data Bytes */
+/* 1, 2 obsolete - HVERSION dependent */
+#define PDC_IODC_RI_INIT 3 /* Initialize module */
+#define PDC_IODC_RI_IO 4 /* Module input/output */
+#define PDC_IODC_RI_SPA 5 /* Module ENTRY_SPA */
+#define PDC_IODC_RI_CONFIG 6 /* Module ENTRY_CONFIG */
+/* 7 obsolete - HVERSION dependent */
+#define PDC_IODC_RI_TEST 8 /* Module ENTRY_TEST */
+#define PDC_IODC_RI_TLB 9 /* Module ENTRY_TLB */
+#define PDC_IODC_NINIT 2 /* non-destructive init */
+#define PDC_IODC_DINIT 3 /* destructive init */
+#define PDC_IODC_MEMERR 4 /* check for memory errors */
+#define PDC_IODC_INDEX_DATA 0 /* get first 16 bytes from mod IODC */
+#define PDC_IODC_BUS_ERROR -4 /* bus error return value */
+#define PDC_IODC_INVALID_INDEX -5 /* invalid index return value */
+#define PDC_IODC_COUNT -6 /* count is too small */
+
+#define PDC_TOD 9 /* time-of-day clock (TOD) */
+#define PDC_TOD_READ 0 /* read TOD */
+#define PDC_TOD_WRITE 1 /* write TOD */
+
+
+#define PDC_STABLE 10 /* stable storage (sprockets) */
+#define PDC_STABLE_READ 0
+#define PDC_STABLE_WRITE 1
+#define PDC_STABLE_RETURN_SIZE 2
+#define PDC_STABLE_VERIFY_CONTENTS 3
+#define PDC_STABLE_INITIALIZE 4
+
+#define PDC_NVOLATILE 11 /* often not implemented */
+
+#define PDC_ADD_VALID 12 /* Memory validation PDC call */
+#define PDC_ADD_VALID_VERIFY 0 /* Make PDC_ADD_VALID verify region */
+
+#define PDC_INSTR 15 /* get instr to invoke PDCE_CHECK() */
+
+#define PDC_PROC 16 /* (sprockets) */
+
+#define PDC_CONFIG 16 /* (sprockets) */
+#define PDC_CONFIG_DECONFIG 0
+#define PDC_CONFIG_DRECONFIG 1
+#define PDC_CONFIG_DRETURN_CONFIG 2
+
+#define PDC_BLOCK_TLB 18 /* manage hardware block-TLB */
+#define PDC_BTLB_INFO 0 /* returns parameter */
+#define PDC_BTLB_INSERT 1 /* insert BTLB entry */
+#define PDC_BTLB_PURGE 2 /* purge BTLB entries */
+#define PDC_BTLB_PURGE_ALL 3 /* purge all BTLB entries */
+
+#define PDC_TLB 19 /* manage hardware TLB miss handling */
+#define PDC_TLB_INFO 0 /* returns parameter */
+#define PDC_TLB_SETUP 1 /* set up miss handling */
+
+#define PDC_MEM 20 /* Manage memory */
+#define PDC_MEM_MEMINFO 0
+#define PDC_MEM_ADD_PAGE 1
+#define PDC_MEM_CLEAR_PDT 2
+#define PDC_MEM_READ_PDT 3
+#define PDC_MEM_RESET_CLEAR 4
+#define PDC_MEM_GOODMEM 5
+#define PDC_MEM_TABLE 128 /* Non contig mem map (sprockets) */
+#define PDC_MEM_RETURN_ADDRESS_TABLE PDC_MEM_TABLE
+#define PDC_MEM_GET_MEMORY_SYSTEM_TABLES_SIZE 131
+#define PDC_MEM_GET_MEMORY_SYSTEM_TABLES 132
+#define PDC_MEM_GET_PHYSICAL_LOCATION_FROM_MEMORY_ADDRESS 133
+
+#define PDC_MEM_RET_SBE_REPLACED 5 /* PDC_MEM return values */
+#define PDC_MEM_RET_DUPLICATE_ENTRY 4
+#define PDC_MEM_RET_BUF_SIZE_SMALL 1
+#define PDC_MEM_RET_PDT_FULL -11
+#define PDC_MEM_RET_INVALID_PHYSICAL_LOCATION ~0ULL
+
+#define PDC_PSW 21 /* Get/Set default System Mask */
+#define PDC_PSW_MASK 0 /* Return mask */
+#define PDC_PSW_GET_DEFAULTS 1 /* Return defaults */
+#define PDC_PSW_SET_DEFAULTS 2 /* Set default */
+#define PDC_PSW_ENDIAN_BIT 1 /* set for big endian */
+#define PDC_PSW_WIDE_BIT 2 /* set for wide mode */
+
+#define PDC_SYSTEM_MAP 22 /* find system modules */
+#define PDC_FIND_MODULE 0
+#define PDC_FIND_ADDRESS 1
+#define PDC_TRANSLATE_PATH 2
+
+#define PDC_SOFT_POWER 23 /* soft power switch */
+#define PDC_SOFT_POWER_INFO 0 /* return info about the soft power switch */
+#define PDC_SOFT_POWER_ENABLE 1 /* enable/disable soft power switch */
+
+
+/* HVERSION dependent */
+
+/* The PDC_MEM_MAP calls */
+#define PDC_MEM_MAP 128 /* on s700: return page info */
+#define PDC_MEM_MAP_HPA 0 /* returns hpa of a module */
+
+#define PDC_EEPROM 129 /* EEPROM access */
+#define PDC_EEPROM_READ_WORD 0
+#define PDC_EEPROM_WRITE_WORD 1
+#define PDC_EEPROM_READ_BYTE 2
+#define PDC_EEPROM_WRITE_BYTE 3
+#define PDC_EEPROM_EEPROM_PASSWORD -1000
+
+#define PDC_NVM 130 /* NVM (non-volatile memory) access */
+#define PDC_NVM_READ_WORD 0
+#define PDC_NVM_WRITE_WORD 1
+#define PDC_NVM_READ_BYTE 2
+#define PDC_NVM_WRITE_BYTE 3
+
+#define PDC_SEED_ERROR 132 /* (sprockets) */
+
+#define PDC_IO 135 /* log error info, reset IO system */
+#define PDC_IO_READ_AND_CLEAR_ERRORS 0
+#define PDC_IO_RESET 1
+#define PDC_IO_RESET_DEVICES 2
+/* sets bits 6&7 (little endian) of the HcControl Register */
+#define PDC_IO_USB_SUSPEND 0xC000000000000000
+#define PDC_IO_EEPROM_IO_ERR_TABLE_FULL -5 /* return value */
+#define PDC_IO_NO_SUSPEND -6 /* return value */
+
+#define PDC_BROADCAST_RESET 136 /* reset all processors */
+#define PDC_DO_RESET 0 /* option: perform a broadcast reset */
+#define PDC_DO_FIRM_TEST_RESET 1 /* Do broadcast reset with bitmap */
+#define PDC_BR_RECONFIGURATION 2 /* reset w/reconfiguration */
+#define PDC_FIRM_TEST_MAGIC 0xab9ec36fUL /* for this reboot only */
+
+#define PDC_LAN_STATION_ID 138 /* Hversion dependent mechanism for */
+#define PDC_LAN_STATION_ID_READ 0 /* getting the lan station address */
+
+#define PDC_LAN_STATION_ID_SIZE 6
+
+#define PDC_CHECK_RANGES 139 /* (sprockets) */
+
+#define PDC_NV_SECTIONS 141 /* (sprockets) */
+
+#define PDC_PERFORMANCE 142 /* performance monitoring */
+
+#define PDC_SYSTEM_INFO 143 /* system information */
+#define PDC_SYSINFO_RETURN_INFO_SIZE 0
+#define PDC_SYSINFO_RRETURN_SYS_INFO 1
+#define PDC_SYSINFO_RRETURN_ERRORS 2
+#define PDC_SYSINFO_RRETURN_WARNINGS 3
+#define PDC_SYSINFO_RETURN_REVISIONS 4
+#define PDC_SYSINFO_RRETURN_DIAGNOSE 5
+#define PDC_SYSINFO_RRETURN_HV_DIAGNOSE 1005
+
+#define PDC_RDR 144 /* (sprockets) */
+#define PDC_RDR_READ_BUFFER 0
+#define PDC_RDR_READ_SINGLE 1
+#define PDC_RDR_WRITE_SINGLE 2
+
+#define PDC_INTRIGUE 145 /* (sprockets) */
+#define PDC_INTRIGUE_WRITE_BUFFER 0
+#define PDC_INTRIGUE_GET_SCRATCH_BUFSIZE 1
+#define PDC_INTRIGUE_START_CPU_COUNTERS 2
+#define PDC_INTRIGUE_STOP_CPU_COUNTERS 3
+
+#define PDC_STI 146 /* STI access */
+/* same as PDC_PCI_XXX values (see below) */
+
+/* Legacy PDC definitions for the same functionality */
+#define PDC_PCI_INDEX 147
+#define PDC_PCI_INTERFACE_INFO 0
+#define PDC_PCI_SLOT_INFO 1
+#define PDC_PCI_INFLIGHT_BYTES 2
+#define PDC_PCI_READ_CONFIG 3
+#define PDC_PCI_WRITE_CONFIG 4
+#define PDC_PCI_READ_PCI_IO 5
+#define PDC_PCI_WRITE_PCI_IO 6
+#define PDC_PCI_READ_CONFIG_DELAY 7
+#define PDC_PCI_UPDATE_CONFIG_DELAY 8
+#define PDC_PCI_PCI_PATH_TO_PCI_HPA 9
+#define PDC_PCI_PCI_HPA_TO_PCI_PATH 10
+#define PDC_PCI_PCI_PATH_TO_PCI_BUS 11
+#define PDC_PCI_PCI_RESERVED 12
+#define PDC_PCI_PCI_INT_ROUTE_SIZE 13
+#define PDC_PCI_GET_INT_TBL_SIZE PDC_PCI_PCI_INT_ROUTE_SIZE
+#define PDC_PCI_PCI_INT_ROUTE 14
+#define PDC_PCI_GET_INT_TBL PDC_PCI_PCI_INT_ROUTE
+#define PDC_PCI_READ_MON_TYPE 15
+#define PDC_PCI_WRITE_MON_TYPE 16
+
+
+/* Get SCSI Interface Card info: SDTR, SCSI ID, mode (SE vs LVD) */
+#define PDC_INITIATOR 163
+#define PDC_GET_INITIATOR 0
+#define PDC_SET_INITIATOR 1
+#define PDC_DELETE_INITIATOR 2
+#define PDC_RETURN_TABLE_SIZE 3
+#define PDC_RETURN_TABLE 4
+
+#define PDC_LINK 165 /* (sprockets) */
+#define PDC_LINK_PCI_ENTRY_POINTS 0 /* list (Arg1) = 0 */
+#define PDC_LINK_USB_ENTRY_POINTS 1 /* list (Arg1) = 1 */
+
+/* cl_class
+ * page 3-33 of IO-Firmware ARS
+ * IODC ENTRY_INIT(Search first) RET[1]
+ */
+#define CL_NULL 0 /* invalid */
+#define CL_RANDOM 1 /* random access (as disk) */
+#define CL_SEQU 2 /* sequential access (as tape) */
+#define CL_DUPLEX 7 /* full-duplex point-to-point (RS-232, Net) */
+#define CL_KEYBD 8 /* half-duplex console (HIL Keyboard) */
+#define CL_DISPL 9 /* half-duplex console (display) */
+#define CL_FC 10 /* Fibre Channel access media */
+
+/* IODC ENTRY_INIT() */
+#define ENTRY_INIT_SRCH_FRST 2
+#define ENTRY_INIT_SRCH_NEXT 3
+#define ENTRY_INIT_MOD_DEV 4
+#define ENTRY_INIT_DEV 5
+#define ENTRY_INIT_MOD 6
+#define ENTRY_INIT_MSG 9
+
+/* IODC ENTRY_IO() */
+#define ENTRY_IO_BOOTIN 0
+#define ENTRY_IO_BOOTOUT 1
+#define ENTRY_IO_CIN 2
+#define ENTRY_IO_COUT 3
+#define ENTRY_IO_CLOSE 4
+#define ENTRY_IO_GETMSG 9
+#define ENTRY_IO_BBLOCK_IN 16
+#define ENTRY_IO_BBLOCK_OUT 17
+
+/* IODC ENTRY_SPA() */
+
+/* IODC ENTRY_CONFIG() */
+
+/* IODC ENTRY_TEST() */
+
+/* IODC ENTRY_TLB() */
+
+/* constants for OS (NVM...) */
+#define OS_ID_NONE 0 /* Undefined OS ID */
+#define OS_ID_HPUX 1 /* HP-UX OS */
+#define OS_ID_MPEXL 2 /* MPE XL OS */
+#define OS_ID_OSF 3 /* OSF OS */
+#define OS_ID_HPRT 4 /* HP-RT OS */
+#define OS_ID_NOVEL 5 /* NOVELL OS */
+#define OS_ID_LINUX 6 /* Linux */
+
+
+/* constants for PDC_CHASSIS */
+#define OSTAT_OFF 0
+#define OSTAT_FLT 1
+#define OSTAT_TEST 2
+#define OSTAT_INIT 3
+#define OSTAT_SHUT 4
+#define OSTAT_WARN 5
+#define OSTAT_RUN 6
+#define OSTAT_ON 7
+
+/* Page Zero constant offsets used by the HPMC handler */
+#define BOOT_CONSOLE_HPA_OFFSET 0x3c0
+#define BOOT_CONSOLE_SPA_OFFSET 0x3c4
+#define BOOT_CONSOLE_PATH_OFFSET 0x3a8
+
+/* size of the pdc_result buffer for firmware.c */
+#define NUM_PDC_RESULT 32
+
+#if !defined(__ASSEMBLY__)
+
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+
+#include <asm/page.h> /* for __PAGE_OFFSET */
+
+extern int pdc_type;
+
+/* Values for pdc_type */
+#define PDC_TYPE_ILLEGAL -1
+#define PDC_TYPE_PAT 0 /* 64-bit PAT-PDC */
+#define PDC_TYPE_SYSTEM_MAP 1 /* 32-bit, but supports PDC_SYSTEM_MAP */
+#define PDC_TYPE_SNAKE 2 /* Doesn't support SYSTEM_MAP */
+
+struct pdc_chassis_info { /* for PDC_CHASSIS_INFO */
+ unsigned long actcnt; /* actual number of bytes returned */
+ unsigned long maxcnt; /* maximum number of bytes that could be returned */
+};
+
+struct pdc_coproc_cfg { /* for PDC_COPROC_CFG */
+ unsigned long ccr_functional;
+ unsigned long ccr_present;
+ unsigned long revision;
+ unsigned long model;
+};
+
+struct pdc_model { /* for PDC_MODEL */
+ unsigned long hversion;
+ unsigned long sversion;
+ unsigned long hw_id;
+ unsigned long boot_id;
+ unsigned long sw_id;
+ unsigned long sw_cap;
+ unsigned long arch_rev;
+ unsigned long pot_key;
+ unsigned long curr_key;
+};
+
+struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
+ unsigned long
+#ifdef CONFIG_64BIT
+ cc_padW:32,
+#endif
+ cc_alias: 4, /* alias boundaries for virtual addresses */
+ cc_block: 4, /* to determine most efficient stride */
+ cc_line : 3, /* maximum amount written back as a result of store (multiple of 16 bytes) */
+ cc_shift: 2, /* how much to shift cc_block left */
+ cc_wt : 1, /* 0 = WT-Dcache, 1 = WB-Dcache */
+ cc_sh : 2, /* 0 = separate I/D-cache, else shared I/D-cache */
+ cc_cst : 3, /* 0 = incoherent D-cache, 1=coherent D-cache */
+ cc_pad1 : 10, /* reserved */
+ cc_hv : 3; /* hversion dependent */
+};
+
+struct pdc_tlb_cf { /* for PDC_CACHE (I/D-TLB's) */
+ unsigned long tc_pad0:12, /* reserved */
+#ifdef CONFIG_64BIT
+ tc_padW:32,
+#endif
+ tc_sh : 2, /* 0 = separate I/D-TLB, else shared I/D-TLB */
+ tc_hv : 1, /* HV */
+ tc_page : 1, /* 0 = 2K page-size-machine, 1 = 4k page size */
+ tc_cst : 3, /* 0 = incoherent operations, else coherent operations */
+ tc_aid : 5, /* ITLB: width of access ids of processor (encoded!) */
+ tc_pad1 : 8; /* ITLB: width of space-registers (encoded) */
+};
+
+struct pdc_cache_info { /* main-PDC_CACHE-structure (caches & TLB's) */
+ /* I-cache */
+ unsigned long ic_size; /* size in bytes */
+ struct pdc_cache_cf ic_conf; /* configuration */
+ unsigned long ic_base; /* base-addr */
+ unsigned long ic_stride;
+ unsigned long ic_count;
+ unsigned long ic_loop;
+ /* D-cache */
+ unsigned long dc_size; /* size in bytes */
+ struct pdc_cache_cf dc_conf; /* configuration */
+ unsigned long dc_base; /* base-addr */
+ unsigned long dc_stride;
+ unsigned long dc_count;
+ unsigned long dc_loop;
+ /* Instruction-TLB */
+ unsigned long it_size; /* number of entries in I-TLB */
+ struct pdc_tlb_cf it_conf; /* I-TLB-configuration */
+ unsigned long it_sp_base;
+ unsigned long it_sp_stride;
+ unsigned long it_sp_count;
+ unsigned long it_off_base;
+ unsigned long it_off_stride;
+ unsigned long it_off_count;
+ unsigned long it_loop;
+ /* data-TLB */
+ unsigned long dt_size; /* number of entries in D-TLB */
+ struct pdc_tlb_cf dt_conf; /* D-TLB-configuration */
+ unsigned long dt_sp_base;
+ unsigned long dt_sp_stride;
+ unsigned long dt_sp_count;
+ unsigned long dt_off_base;
+ unsigned long dt_off_stride;
+ unsigned long dt_off_count;
+ unsigned long dt_loop;
+};
+
+#if 0
+/* If you start using the next struct, you'll have to adjust it to
+ * work with 64-bit firmware I think -PB
+ */
+struct pdc_iodc { /* PDC_IODC */
+ unsigned char hversion_model;
+ unsigned char hversion;
+ unsigned char spa;
+ unsigned char type;
+ unsigned int sversion_rev:4;
+ unsigned int sversion_model:19;
+ unsigned int sversion_opt:8;
+ unsigned char rev;
+ unsigned char dep;
+ unsigned char features;
+ unsigned char pad1;
+ unsigned int checksum:16;
+ unsigned int length:16;
+ unsigned int pad[15];
+} __attribute__((aligned(8))) ;
+#endif
+
+#ifndef CONFIG_PA20
+/* no BTLBs in PA2.0 processors */
+struct pdc_btlb_info_range {
+ __u8 res00;
+ __u8 num_i;
+ __u8 num_d;
+ __u8 num_comb;
+};
+
+struct pdc_btlb_info { /* PDC_BLOCK_TLB, return of PDC_BTLB_INFO */
+ unsigned int min_size; /* minimum size of BTLB in pages */
+ unsigned int max_size; /* maximum size of BTLB in pages */
+ struct pdc_btlb_info_range fixed_range_info;
+ struct pdc_btlb_info_range variable_range_info;
+};
+
+#endif /* !CONFIG_PA20 */
+
+#ifdef CONFIG_64BIT
+struct pdc_memory_table_raddr { /* PDC_MEM/PDC_MEM_TABLE (return info) */
+ unsigned long entries_returned;
+ unsigned long entries_total;
+};
+
+struct pdc_memory_table { /* PDC_MEM/PDC_MEM_TABLE (arguments) */
+ unsigned long paddr;
+ unsigned int pages;
+ unsigned int reserved;
+};
+#endif /* CONFIG_64BIT */
+
+struct pdc_system_map_mod_info { /* PDC_SYSTEM_MAP/FIND_MODULE */
+ unsigned long mod_addr;
+ unsigned long mod_pgs;
+ unsigned long add_addrs;
+};
+
+struct pdc_system_map_addr_info { /* PDC_SYSTEM_MAP/FIND_ADDRESS */
+ unsigned long mod_addr;
+ unsigned long mod_pgs;
+};
+
+struct pdc_initiator { /* PDC_INITIATOR */
+ int host_id;
+ int factor;
+ int width;
+ int mode;
+};
+
+struct hardware_path {
+ char flags; /* see bit definitions below */
+ char bc[6]; /* Bus Converter routing info to a specific */
+ /* I/O adaptor (< 0 means none, > 63 resvd) */
+ char mod; /* fixed field of specified module */
+};
+
+/*
+ * Device path specifications used by PDC.
+ */
+struct pdc_module_path {
+ struct hardware_path path;
+ unsigned int layers[6]; /* device-specific info (ctlr #, unit # ...) */
+};
+
+#ifndef CONFIG_PA20
+/* Only used on some pre-PA2.0 boxes */
+struct pdc_memory_map { /* PDC_MEM_MAP */
+ unsigned long hpa; /* mod's register set address */
+ unsigned long more_pgs; /* number of additional I/O pgs */
+};
+#endif
+
+struct pdc_tod {
+ unsigned long tod_sec;
+ unsigned long tod_usec;
+};
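+
+/*
+ * Illustrative use of the TOD wrappers declared near the end of this
+ * header:
+ *
+ *	struct pdc_tod tod;
+ *
+ *	if (pdc_tod_read(&tod) == PDC_OK)
+ *		printk("TOD: %lu.%06lu\n", tod.tod_sec, tod.tod_usec);
+ */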
+
+/* architected results from PDC_PIM/transfer hpmc on a PA1.1 machine */
+
+struct pdc_hpmc_pim_11 { /* PDC_PIM */
+ __u32 gr[32];
+ __u32 cr[32];
+ __u32 sr[8];
+ __u32 iasq_back;
+ __u32 iaoq_back;
+ __u32 check_type;
+ __u32 cpu_state;
+ __u32 rsvd1;
+ __u32 cache_check;
+ __u32 tlb_check;
+ __u32 bus_check;
+ __u32 assists_check;
+ __u32 rsvd2;
+ __u32 assist_state;
+ __u32 responder_addr;
+ __u32 requestor_addr;
+ __u32 path_info;
+ __u64 fr[32];
+};
+
+/*
+ * architected results from PDC_PIM/transfer hpmc on a PA2.0 machine
+ *
+ * Note that PDC_PIM doesn't care whether or not wide mode was enabled,
+ * so the results differ between PA1.1 and PA2.0 when running in narrow mode.
+ *
+ * Note also that there are unarchitected results available, which
+ * are hversion dependent. Do a "ser pim 0 hpmc" after rebooting, since
+ * the firmware is probably the best way of printing hversion dependent
+ * data.
+ */
+
+struct pdc_hpmc_pim_20 { /* PDC_PIM */
+ __u64 gr[32];
+ __u64 cr[32];
+ __u64 sr[8];
+ __u64 iasq_back;
+ __u64 iaoq_back;
+ __u32 check_type;
+ __u32 cpu_state;
+ __u32 cache_check;
+ __u32 tlb_check;
+ __u32 bus_check;
+ __u32 assists_check;
+ __u32 assist_state;
+ __u32 path_info;
+ __u64 responder_addr;
+ __u64 requestor_addr;
+ __u64 fr[32];
+};
+
+void pdc_console_init(void); /* in pdc_console.c */
+void pdc_console_restart(void);
+
+void setup_pdc(void); /* in inventory.c */
+
+/* wrapper-functions from pdc.c */
+
+int pdc_add_valid(unsigned long address);
+int pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len);
+int pdc_chassis_disp(unsigned long disp);
+int pdc_chassis_warn(unsigned long *warn);
+int pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info);
+int pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info);
+int pdc_iodc_read(unsigned long *actcnt, unsigned long hpa, unsigned int index,
+ void *iodc_data, unsigned int iodc_data_size);
+int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info,
+ struct pdc_module_path *mod_path, long mod_index);
+int pdc_system_map_find_addrs(struct pdc_system_map_addr_info *pdc_addr_info,
+ long mod_index, long addr_index);
+int pdc_model_info(struct pdc_model *model);
+int pdc_model_sysmodel(char *name);
+int pdc_model_cpuid(unsigned long *cpu_id);
+int pdc_model_versions(unsigned long *versions, int id);
+int pdc_model_capabilities(unsigned long *capabilities);
+int pdc_cache_info(struct pdc_cache_info *cache);
+int pdc_spaceid_bits(unsigned long *space_bits);
+#ifndef CONFIG_PA20
+int pdc_btlb_info(struct pdc_btlb_info *btlb);
+int pdc_mem_map_hpa(struct pdc_memory_map *r_addr, struct pdc_module_path *mod_path);
+#endif /* !CONFIG_PA20 */
+int pdc_lan_station_id(char *lan_addr, unsigned long net_hpa);
+
+int pdc_stable_read(unsigned long staddr, void *memaddr, unsigned long count);
+int pdc_stable_write(unsigned long staddr, void *memaddr, unsigned long count);
+int pdc_stable_get_size(unsigned long *size);
+int pdc_stable_verify_contents(void);
+int pdc_stable_initialize(void);
+
+int pdc_pci_irt_size(unsigned long *num_entries, unsigned long hpa);
+int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl);
+
+int pdc_get_initiator(struct hardware_path *, struct pdc_initiator *);
+int pdc_tod_read(struct pdc_tod *tod);
+int pdc_tod_set(unsigned long sec, unsigned long usec);
+
+#ifdef CONFIG_64BIT
+int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr,
+ struct pdc_memory_table *tbl, unsigned long entries);
+#endif
+
+void set_firmware_width(void);
+void set_firmware_width_unlocked(void);
+int pdc_do_firm_test_reset(unsigned long ftc_bitmap);
+int pdc_do_reset(void);
+int pdc_soft_power_info(unsigned long *power_reg);
+int pdc_soft_power_button(int sw_control);
+void pdc_io_reset(void);
+void pdc_io_reset_devices(void);
+int pdc_iodc_getc(void);
+int pdc_iodc_print(const unsigned char *str, unsigned count);
+
+void pdc_emergency_unlock(void);
+int pdc_sti_call(unsigned long func, unsigned long flags,
+ unsigned long inptr, unsigned long outputr,
+ unsigned long glob_cfg);
+
+static inline char *os_id_to_string(u16 os_id)
+{
+	switch (os_id) {
+ case OS_ID_NONE: return "No OS";
+ case OS_ID_HPUX: return "HP-UX";
+ case OS_ID_MPEXL: return "MPE-iX";
+ case OS_ID_OSF: return "OSF";
+ case OS_ID_HPRT: return "HP-RT";
+ case OS_ID_NOVEL: return "Novell Netware";
+ case OS_ID_LINUX: return "Linux";
+ default: return "Unknown";
+ }
+}
+
+#endif /* __KERNEL__ */
+
+#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
+
+/* DEFINITION OF THE ZERO-PAGE (PAG0) */
+/* based on work by Jason Eckhardt (jason@equator.com) */
+
+/* flags of the device_path */
+#define PF_AUTOBOOT 0x80
+#define PF_AUTOSEARCH 0x40
+#define PF_TIMER 0x0F
+
+struct device_path { /* page 1-69 */
+	unsigned char flags;	/* flags: see bit definitions above */
+ unsigned char bc[6]; /* bus converter routing info */
+ unsigned char mod;
+ unsigned int layers[6];/* device-specific layer-info */
+} __attribute__((aligned(8))) ;
+
+struct pz_device {
+ struct device_path dp; /* see above */
+ /* struct iomod *hpa; */
+ unsigned int hpa; /* HPA base address */
+ /* char *spa; */
+ unsigned int spa; /* SPA base address */
+ /* int (*iodc_io)(struct iomod*, ...); */
+ unsigned int iodc_io; /* device entry point */
+ short pad; /* reserved */
+ unsigned short cl_class;/* see below */
+} __attribute__((aligned(8))) ;
+
+struct zeropage {
+ /* [0x000] initialize vectors (VEC) */
+ unsigned int vec_special; /* must be zero */
+ /* int (*vec_pow_fail)(void);*/
+ unsigned int vec_pow_fail; /* power failure handler */
+ /* int (*vec_toc)(void); */
+ unsigned int vec_toc;
+ unsigned int vec_toclen;
+ /* int (*vec_rendz)(void); */
+ unsigned int vec_rendz;
+ int vec_pow_fail_flen;
+ int vec_pad[10];
+
+ /* [0x040] reserved processor dependent */
+ int pad0[112];
+
+ /* [0x200] reserved */
+ int pad1[84];
+
+ /* [0x350] memory configuration (MC) */
+ int memc_cont; /* contiguous mem size (bytes) */
+ int memc_phsize; /* physical memory size */
+ int memc_adsize; /* additional mem size, bytes of SPA space used by PDC */
+ unsigned int mem_pdc_hi; /* used for 64-bit */
+
+ /* [0x360] various parameters for the boot-CPU */
+ /* unsigned int *mem_booterr[8]; */
+ unsigned int mem_booterr[8]; /* ptr to boot errors */
+ unsigned int mem_free; /* first location, where OS can be loaded */
+ /* struct iomod *mem_hpa; */
+ unsigned int mem_hpa; /* HPA of the boot-CPU */
+ /* int (*mem_pdc)(int, ...); */
+ unsigned int mem_pdc; /* PDC entry point */
+ unsigned int mem_10msec; /* number of clock ticks in 10msec */
+
+ /* [0x390] initial memory module (IMM) */
+ /* struct iomod *imm_hpa; */
+ unsigned int imm_hpa; /* HPA of the IMM */
+ int imm_soft_boot; /* 0 = was hard boot, 1 = was soft boot */
+ unsigned int imm_spa_size; /* SPA size of the IMM in bytes */
+ unsigned int imm_max_mem; /* bytes of mem in IMM */
+
+ /* [0x3A0] boot console, display device and keyboard */
+ struct pz_device mem_cons; /* description of console device */
+ struct pz_device mem_boot; /* description of boot device */
+ struct pz_device mem_kbd; /* description of keyboard device */
+
+ /* [0x430] reserved */
+ int pad430[116];
+
+ /* [0x600] processor dependent */
+ __u32 pad600[1];
+ __u32 proc_sti; /* pointer to STI ROM */
+ __u32 pad608[126];
+};
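+
+/*
+ * Illustrative reads from the zero page (the local variable names are
+ * hypothetical; the fields are defined in struct zeropage above):
+ *
+ *	unsigned int ticks_per_10ms = PAGE0->mem_10msec;
+ *	unsigned int console_hpa    = PAGE0->mem_cons.hpa;
+ */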
+
+#endif /* !defined(__ASSEMBLY__) */
+
+#endif /* _PARISC_PDC_H */
diff --git a/arch/parisc/include/asm/pdc_chassis.h b/arch/parisc/include/asm/pdc_chassis.h
new file mode 100644
index 00000000..a609273d
--- /dev/null
+++ b/arch/parisc/include/asm/pdc_chassis.h
@@ -0,0 +1,381 @@
+/*
+ * include/asm-parisc/pdc_chassis.h
+ *
+ * Copyright (C) 2002 Laurent Canet <canetl@esiee.fr>
+ * Copyright (C) 2002 Thibaut Varene <varenet@parisc-linux.org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * TODO: - handle processor number on SMP systems (Reporting Entity ID)
+ * - handle message ID
+ * - handle timestamps
+ */
+
+
+#ifndef _PARISC_PDC_CHASSIS_H
+#define _PARISC_PDC_CHASSIS_H
+
+/*
+ * ----------
+ * Prototypes
+ * ----------
+ */
+
+int pdc_chassis_send_status(int message);
+void parisc_pdc_chassis_init(void);
+
+
+/*
+ * -----------------
+ * Direct call names
+ * -----------------
+ * They set up everything for you: the log message and the corresponding LED state.
+ */
+
+#define PDC_CHASSIS_DIRECT_BSTART 0
+#define PDC_CHASSIS_DIRECT_BCOMPLETE 1
+#define PDC_CHASSIS_DIRECT_SHUTDOWN 2
+#define PDC_CHASSIS_DIRECT_PANIC 3
+#define PDC_CHASSIS_DIRECT_HPMC 4
+#define PDC_CHASSIS_DIRECT_LPMC 5
+#define PDC_CHASSIS_DIRECT_DUMP 6 /* not yet implemented */
+#define PDC_CHASSIS_DIRECT_OOPS 7 /* not yet implemented */
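+
+/*
+ * Illustrative use (e.g. from panic or shutdown paths): a direct call name
+ * is simply handed to pdc_chassis_send_status(), which sets up the log
+ * message and LED state described further down:
+ *
+ *	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+ */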
+
+
+/*
+ * ------------
+ * LEDs control
+ * ------------
+ * Set the three LEDs -- Run, Attn, and Fault.
+ */
+
+/* Old PDC LED control */
+#define PDC_CHASSIS_DISP_DATA(v) ((unsigned long)(v) << 17)
+
+/*
+ * Available PDC PAT LED states
+ */
+
+#define PDC_CHASSIS_LED_RUN_OFF (0ULL << 4)
+#define PDC_CHASSIS_LED_RUN_FLASH (1ULL << 4)
+#define PDC_CHASSIS_LED_RUN_ON (2ULL << 4)
+#define PDC_CHASSIS_LED_RUN_NC (3ULL << 4)
+#define PDC_CHASSIS_LED_ATTN_OFF (0ULL << 6)
+#define PDC_CHASSIS_LED_ATTN_FLASH (1ULL << 6)
+#define PDC_CHASSIS_LED_ATTN_NC (3ULL << 6) /* ATTN ON is invalid */
+#define PDC_CHASSIS_LED_FAULT_OFF (0ULL << 8)
+#define PDC_CHASSIS_LED_FAULT_FLASH (1ULL << 8)
+#define PDC_CHASSIS_LED_FAULT_ON (2ULL << 8)
+#define PDC_CHASSIS_LED_FAULT_NC (3ULL << 8)
+#define PDC_CHASSIS_LED_VALID (1ULL << 10)
+
+/*
+ * Valid PDC PAT LED states combinations
+ */
+
+/* System running normally */
+#define PDC_CHASSIS_LSTATE_RUN_NORMAL (PDC_CHASSIS_LED_RUN_ON | \
+ PDC_CHASSIS_LED_ATTN_OFF | \
+ PDC_CHASSIS_LED_FAULT_OFF | \
+ PDC_CHASSIS_LED_VALID )
+/* System crashed and rebooted itself successfully */
+#define PDC_CHASSIS_LSTATE_RUN_CRASHREC (PDC_CHASSIS_LED_RUN_ON | \
+ PDC_CHASSIS_LED_ATTN_OFF | \
+ PDC_CHASSIS_LED_FAULT_FLASH | \
+ PDC_CHASSIS_LED_VALID )
+/* There was a system interruption that did not take the system down */
+#define PDC_CHASSIS_LSTATE_RUN_SYSINT (PDC_CHASSIS_LED_RUN_ON | \
+ PDC_CHASSIS_LED_ATTN_FLASH | \
+ PDC_CHASSIS_LED_FAULT_OFF | \
+ PDC_CHASSIS_LED_VALID )
+/* System running and unexpected reboot or non-critical error detected */
+#define PDC_CHASSIS_LSTATE_RUN_NCRIT (PDC_CHASSIS_LED_RUN_ON | \
+ PDC_CHASSIS_LED_ATTN_FLASH | \
+ PDC_CHASSIS_LED_FAULT_FLASH | \
+ PDC_CHASSIS_LED_VALID )
+/* Executing non-OS code */
+#define PDC_CHASSIS_LSTATE_NONOS (PDC_CHASSIS_LED_RUN_FLASH | \
+ PDC_CHASSIS_LED_ATTN_OFF | \
+ PDC_CHASSIS_LED_FAULT_OFF | \
+ PDC_CHASSIS_LED_VALID )
+/* Boot failed - Executing non-OS code */
+#define PDC_CHASSIS_LSTATE_NONOS_BFAIL (PDC_CHASSIS_LED_RUN_FLASH | \
+ PDC_CHASSIS_LED_ATTN_OFF | \
+ PDC_CHASSIS_LED_FAULT_ON | \
+ PDC_CHASSIS_LED_VALID )
+/* Unexpected reboot occurred - Executing non-OS code */
+#define PDC_CHASSIS_LSTATE_NONOS_UNEXP (PDC_CHASSIS_LED_RUN_FLASH | \
+ PDC_CHASSIS_LED_ATTN_OFF | \
+ PDC_CHASSIS_LED_FAULT_FLASH | \
+ PDC_CHASSIS_LED_VALID )
+/* Executing non-OS code - Non-critical error detected */
+#define PDC_CHASSIS_LSTATE_NONOS_NCRIT (PDC_CHASSIS_LED_RUN_FLASH | \
+ PDC_CHASSIS_LED_ATTN_FLASH | \
+ PDC_CHASSIS_LED_FAULT_OFF | \
+ PDC_CHASSIS_LED_VALID )
+/* Boot failed - Executing non-OS code - Non-critical error detected */
+#define PDC_CHASSIS_LSTATE_BFAIL_NCRIT (PDC_CHASSIS_LED_RUN_FLASH | \
+ PDC_CHASSIS_LED_ATTN_FLASH | \
+ PDC_CHASSIS_LED_FAULT_ON | \
+ PDC_CHASSIS_LED_VALID )
+/* Unexpected reboot/recovering - Executing non-OS code - Non-critical error detected */
+#define PDC_CHASSIS_LSTATE_UNEXP_NCRIT (PDC_CHASSIS_LED_RUN_FLASH | \
+ PDC_CHASSIS_LED_ATTN_FLASH | \
+ PDC_CHASSIS_LED_FAULT_FLASH | \
+ PDC_CHASSIS_LED_VALID )
+/* Cannot execute PDC */
+#define PDC_CHASSIS_LSTATE_CANNOT_PDC (PDC_CHASSIS_LED_RUN_OFF | \
+ PDC_CHASSIS_LED_ATTN_OFF | \
+ PDC_CHASSIS_LED_FAULT_OFF | \
+ PDC_CHASSIS_LED_VALID )
+/* Boot failed - OS not up - PDC has detected a failure that prevents boot */
+#define PDC_CHASSIS_LSTATE_FATAL_BFAIL (PDC_CHASSIS_LED_RUN_OFF | \
+ PDC_CHASSIS_LED_ATTN_OFF | \
+ PDC_CHASSIS_LED_FAULT_ON | \
+ PDC_CHASSIS_LED_VALID )
+/* No code running - Non-critical error detected (double fault situation) */
+#define PDC_CHASSIS_LSTATE_NOCODE_NCRIT (PDC_CHASSIS_LED_RUN_OFF | \
+ PDC_CHASSIS_LED_ATTN_FLASH | \
+ PDC_CHASSIS_LED_FAULT_OFF | \
+ PDC_CHASSIS_LED_VALID )
+/* Boot failed - OS not up - Fatal failure detected - Non-critical error detected */
+#define PDC_CHASSIS_LSTATE_FATAL_NCRIT (PDC_CHASSIS_LED_RUN_OFF | \
+ PDC_CHASSIS_LED_ATTN_FLASH | \
+ PDC_CHASSIS_LED_FAULT_ON | \
+ PDC_CHASSIS_LED_VALID )
+/* All other states are invalid */
+
+
+/*
+ * --------------
+ * PDC Log events
+ * --------------
+ * Here follow the bits needed to fill in the log event sent to PDC_CHASSIS.
+ * The log message contains: Alert level, Source, Source detail,
+ * Source ID, Problem detail, Caller activity, Activity status,
+ * Caller subactivity, Reporting entity type, Reporting entity ID,
+ * Data type, Unique message ID and EOM.
+ */
+
+/* Alert level */
+#define PDC_CHASSIS_ALERT_FORWARD (0ULL << 36) /* no failure detected */
+#define PDC_CHASSIS_ALERT_SERPROC (1ULL << 36) /* service proc - no failure */
+#define PDC_CHASSIS_ALERT_NURGENT (2ULL << 36) /* non-urgent operator attn */
+#define PDC_CHASSIS_ALERT_BLOCKED (3ULL << 36) /* system blocked */
+#define PDC_CHASSIS_ALERT_CONF_CHG (4ULL << 36) /* unexpected configuration change */
+#define PDC_CHASSIS_ALERT_ENV_PB (5ULL << 36) /* boot possible, environmental problem */
+#define PDC_CHASSIS_ALERT_PENDING (6ULL << 36) /* boot possible, pending failure */
+#define PDC_CHASSIS_ALERT_PERF_IMP (8ULL << 36) /* boot possible, performance impaired */
+#define PDC_CHASSIS_ALERT_FUNC_IMP (10ULL << 36) /* boot possible, functionality impaired */
+#define PDC_CHASSIS_ALERT_SOFT_FAIL (12ULL << 36) /* software failure */
+#define PDC_CHASSIS_ALERT_HANG (13ULL << 36) /* system hang */
+#define PDC_CHASSIS_ALERT_ENV_FATAL (14ULL << 36) /* fatal power or environmental problem */
+#define PDC_CHASSIS_ALERT_HW_FATAL (15ULL << 36) /* fatal hardware problem */
+
+/* Source */
+#define PDC_CHASSIS_SRC_NONE (0ULL << 28) /* unknown, no source stated */
+#define PDC_CHASSIS_SRC_PROC (1ULL << 28) /* processor */
+/* For later use ? */
+#define PDC_CHASSIS_SRC_PROC_CACHE (2ULL << 28) /* processor cache */
+#define PDC_CHASSIS_SRC_PDH (3ULL << 28) /* processor dependent hardware */
+#define PDC_CHASSIS_SRC_PWR (4ULL << 28) /* power */
+#define PDC_CHASSIS_SRC_FAB (5ULL << 28) /* fabric connector */
+#define PDC_CHASSIS_SRC_PLATi (6ULL << 28) /* platform */
+#define PDC_CHASSIS_SRC_MEM (7ULL << 28) /* memory */
+#define PDC_CHASSIS_SRC_IO (8ULL << 28) /* I/O */
+#define PDC_CHASSIS_SRC_CELL (9ULL << 28) /* cell */
+#define PDC_CHASSIS_SRC_PD (10ULL << 28) /* protected domain */
+
+/* Source detail field */
+#define PDC_CHASSIS_SRC_D_PROC (1ULL << 24) /* processor general */
+
+/* Source ID - platform dependent */
+#define PDC_CHASSIS_SRC_ID_UNSPEC (0ULL << 16)
+
+/* Problem detail - problem source dependent */
+#define PDC_CHASSIS_PB_D_PROC_NONE (0ULL << 32) /* no problem detail */
+#define PDC_CHASSIS_PB_D_PROC_TIMEOUT (4ULL << 32) /* timeout */
+
+/* Caller activity */
+#define PDC_CHASSIS_CALL_ACT_HPUX_BL (7ULL << 12) /* Boot Loader */
+#define PDC_CHASSIS_CALL_ACT_HPUX_PD (8ULL << 12) /* SAL_PD activities */
+#define PDC_CHASSIS_CALL_ACT_HPUX_EVENT (9ULL << 12) /* SAL_EVENTS activities */
+#define PDC_CHASSIS_CALL_ACT_HPUX_IO (10ULL << 12) /* SAL_IO activities */
+#define PDC_CHASSIS_CALL_ACT_HPUX_PANIC (11ULL << 12) /* System panic */
+#define PDC_CHASSIS_CALL_ACT_HPUX_INIT (12ULL << 12) /* System initialization */
+#define PDC_CHASSIS_CALL_ACT_HPUX_SHUT (13ULL << 12) /* System shutdown */
+#define PDC_CHASSIS_CALL_ACT_HPUX_WARN (14ULL << 12) /* System warning */
+#define PDC_CHASSIS_CALL_ACT_HPUX_DU (15ULL << 12) /* Display_Activity() update */
+
+/* Activity status - implementation dependent */
+#define PDC_CHASSIS_ACT_STATUS_UNSPEC (0ULL << 0)
+
+/* Caller subactivity - implementation dependent */
+/* FIXME: other subactivities ? */
+#define PDC_CHASSIS_CALL_SACT_UNSPEC (0ULL << 4) /* implementation dependent */
+
+/* Reporting entity type */
+#define PDC_CHASSIS_RET_GENERICOS (12ULL << 52) /* generic OSes */
+#define PDC_CHASSIS_RET_IA64_NT (13ULL << 52) /* IA-64 NT */
+#define PDC_CHASSIS_RET_HPUX (14ULL << 52) /* HP-UX */
+#define PDC_CHASSIS_RET_DIAG (15ULL << 52) /* offline diagnostics & utilities */
+
+/* Reporting entity ID */
+#define PDC_CHASSIS_REID_UNSPEC (0ULL << 44)
+
+/* Data type */
+#define PDC_CHASSIS_DT_NONE (0ULL << 59) /* data field unused */
+/* For later use ? Do we need these ? */
+#define PDC_CHASSIS_DT_PHYS_ADDR (1ULL << 59) /* physical address */
+#define PDC_CHASSIS_DT_DATA_EXPECT (2ULL << 59) /* expected data */
+#define PDC_CHASSIS_DT_ACTUAL (3ULL << 59) /* actual data */
+#define PDC_CHASSIS_DT_PHYS_LOC (4ULL << 59) /* physical location */
+#define PDC_CHASSIS_DT_PHYS_LOC_EXT (5ULL << 59) /* physical location extension */
+#define PDC_CHASSIS_DT_TAG (6ULL << 59) /* tag */
+#define PDC_CHASSIS_DT_SYNDROME (7ULL << 59) /* syndrome */
+#define PDC_CHASSIS_DT_CODE_ADDR (8ULL << 59) /* code address */
+#define PDC_CHASSIS_DT_ASCII_MSG (9ULL << 59) /* ascii message */
+#define PDC_CHASSIS_DT_POST (10ULL << 59) /* POST code */
+#define PDC_CHASSIS_DT_TIMESTAMP (11ULL << 59) /* timestamp */
+#define PDC_CHASSIS_DT_DEV_STAT (12ULL << 59) /* device status */
+#define PDC_CHASSIS_DT_DEV_TYPE (13ULL << 59) /* device type */
+#define PDC_CHASSIS_DT_PB_DET (14ULL << 59) /* problem detail */
+#define PDC_CHASSIS_DT_ACT_LEV (15ULL << 59) /* activity level/timeout */
+#define PDC_CHASSIS_DT_SER_NUM (16ULL << 59) /* serial number */
+#define PDC_CHASSIS_DT_REV_NUM (17ULL << 59) /* revision number */
+#define PDC_CHASSIS_DT_INTERRUPT (18ULL << 59) /* interruption information */
+#define PDC_CHASSIS_DT_TEST_NUM (19ULL << 59) /* test number */
+#define PDC_CHASSIS_DT_STATE_CHG (20ULL << 59) /* major changes in system state */
+#define PDC_CHASSIS_DT_PROC_DEALLOC (21ULL << 59) /* processor deallocate */
+#define PDC_CHASSIS_DT_RESET (30ULL << 59) /* reset type and cause */
+#define PDC_CHASSIS_DT_PA_LEGACY (31ULL << 59) /* legacy PA hex chassis code */
+
+/* System states - part of major changes in system state data field */
+#define PDC_CHASSIS_SYSTATE_BSTART (0ULL << 0) /* boot start */
+#define PDC_CHASSIS_SYSTATE_BCOMP (1ULL << 0) /* boot complete */
+#define PDC_CHASSIS_SYSTATE_CHANGE (2ULL << 0) /* major change */
+#define PDC_CHASSIS_SYSTATE_LED (3ULL << 0) /* LED change */
+#define PDC_CHASSIS_SYSTATE_PANIC (9ULL << 0) /* OS Panic */
+#define PDC_CHASSIS_SYSTATE_DUMP (10ULL << 0) /* memory dump */
+#define PDC_CHASSIS_SYSTATE_HPMC (11ULL << 0) /* processing HPMC */
+#define PDC_CHASSIS_SYSTATE_HALT (15ULL << 0) /* system halted */
+
+/* Message ID */
+#define PDC_CHASSIS_MSG_ID (0ULL << 40) /* we do not handle msg IDs atm */
+
+/* EOM - separates log entries */
+#define PDC_CHASSIS_EOM_CLEAR (0ULL << 43)
+#define PDC_CHASSIS_EOM_SET (1ULL << 43)
+
+/*
+ * Preformatted well-known messages
+ */
+
+/* Boot started */
+#define PDC_CHASSIS_PMSG_BSTART (PDC_CHASSIS_ALERT_SERPROC | \
+ PDC_CHASSIS_SRC_PROC | \
+ PDC_CHASSIS_SRC_D_PROC | \
+ PDC_CHASSIS_SRC_ID_UNSPEC | \
+ PDC_CHASSIS_PB_D_PROC_NONE | \
+ PDC_CHASSIS_CALL_ACT_HPUX_INIT | \
+ PDC_CHASSIS_ACT_STATUS_UNSPEC | \
+ PDC_CHASSIS_CALL_SACT_UNSPEC | \
+ PDC_CHASSIS_RET_HPUX | \
+ PDC_CHASSIS_REID_UNSPEC | \
+ PDC_CHASSIS_DT_STATE_CHG | \
+ PDC_CHASSIS_SYSTATE_BSTART | \
+ PDC_CHASSIS_MSG_ID | \
+ PDC_CHASSIS_EOM_SET )
+
+/* Boot complete */
+#define PDC_CHASSIS_PMSG_BCOMPLETE (PDC_CHASSIS_ALERT_SERPROC | \
+ PDC_CHASSIS_SRC_PROC | \
+ PDC_CHASSIS_SRC_D_PROC | \
+ PDC_CHASSIS_SRC_ID_UNSPEC | \
+ PDC_CHASSIS_PB_D_PROC_NONE | \
+ PDC_CHASSIS_CALL_ACT_HPUX_INIT | \
+ PDC_CHASSIS_ACT_STATUS_UNSPEC | \
+ PDC_CHASSIS_CALL_SACT_UNSPEC | \
+ PDC_CHASSIS_RET_HPUX | \
+ PDC_CHASSIS_REID_UNSPEC | \
+ PDC_CHASSIS_DT_STATE_CHG | \
+ PDC_CHASSIS_SYSTATE_BCOMP | \
+ PDC_CHASSIS_MSG_ID | \
+ PDC_CHASSIS_EOM_SET )
+
+/* Shutdown */
+#define PDC_CHASSIS_PMSG_SHUTDOWN (PDC_CHASSIS_ALERT_SERPROC | \
+ PDC_CHASSIS_SRC_PROC | \
+ PDC_CHASSIS_SRC_D_PROC | \
+ PDC_CHASSIS_SRC_ID_UNSPEC | \
+ PDC_CHASSIS_PB_D_PROC_NONE | \
+ PDC_CHASSIS_CALL_ACT_HPUX_SHUT | \
+ PDC_CHASSIS_ACT_STATUS_UNSPEC | \
+ PDC_CHASSIS_CALL_SACT_UNSPEC | \
+ PDC_CHASSIS_RET_HPUX | \
+ PDC_CHASSIS_REID_UNSPEC | \
+ PDC_CHASSIS_DT_STATE_CHG | \
+ PDC_CHASSIS_SYSTATE_HALT | \
+ PDC_CHASSIS_MSG_ID | \
+ PDC_CHASSIS_EOM_SET )
+
+/* Panic */
+#define PDC_CHASSIS_PMSG_PANIC (PDC_CHASSIS_ALERT_SOFT_FAIL | \
+ PDC_CHASSIS_SRC_PROC | \
+ PDC_CHASSIS_SRC_D_PROC | \
+ PDC_CHASSIS_SRC_ID_UNSPEC | \
+ PDC_CHASSIS_PB_D_PROC_NONE | \
+ PDC_CHASSIS_CALL_ACT_HPUX_PANIC| \
+ PDC_CHASSIS_ACT_STATUS_UNSPEC | \
+ PDC_CHASSIS_CALL_SACT_UNSPEC | \
+ PDC_CHASSIS_RET_HPUX | \
+ PDC_CHASSIS_REID_UNSPEC | \
+ PDC_CHASSIS_DT_STATE_CHG | \
+ PDC_CHASSIS_SYSTATE_PANIC | \
+ PDC_CHASSIS_MSG_ID | \
+ PDC_CHASSIS_EOM_SET )
+
+// FIXME: extrapolated data
+/* HPMC */
+#define PDC_CHASSIS_PMSG_HPMC (PDC_CHASSIS_ALERT_CONF_CHG /*?*/ | \
+ PDC_CHASSIS_SRC_PROC | \
+ PDC_CHASSIS_SRC_D_PROC | \
+ PDC_CHASSIS_SRC_ID_UNSPEC | \
+ PDC_CHASSIS_PB_D_PROC_NONE | \
+ PDC_CHASSIS_CALL_ACT_HPUX_WARN | \
+ PDC_CHASSIS_RET_HPUX | \
+ PDC_CHASSIS_DT_STATE_CHG | \
+ PDC_CHASSIS_SYSTATE_HPMC | \
+ PDC_CHASSIS_MSG_ID | \
+ PDC_CHASSIS_EOM_SET )
+
+/* LPMC */
+#define PDC_CHASSIS_PMSG_LPMC (PDC_CHASSIS_ALERT_BLOCKED /*?*/| \
+ PDC_CHASSIS_SRC_PROC | \
+ PDC_CHASSIS_SRC_D_PROC | \
+ PDC_CHASSIS_SRC_ID_UNSPEC | \
+ PDC_CHASSIS_PB_D_PROC_NONE | \
+ PDC_CHASSIS_CALL_ACT_HPUX_WARN | \
+ PDC_CHASSIS_ACT_STATUS_UNSPEC | \
+ PDC_CHASSIS_CALL_SACT_UNSPEC | \
+ PDC_CHASSIS_RET_HPUX | \
+ PDC_CHASSIS_REID_UNSPEC | \
+ PDC_CHASSIS_DT_STATE_CHG | \
+ PDC_CHASSIS_SYSTATE_CHANGE | \
+ PDC_CHASSIS_MSG_ID | \
+ PDC_CHASSIS_EOM_SET )
+
+#endif /* _PARISC_PDC_CHASSIS_H */
+/* vim: set ts=8 */
diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h
new file mode 100644
index 00000000..47539f11
--- /dev/null
+++ b/arch/parisc/include/asm/pdcpat.h
@@ -0,0 +1,308 @@
+#ifndef __PARISC_PATPDC_H
+#define __PARISC_PATPDC_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright 2000 (c) Hewlett Packard (Paul Bame <bame()spam.parisc-linux.org>)
+ * Copyright 2000,2004 (c) Grant Grundler <grundler()nahspam.parisc-linux.org>
+ */
+
+
+#define PDC_PAT_CELL 64L /* Interface for gaining and
+ * manipulating cell state within PD */
+#define PDC_PAT_CELL_GET_NUMBER 0L /* Return Cell number */
+#define PDC_PAT_CELL_GET_INFO 1L /* Returns info about Cell */
+#define PDC_PAT_CELL_MODULE 2L /* Returns info about Module */
+#define PDC_PAT_CELL_SET_ATTENTION 9L /* Set Cell Attention indicator */
+#define PDC_PAT_CELL_NUMBER_TO_LOC 10L /* Cell Number -> Location */
+#define PDC_PAT_CELL_WALK_FABRIC 11L /* Walk the Fabric */
+#define PDC_PAT_CELL_GET_RDT_SIZE 12L /* Return Route Distance Table Sizes */
+#define PDC_PAT_CELL_GET_RDT 13L /* Return Route Distance Tables */
+#define PDC_PAT_CELL_GET_LOCAL_PDH_SZ 14L /* Read Local PDH Buffer Size */
+#define PDC_PAT_CELL_SET_LOCAL_PDH 15L /* Write Local PDH Buffer */
+#define PDC_PAT_CELL_GET_REMOTE_PDH_SZ 16L /* Return Remote PDH Buffer Size */
+#define PDC_PAT_CELL_GET_REMOTE_PDH 17L /* Read Remote PDH Buffer */
+#define PDC_PAT_CELL_GET_DBG_INFO 128L /* Return DBG Buffer Info */
+#define PDC_PAT_CELL_CHANGE_ALIAS 129L /* Change Non-Equivalent Alias Checking */
+
+
+/*
+** Arg to PDC_PAT_CELL_MODULE memaddr[4]
+**
+** Addresses on the Merced bus are not identical to Runway bus addresses.
+** This is intended for programming SBA/LBA chip range registers.
+*/
+#define IO_VIEW 0UL
+#define PA_VIEW 1UL
+
+/* PDC_PAT_CELL_MODULE entity type values */
+#define PAT_ENTITY_CA 0 /* central agent */
+#define PAT_ENTITY_PROC 1 /* processor */
+#define PAT_ENTITY_MEM 2 /* memory controller */
+#define PAT_ENTITY_SBA 3 /* system bus adapter */
+#define PAT_ENTITY_LBA 4 /* local bus adapter */
+#define PAT_ENTITY_PBC 5 /* processor bus converter */
+#define PAT_ENTITY_XBC 6 /* crossbar fabric connect */
+#define PAT_ENTITY_RC 7 /* fabric interconnect */
+
+/* PDC_PAT_CELL_MODULE address range type values */
+#define PAT_PBNUM 0 /* PCI Bus Number */
+#define PAT_LMMIO 1 /* < 4G MMIO Space */
+#define PAT_GMMIO 2 /* > 4G MMIO Space */
+#define PAT_NPIOP 3 /* Non Postable I/O Port Space */
+#define PAT_PIOP 4 /* Postable I/O Port Space */
+#define PAT_AHPA 5 /* Additional HPA Space */
+#define PAT_UFO 6 /* HPA Space (UFO for Mariposa) */
+#define PAT_GNIP 7 /* GNI Reserved Space */
+
+
+
+/* PDC PAT CHASSIS LOG -- Platform logging & forward progress functions */
+
+#define PDC_PAT_CHASSIS_LOG 65L
+#define PDC_PAT_CHASSIS_WRITE_LOG 0L /* Write Log Entry */
+#define PDC_PAT_CHASSIS_READ_LOG 1L /* Read Log Entry */
+
+
+/* PDC PAT CPU -- CPU configuration within the protection domain */
+
+#define PDC_PAT_CPU 67L
+#define PDC_PAT_CPU_INFO 0L /* Return CPU config info */
+#define PDC_PAT_CPU_DELETE 1L /* Delete CPU */
+#define PDC_PAT_CPU_ADD 2L /* Add CPU */
+#define PDC_PAT_CPU_GET_NUMBER 3L /* Return CPU Number */
+#define PDC_PAT_CPU_GET_HPA 4L /* Return CPU HPA */
+#define PDC_PAT_CPU_STOP 5L /* Stop CPU */
+#define PDC_PAT_CPU_RENDEZVOUS 6L /* Rendezvous CPU */
+#define PDC_PAT_CPU_GET_CLOCK_INFO 7L /* Return CPU Clock info */
+#define PDC_PAT_CPU_GET_RENDEZVOUS_STATE 8L /* Return Rendezvous State */
+#define PDC_PAT_CPU_PLUNGE_FABRIC 128L /* Plunge Fabric */
+#define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache
+ * Cleansing Mode */
+/* PDC PAT EVENT -- Platform Events */
+
+#define PDC_PAT_EVENT 68L
+#define PDC_PAT_EVENT_GET_CAPS 0L /* Get Capabilities */
+#define PDC_PAT_EVENT_SET_MODE 1L /* Set Notification Mode */
+#define PDC_PAT_EVENT_SCAN 2L /* Scan Event */
+#define PDC_PAT_EVENT_HANDLE 3L /* Handle Event */
+#define PDC_PAT_EVENT_GET_NB_CALL 4L /* Get Non-Blocking call Args */
+
+/* PDC PAT HPMC -- Cause processor to go into spin loop, and wait
+ * for wake up from Monarch Processor.
+ */
+
+#define PDC_PAT_HPMC 70L
+#define PDC_PAT_HPMC_RENDEZ_CPU 0L /* go into spin loop */
+#define PDC_PAT_HPMC_SET_PARAMS 1L /* Allows OS to specify intr which PDC
+ * will use to interrupt OS during
+ * machine check rendezvous */
+
+/* parameters for PDC_PAT_HPMC_SET_PARAMS: */
+#define HPMC_SET_PARAMS_INTR 1L /* Rendezvous Interrupt */
+#define HPMC_SET_PARAMS_WAKE 2L /* Wake up processor */
+
+
+/* PDC PAT IO -- On-line services for I/O modules */
+
+#define PDC_PAT_IO 71L
+#define PDC_PAT_IO_GET_SLOT_STATUS 5L /* Get Slot Status Info*/
+#define PDC_PAT_IO_GET_LOC_FROM_HARDWARE 6L /* Get Physical Location from */
+ /* Hardware Path */
+#define PDC_PAT_IO_GET_HARDWARE_FROM_LOC 7L /* Get Hardware Path from
+ * Physical Location */
+#define PDC_PAT_IO_GET_PCI_CONFIG_FROM_HW 11L /* Get PCI Configuration
+ * Address from Hardware Path */
+#define PDC_PAT_IO_GET_HW_FROM_PCI_CONFIG 12L /* Get Hardware Path
+ * from PCI Configuration Address */
+#define PDC_PAT_IO_READ_HOST_BRIDGE_INFO 13L /* Read Host Bridge State Info */
+#define PDC_PAT_IO_CLEAR_HOST_BRIDGE_INFO 14L /* Clear Host Bridge State Info*/
+#define PDC_PAT_IO_GET_PCI_ROUTING_TABLE_SIZE 15L /* Get PCI INT Routing Table
+ * Size */
+#define PDC_PAT_IO_GET_PCI_ROUTING_TABLE 16L /* Get PCI INT Routing Table */
+#define PDC_PAT_IO_GET_HINT_TABLE_SIZE 17L /* Get Hint Table Size */
+#define PDC_PAT_IO_GET_HINT_TABLE 18L /* Get Hint Table */
+#define PDC_PAT_IO_PCI_CONFIG_READ 19L /* PCI Config Read */
+#define PDC_PAT_IO_PCI_CONFIG_WRITE 20L /* PCI Config Write */
+#define PDC_PAT_IO_GET_NUM_IO_SLOTS 21L /* Get Number of I/O Bay Slots in
+ * Cabinet */
+#define PDC_PAT_IO_GET_LOC_IO_SLOTS 22L /* Get Physical Location of I/O */
+ /* Bay Slots in Cabinet */
+#define PDC_PAT_IO_BAY_STATUS_INFO 28L /* Get I/O Bay Slot Status Info */
+#define PDC_PAT_IO_GET_PROC_VIEW 29L /* Get Processor view of IO address */
+#define PDC_PAT_IO_PROG_SBA_DIR_RANGE 30L /* Program directed range */
+
+
+/* PDC PAT MEM -- Manage memory page deallocation */
+
+#define PDC_PAT_MEM 72L
+#define PDC_PAT_MEM_PD_INFO 0L /* Return PDT info for PD */
+#define PDC_PAT_MEM_PD_CLEAR 1L /* Clear PDT for PD */
+#define PDC_PAT_MEM_PD_READ 2L /* Read PDT entries for PD */
+#define PDC_PAT_MEM_PD_RESET 3L /* Reset clear bit for PD */
+#define PDC_PAT_MEM_CELL_INFO 5L /* Return PDT info For Cell */
+#define PDC_PAT_MEM_CELL_CLEAR 6L /* Clear PDT For Cell */
+#define PDC_PAT_MEM_CELL_READ 7L /* Read PDT entries For Cell */
+#define PDC_PAT_MEM_CELL_RESET 8L /* Reset clear bit For Cell */
+#define PDC_PAT_MEM_SETGM 9L /* Set Golden Memory value */
+#define PDC_PAT_MEM_ADD_PAGE 10L /* ADDs a page to the cell */
+#define PDC_PAT_MEM_ADDRESS 11L /* Get Physical Location From */
+ /* Memory Address */
+#define PDC_PAT_MEM_GET_TXT_SIZE 12L /* Get Formatted Text Size */
+#define PDC_PAT_MEM_GET_PD_TXT 13L /* Get PD Formatted Text */
+#define PDC_PAT_MEM_GET_CELL_TXT 14L /* Get Cell Formatted Text */
+#define PDC_PAT_MEM_RD_STATE_INFO 15L /* Read Mem Module State Info*/
+#define PDC_PAT_MEM_CLR_STATE_INFO 16L /*Clear Mem Module State Info*/
+#define PDC_PAT_MEM_CLEAN_RANGE 128L /*Clean Mem in specific range*/
+#define PDC_PAT_MEM_GET_TBL_SIZE 131L /* Get Memory Table Size */
+#define PDC_PAT_MEM_GET_TBL 132L /* Get Memory Table */
+
+
+/* PDC PAT NVOLATILE -- Access Non-Volatile Memory */
+
+#define PDC_PAT_NVOLATILE 73L
+#define PDC_PAT_NVOLATILE_READ 0L /* Read Non-Volatile Memory */
+#define PDC_PAT_NVOLATILE_WRITE 1L /* Write Non-Volatile Memory */
+#define PDC_PAT_NVOLATILE_GET_SIZE 2L /* Return size of NVM */
+#define PDC_PAT_NVOLATILE_VERIFY 3L /* Verify contents of NVM */
+#define PDC_PAT_NVOLATILE_INIT 4L /* Initialize NVM */
+
+/* PDC PAT PD */
+#define PDC_PAT_PD 74L /* Protection Domain Info */
+#define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */
+
+/* PDC_PAT_PD_GET_ADDR_MAP entry types */
+#define PAT_MEMORY_DESCRIPTOR 1
+
+/* PDC_PAT_PD_GET_ADDR_MAP memory types */
+#define PAT_MEMTYPE_MEMORY 0
+#define PAT_MEMTYPE_FIRMWARE 4
+
+/* PDC_PAT_PD_GET_ADDR_MAP memory usage */
+#define PAT_MEMUSE_GENERAL 0
+#define PAT_MEMUSE_GI 128
+#define PAT_MEMUSE_GNI 129
+
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+#ifdef CONFIG_64BIT
+#define is_pdc_pat() (PDC_TYPE_PAT == pdc_type)
+extern int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num);
+extern int pdc_pat_get_irt(void *r_addr, unsigned long cell_num);
+#else /* ! CONFIG_64BIT */
+/* No PAT support for 32-bit kernels...sorry */
+#define is_pdc_pat() (0)
+#define pdc_pat_get_irt_size(num_entries, cell_num) PDC_BAD_PROC
+#define pdc_pat_get_irt(r_addr, cell_num) PDC_BAD_PROC
+#endif /* ! CONFIG_64BIT */
+
+
+struct pdc_pat_cell_num {
+ unsigned long cell_num;
+ unsigned long cell_loc;
+};
+
+struct pdc_pat_cpu_num {
+ unsigned long cpu_num;
+ unsigned long cpu_loc;
+};
+
+struct pdc_pat_pd_addr_map_entry {
+ unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */
+ unsigned char reserve1[5];
+ unsigned char memory_type;
+ unsigned char memory_usage;
+ unsigned long paddr;
+ unsigned int pages; /* Length in 4K pages */
+ unsigned int reserve2;
+ unsigned long cell_map;
+};
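+
+/*
+** Illustrative decode of one returned map entry (buffer and helper names
+** are hypothetical; the map itself is filled in by
+** pdc_pat_pd_get_addr_map(), declared below):
+**
+**	struct pdc_pat_pd_addr_map_entry *e = (void *) map_buf;
+**
+**	if (e->entry_type == PAT_MEMORY_DESCRIPTOR &&
+**	    e->memory_type == PAT_MEMTYPE_MEMORY &&
+**	    e->memory_usage == PAT_MEMUSE_GENERAL)
+**		add_ram_range(e->paddr, (unsigned long) e->pages << 12);
+*/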
+
+/********************************************************************
+* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
+* ----------------------------------------------------------
+* Bit 0 to 51 - conf_base_addr
+* Bit 52 to 62 - reserved
+* Bit 63 - endianness bit
+********************************************************************/
+#define PAT_GET_CBA(value) ((value) & 0xfffffffffffff000UL)
+
+/********************************************************************
+* PDC_PAT_CELL[Return Cell Module] memaddr[1] mod_info
+* ----------------------------------------------------
+* Bit 0 to 7 - entity type
+* 0 = central agent, 1 = processor,
+* 2 = memory controller, 3 = system bus adapter,
+* 4 = local bus adapter, 5 = processor bus converter,
+* 6 = crossbar fabric connect, 7 = fabric interconnect,
+* 8 to 254 reserved, 255 = unknown.
+* Bit 8 to 15 - DVI
+* Bit 16 to 23 - IOC functions
+* Bit 24 to 39 - reserved
+* Bit 40 to 63 - mod_pages
+* number of 4K pages a module occupies starting at conf_base_addr
+********************************************************************/
+#define PAT_GET_ENTITY(value) (((value) >> 56) & 0xffUL)
+#define PAT_GET_DVI(value) (((value) >> 48) & 0xffUL)
+#define PAT_GET_IOC(value) (((value) >> 40) & 0xffUL)
+#define PAT_GET_MOD_PAGES(value) ((value) & 0xffffffUL)
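+
+/*
+** Illustrative decode of a PDC_PAT_CELL_MODULE return block ("blk" is a
+** hypothetical local of type struct pdc_pat_cell_mod_maddr_block, defined
+** further down):
+**
+**	if (PAT_GET_ENTITY(blk.mod_info) == PAT_ENTITY_LBA)
+**		lba_cfg_base = PAT_GET_CBA(blk.cba);
+*/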
+
+
+/*
+** PDC_PAT_CELL_GET_INFO return block
+*/
+typedef struct pdc_pat_cell_info_rtn_block {
+ unsigned long cpu_info;
+ unsigned long cell_info;
+ unsigned long cell_location;
+ unsigned long reo_location;
+ unsigned long mem_size;
+ unsigned long dimm_status;
+ unsigned long pdc_rev;
+ unsigned long fabric_info0;
+ unsigned long fabric_info1;
+ unsigned long fabric_info2;
+ unsigned long fabric_info3;
+ unsigned long reserved[21];
+} pdc_pat_cell_info_rtn_block_t;
+
+
+/* FIXME: mod[508] should really be a union of the various mod components */
+struct pdc_pat_cell_mod_maddr_block { /* PDC_PAT_CELL_MODULE */
+ unsigned long cba; /* func 0 cfg space address */
+ unsigned long mod_info; /* module information */
+ unsigned long mod_location; /* physical location of the module */
+ struct hardware_path mod_path; /* module path (device path - layers) */
+ unsigned long mod[508]; /* PAT cell module components */
+} __attribute__((aligned(8))) ;
+
+typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
+
+
+extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data);
+extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
+extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod, unsigned long view_type, void *mem_addr);
+extern int pdc_pat_cell_num_to_loc(void *, unsigned long);
+
+extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa);
+
+extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset);
+
+
+extern int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *val);
+extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val);
+
+
+/* Flag to indicate this is a PAT box...don't use this unless you
+** really have to...it might go away some day.
+*/
+extern int pdc_pat; /* arch/parisc/kernel/inventory.c */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* ! __PARISC_PATPDC_H */
diff --git a/arch/parisc/include/asm/percpu.h b/arch/parisc/include/asm/percpu.h
new file mode 100644
index 00000000..a0dcd197
--- /dev/null
+++ b/arch/parisc/include/asm/percpu.h
@@ -0,0 +1,7 @@
+#ifndef _PARISC_PERCPU_H
+#define _PARISC_PERCPU_H
+
+#include <asm-generic/percpu.h>
+
+#endif
+
diff --git a/arch/parisc/include/asm/perf.h b/arch/parisc/include/asm/perf.h
new file mode 100644
index 00000000..a18e1197
--- /dev/null
+++ b/arch/parisc/include/asm/perf.h
@@ -0,0 +1,74 @@
+#ifndef _ASM_PERF_H_
+#define _ASM_PERF_H_
+
+/* ioctls */
+#define PA_PERF_ON _IO('p', 1)
+#define PA_PERF_OFF _IOR('p', 2, unsigned int)
+#define PA_PERF_VERSION _IOR('p', 3, int)
+
+#define PA_PERF_DEV "perf"
+#define PA_PERF_MINOR 146
+
+/* Interface types */
+#define UNKNOWN_INTF 255
+#define ONYX_INTF 0
+#define CUDA_INTF 1
+
+/* Common Onyx and Cuda images */
+#define CPI 0
+#define BUSUTIL 1
+#define TLBMISS 2
+#define TLBHANDMISS 3
+#define PTKN 4
+#define PNTKN 5
+#define IMISS 6
+#define DMISS 7
+#define DMISS_ACCESS 8
+#define BIG_CPI 9
+#define BIG_LS 10
+#define BR_ABORT 11
+#define ISNT 12
+#define QUADRANT 13
+#define RW_PDFET 14
+#define RW_WDFET 15
+#define SHLIB_CPI 16
+
+/* Cuda only Images */
+#define FLOPS 17
+#define CACHEMISS 18
+#define BRANCHES 19
+#define CRSTACK 20
+#define I_CACHE_SPEC 21
+#define MAX_CUDA_IMAGES 22
+
+/* Onyx only Images */
+#define ADDR_INV_ABORT_ALU 17
+#define BRAD_STALL 18
+#define CNTL_IN_PIPEL 19
+#define DSNT_XFH 20
+#define FET_SIG1 21
+#define FET_SIG2 22
+#define G7_1 23
+#define G7_2 24
+#define G7_3 25
+#define G7_4 26
+#define MPB_LABORT 27
+#define PANIC 28
+#define RARE_INST 29
+#define RW_DFET 30
+#define RW_IFET 31
+#define RW_SDFET 32
+#define SPEC_IFET 33
+#define ST_COND0 34
+#define ST_COND1 35
+#define ST_COND2 36
+#define ST_COND3 37
+#define ST_COND4 38
+#define ST_UNPRED0 39
+#define ST_UNPRED1 40
+#define UNPRED 41
+#define GO_STORE 42
+#define SHLIB_CALL 43
+#define MAX_ONYX_IMAGES 44
+
+#endif
diff --git a/arch/parisc/include/asm/perf_event.h b/arch/parisc/include/asm/perf_event.h
new file mode 100644
index 00000000..1e0fd8ba
--- /dev/null
+++ b/arch/parisc/include/asm/perf_event.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_PARISC_PERF_EVENT_H
+#define __ASM_PARISC_PERF_EVENT_H
+
+/* Empty, just to avoid compiling error */
+
+#endif /* __ASM_PARISC_PERF_EVENT_H */
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
new file mode 100644
index 00000000..fc987a1c
--- /dev/null
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -0,0 +1,149 @@
+#ifndef _ASM_PGALLOC_H
+#define _ASM_PGALLOC_H
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/threads.h>
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+
+#include <asm/cache.h>
+
+/* Allocate the top level pgd (page directory)
+ *
+ * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
+ * allocate the first pmd adjacent to the pgd. This means that we can
+ * subtract a constant offset to get to it. The pmd and pgd sizes are
+ * arranged so that a single pmd covers 4GB (giving a full 64-bit
+ * process access to 8TB) so our lookups are effectively L2 for the
+ * first 4GB of the kernel (i.e. for all ILP32 processes and all the
+ * kernel for machines with under 4GB of memory) */
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
+ PGD_ALLOC_ORDER);
+ pgd_t *actual_pgd = pgd;
+
+ if (likely(pgd != NULL)) {
+ memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
+#ifdef CONFIG_64BIT
+ actual_pgd += PTRS_PER_PGD;
+ /* Populate first pmd with allocated memory. We mark it
+ * with PxD_FLAG_ATTACHED as a signal to the system that this
+ * pmd entry may not be cleared. */
+ __pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
+ PxD_FLAG_VALID |
+ PxD_FLAG_ATTACHED)
+ + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
+ /* The first pmd entry is also marked with PxD_FLAG_ATTACHED as
+ * a signal that this pmd may not be freed */
+ __pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
+#endif
+ }
+ return actual_pgd;
+}
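+
+/* Illustrative sketch (not part of the code): assuming 4KB pages and the
+ * 32-bit pgd/pmd entries implied by the (__u32) casts above, the order-2
+ * (16KB) block returned for a 64-bit kernel is laid out as
+ *
+ *   +0KB   permanently attached first pmd (2 pages)   <- pgd
+ *   +8KB   the pgd proper (2048 4-byte entries)       <- actual_pgd
+ *
+ * actual_pgd[0] points back at the attached pmd with PRESENT|VALID|ATTACHED
+ * set, and the pmd's first slot carries PxD_FLAG_ATTACHED, so pmd_free()
+ * and pgd_clear() know never to release it.
+ */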
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+#ifdef CONFIG_64BIT
+ pgd -= PTRS_PER_PGD;
+#endif
+ free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
+}
+
+#if PT_NLEVELS == 3
+
+/* Three Level Page Table Support for pmd's */
+
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+ __pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
+ (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+}
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
+ PMD_ORDER);
+ if (pmd)
+ memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
+ return pmd;
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+#ifdef CONFIG_64BIT
+ if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+ /* This is the permanent pmd attached to the pgd;
+ * cannot free it */
+ return;
+#endif
+ free_pages((unsigned long)pmd, PMD_ORDER);
+}
+
+#else
+
+/* Two Level Page Table Support for pmd's */
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+
+#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
+#define pmd_free(mm, x) do { } while (0)
+#define pgd_populate(mm, pmd, pte) BUG()
+
+#endif
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+#ifdef CONFIG_64BIT
+ /* preserve the gateway marker if this is the beginning of
+ * the permanent pmd */
+ if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+ __pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
+ PxD_FLAG_VALID |
+ PxD_FLAG_ATTACHED)
+ + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
+ else
+#endif
+ __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
+ + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
+}
+
+#define pmd_populate(mm, pmd, pte_page) \
+ pmd_populate_kernel(mm, pmd, page_address(pte_page))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ if (page)
+ pgtable_page_ctor(page);
+ return page;
+}
+
+static inline pte_t *
+pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+{
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ return pte;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+ pgtable_page_dtor(pte);
+ pte_free_kernel(mm, page_address(pte));
+}
+
+#define check_pgt_cache() do { } while (0)
+
+#endif
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
new file mode 100644
index 00000000..22dadeb5
--- /dev/null
+++ b/arch/parisc/include/asm/pgtable.h
@@ -0,0 +1,512 @@
+#ifndef _PARISC_PGTABLE_H
+#define _PARISC_PGTABLE_H
+
+#include <asm-generic/4level-fixup.h>
+
+#include <asm/fixmap.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * we simulate an x86-style page table for the linux mm code
+ */
+
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+
+struct vm_area_struct;
+
+/*
+ * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+ * memory. For the return value to be meaningful, ADDR must be >=
+ * PAGE_OFFSET. This operation can be relatively expensive (e.g.,
+ * require a hash-, or multi-level tree-lookup or something of that
+ * sort) but it guarantees to return TRUE only if accessing the page
+ * at that address does not cause an error. Note that there may be
+ * addresses for which kern_addr_valid() returns FALSE even though an
+ * access would not cause an error (e.g., this is typically true for
+ * memory-mapped I/O regions).
+ *
+ * XXX Need to implement this for parisc.
+ */
+#define kern_addr_valid(addr) (1)
+
+/* Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) \
+ do{ \
+ *(pteptr) = (pteval); \
+ } while(0)
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+#endif /* !__ASSEMBLY__ */
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
+
+/* This is the size of the initially mapped kernel memory */
+#define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */
+#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+#define PT_NLEVELS 3
+#define PGD_ORDER 1 /* Number of pages per pgd */
+#define PMD_ORDER 1 /* Number of pages per pmd */
+#define PGD_ALLOC_ORDER 2 /* first pgd contains pmd */
+#else
+#define PT_NLEVELS 2
+#define PGD_ORDER 1 /* Number of pages per pgd */
+#define PGD_ALLOC_ORDER PGD_ORDER
+#endif
+
+/* Definitions for 3rd level (we use PLD here for Page Lower directory
+ * because PTE_SHIFT is used lower down to mean shift that has to be
+ * done to get usable bits out of the PTE) */
+#define PLD_SHIFT PAGE_SHIFT
+#define PLD_SIZE PAGE_SIZE
+#define BITS_PER_PTE (PAGE_SHIFT - BITS_PER_PTE_ENTRY)
+#define PTRS_PER_PTE (1UL << BITS_PER_PTE)
+
+/* Definitions for 2nd level */
+#define pgtable_cache_init() do { } while (0)
+
+#define PMD_SHIFT (PLD_SHIFT + BITS_PER_PTE)
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#if PT_NLEVELS == 3
+#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
+#else
+#define BITS_PER_PMD 0
+#endif
+#define PTRS_PER_PMD (1UL << BITS_PER_PMD)
+
+/* Definitions for 1st level */
+#define PGDIR_SHIFT (PMD_SHIFT + BITS_PER_PMD)
+#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
+#define BITS_PER_PGD (BITS_PER_LONG - PGDIR_SHIFT)
+#else
+#define BITS_PER_PGD (PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
+#endif
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD (1UL << BITS_PER_PGD)
+#define USER_PTRS_PER_PGD PTRS_PER_PGD
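+
+/* Worked example (a sketch, assuming PAGE_SHIFT = 12 and the entry sizes
+ * from <asm/page.h>: 8-byte ptes, 4-byte pmd/pgd entries) for the 64-bit,
+ * 4KB-page configuration above:
+ *
+ *   BITS_PER_PTE = 12 - 3 = 9      -> 512 ptes/page,     PMD_SIZE    = 2MB
+ *   BITS_PER_PMD = 12 + 1 - 2 = 11 -> 2048 pmd entries,  PGDIR_SIZE  = 4GB
+ *   BITS_PER_PGD = 12 + 1 - 2 = 11 -> 2048 pgd entries,  MAX_ADDRESS = 8TB
+ *
+ * which matches the "one pmd covers 4GB / 8TB per process" description in
+ * asm/pgalloc.h.
+ */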
+
+#ifdef CONFIG_64BIT
+#define MAX_ADDRBITS (PGDIR_SHIFT + BITS_PER_PGD)
+#define MAX_ADDRESS (1UL << MAX_ADDRBITS)
+#define SPACEID_SHIFT (MAX_ADDRBITS - 32)
+#else
+#define MAX_ADDRBITS (BITS_PER_LONG)
+#define MAX_ADDRESS (1UL << MAX_ADDRBITS)
+#define SPACEID_SHIFT 0
+#endif
+
+/* This calculates the number of initial pages we need for the initial
+ * page tables */
+#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
+# define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
+#else
+# define PT_INITIAL (1) /* all initial PTEs fit into one page */
+#endif
+
+/*
+ * pgd entries used up by user/kernel:
+ */
+
+#define FIRST_USER_ADDRESS 0
+
+/* NB: The tlb miss handlers make certain assumptions about the order */
+/* of the following bits, so be careful (One example, bits 25-31 */
+/* are moved together in one instruction). */
+
+#define _PAGE_READ_BIT 31 /* (0x001) read access allowed */
+#define _PAGE_WRITE_BIT 30 /* (0x002) write access allowed */
+#define _PAGE_EXEC_BIT 29 /* (0x004) execute access allowed */
+#define _PAGE_GATEWAY_BIT 28 /* (0x008) privilege promotion allowed */
+#define _PAGE_DMB_BIT 27 /* (0x010) Data Memory Break enable (B bit) */
+#define _PAGE_DIRTY_BIT 26 /* (0x020) Page Dirty (D bit) */
+#define _PAGE_FILE_BIT _PAGE_DIRTY_BIT /* overload this bit */
+#define _PAGE_REFTRAP_BIT 25 /* (0x040) Page Ref. Trap enable (T bit) */
+#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */
+#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */
+#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */
+/* bit 21 was formerly the FLUSH bit but is now unused */
+#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */
+
+/* N.B. The bits are defined in terms of a 32 bit word above, so the */
+/* following macro is ok for both 32 and 64 bit. */
+
+#define xlate_pabit(x) (31 - x)
+
+/* This defines the shift to the usable bits in the PTE.  It is set so
+ * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
+ * to zero */
+#define PTE_SHIFT xlate_pabit(_PAGE_USER_BIT)
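+
+/* Worked example: the _PAGE_*_BIT numbers above use PA-RISC bit numbering,
+ * in which bit 0 is the most significant bit of the 32-bit word, so
+ * xlate_pabit() converts them into ordinary shift counts:
+ * _PAGE_READ_BIT = 31 -> 1 << 0 = 0x001, _PAGE_PRESENT_BIT = 22 ->
+ * 1 << 9 = 0x200, matching the hex values noted in the comments above.
+ */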
+
+/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
+#define PFN_PTE_SHIFT 12
+
+
+/* this is how many bits may be used by the file functions */
+#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_SHIFT)
+
+#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_SHIFT)
+#define pgoff_to_pte(off) ((pte_t) { ((off) << PTE_SHIFT) | _PAGE_FILE })
+
+#define _PAGE_READ (1 << xlate_pabit(_PAGE_READ_BIT))
+#define _PAGE_WRITE (1 << xlate_pabit(_PAGE_WRITE_BIT))
+#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
+#define _PAGE_EXEC (1 << xlate_pabit(_PAGE_EXEC_BIT))
+#define _PAGE_GATEWAY (1 << xlate_pabit(_PAGE_GATEWAY_BIT))
+#define _PAGE_DMB (1 << xlate_pabit(_PAGE_DMB_BIT))
+#define _PAGE_DIRTY (1 << xlate_pabit(_PAGE_DIRTY_BIT))
+#define _PAGE_REFTRAP (1 << xlate_pabit(_PAGE_REFTRAP_BIT))
+#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
+#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
+#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
+#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
+#define _PAGE_FILE (1 << xlate_pabit(_PAGE_FILE_BIT))
+
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXEC)
+#define _PAGE_KERNEL_RWX (_PAGE_KERNEL_EXEC | _PAGE_WRITE)
+#define _PAGE_KERNEL (_PAGE_KERNEL_RO | _PAGE_WRITE)
+
+/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
+ * are page-aligned, the low-order bits of that ptr are not needed for
+ * addressing and can hold a few meta-information bits instead.  We store
+ * the ptr shifted down, which lets us effectively address 40/42/44 bits
+ * of physical address space depending on 4k/16k/64k PAGE_SIZE */
+#define _PxD_PRESENT_BIT 31
+#define _PxD_ATTACHED_BIT 30
+#define _PxD_VALID_BIT 29
+
+#define PxD_FLAG_PRESENT (1 << xlate_pabit(_PxD_PRESENT_BIT))
+#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
+#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT))
+#define PxD_FLAG_MASK (0xf)
+#define PxD_FLAG_SHIFT (4)
+#define PxD_VALUE_SHIFT (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
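+
+/* Worked example (a sketch, assuming 4KB pages and 32-bit pmd/pgd entries):
+ * PxD_VALUE_SHIFT is then 8, so a pmd table at physical address 0x12345000
+ * is stored in its pgd entry as (0x12345000 >> 8) | PxD_FLAG_PRESENT |
+ * PxD_FLAG_VALID = 0x123455, and pgd_address()/pmd_address() below recover
+ * it as (0x123455 & ~0xf) << 8.  Page alignment guarantees the low flag
+ * nibble is free, and the 8-bit shift is what stretches a 32-bit entry to
+ * a 40-bit physical range.
+ */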
+
+#ifndef __ASSEMBLY__
+
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
+/* Others seem to make this executable, I don't know if that's correct
+ or not. The stack is mapped this way though so this is necessary
+ in the short term - dhd@linuxcare.com, 2000-08-08 */
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
+#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
+#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+#define PAGE_COPY PAGE_EXECREAD
+#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
+#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO)
+#define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
+
+
+/*
+ * We could have an execute only page using "gateway - promote to priv
+ * level 3", but that is kind of silly. So, the way things are defined
+ * now, we must always have read permission for pages with execute
+ * permission. For the fun of it we'll go ahead and support write only
+ * pages.
+ */
+
+ /*xwr*/
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 __P000 /* copy on write */
+#define __P011 __P001 /* copy on write */
+#define __P100 PAGE_EXECREAD
+#define __P101 PAGE_EXECREAD
+#define __P110 __P100 /* copy on write */
+#define __P111 __P101 /* copy on write */
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_WRITEONLY
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_EXECREAD
+#define __S101 PAGE_EXECREAD
+#define __S110 PAGE_RWX
+#define __S111 PAGE_RWX
+
+
+extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */
+
+/* initial page tables for 0-8MB for kernel */
+
+extern pte_t pg0[];
+
+/* zero page used for uninitialized stuff */
+
+extern unsigned long *empty_zero_page;
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#define pte_none(x) (pte_val(x) == 0)
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0)
+
+#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
+#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
+#define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK)
+#define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
+
+#if PT_NLEVELS == 3
+/* The first entry of the permanent pmd is not there if it contains
+ * the gateway marker */
+#define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
+#else
+#define pmd_none(x) (!pmd_val(x))
+#endif
+#define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID))
+#define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT)
+static inline void pmd_clear(pmd_t *pmd) {
+#if PT_NLEVELS == 3
+ if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+ /* This is the entry pointing to the permanent pmd
+ * attached to the pgd; cannot clear it */
+ __pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
+ else
+#endif
+ __pmd_val_set(*pmd, 0);
+}
+
+
+
+#if PT_NLEVELS == 3
+#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
+#define pgd_page(pgd) virt_to_page((void *)pgd_page_vaddr(pgd))
+
+/* For 64 bit we have three level tables */
+
+#define pgd_none(x) (!pgd_val(x))
+#define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID))
+#define pgd_present(x) (pgd_flag(x) & PxD_FLAG_PRESENT)
+static inline void pgd_clear(pgd_t *pgd) {
+#if PT_NLEVELS == 3
+ if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
+ /* This is the permanent pmd attached to the pgd; cannot
+ * free it */
+ return;
+#endif
+ __pgd_val_set(*pgd, 0);
+}
+#else
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline void pgd_clear(pgd_t * pgdp) { }
+#endif
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
+
+static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define __mk_pte(addr,pgprot) \
+({ \
+ pte_t __pte; \
+ \
+ pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot)); \
+ \
+ __pte; \
+})
+
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+ pte_t pte;
+ pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
+ return pte;
+}
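+
+/* Worked example: pfn_pte(0x1234, PAGE_KERNEL) yields a pte whose value is
+ * (0x1234 << PFN_PTE_SHIFT) | pgprot_val(PAGE_KERNEL); pte_pfn() below
+ * shifts the protection bits back out and returns 0x1234, and pte_page()
+ * turns that pfn into its struct page.
+ */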
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
+
+/* Permanent address of a page. On parisc we don't have highmem. */
+
+#define pte_pfn(x) (pte_val(x) >> PFN_PTE_SHIFT)
+
+#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
+
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_address(pmd)))
+
+#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
+#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
+
+#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+
+/* to find an entry in a page-table-directory */
+#define pgd_offset(mm, address) \
+((mm)->pgd + ((address) >> PGDIR_SHIFT))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* Find an entry in the second-level page table.. */
+
+#if PT_NLEVELS == 3
+#define pmd_offset(dir,address) \
+((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))
+#else
+#define pmd_offset(dir,addr) ((pmd_t *) dir)
+#endif
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
+#define pte_offset_kernel(pmd, address) \
+ ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
+#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
+#define pte_unmap(pte) do { } while (0)
+
+#define pte_unmap_nested(pte) do { } while (0)
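+
+/* Illustrative sketch (not a kernel API): a software walk of these tables
+ * for a user address, as the generic mm code does under the usual locks.
+ * The variable names here are hypothetical.
+ *
+ *   struct page *page = NULL;
+ *   pgd_t *pgd = pgd_offset(mm, addr);
+ *   if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
+ *           pmd_t *pmd = pmd_offset(pgd, addr);
+ *           if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
+ *                   pte_t *pte = pte_offset_map(pmd, addr);
+ *                   pte_t entry = *pte;
+ *                   pte_unmap(pte);
+ *                   if (pte_present(entry))
+ *                           page = pte_page(entry);
+ *           }
+ *   }
+ *
+ * On the 2-level configuration pmd_offset() simply folds back onto the pgd
+ * entry, so the same walk works for both layouts.
+ */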
+
+extern void paging_init (void);
+
+/* Used for deferring calls to flush_dcache_page() */
+
+#define PG_dcache_dirty PG_arch_1
+
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+
+/* Encode and de-code a swap entry */
+
+#define __swp_type(x) ((x).val & 0x1f)
+#define __swp_offset(x) ( (((x).val >> 6) & 0x7) | \
+ (((x).val >> 8) & ~0x7) )
+#define __swp_entry(type, offset) ((swp_entry_t) { (type) | \
+ ((offset & 0x7) << 6) | \
+ ((offset & ~0x7) << 8) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
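+
+/* Worked example (a sketch): __swp_entry(5, 0x123) packs type 5 into bits
+ * 0-4, the low three offset bits into bits 6-8 and the rest from bit 11 up,
+ * giving 0x120c5; __swp_type()/__swp_offset() invert the packing.  The
+ * layout always leaves bit 5 (_PAGE_FILE/_PAGE_DIRTY) and bit 9
+ * (_PAGE_PRESENT) clear, so a swap entry can never look like a present or
+ * file pte.
+ */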
+
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+ if (!pte_young(*ptep))
+ return 0;
+ return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
+#else
+ pte_t pte = *ptep;
+ if (!pte_young(pte))
+ return 0;
+ set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+ return 1;
+#endif
+}
+
+extern spinlock_t pa_dbit_lock;
+
+struct mm_struct;
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t old_pte;
+
+ spin_lock(&pa_dbit_lock);
+ old_pte = *ptep;
+ pte_clear(mm,addr,ptep);
+ spin_unlock(&pa_dbit_lock);
+
+ return old_pte;
+}
+
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+ unsigned long new, old;
+
+ do {
+ old = pte_val(*ptep);
+ new = pte_val(pte_wrprotect(__pte (old)));
+ } while (cmpxchg((unsigned long *) ptep, old, new) != old);
+#else
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+#endif
+}
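+
+/* Presumably the cmpxchg loop (rather than a plain read-modify-write) is
+ * needed because the TLB miss handlers may set the accessed/dirty bits in
+ * the pte concurrently (cf. pa_dbit_lock above); retrying the atomic update
+ * until it succeeds avoids losing such a concurrent change.
+ */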
+
+#define pte_same(A,B) (pte_val(A) == pte_val(B))
+
+#endif /* !__ASSEMBLY__ */
+
+
+/* TLB page size encoding - see table 3-1 in parisc20.pdf */
+#define _PAGE_SIZE_ENCODING_4K 0
+#define _PAGE_SIZE_ENCODING_16K 1
+#define _PAGE_SIZE_ENCODING_64K 2
+#define _PAGE_SIZE_ENCODING_256K 3
+#define _PAGE_SIZE_ENCODING_1M 4
+#define _PAGE_SIZE_ENCODING_4M 5
+#define _PAGE_SIZE_ENCODING_16M 6
+#define _PAGE_SIZE_ENCODING_64M 7
+
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
+#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
+#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
+#endif
+
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
+
+/* We provide our own get_unmapped_area to provide cache coherency */
+
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTE_SAME
+#include <asm-generic/pgtable.h>
+
+#endif /* _PARISC_PGTABLE_H */
diff --git a/arch/parisc/include/asm/poll.h b/arch/parisc/include/asm/poll.h
new file mode 100644
index 00000000..c98509d3
--- /dev/null
+++ b/arch/parisc/include/asm/poll.h
@@ -0,0 +1 @@
+#include <asm-generic/poll.h>
diff --git a/arch/parisc/include/asm/posix_types.h b/arch/parisc/include/asm/posix_types.h
new file mode 100644
index 00000000..00da29a3
--- /dev/null
+++ b/arch/parisc/include/asm/posix_types.h
@@ -0,0 +1,128 @@
+#ifndef __ARCH_PARISC_POSIX_TYPES_H
+#define __ARCH_PARISC_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+typedef unsigned long __kernel_ino_t;
+typedef unsigned short __kernel_mode_t;
+typedef unsigned short __kernel_nlink_t;
+typedef long __kernel_off_t;
+typedef int __kernel_pid_t;
+typedef unsigned short __kernel_ipc_pid_t;
+typedef unsigned int __kernel_uid_t;
+typedef unsigned int __kernel_gid_t;
+typedef int __kernel_suseconds_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_timer_t;
+typedef int __kernel_clockid_t;
+typedef int __kernel_daddr_t;
+/* Note these change from narrow to wide kernels */
+#ifdef CONFIG_64BIT
+typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
+typedef long __kernel_ptrdiff_t;
+#else
+typedef unsigned int __kernel_size_t;
+typedef int __kernel_ssize_t;
+typedef int __kernel_ptrdiff_t;
+#endif
+typedef long __kernel_time_t;
+typedef char * __kernel_caddr_t;
+
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+typedef unsigned int __kernel_uid32_t;
+typedef unsigned int __kernel_gid32_t;
+
+#ifdef __GNUC__
+typedef long long __kernel_loff_t;
+typedef long long __kernel_off64_t;
+typedef unsigned long long __kernel_ino64_t;
+#endif
+
+typedef unsigned int __kernel_old_dev_t;
+
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+
+/* compatibility stuff */
+typedef __kernel_uid_t __kernel_old_uid_t;
+typedef __kernel_gid_t __kernel_old_gid_t;
+
+#if defined(__KERNEL__)
+
+#undef __FD_SET
+static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+ unsigned long __tmp = __fd / __NFDBITS;
+ unsigned long __rem = __fd % __NFDBITS;
+ __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
+}
+
+#undef __FD_CLR
+static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+ unsigned long __tmp = __fd / __NFDBITS;
+ unsigned long __rem = __fd % __NFDBITS;
+ __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
+}
+
+#undef __FD_ISSET
+static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
+{
+ unsigned long __tmp = __fd / __NFDBITS;
+ unsigned long __rem = __fd % __NFDBITS;
+ return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
+}
+
+/*
+ * This will unroll the loop for the normal constant case (8 ints,
+ * for a 256-bit fd_set)
+ */
+#undef __FD_ZERO
+static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
+{
+ unsigned long *__tmp = __p->fds_bits;
+ int __i;
+
+ if (__builtin_constant_p(__FDSET_LONGS)) {
+ switch (__FDSET_LONGS) {
+ case 16:
+ __tmp[ 0] = 0; __tmp[ 1] = 0;
+ __tmp[ 2] = 0; __tmp[ 3] = 0;
+ __tmp[ 4] = 0; __tmp[ 5] = 0;
+ __tmp[ 6] = 0; __tmp[ 7] = 0;
+ __tmp[ 8] = 0; __tmp[ 9] = 0;
+ __tmp[10] = 0; __tmp[11] = 0;
+ __tmp[12] = 0; __tmp[13] = 0;
+ __tmp[14] = 0; __tmp[15] = 0;
+ return;
+
+ case 8:
+ __tmp[ 0] = 0; __tmp[ 1] = 0;
+ __tmp[ 2] = 0; __tmp[ 3] = 0;
+ __tmp[ 4] = 0; __tmp[ 5] = 0;
+ __tmp[ 6] = 0; __tmp[ 7] = 0;
+ return;
+
+ case 4:
+ __tmp[ 0] = 0; __tmp[ 1] = 0;
+ __tmp[ 2] = 0; __tmp[ 3] = 0;
+ return;
+ }
+ }
+ __i = __FDSET_LONGS;
+ while (__i) {
+ __i--;
+ *__tmp = 0;
+ __tmp++;
+ }
+}
+
+#endif /* defined(__KERNEL__) */
+
+#endif
diff --git a/arch/parisc/include/asm/prefetch.h b/arch/parisc/include/asm/prefetch.h
new file mode 100644
index 00000000..1ee7c826
--- /dev/null
+++ b/arch/parisc/include/asm/prefetch.h
@@ -0,0 +1,44 @@
+/*
+ * include/asm-parisc/prefetch.h
+ *
+ * PA 2.0 defines data prefetch instructions on page 6-11 of the Kane book.
+ * In addition, many implementations do hardware prefetching of both
+ * instructions and data.
+ *
+ * PA7300LC (page 14-4 of the ERS) also implements prefetching by a load
+ * to gr0 but not in a way that Linux can use. If the load would cause an
+ * interruption (e.g. due to prefetching 0), it is suppressed on PA2.0
+ * processors, but not on 7300LC.
+ *
+ */
+
+#ifndef __ASM_PARISC_PREFETCH_H
+#define __ASM_PARISC_PREFETCH_H
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_PREFETCH
+
+#define ARCH_HAS_PREFETCH
+static inline void prefetch(const void *addr)
+{
+ __asm__(
+#ifndef CONFIG_PA20
+ /* Need to avoid prefetch of NULL on PA7300LC */
+ " extrw,u,= %0,31,32,%%r0\n"
+#endif
+ " ldw 0(%0), %%r0" : : "r" (addr));
+}
+
+/* LDD is a PA2.0 addition. */
+#ifdef CONFIG_PA20
+#define ARCH_HAS_PREFETCHW
+static inline void prefetchw(const void *addr)
+{
+ __asm__("ldd 0(%0), %%r0" : : "r" (addr));
+}
+#endif /* CONFIG_PA20 */
+
+#endif /* CONFIG_PREFETCH */
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_PARISC_PREFETCH_H */
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
new file mode 100644
index 00000000..9ce66e9d
--- /dev/null
+++ b/arch/parisc/include/asm/processor.h
@@ -0,0 +1,360 @@
+/*
+ * include/asm-parisc/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ * Copyright (C) 2001 Grant Grundler
+ */
+
+#ifndef __ASM_PARISC_PROCESSOR_H
+#define __ASM_PARISC_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+#include <linux/threads.h>
+
+#include <asm/prefetch.h>
+#include <asm/hardware.h>
+#include <asm/pdc.h>
+#include <asm/ptrace.h>
+#include <asm/types.h>
+#include <asm/system.h>
+#include <asm/percpu.h>
+
+#endif /* __ASSEMBLY__ */
+
+#define KERNEL_STACK_SIZE (4*PAGE_SIZE)
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#ifdef CONFIG_PA20
+#define current_ia(x) __asm__("mfia %0" : "=r"(x))
+#else /* mfia added in pa2.0 */
+#define current_ia(x) __asm__("blr 0,%0\n\tnop" : "=r"(x))
+#endif
+#define current_text_addr() ({ void *pc; current_ia(pc); pc; })
+
+#define TASK_SIZE_OF(tsk) ((tsk)->thread.task_size)
+#define TASK_SIZE TASK_SIZE_OF(current)
+#define TASK_UNMAPPED_BASE (current->thread.map_base)
+
+#define DEFAULT_TASK_SIZE32 (0xFFF00000UL)
+#define DEFAULT_MAP_BASE32 (0x40000000UL)
+
+#ifdef CONFIG_64BIT
+#define DEFAULT_TASK_SIZE (MAX_ADDRESS-0xf000000)
+#define DEFAULT_MAP_BASE (0x200000000UL)
+#else
+#define DEFAULT_TASK_SIZE DEFAULT_TASK_SIZE32
+#define DEFAULT_MAP_BASE DEFAULT_MAP_BASE32
+#endif
+
+#ifdef __KERNEL__
+
+/* XXX: STACK_TOP actually should be STACK_BOTTOM for parisc.
+ * prumpf */
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX DEFAULT_TASK_SIZE
+
+#endif
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Data detected about CPUs at boot time which is the same for all CPUs.
+ * HP boxes are SMP, i.e. identical processors.
+ *
+ * FIXME: some CPU rev info may be processor specific...
+ */
+struct system_cpuinfo_parisc {
+ unsigned int cpu_count;
+ unsigned int cpu_hz;
+ unsigned int hversion;
+ unsigned int sversion;
+ enum cpu_type cpu_type;
+
+ struct {
+ struct pdc_model model;
+ unsigned long versions;
+ unsigned long cpuid;
+ unsigned long capabilities;
+ char sys_model_name[81]; /* PDC-ROM returns this model name */
+ } pdc;
+
+ const char *cpu_name; /* e.g. "PA7300LC (PCX-L2)" */
+ const char *family_name; /* e.g. "1.1e" */
+};
+
+
+/* Per CPU data structure - ie varies per CPU. */
+struct cpuinfo_parisc {
+ unsigned long it_value; /* Interval Timer at last timer Intr */
+ unsigned long it_delta; /* Interval delta (tic_10ms / HZ * 100) */
+ unsigned long irq_count; /* number of IRQ's since boot */
+ unsigned long irq_max_cr16; /* longest time to handle a single IRQ */
+ unsigned long cpuid; /* aka slot_number or set to NO_PROC_ID */
+ unsigned long hpa; /* Host Physical address */
+ unsigned long txn_addr; /* MMIO addr of EIR or id_eid */
+#ifdef CONFIG_SMP
+ unsigned long pending_ipi; /* bitmap of type ipi_message_type */
+ unsigned long ipi_count; /* number of IPI interrupts */
+#endif
+ unsigned long bh_count; /* number of times bh was invoked */
+ unsigned long prof_counter; /* per CPU profiling support */
+ unsigned long prof_multiplier; /* per CPU profiling support */
+ unsigned long fp_rev;
+ unsigned long fp_model;
+ unsigned int state;
+ struct parisc_device *dev;
+ unsigned long loops_per_jiffy;
+};
+
+extern struct system_cpuinfo_parisc boot_cpu_data;
+DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data);
+
+#define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
+
+typedef struct {
+ int seg;
+} mm_segment_t;
+
+#define ARCH_MIN_TASKALIGN 8
+
+struct thread_struct {
+ struct pt_regs regs;
+ unsigned long task_size;
+ unsigned long map_base;
+ unsigned long flags;
+};
+
+#define task_pt_regs(tsk) ((struct pt_regs *)&((tsk)->thread.regs))
+
+/* Thread struct flags. */
+#define PARISC_UAC_NOPRINT (1UL << 0) /* see prctl and unaligned.c */
+#define PARISC_UAC_SIGBUS (1UL << 1)
+#define PARISC_KERNEL_DEATH (1UL << 31) /* see die_if_kernel()... */
+
+#define PARISC_UAC_SHIFT 0
+#define PARISC_UAC_MASK (PARISC_UAC_NOPRINT|PARISC_UAC_SIGBUS)
+
+#define SET_UNALIGN_CTL(task,value) \
+ ({ \
+ (task)->thread.flags = (((task)->thread.flags & ~PARISC_UAC_MASK) \
+ | (((value) << PARISC_UAC_SHIFT) & \
+ PARISC_UAC_MASK)); \
+ 0; \
+ })
+
+#define GET_UNALIGN_CTL(task,addr) \
+ ({ \
+ put_user(((task)->thread.flags & PARISC_UAC_MASK) \
+ >> PARISC_UAC_SHIFT, (int __user *) (addr)); \
+ })
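+
+/* These hooks back the generic prctl(2) PR_GET_UNALIGN/PR_SET_UNALIGN
+ * interface; a userspace sketch:
+ *
+ *   prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);   deliver SIGBUS instead of fixing up
+ *   prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT);  silence the unaligned-access warnings
+ */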
+
+#define INIT_THREAD { \
+ .regs = { .gr = { 0, }, \
+ .fr = { 0, }, \
+ .sr = { 0, }, \
+ .iasq = { 0, }, \
+ .iaoq = { 0, }, \
+ .cr27 = 0, \
+ }, \
+ .task_size = DEFAULT_TASK_SIZE, \
+ .map_base = DEFAULT_MAP_BASE, \
+ .flags = 0 \
+ }
+
+/*
+ * Return saved PC of a blocked thread. This is used by ps mostly.
+ */
+
+unsigned long thread_saved_pc(struct task_struct *t);
+void show_trace(struct task_struct *task, unsigned long *stack);
+
+/*
+ * Start user thread in another space.
+ *
+ * Note that we set both the iaoq and r31 to the new pc. When
+ * the kernel initially calls execve it will return through an
+ * rfi path that will use the values in the iaoq. The execve
+ * syscall path will return through the gateway page, and
+ * that uses r31 to branch to.
+ *
+ * For ELF we clear r23, because the dynamic linker uses it to pass
+ * the address of the finalizer function.
+ *
+ * We also initialize sr3 to an illegal value (illegal for our
+ * implementation, not for the architecture).
+ */
+typedef unsigned int elf_caddr_t;
+
+#define start_thread_som(regs, new_pc, new_sp) do { \
+ unsigned long *sp = (unsigned long *)new_sp; \
+ __u32 spaceid = (__u32)current->mm->context; \
+ unsigned long pc = (unsigned long)new_pc; \
+ /* offset pc for priv. level */ \
+ pc |= 3; \
+ \
+ set_fs(USER_DS); \
+ regs->iasq[0] = spaceid; \
+ regs->iasq[1] = spaceid; \
+ regs->iaoq[0] = pc; \
+ regs->iaoq[1] = pc + 4; \
+ regs->sr[2] = LINUX_GATEWAY_SPACE; \
+ regs->sr[3] = 0xffff; \
+ regs->sr[4] = spaceid; \
+ regs->sr[5] = spaceid; \
+ regs->sr[6] = spaceid; \
+ regs->sr[7] = spaceid; \
+ regs->gr[ 0] = USER_PSW; \
+ regs->gr[30] = ((new_sp)+63)&~63; \
+ regs->gr[31] = pc; \
+ \
+ get_user(regs->gr[26],&sp[0]); \
+ get_user(regs->gr[25],&sp[-1]); \
+ get_user(regs->gr[24],&sp[-2]); \
+ get_user(regs->gr[23],&sp[-3]); \
+} while(0)
+
+/* The ELF abi wants things done a "wee bit" differently than
+ * som does. Supporting this behavior here avoids
+ * having our own version of create_elf_tables.
+ *
+ * Oh, and yes, that is not a typo, we are really passing argc in r25
+ * and argv in r24 (rather than r26 and r25). This is because that's
+ * where __libc_start_main wants them.
+ *
+ * Duplicated from dl-machine.h for the benefit of readers:
+ *
+ * Our initial stack layout is rather different from everyone else's
+ * due to the unique PA-RISC ABI. As far as I know it looks like
+ * this:
+
+ ----------------------------------- (user startup code creates this frame)
+ | 32 bytes of magic |
+ |---------------------------------|
+ | 32 bytes argument/sp save area |
+ |---------------------------------| (bprm->p)
+ | ELF auxiliary info |
+ | (up to 28 words) |
+ |---------------------------------|
+ | NULL |
+ |---------------------------------|
+ | Environment pointers |
+ |---------------------------------|
+ | NULL |
+ |---------------------------------|
+ | Argument pointers |
+ |---------------------------------| <- argv
+ | argc (1 word) |
+ |---------------------------------| <- bprm->exec (HACK!)
+ | N bytes of slack |
+ |---------------------------------|
+ | filename passed to execve |
+ |---------------------------------| (mm->env_end)
+ | env strings |
+ |---------------------------------| (mm->env_start, mm->arg_end)
+ | arg strings |
+ |---------------------------------|
+ | additional faked arg strings if |
+ | we're invoked via binfmt_script |
+ |---------------------------------| (mm->arg_start)
+ stack base is at TASK_SIZE - rlim_max.
+
+on downward growing arches, it looks like this:
+ stack base at TASK_SIZE
+ | filename passed to execve
+ | env strings
+ | arg strings
+ | faked arg strings
+ | slack
+ | ELF
+ | envps
+ | argvs
+ | argc
+
+ * The pleasant part of this is that if we need to skip arguments we
+ * can just decrement argc and move argv, because the stack pointer
+ * is utterly unrelated to the location of the environment and
+ * argument vectors.
+ *
+ * Note that the S/390 people took the easy way out and hacked their
+ * GCC to make the stack grow downwards.
+ *
+ * Final Note: For entry from syscall, the W (wide) bit of the PSW
+ * is stuffed into the lowest bit of the user sp (%r30), so we fill
+ * it in here from the current->personality
+ */
+
+#ifdef CONFIG_64BIT
+#define USER_WIDE_MODE (!test_thread_flag(TIF_32BIT))
+#else
+#define USER_WIDE_MODE 0
+#endif
+
+#define start_thread(regs, new_pc, new_sp) do { \
+ elf_addr_t *sp = (elf_addr_t *)new_sp; \
+ __u32 spaceid = (__u32)current->mm->context; \
+ elf_addr_t pc = (elf_addr_t)new_pc | 3; \
+ elf_caddr_t *argv = (elf_caddr_t *)bprm->exec + 1; \
+ \
+ set_fs(USER_DS); \
+ regs->iasq[0] = spaceid; \
+ regs->iasq[1] = spaceid; \
+ regs->iaoq[0] = pc; \
+ regs->iaoq[1] = pc + 4; \
+ regs->sr[2] = LINUX_GATEWAY_SPACE; \
+ regs->sr[3] = 0xffff; \
+ regs->sr[4] = spaceid; \
+ regs->sr[5] = spaceid; \
+ regs->sr[6] = spaceid; \
+ regs->sr[7] = spaceid; \
+ regs->gr[ 0] = USER_PSW | (USER_WIDE_MODE ? PSW_W : 0); \
+ regs->fr[ 0] = 0LL; \
+ regs->fr[ 1] = 0LL; \
+ regs->fr[ 2] = 0LL; \
+ regs->fr[ 3] = 0LL; \
+ regs->gr[30] = (((unsigned long)sp + 63) &~ 63) | (USER_WIDE_MODE ? 1 : 0); \
+ regs->gr[31] = pc; \
+ \
+ get_user(regs->gr[25], (argv - 1)); \
+ regs->gr[24] = (long) argv; \
+ regs->gr[23] = 0; \
+} while(0)
+
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+extern void map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm);
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) ((tsk)->thread.regs.iaoq[0])
+#define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30])
+
+#define cpu_relax() barrier()
+
+/* Used to identify the combined VIPT/PIPT cached CPUs which
+ * require a guarantee of coherency (no inequivalent aliases with
+ * different data, whether clean or not) to operate correctly */
+static inline int parisc_requires_coherency(void)
+{
+#ifdef CONFIG_PA8X00
+ return (boot_cpu_data.cpu_type == mako) ||
+ (boot_cpu_data.cpu_type == mako2);
+#else
+ return 0;
+#endif
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_PARISC_PROCESSOR_H */
diff --git a/arch/parisc/include/asm/psw.h b/arch/parisc/include/asm/psw.h
new file mode 100644
index 00000000..5a3e23c9
--- /dev/null
+++ b/arch/parisc/include/asm/psw.h
@@ -0,0 +1,62 @@
+#ifndef _PARISC_PSW_H
+#define _PARISC_PSW_H
+
+
+#define PSW_I 0x00000001
+#define PSW_D 0x00000002
+#define PSW_P 0x00000004
+#define PSW_Q 0x00000008
+
+#define PSW_R 0x00000010
+#define PSW_F 0x00000020
+#define PSW_G 0x00000040 /* PA1.x only */
+#define PSW_O 0x00000080 /* PA2.0 only */
+
+/* ssm/rsm instructions number PSW_W and PSW_E differently */
+#define PSW_SM_I PSW_I /* Enable External Interrupts */
+#define PSW_SM_D PSW_D
+#define PSW_SM_P PSW_P
+#define PSW_SM_Q PSW_Q /* Enable Interrupt State Collection */
+#define PSW_SM_R PSW_R /* Enable Recover Counter Trap */
+#define PSW_SM_W 0x200 /* PA2.0 only : Enable Wide Mode */
+
+#define PSW_SM_QUIET (PSW_SM_R+PSW_SM_Q+PSW_SM_P+PSW_SM_D+PSW_SM_I)
+
+#define PSW_CB 0x0000ff00
+
+#define PSW_M 0x00010000
+#define PSW_V 0x00020000
+#define PSW_C 0x00040000
+#define PSW_B 0x00080000
+
+#define PSW_X 0x00100000
+#define PSW_N 0x00200000
+#define PSW_L 0x00400000
+#define PSW_H 0x00800000
+
+#define PSW_T 0x01000000
+#define PSW_S 0x02000000
+#define PSW_E 0x04000000
+#define PSW_W 0x08000000 /* PA2.0 only */
+#define PSW_W_BIT 36 /* PA2.0 only */
+
+#define PSW_Z 0x40000000 /* PA1.x only */
+#define PSW_Y 0x80000000 /* PA1.x only */
+
+#ifdef CONFIG_64BIT
+# define PSW_HI_CB 0x000000ff /* PA2.0 only */
+#endif
+
+#ifdef CONFIG_64BIT
+# define USER_PSW_HI_MASK PSW_HI_CB
+# define WIDE_PSW PSW_W
+#else
+# define WIDE_PSW 0
+#endif
+
+/* Used when setting up for rfi */
+#define KERNEL_PSW (WIDE_PSW | PSW_C | PSW_Q | PSW_P | PSW_D)
+#define REAL_MODE_PSW (WIDE_PSW | PSW_Q)
+#define USER_PSW_MASK (WIDE_PSW | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
+#define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
+
+#endif
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
new file mode 100644
index 00000000..7f09533d
--- /dev/null
+++ b/arch/parisc/include/asm/ptrace.h
@@ -0,0 +1,64 @@
+#ifndef _PARISC_PTRACE_H
+#define _PARISC_PTRACE_H
+
+/* written by Philipp Rumpf, Copyright (C) 1999 SuSE GmbH Nuernberg
+** Copyright (C) 2000 Grant Grundler, Hewlett-Packard
+*/
+
+#include <linux/types.h>
+
+/* This struct defines the way the registers are stored on the
+ * stack during a system call.
+ *
+ * N.B. gdb/strace care about the size and offsets within this
+ * structure. If you change things, you may break object compatibility
+ * for those applications.
+ */
+
+struct pt_regs {
+ unsigned long gr[32]; /* PSW is in gr[0] */
+ __u64 fr[32];
+ unsigned long sr[ 8];
+ unsigned long iasq[2];
+ unsigned long iaoq[2];
+ unsigned long cr27;
+ unsigned long pad0; /* available for other uses */
+ unsigned long orig_r28;
+ unsigned long ksp;
+ unsigned long kpc;
+ unsigned long sar; /* CR11 */
+ unsigned long iir; /* CR19 */
+ unsigned long isr; /* CR20 */
+ unsigned long ior; /* CR21 */
+ unsigned long ipsw; /* CR22 */
+};
+
+/*
+ * The numbers chosen here are somewhat arbitrary but absolutely MUST
+ * not overlap with any of the numbers assigned in <linux/ptrace.h>.
+ *
+ * These ones are taken from IA-64 on the assumption that theirs are
+ * the most correct (and we also want to support PTRACE_SINGLEBLOCK
+ * since we have taken branch traps too)
+ */
+#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
+
+#ifdef __KERNEL__
+
+#define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS))
+
+#define arch_has_single_step() 1
+#define arch_has_block_step() 1
+
+/* XXX should we use iaoq[1] or iaoq[0] ? */
+#define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0)
+#define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0)
+#define instruction_pointer(regs) ((regs)->iaoq[0] & ~3)
+#define user_stack_pointer(regs) ((regs)->gr[30])
+unsigned long profile_pc(struct pt_regs *);
+extern void show_regs(struct pt_regs *);
+
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/parisc/include/asm/real.h b/arch/parisc/include/asm/real.h
new file mode 100644
index 00000000..82acb25d
--- /dev/null
+++ b/arch/parisc/include/asm/real.h
@@ -0,0 +1,5 @@
+#ifndef _PARISC_REAL_H
+#define _PARISC_REAL_H
+
+
+#endif
diff --git a/arch/parisc/include/asm/resource.h b/arch/parisc/include/asm/resource.h
new file mode 100644
index 00000000..8b06343b
--- /dev/null
+++ b/arch/parisc/include/asm/resource.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_PARISC_RESOURCE_H
+#define _ASM_PARISC_RESOURCE_H
+
+#define _STK_LIM_MAX (10 * _STK_LIM)
+#include <asm-generic/resource.h>
+
+#endif
diff --git a/arch/parisc/include/asm/ropes.h b/arch/parisc/include/asm/ropes.h
new file mode 100644
index 00000000..09f51d5a
--- /dev/null
+++ b/arch/parisc/include/asm/ropes.h
@@ -0,0 +1,322 @@
+#ifndef _ASM_PARISC_ROPES_H_
+#define _ASM_PARISC_ROPES_H_
+
+#include <asm/parisc-device.h>
+
+#ifdef CONFIG_64BIT
+/* "low end" PA8800 machines use ZX1 chipset: PAT PDC and only run 64-bit */
+#define ZX1_SUPPORT
+#endif
+
+#ifdef CONFIG_PROC_FS
+/* depends on proc fs support. But costs CPU performance */
+#undef SBA_COLLECT_STATS
+#endif
+
+/*
+** The number of pdir entries to "free" before issuing
+** a read of the PCOM register to flush out PCOM writes.
+** Interacts with allocation granularity (i.e. 4 or 8 entries
+** allocated and freed/purged at a time might make this
+** less interesting).
+*/
+#define DELAYED_RESOURCE_CNT 16
+
+#define MAX_IOC 2 /* per Ike. Pluto/Astro only have 1. */
+#define ROPES_PER_IOC 8 /* per Ike half or Pluto/Astro */
+
+struct ioc {
+ void __iomem *ioc_hpa; /* I/O MMU base address */
+ char *res_map; /* resource map, bit == pdir entry */
+ u64 *pdir_base; /* physical base address */
+ unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
+ unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
+#ifdef ZX1_SUPPORT
+ unsigned long iovp_mask; /* help convert IOVA to IOVP */
+#endif
+ unsigned long *res_hint; /* next avail IOVP - circular search */
+ spinlock_t res_lock;
+ unsigned int res_bitshift; /* from the LEFT! */
+ unsigned int res_size; /* size of resource map in bytes */
+#ifdef SBA_HINT_SUPPORT
+/* FIXME : DMA HINTs not used */
+ unsigned long hint_mask_pdir; /* bits used for DMA hints */
+ unsigned int hint_shift_pdir;
+#endif
+#if DELAYED_RESOURCE_CNT > 0
+ int saved_cnt;
+ struct sba_dma_pair {
+ dma_addr_t iova;
+ size_t size;
+ } saved[DELAYED_RESOURCE_CNT];
+#endif
+
+#ifdef SBA_COLLECT_STATS
+#define SBA_SEARCH_SAMPLE 0x100
+ unsigned long avg_search[SBA_SEARCH_SAMPLE];
+ unsigned long avg_idx; /* current index into avg_search */
+ unsigned long used_pages;
+ unsigned long msingle_calls;
+ unsigned long msingle_pages;
+ unsigned long msg_calls;
+ unsigned long msg_pages;
+ unsigned long usingle_calls;
+ unsigned long usingle_pages;
+ unsigned long usg_calls;
+ unsigned long usg_pages;
+#endif
+ /* STUFF We don't need in performance path */
+ unsigned int pdir_size; /* in bytes, determined by IOV Space size */
+};
+
+struct sba_device {
+ struct sba_device *next; /* list of SBA's in system */
+ struct parisc_device *dev; /* dev found in bus walk */
+ const char *name;
+ void __iomem *sba_hpa; /* base address */
+ spinlock_t sba_lock;
+ unsigned int flags; /* state/functionality enabled */
+ unsigned int hw_rev; /* HW revision of chip */
+
+ struct resource chip_resv; /* MMIO reserved for chip */
+ struct resource iommu_resv; /* MMIO reserved for iommu */
+
+ unsigned int num_ioc; /* number of on-board IOC's */
+ struct ioc ioc[MAX_IOC];
+};
+
+#define ASTRO_RUNWAY_PORT 0x582
+#define IKE_MERCED_PORT 0x803
+#define REO_MERCED_PORT 0x804
+#define REOG_MERCED_PORT 0x805
+#define PLUTO_MCKINLEY_PORT 0x880
+
+static inline int IS_ASTRO(struct parisc_device *d) {
+ return d->id.hversion == ASTRO_RUNWAY_PORT;
+}
+
+static inline int IS_IKE(struct parisc_device *d) {
+ return d->id.hversion == IKE_MERCED_PORT;
+}
+
+static inline int IS_PLUTO(struct parisc_device *d) {
+ return d->id.hversion == PLUTO_MCKINLEY_PORT;
+}
+
+#define PLUTO_IOVA_BASE (1UL*1024*1024*1024) /* 1GB */
+#define PLUTO_IOVA_SIZE (1UL*1024*1024*1024) /* 1GB */
+#define PLUTO_GART_SIZE (PLUTO_IOVA_SIZE / 2)
+
+#define SBA_PDIR_VALID_BIT 0x8000000000000000ULL
+
+#define SBA_AGPGART_COOKIE 0x0000badbadc0ffeeULL
+
+#define SBA_FUNC_ID 0x0000 /* function id */
+#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
+
+#define SBA_FUNC_SIZE 4096 /* SBA configuration function reg set */
+
+#define ASTRO_IOC_OFFSET (32 * SBA_FUNC_SIZE)
+#define PLUTO_IOC_OFFSET (1 * SBA_FUNC_SIZE)
+/* Ike's IOC's occupy functions 2 and 3 */
+#define IKE_IOC_OFFSET(p) ((p+2) * SBA_FUNC_SIZE)
+
+#define IOC_CTRL 0x8 /* IOC_CTRL offset */
+#define IOC_CTRL_TC (1 << 0) /* TOC Enable */
+#define IOC_CTRL_CE (1 << 1) /* Coalesce Enable */
+#define IOC_CTRL_DE (1 << 2) /* Dillon Enable */
+#define IOC_CTRL_RM (1 << 8) /* Real Mode */
+#define IOC_CTRL_NC (1 << 9) /* Non Coherent Mode */
+#define IOC_CTRL_D4 (1 << 11) /* Disable 4-byte coalescing */
+#define IOC_CTRL_DD (1 << 13) /* Disable distr. LMMIO range coalescing */
+
+/*
+** Offsets into MBIB (Function 0 on Ike and hopefully Astro)
+** Firmware programs this stuff. Don't touch it.
+*/
+#define LMMIO_DIRECT0_BASE 0x300
+#define LMMIO_DIRECT0_MASK 0x308
+#define LMMIO_DIRECT0_ROUTE 0x310
+
+#define LMMIO_DIST_BASE 0x360
+#define LMMIO_DIST_MASK 0x368
+#define LMMIO_DIST_ROUTE 0x370
+
+#define IOS_DIST_BASE 0x390
+#define IOS_DIST_MASK 0x398
+#define IOS_DIST_ROUTE 0x3A0
+
+#define IOS_DIRECT_BASE 0x3C0
+#define IOS_DIRECT_MASK 0x3C8
+#define IOS_DIRECT_ROUTE 0x3D0
+
+/*
+** Offsets into I/O TLB (Function 2 and 3 on Ike)
+*/
+#define ROPE0_CTL 0x200 /* "regbus pci0" */
+#define ROPE1_CTL 0x208
+#define ROPE2_CTL 0x210
+#define ROPE3_CTL 0x218
+#define ROPE4_CTL 0x220
+#define ROPE5_CTL 0x228
+#define ROPE6_CTL 0x230
+#define ROPE7_CTL 0x238
+
+#define IOC_ROPE0_CFG 0x500 /* pluto only */
+#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
+
+#define HF_ENABLE 0x40
+
+#define IOC_IBASE 0x300 /* IO TLB */
+#define IOC_IMASK 0x308
+#define IOC_PCOM 0x310
+#define IOC_TCNFG 0x318
+#define IOC_PDIR_BASE 0x320
+
+/*
+** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
+** It's safer (avoid memory corruption) to keep DMA page mappings
+** equivalently sized to VM PAGE_SIZE.
+**
+** We really can't avoid generating a new mapping for each
+** page since the Virtual Coherence Index has to be generated
+** and updated for each page.
+**
+** PAGE_SIZE could be greater than IOVP_SIZE. But not the inverse.
+*/
+#define IOVP_SIZE PAGE_SIZE
+#define IOVP_SHIFT PAGE_SHIFT
+#define IOVP_MASK PAGE_MASK
+
+#define SBA_PERF_CFG 0x708 /* Performance Counter stuff */
+#define SBA_PERF_MASK1 0x718
+#define SBA_PERF_MASK2 0x730
+
+/*
+** Offsets into PCI Performance Counters (functions 12 and 13)
+** Controlled by PERF registers in function 2 & 3 respectively.
+*/
+#define SBA_PERF_CNT1 0x200
+#define SBA_PERF_CNT2 0x208
+#define SBA_PERF_CNT3 0x210
+
+/*
+** lba_device: Per instance Elroy data structure
+*/
+struct lba_device {
+ struct pci_hba_data hba;
+
+ spinlock_t lba_lock;
+ void *iosapic_obj;
+
+#ifdef CONFIG_64BIT
+ void __iomem *iop_base; /* PA_VIEW - for IO port accessor funcs */
+#endif
+
+ int flags; /* state/functionality enabled */
+ int hw_rev; /* HW revision of chip */
+};
+
+#define ELROY_HVERS 0x782
+#define MERCURY_HVERS 0x783
+#define QUICKSILVER_HVERS 0x784
+
+static inline int IS_ELROY(struct parisc_device *d) {
+ return (d->id.hversion == ELROY_HVERS);
+}
+
+static inline int IS_MERCURY(struct parisc_device *d) {
+ return (d->id.hversion == MERCURY_HVERS);
+}
+
+static inline int IS_QUICKSILVER(struct parisc_device *d) {
+ return (d->id.hversion == QUICKSILVER_HVERS);
+}
+
+static inline int agp_mode_mercury(void __iomem *hpa) {
+ u64 bus_mode;
+
+ bus_mode = readl(hpa + 0x0620);
+ if (bus_mode & 1)
+ return 1;
+
+ return 0;
+}
+
+/*
+** I/O SAPIC init functions.
+** The caller knows where an I/O SAPIC is; LBA has an integrated I/O SAPIC.
+** Call iosapic_register() as part of per-instance initialization
+** (i.e. *not* from an init_module() function, unless only one instance exists).
+** iosapic_fixup_irq() initializes PCI IRQ line support and virtualizes the
+** pcidev->irq value; it is meant to be called from pci_fixup_bus().
+*/
+extern void *iosapic_register(unsigned long hpa);
+extern int iosapic_fixup_irq(void *obj, struct pci_dev *pcidev);
+
+#define LBA_FUNC_ID 0x0000 /* function id */
+#define LBA_FCLASS 0x0008 /* function class, bist, header, rev... */
+#define LBA_CAPABLE 0x0030 /* capabilities register */
+
+#define LBA_PCI_CFG_ADDR 0x0040 /* poke CFG address here */
+#define LBA_PCI_CFG_DATA 0x0048 /* read or write data here */
+
+#define LBA_PMC_MTLT 0x0050 /* Firmware sets this - read only. */
+#define LBA_FW_SCRATCH 0x0058 /* Firmware writes the PCI bus number here. */
+#define LBA_ERROR_ADDR 0x0070 /* On error, address gets logged here */
+
+#define LBA_ARB_MASK 0x0080 /* bit 0 enable arbitration. PAT/PDC enables */
+#define LBA_ARB_PRI 0x0088 /* firmware sets this. */
+#define LBA_ARB_MODE 0x0090 /* firmware sets this. */
+#define LBA_ARB_MTLT 0x0098 /* firmware sets this. */
+
+#define LBA_MOD_ID 0x0100 /* Module ID. PDC_PAT_CELL reports 4 */
+
+#define LBA_STAT_CTL 0x0108 /* Status & Control */
+#define LBA_BUS_RESET 0x01 /* Deassert PCI Bus Reset Signal */
+#define CLEAR_ERRLOG 0x10 /* "Clear Error Log" cmd */
+#define CLEAR_ERRLOG_ENABLE 0x20 /* "Clear Error Log" Enable */
+#define HF_ENABLE 0x40 /* enable HF mode (default is -1 mode) */
+
+#define LBA_LMMIO_BASE 0x0200 /* < 4GB I/O address range */
+#define LBA_LMMIO_MASK 0x0208
+
+#define LBA_GMMIO_BASE 0x0210 /* > 4GB I/O address range */
+#define LBA_GMMIO_MASK 0x0218
+
+#define LBA_WLMMIO_BASE 0x0220 /* All < 4GB ranges under the same *SBA* */
+#define LBA_WLMMIO_MASK 0x0228
+
+#define LBA_WGMMIO_BASE 0x0230 /* All > 4GB ranges under the same *SBA* */
+#define LBA_WGMMIO_MASK 0x0238
+
+#define LBA_IOS_BASE 0x0240 /* I/O port space for this LBA */
+#define LBA_IOS_MASK 0x0248
+
+#define LBA_ELMMIO_BASE 0x0250 /* Extra LMMIO range */
+#define LBA_ELMMIO_MASK 0x0258
+
+#define LBA_EIOS_BASE 0x0260 /* Extra I/O port space */
+#define LBA_EIOS_MASK 0x0268
+
+#define LBA_GLOBAL_MASK 0x0270 /* Mercury only: Global Address Mask */
+#define LBA_DMA_CTL 0x0278 /* firmware sets this */
+
+#define LBA_IBASE 0x0300 /* SBA DMA support */
+#define LBA_IMASK 0x0308
+
+/* FIXME: ignore DMA Hint stuff until we can measure performance */
+#define LBA_HINT_CFG 0x0310
+#define LBA_HINT_BASE 0x0380 /* 14 registers at every 8 bytes. */
+
+#define LBA_BUS_MODE 0x0620
+
+/* ERROR regs are needed for config cycle kluges */
+#define LBA_ERROR_CONFIG 0x0680
+#define LBA_SMART_MODE 0x20
+#define LBA_ERROR_STATUS 0x0688
+#define LBA_ROPE_CTL 0x06A0
+
+#define LBA_IOSAPIC_BASE 0x800 /* Offset of IRQ logic */
+
+#endif /*_ASM_PARISC_ROPES_H_*/
diff --git a/arch/parisc/include/asm/rt_sigframe.h b/arch/parisc/include/asm/rt_sigframe.h
new file mode 100644
index 00000000..f0dd3b30
--- /dev/null
+++ b/arch/parisc/include/asm/rt_sigframe.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_PARISC_RT_SIGFRAME_H
+#define _ASM_PARISC_RT_SIGFRAME_H
+
+#define SIGRETURN_TRAMP 4
+#define SIGRESTARTBLOCK_TRAMP 5
+#define TRAMP_SIZE (SIGRETURN_TRAMP + SIGRESTARTBLOCK_TRAMP)
+
+struct rt_sigframe {
+ /* XXX: Must match trampoline size in arch/parisc/kernel/signal.c.
+ Secondary to that, it must protect the ERESTART_RESTARTBLOCK
+ trampoline we left on the stack (we were bad and didn't
+ change sp so we could run really fast.) */
+ unsigned int tramp[TRAMP_SIZE];
+ struct siginfo info;
+ struct ucontext uc;
+};
+
+#define SIGFRAME 128
+#define FUNCTIONCALLFRAME 96
+#define PARISC_RT_SIGFRAME_SIZE \
+ (((sizeof(struct rt_sigframe) + FUNCTIONCALLFRAME) + SIGFRAME) & -SIGFRAME)
+
+#endif
diff --git a/arch/parisc/include/asm/rtc.h b/arch/parisc/include/asm/rtc.h
new file mode 100644
index 00000000..099d641a
--- /dev/null
+++ b/arch/parisc/include/asm/rtc.h
@@ -0,0 +1,131 @@
+/*
+ * include/asm-parisc/rtc.h
+ *
+ * Copyright 2002 Randolph CHung <tausq@debian.org>
+ *
+ * Based on: include/asm-ppc/rtc.h and the genrtc driver in the
+ * 2.4 parisc linux tree
+ */
+
+#ifndef __ASM_RTC_H__
+#define __ASM_RTC_H__
+
+#ifdef __KERNEL__
+
+#include <linux/rtc.h>
+
+#include <asm/pdc.h>
+
+#define SECS_PER_HOUR (60 * 60)
+#define SECS_PER_DAY (SECS_PER_HOUR * 24)
+
+
+#define RTC_PIE 0x40 /* periodic interrupt enable */
+#define RTC_AIE 0x20 /* alarm interrupt enable */
+#define RTC_UIE 0x10 /* update-finished interrupt enable */
+
+#define RTC_BATT_BAD 0x100 /* battery bad */
+
+/* some dummy definitions */
+#define RTC_SQWE 0x08 /* enable square-wave output */
+#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
+#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
+#define RTC_DST_EN 0x01 /* auto switch DST - works for USA only */
+
+# define __isleap(year) \
+ ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
+
+/* How many days come before each month (0-12). */
+static const unsigned short int __mon_yday[2][13] =
+{
+ /* Normal years. */
+ { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
+ /* Leap years. */
+ { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
+};
+
+static inline unsigned int get_rtc_time(struct rtc_time *wtime)
+{
+ struct pdc_tod tod_data;
+ long int days, rem, y;
+ const unsigned short int *ip;
+
+ memset(wtime, 0, sizeof(*wtime));
+ if (pdc_tod_read(&tod_data) < 0)
+ return RTC_24H | RTC_BATT_BAD;
+
+ // most of the remainder of this function is:
+// Copyright (C) 1991, 1993, 1997, 1998 Free Software Foundation, Inc.
+// This was originally a part of the GNU C Library.
+// It is distributed under the GPL, and was swiped from offtime.c
+
+
+ days = tod_data.tod_sec / SECS_PER_DAY;
+ rem = tod_data.tod_sec % SECS_PER_DAY;
+
+ wtime->tm_hour = rem / SECS_PER_HOUR;
+ rem %= SECS_PER_HOUR;
+ wtime->tm_min = rem / 60;
+ wtime->tm_sec = rem % 60;
+
+ y = 1970;
+
+#define DIV(a, b) ((a) / (b) - ((a) % (b) < 0))
+#define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
+
+ while (days < 0 || days >= (__isleap (y) ? 366 : 365))
+ {
+ /* Guess a corrected year, assuming 365 days per year. */
+ long int yg = y + days / 365 - (days % 365 < 0);
+
+ /* Adjust DAYS and Y to match the guessed year. */
+ days -= ((yg - y) * 365
+ + LEAPS_THRU_END_OF (yg - 1)
+ - LEAPS_THRU_END_OF (y - 1));
+ y = yg;
+ }
+ wtime->tm_year = y - 1900;
+
+ ip = __mon_yday[__isleap(y)];
+ for (y = 11; days < (long int) ip[y]; --y)
+ continue;
+ days -= ip[y];
+ wtime->tm_mon = y;
+ wtime->tm_mday = days + 1;
+
+ return RTC_24H;
+}
+
+static int set_rtc_time(struct rtc_time *wtime)
+{
+ u_int32_t secs;
+
+ secs = mktime(wtime->tm_year + 1900, wtime->tm_mon + 1, wtime->tm_mday,
+ wtime->tm_hour, wtime->tm_min, wtime->tm_sec);
+
+ if (pdc_tod_set(secs, 0) < 0)
+ return -1;
+ else
+ return 0;
+}
+
+static inline unsigned int get_rtc_ss(void)
+{
+ struct rtc_time h;
+
+ get_rtc_time(&h);
+ return h.tm_sec;
+}
+
+static inline int get_rtc_pll(struct rtc_pll_info *pll)
+{
+ return -EINVAL;
+}
+static inline int set_rtc_pll(struct rtc_pll_info *pll)
+{
+ return -EINVAL;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_RTC_H__ */
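
get_rtc_time() above converts the PDC time-of-day seconds into a calendar date using the GNU offtime-style loop. A standalone sketch of the same conversion, with the kernel-only pieces (pdc_tod_read, struct rtc_time) dropped and an arbitrary example timestamp of 1000000000 seconds:

#include <stdio.h>

#define SECS_PER_HOUR	(60 * 60)
#define SECS_PER_DAY	(SECS_PER_HOUR * 24)
#define isleap(y)	((y) % 4 == 0 && ((y) % 100 != 0 || (y) % 400 == 0))

static const unsigned short mon_yday[2][13] = {
	{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
	{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 },
};

int main(void)
{
	unsigned long secs = 1000000000UL;	/* arbitrary example timestamp */
	long days = secs / SECS_PER_DAY;
	long rem  = secs % SECS_PER_DAY;
	long y = 1970, m;

	while (days >= (isleap(y) ? 366 : 365)) {	/* walk forward year by year */
		days -= isleap(y) ? 366 : 365;
		y++;
	}
	for (m = 11; days < mon_yday[isleap(y)][m]; m--)
		;					/* find the month */
	days -= mon_yday[isleap(y)][m];

	printf("%ld-%02ld-%02ld %02ld:%02ld:%02ld\n", y, m + 1, days + 1,
	       rem / SECS_PER_HOUR, rem % SECS_PER_HOUR / 60, rem % 60);
	/* prints 2001-09-09 01:46:40 */
	return 0;
}
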
diff --git a/arch/parisc/include/asm/runway.h b/arch/parisc/include/asm/runway.h
new file mode 100644
index 00000000..5bea02da
--- /dev/null
+++ b/arch/parisc/include/asm/runway.h
@@ -0,0 +1,12 @@
+#ifndef ASM_PARISC_RUNWAY_H
+#define ASM_PARISC_RUNWAY_H
+#ifdef __KERNEL__
+
+/* declared in arch/parisc/kernel/setup.c */
+extern struct proc_dir_entry * proc_runway_root;
+
+#define RUNWAY_STATUS 0x10
+#define RUNWAY_DEBUG 0x40
+
+#endif /* __KERNEL__ */
+#endif /* ASM_PARISC_RUNWAY_H */
diff --git a/arch/parisc/include/asm/scatterlist.h b/arch/parisc/include/asm/scatterlist.h
new file mode 100644
index 00000000..8bf1f0dd
--- /dev/null
+++ b/arch/parisc/include/asm/scatterlist.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_PARISC_SCATTERLIST_H
+#define _ASM_PARISC_SCATTERLIST_H
+
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm-generic/scatterlist.h>
+
+#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
+
+#endif /* _ASM_PARISC_SCATTERLIST_H */
diff --git a/arch/parisc/include/asm/sections.h b/arch/parisc/include/asm/sections.h
new file mode 100644
index 00000000..9d13c350
--- /dev/null
+++ b/arch/parisc/include/asm/sections.h
@@ -0,0 +1,12 @@
+#ifndef _PARISC_SECTIONS_H
+#define _PARISC_SECTIONS_H
+
+/* nothing to see, move along */
+#include <asm-generic/sections.h>
+
+#ifdef CONFIG_64BIT
+#undef dereference_function_descriptor
+void *dereference_function_descriptor(void *);
+#endif
+
+#endif
diff --git a/arch/parisc/include/asm/segment.h b/arch/parisc/include/asm/segment.h
new file mode 100644
index 00000000..26794ddb
--- /dev/null
+++ b/arch/parisc/include/asm/segment.h
@@ -0,0 +1,6 @@
+#ifndef __PARISC_SEGMENT_H
+#define __PARISC_SEGMENT_H
+
+/* Only here because we have some old header files that expect it.. */
+
+#endif
diff --git a/arch/parisc/include/asm/sembuf.h b/arch/parisc/include/asm/sembuf.h
new file mode 100644
index 00000000..1e59ffd3
--- /dev/null
+++ b/arch/parisc/include/asm/sembuf.h
@@ -0,0 +1,29 @@
+#ifndef _PARISC_SEMBUF_H
+#define _PARISC_SEMBUF_H
+
+/*
+ * The semid64_ds structure for parisc architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+#ifndef CONFIG_64BIT
+ unsigned int __pad1;
+#endif
+ __kernel_time_t sem_otime; /* last semop time */
+#ifndef CONFIG_64BIT
+ unsigned int __pad2;
+#endif
+ __kernel_time_t sem_ctime; /* last change time */
+ unsigned int sem_nsems; /* no. of semaphores in array */
+ unsigned int __unused1;
+ unsigned int __unused2;
+};
+
+#endif /* _PARISC_SEMBUF_H */
diff --git a/arch/parisc/include/asm/serial.h b/arch/parisc/include/asm/serial.h
new file mode 100644
index 00000000..d7e3cc60
--- /dev/null
+++ b/arch/parisc/include/asm/serial.h
@@ -0,0 +1,10 @@
+/*
+ * include/asm-parisc/serial.h
+ */
+
+/*
+ * This is used for 16550-compatible UARTs
+ */
+#define BASE_BAUD ( 1843200 / 16 )
+
+#define SERIAL_PORT_DFNS
diff --git a/arch/parisc/include/asm/setup.h b/arch/parisc/include/asm/setup.h
new file mode 100644
index 00000000..7da2e5b8
--- /dev/null
+++ b/arch/parisc/include/asm/setup.h
@@ -0,0 +1,6 @@
+#ifndef _PARISC_SETUP_H
+#define _PARISC_SETUP_H
+
+#define COMMAND_LINE_SIZE 1024
+
+#endif /* _PARISC_SETUP_H */
diff --git a/arch/parisc/include/asm/shmbuf.h b/arch/parisc/include/asm/shmbuf.h
new file mode 100644
index 00000000..0a3eada1
--- /dev/null
+++ b/arch/parisc/include/asm/shmbuf.h
@@ -0,0 +1,58 @@
+#ifndef _PARISC_SHMBUF_H
+#define _PARISC_SHMBUF_H
+
+/*
+ * The shmid64_ds structure for parisc architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+#ifndef CONFIG_64BIT
+ unsigned int __pad1;
+#endif
+ __kernel_time_t shm_atime; /* last attach time */
+#ifndef CONFIG_64BIT
+ unsigned int __pad2;
+#endif
+ __kernel_time_t shm_dtime; /* last detach time */
+#ifndef CONFIG_64BIT
+ unsigned int __pad3;
+#endif
+ __kernel_time_t shm_ctime; /* last change time */
+#ifndef CONFIG_64BIT
+ unsigned int __pad4;
+#endif
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned int shm_nattch; /* no. of current attaches */
+ unsigned int __unused1;
+ unsigned int __unused2;
+};
+
+#ifdef CONFIG_64BIT
+/* The 'unsigned int' (formerly 'unsigned long') data types below will
+ * ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on
+ * a wide kernel, but if some of these values are meant to contain pointers
+ * they may need to be 'long long' instead. -PB XXX FIXME
+ */
+#endif
+struct shminfo64 {
+ unsigned int shmmax;
+ unsigned int shmmin;
+ unsigned int shmmni;
+ unsigned int shmseg;
+ unsigned int shmall;
+ unsigned int __unused1;
+ unsigned int __unused2;
+ unsigned int __unused3;
+ unsigned int __unused4;
+};
+
+#endif /* _PARISC_SHMBUF_H */
diff --git a/arch/parisc/include/asm/shmparam.h b/arch/parisc/include/asm/shmparam.h
new file mode 100644
index 00000000..628ddc22
--- /dev/null
+++ b/arch/parisc/include/asm/shmparam.h
@@ -0,0 +1,8 @@
+#ifndef _ASMPARISC_SHMPARAM_H
+#define _ASMPARISC_SHMPARAM_H
+
+#define __ARCH_FORCE_SHMLBA 1
+
+#define SHMLBA 0x00400000 /* attach addr needs to be 4 MB aligned */
+
+#endif /* _ASMPARISC_SHMPARAM_H */
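
SHMLBA forces shmat() attach addresses to 4 MB granularity (on parisc this keeps shared mappings congruent with respect to the virtually-indexed caches). A minimal sketch of rounding a hypothetical attach hint up to that boundary:

#include <stdio.h>

#define SHMLBA 0x00400000UL	/* 4 MB */

int main(void)
{
	unsigned long addr = 0x12345678UL;	/* hypothetical attach hint */
	unsigned long aligned = (addr + SHMLBA - 1) & ~(SHMLBA - 1);

	printf("%#lx -> %#lx\n", addr, aligned);	/* 0x12345678 -> 0x12400000 */
	return 0;
}
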
diff --git a/arch/parisc/include/asm/sigcontext.h b/arch/parisc/include/asm/sigcontext.h
new file mode 100644
index 00000000..27ef31bb
--- /dev/null
+++ b/arch/parisc/include/asm/sigcontext.h
@@ -0,0 +1,20 @@
+#ifndef _ASMPARISC_SIGCONTEXT_H
+#define _ASMPARISC_SIGCONTEXT_H
+
+#define PARISC_SC_FLAG_ONSTACK 1<<0
+#define PARISC_SC_FLAG_IN_SYSCALL 1<<1
+
+/* We will add more stuff here as it becomes necessary, until we know
+ it works. */
+struct sigcontext {
+ unsigned long sc_flags;
+
+ unsigned long sc_gr[32]; /* PSW in sc_gr[0] */
+ unsigned long long sc_fr[32]; /* FIXME, do we need other state info? */
+ unsigned long sc_iasq[2];
+ unsigned long sc_iaoq[2];
+ unsigned long sc_sar; /* cr11 */
+};
+
+
+#endif
diff --git a/arch/parisc/include/asm/siginfo.h b/arch/parisc/include/asm/siginfo.h
new file mode 100644
index 00000000..d7034728
--- /dev/null
+++ b/arch/parisc/include/asm/siginfo.h
@@ -0,0 +1,9 @@
+#ifndef _PARISC_SIGINFO_H
+#define _PARISC_SIGINFO_H
+
+#include <asm-generic/siginfo.h>
+
+#undef NSIGTRAP
+#define NSIGTRAP 4
+
+#endif
diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h
new file mode 100644
index 00000000..c2035637
--- /dev/null
+++ b/arch/parisc/include/asm/signal.h
@@ -0,0 +1,153 @@
+#ifndef _ASM_PARISC_SIGNAL_H
+#define _ASM_PARISC_SIGNAL_H
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGEMT 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGBUS 10
+#define SIGSEGV 11
+#define SIGSYS 12 /* Linux doesn't use this */
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGUSR1 16
+#define SIGUSR2 17
+#define SIGCHLD 18
+#define SIGPWR 19
+#define SIGVTALRM 20
+#define SIGPROF 21
+#define SIGIO 22
+#define SIGPOLL SIGIO
+#define SIGWINCH 23
+#define SIGSTOP 24
+#define SIGTSTP 25
+#define SIGCONT 26
+#define SIGTTIN 27
+#define SIGTTOU 28
+#define SIGURG 29
+#define SIGLOST 30 /* Linux doesn't use this either */
+#define SIGUNUSED 31
+#define SIGRESERVE SIGUNUSED
+
+#define SIGXCPU 33
+#define SIGXFSZ 34
+#define SIGSTKFLT 36
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 37
+#define SIGRTMAX _NSIG /* it's 44 under HP/UX */
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_ONSTACK 0x00000001
+#define SA_RESETHAND 0x00000004
+#define SA_NOCLDSTOP 0x00000008
+#define SA_SIGINFO 0x00000010
+#define SA_NODEFER 0x00000020
+#define SA_RESTART 0x00000040
+#define SA_NOCLDWAIT 0x00000080
+#define _SA_SIGGFAULT 0x00000100 /* HPUX */
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+#define SA_RESTORER 0x04000000 /* obsolete -- ignored */
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#ifdef __KERNEL__
+
+#define _NSIG 64
+/* bits-per-word, where word apparently means 'long' not 'int' */
+#define _NSIG_BPW BITS_PER_LONG
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+#endif /* __KERNEL__ */
+
+#define SIG_BLOCK 0 /* for blocking signals */
+#define SIG_UNBLOCK 1 /* for unblocking signals */
+#define SIG_SETMASK 2 /* for setting the signal mask */
+
+#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
+#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
+#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
+
+# ifndef __ASSEMBLY__
+
+# include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+/* Type of a signal handler. */
+#ifdef CONFIG_64BIT
+/* function pointers on 64-bit parisc are pointers to little structs and the
+ * compiler doesn't support code which changes or tests the address of
+ * the function in the little struct. This is really ugly -PB
+ */
+typedef char __user *__sighandler_t;
+#else
+typedef void __signalfn_t(int);
+typedef __signalfn_t __user *__sighandler_t;
+#endif
+
+typedef struct sigaltstack {
+ void __user *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#ifdef __KERNEL__
+
+/* Most things should be clean enough to redefine this at will, if care
+ is taken to make libc match. */
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ /* next_signal() assumes this is a long - no choice */
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+ sigset_t sa_mask; /* mask last for extensibility */
+};
+
+struct k_sigaction {
+ struct sigaction sa;
+};
+
+#define ptrace_signal_deliver(regs, cookie) do { } while (0)
+
+#include <asm/sigcontext.h>
+
+#endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY */
+#endif /* _ASM_PARISC_SIGNAL_H */
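
A userspace sketch of the SA_* semantics documented above (SA_SIGINFO selects the three-argument handler, SA_RESTART restarts interrupted syscalls). It uses the standard POSIX sigaction API, not the kernel-internal struct sigaction defined in this header:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)info; (void)ctx;
	write(STDOUT_FILENO, "got SIGUSR1\n", 12);	/* async-signal-safe */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;	/* 3-arg handler; restart slow syscalls */
	sigemptyset(&sa.sa_mask);		/* block nothing extra while handling */
	if (sigaction(SIGUSR1, &sa, NULL) < 0) {
		perror("sigaction");
		return 1;
	}
	kill(getpid(), SIGUSR1);		/* deliver one signal to ourselves */
	return 0;
}
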
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
new file mode 100644
index 00000000..e8f8037d
--- /dev/null
+++ b/arch/parisc/include/asm/smp.h
@@ -0,0 +1,55 @@
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+
+#if defined(CONFIG_SMP)
+
+/* Page Zero locations where PDC will look for the address to branch to
+** when we poke slave CPUs that are still spinning in their "Icache loop".
+*/
+#define PDC_OS_BOOT_RENDEZVOUS 0x10
+#define PDC_OS_BOOT_RENDEZVOUS_HI 0x28
+
+#ifndef ASSEMBLY
+#include <linux/bitops.h>
+#include <linux/threads.h> /* for NR_CPUS */
+#include <linux/cpumask.h>
+typedef unsigned long address_t;
+
+
+/*
+ * Private routines/data
+ *
+ * physical and logical are equivalent until we support CPU hotplug.
+ */
+#define cpu_number_map(cpu) (cpu)
+#define cpu_logical_map(cpu) (cpu)
+
+extern void smp_send_reschedule(int cpu);
+extern void smp_send_all_nop(void);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#endif /* !ASSEMBLY */
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+#else /* CONFIG_SMP */
+
+static inline void smp_send_all_nop(void) { return; }
+
+#endif
+
+#define NO_PROC_ID 0xFF /* No processor magic marker */
+#define ANY_PROC_ID 0xFF /* Any processor magic marker */
+static inline int __cpu_disable (void) {
+ return 0;
+}
+static inline void __cpu_die (unsigned int cpu) {
+ while(1)
+ ;
+}
+extern int __cpu_up (unsigned int cpu);
+
+#endif /* __ASM_SMP_H */
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h
new file mode 100644
index 00000000..225b7d6a
--- /dev/null
+++ b/arch/parisc/include/asm/socket.h
@@ -0,0 +1,69 @@
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+#include <asm/sockios.h>
+
+/* For setsockopt(2) */
+#define SOL_SOCKET 0xffff
+
+#define SO_DEBUG 0x0001
+#define SO_REUSEADDR 0x0004
+#define SO_KEEPALIVE 0x0008
+#define SO_DONTROUTE 0x0010
+#define SO_BROADCAST 0x0020
+#define SO_LINGER 0x0080
+#define SO_OOBINLINE 0x0100
+/* To add :#define SO_REUSEPORT 0x0200 */
+#define SO_SNDBUF 0x1001
+#define SO_RCVBUF 0x1002
+#define SO_SNDBUFFORCE 0x100a
+#define SO_RCVBUFFORCE 0x100b
+#define SO_SNDLOWAT 0x1003
+#define SO_RCVLOWAT 0x1004
+#define SO_SNDTIMEO 0x1005
+#define SO_RCVTIMEO 0x1006
+#define SO_ERROR 0x1007
+#define SO_TYPE 0x1008
+#define SO_PROTOCOL 0x1028
+#define SO_DOMAIN 0x1029
+#define SO_PEERNAME 0x2000
+
+#define SO_NO_CHECK 0x400b
+#define SO_PRIORITY 0x400c
+#define SO_BSDCOMPAT 0x400e
+#define SO_PASSCRED 0x4010
+#define SO_PEERCRED 0x4011
+#define SO_TIMESTAMP 0x4012
+#define SCM_TIMESTAMP SO_TIMESTAMP
+#define SO_TIMESTAMPNS 0x4013
+#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION 0x4016
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x4017
+#define SO_SECURITY_ENCRYPTION_NETWORK 0x4018
+
+#define SO_BINDTODEVICE 0x4019
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER 0x401a
+#define SO_DETACH_FILTER 0x401b
+
+#define SO_ACCEPTCONN 0x401c
+
+#define SO_PEERSEC 0x401d
+#define SO_PASSSEC 0x401e
+
+#define SO_MARK 0x401f
+
+#define SO_TIMESTAMPING 0x4020
+#define SCM_TIMESTAMPING SO_TIMESTAMPING
+
+#define SO_RXQ_OVFL 0x4021
+
+/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
+ * have to define SOCK_NONBLOCK to a different value here.
+ */
+#define SOCK_NONBLOCK 0x40000000
+
+#endif /* _ASM_SOCKET_H */
diff --git a/arch/parisc/include/asm/sockios.h b/arch/parisc/include/asm/sockios.h
new file mode 100644
index 00000000..dabfbc74
--- /dev/null
+++ b/arch/parisc/include/asm/sockios.h
@@ -0,0 +1,13 @@
+#ifndef __ARCH_PARISC_SOCKIOS__
+#define __ARCH_PARISC_SOCKIOS__
+
+/* Socket-level I/O control calls. */
+#define FIOSETOWN 0x8901
+#define SIOCSPGRP 0x8902
+#define FIOGETOWN 0x8903
+#define SIOCGPGRP 0x8904
+#define SIOCATMARK 0x8905
+#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
+#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
+
+#endif
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
new file mode 100644
index 00000000..74036f43
--- /dev/null
+++ b/arch/parisc/include/asm/spinlock.h
@@ -0,0 +1,197 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/spinlock_types.h>
+
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
+{
+ volatile unsigned int *a = __ldcw_align(x);
+ return *a == 0;
+}
+
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+#define arch_spin_unlock_wait(x) \
+ do { cpu_relax(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_lock_flags(arch_spinlock_t *x,
+ unsigned long flags)
+{
+ volatile unsigned int *a;
+
+ mb();
+ a = __ldcw_align(x);
+ while (__ldcw(a) == 0)
+ while (*a == 0)
+ if (flags & PSW_SM_I) {
+ local_irq_enable();
+ cpu_relax();
+ local_irq_disable();
+ } else
+ cpu_relax();
+ mb();
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *x)
+{
+ volatile unsigned int *a;
+ mb();
+ a = __ldcw_align(x);
+ *a = 1;
+ mb();
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *x)
+{
+ volatile unsigned int *a;
+ int ret;
+
+ mb();
+ a = __ldcw_align(x);
+ ret = __ldcw(a) != 0;
+ mb();
+
+ return ret;
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Linux rwlocks are unfair to writers; they can be starved for an indefinite
+ * time by readers. With care, they can also be taken in interrupt context.
+ *
+ * In the PA-RISC implementation, we have a spinlock and a counter.
+ * Readers use the lock to serialise their access to the counter (which
+ * records how many readers currently hold the lock).
+ * Writers hold the spinlock, preventing any readers or other writers from
+ * grabbing the rwlock.
+ */
+
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to grab the same read lock */
+static __inline__ void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ arch_spin_lock_flags(&rw->lock, flags);
+ rw->counter++;
+ arch_spin_unlock(&rw->lock);
+ local_irq_restore(flags);
+}
+
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to grab the same read lock */
+static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ arch_spin_lock_flags(&rw->lock, flags);
+ rw->counter--;
+ arch_spin_unlock(&rw->lock);
+ local_irq_restore(flags);
+}
+
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to grab the same read lock */
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+ retry:
+ local_irq_save(flags);
+ if (arch_spin_trylock(&rw->lock)) {
+ rw->counter++;
+ arch_spin_unlock(&rw->lock);
+ local_irq_restore(flags);
+ return 1;
+ }
+
+ local_irq_restore(flags);
+ /* If write-locked, we fail to acquire the lock */
+ if (rw->counter < 0)
+ return 0;
+
+ /* Wait until we have a realistic chance at the lock */
+ while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
+ cpu_relax();
+
+ goto retry;
+}
+
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to read_trylock() this lock */
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+retry:
+ local_irq_save(flags);
+ arch_spin_lock_flags(&rw->lock, flags);
+
+ if (rw->counter != 0) {
+ arch_spin_unlock(&rw->lock);
+ local_irq_restore(flags);
+
+ while (rw->counter != 0)
+ cpu_relax();
+
+ goto retry;
+ }
+
+ rw->counter = -1; /* mark as write-locked */
+ mb();
+ local_irq_restore(flags);
+}
+
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
+{
+ rw->counter = 0;
+ arch_spin_unlock(&rw->lock);
+}
+
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to read_trylock() this lock */
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+ int result = 0;
+
+ local_irq_save(flags);
+ if (arch_spin_trylock(&rw->lock)) {
+ if (rw->counter == 0) {
+ rw->counter = -1;
+ result = 1;
+ } else {
+ /* Read-locked. Oh well. */
+ arch_spin_unlock(&rw->lock);
+ }
+ }
+ local_irq_restore(flags);
+
+ return result;
+}
+
+/*
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
+{
+ return rw->counter >= 0;
+}
+
+/*
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
+{
+ return !rw->counter;
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* __ASM_SPINLOCK_H */
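
The spinlock-plus-counter protocol described in the comment above, restated as a single-threaded model: the ldcw-based spinlock is replaced by a plain flag, so this only illustrates the state transitions (readers bump the counter while briefly holding the lock; a writer keeps the lock held and marks the counter -1), not the real atomics:

#include <assert.h>

struct model_rwlock {
	int lock;	/* 1 = spinlock free, 0 = held (stands in for the ldcw word) */
	int counter;	/* number of readers, or -1 when write-locked */
};

static void read_lock(struct model_rwlock *rw)
{
	rw->lock = 0;		/* take the spinlock... */
	rw->counter++;		/* ...just long enough to bump the reader count */
	rw->lock = 1;
}

static void read_unlock(struct model_rwlock *rw)
{
	rw->lock = 0;
	rw->counter--;
	rw->lock = 1;
}

static void write_lock(struct model_rwlock *rw)
{
	rw->lock = 0;		/* writer takes the spinlock and keeps it */
	assert(rw->counter == 0);
	rw->counter = -1;	/* mark as write-locked */
}

static void write_unlock(struct model_rwlock *rw)
{
	rw->counter = 0;
	rw->lock = 1;		/* release the spinlock last */
}

int main(void)
{
	struct model_rwlock rw = { 1, 0 };

	read_lock(&rw);
	read_lock(&rw);
	assert(rw.counter == 2);		/* two concurrent readers */
	read_unlock(&rw);
	read_unlock(&rw);
	write_lock(&rw);
	assert(rw.counter == -1 && rw.lock == 0);
	write_unlock(&rw);
	return 0;
}
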
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
new file mode 100644
index 00000000..8c373aa2
--- /dev/null
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+typedef struct {
+#ifdef CONFIG_PA20
+ volatile unsigned int slock;
+# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
+#else
+ volatile unsigned int lock[4];
+# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
+#endif
+} arch_spinlock_t;
+
+typedef struct {
+ arch_spinlock_t lock;
+ volatile int counter;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
+
+#endif
diff --git a/arch/parisc/include/asm/stat.h b/arch/parisc/include/asm/stat.h
new file mode 100644
index 00000000..9d5fbbc5
--- /dev/null
+++ b/arch/parisc/include/asm/stat.h
@@ -0,0 +1,100 @@
+#ifndef _PARISC_STAT_H
+#define _PARISC_STAT_H
+
+#include <linux/types.h>
+
+struct stat {
+ unsigned int st_dev; /* dev_t is 32 bits on parisc */
+ ino_t st_ino; /* 32 bits */
+ mode_t st_mode; /* 16 bits */
+ nlink_t st_nlink; /* 16 bits */
+ unsigned short st_reserved1; /* old st_uid */
+ unsigned short st_reserved2; /* old st_gid */
+ unsigned int st_rdev;
+ off_t st_size;
+ time_t st_atime;
+ unsigned int st_atime_nsec;
+ time_t st_mtime;
+ unsigned int st_mtime_nsec;
+ time_t st_ctime;
+ unsigned int st_ctime_nsec;
+ int st_blksize;
+ int st_blocks;
+ unsigned int __unused1; /* ACL stuff */
+ unsigned int __unused2; /* network */
+ ino_t __unused3; /* network */
+ unsigned int __unused4; /* cnodes */
+ unsigned short __unused5; /* netsite */
+ short st_fstype;
+ unsigned int st_realdev;
+ unsigned short st_basemode;
+ unsigned short st_spareshort;
+ uid_t st_uid;
+ gid_t st_gid;
+ unsigned int st_spare4[3];
+};
+
+#define STAT_HAVE_NSEC
+
+typedef __kernel_off64_t off64_t;
+
+struct hpux_stat64 {
+ unsigned int st_dev; /* dev_t is 32 bits on parisc */
+ ino_t st_ino; /* 32 bits */
+ mode_t st_mode; /* 16 bits */
+ nlink_t st_nlink; /* 16 bits */
+ unsigned short st_reserved1; /* old st_uid */
+ unsigned short st_reserved2; /* old st_gid */
+ unsigned int st_rdev;
+ off64_t st_size;
+ time_t st_atime;
+ unsigned int st_spare1;
+ time_t st_mtime;
+ unsigned int st_spare2;
+ time_t st_ctime;
+ unsigned int st_spare3;
+ int st_blksize;
+ __u64 st_blocks;
+ unsigned int __unused1; /* ACL stuff */
+ unsigned int __unused2; /* network */
+ ino_t __unused3; /* network */
+ unsigned int __unused4; /* cnodes */
+ unsigned short __unused5; /* netsite */
+ short st_fstype;
+ unsigned int st_realdev;
+ unsigned short st_basemode;
+ unsigned short st_spareshort;
+ uid_t st_uid;
+ gid_t st_gid;
+ unsigned int st_spare4[3];
+};
+
+/* This is the struct that 32-bit userspace applications are expecting.
+ * How 64-bit apps are going to be compiled, I have no idea. But at least
+ * this way, we don't have a wrapper in the kernel.
+ */
+struct stat64 {
+ unsigned long long st_dev;
+ unsigned int __pad1;
+
+ unsigned int __st_ino; /* Not actually filled in */
+ unsigned int st_mode;
+ unsigned int st_nlink;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned long long st_rdev;
+ unsigned int __pad2;
+ signed long long st_size;
+ signed int st_blksize;
+
+ signed long long st_blocks;
+ signed int st_atime;
+ unsigned int st_atime_nsec;
+ signed int st_mtime;
+ unsigned int st_mtime_nsec;
+ signed int st_ctime;
+ unsigned int st_ctime_nsec;
+ unsigned long long st_ino;
+};
+
+#endif
diff --git a/arch/parisc/include/asm/statfs.h b/arch/parisc/include/asm/statfs.h
new file mode 100644
index 00000000..324bea90
--- /dev/null
+++ b/arch/parisc/include/asm/statfs.h
@@ -0,0 +1,7 @@
+#ifndef _PARISC_STATFS_H
+#define _PARISC_STATFS_H
+
+#define __statfs_word long
+#include <asm-generic/statfs.h>
+
+#endif
diff --git a/arch/parisc/include/asm/string.h b/arch/parisc/include/asm/string.h
new file mode 100644
index 00000000..eda01be6
--- /dev/null
+++ b/arch/parisc/include/asm/string.h
@@ -0,0 +1,10 @@
+#ifndef _PA_STRING_H_
+#define _PA_STRING_H_
+
+#define __HAVE_ARCH_MEMSET
+extern void * memset(void *, int, size_t);
+
+#define __HAVE_ARCH_MEMCPY
+void * memcpy(void * dest,const void *src,size_t count);
+
+#endif
diff --git a/arch/parisc/include/asm/superio.h b/arch/parisc/include/asm/superio.h
new file mode 100644
index 00000000..6598acb4
--- /dev/null
+++ b/arch/parisc/include/asm/superio.h
@@ -0,0 +1,85 @@
+#ifndef _PARISC_SUPERIO_H
+#define _PARISC_SUPERIO_H
+
+#define IC_PIC1 0x20 /* PCI I/O address of master 8259 */
+#define IC_PIC2 0xA0 /* PCI I/O address of slave */
+
+/* Config Space Offsets to configuration and base address registers */
+#define SIO_CR 0x5A /* Configuration Register */
+#define SIO_ACPIBAR 0x88 /* ACPI BAR */
+#define SIO_FDCBAR 0x90 /* Floppy Disk Controller BAR */
+#define SIO_SP1BAR 0x94 /* Serial 1 BAR */
+#define SIO_SP2BAR 0x98 /* Serial 2 BAR */
+#define SIO_PPBAR 0x9C /* Parallel BAR */
+
+#define TRIGGER_1 0x67 /* Edge/level trigger register 1 */
+#define TRIGGER_2 0x68 /* Edge/level trigger register 2 */
+
+/* Interrupt Routing Control registers */
+#define CFG_IR_SER 0x69 /* Serial 1 [0:3] and Serial 2 [4:7] */
+#define CFG_IR_PFD 0x6a /* Parallel [0:3] and Floppy [4:7] */
+#define CFG_IR_IDE 0x6b /* IDE1 [0:3] and IDE2 [4:7] */
+#define CFG_IR_INTAB 0x6c /* PCI INTA [0:3] and INT B [4:7] */
+#define CFG_IR_INTCD 0x6d /* PCI INTC [0:3] and INT D [4:7] */
+#define CFG_IR_PS2 0x6e /* PS/2 KBINT [0:3] and Mouse [4:7] */
+#define CFG_IR_FXBUS 0x6f /* FXIRQ[0] [0:3] and FXIRQ[1] [4:7] */
+#define CFG_IR_USB 0x70 /* FXIRQ[2] [0:3] and USB [4:7] */
+#define CFG_IR_ACPI 0x71 /* ACPI SCI [0:3] and reserved [4:7] */
+
+#define CFG_IR_LOW CFG_IR_SER /* Lowest interrupt routing reg */
+#define CFG_IR_HIGH CFG_IR_ACPI /* Highest interrupt routing reg */
+
+/* 8259 operational control words */
+#define OCW2_EOI 0x20 /* Non-specific EOI */
+#define OCW2_SEOI 0x60 /* Specific EOI */
+#define OCW3_IIR 0x0A /* Read request register */
+#define OCW3_ISR 0x0B /* Read service register */
+#define OCW3_POLL 0x0C /* Poll the PIC for an interrupt vector */
+
+/* Interrupt lines. Only PIC1 is used */
+#define USB_IRQ 1 /* USB */
+#define SP1_IRQ 3 /* Serial port 1 */
+#define SP2_IRQ 4 /* Serial port 2 */
+#define PAR_IRQ 5 /* Parallel port */
+#define FDC_IRQ 6 /* Floppy controller */
+#define IDE_IRQ 7 /* IDE (pri+sec) */
+
+/* ACPI registers */
+#define USB_REG_CR 0x1f /* USB Regulator Control Register */
+
+#define SUPERIO_NIRQS 8
+
+struct superio_device {
+ u32 fdc_base;
+ u32 sp1_base;
+ u32 sp2_base;
+ u32 pp_base;
+ u32 acpi_base;
+ int suckyio_irq_enabled;
+ struct pci_dev *lio_pdev; /* pci device for legacy IO (fn 1) */
+ struct pci_dev *usb_pdev; /* pci device for USB (fn 2) */
+};
+
+/*
+ * Does NS make an 87415-based plug-in PCI card? If so, because of this
+ * macro we currently don't support plugging one into a machine that
+ * contains a SuperIO chip AND has CONFIG_SUPERIO enabled.
+ *
+ * This could be fixed by checking to see if function 1 exists, and
+ * if it is SuperIO Legacy IO; but really now, is this combination
+ * going to EVER happen?
+ */
+
+#define SUPERIO_IDE_FN 0 /* Function number of IDE controller */
+#define SUPERIO_LIO_FN 1 /* Function number of Legacy IO controller */
+#define SUPERIO_USB_FN 2 /* Function number of USB controller */
+
+#define is_superio_device(x) \
+ (((x)->vendor == PCI_VENDOR_ID_NS) && \
+ ( ((x)->device == PCI_DEVICE_ID_NS_87415) \
+ || ((x)->device == PCI_DEVICE_ID_NS_87560_LIO) \
+ || ((x)->device == PCI_DEVICE_ID_NS_87560_USB) ) )
+
+extern int superio_fixup_irq(struct pci_dev *pcidev); /* called by iosapic */
+
+#endif /* _PARISC_SUPERIO_H */
diff --git a/arch/parisc/include/asm/swab.h b/arch/parisc/include/asm/swab.h
new file mode 100644
index 00000000..e78403b1
--- /dev/null
+++ b/arch/parisc/include/asm/swab.h
@@ -0,0 +1,66 @@
+#ifndef _PARISC_SWAB_H
+#define _PARISC_SWAB_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#define __SWAB_64_THRU_32__
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+ __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */
+ "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */
+ : "=r" (x)
+ : "0" (x));
+ return x;
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab24(__u32 x)
+{
+ __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */
+ "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */
+ "shd %%r0, %0, 8, %0" /* shift 0000cbab -> 0cba */
+ : "=r" (x)
+ : "0" (x));
+ return x;
+}
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+ unsigned int temp;
+ __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */
+ "dep %1, 15, 8, %1\n\t" /* deposit cdab -> cbab */
+ "shd %0, %1, 8, %0" /* shift abcdcbab -> dcba */
+ : "=r" (x), "=&r" (temp)
+ : "0" (x));
+ return x;
+}
+#define __arch_swab32 __arch_swab32
+
+#if BITS_PER_LONG > 32
+/*
+** From "PA-RISC 2.0 Architecture", HP Professional Books.
+** See Appendix I page 8 , "Endian Byte Swapping".
+**
+** Pretty cool algorithm: (* == zero'd bits)
+** PERMH 01234567 -> 67452301 into %0
+** HSHL 67452301 -> 7*5*3*1* into %1
+** HSHR 67452301 -> *6*4*2*0 into %0
+** OR %0 | %1 -> 76543210 into %0 (all done!)
+*/
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+{
+ __u64 temp;
+ __asm__("permh,3210 %0, %0\n\t"
+ "hshl %0, 8, %1\n\t"
+ "hshr,u %0, 8, %0\n\t"
+ "or %1, %0, %0"
+ : "=r" (x), "=&r" (temp)
+ : "0" (x));
+ return x;
+}
+#define __arch_swab64 __arch_swab64
+#endif /* BITS_PER_LONG > 32 */
+
+#endif /* _PARISC_SWAB_H */
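
A portable C restatement of the PERMH/HSHL/HSHR/OR sequence documented above: reverse the four halfwords, then swap the two bytes inside each halfword. Standalone and illustrative only:

#include <assert.h>
#include <stdint.h>

static uint64_t swab64_model(uint64_t x)
{
	uint64_t t;

	/* permh,3210: 01234567 -> 67452301 (halfword reversal) */
	x = (x >> 48) | ((x >> 16) & 0xffff0000ULL) |
	    ((x << 16) & 0xffff00000000ULL) | (x << 48);
	/* hshl / hshr,u / or: swap the two bytes within every halfword */
	t = (x << 8) & 0xff00ff00ff00ff00ULL;	/* 7*5*3*1* */
	x = (x >> 8) & 0x00ff00ff00ff00ffULL;	/* *6*4*2*0 */
	return t | x;				/* 76543210 */
}

int main(void)
{
	assert(swab64_model(0x0123456789abcdefULL) == 0xefcdab8967452301ULL);
	return 0;
}
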
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
new file mode 100644
index 00000000..8bdfd2c8
--- /dev/null
+++ b/arch/parisc/include/asm/syscall.h
@@ -0,0 +1,40 @@
+/* syscall.h */
+
+#ifndef _ASM_PARISC_SYSCALL_H_
+#define _ASM_PARISC_SYSCALL_H_
+
+#include <linux/err.h>
+#include <asm/ptrace.h>
+
+static inline long syscall_get_nr(struct task_struct *tsk,
+ struct pt_regs *regs)
+{
+ return regs->gr[20];
+}
+
+static inline void syscall_get_arguments(struct task_struct *tsk,
+ struct pt_regs *regs, unsigned int i,
+ unsigned int n, unsigned long *args)
+{
+ BUG_ON(i);
+
+ switch (n) {
+ case 6:
+ args[5] = regs->gr[21];
+ case 5:
+ args[4] = regs->gr[22];
+ case 4:
+ args[3] = regs->gr[23];
+ case 3:
+ args[2] = regs->gr[24];
+ case 2:
+ args[1] = regs->gr[25];
+ case 1:
+ args[0] = regs->gr[26];
+ break;
+ default:
+ BUG();
+ }
+}
+
+#endif /*_ASM_PARISC_SYSCALL_H_*/
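
The argument-gathering switch above relies on deliberate fall-through: on parisc the first six syscall arguments live in gr26 down to gr21, so copying "the last n of six" starts at case n and falls through to case 1. A standalone model with a fake register file:

#include <assert.h>

struct fake_regs { unsigned long gr[32]; };

static void get_args(struct fake_regs *regs, unsigned int n, unsigned long *args)
{
	switch (n) {
	case 6: args[5] = regs->gr[21];	/* fall through */
	case 5: args[4] = regs->gr[22];	/* fall through */
	case 4: args[3] = regs->gr[23];	/* fall through */
	case 3: args[2] = regs->gr[24];	/* fall through */
	case 2: args[1] = regs->gr[25];	/* fall through */
	case 1: args[0] = regs->gr[26]; break;
	default: assert(0);
	}
}

int main(void)
{
	struct fake_regs regs = { { 0 } };
	unsigned long args[6];

	regs.gr[26] = 111; regs.gr[25] = 222; regs.gr[24] = 333;
	get_args(&regs, 3, args);
	assert(args[0] == 111 && args[1] == 222 && args[2] == 333);
	return 0;
}
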
diff --git a/arch/parisc/include/asm/system.h b/arch/parisc/include/asm/system.h
new file mode 100644
index 00000000..b19e63a8
--- /dev/null
+++ b/arch/parisc/include/asm/system.h
@@ -0,0 +1,165 @@
+#ifndef __PARISC_SYSTEM_H
+#define __PARISC_SYSTEM_H
+
+#include <linux/irqflags.h>
+
+/* The program status word as bitfields. */
+struct pa_psw {
+ unsigned int y:1;
+ unsigned int z:1;
+ unsigned int rv:2;
+ unsigned int w:1;
+ unsigned int e:1;
+ unsigned int s:1;
+ unsigned int t:1;
+
+ unsigned int h:1;
+ unsigned int l:1;
+ unsigned int n:1;
+ unsigned int x:1;
+ unsigned int b:1;
+ unsigned int c:1;
+ unsigned int v:1;
+ unsigned int m:1;
+
+ unsigned int cb:8;
+
+ unsigned int o:1;
+ unsigned int g:1;
+ unsigned int f:1;
+ unsigned int r:1;
+ unsigned int q:1;
+ unsigned int p:1;
+ unsigned int d:1;
+ unsigned int i:1;
+};
+
+#ifdef CONFIG_64BIT
+#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4))
+#else
+#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW))
+#endif
+
+struct task_struct;
+
+extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *);
+
+#define switch_to(prev, next, last) do { \
+ (last) = _switch_to(prev, next); \
+} while(0)
+
+#define mfctl(reg) ({ \
+ unsigned long cr; \
+ __asm__ __volatile__( \
+ "mfctl " #reg ",%0" : \
+ "=r" (cr) \
+ ); \
+ cr; \
+})
+
+#define mtctl(gr, cr) \
+ __asm__ __volatile__("mtctl %0,%1" \
+ : /* no outputs */ \
+ : "r" (gr), "i" (cr) : "memory")
+
+/* these are here to demystify the calling code, and to provide hooks */
+/* which I needed for debugging EIEM problems -PB */
+#define get_eiem() mfctl(15)
+static inline void set_eiem(unsigned long val)
+{
+ mtctl(val, 15);
+}
+
+#define mfsp(reg) ({ \
+ unsigned long cr; \
+ __asm__ __volatile__( \
+ "mfsp " #reg ",%0" : \
+ "=r" (cr) \
+ ); \
+ cr; \
+})
+
+#define mtsp(gr, cr) \
+ __asm__ __volatile__("mtsp %0,%1" \
+ : /* no outputs */ \
+ : "r" (gr), "i" (cr) : "memory")
+
+
+/*
+** This is simply the barrier() macro from linux/kernel.h, repeated here
+** because serial.c pulls in tqueue.h, which uses smp_mb() (defined via
+** barrier()) before linux/kernel.h has been included, so the definition
+** would otherwise be missing.
+**
+** The PA-RISC architecture allows for weakly ordered memory accesses, although
+** none of the processors use it. There is a strong ordered bit that is
+** set in the O-bit of the page directory entry. Operating systems that
+** can not tolerate out of order accesses should set this bit when mapping
+** pages. The O-bit of the PSW should also be set to 1 (I don't believe any
+** of the processors implemented the PSW O-bit). The PCX-W ERS states that
+** the TLB O-bit is not implemented so the page directory does not need to
+** have the O-bit set when mapping pages (section 3.1). This section also
+** states that the PSW Y, Z, G, and O bits are not implemented,
+** so it looks like nothing needs to be done for parisc-linux (yet).
+** (thanks to chada for the above comment -ggg)
+**
+** The __asm__ op below simply prevents gcc/ld from reordering
+** instructions across the mb() "call".
+*/
+#define mb() __asm__ __volatile__("":::"memory") /* barrier() */
+#define rmb() mb()
+#define wmb() mb()
+#define smp_mb() mb()
+#define smp_rmb() mb()
+#define smp_wmb() mb()
+#define smp_read_barrier_depends() do { } while(0)
+#define read_barrier_depends() do { } while(0)
+
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+
+#ifndef CONFIG_PA20
+/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
+ and GCC only guarantees 8-byte alignment for stack locals, we can't
+ be assured of 16-byte alignment for atomic lock data even if we
+ specify "__attribute ((aligned(16)))" in the type declaration. So,
+ we use a struct containing an array of four ints for the atomic lock
+ type and dynamically select the 16-byte aligned int from the array
+ for the semaphore. */
+
+#define __PA_LDCW_ALIGNMENT 16
+#define __ldcw_align(a) ({ \
+ unsigned long __ret = (unsigned long) &(a)->lock[0]; \
+ __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \
+ & ~(__PA_LDCW_ALIGNMENT - 1); \
+ (volatile unsigned int *) __ret; \
+})
+#define __LDCW "ldcw"
+
+#else /*CONFIG_PA20*/
+/* From: "Jim Hull" <jim.hull of hp.com>
+ I've attached a summary of the change, but basically, for PA 2.0, as
+ long as the ",CO" (coherent operation) completer is specified, then the
+ 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+ they only require "natural" alignment (4-byte for ldcw, 8-byte for
+ ldcd). */
+
+#define __PA_LDCW_ALIGNMENT 4
+#define __ldcw_align(a) (&(a)->slock)
+#define __LDCW "ldcw,co"
+
+#endif /*!CONFIG_PA20*/
+
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+#define __ldcw(a) ({ \
+ unsigned __ret; \
+ __asm__ __volatile__(__LDCW " 0(%2),%0" \
+ : "=r" (__ret), "+m" (*(a)) : "r" (a)); \
+ __ret; \
+})
+
+#ifdef CONFIG_SMP
+# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
+#endif
+
+#define arch_align_stack(x) (x)
+
+#endif
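
The __ldcw_align() rounding for pre-PA2.0 parts, as plain arithmetic: a 4-word lock array is always large enough to contain one 16-byte-aligned word, and the macro simply rounds the array address up to that word. The address below is hypothetical:

#include <stdio.h>

#define __PA_LDCW_ALIGNMENT 16

int main(void)
{
	unsigned long lock_array = 0x10008UL;	/* hypothetical &lock[0], only 8-byte aligned */
	unsigned long aligned = (lock_array + __PA_LDCW_ALIGNMENT - 1) &
				~((unsigned long)__PA_LDCW_ALIGNMENT - 1);

	/* 0x10008 rounds up to 0x10010, which still lies inside the
	 * 16-byte lock[4] array that starts at 0x10008. */
	printf("%#lx -> %#lx\n", lock_array, aligned);
	return 0;
}
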
diff --git a/arch/parisc/include/asm/termbits.h b/arch/parisc/include/asm/termbits.h
new file mode 100644
index 00000000..d1ab9217
--- /dev/null
+++ b/arch/parisc/include/asm/termbits.h
@@ -0,0 +1,201 @@
+#ifndef __ARCH_PARISC_TERMBITS_H__
+#define __ARCH_PARISC_TERMBITS_H__
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+struct termios2 {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+struct ktermios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+
+/* c_iflag bits */
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IUCLC 0001000
+#define IXON 0002000
+#define IXANY 0004000
+#define IXOFF 0010000
+#define IMAXBEL 0040000
+#define IUTF8 0100000
+
+/* c_oflag bits */
+#define OPOST 0000001
+#define OLCUC 0000002
+#define ONLCR 0000004
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+
+/* c_cflag bit meaning */
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060
+#define CS5 0000000
+#define CS6 0000020
+#define CS7 0000040
+#define CS8 0000060
+#define CSTOPB 0000100
+#define CREAD 0000200
+#define PARENB 0000400
+#define PARODD 0001000
+#define HUPCL 0002000
+#define CLOCAL 0004000
+#define CBAUDEX 0010000
+#define BOTHER 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define B500000 0010005
+#define B576000 0010006
+#define B921600 0010007
+#define B1000000 0010010
+#define B1152000 0010011
+#define B1500000 0010012
+#define B2000000 0010013
+#define B2500000 0010014
+#define B3000000 0010015
+#define B3500000 0010016
+#define B4000000 0010017
+#define CIBAUD 002003600000 /* input baud rate */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CRTSCTS 020000000000 /* flow control */
+
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
+
+/* c_lflag bits */
+#define ISIG 0000001
+#define ICANON 0000002
+#define XCASE 0000004
+#define ECHO 0000010
+#define ECHOE 0000020
+#define ECHOK 0000040
+#define ECHONL 0000100
+#define NOFLSH 0000200
+#define TOSTOP 0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE 0004000
+#define FLUSHO 0010000
+#define PENDIN 0040000
+#define IEXTEN 0100000
+#define EXTPROC 0200000
+
+/* tcflow() and TCXONC use these */
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif
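
A userspace sketch using the c_lflag and c_cc definitions above (ECHO, ICANON, VMIN, VTIME): switch the terminal to no-echo, byte-at-a-time input and restore it afterwards. It uses the portable tcgetattr/tcsetattr API, which on Linux is typically implemented with the TCGETS/TCSETS ioctls:

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios saved, raw;

	if (tcgetattr(STDIN_FILENO, &saved) < 0) {
		perror("tcgetattr");
		return 1;
	}
	raw = saved;
	raw.c_lflag &= ~(ECHO | ICANON);	/* no echo, non-canonical input */
	raw.c_cc[VMIN] = 1;			/* read() returns after 1 byte */
	raw.c_cc[VTIME] = 0;			/* no inter-byte timeout */
	tcsetattr(STDIN_FILENO, TCSANOW, &raw);

	/* ... read single keystrokes here ... */

	tcsetattr(STDIN_FILENO, TCSANOW, &saved);	/* restore original modes */
	return 0;
}
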
diff --git a/arch/parisc/include/asm/termios.h b/arch/parisc/include/asm/termios.h
new file mode 100644
index 00000000..a2a57a45
--- /dev/null
+++ b/arch/parisc/include/asm/termios.h
@@ -0,0 +1,90 @@
+#ifndef _PARISC_TERMIOS_H
+#define _PARISC_TERMIOS_H
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+#ifdef __KERNEL__
+
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
+ unsigned short __tmp; \
+ get_user(__tmp,&(termio)->x); \
+ *(unsigned short *) &(termios)->x = __tmp; \
+}
+
+#define user_termio_to_kernel_termios(termios, termio) \
+({ \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
+ copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+#define kernel_termios_to_user_termio(termio, termios) \
+({ \
+ put_user((termios)->c_iflag, &(termio)->c_iflag); \
+ put_user((termios)->c_oflag, &(termio)->c_oflag); \
+ put_user((termios)->c_cflag, &(termio)->c_cflag); \
+ put_user((termios)->c_lflag, &(termio)->c_lflag); \
+ put_user((termios)->c_line, &(termio)->c_line); \
+ copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+})
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif /* __KERNEL__ */
+
+#endif /* _PARISC_TERMIOS_H */
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
new file mode 100644
index 00000000..aa8de727
--- /dev/null
+++ b/arch/parisc/include/asm/thread_info.h
@@ -0,0 +1,82 @@
+#ifndef _ASM_PARISC_THREAD_INFO_H
+#define _ASM_PARISC_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain;/* execution domain */
+ unsigned long flags; /* thread_info flags (see TIF_*) */
+ mm_segment_t addr_limit; /* user-level address space limit */
+ __u32 cpu; /* current CPU */
+ int preempt_count; /* 0=preemptible, <0=BUG; will also serve as bh-counter */
+ struct restart_block restart_block;
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .addr_limit = KERNEL_DS, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall \
+ } \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/* how to get the thread information struct from C */
+#define current_thread_info() ((struct thread_info *)mfctl(30))
+
+#endif /* !__ASSEMBLY */
+
+/* thread information allocation */
+
+#define THREAD_SIZE_ORDER 2
+/* Be sure to hunt down all references to this when you change the size
+ * of the kernel stack */
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)
+
+#define PREEMPT_ACTIVE_BIT 28
+#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT)
+
+/*
+ * thread information flags
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_32BIT 4 /* 32 bit binary */
+#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
+#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
+#define TIF_FREEZE 7 /* is freezing for suspend */
+#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
+#define TIF_SINGLESTEP 9 /* single stepping? */
+#define TIF_BLOCKSTEP 10 /* branch stepping? */
+
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+#define _TIF_32BIT (1 << TIF_32BIT)
+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+#define _TIF_FREEZE (1 << TIF_FREEZE)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
+
+#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
+ _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_PARISC_THREAD_INFO_H */
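
Worked arithmetic for THREAD_SIZE and THREAD_SHIFT above, assuming the common 4 KB page size (other PAGE_SIZE configurations scale the same way):

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12;			/* assumed: 4 KB pages */
	unsigned long page_size = 1UL << page_shift;
	unsigned long thread_size_order = 2;
	unsigned long thread_size = page_size << thread_size_order;
	unsigned long thread_shift = page_shift + thread_size_order;

	/* 16384-byte kernel stacks (order-2 allocation), THREAD_SHIFT = 14 */
	printf("THREAD_SIZE = %lu, THREAD_SHIFT = %lu\n", thread_size, thread_shift);
	return 0;
}
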
diff --git a/arch/parisc/include/asm/timex.h b/arch/parisc/include/asm/timex.h
new file mode 100644
index 00000000..3b68d772
--- /dev/null
+++ b/arch/parisc/include/asm/timex.h
@@ -0,0 +1,20 @@
+/*
+ * linux/include/asm-parisc/timex.h
+ *
+ * PARISC architecture timex specifications
+ */
+#ifndef _ASMPARISC_TIMEX_H
+#define _ASMPARISC_TIMEX_H
+
+#include <asm/system.h>
+
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles (void)
+{
+ return mfctl(16);
+}
+
+#endif
diff --git a/arch/parisc/include/asm/tlb.h b/arch/parisc/include/asm/tlb.h
new file mode 100644
index 00000000..07924903
--- /dev/null
+++ b/arch/parisc/include/asm/tlb.h
@@ -0,0 +1,27 @@
+#ifndef _PARISC_TLB_H
+#define _PARISC_TLB_H
+
+#define tlb_flush(tlb) \
+do { if ((tlb)->fullmm) \
+ flush_tlb_mm((tlb)->mm);\
+} while (0)
+
+#define tlb_start_vma(tlb, vma) \
+do { if (!(tlb)->fullmm) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
+
+#define tlb_end_vma(tlb, vma) \
+do { if (!(tlb)->fullmm) \
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
+
+#define __tlb_remove_tlb_entry(tlb, pte, address) \
+ do { } while (0)
+
+#include <asm-generic/tlb.h>
+
+#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
+
+#endif
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
new file mode 100644
index 00000000..8f1a8100
--- /dev/null
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -0,0 +1,83 @@
+#ifndef _PARISC_TLBFLUSH_H
+#define _PARISC_TLBFLUSH_H
+
+/* TLB flushing routines.... */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/mmu_context.h>
+
+
+/* This is for the serialisation of PxTLB broadcasts. At least on the
+ * N class systems, only one PxTLB inter-processor broadcast can be
+ * active at any one time on the Merced bus. This TLB purge
+ * synchronisation is fairly lightweight and harmless, so we activate
+ * it on all systems, not just the N class.
+ */
+extern spinlock_t pa_tlb_lock;
+
+#define purge_tlb_start(flags) spin_lock_irqsave(&pa_tlb_lock, flags)
+#define purge_tlb_end(flags) spin_unlock_irqrestore(&pa_tlb_lock, flags)
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_all_local(void *);
+
+/*
+ * flush_tlb_mm()
+ *
+ * XXX This code is NOT valid for HP-UX compatibility processes,
+ * (although it will probably work 99% of the time). HP-UX
+ * processes are free to play with the space id's and save them
+ * over long periods of time, etc. so we have to preserve the
+ * space and just flush the entire tlb. We need to check the
+ * personality in order to do that, but the personality is not
+ * currently being set correctly.
+ *
+ * Of course, Linux processes could do the same thing, but
+ * we don't support that (and the compilers, dynamic linker,
+ * etc. do not do that).
+ */
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ BUG_ON(mm == &init_mm); /* Should never happen */
+
+#if 1 || defined(CONFIG_SMP)
+ flush_tlb_all();
+#else
+ /* FIXME: currently broken, causing space id and protection ids
+ * to go out of sync, resulting in faults on userspace accesses.
+ */
+ if (mm) {
+ if (mm->context != 0)
+ free_sid(mm->context);
+ mm->context = alloc_sid();
+ if (mm == current->active_mm)
+ load_context(mm->context);
+ }
+#endif
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ unsigned long flags;
+
+ /* For one page, it's not worth testing the split_tlb variable */
+
+ mb();
+ mtsp(vma->vm_mm->context,1);
+ purge_tlb_start(flags);
+ pdtlb(addr);
+ pitlb(addr);
+ purge_tlb_end(flags);
+}
+
+void __flush_tlb_range(unsigned long sid,
+ unsigned long start, unsigned long end);
+
+#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
+
+#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
+
+#endif
diff --git a/arch/parisc/include/asm/topology.h b/arch/parisc/include/asm/topology.h
new file mode 100644
index 00000000..d8133eb0
--- /dev/null
+++ b/arch/parisc/include/asm/topology.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_PARISC_TOPOLOGY_H
+#define _ASM_PARISC_TOPOLOGY_H
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_PARISC_TOPOLOGY_H */
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
new file mode 100644
index 00000000..1945f995
--- /dev/null
+++ b/arch/parisc/include/asm/traps.h
@@ -0,0 +1,16 @@
+#ifndef __ASM_TRAPS_H
+#define __ASM_TRAPS_H
+
+#ifdef __KERNEL__
+struct pt_regs;
+
+/* traps.c */
+void parisc_terminate(char *msg, struct pt_regs *regs,
+ int code, unsigned long offset);
+
+/* mm/fault.c */
+void do_page_fault(struct pt_regs *regs, unsigned long code,
+ unsigned long address);
+#endif
+
+#endif
diff --git a/arch/parisc/include/asm/types.h b/arch/parisc/include/asm/types.h
new file mode 100644
index 00000000..80e415c9
--- /dev/null
+++ b/arch/parisc/include/asm/types.h
@@ -0,0 +1,12 @@
+#ifndef _PARISC_TYPES_H
+#define _PARISC_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned short umode_t;
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
new file mode 100644
index 00000000..ff4cf9da
--- /dev/null
+++ b/arch/parisc/include/asm/uaccess.h
@@ -0,0 +1,270 @@
+#ifndef __PARISC_UACCESS_H
+#define __PARISC_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <asm/page.h>
+#include <asm/system.h>
+#include <asm/cache.h>
+#include <asm/errno.h>
+#include <asm-generic/uaccess-unaligned.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+#define KERNEL_DS ((mm_segment_t){0})
+#define USER_DS ((mm_segment_t){1})
+
+#define segment_eq(a,b) ((a).seg == (b).seg)
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+/*
+ * Note that since kernel addresses are in a separate address space on
+ * parisc, we don't need to do anything for access_ok().
+ * We just let the page fault handler do the right thing. This also means
+ * that put_user is the same as __put_user, etc.
+ */
+
+extern int __get_kernel_bad(void);
+extern int __get_user_bad(void);
+extern int __put_kernel_bad(void);
+extern int __put_user_bad(void);
+
+static inline long access_ok(int type, const void __user * addr,
+ unsigned long size)
+{
+ return 1;
+}
+
+#define put_user __put_user
+#define get_user __get_user
+
+#if !defined(CONFIG_64BIT)
+#define LDD_KERNEL(ptr) __get_kernel_bad();
+#define LDD_USER(ptr) __get_user_bad();
+#define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr)
+#define STD_USER(x, ptr) __put_user_asm64(x,ptr)
+#define ASM_WORD_INSN ".word\t"
+#else
+#define LDD_KERNEL(ptr) __get_kernel_asm("ldd",ptr)
+#define LDD_USER(ptr) __get_user_asm("ldd",ptr)
+#define STD_KERNEL(x, ptr) __put_kernel_asm("std",x,ptr)
+#define STD_USER(x, ptr) __put_user_asm("std",x,ptr)
+#define ASM_WORD_INSN ".dword\t"
+#endif
+
+/*
+ * The exception table contains two values: the first is an address
+ * for an instruction that is allowed to fault, and the second is
+ * the address to the fixup routine.
+ */
+
+struct exception_table_entry {
+ unsigned long insn; /* address of insn that is allowed to fault. */
+ long fixup; /* fixup routine */
+};
+
+#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
+ ".section __ex_table,\"aw\"\n" \
+ ASM_WORD_INSN #fault_addr ", " #except_addr "\n\t" \
+ ".previous\n"
+
+/*
+ * The page fault handler stores, in a per-cpu area, the following information
+ * if a fixup routine is available.
+ */
+struct exception_data {
+ unsigned long fault_ip;
+ unsigned long fault_space;
+ unsigned long fault_addr;
+};
+
+#define __get_user(x,ptr) \
+({ \
+ register long __gu_err __asm__ ("r8") = 0; \
+ register long __gu_val __asm__ ("r9") = 0; \
+ \
+ if (segment_eq(get_fs(),KERNEL_DS)) { \
+ switch (sizeof(*(ptr))) { \
+ case 1: __get_kernel_asm("ldb",ptr); break; \
+ case 2: __get_kernel_asm("ldh",ptr); break; \
+ case 4: __get_kernel_asm("ldw",ptr); break; \
+ case 8: LDD_KERNEL(ptr); break; \
+ default: __get_kernel_bad(); break; \
+ } \
+ } \
+ else { \
+ switch (sizeof(*(ptr))) { \
+ case 1: __get_user_asm("ldb",ptr); break; \
+ case 2: __get_user_asm("ldh",ptr); break; \
+ case 4: __get_user_asm("ldw",ptr); break; \
+ case 8: LDD_USER(ptr); break; \
+ default: __get_user_bad(); break; \
+ } \
+ } \
+ \
+ (x) = (__typeof__(*(ptr))) __gu_val; \
+ __gu_err; \
+})
+
+#define __get_kernel_asm(ldx,ptr) \
+ __asm__("\n1:\t" ldx "\t0(%2),%0\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
+ : "=r"(__gu_val), "=r"(__gu_err) \
+ : "r"(ptr), "1"(__gu_err) \
+ : "r1");
+
+#define __get_user_asm(ldx,ptr) \
+ __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_get_user_skip_1)\
+ : "=r"(__gu_val), "=r"(__gu_err) \
+ : "r"(ptr), "1"(__gu_err) \
+ : "r1");
+
+#define __put_user(x,ptr) \
+({ \
+ register long __pu_err __asm__ ("r8") = 0; \
+ __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
+ \
+ if (segment_eq(get_fs(),KERNEL_DS)) { \
+ switch (sizeof(*(ptr))) { \
+ case 1: __put_kernel_asm("stb",__x,ptr); break; \
+ case 2: __put_kernel_asm("sth",__x,ptr); break; \
+ case 4: __put_kernel_asm("stw",__x,ptr); break; \
+ case 8: STD_KERNEL(__x,ptr); break; \
+ default: __put_kernel_bad(); break; \
+ } \
+ } \
+ else { \
+ switch (sizeof(*(ptr))) { \
+ case 1: __put_user_asm("stb",__x,ptr); break; \
+ case 2: __put_user_asm("sth",__x,ptr); break; \
+ case 4: __put_user_asm("stw",__x,ptr); break; \
+ case 8: STD_USER(__x,ptr); break; \
+ default: __put_user_bad(); break; \
+ } \
+ } \
+ \
+ __pu_err; \
+})
+
+/*
+ * The "__put_user/kernel_asm()" macros tell gcc they read from memory
+ * instead of writing. This is because they do not write to any memory
+ * gcc knows about, so there are no aliasing issues. These macros must
+ * also be aware that "fixup_put_user_skip_[12]" are executed in the
+ * context of the fault, and any registers used there must be listed
+ * as clobbers. In this case only "r1" is used by the current routines.
+ * r8/r9 are already listed as err/val.
+ */
+
+#define __put_kernel_asm(stx,x,ptr) \
+ __asm__ __volatile__ ( \
+ "\n1:\t" stx "\t%2,0(%1)\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
+ : "=r"(__pu_err) \
+ : "r"(ptr), "r"(x), "0"(__pu_err) \
+ : "r1")
+
+#define __put_user_asm(stx,x,ptr) \
+ __asm__ __volatile__ ( \
+ "\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
+ : "=r"(__pu_err) \
+ : "r"(ptr), "r"(x), "0"(__pu_err) \
+ : "r1")
+
+
+#if !defined(CONFIG_64BIT)
+
+#define __put_kernel_asm64(__val,ptr) do { \
+ u64 __val64 = (u64)(__val); \
+ u32 hi = (__val64) >> 32; \
+ u32 lo = (__val64) & 0xffffffff; \
+ __asm__ __volatile__ ( \
+ "\n1:\tstw %2,0(%1)" \
+ "\n2:\tstw %3,4(%1)\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
+ ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
+ : "=r"(__pu_err) \
+ : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
+ : "r1"); \
+} while (0)
+
+#define __put_user_asm64(__val,ptr) do { \
+ u64 __val64 = (u64)(__val); \
+ u32 hi = (__val64) >> 32; \
+ u32 lo = (__val64) & 0xffffffff; \
+ __asm__ __volatile__ ( \
+ "\n1:\tstw %2,0(%%sr3,%1)" \
+ "\n2:\tstw %3,4(%%sr3,%1)\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
+ ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
+ : "=r"(__pu_err) \
+ : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
+ : "r1"); \
+} while (0)
+
+#endif /* !defined(CONFIG_64BIT) */
+
+
+/*
+ * Complex access routines -- external declarations
+ */
+
+extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
+extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
+extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
+extern long lstrncpy_from_user(char *, const char __user *, long);
+extern unsigned lclear_user(void __user *,unsigned long);
+extern long lstrnlen_user(const char __user *,long);
+
+/*
+ * Complex access routines -- macros
+ */
+
+#define strncpy_from_user lstrncpy_from_user
+#define strnlen_user lstrnlen_user
+#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
+#define clear_user lclear_user
+#define __clear_user lclear_user
+
+unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len);
+#define __copy_to_user copy_to_user
+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len);
+unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len);
+#define __copy_in_user copy_in_user
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+extern void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+ __compiletime_error("copy_from_user() buffer size is not provably correct")
+#else
+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
+static inline unsigned long __must_check copy_from_user(void *to,
+ const void __user *from,
+ unsigned long n)
+{
+ int sz = __compiletime_object_size(to);
+ int ret = -EFAULT;
+
+ if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
+ ret = __copy_from_user(to, from, n);
+ else
+ copy_from_user_overflow();
+
+ return ret;
+}
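+
+/*
+ * Example: "char buf[8]; copy_from_user(buf, uptr, 16);" with a constant
+ * length fails the __compiletime_object_size() check above, so the build
+ * reports copy_from_user_overflow() instead of allowing a silent
+ * overflow of the destination buffer.
+ */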
+
+struct pt_regs;
+int fixup_exception(struct pt_regs *regs);
+
+#endif /* __PARISC_UACCESS_H */
diff --git a/arch/parisc/include/asm/ucontext.h b/arch/parisc/include/asm/ucontext.h
new file mode 100644
index 00000000..6c8883e4
--- /dev/null
+++ b/arch/parisc/include/asm/ucontext.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_PARISC_UCONTEXT_H
+#define _ASM_PARISC_UCONTEXT_H
+
+struct ucontext {
+ unsigned int uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif /* !_ASM_PARISC_UCONTEXT_H */
diff --git a/arch/parisc/include/asm/unaligned.h b/arch/parisc/include/asm/unaligned.h
new file mode 100644
index 00000000..dfc5d332
--- /dev/null
+++ b/arch/parisc/include/asm/unaligned.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_PARISC_UNALIGNED_H
+#define _ASM_PARISC_UNALIGNED_H
+
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
+
+#ifdef __KERNEL__
+struct pt_regs;
+void handle_unaligned(struct pt_regs *regs);
+int check_unaligned(struct pt_regs *regs);
+#endif
+
+#endif /* _ASM_PARISC_UNALIGNED_H */
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
new file mode 100644
index 00000000..d61de64f
--- /dev/null
+++ b/arch/parisc/include/asm/unistd.h
@@ -0,0 +1,1012 @@
+#ifndef _ASM_PARISC_UNISTD_H_
+#define _ASM_PARISC_UNISTD_H_
+
+/*
+ * This file contains the system call numbers.
+ */
+
+/*
+ * HP-UX system calls get their native numbers for binary compatibility.
+ */
+
+#define __NR_HPUX_exit 1
+#define __NR_HPUX_fork 2
+#define __NR_HPUX_read 3
+#define __NR_HPUX_write 4
+#define __NR_HPUX_open 5
+#define __NR_HPUX_close 6
+#define __NR_HPUX_wait 7
+#define __NR_HPUX_creat 8
+#define __NR_HPUX_link 9
+#define __NR_HPUX_unlink 10
+#define __NR_HPUX_execv 11
+#define __NR_HPUX_chdir 12
+#define __NR_HPUX_time 13
+#define __NR_HPUX_mknod 14
+#define __NR_HPUX_chmod 15
+#define __NR_HPUX_chown 16
+#define __NR_HPUX_break 17
+#define __NR_HPUX_lchmod 18
+#define __NR_HPUX_lseek 19
+#define __NR_HPUX_getpid 20
+#define __NR_HPUX_mount 21
+#define __NR_HPUX_umount 22
+#define __NR_HPUX_setuid 23
+#define __NR_HPUX_getuid 24
+#define __NR_HPUX_stime 25
+#define __NR_HPUX_ptrace 26
+#define __NR_HPUX_alarm 27
+#define __NR_HPUX_oldfstat 28
+#define __NR_HPUX_pause 29
+#define __NR_HPUX_utime 30
+#define __NR_HPUX_stty 31
+#define __NR_HPUX_gtty 32
+#define __NR_HPUX_access 33
+#define __NR_HPUX_nice 34
+#define __NR_HPUX_ftime 35
+#define __NR_HPUX_sync 36
+#define __NR_HPUX_kill 37
+#define __NR_HPUX_stat 38
+#define __NR_HPUX_setpgrp3 39
+#define __NR_HPUX_lstat 40
+#define __NR_HPUX_dup 41
+#define __NR_HPUX_pipe 42
+#define __NR_HPUX_times 43
+#define __NR_HPUX_profil 44
+#define __NR_HPUX_ki_call 45
+#define __NR_HPUX_setgid 46
+#define __NR_HPUX_getgid 47
+#define __NR_HPUX_sigsys 48
+#define __NR_HPUX_reserved1 49
+#define __NR_HPUX_reserved2 50
+#define __NR_HPUX_acct 51
+#define __NR_HPUX_set_userthreadid 52
+#define __NR_HPUX_oldlock 53
+#define __NR_HPUX_ioctl 54
+#define __NR_HPUX_reboot 55
+#define __NR_HPUX_symlink 56
+#define __NR_HPUX_utssys 57
+#define __NR_HPUX_readlink 58
+#define __NR_HPUX_execve 59
+#define __NR_HPUX_umask 60
+#define __NR_HPUX_chroot 61
+#define __NR_HPUX_fcntl 62
+#define __NR_HPUX_ulimit 63
+#define __NR_HPUX_getpagesize 64
+#define __NR_HPUX_mremap 65
+#define __NR_HPUX_vfork 66
+#define __NR_HPUX_vread 67
+#define __NR_HPUX_vwrite 68
+#define __NR_HPUX_sbrk 69
+#define __NR_HPUX_sstk 70
+#define __NR_HPUX_mmap 71
+#define __NR_HPUX_vadvise 72
+#define __NR_HPUX_munmap 73
+#define __NR_HPUX_mprotect 74
+#define __NR_HPUX_madvise 75
+#define __NR_HPUX_vhangup 76
+#define __NR_HPUX_swapoff 77
+#define __NR_HPUX_mincore 78
+#define __NR_HPUX_getgroups 79
+#define __NR_HPUX_setgroups 80
+#define __NR_HPUX_getpgrp2 81
+#define __NR_HPUX_setpgrp2 82
+#define __NR_HPUX_setitimer 83
+#define __NR_HPUX_wait3 84
+#define __NR_HPUX_swapon 85
+#define __NR_HPUX_getitimer 86
+#define __NR_HPUX_gethostname42 87
+#define __NR_HPUX_sethostname42 88
+#define __NR_HPUX_getdtablesize 89
+#define __NR_HPUX_dup2 90
+#define __NR_HPUX_getdopt 91
+#define __NR_HPUX_fstat 92
+#define __NR_HPUX_select 93
+#define __NR_HPUX_setdopt 94
+#define __NR_HPUX_fsync 95
+#define __NR_HPUX_setpriority 96
+#define __NR_HPUX_socket_old 97
+#define __NR_HPUX_connect_old 98
+#define __NR_HPUX_accept_old 99
+#define __NR_HPUX_getpriority 100
+#define __NR_HPUX_send_old 101
+#define __NR_HPUX_recv_old 102
+#define __NR_HPUX_socketaddr_old 103
+#define __NR_HPUX_bind_old 104
+#define __NR_HPUX_setsockopt_old 105
+#define __NR_HPUX_listen_old 106
+#define __NR_HPUX_vtimes_old 107
+#define __NR_HPUX_sigvector 108
+#define __NR_HPUX_sigblock 109
+#define __NR_HPUX_siggetmask 110
+#define __NR_HPUX_sigpause 111
+#define __NR_HPUX_sigstack 112
+#define __NR_HPUX_recvmsg_old 113
+#define __NR_HPUX_sendmsg_old 114
+#define __NR_HPUX_vtrace_old 115
+#define __NR_HPUX_gettimeofday 116
+#define __NR_HPUX_getrusage 117
+#define __NR_HPUX_getsockopt_old 118
+#define __NR_HPUX_resuba_old 119
+#define __NR_HPUX_readv 120
+#define __NR_HPUX_writev 121
+#define __NR_HPUX_settimeofday 122
+#define __NR_HPUX_fchown 123
+#define __NR_HPUX_fchmod 124
+#define __NR_HPUX_recvfrom_old 125
+#define __NR_HPUX_setresuid 126
+#define __NR_HPUX_setresgid 127
+#define __NR_HPUX_rename 128
+#define __NR_HPUX_truncate 129
+#define __NR_HPUX_ftruncate 130
+#define __NR_HPUX_flock_old 131
+#define __NR_HPUX_sysconf 132
+#define __NR_HPUX_sendto_old 133
+#define __NR_HPUX_shutdown_old 134
+#define __NR_HPUX_socketpair_old 135
+#define __NR_HPUX_mkdir 136
+#define __NR_HPUX_rmdir 137
+#define __NR_HPUX_utimes_old 138
+#define __NR_HPUX_sigcleanup_old 139
+#define __NR_HPUX_setcore 140
+#define __NR_HPUX_getpeername_old 141
+#define __NR_HPUX_gethostid 142
+#define __NR_HPUX_sethostid 143
+#define __NR_HPUX_getrlimit 144
+#define __NR_HPUX_setrlimit 145
+#define __NR_HPUX_killpg_old 146
+#define __NR_HPUX_cachectl 147
+#define __NR_HPUX_quotactl 148
+#define __NR_HPUX_get_sysinfo 149
+#define __NR_HPUX_getsockname_old 150
+#define __NR_HPUX_privgrp 151
+#define __NR_HPUX_rtprio 152
+#define __NR_HPUX_plock 153
+#define __NR_HPUX_reserved3 154
+#define __NR_HPUX_lockf 155
+#define __NR_HPUX_semget 156
+#define __NR_HPUX_osemctl 157
+#define __NR_HPUX_semop 158
+#define __NR_HPUX_msgget 159
+#define __NR_HPUX_omsgctl 160
+#define __NR_HPUX_msgsnd 161
+#define __NR_HPUX_msgrecv 162
+#define __NR_HPUX_shmget 163
+#define __NR_HPUX_oshmctl 164
+#define __NR_HPUX_shmat 165
+#define __NR_HPUX_shmdt 166
+#define __NR_HPUX_m68020_advise 167
+/* [168,189] are for Discless/DUX */
+#define __NR_HPUX_csp 168
+#define __NR_HPUX_cluster 169
+#define __NR_HPUX_mkrnod 170
+#define __NR_HPUX_test 171
+#define __NR_HPUX_unsp_open 172
+#define __NR_HPUX_reserved4 173
+#define __NR_HPUX_getcontext_old 174
+#define __NR_HPUX_osetcontext 175
+#define __NR_HPUX_bigio 176
+#define __NR_HPUX_pipenode 177
+#define __NR_HPUX_lsync 178
+#define __NR_HPUX_getmachineid 179
+#define __NR_HPUX_cnodeid 180
+#define __NR_HPUX_cnodes 181
+#define __NR_HPUX_swapclients 182
+#define __NR_HPUX_rmt_process 183
+#define __NR_HPUX_dskless_stats 184
+#define __NR_HPUX_sigprocmask 185
+#define __NR_HPUX_sigpending 186
+#define __NR_HPUX_sigsuspend 187
+#define __NR_HPUX_sigaction 188
+#define __NR_HPUX_reserved5 189
+#define __NR_HPUX_nfssvc 190
+#define __NR_HPUX_getfh 191
+#define __NR_HPUX_getdomainname 192
+#define __NR_HPUX_setdomainname 193
+#define __NR_HPUX_async_daemon 194
+#define __NR_HPUX_getdirentries 195
+#define __NR_HPUX_statfs 196
+#define __NR_HPUX_fstatfs 197
+#define __NR_HPUX_vfsmount 198
+#define __NR_HPUX_reserved6 199
+#define __NR_HPUX_waitpid 200
+/* 201 - 223 missing */
+#define __NR_HPUX_sigsetreturn 224
+#define __NR_HPUX_sigsetstatemask 225
+/* 226 missing */
+#define __NR_HPUX_cs 227
+#define __NR_HPUX_cds 228
+#define __NR_HPUX_set_no_trunc 229
+#define __NR_HPUX_pathconf 230
+#define __NR_HPUX_fpathconf 231
+/* 232, 233 missing */
+#define __NR_HPUX_nfs_fcntl 234
+#define __NR_HPUX_ogetacl 235
+#define __NR_HPUX_ofgetacl 236
+#define __NR_HPUX_osetacl 237
+#define __NR_HPUX_ofsetacl 238
+#define __NR_HPUX_pstat 239
+#define __NR_HPUX_getaudid 240
+#define __NR_HPUX_setaudid 241
+#define __NR_HPUX_getaudproc 242
+#define __NR_HPUX_setaudproc 243
+#define __NR_HPUX_getevent 244
+#define __NR_HPUX_setevent 245
+#define __NR_HPUX_audwrite 246
+#define __NR_HPUX_audswitch 247
+#define __NR_HPUX_audctl 248
+#define __NR_HPUX_ogetaccess 249
+#define __NR_HPUX_fsctl 250
+/* 251 - 258 missing */
+#define __NR_HPUX_swapfs 259
+#define __NR_HPUX_fss 260
+/* 261 - 266 missing */
+#define __NR_HPUX_tsync 267
+#define __NR_HPUX_getnumfds 268
+#define __NR_HPUX_poll 269
+#define __NR_HPUX_getmsg 270
+#define __NR_HPUX_putmsg 271
+#define __NR_HPUX_fchdir 272
+#define __NR_HPUX_getmount_cnt 273
+#define __NR_HPUX_getmount_entry 274
+#define __NR_HPUX_accept 275
+#define __NR_HPUX_bind 276
+#define __NR_HPUX_connect 277
+#define __NR_HPUX_getpeername 278
+#define __NR_HPUX_getsockname 279
+#define __NR_HPUX_getsockopt 280
+#define __NR_HPUX_listen 281
+#define __NR_HPUX_recv 282
+#define __NR_HPUX_recvfrom 283
+#define __NR_HPUX_recvmsg 284
+#define __NR_HPUX_send 285
+#define __NR_HPUX_sendmsg 286
+#define __NR_HPUX_sendto 287
+#define __NR_HPUX_setsockopt 288
+#define __NR_HPUX_shutdown 289
+#define __NR_HPUX_socket 290
+#define __NR_HPUX_socketpair 291
+#define __NR_HPUX_proc_open 292
+#define __NR_HPUX_proc_close 293
+#define __NR_HPUX_proc_send 294
+#define __NR_HPUX_proc_recv 295
+#define __NR_HPUX_proc_sendrecv 296
+#define __NR_HPUX_proc_syscall 297
+/* 298 - 311 missing */
+#define __NR_HPUX_semctl 312
+#define __NR_HPUX_msgctl 313
+#define __NR_HPUX_shmctl 314
+#define __NR_HPUX_mpctl 315
+#define __NR_HPUX_exportfs 316
+#define __NR_HPUX_getpmsg 317
+#define __NR_HPUX_putpmsg 318
+/* 319 missing */
+#define __NR_HPUX_msync 320
+#define __NR_HPUX_msleep 321
+#define __NR_HPUX_mwakeup 322
+#define __NR_HPUX_msem_init 323
+#define __NR_HPUX_msem_remove 324
+#define __NR_HPUX_adjtime 325
+#define __NR_HPUX_kload 326
+#define __NR_HPUX_fattach 327
+#define __NR_HPUX_fdetach 328
+#define __NR_HPUX_serialize 329
+#define __NR_HPUX_statvfs 330
+#define __NR_HPUX_fstatvfs 331
+#define __NR_HPUX_lchown 332
+#define __NR_HPUX_getsid 333
+#define __NR_HPUX_sysfs 334
+/* 335, 336 missing */
+#define __NR_HPUX_sched_setparam 337
+#define __NR_HPUX_sched_getparam 338
+#define __NR_HPUX_sched_setscheduler 339
+#define __NR_HPUX_sched_getscheduler 340
+#define __NR_HPUX_sched_yield 341
+#define __NR_HPUX_sched_get_priority_max 342
+#define __NR_HPUX_sched_get_priority_min 343
+#define __NR_HPUX_sched_rr_get_interval 344
+#define __NR_HPUX_clock_settime 345
+#define __NR_HPUX_clock_gettime 346
+#define __NR_HPUX_clock_getres 347
+#define __NR_HPUX_timer_create 348
+#define __NR_HPUX_timer_delete 349
+#define __NR_HPUX_timer_settime 350
+#define __NR_HPUX_timer_gettime 351
+#define __NR_HPUX_timer_getoverrun 352
+#define __NR_HPUX_nanosleep 353
+#define __NR_HPUX_toolbox 354
+/* 355 missing */
+#define __NR_HPUX_getdents 356
+#define __NR_HPUX_getcontext 357
+#define __NR_HPUX_sysinfo 358
+#define __NR_HPUX_fcntl64 359
+#define __NR_HPUX_ftruncate64 360
+#define __NR_HPUX_fstat64 361
+#define __NR_HPUX_getdirentries64 362
+#define __NR_HPUX_getrlimit64 363
+#define __NR_HPUX_lockf64 364
+#define __NR_HPUX_lseek64 365
+#define __NR_HPUX_lstat64 366
+#define __NR_HPUX_mmap64 367
+#define __NR_HPUX_setrlimit64 368
+#define __NR_HPUX_stat64 369
+#define __NR_HPUX_truncate64 370
+#define __NR_HPUX_ulimit64 371
+#define __NR_HPUX_pread 372
+#define __NR_HPUX_preadv 373
+#define __NR_HPUX_pwrite 374
+#define __NR_HPUX_pwritev 375
+#define __NR_HPUX_pread64 376
+#define __NR_HPUX_preadv64 377
+#define __NR_HPUX_pwrite64 378
+#define __NR_HPUX_pwritev64 379
+#define __NR_HPUX_setcontext 380
+#define __NR_HPUX_sigaltstack 381
+#define __NR_HPUX_waitid 382
+#define __NR_HPUX_setpgrp 383
+#define __NR_HPUX_recvmsg2 384
+#define __NR_HPUX_sendmsg2 385
+#define __NR_HPUX_socket2 386
+#define __NR_HPUX_socketpair2 387
+#define __NR_HPUX_setregid 388
+#define __NR_HPUX_lwp_create 389
+#define __NR_HPUX_lwp_terminate 390
+#define __NR_HPUX_lwp_wait 391
+#define __NR_HPUX_lwp_suspend 392
+#define __NR_HPUX_lwp_resume 393
+/* 394 missing */
+#define __NR_HPUX_lwp_abort_syscall 395
+#define __NR_HPUX_lwp_info 396
+#define __NR_HPUX_lwp_kill 397
+#define __NR_HPUX_ksleep 398
+#define __NR_HPUX_kwakeup 399
+/* 400 missing */
+#define __NR_HPUX_pstat_getlwp 401
+#define __NR_HPUX_lwp_exit 402
+#define __NR_HPUX_lwp_continue 403
+#define __NR_HPUX_getacl 404
+#define __NR_HPUX_fgetacl 405
+#define __NR_HPUX_setacl 406
+#define __NR_HPUX_fsetacl 407
+#define __NR_HPUX_getaccess 408
+#define __NR_HPUX_lwp_mutex_init 409
+#define __NR_HPUX_lwp_mutex_lock_sys 410
+#define __NR_HPUX_lwp_mutex_unlock 411
+#define __NR_HPUX_lwp_cond_init 412
+#define __NR_HPUX_lwp_cond_signal 413
+#define __NR_HPUX_lwp_cond_broadcast 414
+#define __NR_HPUX_lwp_cond_wait_sys 415
+#define __NR_HPUX_lwp_getscheduler 416
+#define __NR_HPUX_lwp_setscheduler 417
+#define __NR_HPUX_lwp_getstate 418
+#define __NR_HPUX_lwp_setstate 419
+#define __NR_HPUX_lwp_detach 420
+#define __NR_HPUX_mlock 421
+#define __NR_HPUX_munlock 422
+#define __NR_HPUX_mlockall 423
+#define __NR_HPUX_munlockall 424
+#define __NR_HPUX_shm_open 425
+#define __NR_HPUX_shm_unlink 426
+#define __NR_HPUX_sigqueue 427
+#define __NR_HPUX_sigwaitinfo 428
+#define __NR_HPUX_sigtimedwait 429
+#define __NR_HPUX_sigwait 430
+#define __NR_HPUX_aio_read 431
+#define __NR_HPUX_aio_write 432
+#define __NR_HPUX_lio_listio 433
+#define __NR_HPUX_aio_error 434
+#define __NR_HPUX_aio_return 435
+#define __NR_HPUX_aio_cancel 436
+#define __NR_HPUX_aio_suspend 437
+#define __NR_HPUX_aio_fsync 438
+#define __NR_HPUX_mq_open 439
+#define __NR_HPUX_mq_close 440
+#define __NR_HPUX_mq_unlink 441
+#define __NR_HPUX_mq_send 442
+#define __NR_HPUX_mq_receive 443
+#define __NR_HPUX_mq_notify 444
+#define __NR_HPUX_mq_setattr 445
+#define __NR_HPUX_mq_getattr 446
+#define __NR_HPUX_ksem_open 447
+#define __NR_HPUX_ksem_unlink 448
+#define __NR_HPUX_ksem_close 449
+#define __NR_HPUX_ksem_post 450
+#define __NR_HPUX_ksem_wait 451
+#define __NR_HPUX_ksem_read 452
+#define __NR_HPUX_ksem_trywait 453
+#define __NR_HPUX_lwp_rwlock_init 454
+#define __NR_HPUX_lwp_rwlock_destroy 455
+#define __NR_HPUX_lwp_rwlock_rdlock_sys 456
+#define __NR_HPUX_lwp_rwlock_wrlock_sys 457
+#define __NR_HPUX_lwp_rwlock_tryrdlock 458
+#define __NR_HPUX_lwp_rwlock_trywrlock 459
+#define __NR_HPUX_lwp_rwlock_unlock 460
+#define __NR_HPUX_ttrace 461
+#define __NR_HPUX_ttrace_wait 462
+#define __NR_HPUX_lf_wire_mem 463
+#define __NR_HPUX_lf_unwire_mem 464
+#define __NR_HPUX_lf_send_pin_map 465
+#define __NR_HPUX_lf_free_buf 466
+#define __NR_HPUX_lf_wait_nq 467
+#define __NR_HPUX_lf_wakeup_conn_q 468
+#define __NR_HPUX_lf_unused 469
+#define __NR_HPUX_lwp_sema_init 470
+#define __NR_HPUX_lwp_sema_post 471
+#define __NR_HPUX_lwp_sema_wait 472
+#define __NR_HPUX_lwp_sema_trywait 473
+#define __NR_HPUX_lwp_sema_destroy 474
+#define __NR_HPUX_statvfs64 475
+#define __NR_HPUX_fstatvfs64 476
+#define __NR_HPUX_msh_register 477
+#define __NR_HPUX_ptrace64 478
+#define __NR_HPUX_sendfile 479
+#define __NR_HPUX_sendpath 480
+#define __NR_HPUX_sendfile64 481
+#define __NR_HPUX_sendpath64 482
+#define __NR_HPUX_modload 483
+#define __NR_HPUX_moduload 484
+#define __NR_HPUX_modpath 485
+#define __NR_HPUX_getksym 486
+#define __NR_HPUX_modadm 487
+#define __NR_HPUX_modstat 488
+#define __NR_HPUX_lwp_detached_exit 489
+#define __NR_HPUX_crashconf 490
+#define __NR_HPUX_siginhibit 491
+#define __NR_HPUX_sigenable 492
+#define __NR_HPUX_spuctl 493
+#define __NR_HPUX_zerokernelsum 494
+#define __NR_HPUX_nfs_kstat 495
+#define __NR_HPUX_aio_read64 496
+#define __NR_HPUX_aio_write64 497
+#define __NR_HPUX_aio_error64 498
+#define __NR_HPUX_aio_return64 499
+#define __NR_HPUX_aio_cancel64 500
+#define __NR_HPUX_aio_suspend64 501
+#define __NR_HPUX_aio_fsync64 502
+#define __NR_HPUX_lio_listio64 503
+#define __NR_HPUX_recv2 504
+#define __NR_HPUX_recvfrom2 505
+#define __NR_HPUX_send2 506
+#define __NR_HPUX_sendto2 507
+#define __NR_HPUX_acl 508
+#define __NR_HPUX___cnx_p2p_ctl 509
+#define __NR_HPUX___cnx_gsched_ctl 510
+#define __NR_HPUX___cnx_pmon_ctl 511
+
+#define __NR_HPUX_syscalls 512
+
+/*
+ * Linux system call numbers.
+ *
+ * Cary Coutant says that we should just use another syscall gateway
+ * page to avoid clashing with the HPUX space, and I think he's right:
+ * it would keep a branch out of our syscall entry path, at the
+ * very least. If we decide to change it later, we can ``just'' tweak
+ * the LINUX_GATEWAY_ADDR define at the bottom and make __NR_Linux be
+ * 1024 or something. Oh, and recompile libc. =)
+ *
+ * 64-bit HPUX binaries get the syscall gateway address passed in a register
+ * from the kernel at startup, which seems a sane strategy.
+ */
+
+#define __NR_Linux 0
+#define __NR_restart_syscall (__NR_Linux + 0)
+#define __NR_exit (__NR_Linux + 1)
+#define __NR_fork (__NR_Linux + 2)
+#define __NR_read (__NR_Linux + 3)
+#define __NR_write (__NR_Linux + 4)
+#define __NR_open (__NR_Linux + 5)
+#define __NR_close (__NR_Linux + 6)
+#define __NR_waitpid (__NR_Linux + 7)
+#define __NR_creat (__NR_Linux + 8)
+#define __NR_link (__NR_Linux + 9)
+#define __NR_unlink (__NR_Linux + 10)
+#define __NR_execve (__NR_Linux + 11)
+#define __NR_chdir (__NR_Linux + 12)
+#define __NR_time (__NR_Linux + 13)
+#define __NR_mknod (__NR_Linux + 14)
+#define __NR_chmod (__NR_Linux + 15)
+#define __NR_lchown (__NR_Linux + 16)
+#define __NR_socket (__NR_Linux + 17)
+#define __NR_stat (__NR_Linux + 18)
+#define __NR_lseek (__NR_Linux + 19)
+#define __NR_getpid (__NR_Linux + 20)
+#define __NR_mount (__NR_Linux + 21)
+#define __NR_bind (__NR_Linux + 22)
+#define __NR_setuid (__NR_Linux + 23)
+#define __NR_getuid (__NR_Linux + 24)
+#define __NR_stime (__NR_Linux + 25)
+#define __NR_ptrace (__NR_Linux + 26)
+#define __NR_alarm (__NR_Linux + 27)
+#define __NR_fstat (__NR_Linux + 28)
+#define __NR_pause (__NR_Linux + 29)
+#define __NR_utime (__NR_Linux + 30)
+#define __NR_connect (__NR_Linux + 31)
+#define __NR_listen (__NR_Linux + 32)
+#define __NR_access (__NR_Linux + 33)
+#define __NR_nice (__NR_Linux + 34)
+#define __NR_accept (__NR_Linux + 35)
+#define __NR_sync (__NR_Linux + 36)
+#define __NR_kill (__NR_Linux + 37)
+#define __NR_rename (__NR_Linux + 38)
+#define __NR_mkdir (__NR_Linux + 39)
+#define __NR_rmdir (__NR_Linux + 40)
+#define __NR_dup (__NR_Linux + 41)
+#define __NR_pipe (__NR_Linux + 42)
+#define __NR_times (__NR_Linux + 43)
+#define __NR_getsockname (__NR_Linux + 44)
+#define __NR_brk (__NR_Linux + 45)
+#define __NR_setgid (__NR_Linux + 46)
+#define __NR_getgid (__NR_Linux + 47)
+#define __NR_signal (__NR_Linux + 48)
+#define __NR_geteuid (__NR_Linux + 49)
+#define __NR_getegid (__NR_Linux + 50)
+#define __NR_acct (__NR_Linux + 51)
+#define __NR_umount2 (__NR_Linux + 52)
+#define __NR_getpeername (__NR_Linux + 53)
+#define __NR_ioctl (__NR_Linux + 54)
+#define __NR_fcntl (__NR_Linux + 55)
+#define __NR_socketpair (__NR_Linux + 56)
+#define __NR_setpgid (__NR_Linux + 57)
+#define __NR_send (__NR_Linux + 58)
+#define __NR_uname (__NR_Linux + 59)
+#define __NR_umask (__NR_Linux + 60)
+#define __NR_chroot (__NR_Linux + 61)
+#define __NR_ustat (__NR_Linux + 62)
+#define __NR_dup2 (__NR_Linux + 63)
+#define __NR_getppid (__NR_Linux + 64)
+#define __NR_getpgrp (__NR_Linux + 65)
+#define __NR_setsid (__NR_Linux + 66)
+#define __NR_pivot_root (__NR_Linux + 67)
+#define __NR_sgetmask (__NR_Linux + 68)
+#define __NR_ssetmask (__NR_Linux + 69)
+#define __NR_setreuid (__NR_Linux + 70)
+#define __NR_setregid (__NR_Linux + 71)
+#define __NR_mincore (__NR_Linux + 72)
+#define __NR_sigpending (__NR_Linux + 73)
+#define __NR_sethostname (__NR_Linux + 74)
+#define __NR_setrlimit (__NR_Linux + 75)
+#define __NR_getrlimit (__NR_Linux + 76)
+#define __NR_getrusage (__NR_Linux + 77)
+#define __NR_gettimeofday (__NR_Linux + 78)
+#define __NR_settimeofday (__NR_Linux + 79)
+#define __NR_getgroups (__NR_Linux + 80)
+#define __NR_setgroups (__NR_Linux + 81)
+#define __NR_sendto (__NR_Linux + 82)
+#define __NR_symlink (__NR_Linux + 83)
+#define __NR_lstat (__NR_Linux + 84)
+#define __NR_readlink (__NR_Linux + 85)
+#define __NR_uselib (__NR_Linux + 86)
+#define __NR_swapon (__NR_Linux + 87)
+#define __NR_reboot (__NR_Linux + 88)
+#define __NR_mmap2 (__NR_Linux + 89)
+#define __NR_mmap (__NR_Linux + 90)
+#define __NR_munmap (__NR_Linux + 91)
+#define __NR_truncate (__NR_Linux + 92)
+#define __NR_ftruncate (__NR_Linux + 93)
+#define __NR_fchmod (__NR_Linux + 94)
+#define __NR_fchown (__NR_Linux + 95)
+#define __NR_getpriority (__NR_Linux + 96)
+#define __NR_setpriority (__NR_Linux + 97)
+#define __NR_recv (__NR_Linux + 98)
+#define __NR_statfs (__NR_Linux + 99)
+#define __NR_fstatfs (__NR_Linux + 100)
+#define __NR_stat64 (__NR_Linux + 101)
+/* #define __NR_socketcall (__NR_Linux + 102) */
+#define __NR_syslog (__NR_Linux + 103)
+#define __NR_setitimer (__NR_Linux + 104)
+#define __NR_getitimer (__NR_Linux + 105)
+#define __NR_capget (__NR_Linux + 106)
+#define __NR_capset (__NR_Linux + 107)
+#define __NR_pread64 (__NR_Linux + 108)
+#define __NR_pwrite64 (__NR_Linux + 109)
+#define __NR_getcwd (__NR_Linux + 110)
+#define __NR_vhangup (__NR_Linux + 111)
+#define __NR_fstat64 (__NR_Linux + 112)
+#define __NR_vfork (__NR_Linux + 113)
+#define __NR_wait4 (__NR_Linux + 114)
+#define __NR_swapoff (__NR_Linux + 115)
+#define __NR_sysinfo (__NR_Linux + 116)
+#define __NR_shutdown (__NR_Linux + 117)
+#define __NR_fsync (__NR_Linux + 118)
+#define __NR_madvise (__NR_Linux + 119)
+#define __NR_clone (__NR_Linux + 120)
+#define __NR_setdomainname (__NR_Linux + 121)
+#define __NR_sendfile (__NR_Linux + 122)
+#define __NR_recvfrom (__NR_Linux + 123)
+#define __NR_adjtimex (__NR_Linux + 124)
+#define __NR_mprotect (__NR_Linux + 125)
+#define __NR_sigprocmask (__NR_Linux + 126)
+#define __NR_create_module (__NR_Linux + 127)
+#define __NR_init_module (__NR_Linux + 128)
+#define __NR_delete_module (__NR_Linux + 129)
+#define __NR_get_kernel_syms (__NR_Linux + 130)
+#define __NR_quotactl (__NR_Linux + 131)
+#define __NR_getpgid (__NR_Linux + 132)
+#define __NR_fchdir (__NR_Linux + 133)
+#define __NR_bdflush (__NR_Linux + 134)
+#define __NR_sysfs (__NR_Linux + 135)
+#define __NR_personality (__NR_Linux + 136)
+#define __NR_afs_syscall (__NR_Linux + 137) /* Syscall for Andrew File System */
+#define __NR_setfsuid (__NR_Linux + 138)
+#define __NR_setfsgid (__NR_Linux + 139)
+#define __NR__llseek (__NR_Linux + 140)
+#define __NR_getdents (__NR_Linux + 141)
+#define __NR__newselect (__NR_Linux + 142)
+#define __NR_flock (__NR_Linux + 143)
+#define __NR_msync (__NR_Linux + 144)
+#define __NR_readv (__NR_Linux + 145)
+#define __NR_writev (__NR_Linux + 146)
+#define __NR_getsid (__NR_Linux + 147)
+#define __NR_fdatasync (__NR_Linux + 148)
+#define __NR__sysctl (__NR_Linux + 149)
+#define __NR_mlock (__NR_Linux + 150)
+#define __NR_munlock (__NR_Linux + 151)
+#define __NR_mlockall (__NR_Linux + 152)
+#define __NR_munlockall (__NR_Linux + 153)
+#define __NR_sched_setparam (__NR_Linux + 154)
+#define __NR_sched_getparam (__NR_Linux + 155)
+#define __NR_sched_setscheduler (__NR_Linux + 156)
+#define __NR_sched_getscheduler (__NR_Linux + 157)
+#define __NR_sched_yield (__NR_Linux + 158)
+#define __NR_sched_get_priority_max (__NR_Linux + 159)
+#define __NR_sched_get_priority_min (__NR_Linux + 160)
+#define __NR_sched_rr_get_interval (__NR_Linux + 161)
+#define __NR_nanosleep (__NR_Linux + 162)
+#define __NR_mremap (__NR_Linux + 163)
+#define __NR_setresuid (__NR_Linux + 164)
+#define __NR_getresuid (__NR_Linux + 165)
+#define __NR_sigaltstack (__NR_Linux + 166)
+#define __NR_query_module (__NR_Linux + 167)
+#define __NR_poll (__NR_Linux + 168)
+#define __NR_nfsservctl (__NR_Linux + 169)
+#define __NR_setresgid (__NR_Linux + 170)
+#define __NR_getresgid (__NR_Linux + 171)
+#define __NR_prctl (__NR_Linux + 172)
+#define __NR_rt_sigreturn (__NR_Linux + 173)
+#define __NR_rt_sigaction (__NR_Linux + 174)
+#define __NR_rt_sigprocmask (__NR_Linux + 175)
+#define __NR_rt_sigpending (__NR_Linux + 176)
+#define __NR_rt_sigtimedwait (__NR_Linux + 177)
+#define __NR_rt_sigqueueinfo (__NR_Linux + 178)
+#define __NR_rt_sigsuspend (__NR_Linux + 179)
+#define __NR_chown (__NR_Linux + 180)
+#define __NR_setsockopt (__NR_Linux + 181)
+#define __NR_getsockopt (__NR_Linux + 182)
+#define __NR_sendmsg (__NR_Linux + 183)
+#define __NR_recvmsg (__NR_Linux + 184)
+#define __NR_semop (__NR_Linux + 185)
+#define __NR_semget (__NR_Linux + 186)
+#define __NR_semctl (__NR_Linux + 187)
+#define __NR_msgsnd (__NR_Linux + 188)
+#define __NR_msgrcv (__NR_Linux + 189)
+#define __NR_msgget (__NR_Linux + 190)
+#define __NR_msgctl (__NR_Linux + 191)
+#define __NR_shmat (__NR_Linux + 192)
+#define __NR_shmdt (__NR_Linux + 193)
+#define __NR_shmget (__NR_Linux + 194)
+#define __NR_shmctl (__NR_Linux + 195)
+
+#define __NR_getpmsg (__NR_Linux + 196) /* Somebody *wants* streams? */
+#define __NR_putpmsg (__NR_Linux + 197)
+
+#define __NR_lstat64 (__NR_Linux + 198)
+#define __NR_truncate64 (__NR_Linux + 199)
+#define __NR_ftruncate64 (__NR_Linux + 200)
+#define __NR_getdents64 (__NR_Linux + 201)
+#define __NR_fcntl64 (__NR_Linux + 202)
+#define __NR_attrctl (__NR_Linux + 203)
+#define __NR_acl_get (__NR_Linux + 204)
+#define __NR_acl_set (__NR_Linux + 205)
+#define __NR_gettid (__NR_Linux + 206)
+#define __NR_readahead (__NR_Linux + 207)
+#define __NR_tkill (__NR_Linux + 208)
+#define __NR_sendfile64 (__NR_Linux + 209)
+#define __NR_futex (__NR_Linux + 210)
+#define __NR_sched_setaffinity (__NR_Linux + 211)
+#define __NR_sched_getaffinity (__NR_Linux + 212)
+#define __NR_set_thread_area (__NR_Linux + 213)
+#define __NR_get_thread_area (__NR_Linux + 214)
+#define __NR_io_setup (__NR_Linux + 215)
+#define __NR_io_destroy (__NR_Linux + 216)
+#define __NR_io_getevents (__NR_Linux + 217)
+#define __NR_io_submit (__NR_Linux + 218)
+#define __NR_io_cancel (__NR_Linux + 219)
+#define __NR_alloc_hugepages (__NR_Linux + 220)
+#define __NR_free_hugepages (__NR_Linux + 221)
+#define __NR_exit_group (__NR_Linux + 222)
+#define __NR_lookup_dcookie (__NR_Linux + 223)
+#define __NR_epoll_create (__NR_Linux + 224)
+#define __NR_epoll_ctl (__NR_Linux + 225)
+#define __NR_epoll_wait (__NR_Linux + 226)
+#define __NR_remap_file_pages (__NR_Linux + 227)
+#define __NR_semtimedop (__NR_Linux + 228)
+#define __NR_mq_open (__NR_Linux + 229)
+#define __NR_mq_unlink (__NR_Linux + 230)
+#define __NR_mq_timedsend (__NR_Linux + 231)
+#define __NR_mq_timedreceive (__NR_Linux + 232)
+#define __NR_mq_notify (__NR_Linux + 233)
+#define __NR_mq_getsetattr (__NR_Linux + 234)
+#define __NR_waitid (__NR_Linux + 235)
+#define __NR_fadvise64_64 (__NR_Linux + 236)
+#define __NR_set_tid_address (__NR_Linux + 237)
+#define __NR_setxattr (__NR_Linux + 238)
+#define __NR_lsetxattr (__NR_Linux + 239)
+#define __NR_fsetxattr (__NR_Linux + 240)
+#define __NR_getxattr (__NR_Linux + 241)
+#define __NR_lgetxattr (__NR_Linux + 242)
+#define __NR_fgetxattr (__NR_Linux + 243)
+#define __NR_listxattr (__NR_Linux + 244)
+#define __NR_llistxattr (__NR_Linux + 245)
+#define __NR_flistxattr (__NR_Linux + 246)
+#define __NR_removexattr (__NR_Linux + 247)
+#define __NR_lremovexattr (__NR_Linux + 248)
+#define __NR_fremovexattr (__NR_Linux + 249)
+#define __NR_timer_create (__NR_Linux + 250)
+#define __NR_timer_settime (__NR_Linux + 251)
+#define __NR_timer_gettime (__NR_Linux + 252)
+#define __NR_timer_getoverrun (__NR_Linux + 253)
+#define __NR_timer_delete (__NR_Linux + 254)
+#define __NR_clock_settime (__NR_Linux + 255)
+#define __NR_clock_gettime (__NR_Linux + 256)
+#define __NR_clock_getres (__NR_Linux + 257)
+#define __NR_clock_nanosleep (__NR_Linux + 258)
+#define __NR_tgkill (__NR_Linux + 259)
+#define __NR_mbind (__NR_Linux + 260)
+#define __NR_get_mempolicy (__NR_Linux + 261)
+#define __NR_set_mempolicy (__NR_Linux + 262)
+#define __NR_vserver (__NR_Linux + 263)
+#define __NR_add_key (__NR_Linux + 264)
+#define __NR_request_key (__NR_Linux + 265)
+#define __NR_keyctl (__NR_Linux + 266)
+#define __NR_ioprio_set (__NR_Linux + 267)
+#define __NR_ioprio_get (__NR_Linux + 268)
+#define __NR_inotify_init (__NR_Linux + 269)
+#define __NR_inotify_add_watch (__NR_Linux + 270)
+#define __NR_inotify_rm_watch (__NR_Linux + 271)
+#define __NR_migrate_pages (__NR_Linux + 272)
+#define __NR_pselect6 (__NR_Linux + 273)
+#define __NR_ppoll (__NR_Linux + 274)
+#define __NR_openat (__NR_Linux + 275)
+#define __NR_mkdirat (__NR_Linux + 276)
+#define __NR_mknodat (__NR_Linux + 277)
+#define __NR_fchownat (__NR_Linux + 278)
+#define __NR_futimesat (__NR_Linux + 279)
+#define __NR_fstatat64 (__NR_Linux + 280)
+#define __NR_unlinkat (__NR_Linux + 281)
+#define __NR_renameat (__NR_Linux + 282)
+#define __NR_linkat (__NR_Linux + 283)
+#define __NR_symlinkat (__NR_Linux + 284)
+#define __NR_readlinkat (__NR_Linux + 285)
+#define __NR_fchmodat (__NR_Linux + 286)
+#define __NR_faccessat (__NR_Linux + 287)
+#define __NR_unshare (__NR_Linux + 288)
+#define __NR_set_robust_list (__NR_Linux + 289)
+#define __NR_get_robust_list (__NR_Linux + 290)
+#define __NR_splice (__NR_Linux + 291)
+#define __NR_sync_file_range (__NR_Linux + 292)
+#define __NR_tee (__NR_Linux + 293)
+#define __NR_vmsplice (__NR_Linux + 294)
+#define __NR_move_pages (__NR_Linux + 295)
+#define __NR_getcpu (__NR_Linux + 296)
+#define __NR_epoll_pwait (__NR_Linux + 297)
+#define __NR_statfs64 (__NR_Linux + 298)
+#define __NR_fstatfs64 (__NR_Linux + 299)
+#define __NR_kexec_load (__NR_Linux + 300)
+#define __NR_utimensat (__NR_Linux + 301)
+#define __NR_signalfd (__NR_Linux + 302)
+#define __NR_timerfd (__NR_Linux + 303)
+#define __NR_eventfd (__NR_Linux + 304)
+#define __NR_fallocate (__NR_Linux + 305)
+#define __NR_timerfd_create (__NR_Linux + 306)
+#define __NR_timerfd_settime (__NR_Linux + 307)
+#define __NR_timerfd_gettime (__NR_Linux + 308)
+#define __NR_signalfd4 (__NR_Linux + 309)
+#define __NR_eventfd2 (__NR_Linux + 310)
+#define __NR_epoll_create1 (__NR_Linux + 311)
+#define __NR_dup3 (__NR_Linux + 312)
+#define __NR_pipe2 (__NR_Linux + 313)
+#define __NR_inotify_init1 (__NR_Linux + 314)
+#define __NR_preadv (__NR_Linux + 315)
+#define __NR_pwritev (__NR_Linux + 316)
+#define __NR_rt_tgsigqueueinfo (__NR_Linux + 317)
+#define __NR_perf_event_open (__NR_Linux + 318)
+#define __NR_recvmmsg (__NR_Linux + 319)
+#define __NR_accept4 (__NR_Linux + 320)
+#define __NR_prlimit64 (__NR_Linux + 321)
+#define __NR_fanotify_init (__NR_Linux + 322)
+#define __NR_fanotify_mark (__NR_Linux + 323)
+#define __NR_clock_adjtime (__NR_Linux + 324)
+#define __NR_name_to_handle_at (__NR_Linux + 325)
+#define __NR_open_by_handle_at (__NR_Linux + 326)
+#define __NR_syncfs (__NR_Linux + 327)
+#define __NR_setns (__NR_Linux + 328)
+#define __NR_sendmmsg (__NR_Linux + 329)
+
+#define __NR_Linux_syscalls (__NR_sendmmsg + 1)
+
+
+#define __IGNORE_select /* newselect */
+#define __IGNORE_fadvise64 /* fadvise64_64 */
+#define __IGNORE_utimes /* utime */
+
+
+#define HPUX_GATEWAY_ADDR 0xC0000004
+#define LINUX_GATEWAY_ADDR 0x100
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#define SYS_ify(syscall_name) __NR_##syscall_name
+
+#ifndef ASM_LINE_SEP
+# define ASM_LINE_SEP ;
+#endif
+
+/* Definition taken from glibc 2.3.3
+ * sysdeps/unix/sysv/linux/hppa/sysdep.h
+ */
+
+#ifdef PIC
+/* WARNING: CANNOT BE USED IN A NOP! */
+# define K_STW_ASM_PIC " copy %%r19, %%r4\n"
+# define K_LDW_ASM_PIC " copy %%r4, %%r19\n"
+# define K_USING_GR4 "%r4",
+#else
+# define K_STW_ASM_PIC " \n"
+# define K_LDW_ASM_PIC " \n"
+# define K_USING_GR4
+#endif
+
+/* GCC has to be warned that a syscall may clobber all the ABI
+ registers listed as "caller-saves", see page 8, Table 2
+ in section 2.2.6 of the PA-RISC RUN-TIME architecture
+ document. However! r28 is the result and will conflict with
+   the clobber list so it is left out. Also the input argument
+ registers r20 -> r26 will conflict with the list so they
+ are treated specially. Although r19 is clobbered by the syscall
+ we cannot say this because it would violate ABI, thus we say
+ r4 is clobbered and use that register to save/restore r19
+ across the syscall. */
+
+#define K_CALL_CLOB_REGS "%r1", "%r2", K_USING_GR4 \
+ "%r20", "%r29", "%r31"
+
+#undef K_INLINE_SYSCALL
+#define K_INLINE_SYSCALL(name, nr, args...) ({ \
+ long __sys_res; \
+ { \
+ register unsigned long __res __asm__("r28"); \
+ K_LOAD_ARGS_##nr(args) \
+ /* FIXME: HACK stw/ldw r19 around syscall */ \
+ __asm__ volatile( \
+ K_STW_ASM_PIC \
+ " ble 0x100(%%sr2, %%r0)\n" \
+ " ldi %1, %%r20\n" \
+ K_LDW_ASM_PIC \
+ : "=r" (__res) \
+ : "i" (SYS_ify(name)) K_ASM_ARGS_##nr \
+ : "memory", K_CALL_CLOB_REGS K_CLOB_ARGS_##nr \
+ ); \
+ __sys_res = (long)__res; \
+ } \
+ if ( (unsigned long)__sys_res >= (unsigned long)-4095 ){ \
+ errno = -__sys_res; \
+ __sys_res = -1; \
+ } \
+ __sys_res; \
+})
+
+#define K_LOAD_ARGS_0()
+#define K_LOAD_ARGS_1(r26) \
+ register unsigned long __r26 __asm__("r26") = (unsigned long)(r26); \
+ K_LOAD_ARGS_0()
+#define K_LOAD_ARGS_2(r26,r25) \
+ register unsigned long __r25 __asm__("r25") = (unsigned long)(r25); \
+ K_LOAD_ARGS_1(r26)
+#define K_LOAD_ARGS_3(r26,r25,r24) \
+ register unsigned long __r24 __asm__("r24") = (unsigned long)(r24); \
+ K_LOAD_ARGS_2(r26,r25)
+#define K_LOAD_ARGS_4(r26,r25,r24,r23) \
+ register unsigned long __r23 __asm__("r23") = (unsigned long)(r23); \
+ K_LOAD_ARGS_3(r26,r25,r24)
+#define K_LOAD_ARGS_5(r26,r25,r24,r23,r22) \
+ register unsigned long __r22 __asm__("r22") = (unsigned long)(r22); \
+ K_LOAD_ARGS_4(r26,r25,r24,r23)
+#define K_LOAD_ARGS_6(r26,r25,r24,r23,r22,r21) \
+ register unsigned long __r21 __asm__("r21") = (unsigned long)(r21); \
+ K_LOAD_ARGS_5(r26,r25,r24,r23,r22)
+
+/* Even with zero args we use r20 for the syscall number */
+#define K_ASM_ARGS_0
+#define K_ASM_ARGS_1 K_ASM_ARGS_0, "r" (__r26)
+#define K_ASM_ARGS_2 K_ASM_ARGS_1, "r" (__r25)
+#define K_ASM_ARGS_3 K_ASM_ARGS_2, "r" (__r24)
+#define K_ASM_ARGS_4 K_ASM_ARGS_3, "r" (__r23)
+#define K_ASM_ARGS_5 K_ASM_ARGS_4, "r" (__r22)
+#define K_ASM_ARGS_6 K_ASM_ARGS_5, "r" (__r21)
+
+/* The registers not listed as inputs but clobbered */
+#define K_CLOB_ARGS_6
+#define K_CLOB_ARGS_5 K_CLOB_ARGS_6, "%r21"
+#define K_CLOB_ARGS_4 K_CLOB_ARGS_5, "%r22"
+#define K_CLOB_ARGS_3 K_CLOB_ARGS_4, "%r23"
+#define K_CLOB_ARGS_2 K_CLOB_ARGS_3, "%r24"
+#define K_CLOB_ARGS_1 K_CLOB_ARGS_2, "%r25"
+#define K_CLOB_ARGS_0 K_CLOB_ARGS_1, "%r26"
+
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+ return K_INLINE_SYSCALL(name, 0); \
+}
+
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) \
+{ \
+ return K_INLINE_SYSCALL(name, 1, arg1); \
+}
+
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1, type2 arg2) \
+{ \
+ return K_INLINE_SYSCALL(name, 2, arg1, arg2); \
+}
+
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type name(type1 arg1, type2 arg2, type3 arg3) \
+{ \
+ return K_INLINE_SYSCALL(name, 3, arg1, arg2, arg3); \
+}
+
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+ return K_INLINE_SYSCALL(name, 4, arg1, arg2, arg3, arg4); \
+}
+
+/* select takes 5 arguments */
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+ return K_INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5); \
+}
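+
+/*
+ * Example use (sketch): "_syscall1(int, close, int, fd)" expands to
+ * "int close(int fd)" which loads fd into %r26 and __NR_close into
+ * %r20, branches to the gateway page via "ble 0x100(%sr2, %r0)", and
+ * maps results in the -4095..-1 range to errno with a -1 return, as
+ * K_INLINE_SYSCALL does above.
+ */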
+
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_SGETMASK
+#define __ARCH_WANT_SYS_SIGNAL
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_COMPAT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
+
+#endif /* __ASSEMBLY__ */
+
+#undef STR
+
+/*
+ * "Conditional" syscalls
+ *
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
+ * but it doesn't work on all toolchains, so we just do it by hand
+ */
+#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_PARISC_UNISTD_H_ */
diff --git a/arch/parisc/include/asm/unwind.h b/arch/parisc/include/asm/unwind.h
new file mode 100644
index 00000000..52482e4f
--- /dev/null
+++ b/arch/parisc/include/asm/unwind.h
@@ -0,0 +1,79 @@
+#ifndef _UNWIND_H_
+#define _UNWIND_H_
+
+#include <linux/list.h>
+
+/* From ABI specifications */
+struct unwind_table_entry {
+ unsigned int region_start;
+ unsigned int region_end;
+ unsigned int Cannot_unwind:1; /* 0 */
+ unsigned int Millicode:1; /* 1 */
+ unsigned int Millicode_save_sr0:1; /* 2 */
+ unsigned int Region_description:2; /* 3..4 */
+ unsigned int reserved1:1; /* 5 */
+ unsigned int Entry_SR:1; /* 6 */
+ unsigned int Entry_FR:4; /* number saved *//* 7..10 */
+ unsigned int Entry_GR:5; /* number saved *//* 11..15 */
+ unsigned int Args_stored:1; /* 16 */
+ unsigned int Variable_Frame:1; /* 17 */
+ unsigned int Separate_Package_Body:1; /* 18 */
+ unsigned int Frame_Extension_Millicode:1; /* 19 */
+ unsigned int Stack_Overflow_Check:1; /* 20 */
+ unsigned int Two_Instruction_SP_Increment:1; /* 21 */
+ unsigned int Ada_Region:1; /* 22 */
+ unsigned int cxx_info:1; /* 23 */
+ unsigned int cxx_try_catch:1; /* 24 */
+ unsigned int sched_entry_seq:1; /* 25 */
+ unsigned int reserved2:1; /* 26 */
+ unsigned int Save_SP:1; /* 27 */
+ unsigned int Save_RP:1; /* 28 */
+ unsigned int Save_MRP_in_frame:1; /* 29 */
+ unsigned int extn_ptr_defined:1; /* 30 */
+ unsigned int Cleanup_defined:1; /* 31 */
+
+ unsigned int MPE_XL_interrupt_marker:1; /* 0 */
+ unsigned int HP_UX_interrupt_marker:1; /* 1 */
+ unsigned int Large_frame:1; /* 2 */
+ unsigned int Pseudo_SP_Set:1; /* 3 */
+ unsigned int reserved4:1; /* 4 */
+ unsigned int Total_frame_size:27; /* 5..31 */
+};
+
+struct unwind_table {
+ struct list_head list;
+ const char *name;
+ unsigned long gp;
+ unsigned long base_addr;
+ unsigned long start;
+ unsigned long end;
+ const struct unwind_table_entry *table;
+ unsigned long length;
+};
+
+struct unwind_frame_info {
+ struct task_struct *t;
+ /* Eventually we would like to be able to get at any of the registers
+    available, but for now we only try to get the sp and ip for each
+ frame */
+ /* struct pt_regs regs; */
+ unsigned long sp, ip, rp, r31;
+ unsigned long prev_sp, prev_ip;
+};
+
+struct unwind_table *
+unwind_table_add(const char *name, unsigned long base_addr,
+ unsigned long gp, void *start, void *end);
+void
+unwind_table_remove(struct unwind_table *table);
+
+void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
+ struct pt_regs *regs);
+void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t);
+void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs);
+int unwind_once(struct unwind_frame_info *info);
+int unwind_to_user(struct unwind_frame_info *info);
+
+int unwind_init(void);
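+
+/*
+ * Typical use (sketch): fill an unwind_frame_info with
+ * unwind_frame_init_from_blocked_task() (or _running()), then call
+ * unwind_once() in a loop; each successful step updates info->ip and
+ * info->sp to the caller's frame until unwind_once() returns a
+ * negative value.
+ */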
+
+#endif
diff --git a/arch/parisc/include/asm/user.h b/arch/parisc/include/asm/user.h
new file mode 100644
index 00000000..80224753
--- /dev/null
+++ b/arch/parisc/include/asm/user.h
@@ -0,0 +1,5 @@
+/* This file should not exist, but lots of generic code still includes
+ it. It's a hangover from old a.out days and the traditional core
+ dump format. We are ELF-only, and so are our core dumps. If we
+ need to support HP/UX core format then we'll do it here
+ eventually. */
diff --git a/arch/parisc/include/asm/vga.h b/arch/parisc/include/asm/vga.h
new file mode 100644
index 00000000..171399a8
--- /dev/null
+++ b/arch/parisc/include/asm/vga.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_PARISC_VGA_H__
+#define __ASM_PARISC_VGA_H__
+
+/* nothing */
+
+#endif /* __ASM_PARISC_VGA_H__ */
diff --git a/arch/parisc/include/asm/xor.h b/arch/parisc/include/asm/xor.h
new file mode 100644
index 00000000..c82eb12a
--- /dev/null
+++ b/arch/parisc/include/asm/xor.h
@@ -0,0 +1 @@
+#include <asm-generic/xor.h>
diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh
new file mode 100644
index 00000000..e593fc8d
--- /dev/null
+++ b/arch/parisc/install.sh
@@ -0,0 +1,38 @@
+#!/bin/sh
+#
+# arch/parisc/install.sh, derived from arch/i386/boot/install.sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+#
+# "make install" script for i386 architecture
+#
+# Arguments:
+# $1 - kernel version
+# $2 - kernel image file
+# $3 - kernel map file
+# $4 - default install path (blank if root directory)
+#
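+# Example invocation (illustrative paths; normally run from the arch
+# Makefile's install target):
+#   sh arch/parisc/install.sh 3.0.0 vmlinux System.map /boot
+#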
+
+# User may have a custom install script
+
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+
+# Default install
+
+if [ -f $4/vmlinux ]; then
+ mv $4/vmlinux $4/vmlinux.old
+fi
+
+if [ -f $4/System.map ]; then
+ mv $4/System.map $4/System.old
+fi
+
+cat $2 > $4/vmlinux
+cp $3 $4/System.map
diff --git a/arch/parisc/kernel/.gitignore b/arch/parisc/kernel/.gitignore
new file mode 100644
index 00000000..c5f676c3
--- /dev/null
+++ b/arch/parisc/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
new file mode 100644
index 00000000..67db0722
--- /dev/null
+++ b/arch/parisc/kernel/Makefile
@@ -0,0 +1,35 @@
+#
+# Makefile for arch/parisc/kernel
+#
+
+extra-y := init_task.o head.o vmlinux.lds
+
+obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \
+ pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
+ ptrace.o hardware.o inventory.o drivers.o \
+ signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
+ process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
+ topology.o
+
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and low-level utilities
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_cache.o = -pg
+CFLAGS_REMOVE_irq.o = -pg
+CFLAGS_REMOVE_pacache.o = -pg
+CFLAGS_REMOVE_perf.o = -pg
+CFLAGS_REMOVE_traps.o = -pg
+CFLAGS_REMOVE_unaligned.o = -pg
+CFLAGS_REMOVE_unwind.o = -pg
+endif
+
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_PA11) += pci-dma.o
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o
+obj-$(CONFIG_STACKTRACE)+= stacktrace.o
+# only supported for PCX-W/U in 64-bit mode at the moment
+obj-$(CONFIG_64BIT) += perf.o perf_asm.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
new file mode 100644
index 00000000..dcd55103
--- /dev/null
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -0,0 +1,301 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ *
+ * Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
+ * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
+ * Copyright (C) 2000 Sam Creasey <sammy@sammy.net>
+ * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
+ * Copyright (C) 2001 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2001 Richard Hirst <rhirst at parisc-linux.org>
+ * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ * Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/kbuild.h>
+
+#include <asm/pgtable.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/pdc.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_64BIT
+#define FRAME_SIZE 128
+#else
+#define FRAME_SIZE 64
+#endif
+#define FRAME_ALIGN 64
+
+/* Add FRAME_SIZE to the size x and align it to y. All definitions
+ * that use align_frame will include space for a frame.
+ */
+#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
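+
+/* Worked example: with the 32-bit FRAME_SIZE of 64, align_frame(100, 64)
+ * = (100 + 64 + 63) - ((100 + 63) % 64) = 227 - 35 = 192, i.e. the size
+ * rounded up to a 64-byte boundary plus one frame.
+ */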
+
+int main(void)
+{
+ DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
+ DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+ DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+ DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
+ DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+ DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+ DEFINE(TASK_PERSONALITY, offsetof(struct task_struct, personality));
+ DEFINE(TASK_PID, offsetof(struct task_struct, pid));
+ BLANK();
+ DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
+ DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
+ DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[ 1]));
+ DEFINE(TASK_PT_GR2, offsetof(struct task_struct, thread.regs.gr[ 2]));
+ DEFINE(TASK_PT_GR3, offsetof(struct task_struct, thread.regs.gr[ 3]));
+ DEFINE(TASK_PT_GR4, offsetof(struct task_struct, thread.regs.gr[ 4]));
+ DEFINE(TASK_PT_GR5, offsetof(struct task_struct, thread.regs.gr[ 5]));
+ DEFINE(TASK_PT_GR6, offsetof(struct task_struct, thread.regs.gr[ 6]));
+ DEFINE(TASK_PT_GR7, offsetof(struct task_struct, thread.regs.gr[ 7]));
+ DEFINE(TASK_PT_GR8, offsetof(struct task_struct, thread.regs.gr[ 8]));
+ DEFINE(TASK_PT_GR9, offsetof(struct task_struct, thread.regs.gr[ 9]));
+ DEFINE(TASK_PT_GR10, offsetof(struct task_struct, thread.regs.gr[10]));
+ DEFINE(TASK_PT_GR11, offsetof(struct task_struct, thread.regs.gr[11]));
+ DEFINE(TASK_PT_GR12, offsetof(struct task_struct, thread.regs.gr[12]));
+ DEFINE(TASK_PT_GR13, offsetof(struct task_struct, thread.regs.gr[13]));
+ DEFINE(TASK_PT_GR14, offsetof(struct task_struct, thread.regs.gr[14]));
+ DEFINE(TASK_PT_GR15, offsetof(struct task_struct, thread.regs.gr[15]));
+ DEFINE(TASK_PT_GR16, offsetof(struct task_struct, thread.regs.gr[16]));
+ DEFINE(TASK_PT_GR17, offsetof(struct task_struct, thread.regs.gr[17]));
+ DEFINE(TASK_PT_GR18, offsetof(struct task_struct, thread.regs.gr[18]));
+ DEFINE(TASK_PT_GR19, offsetof(struct task_struct, thread.regs.gr[19]));
+ DEFINE(TASK_PT_GR20, offsetof(struct task_struct, thread.regs.gr[20]));
+ DEFINE(TASK_PT_GR21, offsetof(struct task_struct, thread.regs.gr[21]));
+ DEFINE(TASK_PT_GR22, offsetof(struct task_struct, thread.regs.gr[22]));
+ DEFINE(TASK_PT_GR23, offsetof(struct task_struct, thread.regs.gr[23]));
+ DEFINE(TASK_PT_GR24, offsetof(struct task_struct, thread.regs.gr[24]));
+ DEFINE(TASK_PT_GR25, offsetof(struct task_struct, thread.regs.gr[25]));
+ DEFINE(TASK_PT_GR26, offsetof(struct task_struct, thread.regs.gr[26]));
+ DEFINE(TASK_PT_GR27, offsetof(struct task_struct, thread.regs.gr[27]));
+ DEFINE(TASK_PT_GR28, offsetof(struct task_struct, thread.regs.gr[28]));
+ DEFINE(TASK_PT_GR29, offsetof(struct task_struct, thread.regs.gr[29]));
+ DEFINE(TASK_PT_GR30, offsetof(struct task_struct, thread.regs.gr[30]));
+ DEFINE(TASK_PT_GR31, offsetof(struct task_struct, thread.regs.gr[31]));
+ DEFINE(TASK_PT_FR0, offsetof(struct task_struct, thread.regs.fr[ 0]));
+ DEFINE(TASK_PT_FR1, offsetof(struct task_struct, thread.regs.fr[ 1]));
+ DEFINE(TASK_PT_FR2, offsetof(struct task_struct, thread.regs.fr[ 2]));
+ DEFINE(TASK_PT_FR3, offsetof(struct task_struct, thread.regs.fr[ 3]));
+ DEFINE(TASK_PT_FR4, offsetof(struct task_struct, thread.regs.fr[ 4]));
+ DEFINE(TASK_PT_FR5, offsetof(struct task_struct, thread.regs.fr[ 5]));
+ DEFINE(TASK_PT_FR6, offsetof(struct task_struct, thread.regs.fr[ 6]));
+ DEFINE(TASK_PT_FR7, offsetof(struct task_struct, thread.regs.fr[ 7]));
+ DEFINE(TASK_PT_FR8, offsetof(struct task_struct, thread.regs.fr[ 8]));
+ DEFINE(TASK_PT_FR9, offsetof(struct task_struct, thread.regs.fr[ 9]));
+ DEFINE(TASK_PT_FR10, offsetof(struct task_struct, thread.regs.fr[10]));
+ DEFINE(TASK_PT_FR11, offsetof(struct task_struct, thread.regs.fr[11]));
+ DEFINE(TASK_PT_FR12, offsetof(struct task_struct, thread.regs.fr[12]));
+ DEFINE(TASK_PT_FR13, offsetof(struct task_struct, thread.regs.fr[13]));
+ DEFINE(TASK_PT_FR14, offsetof(struct task_struct, thread.regs.fr[14]));
+ DEFINE(TASK_PT_FR15, offsetof(struct task_struct, thread.regs.fr[15]));
+ DEFINE(TASK_PT_FR16, offsetof(struct task_struct, thread.regs.fr[16]));
+ DEFINE(TASK_PT_FR17, offsetof(struct task_struct, thread.regs.fr[17]));
+ DEFINE(TASK_PT_FR18, offsetof(struct task_struct, thread.regs.fr[18]));
+ DEFINE(TASK_PT_FR19, offsetof(struct task_struct, thread.regs.fr[19]));
+ DEFINE(TASK_PT_FR20, offsetof(struct task_struct, thread.regs.fr[20]));
+ DEFINE(TASK_PT_FR21, offsetof(struct task_struct, thread.regs.fr[21]));
+ DEFINE(TASK_PT_FR22, offsetof(struct task_struct, thread.regs.fr[22]));
+ DEFINE(TASK_PT_FR23, offsetof(struct task_struct, thread.regs.fr[23]));
+ DEFINE(TASK_PT_FR24, offsetof(struct task_struct, thread.regs.fr[24]));
+ DEFINE(TASK_PT_FR25, offsetof(struct task_struct, thread.regs.fr[25]));
+ DEFINE(TASK_PT_FR26, offsetof(struct task_struct, thread.regs.fr[26]));
+ DEFINE(TASK_PT_FR27, offsetof(struct task_struct, thread.regs.fr[27]));
+ DEFINE(TASK_PT_FR28, offsetof(struct task_struct, thread.regs.fr[28]));
+ DEFINE(TASK_PT_FR29, offsetof(struct task_struct, thread.regs.fr[29]));
+ DEFINE(TASK_PT_FR30, offsetof(struct task_struct, thread.regs.fr[30]));
+ DEFINE(TASK_PT_FR31, offsetof(struct task_struct, thread.regs.fr[31]));
+ DEFINE(TASK_PT_SR0, offsetof(struct task_struct, thread.regs.sr[ 0]));
+ DEFINE(TASK_PT_SR1, offsetof(struct task_struct, thread.regs.sr[ 1]));
+ DEFINE(TASK_PT_SR2, offsetof(struct task_struct, thread.regs.sr[ 2]));
+ DEFINE(TASK_PT_SR3, offsetof(struct task_struct, thread.regs.sr[ 3]));
+ DEFINE(TASK_PT_SR4, offsetof(struct task_struct, thread.regs.sr[ 4]));
+ DEFINE(TASK_PT_SR5, offsetof(struct task_struct, thread.regs.sr[ 5]));
+ DEFINE(TASK_PT_SR6, offsetof(struct task_struct, thread.regs.sr[ 6]));
+ DEFINE(TASK_PT_SR7, offsetof(struct task_struct, thread.regs.sr[ 7]));
+ DEFINE(TASK_PT_IASQ0, offsetof(struct task_struct, thread.regs.iasq[0]));
+ DEFINE(TASK_PT_IASQ1, offsetof(struct task_struct, thread.regs.iasq[1]));
+ DEFINE(TASK_PT_IAOQ0, offsetof(struct task_struct, thread.regs.iaoq[0]));
+ DEFINE(TASK_PT_IAOQ1, offsetof(struct task_struct, thread.regs.iaoq[1]));
+ DEFINE(TASK_PT_CR27, offsetof(struct task_struct, thread.regs.cr27));
+ DEFINE(TASK_PT_ORIG_R28, offsetof(struct task_struct, thread.regs.orig_r28));
+ DEFINE(TASK_PT_KSP, offsetof(struct task_struct, thread.regs.ksp));
+ DEFINE(TASK_PT_KPC, offsetof(struct task_struct, thread.regs.kpc));
+ DEFINE(TASK_PT_SAR, offsetof(struct task_struct, thread.regs.sar));
+ DEFINE(TASK_PT_IIR, offsetof(struct task_struct, thread.regs.iir));
+ DEFINE(TASK_PT_ISR, offsetof(struct task_struct, thread.regs.isr));
+ DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
+ BLANK();
+ DEFINE(TASK_SZ, sizeof(struct task_struct));
+ /* TASK_SZ_ALGN includes space for a stack frame. */
+ DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
+ BLANK();
+ DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
+ DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
+ DEFINE(PT_GR2, offsetof(struct pt_regs, gr[ 2]));
+ DEFINE(PT_GR3, offsetof(struct pt_regs, gr[ 3]));
+ DEFINE(PT_GR4, offsetof(struct pt_regs, gr[ 4]));
+ DEFINE(PT_GR5, offsetof(struct pt_regs, gr[ 5]));
+ DEFINE(PT_GR6, offsetof(struct pt_regs, gr[ 6]));
+ DEFINE(PT_GR7, offsetof(struct pt_regs, gr[ 7]));
+ DEFINE(PT_GR8, offsetof(struct pt_regs, gr[ 8]));
+ DEFINE(PT_GR9, offsetof(struct pt_regs, gr[ 9]));
+ DEFINE(PT_GR10, offsetof(struct pt_regs, gr[10]));
+ DEFINE(PT_GR11, offsetof(struct pt_regs, gr[11]));
+ DEFINE(PT_GR12, offsetof(struct pt_regs, gr[12]));
+ DEFINE(PT_GR13, offsetof(struct pt_regs, gr[13]));
+ DEFINE(PT_GR14, offsetof(struct pt_regs, gr[14]));
+ DEFINE(PT_GR15, offsetof(struct pt_regs, gr[15]));
+ DEFINE(PT_GR16, offsetof(struct pt_regs, gr[16]));
+ DEFINE(PT_GR17, offsetof(struct pt_regs, gr[17]));
+ DEFINE(PT_GR18, offsetof(struct pt_regs, gr[18]));
+ DEFINE(PT_GR19, offsetof(struct pt_regs, gr[19]));
+ DEFINE(PT_GR20, offsetof(struct pt_regs, gr[20]));
+ DEFINE(PT_GR21, offsetof(struct pt_regs, gr[21]));
+ DEFINE(PT_GR22, offsetof(struct pt_regs, gr[22]));
+ DEFINE(PT_GR23, offsetof(struct pt_regs, gr[23]));
+ DEFINE(PT_GR24, offsetof(struct pt_regs, gr[24]));
+ DEFINE(PT_GR25, offsetof(struct pt_regs, gr[25]));
+ DEFINE(PT_GR26, offsetof(struct pt_regs, gr[26]));
+ DEFINE(PT_GR27, offsetof(struct pt_regs, gr[27]));
+ DEFINE(PT_GR28, offsetof(struct pt_regs, gr[28]));
+ DEFINE(PT_GR29, offsetof(struct pt_regs, gr[29]));
+ DEFINE(PT_GR30, offsetof(struct pt_regs, gr[30]));
+ DEFINE(PT_GR31, offsetof(struct pt_regs, gr[31]));
+ DEFINE(PT_FR0, offsetof(struct pt_regs, fr[ 0]));
+ DEFINE(PT_FR1, offsetof(struct pt_regs, fr[ 1]));
+ DEFINE(PT_FR2, offsetof(struct pt_regs, fr[ 2]));
+ DEFINE(PT_FR3, offsetof(struct pt_regs, fr[ 3]));
+ DEFINE(PT_FR4, offsetof(struct pt_regs, fr[ 4]));
+ DEFINE(PT_FR5, offsetof(struct pt_regs, fr[ 5]));
+ DEFINE(PT_FR6, offsetof(struct pt_regs, fr[ 6]));
+ DEFINE(PT_FR7, offsetof(struct pt_regs, fr[ 7]));
+ DEFINE(PT_FR8, offsetof(struct pt_regs, fr[ 8]));
+ DEFINE(PT_FR9, offsetof(struct pt_regs, fr[ 9]));
+ DEFINE(PT_FR10, offsetof(struct pt_regs, fr[10]));
+ DEFINE(PT_FR11, offsetof(struct pt_regs, fr[11]));
+ DEFINE(PT_FR12, offsetof(struct pt_regs, fr[12]));
+ DEFINE(PT_FR13, offsetof(struct pt_regs, fr[13]));
+ DEFINE(PT_FR14, offsetof(struct pt_regs, fr[14]));
+ DEFINE(PT_FR15, offsetof(struct pt_regs, fr[15]));
+ DEFINE(PT_FR16, offsetof(struct pt_regs, fr[16]));
+ DEFINE(PT_FR17, offsetof(struct pt_regs, fr[17]));
+ DEFINE(PT_FR18, offsetof(struct pt_regs, fr[18]));
+ DEFINE(PT_FR19, offsetof(struct pt_regs, fr[19]));
+ DEFINE(PT_FR20, offsetof(struct pt_regs, fr[20]));
+ DEFINE(PT_FR21, offsetof(struct pt_regs, fr[21]));
+ DEFINE(PT_FR22, offsetof(struct pt_regs, fr[22]));
+ DEFINE(PT_FR23, offsetof(struct pt_regs, fr[23]));
+ DEFINE(PT_FR24, offsetof(struct pt_regs, fr[24]));
+ DEFINE(PT_FR25, offsetof(struct pt_regs, fr[25]));
+ DEFINE(PT_FR26, offsetof(struct pt_regs, fr[26]));
+ DEFINE(PT_FR27, offsetof(struct pt_regs, fr[27]));
+ DEFINE(PT_FR28, offsetof(struct pt_regs, fr[28]));
+ DEFINE(PT_FR29, offsetof(struct pt_regs, fr[29]));
+ DEFINE(PT_FR30, offsetof(struct pt_regs, fr[30]));
+ DEFINE(PT_FR31, offsetof(struct pt_regs, fr[31]));
+ DEFINE(PT_SR0, offsetof(struct pt_regs, sr[ 0]));
+ DEFINE(PT_SR1, offsetof(struct pt_regs, sr[ 1]));
+ DEFINE(PT_SR2, offsetof(struct pt_regs, sr[ 2]));
+ DEFINE(PT_SR3, offsetof(struct pt_regs, sr[ 3]));
+ DEFINE(PT_SR4, offsetof(struct pt_regs, sr[ 4]));
+ DEFINE(PT_SR5, offsetof(struct pt_regs, sr[ 5]));
+ DEFINE(PT_SR6, offsetof(struct pt_regs, sr[ 6]));
+ DEFINE(PT_SR7, offsetof(struct pt_regs, sr[ 7]));
+ DEFINE(PT_IASQ0, offsetof(struct pt_regs, iasq[0]));
+ DEFINE(PT_IASQ1, offsetof(struct pt_regs, iasq[1]));
+ DEFINE(PT_IAOQ0, offsetof(struct pt_regs, iaoq[0]));
+ DEFINE(PT_IAOQ1, offsetof(struct pt_regs, iaoq[1]));
+ DEFINE(PT_CR27, offsetof(struct pt_regs, cr27));
+ DEFINE(PT_ORIG_R28, offsetof(struct pt_regs, orig_r28));
+ DEFINE(PT_KSP, offsetof(struct pt_regs, ksp));
+ DEFINE(PT_KPC, offsetof(struct pt_regs, kpc));
+ DEFINE(PT_SAR, offsetof(struct pt_regs, sar));
+ DEFINE(PT_IIR, offsetof(struct pt_regs, iir));
+ DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
+ DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ /* PT_SZ_ALGN includes space for a stack frame. */
+ DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
+ BLANK();
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+ DEFINE(THREAD_SZ, sizeof(struct thread_info));
+ /* THREAD_SZ_ALGN includes space for a stack frame. */
+ DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
+ BLANK();
+ DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
+ DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
+ DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
+ DEFINE(ICACHE_LOOP, offsetof(struct pdc_cache_info, ic_loop));
+ DEFINE(DCACHE_BASE, offsetof(struct pdc_cache_info, dc_base));
+ DEFINE(DCACHE_STRIDE, offsetof(struct pdc_cache_info, dc_stride));
+ DEFINE(DCACHE_COUNT, offsetof(struct pdc_cache_info, dc_count));
+ DEFINE(DCACHE_LOOP, offsetof(struct pdc_cache_info, dc_loop));
+ DEFINE(ITLB_SID_BASE, offsetof(struct pdc_cache_info, it_sp_base));
+ DEFINE(ITLB_SID_STRIDE, offsetof(struct pdc_cache_info, it_sp_stride));
+ DEFINE(ITLB_SID_COUNT, offsetof(struct pdc_cache_info, it_sp_count));
+ DEFINE(ITLB_OFF_BASE, offsetof(struct pdc_cache_info, it_off_base));
+ DEFINE(ITLB_OFF_STRIDE, offsetof(struct pdc_cache_info, it_off_stride));
+ DEFINE(ITLB_OFF_COUNT, offsetof(struct pdc_cache_info, it_off_count));
+ DEFINE(ITLB_LOOP, offsetof(struct pdc_cache_info, it_loop));
+ DEFINE(DTLB_SID_BASE, offsetof(struct pdc_cache_info, dt_sp_base));
+ DEFINE(DTLB_SID_STRIDE, offsetof(struct pdc_cache_info, dt_sp_stride));
+ DEFINE(DTLB_SID_COUNT, offsetof(struct pdc_cache_info, dt_sp_count));
+ DEFINE(DTLB_OFF_BASE, offsetof(struct pdc_cache_info, dt_off_base));
+ DEFINE(DTLB_OFF_STRIDE, offsetof(struct pdc_cache_info, dt_off_stride));
+ DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
+ DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
+ BLANK();
+ DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
+ DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
+ BLANK();
+ DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
+ DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
+ DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD);
+ DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD);
+ DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE);
+ DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER));
+ DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT));
+ DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT);
+ DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
+ DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
+ DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
+ DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
+ DEFINE(ASM_PT_INITIAL, PT_INITIAL);
+ BLANK();
+ DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
+ DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
+ DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
+ BLANK();
+ DEFINE(ASM_PDC_RESULT_SIZE, NUM_PDC_RESULT * sizeof(unsigned long));
+ BLANK();
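+	/*
+	 * The constants emitted above land in the generated asm-offsets.h
+	 * header and are consumed by assembly code such as entry.S and
+	 * pacache.S, e.g. "STREG %r29,PT_GR29(%r9)" or
+	 * "ldo THREAD_SZ_ALGN(%r1), %r30".
+	 */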
+ return 0;
+}
diff --git a/arch/parisc/kernel/binfmt_elf32.c b/arch/parisc/kernel/binfmt_elf32.c
new file mode 100644
index 00000000..f61692d2
--- /dev/null
+++ b/arch/parisc/kernel/binfmt_elf32.c
@@ -0,0 +1,103 @@
+/*
+ * Support for 32-bit Linux/Parisc ELF binaries on 64-bit kernels
+ *
+ * Copyright (C) 2000 John Marvin
+ * Copyright (C) 2000 Hewlett Packard Co.
+ *
+ * Heavily inspired by various other efforts to do the same thing
+ * (ia64, sparc64/mips64)
+ */
+
+/* Make sure include/asm-parisc/elf.h does the right thing */
+
+#define ELF_CLASS ELFCLASS32
+
+#define ELF_CORE_COPY_REGS(dst, pt) \
+ memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \
+ { int i; \
+ for (i = 0; i < 32; i++) dst[i] = (elf_greg_t) pt->gr[i]; \
+ for (i = 0; i < 8; i++) dst[32 + i] = (elf_greg_t) pt->sr[i]; \
+ } \
+ dst[40] = (elf_greg_t) pt->iaoq[0]; dst[41] = (elf_greg_t) pt->iaoq[1]; \
+ dst[42] = (elf_greg_t) pt->iasq[0]; dst[43] = (elf_greg_t) pt->iasq[1]; \
+ dst[44] = (elf_greg_t) pt->sar; dst[45] = (elf_greg_t) pt->iir; \
+ dst[46] = (elf_greg_t) pt->isr; dst[47] = (elf_greg_t) pt->ior; \
+ dst[48] = (elf_greg_t) mfctl(22); dst[49] = (elf_greg_t) mfctl(0); \
+ dst[50] = (elf_greg_t) mfctl(24); dst[51] = (elf_greg_t) mfctl(25); \
+ dst[52] = (elf_greg_t) mfctl(26); dst[53] = (elf_greg_t) mfctl(27); \
+ dst[54] = (elf_greg_t) mfctl(28); dst[55] = (elf_greg_t) mfctl(29); \
+ dst[56] = (elf_greg_t) mfctl(30); dst[57] = (elf_greg_t) mfctl(31); \
+ dst[58] = (elf_greg_t) mfctl( 8); dst[59] = (elf_greg_t) mfctl( 9); \
+ dst[60] = (elf_greg_t) mfctl(12); dst[61] = (elf_greg_t) mfctl(13); \
+ dst[62] = (elf_greg_t) mfctl(10); dst[63] = (elf_greg_t) mfctl(15);
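+
+/*
+ * Layout of the resulting elf_gregset_t (indices into dst[] as filled in
+ * by the macro above): 0-31 general registers, 32-39 space registers,
+ * 40-41 iaoq, 42-43 iasq, 44-47 sar/iir/isr/ior, 48-63 assorted control
+ * registers read with mfctl().
+ */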
+
+
+typedef unsigned int elf_greg_t;
+
+#include <linux/spinlock.h>
+#include <asm/processor.h>
+#include <linux/module.h>
+#include <linux/elfcore.h>
+#include <linux/compat.h> /* struct compat_timeval */
+
+#define elf_prstatus elf_prstatus32
+struct elf_prstatus32
+{
+ struct elf_siginfo pr_info; /* Info associated with signal */
+ short pr_cursig; /* Current signal */
+ unsigned int pr_sigpend; /* Set of pending signals */
+ unsigned int pr_sighold; /* Set of held signals */
+ pid_t pr_pid;
+ pid_t pr_ppid;
+ pid_t pr_pgrp;
+ pid_t pr_sid;
+ struct compat_timeval pr_utime; /* User time */
+ struct compat_timeval pr_stime; /* System time */
+ struct compat_timeval pr_cutime; /* Cumulative user time */
+ struct compat_timeval pr_cstime; /* Cumulative system time */
+ elf_gregset_t pr_reg; /* GP registers */
+	int pr_fpvalid;		/* True if math co-processor is being used. */
+};
+
+#define elf_prpsinfo elf_prpsinfo32
+struct elf_prpsinfo32
+{
+ char pr_state; /* numeric process state */
+ char pr_sname; /* char for pr_state */
+ char pr_zomb; /* zombie */
+ char pr_nice; /* nice val */
+ unsigned int pr_flag; /* flags */
+ u16 pr_uid;
+ u16 pr_gid;
+ pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ /* Lots missing */
+ char pr_fname[16]; /* filename of executable */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+};
+
+#define init_elf_binfmt init_elf32_binfmt
+
+#define ELF_PLATFORM ("PARISC32\0")
+
+/*
+ * We should probably use this macro to set a flag somewhere to indicate
+ * this is a 32 on 64 process. We could use PER_LINUX_32BIT, or we
+ * could set a processor dependent flag in the thread_struct.
+ */
+
+#define SET_PERSONALITY(ex) \
+ set_thread_flag(TIF_32BIT); \
+ current->thread.map_base = DEFAULT_MAP_BASE32; \
+ current->thread.task_size = DEFAULT_TASK_SIZE32 \
+
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+ unsigned long jiffies = cputime_to_jiffies(cputime);
+ value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+}
+
+#include "../../../fs/binfmt_elf.c"
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
new file mode 100644
index 00000000..83335f3d
--- /dev/null
+++ b/arch/parisc/kernel/cache.c
@@ -0,0 +1,515 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
+ * Copyright (C) 1999 SuSE GmbH Nuernberg
+ * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
+ *
+ * Cache and TLB management
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <asm/pdc.h>
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/shmparam.h>
+
+int split_tlb __read_mostly;
+int dcache_stride __read_mostly;
+int icache_stride __read_mostly;
+EXPORT_SYMBOL(dcache_stride);
+
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+EXPORT_SYMBOL(flush_dcache_page_asm);
+void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
+
+/* On some machines (e.g. ones with the Merced bus), there can be
+ * only a single PxTLB broadcast at a time; this must be guaranteed
+ * by software. We put a spinlock around all TLB flushes to
+ * ensure this.
+ */
+DEFINE_SPINLOCK(pa_tlb_lock);
+
+struct pdc_cache_info cache_info __read_mostly;
+#ifndef CONFIG_PA20
+static struct pdc_btlb_info btlb_info __read_mostly;
+#endif
+
+#ifdef CONFIG_SMP
+void
+flush_data_cache(void)
+{
+ on_each_cpu(flush_data_cache_local, NULL, 1);
+}
+void
+flush_instruction_cache(void)
+{
+ on_each_cpu(flush_instruction_cache_local, NULL, 1);
+}
+#endif
+
+void
+flush_cache_all_local(void)
+{
+ flush_instruction_cache_local(NULL);
+ flush_data_cache_local(NULL);
+}
+EXPORT_SYMBOL(flush_cache_all_local);
+
+void
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+ struct page *page = pte_page(*ptep);
+
+ if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
+ test_bit(PG_dcache_dirty, &page->flags)) {
+
+ flush_kernel_dcache_page(page);
+ clear_bit(PG_dcache_dirty, &page->flags);
+ } else if (parisc_requires_coherency())
+ flush_kernel_dcache_page(page);
+}
+
+void
+show_cache_info(struct seq_file *m)
+{
+ char buf[32];
+
+ seq_printf(m, "I-cache\t\t: %ld KB\n",
+ cache_info.ic_size/1024 );
+ if (cache_info.dc_loop != 1)
+ snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
+ seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
+ cache_info.dc_size/1024,
+ (cache_info.dc_conf.cc_wt ? "WT":"WB"),
+ (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
+ ((cache_info.dc_loop == 1) ? "direct mapped" : buf));
+ seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
+ cache_info.it_size,
+ cache_info.dt_size,
+ cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
+ );
+
+#ifndef CONFIG_PA20
+ /* BTLB - Block TLB */
+ if (btlb_info.max_size==0) {
+ seq_printf(m, "BTLB\t\t: not supported\n" );
+ } else {
+ seq_printf(m,
+ "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
+ "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
+ "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
+ btlb_info.max_size, (int)4096,
+ btlb_info.max_size>>8,
+ btlb_info.fixed_range_info.num_i,
+ btlb_info.fixed_range_info.num_d,
+ btlb_info.fixed_range_info.num_comb,
+ btlb_info.variable_range_info.num_i,
+ btlb_info.variable_range_info.num_d,
+ btlb_info.variable_range_info.num_comb
+ );
+ }
+#endif
+}
+
+void __init
+parisc_cache_init(void)
+{
+ if (pdc_cache_info(&cache_info) < 0)
+ panic("parisc_cache_init: pdc_cache_info failed");
+
+#if 0
+ printk("ic_size %lx dc_size %lx it_size %lx\n",
+ cache_info.ic_size,
+ cache_info.dc_size,
+ cache_info.it_size);
+
+ printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
+ cache_info.dc_base,
+ cache_info.dc_stride,
+ cache_info.dc_count,
+ cache_info.dc_loop);
+
+ printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
+ *(unsigned long *) (&cache_info.dc_conf),
+ cache_info.dc_conf.cc_alias,
+ cache_info.dc_conf.cc_block,
+ cache_info.dc_conf.cc_line,
+ cache_info.dc_conf.cc_shift);
+ printk(" wt %d sh %d cst %d hv %d\n",
+ cache_info.dc_conf.cc_wt,
+ cache_info.dc_conf.cc_sh,
+ cache_info.dc_conf.cc_cst,
+ cache_info.dc_conf.cc_hv);
+
+ printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
+ cache_info.ic_base,
+ cache_info.ic_stride,
+ cache_info.ic_count,
+ cache_info.ic_loop);
+
+ printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
+ *(unsigned long *) (&cache_info.ic_conf),
+ cache_info.ic_conf.cc_alias,
+ cache_info.ic_conf.cc_block,
+ cache_info.ic_conf.cc_line,
+ cache_info.ic_conf.cc_shift);
+ printk(" wt %d sh %d cst %d hv %d\n",
+ cache_info.ic_conf.cc_wt,
+ cache_info.ic_conf.cc_sh,
+ cache_info.ic_conf.cc_cst,
+ cache_info.ic_conf.cc_hv);
+
+ printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
+ cache_info.dt_conf.tc_sh,
+ cache_info.dt_conf.tc_page,
+ cache_info.dt_conf.tc_cst,
+ cache_info.dt_conf.tc_aid,
+ cache_info.dt_conf.tc_pad1);
+
+ printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
+ cache_info.it_conf.tc_sh,
+ cache_info.it_conf.tc_page,
+ cache_info.it_conf.tc_cst,
+ cache_info.it_conf.tc_aid,
+ cache_info.it_conf.tc_pad1);
+#endif
+
+ split_tlb = 0;
+ if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
+ if (cache_info.dt_conf.tc_sh == 2)
+ printk(KERN_WARNING "Unexpected TLB configuration. "
+ "Will flush I/D separately (could be optimized).\n");
+
+ split_tlb = 1;
+ }
+
+ /* "New and Improved" version from Jim Hull
+ * (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
+ * The following CAFL_STRIDE is an optimized version, see
+ * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
+ * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
+ */
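+	/* Note that the two forms are algebraically identical:
+	 *   (1 << (cc_block-1)) * (cc_line << (4 + cc_shift))
+	 *     = cc_line * 2^(cc_block-1) * 2^(4+cc_shift)
+	 *     = cc_line << (3 + cc_block + cc_shift)
+	 * which is the single shift CAFL_STRIDE performs below. */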
+#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
+ dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
+ icache_stride = CAFL_STRIDE(cache_info.ic_conf);
+#undef CAFL_STRIDE
+
+#ifndef CONFIG_PA20
+ if (pdc_btlb_info(&btlb_info) < 0) {
+ memset(&btlb_info, 0, sizeof btlb_info);
+ }
+#endif
+
+ if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
+ PDC_MODEL_NVA_UNSUPPORTED) {
+ printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
+#if 0
+ panic("SMP kernel required to avoid non-equivalent aliasing");
+#endif
+ }
+}
+
+void disable_sr_hashing(void)
+{
+ int srhash_type, retval;
+ unsigned long space_bits;
+
+ switch (boot_cpu_data.cpu_type) {
+ case pcx: /* We shouldn't get this far. setup.c should prevent it. */
+ BUG();
+ return;
+
+ case pcxs:
+ case pcxt:
+ case pcxt_:
+ srhash_type = SRHASH_PCXST;
+ break;
+
+ case pcxl:
+ srhash_type = SRHASH_PCXL;
+ break;
+
+ case pcxl2: /* pcxl2 doesn't support space register hashing */
+ return;
+
+ default: /* Currently all PA2.0 machines use the same ins. sequence */
+ srhash_type = SRHASH_PA20;
+ break;
+ }
+
+ disable_sr_hashing_asm(srhash_type);
+
+ retval = pdc_spaceid_bits(&space_bits);
+ /* If this procedure isn't implemented, don't panic. */
+ if (retval < 0 && retval != PDC_BAD_OPTION)
+ panic("pdc_spaceid_bits call failed.\n");
+ if (space_bits != 0)
+ panic("SpaceID hashing is still on!\n");
+}
+
+static inline void
+__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+ unsigned long physaddr)
+{
+ flush_dcache_page_asm(physaddr, vmaddr);
+ if (vma->vm_flags & VM_EXEC)
+ flush_icache_page_asm(physaddr, vmaddr);
+}
+
+void flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+ struct vm_area_struct *mpnt;
+ struct prio_tree_iter iter;
+ unsigned long offset;
+ unsigned long addr, old_addr = 0;
+ pgoff_t pgoff;
+
+ if (mapping && !mapping_mapped(mapping)) {
+ set_bit(PG_dcache_dirty, &page->flags);
+ return;
+ }
+
+ flush_kernel_dcache_page(page);
+
+ if (!mapping)
+ return;
+
+ pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
+ /* We have carefully arranged in arch_get_unmapped_area() that
+ * *any* mappings of a file are always congruently mapped (whether
+ * declared as MAP_PRIVATE or MAP_SHARED), so we only need
+ * to flush one address here for them all to become coherent */
+
+ flush_dcache_mmap_lock(mapping);
+ vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
+ addr = mpnt->vm_start + offset;
+
+ /* The TLB is the engine of coherence on parisc: The
+ * CPU is entitled to speculate any page with a TLB
+ * mapping, so here we kill the mapping then flush the
+ * page along a special flush only alias mapping.
+	 * This guarantees that the page is no longer in the
+	 * cache for any process, nor may it be
+ * speculatively read in (until the user or kernel
+ * specifically accesses it, of course) */
+
+ flush_tlb_page(mpnt, addr);
+ if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
+ __flush_cache_page(mpnt, addr, page_to_phys(page));
+ if (old_addr)
+ printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
+ old_addr = addr;
+ }
+ }
+ flush_dcache_mmap_unlock(mapping);
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+/* Defined in arch/parisc/kernel/pacache.S */
+EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
+EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
+EXPORT_SYMBOL(flush_data_cache_local);
+EXPORT_SYMBOL(flush_kernel_icache_range_asm);
+
+void clear_user_page_asm(void *page, unsigned long vaddr)
+{
+ unsigned long flags;
+ /* This function is implemented in assembly in pacache.S */
+ extern void __clear_user_page_asm(void *page, unsigned long vaddr);
+
+ purge_tlb_start(flags);
+ __clear_user_page_asm(page, vaddr);
+ purge_tlb_end(flags);
+}
+
+#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
+int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+
+void __init parisc_setup_cache_timing(void)
+{
+ unsigned long rangetime, alltime;
+ unsigned long size;
+
+ alltime = mfctl(16);
+ flush_data_cache();
+ alltime = mfctl(16) - alltime;
+
+ size = (unsigned long)(_end - _text);
+ rangetime = mfctl(16);
+ flush_kernel_dcache_range((unsigned long)_text, size);
+ rangetime = mfctl(16) - rangetime;
+
+ printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
+ alltime, size, rangetime);
+
+ /* Racy, but if we see an intermediate value, it's ok too... */
+ parisc_cache_flush_threshold = size * alltime / rangetime;
+
+ parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
+ if (!parisc_cache_flush_threshold)
+ parisc_cache_flush_threshold = FLUSH_THRESHOLD;
+
+ if (parisc_cache_flush_threshold > cache_info.dc_size)
+ parisc_cache_flush_threshold = cache_info.dc_size;
+
+ printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
+}
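+
+/*
+ * Worked example of the heuristic above (hypothetical numbers): if flushing
+ * the whole data cache costs 100k cycles and flushing the ~4MB kernel text
+ * range costs 200k cycles, the threshold becomes 4MB * 100k/200k = 2MB.
+ * Ranges at or above that size are then handled with a full cache flush in
+ * flush_user_dcache_range()/flush_user_icache_range() below.
+ */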
+
+extern void purge_kernel_dcache_page(unsigned long);
+extern void clear_user_page_asm(void *page, unsigned long vaddr);
+
+void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
+{
+ unsigned long flags;
+
+ purge_kernel_dcache_page((unsigned long)page);
+ purge_tlb_start(flags);
+ pdtlb_kernel(page);
+ purge_tlb_end(flags);
+ clear_user_page_asm(page, vaddr);
+}
+EXPORT_SYMBOL(clear_user_page);
+
+void flush_kernel_dcache_page_addr(void *addr)
+{
+ unsigned long flags;
+
+ flush_kernel_dcache_page_asm(addr);
+ purge_tlb_start(flags);
+ pdtlb_kernel(addr);
+ purge_tlb_end(flags);
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
+
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+ struct page *pg)
+{
+ /* no coherency needed (all in kmap/kunmap) */
+ copy_user_page_asm(vto, vfrom);
+ if (!parisc_requires_coherency())
+ flush_kernel_dcache_page_asm(vto);
+}
+EXPORT_SYMBOL(copy_user_page);
+
+#ifdef CONFIG_PA8X00
+
+void kunmap_parisc(void *addr)
+{
+ if (parisc_requires_coherency())
+ flush_kernel_dcache_page_addr(addr);
+}
+EXPORT_SYMBOL(kunmap_parisc);
+#endif
+
+void __flush_tlb_range(unsigned long sid, unsigned long start,
+ unsigned long end)
+{
+ unsigned long npages;
+
+ npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
+ flush_tlb_all();
+ else {
+ unsigned long flags;
+
+ mtsp(sid, 1);
+ purge_tlb_start(flags);
+ if (split_tlb) {
+ while (npages--) {
+ pdtlb(start);
+ pitlb(start);
+ start += PAGE_SIZE;
+ }
+ } else {
+ while (npages--) {
+ pdtlb(start);
+ start += PAGE_SIZE;
+ }
+ }
+ purge_tlb_end(flags);
+ }
+}
+
+static void cacheflush_h_tmp_function(void *dummy)
+{
+ flush_cache_all_local();
+}
+
+void flush_cache_all(void)
+{
+ on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
+}
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+ flush_cache_all();
+#else
+ flush_cache_all_local();
+#endif
+}
+
+void
+flush_user_dcache_range(unsigned long start, unsigned long end)
+{
+ if ((end - start) < parisc_cache_flush_threshold)
+ flush_user_dcache_range_asm(start,end);
+ else
+ flush_data_cache();
+}
+
+void
+flush_user_icache_range(unsigned long start, unsigned long end)
+{
+ if ((end - start) < parisc_cache_flush_threshold)
+ flush_user_icache_range_asm(start,end);
+ else
+ flush_instruction_cache();
+}
+
+
+void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ int sr3;
+
+ BUG_ON(!vma->vm_mm->context);
+
+ sr3 = mfsp(3);
+ if (vma->vm_mm->context == sr3) {
+ flush_user_dcache_range(start,end);
+ flush_user_icache_range(start,end);
+ } else {
+ flush_cache_all();
+ }
+}
+
+void
+flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
+{
+ BUG_ON(!vma->vm_mm->context);
+
+ flush_tlb_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
+
+}
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
new file mode 100644
index 00000000..994bcd98
--- /dev/null
+++ b/arch/parisc/kernel/drivers.c
@@ -0,0 +1,922 @@
+/*
+ * drivers.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (c) 1999 The Puffin Group
+ * Copyright (c) 2001 Matthew Wilcox for Hewlett Packard
+ * Copyright (c) 2001 Helge Deller <deller@gmx.de>
+ * Copyright (c) 2001,2002 Ryan Bradetich
+ * Copyright (c) 2004-2005 Thibaut VARENE <varenet@parisc-linux.org>
+ *
+ * The file handles registering devices and drivers, then matching them.
+ * It's the closest we get to a dating agency.
+ *
+ * If you're thinking about modifying this file, here are some gotchas to
+ * bear in mind:
+ * - 715/Mirage device paths have a dummy device between Lasi and its children
+ * - The EISA adapter may show up as a sibling or child of Wax
+ * - Dino has an optionally functional serial port. If firmware enables it,
+ * it shows up as a child of Dino. If firmware disables it, the buswalk
+ * finds it and it shows up as a child of Cujo
+ * - Dino has both parisc and pci devices as children
+ * - parisc devices are discovered in a random order, including children
+ * before parents in some cases.
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/pdc.h>
+#include <asm/parisc-device.h>
+
+/* See comments in include/asm-parisc/pci.h */
+struct hppa_dma_ops *hppa_dma_ops __read_mostly;
+EXPORT_SYMBOL(hppa_dma_ops);
+
+static struct device root = {
+ .init_name = "parisc",
+};
+
+static inline int check_dev(struct device *dev)
+{
+ if (dev->bus == &parisc_bus_type) {
+ struct parisc_device *pdev;
+ pdev = to_parisc_device(dev);
+ return pdev->id.hw_type != HPHW_FAULTY;
+ }
+ return 1;
+}
+
+static struct device *
+parse_tree_node(struct device *parent, int index, struct hardware_path *modpath);
+
+struct recurse_struct {
+ void * obj;
+ int (*fn)(struct device *, void *);
+};
+
+static int descend_children(struct device * dev, void * data)
+{
+ struct recurse_struct * recurse_data = (struct recurse_struct *)data;
+
+ if (recurse_data->fn(dev, recurse_data->obj))
+ return 1;
+ else
+ return device_for_each_child(dev, recurse_data, descend_children);
+}
+
+/**
+ * for_each_padev - Iterate over all devices in the tree
+ * @fn: Function to call for each device.
+ * @data: Data to pass to the called function.
+ *
+ * This performs a depth-first traversal of the tree, calling the
+ * function passed for each node. It calls the function for parents
+ * before children.
+ */
+
+static int for_each_padev(int (*fn)(struct device *, void *), void * data)
+{
+ struct recurse_struct recurse_data = {
+ .obj = data,
+ .fn = fn,
+ };
+ return device_for_each_child(&root, &recurse_data, descend_children);
+}
+
+/**
+ * match_device - Report whether this driver can handle this device
+ * @driver: the PA-RISC driver to try
+ * @dev: the PA-RISC device to try
+ */
+static int match_device(struct parisc_driver *driver, struct parisc_device *dev)
+{
+ const struct parisc_device_id *ids;
+
+ for (ids = driver->id_table; ids->sversion; ids++) {
+ if ((ids->sversion != SVERSION_ANY_ID) &&
+ (ids->sversion != dev->id.sversion))
+ continue;
+
+ if ((ids->hw_type != HWTYPE_ANY_ID) &&
+ (ids->hw_type != dev->id.hw_type))
+ continue;
+
+ if ((ids->hversion != HVERSION_ANY_ID) &&
+ (ids->hversion != dev->id.hversion))
+ continue;
+
+ return 1;
+ }
+ return 0;
+}
+
+static int parisc_driver_probe(struct device *dev)
+{
+ int rc;
+ struct parisc_device *pa_dev = to_parisc_device(dev);
+ struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
+
+ rc = pa_drv->probe(pa_dev);
+
+ if (!rc)
+ pa_dev->driver = pa_drv;
+
+ return rc;
+}
+
+static int parisc_driver_remove(struct device *dev)
+{
+ struct parisc_device *pa_dev = to_parisc_device(dev);
+ struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
+ if (pa_drv->remove)
+ pa_drv->remove(pa_dev);
+
+ return 0;
+}
+
+
+/**
+ * register_parisc_driver - Register this driver if it can handle a device
+ * @driver: the PA-RISC driver to try
+ */
+int register_parisc_driver(struct parisc_driver *driver)
+{
+ /* FIXME: we need this because apparently the sti
+ * driver can be registered twice */
+ if(driver->drv.name) {
+ printk(KERN_WARNING
+ "BUG: skipping previously registered driver %s\n",
+ driver->name);
+ return 1;
+ }
+
+ if (!driver->probe) {
+ printk(KERN_WARNING
+ "BUG: driver %s has no probe routine\n",
+ driver->name);
+ return 1;
+ }
+
+ driver->drv.bus = &parisc_bus_type;
+
+ /* We install our own probe and remove routines */
+ WARN_ON(driver->drv.probe != NULL);
+ WARN_ON(driver->drv.remove != NULL);
+
+ driver->drv.name = driver->name;
+
+ return driver_register(&driver->drv);
+}
+EXPORT_SYMBOL(register_parisc_driver);
+
+
+struct match_count {
+ struct parisc_driver * driver;
+ int count;
+};
+
+static int match_and_count(struct device * dev, void * data)
+{
+ struct match_count * m = data;
+ struct parisc_device * pdev = to_parisc_device(dev);
+
+ if (check_dev(dev)) {
+ if (match_device(m->driver, pdev))
+ m->count++;
+ }
+ return 0;
+}
+
+/**
+ * count_parisc_driver - count # of devices this driver would match
+ * @driver: the PA-RISC driver to try
+ *
+ * Used by IOMMU support to "guess" the right size of the IOPdir.
+ * Formula is something like memsize/(num_iommu * entry_size).
+ */
+int count_parisc_driver(struct parisc_driver *driver)
+{
+ struct match_count m = {
+ .driver = driver,
+ .count = 0,
+ };
+
+ for_each_padev(match_and_count, &m);
+
+ return m.count;
+}
+
+
+
+/**
+ * unregister_parisc_driver - Unregister this driver from the list of drivers
+ * @driver: the PA-RISC driver to unregister
+ */
+int unregister_parisc_driver(struct parisc_driver *driver)
+{
+ driver_unregister(&driver->drv);
+ return 0;
+}
+EXPORT_SYMBOL(unregister_parisc_driver);
+
+struct find_data {
+ unsigned long hpa;
+ struct parisc_device * dev;
+};
+
+static int find_device(struct device * dev, void * data)
+{
+ struct parisc_device * pdev = to_parisc_device(dev);
+ struct find_data * d = (struct find_data*)data;
+
+ if (check_dev(dev)) {
+ if (pdev->hpa.start == d->hpa) {
+ d->dev = pdev;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static struct parisc_device *find_device_by_addr(unsigned long hpa)
+{
+ struct find_data d = {
+ .hpa = hpa,
+ };
+ int ret;
+
+ ret = for_each_padev(find_device, &d);
+ return ret ? d.dev : NULL;
+}
+
+/**
+ * find_pa_parent_type - Find a parent of a specific type
+ * @dev: The device to start searching from
+ * @type: The device type to search for.
+ *
+ * Walks up the device tree looking for a device of the specified type.
+ * If it finds it, it returns it. If not, it returns NULL.
+ */
+const struct parisc_device *
+find_pa_parent_type(const struct parisc_device *padev, int type)
+{
+ const struct device *dev = &padev->dev;
+ while (dev != &root) {
+ struct parisc_device *candidate = to_parisc_device(dev);
+ if (candidate->id.hw_type == type)
+ return candidate;
+ dev = dev->parent;
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_PCI
+static inline int is_pci_dev(struct device *dev)
+{
+ return dev->bus == &pci_bus_type;
+}
+#else
+static inline int is_pci_dev(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+/*
+ * get_node_path fills in @path with the firmware path to the device.
+ * Note that if @dev is a parisc device, we don't fill in the 'mod' field.
+ * This is because both callers pass the parent and fill in the mod
+ * themselves. If @dev is a PCI device, we do fill it in, even though this
+ * is inconsistent.
+ */
+static void get_node_path(struct device *dev, struct hardware_path *path)
+{
+ int i = 5;
+ memset(&path->bc, -1, 6);
+
+ if (is_pci_dev(dev)) {
+ unsigned int devfn = to_pci_dev(dev)->devfn;
+ path->mod = PCI_FUNC(devfn);
+ path->bc[i--] = PCI_SLOT(devfn);
+ dev = dev->parent;
+ }
+
+ while (dev != &root) {
+ if (is_pci_dev(dev)) {
+ unsigned int devfn = to_pci_dev(dev)->devfn;
+ path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5);
+ } else if (dev->bus == &parisc_bus_type) {
+ path->bc[i--] = to_parisc_device(dev)->hw_path;
+ }
+ dev = dev->parent;
+ }
+}
+
+static char *print_hwpath(struct hardware_path *path, char *output)
+{
+ int i;
+ for (i = 0; i < 6; i++) {
+ if (path->bc[i] == -1)
+ continue;
+ output += sprintf(output, "%u/", (unsigned char) path->bc[i]);
+ }
+ output += sprintf(output, "%u", (unsigned char) path->mod);
+ return output;
+}
+
+/**
+ * print_pa_hwpath - Returns hardware path for PA devices
+ * @dev: The device to return the path for
+ * @output: Pointer to a previously-allocated array to place the path in.
+ *
+ * This function fills in the output array with a human-readable path
+ * to a PA device. This string is compatible with that used by PDC, and
+ * may be printed on the outside of the box.
+ */
+char *print_pa_hwpath(struct parisc_device *dev, char *output)
+{
+ struct hardware_path path;
+
+ get_node_path(dev->dev.parent, &path);
+ path.mod = dev->hw_path;
+ return print_hwpath(&path, output);
+}
+EXPORT_SYMBOL(print_pa_hwpath);
+
+#if defined(CONFIG_PCI) || defined(CONFIG_ISA)
+/**
+ * get_pci_node_path - Determines the hardware path for a PCI device
+ * @pdev: The device to return the path for
+ * @path: Pointer to a previously-allocated array to place the path in.
+ *
+ * This function fills in the hardware_path structure with the route to
+ * the specified PCI device. This structure is suitable for passing to
+ * PDC calls.
+ */
+void get_pci_node_path(struct pci_dev *pdev, struct hardware_path *path)
+{
+ get_node_path(&pdev->dev, path);
+}
+EXPORT_SYMBOL(get_pci_node_path);
+
+/**
+ * print_pci_hwpath - Returns hardware path for PCI devices
+ * @dev: The device to return the path for
+ * @output: Pointer to a previously-allocated array to place the path in.
+ *
+ * This function fills in the output array with a human-readable path
+ * to a PCI device. This string is compatible with that used by PDC, and
+ * may be printed on the outside of the box.
+ */
+char *print_pci_hwpath(struct pci_dev *dev, char *output)
+{
+ struct hardware_path path;
+
+ get_pci_node_path(dev, &path);
+ return print_hwpath(&path, output);
+}
+EXPORT_SYMBOL(print_pci_hwpath);
+
+#endif /* defined(CONFIG_PCI) || defined(CONFIG_ISA) */
+
+static void setup_bus_id(struct parisc_device *padev)
+{
+ struct hardware_path path;
+ char name[20];
+ char *output = name;
+ int i;
+
+ get_node_path(padev->dev.parent, &path);
+
+ for (i = 0; i < 6; i++) {
+ if (path.bc[i] == -1)
+ continue;
+ output += sprintf(output, "%u:", (unsigned char) path.bc[i]);
+ }
+ sprintf(output, "%u", (unsigned char) padev->hw_path);
+ dev_set_name(&padev->dev, name);
+}
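+
+/*
+ * The resulting bus id is simply the hardware path with ':' separators,
+ * e.g. a device at hardware path 10/0/14/0 would be named "10:0:14:0"
+ * (illustrative example).
+ */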
+
+struct parisc_device * create_tree_node(char id, struct device *parent)
+{
+ struct parisc_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ dev->hw_path = id;
+ dev->id.hw_type = HPHW_FAULTY;
+
+ dev->dev.parent = parent;
+ setup_bus_id(dev);
+
+ dev->dev.bus = &parisc_bus_type;
+ dev->dma_mask = 0xffffffffUL; /* PARISC devices are 32-bit */
+
+ /* make the generic dma mask a pointer to the parisc one */
+ dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.coherent_dma_mask = dev->dma_mask;
+ if (device_register(&dev->dev)) {
+ kfree(dev);
+ return NULL;
+ }
+
+ return dev;
+}
+
+struct match_id_data {
+ char id;
+ struct parisc_device * dev;
+};
+
+static int match_by_id(struct device * dev, void * data)
+{
+ struct parisc_device * pdev = to_parisc_device(dev);
+ struct match_id_data * d = data;
+
+ if (pdev->hw_path == d->id) {
+ d->dev = pdev;
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * alloc_tree_node - returns a device entry in the iotree
+ * @parent: the parent node in the tree
+ * @id: the element of the module path for this entry
+ *
+ * Checks all the children of @parent for a matching @id. If none
+ * found, it allocates a new device and returns it.
+ */
+static struct parisc_device * alloc_tree_node(struct device *parent, char id)
+{
+ struct match_id_data d = {
+ .id = id,
+ };
+ if (device_for_each_child(parent, &d, match_by_id))
+ return d.dev;
+ else
+ return create_tree_node(id, parent);
+}
+
+static struct parisc_device *create_parisc_device(struct hardware_path *modpath)
+{
+ int i;
+ struct device *parent = &root;
+ for (i = 0; i < 6; i++) {
+ if (modpath->bc[i] == -1)
+ continue;
+ parent = &alloc_tree_node(parent, modpath->bc[i])->dev;
+ }
+ return alloc_tree_node(parent, modpath->mod);
+}
+
+struct parisc_device *
+alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
+{
+ int status;
+ unsigned long bytecnt;
+ u8 iodc_data[32];
+ struct parisc_device *dev;
+ const char *name;
+
+ /* Check to make sure this device has not already been added - Ryan */
+ if (find_device_by_addr(hpa) != NULL)
+ return NULL;
+
+ status = pdc_iodc_read(&bytecnt, hpa, 0, &iodc_data, 32);
+ if (status != PDC_OK)
+ return NULL;
+
+ dev = create_parisc_device(mod_path);
+ if (dev->id.hw_type != HPHW_FAULTY) {
+ printk(KERN_ERR "Two devices have hardware path [%s]. "
+ "IODC data for second device: "
+ "%02x%02x%02x%02x%02x%02x\n"
+ "Rearranging GSC cards sometimes helps\n",
+ parisc_pathname(dev), iodc_data[0], iodc_data[1],
+ iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]);
+ return NULL;
+ }
+
+ dev->id.hw_type = iodc_data[3] & 0x1f;
+ dev->id.hversion = (iodc_data[0] << 4) | ((iodc_data[1] & 0xf0) >> 4);
+ dev->id.hversion_rev = iodc_data[1] & 0x0f;
+ dev->id.sversion = ((iodc_data[4] & 0x0f) << 16) |
+ (iodc_data[5] << 8) | iodc_data[6];
+ dev->hpa.name = parisc_pathname(dev);
+ dev->hpa.start = hpa;
+ /* This is awkward. The STI spec says that gfx devices may occupy
+ * 32MB or 64MB. Unfortunately, we don't know how to tell whether
+ * it's the former or the latter. Assumptions either way can hurt us.
+ */
+ if (hpa == 0xf4000000 || hpa == 0xf8000000) {
+ dev->hpa.end = hpa + 0x03ffffff;
+ } else if (hpa == 0xf6000000 || hpa == 0xfa000000) {
+ dev->hpa.end = hpa + 0x01ffffff;
+ } else {
+ dev->hpa.end = hpa + 0xfff;
+ }
+ dev->hpa.flags = IORESOURCE_MEM;
+ name = parisc_hardware_description(&dev->id);
+ if (name) {
+ strlcpy(dev->name, name, sizeof(dev->name));
+ }
+
+ /* Silently fail things like mouse ports which are subsumed within
+ * the keyboard controller
+ */
+ if ((hpa & 0xfff) == 0 && insert_resource(&iomem_resource, &dev->hpa))
+ printk("Unable to claim HPA %lx for device %s\n",
+ hpa, name);
+
+ return dev;
+}
+
+static int parisc_generic_match(struct device *dev, struct device_driver *drv)
+{
+ return match_device(to_parisc_driver(drv), to_parisc_device(dev));
+}
+
+static ssize_t make_modalias(struct device *dev, char *buf)
+{
+ const struct parisc_device *padev = to_parisc_device(dev);
+ const struct parisc_device_id *id = &padev->id;
+
+ return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
+ (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
+ (u32)id->sversion);
+}
+
+static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct parisc_device *padev;
+ char modalias[40];
+
+ if (!dev)
+ return -ENODEV;
+
+ padev = to_parisc_device(dev);
+ if (!padev)
+ return -ENODEV;
+
+ if (add_uevent_var(env, "PARISC_NAME=%s", padev->name))
+ return -ENOMEM;
+
+ make_modalias(dev, modalias);
+ if (add_uevent_var(env, "MODALIAS=%s", modalias))
+ return -ENOMEM;
+
+ return 0;
+}
+
+#define pa_dev_attr(name, field, format_string) \
+static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct parisc_device *padev = to_parisc_device(dev); \
+ return sprintf(buf, format_string, padev->field); \
+}
+
+#define pa_dev_attr_id(field, format) pa_dev_attr(field, id.field, format)
+
+pa_dev_attr(irq, irq, "%u\n");
+pa_dev_attr_id(hw_type, "0x%02x\n");
+pa_dev_attr(rev, id.hversion_rev, "0x%x\n");
+pa_dev_attr_id(hversion, "0x%03x\n");
+pa_dev_attr_id(sversion, "0x%05x\n");
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return make_modalias(dev, buf);
+}
+
+static struct device_attribute parisc_device_attrs[] = {
+ __ATTR_RO(irq),
+ __ATTR_RO(hw_type),
+ __ATTR_RO(rev),
+ __ATTR_RO(hversion),
+ __ATTR_RO(sversion),
+ __ATTR_RO(modalias),
+ __ATTR_NULL,
+};
+
+struct bus_type parisc_bus_type = {
+ .name = "parisc",
+ .match = parisc_generic_match,
+ .uevent = parisc_uevent,
+ .dev_attrs = parisc_device_attrs,
+ .probe = parisc_driver_probe,
+ .remove = parisc_driver_remove,
+};
+
+/**
+ * register_parisc_device - Locate a driver to manage this device.
+ * @dev: The parisc device.
+ *
+ * Search the driver list for a driver that is willing to manage
+ * this device.
+ */
+int register_parisc_device(struct parisc_device *dev)
+{
+ if (!dev)
+ return 0;
+
+ if (dev->driver)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * match_pci_device - Matches a pci device against a given hardware path
+ * entry.
+ * @dev: the generic device (known to be contained by a pci_dev).
+ * @index: the current BC index
+ * @modpath: the hardware path.
+ * @return: true if the device matches the hardware path.
+ */
+static int match_pci_device(struct device *dev, int index,
+ struct hardware_path *modpath)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int id;
+
+ if (index == 5) {
+ /* we are at the end of the path, and on the actual device */
+ unsigned int devfn = pdev->devfn;
+ return ((modpath->bc[5] == PCI_SLOT(devfn)) &&
+ (modpath->mod == PCI_FUNC(devfn)));
+ }
+
+ id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
+ return (modpath->bc[index] == id);
+}
+
+/**
+ * match_parisc_device - Matches a parisc device against a given hardware
+ * path entry.
+ * @dev: the generic device (known to be contained by a parisc_device).
+ * @index: the current BC index
+ * @modpath: the hardware path.
+ * @return: true if the device matches the hardware path.
+ */
+static int match_parisc_device(struct device *dev, int index,
+ struct hardware_path *modpath)
+{
+ struct parisc_device *curr = to_parisc_device(dev);
+ char id = (index == 6) ? modpath->mod : modpath->bc[index];
+
+ return (curr->hw_path == id);
+}
+
+struct parse_tree_data {
+ int index;
+ struct hardware_path * modpath;
+ struct device * dev;
+};
+
+static int check_parent(struct device * dev, void * data)
+{
+ struct parse_tree_data * d = data;
+
+ if (check_dev(dev)) {
+ if (dev->bus == &parisc_bus_type) {
+ if (match_parisc_device(dev, d->index, d->modpath))
+ d->dev = dev;
+ } else if (is_pci_dev(dev)) {
+ if (match_pci_device(dev, d->index, d->modpath))
+ d->dev = dev;
+ } else if (dev->bus == NULL) {
+ /* we are on a bus bridge */
+ struct device *new = parse_tree_node(dev, d->index, d->modpath);
+ if (new)
+ d->dev = new;
+ }
+ }
+ return d->dev != NULL;
+}
+
+/**
+ * parse_tree_node - returns a device entry in the iotree
+ * @parent: the parent node in the tree
+ * @index: the current BC index
+ * @modpath: the hardware_path struct to match a device against
+ * @return: The corresponding device if found, NULL otherwise.
+ *
+ * Checks all the children of @parent for a node matching @modpath at
+ * the current @index. If none is found, it returns NULL.
+ */
+static struct device *
+parse_tree_node(struct device *parent, int index, struct hardware_path *modpath)
+{
+ struct parse_tree_data d = {
+ .index = index,
+ .modpath = modpath,
+ };
+
+ struct recurse_struct recurse_data = {
+ .obj = &d,
+ .fn = check_parent,
+ };
+
+ if (device_for_each_child(parent, &recurse_data, descend_children))
+ /* nothing */;
+
+ return d.dev;
+}
+
+/**
+ * hwpath_to_device - Finds the generic device corresponding to a given hardware path.
+ * @modpath: the hardware path.
+ * @return: The target device, NULL if not found.
+ */
+struct device *hwpath_to_device(struct hardware_path *modpath)
+{
+ int i;
+ struct device *parent = &root;
+ for (i = 0; i < 6; i++) {
+ if (modpath->bc[i] == -1)
+ continue;
+ parent = parse_tree_node(parent, i, modpath);
+ if (!parent)
+ return NULL;
+ }
+ if (is_pci_dev(parent)) /* pci devices already parse MOD */
+ return parent;
+ else
+ return parse_tree_node(parent, 6, modpath);
+}
+EXPORT_SYMBOL(hwpath_to_device);
+
+/**
+ * device_to_hwpath - Populates the hwpath corresponding to the given device.
+ * @dev: the target device
+ * @path: pointer to a previously allocated hwpath struct to be filled in
+ */
+void device_to_hwpath(struct device *dev, struct hardware_path *path)
+{
+ struct parisc_device *padev;
+ if (dev->bus == &parisc_bus_type) {
+ padev = to_parisc_device(dev);
+ get_node_path(dev->parent, path);
+ path->mod = padev->hw_path;
+ } else if (is_pci_dev(dev)) {
+ get_node_path(dev, path);
+ }
+}
+EXPORT_SYMBOL(device_to_hwpath);
+
+#define BC_PORT_MASK 0x8
+#define BC_LOWER_PORT 0x8
+
+#define BUS_CONVERTER(dev) \
+ ((dev->id.hw_type == HPHW_IOA) || (dev->id.hw_type == HPHW_BCPORT))
+
+#define IS_LOWER_PORT(dev) \
+ ((gsc_readl(dev->hpa.start + offsetof(struct bc_module, io_status)) \
+ & BC_PORT_MASK) == BC_LOWER_PORT)
+
+#define MAX_NATIVE_DEVICES 64
+#define NATIVE_DEVICE_OFFSET 0x1000
+
+#define FLEX_MASK F_EXTEND(0xfffc0000)
+#define IO_IO_LOW offsetof(struct bc_module, io_io_low)
+#define IO_IO_HIGH offsetof(struct bc_module, io_io_high)
+#define READ_IO_IO_LOW(dev) (unsigned long)(signed int)gsc_readl(dev->hpa.start + IO_IO_LOW)
+#define READ_IO_IO_HIGH(dev) (unsigned long)(signed int)gsc_readl(dev->hpa.start + IO_IO_HIGH)
+
+static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
+ struct device *parent);
+
+void walk_lower_bus(struct parisc_device *dev)
+{
+ unsigned long io_io_low, io_io_high;
+
+ if (!BUS_CONVERTER(dev) || IS_LOWER_PORT(dev))
+ return;
+
+ if (dev->id.hw_type == HPHW_IOA) {
+ io_io_low = (unsigned long)(signed int)(READ_IO_IO_LOW(dev) << 16);
+ io_io_high = io_io_low + MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET;
+ } else {
+ io_io_low = (READ_IO_IO_LOW(dev) + ~FLEX_MASK) & FLEX_MASK;
+ io_io_high = (READ_IO_IO_HIGH(dev)+ ~FLEX_MASK) & FLEX_MASK;
+ }
+
+ walk_native_bus(io_io_low, io_io_high, &dev->dev);
+}
+
+/**
+ * walk_native_bus -- Probe a bus for devices
+ * @io_io_low: Base address of this bus.
+ * @io_io_high: Last address of this bus.
+ * @parent: The parent bus device.
+ *
+ * A native bus (e.g. Runway or GSC) may have up to 64 devices on it,
+ * spaced at intervals of 0x1000 bytes. PDC may not inform us of these
+ * devices, so we have to probe for them. Unfortunately, we may find
+ * devices which are not physically connected (such as extra serial &
+ * keyboard ports). This problem is not yet solved.
+ */
+static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
+ struct device *parent)
+{
+ int i, devices_found = 0;
+ unsigned long hpa = io_io_low;
+ struct hardware_path path;
+
+ get_node_path(parent, &path);
+ do {
+ for(i = 0; i < MAX_NATIVE_DEVICES; i++, hpa += NATIVE_DEVICE_OFFSET) {
+ struct parisc_device *dev;
+
+ /* Was the device already added by Firmware? */
+ dev = find_device_by_addr(hpa);
+ if (!dev) {
+ path.mod = i;
+ dev = alloc_pa_dev(hpa, &path);
+ if (!dev)
+ continue;
+
+ register_parisc_device(dev);
+ devices_found++;
+ }
+ walk_lower_bus(dev);
+ }
+ } while(!devices_found && hpa < io_io_high);
+}
+
+#define CENTRAL_BUS_ADDR F_EXTEND(0xfff80000)
+
+/**
+ * walk_central_bus - Find devices attached to the central bus
+ *
+ * PDC doesn't tell us about all devices in the system. This routine
+ * finds devices connected to the central bus.
+ */
+void walk_central_bus(void)
+{
+ walk_native_bus(CENTRAL_BUS_ADDR,
+ CENTRAL_BUS_ADDR + (MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET),
+ &root);
+}
+
+static void print_parisc_device(struct parisc_device *dev)
+{
+ char hw_path[64];
+ static int count;
+
+ print_pa_hwpath(dev, hw_path);
+ printk(KERN_INFO "%d. %s at 0x%p [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+ ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
+ dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
+
+ if (dev->num_addrs) {
+ int k;
+ printk(", additional addresses: ");
+ for (k = 0; k < dev->num_addrs; k++)
+ printk("0x%lx ", dev->addr[k]);
+ }
+ printk("\n");
+}
+
+/**
+ * init_parisc_bus - Some preparation to be done before inventory
+ */
+void init_parisc_bus(void)
+{
+ if (bus_register(&parisc_bus_type))
+ panic("Could not register PA-RISC bus type\n");
+ if (device_register(&root))
+ panic("Could not register PA-RISC root device\n");
+ get_device(&root);
+}
+
+
+static int print_one_device(struct device * dev, void * data)
+{
+ struct parisc_device * pdev = to_parisc_device(dev);
+
+ if (check_dev(dev))
+ print_parisc_device(pdev);
+ return 0;
+}
+
+/**
+ * print_parisc_devices - Print out a list of devices found in this system
+ */
+void print_parisc_devices(void)
+{
+ for_each_padev(print_one_device, NULL);
+}
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
new file mode 100644
index 00000000..07ef351e
--- /dev/null
+++ b/arch/parisc/kernel/entry.S
@@ -0,0 +1,2381 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * kernel entry points (interruptions, system call wrappers)
+ * Copyright (C) 1999,2000 Philipp Rumpf
+ * Copyright (C) 1999 SuSE GmbH Nuernberg
+ * Copyright (C) 2000 Hewlett-Packard (John Marvin)
+ * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/asm-offsets.h>
+
+/* we have the following possibilities to act on an interruption:
+ * - handle in assembly and use shadowed registers only
+ * - save registers to kernel stack and handle in assembly or C */
+
+
+#include <asm/psw.h>
+#include <asm/cache.h> /* for L1_CACHE_SHIFT */
+#include <asm/assembly.h> /* for LDREG/STREG defines */
+#include <asm/pgtable.h>
+#include <asm/signal.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+
+#include <linux/linkage.h>
+
+#ifdef CONFIG_64BIT
+ .level 2.0w
+#else
+ .level 2.0
+#endif
+
+ .import pa_dbit_lock,data
+
+ /* space_to_prot macro creates a prot id from a space id */
+
+#if (SPACEID_SHIFT) == 0
+ .macro space_to_prot spc prot
+ depd,z \spc,62,31,\prot
+ .endm
+#else
+ .macro space_to_prot spc prot
+ extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
+ .endm
+#endif
+
+ /* Switch to virtual mapping, trashing only %r1 */
+ .macro virt_map
+ /* pcxt_ssm_bug */
+	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
+ mtsp %r0, %sr4
+ mtsp %r0, %sr5
+ mfsp %sr7, %r1
+ or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
+ mtsp %r1, %sr3
+ tovirt_r1 %r29
+ load32 KERNEL_PSW, %r1
+
+ rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
+ mtsp %r0, %sr6
+ mtsp %r0, %sr7
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ mtctl %r1, %ipsw
+ load32 4f, %r1
+ mtctl %r1, %cr18 /* Set IIAOQ tail */
+ ldo 4(%r1), %r1
+ mtctl %r1, %cr18 /* Set IIAOQ head */
+ rfir
+ nop
+4:
+ .endm
+
+ /*
+ * The "get_stack" macros are responsible for determining the
+ * kernel stack value.
+ *
+ * If sr7 == 0
+ * Already using a kernel stack, so call the
+ * get_stack_use_r30 macro to push a pt_regs structure
+ * on the stack, and store registers there.
+ * else
+ * Need to set up a kernel stack, so call the
+ * get_stack_use_cr30 macro to set up a pointer
+ * to the pt_regs structure contained within the
+ * task pointer pointed to by cr30. Set the stack
+ * pointer to point to the end of the task structure.
+ *
+ * Note that we use shadowed registers for temps until
+ * we can save %r26 and %r29. %r26 is used to preserve
+ * %r8 (a shadowed register) which temporarily contained
+ * either the fault type ("code") or the eirr. We need
+ * to use a non-shadowed register to carry the value over
+ * the rfir in virt_map. We use %r26 since this value winds
+ * up being passed as the argument to either do_cpu_irq_mask
+	 * or handle_interruption. %r29 is used to hold a pointer to
+	 * the register save area, and once again, it needs to
+ * be a non-shadowed register so that it survives the rfir.
+ *
+ * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
+ */
+
+ .macro get_stack_use_cr30
+
+ /* we save the registers in the task struct */
+
+ mfctl %cr30, %r1
+ tophys %r1,%r9
+ LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
+ tophys %r1,%r9
+ ldo TASK_REGS(%r9),%r9
+ STREG %r30, PT_GR30(%r9)
+ STREG %r29,PT_GR29(%r9)
+ STREG %r26,PT_GR26(%r9)
+ copy %r9,%r29
+ mfctl %cr30, %r1
+ ldo THREAD_SZ_ALGN(%r1), %r30
+ .endm
+
+ .macro get_stack_use_r30
+
+ /* we put a struct pt_regs on the stack and save the registers there */
+
+ tophys %r30,%r9
+ STREG %r30,PT_GR30(%r9)
+ ldo PT_SZ_ALGN(%r30),%r30
+ STREG %r29,PT_GR29(%r9)
+ STREG %r26,PT_GR26(%r9)
+ copy %r9,%r29
+ .endm
+
+ .macro rest_stack
+ LDREG PT_GR1(%r29), %r1
+ LDREG PT_GR30(%r29),%r30
+ LDREG PT_GR29(%r29),%r29
+ .endm
+
+ /* default interruption handler
+ * (calls traps.c:handle_interruption) */
+ .macro def code
+ b intr_save
+ ldi \code, %r8
+ .align 32
+ .endm
+
+ /* Interrupt interruption handler
+ * (calls irq.c:do_cpu_irq_mask) */
+ .macro extint code
+ b intr_extint
+ mfsp %sr7,%r16
+ .align 32
+ .endm
+
+ .import os_hpmc, code
+
+ /* HPMC handler */
+ .macro hpmc code
+ nop /* must be a NOP, will be patched later */
+ load32 PA(os_hpmc), %r3
+ bv,n 0(%r3)
+ nop
+ .word 0 /* checksum (will be patched) */
+ .word PA(os_hpmc) /* address of handler */
+ .word 0 /* length of handler */
+ .endm
+
+ /*
+ * Performance Note: Instructions will be moved up into
+ * this part of the code later on, once we are sure
+ * that the tlb miss handlers are close to final form.
+ */
+
+ /* Register definitions for tlb miss handler macros */
+
+ va = r8 /* virtual address for which the trap occurred */
+ spc = r24 /* space for which the trap occurred */
+
+#ifndef CONFIG_64BIT
+
+ /*
+ * itlb miss interruption handler (parisc 1.1 - 32 bit)
+ */
+
+ .macro itlb_11 code
+
+ mfctl %pcsq, spc
+ b itlb_miss_11
+ mfctl %pcoq, va
+
+ .align 32
+ .endm
+#endif
+
+ /*
+ * itlb miss interruption handler (parisc 2.0)
+ */
+
+ .macro itlb_20 code
+ mfctl %pcsq, spc
+#ifdef CONFIG_64BIT
+ b itlb_miss_20w
+#else
+ b itlb_miss_20
+#endif
+ mfctl %pcoq, va
+
+ .align 32
+ .endm
+
+#ifndef CONFIG_64BIT
+ /*
+ * naitlb miss interruption handler (parisc 1.1 - 32 bit)
+ */
+
+ .macro naitlb_11 code
+
+ mfctl %isr,spc
+ b naitlb_miss_11
+ mfctl %ior,va
+
+ .align 32
+ .endm
+#endif
+
+ /*
+ * naitlb miss interruption handler (parisc 2.0)
+ */
+
+ .macro naitlb_20 code
+
+ mfctl %isr,spc
+#ifdef CONFIG_64BIT
+ b naitlb_miss_20w
+#else
+ b naitlb_miss_20
+#endif
+ mfctl %ior,va
+
+ .align 32
+ .endm
+
+#ifndef CONFIG_64BIT
+ /*
+ * dtlb miss interruption handler (parisc 1.1 - 32 bit)
+ */
+
+ .macro dtlb_11 code
+
+ mfctl %isr, spc
+ b dtlb_miss_11
+ mfctl %ior, va
+
+ .align 32
+ .endm
+#endif
+
+ /*
+ * dtlb miss interruption handler (parisc 2.0)
+ */
+
+ .macro dtlb_20 code
+
+ mfctl %isr, spc
+#ifdef CONFIG_64BIT
+ b dtlb_miss_20w
+#else
+ b dtlb_miss_20
+#endif
+ mfctl %ior, va
+
+ .align 32
+ .endm
+
+#ifndef CONFIG_64BIT
+ /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
+
+ .macro nadtlb_11 code
+
+ mfctl %isr,spc
+ b nadtlb_miss_11
+ mfctl %ior,va
+
+ .align 32
+ .endm
+#endif
+
+ /* nadtlb miss interruption handler (parisc 2.0) */
+
+ .macro nadtlb_20 code
+
+ mfctl %isr,spc
+#ifdef CONFIG_64BIT
+ b nadtlb_miss_20w
+#else
+ b nadtlb_miss_20
+#endif
+ mfctl %ior,va
+
+ .align 32
+ .endm
+
+#ifndef CONFIG_64BIT
+ /*
+ * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
+ */
+
+ .macro dbit_11 code
+
+ mfctl %isr,spc
+ b dbit_trap_11
+ mfctl %ior,va
+
+ .align 32
+ .endm
+#endif
+
+ /*
+ * dirty bit trap interruption handler (parisc 2.0)
+ */
+
+ .macro dbit_20 code
+
+ mfctl %isr,spc
+#ifdef CONFIG_64BIT
+ b dbit_trap_20w
+#else
+ b dbit_trap_20
+#endif
+ mfctl %ior,va
+
+ .align 32
+ .endm
+
+	/* In LP64, the space register contains part of the upper 32 bits of
+	 * the faulting address. We have to extract this and place it in the
+	 * va, zeroing the corresponding bits in the space register */
+ .macro space_adjust spc,va,tmp
+#ifdef CONFIG_64BIT
+ extrd,u \spc,63,SPACEID_SHIFT,\tmp
+ depd %r0,63,SPACEID_SHIFT,\spc
+ depd \tmp,31,SPACEID_SHIFT,\va
+#endif
+ .endm
+
+ .import swapper_pg_dir,code
+
+ /* Get the pgd. For faults on space zero (kernel space), this
+ * is simply swapper_pg_dir. For user space faults, the
+ * pgd is stored in %cr25 */
+ .macro get_pgd spc,reg
+ ldil L%PA(swapper_pg_dir),\reg
+ ldo R%PA(swapper_pg_dir)(\reg),\reg
+ or,COND(=) %r0,\spc,%r0
+ mfctl %cr25,\reg
+ .endm
+
+ /*
+ space_check(spc,tmp,fault)
+
+ spc - The space we saw the fault with.
+ tmp - The place to store the current space.
+ fault - Function to call on failure.
+
+ Only allow faults on different spaces from the
+ currently active one if we're the kernel
+
+ */
+ .macro space_check spc,tmp,fault
+ mfsp %sr7,\tmp
+ or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
+ * as kernel, so defeat the space
+ * check if it is */
+ copy \spc,\tmp
+ or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
+ cmpb,COND(<>),n \tmp,\spc,\fault
+ .endm
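As an aside, the nullification tricks in this macro are easier to follow as a C sketch. In the sketch below, current_space() is a made-up stand-in for "mfsp %sr7", not a real kernel helper.

	unsigned long current_space(void);	/* stand-in for mfsp %sr7 */

	static void space_check_sketch(unsigned long spc, void (*fault)(void))
	{
		unsigned long cur = current_space();

		/* Kernel-space faults (spc == 0) are always allowed, as are
		 * faults taken while executing as the kernel (cur == 0, e.g.
		 * on the gateway page); otherwise the fault must be for the
		 * currently active space. */
		if (spc != 0 && cur != 0 && cur != spc)
			fault();
	}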
+
+ /* Look up a PTE in a 2-Level scheme (faulting at each
+	 * level if the entry isn't present)
+ *
+ * NOTE: we use ldw even for LP64, since the short pointers
+ * can address up to 1TB
+ */
+ .macro L2_ptep pmd,pte,index,va,fault
+#if PT_NLEVELS == 3
+ extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+#else
+ extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+#endif
+ dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ copy %r0,\pte
+ ldw,s \index(\pmd),\pmd
+ bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
+ dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
+ copy \pmd,%r9
+ SHLREG %r9,PxD_VALUE_SHIFT,\pmd
+ extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+ dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
+ LDREG %r0(\pmd),\pte /* pmd is now pte */
+ bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
+ .endm
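The same walk, sketched in C. Every SK_-prefixed name below, as well as sk_pmd_to_ptes(), is a placeholder invented for illustration; the real constants come from the parisc pgtable headers.

	#define SK_PAGE_SHIFT	12		/* placeholder values only */
	#define SK_PMD_SHIFT	22
	#define SK_PTRS_PER_PMD	512
	#define SK_PTRS_PER_PTE	1024
	#define SK_PRESENT	0x1UL

	/* Stand-in for "clear flags, shift by PxD_VALUE_SHIFT". */
	unsigned long *sk_pmd_to_ptes(unsigned long pmd);

	static unsigned long l2_ptep_sketch(unsigned long *pmd_table,
					    unsigned long va, void (*fault)(void))
	{
		unsigned long pmd, pte;

		pmd = pmd_table[(va >> SK_PMD_SHIFT) % SK_PTRS_PER_PMD];
		if (!(pmd & SK_PRESENT))
			fault();		/* first-level entry missing */

		pte = sk_pmd_to_ptes(pmd)[(va >> SK_PAGE_SHIFT) % SK_PTRS_PER_PTE];
		if (!(pte & SK_PRESENT))
			fault();		/* pte missing */
		return pte;
	}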
+
+ /* Look up PTE in a 3-Level scheme.
+ *
+ * Here we implement a Hybrid L2/L3 scheme: we allocate the
+ * first pmd adjacent to the pgd. This means that we can
+ * subtract a constant offset to get to it. The pmd and pgd
+ * sizes are arranged so that a single pmd covers 4GB (giving
+ * a full LP64 process access to 8TB) so our lookups are
+ * effectively L2 for the first 4GB of the kernel (i.e. for
+ * all ILP32 processes and all the kernel for machines with
+ * under 4GB of memory) */
+ .macro L3_ptep pgd,pte,index,va,fault
+#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
+ extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+ copy %r0,\pte
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ ldw,s \index(\pgd),\pgd
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ shld \pgd,PxD_VALUE_SHIFT,\index
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ copy \index,\pgd
+ extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
+#endif
+ L2_ptep \pgd,\pte,\index,\va,\fault
+ .endm
+
+ /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
+ * don't needlessly dirty the cache line if it was already set */
+ .macro update_ptep ptep,pte,tmp,tmp1
+ ldi _PAGE_ACCESSED,\tmp1
+ or \tmp1,\pte,\tmp
+ and,COND(<>) \tmp1,\pte,%r0
+ STREG \tmp,0(\ptep)
+ .endm
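In C terms the macro does no more than the following, with accessed_bit standing in for _PAGE_ACCESSED:

	static void update_ptep_sketch(unsigned long *ptep, unsigned long pte,
				       unsigned long accessed_bit)
	{
		if (!(pte & accessed_bit))	/* store is nullified otherwise */
			*ptep = pte | accessed_bit;
	}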
+
+ /* Set the dirty bit (and accessed bit). No need to be
+ * clever, this is only used from the dirty fault */
+ .macro update_dirty ptep,pte,tmp
+ ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
+ or \tmp,\pte,\pte
+ STREG \pte,0(\ptep)
+ .endm
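And the dirty-fault variant, again with placeholder bit arguments, is simply an unconditional write-back:

	static void update_dirty_sketch(unsigned long *ptep, unsigned long pte,
					unsigned long accessed_bit,
					unsigned long dirty_bit)
	{
		*ptep = pte | accessed_bit | dirty_bit;
	}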
+
+	/* bitshift difference between a PFN (based on the kernel's PAGE_SIZE)
+	 * and a CPU TLB 4k PFN (4k => 12 bits to shift) */
+ #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
+
+ /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+ .macro convert_for_tlb_insert20 pte
+ extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+ 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+ depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
+ (63-58)+PAGE_ADD_SHIFT,\pte
+ .endm
+
+ /* Convert the pte and prot to tlb insertion values. How
+ * this happens is quite subtle, read below */
+ .macro make_insert_tlb spc,pte,prot
+ space_to_prot \spc \prot /* create prot id from space */
+ /* The following is the real subtlety. This is depositing
+ * T <-> _PAGE_REFTRAP
+ * D <-> _PAGE_DIRTY
+ * B <-> _PAGE_DMB (memory break)
+ *
+ * Then incredible subtlety: The access rights are
+	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
+ * See 3-14 of the parisc 2.0 manual
+ *
+ * Finally, _PAGE_READ goes in the top bit of PL1 (so we
+ * trigger an access rights trap in user space if the user
+	 * tries to read an unreadable page) */
+ depd \pte,8,7,\prot
+
+ /* PAGE_USER indicates the page can be read with user privileges,
+ * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
+	 * contains _PAGE_READ) */
+ extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
+ depdi 7,11,3,\prot
+ /* If we're a gateway page, drop PL2 back to zero for promotion
+ * to kernel privilege (so we can execute the page as kernel).
+	 * Any privilege promotion page always denies read and write */
+ extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
+ depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
+
+ /* Enforce uncacheable pages.
+	 * This should ONLY be used for MMIO on PA 2.0 machines.
+ * Memory/DMA is cache coherent on all PA2.0 machines we support
+ * (that means T-class is NOT supported) and the memory controllers
+	 * on most of those machines only handle cache transactions.
+ */
+ extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
+ depdi 1,12,1,\prot
+
+ /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+ convert_for_tlb_insert20 \pte
+ .endm
+
+ /* Identical macro to make_insert_tlb above, except it
+ * makes the tlb entry for the differently formatted pa11
+ * insertion instructions */
+ .macro make_insert_tlb_11 spc,pte,prot
+ zdep \spc,30,15,\prot
+ dep \pte,8,7,\prot
+ extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
+ depi 1,12,1,\prot
+ extru,= \pte,_PAGE_USER_BIT,1,%r0
+ depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
+ extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
+ depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
+
+ /* Get rid of prot bits and convert to page addr for iitlba */
+
+ depi 0,31,ASM_PFN_PTE_SHIFT,\pte
+ SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
+ .endm
+
+ /* This is for ILP32 PA2.0 only. The TLB insertion needs
+ * to extend into I/O space if the address is 0xfXXXXXXX
+ * so we extend the f's into the top word of the pte in
+ * this case */
+ .macro f_extend pte,tmp
+ extrd,s \pte,42,4,\tmp
+ addi,<> 1,\tmp,%r0
+ extrd,s \pte,63,25,\pte
+ .endm
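Ignoring that the macro operates on the pte in its shifted tlb-insert form, the idea is the same one f_extend() in firmware.c (later in this patch) applies to narrow-firmware addresses; a rough C sketch:

	static unsigned long long f_extend_sketch(unsigned int addr)
	{
		if ((addr & 0xf0000000u) == 0xf0000000u)	/* 0xfXXXXXXX: I/O space */
			return 0xffffffff00000000ULL | addr;
		return addr;
	}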
+
+	/* The alias region is an 8MB-aligned, 16MB region used to clear
+	 * and copy user pages at addresses congruent with the user
+	 * virtual address.
+	 *
+	 * To use the alias page, you set %r26 up with the "to" TLB
+	 * entry (identifying the physical page) and %r23 up with
+	 * the "from" TLB entry (or nothing if only a "to" entry---for
+	 * clear_user_page_asm) */
+ .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
+ cmpib,COND(<>),n 0,\spc,\fault
+ ldil L%(TMPALIAS_MAP_START),\tmp
+#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
+ /* on LP64, ldi will sign extend into the upper 32 bits,
+ * which is behaviour we don't want */
+ depdi 0,31,32,\tmp
+#endif
+ copy \va,\tmp1
+ depi 0,31,23,\tmp1
+ cmpb,COND(<>),n \tmp,\tmp1,\fault
+ mfctl %cr19,\tmp /* iir */
+ /* get the opcode (first six bits) into \tmp */
+ extrw,u \tmp,5,6,\tmp
+ /*
+ * Only setting the T bit prevents data cache movein
+ * Setting access rights to zero prevents instruction cache movein
+ *
+ * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
+ * to type field and _PAGE_READ goes to top bit of PL1
+ */
+ ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
+ /*
+ * so if the opcode is one (i.e. this is a memory management
+ * instruction) nullify the next load so \prot is only T.
+ * Otherwise this is a normal data operation
+ */
+ cmpiclr,= 0x01,\tmp,%r0
+ ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
+.ifc \patype,20
+ depd,z \prot,8,7,\prot
+.else
+.ifc \patype,11
+ depw,z \prot,8,7,\prot
+.else
+ .error "undefined PA type to do_alias"
+.endif
+.endif
+ /*
+ * OK, it is in the temp alias region, check whether "from" or "to".
+ * Check "subtle" note in pacache.S re: r23/r26.
+ */
+#ifdef CONFIG_64BIT
+ extrd,u,*= \va,41,1,%r0
+#else
+ extrw,u,= \va,9,1,%r0
+#endif
+ or,COND(tr) %r23,%r0,\pte
+ or %r26,%r0,\pte
+ .endm
+
+
+ /*
+ * Align fault_vector_20 on 4K boundary so that both
+ * fault_vector_11 and fault_vector_20 are on the
+ * same page. This is only necessary as long as we
+ * write protect the kernel text, which we may stop
+ * doing once we use large page translations to cover
+ * the static part of the kernel address space.
+ */
+
+ .text
+
+ .align PAGE_SIZE
+
+ENTRY(fault_vector_20)
+ /* First vector is invalid (0) */
+ .ascii "cows can fly"
+ .byte 0
+ .align 32
+
+ hpmc 1
+ def 2
+ def 3
+ extint 4
+ def 5
+ itlb_20 6
+ def 7
+ def 8
+ def 9
+ def 10
+ def 11
+ def 12
+ def 13
+ def 14
+ dtlb_20 15
+ naitlb_20 16
+ nadtlb_20 17
+ def 18
+ def 19
+ dbit_20 20
+ def 21
+ def 22
+ def 23
+ def 24
+ def 25
+ def 26
+ def 27
+ def 28
+ def 29
+ def 30
+ def 31
+END(fault_vector_20)
+
+#ifndef CONFIG_64BIT
+
+ .align 2048
+
+ENTRY(fault_vector_11)
+ /* First vector is invalid (0) */
+ .ascii "cows can fly"
+ .byte 0
+ .align 32
+
+ hpmc 1
+ def 2
+ def 3
+ extint 4
+ def 5
+ itlb_11 6
+ def 7
+ def 8
+ def 9
+ def 10
+ def 11
+ def 12
+ def 13
+ def 14
+ dtlb_11 15
+ naitlb_11 16
+ nadtlb_11 17
+ def 18
+ def 19
+ dbit_11 20
+ def 21
+ def 22
+ def 23
+ def 24
+ def 25
+ def 26
+ def 27
+ def 28
+ def 29
+ def 30
+ def 31
+END(fault_vector_11)
+
+#endif
+ /* Fault vector is separately protected and *must* be on its own page */
+ .align PAGE_SIZE
+ENTRY(end_fault_vector)
+
+ .import handle_interruption,code
+ .import do_cpu_irq_mask,code
+
+ /*
+ * r26 = function to be called
+ * r25 = argument to pass in
+ * r24 = flags for do_fork()
+ *
+ * Kernel threads don't ever return, so they don't need
+ * a true register context. We just save away the arguments
+ * for copy_thread/ret_ to properly set up the child.
+ */
+
+#define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */
+#define CLONE_UNTRACED 0x00800000
+
+ .import do_fork
+ENTRY(__kernel_thread)
+ STREG %r2, -RP_OFFSET(%r30)
+
+ copy %r30, %r1
+ ldo PT_SZ_ALGN(%r30),%r30
+#ifdef CONFIG_64BIT
+ /* Yo, function pointers in wide mode are little structs... -PB */
+ ldd 24(%r26), %r2
+	STREG	%r2, PT_GR27(%r1)	/* Store child's %dp */
+ ldd 16(%r26), %r26
+
+ STREG %r22, PT_GR22(%r1) /* save r22 (arg5) */
+ copy %r0, %r22 /* user_tid */
+#endif
+ STREG %r26, PT_GR26(%r1) /* Store function & argument for child */
+ STREG %r25, PT_GR25(%r1)
+ ldil L%CLONE_UNTRACED, %r26
+ ldo CLONE_VM(%r26), %r26 /* Force CLONE_VM since only init_mm */
+ or %r26, %r24, %r26 /* will have kernel mappings. */
+ ldi 1, %r25 /* stack_start, signals kernel thread */
+ stw %r0, -52(%r30) /* user_tid */
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+ BL do_fork, %r2
+ copy %r1, %r24 /* pt_regs */
+
+ /* Parent Returns here */
+
+ LDREG -PT_SZ_ALGN-RP_OFFSET(%r30), %r2
+ ldo -PT_SZ_ALGN(%r30), %r30
+ bv %r0(%r2)
+ nop
+ENDPROC(__kernel_thread)
+
+ /*
+ * Child Returns here
+ *
+ * copy_thread moved args from temp save area set up above
+ * into task save area.
+ */
+
+ENTRY(ret_from_kernel_thread)
+
+ /* Call schedule_tail first though */
+ BL schedule_tail, %r2
+ nop
+
+ LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
+ LDREG TASK_PT_GR25(%r1), %r26
+#ifdef CONFIG_64BIT
+ LDREG TASK_PT_GR27(%r1), %r27
+ LDREG TASK_PT_GR22(%r1), %r22
+#endif
+ LDREG TASK_PT_GR26(%r1), %r1
+ ble 0(%sr7, %r1)
+ copy %r31, %r2
+
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+ loadgp /* Thread could have been in a module */
+#endif
+#ifndef CONFIG_64BIT
+ b sys_exit
+#else
+ load32 sys_exit, %r1
+ bv %r0(%r1)
+#endif
+ ldi 0, %r26
+ENDPROC(ret_from_kernel_thread)
+
+ .import sys_execve, code
+ENTRY(__execve)
+ copy %r2, %r15
+ copy %r30, %r16
+ ldo PT_SZ_ALGN(%r30), %r30
+ STREG %r26, PT_GR26(%r16)
+ STREG %r25, PT_GR25(%r16)
+ STREG %r24, PT_GR24(%r16)
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+ BL sys_execve, %r2
+ copy %r16, %r26
+
+ cmpib,=,n 0,%r28,intr_return /* forward */
+
+ /* yes, this will trap and die. */
+ copy %r15, %r2
+ copy %r16, %r30
+ bv %r0(%r2)
+ nop
+ENDPROC(__execve)
+
+
+ /*
+ * struct task_struct *_switch_to(struct task_struct *prev,
+ * struct task_struct *next)
+ *
+ * switch kernel stacks and return prev */
+ENTRY(_switch_to)
+ STREG %r2, -RP_OFFSET(%r30)
+
+ callee_save_float
+ callee_save
+
+ load32 _switch_to_ret, %r2
+
+ STREG %r2, TASK_PT_KPC(%r26)
+ LDREG TASK_PT_KPC(%r25), %r2
+
+ STREG %r30, TASK_PT_KSP(%r26)
+ LDREG TASK_PT_KSP(%r25), %r30
+ LDREG TASK_THREAD_INFO(%r25), %r25
+ bv %r0(%r2)
+ mtctl %r25,%cr30
+
+_switch_to_ret:
+ mtctl %r0, %cr0 /* Needed for single stepping */
+ callee_rest
+ callee_rest_float
+
+ LDREG -RP_OFFSET(%r30), %r2
+ bv %r0(%r2)
+ copy %r26, %r28
+ENDPROC(_switch_to)
+
+ /*
+ * Common rfi return path for interruptions, kernel execve, and
+ * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
+ * return via this path if the signal was received when the process
+ * was running; if the process was blocked on a syscall then the
+ * normal syscall_exit path is used. All syscalls for traced
+	 * processes exit via intr_restore.
+ *
+	 * XXX If any syscalls that change a process's space id ever exit
+	 * this way, then we will need to copy %sr3 into PT_SR[3..7], and
+ * adjust IASQ[0..1].
+ *
+ */
+
+ .align PAGE_SIZE
+
+ENTRY(syscall_exit_rfi)
+ mfctl %cr30,%r16
+ LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
+ ldo TASK_REGS(%r16),%r16
+ /* Force iaoq to userspace, as the user has had access to our current
+	 * context via sigcontext. Also filter the PSW for the same reason.
+ */
+ LDREG PT_IAOQ0(%r16),%r19
+ depi 3,31,2,%r19
+ STREG %r19,PT_IAOQ0(%r16)
+ LDREG PT_IAOQ1(%r16),%r19
+ depi 3,31,2,%r19
+ STREG %r19,PT_IAOQ1(%r16)
+ LDREG PT_PSW(%r16),%r19
+ load32 USER_PSW_MASK,%r1
+#ifdef CONFIG_64BIT
+ load32 USER_PSW_HI_MASK,%r20
+ depd %r20,31,32,%r1
+#endif
+ and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
+ load32 USER_PSW,%r1
+ or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
+ STREG %r19,PT_PSW(%r16)
+
+ /*
+ * If we aren't being traced, we never saved space registers
+ * (we don't store them in the sigcontext), so set them
+ * to "proper" values now (otherwise we'll wind up restoring
+ * whatever was last stored in the task structure, which might
+ * be inconsistent if an interrupt occurred while on the gateway
+ * page). Note that we may be "trashing" values the user put in
+ * them, but we don't support the user changing them.
+ */
+
+ STREG %r0,PT_SR2(%r16)
+ mfsp %sr3,%r19
+ STREG %r19,PT_SR0(%r16)
+ STREG %r19,PT_SR1(%r16)
+ STREG %r19,PT_SR3(%r16)
+ STREG %r19,PT_SR4(%r16)
+ STREG %r19,PT_SR5(%r16)
+ STREG %r19,PT_SR6(%r16)
+ STREG %r19,PT_SR7(%r16)
+
+intr_return:
+	/* NOTE: Need to enable interrupts in case we schedule. */
+ ssm PSW_SM_I, %r0
+
+intr_check_resched:
+
+ /* check for reschedule */
+ mfctl %cr30,%r1
+ LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
+ bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
+
+ .import do_notify_resume,code
+intr_check_sig:
+ /* As above */
+ mfctl %cr30,%r1
+ LDREG TI_FLAGS(%r1),%r19
+ ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20
+ and,COND(<>) %r19, %r20, %r0
+ b,n intr_restore /* skip past if we've nothing to do */
+
+ /* This check is critical to having LWS
+ * working. The IASQ is zero on the gateway
+ * page and we cannot deliver any signals until
+ * we get off the gateway page.
+ *
+ * Only do signals if we are returning to user space
+ */
+ LDREG PT_IASQ0(%r16), %r20
+ cmpib,COND(=),n 0,%r20,intr_restore /* backward */
+ LDREG PT_IASQ1(%r16), %r20
+ cmpib,COND(=),n 0,%r20,intr_restore /* backward */
+
+ copy %r0, %r25 /* long in_syscall = 0 */
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ BL do_notify_resume,%r2
+ copy %r16, %r26 /* struct pt_regs *regs */
+
+ b,n intr_check_sig
+
+intr_restore:
+ copy %r16,%r29
+ ldo PT_FR31(%r29),%r1
+ rest_fp %r1
+ rest_general %r29
+
+ /* inverse of virt_map */
+ pcxt_ssm_bug
+ rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
+ tophys_r1 %r29
+
+ /* Restore space id's and special cr's from PT_REGS
+ * structure pointed to by r29
+ */
+ rest_specials %r29
+
+ /* IMPORTANT: rest_stack restores r29 last (we are using it)!
+ * It also restores r1 and r30.
+ */
+ rest_stack
+
+ rfi
+ nop
+
+#ifndef CONFIG_PREEMPT
+# define intr_do_preempt intr_restore
+#endif /* !CONFIG_PREEMPT */
+
+ .import schedule,code
+intr_do_resched:
+ /* Only call schedule on return to userspace. If we're returning
+ * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
+ * we jump back to intr_restore.
+ */
+ LDREG PT_IASQ0(%r16), %r20
+ cmpib,COND(=) 0, %r20, intr_do_preempt
+ nop
+ LDREG PT_IASQ1(%r16), %r20
+ cmpib,COND(=) 0, %r20, intr_do_preempt
+ nop
+
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ ldil L%intr_check_sig, %r2
+#ifndef CONFIG_64BIT
+ b schedule
+#else
+ load32 schedule, %r20
+ bv %r0(%r20)
+#endif
+ ldo R%intr_check_sig(%r2), %r2
+
+ /* preempt the current task on returning to kernel
+ * mode from an interrupt, iff need_resched is set,
+	 * and preempt_count is 0; otherwise, we continue on
+ * our merry way back to the current running task.
+ */
+#ifdef CONFIG_PREEMPT
+ .import preempt_schedule_irq,code
+intr_do_preempt:
+ rsm PSW_SM_I, %r0 /* disable interrupts */
+
+ /* current_thread_info()->preempt_count */
+ mfctl %cr30, %r1
+ LDREG TI_PRE_COUNT(%r1), %r19
+ cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */
+ nop /* prev insn branched backwards */
+
+ /* check if we interrupted a critical path */
+ LDREG PT_PSW(%r16), %r20
+ bb,<,n %r20, 31 - PSW_SM_I, intr_restore
+ nop
+
+ BL preempt_schedule_irq, %r2
+ nop
+
+ b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
+#endif /* CONFIG_PREEMPT */
+
+ /*
+ * External interrupts.
+ */
+
+intr_extint:
+ cmpib,COND(=),n 0,%r16,1f
+
+ get_stack_use_cr30
+ b,n 2f
+
+1:
+ get_stack_use_r30
+2:
+ save_specials %r29
+ virt_map
+ save_general %r29
+
+ ldo PT_FR0(%r29), %r24
+ save_fp %r24
+
+ loadgp
+
+ copy %r29, %r26 /* arg0 is pt_regs */
+ copy %r29, %r16 /* save pt_regs */
+
+ ldil L%intr_return, %r2
+
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ b do_cpu_irq_mask
+ ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
+ENDPROC(syscall_exit_rfi)
+
+
+ /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
+
+ENTRY(intr_save) /* for os_hpmc */
+ mfsp %sr7,%r16
+ cmpib,COND(=),n 0,%r16,1f
+ get_stack_use_cr30
+ b 2f
+ copy %r8,%r26
+
+1:
+ get_stack_use_r30
+ copy %r8,%r26
+
+2:
+ save_specials %r29
+
+	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
+
+ /*
+ * FIXME: 1) Use a #define for the hardwired "6" below (and in
+	 *          traps.c).
+ * 2) Once we start executing code above 4 Gb, we need
+ * to adjust iasq/iaoq here in the same way we
+ * adjust isr/ior below.
+ */
+
+ cmpib,COND(=),n 6,%r26,skip_save_ior
+
+
+ mfctl %cr20, %r16 /* isr */
+ nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
+ mfctl %cr21, %r17 /* ior */
+
+
+#ifdef CONFIG_64BIT
+ /*
+ * If the interrupted code was running with W bit off (32 bit),
+ * clear the b bits (bits 0 & 1) in the ior.
+ * save_specials left ipsw value in r8 for us to test.
+ */
+ extrd,u,*<> %r8,PSW_W_BIT,1,%r0
+ depdi 0,1,2,%r17
+
+ /*
+ * FIXME: This code has hardwired assumptions about the split
+ * between space bits and offset bits. This will change
+ * when we allow alternate page sizes.
+ */
+
+ /* adjust isr/ior. */
+ extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */
+ depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */
+ depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */
+#endif
+ STREG %r16, PT_ISR(%r29)
+ STREG %r17, PT_IOR(%r29)
+
+
+skip_save_ior:
+ virt_map
+ save_general %r29
+
+ ldo PT_FR0(%r29), %r25
+ save_fp %r25
+
+ loadgp
+
+ copy %r29, %r25 /* arg1 is pt_regs */
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ ldil L%intr_check_sig, %r2
+ copy %r25, %r16 /* save pt_regs */
+
+ b handle_interruption
+ ldo R%intr_check_sig(%r2), %r2
+ENDPROC(intr_save)
+
+
+ /*
+ * Note for all tlb miss handlers:
+ *
+ * cr24 contains a pointer to the kernel address space
+ * page directory.
+ *
+ * cr25 contains a pointer to the current user address
+ * space page directory.
+ *
+ * sr3 will contain the space id of the user address space
+ * of the current running thread while that thread is
+ * running in the kernel.
+ */
+
+ /*
+ * register number allocations. Note that these are all
+ * in the shadowed registers
+ */
+
+ t0 = r1 /* temporary register 0 */
+ va = r8 /* virtual address for which the trap occurred */
+ t1 = r9 /* temporary register 1 */
+ pte = r16 /* pte/phys page # */
+ prot = r17 /* prot bits */
+ spc = r24 /* space for which the trap occurred */
+ ptp = r25 /* page directory/page table pointer */
+
+#ifdef CONFIG_64BIT
+
+dtlb_miss_20w:
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,dtlb_fault
+
+ L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+dtlb_check_alias_20w:
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
+
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+nadtlb_miss_20w:
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,nadtlb_fault
+
+ L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+nadtlb_check_alias_20w:
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
+
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+#else
+
+dtlb_miss_11:
+ get_pgd spc,ptp
+
+ space_check spc,t0,dtlb_fault
+
+ L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+ mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ idtlba pte,(%sr1,va)
+ idtlbp prot,(%sr1,va)
+
+ mtsp t0, %sr1 /* Restore sr1 */
+
+ rfir
+ nop
+
+dtlb_check_alias_11:
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
+
+ idtlba pte,(va)
+ idtlbp prot,(va)
+
+ rfir
+ nop
+
+nadtlb_miss_11:
+ get_pgd spc,ptp
+
+ space_check spc,t0,nadtlb_fault
+
+ L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+
+ mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ idtlba pte,(%sr1,va)
+ idtlbp prot,(%sr1,va)
+
+ mtsp t0, %sr1 /* Restore sr1 */
+
+ rfir
+ nop
+
+nadtlb_check_alias_11:
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
+
+ idtlba pte,(va)
+ idtlbp prot,(va)
+
+ rfir
+ nop
+
+dtlb_miss_20:
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,dtlb_fault
+
+ L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ f_extend pte,t0
+
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+dtlb_check_alias_20:
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
+
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+nadtlb_miss_20:
+ get_pgd spc,ptp
+
+ space_check spc,t0,nadtlb_fault
+
+ L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ f_extend pte,t0
+
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+nadtlb_check_alias_20:
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
+
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+#endif
+
+nadtlb_emulate:
+
+ /*
+ * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
+ * probei instructions. We don't want to fault for these
+ * instructions (not only does it not make sense, it can cause
+ * deadlocks, since some flushes are done with the mmap
+ * semaphore held). If the translation doesn't exist, we can't
+ * insert a translation, so have to emulate the side effects
+ * of the instruction. Since we don't insert a translation
+ * we can get a lot of faults during a flush loop, so it makes
+ * sense to try to do it here with minimum overhead. We only
+ * emulate fdc,fic,pdc,probew,prober instructions whose base
+ * and index registers are not shadowed. We defer everything
+ * else to the "slow" path.
+ */
+
+ mfctl %cr19,%r9 /* Get iir */
+
+ /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
+ Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
+
+ /* Checks for fdc,fdce,pdc,"fic,4f" only */
+ ldi 0x280,%r16
+ and %r9,%r16,%r17
+ cmpb,<>,n %r16,%r17,nadtlb_probe_check
+ bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
+ BL get_register,%r25
+ extrw,u %r9,15,5,%r8 /* Get index register # */
+ cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
+ copy %r1,%r24
+ BL get_register,%r25
+ extrw,u %r9,10,5,%r8 /* Get base register # */
+ cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
+ BL set_register,%r25
+ add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
+
+nadtlb_nullify:
+ mfctl %ipsw,%r8
+ ldil L%PSW_N,%r9
+ or %r8,%r9,%r8 /* Set PSW_N */
+ mtctl %r8,%ipsw
+
+ rfir
+ nop
+
+ /*
+ When there is no translation for the probe address then we
+	  must nullify the insn and return zero in the target register.
+ This will indicate to the calling code that it does not have
+ write/read privileges to this address.
+
+ This should technically work for prober and probew in PA 1.1,
+ and also probe,r and probe,w in PA 2.0
+
+ WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
+ THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
+
+ */
+nadtlb_probe_check:
+ ldi 0x80,%r16
+ and %r9,%r16,%r17
+ cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
+ BL get_register,%r25 /* Find the target register */
+ extrw,u %r9,31,5,%r8 /* Get target register */
+ cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
+ BL set_register,%r25
+ copy %r0,%r1 /* Write zero to target register */
+ b nadtlb_nullify /* Nullify return insn */
+ nop
+
+
+#ifdef CONFIG_64BIT
+itlb_miss_20w:
+
+ /*
+ * I miss is a little different, since we allow users to fault
+ * on the gateway page which is in the kernel address space.
+ */
+
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,itlb_fault
+
+ L3_ptep ptp,pte,t0,va,itlb_fault
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ iitlbt pte,prot
+
+ rfir
+ nop
+
+naitlb_miss_20w:
+
+ /*
+ * I miss is a little different, since we allow users to fault
+ * on the gateway page which is in the kernel address space.
+ */
+
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,naitlb_fault
+
+ L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ iitlbt pte,prot
+
+ rfir
+ nop
+
+naitlb_check_alias_20w:
+ do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
+
+ iitlbt pte,prot
+
+ rfir
+ nop
+
+#else
+
+itlb_miss_11:
+ get_pgd spc,ptp
+
+ space_check spc,t0,itlb_fault
+
+ L2_ptep ptp,pte,t0,va,itlb_fault
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+ mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ iitlba pte,(%sr1,va)
+ iitlbp prot,(%sr1,va)
+
+ mtsp t0, %sr1 /* Restore sr1 */
+
+ rfir
+ nop
+
+naitlb_miss_11:
+ get_pgd spc,ptp
+
+ space_check spc,t0,naitlb_fault
+
+ L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+ mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ iitlba pte,(%sr1,va)
+ iitlbp prot,(%sr1,va)
+
+ mtsp t0, %sr1 /* Restore sr1 */
+
+ rfir
+ nop
+
+naitlb_check_alias_11:
+ do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
+
+ iitlba pte,(%sr0, va)
+ iitlbp prot,(%sr0, va)
+
+ rfir
+ nop
+
+
+itlb_miss_20:
+ get_pgd spc,ptp
+
+ space_check spc,t0,itlb_fault
+
+ L2_ptep ptp,pte,t0,va,itlb_fault
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ f_extend pte,t0
+
+ iitlbt pte,prot
+
+ rfir
+ nop
+
+naitlb_miss_20:
+ get_pgd spc,ptp
+
+ space_check spc,t0,naitlb_fault
+
+ L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ f_extend pte,t0
+
+ iitlbt pte,prot
+
+ rfir
+ nop
+
+naitlb_check_alias_20:
+ do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
+
+ iitlbt pte,prot
+
+ rfir
+ nop
+
+#endif
+
+#ifdef CONFIG_64BIT
+
+dbit_trap_20w:
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,dbit_fault
+
+ L3_ptep ptp,pte,t0,va,dbit_fault
+
+#ifdef CONFIG_SMP
+ cmpib,COND(=),n 0,spc,dbit_nolock_20w
+ load32 PA(pa_dbit_lock),t0
+
+dbit_spin_20w:
+ LDCW 0(t0),t1
+ cmpib,COND(=) 0,t1,dbit_spin_20w
+ nop
+
+dbit_nolock_20w:
+#endif
+ update_dirty ptp,pte,t1
+
+ make_insert_tlb spc,pte,prot
+
+ idtlbt pte,prot
+#ifdef CONFIG_SMP
+ cmpib,COND(=),n 0,spc,dbit_nounlock_20w
+ ldi 1,t1
+ stw t1,0(t0)
+
+dbit_nounlock_20w:
+#endif
+
+ rfir
+ nop
+#else
+
+dbit_trap_11:
+
+ get_pgd spc,ptp
+
+ space_check spc,t0,dbit_fault
+
+ L2_ptep ptp,pte,t0,va,dbit_fault
+
+#ifdef CONFIG_SMP
+ cmpib,COND(=),n 0,spc,dbit_nolock_11
+ load32 PA(pa_dbit_lock),t0
+
+dbit_spin_11:
+ LDCW 0(t0),t1
+ cmpib,= 0,t1,dbit_spin_11
+ nop
+
+dbit_nolock_11:
+#endif
+ update_dirty ptp,pte,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ idtlba pte,(%sr1,va)
+ idtlbp prot,(%sr1,va)
+
+ mtsp t1, %sr1 /* Restore sr1 */
+#ifdef CONFIG_SMP
+ cmpib,COND(=),n 0,spc,dbit_nounlock_11
+ ldi 1,t1
+ stw t1,0(t0)
+
+dbit_nounlock_11:
+#endif
+
+ rfir
+ nop
+
+dbit_trap_20:
+ get_pgd spc,ptp
+
+ space_check spc,t0,dbit_fault
+
+ L2_ptep ptp,pte,t0,va,dbit_fault
+
+#ifdef CONFIG_SMP
+ cmpib,COND(=),n 0,spc,dbit_nolock_20
+ load32 PA(pa_dbit_lock),t0
+
+dbit_spin_20:
+ LDCW 0(t0),t1
+ cmpib,= 0,t1,dbit_spin_20
+ nop
+
+dbit_nolock_20:
+#endif
+ update_dirty ptp,pte,t1
+
+ make_insert_tlb spc,pte,prot
+
+ f_extend pte,t1
+
+ idtlbt pte,prot
+
+#ifdef CONFIG_SMP
+ cmpib,COND(=),n 0,spc,dbit_nounlock_20
+ ldi 1,t1
+ stw t1,0(t0)
+
+dbit_nounlock_20:
+#endif
+
+ rfir
+ nop
+#endif
+
+ .import handle_interruption,code
+
+kernel_bad_space:
+ b intr_save
+ ldi 31,%r8 /* Use an unused code */
+
+dbit_fault:
+ b intr_save
+ ldi 20,%r8
+
+itlb_fault:
+ b intr_save
+ ldi 6,%r8
+
+nadtlb_fault:
+ b intr_save
+ ldi 17,%r8
+
+naitlb_fault:
+ b intr_save
+ ldi 16,%r8
+
+dtlb_fault:
+ b intr_save
+ ldi 15,%r8
+
+ /* Register saving semantics for system calls:
+
+ %r1 clobbered by system call macro in userspace
+ %r2 saved in PT_REGS by gateway page
+ %r3 - %r18 preserved by C code (saved by signal code)
+ %r19 - %r20 saved in PT_REGS by gateway page
+ %r21 - %r22 non-standard syscall args
+ stored in kernel stack by gateway page
+ %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
+ %r27 - %r30 saved in PT_REGS by gateway page
+ %r31 syscall return pointer
+ */
+
+ /* Floating point registers (FIXME: what do we do with these?)
+
+ %fr0 - %fr3 status/exception, not preserved
+ %fr4 - %fr7 arguments
+ %fr8 - %fr11 not preserved by C code
+ %fr12 - %fr21 preserved by C code
+ %fr22 - %fr31 not preserved by C code
+ */
+
+ .macro reg_save regs
+ STREG %r3, PT_GR3(\regs)
+ STREG %r4, PT_GR4(\regs)
+ STREG %r5, PT_GR5(\regs)
+ STREG %r6, PT_GR6(\regs)
+ STREG %r7, PT_GR7(\regs)
+ STREG %r8, PT_GR8(\regs)
+ STREG %r9, PT_GR9(\regs)
+ STREG %r10,PT_GR10(\regs)
+ STREG %r11,PT_GR11(\regs)
+ STREG %r12,PT_GR12(\regs)
+ STREG %r13,PT_GR13(\regs)
+ STREG %r14,PT_GR14(\regs)
+ STREG %r15,PT_GR15(\regs)
+ STREG %r16,PT_GR16(\regs)
+ STREG %r17,PT_GR17(\regs)
+ STREG %r18,PT_GR18(\regs)
+ .endm
+
+ .macro reg_restore regs
+ LDREG PT_GR3(\regs), %r3
+ LDREG PT_GR4(\regs), %r4
+ LDREG PT_GR5(\regs), %r5
+ LDREG PT_GR6(\regs), %r6
+ LDREG PT_GR7(\regs), %r7
+ LDREG PT_GR8(\regs), %r8
+ LDREG PT_GR9(\regs), %r9
+ LDREG PT_GR10(\regs),%r10
+ LDREG PT_GR11(\regs),%r11
+ LDREG PT_GR12(\regs),%r12
+ LDREG PT_GR13(\regs),%r13
+ LDREG PT_GR14(\regs),%r14
+ LDREG PT_GR15(\regs),%r15
+ LDREG PT_GR16(\regs),%r16
+ LDREG PT_GR17(\regs),%r17
+ LDREG PT_GR18(\regs),%r18
+ .endm
+
+ENTRY(sys_fork_wrapper)
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
+ ldo TASK_REGS(%r1),%r1
+ reg_save %r1
+ mfctl %cr27, %r3
+ STREG %r3, PT_CR27(%r1)
+
+ STREG %r2,-RP_OFFSET(%r30)
+ ldo FRAME_SIZE(%r30),%r30
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ /* These are call-clobbered registers and therefore
+ also syscall-clobbered (we hope). */
+ STREG %r2,PT_GR19(%r1) /* save for child */
+ STREG %r30,PT_GR21(%r1)
+
+ LDREG PT_GR30(%r1),%r25
+ copy %r1,%r24
+ BL sys_clone,%r2
+ ldi SIGCHLD,%r26
+
+ LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
+wrapper_exit:
+ ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
+
+ LDREG PT_CR27(%r1), %r3
+ mtctl %r3, %cr27
+ reg_restore %r1
+
+ /* strace expects syscall # to be preserved in r20 */
+ ldi __NR_fork,%r20
+ bv %r0(%r2)
+ STREG %r20,PT_GR20(%r1)
+ENDPROC(sys_fork_wrapper)
+
+ /* Set the return value for the child */
+ENTRY(child_return)
+ BL schedule_tail, %r2
+ nop
+
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
+ LDREG TASK_PT_GR19(%r1),%r2
+ b wrapper_exit
+ copy %r0,%r28
+ENDPROC(child_return)
+
+
+ENTRY(sys_clone_wrapper)
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
+ reg_save %r1
+ mfctl %cr27, %r3
+ STREG %r3, PT_CR27(%r1)
+
+ STREG %r2,-RP_OFFSET(%r30)
+ ldo FRAME_SIZE(%r30),%r30
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ /* WARNING - Clobbers r19 and r21, userspace must save these! */
+ STREG %r2,PT_GR19(%r1) /* save for child */
+ STREG %r30,PT_GR21(%r1)
+ BL sys_clone,%r2
+ copy %r1,%r24
+
+ b wrapper_exit
+ LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
+ENDPROC(sys_clone_wrapper)
+
+
+ENTRY(sys_vfork_wrapper)
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
+ reg_save %r1
+ mfctl %cr27, %r3
+ STREG %r3, PT_CR27(%r1)
+
+ STREG %r2,-RP_OFFSET(%r30)
+ ldo FRAME_SIZE(%r30),%r30
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ STREG %r2,PT_GR19(%r1) /* save for child */
+ STREG %r30,PT_GR21(%r1)
+
+ BL sys_vfork,%r2
+ copy %r1,%r26
+
+ b wrapper_exit
+ LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
+ENDPROC(sys_vfork_wrapper)
+
+
+ .macro execve_wrapper execve
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
+
+ /*
+ * Do we need to save/restore r3-r18 here?
+	 * I don't think so; why would the new thread need the old
+	 * thread's registers?
+ */
+
+ /* %arg0 - %arg3 are already saved for us. */
+
+ STREG %r2,-RP_OFFSET(%r30)
+ ldo FRAME_SIZE(%r30),%r30
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+ BL \execve,%r2
+ copy %r1,%arg0
+
+ ldo -FRAME_SIZE(%r30),%r30
+ LDREG -RP_OFFSET(%r30),%r2
+
+ /* If exec succeeded we need to load the args */
+
+ ldo -1024(%r0),%r1
+ cmpb,>>= %r28,%r1,error_\execve
+ copy %r2,%r19
+
+error_\execve:
+ bv %r0(%r19)
+ nop
+ .endm
+
+ .import sys_execve
+ENTRY(sys_execve_wrapper)
+ execve_wrapper sys_execve
+ENDPROC(sys_execve_wrapper)
+
+#ifdef CONFIG_64BIT
+ .import sys32_execve
+ENTRY(sys32_execve_wrapper)
+ execve_wrapper sys32_execve
+ENDPROC(sys32_execve_wrapper)
+#endif
+
+ENTRY(sys_rt_sigreturn_wrapper)
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
+ ldo TASK_REGS(%r26),%r26 /* get pt regs */
+ /* Don't save regs, we are going to restore them from sigcontext. */
+ STREG %r2, -RP_OFFSET(%r30)
+#ifdef CONFIG_64BIT
+ ldo FRAME_SIZE(%r30), %r30
+ BL sys_rt_sigreturn,%r2
+ ldo -16(%r30),%r29 /* Reference param save area */
+#else
+ BL sys_rt_sigreturn,%r2
+ ldo FRAME_SIZE(%r30), %r30
+#endif
+
+ ldo -FRAME_SIZE(%r30), %r30
+ LDREG -RP_OFFSET(%r30), %r2
+
+ /* FIXME: I think we need to restore a few more things here. */
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
+ reg_restore %r1
+
+ /* If the signal was received while the process was blocked on a
+ * syscall, then r2 will take us to syscall_exit; otherwise r2 will
+ * take us to syscall_exit_rfi and on to intr_return.
+ */
+ bv %r0(%r2)
+ LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
+ENDPROC(sys_rt_sigreturn_wrapper)
+
+ENTRY(sys_sigaltstack_wrapper)
+ /* Get the user stack pointer */
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r24 /* get pt regs */
+ LDREG TASK_PT_GR30(%r24),%r24
+ STREG %r2, -RP_OFFSET(%r30)
+#ifdef CONFIG_64BIT
+ ldo FRAME_SIZE(%r30), %r30
+ BL do_sigaltstack,%r2
+ ldo -16(%r30),%r29 /* Reference param save area */
+#else
+ BL do_sigaltstack,%r2
+ ldo FRAME_SIZE(%r30), %r30
+#endif
+
+ ldo -FRAME_SIZE(%r30), %r30
+ LDREG -RP_OFFSET(%r30), %r2
+ bv %r0(%r2)
+ nop
+ENDPROC(sys_sigaltstack_wrapper)
+
+#ifdef CONFIG_64BIT
+ENTRY(sys32_sigaltstack_wrapper)
+ /* Get the user stack pointer */
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
+ LDREG TASK_PT_GR30(%r24),%r24
+ STREG %r2, -RP_OFFSET(%r30)
+ ldo FRAME_SIZE(%r30), %r30
+ BL do_sigaltstack32,%r2
+ ldo -16(%r30),%r29 /* Reference param save area */
+
+ ldo -FRAME_SIZE(%r30), %r30
+ LDREG -RP_OFFSET(%r30), %r2
+ bv %r0(%r2)
+ nop
+ENDPROC(sys32_sigaltstack_wrapper)
+#endif
+
+ENTRY(syscall_exit)
+ /* NOTE: HP-UX syscalls also come through here
+ * after hpux_syscall_exit fixes up return
+ * values. */
+
+ /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
+ * via syscall_exit_rfi if the signal was received while the process
+ * was running.
+ */
+
+ /* save return value now */
+
+ mfctl %cr30, %r1
+ LDREG TI_TASK(%r1),%r1
+ STREG %r28,TASK_PT_GR28(%r1)
+
+#ifdef CONFIG_HPUX
+/* <linux/personality.h> cannot be easily included */
+#define PER_HPUX 0x10
+ ldw TASK_PERSONALITY(%r1),%r19
+
+ /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
+ ldo -PER_HPUX(%r19), %r19
+ cmpib,COND(<>),n 0,%r19,1f
+
+ /* Save other hpux returns if personality is PER_HPUX */
+ STREG %r22,TASK_PT_GR22(%r1)
+ STREG %r29,TASK_PT_GR29(%r1)
+1:
+
+#endif /* CONFIG_HPUX */
+
+ /* Seems to me that dp could be wrong here, if the syscall involved
+ * calling a module, and nothing got round to restoring dp on return.
+ */
+ loadgp
+
+syscall_check_resched:
+
+ /* check for reschedule */
+
+ LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
+ bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
+
+ .import do_signal,code
+syscall_check_sig:
+ LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
+ ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
+ and,COND(<>) %r19, %r26, %r0
+ b,n syscall_restore /* skip past if we've nothing to do */
+
+syscall_do_signal:
+ /* Save callee-save registers (for sigcontext).
+ * FIXME: After this point the process structure should be
+ * consistent with all the relevant state of the process
+ * before the syscall. We need to verify this.
+ */
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
+ reg_save %r26
+
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ BL do_notify_resume,%r2
+ ldi 1, %r25 /* long in_syscall = 1 */
+
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
+ reg_restore %r20
+
+ b,n syscall_check_sig
+
+syscall_restore:
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+
+ /* Are we being ptraced? */
+ ldw TASK_FLAGS(%r1),%r19
+ ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
+ and,COND(=) %r19,%r2,%r0
+ b,n syscall_restore_rfi
+
+ ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
+ rest_fp %r19
+
+ LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
+ mtsar %r19
+
+ LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
+ LDREG TASK_PT_GR19(%r1),%r19
+ LDREG TASK_PT_GR20(%r1),%r20
+ LDREG TASK_PT_GR21(%r1),%r21
+ LDREG TASK_PT_GR22(%r1),%r22
+ LDREG TASK_PT_GR23(%r1),%r23
+ LDREG TASK_PT_GR24(%r1),%r24
+ LDREG TASK_PT_GR25(%r1),%r25
+ LDREG TASK_PT_GR26(%r1),%r26
+ LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
+ LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
+ LDREG TASK_PT_GR29(%r1),%r29
+ LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
+
+ /* NOTE: We use rsm/ssm pair to make this operation atomic */
+ LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
+ rsm PSW_SM_I, %r0
+ copy %r1,%r30 /* Restore user sp */
+ mfsp %sr3,%r1 /* Get user space id */
+ mtsp %r1,%sr7 /* Restore sr7 */
+ ssm PSW_SM_I, %r0
+
+ /* Set sr2 to zero for userspace syscalls to work. */
+ mtsp %r0,%sr2
+ mtsp %r1,%sr4 /* Restore sr4 */
+ mtsp %r1,%sr5 /* Restore sr5 */
+ mtsp %r1,%sr6 /* Restore sr6 */
+
+ depi 3,31,2,%r31 /* ensure return to user mode. */
+
+#ifdef CONFIG_64BIT
+ /* decide whether to reset the wide mode bit
+ *
+ * For a syscall, the W bit is stored in the lowest bit
+ * of sp. Extract it and reset W if it is zero */
+ extrd,u,*<> %r30,63,1,%r1
+ rsm PSW_SM_W, %r0
+ /* now reset the lowest bit of sp if it was set */
+ xor %r30,%r1,%r30
+#endif
+ be,n 0(%sr3,%r31) /* return to user space */
+
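The "W bit travels in bit 0 of sp" convention used just above can be sketched in C as follows; sp and psw_w are illustrative names only:

	static unsigned long restore_wide_mode_sketch(unsigned long sp, int *psw_w)
	{
		unsigned long w = sp & 1;	/* extrd,u,*<> %r30,63,1,%r1 */

		if (!w)
			*psw_w = 0;		/* rsm PSW_SM_W */
		return sp ^ w;			/* clear bit 0 of sp if it was set */
	}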
+ /* We have to return via an RFI, so that PSW T and R bits can be set
+ * appropriately.
+ * This sets up pt_regs so we can return via intr_restore, which is not
+ * the most efficient way of doing things, but it works.
+ */
+syscall_restore_rfi:
+ ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
+ mtctl %r2,%cr0 /* for immediate trap */
+ LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
+ ldi 0x0b,%r20 /* Create new PSW */
+ depi -1,13,1,%r20 /* C, Q, D, and I bits */
+
+ /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
+ * set in thread_info.h and converted to PA bitmap
+ * numbers in asm-offsets.c */
+
+ /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
+ extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
+ depi -1,27,1,%r20 /* R bit */
+
+ /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
+ extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
+ depi -1,7,1,%r20 /* T bit */
+
+ STREG %r20,TASK_PT_PSW(%r1)
+
+ /* Always store space registers, since sr3 can be changed (e.g. fork) */
+
+ mfsp %sr3,%r25
+ STREG %r25,TASK_PT_SR3(%r1)
+ STREG %r25,TASK_PT_SR4(%r1)
+ STREG %r25,TASK_PT_SR5(%r1)
+ STREG %r25,TASK_PT_SR6(%r1)
+ STREG %r25,TASK_PT_SR7(%r1)
+ STREG %r25,TASK_PT_IASQ0(%r1)
+ STREG %r25,TASK_PT_IASQ1(%r1)
+
+ /* XXX W bit??? */
+ /* Now if old D bit is clear, it means we didn't save all registers
+ * on syscall entry, so do that now. This only happens on TRACEME
+ * calls, or if someone attached to us while we were on a syscall.
+ * We could make this more efficient by not saving r3-r18, but
+ * then we wouldn't be able to use the common intr_restore path.
+ * It is only for traced processes anyway, so performance is not
+ * an issue.
+ */
+ bb,< %r2,30,pt_regs_ok /* Branch if D set */
+ ldo TASK_REGS(%r1),%r25
+ reg_save %r25 /* Save r3 to r18 */
+
+ /* Save the current sr */
+ mfsp %sr0,%r2
+ STREG %r2,TASK_PT_SR0(%r1)
+
+ /* Save the scratch sr */
+ mfsp %sr1,%r2
+ STREG %r2,TASK_PT_SR1(%r1)
+
+ /* sr2 should be set to zero for userspace syscalls */
+ STREG %r0,TASK_PT_SR2(%r1)
+
+pt_regs_ok:
+ LDREG TASK_PT_GR31(%r1),%r2
+ depi 3,31,2,%r2 /* ensure return to user mode. */
+ STREG %r2,TASK_PT_IAOQ0(%r1)
+ ldo 4(%r2),%r2
+ STREG %r2,TASK_PT_IAOQ1(%r1)
+ copy %r25,%r16
+ b intr_restore
+ nop
+
+ .import schedule,code
+syscall_do_resched:
+ BL schedule,%r2
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#else
+ nop
+#endif
+ b syscall_check_resched /* if resched, we start over again */
+ nop
+ENDPROC(syscall_exit)
+
+
+#ifdef CONFIG_FUNCTION_TRACER
+ .import ftrace_function_trampoline,code
+ENTRY(_mcount)
+ copy %r3, %arg2
+ b ftrace_function_trampoline
+ nop
+ENDPROC(_mcount)
+
+ENTRY(return_to_handler)
+ load32 return_trampoline, %rp
+ copy %ret0, %arg0
+ copy %ret1, %arg1
+ b ftrace_return_to_handler
+ nop
+return_trampoline:
+ copy %ret0, %rp
+ copy %r23, %ret0
+ copy %r24, %ret1
+
+.globl ftrace_stub
+ftrace_stub:
+ bv %r0(%rp)
+ nop
+ENDPROC(return_to_handler)
+#endif /* CONFIG_FUNCTION_TRACER */
+
+
+get_register:
+ /*
+ * get_register is used by the non access tlb miss handlers to
+ * copy the value of the general register specified in r8 into
+ * r1. This routine can't be used for shadowed registers, since
+ * the rfir will restore the original value. So, for the shadowed
+ * registers we put a -1 into r1 to indicate that the register
+ * should not be used (the register being copied could also have
+ * a -1 in it, but that is OK, it just means that we will have
+ * to use the slow path instead).
+ */
+ blr %r8,%r0
+ nop
+ bv %r0(%r25) /* r0 */
+ copy %r0,%r1
+ bv %r0(%r25) /* r1 - shadowed */
+ ldi -1,%r1
+ bv %r0(%r25) /* r2 */
+ copy %r2,%r1
+ bv %r0(%r25) /* r3 */
+ copy %r3,%r1
+ bv %r0(%r25) /* r4 */
+ copy %r4,%r1
+ bv %r0(%r25) /* r5 */
+ copy %r5,%r1
+ bv %r0(%r25) /* r6 */
+ copy %r6,%r1
+ bv %r0(%r25) /* r7 */
+ copy %r7,%r1
+ bv %r0(%r25) /* r8 - shadowed */
+ ldi -1,%r1
+ bv %r0(%r25) /* r9 - shadowed */
+ ldi -1,%r1
+ bv %r0(%r25) /* r10 */
+ copy %r10,%r1
+ bv %r0(%r25) /* r11 */
+ copy %r11,%r1
+ bv %r0(%r25) /* r12 */
+ copy %r12,%r1
+ bv %r0(%r25) /* r13 */
+ copy %r13,%r1
+ bv %r0(%r25) /* r14 */
+ copy %r14,%r1
+ bv %r0(%r25) /* r15 */
+ copy %r15,%r1
+ bv %r0(%r25) /* r16 - shadowed */
+ ldi -1,%r1
+ bv %r0(%r25) /* r17 - shadowed */
+ ldi -1,%r1
+ bv %r0(%r25) /* r18 */
+ copy %r18,%r1
+ bv %r0(%r25) /* r19 */
+ copy %r19,%r1
+ bv %r0(%r25) /* r20 */
+ copy %r20,%r1
+ bv %r0(%r25) /* r21 */
+ copy %r21,%r1
+ bv %r0(%r25) /* r22 */
+ copy %r22,%r1
+ bv %r0(%r25) /* r23 */
+ copy %r23,%r1
+ bv %r0(%r25) /* r24 - shadowed */
+ ldi -1,%r1
+ bv %r0(%r25) /* r25 - shadowed */
+ ldi -1,%r1
+ bv %r0(%r25) /* r26 */
+ copy %r26,%r1
+ bv %r0(%r25) /* r27 */
+ copy %r27,%r1
+ bv %r0(%r25) /* r28 */
+ copy %r28,%r1
+ bv %r0(%r25) /* r29 */
+ copy %r29,%r1
+ bv %r0(%r25) /* r30 */
+ copy %r30,%r1
+ bv %r0(%r25) /* r31 */
+ copy %r31,%r1
+
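Expressed as a C switch, the branch table above amounts to the following sketch (gr[] is a hypothetical array of saved general registers):

	static long get_register_sketch(int regno, const unsigned long *gr)
	{
		switch (regno) {
		case 1: case 8: case 9: case 16: case 17: case 24: case 25:
			return -1;	/* shadowed: rfir would undo it, use slow path */
		case 0:
			return 0;	/* %r0 always reads as zero */
		default:
			return gr[regno];
		}
	}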
+
+set_register:
+ /*
+ * set_register is used by the non access tlb miss handlers to
+ * copy the value of r1 into the general register specified in
+ * r8.
+ */
+ blr %r8,%r0
+ nop
+ bv %r0(%r25) /* r0 (silly, but it is a place holder) */
+ copy %r1,%r0
+ bv %r0(%r25) /* r1 */
+ copy %r1,%r1
+ bv %r0(%r25) /* r2 */
+ copy %r1,%r2
+ bv %r0(%r25) /* r3 */
+ copy %r1,%r3
+ bv %r0(%r25) /* r4 */
+ copy %r1,%r4
+ bv %r0(%r25) /* r5 */
+ copy %r1,%r5
+ bv %r0(%r25) /* r6 */
+ copy %r1,%r6
+ bv %r0(%r25) /* r7 */
+ copy %r1,%r7
+ bv %r0(%r25) /* r8 */
+ copy %r1,%r8
+ bv %r0(%r25) /* r9 */
+ copy %r1,%r9
+ bv %r0(%r25) /* r10 */
+ copy %r1,%r10
+ bv %r0(%r25) /* r11 */
+ copy %r1,%r11
+ bv %r0(%r25) /* r12 */
+ copy %r1,%r12
+ bv %r0(%r25) /* r13 */
+ copy %r1,%r13
+ bv %r0(%r25) /* r14 */
+ copy %r1,%r14
+ bv %r0(%r25) /* r15 */
+ copy %r1,%r15
+ bv %r0(%r25) /* r16 */
+ copy %r1,%r16
+ bv %r0(%r25) /* r17 */
+ copy %r1,%r17
+ bv %r0(%r25) /* r18 */
+ copy %r1,%r18
+ bv %r0(%r25) /* r19 */
+ copy %r1,%r19
+ bv %r0(%r25) /* r20 */
+ copy %r1,%r20
+ bv %r0(%r25) /* r21 */
+ copy %r1,%r21
+ bv %r0(%r25) /* r22 */
+ copy %r1,%r22
+ bv %r0(%r25) /* r23 */
+ copy %r1,%r23
+ bv %r0(%r25) /* r24 */
+ copy %r1,%r24
+ bv %r0(%r25) /* r25 */
+ copy %r1,%r25
+ bv %r0(%r25) /* r26 */
+ copy %r1,%r26
+ bv %r0(%r25) /* r27 */
+ copy %r1,%r27
+ bv %r0(%r25) /* r28 */
+ copy %r1,%r28
+ bv %r0(%r25) /* r29 */
+ copy %r1,%r29
+ bv %r0(%r25) /* r30 */
+ copy %r1,%r30
+ bv %r0(%r25) /* r31 */
+ copy %r1,%r31
+
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
new file mode 100644
index 00000000..4896ed09
--- /dev/null
+++ b/arch/parisc/kernel/firmware.c
@@ -0,0 +1,1495 @@
+/*
+ * arch/parisc/kernel/firmware.c - safe PDC access routines
+ *
+ * PDC == Processor Dependent Code
+ *
+ * See http://www.parisc-linux.org/documentation/index.html
+ * for documentation describing the entry points and calling
+ * conventions defined below.
+ *
+ * Copyright 1999 SuSE GmbH Nuernberg (Philipp Rumpf, prumpf@tux.org)
+ * Copyright 1999 The Puffin Group, (Alex deVries, David Kennedy)
+ * Copyright 2003 Grant Grundler <grundler parisc-linux org>
+ * Copyright 2003,2004 Ryan Bradetich <rbrad@parisc-linux.org>
+ * Copyright 2004,2006 Thibaut VARENE <varenet@parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+/* I think it would be in everyone's best interest to follow these
+ * guidelines when writing PDC wrappers:
+ *
+ * - the name of the pdc wrapper should match one of the macros
+ * used for the first two arguments
+ * - don't use caps for random parts of the name
+ * - use the static PDC result buffers and "copyout" to structs
+ * supplied by the caller to encapsulate alignment restrictions
+ * - hold pdc_lock while in PDC or using static result buffers
+ * - use __pa() to convert virtual (kernel) pointers to physical
+ * ones.
+ * - the name of the struct used for pdc return values should equal
+ * one of the macros used for the first two arguments to the
+ * corresponding PDC call
+ * - keep the order of arguments
+ * - don't be smart (setting trailing NUL bytes for strings, returning
+ * something useful even if the call failed) unless you are sure
+ * it's not going to affect functionality or performance
+ *
+ * Example:
+ * int pdc_cache_info(struct pdc_cache_info *cache_info )
+ * {
+ * int retval;
+ *
+ * spin_lock_irq(&pdc_lock);
+ * retval = mem_pdc_call(PDC_CACHE,PDC_CACHE_INFO,__pa(cache_info),0);
+ * convert_to_wide(pdc_result);
+ * memcpy(cache_info, pdc_result, sizeof(*cache_info));
+ * spin_unlock_irq(&pdc_lock);
+ *
+ * return retval;
+ * }
+ * prumpf 991016
+ */
+
+#include <stdarg.h>
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+
+#include <asm/page.h>
+#include <asm/pdc.h>
+#include <asm/pdcpat.h>
+#include <asm/system.h>
+#include <asm/processor.h> /* for boot_cpu_data */
+
+static DEFINE_SPINLOCK(pdc_lock);
+extern unsigned long pdc_result[NUM_PDC_RESULT];
+extern unsigned long pdc_result2[NUM_PDC_RESULT];
+
+#ifdef CONFIG_64BIT
+#define WIDE_FIRMWARE 0x1
+#define NARROW_FIRMWARE 0x2
+
+/* Firmware needs to be initially set to narrow to determine the
+ * actual firmware width. */
+int parisc_narrow_firmware __read_mostly = 1;
+#endif
+
+/* On most currently-supported platforms, IODC I/O calls are 32-bit calls
+ * and MEM_PDC calls are always the same width as the OS.
+ * Some PAT boxes may have 64-bit IODC I/O.
+ *
+ * Ryan Bradetich added the now obsolete CONFIG_PDC_NARROW to allow
+ * 64-bit kernels to run on systems with 32-bit MEM_PDC calls.
+ * This allowed wide kernels to run on Cxxx boxes.
+ * We now detect 32-bit-only PDC and dynamically switch to 32-bit mode
+ * when running a 64-bit kernel on such boxes (e.g. C200 or C360).
+ */
+
+#ifdef CONFIG_64BIT
+long real64_call(unsigned long function, ...);
+#endif
+long real32_call(unsigned long function, ...);
+
+#ifdef CONFIG_64BIT
+# define MEM_PDC (unsigned long)(PAGE0->mem_pdc_hi) << 32 | PAGE0->mem_pdc
+# define mem_pdc_call(args...) unlikely(parisc_narrow_firmware) ? real32_call(MEM_PDC, args) : real64_call(MEM_PDC, args)
+#else
+# define MEM_PDC (unsigned long)PAGE0->mem_pdc
+# define mem_pdc_call(args...) real32_call(MEM_PDC, args)
+#endif
+
+
+/**
+ * f_extend - Convert PDC addresses to kernel addresses.
+ * @address: Address returned from PDC.
+ *
+ * This function is used to convert PDC addresses into kernel addresses
+ * when the PDC address size and kernel address size are different.
+ */
+static unsigned long f_extend(unsigned long address)
+{
+#ifdef CONFIG_64BIT
+ if(unlikely(parisc_narrow_firmware)) {
+ if((address & 0xff000000) == 0xf0000000)
+ return 0xf0f0f0f000000000UL | (u32)address;
+
+ if((address & 0xf0000000) == 0xf0000000)
+ return 0xffffffff00000000UL | (u32)address;
+ }
+#endif
+ return address;
+}
+
+/**
+ * convert_to_wide - Widen the values in the PDC return buffer.
+ * @addr: The return buffer from PDC.
+ *
+ * When running a wide kernel on narrow (32-bit) firmware, PDC fills the
+ * return buffer with 32-bit values; this function widens them in place to
+ * unsigned longs so that callers can read the buffer directly.
+ */
+static void convert_to_wide(unsigned long *addr)
+{
+#ifdef CONFIG_64BIT
+ int i;
+ unsigned int *p = (unsigned int *)addr;
+
+ if(unlikely(parisc_narrow_firmware)) {
+ for(i = 31; i >= 0; --i)
+ addr[i] = p[i];
+ }
+#endif
+}
+
+#ifdef CONFIG_64BIT
+void __cpuinit set_firmware_width_unlocked(void)
+{
+ int ret;
+
+ ret = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES,
+ __pa(pdc_result), 0);
+ convert_to_wide(pdc_result);
+ if (pdc_result[0] != NARROW_FIRMWARE)
+ parisc_narrow_firmware = 0;
+}
+
+/**
+ * set_firmware_width - Determine if the firmware is wide or narrow.
+ *
+ * This function must be called before any pdc_* function that uses the
+ * convert_to_wide function.
+ */
+void __cpuinit set_firmware_width(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&pdc_lock, flags);
+ set_firmware_width_unlocked();
+ spin_unlock_irqrestore(&pdc_lock, flags);
+}
+#else
+void __cpuinit set_firmware_width_unlocked(void) {
+ return;
+}
+
+void __cpuinit set_firmware_width(void) {
+ return;
+}
+#endif /*CONFIG_64BIT*/
+
+/**
+ * pdc_emergency_unlock - Unlock the linux pdc lock
+ *
+ * This call unlocks the linux pdc lock in case we need some PDC functions
+ * (like pdc_add_valid) during kernel stack dump.
+ */
+void pdc_emergency_unlock(void)
+{
+ /* Spinlock DEBUG code freaks out if we unconditionally unlock */
+ if (spin_is_locked(&pdc_lock))
+ spin_unlock(&pdc_lock);
+}
+
+
+/**
+ * pdc_add_valid - Verify address can be accessed without causing a HPMC.
+ * @address: Address to be verified.
+ *
+ * This PDC call attempts to read from the specified address and verifies
+ * if the address is valid.
+ *
+ * The return value is PDC_OK (0) in case accessing this address is valid.
+ */
+int pdc_add_valid(unsigned long address)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_ADD_VALID, PDC_ADD_VALID_VERIFY, address);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_add_valid);
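A minimal usage sketch; the wrapper function below is hypothetical, but pdc_add_valid() and PDC_OK are the interfaces described above:

	static int address_is_safe(unsigned long addr)
	{
		return pdc_add_valid(addr) == PDC_OK;
	}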
+
+/**
+ * pdc_chassis_info - Return chassis information.
+ * @result: The return buffer.
+ * @chassis_info: The return buffer.
+ * @led_info: The memory buffer address.
+ * @len: The size of the memory buffer.
+ * An HVERSION dependent call for returning the chassis information.
+ */
+int __init pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ memcpy(&pdc_result, chassis_info, sizeof(*chassis_info));
+ memcpy(&pdc_result2, led_info, len);
+ retval = mem_pdc_call(PDC_CHASSIS, PDC_RETURN_CHASSIS_INFO,
+ __pa(pdc_result), __pa(pdc_result2), len);
+ memcpy(chassis_info, pdc_result, sizeof(*chassis_info));
+ memcpy(led_info, pdc_result2, len);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_chassis_send_log - Sends a PDC PAT CHASSIS log message.
+ * @retval: -1 on error, 0 on success. Other values are PDC errors
+ *
+ * Must be correctly formatted or expect system crash
+ */
+#ifdef CONFIG_64BIT
+int pdc_pat_chassis_send_log(unsigned long state, unsigned long data)
+{
+ int retval = 0;
+ unsigned long flags;
+
+ if (!is_pdc_pat())
+ return -1;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PAT_CHASSIS_LOG, PDC_PAT_CHASSIS_WRITE_LOG, __pa(&state), __pa(&data));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+#endif
+
+/**
+ * pdc_chassis_disp - Updates the chassis display code.
+ * @disp: The new chassis display value.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+int pdc_chassis_disp(unsigned long disp)
+{
+ int retval = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_CHASSIS, PDC_CHASSIS_DISP, disp);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_chassis_warn - Fetches chassis warnings.
+ * @warn: The return buffer for the warning value.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+int pdc_chassis_warn(unsigned long *warn)
+{
+ int retval = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_CHASSIS, PDC_CHASSIS_WARN, __pa(pdc_result));
+ *warn = pdc_result[0];
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+int __cpuinit pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info)
+{
+ int ret;
+
+ ret = mem_pdc_call(PDC_COPROC, PDC_COPROC_CFG, __pa(pdc_result));
+ convert_to_wide(pdc_result);
+ pdc_coproc_info->ccr_functional = pdc_result[0];
+ pdc_coproc_info->ccr_present = pdc_result[1];
+ pdc_coproc_info->revision = pdc_result[17];
+ pdc_coproc_info->model = pdc_result[18];
+
+ return ret;
+}
+
+/**
+ * pdc_coproc_cfg - To identify coprocessors attached to the processor.
+ * @pdc_coproc_info: Return buffer address.
+ *
+ * This PDC call returns the presence and status of all the coprocessors
+ * attached to the processor.
+ */
+int __cpuinit pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ ret = pdc_coproc_cfg_unlocked(pdc_coproc_info);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return ret;
+}
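+
+/*
+ * Illustrative use (hypothetical caller): early per-cpu setup can ask PDC
+ * which coprocessors are present, e.g.
+ *
+ *	struct pdc_coproc_cfg cfg;
+ *	if (pdc_coproc_cfg(&cfg) == PDC_OK)
+ *		program the CCR from cfg.ccr_functional to enable the FPU
+ */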
+
+/**
+ * pdc_iodc_read - Read data from the modules IODC.
+ * @actcnt: The actual number of bytes.
+ * @hpa: The HPA of the module for the iodc read.
+ * @index: The iodc entry point.
+ * @iodc_data: A buffer memory for the iodc options.
+ * @iodc_data_size: Size of the memory buffer.
+ *
+ * This PDC call reads from the IODC of the module specified by the hpa
+ * argument.
+ */
+int pdc_iodc_read(unsigned long *actcnt, unsigned long hpa, unsigned int index,
+ void *iodc_data, unsigned int iodc_data_size)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_IODC, PDC_IODC_READ, __pa(pdc_result), hpa,
+ index, __pa(pdc_result2), iodc_data_size);
+ convert_to_wide(pdc_result);
+ *actcnt = pdc_result[0];
+ memcpy(iodc_data, pdc_result2, iodc_data_size);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_iodc_read);
+
+/**
+ * pdc_system_map_find_mods - Locate unarchitected modules.
+ * @pdc_mod_info: Return buffer address.
+ * @mod_path: pointer to dev path structure.
+ * @mod_index: fixed address module index.
+ *
+ * To locate and identify modules which reside at fixed I/O addresses, which
+ * do not self-identify via architected bus walks.
+ */
+int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info,
+ struct pdc_module_path *mod_path, long mod_index)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_MODULE, __pa(pdc_result),
+ __pa(pdc_result2), mod_index);
+ convert_to_wide(pdc_result);
+ memcpy(pdc_mod_info, pdc_result, sizeof(*pdc_mod_info));
+ memcpy(mod_path, pdc_result2, sizeof(*mod_path));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ pdc_mod_info->mod_addr = f_extend(pdc_mod_info->mod_addr);
+ return retval;
+}
+
+/**
+ * pdc_system_map_find_addrs - Retrieve additional address ranges.
+ * @pdc_addr_info: Return buffer address.
+ * @mod_index: Fixed address module index.
+ * @addr_index: Address range index.
+ *
+ * Retrieve additional information about subsequent address ranges for modules
+ * with multiple address ranges.
+ */
+int pdc_system_map_find_addrs(struct pdc_system_map_addr_info *pdc_addr_info,
+ long mod_index, long addr_index)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_ADDRESS, __pa(pdc_result),
+ mod_index, addr_index);
+ convert_to_wide(pdc_result);
+ memcpy(pdc_addr_info, pdc_result, sizeof(*pdc_addr_info));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ pdc_addr_info->mod_addr = f_extend(pdc_addr_info->mod_addr);
+ return retval;
+}
+
+/**
+ * pdc_model_info - Return model information about the processor.
+ * @model: The return buffer.
+ *
+ * Returns the version numbers, identifiers, and capabilities from the processor module.
+ */
+int pdc_model_info(struct pdc_model *model)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_INFO, __pa(pdc_result), 0);
+ convert_to_wide(pdc_result);
+ memcpy(model, pdc_result, sizeof(*model));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_model_sysmodel - Get the system model name.
+ * @name: A char array of at least 81 characters.
+ *
+ * Get system model name from PDC ROM (e.g. 9000/715 or 9000/778/B160L).
+ * Using OS_ID_HPUX will return the equivalent of the 'modelname' command
+ * on HP/UX.
+ */
+int pdc_model_sysmodel(char *name)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_SYSMODEL, __pa(pdc_result),
+ OS_ID_HPUX, __pa(name));
+ convert_to_wide(pdc_result);
+
+ if (retval == PDC_OK) {
+ name[pdc_result[0]] = '\0'; /* add trailing '\0' */
+ } else {
+ name[0] = 0;
+ }
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
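+
+/*
+ * Illustrative use (hypothetical caller): the buffer must hold at least
+ * 81 bytes, e.g.
+ *
+ *	char model[81];
+ *	if (pdc_model_sysmodel(model) == PDC_OK)
+ *		printk(KERN_INFO "model name: %s\n", model);
+ */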
+
+/**
+ * pdc_model_versions - Identify the version number of each processor.
+ * @versions: The return buffer.
+ * @id: The id of the processor component to check.
+ *
+ * Returns the version number for each processor component.
+ *
+ * This comment was here before, but I do not know what it means :( -RB
+ * id: 0 = cpu revision, 1 = boot-rom-version
+ */
+int pdc_model_versions(unsigned long *versions, int id)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_VERSIONS, __pa(pdc_result), id);
+ convert_to_wide(pdc_result);
+ *versions = pdc_result[0];
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_model_cpuid - Returns the CPU_ID.
+ * @cpu_id: The return buffer.
+ *
+ * Returns the CPU_ID value which uniquely identifies the cpu portion of
+ * the processor module.
+ */
+int pdc_model_cpuid(unsigned long *cpu_id)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ pdc_result[0] = 0; /* preset zero (call may not be implemented!) */
+ retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CPU_ID, __pa(pdc_result), 0);
+ convert_to_wide(pdc_result);
+ *cpu_id = pdc_result[0];
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_model_capabilities - Returns the platform capabilities.
+ * @capabilities: The return buffer.
+ *
+ * Returns information about platform support for 32- and/or 64-bit
+ * OSes, IO-PDIR coherency, and virtual aliasing.
+ */
+int pdc_model_capabilities(unsigned long *capabilities)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ pdc_result[0] = 0; /* preset zero (call may not be implemented!) */
+ retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, __pa(pdc_result), 0);
+ convert_to_wide(pdc_result);
+ if (retval == PDC_OK) {
+ *capabilities = pdc_result[0];
+ } else {
+ *capabilities = PDC_MODEL_OS32;
+ }
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_cache_info - Return cache and TLB information.
+ * @cache_info: The return buffer.
+ *
+ * Returns information about the processor's cache and TLB.
+ */
+int pdc_cache_info(struct pdc_cache_info *cache_info)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_CACHE, PDC_CACHE_INFO, __pa(pdc_result), 0);
+ convert_to_wide(pdc_result);
+ memcpy(cache_info, pdc_result, sizeof(*cache_info));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_spaceid_bits - Return whether Space ID hashing is turned on.
+ * @space_bits: Should be 0, if not, bad mojo!
+ *
+ * Returns information about Space ID hashing.
+ */
+int pdc_spaceid_bits(unsigned long *space_bits)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ pdc_result[0] = 0;
+ retval = mem_pdc_call(PDC_CACHE, PDC_CACHE_RET_SPID, __pa(pdc_result), 0);
+ convert_to_wide(pdc_result);
+ *space_bits = pdc_result[0];
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+#ifndef CONFIG_PA20
+/**
+ * pdc_btlb_info - Return block TLB information.
+ * @btlb: The return buffer.
+ *
+ * Returns information about the hardware Block TLB.
+ */
+int pdc_btlb_info(struct pdc_btlb_info *btlb)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
+ memcpy(btlb, pdc_result, sizeof(*btlb));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ if(retval < 0) {
+ btlb->max_size = 0;
+ }
+ return retval;
+}
+
+/**
+ * pdc_mem_map_hpa - Find fixed module information.
+ * @address: The return buffer
+ * @mod_path: pointer to dev path structure.
+ *
+ * This call was developed for S700 workstations to allow the kernel to find
+ * the I/O devices (Core I/O). In the future (Kittyhawk and beyond) this
+ * call will be replaced (on workstations) by the architected PDC_SYSTEM_MAP
+ * call.
+ *
+ * This call is supported by all existing S700 workstations (up to Gecko).
+ */
+int pdc_mem_map_hpa(struct pdc_memory_map *address,
+ struct pdc_module_path *mod_path)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ memcpy(pdc_result2, mod_path, sizeof(*mod_path));
+ retval = mem_pdc_call(PDC_MEM_MAP, PDC_MEM_MAP_HPA, __pa(pdc_result),
+ __pa(pdc_result2));
+ memcpy(address, pdc_result, sizeof(*address));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+#endif /* !CONFIG_PA20 */
+
+/**
+ * pdc_lan_station_id - Get the LAN address.
+ * @lan_addr: The return buffer.
+ * @hpa: The network device HPA.
+ *
+ * Get the LAN station address when it is not directly available from the LAN hardware.
+ */
+int pdc_lan_station_id(char *lan_addr, unsigned long hpa)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_LAN_STATION_ID, PDC_LAN_STATION_ID_READ,
+ __pa(pdc_result), hpa);
+ if (retval < 0) {
+ /* FIXME: else read MAC from NVRAM */
+ memset(lan_addr, 0, PDC_LAN_STATION_ID_SIZE);
+ } else {
+ memcpy(lan_addr, pdc_result, PDC_LAN_STATION_ID_SIZE);
+ }
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_lan_station_id);
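+
+/*
+ * Illustrative use (hypothetical caller): a LAN driver may fall back to PDC
+ * when the MAC address is not readable from the hardware, e.g.
+ *
+ *	char addr[PDC_LAN_STATION_ID_SIZE];
+ *	if (pdc_lan_station_id(addr, hpa) != PDC_OK)
+ *		use a locally administered fallback address
+ */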
+
+/**
+ * pdc_stable_read - Read data from Stable Storage.
+ * @staddr: Stable Storage address to access.
+ * @memaddr: The memory address where Stable Storage data shall be copied.
+ * @count: number of bytes to transfer. count is multiple of 4.
+ *
+ * This PDC call reads from the Stable Storage address supplied in staddr
+ * and copies count bytes to the memory address memaddr.
+ * The call will fail if staddr+count > PDC_STABLE size.
+ */
+int pdc_stable_read(unsigned long staddr, void *memaddr, unsigned long count)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_READ, staddr,
+ __pa(pdc_result), count);
+ convert_to_wide(pdc_result);
+ memcpy(memaddr, pdc_result, count);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_stable_read);
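+
+/*
+ * Illustrative use (hypothetical caller): read the first word of Stable
+ * Storage; count must be a multiple of 4, e.g.
+ *
+ *	u32 word;
+ *	if (pdc_stable_read(0, &word, sizeof(word)) == PDC_OK)
+ *		word now holds bytes 0-3 of Stable Storage
+ */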
+
+/**
+ * pdc_stable_write - Write data to Stable Storage.
+ * @staddr: Stable Storage address to access.
+ * @memaddr: The memory address where Stable Storage data shall be read from.
+ * @count: number of bytes to transfer. count is multiple of 4.
+ *
+ * This PDC call reads count bytes from the supplied memaddr address,
+ * and copies count bytes to the Stable Storage address staddr.
+ * The call will fail if staddr+count > PDC_STABLE size.
+ */
+int pdc_stable_write(unsigned long staddr, void *memaddr, unsigned long count)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ memcpy(pdc_result, memaddr, count);
+ convert_to_wide(pdc_result);
+ retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_WRITE, staddr,
+ __pa(pdc_result), count);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_stable_write);
+
+/**
+ * pdc_stable_get_size - Get Stable Storage size in bytes.
+ * @size: pointer where the size will be stored.
+ *
+ * This PDC call returns the number of bytes in the processor's Stable
+ * Storage, which is the number of contiguous bytes implemented in Stable
+ * Storage starting from staddr=0. The size is an unsigned 64-bit integer
+ * which is a multiple of four.
+ */
+int pdc_stable_get_size(unsigned long *size)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_RETURN_SIZE, __pa(pdc_result));
+ *size = pdc_result[0];
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_stable_get_size);
+
+/**
+ * pdc_stable_verify_contents - Checks that Stable Storage contents are valid.
+ *
+ * This PDC call is meant to be used to check the integrity of the current
+ * contents of Stable Storage.
+ */
+int pdc_stable_verify_contents(void)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_VERIFY_CONTENTS);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_stable_verify_contents);
+
+/**
+ * pdc_stable_initialize - Sets Stable Storage contents to zero and
+ * initializes the validity indicator.
+ *
+ * This PDC call will erase all contents of Stable Storage. Use with care!
+ */
+int pdc_stable_initialize(void)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_INITIALIZE);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_stable_initialize);
+
+/**
+ * pdc_get_initiator - Get the SCSI Interface Card params (SCSI ID, SDTR, SE or LVD)
+ * @hwpath: fully bc.mod style path to the device.
+ * @initiator: the array to return the result into
+ *
+ * Get the SCSI operational parameters from PDC.
+ * Needed since HP-UX never used BIOS or symbios card NVRAM.
+ * Most ncr/sym cards won't have an entry and just use whatever
+ * capabilities the card reports (e.g. Ultra, LVD). But there are
+ * several cases where it's useful:
+ * o set SCSI id for Multi-initiator clusters,
+ * o cable too long (i.e. SE scsi at 10 MHz won't support 6m length),
+ * o bus width exported is less than what the interface chip supports.
+ */
+int pdc_get_initiator(struct hardware_path *hwpath, struct pdc_initiator *initiator)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+
+/* BCJ-XXXX series boxes. E.G. "9000/785/C3000" */
+#define IS_SPROCKETS() (strlen(boot_cpu_data.pdc.sys_model_name) == 14 && \
+ strncmp(boot_cpu_data.pdc.sys_model_name, "9000/785", 8) == 0)
+
+ retval = mem_pdc_call(PDC_INITIATOR, PDC_GET_INITIATOR,
+ __pa(pdc_result), __pa(hwpath));
+ if (retval < PDC_OK)
+ goto out;
+
+ if (pdc_result[0] < 16) {
+ initiator->host_id = pdc_result[0];
+ } else {
+ initiator->host_id = -1;
+ }
+
+ /*
+ * Sprockets and Piranha return 20 or 40 (MT/s). Prelude returns
+ * 1, 2, 5 or 10 for 5, 10, 20 or 40 MT/s, respectively
+ */
+ switch (pdc_result[1]) {
+ case 1: initiator->factor = 50; break;
+ case 2: initiator->factor = 25; break;
+ case 5: initiator->factor = 12; break;
+ case 25: initiator->factor = 10; break;
+ case 20: initiator->factor = 12; break;
+ case 40: initiator->factor = 10; break;
+ default: initiator->factor = -1; break;
+ }
+
+ if (IS_SPROCKETS()) {
+ initiator->width = pdc_result[4];
+ initiator->mode = pdc_result[5];
+ } else {
+ initiator->width = -1;
+ initiator->mode = -1;
+ }
+
+ out:
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return (retval >= PDC_OK);
+}
+EXPORT_SYMBOL(pdc_get_initiator);
+
+
+/**
+ * pdc_pci_irt_size - Get the number of entries in the interrupt routing table.
+ * @num_entries: The return buffer.
+ * @hpa: The HPA of the device.
+ *
+ * This PDC function returns the number of entries in the interrupt routing
+ * table for the I/O adapter with the given HPA.
+ * Similar to the PDC_PAT calls - but added for Forte/Allegro boxes.
+ */
+int pdc_pci_irt_size(unsigned long *num_entries, unsigned long hpa)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_GET_INT_TBL_SIZE,
+ __pa(pdc_result), hpa);
+ convert_to_wide(pdc_result);
+ *num_entries = pdc_result[0];
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_pci_irt - Get the PCI interrupt routing table.
+ * @num_entries: The number of entries in the table.
+ * @hpa: The Hard Physical Address of the device.
+ * @tbl: The return buffer for the routing table; must be 8-byte aligned.
+ *
+ * Get the PCI interrupt routing table for the device at the given HPA.
+ * Similar to the PDC_PAT calls - but added for Forte/Allegro boxes.
+ */
+int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl)
+{
+ int retval;
+ unsigned long flags;
+
+ BUG_ON((unsigned long)tbl & 0x7);
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ pdc_result[0] = num_entries;
+ retval = mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_GET_INT_TBL,
+ __pa(pdc_result), hpa, __pa(tbl));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
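+
+/*
+ * Illustrative use (hypothetical caller): the table buffer must be 8-byte
+ * aligned (see the BUG_ON above), e.g.
+ *
+ *	unsigned long num = 0;
+ *	pdc_pci_irt_size(&num, hpa);
+ *	allocate an 8-byte aligned buffer big enough for num entries,
+ *	then call pdc_pci_irt(num, hpa, tbl);
+ */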
+
+
+#if 0 /* UNTESTED CODE - left here in case someone needs it */
+
+/**
+ * pdc_pci_config_read - Read PCI config space.
+ * @hpa: Token from PDC to indicate which PCI device.
+ * @cfg_addr: Configuration space address to read from.
+ *
+ * Read PCI Configuration space *before* the linux PCI subsystem is running.
+ */
+unsigned int pdc_pci_config_read(void *hpa, unsigned long cfg_addr)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ pdc_result[0] = 0;
+ pdc_result[1] = 0;
+ retval = mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_READ_CONFIG,
+ __pa(pdc_result), hpa, cfg_addr&~3UL, 4UL);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval ? ~0 : (unsigned int) pdc_result[0];
+}
+
+
+/**
+ * pdc_pci_config_write - Write PCI config space.
+ * @hpa: Token from PDC to indicate which PCI device.
+ * @cfg_addr: Configuration space address to write to.
+ * @val: Value we want in the 32-bit register.
+ *
+ * Write PCI Configuration space *before* the linux PCI subsystem is running.
+ */
+void pdc_pci_config_write(void *hpa, unsigned long cfg_addr, unsigned int val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	pdc_result[0] = 0;
+	mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_WRITE_CONFIG,
+			__pa(pdc_result), hpa,
+			cfg_addr&~3UL, 4UL, (unsigned long) val);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+}
+#endif /* UNTESTED CODE */
+
+/**
+ * pdc_tod_read - Read the Time-Of-Day clock.
+ * @tod: The return buffer.
+ *
+ * Read the Time-Of-Day clock from PDC.
+ */
+int pdc_tod_read(struct pdc_tod *tod)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_TOD, PDC_TOD_READ, __pa(pdc_result), 0);
+ convert_to_wide(pdc_result);
+ memcpy(tod, pdc_result, sizeof(*tod));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_tod_read);
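+
+/*
+ * Illustrative use (hypothetical caller, field names per struct pdc_tod):
+ *
+ *	struct pdc_tod tod;
+ *	if (pdc_tod_read(&tod) == PDC_OK)
+ *		printk("RTC: %lu.%06lu\n", tod.tod_sec, tod.tod_usec);
+ */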
+
+/**
+ * pdc_tod_set - Set the Time-Of-Day clock.
+ * @sec: The number of seconds since epoch.
+ * @usec: The number of microseconds.
+ *
+ * Set the Time-Of-Day clock.
+ */
+int pdc_tod_set(unsigned long sec, unsigned long usec)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_TOD, PDC_TOD_WRITE, sec, usec);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_tod_set);
+
+#ifdef CONFIG_64BIT
+int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr,
+ struct pdc_memory_table *tbl, unsigned long entries)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_MEM, PDC_MEM_TABLE, __pa(pdc_result), __pa(pdc_result2), entries);
+ convert_to_wide(pdc_result);
+ memcpy(r_addr, pdc_result, sizeof(*r_addr));
+ memcpy(tbl, pdc_result2, entries * sizeof(*tbl));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+#endif /* CONFIG_64BIT */
+
+/* FIXME: Is this PDC call used anywhere? I could not find a type reference
+ * for ftc_bitmap, so I guessed at unsigned long. Someone who knows what this
+ * does can fix it later. :)
+ */
+int pdc_do_firm_test_reset(unsigned long ftc_bitmap)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BROADCAST_RESET, PDC_DO_FIRM_TEST_RESET,
+ PDC_FIRM_TEST_MAGIC, ftc_bitmap);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/*
+ * pdc_do_reset - Reset the system.
+ *
+ * Reset the system.
+ */
+int pdc_do_reset(void)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BROADCAST_RESET, PDC_DO_RESET);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/*
+ * pdc_soft_power_info - Locate the soft power switch register.
+ * @power_reg: address of soft power register
+ *
+ * Returns the absolute address of the soft power switch register.
+ */
+int __init pdc_soft_power_info(unsigned long *power_reg)
+{
+ int retval;
+ unsigned long flags;
+
+ *power_reg = (unsigned long) (-1);
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_INFO, __pa(pdc_result), 0);
+ if (retval == PDC_OK) {
+ convert_to_wide(pdc_result);
+ *power_reg = f_extend(pdc_result[0]);
+ }
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/*
+ * pdc_soft_power_button - Control the soft power button behaviour
+ * @sw_control: 0 for hardware control, 1 for software control
+ *
+ * This PDC function places the soft power button under software or
+ * hardware control.
+ * Under software control the OS decides when it is safe to shut down
+ * the system. Under hardware control pressing the power button powers
+ * off the system immediately.
+ */
+int pdc_soft_power_button(int sw_control)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_ENABLE, __pa(pdc_result), sw_control);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/*
+ * pdc_io_reset - Hack to avoid overlapping range registers of Bridges devices.
+ * Primarily a problem on T600 (which parisc-linux doesn't support) but
+ * who knows what other platform firmware might do with this OS "hook".
+ */
+void pdc_io_reset(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ mem_pdc_call(PDC_IO, PDC_IO_RESET, 0);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+}
+
+/*
+ * pdc_io_reset_devices - Hack to stop the USB controller
+ *
+ * If PDC used the USB controller, it may still be running and will
+ * crash the machine during IOMMU setup because of DMA that is still
+ * in flight. This PDC call stops the USB controller.
+ * Normally called after calling pdc_io_reset().
+ */
+void pdc_io_reset_devices(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ mem_pdc_call(PDC_IO, PDC_IO_RESET_DEVICES, 0);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+}
+
+/* locked by pdc_console_lock */
+static int __attribute__((aligned(8))) iodc_retbuf[32];
+static char __attribute__((aligned(64))) iodc_dbuf[4096];
+
+/**
+ * pdc_iodc_print - Console print using IODC.
+ * @str: the string to output.
+ * @count: length of str
+ *
+ * Note that only these special chars are architected for console IODC io:
+ * BEL, BS, CR, and LF. Others are passed through.
+ * Since the HP console requires CR+LF to perform a 'newline', we translate
+ * "\n" to "\r\n".
+ */
+int pdc_iodc_print(const unsigned char *str, unsigned count)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ for (i = 0; i < count;) {
+ switch(str[i]) {
+ case '\n':
+ iodc_dbuf[i+0] = '\r';
+ iodc_dbuf[i+1] = '\n';
+ i += 2;
+ goto print;
+ default:
+ iodc_dbuf[i] = str[i];
+ i++;
+ break;
+ }
+ }
+
+print:
+ spin_lock_irqsave(&pdc_lock, flags);
+ real32_call(PAGE0->mem_cons.iodc_io,
+ (unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT,
+ PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers),
+ __pa(iodc_retbuf), 0, __pa(iodc_dbuf), i, 0);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return i;
+}
+
+/**
+ * pdc_iodc_getc - Read a character (non-blocking) from the PDC console.
+ *
+ * Read a character (non-blocking) from the PDC console, returns -1 if
+ * key is not present.
+ */
+int pdc_iodc_getc(void)
+{
+ int ch;
+ int status;
+ unsigned long flags;
+
+ /* Bail if no console input device. */
+ if (!PAGE0->mem_kbd.iodc_io)
+ return 0;
+
+ /* wait for a keyboard (rs232)-input */
+ spin_lock_irqsave(&pdc_lock, flags);
+ real32_call(PAGE0->mem_kbd.iodc_io,
+ (unsigned long)PAGE0->mem_kbd.hpa, ENTRY_IO_CIN,
+ PAGE0->mem_kbd.spa, __pa(PAGE0->mem_kbd.dp.layers),
+ __pa(iodc_retbuf), 0, __pa(iodc_dbuf), 1, 0);
+
+ ch = *iodc_dbuf;
+ status = *iodc_retbuf;
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ if (status == 0)
+ return -1;
+
+ return ch;
+}
+
+int pdc_sti_call(unsigned long func, unsigned long flags,
+ unsigned long inptr, unsigned long outputr,
+ unsigned long glob_cfg)
+{
+ int retval;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&pdc_lock, irqflags);
+ retval = real32_call(func, flags, inptr, outputr, glob_cfg);
+ spin_unlock_irqrestore(&pdc_lock, irqflags);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_sti_call);
+
+#ifdef CONFIG_64BIT
+/**
+ * pdc_pat_cell_get_number - Returns the cell number.
+ * @cell_info: The return buffer.
+ *
+ * This PDC call returns the cell number of the cell from which the call
+ * is made.
+ */
+int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PAT_CELL, PDC_PAT_CELL_GET_NUMBER, __pa(pdc_result));
+ memcpy(cell_info, pdc_result, sizeof(*cell_info));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_cell_module - Retrieve the cell's module information.
+ * @actcnt: The number of bytes written to mem_addr.
+ * @ploc: The physical location.
+ * @mod: The module index.
+ * @view_type: The view of the address type.
+ * @mem_addr: The return buffer.
+ *
+ * This PDC call returns information about each module attached to the cell
+ * at the specified location.
+ */
+int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod,
+ unsigned long view_type, void *mem_addr)
+{
+ int retval;
+ unsigned long flags;
+ static struct pdc_pat_cell_mod_maddr_block result __attribute__ ((aligned (8)));
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PAT_CELL, PDC_PAT_CELL_MODULE, __pa(pdc_result),
+ ploc, mod, view_type, __pa(&result));
+ if(!retval) {
+ *actcnt = pdc_result[0];
+ memcpy(mem_addr, &result, *actcnt);
+ }
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_cpu_get_number - Retrieve the cpu number.
+ * @cpu_info: The return buffer.
+ * @hpa: The Hard Physical Address of the CPU.
+ *
+ * Retrieve the cpu number for the cpu at the specified HPA.
+ */
+int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PAT_CPU, PDC_PAT_CPU_GET_NUMBER,
+ __pa(&pdc_result), hpa);
+ memcpy(cpu_info, pdc_result, sizeof(*cpu_info));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_get_irt_size - Retrieve the number of entries in the cell's interrupt table.
+ * @num_entries: The return value.
+ * @cell_num: The target cell.
+ *
+ * This PDC function returns the number of entries in the specified cell's
+ * interrupt table.
+ */
+int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_GET_PCI_ROUTING_TABLE_SIZE,
+ __pa(pdc_result), cell_num);
+ *num_entries = pdc_result[0];
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_get_irt - Retrieve the cell's interrupt table.
+ * @r_addr: The return buffer.
+ * @cell_num: The target cell.
+ *
+ * This PDC function returns the actual interrupt table for the specified cell.
+ */
+int pdc_pat_get_irt(void *r_addr, unsigned long cell_num)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_GET_PCI_ROUTING_TABLE,
+ __pa(r_addr), cell_num);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_pd_get_addr_map - Retrieve information about memory address ranges.
+ * @actual_len: The return buffer for the number of bytes written.
+ * @mem_addr: Pointer to the memory buffer.
+ * @count: The number of bytes to read from the buffer.
+ * @offset: The offset with respect to the beginning of the buffer.
+ *
+ */
+int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr,
+ unsigned long count, unsigned long offset)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PAT_PD, PDC_PAT_PD_GET_ADDR_MAP, __pa(pdc_result),
+ __pa(pdc_result2), count, offset);
+ *actual_len = pdc_result[0];
+ memcpy(mem_addr, pdc_result2, *actual_len);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_io_pci_cfg_read - Read PCI configuration space.
+ * @pci_addr: PCI configuration space address for which the read request is being made.
+ * @pci_size: Size of read in bytes. Valid values are 1, 2, and 4.
+ * @mem_addr: Pointer to return memory buffer.
+ *
+ */
+int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *mem_addr)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_PCI_CONFIG_READ,
+			__pa(pdc_result), pci_addr, pci_size);
+	switch (pci_size) {
+		case 1: *(u8 *) mem_addr = (u8)  pdc_result[0]; break;
+		case 2: *(u16 *)mem_addr = (u16) pdc_result[0]; break;
+		case 4: *(u32 *)mem_addr = (u32) pdc_result[0]; break;
+	}
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_io_pci_cfg_write - Write PCI configuration space.
+ * @pci_addr: PCI configuration space address for which the write request is being made.
+ * @pci_size: Size of write in bytes. Valid values are 1, 2, and 4.
+ * @val: The 1, 2, or 4 byte value, in the low order end of the argument, to be
+ * written to PCI Config space.
+ *
+ */
+int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_PCI_CONFIG_WRITE,
+ pci_addr, pci_size, val);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+#endif /* CONFIG_64BIT */
+
+
+/***************** 32-bit real-mode calls ***********/
+/*
+ * The struct below is used to overlay real_stack (real2.S), preparing a
+ * 32-bit call frame. real32_call_asm() then uses this stack in narrow
+ * real mode.
+ */
+
+struct narrow_stack {
+ /* use int, not long which is 64 bits */
+ unsigned int arg13;
+ unsigned int arg12;
+ unsigned int arg11;
+ unsigned int arg10;
+ unsigned int arg9;
+ unsigned int arg8;
+ unsigned int arg7;
+ unsigned int arg6;
+ unsigned int arg5;
+ unsigned int arg4;
+ unsigned int arg3;
+ unsigned int arg2;
+ unsigned int arg1;
+ unsigned int arg0;
+ unsigned int frame_marker[8];
+ unsigned int sp;
+ /* in reality, there's nearly 8k of stack after this */
+};
+
+long real32_call(unsigned long fn, ...)
+{
+ va_list args;
+ extern struct narrow_stack real_stack;
+ extern unsigned long real32_call_asm(unsigned int *,
+ unsigned int *,
+ unsigned int);
+
+ va_start(args, fn);
+ real_stack.arg0 = va_arg(args, unsigned int);
+ real_stack.arg1 = va_arg(args, unsigned int);
+ real_stack.arg2 = va_arg(args, unsigned int);
+ real_stack.arg3 = va_arg(args, unsigned int);
+ real_stack.arg4 = va_arg(args, unsigned int);
+ real_stack.arg5 = va_arg(args, unsigned int);
+ real_stack.arg6 = va_arg(args, unsigned int);
+ real_stack.arg7 = va_arg(args, unsigned int);
+ real_stack.arg8 = va_arg(args, unsigned int);
+ real_stack.arg9 = va_arg(args, unsigned int);
+ real_stack.arg10 = va_arg(args, unsigned int);
+ real_stack.arg11 = va_arg(args, unsigned int);
+ real_stack.arg12 = va_arg(args, unsigned int);
+ real_stack.arg13 = va_arg(args, unsigned int);
+ va_end(args);
+
+ return real32_call_asm(&real_stack.sp, &real_stack.arg0, fn);
+}
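+
+/*
+ * Illustrative only: pdc_iodc_print() above is a typical caller -- up to 14
+ * arguments are copied into the narrow_stack frame as 32-bit values and
+ * real32_call_asm() (real2.S) runs the call in narrow real mode.
+ */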
+
+#ifdef CONFIG_64BIT
+/***************** 64-bit real-mode calls ***********/
+
+struct wide_stack {
+ unsigned long arg0;
+ unsigned long arg1;
+ unsigned long arg2;
+ unsigned long arg3;
+ unsigned long arg4;
+ unsigned long arg5;
+ unsigned long arg6;
+ unsigned long arg7;
+ unsigned long arg8;
+ unsigned long arg9;
+ unsigned long arg10;
+ unsigned long arg11;
+ unsigned long arg12;
+ unsigned long arg13;
+ unsigned long frame_marker[2]; /* rp, previous sp */
+ unsigned long sp;
+ /* in reality, there's nearly 8k of stack after this */
+};
+
+long real64_call(unsigned long fn, ...)
+{
+ va_list args;
+ extern struct wide_stack real64_stack;
+ extern unsigned long real64_call_asm(unsigned long *,
+ unsigned long *,
+ unsigned long);
+
+ va_start(args, fn);
+ real64_stack.arg0 = va_arg(args, unsigned long);
+ real64_stack.arg1 = va_arg(args, unsigned long);
+ real64_stack.arg2 = va_arg(args, unsigned long);
+ real64_stack.arg3 = va_arg(args, unsigned long);
+ real64_stack.arg4 = va_arg(args, unsigned long);
+ real64_stack.arg5 = va_arg(args, unsigned long);
+ real64_stack.arg6 = va_arg(args, unsigned long);
+ real64_stack.arg7 = va_arg(args, unsigned long);
+ real64_stack.arg8 = va_arg(args, unsigned long);
+ real64_stack.arg9 = va_arg(args, unsigned long);
+ real64_stack.arg10 = va_arg(args, unsigned long);
+ real64_stack.arg11 = va_arg(args, unsigned long);
+ real64_stack.arg12 = va_arg(args, unsigned long);
+ real64_stack.arg13 = va_arg(args, unsigned long);
+ va_end(args);
+
+ return real64_call_asm(&real64_stack.sp, &real64_stack.arg0, fn);
+}
+
+#endif /* CONFIG_64BIT */
+
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
new file mode 100644
index 00000000..5beb97ba
--- /dev/null
+++ b/arch/parisc/kernel/ftrace.c
@@ -0,0 +1,185 @@
+/*
+ * Code for tracing calls in Linux kernel.
+ * Copyright (C) 2009 Helge Deller <deller@gmx.de>
+ *
+ * based on code for x86 which is:
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ * future possible enhancements:
+ * - add CONFIG_DYNAMIC_FTRACE
+ * - add CONFIG_STACK_TRACER
+ */
+
+#include <linux/init.h>
+#include <linux/ftrace.h>
+
+#include <asm/sections.h>
+#include <asm/ftrace.h>
+
+
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/* Add a function return address to the trace stack on thread info. */
+static int push_return_trace(unsigned long ret, unsigned long long time,
+ unsigned long func, int *depth)
+{
+ int index;
+
+ if (!current->ret_stack)
+ return -EBUSY;
+
+ /* The return trace stack is full */
+ if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+ atomic_inc(&current->trace_overrun);
+ return -EBUSY;
+ }
+
+ index = ++current->curr_ret_stack;
+ barrier();
+ current->ret_stack[index].ret = ret;
+ current->ret_stack[index].func = func;
+ current->ret_stack[index].calltime = time;
+ *depth = index;
+
+ return 0;
+}
+
+/* Pop a function return address from the trace stack on thread info. */
+static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+ int index;
+
+ index = current->curr_ret_stack;
+
+ if (unlikely(index < 0)) {
+ ftrace_graph_stop();
+ WARN_ON(1);
+		/* Might as well panic, otherwise we have nowhere to go */
+ *ret = (unsigned long)
+ dereference_function_descriptor(&panic);
+ return;
+ }
+
+ *ret = current->ret_stack[index].ret;
+ trace->func = current->ret_stack[index].func;
+ trace->calltime = current->ret_stack[index].calltime;
+ trace->overrun = atomic_read(&current->trace_overrun);
+ trace->depth = index;
+ barrier();
+ current->curr_ret_stack--;
+
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(unsigned long retval0,
+ unsigned long retval1)
+{
+ struct ftrace_graph_ret trace;
+ unsigned long ret;
+
+ pop_return_trace(&trace, &ret);
+ trace.rettime = local_clock();
+ ftrace_graph_return(&trace);
+
+ if (unlikely(!ret)) {
+ ftrace_graph_stop();
+ WARN_ON(1);
+ /* Might as well panic. What else to do? */
+ ret = (unsigned long)
+ dereference_function_descriptor(&panic);
+ }
+
+ /* HACK: we hand over the old functions' return values
+ in %r23 and %r24. Assembly in entry.S will take care
+ and move those to their final registers %ret0 and %ret1 */
+ asm( "copy %0, %%r23 \n\t"
+ "copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) );
+
+ return ret;
+}
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+ unsigned long old;
+ unsigned long long calltime;
+ struct ftrace_graph_ent trace;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ old = *parent;
+ *parent = (unsigned long)
+ dereference_function_descriptor(&return_to_handler);
+
+ if (unlikely(!__kernel_text_address(old))) {
+ ftrace_graph_stop();
+ *parent = old;
+ WARN_ON(1);
+ return;
+ }
+
+ calltime = local_clock();
+
+ if (push_return_trace(old, calltime,
+ self_addr, &trace.depth) == -EBUSY) {
+ *parent = old;
+ return;
+ }
+
+ trace.func = self_addr;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ *parent = old;
+ }
+}
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+
+void ftrace_function_trampoline(unsigned long parent,
+ unsigned long self_addr,
+ unsigned long org_sp_gr3)
+{
+ extern ftrace_func_t ftrace_trace_function;
+
+ if (function_trace_stop)
+ return;
+
+ if (ftrace_trace_function != ftrace_stub) {
+ ftrace_trace_function(parent, self_addr);
+ return;
+ }
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if (ftrace_graph_entry && ftrace_graph_return) {
+ unsigned long sp;
+ unsigned long *parent_rp;
+
+ asm volatile ("copy %%r30, %0" : "=r"(sp));
+		/* sanity check: is the stack pointer we got from the
+		   assembler function in entry.S within a reasonable
+		   range of the current stack pointer? */
+ if ((sp - org_sp_gr3) > 0x400)
+ return;
+
+ /* calculate pointer to %rp in stack */
+ parent_rp = (unsigned long *) org_sp_gr3 - 0x10;
+ /* sanity check: parent_rp should hold parent */
+ if (*parent_rp != parent)
+ return;
+
+ prepare_ftrace_return(parent_rp, self_addr);
+ return;
+ }
+#endif
+}
+
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
new file mode 100644
index 00000000..f48a640b
--- /dev/null
+++ b/arch/parisc/kernel/hardware.c
@@ -0,0 +1,1383 @@
+/*
+ * Hardware descriptions for HP 9000 based hardware, including
+ * system types, SCSI controllers, DMA controllers, HPPB controllers
+ * and lots more.
+ *
+ * Based on the document "PA-RISC 1.1 I/O Firmware Architecture
+ * Reference Specification", March 7, 1999, version 0.96. This
+ * is available at http://parisc-linux.org/documentation/
+ *
+ * Copyright 1999 by Alex deVries <alex@onefishtwo.ca>
+ * and copyright 1999 The Puffin Group Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+
+#include <asm/hardware.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+/*
+ * HP PARISC Hardware Database
+ * Access to this database is only possible during bootup
+ * so don't reference this table after starting the init process
+ */
+
+static struct hp_hardware hp_hardware_list[] __devinitdata = {
+ {HPHW_NPROC,0x01,0x4,0x0,"Indigo (840, 930)"},
+ {HPHW_NPROC,0x8,0x4,0x01,"Firefox(825,925)"},
+ {HPHW_NPROC,0xA,0x4,0x01,"Top Gun (835,834,935,635)"},
+ {HPHW_NPROC,0xB,0x4,0x01,"Technical Shogun (845, 645)"},
+ {HPHW_NPROC,0xF,0x4,0x01,"Commercial Shogun (949)"},
+ {HPHW_NPROC,0xC,0x4,0x01,"Cheetah (850, 950)"},
+ {HPHW_NPROC,0x80,0x4,0x01,"Cheetah (950S)"},
+ {HPHW_NPROC,0x81,0x4,0x01,"Jaguar (855, 955)"},
+ {HPHW_NPROC,0x82,0x4,0x01,"Cougar (860, 960)"},
+ {HPHW_NPROC,0x83,0x4,0x13,"Panther (865, 870, 980)"},
+ {HPHW_NPROC,0x100,0x4,0x01,"Burgundy (810)"},
+ {HPHW_NPROC,0x101,0x4,0x01,"SilverFox Low (822, 922)"},
+ {HPHW_NPROC,0x102,0x4,0x01,"SilverFox High (832, 932)"},
+ {HPHW_NPROC,0x103,0x4,0x01,"Lego, SilverLite (815, 808, 920)"},
+ {HPHW_NPROC,0x104,0x4,0x03,"SilverBullet Low (842, 948)"},
+ {HPHW_NPROC,0x105,0x4,0x03,"SilverBullet High (852, 958)"},
+ {HPHW_NPROC,0x106,0x4,0x81,"Oboe"},
+ {HPHW_NPROC,0x180,0x4,0x12,"Dragon"},
+ {HPHW_NPROC,0x181,0x4,0x13,"Chimera (890, 990, 992)"},
+ {HPHW_NPROC,0x182,0x4,0x91,"TNT 100 (891,T500)"},
+ {HPHW_NPROC,0x183,0x4,0x91,"TNT 120 (892,T520)"},
+ {HPHW_NPROC,0x184,0x4,0x91,"Jade 180 U (893,T540)"},
+ {HPHW_NPROC,0x1FF,0x4,0x91,"Hitachi X Processor"},
+ {HPHW_NPROC,0x200,0x4,0x81,"Cobra (720)"},
+ {HPHW_NPROC,0x201,0x4,0x81,"Coral (750)"},
+ {HPHW_NPROC,0x202,0x4,0x81,"King Cobra (730)"},
+ {HPHW_NPROC,0x203,0x4,0x81,"Hardball (735/99)"},
+ {HPHW_NPROC,0x204,0x4,0x81,"Coral II (755/99)"},
+ {HPHW_NPROC,0x205,0x4,0x81,"Coral II (755/125)"},
+ {HPHW_NPROC,0x205,0x4,0x91,"Snake Eagle "},
+ {HPHW_NPROC,0x206,0x4,0x81,"Snake Cheetah (735/130)"},
+ {HPHW_NPROC,0x280,0x4,0x81,"Nova Low (817, 827, 957, 957LX)"},
+ {HPHW_NPROC,0x281,0x4,0x81,"Nova High (837, 847, 857, 967, 967LX)"},
+ {HPHW_NPROC,0x282,0x4,0x81,"Nova8 (807, 917, 917LX, 927,927LX, 937, 937LX, 947,947LX)"},
+ {HPHW_NPROC,0x283,0x4,0x81,"Nova64 (867, 877, 977)"},
+ {HPHW_NPROC,0x284,0x4,0x81,"TNova (887, 897, 987)"},
+ {HPHW_NPROC,0x285,0x4,0x81,"TNova64"},
+ {HPHW_NPROC,0x286,0x4,0x91,"Hydra64 (Nova)"},
+ {HPHW_NPROC,0x287,0x4,0x91,"Hydra96 (Nova)"},
+ {HPHW_NPROC,0x288,0x4,0x81,"TNova96"},
+ {HPHW_NPROC,0x300,0x4,0x81,"Bushmaster (710)"},
+ {HPHW_NPROC,0x302,0x4,0x81,"Flounder (705)"},
+ {HPHW_NPROC,0x310,0x4,0x81,"Scorpio (715/50)"},
+ {HPHW_NPROC,0x311,0x4,0x81,"Scorpio Jr.(715/33)"},
+ {HPHW_NPROC,0x312,0x4,0x81,"Strider-50 (715S/50)"},
+ {HPHW_NPROC,0x313,0x4,0x81,"Strider-33 (715S/33)"},
+ {HPHW_NPROC,0x314,0x4,0x81,"Trailways-50 (715T/50)"},
+ {HPHW_NPROC,0x315,0x4,0x81,"Trailways-33 (715T/33)"},
+ {HPHW_NPROC,0x316,0x4,0x81,"Scorpio Sr.(715/75)"},
+ {HPHW_NPROC,0x317,0x4,0x81,"Scorpio 100 (715/100)"},
+ {HPHW_NPROC,0x318,0x4,0x81,"Spectra (725/50)"},
+ {HPHW_NPROC,0x319,0x4,0x81,"Spectra (725/75)"},
+ {HPHW_NPROC,0x320,0x4,0x81,"Spectra (725/100)"},
+ {HPHW_NPROC,0x401,0x4,0x81,"Pace (745i, 747i)"},
+ {HPHW_NPROC,0x402,0x4,0x81,"Sidewinder (742i)"},
+ {HPHW_NPROC,0x403,0x4,0x81,"Fast Pace"},
+ {HPHW_NPROC,0x480,0x4,0x81,"Orville (E23)"},
+ {HPHW_NPROC,0x481,0x4,0x81,"Wilbur (E25)"},
+ {HPHW_NPROC,0x482,0x4,0x81,"WB-80 (E35)"},
+ {HPHW_NPROC,0x483,0x4,0x81,"WB-96 (E45)"},
+ {HPHW_NPROC,0x484,0x4,0x81,"UL Proc L-100 (811/D210,D310)"},
+ {HPHW_NPROC,0x485,0x4,0x81,"UL Proc L-75 (801/D200)"},
+ {HPHW_NPROC,0x501,0x4,0x81,"Merlin L2 132 (9000/778/B132L)"},
+ {HPHW_NPROC,0x502,0x4,0x81,"Merlin L2 160 (9000/778/B160L)"},
+ {HPHW_NPROC,0x503,0x4,0x81,"Merlin L2+ 132 (9000/778/B132L)"},
+ {HPHW_NPROC,0x504,0x4,0x81,"Merlin L2+ 180 (9000/778/B180L)"},
+ {HPHW_NPROC,0x505,0x4,0x81,"Raven L2 132 (9000/778/C132L)"},
+ {HPHW_NPROC,0x506,0x4,0x81,"Raven L2 160 (9000/779/C160L)"},
+ {HPHW_NPROC,0x507,0x4,0x81,"Raven L2 180 (9000/779/C180L)"},
+ {HPHW_NPROC,0x508,0x4,0x81,"Raven L2 160 (9000/779/C160L)"},
+ {HPHW_NPROC,0x509,0x4,0x81,"712/132 L2 Upgrade"},
+ {HPHW_NPROC,0x50A,0x4,0x81,"712/160 L2 Upgrade"},
+ {HPHW_NPROC,0x50B,0x4,0x81,"715/132 L2 Upgrade"},
+ {HPHW_NPROC,0x50C,0x4,0x81,"715/160 L2 Upgrade"},
+ {HPHW_NPROC,0x50D,0x4,0x81,"Rocky2 L2 120"},
+ {HPHW_NPROC,0x50E,0x4,0x81,"Rocky2 L2 150"},
+ {HPHW_NPROC,0x50F,0x4,0x81,"Anole L2 132 (744)"},
+ {HPHW_NPROC,0x510,0x4,0x81,"Anole L2 165 (744)"},
+ {HPHW_NPROC,0x511,0x4,0x81,"Kiji L2 132"},
+ {HPHW_NPROC,0x512,0x4,0x81,"UL L2 132 (803/D220,D320)"},
+ {HPHW_NPROC,0x513,0x4,0x81,"UL L2 160 (813/D220,D320)"},
+ {HPHW_NPROC,0x514,0x4,0x81,"Merlin Jr L2 132"},
+ {HPHW_NPROC,0x515,0x4,0x81,"Staccato L2 132"},
+ {HPHW_NPROC,0x516,0x4,0x81,"Staccato L2 180 (A Class 180)"},
+ {HPHW_NPROC,0x580,0x4,0x81,"KittyHawk DC2-100 (K100)"},
+ {HPHW_NPROC,0x581,0x4,0x91,"KittyHawk DC3-120 (K210)"},
+ {HPHW_NPROC,0x582,0x4,0x91,"KittyHawk DC3 100 (K400)"},
+ {HPHW_NPROC,0x583,0x4,0x91,"KittyHawk DC3 120 (K410)"},
+ {HPHW_NPROC,0x584,0x4,0x91,"LighteningHawk T120"},
+ {HPHW_NPROC,0x585,0x4,0x91,"SkyHawk 100"},
+ {HPHW_NPROC,0x586,0x4,0x91,"SkyHawk 120"},
+ {HPHW_NPROC,0x587,0x4,0x81,"UL Proc 1-way T'120"},
+ {HPHW_NPROC,0x588,0x4,0x91,"UL Proc 2-way T'120"},
+ {HPHW_NPROC,0x589,0x4,0x81,"UL Proc 1-way T'100 (821/D250,D350)"},
+ {HPHW_NPROC,0x58A,0x4,0x91,"UL Proc 2-way T'100 (831/D250,D350)"},
+ {HPHW_NPROC,0x58B,0x4,0x91,"KittyHawk DC2 100 (K200)"},
+ {HPHW_NPROC,0x58C,0x4,0x91,"ThunderHawk DC3- 120 1M (K220)"},
+ {HPHW_NPROC,0x58D,0x4,0x91,"ThunderHawk DC3 120 1M (K420)"},
+ {HPHW_NPROC,0x58E,0x4,0x81,"Raven 120 T'"},
+ {HPHW_NPROC,0x58F,0x4,0x91,"Mohawk 160 U 1M DC3 (K450)"},
+ {HPHW_NPROC,0x590,0x4,0x91,"Mohawk 180 U 1M DC3 (K460)"},
+ {HPHW_NPROC,0x591,0x4,0x91,"Mohawk 200 U 1M DC3"},
+ {HPHW_NPROC,0x592,0x4,0x81,"Raven 100 T'"},
+ {HPHW_NPROC,0x593,0x4,0x91,"FireHawk 160 U"},
+ {HPHW_NPROC,0x594,0x4,0x91,"FireHawk 180 U"},
+ {HPHW_NPROC,0x595,0x4,0x91,"FireHawk 220 U"},
+ {HPHW_NPROC,0x596,0x4,0x91,"FireHawk 240 U"},
+ {HPHW_NPROC,0x597,0x4,0x91,"SPP2000 processor"},
+ {HPHW_NPROC,0x598,0x4,0x81,"Raven U 230 (9000/780/C230)"},
+ {HPHW_NPROC,0x599,0x4,0x81,"Raven U 240 (9000/780/C240)"},
+ {HPHW_NPROC,0x59A,0x4,0x91,"Unlisted but reserved"},
+ {HPHW_NPROC,0x59A,0x4,0x81,"Unlisted but reserved"},
+ {HPHW_NPROC,0x59B,0x4,0x81,"Raven U 160 (9000/780/C160)"},
+ {HPHW_NPROC,0x59C,0x4,0x81,"Raven U 180 (9000/780/C180)"},
+ {HPHW_NPROC,0x59D,0x4,0x81,"Raven U 200 (9000/780/C200)"},
+ {HPHW_NPROC,0x59E,0x4,0x91,"ThunderHawk T' 120"},
+ {HPHW_NPROC,0x59F,0x4,0x91,"Raven U 180+ (9000/780)"},
+ {HPHW_NPROC,0x5A0,0x4,0x81,"UL 1w T120 1MB/1MB (841/D260,D360)"},
+ {HPHW_NPROC,0x5A1,0x4,0x91,"UL 2w T120 1MB/1MB (851/D260,D360)"},
+ {HPHW_NPROC,0x5A2,0x4,0x81,"UL 1w U160 512K/512K (861/D270,D370)"},
+ {HPHW_NPROC,0x5A3,0x4,0x91,"UL 2w U160 512K/512K (871/D270,D370)"},
+ {HPHW_NPROC,0x5A4,0x4,0x91,"Mohawk 160 U 1M DC3- (K250)"},
+ {HPHW_NPROC,0x5A5,0x4,0x91,"Mohawk 180 U 1M DC3- (K260)"},
+ {HPHW_NPROC,0x5A6,0x4,0x91,"Mohawk 200 U 1M DC3-"},
+ {HPHW_NPROC,0x5A7,0x4,0x81,"UL proc 1-way U160 1M/1M"},
+ {HPHW_NPROC,0x5A8,0x4,0x91,"UL proc 2-way U160 1M/1M"},
+ {HPHW_NPROC,0x5A9,0x4,0x81,"UL proc 1-way U180 1M/1M"},
+ {HPHW_NPROC,0x5AA,0x4,0x91,"UL proc 2-way U180 1M/1M"},
+ {HPHW_NPROC,0x5AB,0x4,0x91,"Obsolete"},
+ {HPHW_NPROC,0x5AB,0x4,0x81,"Obsolete"},
+ {HPHW_NPROC,0x5AC,0x4,0x91,"Obsolete"},
+ {HPHW_NPROC,0x5AC,0x4,0x81,"Obsolete"},
+ {HPHW_NPROC,0x5AD,0x4,0x91,"BraveHawk 180MHz DC3-"},
+ {HPHW_NPROC,0x5AE,0x4,0x91,"BraveHawk 200MHz DC3- (898/K370)"},
+ {HPHW_NPROC,0x5AF,0x4,0x91,"BraveHawk 220MHz DC3-"},
+ {HPHW_NPROC,0x5B0,0x4,0x91,"BraveHawk 180MHz DC3"},
+ {HPHW_NPROC,0x5B1,0x4,0x91,"BraveHawk 200MHz DC3 (899/K570)"},
+ {HPHW_NPROC,0x5B2,0x4,0x91,"BraveHawk 220MHz DC3"},
+ {HPHW_NPROC,0x5B3,0x4,0x91,"FireHawk 200"},
+ {HPHW_NPROC,0x5B4,0x4,0x91,"SPP2500"},
+ {HPHW_NPROC,0x5B5,0x4,0x91,"SummitHawk U+"},
+ {HPHW_NPROC,0x5B6,0x4,0x91,"DragonHawk U+ 240 DC3"},
+ {HPHW_NPROC,0x5B7,0x4,0x91,"DragonHawk U+ 240 DC3-"},
+ {HPHW_NPROC,0x5B8,0x4,0x91,"SPP2250 240 MHz"},
+ {HPHW_NPROC,0x5B9,0x4,0x81,"UL 1w U+/240 (350/550)"},
+ {HPHW_NPROC,0x5BA,0x4,0x91,"UL 2w U+/240 (350/550)"},
+ {HPHW_NPROC,0x5BB,0x4,0x81,"AllegroHigh W"},
+ {HPHW_NPROC,0x5BC,0x4,0x91,"AllegroLow W"},
+ {HPHW_NPROC,0x5BD,0x4,0x91,"Forte W 2-way"},
+ {HPHW_NPROC,0x5BE,0x4,0x91,"Prelude W"},
+ {HPHW_NPROC,0x5BF,0x4,0x91,"Forte W 4-way"},
+ {HPHW_NPROC,0x5C0,0x4,0x91,"M2250"},
+ {HPHW_NPROC,0x5C1,0x4,0x91,"M2500"},
+ {HPHW_NPROC,0x5C2,0x4,0x91,"Sonata 440"},
+ {HPHW_NPROC,0x5C3,0x4,0x91,"Sonata 360"},
+ {HPHW_NPROC,0x5C4,0x4,0x91,"Rhapsody 440"},
+ {HPHW_NPROC,0x5C5,0x4,0x91,"Rhapsody 360"},
+ {HPHW_NPROC,0x5C6,0x4,0x91,"Raven W 360 (9000/780)"},
+ {HPHW_NPROC,0x5C7,0x4,0x91,"Halfdome W 440"},
+ {HPHW_NPROC,0x5C8,0x4,0x81,"Lego 360 processor"},
+ {HPHW_NPROC,0x5C9,0x4,0x91,"Rhapsody DC- 440"},
+ {HPHW_NPROC,0x5CA,0x4,0x91,"Rhapsody DC- 360"},
+ {HPHW_NPROC,0x5CB,0x4,0x91,"Crescendo 440"},
+ {HPHW_NPROC,0x5CC,0x4,0x91,"Prelude W 440"},
+ {HPHW_NPROC,0x5CD,0x4,0x91,"SPP2600"},
+ {HPHW_NPROC,0x5CE,0x4,0x91,"M2600"},
+ {HPHW_NPROC,0x5CF,0x4,0x81,"Allegro W+"},
+ {HPHW_NPROC,0x5D0,0x4,0x81,"Kazoo W+"},
+ {HPHW_NPROC,0x5D1,0x4,0x91,"Forte W+ 2w"},
+ {HPHW_NPROC,0x5D2,0x4,0x91,"Forte W+ 4w"},
+ {HPHW_NPROC,0x5D3,0x4,0x91,"Prelude W+ 540"},
+ {HPHW_NPROC,0x5D4,0x4,0x91,"Duet W+"},
+ {HPHW_NPROC,0x5D5,0x4,0x91,"Crescendo 550"},
+ {HPHW_NPROC,0x5D6,0x4,0x81,"Crescendo DC- 440"},
+ {HPHW_NPROC,0x5D7,0x4,0x91,"Keystone W+"},
+ {HPHW_NPROC,0x5D8,0x4,0x91,"Rhapsody wave 2 W+ DC-"},
+ {HPHW_NPROC,0x5D9,0x4,0x91,"Rhapsody wave 2 W+"},
+ {HPHW_NPROC,0x5DA,0x4,0x91,"Marcato W+ DC-"},
+ {HPHW_NPROC,0x5DB,0x4,0x91,"Marcato W+"},
+ {HPHW_NPROC,0x5DC,0x4,0x91,"Allegro W2"},
+ {HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
+ {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
+ {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
+ {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
+ {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
+ {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
+ {HPHW_NPROC,0x5E3,0x4,0x91,"Crescendo 750 W2"},
+ {HPHW_NPROC,0x5E4,0x4,0x91,"Keystone/Matterhorn W2 750"},
+ {HPHW_NPROC,0x5E5,0x4,0x91,"PowerBar W+"},
+ {HPHW_NPROC,0x5E6,0x4,0x91,"Keystone/Matterhorn W2 650"},
+ {HPHW_NPROC,0x5E7,0x4,0x91,"Caribe W2 800"},
+ {HPHW_NPROC,0x5E8,0x4,0x91,"Pikes Peak W2"},
+ {HPHW_NPROC,0x5EB,0x4,0x91,"Perf/Leone 875 W2+"},
+ {HPHW_NPROC,0x5FF,0x4,0x91,"Hitachi W"},
+ {HPHW_NPROC,0x600,0x4,0x81,"Gecko (712/60)"},
+ {HPHW_NPROC,0x601,0x4,0x81,"Gecko 80 (712/80)"},
+ {HPHW_NPROC,0x602,0x4,0x81,"Gecko 100 (712/100)"},
+ {HPHW_NPROC,0x603,0x4,0x81,"Anole 64 (743/64)"},
+ {HPHW_NPROC,0x604,0x4,0x81,"Anole 100 (743/100)"},
+ {HPHW_NPROC,0x605,0x4,0x81,"Gecko 120 (712/120)"},
+ {HPHW_NPROC,0x606,0x4,0x81,"Gila 80"},
+ {HPHW_NPROC,0x607,0x4,0x81,"Gila 100"},
+ {HPHW_NPROC,0x608,0x4,0x81,"Gila 120"},
+ {HPHW_NPROC,0x609,0x4,0x81,"Scorpio-L 80"},
+ {HPHW_NPROC,0x60A,0x4,0x81,"Mirage Jr (715/64)"},
+ {HPHW_NPROC,0x60B,0x4,0x81,"Mirage 100"},
+ {HPHW_NPROC,0x60C,0x4,0x81,"Mirage 100+"},
+ {HPHW_NPROC,0x60D,0x4,0x81,"Electra 100"},
+ {HPHW_NPROC,0x60E,0x4,0x81,"Electra 120"},
+ {HPHW_NPROC,0x610,0x4,0x81,"Scorpio-L 100"},
+ {HPHW_NPROC,0x611,0x4,0x81,"Scorpio-L 120"},
+ {HPHW_NPROC,0x612,0x4,0x81,"Spectra-L 80"},
+ {HPHW_NPROC,0x613,0x4,0x81,"Spectra-L 100"},
+ {HPHW_NPROC,0x614,0x4,0x81,"Spectra-L 120"},
+ {HPHW_NPROC,0x615,0x4,0x81,"Piranha 100"},
+ {HPHW_NPROC,0x616,0x4,0x81,"Piranha 120"},
+ {HPHW_NPROC,0x617,0x4,0x81,"Jason 50"},
+ {HPHW_NPROC,0x618,0x4,0x81,"Jason 100"},
+ {HPHW_NPROC,0x619,0x4,0x81,"Mirage 80"},
+ {HPHW_NPROC,0x61A,0x4,0x81,"SAIC L-80"},
+ {HPHW_NPROC,0x61B,0x4,0x81,"Rocky1 L-60"},
+ {HPHW_NPROC,0x61C,0x4,0x81,"Anole T (743/T)"},
+ {HPHW_NPROC,0x67E,0x4,0x81,"Hitachi Tiny 80"},
+ {HPHW_NPROC,0x67F,0x4,0x81,"Hitachi Tiny 64"},
+ {HPHW_NPROC,0x700,0x4,0x91,"NEC Aska Processor"},
+ {HPHW_NPROC,0x880,0x4,0x91,"Orca Mako"},
+ {HPHW_NPROC,0x881,0x4,0x91,"Everest Mako"},
+ {HPHW_NPROC,0x882,0x4,0x91,"Rainier/Medel Mako Slow"},
+ {HPHW_NPROC,0x883,0x4,0x91,"Rainier/Medel Mako Fast"},
+ {HPHW_NPROC,0x884,0x4,0x91,"Mt. Hamilton"},
+ {HPHW_NPROC,0x885,0x4,0x91,"Mt. Hamilton DC-"},
+ {HPHW_NPROC,0x886,0x4,0x91,"Storm Peak Slow DC-"},
+ {HPHW_NPROC,0x887,0x4,0x91,"Storm Peak Slow"},
+ {HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"},
+ {HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"},
+ {HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak Slow"},
+ {HPHW_NPROC,0x88C,0x4,0x91,"Orca Mako+"},
+ {HPHW_NPROC,0x88D,0x4,0x91,"Rainier/Medel Mako+ Slow"},
+ {HPHW_NPROC,0x88E,0x4,0x91,"Rainier/Medel Mako+ Fast"},
+ {HPHW_NPROC,0x894,0x4,0x91,"Mt. Hamilton Fast Mako+"},
+ {HPHW_NPROC,0x895,0x4,0x91,"Storm Peak Slow Mako+"},
+ {HPHW_NPROC,0x896,0x4,0x91,"Storm Peak Fast Mako+"},
+ {HPHW_NPROC,0x897,0x4,0x91,"Storm Peak DC- Slow Mako+"},
+ {HPHW_NPROC,0x898,0x4,0x91,"Storm Peak DC- Fast Mako+"},
+ {HPHW_NPROC,0x899,0x4,0x91,"Mt. Hamilton Slow Mako+"},
+ {HPHW_NPROC,0x89B,0x4,0x91,"Crestone Peak Mako+ Slow"},
+ {HPHW_NPROC,0x89C,0x4,0x91,"Crestone Peak Mako+ Fast"},
+ {HPHW_A_DIRECT, 0x004, 0x0000D, 0x00, "Arrakis MUX"},
+ {HPHW_A_DIRECT, 0x005, 0x0000D, 0x00, "Dyun Kiuh MUX"},
+ {HPHW_A_DIRECT, 0x006, 0x0000D, 0x00, "Baat Kiuh AP/MUX (40299B)"},
+ {HPHW_A_DIRECT, 0x007, 0x0000D, 0x00, "Dino AP"},
+ {HPHW_A_DIRECT, 0x009, 0x0000D, 0x00, "Solaris Direct Connect MUX (J2092A)"},
+ {HPHW_A_DIRECT, 0x00A, 0x0000D, 0x00, "Solaris RS-422/423 MUX (J2093A)"},
+ {HPHW_A_DIRECT, 0x00B, 0x0000D, 0x00, "Solaris RS-422/423 Quadriloops MUX"},
+ {HPHW_A_DIRECT, 0x00C, 0x0000D, 0x00, "Solaris Modem MUX (J2094A)"},
+ {HPHW_A_DIRECT, 0x00D, 0x0000D, 0x00, "Twins Direct Connect MUX"},
+ {HPHW_A_DIRECT, 0x00E, 0x0000D, 0x00, "Twins Modem MUX"},
+ {HPHW_A_DIRECT, 0x00F, 0x0000D, 0x00, "Nautilus RS-485"},
+ {HPHW_A_DIRECT, 0x010, 0x0000D, 0x00, "UltraLight CAP/MUX"},
+ {HPHW_A_DIRECT, 0x015, 0x0000D, 0x00, "Eole CAP/MUX"},
+ {HPHW_A_DIRECT, 0x024, 0x0000D, 0x00, "Sahp Kiuh AP/MUX"},
+ {HPHW_A_DIRECT, 0x034, 0x0000D, 0x00, "Sahp Kiuh Low AP/MUX"},
+ {HPHW_A_DIRECT, 0x044, 0x0000D, 0x00, "Sahp Baat Kiuh AP/MUX"},
+ {HPHW_A_DIRECT, 0x004, 0x0000E, 0x80, "Burgundy RS-232"},
+ {HPHW_A_DIRECT, 0x005, 0x0000E, 0x80, "Silverfox RS-232"},
+ {HPHW_A_DIRECT, 0x006, 0x0000E, 0x80, "Lego RS-232"},
+ {HPHW_A_DIRECT, 0x004, 0x0000F, 0x00, "Peacock Graphics"},
+ {HPHW_A_DIRECT, 0x004, 0x00014, 0x80, "Burgundy HIL"},
+ {HPHW_A_DIRECT, 0x005, 0x00014, 0x80, "Peacock HIL"},
+ {HPHW_A_DIRECT, 0x004, 0x00015, 0x80, "Leonardo"},
+ {HPHW_A_DIRECT, 0x004, 0x00016, 0x80, "HP-PB HRM"},
+ {HPHW_A_DIRECT, 0x004, 0x00017, 0x80, "HP-PB HRC"},
+ {HPHW_A_DIRECT, 0x004, 0x0003A, 0x80, "Skunk Centronics (28655A)"},
+ {HPHW_A_DIRECT, 0x024, 0x0003A, 0x80, "Sahp Kiuh Centronics"},
+ {HPHW_A_DIRECT, 0x044, 0x0003A, 0x80, "Sahp Baat Kiuh Centronics"},
+ {HPHW_A_DIRECT, 0x004, 0x0004E, 0x80, "AT&T DataKit (AMSO)"},
+ {HPHW_A_DIRECT, 0x004, 0x0009B, 0x80, "Test&Meas GSC HPIB"},
+ {HPHW_A_DIRECT, 0x004, 0x000A8, 0x00, "Rocky2-120 Front Keyboard"},
+ {HPHW_A_DIRECT, 0x005, 0x000A8, 0x00, "Rocky2-150 Front Keyboard"},
+ {HPHW_A_DIRECT, 0x004, 0x00101, 0x80, "Hitachi Console Module"},
+ {HPHW_A_DIRECT, 0x004, 0x00102, 0x80, "Hitachi Boot Module"},
+ {HPHW_A_DIRECT, 0x004, 0x00203, 0x80, "MELCO HBMLA MLAIT"},
+ {HPHW_A_DIRECT, 0x004, 0x00208, 0x80, "MELCO HBDPC"},
+ {HPHW_A_DIRECT, 0x004, 0x00300, 0x00, "DCI TWINAX TERM IO MUX"},
+ {HPHW_A_DMA, 0x004, 0x00039, 0x80, "Skunk SCSI (28655A)"},
+ {HPHW_A_DMA, 0x005, 0x00039, 0x80, "KittyHawk CSY Core SCSI"},
+ {HPHW_A_DMA, 0x014, 0x00039, 0x80, "Diablo SCSI"},
+ {HPHW_A_DMA, 0x024, 0x00039, 0x80, "Sahp Kiuh SCSI"},
+ {HPHW_A_DMA, 0x034, 0x00039, 0x80, "Sahp Kiuh Low SCSI"},
+ {HPHW_A_DMA, 0x044, 0x00039, 0x80, "Sahp Baat Kiuh SCSI"},
+ {HPHW_A_DMA, 0x004, 0x0003B, 0x80, "Wizard SCSI"},
+ {HPHW_A_DMA, 0x005, 0x0003B, 0x80, "KittyHawk CSY Core FW-SCSI"},
+ {HPHW_A_DMA, 0x006, 0x0003B, 0x80, "Symbios EPIC FW-SCSI"},
+ {HPHW_A_DMA, 0x004, 0x00040, 0x80, "HP-PB Shazam HPIB (28650A)"},
+ {HPHW_A_DMA, 0x005, 0x00040, 0x80, "Burgundy HPIB"},
+ {HPHW_A_DMA, 0x004, 0x00041, 0x80, "HP-PB HP-FL"},
+ {HPHW_A_DMA, 0x004, 0x00042, 0x80, "HP-PB LoQuix HPIB (28650B)"},
+ {HPHW_A_DMA, 0x004, 0x00043, 0x80, "HP-PB Crypt LoQuix"},
+ {HPHW_A_DMA, 0x004, 0x00044, 0x80, "HP-PB Shazam GPIO (28651A)"},
+ {HPHW_A_DMA, 0x004, 0x00045, 0x80, "HP-PB LoQuix GPIO"},
+ {HPHW_A_DMA, 0x004, 0x00046, 0x80, "2-Port X.25 NIO_ACC (AMSO)"},
+ {HPHW_A_DMA, 0x004, 0x00047, 0x80, "4-Port X.25 NIO_ACC (AMSO)"},
+ {HPHW_A_DMA, 0x004, 0x0004B, 0x80, "LGB Control"},
+ {HPHW_A_DMA, 0x004, 0x0004C, 0x80, "Martian RTI (AMSO)"},
+ {HPHW_A_DMA, 0x004, 0x0004D, 0x80, "ACC Mux (AMSO)"},
+ {HPHW_A_DMA, 0x004, 0x00050, 0x80, "Lanbrusca 802.3 (36967A)"},
+ {HPHW_A_DMA, 0x004, 0x00056, 0x80, "HP-PB LoQuix FDDI"},
+ {HPHW_A_DMA, 0x004, 0x00057, 0x80, "HP-PB LoQuix FDDI (28670A)"},
+ {HPHW_A_DMA, 0x004, 0x0005E, 0x00, "Gecko Add-on Token Ring"},
+ {HPHW_A_DMA, 0x012, 0x00089, 0x80, "Barracuda Add-on FW-SCSI"},
+ {HPHW_A_DMA, 0x013, 0x00089, 0x80, "Bluefish Add-on FW-SCSI"},
+ {HPHW_A_DMA, 0x014, 0x00089, 0x80, "Shrike Add-on FW-SCSI"},
+ {HPHW_A_DMA, 0x015, 0x00089, 0x80, "KittyHawk GSY Core FW-SCSI"},
+ {HPHW_A_DMA, 0x017, 0x00089, 0x80, "Shrike Jade Add-on FW-SCSI (A3644A)"},
+ {HPHW_A_DMA, 0x01F, 0x00089, 0x80, "SkyHawk 100/120 FW-SCSI"},
+ {HPHW_A_DMA, 0x027, 0x00089, 0x80, "Piranha 100 FW-SCSI"},
+ {HPHW_A_DMA, 0x032, 0x00089, 0x80, "Raven T' Core FW-SCSI"},
+ {HPHW_A_DMA, 0x03B, 0x00089, 0x80, "Raven U/L2 Core FW-SCSI"},
+ {HPHW_A_DMA, 0x03C, 0x00089, 0x80, "Merlin 132 Core FW-SCSI"},
+ {HPHW_A_DMA, 0x03D, 0x00089, 0x80, "Merlin 160 Core FW-SCSI"},
+ {HPHW_A_DMA, 0x044, 0x00089, 0x80, "Mohawk Core FW-SCSI"},
+ {HPHW_A_DMA, 0x051, 0x00089, 0x80, "Firehawk FW-SCSI"},
+ {HPHW_A_DMA, 0x058, 0x00089, 0x80, "FireHawk 200 FW-SCSI"},
+ {HPHW_A_DMA, 0x05C, 0x00089, 0x80, "SummitHawk 230 Ultra-SCSI"},
+ {HPHW_A_DMA, 0x014, 0x00091, 0x80, "Baby Hugo Add-on Net FC (A3406A)"},
+ {HPHW_A_DMA, 0x020, 0x00091, 0x80, "Baby Jade Add-on Net FC (A3638A)"},
+ {HPHW_A_DMA, 0x004, 0x00092, 0x80, "GSC+ YLIASTER ATM"},
+ {HPHW_A_DMA, 0x004, 0x00095, 0x80, "Hamlyn GSC+ Network Card"},
+ {HPHW_A_DMA, 0x004, 0x00098, 0x80, "Lo-fat Emulator"},
+ {HPHW_A_DMA, 0x004, 0x0009A, 0x80, "GSC+ Venus ATM"},
+ {HPHW_A_DMA, 0x005, 0x0009A, 0x80, "GSC+ Samorobrive ATM"},
+ {HPHW_A_DMA, 0x004, 0x0009D, 0x80, "HP HSC-PCI Cards"},
+ {HPHW_A_DMA, 0x004, 0x0009E, 0x80, "Alaxis GSC+ 155Mb ATM"},
+ {HPHW_A_DMA, 0x005, 0x0009E, 0x80, "Alaxis GSC+ 622Mb ATM"},
+ {HPHW_A_DMA, 0x05C, 0x0009F, 0x80, "SummitHawk 230 USB"},
+ {HPHW_A_DMA, 0x05C, 0x000A0, 0x80, "SummitHawk 230 100BaseT"},
+ {HPHW_A_DMA, 0x015, 0x000A7, 0x80, "Baby Hugo Add-on mass FC (A3404A)"},
+ {HPHW_A_DMA, 0x018, 0x000A7, 0x80, "Mombasa GS Add-on mass FC (A3591)"},
+ {HPHW_A_DMA, 0x021, 0x000A7, 0x80, "Baby Jade Add-on mass FC (A3636A)"},
+ {HPHW_A_DMA, 0x004, 0x00201, 0x80, "MELCO HCMAP"},
+ {HPHW_A_DMA, 0x004, 0x00202, 0x80, "MELCO HBMLA MLAMA"},
+ {HPHW_A_DMA, 0x004, 0x00205, 0x80, "MELCO HBRFU"},
+ {HPHW_A_DMA, 0x004, 0x00380, 0x80, "Interphase NIO-FC"},
+ {HPHW_A_DMA, 0x004, 0x00381, 0x80, "Interphase NIO-ATM"},
+ {HPHW_A_DMA, 0x004, 0x00382, 0x80, "Interphase NIO-100BaseTX"},
+ {HPHW_BA, 0x004, 0x00070, 0x0, "Cobra Core BA"},
+ {HPHW_BA, 0x005, 0x00070, 0x0, "Coral Core BA"},
+ {HPHW_BA, 0x006, 0x00070, 0x0, "Bushmaster Core BA"},
+ {HPHW_BA, 0x007, 0x00070, 0x0, "Scorpio Core BA"},
+ {HPHW_BA, 0x008, 0x00070, 0x0, "Flounder Core BA"},
+ {HPHW_BA, 0x009, 0x00070, 0x0, "Outfield Core BA"},
+ {HPHW_BA, 0x00A, 0x00070, 0x0, "CoralII Core BA"},
+ {HPHW_BA, 0x00B, 0x00070, 0x0, "Scorpio Jr. Core BA"},
+ {HPHW_BA, 0x00C, 0x00070, 0x0, "Strider-50 Core BA"},
+ {HPHW_BA, 0x00D, 0x00070, 0x0, "Strider-33 Core BA"},
+ {HPHW_BA, 0x00E, 0x00070, 0x0, "Trailways-50 Core BA"},
+ {HPHW_BA, 0x00F, 0x00070, 0x0, "Trailways-33 Core BA"},
+ {HPHW_BA, 0x010, 0x00070, 0x0, "Pace Core BA"},
+ {HPHW_BA, 0x011, 0x00070, 0x0, "Sidewinder Core BA"},
+ {HPHW_BA, 0x019, 0x00070, 0x0, "Scorpio Sr. Core BA"},
+ {HPHW_BA, 0x020, 0x00070, 0x0, "Scorpio 100 Core BA"},
+ {HPHW_BA, 0x021, 0x00070, 0x0, "Spectra 50 Core BA"},
+ {HPHW_BA, 0x022, 0x00070, 0x0, "Spectra 75 Core BA"},
+ {HPHW_BA, 0x023, 0x00070, 0x0, "Spectra 100 Core BA"},
+ {HPHW_BA, 0x024, 0x00070, 0x0, "Fast Pace Core BA"},
+ {HPHW_BA, 0x026, 0x00070, 0x0, "CoralII Jaguar Core BA"},
+ {HPHW_BA, 0x004, 0x00076, 0x0, "Cobra EISA BA"},
+ {HPHW_BA, 0x005, 0x00076, 0x0, "Coral EISA BA"},
+ {HPHW_BA, 0x007, 0x00076, 0x0, "Scorpio EISA BA"},
+ {HPHW_BA, 0x00A, 0x00076, 0x0, "CoralII EISA BA"},
+ {HPHW_BA, 0x00B, 0x00076, 0x0, "Scorpio Jr. EISA BA"},
+ {HPHW_BA, 0x00C, 0x00076, 0x0, "Strider-50 Core EISA"},
+ {HPHW_BA, 0x00D, 0x00076, 0x0, "Strider-33 Core EISA"},
+ {HPHW_BA, 0x00E, 0x00076, 0x0, "Trailways-50 Core EISA"},
+ {HPHW_BA, 0x00F, 0x00076, 0x0, "Trailways-33 Core EISA"},
+ {HPHW_BA, 0x010, 0x00076, 0x0, "Pace Core EISA"},
+ {HPHW_BA, 0x019, 0x00076, 0x0, "Scorpio Sr. EISA BA"},
+ {HPHW_BA, 0x020, 0x00076, 0x0, "Scorpio 100 EISA BA"},
+ {HPHW_BA, 0x021, 0x00076, 0x0, "Spectra 50 EISA BA"},
+ {HPHW_BA, 0x022, 0x00076, 0x0, "Spectra 75 EISA BA"},
+ {HPHW_BA, 0x023, 0x00076, 0x0, "Spectra 100 EISA BA"},
+ {HPHW_BA, 0x026, 0x00076, 0x0, "CoralII Jaguar EISA BA"},
+ {HPHW_BA, 0x010, 0x00078, 0x0, "Pace VME BA"},
+ {HPHW_BA, 0x011, 0x00078, 0x0, "Sidewinder VME BA"},
+ {HPHW_BA, 0x01A, 0x00078, 0x0, "Anole 64 VME BA"},
+ {HPHW_BA, 0x01B, 0x00078, 0x0, "Anole 100 VME BA"},
+ {HPHW_BA, 0x024, 0x00078, 0x0, "Fast Pace VME BA"},
+ {HPHW_BA, 0x034, 0x00078, 0x0, "Anole T VME BA"},
+ {HPHW_BA, 0x04A, 0x00078, 0x0, "Anole L2 132 VME BA"},
+ {HPHW_BA, 0x04C, 0x00078, 0x0, "Anole L2 165 VME BA"},
+ {HPHW_BA, 0x011, 0x00081, 0x0, "WB-96 Core BA"},
+ {HPHW_BA, 0x012, 0x00081, 0x0, "Orville UX Core BA"},
+ {HPHW_BA, 0x013, 0x00081, 0x0, "Wilbur UX Core BA"},
+ {HPHW_BA, 0x014, 0x00081, 0x0, "WB-80 Core BA"},
+ {HPHW_BA, 0x015, 0x00081, 0x0, "KittyHawk GSY Core BA"},
+ {HPHW_BA, 0x016, 0x00081, 0x0, "Gecko Core BA"},
+ {HPHW_BA, 0x018, 0x00081, 0x0, "Gecko Optional BA"},
+ {HPHW_BA, 0x01A, 0x00081, 0x0, "Anole 64 Core BA"},
+ {HPHW_BA, 0x01B, 0x00081, 0x0, "Anole 100 Core BA"},
+ {HPHW_BA, 0x01C, 0x00081, 0x0, "Gecko 80 Core BA"},
+ {HPHW_BA, 0x01D, 0x00081, 0x0, "Gecko 100 Core BA"},
+ {HPHW_BA, 0x01F, 0x00081, 0x0, "SkyHawk 100/120 Core BA"},
+ {HPHW_BA, 0x027, 0x00081, 0x0, "Piranha 100 Core BA"},
+ {HPHW_BA, 0x028, 0x00081, 0x0, "Mirage Jr Core BA"},
+ {HPHW_BA, 0x029, 0x00081, 0x0, "Mirage Core BA"},
+ {HPHW_BA, 0x02A, 0x00081, 0x0, "Electra Core BA"},
+ {HPHW_BA, 0x02B, 0x00081, 0x0, "Mirage 80 Core BA"},
+ {HPHW_BA, 0x02C, 0x00081, 0x0, "Mirage 100+ Core BA"},
+ {HPHW_BA, 0x02E, 0x00081, 0x0, "UL 350 Lasi Core BA"},
+ {HPHW_BA, 0x02F, 0x00081, 0x0, "UL 550 Lasi Core BA"},
+ {HPHW_BA, 0x032, 0x00081, 0x0, "Raven T' Core BA"},
+ {HPHW_BA, 0x033, 0x00081, 0x0, "Anole T Core BA"},
+ {HPHW_BA, 0x034, 0x00081, 0x0, "SAIC L-80 Core BA"},
+ {HPHW_BA, 0x035, 0x00081, 0x0, "PCX-L2 712/132 Core BA"},
+ {HPHW_BA, 0x036, 0x00081, 0x0, "PCX-L2 712/160 Core BA"},
+ {HPHW_BA, 0x03B, 0x00081, 0x0, "Raven U/L2 Core BA"},
+ {HPHW_BA, 0x03C, 0x00081, 0x0, "Merlin 132 Core BA"},
+ {HPHW_BA, 0x03D, 0x00081, 0x0, "Merlin 160 Core BA"},
+ {HPHW_BA, 0x03E, 0x00081, 0x0, "Merlin+ 132 Core BA"},
+ {HPHW_BA, 0x03F, 0x00081, 0x0, "Merlin+ 180 Core BA"},
+ {HPHW_BA, 0x044, 0x00081, 0x0, "Mohawk Core BA"},
+ {HPHW_BA, 0x045, 0x00081, 0x0, "Rocky1 Core BA"},
+ {HPHW_BA, 0x046, 0x00081, 0x0, "Rocky2 120 Core BA"},
+ {HPHW_BA, 0x047, 0x00081, 0x0, "Rocky2 150 Core BA"},
+ {HPHW_BA, 0x04B, 0x00081, 0x0, "Anole L2 132 Core BA"},
+ {HPHW_BA, 0x04D, 0x00081, 0x0, "Anole L2 165 Core BA"},
+ {HPHW_BA, 0x04E, 0x00081, 0x0, "Kiji L2 132 Core BA"},
+ {HPHW_BA, 0x050, 0x00081, 0x0, "Merlin Jr 132 Core BA"},
+ {HPHW_BA, 0x051, 0x00081, 0x0, "Firehawk Core BA"},
+ {HPHW_BA, 0x056, 0x00081, 0x0, "Raven+ w SE FWSCSI Core BA"},
+ {HPHW_BA, 0x057, 0x00081, 0x0, "Raven+ w Diff FWSCSI Core BA"},
+ {HPHW_BA, 0x058, 0x00081, 0x0, "FireHawk 200 Core BA"},
+ {HPHW_BA, 0x05C, 0x00081, 0x0, "SummitHawk 230 Core BA"},
+ {HPHW_BA, 0x05E, 0x00081, 0x0, "Staccato 132 Core BA"},
+ {HPHW_BA, 0x05E, 0x00081, 0x0, "Staccato 180 Core BA"},
+ {HPHW_BA, 0x05F, 0x00081, 0x0, "Staccato 180 Lasi"},
+ {HPHW_BA, 0x800, 0x00081, 0x0, "Hitachi Tiny 64 Core BA"},
+ {HPHW_BA, 0x801, 0x00081, 0x0, "Hitachi Tiny 80 Core BA"},
+ {HPHW_BA, 0x004, 0x0008B, 0x0, "Anole Optional PCMCIA BA"},
+ {HPHW_BA, 0x004, 0x0008E, 0x0, "GSC ITR Wax BA"},
+ {HPHW_BA, 0x00C, 0x0008E, 0x0, "Gecko Optional Wax BA"},
+ {HPHW_BA, 0x010, 0x0008E, 0x0, "Pace Wax BA"},
+ {HPHW_BA, 0x011, 0x0008E, 0x0, "SuperPace Wax BA"},
+ {HPHW_BA, 0x012, 0x0008E, 0x0, "Mirage Jr Wax BA"},
+ {HPHW_BA, 0x013, 0x0008E, 0x0, "Mirage Wax BA"},
+ {HPHW_BA, 0x014, 0x0008E, 0x0, "Electra Wax BA"},
+ {HPHW_BA, 0x017, 0x0008E, 0x0, "Raven Backplane Wax BA"},
+ {HPHW_BA, 0x01E, 0x0008E, 0x0, "Raven T' Wax BA"},
+ {HPHW_BA, 0x01F, 0x0008E, 0x0, "SkyHawk Wax BA"},
+ {HPHW_BA, 0x023, 0x0008E, 0x0, "Rocky1 Wax BA"},
+ {HPHW_BA, 0x02B, 0x0008E, 0x0, "Mirage 80 Wax BA"},
+ {HPHW_BA, 0x02C, 0x0008E, 0x0, "Mirage 100+ Wax BA"},
+ {HPHW_BA, 0x030, 0x0008E, 0x0, "UL 350 Core Wax BA"},
+ {HPHW_BA, 0x031, 0x0008E, 0x0, "UL 550 Core Wax BA"},
+ {HPHW_BA, 0x034, 0x0008E, 0x0, "SAIC L-80 Wax BA"},
+ {HPHW_BA, 0x03A, 0x0008E, 0x0, "Merlin+ Wax BA"},
+ {HPHW_BA, 0x040, 0x0008E, 0x0, "Merlin 132 Wax BA"},
+ {HPHW_BA, 0x041, 0x0008E, 0x0, "Merlin 160 Wax BA"},
+ {HPHW_BA, 0x043, 0x0008E, 0x0, "Merlin 132/160 Wax BA"},
+ {HPHW_BA, 0x052, 0x0008E, 0x0, "Raven+ Hi Power Backplane w/EISA Wax BA"},
+ {HPHW_BA, 0x054, 0x0008E, 0x0, "Raven+ Lo Power Backplane w/EISA Wax BA"},
+ {HPHW_BA, 0x059, 0x0008E, 0x0, "FireHawk 200 Wax BA"},
+ {HPHW_BA, 0x05A, 0x0008E, 0x0, "Raven+ L2 Backplane w/EISA Wax BA"},
+ {HPHW_BA, 0x05D, 0x0008E, 0x0, "SummitHawk Wax BA"},
+ {HPHW_BA, 0x800, 0x0008E, 0x0, "Hitachi Tiny 64 Wax BA"},
+ {HPHW_BA, 0x801, 0x0008E, 0x0, "Hitachi Tiny 80 Wax BA"},
+ {HPHW_BA, 0x011, 0x00090, 0x0, "SuperPace Wax EISA BA"},
+ {HPHW_BA, 0x017, 0x00090, 0x0, "Raven Backplane Wax EISA BA"},
+ {HPHW_BA, 0x01E, 0x00090, 0x0, "Raven T' Wax EISA BA"},
+ {HPHW_BA, 0x01F, 0x00090, 0x0, "SkyHawk 100/120 Wax EISA BA"},
+ {HPHW_BA, 0x027, 0x00090, 0x0, "Piranha 100 Wax EISA BA"},
+ {HPHW_BA, 0x028, 0x00090, 0x0, "Mirage Jr Wax EISA BA"},
+ {HPHW_BA, 0x029, 0x00090, 0x0, "Mirage Wax EISA BA"},
+ {HPHW_BA, 0x02A, 0x00090, 0x0, "Electra Wax EISA BA"},
+ {HPHW_BA, 0x02B, 0x00090, 0x0, "Mirage 80 Wax EISA BA"},
+ {HPHW_BA, 0x02C, 0x00090, 0x0, "Mirage 100+ Wax EISA BA"},
+ {HPHW_BA, 0x030, 0x00090, 0x0, "UL 350 Wax EISA BA"},
+ {HPHW_BA, 0x031, 0x00090, 0x0, "UL 550 Wax EISA BA"},
+ {HPHW_BA, 0x034, 0x00090, 0x0, "SAIC L-80 Wax EISA BA"},
+ {HPHW_BA, 0x03A, 0x00090, 0x0, "Merlin+ Wax EISA BA"},
+ {HPHW_BA, 0x040, 0x00090, 0x0, "Merlin 132 Wax EISA BA"},
+ {HPHW_BA, 0x041, 0x00090, 0x0, "Merlin 160 Wax EISA BA"},
+ {HPHW_BA, 0x043, 0x00090, 0x0, "Merlin 132/160 Wax EISA BA"},
+ {HPHW_BA, 0x052, 0x00090, 0x0, "Raven Hi Power Backplane Wax EISA BA"},
+ {HPHW_BA, 0x054, 0x00090, 0x0, "Raven Lo Power Backplane Wax EISA BA"},
+ {HPHW_BA, 0x059, 0x00090, 0x0, "FireHawk 200 Wax EISA BA"},
+ {HPHW_BA, 0x05A, 0x00090, 0x0, "Raven L2 Backplane Wax EISA BA"},
+ {HPHW_BA, 0x05D, 0x00090, 0x0, "SummitHawk Wax EISA BA"},
+ {HPHW_BA, 0x800, 0x00090, 0x0, "Hitachi Tiny 64 Wax EISA BA"},
+ {HPHW_BA, 0x801, 0x00090, 0x0, "Hitachi Tiny 80 Wax EISA BA"},
+ {HPHW_BA, 0x01A, 0x00093, 0x0, "Anole 64 TIMI BA"},
+ {HPHW_BA, 0x01B, 0x00093, 0x0, "Anole 100 TIMI BA"},
+ {HPHW_BA, 0x034, 0x00093, 0x0, "Anole T TIMI BA"},
+ {HPHW_BA, 0x04A, 0x00093, 0x0, "Anole L2 132 TIMI BA"},
+ {HPHW_BA, 0x04C, 0x00093, 0x0, "Anole L2 165 TIMI BA"},
+ {HPHW_BA, 0x582, 0x000A5, 0x00, "Epic PCI Bridge"},
+ {HPHW_BCPORT, 0x504, 0x00000, 0x00, "Phantom PseudoBC GSC+ Port"},
+ {HPHW_BCPORT, 0x505, 0x00000, 0x00, "Phantom PseudoBC GSC+ Port"},
+ {HPHW_BCPORT, 0x503, 0x0000C, 0x00, "Java BC GSC+ Port"},
+ {HPHW_BCPORT, 0x57F, 0x0000C, 0x00, "Hitachi Ghostview GSC+ Port"},
+ {HPHW_BCPORT, 0x501, 0x0000C, 0x00, "U2-IOA BC GSC+ Port"},
+ {HPHW_BCPORT, 0x502, 0x0000C, 0x00, "Uturn-IOA BC GSC+ Port"},
+ {HPHW_BCPORT, 0x780, 0x0000C, 0x00, "Astro BC Ropes Port"},
+ {HPHW_BCPORT, 0x506, 0x0000C, 0x00, "NEC-IOS BC HSC Port"},
+ {HPHW_BCPORT, 0x004, 0x0000C, 0x00, "Cheetah BC SMB Port"},
+ {HPHW_BCPORT, 0x006, 0x0000C, 0x00, "Cheetah BC MID_BUS Port"},
+ {HPHW_BCPORT, 0x005, 0x0000C, 0x00, "Condor BC MID_BUS Port"},
+ {HPHW_BCPORT, 0x100, 0x0000C, 0x00, "Condor BC HP-PB Port"},
+ {HPHW_BCPORT, 0x184, 0x0000C, 0x00, "Summit BC Port"},
+ {HPHW_BCPORT, 0x101, 0x0000C, 0x00, "Summit BC HP-PB Port"},
+ {HPHW_BCPORT, 0x102, 0x0000C, 0x00, "HP-PB Port (prefetch)"},
+ {HPHW_BCPORT, 0x500, 0x0000C, 0x00, "Gecko BOA BC GSC+ Port"},
+ {HPHW_BCPORT, 0x103, 0x0000C, 0x00, "Gecko BOA BC HP-PB Port"},
+ {HPHW_BCPORT, 0x507, 0x0000C, 0x00, "Keyaki BC GSC+ Port"},
+ {HPHW_BCPORT, 0x508, 0x0000C, 0x00, "Keyaki-DX BC GSC+ Port"},
+ {HPHW_BCPORT, 0x584, 0x0000C, 0x10, "DEW BC Runway Port"},
+ {HPHW_BCPORT, 0x800, 0x0000C, 0x10, "DEW BC Merced Port"},
+ {HPHW_BCPORT, 0x801, 0x0000C, 0x10, "SMC Bus Interface Merced Bus0"},
+ {HPHW_BCPORT, 0x802, 0x0000C, 0x10, "SMC Bus Interface Merced Bus1"},
+ {HPHW_BCPORT, 0x803, 0x0000C, 0x10, "IKE I/O BC Merced Port"},
+ {HPHW_BCPORT, 0x781, 0x0000C, 0x00, "IKE I/O BC Ropes Port"},
+ {HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O BC Merced Port"},
+ {HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O BC Ropes Port"},
+ {HPHW_BCPORT, 0x784, 0x0000C, 0x00, "Pluto I/O BC Ropes Port"},
+ {HPHW_BRIDGE, 0x05D, 0x0000A, 0x00, "SummitHawk Dino PCI Bridge"},
+ {HPHW_BRIDGE, 0x680, 0x0000A, 0x00, "Dino PCI Bridge"},
+ {HPHW_BRIDGE, 0x682, 0x0000A, 0x00, "Cujo PCI Bridge"},
+ {HPHW_BRIDGE, 0x782, 0x0000A, 0x00, "Elroy PCI Bridge"},
+ {HPHW_BRIDGE, 0x583, 0x000A5, 0x00, "Saga PCI Bridge"},
+ {HPHW_BRIDGE, 0x783, 0x0000A, 0x00, "Mercury PCI Bridge"},
+ {HPHW_BRIDGE, 0x784, 0x0000A, 0x00, "Quicksilver AGP Bridge"},
+ {HPHW_B_DMA, 0x004, 0x00018, 0x00, "Parallel I/O"},
+ {HPHW_B_DMA, 0x004, 0x00019, 0x00, "Parallel RDB"},
+ {HPHW_B_DMA, 0x004, 0x00020, 0x80, "MID_BUS PSI"},
+ {HPHW_B_DMA, 0x004, 0x0002F, 0x80, "HP-PB Transit PSI (36960A)"},
+ {HPHW_B_DMA, 0x008, 0x00051, 0x80, "HP-PB Transit 802.3"},
+ {HPHW_B_DMA, 0x004, 0x00052, 0x80, "Miura LAN/Console (J2146A)"},
+ {HPHW_B_DMA, 0x008, 0x00058, 0x80, "HP-PB Transit 802.4"},
+ {HPHW_B_DMA, 0x005, 0x00060, 0x80, "KittyHawk CSY Core LAN/Console"},
+ {HPHW_B_DMA, 0x014, 0x00060, 0x80, "Diablo LAN/Console"},
+ {HPHW_B_DMA, 0x054, 0x00060, 0x80, "Countach LAN/Console"},
+ {HPHW_B_DMA, 0x004, 0x00094, 0x80, "KittyHawk GSC+ Exerciser"},
+ {HPHW_B_DMA, 0x004, 0x00100, 0x80, "HP-PB HF Interface"},
+ {HPHW_B_DMA, 0x000, 0x00206, 0x80, "MELCO HMPHA"},
+ {HPHW_B_DMA, 0x005, 0x00206, 0x80, "MELCO HMPHA_10"},
+ {HPHW_B_DMA, 0x006, 0x00206, 0x80, "MELCO HMQHA"},
+ {HPHW_B_DMA, 0x007, 0x00206, 0x80, "MELCO HMQHA_10"},
+ {HPHW_B_DMA, 0x004, 0x00207, 0x80, "MELCO HNDWA MDWS-70"},
+ {HPHW_CIO, 0x004, 0x00010, 0x00, "VLSI CIO"},
+ {HPHW_CIO, 0x005, 0x00010, 0x00, "Silverfox CIO"},
+ {HPHW_CIO, 0x006, 0x00010, 0x00, "Emerald CIO"},
+ {HPHW_CIO, 0x008, 0x00010, 0x00, "Discrete CIO"},
+ {HPHW_CONSOLE, 0x004, 0x0001C, 0x00, "Cheetah console"},
+ {HPHW_CONSOLE, 0x005, 0x0001C, 0x00, "Emerald console"},
+ {HPHW_CONSOLE, 0x01A, 0x0001F, 0x00, "Jason/Anole 64 Null Console"},
+ {HPHW_CONSOLE, 0x01B, 0x0001F, 0x00, "Jason/Anole 100 Null Console"},
+ {HPHW_FABRIC, 0x004, 0x000AA, 0x80, "Halfdome DNA Central Agent"},
+ {HPHW_FABRIC, 0x005, 0x000AA, 0x80, "Keystone DNA Central Agent"},
+ {HPHW_FABRIC, 0x007, 0x000AA, 0x80, "Caribe DNA Central Agent"},
+ {HPHW_FABRIC, 0x004, 0x000AB, 0x00, "Halfdome TOGO Fabric Crossbar"},
+ {HPHW_FABRIC, 0x005, 0x000AB, 0x00, "Keystone TOGO Fabric Crossbar"},
+ {HPHW_FABRIC, 0x004, 0x000AC, 0x00, "Halfdome Sakura Fabric Router"},
+ {HPHW_FIO, 0x025, 0x0002E, 0x80, "Armyknife Optional X.25"},
+ {HPHW_FIO, 0x004, 0x0004F, 0x0, "8-Port X.25 EISA-ACC (AMSO)"},
+ {HPHW_FIO, 0x004, 0x00071, 0x0, "Cobra Core SCSI"},
+ {HPHW_FIO, 0x005, 0x00071, 0x0, "Coral Core SCSI"},
+ {HPHW_FIO, 0x006, 0x00071, 0x0, "Bushmaster Core SCSI"},
+ {HPHW_FIO, 0x007, 0x00071, 0x0, "Scorpio Core SCSI"},
+ {HPHW_FIO, 0x008, 0x00071, 0x0, "Flounder Core SCSI"},
+ {HPHW_FIO, 0x009, 0x00071, 0x0, "Outfield Core SCSI"},
+ {HPHW_FIO, 0x00A, 0x00071, 0x0, "CoralII Core SCSI"},
+ {HPHW_FIO, 0x00B, 0x00071, 0x0, "Scorpio Jr. Core SCSI"},
+ {HPHW_FIO, 0x00C, 0x00071, 0x0, "Strider-50 Core SCSI"},
+ {HPHW_FIO, 0x00D, 0x00071, 0x0, "Strider-33 Core SCSI"},
+ {HPHW_FIO, 0x00E, 0x00071, 0x0, "Trailways-50 Core SCSI"},
+ {HPHW_FIO, 0x00F, 0x00071, 0x0, "Trailways-33 Core SCSI"},
+ {HPHW_FIO, 0x010, 0x00071, 0x0, "Pace Core SCSI"},
+ {HPHW_FIO, 0x011, 0x00071, 0x0, "Sidewinder Core SCSI"},
+ {HPHW_FIO, 0x019, 0x00071, 0x0, "Scorpio Sr. Core SCSI"},
+ {HPHW_FIO, 0x020, 0x00071, 0x0, "Scorpio 100 Core SCSI"},
+ {HPHW_FIO, 0x021, 0x00071, 0x0, "Spectra 50 Core SCSI"},
+ {HPHW_FIO, 0x022, 0x00071, 0x0, "Spectra 75 Core SCSI"},
+ {HPHW_FIO, 0x023, 0x00071, 0x0, "Spectra 100 Core SCSI"},
+ {HPHW_FIO, 0x024, 0x00071, 0x0, "Fast Pace Core SCSI"},
+ {HPHW_FIO, 0x026, 0x00071, 0x0, "CoralII Jaguar Core SCSI"},
+ {HPHW_FIO, 0x004, 0x00072, 0x0, "Cobra Core LAN (802.3)"},
+ {HPHW_FIO, 0x005, 0x00072, 0x0, "Coral Core LAN (802.3)"},
+ {HPHW_FIO, 0x006, 0x00072, 0x0, "Bushmaster Core LAN (802.3)"},
+ {HPHW_FIO, 0x007, 0x00072, 0x0, "Scorpio Core LAN (802.3)"},
+ {HPHW_FIO, 0x008, 0x00072, 0x0, "Flounder Core LAN (802.3)"},
+ {HPHW_FIO, 0x009, 0x00072, 0x0, "Outfield Core LAN (802.3)"},
+ {HPHW_FIO, 0x00A, 0x00072, 0x0, "CoralII Core LAN (802.3)"},
+ {HPHW_FIO, 0x00B, 0x00072, 0x0, "Scorpio Jr. Core LAN (802.3)"},
+ {HPHW_FIO, 0x00C, 0x00072, 0x0, "Strider-50 Core LAN (802.3)"},
+ {HPHW_FIO, 0x00D, 0x00072, 0x0, "Strider-33 Core LAN (802.3)"},
+ {HPHW_FIO, 0x00E, 0x00072, 0x0, "Trailways-50 Core LAN (802.3)"},
+ {HPHW_FIO, 0x00F, 0x00072, 0x0, "Trailways-33 Core LAN (802.3)"},
+ {HPHW_FIO, 0x010, 0x00072, 0x0, "Pace Core LAN (802.3)"},
+ {HPHW_FIO, 0x011, 0x00072, 0x0, "Sidewinder Core LAN (802.3)"},
+ {HPHW_FIO, 0x019, 0x00072, 0x0, "Scorpio Sr. Core LAN (802.3)"},
+ {HPHW_FIO, 0x020, 0x00072, 0x0, "Scorpio 100 Core LAN (802.3)"},
+ {HPHW_FIO, 0x021, 0x00072, 0x0, "Spectra 50 Core LAN (802.3)"},
+ {HPHW_FIO, 0x022, 0x00072, 0x0, "Spectra 75 Core LAN (802.3)"},
+ {HPHW_FIO, 0x023, 0x00072, 0x0, "Spectra 100 Core LAN (802.3)"},
+ {HPHW_FIO, 0x024, 0x00072, 0x0, "Fast Pace Core LAN (802.3)"},
+ {HPHW_FIO, 0x026, 0x00072, 0x0, "CoralII Jaguar Core LAN (802.3)"},
+ {HPHW_FIO, 0x004, 0x00073, 0x0, "Cobra Core HIL"},
+ {HPHW_FIO, 0x005, 0x00073, 0x0, "Coral Core HIL"},
+ {HPHW_FIO, 0x006, 0x00073, 0x0, "Bushmaster Core HIL"},
+ {HPHW_FIO, 0x007, 0x00073, 0x0, "Scorpio Core HIL"},
+ {HPHW_FIO, 0x008, 0x00073, 0x0, "Flounder Core HIL"},
+ {HPHW_FIO, 0x009, 0x00073, 0x0, "Outfield Core HIL"},
+ {HPHW_FIO, 0x00A, 0x00073, 0x0, "CoralII Core HIL"},
+ {HPHW_FIO, 0x00B, 0x00073, 0x0, "Scorpio Jr. Core HIL"},
+ {HPHW_FIO, 0x00C, 0x00073, 0x0, "Strider-50 Core HIL"},
+ {HPHW_FIO, 0x00D, 0x00073, 0x0, "Strider-33 Core HIL"},
+ {HPHW_FIO, 0x00E, 0x00073, 0x0, "Trailways-50 Core HIL"},
+ {HPHW_FIO, 0x00F, 0x00073, 0x0, "Trailways-33 Core HIL"},
+ {HPHW_FIO, 0x010, 0x00073, 0x0, "Pace Core HIL"},
+ {HPHW_FIO, 0x011, 0x00073, 0xcc, "SuperPace Wax HIL"},
+ {HPHW_FIO, 0x012, 0x00073, 0x0, "Mirage Jr Wax HIL"},
+ {HPHW_FIO, 0x013, 0x00073, 0x0, "Mirage 100 Wax HIL"},
+ {HPHW_FIO, 0x014, 0x00073, 0x0, "Electra Wax HIL"},
+ {HPHW_FIO, 0x017, 0x00073, 0x0, "Raven Backplane Wax HIL"},
+ {HPHW_FIO, 0x019, 0x00073, 0x0, "Scorpio Sr. Core HIL"},
+ {HPHW_FIO, 0x01E, 0x00073, 0x0, "Raven T' Wax HIL"},
+ {HPHW_FIO, 0x01F, 0x00073, 0x0, "SkyHawk 100/120 Wax HIL"},
+ {HPHW_FIO, 0x020, 0x00073, 0x0, "Scorpio 100 Core HIL"},
+ {HPHW_FIO, 0x021, 0x00073, 0x0, "Spectra 50 Core HIL"},
+ {HPHW_FIO, 0x022, 0x00073, 0x0, "Spectra 75 Core HIL"},
+ {HPHW_FIO, 0x023, 0x00073, 0x0, "Spectra 100 Core HIL"},
+ {HPHW_FIO, 0x024, 0x00073, 0x0, "Fast Pace Core HIL"},
+ {HPHW_FIO, 0x026, 0x00073, 0x0, "CoralII Jaguar Core HIL"},
+ {HPHW_FIO, 0x02B, 0x00073, 0x0, "Mirage 80 Wax HIL"},
+ {HPHW_FIO, 0x02C, 0x00073, 0x0, "Mirage 100+ Wax HIL"},
+ {HPHW_FIO, 0x03A, 0x00073, 0x0, "Merlin+ Wax HIL"},
+ {HPHW_FIO, 0x040, 0x00073, 0x0, "Merlin 132 Wax HIL"},
+ {HPHW_FIO, 0x041, 0x00073, 0x0, "Merlin 160 Wax HIL"},
+ {HPHW_FIO, 0x043, 0x00073, 0x0, "Merlin 132/160 Wax HIL"},
+ {HPHW_FIO, 0x052, 0x00073, 0x0, "Raven+ Hi Power Backplane w/EISA Wax HIL"},
+ {HPHW_FIO, 0x053, 0x00073, 0x0, "Raven+ Hi Power Backplane wo/EISA Wax HIL"},
+ {HPHW_FIO, 0x054, 0x00073, 0x0, "Raven+ Lo Power Backplane w/EISA Wax HIL"},
+ {HPHW_FIO, 0x055, 0x00073, 0x0, "Raven+ Lo Power Backplane wo/EISA Wax HIL"},
+ {HPHW_FIO, 0x059, 0x00073, 0x0, "FireHawk 200 Wax HIL"},
+ {HPHW_FIO, 0x05A, 0x00073, 0x0, "Raven+ L2 Backplane w/EISA Wax HIL"},
+ {HPHW_FIO, 0x05B, 0x00073, 0x0, "Raven+ L2 Backplane wo/EISA Wax HIL"},
+ {HPHW_FIO, 0x05D, 0x00073, 0x0, "SummitHawk Wax HIL"},
+ {HPHW_FIO, 0x800, 0x00073, 0x0, "Hitachi Tiny 64 Wax HIL"},
+ {HPHW_FIO, 0x801, 0x00073, 0x0, "Hitachi Tiny 80 Wax HIL"},
+ {HPHW_FIO, 0x004, 0x00074, 0x0, "Cobra Core Centronics"},
+ {HPHW_FIO, 0x005, 0x00074, 0x0, "Coral Core Centronics"},
+ {HPHW_FIO, 0x006, 0x00074, 0x0, "Bushmaster Core Centronics"},
+ {HPHW_FIO, 0x007, 0x00074, 0x0, "Scorpio Core Centronics"},
+ {HPHW_FIO, 0x008, 0x00074, 0x0, "Flounder Core Centronics"},
+ {HPHW_FIO, 0x009, 0x00074, 0x0, "Outfield Core Centronics"},
+ {HPHW_FIO, 0x00A, 0x00074, 0x0, "CoralII Core Centronics"},
+ {HPHW_FIO, 0x00B, 0x00074, 0x0, "Scorpio Jr. Core Centronics"},
+ {HPHW_FIO, 0x00C, 0x00074, 0x0, "Strider-50 Core Centronics"},
+ {HPHW_FIO, 0x00D, 0x00074, 0x0, "Strider-33 Core Centronics"},
+ {HPHW_FIO, 0x00E, 0x00074, 0x0, "Trailways-50 Core Centronics"},
+ {HPHW_FIO, 0x00F, 0x00074, 0x0, "Trailways-33 Core Centronics"},
+ {HPHW_FIO, 0x010, 0x00074, 0x0, "Pace Core Centronics"},
+ {HPHW_FIO, 0x011, 0x00074, 0x0, "Sidewinder Core Centronics"},
+ {HPHW_FIO, 0x015, 0x00074, 0x0, "KittyHawk GSY Core Centronics"},
+ {HPHW_FIO, 0x016, 0x00074, 0x0, "Gecko Core Centronics"},
+ {HPHW_FIO, 0x019, 0x00074, 0x0, "Scorpio Sr. Core Centronics"},
+ {HPHW_FIO, 0x01A, 0x00074, 0x0, "Anole 64 Core Centronics"},
+ {HPHW_FIO, 0x01B, 0x00074, 0x0, "Anole 100 Core Centronics"},
+ {HPHW_FIO, 0x01C, 0x00074, 0x0, "Gecko 80 Core Centronics"},
+ {HPHW_FIO, 0x01D, 0x00074, 0x0, "Gecko 100 Core Centronics"},
+ {HPHW_FIO, 0x01F, 0x00074, 0x0, "SkyHawk 100/120 Core Centronics"},
+ {HPHW_FIO, 0x020, 0x00074, 0x0, "Scorpio 100 Core Centronics"},
+ {HPHW_FIO, 0x021, 0x00074, 0x0, "Spectra 50 Core Centronics"},
+ {HPHW_FIO, 0x022, 0x00074, 0x0, "Spectra 75 Core Centronics"},
+ {HPHW_FIO, 0x023, 0x00074, 0x0, "Spectra 100 Core Centronics"},
+ {HPHW_FIO, 0x024, 0x00074, 0x0, "Fast Pace Core Centronics"},
+ {HPHW_FIO, 0x026, 0x00074, 0x0, "CoralII Jaguar Core Centronics"},
+ {HPHW_FIO, 0x027, 0x00074, 0x0, "Piranha 100 Core Centronics"},
+ {HPHW_FIO, 0x028, 0x00074, 0x0, "Mirage Jr Core Centronics"},
+ {HPHW_FIO, 0x029, 0x00074, 0x0, "Mirage Core Centronics"},
+ {HPHW_FIO, 0x02A, 0x00074, 0x0, "Electra Core Centronics"},
+ {HPHW_FIO, 0x02B, 0x00074, 0x0, "Mirage 80 Core Centronics"},
+ {HPHW_FIO, 0x02C, 0x00074, 0x0, "Mirage 100+ Core Centronics"},
+ {HPHW_FIO, 0x02E, 0x00074, 0x0, "UL 350 Core Centronics"},
+ {HPHW_FIO, 0x02F, 0x00074, 0x0, "UL 550 Core Centronics"},
+ {HPHW_FIO, 0x032, 0x00074, 0x0, "Raven T' Core Centronics"},
+ {HPHW_FIO, 0x033, 0x00074, 0x0, "Anole T Core Centronics"},
+ {HPHW_FIO, 0x034, 0x00074, 0x0, "SAIC L-80 Core Centronics"},
+ {HPHW_FIO, 0x035, 0x00074, 0x0, "PCX-L2 712/132 Core Centronics"},
+ {HPHW_FIO, 0x036, 0x00074, 0x0, "PCX-L2 712/160 Core Centronics"},
+ {HPHW_FIO, 0x03B, 0x00074, 0x0, "Raven U/L2 Core Centronics"},
+ {HPHW_FIO, 0x03C, 0x00074, 0x0, "Merlin 132 Core Centronics"},
+ {HPHW_FIO, 0x03D, 0x00074, 0x0, "Merlin 160 Core Centronics"},
+ {HPHW_FIO, 0x03E, 0x00074, 0x0, "Merlin+ 132 Core Centronics"},
+ {HPHW_FIO, 0x03F, 0x00074, 0x0, "Merlin+ 180 Core Centronics"},
+ {HPHW_FIO, 0x044, 0x00074, 0x0, "Mohawk Core Centronics"},
+ {HPHW_FIO, 0x045, 0x00074, 0x0, "Rocky1 Core Centronics"},
+ {HPHW_FIO, 0x046, 0x00074, 0x0, "Rocky2 120 Core Centronics"},
+ {HPHW_FIO, 0x047, 0x00074, 0x0, "Rocky2 150 Core Centronics"},
+ {HPHW_FIO, 0x04B, 0x00074, 0x0, "Anole L2 132 Core Centronics"},
+ {HPHW_FIO, 0x04D, 0x00074, 0x0, "Anole L2 165 Core Centronics"},
+ {HPHW_FIO, 0x050, 0x00074, 0x0, "Merlin Jr 132 Core Centronics"},
+ {HPHW_FIO, 0x051, 0x00074, 0x0, "Firehawk Core Centronics"},
+ {HPHW_FIO, 0x056, 0x00074, 0x0, "Raven+ w SE FWSCSI Core Centronics"},
+ {HPHW_FIO, 0x057, 0x00074, 0x0, "Raven+ w Diff FWSCSI Core Centronics"},
+ {HPHW_FIO, 0x058, 0x00074, 0x0, "FireHawk 200 Core Centronics"},
+ {HPHW_FIO, 0x05C, 0x00074, 0x0, "SummitHawk 230 Core Centronics"},
+ {HPHW_FIO, 0x800, 0x00074, 0x0, "Hitachi Tiny 64 Core Centronics"},
+ {HPHW_FIO, 0x801, 0x00074, 0x0, "Hitachi Tiny 80 Core Centronics"},
+ {HPHW_FIO, 0x004, 0x00075, 0x0, "Cobra Core RS-232"},
+ {HPHW_FIO, 0x005, 0x00075, 0x0, "Coral Core RS-232"},
+ {HPHW_FIO, 0x006, 0x00075, 0x0, "Bushmaster Core RS-232"},
+ {HPHW_FIO, 0x007, 0x00075, 0x0, "Scorpio Core RS-232"},
+ {HPHW_FIO, 0x008, 0x00075, 0x0, "Flounder Core RS-232"},
+ {HPHW_FIO, 0x009, 0x00075, 0x0, "Outfield Core RS-232"},
+ {HPHW_FIO, 0x00A, 0x00075, 0x0, "CoralII Core RS-232"},
+ {HPHW_FIO, 0x00B, 0x00075, 0x0, "Scorpio Jr. Core RS-232"},
+ {HPHW_FIO, 0x00C, 0x00075, 0x0, "Strider-50 Core RS-232"},
+ {HPHW_FIO, 0x00D, 0x00075, 0x0, "Strider-33 Core RS-232"},
+ {HPHW_FIO, 0x00E, 0x00075, 0x0, "Trailways-50 Core RS-232"},
+ {HPHW_FIO, 0x00F, 0x00075, 0x0, "Trailways-33 Core RS-232"},
+ {HPHW_FIO, 0x010, 0x00075, 0x0, "Pace Core RS-232"},
+ {HPHW_FIO, 0x011, 0x00075, 0x0, "Sidewinder Core RS-232"},
+ {HPHW_FIO, 0x019, 0x00075, 0x0, "Scorpio Sr. Core RS-232"},
+ {HPHW_FIO, 0x020, 0x00075, 0x0, "Scorpio 100 Core RS-232"},
+ {HPHW_FIO, 0x021, 0x00075, 0x0, "Spectra 50 Core RS-232"},
+ {HPHW_FIO, 0x022, 0x00075, 0x0, "Spectra 75 Core RS-232"},
+ {HPHW_FIO, 0x023, 0x00075, 0x0, "Spectra 100 Core RS-232"},
+ {HPHW_FIO, 0x024, 0x00075, 0x0, "Fast Pace Core RS-232"},
+ {HPHW_FIO, 0x026, 0x00075, 0x0, "CoralII Jaguar Core RS-232"},
+ {HPHW_FIO, 0x004, 0x00077, 0x0, "Coral SGC Graphics"},
+ {HPHW_FIO, 0x005, 0x00077, 0x0, "Hyperdrive Optional Graphics"},
+ {HPHW_FIO, 0x006, 0x00077, 0x0, "Stinger Optional Graphics"},
+ {HPHW_FIO, 0x007, 0x00077, 0x0, "Scorpio Builtin Graphics"},
+ {HPHW_FIO, 0x008, 0x00077, 0x0, "Anole Hyperdrive Optional Graphics"},
+ {HPHW_FIO, 0x009, 0x00077, 0x0, "Thunder II graphics EISA form"},
+ {HPHW_FIO, 0x00A, 0x00077, 0x0, "Thunder II graphics GSA form"},
+ {HPHW_FIO, 0x00B, 0x00077, 0x0, "Scorpio Jr Builtin Graphics"},
+ {HPHW_FIO, 0x00C, 0x00077, 0x0, "Strider-50 SSC Graphics"},
+ {HPHW_FIO, 0x00D, 0x00077, 0x0, "Strider-33 SSC Graphics"},
+ {HPHW_FIO, 0x00E, 0x00077, 0x0, "Trailways-50 SSC Graphics"},
+ {HPHW_FIO, 0x00F, 0x00077, 0x0, "Trailways-33 SSC Graphics"},
+ {HPHW_FIO, 0x010, 0x00077, 0x0, "Pace SGC Graphics"},
+ {HPHW_FIO, 0x011, 0x00077, 0x0, "Mohawk Opt. 2D Graphics (Kid)"},
+ {HPHW_FIO, 0x012, 0x00077, 0x0, "Raven Opt. 2D Graphics (Goat)"},
+ {HPHW_FIO, 0x016, 0x00077, 0x0, "Lego 24 SCG Graphics"},
+ {HPHW_FIO, 0x017, 0x00077, 0x0, "Lego 24Z SCG Graphics"},
+ {HPHW_FIO, 0x018, 0x00077, 0x0, "Lego 48Z SCG Graphics"},
+ {HPHW_FIO, 0x019, 0x00077, 0x0, "Scorpio Sr Builtin Graphics"},
+ {HPHW_FIO, 0x020, 0x00077, 0x0, "Scorpio 100 Builtin Graphics"},
+ {HPHW_FIO, 0x021, 0x00077, 0x0, "Spectra 50 Builtin Graphics"},
+ {HPHW_FIO, 0x022, 0x00077, 0x0, "Spectra 75 Builtin Graphics"},
+ {HPHW_FIO, 0x023, 0x00077, 0x0, "Spectra 100 Builtin Graphics"},
+ {HPHW_FIO, 0x024, 0x00077, 0x0, "Fast Pace SGC Graphics"},
+ {HPHW_FIO, 0x006, 0x0007A, 0x0, "Bushmaster Audio"},
+ {HPHW_FIO, 0x008, 0x0007A, 0x0, "Flounder Audio"},
+ {HPHW_FIO, 0x004, 0x0007B, 0x0, "UL Optional Audio"},
+ {HPHW_FIO, 0x007, 0x0007B, 0x0, "Scorpio Audio"},
+ {HPHW_FIO, 0x00B, 0x0007B, 0x0, "Scorpio Jr. Audio"},
+ {HPHW_FIO, 0x00C, 0x0007B, 0x0, "Strider-50 Audio"},
+ {HPHW_FIO, 0x00D, 0x0007B, 0x0, "Strider-33 Audio"},
+ {HPHW_FIO, 0x00E, 0x0007B, 0x0, "Trailways-50 Audio"},
+ {HPHW_FIO, 0x00F, 0x0007B, 0x0, "Trailways-33 Audio"},
+ {HPHW_FIO, 0x015, 0x0007B, 0x0, "KittyHawk GSY Core Audio"},
+ {HPHW_FIO, 0x016, 0x0007B, 0x0, "Gecko Audio"},
+ {HPHW_FIO, 0x019, 0x0007B, 0x0, "Scorpio Sr. Audio"},
+ {HPHW_FIO, 0x01A, 0x0007B, 0x0, "Anole 64 Audio"},
+ {HPHW_FIO, 0x01B, 0x0007B, 0x0, "Anole 100 Audio"},
+ {HPHW_FIO, 0x01C, 0x0007B, 0x0, "Gecko 80 Audio"},
+ {HPHW_FIO, 0x01D, 0x0007B, 0x0, "Gecko 100 Audio"},
+ {HPHW_FIO, 0x01F, 0x0007B, 0x0, "SkyHawk 100/120 Audio"},
+ {HPHW_FIO, 0x020, 0x0007B, 0x0, "Scorpio 100 Audio"},
+ {HPHW_FIO, 0x021, 0x0007B, 0x0, "Spectra 50 Audio"},
+ {HPHW_FIO, 0x022, 0x0007B, 0x0, "Spectra 75 Audio"},
+ {HPHW_FIO, 0x023, 0x0007B, 0x0, "Spectra 100 Audio"},
+ {HPHW_FIO, 0x028, 0x0007B, 0x0, "Mirage Jr Audio"},
+ {HPHW_FIO, 0x029, 0x0007B, 0x0, "Mirage Audio"},
+ {HPHW_FIO, 0x02A, 0x0007B, 0x0, "Electra Audio"},
+ {HPHW_FIO, 0x02B, 0x0007B, 0x0, "Mirage 80 Audio"},
+ {HPHW_FIO, 0x02C, 0x0007B, 0x0, "Mirage 100+ Audio"},
+ {HPHW_FIO, 0x032, 0x0007B, 0x0, "Raven T' Audio"},
+ {HPHW_FIO, 0x034, 0x0007B, 0x0, "SAIC L-80 Audio"},
+ {HPHW_FIO, 0x035, 0x0007B, 0x0, "PCX-L2 712/132 Core Audio"},
+ {HPHW_FIO, 0x036, 0x0007B, 0x0, "PCX-L2 712/160 Core Audio"},
+ {HPHW_FIO, 0x03B, 0x0007B, 0x0, "Raven U/L2 Core Audio"},
+ {HPHW_FIO, 0x03C, 0x0007B, 0x0, "Merlin 132 Core Audio"},
+ {HPHW_FIO, 0x03D, 0x0007B, 0x0, "Merlin 160 Core Audio"},
+ {HPHW_FIO, 0x03E, 0x0007B, 0x0, "Merlin+ 132 Core Audio"},
+ {HPHW_FIO, 0x03F, 0x0007B, 0x0, "Merlin+ 180 Core Audio"},
+ {HPHW_FIO, 0x044, 0x0007B, 0x0, "Mohawk Core Audio"},
+ {HPHW_FIO, 0x046, 0x0007B, 0x0, "Rocky2 120 Core Audio"},
+ {HPHW_FIO, 0x047, 0x0007B, 0x0, "Rocky2 150 Core Audio"},
+ {HPHW_FIO, 0x04B, 0x0007B, 0x0, "Anole L2 132 Core Audio"},
+ {HPHW_FIO, 0x04D, 0x0007B, 0x0, "Anole L2 165 Core Audio"},
+ {HPHW_FIO, 0x04E, 0x0007B, 0x0, "Kiji L2 132 Core Audio"},
+ {HPHW_FIO, 0x050, 0x0007B, 0x0, "Merlin Jr 132 Core Audio"},
+ {HPHW_FIO, 0x051, 0x0007B, 0x0, "Firehawk Audio"},
+ {HPHW_FIO, 0x056, 0x0007B, 0x0, "Raven+ w SE FWSCSI Core Audio"},
+ {HPHW_FIO, 0x057, 0x0007B, 0x0, "Raven+ w Diff FWSCSI Core Audio"},
+ {HPHW_FIO, 0x058, 0x0007B, 0x0, "FireHawk 200 Audio"},
+ {HPHW_FIO, 0x05C, 0x0007B, 0x0, "SummitHawk 230 Core Audio"},
+ {HPHW_FIO, 0x800, 0x0007B, 0x0, "Hitachi Tiny 64 Audio"},
+ {HPHW_FIO, 0x801, 0x0007B, 0x0, "Hitachi Tiny 80 Audio"},
+ {HPHW_FIO, 0x009, 0x0007C, 0x0, "Outfield FW SCSI"},
+ {HPHW_FIO, 0x00A, 0x0007C, 0x0, "CoralII FW SCSI"},
+ {HPHW_FIO, 0x026, 0x0007C, 0x0, "CoralII Jaguar FW SCSI"},
+ {HPHW_FIO, 0x009, 0x0007D, 0x0, "Outfield FDDI"},
+ {HPHW_FIO, 0x00A, 0x0007D, 0x0, "CoralII FDDI"},
+ {HPHW_FIO, 0x026, 0x0007D, 0x0, "CoralII Jaguar FDDI"},
+ {HPHW_FIO, 0x010, 0x0007E, 0x0, "Pace Audio"},
+ {HPHW_FIO, 0x024, 0x0007E, 0x0, "Fast Pace Audio"},
+ {HPHW_FIO, 0x009, 0x0007F, 0x0, "Outfield Audio"},
+ {HPHW_FIO, 0x00A, 0x0007F, 0x0, "CoralII Audio"},
+ {HPHW_FIO, 0x026, 0x0007F, 0x0, "CoralII Jaguar Audio"},
+ {HPHW_FIO, 0x010, 0x00080, 0x0, "Pace Core HPIB"},
+ {HPHW_FIO, 0x024, 0x00080, 0x0, "Fast Pace Core HPIB"},
+ {HPHW_FIO, 0x015, 0x00082, 0x0, "KittyHawk GSY Core SCSI"},
+ {HPHW_FIO, 0x016, 0x00082, 0x0, "Gecko Core SCSI"},
+ {HPHW_FIO, 0x01A, 0x00082, 0x0, "Anole 64 Core SCSI"},
+ {HPHW_FIO, 0x01B, 0x00082, 0x0, "Anole 100 Core SCSI"},
+ {HPHW_FIO, 0x01C, 0x00082, 0x0, "Gecko 80 Core SCSI"},
+ {HPHW_FIO, 0x01D, 0x00082, 0x0, "Gecko 100 Core SCSI"},
+ {HPHW_FIO, 0x01F, 0x00082, 0x0, "SkyHawk 100/120 Core SCSI"},
+ {HPHW_FIO, 0x027, 0x00082, 0x0, "Piranha 100 Core SCSI"},
+ {HPHW_FIO, 0x028, 0x00082, 0x0, "Mirage Jr Core SCSI"},
+ {HPHW_FIO, 0x029, 0x00082, 0x0, "Mirage Core SCSI"},
+ {HPHW_FIO, 0x02A, 0x00082, 0x0, "Electra Core SCSI"},
+ {HPHW_FIO, 0x02B, 0x00082, 0x0, "Mirage 80 Core SCSI"},
+ {HPHW_FIO, 0x02C, 0x00082, 0x0, "Mirage 100+ Core SCSI"},
+ {HPHW_FIO, 0x02E, 0x00082, 0x0, "UL 350 Core SCSI"},
+ {HPHW_FIO, 0x02F, 0x00082, 0x0, "UL 550 Core SCSI"},
+ {HPHW_FIO, 0x032, 0x00082, 0x0, "Raven T' Core SCSI"},
+ {HPHW_FIO, 0x033, 0x00082, 0x0, "Anole T Core SCSI"},
+ {HPHW_FIO, 0x034, 0x00082, 0x0, "SAIC L-80 Core SCSI"},
+ {HPHW_FIO, 0x035, 0x00082, 0x0, "PCX-L2 712/132 Core SCSI"},
+ {HPHW_FIO, 0x036, 0x00082, 0x0, "PCX-L2 712/160 Core SCSI"},
+ {HPHW_FIO, 0x03B, 0x00082, 0x0, "Raven U/L2 Core SCSI"},
+ {HPHW_FIO, 0x03C, 0x00082, 0x0, "Merlin 132 Core SCSI"},
+ {HPHW_FIO, 0x03D, 0x00082, 0x0, "Merlin 160 Core SCSI"},
+ {HPHW_FIO, 0x03E, 0x00082, 0x0, "Merlin+ 132 Core SCSI"},
+ {HPHW_FIO, 0x03F, 0x00082, 0x0, "Merlin+ 180 Core SCSI"},
+ {HPHW_FIO, 0x044, 0x00082, 0x0, "Mohawk Core SCSI"},
+ {HPHW_FIO, 0x045, 0x00082, 0x0, "Rocky1 Core SCSI"},
+ {HPHW_FIO, 0x046, 0x00082, 0x0, "Rocky2 120 Core SCSI"},
+ {HPHW_FIO, 0x047, 0x00082, 0x0, "Rocky2 150 Core SCSI"},
+ {HPHW_FIO, 0x04B, 0x00082, 0x0, "Anole L2 132 Core SCSI"},
+ {HPHW_FIO, 0x04D, 0x00082, 0x0, "Anole L2 165 Core SCSI"},
+ {HPHW_FIO, 0x04E, 0x00082, 0x0, "Kiji L2 132 Core SCSI"},
+ {HPHW_FIO, 0x050, 0x00082, 0x0, "Merlin Jr 132 Core SCSI"},
+ {HPHW_FIO, 0x051, 0x00082, 0x0, "Firehawk Core SCSI"},
+ {HPHW_FIO, 0x056, 0x00082, 0x0, "Raven+ w SE FWSCSI Core SCSI"},
+ {HPHW_FIO, 0x057, 0x00082, 0x0, "Raven+ w Diff FWSCSI Core SCSI"},
+ {HPHW_FIO, 0x058, 0x00082, 0x0, "FireHawk 200 Core SCSI"},
+ {HPHW_FIO, 0x05C, 0x00082, 0x0, "SummitHawk 230 Core SCSI"},
+ {HPHW_FIO, 0x05E, 0x00082, 0x0, "Staccato 132 Core SCSI"},
+ {HPHW_FIO, 0x05F, 0x00082, 0x0, "Staccato 180 Core SCSI"},
+ {HPHW_FIO, 0x800, 0x00082, 0x0, "Hitachi Tiny 64 Core SCSI"},
+ {HPHW_FIO, 0x801, 0x00082, 0x0, "Hitachi Tiny 80 Core SCSI"},
+ {HPHW_FIO, 0x016, 0x00083, 0x0, "Gecko Core PC Floppy"},
+ {HPHW_FIO, 0x01C, 0x00083, 0x0, "Gecko 80 Core PC Floppy"},
+ {HPHW_FIO, 0x01D, 0x00083, 0x0, "Gecko 100 Core PC Floppy"},
+ {HPHW_FIO, 0x051, 0x00083, 0x0, "Firehawk Core PC Floppy"},
+ {HPHW_FIO, 0x058, 0x00083, 0x0, "FireHawk 200 Core PC Floppy"},
+ {HPHW_FIO, 0x027, 0x00083, 0x0, "Piranha 100 Core PC Floppy"},
+ {HPHW_FIO, 0x028, 0x00083, 0x0, "Mirage Jr Core PC Floppy"},
+ {HPHW_FIO, 0x029, 0x00083, 0x0, "Mirage Core PC Floppy"},
+ {HPHW_FIO, 0x02A, 0x00083, 0x0, "Electra Core PC Floppy"},
+ {HPHW_FIO, 0x02B, 0x00083, 0x0, "Mirage 80 Core PC Floppy"},
+ {HPHW_FIO, 0x02C, 0x00083, 0x0, "Mirage 100+ Core PC Floppy"},
+ {HPHW_FIO, 0x02E, 0x00083, 0x0, "UL 350 Core PC Floppy"},
+ {HPHW_FIO, 0x02F, 0x00083, 0x0, "UL 550 Core PC Floppy"},
+ {HPHW_FIO, 0x032, 0x00083, 0x0, "Raven T' Core PC Floppy"},
+ {HPHW_FIO, 0x034, 0x00083, 0x0, "SAIC L-80 Core PC Floppy"},
+ {HPHW_FIO, 0x035, 0x00083, 0x0, "PCX-L2 712/132 Core Floppy"},
+ {HPHW_FIO, 0x036, 0x00083, 0x0, "PCX-L2 712/160 Core Floppy"},
+ {HPHW_FIO, 0x03B, 0x00083, 0x0, "Raven U/L2 Core PC Floppy"},
+ {HPHW_FIO, 0x03C, 0x00083, 0x0, "Merlin 132 Core PC Floppy"},
+ {HPHW_FIO, 0x03D, 0x00083, 0x0, "Merlin 160 Core PC Floppy"},
+ {HPHW_FIO, 0x03E, 0x00083, 0x0, "Merlin+ 132 Core PC Floppy"},
+ {HPHW_FIO, 0x03F, 0x00083, 0x0, "Merlin+ 180 Core PC Floppy"},
+ {HPHW_FIO, 0x045, 0x00083, 0x0, "Rocky1 Core PC Floppy"},
+ {HPHW_FIO, 0x046, 0x00083, 0x0, "Rocky2 120 Core PC Floppy"},
+ {HPHW_FIO, 0x047, 0x00083, 0x0, "Rocky2 150 Core PC Floppy"},
+ {HPHW_FIO, 0x04E, 0x00083, 0x0, "Kiji L2 132 Core PC Floppy"},
+ {HPHW_FIO, 0x050, 0x00083, 0x0, "Merlin Jr 132 Core PC Floppy"},
+ {HPHW_FIO, 0x056, 0x00083, 0x0, "Raven+ w SE FWSCSI Core PC Floppy"},
+ {HPHW_FIO, 0x057, 0x00083, 0x0, "Raven+ w Diff FWSCSI Core PC Floppy"},
+ {HPHW_FIO, 0x800, 0x00083, 0x0, "Hitachi Tiny 64 Core PC Floppy"},
+ {HPHW_FIO, 0x801, 0x00083, 0x0, "Hitachi Tiny 80 Core PC Floppy"},
+ {HPHW_FIO, 0x015, 0x00084, 0x0, "KittyHawk GSY Core PS/2 Port"},
+ {HPHW_FIO, 0x016, 0x00084, 0x0, "Gecko Core PS/2 Port"},
+ {HPHW_FIO, 0x018, 0x00084, 0x0, "Gecko Optional PS/2 Port"},
+ {HPHW_FIO, 0x01A, 0x00084, 0x0, "Anole 64 Core PS/2 Port"},
+ {HPHW_FIO, 0x01B, 0x00084, 0x0, "Anole 100 Core PS/2 Port"},
+ {HPHW_FIO, 0x01C, 0x00084, 0x0, "Gecko 80 Core PS/2 Port"},
+ {HPHW_FIO, 0x01D, 0x00084, 0x0, "Gecko 100 Core PS/2 Port"},
+ {HPHW_FIO, 0x01F, 0x00084, 0x0, "SkyHawk 100/120 Core PS/2 Port"},
+ {HPHW_FIO, 0x027, 0x00084, 0x0, "Piranha 100 Core PS/2 Port"},
+ {HPHW_FIO, 0x028, 0x00084, 0x0, "Mirage Jr Core PS/2 Port"},
+ {HPHW_FIO, 0x029, 0x00084, 0x0, "Mirage Core PS/2 Port"},
+ {HPHW_FIO, 0x02A, 0x00084, 0x0, "Electra Core PS/2 Port"},
+ {HPHW_FIO, 0x02B, 0x00084, 0x0, "Mirage 80 Core PS/2 Port"},
+ {HPHW_FIO, 0x02C, 0x00084, 0x0, "Mirage 100+ Core PS/2 Port"},
+ {HPHW_FIO, 0x02E, 0x00084, 0x0, "UL 350 Core PS/2 Port"},
+ {HPHW_FIO, 0x02F, 0x00084, 0x0, "UL 550 Core PS/2 Port"},
+ {HPHW_FIO, 0x032, 0x00084, 0x0, "Raven T' Core PS/2 Port"},
+ {HPHW_FIO, 0x033, 0x00084, 0x0, "Anole T Core PS/2 Port"},
+ {HPHW_FIO, 0x034, 0x00084, 0x0, "SAIC L-80 Core PS/2 Port"},
+ {HPHW_FIO, 0x035, 0x00084, 0x0, "PCX-L2 712/132 Core PS/2 Port"},
+ {HPHW_FIO, 0x036, 0x00084, 0x0, "PCX-L2 712/160 Core PS/2 Port"},
+ {HPHW_FIO, 0x03B, 0x00084, 0x0, "Raven U/L2 Core PS/2 Port"},
+ {HPHW_FIO, 0x03C, 0x00084, 0x0, "Merlin 132 Core PS/2 Port"},
+ {HPHW_FIO, 0x03D, 0x00084, 0x0, "Merlin 160 Core PS/2 Port"},
+ {HPHW_FIO, 0x03E, 0x00084, 0x0, "Merlin+ 132 Core PS/2 Port"},
+ {HPHW_FIO, 0x03F, 0x00084, 0x0, "Merlin+ 180 Core PS/2 Port"},
+ {HPHW_FIO, 0x044, 0x00084, 0x0, "Mohawk Core PS/2 Port"},
+ {HPHW_FIO, 0x045, 0x00084, 0x0, "Rocky1 Core PS/2 Port"},
+ {HPHW_FIO, 0x046, 0x00084, 0x0, "Rocky2 120 Core PS/2 Port"},
+ {HPHW_FIO, 0x047, 0x00084, 0x0, "Rocky2 150 Core PS/2 Port"},
+ {HPHW_FIO, 0x048, 0x00084, 0x0, "Rocky2 120 Dino PS/2 Port"},
+ {HPHW_FIO, 0x049, 0x00084, 0x0, "Rocky2 150 Dino PS/2 Port"},
+ {HPHW_FIO, 0x04B, 0x00084, 0x0, "Anole L2 132 Core PS/2 Port"},
+ {HPHW_FIO, 0x04D, 0x00084, 0x0, "Anole L2 165 Core PS/2 Port"},
+ {HPHW_FIO, 0x04E, 0x00084, 0x0, "Kiji L2 132 Core PS/2 Port"},
+ {HPHW_FIO, 0x050, 0x00084, 0x0, "Merlin Jr 132 Core PS/2 Port"},
+ {HPHW_FIO, 0x051, 0x00084, 0x0, "Firehawk Core PS/2 Port"},
+ {HPHW_FIO, 0x056, 0x00084, 0x0, "Raven+ w SE FWSCSI Core PS/2 Port"},
+ {HPHW_FIO, 0x057, 0x00084, 0x0, "Raven+ w Diff FWSCSI Core PS/2 Port"},
+ {HPHW_FIO, 0x058, 0x00084, 0x0, "FireHawk 200 Core PS/2 Port"},
+ {HPHW_FIO, 0x05C, 0x00084, 0x0, "SummitHawk 230 Core PS/2 Port"},
+ {HPHW_FIO, 0x800, 0x00084, 0x0, "Hitachi Tiny 64 Core PS/2 Port"},
+ {HPHW_FIO, 0x801, 0x00084, 0x0, "Hitachi Tiny 80 Core PS/2 Port"},
+ {HPHW_FIO, 0x004, 0x00085, 0x0, "Solo GSC Optional Graphics"},
+ {HPHW_FIO, 0x005, 0x00085, 0x0, "Duet GSC Optional Graphics"},
+ {HPHW_FIO, 0x008, 0x00085, 0x0, "Anole Artist Optional Graphics"},
+ {HPHW_FIO, 0x010, 0x00085, 0x0, "Mirage 80 GSC Builtin Graphics"},
+ {HPHW_FIO, 0x011, 0x00085, 0x0, "Mirage 100+ GSC Builtin Graphics"},
+ {HPHW_FIO, 0x012, 0x00085, 0x0, "Mirage Jr GSC Builtin Graphics"},
+ {HPHW_FIO, 0x013, 0x00085, 0x0, "Mirage GSC Builtin Graphics"},
+ {HPHW_FIO, 0x014, 0x00085, 0x0, "Electra GSC Builtin Graphics"},
+ {HPHW_FIO, 0x016, 0x00085, 0x0, "Gecko GSC Core Graphics"},
+ {HPHW_FIO, 0x017, 0x00085, 0x0, "Gecko GSC Optional Graphics"},
+ {HPHW_FIO, 0x01A, 0x00085, 0x0, "Anole 64 Artist Builtin Graphics"},
+ {HPHW_FIO, 0x01B, 0x00085, 0x0, "Anole 100 Artist Builtin Graphics"},
+ {HPHW_FIO, 0x01C, 0x00085, 0x0, "Gecko 80 GSC Core Graphics"},
+ {HPHW_FIO, 0x01D, 0x00085, 0x0, "Gecko 100 GSC Core Graphics"},
+ {HPHW_FIO, 0x032, 0x00085, 0x0, "Raven T' GSC Core Graphics"},
+ {HPHW_FIO, 0x033, 0x00085, 0x0, "Anole T Artist Builtin Graphics"},
+ {HPHW_FIO, 0x034, 0x00085, 0x0, "SAIC L-80 GSC Core Graphics"},
+ {HPHW_FIO, 0x035, 0x00085, 0x0, "PCX-L2 712/132 Core Graphics"},
+ {HPHW_FIO, 0x036, 0x00085, 0x0, "PCX-L2 712/160 Core Graphics"},
+ {HPHW_FIO, 0x03B, 0x00085, 0x0, "Raven U/L2 Core Graphics"},
+ {HPHW_FIO, 0x03C, 0x00085, 0x0, "Merlin 132 Core Graphics"},
+ {HPHW_FIO, 0x03D, 0x00085, 0x0, "Merlin 160 Core Graphics"},
+ {HPHW_FIO, 0x03E, 0x00085, 0x0, "Merlin+ 132 Core Graphics"},
+ {HPHW_FIO, 0x03F, 0x00085, 0x0, "Merlin+ 180 Core Graphics"},
+ {HPHW_FIO, 0x045, 0x00085, 0x0, "Rocky1 Core Graphics"},
+ {HPHW_FIO, 0x046, 0x00085, 0x0, "Rocky2 120 Core Graphics"},
+ {HPHW_FIO, 0x047, 0x00085, 0x0, "Rocky2 150 Core Graphics"},
+ {HPHW_FIO, 0x04B, 0x00085, 0x0, "Anole L2 132 Core Graphics"},
+ {HPHW_FIO, 0x04D, 0x00085, 0x0, "Anole L2 165 Core Graphics"},
+ {HPHW_FIO, 0x04E, 0x00085, 0x0, "Kiji L2 132 Core Graphics"},
+ {HPHW_FIO, 0x050, 0x00085, 0x0, "Merlin Jr 132 Core Graphics"},
+ {HPHW_FIO, 0x056, 0x00085, 0x0, "Raven+ w SE FWSCSI Core Graphics"},
+ {HPHW_FIO, 0x057, 0x00085, 0x0, "Raven+ w Diff FWSCSI Core Graphics"},
+ {HPHW_FIO, 0x800, 0x00085, 0x0, "Hitachi Tiny 64 Core Graphics"},
+ {HPHW_FIO, 0x801, 0x00085, 0x0, "Hitachi Tiny 80 Core Graphics"},
+ {HPHW_FIO, 0x004, 0x00086, 0x0, "GSC IBM Token Ring"},
+ {HPHW_FIO, 0x015, 0x00087, 0x0, "Gecko Optional ISDN"},
+ {HPHW_FIO, 0x016, 0x00087, 0x0, "Gecko Core ISDN"},
+ {HPHW_FIO, 0x01C, 0x00087, 0x0, "Gecko 80 Core ISDN"},
+ {HPHW_FIO, 0x01D, 0x00087, 0x0, "Gecko 100 Core ISDN"},
+ {HPHW_FIO, 0x010, 0x00088, 0x0, "Pace VME Networking"},
+ {HPHW_FIO, 0x011, 0x00088, 0x0, "Sidewinder VME Networking"},
+ {HPHW_FIO, 0x01A, 0x00088, 0x0, "Anole 64 VME Networking"},
+ {HPHW_FIO, 0x01B, 0x00088, 0x0, "Anole 100 VME Networking"},
+ {HPHW_FIO, 0x024, 0x00088, 0x0, "Fast Pace VME Networking"},
+ {HPHW_FIO, 0x034, 0x00088, 0x0, "Anole T VME Networking"},
+ {HPHW_FIO, 0x04A, 0x00088, 0x0, "Anole L2 132 VME Networking"},
+ {HPHW_FIO, 0x04C, 0x00088, 0x0, "Anole L2 165 VME Networking"},
+ {HPHW_FIO, 0x011, 0x0008A, 0x0, "WB-96 Core LAN (802.3)"},
+ {HPHW_FIO, 0x012, 0x0008A, 0x0, "Orville Core LAN (802.3)"},
+ {HPHW_FIO, 0x013, 0x0008A, 0x0, "Wilbur Core LAN (802.3)"},
+ {HPHW_FIO, 0x014, 0x0008A, 0x0, "WB-80 Core LAN (802.3)"},
+ {HPHW_FIO, 0x015, 0x0008A, 0x0, "KittyHawk GSY Core LAN (802.3)"},
+ {HPHW_FIO, 0x016, 0x0008A, 0x0, "Gecko Core LAN (802.3)"},
+ {HPHW_FIO, 0x018, 0x0008A, 0x0, "Gecko Optional LAN (802.3)"},
+ {HPHW_FIO, 0x01A, 0x0008A, 0x0, "Anole 64 Core LAN (802.3)"},
+ {HPHW_FIO, 0x01B, 0x0008A, 0x0, "Anole 100 Core LAN (802.3)"},
+ {HPHW_FIO, 0x01C, 0x0008A, 0x0, "Gecko 80 Core LAN (802.3)"},
+ {HPHW_FIO, 0x01D, 0x0008A, 0x0, "Gecko 100 Core LAN (802.3)"},
+ {HPHW_FIO, 0x01F, 0x0008A, 0x0, "SkyHawk 100/120 Core LAN (802.3)"},
+ {HPHW_FIO, 0x027, 0x0008A, 0x0, "Piranha 100 Core LAN (802.3)"},
+ {HPHW_FIO, 0x028, 0x0008A, 0x0, "Mirage Jr Core LAN (802.3)"},
+ {HPHW_FIO, 0x029, 0x0008A, 0x0, "Mirage Core LAN (802.3)"},
+ {HPHW_FIO, 0x02A, 0x0008A, 0x0, "Electra Core LAN (802.3)"},
+ {HPHW_FIO, 0x02B, 0x0008A, 0x0, "Mirage 80 Core LAN (802.3)"},
+ {HPHW_FIO, 0x02C, 0x0008A, 0x0, "Mirage 100+ Core LAN (802.3)"},
+ {HPHW_FIO, 0x02E, 0x0008A, 0x0, "UL 350 Core LAN (802.3)"},
+ {HPHW_FIO, 0x02F, 0x0008A, 0x0, "UL 550 Core LAN (802.3)"},
+ {HPHW_FIO, 0x032, 0x0008A, 0x0, "Raven T' Core LAN (802.3)"},
+ {HPHW_FIO, 0x033, 0x0008A, 0x0, "Anole T Core LAN (802.3)"},
+ {HPHW_FIO, 0x034, 0x0008A, 0x0, "SAIC L-80 Core LAN (802.3)"},
+ {HPHW_FIO, 0x035, 0x0008A, 0x0, "PCX-L2 712/132 Core LAN (802.3)"},
+ {HPHW_FIO, 0x036, 0x0008A, 0x0, "PCX-L2 712/160 Core LAN (802.3)"},
+ {HPHW_FIO, 0x03B, 0x0008A, 0x0, "Raven U/L2 Core LAN (802.3)"},
+ {HPHW_FIO, 0x03C, 0x0008A, 0x0, "Merlin 132 Core LAN (802.3)"},
+ {HPHW_FIO, 0x03D, 0x0008A, 0x0, "Merlin 160 Core LAN (802.3)"},
+ {HPHW_FIO, 0x044, 0x0008A, 0x0, "Mohawk Core LAN (802.3)"},
+ {HPHW_FIO, 0x045, 0x0008A, 0x0, "Rocky1 Core LAN (802.3)"},
+ {HPHW_FIO, 0x046, 0x0008A, 0x0, "Rocky2 120 Core LAN (802.3)"},
+ {HPHW_FIO, 0x047, 0x0008A, 0x0, "Rocky2 150 Core LAN (802.3)"},
+ {HPHW_FIO, 0x04B, 0x0008A, 0x0, "Anole L2 132 Core LAN (802.3)"},
+ {HPHW_FIO, 0x04D, 0x0008A, 0x0, "Anole L2 165 Core LAN (802.3)"},
+ {HPHW_FIO, 0x04E, 0x0008A, 0x0, "Kiji L2 132 Core LAN (802.3)"},
+ {HPHW_FIO, 0x050, 0x0008A, 0x0, "Merlin Jr 132 Core LAN (802.3)"},
+ {HPHW_FIO, 0x058, 0x0008A, 0x0, "FireHawk 200 Core LAN (802.3)"},
+ {HPHW_FIO, 0x800, 0x0008A, 0x0, "Hitachi Tiny 64 Core LAN (802.3)"},
+ {HPHW_FIO, 0x801, 0x0008A, 0x0, "Hitachi Tiny 80 Core LAN (802.3)"},
+ {HPHW_FIO, 0x004, 0x0008C, 0x0, "SkyHawk 100/120 Wax RS-232"},
+ {HPHW_FIO, 0x005, 0x0008C, 0x0, "SAIC L-80 Wax RS-232"},
+ {HPHW_FIO, 0x006, 0x0008C, 0x0, "Raven U/L2 Dino RS-232"},
+ {HPHW_FIO, 0x007, 0x0008C, 0x0, "Dino RS-232"},
+ {HPHW_FIO, 0x008, 0x0008C, 0x0, "Merlin 132 Dino RS-232"},
+ {HPHW_FIO, 0x009, 0x0008C, 0x0, "Merlin 160 Dino RS-232"},
+ {HPHW_FIO, 0x00A, 0x0008C, 0x0, "Merlin Jr 132 Dino RS-232"},
+ {HPHW_FIO, 0x010, 0x0008C, 0x0, "Mirage 80 Wax RS-232"},
+ {HPHW_FIO, 0x011, 0x0008C, 0x0, "Mirage 100+ Wax RS-232"},
+ {HPHW_FIO, 0x012, 0x0008C, 0x0, "Mirage Jr Wax RS-232"},
+ {HPHW_FIO, 0x013, 0x0008C, 0x0, "Mirage Wax RS-232"},
+ {HPHW_FIO, 0x014, 0x0008C, 0x0, "Electra Wax RS-232"},
+ {HPHW_FIO, 0x015, 0x0008C, 0x0, "KittyHawk GSY Core RS-232"},
+ {HPHW_FIO, 0x016, 0x0008C, 0x0, "Gecko Core RS-232"},
+ {HPHW_FIO, 0x017, 0x0008C, 0x0, "Raven Backplane RS-232"},
+ {HPHW_FIO, 0x018, 0x0008C, 0x0, "Gecko Optional RS-232"},
+ {HPHW_FIO, 0x019, 0x0008C, 0x0, "Merlin+ 180 Dino RS-232"},
+ {HPHW_FIO, 0x01A, 0x0008C, 0x0, "Anole 64 Core RS-232"},
+ {HPHW_FIO, 0x01B, 0x0008C, 0x0, "Anole 100 Core RS-232"},
+ {HPHW_FIO, 0x01C, 0x0008C, 0x0, "Gecko 80 Core RS-232"},
+ {HPHW_FIO, 0x01D, 0x0008C, 0x0, "Gecko 100 Core RS-232"},
+ {HPHW_FIO, 0x01E, 0x0008C, 0x0, "Raven T' Wax RS-232"},
+ {HPHW_FIO, 0x01F, 0x0008C, 0x0, "SkyHawk 100/120 Core RS-232"},
+ {HPHW_FIO, 0x020, 0x0008C, 0x0, "Anole 64 Timi RS-232"},
+ {HPHW_FIO, 0x021, 0x0008C, 0x0, "Anole 100 Timi RS-232"},
+ {HPHW_FIO, 0x022, 0x0008C, 0x0, "Merlin+ 132 Dino RS-232"},
+ {HPHW_FIO, 0x023, 0x0008C, 0x0, "Rocky1 Wax RS-232"},
+ {HPHW_FIO, 0x025, 0x0008C, 0x0, "Armyknife Optional RS-232"},
+ {HPHW_FIO, 0x026, 0x0008C, 0x0, "Piranha 100 Wax RS-232"},
+ {HPHW_FIO, 0x027, 0x0008C, 0x0, "Piranha 100 Core RS-232"},
+ {HPHW_FIO, 0x028, 0x0008C, 0x0, "Mirage Jr Core RS-232"},
+ {HPHW_FIO, 0x029, 0x0008C, 0x0, "Mirage Core RS-232"},
+ {HPHW_FIO, 0x02A, 0x0008C, 0x0, "Electra Core RS-232"},
+ {HPHW_FIO, 0x02B, 0x0008C, 0x0, "Mirage 80 Core RS-232"},
+ {HPHW_FIO, 0x02C, 0x0008C, 0x0, "Mirage 100+ Core RS-232"},
+ {HPHW_FIO, 0x02E, 0x0008C, 0x0, "UL 350 Lasi Core RS-232"},
+ {HPHW_FIO, 0x02F, 0x0008C, 0x0, "UL 550 Lasi Core RS-232"},
+ {HPHW_FIO, 0x030, 0x0008C, 0x0, "UL 350 Wax Core RS-232"},
+ {HPHW_FIO, 0x031, 0x0008C, 0x0, "UL 550 Wax Core RS-232"},
+ {HPHW_FIO, 0x032, 0x0008C, 0x0, "Raven T' Lasi Core RS-232"},
+ {HPHW_FIO, 0x033, 0x0008C, 0x0, "Anole T Core RS-232"},
+ {HPHW_FIO, 0x034, 0x0008C, 0x0, "SAIC L-80 Core RS-232"},
+ {HPHW_FIO, 0x035, 0x0008C, 0x0, "PCX-L2 712/132 Core RS-232"},
+ {HPHW_FIO, 0x036, 0x0008C, 0x0, "PCX-L2 712/160 Core RS-232"},
+ {HPHW_FIO, 0x03A, 0x0008C, 0x0, "Merlin+ Wax RS-232"},
+ {HPHW_FIO, 0x03B, 0x0008C, 0x0, "Raven U/L2 Core RS-232"},
+ {HPHW_FIO, 0x03C, 0x0008C, 0x0, "Merlin 132 Core RS-232"},
+ {HPHW_FIO, 0x03D, 0x0008C, 0x0, "Merlin 160 Core RS-232"},
+ {HPHW_FIO, 0x03E, 0x0008C, 0x0, "Merlin+ 132 Core RS-232"},
+ {HPHW_FIO, 0x03F, 0x0008C, 0x0, "Merlin+ 180 Core RS-232"},
+ {HPHW_FIO, 0x040, 0x0008C, 0x0, "Merlin 132 Wax RS-232"},
+ {HPHW_FIO, 0x041, 0x0008C, 0x0, "Merlin 160 Wax RS-232"},
+ {HPHW_FIO, 0x043, 0x0008C, 0x0, "Merlin 132/160 Wax RS-232"},
+ {HPHW_FIO, 0x044, 0x0008C, 0x0, "Mohawk Core RS-232"},
+ {HPHW_FIO, 0x045, 0x0008C, 0x0, "Rocky1 Core RS-232"},
+ {HPHW_FIO, 0x046, 0x0008C, 0x0, "Rocky2 120 Core RS-232"},
+ {HPHW_FIO, 0x047, 0x0008C, 0x0, "Rocky2 150 Core RS-232"},
+ {HPHW_FIO, 0x048, 0x0008C, 0x0, "Rocky2 120 Dino RS-232"},
+ {HPHW_FIO, 0x049, 0x0008C, 0x0, "Rocky2 150 Dino RS-232"},
+ {HPHW_FIO, 0x04A, 0x0008C, 0x0, "Anole L2 132 TIMI RS-232"},
+ {HPHW_FIO, 0x04B, 0x0008C, 0x0, "Anole L2 132 Core RS-232"},
+ {HPHW_FIO, 0x04C, 0x0008D, 0x0, "Anole L2 165 TIMI RS-232"},
+ {HPHW_FIO, 0x04D, 0x0008C, 0x0, "Anole L2 165 Core RS-232"},
+ {HPHW_FIO, 0x04E, 0x0008C, 0x0, "Kiji L2 132 Core RS-232"},
+ {HPHW_FIO, 0x04F, 0x0008C, 0x0, "Kiji L2 132 Dino RS-232"},
+ {HPHW_FIO, 0x050, 0x0008C, 0x0, "Merlin Jr 132 Core RS-232"},
+ {HPHW_FIO, 0x051, 0x0008C, 0x0, "Firehawk Core RS-232"},
+ {HPHW_FIO, 0x052, 0x0008C, 0x0, "Raven+ Hi Power Backplane w EISA RS-232"},
+ {HPHW_FIO, 0x053, 0x0008C, 0x0, "Raven+ Hi Power Backplane w/o EISA RS-232"},
+ {HPHW_FIO, 0x054, 0x0008C, 0x0, "Raven+ Lo Power Backplane w EISA RS-232"},
+ {HPHW_FIO, 0x055, 0x0008C, 0x0, "Raven+ Lo Power Backplane w/o EISA RS-232"},
+ {HPHW_FIO, 0x056, 0x0008C, 0x0, "Raven+ w SE FWSCSI Core RS-232"},
+ {HPHW_FIO, 0x057, 0x0008C, 0x0, "Raven+ w Diff FWSCSI Core RS-232"},
+ {HPHW_FIO, 0x058, 0x0008C, 0x0, "FireHawk 200 Core RS-232"},
+ {HPHW_FIO, 0x059, 0x0008C, 0x0, "FireHawk 200 Wax RS-232"},
+ {HPHW_FIO, 0x05A, 0x0008C, 0x0, "Raven+ L2 Backplane w EISA RS-232"},
+ {HPHW_FIO, 0x05B, 0x0008C, 0x0, "Raven+ L2 Backplane w/o EISA RS-232"},
+ {HPHW_FIO, 0x05D, 0x0008C, 0x0, "SummitHawk Dino RS-232"},
+ {HPHW_FIO, 0x05E, 0x0008C, 0x0, "Staccato 132 Core LAN RS-232"},
+ {HPHW_FIO, 0x05F, 0x0008C, 0x0, "Staccato 180 Core LAN RS-232"},
+ {HPHW_FIO, 0x800, 0x0008C, 0x0, "Hitachi Tiny 64 Core RS-232"},
+ {HPHW_FIO, 0x801, 0x0008C, 0x0, "Hitachi Tiny 80 Core RS-232"},
+ {HPHW_FIO, 0x015, 0x0008D, 0x0, "Gecko Optional RJ-16"},
+ {HPHW_FIO, 0x016, 0x0008D, 0x0, "Gecko Core RJ-16"},
+ {HPHW_FIO, 0x01C, 0x0008D, 0x0, "Gecko 80 Core RJ-16"},
+ {HPHW_FIO, 0x01D, 0x0008D, 0x0, "Gecko 100 Core RJ-16"},
+ {HPHW_FIO, 0x004, 0x0008F, 0x0, "Anole Boot Rom"},
+ {HPHW_FIO, 0x005, 0x0008F, 0x0, "Rocky1 Boot Rom"},
+ {HPHW_FIO, 0x006, 0x0008F, 0x0, "Rocky2 120 Boot Rom"},
+ {HPHW_FIO, 0x007, 0x0008F, 0x0, "Rocky2 150 Boot Rom"},
+ {HPHW_FIO, 0x01B, 0x0008F, 0x0, "Anole 100 Boot Rom"},
+ {HPHW_FIO, 0x006, 0x00096, 0x0, "Raven U/L2 Dino PS/2 Port"},
+ {HPHW_FIO, 0x007, 0x00096, 0x0, "Dino PS/2 Port"},
+ {HPHW_FIO, 0x008, 0x00096, 0x0, "Merlin 132 Dino PS/2 Port"},
+ {HPHW_FIO, 0x009, 0x00096, 0x0, "Merlin 160 Dino PS/2 Port"},
+ {HPHW_FIO, 0x00A, 0x00096, 0x0, "Merlin Jr 132 Dino PS/2 Port"},
+ {HPHW_FIO, 0x019, 0x00096, 0x0, "Merlin+ 180 Dino PS/2 Port"},
+ {HPHW_FIO, 0x022, 0x00096, 0x0, "Merlin+ 132 Dino PS/2 Port"},
+ {HPHW_FIO, 0x004, 0x00097, 0x0, "Cascade EISA 100VG LAN"},
+ {HPHW_FIO, 0x023, 0x00099, 0x0, "Rocky1 Wax HPIB"},
+ {HPHW_FIO, 0x048, 0x00099, 0x0, "Rocky2 120 Clark/Dino HPIB"},
+ {HPHW_FIO, 0x049, 0x00099, 0x0, "Rocky2 150 Clark/Dino HPIB"},
+ {HPHW_FIO, 0x004, 0x000A1, 0x0, "SPP2000 Console TTY"},
+ {HPHW_FIO, 0x004, 0x000A2, 0x0, "Forte Core PCI 10/100BT LAN"},
+ {HPHW_FIO, 0x005, 0x000A2, 0x0, "AllegroLow PCI 10/100BT LAN"},
+ {HPHW_FIO, 0x006, 0x000A2, 0x0, "AllegroHigh Core PCI 10/100BT LAN"},
+ {HPHW_FIO, 0x007, 0x000A2, 0x0, "PCI Plug-in LAN"},
+ {HPHW_FIO, 0x00A, 0x000A2, 0x0, "Lego 360 Core PCI 10/100BT LAN"},
+ {HPHW_FIO, 0x03E, 0x000A2, 0x0, "Merlin+ 132 Core PCI LAN"},
+ {HPHW_FIO, 0x03F, 0x000A2, 0x0, "Merlin+ 180 Core PCI LAN"},
+ {HPHW_FIO, 0x056, 0x000A2, 0x0, "Raven+ w SE FWSCSI Core PCI LAN"},
+ {HPHW_FIO, 0x057, 0x000A2, 0x0, "Raven+ w Diff FWSCSI Core PCI LAN"},
+ {HPHW_FIO, 0x05E, 0x000A2, 0x0, "Staccato 132 PCI LAN"},
+ {HPHW_FIO, 0x05F, 0x000A2, 0x0, "Staccato 180 PCI LAN"},
+ {HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI LVD Ultra2 SCSI"},
+ {HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI SE UltraSCSI"},
+ {HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI IDE/ATAPI CD-ROM"},
+ {HPHW_FIO, 0x005, 0x000A3, 0x0, "AllegroLow Core PCI LVD Ultra2 SCSI"},
+ {HPHW_FIO, 0x005, 0x000A3, 0x0, "AllegroLow Core PCI IDE/ATAPI CD-ROM"},
+ {HPHW_FIO, 0x006, 0x000A3, 0x0, "AllegroHigh Core PCI LVD Ultra2 SCSI"},
+ {HPHW_FIO, 0x006, 0x000A3, 0x0, "AllegroHigh Core PCI IDE/ATAPI CD-ROM"},
+ {HPHW_FIO, 0x007, 0x000A3, 0x0, "PCI Plug-in Disk"},
+ {HPHW_FIO, 0x008, 0x000A3, 0x0, "A5158A S FC Tachlite HBA"},
+ {HPHW_FIO, 0x009, 0x000A3, 0x0, "A5157A D FC HBA"},
+ {HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI LVD Ultra2 SCSI"},
+ {HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI NSE UltraSCSI"},
+ {HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI WSE UltraSCSI"},
+ {HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI IDE/ATAPI CD-ROM"},
+ {HPHW_FIO, 0x03E, 0x000A3, 0x0, "Merlin+ 132 Core SE FWSCSI PCI Disk"},
+ {HPHW_FIO, 0x03F, 0x000A3, 0x0, "Merlin+ 180 Core SE FWSCSI PCI Disk"},
+ {HPHW_FIO, 0x056, 0x000A3, 0x0, "Raven+ w SE FWSCSI Core PCI Disk"},
+ {HPHW_FIO, 0x057, 0x000A3, 0x0, "Raven+ w Diff FWSCSI Core PCI Disk"},
+ {HPHW_FIO, 0x004, 0x000A4, 0x0, "SPP2000 Core BA"},
+ {HPHW_FIO, 0x004, 0x000A6, 0x0, "Sonic Ethernet 802.3 Card"},
+ {HPHW_FIO, 0x004, 0x000A9, 0x00, "Forte Core PCI SuperIO RS-232"},
+ {HPHW_FIO, 0x004, 0x000A9, 0x00, "Forte Core PCI USB KB"},
+ {HPHW_FIO, 0x005, 0x000A9, 0x00, "AllegroLow Core PCI SuperIO RS-232"},
+ {HPHW_FIO, 0x005, 0x000A9, 0x00, "AllegroLow Core PCI USB KB"},
+ {HPHW_FIO, 0x006, 0x000A9, 0x00, "AllegroHigh Core PCI SuperIO RS-232"},
+ {HPHW_FIO, 0x006, 0x000A9, 0x00, "AllegroHigh Core PCI USB KB"},
+ {HPHW_FIO, 0x007, 0x000A9, 0x0, "Miscellaneous PCI Plug-in"},
+ {HPHW_FIO, 0x00A, 0x000A9, 0x0, "Lego 360 Core PCI SuperIO RS-232"},
+ {HPHW_FIO, 0x00A, 0x000A9, 0x0, "Lego 360 Core PCI USB KB"},
+ {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"},
+ {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"},
+ {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"},
+ {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"},
+ {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"},
+ {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"},
+ {HPHW_IOA, 0x581, 0x0000B, 0x10, "Uturn-IOA BC Runway Port"},
+ {HPHW_IOA, 0x582, 0x0000B, 0x10, "Astro BC Runway Port"},
+ {HPHW_IOA, 0x700, 0x0000B, 0x00, "NEC-IOS BC System Bus Port"},
+ {HPHW_IOA, 0x880, 0x0000C, 0x10, "Pluto BC McKinley Port"},
+ {HPHW_MEMORY, 0x002, 0x00008, 0x00, "MID_BUS"},
+ {HPHW_MEMORY, 0x063, 0x00009, 0x00, "712/132 L2 Upgrade"},
+ {HPHW_MEMORY, 0x064, 0x00009, 0x00, "712/160 L2 Upgrade"},
+ {HPHW_MEMORY, 0x065, 0x00009, 0x00, "715/132 L2 Upgrade"},
+ {HPHW_MEMORY, 0x066, 0x00009, 0x00, "715/160 L2 Upgrade"},
+ {HPHW_MEMORY, 0x0AF, 0x00009, 0x00, "Everest Mako Memory"},
+ {HPHW_OTHER, 0x004, 0x00030, 0x00, "Master"},
+ {HPHW_OTHER, 0x004, 0x00034, 0x00, "Slave"},
+ {HPHW_OTHER, 0x004, 0x00038, 0x00, "EDU"},
+ {HPHW_OTHER, 0x004, 0x00049, 0x00, "LGB Control"},
+ {HPHW_MC, 0x004, 0x000C0, 0x00, "BMC IPMI Mgmt Ctlr"},
+ {HPHW_FAULTY, 0, } /* Special Marker for last entry */
+};
+
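+ /*
+ * Each row above identifies one device by the identity words reported
+ * by firmware: hardware type, hversion, sversion, a fourth field that
+ * the lookup below does not examine, and a human-readable name.  The
+ * HPHW_FAULTY row terminates the table; parisc_hardware_description()
+ * stops scanning when it reaches it.
+ */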
+
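+ /*
+ * Maps the model number taken from a processor's hversion to a
+ * cpu_type.  An entry matches when the model and the entry agree on
+ * every bit selected by the mask, so one row covers the whole range
+ * shown in its comment, and the all-zero terminator matches anything,
+ * making pcx the default.  A minimal lookup sketch -- illustrative
+ * only, the helper that actually walks this table may differ:
+ *
+ *    enum cpu_type cpu_type_of(unsigned short model)
+ *    {
+ *        const struct hp_cpu_type_mask *p;
+ *
+ *        for (p = hp_cpu_type_mask_list; ; p++)
+ *            if ((model & p->mask) == (p->model & p->mask))
+ *                return p->cpu;
+ *    }
+ */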
+static struct hp_cpu_type_mask {
+ unsigned short model;
+ unsigned short mask;
+ enum cpu_type cpu;
+} hp_cpu_type_mask_list[] __devinitdata = {
+
+ { 0x0000, 0x0ff0, pcx }, /* 0x0000 - 0x000f */
+ { 0x0048, 0x0ff0, pcxl }, /* 0x0040 - 0x004f */
+ { 0x0080, 0x0ff0, pcx }, /* 0x0080 - 0x008f */
+ { 0x0100, 0x0ff0, pcx }, /* 0x0100 - 0x010f */
+ { 0x0182, 0x0ffe, pcx }, /* 0x0182 - 0x0183 */
+ { 0x0182, 0x0ffe, pcxt }, /* 0x0182 - 0x0183 */
+ { 0x0184, 0x0fff, pcxu }, /* 0x0184 - 0x0184 */
+ { 0x0200, 0x0ffe, pcxs }, /* 0x0200 - 0x0201 */
+ { 0x0202, 0x0fff, pcxs }, /* 0x0202 - 0x0202 */
+ { 0x0203, 0x0fff, pcxt }, /* 0x0203 - 0x0203 */
+ { 0x0204, 0x0ffc, pcxt }, /* 0x0204 - 0x0207 */
+ { 0x0280, 0x0ffc, pcxs }, /* 0x0280 - 0x0283 */
+ { 0x0284, 0x0ffc, pcxt }, /* 0x0284 - 0x0287 */
+ { 0x0288, 0x0fff, pcxt }, /* 0x0288 - 0x0288 */
+ { 0x0300, 0x0ffc, pcxs }, /* 0x0300 - 0x0303 */
+ { 0x0310, 0x0ff0, pcxt }, /* 0x0310 - 0x031f */
+ { 0x0320, 0x0ff0, pcxt }, /* 0x0320 - 0x032f */
+ { 0x0400, 0x0ff0, pcxt }, /* 0x0400 - 0x040f */
+ { 0x0480, 0x0ff0, pcxl }, /* 0x0480 - 0x048f */
+ { 0x0500, 0x0ff0, pcxl2 }, /* 0x0500 - 0x050f */
+ { 0x0510, 0x0ff0, pcxl2 }, /* 0x0510 - 0x051f */
+ { 0x0580, 0x0ff8, pcxt_ }, /* 0x0580 - 0x0587 */
+ { 0x0588, 0x0ffc, pcxt_ }, /* 0x0588 - 0x058b */
+ { 0x058c, 0x0ffe, pcxt_ }, /* 0x058c - 0x058d */
+ { 0x058e, 0x0fff, pcxt_ }, /* 0x058e - 0x058e */
+ { 0x058f, 0x0fff, pcxu }, /* 0x058f - 0x058f */
+ { 0x0590, 0x0ffe, pcxu }, /* 0x0590 - 0x0591 */
+ { 0x0592, 0x0fff, pcxt_ }, /* 0x0592 - 0x0592 */
+ { 0x0593, 0x0fff, pcxu }, /* 0x0593 - 0x0593 */
+ { 0x0594, 0x0ffc, pcxu }, /* 0x0594 - 0x0597 */
+ { 0x0598, 0x0ffe, pcxu_ }, /* 0x0598 - 0x0599 */
+ { 0x059a, 0x0ffe, pcxu }, /* 0x059a - 0x059b */
+ { 0x059c, 0x0fff, pcxu }, /* 0x059c - 0x059c */
+ { 0x059d, 0x0fff, pcxu_ }, /* 0x059d - 0x059d */
+ { 0x059e, 0x0fff, pcxt_ }, /* 0x059e - 0x059e */
+ { 0x059f, 0x0fff, pcxu }, /* 0x059f - 0x059f */
+ { 0x05a0, 0x0ffe, pcxt_ }, /* 0x05a0 - 0x05a1 */
+ { 0x05a2, 0x0ffe, pcxu }, /* 0x05a2 - 0x05a3 */
+ { 0x05a4, 0x0ffc, pcxu }, /* 0x05a4 - 0x05a7 */
+ { 0x05a8, 0x0ffc, pcxu }, /* 0x05a8 - 0x05ab */
+ { 0x05ad, 0x0fff, pcxu_ }, /* 0x05ad - 0x05ad */
+ { 0x05ae, 0x0ffe, pcxu_ }, /* 0x05ae - 0x05af */
+ { 0x05b0, 0x0ffe, pcxu_ }, /* 0x05b0 - 0x05b1 */
+ { 0x05b2, 0x0fff, pcxu_ }, /* 0x05b2 - 0x05b2 */
+ { 0x05b3, 0x0fff, pcxu }, /* 0x05b3 - 0x05b3 */
+ { 0x05b4, 0x0fff, pcxw }, /* 0x05b4 - 0x05b4 */
+ { 0x05b5, 0x0fff, pcxu_ }, /* 0x05b5 - 0x05b5 */
+ { 0x05b6, 0x0ffe, pcxu_ }, /* 0x05b6 - 0x05b7 */
+ { 0x05b8, 0x0ffe, pcxu_ }, /* 0x05b8 - 0x05b9 */
+ { 0x05ba, 0x0fff, pcxu_ }, /* 0x05ba - 0x05ba */
+ { 0x05bb, 0x0fff, pcxw }, /* 0x05bb - 0x05bb */
+ { 0x05bc, 0x0ffc, pcxw }, /* 0x05bc - 0x05bf */
+ { 0x05c0, 0x0ffc, pcxw }, /* 0x05c0 - 0x05c3 */
+ { 0x05c4, 0x0ffe, pcxw }, /* 0x05c4 - 0x05c5 */
+ { 0x05c6, 0x0fff, pcxw }, /* 0x05c6 - 0x05c6 */
+ { 0x05c7, 0x0fff, pcxw_ }, /* 0x05c7 - 0x05c7 */
+ { 0x05c8, 0x0ffc, pcxw }, /* 0x05c8 - 0x05cb */
+ { 0x05cc, 0x0ffe, pcxw }, /* 0x05cc - 0x05cd */
+ { 0x05ce, 0x0ffe, pcxw_ }, /* 0x05ce - 0x05cf */
+ { 0x05d0, 0x0ffc, pcxw_ }, /* 0x05d0 - 0x05d3 */
+ { 0x05d4, 0x0ffe, pcxw_ }, /* 0x05d4 - 0x05d5 */
+ { 0x05d6, 0x0fff, pcxw }, /* 0x05d6 - 0x05d6 */
+ { 0x05d7, 0x0fff, pcxw_ }, /* 0x05d7 - 0x05d7 */
+ { 0x05d8, 0x0ffc, pcxw_ }, /* 0x05d8 - 0x05db */
+ { 0x05dc, 0x0ffe, pcxw2 }, /* 0x05dc - 0x05dd */
+ { 0x05de, 0x0fff, pcxw_ }, /* 0x05de - 0x05de */
+ { 0x05df, 0x0fff, pcxw2 }, /* 0x05df - 0x05df */
+ { 0x05e0, 0x0ffc, pcxw2 }, /* 0x05e0 - 0x05e3 */
+ { 0x05e4, 0x0fff, pcxw2 }, /* 0x05e4 - 0x05e4 */
+ { 0x05e5, 0x0fff, pcxw_ }, /* 0x05e5 - 0x05e5 */
+ { 0x05e6, 0x0ffe, pcxw2 }, /* 0x05e6 - 0x05e7 */
+ { 0x05e8, 0x0ff8, pcxw2 }, /* 0x05e8 - 0x05ef */
+ { 0x05f0, 0x0ff0, pcxw2 }, /* 0x05f0 - 0x05ff */
+ { 0x0600, 0x0fe0, pcxl }, /* 0x0600 - 0x061f */
+ { 0x0880, 0x0ff0, mako }, /* 0x0880 - 0x088f */
+ { 0x0890, 0x0ff0, mako2 }, /* 0x0890 - 0x089f */
+ { 0x0000, 0x0000, pcx } /* terminate table */
+};
+
+const char * const cpu_name_version[][2] = {
+ [pcx] = { "PA7000 (PCX)", "1.0" },
+ [pcxs] = { "PA7000 (PCX-S)", "1.1a" },
+ [pcxt] = { "PA7100 (PCX-T)", "1.1b" },
+ [pcxt_] = { "PA7200 (PCX-T')", "1.1c" },
+ [pcxl] = { "PA7100LC (PCX-L)", "1.1d" },
+ [pcxl2] = { "PA7300LC (PCX-L2)", "1.1e" },
+ [pcxu] = { "PA8000 (PCX-U)", "2.0" },
+ [pcxu_] = { "PA8200 (PCX-U+)", "2.0" },
+ [pcxw] = { "PA8500 (PCX-W)", "2.0" },
+ [pcxw_] = { "PA8600 (PCX-W+)", "2.0" },
+ [pcxw2] = { "PA8700 (PCX-W2)", "2.0" },
+ [mako] = { "PA8800 (Mako)", "2.0" },
+ [mako2] = { "PA8900 (Shortfin)", "2.0" }
+};
+
+const char * __devinit
+parisc_hardware_description(struct parisc_device_id *id)
+{
+ struct hp_hardware *listptr;
+
+ for (listptr = hp_hardware_list; listptr->hw_type != HPHW_FAULTY; listptr++) {
+ if ((listptr->hw_type == id->hw_type) &&
+ (listptr->hversion == id->hversion) &&
+ (listptr->sversion == id->sversion)){
+ return listptr->name;
+ }
+ }
+
+ /*
+ * ok, the above hardware table isn't complete, and we haven't found
+ * our device in this table. So let's now try to find a generic name
+ * to describe the given hardware...
+ */
+ switch (id->hw_type) {
+ case HPHW_NPROC:
+ return "Unknown machine";
+
+ case HPHW_A_DIRECT:
+ switch (id->sversion) {
+ case 0x0D: return "MUX port";
+ case 0x0E: return "RS-232 port";
+ }
+ break;
+
+ case HPHW_MEMORY:
+ return "Memory";
+
+ }
+
+ return "unknown device";
+}
+
+
+/* Interpret hversion (ret[0]) from PDC_MODEL(4)/PDC_MODEL_INFO(0) */
+enum cpu_type __cpuinit
+parisc_get_cpu_type(unsigned long hversion)
+{
+ struct hp_cpu_type_mask *ptr;
+ unsigned short model = ((unsigned short) (hversion)) >> 4;
+
+ for (ptr = hp_cpu_type_mask_list; 0 != ptr->mask; ptr++) {
+ if (ptr->model == (model & ptr->mask))
+ return ptr->cpu;
+ }
+ panic("could not identify CPU type\n");
+
+ return pcx; /* not reached: */
+}
+
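parisc_get_cpu_type() above drops the low four bits of the PDC hversion word and then walks hp_cpu_type_mask_list, returning the first entry whose masked model matches. The following standalone sketch (not part of the patch) replays that lookup with a trimmed table and a made-up hversion of 0x5dc0, which lands in the PA8700/PCX-W2 range:

	#include <stdio.h>

	enum cpu_type { pcx, pcxw2 };

	struct hp_cpu_type_mask {
		unsigned short model;
		unsigned short mask;
		enum cpu_type cpu;
	};

	static const struct hp_cpu_type_mask table[] = {
		{ 0x05dc, 0x0ffe, pcxw2 },	/* 0x05dc - 0x05dd */
		{ 0x0000, 0x0000, pcx },	/* terminate table */
	};

	int main(void)
	{
		unsigned long hversion = 0x5dc0;	/* made-up PDC model word */
		unsigned short model = ((unsigned short)hversion) >> 4;
		const struct hp_cpu_type_mask *p;

		for (p = table; p->mask; p++)
			if (p->model == (model & p->mask))
				printf("model 0x%03x -> PA8700 (PCX-W2)\n", model);
		return 0;
	}
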
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
new file mode 100644
index 00000000..37aabd77
--- /dev/null
+++ b/arch/parisc/kernel/head.S
@@ -0,0 +1,358 @@
+/* This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
+ * Copyright 1999 SuSE GmbH (Philipp Rumpf)
+ * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
+ * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
+ * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
+ * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
+ *
+ * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/psw.h>
+#include <asm/pdc.h>
+
+#include <asm/assembly.h>
+#include <asm/pgtable.h>
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+ .level LEVEL
+
+ __INITDATA
+ENTRY(boot_args)
+ .word 0 /* arg0 */
+ .word 0 /* arg1 */
+ .word 0 /* arg2 */
+ .word 0 /* arg3 */
+END(boot_args)
+
+ __HEAD
+
+ .align 4
+ .import init_thread_union,data
+ .import fault_vector_20,code /* IVA parisc 2.0 32 bit */
+#ifndef CONFIG_64BIT
+ .import fault_vector_11,code /* IVA parisc 1.1 32 bit */
+ .import $global$ /* forward declaration */
+#endif /*!CONFIG_64BIT*/
+ .export _stext,data /* Kernel wants it this way! */
+_stext:
+ENTRY(stext)
+ .proc
+ .callinfo
+
+ /* Make sure sr4-sr7 are set to zero for the kernel address space */
+ mtsp %r0,%sr4
+ mtsp %r0,%sr5
+ mtsp %r0,%sr6
+ mtsp %r0,%sr7
+
+ /* Clear BSS (shouldn't the boot loader do this?) */
+
+ .import __bss_start,data
+ .import __bss_stop,data
+
+ load32 PA(__bss_start),%r3
+ load32 PA(__bss_stop),%r4
+$bss_loop:
+ cmpb,<<,n %r3,%r4,$bss_loop
+ stw,ma %r0,4(%r3)
+
+ /* Save away the arguments the boot loader passed in (32 bit args) */
+ load32 PA(boot_args),%r1
+ stw,ma %arg0,4(%r1)
+ stw,ma %arg1,4(%r1)
+ stw,ma %arg2,4(%r1)
+ stw,ma %arg3,4(%r1)
+
+ /* Initialize startup VM. Just map first 8/16 MB of memory */
+ load32 PA(swapper_pg_dir),%r4
+ mtctl %r4,%cr24 /* Initialize kernel root pointer */
+ mtctl %r4,%cr25 /* Initialize user root pointer */
+
+#if PT_NLEVELS == 3
+ /* Set pmd in pgd */
+ load32 PA(pmd0),%r5
+ shrd %r5,PxD_VALUE_SHIFT,%r3
+ ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
+ stw %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
+ ldo ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
+#else
+ /* 2-level page table, so pmd == pgd */
+ ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
+#endif
+
+ /* Fill in pmd with enough pte directories */
+ load32 PA(pg0),%r1
+ SHRREG %r1,PxD_VALUE_SHIFT,%r3
+ ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
+
+ ldi ASM_PT_INITIAL,%r1
+
+1:
+ stw %r3,0(%r4)
+ ldo (PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
+ addib,> -1,%r1,1b
+#if PT_NLEVELS == 3
+ ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
+#else
+ ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
+#endif
+
+
+ /* Now initialize the PTEs themselves. We use RWX for
+ * everything ... it will get remapped correctly later */
+ ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
+ ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
+ load32 PA(pg0),%r1
+
+$pgt_fill_loop:
+ STREGM %r3,ASM_PTE_ENTRY_SIZE(%r1)
+ ldo (1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
+ addib,> -1,%r11,$pgt_fill_loop
+ nop
+
+ /* Load the return address...er...crash 'n burn */
+ copy %r0,%r2
+
+ /* And the RFI Target address too */
+ load32 start_parisc,%r11
+
+ /* And the initial task pointer */
+ load32 init_thread_union,%r6
+ mtctl %r6,%cr30
+
+ /* And the stack pointer too */
+ ldo THREAD_SZ_ALGN(%r6),%sp
+
+#ifdef CONFIG_SMP
+ /* Set the smp rendezvous address into page zero.
+ ** It would be safer to do this in init_smp_config() but
+ ** it's just way easier to deal with here because
+ ** of 64-bit function ptrs and the address is local to this file.
+ */
+ load32 PA(smp_slave_stext),%r10
+ stw %r10,0x10(%r0) /* MEM_RENDEZ */
+ stw %r0,0x28(%r0) /* MEM_RENDEZ_HI - assume addr < 4GB */
+
+ /* FALLTHROUGH */
+ .procend
+
+ /*
+ ** Code Common to both Monarch and Slave processors.
+ ** Entry:
+ **
+ ** 1.1:
+ ** %r11 must contain RFI target address.
+ ** %r25/%r26 args to pass to target function
+ ** %r2 in case rfi target decides it didn't like something
+ **
+ ** 2.0w:
+ ** %r3 PDCE_PROC address
+ ** %r11 RFI target address
+ **
+ ** Caller must init: SR4-7, %sp, %r10, %cr24/25,
+ */
+common_stext:
+ .proc
+ .callinfo
+#else
+ /* Clear PDC entry point - we won't use it */
+ stw %r0,0x10(%r0) /* MEM_RENDEZ */
+ stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */
+#endif /*CONFIG_SMP*/
+
+#ifdef CONFIG_64BIT
+ tophys_r1 %sp
+
+ /* Save the rfi target address */
+ ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
+ tophys_r1 %r10
+ std %r11, TASK_PT_GR11(%r10)
+ /* Switch to wide mode; Superdome doesn't support narrow PDC
+ ** calls.
+ */
+1: mfia %rp /* clear upper part of pcoq */
+ ldo 2f-1b(%rp),%rp
+ depdi 0,31,32,%rp
+ bv (%rp)
+ ssm PSW_SM_W,%r0
+
+ /* Set Wide mode as the "Default" (eg for traps)
+ ** First trap occurs *right* after (or part of) rfi for slave CPUs.
+ ** Someday, palo might not do this for the Monarch either.
+ */
+2:
+#define MEM_PDC_LO 0x388
+#define MEM_PDC_HI 0x35C
+ ldw MEM_PDC_LO(%r0),%r3
+ ldw MEM_PDC_HI(%r0),%r6
+ depd %r6, 31, 32, %r3 /* move to upper word */
+
+ ldo PDC_PSW(%r0),%arg0 /* 21 */
+ ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
+ ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
+ load32 PA(stext_pdc_ret), %rp
+ bv (%r3)
+ copy %r0,%arg3
+
+stext_pdc_ret:
+ /* restore rfi target address*/
+ ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
+ tophys_r1 %r10
+ ldd TASK_PT_GR11(%r10), %r11
+ tovirt_r1 %sp
+#endif
+
+ /* PARANOID: clear user scratch/user space SR's */
+ mtsp %r0,%sr0
+ mtsp %r0,%sr1
+ mtsp %r0,%sr2
+ mtsp %r0,%sr3
+
+ /* Initialize Protection Registers */
+ mtctl %r0,%cr8
+ mtctl %r0,%cr9
+ mtctl %r0,%cr12
+ mtctl %r0,%cr13
+
+ /* Initialize the global data pointer */
+ loadgp
+
+ /* Set up our interrupt table. HPMCs might not work after this!
+ *
+ * We need to install the correct iva for PA1.1 or PA2.0. The
+ * following short sequence of instructions can determine this
+ * (without being illegal on a PA1.1 machine).
+ */
+#ifndef CONFIG_64BIT
+ ldi 32,%r10
+ mtctl %r10,%cr11
+ .level 2.0
+ mfctl,w %cr11,%r10
+ .level 1.1
+ comib,<>,n 0,%r10,$is_pa20
+ ldil L%PA(fault_vector_11),%r10
+ b $install_iva
+ ldo R%PA(fault_vector_11)(%r10),%r10
+
+$is_pa20:
+ .level LEVEL /* restore 1.1 || 2.0w */
+#endif /*!CONFIG_64BIT*/
+ load32 PA(fault_vector_20),%r10
+
+$install_iva:
+ mtctl %r10,%cr14
+
+ b aligned_rfi /* Prepare to RFI! Man all the cannons! */
+ nop
+
+ .align 128
+aligned_rfi:
+ pcxt_ssm_bug
+
+ rsm PSW_SM_QUIET,%r0 /* off troublesome PSW bits */
+ /* Don't need NOPs, have 8 compliant insn before rfi */
+
+ mtctl %r0,%cr17 /* Clear IIASQ tail */
+ mtctl %r0,%cr17 /* Clear IIASQ head */
+
+ /* Load RFI target into PC queue */
+ mtctl %r11,%cr18 /* IIAOQ head */
+ ldo 4(%r11),%r11
+ mtctl %r11,%cr18 /* IIAOQ tail */
+
+ load32 KERNEL_PSW,%r10
+ mtctl %r10,%ipsw
+
+ /* Jump through hyperspace to Virt Mode */
+ rfi
+ nop
+
+ .procend
+
+#ifdef CONFIG_SMP
+
+ .import smp_init_current_idle_task,data
+ .import smp_callin,code
+
+#ifndef CONFIG_64BIT
+smp_callin_rtn:
+ .proc
+ .callinfo
+ break 1,1 /* Break if returned from start_secondary */
+ nop
+ nop
+ .procend
+#endif /*!CONFIG_64BIT*/
+
+/***************************************************************************
+* smp_slave_stext is executed by all non-monarch Processors when the Monarch
+* pokes the slave CPUs in smp.c:smp_boot_cpus().
+*
+* Once here, register values are initialized in order to branch to virtual
+* mode. Once all available/eligible CPUs are in virtual mode, all are
+* released and start out by executing their own idle task.
+*****************************************************************************/
+smp_slave_stext:
+ .proc
+ .callinfo
+
+ /*
+ ** Initialize Space registers
+ */
+ mtsp %r0,%sr4
+ mtsp %r0,%sr5
+ mtsp %r0,%sr6
+ mtsp %r0,%sr7
+
+ /* Initialize the SP - monarch sets up smp_init_current_idle_task */
+ load32 PA(smp_init_current_idle_task),%sp
+ LDREG 0(%sp),%sp /* load task address */
+ tophys_r1 %sp
+ LDREG TASK_THREAD_INFO(%sp),%sp
+ mtctl %sp,%cr30 /* store in cr30 */
+ ldo THREAD_SZ_ALGN(%sp),%sp
+
+ /* point CPU to kernel page tables */
+ load32 PA(swapper_pg_dir),%r4
+ mtctl %r4,%cr24 /* Initialize kernel root pointer */
+ mtctl %r4,%cr25 /* Initialize user root pointer */
+
+#ifdef CONFIG_64BIT
+ /* Setup PDCE_PROC entry */
+ copy %arg0,%r3
+#else
+ /* Load RFI *return* address in case smp_callin bails */
+ load32 smp_callin_rtn,%r2
+#endif
+
+ /* Load RFI target address. */
+ load32 smp_callin,%r11
+
+ /* ok...common code can handle the rest */
+ b common_stext
+ nop
+
+ .procend
+#endif /* CONFIG_SMP */
+
+ENDPROC(stext)
+
+#ifndef CONFIG_64BIT
+ .section .data..read_mostly
+
+ .align 4
+ .export $global$,data
+
+ .type $global$,@object
+ .size $global$,4
+$global$:
+ .word 0
+#endif /*!CONFIG_64BIT*/
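The $pgt_fill_loop above writes one PTE per page, identity-mapping the first (1 << KERNEL_INITIAL_ORDER) bytes of physical memory with kernel RWX protections (they get remapped correctly later, as the comment notes). A rough C equivalent of that loop, with the configuration-dependent constants passed in as parameters since their actual values vary by build, is sketched below as an illustration only:

	/* Identity-map the initial kernel area:
	 * pte[pfn] = (pfn << pfn_pte_shift) | protection bits. */
	void fill_initial_ptes(unsigned long *pte_table,
			       unsigned int kernel_initial_order,
			       unsigned int page_shift,
			       unsigned int pfn_pte_shift,
			       unsigned long page_kernel_rwx)
	{
		unsigned long pfn;
		unsigned long npages = 1UL << (kernel_initial_order - page_shift);

		for (pfn = 0; pfn < npages; pfn++)
			pte_table[pfn] = (pfn << pfn_pte_shift) | page_kernel_rwx;
	}
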
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
new file mode 100644
index 00000000..5595a2f3
--- /dev/null
+++ b/arch/parisc/kernel/hpmc.S
@@ -0,0 +1,305 @@
+/*
+ * HPMC (High Priority Machine Check) handler.
+ *
+ * Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
+ * Copyright (C) 2000 Hewlett-Packard (John Marvin)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/*
+ * This HPMC handler retrieves the HPMC pim data, resets IO and
+ * returns to the default trap handler with code set to 1 (HPMC).
+ * The default trap handler calls handle interruption, which
+ * does a stack and register dump. This at least allows kernel
+ * developers to get back to C code in virtual mode, where they
+ * have the option to examine and print values from memory that
+ * would help in debugging an HPMC caused by a software bug.
+ *
+ * There is more to do here:
+ *
+ * 1) On MP systems we need to synchronize processors
+ * before calling pdc/iodc.
+ * 2) We should be checking the system state and not
+ * returning to the fault handler if things are really
+ * bad.
+ *
+ */
+
+ .level 1.1
+ .data
+
+#include <asm/assembly.h>
+#include <asm/pdc.h>
+
+#include <linux/linkage.h>
+
+ /*
+ * stack for os_hpmc, the HPMC handler.
+ * buffer for IODC procedures (for the HPMC handler).
+ *
+ * IODC requires 7K byte stack. That leaves 1K byte for os_hpmc.
+ */
+
+ .align PAGE_SIZE
+hpmc_stack:
+ .block 16384
+
+#define HPMC_IODC_BUF_SIZE 0x8000
+
+ .align PAGE_SIZE
+hpmc_iodc_buf:
+ .block HPMC_IODC_BUF_SIZE
+
+ .align 8
+hpmc_raddr:
+ .block 128
+
+#define HPMC_PIM_DATA_SIZE 896 /* Enough to hold all architected 2.0 state */
+
+ .align 8
+ENTRY(hpmc_pim_data)
+ .block HPMC_PIM_DATA_SIZE
+END(hpmc_pim_data)
+
+ .text
+
+ .import intr_save, code
+ENTRY(os_hpmc)
+.os_hpmc:
+
+ /*
+ * registers modified:
+ *
+ * Using callee saves registers without saving them. The
+ * original values are in the pim dump if we need them.
+ *
+ * r2 (rp) return pointer
+ * r3 address of PDCE_PROC
+ * r4 scratch
+ * r5 scratch
+ * r23 (arg3) procedure arg
+ * r24 (arg2) procedure arg
+ * r25 (arg1) procedure arg
+ * r26 (arg0) procedure arg
+ * r30 (sp) stack pointer
+ *
+ * registers read:
+ *
+ * r26 contains address of PDCE_PROC on entry
+ * r28 (ret0) return value from procedure
+ */
+
+ copy arg0, %r3 /* save address of PDCE_PROC */
+
+ /*
+ * disable nested HPMCs
+ *
+ * Increment os_hpmc checksum to invalidate it.
+ * Do this before turning the PSW M bit off.
+ */
+
+ mfctl %cr14, %r4
+ ldw 52(%r4),%r5
+ addi 1,%r5,%r5
+ stw %r5,52(%r4)
+
+ /* MP_FIXME: synchronize all processors. */
+
+ /* Setup stack pointer. */
+
+ load32 PA(hpmc_stack),sp
+
+ ldo 128(sp),sp /* leave room for arguments */
+
+ /*
+ * Most PDC routines require that the M bit be off.
+ * So turn on the Q bit and turn off the M bit.
+ */
+
+ ldo 8(%r0),%r4 /* PSW Q on, PSW M off */
+ mtctl %r4,ipsw
+ mtctl %r0,pcsq
+ mtctl %r0,pcsq
+ load32 PA(os_hpmc_1),%r4
+ mtctl %r4,pcoq
+ ldo 4(%r4),%r4
+ mtctl %r4,pcoq
+ rfi
+ nop
+
+os_hpmc_1:
+
+ /* Call PDC_PIM to get HPMC pim info */
+
+ /*
+ * Note that on some newer boxes, PDC_PIM must be called
+ * before PDC_IO if you want IO to be reset. PDC_PIM sets
+ * a flag that PDC_IO examines.
+ */
+
+ ldo PDC_PIM(%r0), arg0
+ ldo PDC_PIM_HPMC(%r0),arg1 /* Transfer HPMC data */
+ load32 PA(hpmc_raddr),arg2
+ load32 PA(hpmc_pim_data),arg3
+ load32 HPMC_PIM_DATA_SIZE,%r4
+ stw %r4,-52(sp)
+
+ ldil L%PA(os_hpmc_2), rp
+ bv (r3) /* call pdce_proc */
+ ldo R%PA(os_hpmc_2)(rp), rp
+
+os_hpmc_2:
+ comib,<> 0,ret0, os_hpmc_fail
+
+ /* Reset IO by calling the hversion dependent PDC_IO routine */
+
+ ldo PDC_IO(%r0),arg0
+ ldo 0(%r0),arg1 /* log IO errors */
+ ldo 0(%r0),arg2 /* reserved */
+ ldo 0(%r0),arg3 /* reserved */
+ stw %r0,-52(sp) /* reserved */
+
+ ldil L%PA(os_hpmc_3),rp
+ bv (%r3) /* call pdce_proc */
+ ldo R%PA(os_hpmc_3)(rp),rp
+
+os_hpmc_3:
+
+ /* FIXME? Check for errors from PDC_IO (-1 might be OK) */
+
+ /*
+ * Initialize the IODC console device (HPA, SPA, path, etc.
+ * are stored on page 0).
+ */
+
+ /*
+ * Load IODC into hpmc_iodc_buf by calling PDC_IODC.
+ * Note that PDC_IODC handles flushing the appropriate
+ * data and instruction cache lines.
+ */
+
+ ldo PDC_IODC(%r0),arg0
+ ldo PDC_IODC_READ(%r0),arg1
+ load32 PA(hpmc_raddr),arg2
+ ldw BOOT_CONSOLE_HPA_OFFSET(%r0),arg3 /* console hpa */
+ ldo PDC_IODC_RI_INIT(%r0),%r4
+ stw %r4,-52(sp)
+ load32 PA(hpmc_iodc_buf),%r4
+ stw %r4,-56(sp)
+ load32 HPMC_IODC_BUF_SIZE,%r4
+ stw %r4,-60(sp)
+
+ ldil L%PA(os_hpmc_4),rp
+ bv (%r3) /* call pdce_proc */
+ ldo R%PA(os_hpmc_4)(rp),rp
+
+os_hpmc_4:
+ comib,<> 0,ret0,os_hpmc_fail
+
+ /* Call the entry init (just loaded by PDC_IODC) */
+
+ ldw BOOT_CONSOLE_HPA_OFFSET(%r0),arg0 /* console hpa */
+ ldo ENTRY_INIT_MOD_DEV(%r0), arg1
+ ldw BOOT_CONSOLE_SPA_OFFSET(%r0),arg2 /* console spa */
+ depi 0,31,11,arg2 /* clear bits 21-31 */
+ ldo BOOT_CONSOLE_PATH_OFFSET(%r0),arg3 /* console path */
+ load32 PA(hpmc_raddr),%r4
+ stw %r4, -52(sp)
+ stw %r0, -56(sp) /* HV */
+ stw %r0, -60(sp) /* HV */
+ stw %r0, -64(sp) /* HV */
+ stw %r0, -68(sp) /* lang, must be zero */
+
+ load32 PA(hpmc_iodc_buf),%r5
+ ldil L%PA(os_hpmc_5),rp
+ bv (%r5)
+ ldo R%PA(os_hpmc_5)(rp),rp
+
+os_hpmc_5:
+ comib,<> 0,ret0,os_hpmc_fail
+
+ /* Prepare to call intr_save */
+
+ /*
+ * Load kernel page directory (load into user also, since
+ * we don't intend to ever return to user land anyway)
+ */
+
+ load32 PA(swapper_pg_dir),%r4
+ mtctl %r4,%cr24 /* Initialize kernel root pointer */
+ mtctl %r4,%cr25 /* Initialize user root pointer */
+
+ /* Clear sr4-sr7 */
+
+ mtsp %r0, %sr4
+ mtsp %r0, %sr5
+ mtsp %r0, %sr6
+ mtsp %r0, %sr7
+
+ tovirt_r1 %r30 /* make sp virtual */
+
+ rsm 8,%r0 /* Clear Q bit */
+ ldi 1,%r8 /* Set trap code to "1" for HPMC */
+ load32 PA(intr_save),%r1
+ be 0(%sr7,%r1)
+ nop
+
+os_hpmc_fail:
+
+ /*
+ * Reset the system
+ *
+ * Some systems may lockup from a broadcast reset, so try the
+ * hversion PDC_BROADCAST_RESET() first.
+ * MP_FIXME: reset all processors if more than one central bus.
+ */
+
+ /* PDC_BROADCAST_RESET() */
+
+ ldo PDC_BROADCAST_RESET(%r0),arg0
+ ldo 0(%r0),arg1 /* do reset */
+
+ ldil L%PA(os_hpmc_6),rp
+ bv (%r3) /* call pdce_proc */
+ ldo R%PA(os_hpmc_6)(rp),rp
+
+os_hpmc_6:
+
+ /*
+ * possible return values:
+ * -1 non-existent procedure
+ * -2 non-existent option
+ * -16 unaligned stack
+ *
+ * If call returned, do a broadcast reset.
+ */
+
+ ldil L%0xfffc0000,%r4 /* IO_BROADCAST */
+ ldo 5(%r0),%r5
+ stw %r5,48(%r4) /* CMD_RESET to IO_COMMAND offset */
+
+ b .
+ nop
+ENDPROC(os_hpmc)
+.os_hpmc_end:
+ nop
+.data
+.align 4
+ .export os_hpmc_size
+os_hpmc_size:
+ .word .os_hpmc_end-.os_hpmc
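Each "bv (%r3)" above is a call through PDCE_PROC. On the 32-bit side the first four arguments travel in %arg0-%arg3 and further arguments are spilled to the caller's fixed stack slots at -52(sp), -56(sp), -60(sp) and so on; that reading of the calling convention is an assumption here, but it matches the stores the handler performs before each branch. The PDC_PIM call made at os_hpmc_1, for example, has roughly the C-level shape below (pdce_proc_t and call_pdc_pim are made-up names for illustration, not part of the patch):

	typedef long (*pdce_proc_t)(unsigned long proc, unsigned long option, ...);

	/* The fifth argument (the PIM buffer size) is the value that the
	 * assembly stores into the -52(sp) slot before branching to PDC. */
	static long call_pdc_pim(pdce_proc_t pdce_proc,
				 unsigned long pdc_pim, unsigned long pdc_pim_hpmc,
				 unsigned long raddr, unsigned long pim_data,
				 unsigned long size)
	{
		return pdce_proc(pdc_pim, pdc_pim_hpmc, raddr, pim_data, size);
	}
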
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
new file mode 100644
index 00000000..4a91e433
--- /dev/null
+++ b/arch/parisc/kernel/init_task.c
@@ -0,0 +1,70 @@
+/*
+ * Static declaration of "init" task data structure.
+ *
+ * Copyright (C) 2000 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
+ * Copyright (C) 2001 Helge Deller <deller @ parisc-linux.org>
+ * Copyright (C) 2002 Matthew Wilcox <willy with parisc-linux.org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+/*
+ * Initial task structure.
+ *
+ * We need to make sure that this is 16384-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union __init_task_data
+ __attribute__((aligned(128))) =
+ { INIT_THREAD_INFO(init_task) };
+
+#if PT_NLEVELS == 3
+/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
+ * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
+ * guarantee that global objects will be laid out in memory in the same order
+ * as the order of declaration, so put these in different sections and use
+ * the linker script to order them. */
+pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
+#endif
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
+pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+EXPORT_SYMBOL(init_task);
+
+__asm__(".data");
+struct task_struct init_task = INIT_TASK(init_task);
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
new file mode 100644
index 00000000..08324aac
--- /dev/null
+++ b/arch/parisc/kernel/inventory.c
@@ -0,0 +1,619 @@
+/*
+ * inventory.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
+ * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
+ *
+ * These are the routines to discover what hardware exists in this box.
+ * This task is complicated by there being 3 different ways of
+ * performing an inventory, depending largely on the age of the box.
+ * The recommended way to do this is to check to see whether the machine
+ * is a `Snake' first, then try System Map, then try PAT. We try System
+ * Map before checking for a Snake -- this probably doesn't cause any
+ * problems, but...
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/mmzone.h>
+#include <asm/pdc.h>
+#include <asm/pdcpat.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/parisc-device.h>
+
+/*
+** Debug options
+** DEBUG_PAT Dump details which PDC PAT provides about ranges/devices.
+*/
+#undef DEBUG_PAT
+
+int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;
+
+void __init setup_pdc(void)
+{
+ long status;
+ unsigned int bus_id;
+ struct pdc_system_map_mod_info module_result;
+ struct pdc_module_path module_path;
+ struct pdc_model model;
+#ifdef CONFIG_64BIT
+ struct pdc_pat_cell_num cell_info;
+#endif
+
+ /* Determine the pdc "type" used on this machine */
+
+ printk(KERN_INFO "Determining PDC firmware type: ");
+
+ status = pdc_system_map_find_mods(&module_result, &module_path, 0);
+ if (status == PDC_OK) {
+ pdc_type = PDC_TYPE_SYSTEM_MAP;
+ printk("System Map.\n");
+ return;
+ }
+
+ /*
+ * If the machine doesn't support PDC_SYSTEM_MAP then either it
+ * is a pdc pat box, or it is an older box. All 64 bit capable
+ * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
+ */
+
+ /*
+ * TODO: We should test for 64 bit capability and give a
+ * clearer message.
+ */
+
+#ifdef CONFIG_64BIT
+ status = pdc_pat_cell_get_number(&cell_info);
+ if (status == PDC_OK) {
+ pdc_type = PDC_TYPE_PAT;
+ printk("64 bit PAT.\n");
+ return;
+ }
+#endif
+
+ /* Check the CPU's bus ID. There's probably a better test. */
+
+ status = pdc_model_info(&model);
+
+ bus_id = (model.hversion >> (4 + 7)) & 0x1f;
+
+ switch (bus_id) {
+ case 0x4: /* 720, 730, 750, 735, 755 */
+ case 0x6: /* 705, 710 */
+ case 0x7: /* 715, 725 */
+ case 0x8: /* 745, 747, 742 */
+ case 0xA: /* 712 and similar */
+ case 0xC: /* 715/64, at least */
+
+ pdc_type = PDC_TYPE_SNAKE;
+ printk("Snake.\n");
+ return;
+
+ default: /* Everything else */
+
+ printk("Unsupported.\n");
+ panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
+ }
+}
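setup_pdc() classifies older boxes by the bus ID field, bits [15:11] of the hversion word, which is what the ">> (4 + 7)" and the 5-bit mask extract. With a purely hypothetical model.hversion of 0x5030, (0x5030 >> 11) & 0x1f evaluates to 0xA, which the switch above treats as "712 and similar" and reports as Snake. A standalone check of that arithmetic (illustrative value only, not part of the patch):

	#include <assert.h>

	int main(void)
	{
		unsigned int hversion = 0x5030;		/* made-up example value */
		unsigned int bus_id = (hversion >> (4 + 7)) & 0x1f;

		assert(bus_id == 0xA);			/* the "712 and similar" case */
		return 0;
	}
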
+
+#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
+
+static void __init
+set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
+ unsigned long pages4k)
+{
+ /* Rather than aligning and potentially throwing away
+ * memory, we'll assume that any ranges are already
+ * nicely aligned with any reasonable page size, and
+ * panic if they are not (it's more likely that the
+ * pdc info is bad in this case).
+ */
+
+ if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
+ || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {
+
+ panic("Memory range doesn't align with page size!\n");
+ }
+
+ pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
+ pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
+}
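PDC always counts memory in 4 KB pages, so set_pmem_entry() rescales by PDC_PAGE_ADJ_SHIFT to the kernel's own PAGE_SIZE. As a worked example, assuming a build with 16 KB kernel pages (PAGE_SHIFT == 14, so the adjust shift is 2): a range starting at physical 0 with 0x4000 4 KB pages (64 MB) becomes start_pfn = 0 and pages = 0x1000. A standalone check of that arithmetic, not part of the patch:

	#include <assert.h>

	#define PAGE_SHIFT		14	/* assumed: 16 KB kernel pages */
	#define PDC_PAGE_ADJ_SHIFT	(PAGE_SHIFT - 12)

	int main(void)
	{
		unsigned long start = 0, pages4k = 0x4000;	/* 64 MB in 4 KB units */

		assert((start >> PAGE_SHIFT) == 0);		   /* start_pfn */
		assert((pages4k >> PDC_PAGE_ADJ_SHIFT) == 0x1000); /* pages */
		return 0;
	}
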
+
+static void __init pagezero_memconfig(void)
+{
+ unsigned long npages;
+
+ /* Use the 32 bit information from page zero to create a single
+ * entry in the pmem_ranges[] table.
+ *
+ * We currently don't support machines with contiguous memory
+ * >= 4 Gb, who report that memory using 64 bit only fields
+ * on page zero. It's not worth doing until it can be tested,
+ * and it is not clear we can support those machines for other
+ * reasons.
+ *
+ * If that support is done in the future, this is where it
+ * should be done.
+ */
+
+ npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
+ set_pmem_entry(pmem_ranges,0UL,npages);
+ npmem_ranges = 1;
+}
+
+#ifdef CONFIG_64BIT
+
+/* All of the PDC PAT specific code is 64-bit only */
+
+/*
+** The module object is filled via PDC_PAT_CELL[Return Cell Module].
+** If a module is found, register module will get the IODC bytes via
+** pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
+**
+** The IO view can be used by PDC_PAT_CELL[Return Cell Module]
+** only for SBAs and LBAs. This view will cause an invalid
+** argument error for all other cell module types.
+**
+*/
+
+static int __init
+pat_query_module(ulong pcell_loc, ulong mod_index)
+{
+ pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
+ unsigned long bytecnt;
+ unsigned long temp; /* 64-bit scratch value */
+ long status; /* PDC return value status */
+ struct parisc_device *dev;
+
+ pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
+ if (!pa_pdc_cell)
+ panic("couldn't allocate memory for PDC_PAT_CELL!");
+
+ /* return cell module (PA or Processor view) */
+ status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
+ PA_VIEW, pa_pdc_cell);
+
+ if (status != PDC_OK) {
+ /* no more cell modules or error */
+ return status;
+ }
+
+ temp = pa_pdc_cell->cba;
+ dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
+ if (!dev) {
+ return PDC_OK;
+ }
+
+ /* alloc_pa_dev sets dev->hpa */
+
+ /*
+ ** save parameters in the parisc_device
+ ** (The idea being the device driver will call pdc_pat_cell_module()
+ ** and store the results in its own data structure.)
+ */
+ dev->pcell_loc = pcell_loc;
+ dev->mod_index = mod_index;
+
+ /* save generic info returned from the call */
+ /* REVISIT: who is the consumer of this? not sure yet... */
+ dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */
+ dev->pmod_loc = pa_pdc_cell->mod_location;
+
+ register_parisc_device(dev); /* advertise device */
+
+#ifdef DEBUG_PAT
+ pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
+ /* dump what we see so far... */
+ switch (PAT_GET_ENTITY(dev->mod_info)) {
+ unsigned long i;
+
+ case PAT_ENTITY_PROC:
+ printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
+ pa_pdc_cell->mod[0]);
+ break;
+
+ case PAT_ENTITY_MEM:
+ printk(KERN_DEBUG
+ "PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
+ pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
+ pa_pdc_cell->mod[2]);
+ break;
+ case PAT_ENTITY_CA:
+ printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
+ break;
+
+ case PAT_ENTITY_PBC:
+ printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
+ goto print_ranges;
+
+ case PAT_ENTITY_SBA:
+ printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
+ goto print_ranges;
+
+ case PAT_ENTITY_LBA:
+ printk(KERN_DEBUG "PAT_ENTITY_LBA: ");
+
+ print_ranges:
+ pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
+ IO_VIEW, &io_pdc_cell);
+ printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
+ for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
+ printk(KERN_DEBUG
+ " PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
+ i, pa_pdc_cell->mod[2 + i * 3], /* type */
+ pa_pdc_cell->mod[3 + i * 3], /* start */
+ pa_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */
+ printk(KERN_DEBUG
+ " IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
+ i, io_pdc_cell.mod[2 + i * 3], /* type */
+ io_pdc_cell.mod[3 + i * 3], /* start */
+ io_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */
+ }
+ printk(KERN_DEBUG "\n");
+ break;
+ }
+#endif /* DEBUG_PAT */
+
+ kfree(pa_pdc_cell);
+
+ return PDC_OK;
+}
+
+
+/* pat pdc can return information about a variety of different
+ * types of memory (e.g. firmware, I/O, etc.) but we only care about
+ * the usable physical ram right now. Since the firmware specific
+ * information is allocated on the stack, we'll be generous, in
+ * case there is a lot of other information we don't care about.
+ */
+
+#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
+
+static void __init pat_memconfig(void)
+{
+ unsigned long actual_len;
+ struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
+ struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
+ physmem_range_t *pmem_ptr;
+ long status;
+ int entries;
+ unsigned long length;
+ int i;
+
+ length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);
+
+ status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);
+
+ if ((status != PDC_OK)
+ || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {
+
+ /* The above pdc call shouldn't fail, but, just in
+ * case, just use the PAGE0 info.
+ */
+
+ printk("\n\n\n");
+ printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
+ "All memory may not be used!\n\n\n");
+ pagezero_memconfig();
+ return;
+ }
+
+ entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);
+
+ if (entries > PAT_MAX_RANGES) {
+ printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
+ printk(KERN_WARNING "Some memory may not be used!\n");
+ }
+
+ /* Copy information into the firmware independent pmem_ranges
+ * array, skipping types we don't care about. Notice we said
+ * "may" above. We'll use all the entries that were returned.
+ */
+
+ npmem_ranges = 0;
+ mtbl_ptr = mem_table;
+ pmem_ptr = pmem_ranges; /* Global firmware independent table */
+ for (i = 0; i < entries; i++,mtbl_ptr++) {
+ if ( (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
+ || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
+ || (mtbl_ptr->pages == 0)
+ || ( (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
+ && (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
+ && (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {
+
+ continue;
+ }
+
+ if (npmem_ranges == MAX_PHYSMEM_RANGES) {
+ printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
+ printk(KERN_WARNING "Some memory will not be used!\n");
+ break;
+ }
+
+ set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
+ npmem_ranges++;
+ }
+}
+
+static int __init pat_inventory(void)
+{
+ int status;
+ ulong mod_index = 0;
+ struct pdc_pat_cell_num cell_info;
+
+ /*
+ ** Note: Prelude (and its successors: Lclass, A400/500) only
+ ** implement PDC_PAT_CELL sub-options 0 and 2.
+ */
+ status = pdc_pat_cell_get_number(&cell_info);
+ if (status != PDC_OK) {
+ return 0;
+ }
+
+#ifdef DEBUG_PAT
+ printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
+ cell_info.cell_loc);
+#endif
+
+ while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
+ mod_index++;
+ }
+
+ return mod_index;
+}
+
+/* We only look for extended memory ranges on a 64 bit capable box */
+static void __init sprockets_memconfig(void)
+{
+ struct pdc_memory_table_raddr r_addr;
+ struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
+ struct pdc_memory_table *mtbl_ptr;
+ physmem_range_t *pmem_ptr;
+ long status;
+ int entries;
+ int i;
+
+ status = pdc_mem_mem_table(&r_addr,mem_table,
+ (unsigned long)MAX_PHYSMEM_RANGES);
+
+ if (status != PDC_OK) {
+
+ /* The above pdc call only works on boxes with sprockets
+ * firmware (newer B,C,J class). Other non PAT PDC machines
+ * do support more than 3.75 Gb of memory, but we don't
+ * support them yet.
+ */
+
+ pagezero_memconfig();
+ return;
+ }
+
+ if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
+ printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
+ printk(KERN_WARNING "Some memory will not be used!\n");
+ }
+
+ entries = (int)r_addr.entries_returned;
+
+ npmem_ranges = 0;
+ mtbl_ptr = mem_table;
+ pmem_ptr = pmem_ranges; /* Global firmware independent table */
+ for (i = 0; i < entries; i++,mtbl_ptr++) {
+ set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
+ npmem_ranges++;
+ }
+}
+
+#else /* !CONFIG_64BIT */
+
+#define pat_inventory() do { } while (0)
+#define pat_memconfig() do { } while (0)
+#define sprockets_memconfig() pagezero_memconfig()
+
+#endif /* !CONFIG_64BIT */
+
+
+#ifndef CONFIG_PA20
+
+/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */
+
+static struct parisc_device * __init
+legacy_create_device(struct pdc_memory_map *r_addr,
+ struct pdc_module_path *module_path)
+{
+ struct parisc_device *dev;
+ int status = pdc_mem_map_hpa(r_addr, module_path);
+ if (status != PDC_OK)
+ return NULL;
+
+ dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
+ if (dev == NULL)
+ return NULL;
+
+ register_parisc_device(dev);
+ return dev;
+}
+
+/**
+ * snake_inventory
+ *
+ * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
+ * To use it, we initialise the mod_path.bc to 0xff and try all values of
+ * mod to get the HPA for the top-level devices. Bus adapters may have
+ * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
+ * module, then trying all possible functions.
+ */
+static void __init snake_inventory(void)
+{
+ int mod;
+ for (mod = 0; mod < 16; mod++) {
+ struct parisc_device *dev;
+ struct pdc_module_path module_path;
+ struct pdc_memory_map r_addr;
+ unsigned int func;
+
+ memset(module_path.path.bc, 0xff, 6);
+ module_path.path.mod = mod;
+ dev = legacy_create_device(&r_addr, &module_path);
+ if ((!dev) || (dev->id.hw_type != HPHW_BA))
+ continue;
+
+ memset(module_path.path.bc, 0xff, 4);
+ module_path.path.bc[4] = mod;
+
+ for (func = 0; func < 16; func++) {
+ module_path.path.bc[5] = 0;
+ module_path.path.mod = func;
+ legacy_create_device(&r_addr, &module_path);
+ }
+ }
+}
+
+#else /* CONFIG_PA20 */
+#define snake_inventory() do { } while (0)
+#endif /* CONFIG_PA20 */
+
+/* Common 32/64 bit based code goes here */
+
+/**
+ * add_system_map_addresses - Add additional addresses to the parisc device.
+ * @dev: The parisc device.
+ * @num_addrs: The number of addresses to add.
+ * @module_instance: The system_map module instance.
+ *
+ * This function adds any additional addresses reported by the system_map
+ * firmware to the parisc device.
+ */
+static void __init
+add_system_map_addresses(struct parisc_device *dev, int num_addrs,
+ int module_instance)
+{
+ int i;
+ long status;
+ struct pdc_system_map_addr_info addr_result;
+
+ dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
+ if(!dev->addr) {
+ printk(KERN_ERR "%s %s(): memory allocation failure\n",
+ __FILE__, __func__);
+ return;
+ }
+
+ for(i = 1; i <= num_addrs; ++i) {
+ status = pdc_system_map_find_addrs(&addr_result,
+ module_instance, i);
+ if(PDC_OK == status) {
+ dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
+ dev->num_addrs++;
+ } else {
+ printk(KERN_WARNING
+ "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
+ status, i);
+ }
+ }
+}
+
+/**
+ * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
+ *
+ * This function attempts to retrieve and register all the devices firmware
+ * knows about via the SYSTEM_MAP PDC call.
+ */
+static void __init system_map_inventory(void)
+{
+ int i;
+ long status = PDC_OK;
+
+ for (i = 0; i < 256; i++) {
+ struct parisc_device *dev;
+ struct pdc_system_map_mod_info module_result;
+ struct pdc_module_path module_path;
+
+ status = pdc_system_map_find_mods(&module_result,
+ &module_path, i);
+ if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
+ break;
+ if (status != PDC_OK)
+ continue;
+
+ dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
+ if (!dev)
+ continue;
+
+ register_parisc_device(dev);
+
+ /* if available, get the additional addresses for a module */
+ if (!module_result.add_addrs)
+ continue;
+
+ add_system_map_addresses(dev, module_result.add_addrs, i);
+ }
+
+ walk_central_bus();
+ return;
+}
+
+void __init do_memory_inventory(void)
+{
+ switch (pdc_type) {
+
+ case PDC_TYPE_PAT:
+ pat_memconfig();
+ break;
+
+ case PDC_TYPE_SYSTEM_MAP:
+ sprockets_memconfig();
+ break;
+
+ case PDC_TYPE_SNAKE:
+ pagezero_memconfig();
+ return;
+
+ default:
+ panic("Unknown PDC type!\n");
+ }
+
+ if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
+ printk(KERN_WARNING "Bad memory configuration returned!\n");
+ printk(KERN_WARNING "Some memory may not be used!\n");
+ pagezero_memconfig();
+ }
+}
+
+void __init do_device_inventory(void)
+{
+ printk(KERN_INFO "Searching for devices...\n");
+
+ init_parisc_bus();
+
+ switch (pdc_type) {
+
+ case PDC_TYPE_PAT:
+ pat_inventory();
+ break;
+
+ case PDC_TYPE_SYSTEM_MAP:
+ system_map_inventory();
+ break;
+
+ case PDC_TYPE_SNAKE:
+ snake_inventory();
+ break;
+
+ default:
+ panic("Unknown PDC type!\n");
+ }
+ printk(KERN_INFO "Found devices:\n");
+ print_parisc_devices();
+}
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
new file mode 100644
index 00000000..c0b1affc
--- /dev/null
+++ b/arch/parisc/kernel/irq.c
@@ -0,0 +1,423 @@
+/*
+ * Code to handle x86 style IRQs plus some generic interrupt stuff.
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
+ * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
+ * Copyright (C) 1999-2000 Grant Grundler
+ * Copyright (c) 2005 Matthew Wilcox
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#include <asm/smp.h>
+
+#undef PARISC_IRQ_CR16_COUNTS
+
+extern irqreturn_t timer_interrupt(int, void *);
+extern irqreturn_t ipi_interrupt(int, void *);
+
+#define EIEM_MASK(irq) (1UL<<(CPU_IRQ_MAX - irq))
+
+/* Bits in EIEM correlate with cpu_irq_action[].
+** Numbered *Big Endian*! (ie bit 0 is MSB)
+*/
+static volatile unsigned long cpu_eiem = 0;
+
+/*
+** local ACK bitmap ... habitually set to 1, but reset to zero
+** between ->ack() and ->end() of the interrupt to prevent
+** re-interruption of a processing interrupt.
+*/
+static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
+
+static void cpu_mask_irq(struct irq_data *d)
+{
+ unsigned long eirr_bit = EIEM_MASK(d->irq);
+
+ cpu_eiem &= ~eirr_bit;
+ /* Do nothing on the other CPUs. If they get this interrupt,
+ * the "& cpu_eiem" in do_cpu_irq_mask() ensures they won't
+ * handle it, and the set_eiem() at the bottom will ensure it
+ * then gets disabled */
+}
+
+static void __cpu_unmask_irq(unsigned int irq)
+{
+ unsigned long eirr_bit = EIEM_MASK(irq);
+
+ cpu_eiem |= eirr_bit;
+
+ /* This is just a simple NOP IPI. But what it does is cause
+ * all the other CPUs to do a set_eiem(cpu_eiem) at the end
+ * of the interrupt handler */
+ smp_send_all_nop();
+}
+
+static void cpu_unmask_irq(struct irq_data *d)
+{
+ __cpu_unmask_irq(d->irq);
+}
+
+void cpu_ack_irq(struct irq_data *d)
+{
+ unsigned long mask = EIEM_MASK(d->irq);
+ int cpu = smp_processor_id();
+
+ /* Clear in EIEM so we can no longer process */
+ per_cpu(local_ack_eiem, cpu) &= ~mask;
+
+ /* disable the interrupt */
+ set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
+
+ /* and now ack it */
+ mtctl(mask, 23);
+}
+
+void cpu_eoi_irq(struct irq_data *d)
+{
+ unsigned long mask = EIEM_MASK(d->irq);
+ int cpu = smp_processor_id();
+
+ /* set it in the eiems---it's no longer in process */
+ per_cpu(local_ack_eiem, cpu) |= mask;
+
+ /* enable the interrupt */
+ set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
+}
+
+#ifdef CONFIG_SMP
+int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
+{
+ int cpu_dest;
+
+ /* timer and ipi have to always be received on all CPUs */
+ if (irqd_is_per_cpu(d))
+ return -EINVAL;
+
+ /* whatever mask they set, we just allow one CPU */
+ cpu_dest = first_cpu(*dest);
+
+ return cpu_dest;
+}
+
+static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
+ bool force)
+{
+ int cpu_dest;
+
+ cpu_dest = cpu_check_affinity(d, dest);
+ if (cpu_dest < 0)
+ return -1;
+
+ cpumask_copy(d->affinity, dest);
+
+ return 0;
+}
+#endif
+
+static struct irq_chip cpu_interrupt_type = {
+ .name = "CPU",
+ .irq_mask = cpu_mask_irq,
+ .irq_unmask = cpu_unmask_irq,
+ .irq_ack = cpu_ack_irq,
+ .irq_eoi = cpu_eoi_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = cpu_set_affinity_irq,
+#endif
+ /* XXX: Needs to be written. We managed without it so far, but
+ * we really ought to write it.
+ */
+ .irq_retrigger = NULL,
+};
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i = *(loff_t *) v, j;
+ unsigned long flags;
+
+ if (i == 0) {
+ seq_puts(p, " ");
+ for_each_online_cpu(j)
+ seq_printf(p, " CPU%d", j);
+
+#ifdef PARISC_IRQ_CR16_COUNTS
+ seq_printf(p, " [min/avg/max] (CPU cycle counts)");
+#endif
+ seq_putc(p, '\n');
+ }
+
+ if (i < NR_IRQS) {
+ struct irq_desc *desc = irq_to_desc(i);
+ struct irqaction *action;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ action = desc->action;
+ if (!action)
+ goto skip;
+ seq_printf(p, "%3d: ", i);
+#ifdef CONFIG_SMP
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+#else
+ seq_printf(p, "%10u ", kstat_irqs(i));
+#endif
+
+ seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
+#ifndef PARISC_IRQ_CR16_COUNTS
+ seq_printf(p, " %s", action->name);
+
+ while ((action = action->next))
+ seq_printf(p, ", %s", action->name);
+#else
+ for ( ;action; action = action->next) {
+ unsigned int k, avg, min, max;
+
+ min = max = action->cr16_hist[0];
+
+ for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
+ int hist = action->cr16_hist[k];
+
+ if (hist) {
+ avg += hist;
+ } else
+ break;
+
+ if (hist > max) max = hist;
+ if (hist < min) min = hist;
+ }
+
+ avg /= k;
+ seq_printf(p, " %s[%d/%d/%d]", action->name,
+ min,avg,max);
+ }
+#endif
+
+ seq_putc(p, '\n');
+ skip:
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+
+ return 0;
+}
+
+
+
+/*
+** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
+** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
+**
+** To use txn_XXX() interfaces, get a Virtual IRQ first.
+** Then use that to get the Transaction address and data.
+*/
+
+int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
+{
+ if (irq_has_action(irq))
+ return -EBUSY;
+ if (irq_get_chip(irq) != &cpu_interrupt_type)
+ return -EBUSY;
+
+ /* for iosapic interrupts */
+ if (type) {
+ irq_set_chip_and_handler(irq, type, handle_percpu_irq);
+ irq_set_chip_data(irq, data);
+ __cpu_unmask_irq(irq);
+ }
+ return 0;
+}
+
+int txn_claim_irq(int irq)
+{
+ return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
+}
+
+/*
+ * The bits_wide parameter accommodates the limitations of the HW/SW which
+ * use these bits:
+ * Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
+ * V-class (EPIC): 6 bits
+ * N/L/A-class (iosapic): 8 bits
+ * PCI 2.2 MSI: 16 bits
+ * Some PCI devices: 32 bits (Symbios SCSI/ATM/HyperFabric)
+ *
+ * On the service provider side:
+ * o PA 1.1 (and PA2.0 narrow mode) 5-bits (width of EIR register)
+ * o PA 2.0 wide mode 6-bits (per processor)
+ * o IA64 8-bits (0-256 total)
+ *
+ * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
+ * by the processor...and the N/L-class I/O subsystem supports more bits than
+ * PA2.0 has. The first case is the problem.
+ */
+int txn_alloc_irq(unsigned int bits_wide)
+{
+ int irq;
+
+ /* never return irq 0 because that's the interval timer */
+ for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
+ if (cpu_claim_irq(irq, NULL, NULL) < 0)
+ continue;
+ if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
+ continue;
+ return irq;
+ }
+
+ /* unlikely, but be prepared */
+ return -1;
+}
+
+
+unsigned long txn_affinity_addr(unsigned int irq, int cpu)
+{
+#ifdef CONFIG_SMP
+ struct irq_data *d = irq_get_irq_data(irq);
+ cpumask_copy(d->affinity, cpumask_of(cpu));
+#endif
+
+ return per_cpu(cpu_data, cpu).txn_addr;
+}
+
+
+unsigned long txn_alloc_addr(unsigned int virt_irq)
+{
+ static int next_cpu = -1;
+
+ next_cpu++; /* assign to "next" CPU we want this bugger on */
+
+ /* validate entry */
+ while ((next_cpu < nr_cpu_ids) &&
+ (!per_cpu(cpu_data, next_cpu).txn_addr ||
+ !cpu_online(next_cpu)))
+ next_cpu++;
+
+ if (next_cpu >= nr_cpu_ids)
+ next_cpu = 0; /* nothing else, assign monarch */
+
+ return txn_affinity_addr(virt_irq, next_cpu);
+}
+
+
+unsigned int txn_alloc_data(unsigned int virt_irq)
+{
+ return virt_irq - CPU_IRQ_BASE;
+}
+
+static inline int eirr_to_irq(unsigned long eirr)
+{
+ int bit = fls_long(eirr);
+ return (BITS_PER_LONG - bit) + TIMER_IRQ;
+}
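EIEM_MASK() numbers the external-interrupt bits big-endian (bit 0 is the MSB of EIEM/EIRR), and eirr_to_irq() inverts that mapping with fls_long(). Assuming TIMER_IRQ sits at CPU_IRQ_BASE and CPU_IRQ_MAX spans exactly one machine word of EIRR bits (both are assumptions about the asm/irq.h definitions, stated here for illustration), the two are exact inverses. A standalone round-trip check with a made-up CPU_IRQ_BASE, not part of the patch:

	#include <assert.h>

	#define BITS_PER_LONG	(8 * sizeof(unsigned long))
	#define CPU_IRQ_BASE	16UL			/* hypothetical base */
	#define TIMER_IRQ	(CPU_IRQ_BASE + 0)
	#define CPU_IRQ_MAX	(CPU_IRQ_BASE + BITS_PER_LONG - 1)
	#define EIEM_MASK(irq)	(1UL << (CPU_IRQ_MAX - (irq)))

	static int fls_long(unsigned long x)		/* highest set bit, 1-based */
	{
		int bit = 0;

		while (x) {
			bit++;
			x >>= 1;
		}
		return bit;
	}

	static int eirr_to_irq(unsigned long eirr)
	{
		return (BITS_PER_LONG - fls_long(eirr)) + TIMER_IRQ;
	}

	int main(void)
	{
		unsigned long irq;

		for (irq = CPU_IRQ_BASE; irq <= CPU_IRQ_MAX; irq++)
			assert(eirr_to_irq(EIEM_MASK(irq)) == (int)irq);
		return 0;
	}
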
+
+/* ONLY called from entry.S:intr_extint() */
+void do_cpu_irq_mask(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs;
+ unsigned long eirr_val;
+ int irq, cpu = smp_processor_id();
+#ifdef CONFIG_SMP
+ struct irq_desc *desc;
+ cpumask_t dest;
+#endif
+
+ old_regs = set_irq_regs(regs);
+ local_irq_disable();
+ irq_enter();
+
+ eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
+ if (!eirr_val)
+ goto set_out;
+ irq = eirr_to_irq(eirr_val);
+
+#ifdef CONFIG_SMP
+ desc = irq_to_desc(irq);
+ cpumask_copy(&dest, desc->irq_data.affinity);
+ if (irqd_is_per_cpu(&desc->irq_data) &&
+ !cpu_isset(smp_processor_id(), dest)) {
+ int cpu = first_cpu(dest);
+
+ printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
+ irq, smp_processor_id(), cpu);
+ gsc_writel(irq + CPU_IRQ_BASE,
+ per_cpu(cpu_data, cpu).hpa);
+ goto set_out;
+ }
+#endif
+ generic_handle_irq(irq);
+
+ out:
+ irq_exit();
+ set_irq_regs(old_regs);
+ return;
+
+ set_out:
+ set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
+ goto out;
+}
+
+static struct irqaction timer_action = {
+ .handler = timer_interrupt,
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
+};
+
+#ifdef CONFIG_SMP
+static struct irqaction ipi_action = {
+ .handler = ipi_interrupt,
+ .name = "IPI",
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+};
+#endif
+
+static void claim_cpu_irqs(void)
+{
+ int i;
+ for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
+ irq_set_chip_and_handler(i, &cpu_interrupt_type,
+ handle_percpu_irq);
+ }
+
+ irq_set_handler(TIMER_IRQ, handle_percpu_irq);
+ setup_irq(TIMER_IRQ, &timer_action);
+#ifdef CONFIG_SMP
+ irq_set_handler(IPI_IRQ, handle_percpu_irq);
+ setup_irq(IPI_IRQ, &ipi_action);
+#endif
+}
+
+void __init init_IRQ(void)
+{
+ local_irq_disable(); /* PARANOID - should already be disabled */
+ mtctl(~0UL, 23); /* EIRR : clear all pending external intr */
+ claim_cpu_irqs();
+#ifdef CONFIG_SMP
+ if (!cpu_eiem)
+ cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
+#else
+ cpu_eiem = EIEM_MASK(TIMER_IRQ);
+#endif
+ set_eiem(cpu_eiem); /* EIEM : enable all external intr */
+
+}
+
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
new file mode 100644
index 00000000..cedbbb8b
--- /dev/null
+++ b/arch/parisc/kernel/module.c
@@ -0,0 +1,958 @@
+/* Kernel dynamically loadable module help for PARISC.
+ *
+ * The best reference for this stuff is probably the Processor-
+ * Specific ELF Supplement for PA-RISC:
+ * http://ftp.parisc-linux.org/docs/arch/elf-pa-hp.pdf
+ *
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ * Copyright (C) 2003 Randolph Chung <tausq at debian . org>
+ * Copyright (C) 2008 Helge Deller <deller@gmx.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * Notes:
+ * - PLT stub handling
+ * On 32bit (and sometimes 64bit) and with big kernel modules like xfs or
+ * ipv6 the relocation types R_PARISC_PCREL17F and R_PARISC_PCREL22F may
+ * fail to reach their PLT stub if we only create one big stub array for
+ * all sections at the beginning of the core or init section.
+ * Instead we now insert individual PLT stub entries directly in front of
+ * the code sections where the stubs are actually called.
+ * This reduces the distance between the PCREL location and the stub entry
+ * so that the relocations can be fulfilled.
+ * While calculating the final layout of the kernel module in memory, the
+ * kernel module loader calls arch_mod_section_prepend() to request the
+ * amount of memory to be reserved in front of each individual section.
+ *
+ * - SEGREL32 handling
+ * We are not doing SEGREL32 handling correctly. According to the ABI, we
+ * should do a value offset, like this:
+ * if (in_init(me, (void *)val))
+ * val -= (uint32_t)me->module_init;
+ * else
+ * val -= (uint32_t)me->module_core;
+ * However, SEGREL32 is used only for PARISC unwind entries, and we want
+ * those entries to have an absolute address, and not just an offset.
+ *
+ * The unwind table mechanism has the ability to specify an offset for
+ * the unwind table; however, because we split off the init functions into
+ * a different piece of memory, it is not possible to do this using a
+ * single offset. Instead, we use the above hack for now.
+ */
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+#include <asm/unwind.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt...)
+#endif
+
+#define RELOC_REACHABLE(val, bits) \
+ (( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) || \
+ ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) ? \
+ 0 : 1)
+
+#define CHECK_RELOC(val, bits) \
+ if (!RELOC_REACHABLE(val, bits)) { \
+ printk(KERN_ERR "module %s relocation of symbol %s is out of range (0x%lx in %d bits)\n", \
+ me->name, strtab + sym->st_name, (unsigned long)val, bits); \
+ return -ENOEXEC; \
+ }
+
+/* Maximum number of GOT entries. We use a long displacement ldd from
+ * the bottom of the table, which has a maximum signed displacement of
+ * 0x3fff; however, since we're only going forward, this becomes
+ * 0x1fff, and thus, since each GOT entry is 8 bytes long we can have
+ * at most 1023 entries.
+ * To overcome this 14bit displacement with some kernel modules, we'll
+ * use instead the unusual 16bit displacement method (see reassemble_16a)
+ * which gives us a maximum positive displacement of 0x7fff, and as such
+ * allows us to allocate up to 4095 GOT entries. */
+#define MAX_GOTS 4095
+
+/* three functions to determine where in the module core
+ * or init pieces the location is */
+static inline int in_init(struct module *me, void *loc)
+{
+ return (loc >= me->module_init &&
+ loc <= (me->module_init + me->init_size));
+}
+
+static inline int in_core(struct module *me, void *loc)
+{
+ return (loc >= me->module_core &&
+ loc <= (me->module_core + me->core_size));
+}
+
+static inline int in_local(struct module *me, void *loc)
+{
+ return in_init(me, loc) || in_core(me, loc);
+}
+
+#ifndef CONFIG_64BIT
+struct got_entry {
+ Elf32_Addr addr;
+};
+
+struct stub_entry {
+ Elf32_Word insns[2]; /* each stub entry has two insns */
+};
+#else
+struct got_entry {
+ Elf64_Addr addr;
+};
+
+struct stub_entry {
+ Elf64_Word insns[4]; /* each stub entry has four insns */
+};
+#endif
+
+/* Field selection types defined by hppa */
+#define rnd(x) (((x)+0x1000)&~0x1fff)
+/* fsel: full 32 bits */
+#define fsel(v,a) ((v)+(a))
+/* lsel: select left 21 bits */
+#define lsel(v,a) (((v)+(a))>>11)
+/* rsel: select right 11 bits */
+#define rsel(v,a) (((v)+(a))&0x7ff)
+/* lrsel with rounding of addend to nearest 8k */
+#define lrsel(v,a) (((v)+rnd(a))>>11)
+/* rrsel with rounding of addend to nearest 8k */
+#define rrsel(v,a) ((((v)+rnd(a))&0x7ff)+((a)-rnd(a)))
+
+#define mask(x,sz) ((x) & ~((1<<(sz))-1))
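The fsel/lsel/rsel selectors split a (value + addend) sum into the 21-bit left part that goes into an ldil and the 11-bit right part that goes into the following ldo, so recombining them reproduces the full 32-bit value. A standalone check of that identity with an arbitrary sample value (macros copied from above; not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	#define fsel(v,a) ((v)+(a))
	#define lsel(v,a) (((v)+(a))>>11)
	#define rsel(v,a) (((v)+(a))&0x7ff)

	int main(void)
	{
		uint32_t v = 0x12345678, a = 0x100;	/* arbitrary sample values */

		assert(fsel(v, a) == ((lsel(v, a) << 11) | rsel(v, a)));
		return 0;
	}
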
+
+
+/* The reassemble_* functions prepare an immediate value for
+ insertion into an opcode. pa-risc uses all sorts of weird bitfields
+ in the instruction to hold the value. */
+static inline int sign_unext(int x, int len)
+{
+ int len_ones;
+
+ len_ones = (1 << len) - 1;
+ return x & len_ones;
+}
+
+static inline int low_sign_unext(int x, int len)
+{
+ int sign, temp;
+
+ sign = (x >> (len-1)) & 1;
+ temp = sign_unext(x, len-1);
+ return (temp << 1) | sign;
+}
+
+static inline int reassemble_14(int as14)
+{
+ return (((as14 & 0x1fff) << 1) |
+ ((as14 & 0x2000) >> 13));
+}
+
+static inline int reassemble_16a(int as16)
+{
+ int s, t;
+
+ /* Unusual 16-bit encoding, for wide mode only. */
+ t = (as16 << 1) & 0xffff;
+ s = (as16 & 0x8000);
+ return (t ^ s ^ (s >> 1)) | (s >> 15);
+}
+
+
+static inline int reassemble_17(int as17)
+{
+ return (((as17 & 0x10000) >> 16) |
+ ((as17 & 0x0f800) << 5) |
+ ((as17 & 0x00400) >> 8) |
+ ((as17 & 0x003ff) << 3));
+}
+
+static inline int reassemble_21(int as21)
+{
+ return (((as21 & 0x100000) >> 20) |
+ ((as21 & 0x0ffe00) >> 8) |
+ ((as21 & 0x000180) << 7) |
+ ((as21 & 0x00007c) << 14) |
+ ((as21 & 0x000003) << 12));
+}
+
+static inline int reassemble_22(int as22)
+{
+ return (((as22 & 0x200000) >> 21) |
+ ((as22 & 0x1f0000) << 5) |
+ ((as22 & 0x00f800) << 5) |
+ ((as22 & 0x000400) >> 8) |
+ ((as22 & 0x0003ff) << 3));
+}
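+
+/* Note (illustration only): the pattern throughout this file is "select
+ * the bits, then scatter them into the opcode's immediate field", e.g.
+ * the R_PARISC_DIR21L handler further below does
+ *
+ *	val  = lrsel(val, addend);
+ *	*loc = mask(*loc, 21) | reassemble_21(val);
+ *
+ * first picking the left 21 bits, then permuting them into the ldil
+ * immediate.  reassemble_*() never changes the selected value itself;
+ * it only matches the hardware's scrambled field layout.
+ */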
+
+void *module_alloc(unsigned long size)
+{
+ if (size == 0)
+ return NULL;
+ /* using RWX means less protection for modules, but it's
+ * easier than trying to map the text, data, init_text and
+ * init_data correctly */
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL | __GFP_HIGHMEM,
+ PAGE_KERNEL_RWX, -1,
+ __builtin_return_address(0));
+}
+
+#ifndef CONFIG_64BIT
+static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
+{
+ return 0;
+}
+
+static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
+{
+ return 0;
+}
+
+static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
+{
+ unsigned long cnt = 0;
+
+ for (; n > 0; n--, rela++)
+ {
+ switch (ELF32_R_TYPE(rela->r_info)) {
+ case R_PARISC_PCREL17F:
+ case R_PARISC_PCREL22F:
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+#else
+static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
+{
+ unsigned long cnt = 0;
+
+ for (; n > 0; n--, rela++)
+ {
+ switch (ELF64_R_TYPE(rela->r_info)) {
+ case R_PARISC_LTOFF21L:
+ case R_PARISC_LTOFF14R:
+ case R_PARISC_PCREL22F:
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+
+static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
+{
+ unsigned long cnt = 0;
+
+ for (; n > 0; n--, rela++)
+ {
+ switch (ELF64_R_TYPE(rela->r_info)) {
+ case R_PARISC_FPTR64:
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+
+static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
+{
+ unsigned long cnt = 0;
+
+ for (; n > 0; n--, rela++)
+ {
+ switch (ELF64_R_TYPE(rela->r_info)) {
+ case R_PARISC_PCREL22F:
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+#endif
+
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+ kfree(mod->arch.section);
+ mod->arch.section = NULL;
+
+ vfree(module_region);
+}
+
+/* Additional bytes needed in front of individual sections */
+unsigned int arch_mod_section_prepend(struct module *mod,
+ unsigned int section)
+{
+ /* size needed for all stubs of this section (including
+ * one additional for correct alignment of the stubs) */
+ return (mod->arch.section[section].stub_entries + 1)
+ * sizeof(struct stub_entry);
+}
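+
+/* Rough picture (illustration only): the bytes reserved here sit in
+ * front of the section, and get_stub() below carves stub_entry slots
+ * out of them, so a section needing N stubs ends up laid out roughly as
+ *
+ *	[ align pad ][ stub 0 ][ stub 1 ] ... [ stub N-1 ][ section text ]
+ *
+ * The "+ 1" above is the spare entry that absorbs the alignment padding.
+ */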
+
+#define CONST
+int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
+ CONST Elf_Shdr *sechdrs,
+ CONST char *secstrings,
+ struct module *me)
+{
+ unsigned long gots = 0, fdescs = 0, len;
+ unsigned int i;
+
+ len = hdr->e_shnum * sizeof(me->arch.section[0]);
+ me->arch.section = kzalloc(len, GFP_KERNEL);
+ if (!me->arch.section)
+ return -ENOMEM;
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ const Elf_Rela *rels = (void *)sechdrs[i].sh_addr;
+ unsigned long nrels = sechdrs[i].sh_size / sizeof(*rels);
+ unsigned int count, s;
+
+ if (strncmp(secstrings + sechdrs[i].sh_name,
+ ".PARISC.unwind", 14) == 0)
+ me->arch.unwind_section = i;
+
+ if (sechdrs[i].sh_type != SHT_RELA)
+ continue;
+
+ /* some of these are not relevant for 32-bit/64-bit
+ * we leave them here to make the code common. the
+ * compiler will do its thing and optimize out the
+ * stuff we don't need
+ */
+ gots += count_gots(rels, nrels);
+ fdescs += count_fdescs(rels, nrels);
+
+ /* XXX: By sorting the relocs and finding duplicate entries
+ * we could reduce the number of necessary stubs and save
+ * some memory. */
+ count = count_stubs(rels, nrels);
+ if (!count)
+ continue;
+
+ /* so we need relocation stubs. reserve necessary memory. */
+ /* sh_info gives the section for which we need to add stubs. */
+ s = sechdrs[i].sh_info;
+
+ /* each code section should only have one relocation section */
+ WARN_ON(me->arch.section[s].stub_entries);
+
+ /* store number of stubs we need for this section */
+ me->arch.section[s].stub_entries += count;
+ }
+
+ /* align things a bit */
+ me->core_size = ALIGN(me->core_size, 16);
+ me->arch.got_offset = me->core_size;
+ me->core_size += gots * sizeof(struct got_entry);
+
+ me->core_size = ALIGN(me->core_size, 16);
+ me->arch.fdesc_offset = me->core_size;
+ me->core_size += fdescs * sizeof(Elf_Fdesc);
+
+ me->arch.got_max = gots;
+ me->arch.fdesc_max = fdescs;
+
+ return 0;
+}
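+
+/* Resulting core layout (sketch, for illustration only):
+ *
+ *	module_core + 0                 : text/data placed by the generic
+ *	                                  module loader
+ *	module_core + arch.got_offset   : gots   * sizeof(struct got_entry)
+ *	module_core + arch.fdesc_offset : fdescs * sizeof(Elf_Fdesc)
+ *
+ * get_got() and get_fdesc() below hand out slots from these two arrays
+ * on demand, bounded by the got_max/fdesc_max values computed here.
+ */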
+
+#ifdef CONFIG_64BIT
+static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+{
+ unsigned int i;
+ struct got_entry *got;
+
+ value += addend;
+
+ BUG_ON(value == 0);
+
+ got = me->module_core + me->arch.got_offset;
+ for (i = 0; got[i].addr; i++)
+ if (got[i].addr == value)
+ goto out;
+
+ BUG_ON(++me->arch.got_count > me->arch.got_max);
+
+ got[i].addr = value;
+ out:
+ DEBUGP("GOT ENTRY %d[%x] val %lx\n", i, i*sizeof(struct got_entry),
+ value);
+ return i * sizeof(struct got_entry);
+}
+#endif /* CONFIG_64BIT */
+
+#ifdef CONFIG_64BIT
+static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+{
+ Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
+
+ if (!value) {
+ printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+ return 0;
+ }
+
+ /* Look for existing fdesc entry. */
+ while (fdesc->addr) {
+ if (fdesc->addr == value)
+ return (Elf_Addr)fdesc;
+ fdesc++;
+ }
+
+ BUG_ON(++me->arch.fdesc_count > me->arch.fdesc_max);
+
+ /* Create new one */
+ fdesc->addr = value;
+ fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
+ return (Elf_Addr)fdesc;
+}
+#endif /* CONFIG_64BIT */
+
+enum elf_stub_type {
+ ELF_STUB_GOT,
+ ELF_STUB_MILLI,
+ ELF_STUB_DIRECT,
+};
+
+static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
+ enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec)
+{
+ struct stub_entry *stub;
+ int __maybe_unused d;
+
+ /* initialize stub_offset to point in front of the section */
+ if (!me->arch.section[targetsec].stub_offset) {
+ loc0 -= (me->arch.section[targetsec].stub_entries + 1) *
+ sizeof(struct stub_entry);
+ /* get correct alignment for the stubs */
+ loc0 = ALIGN(loc0, sizeof(struct stub_entry));
+ me->arch.section[targetsec].stub_offset = loc0;
+ }
+
+ /* get address of stub entry */
+ stub = (void *) me->arch.section[targetsec].stub_offset;
+ me->arch.section[targetsec].stub_offset += sizeof(struct stub_entry);
+
+ /* do not write outside available stub area */
+ BUG_ON(0 == me->arch.section[targetsec].stub_entries--);
+
+
+#ifndef CONFIG_64BIT
+/* for 32-bit the stub looks like this:
+ * ldil L'XXX,%r1
+ * be,n R'XXX(%sr4,%r1)
+ */
+ //value = *(unsigned long *)((value + addend) & ~3); /* why? */
+
+ stub->insns[0] = 0x20200000; /* ldil L'XXX,%r1 */
+ stub->insns[1] = 0xe0202002; /* be,n R'XXX(%sr4,%r1) */
+
+ stub->insns[0] |= reassemble_21(lrsel(value, addend));
+ stub->insns[1] |= reassemble_17(rrsel(value, addend) / 4);
+
+#else
+/* for 64-bit we have three kinds of stubs:
+ * for normal function calls:
+ * ldd 0(%dp),%dp
+ * ldd 10(%dp), %r1
+ * bve (%r1)
+ * ldd 18(%dp), %dp
+ *
+ * for millicode:
+ * ldil 0, %r1
+ * ldo 0(%r1), %r1
+ * ldd 10(%r1), %r1
+ * bve,n (%r1)
+ *
+ * for direct branches (jumps between different sections of the
+ * same module):
+ * ldil 0, %r1
+ * ldo 0(%r1), %r1
+ * bve,n (%r1)
+ */
+ switch (stub_type) {
+ case ELF_STUB_GOT:
+ d = get_got(me, value, addend);
+ if (d <= 15) {
+ /* Format 5 */
+ stub->insns[0] = 0x0f6010db; /* ldd 0(%dp),%dp */
+ stub->insns[0] |= low_sign_unext(d, 5) << 16;
+ } else {
+ /* Format 3 */
+ stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */
+ stub->insns[0] |= reassemble_16a(d);
+ }
+ stub->insns[1] = 0x53610020; /* ldd 10(%dp),%r1 */
+ stub->insns[2] = 0xe820d000; /* bve (%r1) */
+ stub->insns[3] = 0x537b0030; /* ldd 18(%dp),%dp */
+ break;
+ case ELF_STUB_MILLI:
+ stub->insns[0] = 0x20200000; /* ldil 0,%r1 */
+ stub->insns[1] = 0x34210000; /* ldo 0(%r1), %r1 */
+ stub->insns[2] = 0x50210020; /* ldd 10(%r1),%r1 */
+ stub->insns[3] = 0xe820d002; /* bve,n (%r1) */
+
+ stub->insns[0] |= reassemble_21(lrsel(value, addend));
+ stub->insns[1] |= reassemble_14(rrsel(value, addend));
+ break;
+ case ELF_STUB_DIRECT:
+ stub->insns[0] = 0x20200000; /* ldil 0,%r1 */
+ stub->insns[1] = 0x34210000; /* ldo 0(%r1), %r1 */
+ stub->insns[2] = 0xe820d002; /* bve,n (%r1) */
+
+ stub->insns[0] |= reassemble_21(lrsel(value, addend));
+ stub->insns[1] |= reassemble_14(rrsel(value, addend));
+ break;
+ }
+
+#endif
+
+ return (Elf_Addr)stub;
+}
+
+int apply_relocate(Elf_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ /* parisc should not need this ... */
+ printk(KERN_ERR "module %s: RELOCATION unsupported\n",
+ me->name);
+ return -ENOEXEC;
+}
+
+#ifndef CONFIG_64BIT
+int apply_relocate_add(Elf_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ int i;
+ Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ Elf32_Word *loc;
+ Elf32_Addr val;
+ Elf32_Sword addend;
+ Elf32_Addr dot;
+ Elf_Addr loc0;
+ unsigned int targetsec = sechdrs[relsec].sh_info;
+ //unsigned long dp = (unsigned long)$global$;
+ register unsigned long dp asm ("r27");
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ targetsec);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ loc = (void *)sechdrs[targetsec].sh_addr
+ + rel[i].r_offset;
+ /* This is the start of the target section */
+ loc0 = sechdrs[targetsec].sh_addr;
+ /* This is the symbol it is referring to */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rel[i].r_info);
+ if (!sym->st_value) {
+ printk(KERN_WARNING "%s: Unknown symbol %s\n",
+ me->name, strtab + sym->st_name);
+ return -ENOENT;
+ }
+ //dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
+ dot = (Elf32_Addr)loc & ~0x03;
+
+ val = sym->st_value;
+ addend = rel[i].r_addend;
+
+#if 0
+#define r(t) ELF32_R_TYPE(rel[i].r_info)==t ? #t :
+ DEBUGP("Symbol %s loc 0x%x val 0x%x addend 0x%x: %s\n",
+ strtab + sym->st_name,
+ (uint32_t)loc, val, addend,
+ r(R_PARISC_PLABEL32)
+ r(R_PARISC_DIR32)
+ r(R_PARISC_DIR21L)
+ r(R_PARISC_DIR14R)
+ r(R_PARISC_SEGREL32)
+ r(R_PARISC_DPREL21L)
+ r(R_PARISC_DPREL14R)
+ r(R_PARISC_PCREL17F)
+ r(R_PARISC_PCREL22F)
+ "UNKNOWN");
+#undef r
+#endif
+
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_PARISC_PLABEL32:
+ /* 32-bit function address */
+ /* no function descriptors... */
+ *loc = fsel(val, addend);
+ break;
+ case R_PARISC_DIR32:
+ /* direct 32-bit ref */
+ *loc = fsel(val, addend);
+ break;
+ case R_PARISC_DIR21L:
+ /* left 21 bits of effective address */
+ val = lrsel(val, addend);
+ *loc = mask(*loc, 21) | reassemble_21(val);
+ break;
+ case R_PARISC_DIR14R:
+ /* right 14 bits of effective address */
+ val = rrsel(val, addend);
+ *loc = mask(*loc, 14) | reassemble_14(val);
+ break;
+ case R_PARISC_SEGREL32:
+ /* 32-bit segment relative address */
+ /* See note about special handling of SEGREL32 at
+ * the beginning of this file.
+ */
+ *loc = fsel(val, addend);
+ break;
+ case R_PARISC_DPREL21L:
+ /* left 21 bit of relative address */
+ val = lrsel(val - dp, addend);
+ *loc = mask(*loc, 21) | reassemble_21(val);
+ break;
+ case R_PARISC_DPREL14R:
+ /* right 14 bit of relative address */
+ val = rrsel(val - dp, addend);
+ *loc = mask(*loc, 14) | reassemble_14(val);
+ break;
+ case R_PARISC_PCREL17F:
+ /* 17-bit PC relative address */
+ /* calculate direct call offset */
+ val += addend;
+ val = (val - dot - 8)/4;
+ if (!RELOC_REACHABLE(val, 17)) {
+ /* direct distance too far, create
+ * stub entry instead */
+ val = get_stub(me, sym->st_value, addend,
+ ELF_STUB_DIRECT, loc0, targetsec);
+ val = (val - dot - 8)/4;
+ CHECK_RELOC(val, 17);
+ }
+ *loc = (*loc & ~0x1f1ffd) | reassemble_17(val);
+ break;
+ case R_PARISC_PCREL22F:
+ /* 22-bit PC relative address; only defined for pa20 */
+ /* calculate direct call offset */
+ val += addend;
+ val = (val - dot - 8)/4;
+ if (!RELOC_REACHABLE(val, 22)) {
+ /* direct distance too far, create
+ * stub entry instead */
+ val = get_stub(me, sym->st_value, addend,
+ ELF_STUB_DIRECT, loc0, targetsec);
+ val = (val - dot - 8)/4;
+ CHECK_RELOC(val, 22);
+ }
+ *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
+ break;
+
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+ me->name, ELF32_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+
+ return 0;
+}
+
+#else
+int apply_relocate_add(Elf_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ int i;
+ Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf64_Sym *sym;
+ Elf64_Word *loc;
+ Elf64_Xword *loc64;
+ Elf64_Addr val;
+ Elf64_Sxword addend;
+ Elf64_Addr dot;
+ Elf_Addr loc0;
+ unsigned int targetsec = sechdrs[relsec].sh_info;
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ targetsec);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ loc = (void *)sechdrs[targetsec].sh_addr
+ + rel[i].r_offset;
+ /* This is the start of the target section */
+ loc0 = sechdrs[targetsec].sh_addr;
+ /* This is the symbol it is referring to */
+ sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ + ELF64_R_SYM(rel[i].r_info);
+ if (!sym->st_value) {
+ printk(KERN_WARNING "%s: Unknown symbol %s\n",
+ me->name, strtab + sym->st_name);
+ return -ENOENT;
+ }
+ //dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
+ dot = (Elf64_Addr)loc & ~0x03;
+ loc64 = (Elf64_Xword *)loc;
+
+ val = sym->st_value;
+ addend = rel[i].r_addend;
+
+#if 0
+#define r(t) ELF64_R_TYPE(rel[i].r_info)==t ? #t :
+ printk("Symbol %s loc %p val 0x%Lx addend 0x%Lx: %s\n",
+ strtab + sym->st_name,
+ loc, val, addend,
+ r(R_PARISC_LTOFF14R)
+ r(R_PARISC_LTOFF21L)
+ r(R_PARISC_PCREL22F)
+ r(R_PARISC_DIR64)
+ r(R_PARISC_SEGREL32)
+ r(R_PARISC_FPTR64)
+ "UNKNOWN");
+#undef r
+#endif
+
+ switch (ELF64_R_TYPE(rel[i].r_info)) {
+ case R_PARISC_LTOFF21L:
+ /* LT-relative; left 21 bits */
+ val = get_got(me, val, addend);
+ DEBUGP("LTOFF21L Symbol %s loc %p val %lx\n",
+ strtab + sym->st_name,
+ loc, val);
+ val = lrsel(val, 0);
+ *loc = mask(*loc, 21) | reassemble_21(val);
+ break;
+ case R_PARISC_LTOFF14R:
+ /* L(ltoff(val+addend)) */
+ /* LT-relative; right 14 bits */
+ val = get_got(me, val, addend);
+ val = rrsel(val, 0);
+ DEBUGP("LTOFF14R Symbol %s loc %p val %lx\n",
+ strtab + sym->st_name,
+ loc, val);
+ *loc = mask(*loc, 14) | reassemble_14(val);
+ break;
+ case R_PARISC_PCREL22F:
+ /* PC-relative; 22 bits */
+ DEBUGP("PCREL22F Symbol %s loc %p val %lx\n",
+ strtab + sym->st_name,
+ loc, val);
+ val += addend;
+ /* can we reach it locally? */
+ if (in_local(me, (void *)val)) {
+ /* this is the case where the symbol is local
+ * to the module, but in a different section,
+ * so stub the jump in case it's more than 22
+ * bits away */
+ val = (val - dot - 8)/4;
+ if (!RELOC_REACHABLE(val, 22)) {
+ /* direct distance too far, create
+ * stub entry instead */
+ val = get_stub(me, sym->st_value,
+ addend, ELF_STUB_DIRECT,
+ loc0, targetsec);
+ } else {
+ /* Ok, we can reach it directly. */
+ val = sym->st_value;
+ val += addend;
+ }
+ } else {
+ val = sym->st_value;
+ if (strncmp(strtab + sym->st_name, "$$", 2)
+ == 0)
+ val = get_stub(me, val, addend, ELF_STUB_MILLI,
+ loc0, targetsec);
+ else
+ val = get_stub(me, val, addend, ELF_STUB_GOT,
+ loc0, targetsec);
+ }
+ DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n",
+ strtab + sym->st_name, loc, sym->st_value,
+ addend, val);
+ val = (val - dot - 8)/4;
+ CHECK_RELOC(val, 22);
+ *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
+ break;
+ case R_PARISC_DIR64:
+ /* 64-bit effective address */
+ *loc64 = val + addend;
+ break;
+ case R_PARISC_SEGREL32:
+ /* 32-bit segment relative address */
+ /* See note about special handling of SEGREL32 at
+ * the beginning of this file.
+ */
+ *loc = fsel(val, addend);
+ break;
+ case R_PARISC_FPTR64:
+ /* 64-bit function address */
+ if(in_local(me, (void *)(val + addend))) {
+ *loc64 = get_fdesc(me, val+addend);
+ DEBUGP("FDESC for %s at %p points to %lx\n",
+ strtab + sym->st_name, *loc64,
+ ((Elf_Fdesc *)*loc64)->addr);
+ } else {
+ /* if the symbol is not local to this
+ * module then val+addend is a pointer
+ * to the function descriptor */
+ DEBUGP("Non local FPTR64 Symbol %s loc %p val %lx\n",
+ strtab + sym->st_name,
+ loc, val);
+ *loc64 = val + addend;
+ }
+ break;
+
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %Lu\n",
+ me->name, ELF64_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
+#endif
+
+static void
+register_unwind_table(struct module *me,
+ const Elf_Shdr *sechdrs)
+{
+ unsigned char *table, *end;
+ unsigned long gp;
+
+ if (!me->arch.unwind_section)
+ return;
+
+ table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+ end = table + sechdrs[me->arch.unwind_section].sh_size;
+ gp = (Elf_Addr)me->module_core + me->arch.got_offset;
+
+ DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+ me->arch.unwind_section, table, end, gp);
+ me->arch.unwind = unwind_table_add(me->name, 0, gp, table, end);
+}
+
+static void
+deregister_unwind_table(struct module *me)
+{
+ if (me->arch.unwind)
+ unwind_table_remove(me->arch.unwind);
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ int i;
+ unsigned long nsyms;
+ const char *strtab = NULL;
+ Elf_Sym *newptr, *oldptr;
+ Elf_Shdr *symhdr = NULL;
+#ifdef DEBUG
+ Elf_Fdesc *entry;
+ u32 *addr;
+
+ entry = (Elf_Fdesc *)me->init;
+ printk("FINALIZE, ->init FPTR is %p, GP %lx ADDR %lx\n", entry,
+ entry->gp, entry->addr);
+ addr = (u32 *)entry->addr;
+ printk("INSNS: %x %x %x %x\n",
+ addr[0], addr[1], addr[2], addr[3]);
+ printk("got entries used %ld, gots max %ld\n"
+ "fdescs used %ld, fdescs max %ld\n",
+ me->arch.got_count, me->arch.got_max,
+ me->arch.fdesc_count, me->arch.fdesc_max);
+#endif
+
+ register_unwind_table(me, sechdrs);
+
+ /* haven't filled in me->symtab yet, so have to find it
+ * ourselves */
+ for (i = 1; i < hdr->e_shnum; i++) {
+ if(sechdrs[i].sh_type == SHT_SYMTAB
+ && (sechdrs[i].sh_flags & SHF_ALLOC)) {
+ int strindex = sechdrs[i].sh_link;
+ /* FIXME: AWFUL HACK
+ * The cast is to drop the const from
+ * the sechdrs pointer */
+ symhdr = (Elf_Shdr *)&sechdrs[i];
+ strtab = (char *)sechdrs[strindex].sh_addr;
+ break;
+ }
+ }
+
+ DEBUGP("module %s: strtab %p, symhdr %p\n",
+ me->name, strtab, symhdr);
+
+ if(me->arch.got_count > MAX_GOTS) {
+ printk(KERN_ERR "%s: Global Offset Table overflow (used %ld, allowed %d)\n",
+ me->name, me->arch.got_count, MAX_GOTS);
+ return -EINVAL;
+ }
+
+ kfree(me->arch.section);
+ me->arch.section = NULL;
+
+ /* no symbol table */
+ if(symhdr == NULL)
+ return 0;
+
+ oldptr = (void *)symhdr->sh_addr;
+ newptr = oldptr + 1; /* we start counting at 1 */
+ nsyms = symhdr->sh_size / sizeof(Elf_Sym);
+ DEBUGP("OLD num_symtab %lu\n", nsyms);
+
+ for (i = 1; i < nsyms; i++) {
+ oldptr++; /* note, count starts at 1 so preincrement */
+ if(strncmp(strtab + oldptr->st_name,
+ ".L", 2) == 0)
+ continue;
+
+ if(newptr != oldptr)
+ *newptr++ = *oldptr;
+ else
+ newptr++;
+
+ }
+ nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
+ DEBUGP("NEW num_symtab %lu\n", nsyms);
+ symhdr->sh_size = nsyms * sizeof(Elf_Sym);
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+ deregister_unwind_table(mod);
+}
diff --git a/arch/parisc/kernel/pa7300lc.c b/arch/parisc/kernel/pa7300lc.c
new file mode 100644
index 00000000..8a897802
--- /dev/null
+++ b/arch/parisc/kernel/pa7300lc.c
@@ -0,0 +1,49 @@
+/*
+ * linux/arch/parisc/kernel/pa7300lc.c
+ * - PA7300LC-specific functions
+ *
+ * Copyright (C) 2000 Philipp Rumpf */
+
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/ptrace.h>
+#include <asm/machdep.h>
+
+/* CPU register indices */
+
+#define MIOC_STATUS 0xf040
+#define MIOC_CONTROL 0xf080
+#define MDERRADD 0xf0e0
+#define DMAERR 0xf0e8
+#define DIOERR 0xf0ec
+#define HIDMAMEM 0xf0f4
+
+/* this returns the HPA of the CPU it was called on */
+static u32 cpu_hpa(void)
+{
+ return 0xfffb0000;
+}
+
+static void pa7300lc_lpmc(int code, struct pt_regs *regs)
+{
+ u32 hpa;
+ printk(KERN_WARNING "LPMC on CPU %d\n", smp_processor_id());
+
+ show_regs(regs);
+
+ hpa = cpu_hpa();
+ printk(KERN_WARNING
+ "MIOC_CONTROL %08x\n" "MIOC_STATUS %08x\n"
+ "MDERRADD %08x\n" "DMAERR %08x\n"
+ "DIOERR %08x\n" "HIDMAMEM %08x\n",
+ gsc_readl(hpa+MIOC_CONTROL), gsc_readl(hpa+MIOC_STATUS),
+ gsc_readl(hpa+MDERRADD), gsc_readl(hpa+DMAERR),
+ gsc_readl(hpa+DIOERR), gsc_readl(hpa+HIDMAMEM));
+}
+
+void pa7300lc_init(void)
+{
+ cpu_lpmc = pa7300lc_lpmc;
+}
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
new file mode 100644
index 00000000..5d7218ad
--- /dev/null
+++ b/arch/parisc/kernel/pacache.S
@@ -0,0 +1,1044 @@
+/*
+ * PARISC TLB and cache flushing support
+ * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
+ * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
+ * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * NOTE: fdc, fic, and pdc instructions that use base register modification
+ * should only use index and base registers that are not shadowed,
+ * so that the fast path emulation in the non access miss handler
+ * can be used.
+ */
+
+#ifdef CONFIG_64BIT
+ .level 2.0w
+#else
+ .level 2.0
+#endif
+
+#include <asm/psw.h>
+#include <asm/assembly.h>
+#include <asm/pgtable.h>
+#include <asm/cache.h>
+#include <linux/linkage.h>
+
+ .text
+ .align 128
+
+ENTRY(flush_tlb_all_local)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ /*
+ * The pitlbe and pdtlbe instructions should only be used to
+ * flush the entire tlb. Also, there needs to be no intervening
+ * tlb operations, e.g. tlb misses, so the operation needs
+ * to happen in real mode with all interruptions disabled.
+ */
+
+ /* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
+ rsm PSW_SM_I, %r19 /* save I-bit state */
+ load32 PA(1f), %r1
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ rsm PSW_SM_Q, %r0 /* prep to load iia queue */
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ mtctl %r1, %cr18 /* IIAOQ head */
+ ldo 4(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ tail */
+ load32 REAL_MODE_PSW, %r1
+ mtctl %r1, %ipsw
+ rfi
+ nop
+
+1: load32 PA(cache_info), %r1
+
+ /* Flush Instruction Tlb */
+
+ LDREG ITLB_SID_BASE(%r1), %r20
+ LDREG ITLB_SID_STRIDE(%r1), %r21
+ LDREG ITLB_SID_COUNT(%r1), %r22
+ LDREG ITLB_OFF_BASE(%r1), %arg0
+ LDREG ITLB_OFF_STRIDE(%r1), %arg1
+ LDREG ITLB_OFF_COUNT(%r1), %arg2
+ LDREG ITLB_LOOP(%r1), %arg3
+
+ addib,COND(=) -1, %arg3, fitoneloop /* Preadjust and test */
+ movb,<,n %arg3, %r31, fitdone /* If loop < 0, skip */
+ copy %arg0, %r28 /* Init base addr */
+
+fitmanyloop: /* Loop if LOOP >= 2 */
+ mtsp %r20, %sr1
+ add %r21, %r20, %r20 /* increment space */
+ copy %arg2, %r29 /* Init middle loop count */
+
+fitmanymiddle: /* Loop if LOOP >= 2 */
+ addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
+ pitlbe 0(%sr1, %r28)
+ pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
+ addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */
+ copy %arg3, %r31 /* Re-init inner loop count */
+
+ movb,tr %arg0, %r28, fitmanyloop /* Re-init base addr */
+ addib,COND(<=),n -1, %r22, fitdone /* Outer loop count decr */
+
+fitoneloop: /* Loop if LOOP = 1 */
+ mtsp %r20, %sr1
+ copy %arg0, %r28 /* init base addr */
+ copy %arg2, %r29 /* init middle loop count */
+
+fitonemiddle: /* Loop if LOOP = 1 */
+ addib,COND(>) -1, %r29, fitonemiddle /* Middle loop count decr */
+ pitlbe,m %arg1(%sr1, %r28) /* pitlbe for one loop */
+
+ addib,COND(>) -1, %r22, fitoneloop /* Outer loop count decr */
+ add %r21, %r20, %r20 /* increment space */
+
+fitdone:
+
+ /* Flush Data Tlb */
+
+ LDREG DTLB_SID_BASE(%r1), %r20
+ LDREG DTLB_SID_STRIDE(%r1), %r21
+ LDREG DTLB_SID_COUNT(%r1), %r22
+ LDREG DTLB_OFF_BASE(%r1), %arg0
+ LDREG DTLB_OFF_STRIDE(%r1), %arg1
+ LDREG DTLB_OFF_COUNT(%r1), %arg2
+ LDREG DTLB_LOOP(%r1), %arg3
+
+ addib,COND(=) -1, %arg3, fdtoneloop /* Preadjust and test */
+ movb,<,n %arg3, %r31, fdtdone /* If loop < 0, skip */
+ copy %arg0, %r28 /* Init base addr */
+
+fdtmanyloop: /* Loop if LOOP >= 2 */
+ mtsp %r20, %sr1
+ add %r21, %r20, %r20 /* increment space */
+ copy %arg2, %r29 /* Init middle loop count */
+
+fdtmanymiddle: /* Loop if LOOP >= 2 */
+ addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
+ pdtlbe 0(%sr1, %r28)
+ pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
+ addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */
+ copy %arg3, %r31 /* Re-init inner loop count */
+
+ movb,tr %arg0, %r28, fdtmanyloop /* Re-init base addr */
+ addib,COND(<=),n -1, %r22,fdtdone /* Outer loop count decr */
+
+fdtoneloop: /* Loop if LOOP = 1 */
+ mtsp %r20, %sr1
+ copy %arg0, %r28 /* init base addr */
+ copy %arg2, %r29 /* init middle loop count */
+
+fdtonemiddle: /* Loop if LOOP = 1 */
+ addib,COND(>) -1, %r29, fdtonemiddle /* Middle loop count decr */
+ pdtlbe,m %arg1(%sr1, %r28) /* pdtlbe for one loop */
+
+ addib,COND(>) -1, %r22, fdtoneloop /* Outer loop count decr */
+ add %r21, %r20, %r20 /* increment space */
+
+
+fdtdone:
+ /*
+ * Switch back to virtual mode
+ */
+ /* pcxt_ssm_bug */
+ rsm PSW_SM_I, %r0
+ load32 2f, %r1
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ rsm PSW_SM_Q, %r0 /* prep to load iia queue */
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ mtctl %r1, %cr18 /* IIAOQ head */
+ ldo 4(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ tail */
+ load32 KERNEL_PSW, %r1
+ or %r1, %r19, %r1 /* I-bit to state on entry */
+ mtctl %r1, %ipsw /* restore I-bit (entire PSW) */
+ rfi
+ nop
+
+2: bv %r0(%r2)
+ nop
+
+ .exit
+ .procend
+ENDPROC(flush_tlb_all_local)
+
+ .import cache_info,data
+
+ENTRY(flush_instruction_cache_local)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ mtsp %r0, %sr1
+ load32 cache_info, %r1
+
+ /* Flush Instruction Cache */
+
+ LDREG ICACHE_BASE(%r1), %arg0
+ LDREG ICACHE_STRIDE(%r1), %arg1
+ LDREG ICACHE_COUNT(%r1), %arg2
+ LDREG ICACHE_LOOP(%r1), %arg3
+ rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/
+ addib,COND(=) -1, %arg3, fioneloop /* Preadjust and test */
+ movb,<,n %arg3, %r31, fisync /* If loop < 0, do sync */
+
+fimanyloop: /* Loop if LOOP >= 2 */
+ addib,COND(>) -1, %r31, fimanyloop /* Adjusted inner loop decr */
+ fice %r0(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0) /* Last fice and addr adjust */
+ movb,tr %arg3, %r31, fimanyloop /* Re-init inner loop count */
+ addib,COND(<=),n -1, %arg2, fisync /* Outer loop decr */
+
+fioneloop: /* Loop if LOOP = 1 */
+ addib,COND(>) -1, %arg2, fioneloop /* Outer loop count decr */
+ fice,m %arg1(%sr1, %arg0) /* Fice for one loop */
+
+fisync:
+ sync
+ mtsm %r22 /* restore I-bit */
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC(flush_instruction_cache_local)
+
+
+ .import cache_info, data
+ENTRY(flush_data_cache_local)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ mtsp %r0, %sr1
+ load32 cache_info, %r1
+
+ /* Flush Data Cache */
+
+ LDREG DCACHE_BASE(%r1), %arg0
+ LDREG DCACHE_STRIDE(%r1), %arg1
+ LDREG DCACHE_COUNT(%r1), %arg2
+ LDREG DCACHE_LOOP(%r1), %arg3
+ rsm PSW_SM_I, %r22
+ addib,COND(=) -1, %arg3, fdoneloop /* Preadjust and test */
+ movb,<,n %arg3, %r31, fdsync /* If loop < 0, do sync */
+
+fdmanyloop: /* Loop if LOOP >= 2 */
+ addib,COND(>) -1, %r31, fdmanyloop /* Adjusted inner loop decr */
+ fdce %r0(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0) /* Last fdce and addr adjust */
+ movb,tr %arg3, %r31, fdmanyloop /* Re-init inner loop count */
+ addib,COND(<=),n -1, %arg2, fdsync /* Outer loop decr */
+
+fdoneloop: /* Loop if LOOP = 1 */
+ addib,COND(>) -1, %arg2, fdoneloop /* Outer loop count decr */
+ fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */
+
+fdsync:
+ syncdma
+ sync
+ mtsm %r22 /* restore I-bit */
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC(flush_data_cache_local)
+
+ .align 16
+
+ENTRY(copy_user_page_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+#ifdef CONFIG_64BIT
+ /* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
+ * Unroll the loop by hand and arrange insn appropriately.
+ * GCC probably can do this just as well.
+ */
+
+ ldd 0(%r25), %r19
+ ldi (PAGE_SIZE / 128), %r1
+
+ ldw 64(%r25), %r0 /* prefetch 1 cacheline ahead */
+ ldw 128(%r25), %r0 /* prefetch 2 */
+
+1: ldd 8(%r25), %r20
+ ldw 192(%r25), %r0 /* prefetch 3 */
+ ldw 256(%r25), %r0 /* prefetch 4 */
+
+ ldd 16(%r25), %r21
+ ldd 24(%r25), %r22
+ std %r19, 0(%r26)
+ std %r20, 8(%r26)
+
+ ldd 32(%r25), %r19
+ ldd 40(%r25), %r20
+ std %r21, 16(%r26)
+ std %r22, 24(%r26)
+
+ ldd 48(%r25), %r21
+ ldd 56(%r25), %r22
+ std %r19, 32(%r26)
+ std %r20, 40(%r26)
+
+ ldd 64(%r25), %r19
+ ldd 72(%r25), %r20
+ std %r21, 48(%r26)
+ std %r22, 56(%r26)
+
+ ldd 80(%r25), %r21
+ ldd 88(%r25), %r22
+ std %r19, 64(%r26)
+ std %r20, 72(%r26)
+
+ ldd 96(%r25), %r19
+ ldd 104(%r25), %r20
+ std %r21, 80(%r26)
+ std %r22, 88(%r26)
+
+ ldd 112(%r25), %r21
+ ldd 120(%r25), %r22
+ std %r19, 96(%r26)
+ std %r20, 104(%r26)
+
+ ldo 128(%r25), %r25
+ std %r21, 112(%r26)
+ std %r22, 120(%r26)
+ ldo 128(%r26), %r26
+
+ /* conditional branches nullify on forward taken branch, and on
+ * non-taken backward branch. Note that .+4 is a backwards branch.
+ * The ldd should only get executed if the branch is taken.
+ */
+ addib,COND(>),n -1, %r1, 1b /* bundle 10 */
+ ldd 0(%r25), %r19 /* start next loads */
+
+#else
+
+ /*
+ * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
+ * bundles (very restricted rules for bundling).
+ * Note that until (if) we start saving
+ * the full 64 bit register values on interrupt, we can't
+ * use ldd/std on a 32 bit kernel.
+ */
+ ldw 0(%r25), %r19
+ ldi (PAGE_SIZE / 64), %r1
+
+1:
+ ldw 4(%r25), %r20
+ ldw 8(%r25), %r21
+ ldw 12(%r25), %r22
+ stw %r19, 0(%r26)
+ stw %r20, 4(%r26)
+ stw %r21, 8(%r26)
+ stw %r22, 12(%r26)
+ ldw 16(%r25), %r19
+ ldw 20(%r25), %r20
+ ldw 24(%r25), %r21
+ ldw 28(%r25), %r22
+ stw %r19, 16(%r26)
+ stw %r20, 20(%r26)
+ stw %r21, 24(%r26)
+ stw %r22, 28(%r26)
+ ldw 32(%r25), %r19
+ ldw 36(%r25), %r20
+ ldw 40(%r25), %r21
+ ldw 44(%r25), %r22
+ stw %r19, 32(%r26)
+ stw %r20, 36(%r26)
+ stw %r21, 40(%r26)
+ stw %r22, 44(%r26)
+ ldw 48(%r25), %r19
+ ldw 52(%r25), %r20
+ ldw 56(%r25), %r21
+ ldw 60(%r25), %r22
+ stw %r19, 48(%r26)
+ stw %r20, 52(%r26)
+ ldo 64(%r25), %r25
+ stw %r21, 56(%r26)
+ stw %r22, 60(%r26)
+ ldo 64(%r26), %r26
+ addib,COND(>),n -1, %r1, 1b
+ ldw 0(%r25), %r19
+#endif
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC(copy_user_page_asm)
+
+/*
+ * NOTE: Code in clear_user_page has a hard coded dependency on the
+ * maximum alias boundary being 4 Mb. We've been assured by the
+ * parisc chip designers that there will not ever be a parisc
+ * chip with a larger alias boundary (Never say never :-) ).
+ *
+ * Subtle: the dtlb miss handlers support the temp alias region by
+ * "knowing" that if a dtlb miss happens within the temp alias
+ * region it must have occurred while in clear_user_page. Since
+ * this routine makes use of processor local translations, we
+ * don't want to insert them into the kernel page table. Instead,
+ * we load up some general registers (they need to be registers
+ * which aren't shadowed) with the physical page numbers (preshifted
+ * for tlb insertion) needed to insert the translations. When we
+ * miss on the translation, the dtlb miss handler inserts the
+ * translation into the tlb using these values:
+ *
+ * %r26 physical page (shifted for tlb insert) of "to" translation
+ * %r23 physical page (shifted for tlb insert) of "from" translation
+ */
+
+#if 0
+
+ /*
+ * We can't do this since copy_user_page is used to bring in
+ * file data that might have instructions. Since the data would
+ * then need to be flushed out so the i-fetch can see it, it
+ * makes more sense to just copy through the kernel translation
+ * and flush it.
+ *
+ * I'm still keeping this around because it may be possible to
+ * use it if more information is passed into copy_user_page().
+ * Have to do some measurements to see if it is worthwhile to
+ * lobby for such a change.
+ */
+
+ENTRY(copy_user_page_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%(__PAGE_OFFSET), %r1
+ sub %r26, %r1, %r26
+ sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */
+
+ ldil L%(TMPALIAS_MAP_START), %r28
+ /* FIXME for different page sizes != 4k */
+#ifdef CONFIG_64BIT
+ extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */
+ extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */
+ depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
+ depdi 0, 63,12, %r28 /* Clear any offset bits */
+ copy %r28, %r29
+ depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */
+#else
+ extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
+ extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */
+ depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */
+ depwi 0, 31,12, %r28 /* Clear any offset bits */
+ copy %r28, %r29
+ depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */
+#endif
+
+ /* Purge any old translations */
+
+ pdtlb 0(%r28)
+ pdtlb 0(%r29)
+
+ ldi 64, %r1
+
+ /*
+ * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
+ * bundles (very restricted rules for bundling). It probably
+ * does OK on PCXU and better, but we could do better with
+ * ldd/std instructions. Note that until (if) we start saving
+ * the full 64 bit register values on interrupt, we can't
+ * use ldd/std on a 32 bit kernel.
+ */
+
+
+1:
+ ldw 0(%r29), %r19
+ ldw 4(%r29), %r20
+ ldw 8(%r29), %r21
+ ldw 12(%r29), %r22
+ stw %r19, 0(%r28)
+ stw %r20, 4(%r28)
+ stw %r21, 8(%r28)
+ stw %r22, 12(%r28)
+ ldw 16(%r29), %r19
+ ldw 20(%r29), %r20
+ ldw 24(%r29), %r21
+ ldw 28(%r29), %r22
+ stw %r19, 16(%r28)
+ stw %r20, 20(%r28)
+ stw %r21, 24(%r28)
+ stw %r22, 28(%r28)
+ ldw 32(%r29), %r19
+ ldw 36(%r29), %r20
+ ldw 40(%r29), %r21
+ ldw 44(%r29), %r22
+ stw %r19, 32(%r28)
+ stw %r20, 36(%r28)
+ stw %r21, 40(%r28)
+ stw %r22, 44(%r28)
+ ldw 48(%r29), %r19
+ ldw 52(%r29), %r20
+ ldw 56(%r29), %r21
+ ldw 60(%r29), %r22
+ stw %r19, 48(%r28)
+ stw %r20, 52(%r28)
+ stw %r21, 56(%r28)
+ stw %r22, 60(%r28)
+ ldo 64(%r28), %r28
+ addib,COND(>) -1, %r1,1b
+ ldo 64(%r29), %r29
+
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC(copy_user_page_asm)
+#endif
+
+ENTRY(__clear_user_page_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ tophys_r1 %r26
+
+ ldil L%(TMPALIAS_MAP_START), %r28
+#ifdef CONFIG_64BIT
+#if (TMPALIAS_MAP_START >= 0x80000000)
+ depdi 0, 31,32, %r28 /* clear any sign extension */
+ /* FIXME: page size dependent */
+#endif
+ extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
+ depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
+ depdi 0, 63,12, %r28 /* Clear any offset bits */
+#else
+ extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
+ depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
+ depwi 0, 31,12, %r28 /* Clear any offset bits */
+#endif
+
+ /* Purge any old translation */
+
+ pdtlb 0(%r28)
+
+#ifdef CONFIG_64BIT
+ ldi (PAGE_SIZE / 128), %r1
+
+ /* PREFETCH (Write) has not (yet) been proven to help here */
+ /* #define PREFETCHW_OP ldd 256(%0), %r0 */
+
+1: std %r0, 0(%r28)
+ std %r0, 8(%r28)
+ std %r0, 16(%r28)
+ std %r0, 24(%r28)
+ std %r0, 32(%r28)
+ std %r0, 40(%r28)
+ std %r0, 48(%r28)
+ std %r0, 56(%r28)
+ std %r0, 64(%r28)
+ std %r0, 72(%r28)
+ std %r0, 80(%r28)
+ std %r0, 88(%r28)
+ std %r0, 96(%r28)
+ std %r0, 104(%r28)
+ std %r0, 112(%r28)
+ std %r0, 120(%r28)
+ addib,COND(>) -1, %r1, 1b
+ ldo 128(%r28), %r28
+
+#else /* ! CONFIG_64BIT */
+ ldi (PAGE_SIZE / 64), %r1
+
+1:
+ stw %r0, 0(%r28)
+ stw %r0, 4(%r28)
+ stw %r0, 8(%r28)
+ stw %r0, 12(%r28)
+ stw %r0, 16(%r28)
+ stw %r0, 20(%r28)
+ stw %r0, 24(%r28)
+ stw %r0, 28(%r28)
+ stw %r0, 32(%r28)
+ stw %r0, 36(%r28)
+ stw %r0, 40(%r28)
+ stw %r0, 44(%r28)
+ stw %r0, 48(%r28)
+ stw %r0, 52(%r28)
+ stw %r0, 56(%r28)
+ stw %r0, 60(%r28)
+ addib,COND(>) -1, %r1, 1b
+ ldo 64(%r28), %r28
+#endif /* CONFIG_64BIT */
+
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC(__clear_user_page_asm)
+
+ENTRY(flush_dcache_page_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%(TMPALIAS_MAP_START), %r28
+#ifdef CONFIG_64BIT
+#if (TMPALIAS_MAP_START >= 0x80000000)
+ depdi 0, 31,32, %r28 /* clear any sign extension */
+ /* FIXME: page size dependent */
+#endif
+ extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
+ depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
+ depdi 0, 63,12, %r28 /* Clear any offset bits */
+#else
+ extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
+ depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
+ depwi 0, 31,12, %r28 /* Clear any offset bits */
+#endif
+
+ /* Purge any old translation */
+
+ pdtlb 0(%r28)
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r1
+
+#ifdef CONFIG_64BIT
+ depdi,z 1, 63-PAGE_SHIFT,1, %r25
+#else
+ depwi,z 1, 31-PAGE_SHIFT,1, %r25
+#endif
+ add %r28, %r25, %r25
+ sub %r25, %r1, %r25
+
+
+1: fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ fdc,m %r1(%r28)
+ cmpb,COND(<<) %r28, %r25,1b
+ fdc,m %r1(%r28)
+
+ sync
+ bv %r0(%r2)
+ pdtlb (%r25)
+ .exit
+
+ .procend
+ENDPROC(flush_dcache_page_asm)
+
+ENTRY(flush_icache_page_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%(TMPALIAS_MAP_START), %r28
+#ifdef CONFIG_64BIT
+#if (TMPALIAS_MAP_START >= 0x80000000)
+ depdi 0, 31,32, %r28 /* clear any sign extension */
+ /* FIXME: page size dependent */
+#endif
+ extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
+ depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
+ depdi 0, 63,12, %r28 /* Clear any offset bits */
+#else
+ extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
+ depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
+ depwi 0, 31,12, %r28 /* Clear any offset bits */
+#endif
+
+ /* Purge any old translation */
+
+ pitlb (%sr4,%r28)
+
+ ldil L%icache_stride, %r1
+ ldw R%icache_stride(%r1), %r1
+
+#ifdef CONFIG_64BIT
+ depdi,z 1, 63-PAGE_SHIFT,1, %r25
+#else
+ depwi,z 1, 31-PAGE_SHIFT,1, %r25
+#endif
+ add %r28, %r25, %r25
+ sub %r25, %r1, %r25
+
+
+ /* fic only has the type 26 form on PA1.1, requiring an
+ * explicit space specification, so use %sr4 */
+1: fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ fic,m %r1(%sr4,%r28)
+ cmpb,COND(<<) %r28, %r25,1b
+ fic,m %r1(%sr4,%r28)
+
+ sync
+ bv %r0(%r2)
+ pitlb (%sr4,%r25)
+ .exit
+
+ .procend
+ENDPROC(flush_icache_page_asm)
+
+ENTRY(flush_kernel_dcache_page_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+
+#ifdef CONFIG_64BIT
+ depdi,z 1, 63-PAGE_SHIFT,1, %r25
+#else
+ depwi,z 1, 31-PAGE_SHIFT,1, %r25
+#endif
+ add %r26, %r25, %r25
+ sub %r25, %r23, %r25
+
+
+1: fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ cmpb,COND(<<) %r26, %r25,1b
+ fdc,m %r23(%r26)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC(flush_kernel_dcache_page_asm)
+
+ENTRY(purge_kernel_dcache_page)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+
+#ifdef CONFIG_64BIT
+ depdi,z 1, 63-PAGE_SHIFT,1, %r25
+#else
+ depwi,z 1, 31-PAGE_SHIFT,1, %r25
+#endif
+ add %r26, %r25, %r25
+ sub %r25, %r23, %r25
+
+1: pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ cmpb,COND(<<) %r26, %r25, 1b
+ pdc,m %r23(%r26)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC(purge_kernel_dcache_page)
+
+ENTRY(flush_user_dcache_range_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+ ldo -1(%r23), %r21
+ ANDCM %r26, %r21, %r26
+
+1: cmpb,COND(<<),n %r26, %r25, 1b
+ fdc,m %r23(%sr3, %r26)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC(flush_user_dcache_range_asm)
+
+ENTRY(flush_kernel_dcache_range_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+ ldo -1(%r23), %r21
+ ANDCM %r26, %r21, %r26
+
+1: cmpb,COND(<<),n %r26, %r25,1b
+ fdc,m %r23(%r26)
+
+ sync
+ syncdma
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC(flush_kernel_dcache_range_asm)
+
+ENTRY(flush_user_icache_range_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%icache_stride, %r1
+ ldw R%icache_stride(%r1), %r23
+ ldo -1(%r23), %r21
+ ANDCM %r26, %r21, %r26
+
+1: cmpb,COND(<<),n %r26, %r25,1b
+ fic,m %r23(%sr3, %r26)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC(flush_user_icache_range_asm)
+
+ENTRY(flush_kernel_icache_page)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%icache_stride, %r1
+ ldw R%icache_stride(%r1), %r23
+
+#ifdef CONFIG_64BIT
+ depdi,z 1, 63-PAGE_SHIFT,1, %r25
+#else
+ depwi,z 1, 31-PAGE_SHIFT,1, %r25
+#endif
+ add %r26, %r25, %r25
+ sub %r25, %r23, %r25
+
+
+1: fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ cmpb,COND(<<) %r26, %r25, 1b
+ fic,m %r23(%sr4, %r26)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC(flush_kernel_icache_page)
+
+ENTRY(flush_kernel_icache_range_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%icache_stride, %r1
+ ldw R%icache_stride(%r1), %r23
+ ldo -1(%r23), %r21
+ ANDCM %r26, %r21, %r26
+
+1: cmpb,COND(<<),n %r26, %r25, 1b
+ fic,m %r23(%sr4, %r26)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+ .procend
+ENDPROC(flush_kernel_icache_range_asm)
+
+ /* align should cover use of rfi in disable_sr_hashing_asm and
+ * srdis_done.
+ */
+ .align 256
+ENTRY(disable_sr_hashing_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ /*
+ * Switch to real mode
+ */
+ /* pcxt_ssm_bug */
+ rsm PSW_SM_I, %r0
+ load32 PA(1f), %r1
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ rsm PSW_SM_Q, %r0 /* prep to load iia queue */
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ mtctl %r1, %cr18 /* IIAOQ head */
+ ldo 4(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ tail */
+ load32 REAL_MODE_PSW, %r1
+ mtctl %r1, %ipsw
+ rfi
+ nop
+
+1: cmpib,=,n SRHASH_PCXST, %r26,srdis_pcxs
+ cmpib,=,n SRHASH_PCXL, %r26,srdis_pcxl
+ cmpib,=,n SRHASH_PA20, %r26,srdis_pa20
+ b,n srdis_done
+
+srdis_pcxs:
+
+ /* Disable Space Register Hashing for PCXS,PCXT,PCXT' */
+
+ .word 0x141c1a00 /* mfdiag %dr0, %r28 */
+ .word 0x141c1a00 /* must issue twice */
+ depwi 0,18,1, %r28 /* Clear DHE (dcache hash enable) */
+ depwi 0,20,1, %r28 /* Clear IHE (icache hash enable) */
+ .word 0x141c1600 /* mtdiag %r28, %dr0 */
+ .word 0x141c1600 /* must issue twice */
+ b,n srdis_done
+
+srdis_pcxl:
+
+ /* Disable Space Register Hashing for PCXL */
+
+ .word 0x141c0600 /* mfdiag %dr0, %r28 */
+ depwi 0,28,2, %r28 /* Clear DHASH_EN & IHASH_EN */
+ .word 0x141c0240 /* mtdiag %r28, %dr0 */
+ b,n srdis_done
+
+srdis_pa20:
+
+ /* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */
+
+ .word 0x144008bc /* mfdiag %dr2, %r28 */
+ depdi 0, 54,1, %r28 /* clear DIAG_SPHASH_ENAB (bit 54) */
+ .word 0x145c1840 /* mtdiag %r28, %dr2 */
+
+
+srdis_done:
+ /* Switch back to virtual mode */
+ rsm PSW_SM_I, %r0 /* prep to load iia queue */
+ load32 2f, %r1
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ rsm PSW_SM_Q, %r0 /* prep to load iia queue */
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ mtctl %r1, %cr18 /* IIAOQ head */
+ ldo 4(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ tail */
+ load32 KERNEL_PSW, %r1
+ mtctl %r1, %ipsw
+ rfi
+ nop
+
+2: bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC(disable_sr_hashing_asm)
+
+ .end
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
new file mode 100644
index 00000000..df653663
--- /dev/null
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -0,0 +1,162 @@
+/*
+ * Architecture-specific kernel symbols
+ *
+ * Copyright (C) 2000-2001 Richard Hirst <rhirst with parisc-linux.org>
+ * Copyright (C) 2001 Dave Kennedy
+ * Copyright (C) 2001 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2001-2003 Grant Grundler <grundler with parisc-linux.org>
+ * Copyright (C) 2002-2003 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2002 Randolph Chung <tausq at parisc-linux.org>
+ * Copyright (C) 2002-2007 Helge Deller <deller with parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+
+#include <linux/string.h>
+EXPORT_SYMBOL(memset);
+
+#include <asm/atomic.h>
+EXPORT_SYMBOL(__xchg8);
+EXPORT_SYMBOL(__xchg32);
+EXPORT_SYMBOL(__cmpxchg_u32);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(__atomic_hash);
+#endif
+#ifdef CONFIG_64BIT
+EXPORT_SYMBOL(__xchg64);
+EXPORT_SYMBOL(__cmpxchg_u64);
+#endif
+
+#include <asm/uaccess.h>
+EXPORT_SYMBOL(lstrncpy_from_user);
+EXPORT_SYMBOL(lclear_user);
+EXPORT_SYMBOL(lstrnlen_user);
+
+/* Global fixups */
+extern void fixup_get_user_skip_1(void);
+extern void fixup_get_user_skip_2(void);
+extern void fixup_put_user_skip_1(void);
+extern void fixup_put_user_skip_2(void);
+EXPORT_SYMBOL(fixup_get_user_skip_1);
+EXPORT_SYMBOL(fixup_get_user_skip_2);
+EXPORT_SYMBOL(fixup_put_user_skip_1);
+EXPORT_SYMBOL(fixup_put_user_skip_2);
+
+#ifndef CONFIG_64BIT
+/* Needed so insmod can set dp value */
+extern int $global$;
+EXPORT_SYMBOL($global$);
+#endif
+
+#include <asm/io.h>
+EXPORT_SYMBOL(memcpy_toio);
+EXPORT_SYMBOL(memcpy_fromio);
+EXPORT_SYMBOL(memset_io);
+
+extern void $$divI(void);
+extern void $$divU(void);
+extern void $$remI(void);
+extern void $$remU(void);
+extern void $$mulI(void);
+extern void $$divU_3(void);
+extern void $$divU_5(void);
+extern void $$divU_6(void);
+extern void $$divU_9(void);
+extern void $$divU_10(void);
+extern void $$divU_12(void);
+extern void $$divU_7(void);
+extern void $$divU_14(void);
+extern void $$divU_15(void);
+extern void $$divI_3(void);
+extern void $$divI_5(void);
+extern void $$divI_6(void);
+extern void $$divI_7(void);
+extern void $$divI_9(void);
+extern void $$divI_10(void);
+extern void $$divI_12(void);
+extern void $$divI_14(void);
+extern void $$divI_15(void);
+
+EXPORT_SYMBOL($$divI);
+EXPORT_SYMBOL($$divU);
+EXPORT_SYMBOL($$remI);
+EXPORT_SYMBOL($$remU);
+EXPORT_SYMBOL($$mulI);
+EXPORT_SYMBOL($$divU_3);
+EXPORT_SYMBOL($$divU_5);
+EXPORT_SYMBOL($$divU_6);
+EXPORT_SYMBOL($$divU_9);
+EXPORT_SYMBOL($$divU_10);
+EXPORT_SYMBOL($$divU_12);
+EXPORT_SYMBOL($$divU_7);
+EXPORT_SYMBOL($$divU_14);
+EXPORT_SYMBOL($$divU_15);
+EXPORT_SYMBOL($$divI_3);
+EXPORT_SYMBOL($$divI_5);
+EXPORT_SYMBOL($$divI_6);
+EXPORT_SYMBOL($$divI_7);
+EXPORT_SYMBOL($$divI_9);
+EXPORT_SYMBOL($$divI_10);
+EXPORT_SYMBOL($$divI_12);
+EXPORT_SYMBOL($$divI_14);
+EXPORT_SYMBOL($$divI_15);
+
+extern void __ashrdi3(void);
+extern void __ashldi3(void);
+extern void __lshrdi3(void);
+extern void __muldi3(void);
+
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__muldi3);
+
+asmlinkage void * __canonicalize_funcptr_for_compare(void *);
+EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);
+
+#ifdef CONFIG_64BIT
+extern void __divdi3(void);
+extern void __udivdi3(void);
+extern void __umoddi3(void);
+extern void __moddi3(void);
+
+EXPORT_SYMBOL(__divdi3);
+EXPORT_SYMBOL(__udivdi3);
+EXPORT_SYMBOL(__umoddi3);
+EXPORT_SYMBOL(__moddi3);
+#endif
+
+#ifndef CONFIG_64BIT
+extern void $$dyncall(void);
+EXPORT_SYMBOL($$dyncall);
+#endif
+
+#ifdef CONFIG_DISCONTIGMEM
+#include <asm/mmzone.h>
+EXPORT_SYMBOL(node_data);
+EXPORT_SYMBOL(pfnnid_map);
+#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
+
+/* from pacache.S -- needed for copy_page */
+EXPORT_SYMBOL(copy_user_page_asm);
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
new file mode 100644
index 00000000..a029f74a
--- /dev/null
+++ b/arch/parisc/kernel/pci-dma.c
@@ -0,0 +1,595 @@
+/*
+** PARISC 1.1 Dynamic DMA mapping support.
+** This implementation is for PA-RISC platforms that do not support
+** I/O TLBs (aka DMA address translation hardware).
+** See Documentation/PCI/PCI-DMA-mapping.txt for interface definitions.
+**
+** (c) Copyright 1999,2000 Hewlett-Packard Company
+** (c) Copyright 2000 Grant Grundler
+** (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
+** (c) Copyright 2000 John Marvin
+**
+** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
+** (I assume it's from David Mosberger-Tang but there was no Copyright)
+**
+** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
+**
+** - ggg
+*/
+
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cacheflush.h>
+#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
+#include <asm/io.h>
+#include <asm/page.h> /* get_order */
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+#include <asm/tlbflush.h> /* for purge_tlb_*() macros */
+
+static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
+static unsigned long pcxl_used_bytes __read_mostly = 0;
+static unsigned long pcxl_used_pages __read_mostly = 0;
+
+extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
+static spinlock_t pcxl_res_lock;
+static char *pcxl_res_map;
+static int pcxl_res_hint;
+static int pcxl_res_size;
+
+#ifdef DEBUG_PCXL_RESOURCE
+#define DBG_RES(x...) printk(x)
+#else
+#define DBG_RES(x...)
+#endif
+
+
+/*
+** Dump a hex representation of the resource map.
+*/
+
+#ifdef DUMP_RESMAP
+static
+void dump_resmap(void)
+{
+ u_long *res_ptr = (unsigned long *)pcxl_res_map;
+ u_long i = 0;
+
+ printk("res_map: ");
+ for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
+ printk("%08lx ", *res_ptr);
+
+ printk("\n");
+}
+#else
+static inline void dump_resmap(void) {;}
+#endif
+
+static int pa11_dma_supported( struct device *dev, u64 mask)
+{
+ return 1;
+}
+
+static inline int map_pte_uncached(pte_t * pte,
+ unsigned long vaddr,
+ unsigned long size, unsigned long *paddr_ptr)
+{
+ unsigned long end;
+ unsigned long orig_vaddr = vaddr;
+
+ vaddr &= ~PMD_MASK;
+ end = vaddr + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ unsigned long flags;
+
+ if (!pte_none(*pte))
+ printk(KERN_ERR "map_pte_uncached: page already exists\n");
+ set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
+ purge_tlb_start(flags);
+ pdtlb_kernel(orig_vaddr);
+ purge_tlb_end(flags);
+ vaddr += PAGE_SIZE;
+ orig_vaddr += PAGE_SIZE;
+ (*paddr_ptr) += PAGE_SIZE;
+ pte++;
+ } while (vaddr < end);
+ return 0;
+}
+
+static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
+ unsigned long size, unsigned long *paddr_ptr)
+{
+ unsigned long end;
+ unsigned long orig_vaddr = vaddr;
+
+ vaddr &= ~PGDIR_MASK;
+ end = vaddr + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ do {
+ pte_t * pte = pte_alloc_kernel(pmd, vaddr);
+ if (!pte)
+ return -ENOMEM;
+ if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
+ return -ENOMEM;
+ vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
+ orig_vaddr += PMD_SIZE;
+ pmd++;
+ } while (vaddr < end);
+ return 0;
+}
+
+static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
+ unsigned long paddr)
+{
+ pgd_t * dir;
+ unsigned long end = vaddr + size;
+
+ dir = pgd_offset_k(vaddr);
+ do {
+ pmd_t *pmd;
+
+ pmd = pmd_alloc(NULL, dir, vaddr);
+ if (!pmd)
+ return -ENOMEM;
+ if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
+ return -ENOMEM;
+ vaddr = vaddr + PGDIR_SIZE;
+ dir++;
+ } while (vaddr && (vaddr < end));
+ return 0;
+}
+
+static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
+ unsigned long size)
+{
+ pte_t * pte;
+ unsigned long end;
+ unsigned long orig_vaddr = vaddr;
+
+ if (pmd_none(*pmd))
+ return;
+ if (pmd_bad(*pmd)) {
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+ return;
+ }
+ pte = pte_offset_map(pmd, vaddr);
+ vaddr &= ~PMD_MASK;
+ end = vaddr + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ unsigned long flags;
+ pte_t page = *pte;
+
+ pte_clear(&init_mm, vaddr, pte);
+ purge_tlb_start(flags);
+ pdtlb_kernel(orig_vaddr);
+ purge_tlb_end(flags);
+ vaddr += PAGE_SIZE;
+ orig_vaddr += PAGE_SIZE;
+ pte++;
+ if (pte_none(page) || pte_present(page))
+ continue;
+ printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
+ } while (vaddr < end);
+}
+
+static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
+ unsigned long size)
+{
+ pmd_t * pmd;
+ unsigned long end;
+ unsigned long orig_vaddr = vaddr;
+
+ if (pgd_none(*dir))
+ return;
+ if (pgd_bad(*dir)) {
+ pgd_ERROR(*dir);
+ pgd_clear(dir);
+ return;
+ }
+ pmd = pmd_offset(dir, vaddr);
+ vaddr &= ~PGDIR_MASK;
+ end = vaddr + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ do {
+ unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
+ vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
+ orig_vaddr += PMD_SIZE;
+ pmd++;
+ } while (vaddr < end);
+}
+
+static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
+{
+ pgd_t * dir;
+ unsigned long end = vaddr + size;
+
+ dir = pgd_offset_k(vaddr);
+ do {
+ unmap_uncached_pmd(dir, vaddr, end - vaddr);
+ vaddr = vaddr + PGDIR_SIZE;
+ dir++;
+ } while (vaddr && (vaddr < end));
+}
+
+#define PCXL_SEARCH_LOOP(idx, mask, size) \
+ for(; res_ptr < res_end; ++res_ptr) \
+ { \
+ if(0 == ((*res_ptr) & mask)) { \
+ *res_ptr |= mask; \
+ idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
+ pcxl_res_hint = idx + (size >> 3); \
+ goto resource_found; \
+ } \
+ }
+
+#define PCXL_FIND_FREE_MAPPING(idx, mask, size) { \
+ u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
+ u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
+ PCXL_SEARCH_LOOP(idx, mask, size); \
+ res_ptr = (u##size *)&pcxl_res_map[0]; \
+ PCXL_SEARCH_LOOP(idx, mask, size); \
+}
+
+unsigned long
+pcxl_alloc_range(size_t size)
+{
+ int res_idx;
+ u_long mask, flags;
+ unsigned int pages_needed = size >> PAGE_SHIFT;
+
+ mask = (u_long) -1L;
+ mask >>= BITS_PER_LONG - pages_needed;
+
+ DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
+ size, pages_needed, mask);
+
+ spin_lock_irqsave(&pcxl_res_lock, flags);
+
+ if(pages_needed <= 8) {
+ PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
+ } else if(pages_needed <= 16) {
+ PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
+ } else if(pages_needed <= 32) {
+ PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
+ } else {
+ panic("%s: pcxl_alloc_range() Too many pages to map.\n",
+ __FILE__);
+ }
+
+ dump_resmap();
+ panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
+ __FILE__);
+
+resource_found:
+
+ DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
+ res_idx, mask, pcxl_res_hint);
+
+ pcxl_used_pages += pages_needed;
+ pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);
+
+ spin_unlock_irqrestore(&pcxl_res_lock, flags);
+
+ dump_resmap();
+
+ /*
+ ** return the corresponding vaddr in the pcxl dma map
+ */
+ return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
+}
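+
+/*
+ * Illustration of the index-to-address mapping above (hypothetical
+ * numbers): each bit in pcxl_res_map covers one page, so each byte
+ * covers eight pages, and res_idx is a byte offset into the map.
+ * With 4 kB pages, a res_idx of 2 would give
+ *
+ *	vaddr = pcxl_dma_start + (2 << (12 + 3))
+ *
+ * i.e. 16 pages (64 kB) into the uncached mapping area.
+ */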
+
+#define PCXL_FREE_MAPPINGS(idx, m, size) \
+ u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
+ /* BUG_ON((*res_ptr & m) != m); */ \
+ *res_ptr &= ~m;
+
+/*
+** clear bits in the pcxl resource map
+*/
+static void
+pcxl_free_range(unsigned long vaddr, size_t size)
+{
+ u_long mask, flags;
+ unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
+ unsigned int pages_mapped = size >> PAGE_SHIFT;
+
+ mask = (u_long) -1L;
+ mask >>= BITS_PER_LONG - pages_mapped;
+
+ DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
+ res_idx, size, pages_mapped, mask);
+
+ spin_lock_irqsave(&pcxl_res_lock, flags);
+
+ if(pages_mapped <= 8) {
+ PCXL_FREE_MAPPINGS(res_idx, mask, 8);
+ } else if(pages_mapped <= 16) {
+ PCXL_FREE_MAPPINGS(res_idx, mask, 16);
+ } else if(pages_mapped <= 32) {
+ PCXL_FREE_MAPPINGS(res_idx, mask, 32);
+ } else {
+ panic("%s: pcxl_free_range() Too many pages to unmap.\n",
+ __FILE__);
+ }
+
+ pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
+ pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);
+
+ spin_unlock_irqrestore(&pcxl_res_lock, flags);
+
+ dump_resmap();
+}
+
+static int proc_pcxl_dma_show(struct seq_file *m, void *v)
+{
+#if 0
+ u_long i = 0;
+ unsigned long *res_ptr = (u_long *)pcxl_res_map;
+#endif
+ unsigned long total_pages = pcxl_res_size << 3; /* 8 bits per byte */
+
+ seq_printf(m, "\nDMA Mapping Area size : %d bytes (%ld pages)\n",
+ PCXL_DMA_MAP_SIZE, total_pages);
+
+ seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);
+
+ seq_puts(m, " total: free: used: % used:\n");
+ seq_printf(m, "blocks %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
+ pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
+ (pcxl_used_bytes * 100) / pcxl_res_size);
+
+ seq_printf(m, "pages %8ld %8ld %8ld %8ld%%\n", total_pages,
+ total_pages - pcxl_used_pages, pcxl_used_pages,
+ (pcxl_used_pages * 100 / total_pages));
+
+#if 0
+ seq_puts(m, "\nResource bitmap:");
+
+ for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
+ if ((i & 7) == 0)
+ seq_puts(m,"\n ");
+ seq_printf(m, "%s %08lx", buf, *res_ptr);
+ }
+#endif
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_pcxl_dma_show, NULL);
+}
+
+static const struct file_operations proc_pcxl_dma_ops = {
+ .owner = THIS_MODULE,
+ .open = proc_pcxl_dma_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init
+pcxl_dma_init(void)
+{
+ if (pcxl_dma_start == 0)
+ return 0;
+
+ spin_lock_init(&pcxl_res_lock);
+ pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
+ pcxl_res_hint = 0;
+ pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
+ get_order(pcxl_res_size));
+ memset(pcxl_res_map, 0, pcxl_res_size);
+ proc_gsc_root = proc_mkdir("gsc", NULL);
+ if (!proc_gsc_root)
+ printk(KERN_WARNING
+ "pcxl_dma_init: Unable to create gsc /proc dir entry\n");
+ else {
+ struct proc_dir_entry* ent;
+ ent = proc_create("pcxl_dma", 0, proc_gsc_root,
+ &proc_pcxl_dma_ops);
+ if (!ent)
+ printk(KERN_WARNING
+ "pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
+ }
+ return 0;
+}
+
+__initcall(pcxl_dma_init);
+
+static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
+{
+ unsigned long vaddr;
+ unsigned long paddr;
+ int order;
+
+ order = get_order(size);
+ size = 1 << (order + PAGE_SHIFT);
+ vaddr = pcxl_alloc_range(size);
+ paddr = __get_free_pages(flag, order);
+ flush_kernel_dcache_range(paddr, size);
+ paddr = __pa(paddr);
+ map_uncached_pages(vaddr, size, paddr);
+ *dma_handle = (dma_addr_t) paddr;
+
+#if 0
+/* This probably isn't needed to support EISA cards.
+** ISA cards will certainly only support 24-bit DMA addressing.
+** Not clear if we can, want, or need to support ISA.
+*/
+ if (!dev || *dev->coherent_dma_mask < 0xffffffff)
+ gfp |= GFP_DMA;
+#endif
+ return (void *)vaddr;
+}
+
+static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+{
+ int order;
+
+ order = get_order(size);
+ size = 1 << (order + PAGE_SHIFT);
+ unmap_uncached_pages((unsigned long)vaddr, size);
+ pcxl_free_range((unsigned long)vaddr, size);
+ free_pages((unsigned long)__va(dma_handle), order);
+}
+
+static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ flush_kernel_dcache_range((unsigned long) addr, size);
+ return virt_to_phys(addr);
+}
+
+static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ if (direction == DMA_TO_DEVICE)
+ return;
+
+ /*
+ * For PCI_DMA_FROMDEVICE this flush is not necessary for the
+ * simple map/unmap case. However, it IS necessary if
+ * pci_dma_sync_single_* has been called and the buffer reused.
+ */
+
+ flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
+ return;
+}
+
+static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+{
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ for (i = 0; i < nents; i++, sglist++ ) {
+ unsigned long vaddr = sg_virt_addr(sglist);
+ sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
+ sg_dma_len(sglist) = sglist->length;
+ flush_kernel_dcache_range(vaddr, sglist->length);
+ }
+ return nents;
+}
+
+static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+{
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ if (direction == DMA_TO_DEVICE)
+ return;
+
+ /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
+
+ for (i = 0; i < nents; i++, sglist++ )
+ flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
+ return;
+}
+
+static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
+}
+
+static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+
+ flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
+}
+
+static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+{
+ int i;
+
+ /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
+
+ for (i = 0; i < nents; i++, sglist++ )
+ flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
+}
+
+static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+{
+ int i;
+
+ /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
+
+ for (i = 0; i < nents; i++, sglist++ )
+ flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
+}
+
+struct hppa_dma_ops pcxl_dma_ops = {
+ .dma_supported = pa11_dma_supported,
+ .alloc_consistent = pa11_dma_alloc_consistent,
+ .alloc_noncoherent = pa11_dma_alloc_consistent,
+ .free_consistent = pa11_dma_free_consistent,
+ .map_single = pa11_dma_map_single,
+ .unmap_single = pa11_dma_unmap_single,
+ .map_sg = pa11_dma_map_sg,
+ .unmap_sg = pa11_dma_unmap_sg,
+ .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
+ .dma_sync_single_for_device = pa11_dma_sync_single_for_device,
+ .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
+ .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
+};
+
+static void *fail_alloc_consistent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ return NULL;
+}
+
+static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ void *addr;
+
+ addr = (void *)__get_free_pages(flag, get_order(size));
+ if (addr)
+ *dma_handle = (dma_addr_t)virt_to_phys(addr);
+
+ return addr;
+}
+
+static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t iova)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+ return;
+}
+
+struct hppa_dma_ops pcx_dma_ops = {
+ .dma_supported = pa11_dma_supported,
+ .alloc_consistent = fail_alloc_consistent,
+ .alloc_noncoherent = pa11_dma_alloc_noncoherent,
+ .free_consistent = pa11_dma_free_noncoherent,
+ .map_single = pa11_dma_map_single,
+ .unmap_single = pa11_dma_unmap_single,
+ .map_sg = pa11_dma_map_sg,
+ .unmap_sg = pa11_dma_unmap_sg,
+ .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
+ .dma_sync_single_for_device = pa11_dma_sync_single_for_device,
+ .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
+ .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
+};
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
new file mode 100644
index 00000000..9efd9740
--- /dev/null
+++ b/arch/parisc/kernel/pci.c
@@ -0,0 +1,330 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997, 1998 Ralf Baechle
+ * Copyright (C) 1999 SuSE GmbH
+ * Copyright (C) 1999-2001 Hewlett-Packard Company
+ * Copyright (C) 1999-2001 Grant Grundler
+ */
+#include <linux/eisa.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/superio.h>
+
+#define DEBUG_RESOURCES 0
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBGC(x...) printk(KERN_DEBUG x)
+#else
+# define DBGC(x...)
+#endif
+
+
+#if DEBUG_RESOURCES
+#define DBG_RES(x...) printk(KERN_DEBUG x)
+#else
+#define DBG_RES(x...)
+#endif
+
+/* To be used as: mdelay(pci_post_reset_delay);
+ *
+ * post_reset is the time the kernel should stall to prevent anyone from
+ * accessing the PCI bus once #RESET is de-asserted.
+ * PCI spec somewhere says 1 second but with multi-PCI bus systems,
+ * this makes the boot time much longer than necessary.
+ * 20ms seems to work for all the HP PCI implementations to date.
+ *
+ * #define pci_post_reset_delay 50
+ */
+
+struct pci_port_ops *pci_port __read_mostly;
+struct pci_bios_ops *pci_bios __read_mostly;
+
+static int pci_hba_count __read_mostly;
+
+/* parisc_pci_hba used by pci_port->in/out() ops to lookup bus data. */
+#define PCI_HBA_MAX 32
+static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __read_mostly;
+
+
+/********************************************************************
+**
+** I/O port space support
+**
+*********************************************************************/
+
+/* EISA port numbers and PCI port numbers share the same interface. Some
+ * machines have both EISA and PCI adapters installed. Rather than turn
+ * pci_port into an array, we reserve bus 0 for EISA and call the EISA
+ * routines if the access is to a port on bus 0. We don't want to fix
+ * EISA and ISA drivers which assume port space is <= 0xffff.
+ */
+
+#ifdef CONFIG_EISA
+#define EISA_IN(size) if (EISA_bus && (b == 0)) return eisa_in##size(addr)
+#define EISA_OUT(size) if (EISA_bus && (b == 0)) return eisa_out##size(d, addr)
+#else
+#define EISA_IN(size)
+#define EISA_OUT(size)
+#endif
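+
+/*
+ * Rough sketch of the routing described above, assuming the usual
+ * 0xbbxxxx port encoding (bus number in the upper bits, port offset
+ * in the lower 16 bits; the exact split is defined by PCI_PORT_HBA()
+ * and PCI_PORT_ADDR(), which are not shown here):
+ *
+ *	inb(0x000070);	b == 0, handled by eisa_in8() if EISA_bus is set
+ *	inb(0x020070);	b == 2, handled by pci_port->inb(parisc_pci_hba[2], ...)
+ */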
+
+#define PCI_PORT_IN(type, size) \
+u##size in##type (int addr) \
+{ \
+ int b = PCI_PORT_HBA(addr); \
+ EISA_IN(size); \
+ if (!parisc_pci_hba[b]) return (u##size) -1; \
+ return pci_port->in##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr)); \
+} \
+EXPORT_SYMBOL(in##type);
+
+PCI_PORT_IN(b, 8)
+PCI_PORT_IN(w, 16)
+PCI_PORT_IN(l, 32)
+
+
+#define PCI_PORT_OUT(type, size) \
+void out##type (u##size d, int addr) \
+{ \
+ int b = PCI_PORT_HBA(addr); \
+ EISA_OUT(size); \
+ if (!parisc_pci_hba[b]) return; \
+ pci_port->out##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr), d); \
+} \
+EXPORT_SYMBOL(out##type);
+
+PCI_PORT_OUT(b, 8)
+PCI_PORT_OUT(w, 16)
+PCI_PORT_OUT(l, 32)
+
+
+
+/*
+ * BIOS32 replacement.
+ */
+static int __init pcibios_init(void)
+{
+ if (!pci_bios)
+ return -1;
+
+ if (pci_bios->init) {
+ pci_bios->init();
+ } else {
+ printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
+ }
+
+ /* Set the CLS for PCI as early as possible. */
+ pci_cache_line_size = pci_dfl_cache_line_size;
+
+ return 0;
+}
+
+
+/* Called from pci_do_scan_bus() *after* walking a bus but before walking PPBs. */
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+ if (pci_bios->fixup_bus) {
+ pci_bios->fixup_bus(bus);
+ } else {
+ printk(KERN_WARNING "pci_bios != NULL but fixup_bus() is!\n");
+ }
+}
+
+
+char *pcibios_setup(char *str)
+{
+ return str;
+}
+
+/*
+ * Called by pci_set_master() - a driver interface.
+ *
+ * Legacy PDC guarantees to set:
+ * Map Memory BAR's into PA IO space.
+ * Map Expansion ROM BAR into one common PA IO space per bus.
+ * Map IO BAR's into PCI IO space.
+ * Command (see below)
+ * Cache Line Size
+ * Latency Timer
+ * Interrupt Line
+ * PPB: secondary latency timer, io/mmio base/limit,
+ * bus numbers, bridge control
+ *
+ */
+void pcibios_set_master(struct pci_dev *dev)
+{
+ u8 lat;
+
+ /* If someone already mucked with this, don't touch it. */
+ pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
+ if (lat >= 16) return;
+
+ /*
+ ** HP generally has fewer devices on the bus than other architectures.
+ ** The upper byte of the 16-bit value written below is PCI_LATENCY_TIMER.
+ */
+ pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
+ (0x80 << 8) | pci_cache_line_size);
+}
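+
+/*
+ * Note that the single 16-bit write above covers two config registers
+ * at once: PCI_CACHE_LINE_SIZE (offset 0x0c) receives
+ * pci_cache_line_size and PCI_LATENCY_TIMER (offset 0x0d) receives
+ * 0x80.  With a hypothetical cache line size of 32 bytes (8 dwords),
+ * the value written would be 0x8008.
+ */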
+
+
+void __init pcibios_init_bus(struct pci_bus *bus)
+{
+ struct pci_dev *dev = bus->self;
+ unsigned short bridge_ctl;
+
+ /* We deal only with pci controllers and pci-pci bridges. */
+ if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+ return;
+
+ /* PCI-PCI bridge - set the cache line and default latency
+ (32) for primary and secondary buses. */
+ pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
+
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
+ bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
+}
+
+/* called by drivers/pci/setup-bus.c:pci_setup_bridge(). */
+void __devinit pcibios_resource_to_bus(struct pci_dev *dev,
+ struct pci_bus_region *region, struct resource *res)
+{
+#ifdef CONFIG_64BIT
+ struct pci_hba_data *hba = HBA_DATA(dev->bus->bridge->platform_data);
+#endif
+
+ if (res->flags & IORESOURCE_IO) {
+ /*
+ ** I/O space may see bus numbers here: something
+ ** of the form 0xbbxxxx, where bb is the bus number
+ ** and xxxx is the I/O port space address.
+ ** Remaining address translations are done in the
+ ** PCI host adapter specific code - e.g. dino_out8.
+ */
+ region->start = PCI_PORT_ADDR(res->start);
+ region->end = PCI_PORT_ADDR(res->end);
+ } else if (res->flags & IORESOURCE_MEM) {
+ /* Convert MMIO addr to PCI addr (undo global virtualization) */
+ region->start = PCI_BUS_ADDR(hba, res->start);
+ region->end = PCI_BUS_ADDR(hba, res->end);
+ }
+
+ DBG_RES("pcibios_resource_to_bus(%02x %s [%lx,%lx])\n",
+ dev->bus->number, res->flags & IORESOURCE_IO ? "IO" : "MEM",
+ region->start, region->end);
+}
+
+void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+ struct pci_bus_region *region)
+{
+#ifdef CONFIG_64BIT
+ struct pci_hba_data *hba = HBA_DATA(dev->bus->bridge->platform_data);
+#endif
+
+ if (res->flags & IORESOURCE_MEM) {
+ res->start = PCI_HOST_ADDR(hba, region->start);
+ res->end = PCI_HOST_ADDR(hba, region->end);
+ }
+
+ if (res->flags & IORESOURCE_IO) {
+ res->start = region->start;
+ res->end = region->end;
+ }
+}
+
+#ifdef CONFIG_HOTPLUG
+EXPORT_SYMBOL(pcibios_resource_to_bus);
+EXPORT_SYMBOL(pcibios_bus_to_resource);
+#endif
+
+/*
+ * pcibios_align_resource() is called every time the generic PCI code
+ * wants to generate a new address. While looking for an available
+ * address, each candidate is first "aligned" and then checked for
+ * availability until a match is found.
+ *
+ * Since we are just checking candidates, don't use any fields other
+ * than res->start.
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t alignment)
+{
+ resource_size_t mask, align, start = res->start;
+
+ DBG_RES("pcibios_align_resource(%s, (%p) [%lx,%lx]/%x, 0x%lx, 0x%lx)\n",
+ pci_name(((struct pci_dev *) data)),
+ res->parent, res->start, res->end,
+ (int) res->flags, size, alignment);
+
+ /* If it's not IO, then it's gotta be MEM */
+ align = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
+
+ /* Align to largest of MIN or input size */
+ mask = max(alignment, align) - 1;
+ start += mask;
+ start &= ~mask;
+
+ return start;
+}
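+
+/*
+ * Worked example of the rounding above, with hypothetical values: for
+ * a memory BAR candidate with res->start = 0xf0001234 and an effective
+ * alignment of 0x1000,
+ *
+ *	mask  = 0x1000 - 1 = 0x0fff
+ *	start = (0xf0001234 + 0x0fff) & ~0x0fff = 0xf0002000
+ *
+ * i.e. the candidate is simply rounded up to the next aligned address.
+ */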
+
+
+/*
+ * A driver is enabling the device. We make sure that all the appropriate
+ * bits are set to allow the device to operate as the driver is expecting.
+ * We enable the port IO and memory IO bits if the device has any BARs of
+ * that type, and we enable the PERR and SERR bits unconditionally.
+ * Drivers that do not need parity (eg graphics and possibly networking)
+ * can clear these bits if they want.
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ int err;
+ u16 cmd, old_cmd;
+
+ err = pci_enable_resources(dev, mask);
+ if (err < 0)
+ return err;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+
+ cmd |= (PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+
+#if 0
+ /* If bridge/bus controller has FBB enabled, child must too. */
+ if (dev->bus->bridge_ctl & PCI_BRIDGE_CTL_FAST_BACK)
+ cmd |= PCI_COMMAND_FAST_BACK;
+#endif
+
+ if (cmd != old_cmd) {
+ dev_info(&dev->dev, "enabling SERR and PARITY (%04x -> %04x)\n",
+ old_cmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
+
+
+/* PA-RISC specific */
+void pcibios_register_hba(struct pci_hba_data *hba)
+{
+ if (pci_hba_count >= PCI_HBA_MAX) {
+ printk(KERN_ERR "PCI: Too many Host Bus Adapters\n");
+ return;
+ }
+
+ parisc_pci_hba[pci_hba_count] = hba;
+ hba->hba_num = pci_hba_count++;
+}
+
+subsys_initcall(pcibios_init);
diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c
new file mode 100644
index 00000000..d47ba1aa
--- /dev/null
+++ b/arch/parisc/kernel/pdc_chassis.c
@@ -0,0 +1,301 @@
+/*
+ * interfaces to Chassis Codes via PDC (firmware)
+ *
+ * Copyright (C) 2002 Laurent Canet <canetl@esiee.fr>
+ * Copyright (C) 2002-2006 Thibaut VARENE <varenet@parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * TODO: poll chassis warns, trigger (configurable) machine shutdown when
+ * needed.
+ * Find out how to get Chassis warnings out of PAT boxes?
+ */
+
+#undef PDC_CHASSIS_DEBUG
+#ifdef PDC_CHASSIS_DEBUG
+#define DPRINTK(fmt, args...) printk(fmt, ## args)
+#else
+#define DPRINTK(fmt, args...)
+#endif
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
+#include <linux/cache.h>
+#include <linux/proc_fs.h>
+
+#include <asm/pdc_chassis.h>
+#include <asm/processor.h>
+#include <asm/pdc.h>
+#include <asm/pdcpat.h>
+
+#define PDC_CHASSIS_VER "0.05"
+
+#ifdef CONFIG_PDC_CHASSIS
+static unsigned int pdc_chassis_enabled __read_mostly = 1;
+
+
+/**
+ * pdc_chassis_setup() - Enable/disable pdc_chassis code at boot time.
+ * @str: configuration parameter: 0 disables the chassis log
+ *
+ * Returns 1.
+ */
+
+static int __init pdc_chassis_setup(char *str)
+{
+ /*panic_timeout = simple_strtoul(str, NULL, 0);*/
+ get_option(&str, &pdc_chassis_enabled);
+ return 1;
+}
+__setup("pdcchassis=", pdc_chassis_setup);
+
+
+/**
+ * pdc_chassis_checkold() - Checks for old PDC_CHASSIS compatibility
+ * @pdc_chassis_old: 1 if old pdc chassis style
+ *
+ * Currently, only E class and A180 are known to work with this.
+ * Inspired by Christoph Plattner
+ */
+#if 0
+static void __init pdc_chassis_checkold(void)
+{
+ switch(CPU_HVERSION) {
+ case 0x480: /* E25 */
+ case 0x481: /* E35 */
+ case 0x482: /* E45 */
+ case 0x483: /* E55 */
+ case 0x516: /* A180 */
+ break;
+
+ default:
+ break;
+ }
+ DPRINTK(KERN_DEBUG "%s: pdc_chassis_checkold(); pdc_chassis_old = %d\n", __FILE__, pdc_chassis_old);
+}
+#endif
+
+/**
+ * pdc_chassis_panic_event() - Called by the panic handler.
+ *
+ * As soon as a panic occurs, we should inform the PDC.
+ */
+
+static int pdc_chassis_panic_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+ return NOTIFY_DONE;
+}
+
+
+static struct notifier_block pdc_chassis_panic_block = {
+ .notifier_call = pdc_chassis_panic_event,
+ .priority = INT_MAX,
+};
+
+
+/**
+ * pdc_chassis_reboot_event() - Called by the reboot handler.
+ *
+ * As soon as a reboot occurs, we should inform the PDC.
+ */
+
+static int pdc_chassis_reboot_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
+ return NOTIFY_DONE;
+}
+
+
+static struct notifier_block pdc_chassis_reboot_block = {
+ .notifier_call = pdc_chassis_reboot_event,
+ .priority = INT_MAX,
+};
+#endif /* CONFIG_PDC_CHASSIS */
+
+
+/**
+ * parisc_pdc_chassis_init() - Called at boot time.
+ */
+
+void __init parisc_pdc_chassis_init(void)
+{
+#ifdef CONFIG_PDC_CHASSIS
+ if (likely(pdc_chassis_enabled)) {
+ DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__);
+
+ /* Let see if we have something to handle... */
+ printk(KERN_INFO "Enabling %s chassis codes support v%s\n",
+ is_pdc_pat() ? "PDC_PAT" : "regular",
+ PDC_CHASSIS_VER);
+
+ /* initialize panic notifier chain */
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &pdc_chassis_panic_block);
+
+ /* initialize reboot notifier chain */
+ register_reboot_notifier(&pdc_chassis_reboot_block);
+ }
+#endif /* CONFIG_PDC_CHASSIS */
+}
+
+
+/**
+ * pdc_chassis_send_status() - Sends a predefined message to the chassis,
+ * and changes the front panel LEDs according to the new system state
+ * @message: the predefined chassis message to send
+ *
+ * Only machines with 64-bit PDC PAT and those reported in
+ * pdc_chassis_checkold() are supported at the moment.
+ *
+ * Returns 0 if no error, -1 if no supported PDC is present or the message
+ * is invalid, else returns the appropriate PDC error code.
+ *
+ * For a list of predefined messages, see asm-parisc/pdc_chassis.h
+ */
+
+int pdc_chassis_send_status(int message)
+{
+ /* Maybe we should do this in another way? */
+ int retval = 0;
+#ifdef CONFIG_PDC_CHASSIS
+ if (likely(pdc_chassis_enabled)) {
+
+ DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message);
+
+#ifdef CONFIG_64BIT
+ if (is_pdc_pat()) {
+ switch(message) {
+ case PDC_CHASSIS_DIRECT_BSTART:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BSTART, PDC_CHASSIS_LSTATE_RUN_NORMAL);
+ break;
+
+ case PDC_CHASSIS_DIRECT_BCOMPLETE:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BCOMPLETE, PDC_CHASSIS_LSTATE_RUN_NORMAL);
+ break;
+
+ case PDC_CHASSIS_DIRECT_SHUTDOWN:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_SHUTDOWN, PDC_CHASSIS_LSTATE_NONOS);
+ break;
+
+ case PDC_CHASSIS_DIRECT_PANIC:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_PANIC, PDC_CHASSIS_LSTATE_RUN_CRASHREC);
+ break;
+
+ case PDC_CHASSIS_DIRECT_LPMC:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_LPMC, PDC_CHASSIS_LSTATE_RUN_SYSINT);
+ break;
+
+ case PDC_CHASSIS_DIRECT_HPMC:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_HPMC, PDC_CHASSIS_LSTATE_RUN_NCRIT);
+ break;
+
+ default:
+ retval = -1;
+ }
+ } else retval = -1;
+#else
+ if (1) {
+ switch (message) {
+ case PDC_CHASSIS_DIRECT_BSTART:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_INIT));
+ break;
+
+ case PDC_CHASSIS_DIRECT_BCOMPLETE:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_RUN));
+ break;
+
+ case PDC_CHASSIS_DIRECT_SHUTDOWN:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_SHUT));
+ break;
+
+ case PDC_CHASSIS_DIRECT_HPMC:
+ case PDC_CHASSIS_DIRECT_PANIC:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_FLT));
+ break;
+
+ case PDC_CHASSIS_DIRECT_LPMC:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_WARN));
+ break;
+
+ default:
+ retval = -1;
+ }
+ } else retval = -1;
+#endif /* CONFIG_64BIT */
+ } /* if (pdc_chassis_enabled) */
+#endif /* CONFIG_PDC_CHASSIS */
+ return retval;
+}
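+
+/*
+ * Typical usage, as in the panic notifier above:
+ *
+ *	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+ *
+ * On PAT firmware this becomes a chassis log entry; on older boxes it
+ * becomes a front panel display code (OSTAT_FLT in this case).
+ */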
+
+#ifdef CONFIG_PDC_CHASSIS_WARN
+#ifdef CONFIG_PROC_FS
+static int pdc_chassis_warn_pread(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ char *out = page;
+ int len, ret;
+ unsigned long warn;
+ u32 warnreg;
+
+ ret = pdc_chassis_warn(&warn);
+ if (ret != PDC_OK)
+ return -EIO;
+
+ warnreg = (warn & 0xFFFFFFFF);
+
+ if ((warnreg >> 24) & 0xFF)
+ out += sprintf(out, "Chassis component failure! (eg fan or PSU): 0x%.2x\n", ((warnreg >> 24) & 0xFF));
+
+ out += sprintf(out, "Battery: %s\n", (warnreg & 0x04) ? "Low!" : "OK");
+ out += sprintf(out, "Temp low: %s\n", (warnreg & 0x02) ? "Exceeded!" : "OK");
+ out += sprintf(out, "Temp mid: %s\n", (warnreg & 0x01) ? "Exceeded!" : "OK");
+
+ len = out - page - off;
+ if (len < count) {
+ *eof = 1;
+ if (len <= 0) return 0;
+ } else {
+ len = count;
+ }
+ *start = page + off;
+ return len;
+}
+
+static int __init pdc_chassis_create_procfs(void)
+{
+ unsigned long test;
+ int ret;
+
+ ret = pdc_chassis_warn(&test);
+ if ((ret == PDC_BAD_PROC) || (ret == PDC_BAD_OPTION)) {
+ /* seems that some boxes (eg L1000) do not implement this */
+ printk(KERN_INFO "Chassis warnings not supported.\n");
+ return 0;
+ }
+
+ printk(KERN_INFO "Enabling PDC chassis warnings support v%s\n",
+ PDC_CHASSIS_VER);
+ create_proc_read_entry("chassis", 0400, NULL, pdc_chassis_warn_pread,
+ NULL);
+ return 0;
+}
+
+__initcall(pdc_chassis_create_procfs);
+
+#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_PDC_CHASSIS_WARN */
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
new file mode 100644
index 00000000..fc770be4
--- /dev/null
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -0,0 +1,286 @@
+/*
+ * PDC Console support - ie use firmware to dump text via boot console
+ *
+ * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
+ * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
+ * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
+ * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
+ * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
+ * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
+ * Copyright (C) 2001 Helge Deller <deller at parisc-linux.org>
+ * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
+ * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ * Copyright (C) 2010 Guy Martin <gmsoft at tuxicoman.be>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * The PDC console is a simple console, which can be used for debugging
+ * boot related problems on HP PA-RISC machines. It is also useful when no
+ * other console works.
+ *
+ * This code uses the ROM (=PDC) based functions to read and write characters
+ * from and to PDC's boot path.
+ */
+
+/* Define EARLY_BOOTUP_DEBUG to debug kernel related boot problems.
+ * On production kernels EARLY_BOOTUP_DEBUG should be undefined. */
+#define EARLY_BOOTUP_DEBUG
+
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/major.h>
+#include <linux/tty.h>
+#include <asm/pdc.h> /* for iodc_call() proto and friends */
+
+static DEFINE_SPINLOCK(pdc_console_lock);
+static struct console pdc_cons;
+
+static void pdc_console_write(struct console *co, const char *s, unsigned count)
+{
+ int i = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_console_lock, flags);
+ do {
+ i += pdc_iodc_print(s + i, count - i);
+ } while (i < count);
+ spin_unlock_irqrestore(&pdc_console_lock, flags);
+}
+
+int pdc_console_poll_key(struct console *co)
+{
+ int c;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_console_lock, flags);
+ c = pdc_iodc_getc();
+ spin_unlock_irqrestore(&pdc_console_lock, flags);
+
+ return c;
+}
+
+static int pdc_console_setup(struct console *co, char *options)
+{
+ return 0;
+}
+
+#if defined(CONFIG_PDC_CONSOLE)
+#include <linux/vt_kern.h>
+#include <linux/tty_flip.h>
+
+#define PDC_CONS_POLL_DELAY (30 * HZ / 1000)
+
+static struct timer_list pdc_console_timer;
+
+static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp)
+{
+
+ mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
+
+ return 0;
+}
+
+static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ if (!tty->count)
+ del_timer(&pdc_console_timer);
+}
+
+static int pdc_console_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ pdc_console_write(NULL, buf, count);
+ return count;
+}
+
+static int pdc_console_tty_write_room(struct tty_struct *tty)
+{
+ return 32768; /* no limit, no buffer used */
+}
+
+static int pdc_console_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ return 0; /* no buffer */
+}
+
+static struct tty_driver *pdc_console_tty_driver;
+
+static const struct tty_operations pdc_console_tty_ops = {
+ .open = pdc_console_tty_open,
+ .close = pdc_console_tty_close,
+ .write = pdc_console_tty_write,
+ .write_room = pdc_console_tty_write_room,
+ .chars_in_buffer = pdc_console_tty_chars_in_buffer,
+};
+
+static void pdc_console_poll(unsigned long unused)
+{
+
+ int data, count = 0;
+
+ struct tty_struct *tty = pdc_console_tty_driver->ttys[0];
+
+ if (!tty)
+ return;
+
+ while (1) {
+ data = pdc_console_poll_key(NULL);
+ if (data == -1)
+ break;
+ tty_insert_flip_char(tty, data & 0xFF, TTY_NORMAL);
+ count ++;
+ }
+
+ if (count)
+ tty_flip_buffer_push(tty);
+
+ if (tty->count && (pdc_cons.flags & CON_ENABLED))
+ mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
+}
+
+static int __init pdc_console_tty_driver_init(void)
+{
+
+ int err;
+ struct tty_driver *drv;
+
+ /* Check if the console driver is still registered.
+ * It is unregistered if the pdc console was not selected as the
+ * primary console. */
+
+ struct console *tmp;
+
+ console_lock();
+ for_each_console(tmp)
+ if (tmp == &pdc_cons)
+ break;
+ console_unlock();
+
+ if (!tmp) {
+ printk(KERN_INFO "PDC console driver not registered anymore, not creating %s\n", pdc_cons.name);
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO "The PDC console driver is still registered, removing CON_BOOT flag\n");
+ pdc_cons.flags &= ~CON_BOOT;
+
+ drv = alloc_tty_driver(1);
+
+ if (!drv)
+ return -ENOMEM;
+
+ drv->driver_name = "pdc_cons";
+ drv->name = "ttyB";
+ drv->major = MUX_MAJOR;
+ drv->minor_start = 0;
+ drv->type = TTY_DRIVER_TYPE_SYSTEM;
+ drv->init_termios = tty_std_termios;
+ drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
+ tty_set_operations(drv, &pdc_console_tty_ops);
+
+ err = tty_register_driver(drv);
+ if (err) {
+ printk(KERN_ERR "Unable to register the PDC console TTY driver\n");
+ return err;
+ }
+
+ pdc_console_tty_driver = drv;
+
+ /* No need to initialize the pdc_console_timer if tty isn't allocated */
+ init_timer(&pdc_console_timer);
+ pdc_console_timer.function = pdc_console_poll;
+
+ return 0;
+}
+
+module_init(pdc_console_tty_driver_init);
+
+static struct tty_driver * pdc_console_device (struct console *c, int *index)
+{
+ *index = c->index;
+ return pdc_console_tty_driver;
+}
+#else
+#define pdc_console_device NULL
+#endif
+
+static struct console pdc_cons = {
+ .name = "ttyB",
+ .write = pdc_console_write,
+ .device = pdc_console_device,
+ .setup = pdc_console_setup,
+ .flags = CON_BOOT | CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static int pdc_console_initialized;
+
+static void pdc_console_init_force(void)
+{
+ if (pdc_console_initialized)
+ return;
+ ++pdc_console_initialized;
+
+ /* If the console is duplex then copy the COUT parameters to CIN. */
+ if (PAGE0->mem_cons.cl_class == CL_DUPLEX)
+ memcpy(&PAGE0->mem_kbd, &PAGE0->mem_cons, sizeof(PAGE0->mem_cons));
+
+ /* register the pdc console */
+ register_console(&pdc_cons);
+}
+
+void __init pdc_console_init(void)
+{
+#if defined(EARLY_BOOTUP_DEBUG) || defined(CONFIG_PDC_CONSOLE)
+ pdc_console_init_force();
+#endif
+#ifdef EARLY_BOOTUP_DEBUG
+ printk(KERN_INFO "Initialized PDC Console for debugging.\n");
+#endif
+}
+
+
+/*
+ * Used for emergencies. Currently only used if an HPMC occurs. If an
+ * HPMC occurs, it is possible that the current console may not be
+ * properly initialised after the PDC IO reset. This routine unregisters
+ * all of the current consoles, reinitializes the pdc console and
+ * registers it.
+ */
+
+void pdc_console_restart(void)
+{
+ struct console *console;
+
+ if (pdc_console_initialized)
+ return;
+
+ /* If we've already seen the output, don't bother to print it again */
+ if (console_drivers != NULL)
+ pdc_cons.flags &= ~CON_PRINTBUFFER;
+
+ while ((console = console_drivers) != NULL)
+ unregister_console(console_drivers);
+
+ /* force registering the pdc console */
+ pdc_console_init_force();
+}
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
new file mode 100644
index 00000000..ba0c053e
--- /dev/null
+++ b/arch/parisc/kernel/perf.c
@@ -0,0 +1,851 @@
+/*
+ * Parisc performance counters
+ * Copyright (C) 2001 Randolph Chung <tausq@debian.org>
+ *
+ * This code is derived, with permission, from HP/UX sources.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Edited comment from original sources:
+ *
+ * This driver programs the PCX-U/PCX-W performance counters
+ * on the PA-RISC 2.0 chips. The driver now keeps all images
+ * internal to the kernel to hopefully eliminate the possibility
+ * of a bad image halting the CPU. Also, there are different
+ * images for the PCX-W and later chips vs the PCX-U chips.
+ *
+ * Only 1 process is allowed to access the driver at any time,
+ * so the only protection that is needed is at open and close.
+ * A variable "perf_enabled" is used to hold the state of the
+ * driver. The spinlock "perf_lock" is used to protect the
+ * modification of the state during open/close operations so
+ * multiple processes don't get into the driver simultaneously.
+ *
+ * This driver accesses the processor directly vs going through
+ * the PDC INTRIGUE calls. This is done to eliminate bugs introduced
+ * in various PDC revisions. The code is much more maintainable
+ * and reliable this way vs having to debug on every version of PDC
+ * on every box.
+ */
+
+#include <linux/capability.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+
+#include <asm/uaccess.h>
+#include <asm/perf.h>
+#include <asm/parisc-device.h>
+#include <asm/processor.h>
+#include <asm/runway.h>
+#include <asm/io.h> /* for __raw_read() */
+
+#include "perf_images.h"
+
+#define MAX_RDR_WORDS 24
+#define PERF_VERSION 2 /* derived from hpux's PI v2 interface */
+
+/* definition of RDR regs */
+struct rdr_tbl_ent {
+ uint16_t width;
+ uint8_t num_words;
+ uint8_t write_control;
+};
+
+static int perf_processor_interface __read_mostly = UNKNOWN_INTF;
+static int perf_enabled __read_mostly;
+static spinlock_t perf_lock;
+struct parisc_device *cpu_device __read_mostly;
+
+/* RDRs to write for PCX-W */
+static const int perf_rdrs_W[] =
+ { 0, 1, 4, 5, 6, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
+
+/* RDRs to write for PCX-U */
+static const int perf_rdrs_U[] =
+ { 0, 1, 4, 5, 6, 7, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
+
+/* RDR register descriptions for PCX-W */
+static const struct rdr_tbl_ent perf_rdr_tbl_W[] = {
+ { 19, 1, 8 }, /* RDR 0 */
+ { 16, 1, 16 }, /* RDR 1 */
+ { 72, 2, 0 }, /* RDR 2 */
+ { 81, 2, 0 }, /* RDR 3 */
+ { 328, 6, 0 }, /* RDR 4 */
+ { 160, 3, 0 }, /* RDR 5 */
+ { 336, 6, 0 }, /* RDR 6 */
+ { 164, 3, 0 }, /* RDR 7 */
+ { 0, 0, 0 }, /* RDR 8 */
+ { 35, 1, 0 }, /* RDR 9 */
+ { 6, 1, 0 }, /* RDR 10 */
+ { 18, 1, 0 }, /* RDR 11 */
+ { 13, 1, 0 }, /* RDR 12 */
+ { 8, 1, 0 }, /* RDR 13 */
+ { 8, 1, 0 }, /* RDR 14 */
+ { 8, 1, 0 }, /* RDR 15 */
+ { 1530, 24, 0 }, /* RDR 16 */
+ { 16, 1, 0 }, /* RDR 17 */
+ { 4, 1, 0 }, /* RDR 18 */
+ { 0, 0, 0 }, /* RDR 19 */
+ { 152, 3, 24 }, /* RDR 20 */
+ { 152, 3, 24 }, /* RDR 21 */
+ { 233, 4, 48 }, /* RDR 22 */
+ { 233, 4, 48 }, /* RDR 23 */
+ { 71, 2, 0 }, /* RDR 24 */
+ { 71, 2, 0 }, /* RDR 25 */
+ { 11, 1, 0 }, /* RDR 26 */
+ { 18, 1, 0 }, /* RDR 27 */
+ { 128, 2, 0 }, /* RDR 28 */
+ { 0, 0, 0 }, /* RDR 29 */
+ { 16, 1, 0 }, /* RDR 30 */
+ { 16, 1, 0 }, /* RDR 31 */
+};
+
+/* RDR register descriptions for PCX-U */
+static const struct rdr_tbl_ent perf_rdr_tbl_U[] = {
+ { 19, 1, 8 }, /* RDR 0 */
+ { 32, 1, 16 }, /* RDR 1 */
+ { 20, 1, 0 }, /* RDR 2 */
+ { 0, 0, 0 }, /* RDR 3 */
+ { 344, 6, 0 }, /* RDR 4 */
+ { 176, 3, 0 }, /* RDR 5 */
+ { 336, 6, 0 }, /* RDR 6 */
+ { 0, 0, 0 }, /* RDR 7 */
+ { 0, 0, 0 }, /* RDR 8 */
+ { 0, 0, 0 }, /* RDR 9 */
+ { 28, 1, 0 }, /* RDR 10 */
+ { 33, 1, 0 }, /* RDR 11 */
+ { 0, 0, 0 }, /* RDR 12 */
+ { 230, 4, 0 }, /* RDR 13 */
+ { 32, 1, 0 }, /* RDR 14 */
+ { 128, 2, 0 }, /* RDR 15 */
+ { 1494, 24, 0 }, /* RDR 16 */
+ { 18, 1, 0 }, /* RDR 17 */
+ { 4, 1, 0 }, /* RDR 18 */
+ { 0, 0, 0 }, /* RDR 19 */
+ { 158, 3, 24 }, /* RDR 20 */
+ { 158, 3, 24 }, /* RDR 21 */
+ { 194, 4, 48 }, /* RDR 22 */
+ { 194, 4, 48 }, /* RDR 23 */
+ { 71, 2, 0 }, /* RDR 24 */
+ { 71, 2, 0 }, /* RDR 25 */
+ { 28, 1, 0 }, /* RDR 26 */
+ { 33, 1, 0 }, /* RDR 27 */
+ { 88, 2, 0 }, /* RDR 28 */
+ { 32, 1, 0 }, /* RDR 29 */
+ { 24, 1, 0 }, /* RDR 30 */
+ { 16, 1, 0 }, /* RDR 31 */
+};
+
+/*
+ * A non-zero write_control in the above tables is a byte offset into
+ * this array.
+ */
+static const uint64_t perf_bitmasks[] = {
+ 0x0000000000000000ul, /* first dbl word must be zero */
+ 0xfdffe00000000000ul, /* RDR0 bitmask */
+ 0x003f000000000000ul, /* RDR1 bitmask */
+ 0x00fffffffffffffful, /* RDR20-RDR21 bitmask (152 bits) */
+ 0xfffffffffffffffful,
+ 0xfffffffc00000000ul,
+ 0xfffffffffffffffful, /* RDR22-RDR23 bitmask (233 bits) */
+ 0xfffffffffffffffful,
+ 0xfffffffffffffffcul,
+ 0xff00000000000000ul
+};
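+
+/*
+ * Example of how write_control selects a bitmask (see perf_write_image()
+ * below): RDR 0 has write_control 8 in the tables above, and
+ * bitmask_array[8 >> 3] is perf_bitmasks[1], the RDR0 bitmask; RDR 20
+ * and 21 use write_control 24, i.e. entries starting at perf_bitmasks[3].
+ */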
+
+/*
+ * Write control bitmasks for Pa-8700 processor given
+ * some things have changed slightly.
+ */
+static const uint64_t perf_bitmasks_piranha[] = {
+ 0x0000000000000000ul, /* first dbl word must be zero */
+ 0xfdffe00000000000ul, /* RDR0 bitmask */
+ 0x003f000000000000ul, /* RDR1 bitmask */
+ 0x00fffffffffffffful, /* RDR20-RDR21 bitmask (158 bits) */
+ 0xfffffffffffffffful,
+ 0xfffffffc00000000ul,
+ 0xfffffffffffffffful, /* RDR22-RDR23 bitmask (210 bits) */
+ 0xfffffffffffffffful,
+ 0xfffffffffffffffful,
+ 0xfffc000000000000ul
+};
+
+static const uint64_t *bitmask_array; /* array of bitmasks to use */
+
+/******************************************************************************
+ * Function Prototypes
+ *****************************************************************************/
+static int perf_config(uint32_t *image_ptr);
+static int perf_release(struct inode *inode, struct file *file);
+static int perf_open(struct inode *inode, struct file *file);
+static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
+static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos);
+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+static void perf_start_counters(void);
+static int perf_stop_counters(uint32_t *raddr);
+static const struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num);
+static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer);
+static int perf_rdr_clear(uint32_t rdr_num);
+static int perf_write_image(uint64_t *memaddr);
+static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer);
+
+/* External Assembly Routines */
+extern uint64_t perf_rdr_shift_in_W (uint32_t rdr_num, uint16_t width);
+extern uint64_t perf_rdr_shift_in_U (uint32_t rdr_num, uint16_t width);
+extern void perf_rdr_shift_out_W (uint32_t rdr_num, uint64_t buffer);
+extern void perf_rdr_shift_out_U (uint32_t rdr_num, uint64_t buffer);
+extern void perf_intrigue_enable_perf_counters (void);
+extern void perf_intrigue_disable_perf_counters (void);
+
+/******************************************************************************
+ * Function Definitions
+ *****************************************************************************/
+
+
+/*
+ * configure:
+ *
+ * Configure the cpu with a given data image. First turn off the counters,
+ * then download the image, then turn the counters back on.
+ */
+static int perf_config(uint32_t *image_ptr)
+{
+ long error;
+ uint32_t raddr[4];
+
+ /* Stop the counters*/
+ error = perf_stop_counters(raddr);
+ if (error != 0) {
+ printk("perf_config: perf_stop_counters = %ld\n", error);
+ return -EINVAL;
+ }
+
+printk("Preparing to write image\n");
+ /* Write the image to the chip */
+ error = perf_write_image((uint64_t *)image_ptr);
+ if (error != 0) {
+ printk("perf_config: DOWNLOAD = %ld\n", error);
+ return -EINVAL;
+ }
+
+printk("Preparing to start counters\n");
+
+ /* Start the counters */
+ perf_start_counters();
+
+ return sizeof(uint32_t);
+}
+
+/*
+ * Open the device and initialize all of its memory. The device is only
+ * opened once, but can be "queried" by multiple processes that know its
+ * file descriptor.
+ */
+static int perf_open(struct inode *inode, struct file *file)
+{
+ spin_lock(&perf_lock);
+ if (perf_enabled) {
+ spin_unlock(&perf_lock);
+ return -EBUSY;
+ }
+ perf_enabled = 1;
+ spin_unlock(&perf_lock);
+
+ return 0;
+}
+
+/*
+ * Close the device.
+ */
+static int perf_release(struct inode *inode, struct file *file)
+{
+ spin_lock(&perf_lock);
+ perf_enabled = 0;
+ spin_unlock(&perf_lock);
+
+ return 0;
+}
+
+/*
+ * Read does nothing for this driver
+ */
+static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos)
+{
+ return 0;
+}
+
+/*
+ * write:
+ *
+ * This routine downloads the image to the chip. It must be
+ * called on the processor that the download should happen
+ * on.
+ */
+static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ int err;
+ size_t image_size;
+ uint32_t image_type;
+ uint32_t interface_type;
+ uint32_t test;
+
+ if (perf_processor_interface == ONYX_INTF)
+ image_size = PCXU_IMAGE_SIZE;
+ else if (perf_processor_interface == CUDA_INTF)
+ image_size = PCXW_IMAGE_SIZE;
+ else
+ return -EFAULT;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (count != sizeof(uint32_t))
+ return -EIO;
+
+ if ((err = copy_from_user(&image_type, buf, sizeof(uint32_t))) != 0)
+ return err;
+
+ /* Get the interface type and test type */
+ interface_type = (image_type >> 16) & 0xffff;
+ test = (image_type & 0xffff);
+
+ /* Make sure everything makes sense */
+
+ /* First check the machine type is correct for
+ the requested image */
+ if (((perf_processor_interface == CUDA_INTF) &&
+ (interface_type != CUDA_INTF)) ||
+ ((perf_processor_interface == ONYX_INTF) &&
+ (interface_type != ONYX_INTF)))
+ return -EINVAL;
+
+ /* Next check to make sure the requested image
+ is valid */
+ if (((interface_type == CUDA_INTF) &&
+ (test >= MAX_CUDA_IMAGES)) ||
+ ((interface_type == ONYX_INTF) &&
+ (test >= MAX_ONYX_IMAGES)))
+ return -EINVAL;
+
+ /* Copy the image into the processor */
+ if (interface_type == CUDA_INTF)
+ return perf_config(cuda_images[test]);
+ else
+ return perf_config(onyx_images[test]);
+
+ return count;
+}
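+
+/*
+ * From user space the download is a single 32-bit write: the interface
+ * type goes in the upper 16 bits and the image number in the lower 16.
+ * A hypothetical request for ONYX (PCX-U) image 3 would look like
+ *
+ *	uint32_t image_type = (ONYX_INTF << 16) | 3;
+ *	write(fd, &image_type, sizeof(image_type));
+ *
+ * where the image number must be below MAX_ONYX_IMAGES (or
+ * MAX_CUDA_IMAGES for the CUDA interface).
+ */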
+
+/*
+ * Patch the images that need to know the IVA addresses.
+ */
+static void perf_patch_images(void)
+{
+#if 0 /* FIXME!! */
+/*
+ * NOTE: this routine is VERY specific to the current TLB image.
+ * If the image is changed, this routine might also need to be changed.
+ */
+ extern void $i_itlb_miss_2_0();
+ extern void $i_dtlb_miss_2_0();
+ extern void PA2_0_iva();
+
+ /*
+ * We can only use the lower 32-bits, the upper 32-bits should be 0
+ * anyway given this is in the kernel
+ */
+ uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
+ uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
+ uint32_t IVAaddress = (uint32_t)&PA2_0_iva;
+
+ if (perf_processor_interface == ONYX_INTF) {
+ /* clear last 2 bytes */
+ onyx_images[TLBMISS][15] &= 0xffffff00;
+ /* set 2 bytes */
+ onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
+ onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
+ onyx_images[TLBMISS][17] = itlb_addr;
+
+ /* clear last 2 bytes */
+ onyx_images[TLBHANDMISS][15] &= 0xffffff00;
+ /* set 2 bytes */
+ onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
+ onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
+ onyx_images[TLBHANDMISS][17] = itlb_addr;
+
+ /* clear last 2 bytes */
+ onyx_images[BIG_CPI][15] &= 0xffffff00;
+ /* set 2 bytes */
+ onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
+ onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
+ onyx_images[BIG_CPI][17] = itlb_addr;
+
+ onyx_images[PANIC][15] &= 0xffffff00; /* clear last 2 bytes */
+ onyx_images[PANIC][15] |= (0x000000ff&((IVAaddress) >> 24)); /* set 2 bytes */
+ onyx_images[PANIC][16] = (IVAaddress << 8)&0xffffff00;
+
+
+ } else if (perf_processor_interface == CUDA_INTF) {
+ /* Cuda interface */
+ cuda_images[TLBMISS][16] =
+ (cuda_images[TLBMISS][16]&0xffff0000) |
+ ((dtlb_addr >> 8)&0x0000ffff);
+ cuda_images[TLBMISS][17] =
+ ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
+ cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
+
+ cuda_images[TLBHANDMISS][16] =
+ (cuda_images[TLBHANDMISS][16]&0xffff0000) |
+ ((dtlb_addr >> 8)&0x0000ffff);
+ cuda_images[TLBHANDMISS][17] =
+ ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
+ cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
+
+ cuda_images[BIG_CPI][16] =
+ (cuda_images[BIG_CPI][16]&0xffff0000) |
+ ((dtlb_addr >> 8)&0x0000ffff);
+ cuda_images[BIG_CPI][17] =
+ ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
+ cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
+ } else {
+ /* Unknown type */
+ }
+#endif
+}
+
+
+/*
+ * ioctl routine
+ * All routines affect the processor that they are executed on. Thus you
+ * must be running on the processor that you wish to change.
+ */
+
+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long error_start;
+ uint32_t raddr[4];
+ int error = 0;
+
+ switch (cmd) {
+
+ case PA_PERF_ON:
+ /* Start the counters */
+ perf_start_counters();
+ break;
+
+ case PA_PERF_OFF:
+ error_start = perf_stop_counters(raddr);
+ if (error_start != 0) {
+ printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start);
+ error = -EFAULT;
+ break;
+ }
+
+ /* copy out the Counters */
+ if (copy_to_user((void __user *)arg, raddr,
+ sizeof (raddr)) != 0) {
+ error = -EFAULT;
+ break;
+ }
+ break;
+
+ case PA_PERF_VERSION:
+ /* Return the version # */
+ error = put_user(PERF_VERSION, (int *)arg);
+ break;
+
+ default:
+ error = -ENOTTY;
+ }
+
+ return error;
+}
+
+static const struct file_operations perf_fops = {
+ .llseek = no_llseek,
+ .read = perf_read,
+ .write = perf_write,
+ .unlocked_ioctl = perf_ioctl,
+ .compat_ioctl = perf_ioctl,
+ .open = perf_open,
+ .release = perf_release
+};
+
+static struct miscdevice perf_dev = {
+ MISC_DYNAMIC_MINOR,
+ PA_PERF_DEV,
+ &perf_fops
+};
+
+/*
+ * Initialize the module
+ */
+static int __init perf_init(void)
+{
+ int ret;
+
+ /* Determine correct processor interface to use */
+ bitmask_array = perf_bitmasks;
+
+ if (boot_cpu_data.cpu_type == pcxu ||
+ boot_cpu_data.cpu_type == pcxu_) {
+ perf_processor_interface = ONYX_INTF;
+ } else if (boot_cpu_data.cpu_type == pcxw ||
+ boot_cpu_data.cpu_type == pcxw_ ||
+ boot_cpu_data.cpu_type == pcxw2 ||
+ boot_cpu_data.cpu_type == mako ||
+ boot_cpu_data.cpu_type == mako2) {
+ perf_processor_interface = CUDA_INTF;
+ if (boot_cpu_data.cpu_type == pcxw2 ||
+ boot_cpu_data.cpu_type == mako ||
+ boot_cpu_data.cpu_type == mako2)
+ bitmask_array = perf_bitmasks_piranha;
+ } else {
+ perf_processor_interface = UNKNOWN_INTF;
+ printk("Performance monitoring counters not supported on this processor\n");
+ return -ENODEV;
+ }
+
+ ret = misc_register(&perf_dev);
+ if (ret) {
+ printk(KERN_ERR "Performance monitoring counters: "
+ "cannot register misc device.\n");
+ return ret;
+ }
+
+ /* Patch the images to match the system */
+ perf_patch_images();
+
+ spin_lock_init(&perf_lock);
+
+ /* TODO: this only lets us access the first cpu.. what to do for SMP? */
+ cpu_device = per_cpu(cpu_data, 0).dev;
+ printk("Performance monitoring counters enabled for %s\n",
+ per_cpu(cpu_data, 0).dev->name);
+
+ return 0;
+}
+
+/*
+ * perf_start_counters(void)
+ *
+ * Start the counters.
+ */
+static void perf_start_counters(void)
+{
+ /* Enable performance monitor counters */
+ perf_intrigue_enable_perf_counters();
+}
+
+/*
+ * perf_stop_counters
+ *
+ * Stop the performance counters and save counts
+ * in a per_processor array.
+ */
+static int perf_stop_counters(uint32_t *raddr)
+{
+ uint64_t userbuf[MAX_RDR_WORDS];
+
+ /* Disable performance counters */
+ perf_intrigue_disable_perf_counters();
+
+ if (perf_processor_interface == ONYX_INTF) {
+ uint64_t tmp64;
+ /*
+ * Read the counters
+ */
+ if (!perf_rdr_read_ubuf(16, userbuf))
+ return -13;
+
+ /* Counter0 is bits 1398 to 1429 */
+ tmp64 = (userbuf[21] << 22) & 0x00000000ffc00000;
+ tmp64 |= (userbuf[22] >> 42) & 0x00000000003fffff;
+ /* OR sticky0 (bit 1430) to counter0 bit 32 */
+ tmp64 |= (userbuf[22] >> 10) & 0x0000000080000000;
+ raddr[0] = (uint32_t)tmp64;
+
+ /* Counter1 is bits 1431 to 1462 */
+ tmp64 = (userbuf[22] >> 9) & 0x00000000ffffffff;
+ /* OR sticky1 (bit 1463) to counter1 bit 32 */
+ tmp64 |= (userbuf[22] << 23) & 0x0000000080000000;
+ raddr[1] = (uint32_t)tmp64;
+
+ /* Counter2 is bits 1464 to 1495 */
+ tmp64 = (userbuf[22] << 24) & 0x00000000ff000000;
+ tmp64 |= (userbuf[23] >> 40) & 0x0000000000ffffff;
+ /* OR sticky2 (bit 1496) to counter2 bit 32 */
+ tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
+ raddr[2] = (uint32_t)tmp64;
+
+ /* Counter3 is bits 1497 to 1528 */
+ tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
+ /* OR sticky3 (bit 1529) to counter3 bit 32 */
+ tmp64 |= (userbuf[23] << 25) & 0x0000000080000000;
+ raddr[3] = (uint32_t)tmp64;
+
+ /*
+ * Zero out the counters
+ */
+
+ /*
+ * The counters and sticky-bits comprise the last 132 bits
+ * (1398 - 1529) of RDR16 on a U chip. We'll zero these
+ * out the easy way: zero out last 10 bits of dword 21,
+ * all of dword 22 and 58 bits (plus 6 don't care bits) of
+ * dword 23.
+ */
+ userbuf[21] &= 0xfffffffffffffc00ul; /* 0 to last 10 bits */
+ userbuf[22] = 0;
+ userbuf[23] = 0;
+
+ /*
+ * Write back the zeroed bytes + the image given
+ * the read was destructive.
+ */
+ perf_rdr_write(16, userbuf);
+ } else {
+
+ /*
+ * Read RDR-15 which contains the counters and sticky bits
+ */
+ if (!perf_rdr_read_ubuf(15, userbuf)) {
+ return -13;
+ }
+
+ /*
+ * Clear out the counters
+ */
+ perf_rdr_clear(15);
+
+ /*
+ * Copy the counters
+ */
+ raddr[0] = (uint32_t)((userbuf[0] >> 32) & 0x00000000ffffffffUL);
+ raddr[1] = (uint32_t)(userbuf[0] & 0x00000000ffffffffUL);
+ raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
+ raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
+ }
+
+ return 0;
+}
+
+/*
+ * perf_rdr_get_entry
+ *
+ * Retrieve a pointer to the description of what this
+ * RDR contains.
+ */
+static const struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num)
+{
+ if (perf_processor_interface == ONYX_INTF) {
+ return &perf_rdr_tbl_U[rdr_num];
+ } else {
+ return &perf_rdr_tbl_W[rdr_num];
+ }
+}
+
+/*
+ * perf_rdr_read_ubuf
+ *
+ * Read the RDR value into the buffer specified.
+ */
+static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
+{
+ uint64_t data, data_mask = 0;
+ uint32_t width, xbits, i;
+ const struct rdr_tbl_ent *tentry;
+
+ tentry = perf_rdr_get_entry(rdr_num);
+ if ((width = tentry->width) == 0)
+ return 0;
+
+ /* Clear out buffer */
+ i = tentry->num_words;
+ while (i--) {
+ buffer[i] = 0;
+ }
+
+ /* Check whether the width is an exact multiple of 64 bits */
+ if ((xbits = width & 0x03f) != 0) {
+ data_mask = 1;
+ data_mask <<= (64 - xbits);
+ data_mask--;
+ }
+
+ /* Grab all of the data */
+ i = tentry->num_words;
+ while (i--) {
+
+ if (perf_processor_interface == ONYX_INTF) {
+ data = perf_rdr_shift_in_U(rdr_num, width);
+ } else {
+ data = perf_rdr_shift_in_W(rdr_num, width);
+ }
+ if (xbits) {
+ buffer[i] |= (data << (64 - xbits));
+ if (i) {
+ buffer[i-1] |= ((data >> xbits) & data_mask);
+ }
+ } else {
+ buffer[i] = data;
+ }
+ }
+
+ return 1;
+}
+
+/*
+ * perf_rdr_clear
+ *
+ * Zero out the given RDR register
+ */
+static int perf_rdr_clear(uint32_t rdr_num)
+{
+ const struct rdr_tbl_ent *tentry;
+ int32_t i;
+
+ tentry = perf_rdr_get_entry(rdr_num);
+
+ if (tentry->width == 0) {
+ return -1;
+ }
+
+ i = tentry->num_words;
+ while (i--) {
+ if (perf_processor_interface == ONYX_INTF) {
+ perf_rdr_shift_out_U(rdr_num, 0UL);
+ } else {
+ perf_rdr_shift_out_W(rdr_num, 0UL);
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * perf_write_image
+ *
+ * Write the given image out to the processor
+ */
+static int perf_write_image(uint64_t *memaddr)
+{
+ uint64_t buffer[MAX_RDR_WORDS];
+ uint64_t *bptr;
+ uint32_t dwords;
+ const uint32_t *intrigue_rdr;
+ const uint64_t *intrigue_bitmask;
+ uint64_t tmp64;
+ void __iomem *runway;
+ const struct rdr_tbl_ent *tentry;
+ int i;
+
+ /* Clear out counters */
+ if (perf_processor_interface == ONYX_INTF) {
+
+ perf_rdr_clear(16);
+
+ /* Toggle performance monitor */
+ perf_intrigue_enable_perf_counters();
+ perf_intrigue_disable_perf_counters();
+
+ intrigue_rdr = perf_rdrs_U;
+ } else {
+ perf_rdr_clear(15);
+ intrigue_rdr = perf_rdrs_W;
+ }
+
+ /* Write all RDRs */
+ while (*intrigue_rdr != -1) {
+ tentry = perf_rdr_get_entry(*intrigue_rdr);
+ perf_rdr_read_ubuf(*intrigue_rdr, buffer);
+ bptr = &buffer[0];
+ dwords = tentry->num_words;
+ if (tentry->write_control) {
+ intrigue_bitmask = &bitmask_array[tentry->write_control >> 3];
+ while (dwords--) {
+ tmp64 = *intrigue_bitmask & *memaddr++;
+ tmp64 |= (~(*intrigue_bitmask++)) & *bptr;
+ *bptr++ = tmp64;
+ }
+ } else {
+ while (dwords--) {
+ *bptr++ = *memaddr++;
+ }
+ }
+
+ perf_rdr_write(*intrigue_rdr, buffer);
+ intrigue_rdr++;
+ }
+
+ /*
+ * Now copy out the Runway stuff which is not in RDRs
+ */
+
+ if (cpu_device == NULL)
+ {
+ printk(KERN_ERR "write_image: cpu_device not yet initialized!\n");
+ return -1;
+ }
+
+ runway = ioremap_nocache(cpu_device->hpa.start, 4096);
+
+ /* Merge intrigue bits into Runway STATUS 0 */
+ tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
+ __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
+ runway + RUNWAY_STATUS);
+
+ /* Write RUNWAY DEBUG registers */
+ for (i = 0; i < 8; i++) {
+ __raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
+ }
+
+ return 0;
+}
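+
+/*
+ * Illustrative sketch (not part of this patch): the write_control path
+ * and the Runway STATUS update above are both instances of a masked
+ * merge, where the mask selects which bits come from the new image and
+ * every other bit keeps its current value.  The helper name is
+ * hypothetical:
+ */
+static inline uint64_t perf_masked_merge(uint64_t cur, uint64_t image,
+					 uint64_t mask)
+{
+	/* bits set in mask come from image, the rest come from cur */
+	return (image & mask) | (cur & ~mask);
+}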
+
+/*
+ * perf_rdr_write
+ *
+ * Write the given RDR register with the contents
+ * of the given buffer.
+ */
+static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer)
+{
+ const struct rdr_tbl_ent *tentry;
+ int32_t i;
+
+printk("perf_rdr_write\n");
+ tentry = perf_rdr_get_entry(rdr_num);
+ if (tentry->width == 0) { return; }
+
+ i = tentry->num_words;
+ while (i--) {
+ if (perf_processor_interface == ONYX_INTF) {
+ perf_rdr_shift_out_U(rdr_num, buffer[i]);
+ } else {
+ perf_rdr_shift_out_W(rdr_num, buffer[i]);
+ }
+ }
+printk("perf_rdr_write done\n");
+}
+
+module_init(perf_init);
diff --git a/arch/parisc/kernel/perf_asm.S b/arch/parisc/kernel/perf_asm.S
new file mode 100644
index 00000000..fa6ea99b
--- /dev/null
+++ b/arch/parisc/kernel/perf_asm.S
@@ -0,0 +1,1692 @@
+
+/* low-level asm for "intrigue" (PA8500-8700 CPU perf counters)
+ *
+ * Copyright (C) 2001 Randolph Chung <tausq at parisc-linux.org>
+ * Copyright (C) 2001 Hewlett-Packard (Grant Grundler)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/assembly.h>
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#ifdef CONFIG_64BIT
+ .level 2.0w
+#endif /* CONFIG_64BIT */
+
+#define MTDIAG_1(gr) .word 0x14201840 + gr*0x10000
+#define MTDIAG_2(gr) .word 0x14401840 + gr*0x10000
+#define MFDIAG_1(gr) .word 0x142008A0 + gr
+#define MFDIAG_2(gr) .word 0x144008A0 + gr
+#define STDIAG(dr) .word 0x14000AA0 + dr*0x200000
+#define SFDIAG(dr) .word 0x14000BA0 + dr*0x200000
+#define DR2_SLOW_RET 53
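+
+; Worked example (illustrative, not part of this file): the assembler has
+; no mnemonics for the diagnose instructions, so they are emitted as raw
+; opcode words.  MFDIAG_1(28) expands to ".word 0x142008bc"
+; (0x142008a0 + 28), i.e. copy %dr1 into %r28, and MTDIAG_1(1) expands to
+; ".word 0x14211840" (0x14201840 + 1*0x10000), i.e. copy %r1 into %dr1.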
+
+
+;
+; Enable the performance counters
+;
+; The coprocessor only needs to be enabled when
+; starting/stopping the coprocessor with the pmenb/pmdis.
+;
+ .text
+
+ENTRY(perf_intrigue_enable_perf_counters)
+ .proc
+ .callinfo frame=0,NO_CALLS
+ .entry
+
+ ldi 0x20,%r25 ; load up perfmon bit
+ mfctl ccr,%r26 ; get coprocessor register
+ or %r25,%r26,%r26 ; set bit
+ mtctl %r26,ccr ; turn on performance coprocessor
+ pmenb ; enable performance monitor
+ ssm 0,0 ; dummy op to ensure completion
+ sync ; follow ERS
+ andcm %r26,%r25,%r26 ; clear bit now
+ mtctl %r26,ccr ; turn off performance coprocessor
+ nop ; NOPs as specified in ERS
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ bve (%r2)
+ nop
+ .exit
+ .procend
+ENDPROC(perf_intrigue_enable_perf_counters)
+
+ENTRY(perf_intrigue_disable_perf_counters)
+ .proc
+ .callinfo frame=0,NO_CALLS
+ .entry
+ ldi 0x20,%r25 ; load up perfmon bit
+ mfctl ccr,%r26 ; get coprocessor register
+ or %r25,%r26,%r26 ; set bit
+ mtctl %r26,ccr ; turn on performance coprocessor
+ pmdis ; disable performance monitor
+ ssm 0,0 ; dummy op to ensure completion
+ andcm %r26,%r25,%r26 ; clear bit now
+ bve (%r2)
+ mtctl %r26,ccr ; turn off performance coprocessor
+ .exit
+ .procend
+ENDPROC(perf_intrigue_disable_perf_counters)
+
+;***********************************************************************
+;*
+;* Name: perf_rdr_shift_in_W
+;*
+;* Description:
+;* This routine shifts data in from the RDR in arg0 and returns
+;* the result in ret0. If the RDR is <= 64 bits in length, it
+;*	is shifted back out immediately.  This is to compensate
+;* for RDR10 which has bits that preclude PDC stack operations
+;* when they are in the wrong state.
+;*
+;* Arguments:
+;* arg0 : rdr to be read
+;* arg1 : bit length of rdr
+;*
+;* Returns:
+;* ret0 = next 64 bits of rdr data from staging register
+;*
+;* Register usage:
+;* arg0 : rdr to be read
+;* arg1 : bit length of rdr
+;* %r24 - original DR2 value
+;* %r1 - scratch
+;* %r29 - scratch
+;*
+;* Returns:
+;* ret0 = RDR data (right justified)
+;*
+;***********************************************************************
+
+ENTRY(perf_rdr_shift_in_W)
+ .proc
+ .callinfo frame=0,NO_CALLS
+ .entry
+;
+; read(shift in) the RDR.
+;
+
+; NOTE: The PCX-W ERS states that DR2_SLOW_RET must be set before any
+; shifting is done, from or to, remote diagnose registers.
+;
+
+ depdi,z 1,DR2_SLOW_RET,1,%r29
+ MFDIAG_2 (24)
+ or %r24,%r29,%r29
+ MTDIAG_2 (29) ; set DR2_SLOW_RET
+
+ nop
+ nop
+ nop
+ nop
+
+;
+; Cacheline start (32-byte cacheline)
+;
+ nop
+ nop
+ nop
+ extrd,u arg1,63,6,%r1 ; setup shift amount by bits to move
+
+ mtsar %r1
+ shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
+ blr %r1,%r0 ; branch to 8-instruction sequence
+ nop
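+
+; Illustrative note (not part of this file): blr scales the index register
+; by 8 bytes when forming its target, so with %r1 = 4 * RDR each RDR
+; number selects its own 32-byte (8-instruction) slot in the table below.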
+
+;
+; Cacheline start (32-byte cacheline)
+;
+
+ ;
+ ; RDR 0 sequence
+ ;
+ SFDIAG (0)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1) ; mtdiag %dr1, %r1
+ STDIAG (0)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 1 sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (1)
+ ssm 0,0
+ MFDIAG_1 (28)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+ nop
+
+ ;
+ ; RDR 2 read sequence
+ ;
+ SFDIAG (2)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (2)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 3 read sequence
+ ;
+ b,n perf_rdr_shift_in_W_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ ;
+ ; RDR 4 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (4)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 5 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (5)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 6 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (6)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 7 read sequence
+ ;
+ b,n perf_rdr_shift_in_W_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ ;
+ ; RDR 8 read sequence
+ ;
+ b,n perf_rdr_shift_in_W_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ ;
+ ; RDR 9 read sequence
+ ;
+ b,n perf_rdr_shift_in_W_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ ;
+ ; RDR 10 read sequence
+ ;
+ SFDIAG (10)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (10)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 11 read sequence
+ ;
+ SFDIAG (11)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (11)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 12 read sequence
+ ;
+ b,n perf_rdr_shift_in_W_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ ;
+ ; RDR 13 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (13)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 14 read sequence
+ ;
+ SFDIAG (14)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (14)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 15 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (15)
+ ssm 0,0
+ MFDIAG_1 (28)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+ nop
+
+ ;
+ ; RDR 16 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (16)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 17 read sequence
+ ;
+ SFDIAG (17)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (17)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 18 read sequence
+ ;
+ SFDIAG (18)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (18)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 19 read sequence
+ ;
+ b,n perf_rdr_shift_in_W_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ ;
+ ; RDR 20 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (20)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 21 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (21)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 22 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (22)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 23 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (23)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 24 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (24)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 25 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (25)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 26 read sequence
+ ;
+ SFDIAG (26)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (26)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 27 read sequence
+ ;
+ SFDIAG (27)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (27)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 28 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (28)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 29 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (29)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 30 read sequence
+ ;
+ SFDIAG (30)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (30)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 31 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (31)
+ ssm 0,0
+ MFDIAG_1 (28)
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; Fallthrough
+ ;
+
+perf_rdr_shift_in_W_leave:
+ bve (%r2)
+ .exit
+ MTDIAG_2 (24) ; restore DR2
+ .procend
+ENDPROC(perf_rdr_shift_in_W)
+
+
+;***********************************************************************
+;*
+;* Name: perf_rdr_shift_out_W
+;*
+;* Description:
+;*	This routine moves data to the RDRs.  The double-word in
+;*	arg1 is loaded into the staging register.
+;* Then the STDIAG instruction for the RDR # in arg0 is called
+;* to move the data to the RDR.
+;*
+;* Arguments:
+;* arg0 = rdr number
+;* arg1 = 64-bit value to write
+;* %r24 - DR2 | DR2_SLOW_RET
+;* %r23 - original DR2 value
+;*
+;* Returns:
+;* None
+;*
+;* Register usage:
+;*
+;***********************************************************************
+
+ENTRY(perf_rdr_shift_out_W)
+ .proc
+ .callinfo frame=0,NO_CALLS
+ .entry
+;
+; NOTE: The PCX-W ERS states that DR2_SLOW_RET must be set before any
+; shifting is done, from or to, the remote diagnose registers.
+;
+
+ depdi,z 1,DR2_SLOW_RET,1,%r24
+ MFDIAG_2 (23)
+ or %r24,%r23,%r24
+ MTDIAG_2 (24) ; set DR2_SLOW_RET
+ MTDIAG_1 (25) ; data to the staging register
+ shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
+ blr %r1,%r0 ; branch to 8-instruction sequence
+ nop
+
+ ;
+ ; RDR 0 write sequence
+ ;
+ sync ; RDR 0 write sequence
+ ssm 0,0
+ STDIAG (0)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 1 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (1)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 2 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (2)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 3 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (3)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 4 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (4)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 5 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (5)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 6 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (6)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 7 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (7)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 8 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (8)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 9 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (9)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 10 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (10)
+ STDIAG (26)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 11 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (11)
+ STDIAG (27)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 12 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (12)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 13 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (13)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 14 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (14)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 15 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (15)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 16 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (16)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 17 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (17)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 18 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (18)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 19 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (19)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 20 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (20)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 21 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (21)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 22 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (22)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 23 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (23)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 24 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (24)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 25 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (25)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 26 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (10)
+ STDIAG (26)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 27 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (11)
+ STDIAG (27)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 28 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (28)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 29 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (29)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 30 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (30)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 31 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (31)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+perf_rdr_shift_out_W_leave:
+ bve (%r2)
+ .exit
+ MTDIAG_2 (23) ; restore DR2
+ .procend
+ENDPROC(perf_rdr_shift_out_W)
+
+
+;***********************************************************************
+;*
+;*	Name:	perf_rdr_shift_in_U
+;*
+;* Description:
+;* This routine shifts data in from the RDR in arg0 and returns
+;* the result in ret0. If the RDR is <= 64 bits in length, it
+;*	is shifted back out immediately.  This is to compensate
+;* for RDR10 which has bits that preclude PDC stack operations
+;* when they are in the wrong state.
+;*
+;* Arguments:
+;* arg0 : rdr to be read
+;* arg1 : bit length of rdr
+;*
+;* Returns:
+;* ret0 = next 64 bits of rdr data from staging register
+;*
+;* Register usage:
+;* arg0 : rdr to be read
+;* arg1 : bit length of rdr
+;* %r24 - original DR2 value
+;* %r23 - DR2 | DR2_SLOW_RET
+;* %r1 - scratch
+;*
+;***********************************************************************
+
+ENTRY(perf_rdr_shift_in_U)
+ .proc
+ .callinfo frame=0,NO_CALLS
+ .entry
+
+; read(shift in) the RDR.
+;
+; NOTE: The PCX-U ERS states that DR2_SLOW_RET must be set before any
+; shifting is done, from or to, remote diagnose registers.
+
+ depdi,z 1,DR2_SLOW_RET,1,%r29
+ MFDIAG_2 (24)
+ or %r24,%r29,%r29
+ MTDIAG_2 (29) ; set DR2_SLOW_RET
+
+ nop
+ nop
+ nop
+ nop
+
+;
+; Start of next 32-byte cacheline
+;
+ nop
+ nop
+ nop
+ extrd,u arg1,63,6,%r1
+
+ mtsar %r1
+ shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
+ blr %r1,%r0 ; branch to 8-instruction sequence
+ nop
+
+;
+; Start of next 32-byte cacheline
+;
+ SFDIAG (0) ; RDR 0 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (0)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (1) ; RDR 1 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (1)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ sync ; RDR 2 read sequence
+ ssm 0,0
+ SFDIAG (4)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 3 read sequence
+ ssm 0,0
+ SFDIAG (3)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 4 read sequence
+ ssm 0,0
+ SFDIAG (4)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 5 read sequence
+ ssm 0,0
+ SFDIAG (5)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 6 read sequence
+ ssm 0,0
+ SFDIAG (6)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 7 read sequence
+ ssm 0,0
+ SFDIAG (7)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ b,n perf_rdr_shift_in_U_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ SFDIAG (9) ; RDR 9 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (9)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (10) ; RDR 10 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (10)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (11) ; RDR 11 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (11)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (12) ; RDR 12 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (12)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (13) ; RDR 13 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (13)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (14) ; RDR 14 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (14)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (15) ; RDR 15 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (15)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ sync ; RDR 16 read sequence
+ ssm 0,0
+ SFDIAG (16)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ SFDIAG (17) ; RDR 17 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (17)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (18) ; RDR 18 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (18)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ b,n perf_rdr_shift_in_U_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ sync ; RDR 20 read sequence
+ ssm 0,0
+ SFDIAG (20)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 21 read sequence
+ ssm 0,0
+ SFDIAG (21)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 22 read sequence
+ ssm 0,0
+ SFDIAG (22)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 23 read sequence
+ ssm 0,0
+ SFDIAG (23)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 24 read sequence
+ ssm 0,0
+ SFDIAG (24)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 25 read sequence
+ ssm 0,0
+ SFDIAG (25)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ SFDIAG (26) ; RDR 26 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (26)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (27) ; RDR 27 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (27)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ sync ; RDR 28 read sequence
+ ssm 0,0
+ SFDIAG (28)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ b,n perf_rdr_shift_in_U_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ SFDIAG (30) ; RDR 30 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (30)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (31) ; RDR 31 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (31)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+ nop
+
+perf_rdr_shift_in_U_leave:
+ bve (%r2)
+ .exit
+ MTDIAG_2 (24) ; restore DR2
+ .procend
+ENDPROC(perf_rdr_shift_in_U)
+
+;***********************************************************************
+;*
+;*	Name:	perf_rdr_shift_out_U
+;*
+;* Description:
+;*	This routine moves data to the RDRs.  The double-word in
+;*	arg1 is loaded into the staging register.
+;* Then the STDIAG instruction for the RDR # in arg0 is called
+;* to move the data to the RDR.
+;*
+;* Arguments:
+;* arg0 = rdr target
+;*	arg1 = 64-bit value to write
+;*
+;* Returns:
+;* None
+;*
+;* Register usage:
+;* arg0 = rdr target
+;*	arg1 = 64-bit value to write
+;* %r24 - DR2 | DR2_SLOW_RET
+;* %r23 - original DR2 value
+;*
+;***********************************************************************
+
+ENTRY(perf_rdr_shift_out_U)
+ .proc
+ .callinfo frame=0,NO_CALLS
+ .entry
+
+;
+; NOTE: The PCX-U ERS states that DR2_SLOW_RET must be set before any
+; shifting is done, from or to, the remote diagnose registers.
+;
+
+ depdi,z 1,DR2_SLOW_RET,1,%r24
+ MFDIAG_2 (23)
+ or %r24,%r23,%r24
+ MTDIAG_2 (24) ; set DR2_SLOW_RET
+
+ MTDIAG_1 (25) ; data to the staging register
+ shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
+ blr %r1,%r0 ; branch to 8-instruction sequence
+ nop
+
+;
+; 32-byte cacheline aligned
+;
+
+ sync ; RDR 0 write sequence
+ ssm 0,0
+ STDIAG (0)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 1 write sequence
+ ssm 0,0
+ STDIAG (1)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 2 write sequence
+ ssm 0,0
+ STDIAG (2)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 3 write sequence
+ ssm 0,0
+ STDIAG (3)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 4 write sequence
+ ssm 0,0
+ STDIAG (4)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 5 write sequence
+ ssm 0,0
+ STDIAG (5)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 6 write sequence
+ ssm 0,0
+ STDIAG (6)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 7 write sequence
+ ssm 0,0
+ STDIAG (7)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 8 write sequence
+ ssm 0,0
+ STDIAG (8)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 9 write sequence
+ ssm 0,0
+ STDIAG (9)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 10 write sequence
+ ssm 0,0
+ STDIAG (10)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 11 write sequence
+ ssm 0,0
+ STDIAG (11)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 12 write sequence
+ ssm 0,0
+ STDIAG (12)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 13 write sequence
+ ssm 0,0
+ STDIAG (13)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 14 write sequence
+ ssm 0,0
+ STDIAG (14)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 15 write sequence
+ ssm 0,0
+ STDIAG (15)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 16 write sequence
+ ssm 0,0
+ STDIAG (16)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 17 write sequence
+ ssm 0,0
+ STDIAG (17)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 18 write sequence
+ ssm 0,0
+ STDIAG (18)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 19 write sequence
+ ssm 0,0
+ STDIAG (19)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 20 write sequence
+ ssm 0,0
+ STDIAG (20)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 21 write sequence
+ ssm 0,0
+ STDIAG (21)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 22 write sequence
+ ssm 0,0
+ STDIAG (22)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 23 write sequence
+ ssm 0,0
+ STDIAG (23)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 24 write sequence
+ ssm 0,0
+ STDIAG (24)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 25 write sequence
+ ssm 0,0
+ STDIAG (25)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 26 write sequence
+ ssm 0,0
+ STDIAG (26)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 27 write sequence
+ ssm 0,0
+ STDIAG (27)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 28 write sequence
+ ssm 0,0
+ STDIAG (28)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 29 write sequence
+ ssm 0,0
+ STDIAG (29)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 30 write sequence
+ ssm 0,0
+ STDIAG (30)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 31 write sequence
+ ssm 0,0
+ STDIAG (31)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+perf_rdr_shift_out_U_leave:
+ bve (%r2)
+ .exit
+ MTDIAG_2 (23) ; restore DR2
+ .procend
+ENDPROC(perf_rdr_shift_out_U)
+
diff --git a/arch/parisc/kernel/perf_images.h b/arch/parisc/kernel/perf_images.h
new file mode 100644
index 00000000..7fef9644
--- /dev/null
+++ b/arch/parisc/kernel/perf_images.h
@@ -0,0 +1,3138 @@
+/*
+ * Images for use with the Onyx (PCX-U) CPU interface
+ *
+ * Copyright (C) 2001 Randolph Chung <tausq at parisc-linux.org>
+ * Copyright (C) 2001 Hewlett-Packard (Grant Grundler)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef PERF_IMAGES_H
+#define PERF_IMAGES_H
+
+/* Magic numbers taken without modification from HPUX stuff */
+
+#define PCXU_IMAGE_SIZE 584
+
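+/*
+ * 584 bytes is 146 32-bit words per image, matching the 146 initializers
+ * in each block below.  An illustrative (hypothetical) sanity check from
+ * an init path could be:
+ *
+ *	BUILD_BUG_ON(PCXU_IMAGE_SIZE % sizeof(uint32_t) != 0);
+ */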
+static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = {
+/*
+ * CPI:
+ *
+ * Counts the following:
+ *
+ * ctr0 : total cycles
+ * ctr1 : total cycles where nothing retired
+ * ctr2 : total instructions retired, including nullified
+ * ctr3 : total instructions retired, less nullified instructions
+ */
+ {
+ 0x4c00c000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0x004e0004, 0x07ffffff, 0xffc01380,
+ 0x0101ffff, 0xfffff104, 0xe000c07f, 0xfffffffc,
+ 0x01380010, 0x1fffffff, 0xff000000, 0x00000000,
+ 0x00000fff, 0xff00000f, 0xffff0000, 0x0fffff00,
+ 0x000fffff, 0x00000000, 0x00000000, 0x00ffffff,
+ 0xfffff000, 0x0000000f, 0xffffffff, 0xff000000,
+ 0x0000ffff, 0xfffffff0, 0x00000000, 0x0fffffff,
+ 0xffff0000, 0x00000000, 0x6fffffff, 0xffffffff,
+ 0xfff55fff, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0xf0000030, 0x00003c00, 0x067f080c, 0x02019fc0,
+ 0x02804067, 0xf0009030, 0x19fc002c, 0x40067f08,
+ 0x0c12019f, 0xc0028440, 0x67f00091, 0x3019fc00,
+ 0x2fc007ff, 0xf800f001, 0xfffe003c, 0x007fff80,
+ 0x0f001fff, 0xe003c007, 0xfff800f0, 0x01fffe00,
+ 0x3c007fff, 0x800f001f, 0xffe003c0, 0x07fff800,
+ 0xf001fffe, 0x003c007f, 0xff800f00, 0x1fffe003,
+ 0xc007fff8, 0x00f001ff, 0xfe003c00, 0x7fff800f,
+ 0x001fffe0, 0x03c007ff, 0xf800f001, 0xfffe003c,
+ 0x007fff80, 0x0f001fff, 0xe003c007, 0xfff800f0,
+ 0x01fffe00, 0x3c007fff, 0x800f001f, 0xffe00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x6fff0000, 0x00000000, 0x60000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff},
+
+/* Bus utilization image (bus_util)
+ *
+ * ctr0 : counts address valid cycles
+ * ctr1 : counts data valid cycles
+ * ctr2 : counts overflow from counter 0
+ * ctr3 : counts overflow from counter 1
+ */
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xefefefef, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xff000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0x0000000c, 0x00003c00, 0x07930000, 0x0041e4c0,
+ 0x01002079, 0x3000800c, 0x1e4c0030, 0x00279300,
+ 0x010049e4, 0xc0014022, 0x79300090, 0x0c9e4c00,
+ 0x34004793, 0x00020051, 0xe4c00180, 0x24793000,
+ 0xa00d1e4c, 0x00380067, 0x93000300, 0x59e4c001,
+ 0xc0267930, 0x00b00d9e, 0x4c003fff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffc00,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xffff0000, 0x00000000, 0xf0000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00100000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff },
+
+/*
+ * TLB counts (same as tlbStats image):
+ *
+ * Counts the following:
+ *
+ * ctr0: DTLB misses
+ * ctr1: ITLB misses
+ * ctr2: total cycles in the miss handlers
+ * ctr3: total cycles
+ */
+
+ {
+ 0x0c00c000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe7e7e0e0, 0x004e0004, 0x07ffffff, 0xffc01380,
+ 0x0101ffff, 0xfffff104, 0xe000c06a, 0xafffc85c,
+ 0x01380010, 0x1fffffff, 0xff000000, 0x00000000,
+ 0x01b9e000, 0x0001b8c0, 0x00000000, 0x0fffff00,
+ 0x000fffff, 0x00000000, 0x00000000, 0x00400000,
+ 0x00001000, 0x00000004, 0x00000000, 0x01000000,
+ 0x0000ffff, 0xfffffff0, 0x00000000, 0x0fffffff,
+ 0xffff0000, 0x00000000, 0x6fffffff, 0xffffffff,
+ 0xfff55ff5, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0xf0000000, 0x00003c00, 0x01ff0001, 0x08007fc2,
+ 0x02c1001f, 0xf0807100, 0x1bfc200c, 0x4806ff00,
+ 0x03f001ff, 0xfe003c00, 0x7fff800f, 0x001fffe0,
+ 0x03c007ff, 0xf800f001, 0xfffe003c, 0x007fff80,
+ 0x0f001fff, 0xe003c007, 0xfff800f0, 0x01fffe00,
+ 0x3c007fff, 0x800f001f, 0xffe003c0, 0x07fff800,
+ 0xf001fffe, 0x003c007f, 0xff800f00, 0x1fffe003,
+ 0xc007fff8, 0x00f001ff, 0xfe003c00, 0x7fff800f,
+ 0x001fffe0, 0x03c007ff, 0xf800f001, 0xfffe003c,
+ 0x007fff80, 0x0f001fff, 0xe003c007, 0xfff800f0,
+ 0x01fffe00, 0x3c007fff, 0x800f001f, 0xffe00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x6fff0000, 0x00000000, 0x60000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff },
+
+/* tlbHandMiss
+ *
+ * ctr0: counts TLB misses
+ * ctr1: counts dmisses inside tlb miss handlers
+ * ctr2: counts cycles in the tlb miss handlers
+ * ctr3: counts overflows of ctr2
+ */
+{
+0x1c00c000,00000000,0x00060000,00000000,
+0xe7e7e0e0,0x004e0004,0x07ffffff,0xffc01380,
+0x0101ffff,0xfffff104,0xe000c06a,0xafffc85c,
+0x01380010,0x1fffffff,0xff000000,00000000,
+0x01b9e000,0x0001b8c0,00000000,0x0fffff00,
+0x000fffff,00000000,00000000,0x00400000,
+0x00001000,0x00000004,00000000,0x01000000,
+0x0000ffff,0xfffffff0,00000000,0x0fffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xfff55ff5,0xffffffff,0xffffffff,0xf0000000,
+0xf0000000,0x00003c00,0x01fd0000,0x08007f42,
+0x0281001f,0xd080a100,0x19f42008,0x44067d08,
+0x0612019f,0x400084c0,0x67d00060,0x0047f400,
+0x042011fd,0x080b0404,0x7f4202c4,0x0167d080,
+0x311059f4,0x201c4816,0x7d000313,0x059f4001,
+0xfc007fff,0x800f001f,0xffe003c0,0x07fff800,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x6fff0000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* branch_taken image (ptkn image)
+ *
+ * ctr0: overflow for ctr1
+ * ctr1: predicted taken branches, actually taken
+ * ctr2: all predicted taken branches (nullfied or not)
+ * ctr3: overflow for ctr2
+ */
+
+ {
+ 0xcc01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xa08080a0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xff000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0xf0000000, 0x00003c00, 0x04f90000, 0x02013e40,
+ 0x0081004f, 0x90004060, 0x13e40018, 0x0024f900,
+ 0x0802093e, 0x40028102, 0x4f9000c0, 0x6093e400,
+ 0x380014f9, 0x00010205, 0x3e4000c1, 0x014f9000,
+ 0x506053e4, 0x001c0034, 0xf9000902, 0x0d3e4002,
+ 0xc1034f90, 0x00d060d3, 0xe4003fff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffc00,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xffff0000, 0x00000000, 0xf0000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff },
+
+/* branch_nottaken (pntkn image)
+ *
+ * ctr0: overflow for ctr1
+ * ctr1: counts branches predicted not-taken, but actually taken
+ * ctr2: counts all predictable branches predicted not-taken
+ * ctr3: overflow for ctr2
+ */
+{
+0xcc01e000,00000000,0x00060000,00000000,
+0xc0c0c0e0,0xffb1fffb,0xfff7ffff,0xffffffff,
+0xffffffff,0xfffffffb,0x1fffbfff,0x7fffffff,
+0xfcc7ffff,0xffdffffa,0x5f000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+
+/* imiss image
+ *
+ * ctr0 : counts imiss aligned on 0
+ * ctr1 : counts imiss aligned on 4
+ * ctr2 : counts imiss aligned on 8
+ * ctr3 : counts imiss aligned on C
+ */
+ {
+ 0x0c00c000, 0x00000000, 0x00010000, 0x00000000,
+ 0xe7ebedee, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xff000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x6fffffff, 0xffffffff,
+ 0xfff55fff, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0xf0000000, 0x00003c00, 0x007f0000, 0x01001fc0,
+ 0x00408007, 0xf0002030, 0x01fc000c, 0x10007f00,
+ 0x0405001f, 0xc0014180, 0x07f00060, 0x7001fc00,
+ 0x1c20007f, 0x00080900, 0x1fc00242, 0x8007f000,
+ 0xa0b001fc, 0x002c3000, 0x7f000c0d, 0x001fc003,
+ 0x438007f0, 0x00e0f001, 0xfc003fff, 0xfffff800,
+ 0xfffffffe, 0x003fffff, 0xff800fff, 0xffffe003,
+ 0xfffffff8, 0x00ffffff, 0xfe003fff, 0xffff800f,
+ 0xffffffe0, 0x03ffffff, 0xf800ffff, 0xfffe003f,
+ 0xffffff80, 0x0fffffff, 0xe003ffff, 0xfff800ff,
+ 0xfffffe00, 0x3fffffff, 0x800fffff, 0xffe00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x6fff0000, 0x00000000, 0x60000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff},
+
+/* dmiss image
+ *
+ * ctr0 : counts cycles
+ * ctr1 : counts cycles where something retired
+ * ctr2 : counts dmisses
+ * ctr3 : (same as ctr2)
+ */
+ {
+ 0x3c00c000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xff000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x6fffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0xf0000000, 0x00003c04, 0x007f0009, 0x02001fc0,
+ 0x0280c007, 0xf000b040, 0x01fc0030, 0x14007f00,
+ 0x0d06001f, 0xc00381c0, 0x07f000f0, 0x8001fc00,
+ 0x2024007f, 0x00090a00, 0x1fc00282, 0xc007f000,
+ 0xb0c001fc, 0x00303400, 0x7f000d0e, 0x001fc003,
+ 0x83c007f0, 0x00f00001, 0xfc0023ff, 0xfffff800,
+ 0xfffffffe, 0x003fffff, 0xff800fff, 0xffffe003,
+ 0xfffffff8, 0x00ffffff, 0xfe003fff, 0xffff800f,
+ 0xffffffe0, 0x03ffffff, 0xf800ffff, 0xfffe003f,
+ 0xffffff80, 0x0fffffff, 0xe003ffff, 0xfff800ff,
+ 0xfffffe00, 0x3fffffff, 0x800fffff, 0xffe00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x6fff0000, 0x00000000, 0x60000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff },
+
+/* dcmiss
+ *
+ * ctr0: counts store instructions retired
+ * ctr1: counts load instructions retired
+ * ctr2: counts dmisses
+ * ctr3: counts READ_SHARED_OR_PRIV and READ_PRIVATE transactions on Runway
+ */
+{
+0x2c90c000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf00000e8,0x00003c02,0x00bf0001,0x02002fc0,
+0x0080a00b,0xf0003040,0x02fc0010,0x1200bf00,
+0x0506002f,0xc00181a0,0x0bf00070,0x8002fc00,
+0x202200bf,0x00090a00,0x2fc00282,0xa00bf000,
+0xb0c002fc,0x00303200,0xbf000d0e,0x002fc003,
+0x83a00bf0,0x00ffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0x6fff0000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0x55555555,0xd5555555,
+0x55555555,0x75555555,0x5e1ffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xf8000000,00000000,
+00000000,00000000,0xf4000000,00000000,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+00000000,00000000,0x00ffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* big_cpi
+ *
+ * ctr0: counts total cycles
+ * ctr1: counts overflows of ctr0 (for greater than 32-bit values)
+ * ctr2: counts overflows of ctr3 (for greater than 32-bit values)
+ * ctr3: counts unnullified instructions retired
+ */
+{
+0x0c00c000,00000000,0x00060000,00000000,
+0xe7e7e0e0,0x004e0004,0x07ffffff,0xffc01380,
+0x0101ffff,0xfffff104,0xe000c06a,0xafffc85c,
+0x01380010,0x1fffffff,0xff000000,00000000,
+0x01b9e000,0x0001b8c0,00000000,0x0fffff00,
+0x000fffff,00000000,00000000,0x00400000,
+0x00001000,0x00000004,00000000,0x01000000,
+0x0000ffff,0xfffffff0,00000000,0x0fffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xfff55ff5,0xffffffff,0xffffffff,0xf0000000,
+0xf0000010,0x00003c00,0x01760008,0x00025d80,
+0x02800417,0x6000c001,0x25d80038,0x04017600,
+0x0901025d,0x8002c044,0x176000d0,0x1125d800,
+0x3c2001f6,0x08080400,0x7d820203,0x001f6080,
+0x804027d8,0x20282009,0xf6080a0c,0x027d8202,
+0x81041f60,0x80c08107,0xd8203030,0x41f6080c,
+0x04127d82,0x0382049f,0x6080e0c1,0x27d82038,
+0x4006f608,0x081011bd,0x82030400,0xef6080a1,
+0x013bd820,0x384806f6,0x00081211,0xbd800304,
+0x80ef6000,0xa1213bd8,0x003bc007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x6fff0000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* big_ls
+ *
+ * ctr0: counts the total number of cycles for which local_stall_A1 is asserted.
+ * ctr1: is the overflow for counter 0.
+ * ctr2: counts IFLUSH_AV
+ * ctr3: is the overflow for counter 2.
+ */
+{
+0x0c000000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x0fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00029408,0x02f50002,0x0800bd40,
+0x0202802f,0x5000a000,0x4bd40004,0x0812f500,
+0x030804bd,0x40024281,0x2f5000b0,0x010bd400,
+0x100842f5,0x00060810,0xbd400302,0x842f5000,
+0xe0014bd4,0x00140852,0xf5000708,0x14bd4003,
+0x42852f50,0x00ff001f,0xffe003c0,0x07fff800,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x0df70000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* br_abort
+ *
+ * ctr0: counts BRAD_STALLH
+ * ctr1: counts ONE_QUAD
+ * ctr2: counts BR0_ABRT
+ * ctr3: counts BR1_ABRT
+ */
+{
+0x0c002000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x1fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x1a250000,00000000,0x10000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* isnt
+ *
+ * ctr0: counts the total number of cycles for which iside_notrans is asserted
+ * ctr1: counts the number of times iside_notrans is asserted for 1-4 cycles
+ * ctr2: counts the number of times iside_notrans is asserted for 5-7 cycles
+ * ctr3: counts the number of times iside_notrans is asserted for > 7 cycles
+ */
+{
+0x0c018000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xcfffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00021c20,0x03ff0808,0x1800ffc4,
+0x0204003f,0xf0004280,0x0ffc6020,0x8003ff00,
+0x043800ff,0xc8020c00,0x3ff00044,0x800ffca0,
+0x210003ff,0x00045800,0xffcc0214,0x003ff000,
+0x26800ffc,0xe0218003,0xff000278,0x00ffd002,
+0x1c003ff0,0x0028800f,0xfd002200,0x03ff0001,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0xcdff0000,00000000,0xc0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* quadrant
+ *
+ * ctr0: Total number of instructions in quadrant 0
+ * ctr1: Total number of instructions in quadrant 1
+ * ctr2: Total number of instructions in quadrant 2
+ * ctr3: Total number of instructions in quadrant 3
+ * Works only with 32-bit
+ */
+
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0x004e0004, 0x07ffffff, 0xffc01380,
+ 0x0101ffff, 0xfffff004, 0xe000407f, 0xfffffffc,
+ 0x01380010, 0x1fffffff, 0xff000000, 0x00000000,
+ 0x00000fff, 0xff00000f, 0xffff0000, 0x0fffff00,
+ 0x000fffff, 0x00000000, 0x00000000, 0x00ffffff,
+ 0xffcff000, 0x0000040f, 0xfffffffc, 0xff000000,
+ 0x0080ffff, 0xffffcff0, 0x0000000c, 0x0fffffff,
+ 0xfcff0000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xfff55ff5, 0x5fffffff, 0xffffffff, 0xf0000000,
+ 0xf00000f0, 0x00003c00, 0x007f0000, 0x01001fc0,
+ 0x00408007, 0xf0002030, 0x01fc000c, 0x10007f00,
+ 0x0405001f, 0xc0014180, 0x07f00060, 0x7001fc00,
+ 0x1c20007f, 0x00080900, 0x1fc00242, 0x8007f000,
+ 0xa0b001fc, 0x002c3000, 0x7f000c0d, 0x001fc003,
+ 0x438007f0, 0x00e0f001, 0xfc003fff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffc00,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xffff0000, 0x00000000, 0xf0000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff},
+
+/* rw_pdfet (READ_PRIV transactions)
+ *
+ * ctr0: counts address valid cycles
+ * ctr1: counts *all* data valid cycles
+ * ctr2: is the overflow from counter 0
+ * ctr3: is the overflow from counter 1
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xf8000000,00000000,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff},
+
+/* rw_wdfet (WRITEBACKS)
+ *
+ * ctr0: counts address valid cycles
+ * ctr1: counts *all* data valid cycles
+ * ctr2: is the overflow from counter 0
+ * ctr3: is the overflow from counter 1
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0x98000000,00000000,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff},
+
+/* shlib_cpi
+ *
+ * ctr0: Total number of instructions in quad 0
+ * ctr1: Total number of CPU clock cycles in quad 0
+ * ctr2: total instructions, excluding nullified instructions
+ * ctr3: total number of CPU clock cycles
+ */
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0x004e0004, 0x07ffffff, 0xffc01380,
+ 0x0101ffff, 0xfffff004, 0xe000407f, 0xfffffffc,
+ 0x01380010, 0x1fffffff, 0xff000000, 0x00000000,
+ 0x00000fff, 0xff00000f, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x00000000, 0x00000000, 0x00ffffff,
+ 0xffcff000, 0x0000000f, 0xfffffffc, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xfff77ff5, 0x7fffffff, 0xffffffff, 0xf0000000,
+ 0xf00000a0, 0x00003c00, 0x01ff0005, 0x08007fc0,
+ 0x03c1001f, 0xf08030c0, 0x07fc203c, 0x4001ff08,
+ 0x0118007f, 0xc003c500, 0x1ff08031, 0xc007fc00,
+ 0x3fffffff, 0xf800ffff, 0xfffe003f, 0xffffff80,
+ 0x0fffffff, 0xe003ffff, 0xfff800ff, 0xfffffe00,
+ 0x3fffffff, 0x800fffff, 0xffe003ff, 0xfffff800,
+ 0xfffffffe, 0x003fffff, 0xff800fff, 0xffffe003,
+ 0xfffffff8, 0x00ffffff, 0xfe003fff, 0xffff800f,
+ 0xffffffe0, 0x03ffffff, 0xf800ffff, 0xfffe003f,
+ 0xffffff80, 0x0fffffff, 0xe003ffff, 0xfff800ff,
+ 0xfffffe00, 0x3fffffff, 0x800fffff, 0xffe00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xffff0000, 0x00000000, 0xf0000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff},
+
+
+/* addr_inv_abort_alu
+ *
+ * ctr0: counts ABORT_ALU0L
+ * ctr1: counts ABORT_ALU1L
+ * ctr2: counts ADDR0_INVALID
+ * ctr3: counts ADDR1_INVALID
+ */
+
+{
+0x0c00c000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000d,0x01001fc0,
+0x03008007,0xf000f030,0x01fc0038,0x10007f00,
+0x0905001f,0xc0020180,0x07f000b0,0x7001fc00,
+0x2820007f,0x00050900,0x1fc00102,0x8007f000,
+0x70b001fc,0x00183000,0x7f00010d,0x001fc000,
+0x038007f0,0x0030f001,0xfc000bff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x65380000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+
+
+/* brad_stall
+ *
+ * ctr0: counts the total number of cycles for which brad_stall is asserted
+ * ctr1: counts the number of times brad_stall is asserted for 1-4 cycles
+ * ctr2: counts the number of times brad_stall is asserted for 5-7 cycles
+ * ctr3: counts the number of times brad_stall is asserted for > 7 cycles
+ */
+{
+0x0c002000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x1fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00021c20,0x03ff0808,0x1800ffc4,
+0x0204003f,0xf0004280,0x0ffc6020,0x8003ff00,
+0x043800ff,0xc8020c00,0x3ff00044,0x800ffca0,
+0x210003ff,0x00045800,0xffcc0214,0x003ff000,
+0x26800ffc,0xe0218003,0xff000278,0x00ffd002,
+0x1c003ff0,0x0028800f,0xfd002200,0x03ff0001,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x1bff0000,00000000,0x10000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* cntl_in_pipel
+ *
+ * ctr0: counts the total number of cycles for which cntl_in_pipel is asserted
+ * ctr1: counts the number of times cntl_in_pipel is asserted for 1-4 cycles
+ * ctr2: counts the number of times cntl_in_pipel is asserted for 5-7 cycles
+ * ctr3: counts the number of times cntl_in_pipel is asserted for > 7 cycles
+ */
+{
+0x0c006000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x3fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00021c00,0x03ff0808,0x1000ffc4,
+0x0206003f,0xf0004200,0x0ffc6020,0xa003ff00,
+0x043000ff,0xc8020e00,0x3ff00044,0x000ffca0,
+0x212003ff,0x00045000,0xffcc0216,0x003ff000,
+0x26000ffc,0xe021a003,0xff000270,0x00ffd002,
+0x1e003ff0,0x0028000f,0xfd002220,0x03ff0001,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x3fff0000,00000000,0x30000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+
+/* dsnt_xfh
+ *
+ * ctr0: counts dside_notrans
+ * ctr1: counts xfhang
+ * ctr2: is the overflow for ctr0
+ * ctr3: is the overflow for ctr1
+ */
+{
+0x0c018000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xcfffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00030000,0x01f30000,0x00087cc0,
+0x0040041f,0x30002001,0x87cc000c,0x1001f300,
+0x0404087c,0xc0014104,0x1f300060,0x4187cc00,
+0x1c2001f3,0x00080808,0x7cc00242,0x041f3000,
+0xa08187cc,0x002c3001,0xf3000c0c,0x087cc003,
+0x43041f30,0x00e0c187,0xcc003fc0,0x07fff800,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0xcb3f0000,00000000,0xc0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* fet_sig1
+ *
+ * ctr0: counts ICORE_AV
+ * ctr1: counts ITRANS_STALL
+ * ctr2: counts SEL_PCQH
+ * ctr3: counts OUT_OF_CONTEXT
+ */
+{
+0x0c000000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x0fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x07c10000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* fet_sig2
+ *
+ * ctr0: counts ICORE_AV
+ * ctr1: counts IRTN_AV
+ * ctr2: counts ADDRESS_INC
+ * ctr3: counts ADDRESS_DEC
+ */
+{
+0x0c000000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x0fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x06930000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* g7_1
+ *
+ * ctr0: counts HIT_RETRY0
+ * ctr1: counts HIT_RETRY1
+ * ctr2: counts GO_TAG_E
+ * ctr3: counts GO_TAG_O
+ */
+{
+0x0c00e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x7fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x71c10000,00000000,0x70000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* g7_2
+ *
+ * ctr0: counts HIT_DM0
+ * ctr1: counts HIT_DM1
+ * ctr2: counts GO_STORE_E
+ * ctr3: counts GO_STORE_O
+ */
+{
+0x0c00e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x7fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x72930000,00000000,0x70000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* g7_3
+ *
+ * ctr0: counts HIT_DV0
+ * ctr1: counts HIT_DV1
+ * ctr2: counts STBYPT_E (load bypasses from store queue)
+ * ctr3: counts STBYPT_O
+ */
+{
+0x0c00e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x7fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f0002,0x01001fc0,
+0x00c08007,0xf0000030,0x01fc0004,0x10007f00,
+0x0605001f,0xc001c180,0x07f00040,0x7001fc00,
+0x1420007f,0x000a0900,0x1fc002c2,0x8007f000,
+0x80b001fc,0x00243000,0x7f000e0d,0x001fc003,
+0xc38007f0,0x00c0f001,0xfc0037ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x77250000,00000000,0x70000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* g7_4
+ *
+ * ctr0: counts HIT_DIRTY0
+ * ctr1: counts HIT_DIRTY1
+ * ctr2: counts CA_BYP_E (quick launch)
+ * ctr3: counts CA_BYP_O
+ */
+{
+0x0c00e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x7fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x7bb70000,00000000,0x70000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+
+/* mpb_labort
+ *
+ * ctr0: counts L_ABORT_ALU0L
+ * ctr1: counts L_ABORT_ALU1L
+ * ctr2: counts MPB0H
+ * ctr3: counts MPB1H
+ */
+{
+0x0c00c000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x605c0000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* panic
+ *
+ * ctr0: is the overflow for counter 1
+ * ctr1: counts traps and RFIs
+ * ctr2: counts panic traps
+ * ctr3: is the overflow for counter 2
+ */
+{
+0x0c002000,00000000,0x00060000,00000000,
+0xe7efe0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffffc,
+0x41380030,0x1aabfff2,0x17000000,00000000,
+0x01b80000,0x3effffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,00000000,0x00400000,
+0x00001fff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x1fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+0xb0000000,0x00012c04,0x05790804,0x14013e44,
+0x0008004f,0x90000040,0x15e46000,0xc0047920,
+0x004a003e,0x40011080,0x0f900024,0x4003e460,
+0x00c80479,0x00023301,0x1e400100,0x4157d080,
+0x514053f4,0x40048014,0xfd000104,0x055f4600,
+0x4c0147d2,0x0014a043,0xf4001508,0x10fd0003,
+0x44043f46,0x004c8147,0xd0003330,0x51f40014,
+0x04257908,0x0c14093e,0x44020802,0x4f900080,
+0x4095e460,0x20c02479,0x20084a08,0x3e400310,
+0x820f9000,0xa44083e4,0x6020c824,0x79000a33,
+0x091e4003,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x10400000,00000000,0x10000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* rare_inst
+ *
+ * ctr0: counts sync and syncdma instructions
+ * ctr1: counts pxtlbx,x instructions
+ * ctr2: counts ixtlbt instructions
+ * ctr3: counts cycles
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0x004e000c,0x000843fc,0x85c09380,
+0x0121ebfd,0xff217124,0xe0004000,0x943fc85f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xe00000e0,0x00003c00,0x007f0001,0x01001fc0,
+0x00408007,0xf0003030,0x01fc000c,0x10007f00,
+0x0505001f,0xc0014180,0x07f00070,0x7001fc00,
+0x1c20007f,0x00090900,0x1fc00242,0x8007f000,
+0xb0b001fc,0x002c3000,0x7f000d0d,0x001fc003,
+0x438007f0,0x00f0f001,0xfc003fff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* rw_dfet (for D-cache misses and writebacks)
+ *
+ * ctr0: counts address valid cycles
+ * ctr1: counts *all* data valid cycles
+ * ctr2: is the overflow from counter 0
+ * ctr3: is the overflow from counter 1
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xf0000000,00000000,
+00000000,00000000,0x98000000,00000000,
+0xffffffff,0xffffffff,0x0fffffff,0xffffffff,
+00000000,00000000,0x00ffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* rw_ifet (I-cache misses -- actually dumb READ transactions)
+ *
+ * ctr0: counts address valid cycles
+ * ctr1: counts *all* data valid cycles
+ * ctr2: is the overflow from counter 0
+ * ctr3: is the overflow from counter 1
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xd0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+0xffffffff,0xffffffff,00000000,00000000,
+0xffffffff,0xffffffff },
+
+
+/* rw_sdfet (READ_SHARED_OR_PRIVATE transactions)
+ *
+ * ctr0: counts address valid cycles
+ * ctr1: counts *all* data valid cycles
+ * ctr2: is the overflow from counter 0
+ * ctr3: is the overflow from counter 1
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xf4000000,00000000,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff },
+
+
+/* spec_ifet
+ *
+ * ICORE_AV fires for every request which the Instruction Fetch Unit sends
+ * to the Runway Interface Block. Hence, this counts all I-misses, speculative
+ * or not, but does *not* include I-cache prefetches, which are generated by
+ * RIB.
+ * IRTN_AV fires twice for every I-cache miss returning from RIB to the IFU.
+ * It will not fire if a second I-cache miss is issued from the IFU to RIB
+ * before the first returns. Therefore, if the IRTN_AV count is much less
+ * than 2x the ICORE_AV count, many speculative I-cache misses are occurring
+ * which are "discovered" to be incorrect fairly quickly.
+ * The ratio of I-cache miss transactions on Runway to the ICORE_AV count is
+ * a measure of the effectiveness of instruction prefetching. This ratio
+ * should be between 1 and 2. If it is close to 1, most prefetches are
+ * eventually called for by the IFU; if it is close to 2, almost no prefetches
+ * are useful and they are wasted bus traffic.
+ *
+ * ctr0: counts ICORE_AV
+ * ctr1: counts IRTN_AV
+ * ctr2: counts all non-coherent READ transactions on Runway. (TTYPE D0)
+ * This should be just I-cache miss and I-prefetch transactions.
+ * ctr3: counts total processor cycles
+ */
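+/*
+ * Illustrative example only (hypothetical numbers, not taken from any
+ * measurement): if ctr0 (ICORE_AV) reads 1000 and ctr2 (non-coherent READ
+ * transactions on Runway) reads 1300, the prefetch ratio is 1300/1000 = 1.3.
+ * Being closer to 1 than to 2, this would indicate that most prefetched
+ * lines are eventually requested by the IFU.
+ */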
+{
+0x0c000000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x0fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x00000008,0x00030c00,0x01bf0001,0x00806fc0,
+0x00c1001b,0xf0005048,0x06fc001c,0x2001bf00,
+0x0908806f,0xc002c300,0x1bf000d0,0xc806fc00,
+0x3fffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0x06bf0000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00110000,00000000,0xd0ffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+0xffffffff,0xffffffff,00000000,00000000,
+0xffffffff,0xffffffff },
+
+/* st_cond0
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts major ops 0C and 0E (fp ops, not fmac or fmpyadd)
+ * ctr2: counts B,L (including long and push) and GATE (including nullified),
+ * predicted not-taken
+ * ctr3: is the overflow for ctr2
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc13380,
+0x0101ffff,0xffa1f057,0xe000407f,0xdfffc87f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_cond1
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts major ops 1x (most of the load/stores)
+ * ctr2: counts CMPB (dw) predicted not-taken
+ * ctr3: is the overflow for ctr2
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc01b80,
+0x0101ffff,0xffb7f03d,0xe000407f,0xffffc8ff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_cond2
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts major op 03
+ * ctr2: counts CMPIB (dw) predicted not-taken
+ * ctr3: is the overflow for ctr2
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc09780,
+0x0101ffff,0xff21f077,0xe000407f,0xffffc87f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_cond3
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts major ops 06 & 26
+ * ctr2: counts BB, BVB, MOVB, MOVIB (incl. nullified) predicted not-taken
+ * ctr3: is the overflow for ctr2
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc03780,
+0x0101ffff,0xff29f016,0xe000407f,0xffffe97f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_cond4
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts major op 2E
+ * ctr2: counts CMPB, CMPIB, ADDB, ADDIB (incl. nullified) predicted not-taken
+ * ctr3: is the overflow for ctr2
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc17780,
+0x0101ffff,0xff21f014,0xe000407f,0xffffe9ff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_unpred0
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts BE and BE,L
+ * ctr2: counts BE and BE,L including nullified
+ * ctr3: is the overflow for ctr2
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0c0c0e0,0xffffffff,0xffffffff,0xffdf5bbf,
+0xffffffff,0xff25f7d6,0xefffffff,0xffffc97f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_unpred1
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts BLR, BV, BVE, BVE,L
+ * ctr2: counts BLR, BV, BVE, BVE,L including nullified
+ * ctr3: is the overflow for ctr2
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0c0c0e0,0xffffffff,0xffffffff,0xffc15f80,
+0x0501ff7f,0xff21f057,0xe001407f,0xdfffc87f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* unpred
+ *
+ * ctr0: counts non-nullified unpredictable branches
+ * ctr1: is the overflow for ctr0
+ * ctr2: counts all unpredictable branches (nullified or not)
+ * ctr3: is the overflow for ctr2
+ */
+{
+0xcc01e000,00000000,0x00060000,00000000,
+0x20202020,0xff31ffff,0xfff7fffe,0x97ffcc7f,
+0xfffffdff,0xffa5fff3,0x1fffffff,0x7fffe97f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf00000a0,0x00003c00,0x02f50000,0x0004bd40,
+0x0040802f,0x50002020,0x4bd4000c,0x0042f500,
+0x040014bd,0x40014084,0x2f500060,0x214bd400,
+0x1c2002f5,0x00080804,0xbd400242,0x802f5000,
+0xa0a04bd4,0x002c2042,0xf5000c08,0x14bd4003,
+0x42842f50,0x00e0a14b,0xd4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+
+/* go_store
+ *
+ * ctr0: Overflow for counter 2
+ * ctr1: Overflow for counter 3
+ * ctr2: count of GO_STORE_E signal
+ * ctr3: count of GO_STORE_O signal
+ */
+
+ {
+ 0x0c00e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffa5ffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xff000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x7fffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0x00000000, 0x0000c000, 0x067c0000, 0x01019f00,
+ 0x00408067, 0xc0002030, 0x19f0000c, 0x000e7c00,
+ 0x0401039f, 0x00014080, 0xe7c00060, 0x3039f000,
+ 0x1c00167c, 0x00080105, 0x9f000240, 0x8167c000,
+ 0xa03059f0, 0x002c001e, 0x7c000c01, 0x079f0003,
+ 0x4081e7c0, 0x00e03079, 0xf0003fc0, 0x07fff800,
+ 0xf001fffe, 0x003c007f, 0xff800f00, 0x1fffe003,
+ 0xc007fff8, 0x00f001ff, 0xfe003c00, 0x7fff800f,
+ 0x001fffe0, 0x03c007ff, 0xf800f001, 0xfffe003c,
+ 0x007fff80, 0x0f001fff, 0xe003c007, 0xfff800f0,
+ 0x01fffe00, 0x3c007fff, 0x800f001f, 0xffe00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x70130000, 0x00000000, 0x70000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff
+ },
+
+
+/* shlib_call
+ *
+ * ctr0: SharedLib call Depth1
+ * ctr1: SharedLib call Depth2
+ * ctr2: SharedLib call Depth3
+ * ctr3: SharedLib call Depth>3
+ */
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0xc76fa005, 0x07dd7e9c, 0x87115b80,
+ 0x01100200, 0x07200004, 0xe000407f, 0xfffffffc,
+ 0x01380010, 0x1fffffff, 0xff000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0xf0000000, 0x00003c20, 0x01ff0808, 0x04007fc0,
+ 0x0003001f, 0xf0000180, 0x07fc4010, 0x5001ff00,
+ 0x001c007f, 0xc2000a00, 0x1ff18022, 0x4007fc20,
+ 0x00b001ff, 0x10003800, 0x7fc8004d, 0x001ff100,
+ 0x03c007fc, 0x60012001, 0xff280144, 0x007fc600,
+ 0x13001ff2, 0x00058007, 0xfcc00550, 0x01ff2000,
+ 0x5c007fca, 0x001a001f, 0xf3801640, 0x07fca001,
+ 0xb001ff30, 0x0078007f, 0xd0005d00, 0x1ff30007,
+ 0xc007fce0, 0x022001ff, 0x48018400, 0x7fce0023,
+ 0x001ff400, 0x098007fd, 0x20065001, 0xff40009c,
+ 0x007fd200, 0x3fffffff, 0x800fffff, 0xffe00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xffff0000, 0x00000000, 0xf0000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff
+ }
+};
+#define PCXW_IMAGE_SIZE 576
+
+static uint32_t cuda_images[][PCXW_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = {
+/*
+ * CPI: FROM CPI.IDF (Image 0)
+ *
+ * Counts the following:
+ *
+ * ctr0 : total cycles
+ * ctr1 : total cycles where nothing retired
+ * ctr2 : total instructions retired, including nullified
+ * ctr3 : total instructions retired, less nullified instructions
+ */
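+/*
+ * Illustrative example only (hypothetical numbers): cycles-per-instruction
+ * for the sampled interval can be derived as ctr0 / ctr3 (or ctr0 / ctr2 if
+ * nullified slots are to be included), e.g. 2,000,000 cycles over 1,000,000
+ * retired instructions gives a CPI of 2.0.
+ */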
+ {
+ 0x4c00c000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0x00001fff, 0xfc00007f, 0xfff00001,
+ 0xffffc000, 0x07ffff00, 0x07ffffff, 0x6007ffff,
+ 0xff0007ff, 0xffff0007, 0xffffff00, 0x00000000,
+ 0x60f00000, 0x0fffff00, 0x000fffff, 0x00000fff,
+ 0xff00000f, 0xffff0000, 0x00000000, 0x00ffffff,
+ 0xfffff000, 0x0000000f, 0xffffffff, 0xff000000,
+ 0x0000ffff, 0xfffffff0, 0x00000000, 0x0fffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00270000, 0x00000055,
+ 0x0200000e, 0x4d300000, 0x00000000, 0x0ff00002,
+ 0x70000000, 0x00000020, 0x0000e400, 0x00000ff0,
+ 0x00000000, 0x00000000, 0x00000055, 0xffffff00,
+ 0x00000000, 0x0000ff00, 0x00000000, 0x0f000000,
+ 0x0000055f, 0xfffff000, 0x00000000, 0x000ff000,
+ 0x00000000, 0x00000000, 0x000055ff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0xf0000000,
+ 0x000055ff, 0xffff0000, 0x00000000, 0x00ff0000,
+ 0x00000000, 0x00000000, 0x00055fff, 0xfff00000,
+ 0x00000000, 0x0ff00000, 0x00000030, 0x00000000,
+ 0x00157fff, 0xffc00000, 0x034c0000, 0x00000000,
+ 0x03fc0000, 0x00000000, 0x6fff0000, 0x00000000,
+ 0x60000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* Bus utilization image FROM BUS_UTIL.IDF (Image 1)
+ *
+ * ctr0 : counts address valid cycles
+ * ctr1 : counts data valid cycles
+ * ctr2 : counts overflow from counter 0
+ * ctr3 : counts overflow from counter 1
+ */
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xefefefef, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00001b00, 0xaa000000,
+ 0x00000001, 0x30700000, 0x00055aaf, 0xf0000000,
+ 0x01b00000, 0x00000000, 0x00001037, 0x00000000,
+ 0x55aaff00, 0x00c00000, 0x1b55aa00, 0x00000000,
+ 0x0001fff0, 0xcfffff00, 0x00000000, 0x0f0fffff,
+ 0xffffffff, 0xffffffff, 0x30ffff0c, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xfffffff3,
+ 0x0ffff0cf, 0xffff0000, 0x00000000, 0x00ffffff,
+ 0xffffffff, 0xfffffff3, 0x0ffff0cf, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffff30,
+ 0xfff70000, 0x000055aa, 0xff000000, 0x000006d5,
+ 0x40000000, 0x00000000, 0x731c0000, 0x000156ab,
+ 0xfc000000, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00100000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/*
+ * TLB counts: FROM TLBSTATS.IDF (Image 2)
+ *
+ * Counts the following:
+ *
+ * ctr0: DTLB misses
+ * ctr1: ITLB misses
+ * ctr2: total cycles in the miss handlers
+ * ctr3: total cycles
+ */
+
+ {
+ 0x0c00c000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe7e7e0e0, 0x00001fff, 0xfc00007f, 0xfff00001,
+ 0xfff00000, 0x07ffff00, 0x07ffffff, 0x6007ffff,
+ 0xa00007ff, 0xffff0007, 0xffffff00, 0x00000000,
+ 0x603001c1, 0xe0000001, 0xc0c00000, 0x00000fff,
+ 0xff00000f, 0xffff0000, 0x00000000, 0x00400000,
+ 0x00001000, 0x00000004, 0x00000000, 0x01000000,
+ 0x0000ffff, 0xfffffff0, 0x00000000, 0x0fffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00800000, 0x00153f7f,
+ 0x55000000, 0xaf800000, 0xc0000000, 0x0403f240,
+ 0x00000000, 0x00001010, 0x00004700, 0x00000ff0,
+ 0x00000000, 0x00000000, 0x00000055, 0xffffff00,
+ 0x00000000, 0x0000ff00, 0x00000000, 0x0f000000,
+ 0x0000055f, 0xfffff000, 0x00000000, 0x000ff000,
+ 0x00000000, 0x00000000, 0x000055ff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0xf0000000,
+ 0x000055ff, 0xffff0000, 0x00000000, 0x00ff0000,
+ 0x00000000, 0x00000000, 0x00055fff, 0xfff00000,
+ 0x00000000, 0x0ff00000, 0x00000000, 0x00000000,
+ 0x00157fff, 0xffc00000, 0x00000000, 0x3fc00000,
+ 0x00040000, 0x00000000, 0x6fff0000, 0x00000000,
+ 0x60000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* tlbhandler FROM tlbHandMiss.idf (Image 3)
+ *
+ * ctr0: TLB misses
+ * ctr1: dcache misses inside the TLB miss handler
+ * ctr2: cycles in the TLB miss handler
+ * ctr3: overflow of ctr2
+ */
+ {
+ 0x1c00c000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe7e7e0e0, 0x00001fff, 0xfc00007f, 0xfff00001,
+ 0xfff00000, 0x07ffff00, 0x07ffffff, 0x6007ffff,
+ 0xa00007ff, 0xffff0007, 0xffffff00, 0x00000000,
+ 0x603001c1, 0xe0000001, 0xc0c00000, 0x00000fff,
+ 0xff00000f, 0xffff0000, 0x00000000, 0x00400000,
+ 0x00001000, 0x00000004, 0x00000000, 0x01000000,
+ 0x0000ffff, 0xfffffff0, 0x00000000, 0x0fffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x006c0000, 0x01000054,
+ 0x02000002, 0xc3200000, 0xc00aa000, 0x0c03f240,
+ 0x00000000, 0x00001010, 0x000044f4, 0x00000c00,
+ 0xaa0000f0, 0x0f0000b0, 0x00005005, 0x0f5f0000,
+ 0x0001f000, 0x0000ff00, 0x00000000, 0x0f000000,
+ 0x0000055f, 0xfffff000, 0x00000000, 0x000ff000,
+ 0x00000000, 0x00000000, 0x000055ff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0xf0000000,
+ 0x000055ff, 0xffff0000, 0x00000000, 0x00ff0000,
+ 0x00000000, 0x00000000, 0x00055fff, 0xfff00000,
+ 0x00000000, 0x0ff00a00, 0x000f0000, 0x24004000,
+ 0x15400001, 0x40c00003, 0x3da00000, 0x0002a800,
+ 0x00ff0000, 0x00000000, 0x6fff0000, 0x00000000,
+ 0x60000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* branch_taken image FROM PTKN.IDF (Image 4)
+ *
+ * ctr0: mispredicted branches
+ * ctr1: predicted taken branches, actually taken
+ * ctr2: predicted taken branches (includes nullified)
+ * ctr3: all branches
+ */
+
+ {
+ 0xcc01e000, 0x00000000, 0x00000000, 0x00000000,
+ 0xa08080a0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xfffffeff, 0xfffeffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf4ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xd22d0000, 0x00000000,
+ 0x0000000b, 0x46000000, 0x00000000, 0x0ffff900,
+ 0x90000000, 0x00000000, 0x0000907e, 0x00000000,
+ 0x000000ff, 0xff00bfdf, 0x03030303, 0x03030000,
+ 0x000dbfff, 0xffffff00, 0x00000000, 0x0f0fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffff5555, 0x55500000, 0x003f3ff0, 0x2766c000,
+ 0x00000000, 0x00000002, 0x67840000, 0x00000000,
+ 0x03fffc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* branch_nottaken FROM PNTKN.IDF (Image 5)
+ *
+ * ctr0: mispredicted branches
+ * ctr1: branches predicted not-taken, but actually taken
+ * ctr2: branches predicted not-taken (includes nullified)
+ * ctr3: all branches
+ */
+ {
+ 0xcc01e000, 0x00000000, 0x00000000, 0x00000000,
+ 0xe0c0c0e0, 0xffffffff, 0xffffffff, 0xffefffff,
+ 0xffffbfff, 0xfffffeff, 0xfffeffff, 0xfffffeff,
+ 0xfffffffe, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf4ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xd22d0000, 0x00000000,
+ 0x0000000b, 0x46000000, 0x00000000, 0x0ffff900,
+ 0x90000000, 0x00000000, 0x0000907e, 0x00000000,
+ 0x000000ff, 0xff00bfdf, 0x03030303, 0x03030000,
+ 0x000dbfff, 0xffffff00, 0x00000000, 0x0f0fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffff5555, 0x55500000, 0x003f3ff0, 0x2766c000,
+ 0x00000000, 0x00000002, 0x67840000, 0x00000000,
+ 0x03fffc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* IMISS image (Image 6)
+ *
+ * ctr0 : icache misses for retired instructions
+ * ctr1 : total cycles
+ * ctr2 : dcache misses for retired instructions
+ * ctr3 : number of retired instructions
+ */
+ {
+ 0x2801e000, 0x00000000, 0x00010000, 0x00000000,
+ 0x00001000, 0xffffffff, 0xffffffff, 0xfff00fff,
+ 0xfffa3fff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xf2fdf0f0, 0xf0f0f0f0,
+ 0xffffffff, 0xf6c00000, 0x00000000, 0x0ff55800,
+ 0x90000000, 0x00000000, 0x0000b0ff, 0xfffffff0,
+ 0x00000003, 0x0100bfff, 0x3f3f3f3f, 0x3f3f5555,
+ 0x555fffff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xfff00000, 0x000301b0, 0x2fefcfcf,
+ 0xcfcfcfcf, 0xd5555557, 0xf7b40000, 0x00000000,
+ 0x03c14000, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00130000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* DMISS image (Image 7)
+ *
+ * ctr0 : icache misses for retired instructions
+ * ctr1 : total cycles
+ * ctr2 : dcache misses for retired instructions
+ * ctr3 : number of retired instructions
+ */
+ {
+ 0x2801e000, 0x00000000, 0x00010000, 0x00000000,
+ 0x00001000, 0xffffffff, 0xffffffff, 0xfff00fff,
+ 0xfffa3fff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xf2fdf0f0, 0xf0f0f0f0,
+ 0xffffffff, 0xf6c00000, 0x00000000, 0x0ff55800,
+ 0x90000000, 0x00000000, 0x0000b0ff, 0xfffffff0,
+ 0x00000003, 0x0100bfff, 0x3f3f3f3f, 0x3f3f5555,
+ 0x555fffff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xfff00000, 0x000301b0, 0x2fefcfcf,
+ 0xcfcfcfcf, 0xd5555557, 0xf7b40000, 0x00000000,
+ 0x03c14000, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00130000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* dmiss_access image FROM DMISS_RATIO.IDF (Image 8)
+ *
+ * ctr0 : all loads and stores that retire (even lines)
+ * ctr1 : all loads and stores that retire (odd lines)
+ * ctr2 : dcache misses of retired loads/stores
+ * ctr3 : all READ_PRIV and READ_SHAR_OR_PRIV on Runway
+ * (Speculative and Non-Speculative)
+ */
+ {
+ 0x2d81e000, 0x00000000, 0x00000000, 0x00000000,
+ 0x10101010, 0x00ffffff, 0xa003ffff, 0xfe800fff,
+ 0xfffa003f, 0xffffe8ff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xd2280a00, 0x00000000,
+ 0x0000000b, 0x46000000, 0x00000005, 0x555ff900,
+ 0x80200000, 0x00000000, 0x0000907e, 0x00000000,
+ 0x00005555, 0xff80bf8b, 0xab030303, 0x03030000,
+ 0x000dbfff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffff5555, 0x55500000, 0x15153fe0, 0x27628880,
+ 0x00000000, 0x00000002, 0x67840000, 0x00000001,
+ 0x5557fc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00110000, 0x00000000,
+ 0xf4ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xf8ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+
+/* big_cpi image (Image 9)
+ *
+ * ctr0 : Total number of CPU clock cycles.
+ * ctr1 : Unused
+ * ctr2 : Unused
+ * ctr3 : Total number of Non-Nullified instructions retired.
+ */
+ {
+ 0x0c00c000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe7e7e0e0, 0x00001fff, 0xfc00007f, 0xfff00001,
+ 0xfff00000, 0x07ffff00, 0x07ffffff, 0x6007ffff,
+ 0xa00007ff, 0xffff0007, 0xffffff00, 0x00000000,
+ 0x603001c1, 0xe0000001, 0xc0c00000, 0x00000fff,
+ 0xff00000f, 0xffff0000, 0x00000000, 0x00400000,
+ 0x00001000, 0x00000004, 0x00000000, 0x01000000,
+ 0x0000ffff, 0xfffffff0, 0x00000000, 0x0fffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00550005, 0x00220000,
+ 0x0000000c, 0x71f00000, 0x00f00aa0, 0x0aaff000,
+ 0x00005002, 0x20000000, 0x0000c413, 0x00000c0f,
+ 0x00aa0000, 0xff00b600, 0x000500a0, 0x00000300,
+ 0x000cc3f0, 0x0000c0f0, 0x0aa0000f, 0xff000000,
+ 0x011000a0, 0x05503000, 0x00d03700, 0x00000f00,
+ 0xaa005500, 0x00000000, 0x000055ff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0xf000aa00,
+ 0x11000a00, 0x55000000, 0x0d037000, 0x00c0f00a,
+ 0xa0055000, 0x0db00005, 0x5002a000, 0x00300000,
+ 0xf40f0000, 0x0c0f00aa, 0x0000ff10, 0x27400000,
+ 0x00008000, 0x00c00003, 0x037c0000, 0x003c02a8,
+ 0x02abfc00, 0x00000000, 0x6fff0000, 0x00000000,
+ 0x60000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* big_ls image (Image 10)
+ *
+ * ctr0 : Total number of CPU clock cycles during which local_stall_A1 is asserted
+ * ctr1 : Overflow of Counter 0
+ * ctr2 : Total number of IFLUSH_AV
+ * ctr3 : Overflow of Counter 2
+ */
+ {
+ 0x0c000000, 0x00000000, 0x00060000, 0x00000000,
+ 0xefefefef, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x28880001, 0x54000000,
+ 0x00000004, 0xb6200000, 0x000aaaa0, 0x05555288,
+ 0x80000010, 0x00000000, 0x0000486e, 0x00000000,
+ 0xaaaa0055, 0x55002888, 0x00545401, 0x03030000,
+ 0x0007b000, 0x0000ff00, 0x00000000, 0x05000000,
+ 0x0000055f, 0xfffff000, 0x00000000, 0x000ff000,
+ 0x00000000, 0x00000000, 0x000055ff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0x00000000,
+ 0x000055ff, 0xffff0000, 0x00000000, 0x00ff0000,
+ 0x00000000, 0xa0000000, 0x00055fff, 0xfff00000,
+ 0x00aa0000, 0x05502a2a, 0x00151500, 0x0a220015,
+ 0x40400000, 0x00000001, 0xe2980000, 0x0002aaa8,
+ 0x01555400, 0x00000000, 0x0df70000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* br_abort image (Image 12)
+ *
+ * ctr0 : Total number of BRAD_STALLH
+ * ctr1 : Total number of ONE_QUAD
+ * ctr2 : Total number of BR0_ABRT
+ * ctr3 : Total number of BR1_ABRT
+ */
+
+ {
+ 0x0c002000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0xffffffff, 0xffffffff, 0xff0fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0x1077ffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x551b0000, 0x00000000,
+ 0x0000000c, 0xd4f00000, 0x00000000, 0x0ffff001,
+ 0xb0000000, 0x00000000, 0x0000fd4c, 0x00000000,
+ 0x000000ff, 0xff00ff1b, 0x00000000, 0x00000000,
+ 0x0000d000, 0x0000ff00, 0x00000000, 0x0e0fffff,
+ 0xffffffff, 0xfffff000, 0x00000000, 0x000ff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0x00ffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0x00ff0000,
+ 0x00000000, 0xffffffff, 0xffffffff, 0xfff00000,
+ 0x00400000, 0x00000000, 0x00ffff00, 0x2a86c000,
+ 0x00000000, 0x00000000, 0xf50c0000, 0x00000000,
+ 0x03fffc00, 0x00000000, 0x1a250000, 0x00000000,
+ 0x10000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+
+/* isnt image (Image 13)
+ *
+ * ctr0 : Total number of cycles for which iside_notrans is asserted.
+ * ctr1 : Total number of times iside_notrans is asserted for 1-4 cycles.
+ * ctr2 : Total number of times iside_notrans is asserted for 5-7 cycles.
+ * ctr3 : Total number of times iside_notrans is asserted for > 7 cycles.
+ */
+
+ {
+ 0x0c018000, 0x00000000, 0x00060000, 0x00000000,
+ 0xefefefef, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xc0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x22000000, 0x000001bc,
+ 0x10000006, 0x00900000, 0x50000000, 0x00055a20,
+ 0x00000000, 0x00016060, 0x0000c021, 0x00000540,
+ 0x00000000, 0x55002200, 0x00000000, 0x56bc4000,
+ 0x00048000, 0x0000ff00, 0x00000000, 0x17000000,
+ 0x0000055f, 0xfffff000, 0x00000000, 0x000ff000,
+ 0x00000000, 0x00000000, 0x000055ff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0x00000000,
+ 0x000055ff, 0xffff0000, 0x00000000, 0x00ff0000,
+ 0x00000000, 0x80000000, 0x00015bf3, 0xf5500000,
+ 0x02210000, 0x00100000, 0x00005500, 0x08800000,
+ 0x00001545, 0x85000001, 0x80240000, 0x11000000,
+ 0x00015400, 0x00000000, 0xcdff0000, 0x00000000,
+ 0xc0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* quadrant image (image 14)
+ *
+ * ctr0 : Total number of instructions in quadrant 0.
+ * ctr1 : Total number of instructions in quadrant 1.
+ * ctr2 : Total number of instructions in quadrant 2.
+ * ctr3 : Total number of instructions in quadrant 3.
+ *
+ * Only works for 32-bit applications.
+ */
+
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0x00001fff, 0xfc00007f, 0xfff00001,
+ 0xffffc000, 0x07ffff00, 0x07ffffff, 0x0007ffff,
+ 0xff0007ff, 0xffff0007, 0xffffff00, 0x00000000,
+ 0xf0000000, 0x0fffff00, 0x000fffff, 0x00000fff,
+ 0xff00000f, 0xffff0000, 0x00000000, 0x00ffffff,
+ 0xffcff000, 0x0000040f, 0xfffffffc, 0xff000000,
+ 0x0080ffff, 0xffffcff0, 0x0000000c, 0x0fffffff,
+ 0xfcff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x551b0000, 0x00000000,
+ 0x00000003, 0x17000000, 0x00000000, 0x0ffff001,
+ 0xb0000000, 0x00000000, 0x00000173, 0x00000000,
+ 0x000000ff, 0xff00ff1b, 0x00000000, 0x00000000,
+ 0x000f1ff0, 0xcfffff00, 0x00000000, 0x0f0fffff,
+ 0xffffffff, 0xffffffff, 0x30ffff0c, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xfffffff3,
+ 0x0ffff0cf, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xfffffff3, 0x0ffff0cf, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffff30,
+ 0xff7f0000, 0x00000000, 0x00fffff0, 0x2a86c000,
+ 0x00000000, 0x00000003, 0x05f00000, 0x00000000,
+ 0x03fffc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* rw_pdfet image (Image 15)
+ *
+ * ctr0 : Total of all READ_PRIV address valid cycles.
+ * ctr1 : Total of all READ_PRIV data valid cycles.
+ * ctr2 : Overflow of Counter 0.
+ * ctr3 : Overflow of Counter 1.
+ */
+
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xefefefef, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00001b00, 0xaa000000,
+ 0x00000001, 0x30700000, 0x00055aaf, 0xf0000000,
+ 0x01b00000, 0x00000000, 0x00001037, 0x00000000,
+ 0x55aaff00, 0x00c00000, 0x1b55aa00, 0x00000000,
+ 0x0001fff0, 0xcfffff00, 0x00000000, 0x0f0fffff,
+ 0xffffffff, 0xffffffff, 0x30ffff0c, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xfffffff3,
+ 0x0ffff0cf, 0xffff0000, 0x00000000, 0x00ffffff,
+ 0xffffffff, 0xfffffff3, 0x0ffff0cf, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffff30,
+ 0xfff70000, 0x000055aa, 0xff000000, 0x000006d5,
+ 0x40000000, 0x00000000, 0x731c0000, 0x000156ab,
+ 0xfc000000, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00100000, 0x00000000,
+ 0xf8000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0x00ffffff, 0xffffffff, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
+ },
+
+
+/* rw_wdfet image (Image 16)
+ *
+ * ctr0 : Counts total number of writeback transactions.
+ * ctr1 : Total number of data valid Runway cycles.
+ * ctr2 : Overflow of Counter 0.
+ * ctr3 : Overflow of Counter 1.
+ */
+
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xefefefef, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00001b00, 0xaa000000,
+ 0x00000001, 0x30700000, 0x00055aaf, 0xf0000000,
+ 0x01b00000, 0x00000000, 0x00001037, 0x00000000,
+ 0x55aaff00, 0x00c00000, 0x1b55aa00, 0x00000000,
+ 0x0001fff0, 0xcfffff00, 0x00000000, 0x0f0fffff,
+ 0xffffffff, 0xffffffff, 0x30ffff0c, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xfffffff3,
+ 0x0ffff0cf, 0xffff0000, 0x00000000, 0x00ffffff,
+ 0xffffffff, 0xfffffff3, 0x0ffff0cf, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffff30,
+ 0xfff70000, 0x000055aa, 0xff000000, 0x000006d5,
+ 0x40000000, 0x00000000, 0x731c0000, 0x000156ab,
+ 0xfc000000, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00100000, 0x00000000,
+ 0x98000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0x00ffffff, 0xffffffff, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
+ },
+
+/* shlib_cpi image (Image 17)
+ *
+ * ctr0 : Total number of instructions in quadrant 0.
+ * ctr1 : Total number of CPU clock cycles in quadrant 0.
+ * ctr2 : Total number of Non-Nullified instructions retired.
+ * ctr3 : Total number of CPU clock cycles.
+ *
+ * Only works for 32-bit shared libraries.
+ */
+
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0x00001fff, 0xfc00007f, 0xfff00001,
+ 0xffffc000, 0x07ffff00, 0x07ffffff, 0x0007ffff,
+ 0xff0007ff, 0xffff0007, 0xffffff00, 0x00000000,
+ 0xf0150000, 0x0fffff00, 0x000fffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0x00ffffff,
+ 0xffcff000, 0x0000000f, 0xfffffffc, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x27000000, 0x00000055,
+ 0x02000005, 0x7f500000, 0xc0000000, 0x000ff270,
+ 0x00000000, 0x00000000, 0x00007700, 0x00000ff0,
+ 0x00000000, 0x0000ffff, 0xffffffff, 0xffffff00,
+ 0x00000000, 0x0000ff00, 0x00000000, 0x0f0fffff,
+ 0xffffffff, 0xfffff000, 0x00000000, 0x000ff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0x00ff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xfff00000,
+ 0x00000000, 0x0ff00000, 0x000000a0, 0x3fffffff,
+ 0xffffffff, 0xffc00000, 0x03d40000, 0x20000000,
+ 0x0003fc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* flop image (Image 18)
+ *
+ * ctr0 : Total number of floating point instructions (opcode = 0xc).
+ * ctr1 : Total number of floating point instructions (opcode = 0xe, 0x6, 0x2e, 0x26).
+ * ctr2 : Unused
+ * ctr3 : Unused
+ */
+
+ {
+ 0x0001e000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00001010, 0x33ffffff, 0x006fffff, 0xfc5fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xd22d0000, 0x00000000,
+ 0x0000000b, 0x46000000, 0x00000000, 0x0ffff900,
+ 0x90000000, 0x00000000, 0x0000907e, 0x00000000,
+ 0x000000ff, 0xff00bfdf, 0x03030303, 0x03030000,
+ 0x000dbfff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffff5555, 0x55500000, 0x003f3ff0, 0x2766c000,
+ 0x00000000, 0x00000002, 0x67840000, 0x00000000,
+ 0x03fffc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00130000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* cachemiss image FROM I_D_MISSES.IDF (Image 19)
+ *
+ * ctr0 : icache misses for retired instructions
+ * ctr1 : total cycles
+ * ctr2 : dcache misses for retired instructions
+ * ctr3 : number of retired instructions
+ */
+ {
+ 0x2801e000, 0x00000000, 0x00010000, 0x00000000,
+ 0x00001000, 0xffffffff, 0xffffffff, 0xfff00fff,
+ 0xfffa3fff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xf2fdf0f0, 0xf0f0f0f0,
+ 0xffffffff, 0xf6c00000, 0x00000000, 0x0ff55800,
+ 0x90000000, 0x00000000, 0x0000b0ff, 0xfffffff0,
+ 0x00000003, 0x0100bfff, 0x3f3f3f3f, 0x3f3f5555,
+ 0x555fffff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xfff00000, 0x000301b0, 0x2fefcfcf,
+ 0xcfcfcfcf, 0xd5555557, 0xf7b40000, 0x00000000,
+ 0x03c14000, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00130000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* branch FROM br_report3.idf
+ *
+ * ctr0 : Total number of mispredicted branches.
+ * ctr1 : Some Non-Nullified unpredictable branches.
+ * ctr2 : Total number of branches (Nullified + Non-Nullified)
+ * (Unpredicted+ Predicted Taken +Predicted Not Taken).
+ * Total of All Branches.
+ * ctr3 : Remaining Non-Nullified unpredictable branches.
+ */
+ {
+ 0x4001e000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xffffffff, 0xff9fffff, 0xfe0fffff,
+ 0xffffbaff, 0xfdffc0ff, 0xfffdffff, 0xfffffeff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf4ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xd22d0000, 0x00000000,
+ 0x0000000b, 0x46000000, 0x00000000, 0x0ffff900,
+ 0x90000000, 0x00000000, 0x0000907e, 0x00000000,
+ 0x000000ff, 0xff00bfdf, 0x03030303, 0x03030000,
+ 0x000dbfff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffff5555, 0x55500000, 0x003f3ff0, 0x2766c000,
+ 0x00000000, 0x00000002, 0x67840000, 0x00000000,
+ 0x03fffc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00130000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* crstack FROM crs_report.idf
+ *
+ * ctr0: correctly predicted branches by the pop_latch
+ * ctr1: some procedure returns
+ * ctr2: all branches, (includes nullified)
+ * ctr3: remaining procedure returns
+ */
+ {
+ 0x4001e000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xffffffff, 0xffa10300, 0x000fffff,
+ 0xffffbaf8, 0x3000007f, 0xffffffff, 0xfffffeff,
+ 0xff7fffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf2ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xd22d0000, 0x00000000,
+ 0x0000000b, 0x46000000, 0x00000000, 0x0ffff900,
+ 0x90000000, 0x00000000, 0x0000907e, 0x00000000,
+ 0x000000ff, 0xff00bfdf, 0x03030303, 0x03030000,
+ 0x000dbfff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffff5555, 0x55500000, 0x003f3ff0, 0x2766c000,
+ 0x00000000, 0x00000002, 0x67840000, 0x00000000,
+ 0x03fffc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00130000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* icache_report image
+ *
+ * ctr0 : Icache misses actually used by the core.
+ * ctr1 : ICORE_AV (Icache misses the core THINKS it needs, including fetching down speculative paths).
+ * ctr2 : READs on Runway (Icache misses that made it out to Runway, including
+ * prefetches).
+ * ctr3 : Prefetch returns (1x and 2x).
+ */
+ {
+ 0x00000000, 0x00000000, 0x00010000, 0x00000000,
+ 0x00000000, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xd2002d00, 0x00000000,
+ 0x0000000b, 0x46000000, 0x0000000f, 0xf00ff900,
+ 0x00900000, 0x00000000, 0x0000907e, 0x00000000,
+ 0x0000ff00, 0xff83bf03, 0xdf030303, 0x03030000,
+ 0x000dbfff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0x80ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x4fffffff, 0xffffffff, 0xffffffff,
+ 0xffff5555, 0x55500000, 0x3f003f80, 0x274026c0,
+ 0x00000000, 0x00000002, 0x67840000, 0x00000003,
+ 0xfc03fc00, 0x00000000, 0x0eff0000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00130000, 0x00000000,
+ 0xd0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+
+ }
+
+};
+
+#endif
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
new file mode 100644
index 00000000..4b4b9181
--- /dev/null
+++ b/arch/parisc/kernel/process.c
@@ -0,0 +1,406 @@
+/*
+ * PARISC Architecture-dependent parts of process handling
+ * based on the work for i386
+ *
+ * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
+ * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
+ * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
+ * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
+ * Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
+ * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
+ * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
+ * Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
+ * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
+ * Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org>
+ * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <stdarg.h>
+
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/personality.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+
+#include <asm/io.h>
+#include <asm/asm-offsets.h>
+#include <asm/pdc.h>
+#include <asm/pdc_chassis.h>
+#include <asm/pgalloc.h>
+#include <asm/unwind.h>
+#include <asm/sections.h>
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle(void)
+{
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
+ /* endless idle loop with no priority at all */
+ while (1) {
+ while (!need_resched())
+ barrier();
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ check_pgt_cache();
+ }
+}
+
+
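+/* Global broadcast "command" register; writing CMD_RESET here issues the
+ * regular broadcast reset used as a fallback below. */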
+#define COMMAND_GLOBAL F_EXTEND(0xfffe0030)
+#define CMD_RESET 5 /* reset any module */
+
+/*
+** The Wright Brothers and Gecko systems have a H/W problem
+** (Lasi...'nuf said) that may cause a broadcast reset to lock up
+** the system. An HVERSION-dependent PDC call was developed
+** to perform a "safe", platform-specific broadcast reset instead
+** of kludging up all the code.
+**
+** Older machines which do not implement PDC_BROADCAST_RESET will
+** return (with an error) and the regular broadcast reset can be
+** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET
+** the PDC call will not return (the system will be reset).
+*/
+void machine_restart(char *cmd)
+{
+#ifdef FASTBOOT_SELFTEST_SUPPORT
+ /*
+ ** If user has modified the Firmware Selftest Bitmap,
+ ** run the tests specified in the bitmap after the
+ ** system is rebooted w/PDC_DO_RESET.
+ **
+ ** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
+ **
+ ** Using "directed resets" at each processor with the MEM_TOC
+ ** vector cleared will also avoid running destructive
+ ** memory self tests. (Not implemented yet)
+ */
+ if (ftc_bitmap) {
+ pdc_do_firm_test_reset(ftc_bitmap);
+ }
+#endif
+	/* Set up a new LED state on systems shipped with a LED State panel */
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
+
+ /* "Normal" system reset */
+ pdc_do_reset();
+
+ /* Nope...box should reset with just CMD_RESET now */
+ gsc_writel(CMD_RESET, COMMAND_GLOBAL);
+
+ /* Wait for RESET to lay us to rest. */
+ while (1) ;
+
+}
+
+void machine_halt(void)
+{
+ /*
+ ** The LED/ChassisCodes are updated by the led_halt()
+ ** function, called by the reboot notifier chain.
+ */
+}
+
+void (*chassis_power_off)(void);
+
+/*
+ * This routine is called from sys_reboot to actually turn off the
+ * machine
+ */
+void machine_power_off(void)
+{
+ /* If there is a registered power off handler, call it. */
+ if (chassis_power_off)
+ chassis_power_off();
+
+ /* Put the soft power button back under hardware control.
+ * If the user had already pressed the power button, the
+ * following call will immediately power off. */
+ pdc_soft_power_button(0);
+
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
+
+ /* It seems we have no way to power the system off via
+ * software. The user has to press the button himself. */
+
+ printk(KERN_EMERG "System shut down completed.\n"
+ "Please power this system off now.");
+}
+
+void (*pm_power_off)(void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);
+
+/*
+ * Create a kernel thread
+ */
+
+extern pid_t __kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+
+ /*
+ * FIXME: Once we are sure we don't need any debug here,
+ * kernel_thread can become a #define.
+ */
+
+ return __kernel_thread(fn, arg, flags);
+}
+EXPORT_SYMBOL(kernel_thread);
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+}
+
+void flush_thread(void)
+{
+ /* Only needs to handle fpu stuff or perf monitors.
+ ** REVISIT: several arches implement a "lazy fpu state".
+ */
+ set_fs(USER_DS);
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+/*
+ * Fill in the FPU structure for a core dump.
+ */
+
+int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r)
+{
+ if (regs == NULL)
+ return 0;
+
+ memcpy(r, regs->fr, sizeof *r);
+ return 1;
+}
+
+int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
+{
+ memcpy(r, tsk->thread.regs.fr, sizeof(*r));
+ return 1;
+}
+
+/* Note that "fork()" is implemented in terms of clone, with
+ parameters (SIGCHLD, regs->gr[30], regs). */
+int
+sys_clone(unsigned long clone_flags, unsigned long usp,
+ struct pt_regs *regs)
+{
+	/* Arguments from userspace are:
+	   r26 = Clone flags.
+	   r25 = Child stack.
+	   r24 = parent_tidptr.
+	   r23 = TLS storage descriptor.
+	   r22 = child_tidptr.
+
+	   However, these last 3 args are only examined
+	   if the proper flags are set. */
+ int __user *parent_tidptr = (int __user *)regs->gr[24];
+ int __user *child_tidptr = (int __user *)regs->gr[22];
+
+ /* usp must be word aligned. This also prevents users from
+ * passing in the value 1 (which is the signal for a special
+ * return for a kernel thread) */
+ usp = ALIGN(usp, 4);
+
+ /* A zero value for usp means use the current stack */
+ if (usp == 0)
+ usp = regs->gr[30];
+
+ return do_fork(clone_flags, usp, regs, 0, parent_tidptr, child_tidptr);
+}
+
+int
+sys_vfork(struct pt_regs *regs)
+{
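+	/* vfork: share the address space with the parent (CLONE_VM) and keep
+	 * running on the parent's current stack (gr[30]); CLONE_VFORK makes
+	 * the parent wait until the child execs or exits. */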
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gr[30], regs, 0, NULL, NULL);
+}
+
+int
+copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long unused, /* in ia64 this is "user_stack_size" */
+ struct task_struct * p, struct pt_regs * pregs)
+{
+ struct pt_regs * cregs = &(p->thread.regs);
+ void *stack = task_stack_page(p);
+
+	/* We have to use void * instead of a function pointer, because
+	 * on 64-bit a function pointer doesn't point at the function itself.
+	 * Make them const so the compiler knows they live in .text */
+ extern void * const ret_from_kernel_thread;
+ extern void * const child_return;
+#ifdef CONFIG_HPUX
+ extern void * const hpux_child_return;
+#endif
+
+ *cregs = *pregs;
+
+ /* Set the return value for the child. Note that this is not
+ actually restored by the syscall exit path, but we put it
+ here for consistency in case of signals. */
+ cregs->gr[28] = 0; /* child */
+
+	/*
+	 * We need to differentiate between a user fork and a
+	 * kernel fork. We can't use user_mode, because the
+	 * syscall path doesn't save iaoq. Right now we rely
+	 * on the special usp value that kernel_thread passes in.
+	 */
+ if (usp == 1) {
+ /* kernel thread */
+ cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN;
+ /* Must exit via ret_from_kernel_thread in order
+ * to call schedule_tail()
+ */
+ cregs->kpc = (unsigned long) &ret_from_kernel_thread;
+ /*
+ * Copy function and argument to be called from
+ * ret_from_kernel_thread.
+ */
+#ifdef CONFIG_64BIT
+ cregs->gr[27] = pregs->gr[27];
+#endif
+ cregs->gr[26] = pregs->gr[26];
+ cregs->gr[25] = pregs->gr[25];
+ } else {
+ /* user thread */
+ /*
+ * Note that the fork wrappers are responsible
+ * for setting gr[21].
+ */
+
+ /* Use same stack depth as parent */
+ cregs->ksp = (unsigned long)stack
+ + (pregs->gr[21] & (THREAD_SIZE - 1));
+ cregs->gr[30] = usp;
+ if (p->personality == PER_HPUX) {
+#ifdef CONFIG_HPUX
+ cregs->kpc = (unsigned long) &hpux_child_return;
+#else
+ BUG();
+#endif
+ } else {
+ cregs->kpc = (unsigned long) &child_return;
+ }
+ /* Setup thread TLS area from the 4th parameter in clone */
+ if (clone_flags & CLONE_SETTLS)
+ cregs->cr27 = pregs->gr[23];
+
+ }
+
+ return 0;
+}
+
+unsigned long thread_saved_pc(struct task_struct *t)
+{
+ return t->thread.regs.kpc;
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+
+asmlinkage int sys_execve(struct pt_regs *regs)
+{
+ int error;
+ char *filename;
+
+ filename = getname((const char __user *) regs->gr[26]);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename,
+ (const char __user *const __user *) regs->gr[25],
+ (const char __user *const __user *) regs->gr[24],
+ regs);
+ putname(filename);
+out:
+
+ return error;
+}
+
+extern int __execve(const char *filename,
+ const char *const argv[],
+ const char *const envp[], struct task_struct *task);
+int kernel_execve(const char *filename,
+ const char *const argv[],
+ const char *const envp[])
+{
+ return __execve(filename, argv, envp, current);
+}
+
+unsigned long
+get_wchan(struct task_struct *p)
+{
+ struct unwind_frame_info info;
+ unsigned long ip;
+ int count = 0;
+
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+	/*
+	 * These bracket the sleeping functions.
+	 */
+
+ unwind_frame_init_from_blocked_task(&info, p);
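+	/* Unwind at most 16 frames; the first PC outside the scheduler
+	 * functions is reported as the wait channel. */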
+ do {
+ if (unwind_once(&info) < 0)
+ return 0;
+ ip = info.ip;
+ if (!in_sched_functions(ip))
+ return ip;
+ } while (count++ < 16);
+ return 0;
+}
+
+#ifdef CONFIG_64BIT
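+/* On 64-bit PA-RISC a function pointer is really a pointer to an
+ * Elf64_Fdesc; return the actual code address stored in desc->addr. */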
+void *dereference_function_descriptor(void *ptr)
+{
+ Elf64_Fdesc *desc = ptr;
+ void *p;
+
+ if (!probe_kernel_address(&desc->addr, p))
+ ptr = p;
+ return ptr;
+}
+#endif
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
new file mode 100644
index 00000000..c8fb61ed
--- /dev/null
+++ b/arch/parisc/kernel/processor.c
@@ -0,0 +1,422 @@
+/*
+ * Initial setup-routines for HP 9000 based hardware.
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ * Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
+ * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
+ * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
+ * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
+ * Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net>
+ *
+ * Initial PA-RISC Version: 04-23-1999 by Helge Deller
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <asm/param.h>
+#include <asm/cache.h>
+#include <asm/hardware.h> /* for register_parisc_driver() stuff */
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/pdc.h>
+#include <asm/pdcpat.h>
+#include <asm/irq.h> /* for struct irq_region */
+#include <asm/parisc-device.h>
+
+struct system_cpuinfo_parisc boot_cpu_data __read_mostly;
+EXPORT_SYMBOL(boot_cpu_data);
+
+DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
+
+extern int update_cr16_clocksource(void); /* from time.c */
+
+/*
+** PARISC CPU driver - claim "device" and initialize CPU data structures.
+**
+** Consolidate per CPU initialization into (mostly) one module.
+** Monarch CPU will initialize boot_cpu_data which shouldn't
+** change once the system has booted.
+**
+** The callback *should* do per-instance initialization of
+** everything including the monarch. "Per CPU" init code in
+** setup.c:start_parisc() has migrated here and start_parisc()
+** will call register_parisc_driver(&cpu_driver) before calling do_inventory().
+**
+** The goal of consolidating CPU initialization into one place is
+** to make sure all CPUs get initialized the same way.
+** The code path not shared is how PDC hands control of the CPU to the OS.
+** The initialization of OS data structures is the same (done below).
+*/
+
+/**
+ * init_cpu_profiler - enable/setup per cpu profiling hooks.
+ * @cpunum: The processor instance.
+ *
+ * FIXME: doesn't do much yet...
+ */
+static void __cpuinit
+init_percpu_prof(unsigned long cpunum)
+{
+ struct cpuinfo_parisc *p;
+
+ p = &per_cpu(cpu_data, cpunum);
+ p->prof_counter = 1;
+ p->prof_multiplier = 1;
+}
+
+
+/**
+ * processor_probe - Determine if processor driver should claim this device.
+ * @dev: The device which has been found.
+ *
+ * Determine if processor driver should claim this chip (return 0) or not
+ * (return 1). If so, initialize the chip and tell other partners in crime
+ * they have work to do.
+ */
+static int __cpuinit processor_probe(struct parisc_device *dev)
+{
+ unsigned long txn_addr;
+ unsigned long cpuid;
+ struct cpuinfo_parisc *p;
+
+#ifdef CONFIG_SMP
+ if (num_online_cpus() >= nr_cpu_ids) {
+ printk(KERN_INFO "num_online_cpus() >= nr_cpu_ids\n");
+ return 1;
+ }
+#else
+ if (boot_cpu_data.cpu_count > 0) {
+ printk(KERN_INFO "CONFIG_SMP=n ignoring additional CPUs\n");
+ return 1;
+ }
+#endif
+
+ /* logical CPU ID and update global counter
+ * May get overwritten by PAT code.
+ */
+ cpuid = boot_cpu_data.cpu_count;
+ txn_addr = dev->hpa.start; /* for legacy PDC */
+
+#ifdef CONFIG_64BIT
+ if (is_pdc_pat()) {
+ ulong status;
+ unsigned long bytecnt;
+ pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
+#undef USE_PAT_CPUID
+#ifdef USE_PAT_CPUID
+ struct pdc_pat_cpu_num cpu_info;
+#endif
+
+ pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
+ if (!pa_pdc_cell)
+ panic("couldn't allocate memory for PDC_PAT_CELL!");
+
+ status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
+ dev->mod_index, PA_VIEW, pa_pdc_cell);
+
+ BUG_ON(PDC_OK != status);
+
+ /* verify it's the same as what do_pat_inventory() found */
+ BUG_ON(dev->mod_info != pa_pdc_cell->mod_info);
+ BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location);
+
+ txn_addr = pa_pdc_cell->mod[0]; /* id_eid for IO sapic */
+
+ kfree(pa_pdc_cell);
+
+#ifdef USE_PAT_CPUID
+/* We need contiguous numbers for cpuid. Firmware's notion
+ * of cpuid is for physical CPUs and we just don't care yet.
+ * We'll care when we need to query PAT PDC about a CPU *after*
+ * boot time (ie shutdown a CPU from an OS perspective).
+ */
+ /* get the cpu number */
+ status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
+
+ BUG_ON(PDC_OK != status);
+
+ if (cpu_info.cpu_num >= NR_CPUS) {
+ printk(KERN_WARNING "IGNORING CPU at 0x%x,"
+ " cpu_slot_id > NR_CPUS"
+ " (%ld > %d)\n",
+ dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
+ /* Ignore CPU since it will only crash */
+ boot_cpu_data.cpu_count--;
+ return 1;
+ } else {
+ cpuid = cpu_info.cpu_num;
+ }
+#endif
+ }
+#endif
+
+ p = &per_cpu(cpu_data, cpuid);
+ boot_cpu_data.cpu_count++;
+
+ /* initialize counters - CPU 0 gets it_value set in time_init() */
+ if (cpuid)
+ memset(p, 0, sizeof(struct cpuinfo_parisc));
+
+ p->loops_per_jiffy = loops_per_jiffy;
+ p->dev = dev; /* Save IODC data in case we need it */
+ p->hpa = dev->hpa.start; /* save CPU hpa */
+ p->cpuid = cpuid; /* save CPU id */
+ p->txn_addr = txn_addr; /* save CPU IRQ address */
+#ifdef CONFIG_SMP
+ /*
+ ** FIXME: review if any other initialization is clobbered
+ ** for boot_cpu by the above memset().
+ */
+ init_percpu_prof(cpuid);
+#endif
+
+ /*
+ ** CONFIG_SMP: init_smp_config() will attempt to get CPUs into
+ ** OS control. RENDEZVOUS is the default state - see the memset() above.
+ ** p->state = STATE_RENDEZVOUS;
+ */
+
+#if 0
+ /* CPU 0 IRQ table is statically allocated/initialized */
+ if (cpuid) {
+ struct irqaction actions[];
+
+ /*
+ ** itimer and ipi IRQ handlers are statically initialized in
+ ** arch/parisc/kernel/irq.c. ie Don't need to register them.
+ */
+ actions = kmalloc(sizeof(struct irqaction)*MAX_CPU_IRQ, GFP_ATOMIC);
+ if (!actions) {
+ /* not getting its own table, share with monarch */
+ actions = cpu_irq_actions[0];
+ }
+
+ cpu_irq_actions[cpuid] = actions;
+ }
+#endif
+
+ /*
+ * Bring this CPU up now! (ignore bootstrap cpuid == 0)
+ */
+#ifdef CONFIG_SMP
+ if (cpuid) {
+ set_cpu_present(cpuid, true);
+ cpu_up(cpuid);
+ }
+#endif
+
+ /* If we've registered more than one cpu,
+ * we'll use the jiffies clocksource since cr16
+ * is not synchronized between CPUs.
+ */
+ update_cr16_clocksource();
+
+ return 0;
+}
+
+/**
+ * collect_boot_cpu_data - Fill the boot_cpu_data structure.
+ *
+ * This function collects and stores the generic processor information
+ * in the boot_cpu_data structure.
+ */
+void __init collect_boot_cpu_data(void)
+{
+ memset(&boot_cpu_data, 0, sizeof(boot_cpu_data));
+
+ boot_cpu_data.cpu_hz = 100 * PAGE0->mem_10msec; /* Hz of this PARISC */
+
+ /* get CPU-Model Information... */
+#define p ((unsigned long *)&boot_cpu_data.pdc.model)
+ if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK)
+ printk(KERN_INFO
+ "model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
+#undef p
+
+ if (pdc_model_versions(&boot_cpu_data.pdc.versions, 0) == PDC_OK)
+ printk(KERN_INFO "vers %08lx\n",
+ boot_cpu_data.pdc.versions);
+
+ if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK)
+ printk(KERN_INFO "CPUID vers %ld rev %ld (0x%08lx)\n",
+ (boot_cpu_data.pdc.cpuid >> 5) & 127,
+ boot_cpu_data.pdc.cpuid & 31,
+ boot_cpu_data.pdc.cpuid);
+
+ if (pdc_model_capabilities(&boot_cpu_data.pdc.capabilities) == PDC_OK)
+ printk(KERN_INFO "capabilities 0x%lx\n",
+ boot_cpu_data.pdc.capabilities);
+
+ if (pdc_model_sysmodel(boot_cpu_data.pdc.sys_model_name) == PDC_OK)
+ printk(KERN_INFO "model %s\n",
+ boot_cpu_data.pdc.sys_model_name);
+
+ boot_cpu_data.hversion = boot_cpu_data.pdc.model.hversion;
+ boot_cpu_data.sversion = boot_cpu_data.pdc.model.sversion;
+
+ boot_cpu_data.cpu_type = parisc_get_cpu_type(boot_cpu_data.hversion);
+ boot_cpu_data.cpu_name = cpu_name_version[boot_cpu_data.cpu_type][0];
+ boot_cpu_data.family_name = cpu_name_version[boot_cpu_data.cpu_type][1];
+}
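As a worked example of the arithmetic in collect_boot_cpu_data(): PAGE0->mem_10msec is the number of processor clock ticks in 10 ms, so cpu_hz = 100 * mem_10msec, and the PDC cpuid word splits into a version field in bits 5..11 and a revision in bits 0..4, exactly as the printk above formats them. A small standalone sketch with made-up firmware values:

        #include <stdio.h>

        int main(void)
        {
                /* Hypothetical firmware values, for illustration only. */
                unsigned long mem_10msec = 4400000;     /* clock ticks per 10 ms */
                unsigned long cpuid = 0x000002a3;       /* raw PDC cpuid word */

                unsigned long cpu_hz = 100 * mem_10msec;        /* 440 MHz */
                unsigned long vers = (cpuid >> 5) & 127;        /* bits 5..11 */
                unsigned long rev  = cpuid & 31;                /* bits 0..4 */

                printf("cpu MHz : %lu.%06lu\n", cpu_hz / 1000000, cpu_hz % 1000000);
                printf("CPUID vers %lu rev %lu (0x%08lx)\n", vers, rev, cpuid);
                return 0;
        }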
+
+
+
+/**
+ * init_per_cpu - Handle individual processor initializations.
+ * @cpunum: logical processor number.
+ *
+ * This function handles initialization for *every* CPU
+ * in the system:
+ *
+ * o Set "default" CPU width for trap handlers
+ *
+ * o Enable FP coprocessor
+ * REVISIT: this could be done in the "code 22" trap handler.
+ * (frowand's idea - that way we know which processes need FP
+ * registers saved on the interrupt stack.)
+ * NEWS FLASH: wide kernels need FP coprocessor enabled to handle
+ * formatted printing of %lx for example (double divides I think)
+ *
+ * o Enable CPU profiling hooks.
+ */
+int __cpuinit init_per_cpu(int cpunum)
+{
+ int ret;
+ struct pdc_coproc_cfg coproc_cfg;
+
+ set_firmware_width();
+ ret = pdc_coproc_cfg(&coproc_cfg);
+
+ if(ret >= 0 && coproc_cfg.ccr_functional) {
+ mtctl(coproc_cfg.ccr_functional, 10); /* 10 == Coprocessor Control Reg */
+
+ /* FWIW, FP rev/model is a more accurate way to determine
+ ** CPU type. CPU rev/model has some ambiguous cases.
+ */
+ per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+ per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
+
+ printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
+ cpunum, coproc_cfg.revision, coproc_cfg.model);
+
+ /*
+ ** store status register to stack (hopefully aligned)
+ ** and clear the T-bit.
+ */
+ asm volatile ("fstd %fr0,8(%sp)");
+
+ } else {
+ printk(KERN_WARNING "WARNING: No FP CoProcessor?!"
+ " (coproc_cfg.ccr_functional == 0x%lx, expected 0xc0)\n"
+#ifdef CONFIG_64BIT
+ "Halting Machine - FP required\n"
+#endif
+ , coproc_cfg.ccr_functional);
+#ifdef CONFIG_64BIT
+ mdelay(100); /* previous chars get pushed to console */
+ panic("FP CoProc not reported");
+#endif
+ }
+
+ /* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
+ init_percpu_prof(cpunum);
+
+ return ret;
+}
+
+/*
+ * Display CPU info for all CPUs.
+ */
+int
+show_cpuinfo (struct seq_file *m, void *v)
+{
+ unsigned long cpu;
+
+ for_each_online_cpu(cpu) {
+ const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
+#ifdef CONFIG_SMP
+ if (0 == cpuinfo->hpa)
+ continue;
+#endif
+ seq_printf(m, "processor\t: %lu\n"
+ "cpu family\t: PA-RISC %s\n",
+ cpu, boot_cpu_data.family_name);
+
+ seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name );
+
+ /* cpu MHz */
+ seq_printf(m, "cpu MHz\t\t: %d.%06d\n",
+ boot_cpu_data.cpu_hz / 1000000,
+ boot_cpu_data.cpu_hz % 1000000 );
+
+ seq_printf(m, "capabilities\t:");
+ if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32)
+ seq_printf(m, " os32");
+ if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64)
+ seq_printf(m, " os64");
+ seq_printf(m, "\n");
+
+ seq_printf(m, "model\t\t: %s\n"
+ "model name\t: %s\n",
+ boot_cpu_data.pdc.sys_model_name,
+ cpuinfo->dev ?
+ cpuinfo->dev->name : "Unknown");
+
+ seq_printf(m, "hversion\t: 0x%08x\n"
+ "sversion\t: 0x%08x\n",
+ boot_cpu_data.hversion,
+ boot_cpu_data.sversion );
+
+ /* print cachesize info */
+ show_cache_info(m);
+
+ seq_printf(m, "bogomips\t: %lu.%02lu\n",
+ cpuinfo->loops_per_jiffy / (500000 / HZ),
+ (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100);
+
+ seq_printf(m, "software id\t: %ld\n\n",
+ boot_cpu_data.pdc.model.sw_id);
+ }
+ return 0;
+}
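The bogomips figure printed above is loops_per_jiffy rescaled: BogoMIPS is roughly loops_per_jiffy * HZ / 500000, with the whole part and the two-digit fractional part computed separately as in the seq_printf() above. A tiny standalone sketch of that formatting, using an assumed HZ and a hypothetical loops_per_jiffy:

        #include <stdio.h>

        #define HZ 250                  /* assumed kernel tick rate */

        int main(void)
        {
                unsigned long lpj = 1245184;    /* hypothetical loops_per_jiffy */

                /* Same arithmetic as the bogomips line in show_cpuinfo(). */
                unsigned long whole = lpj / (500000 / HZ);
                unsigned long frac  = (lpj / (5000 / HZ)) % 100;

                printf("bogomips\t: %lu.%02lu\n", whole, frac);        /* 622.59 */
                return 0;
        }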
+
+static const struct parisc_device_id processor_tbl[] = {
+ { HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID },
+ { 0, }
+};
+
+static struct parisc_driver cpu_driver = {
+ .name = "CPU",
+ .id_table = processor_tbl,
+ .probe = processor_probe
+};
+
+/**
+ * processor_init - Processor initialization procedure.
+ *
+ * Register this driver.
+ */
+void __init processor_init(void)
+{
+ register_parisc_driver(&cpu_driver);
+}
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
new file mode 100644
index 00000000..2905b1f5
--- /dev/null
+++ b/arch/parisc/kernel/ptrace.c
@@ -0,0 +1,285 @@
+/*
+ * Kernel support for the ptrace() and syscall tracing interfaces.
+ *
+ * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
+ * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
+ * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
+ * Copyright (C) 2008 Helge Deller <deller@gmx.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/user.h>
+#include <linux/personality.h>
+#include <linux/security.h>
+#include <linux/compat.h>
+#include <linux/signal.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/asm-offsets.h>
+
+/* PSW bits we allow the debugger to modify */
+#define USER_PSW_BITS (PSW_N | PSW_V | PSW_CB)
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *task)
+{
+ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+ clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
+
+ /* make sure the trap bits are not set */
+ pa_psw(task)->r = 0;
+ pa_psw(task)->t = 0;
+ pa_psw(task)->h = 0;
+ pa_psw(task)->l = 0;
+}
+
+/*
+ * The following functions are called by ptrace_resume() when
+ * enabling or disabling single/block tracing.
+ */
+void user_disable_single_step(struct task_struct *task)
+{
+ ptrace_disable(task);
+}
+
+void user_enable_single_step(struct task_struct *task)
+{
+ clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
+
+ if (pa_psw(task)->n) {
+ struct siginfo si;
+
+ /* Nullified, just crank over the queue. */
+ task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
+ task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
+ task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
+ pa_psw(task)->n = 0;
+ pa_psw(task)->x = 0;
+ pa_psw(task)->y = 0;
+ pa_psw(task)->z = 0;
+ pa_psw(task)->b = 0;
+ ptrace_disable(task);
+ /* Don't wake up the task, but let the
+ parent know something happened. */
+ si.si_code = TRAP_TRACE;
+ si.si_addr = (void __user *) (task_regs(task)->iaoq[0] & ~3);
+ si.si_signo = SIGTRAP;
+ si.si_errno = 0;
+ force_sig_info(SIGTRAP, &si, task);
+ /* notify_parent(task, SIGCHLD); */
+ return;
+ }
+
+ /* Enable recovery counter traps. The recovery counter
+ * itself will be set to zero on a task switch. If the
+ * task is suspended on a syscall then the syscall return
+ * path will overwrite the recovery counter with a suitable
+ * value such that it traps once back in user space. We
+ * disable interrupts in the task's PSW here also, to avoid
+ * interrupts while the recovery counter is decrementing.
+ */
+ pa_psw(task)->r = 1;
+ pa_psw(task)->t = 0;
+ pa_psw(task)->h = 0;
+ pa_psw(task)->l = 0;
+}
+
+void user_enable_block_step(struct task_struct *task)
+{
+ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+ set_tsk_thread_flag(task, TIF_BLOCKSTEP);
+
+ /* Enable taken branch trap. */
+ pa_psw(task)->r = 0;
+ pa_psw(task)->t = 1;
+ pa_psw(task)->h = 0;
+ pa_psw(task)->l = 0;
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ unsigned long tmp;
+ long ret = -EIO;
+
+ switch (request) {
+
+ /* Read the word at location addr in the USER area. For ptraced
+ processes, the kernel saves all regs on a syscall. */
+ case PTRACE_PEEKUSR:
+ if ((addr & (sizeof(unsigned long)-1)) ||
+ addr >= sizeof(struct pt_regs))
+ break;
+ tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
+ ret = put_user(tmp, (unsigned long __user *) data);
+ break;
+
+ /* Write the word at location addr in the USER area. This will need
+ to change when the kernel no longer saves all regs on a syscall.
+ FIXME. There is a problem at the moment in that r3-r18 are only
+ saved if the process is ptraced on syscall entry, and even then
+ those values are overwritten by actual register values on syscall
+ exit. */
+ case PTRACE_POKEUSR:
+ /* Some register values written here may be ignored in
+ * entry.S:syscall_restore_rfi; e.g. iaoq is written with
+ * r31/r31+4, and not with the values in pt_regs.
+ */
+ if (addr == PT_PSW) {
+ /* Allow writing to Nullify, Divide-step-correction,
+ * and carry/borrow bits.
+ * BEWARE, if you set N, and then single step, it won't
+ * stop on the nullified instruction.
+ */
+ data &= USER_PSW_BITS;
+ task_regs(child)->gr[0] &= ~USER_PSW_BITS;
+ task_regs(child)->gr[0] |= data;
+ ret = 0;
+ break;
+ }
+
+ if ((addr & (sizeof(unsigned long)-1)) ||
+ addr >= sizeof(struct pt_regs))
+ break;
+ if ((addr >= PT_GR1 && addr <= PT_GR31) ||
+ addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
+ (addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
+ addr == PT_SAR) {
+ *(unsigned long *) ((char *) task_regs(child) + addr) = data;
+ ret = 0;
+ }
+ break;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+
+#ifdef CONFIG_COMPAT
+
+/* This function is needed to translate 32 bit pt_regs offsets in to
+ * 64 bit pt_regs offsets. For example, a 32 bit gdb under a 64 bit kernel
+ * will request offset 12 if it wants gr3, but the lower 32 bits of
+ * the 64 bit kernel's view of gr3 will be at offset 28 (3*8 + 4).
+ * This code relies on a 32 bit pt_regs being comprised of 32 bit values
+ * except for the fp registers which (a) are 64 bits, and (b) follow
+ * the gr registers at the start of pt_regs. The 32 bit pt_regs should
+ * be half the size of the 64 bit pt_regs, plus 32*4 to allow for fr[]
+ * being 64 bit in both cases.
+ */
+
+static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
+{
+ if (offset < 0)
+ return sizeof(struct pt_regs);
+ else if (offset <= 32*4) /* gr[0..31] */
+ return offset * 2 + 4;
+ else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */
+ return offset + 32*4;
+ else if (offset < sizeof(struct pt_regs)/2 + 32*4)
+ return offset * 2 + 4 - 32*8;
+ else
+ return sizeof(struct pt_regs);
+}
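Worked example of the translation above: a 32-bit debugger asking for gr3 passes offset 3*4 = 12, which must map to the low word of the 64-bit gr3 slot at 3*8 + 4 = 28. The standalone sketch below copies the same piecewise formula; PT_REGS_SIZE is only a placeholder for sizeof(struct pt_regs) in a 64-bit kernel, and the kernel's unsigned "offset < 0" guard is omitted.

        #include <stdio.h>

        /* Placeholder for sizeof(struct pt_regs) on a 64-bit kernel; illustrative only. */
        #define PT_REGS_SIZE 768UL

        static unsigned long translate(unsigned long off)
        {
                if (off <= 32 * 4)                      /* gr[0..31]: 4-byte slots */
                        return off * 2 + 4;             /* low word of the 8-byte slot */
                else if (off <= 32 * 4 + 32 * 8)        /* fr[0..31]: already 8 bytes */
                        return off + 32 * 4;
                else if (off < PT_REGS_SIZE / 2 + 32 * 4)       /* remaining regs */
                        return off * 2 + 4 - 32 * 8;
                return PT_REGS_SIZE;                    /* out of range */
        }

        int main(void)
        {
                printf("gr3: 32-bit offset %3d -> 64-bit offset %lu\n",
                       3 * 4, translate(3 * 4));
                printf("fr4: 32-bit offset %3d -> 64-bit offset %lu\n",
                       32 * 4 + 4 * 8, translate(32 * 4 + 4 * 8));
                return 0;
        }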
+
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ compat_ulong_t addr, compat_ulong_t data)
+{
+ compat_uint_t tmp;
+ long ret = -EIO;
+
+ switch (request) {
+
+ case PTRACE_PEEKUSR:
+ if (addr & (sizeof(compat_uint_t)-1))
+ break;
+ addr = translate_usr_offset(addr);
+ if (addr >= sizeof(struct pt_regs))
+ break;
+
+ tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
+ ret = put_user(tmp, (compat_uint_t *) (unsigned long) data);
+ break;
+
+ /* Write the word at location addr in the USER area. This will need
+ to change when the kernel no longer saves all regs on a syscall.
+ FIXME. There is a problem at the moment in that r3-r18 are only
+ saved if the process is ptraced on syscall entry, and even then
+ those values are overwritten by actual register values on syscall
+ exit. */
+ case PTRACE_POKEUSR:
+ /* Some register values written here may be ignored in
+ * entry.S:syscall_restore_rfi; e.g. iaoq is written with
+ * r31/r31+4, and not with the values in pt_regs.
+ */
+ if (addr == PT_PSW) {
+ /* Since PT_PSW==0, it is valid for 32 bit processes
+ * under 64 bit kernels as well.
+ */
+ ret = arch_ptrace(child, request, addr, data);
+ } else {
+ if (addr & (sizeof(compat_uint_t)-1))
+ break;
+ addr = translate_usr_offset(addr);
+ if (addr >= sizeof(struct pt_regs))
+ break;
+ if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
+ /* Special case, fp regs are 64 bits anyway */
+ *(__u64 *) ((char *) task_regs(child) + addr) = data;
+ ret = 0;
+ }
+ else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
+ addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 ||
+ addr == PT_SAR+4) {
+ /* Zero the top 32 bits */
+ *(__u32 *) ((char *) task_regs(child) + addr - 4) = 0;
+ *(__u32 *) ((char *) task_regs(child) + addr) = data;
+ ret = 0;
+ }
+ }
+ break;
+
+ default:
+ ret = compat_ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+#endif
+
+long do_syscall_trace_enter(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ return -1L;
+
+ return regs->gr[20];
+}
+
+void do_syscall_trace_exit(struct pt_regs *regs)
+{
+ int stepping = test_thread_flag(TIF_SINGLESTEP) ||
+ test_thread_flag(TIF_BLOCKSTEP);
+
+ if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, stepping);
+}
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
new file mode 100644
index 00000000..5f3d3a1f
--- /dev/null
+++ b/arch/parisc/kernel/real2.S
@@ -0,0 +1,304 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com)
+ *
+ */
+
+#include <asm/pdc.h>
+#include <asm/psw.h>
+#include <asm/assembly.h>
+#include <asm/asm-offsets.h>
+
+#include <linux/linkage.h>
+
+
+ .section .bss
+
+ .export pdc_result
+ .export pdc_result2
+ .align 8
+pdc_result:
+ .block ASM_PDC_RESULT_SIZE
+pdc_result2:
+ .block ASM_PDC_RESULT_SIZE
+
+ .export real_stack
+ .export real32_stack
+ .export real64_stack
+ .align 64
+real_stack:
+real32_stack:
+real64_stack:
+ .block 8192
+
+#ifdef CONFIG_64BIT
+# define REG_SZ 8
+#else
+# define REG_SZ 4
+#endif
+
+#define N_SAVED_REGS 9
+
+save_cr_space:
+ .block REG_SZ * N_SAVED_REGS
+save_cr_end:
+
+
+/************************ 32-bit real-mode calls ***********************/
+/* This can be called in both narrow and wide kernels */
+
+ .text
+
+ /* unsigned long real32_call_asm(unsigned int *sp,
+ * unsigned int *arg0p,
+ * unsigned int iodc_fn)
+ * sp is value of stack pointer to adopt before calling PDC (virt)
+ * arg0p points to where saved arg values may be found
+ * iodc_fn is the IODC function to call
+ */
+
+ENTRY(real32_call_asm)
+ STREG %rp, -RP_OFFSET(%sp) /* save RP */
+#ifdef CONFIG_64BIT
+ callee_save
+ ldo 2*REG_SZ(%sp), %sp /* room for a couple more saves */
+ STREG %r27, -1*REG_SZ(%sp)
+ STREG %r29, -2*REG_SZ(%sp)
+#endif
+ STREG %sp, -REG_SZ(%arg0) /* save SP on real-mode stack */
+ copy %arg0, %sp /* adopt the real-mode SP */
+
+ /* save iodc_fn */
+ copy %arg2, %r31
+
+ /* load up the arg registers from the saved arg area */
+ /* 32-bit calling convention passes first 4 args in registers */
+ ldw 0(%arg1), %arg0 /* note overwriting arg0 */
+ ldw -8(%arg1), %arg2
+ ldw -12(%arg1), %arg3
+ ldw -4(%arg1), %arg1 /* obviously must do this one last! */
+
+ tophys_r1 %sp
+
+ b,l rfi_virt2real,%r2
+ nop
+
+ b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
+ nop
+
+#ifdef CONFIG_64BIT
+ rsm PSW_SM_W, %r0 /* go narrow */
+#endif
+
+ load32 PA(ric_ret), %r2
+ bv 0(%r31)
+ nop
+ric_ret:
+#ifdef CONFIG_64BIT
+ ssm PSW_SM_W, %r0 /* go wide */
+#endif
+ /* restore CRs before going virtual in case we page fault */
+ b,l restore_control_regs, %r2 /* modifies r1, r2, r26 */
+ nop
+
+ b,l rfi_real2virt,%r2
+ nop
+
+ tovirt_r1 %sp
+ LDREG -REG_SZ(%sp), %sp /* restore SP */
+#ifdef CONFIG_64BIT
+ LDREG -1*REG_SZ(%sp), %r27
+ LDREG -2*REG_SZ(%sp), %r29
+ ldo -2*REG_SZ(%sp), %sp
+ callee_rest
+#endif
+ LDREG -RP_OFFSET(%sp), %rp /* restore RP */
+ bv 0(%rp)
+ nop
+ENDPROC(real32_call_asm)
+
+
+# define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where)
+# define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r
+
+ .text
+save_control_regs:
+ load32 PA(save_cr_space), %r28
+ PUSH_CR(%cr24, %r28)
+ PUSH_CR(%cr25, %r28)
+ PUSH_CR(%cr26, %r28)
+ PUSH_CR(%cr27, %r28)
+ PUSH_CR(%cr28, %r28)
+ PUSH_CR(%cr29, %r28)
+ PUSH_CR(%cr30, %r28)
+ PUSH_CR(%cr31, %r28)
+ PUSH_CR(%cr15, %r28)
+ bv 0(%r2)
+ nop
+
+restore_control_regs:
+ load32 PA(save_cr_end), %r26
+ POP_CR(%cr15, %r26)
+ POP_CR(%cr31, %r26)
+ POP_CR(%cr30, %r26)
+ POP_CR(%cr29, %r26)
+ POP_CR(%cr28, %r26)
+ POP_CR(%cr27, %r26)
+ POP_CR(%cr26, %r26)
+ POP_CR(%cr25, %r26)
+ POP_CR(%cr24, %r26)
+ bv 0(%r2)
+ nop
+
+/* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for
+ * more general-purpose use by the several places which need RFIs
+ */
+ .text
+ .align 128
+rfi_virt2real:
+ /* switch to real mode... */
+ rsm PSW_SM_I,%r0
+ load32 PA(rfi_v2r_1), %r1
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ rsm PSW_SM_Q,%r0 /* disable Q & I bits to load iia queue */
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ mtctl %r1, %cr18 /* IIAOQ head */
+ ldo 4(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ tail */
+ load32 REAL_MODE_PSW, %r1
+ mtctl %r1, %cr22
+ rfi
+
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+rfi_v2r_1:
+ tophys_r1 %r2
+ bv 0(%r2)
+ nop
+
+ .text
+ .align 128
+rfi_real2virt:
+ rsm PSW_SM_I,%r0
+ load32 (rfi_r2v_1), %r1
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ rsm PSW_SM_Q,%r0 /* disable Q bit to load iia queue */
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ mtctl %r1, %cr18 /* IIAOQ head */
+ ldo 4(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ tail */
+ load32 KERNEL_PSW, %r1
+ mtctl %r1, %cr22
+ rfi
+
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+rfi_r2v_1:
+ tovirt_r1 %r2
+ bv 0(%r2)
+ nop
+
+#ifdef CONFIG_64BIT
+
+/************************ 64-bit real-mode calls ***********************/
+/* This is only usable in wide kernels right now and will probably stay so */
+ .text
+ /* unsigned long real64_call_asm(unsigned long *sp,
+ * unsigned long *arg0p,
+ * unsigned long fn)
+ * sp is value of stack pointer to adopt before calling PDC (virt)
+ * arg0p points to where saved arg values may be found
+ * fn is the firmware function to call
+ */
+ENTRY(real64_call_asm)
+ std %rp, -0x10(%sp) /* save RP */
+ std %sp, -8(%arg0) /* save SP on real-mode stack */
+ copy %arg0, %sp /* adopt the real-mode SP */
+
+ /* save fn */
+ copy %arg2, %r31
+
+ /* set up the new ap */
+ ldo 64(%arg1), %r29
+
+ /* load up the arg registers from the saved arg area */
+ /* 64-bit calling convention passes the first 8 args in registers */
+ ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */
+ ldd 2*REG_SZ(%arg1), %arg2
+ ldd 3*REG_SZ(%arg1), %arg3
+ ldd 4*REG_SZ(%arg1), %r22
+ ldd 5*REG_SZ(%arg1), %r21
+ ldd 6*REG_SZ(%arg1), %r20
+ ldd 7*REG_SZ(%arg1), %r19
+ ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */
+
+ tophys_r1 %sp
+
+ b,l rfi_virt2real,%r2
+ nop
+
+ b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
+ nop
+
+ load32 PA(r64_ret), %r2
+ bv 0(%r31)
+ nop
+r64_ret:
+ /* restore CRs before going virtual in case we page fault */
+ b,l restore_control_regs, %r2 /* modifies r1, r2, r26 */
+ nop
+
+ b,l rfi_real2virt,%r2
+ nop
+
+ tovirt_r1 %sp
+ ldd -8(%sp), %sp /* restore SP */
+ ldd -0x10(%sp), %rp /* restore RP */
+ bv 0(%rp)
+ nop
+ENDPROC(real64_call_asm)
+
+#endif
+
+ .text
+ /* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html
+ ** GCC 3.3 and later has a new function in libgcc.a for
+ ** comparing function pointers.
+ */
+ENTRY(__canonicalize_funcptr_for_compare)
+#ifdef CONFIG_64BIT
+ bve (%r2)
+#else
+ bv %r0(%r2)
+#endif
+ copy %r26,%r28
+ENDPROC(__canonicalize_funcptr_for_compare)
+
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
new file mode 100644
index 00000000..cb71f3da
--- /dev/null
+++ b/arch/parisc/kernel/setup.c
@@ -0,0 +1,394 @@
+/*
+ * Initial setup-routines for HP 9000 based hardware.
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ * Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de>
+ * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
+ * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
+ * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
+ * Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net>
+ *
+ * Initial PA-RISC Version: 04-23-1999 by Helge Deller
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/initrd.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/seq_file.h>
+#define PCI_DEBUG
+#include <linux/pci.h>
+#undef PCI_DEBUG
+#include <linux/proc_fs.h>
+
+#include <asm/processor.h>
+#include <asm/pdc.h>
+#include <asm/led.h>
+#include <asm/machdep.h> /* for pa7300lc_init() proto */
+#include <asm/pdc_chassis.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/unwind.h>
+
+static char __initdata command_line[COMMAND_LINE_SIZE];
+
+/* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */
+struct proc_dir_entry * proc_runway_root __read_mostly = NULL;
+struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
+struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL;
+
+#if !defined(CONFIG_PA20) && (defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA))
+int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */
+EXPORT_SYMBOL(parisc_bus_is_phys);
+#endif
+
+void __init setup_cmdline(char **cmdline_p)
+{
+ extern unsigned int boot_args[];
+
+ /* Collect stuff passed in from the boot loader */
+
+ /* boot_args[0] is free-mem start, boot_args[1] is ptr to command line */
+ if (boot_args[0] < 64) {
+ /* called from hpux boot loader */
+ boot_command_line[0] = '\0';
+ } else {
+ strcpy(boot_command_line, (char *)__va(boot_args[1]));
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
+ {
+ initrd_start = (unsigned long)__va(boot_args[2]);
+ initrd_end = (unsigned long)__va(boot_args[3]);
+ }
+#endif
+ }
+
+ strcpy(command_line, boot_command_line);
+ *cmdline_p = command_line;
+}
+
+#ifdef CONFIG_PA11
+void __init dma_ops_init(void)
+{
+ switch (boot_cpu_data.cpu_type) {
+ case pcx:
+ /*
+ * We've got way too many dependencies on 1.1 semantics
+ * to support 1.0 boxes at this point.
+ */
+ panic( "PA-RISC Linux currently only supports machines that conform to\n"
+ "the PA-RISC 1.1 or 2.0 architecture specification.\n");
+
+ case pcxs:
+ case pcxt:
+ hppa_dma_ops = &pcx_dma_ops;
+ break;
+ case pcxl2:
+ pa7300lc_init();
+ case pcxl: /* falls through */
+ hppa_dma_ops = &pcxl_dma_ops;
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
+extern int init_per_cpu(int cpuid);
+extern void collect_boot_cpu_data(void);
+
+void __init setup_arch(char **cmdline_p)
+{
+#ifdef CONFIG_64BIT
+ extern int parisc_narrow_firmware;
+#endif
+ unwind_init();
+
+ init_per_cpu(smp_processor_id()); /* Set Modes & Enable FP */
+
+#ifdef CONFIG_64BIT
+ printk(KERN_INFO "The 64-bit Kernel has started...\n");
+#else
+ printk(KERN_INFO "The 32-bit Kernel has started...\n");
+#endif
+
+ pdc_console_init();
+
+#ifdef CONFIG_64BIT
+ if(parisc_narrow_firmware) {
+ printk(KERN_INFO "Kernel is using PDC in 32-bit mode.\n");
+ }
+#endif
+ setup_pdc();
+ setup_cmdline(cmdline_p);
+ collect_boot_cpu_data();
+ do_memory_inventory(); /* probe for physical memory */
+ parisc_cache_init();
+ paging_init();
+
+#ifdef CONFIG_CHASSIS_LCD_LED
+ /* initialize the LCD/LED after boot_cpu_data is available ! */
+ led_init(); /* LCD/LED initialization */
+#endif
+
+#ifdef CONFIG_PA11
+ dma_ops_init();
+#endif
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con; /* we use take_over_console() later ! */
+#endif
+
+}
+
+/*
+ * Display CPU info for all CPUs.
+ * for parisc this is in processor.c
+ */
+extern int show_cpuinfo (struct seq_file *m, void *v);
+
+static void *
+c_start (struct seq_file *m, loff_t *pos)
+{
+ /* Looks like the caller will call repeatedly until we return
+ * 0, signaling EOF perhaps. This could be used to sequence
+ * through CPUs for example. Since we print all cpu info in our
+ * show_cpuinfo() disregarding 'pos' (which I assume is 'v' above)
+ * we only allow for one "position". */
+ return ((long)*pos < 1) ? (void *)1 : NULL;
+}
+
+static void *
+c_next (struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void
+c_stop (struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo
+};
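c_start()/c_next() above implement the seq_file iterator with a single position: start() yields a non-NULL token only while *pos < 1, so show_cpuinfo() runs once per read even though it loops over all CPUs internally. A rough userspace sketch of that one-shot pattern (function names invented for illustration):

        #include <stdio.h>

        /* One-position iterator: valid only while *pos < 1, mirroring c_start(). */
        static void *one_shot_start(long *pos)
        {
                return (*pos < 1) ? (void *)1 : NULL;
        }

        static void *one_shot_next(long *pos)
        {
                ++*pos;
                return one_shot_start(pos);
        }

        int main(void)
        {
                long pos = 0;
                void *v;

                /* The driver loop a seq_file reader would perform. */
                for (v = one_shot_start(&pos); v != NULL; v = one_shot_next(&pos))
                        printf("show() called once, pos=%ld\n", pos);
                return 0;
        }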
+
+static void __init parisc_proc_mkdir(void)
+{
+ /*
+ ** Can't call proc_mkdir() until after proc_root_init() has been
+ ** called by start_kernel(). In other words, this code can't
+ ** live in arch/.../setup.c because start_parisc() calls
+ ** start_kernel().
+ */
+ switch (boot_cpu_data.cpu_type) {
+ case pcxl:
+ case pcxl2:
+ if (NULL == proc_gsc_root)
+ {
+ proc_gsc_root = proc_mkdir("bus/gsc", NULL);
+ }
+ break;
+ case pcxt_:
+ case pcxu:
+ case pcxu_:
+ case pcxw:
+ case pcxw_:
+ case pcxw2:
+ if (NULL == proc_runway_root)
+ {
+ proc_runway_root = proc_mkdir("bus/runway", NULL);
+ }
+ break;
+ case mako:
+ case mako2:
+ if (NULL == proc_mckinley_root)
+ {
+ proc_mckinley_root = proc_mkdir("bus/mckinley", NULL);
+ }
+ break;
+ default:
+ /* FIXME: this was added to prevent the compiler
+ * complaining about missing pcx, pcxs and pcxt
+ * I'm assuming they have neither gsc nor runway */
+ break;
+ }
+}
+
+static struct resource central_bus = {
+ .name = "Central Bus",
+ .start = F_EXTEND(0xfff80000),
+ .end = F_EXTEND(0xfffaffff),
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource local_broadcast = {
+ .name = "Local Broadcast",
+ .start = F_EXTEND(0xfffb0000),
+ .end = F_EXTEND(0xfffdffff),
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource global_broadcast = {
+ .name = "Global Broadcast",
+ .start = F_EXTEND(0xfffe0000),
+ .end = F_EXTEND(0xffffffff),
+ .flags = IORESOURCE_MEM,
+};
+
+static int __init parisc_init_resources(void)
+{
+ int result;
+
+ result = request_resource(&iomem_resource, &central_bus);
+ if (result < 0) {
+ printk(KERN_ERR
+ "%s: failed to claim %s address space!\n",
+ __FILE__, central_bus.name);
+ return result;
+ }
+
+ result = request_resource(&iomem_resource, &local_broadcast);
+ if (result < 0) {
+ printk(KERN_ERR
+ "%s: failed to claim %saddress space!\n",
+ __FILE__, local_broadcast.name);
+ return result;
+ }
+
+ result = request_resource(&iomem_resource, &global_broadcast);
+ if (result < 0) {
+ printk(KERN_ERR
+ "%s: failed to claim %s address space!\n",
+ __FILE__, global_broadcast.name);
+ return result;
+ }
+
+ return 0;
+}
+
+extern void gsc_init(void);
+extern void processor_init(void);
+extern void ccio_init(void);
+extern void hppb_init(void);
+extern void dino_init(void);
+extern void iosapic_init(void);
+extern void lba_init(void);
+extern void sba_init(void);
+extern void eisa_init(void);
+
+static int __init parisc_init(void)
+{
+ u32 osid = (OS_ID_LINUX << 16);
+
+ parisc_proc_mkdir();
+ parisc_init_resources();
+ do_device_inventory(); /* probe for hardware */
+
+ parisc_pdc_chassis_init();
+
+ /* set up a new led state on systems shipped LED State panel */
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BSTART);
+
+ /* tell PDC we're Linux. Nevermind failure. */
+ pdc_stable_write(0x40, &osid, sizeof(osid));
+
+ processor_init();
+ printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n",
+ num_present_cpus(),
+ boot_cpu_data.cpu_name,
+ boot_cpu_data.cpu_hz / 1000000,
+ boot_cpu_data.cpu_hz % 1000000 );
+
+ parisc_setup_cache_timing();
+
+ /* These are in a non-obvious order, will fix when we have an iotree */
+#if defined(CONFIG_IOSAPIC)
+ iosapic_init();
+#endif
+#if defined(CONFIG_IOMMU_SBA)
+ sba_init();
+#endif
+#if defined(CONFIG_PCI_LBA)
+ lba_init();
+#endif
+
+ /* CCIO before any potential subdevices */
+#if defined(CONFIG_IOMMU_CCIO)
+ ccio_init();
+#endif
+
+ /*
+ * Need to register Asp & Wax before the EISA adapters for the IRQ
+ * regions. EISA must come before PCI to be sure it gets IRQ region
+ * 0.
+ */
+#if defined(CONFIG_GSC_LASI) || defined(CONFIG_GSC_WAX)
+ gsc_init();
+#endif
+#ifdef CONFIG_EISA
+ eisa_init();
+#endif
+
+#if defined(CONFIG_HPPB)
+ hppb_init();
+#endif
+
+#if defined(CONFIG_GSC_DINO)
+ dino_init();
+#endif
+
+#ifdef CONFIG_CHASSIS_LCD_LED
+ register_led_regions(); /* register LED port info in procfs */
+#endif
+
+ return 0;
+}
+arch_initcall(parisc_init);
+
+void start_parisc(void)
+{
+ extern void start_kernel(void);
+
+ int ret, cpunum;
+ struct pdc_coproc_cfg coproc_cfg;
+
+ cpunum = smp_processor_id();
+
+ set_firmware_width_unlocked();
+
+ ret = pdc_coproc_cfg_unlocked(&coproc_cfg);
+ if (ret >= 0 && coproc_cfg.ccr_functional) {
+ mtctl(coproc_cfg.ccr_functional, 10);
+
+ per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+ per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
+
+ asm volatile ("fstd %fr0,8(%sp)");
+ } else {
+ panic("must have an fpu to boot linux");
+ }
+
+ start_kernel();
+ // not reached
+}
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
new file mode 100644
index 00000000..12c1ed33
--- /dev/null
+++ b/arch/parisc/kernel/signal.c
@@ -0,0 +1,653 @@
+/*
+ * linux/arch/parisc/kernel/signal.c: Architecture-specific signal
+ * handling support.
+ *
+ * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
+ * Copyright (C) 2000 Linuxcare, Inc.
+ *
+ * Based on the ia64, i386, and alpha versions.
+ *
+ * Like the IA-64, we are a recent enough port (we are *starting*
+ * with glibc2.2) that we do not need to support the old non-realtime
+ * Linux signals. Therefore we don't. HP/UX signals will go in
+ * arch/parisc/hpux/signal.c when we figure out how to do them.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
+#include <asm/ucontext.h>
+#include <asm/rt_sigframe.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_COMPAT
+#include "signal32.h"
+#endif
+
+#define DEBUG_SIG 0
+#define DEBUG_SIG_LEVEL 2
+
+#if DEBUG_SIG
+#define DBG(LEVEL, ...) \
+ ((DEBUG_SIG_LEVEL >= LEVEL) \
+ ? printk(__VA_ARGS__) : (void) 0)
+#else
+#define DBG(LEVEL, ...)
+#endif
+
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/* gcc will complain if a pointer is cast to an integer of different
+ * size. If you really need to do this (and we do for an ELF32 user
+ * application in an ELF64 kernel) then you have to do a cast to an
+ * integer of the same size first. The A() macro accomplishes
+ * this. */
+#define A(__x) ((unsigned long)(__x))
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+#ifdef CONFIG_64BIT
+#include "sys32.h"
+#endif
+
+/*
+ * Do a signal return - restore sigcontext.
+ */
+
+/* Trampoline for calling rt_sigreturn() */
+#define INSN_LDI_R25_0 0x34190000 /* ldi 0,%r25 (in_syscall=0) */
+#define INSN_LDI_R25_1 0x34190002 /* ldi 1,%r25 (in_syscall=1) */
+#define INSN_LDI_R20 0x3414015a /* ldi __NR_rt_sigreturn,%r20 */
+#define INSN_BLE_SR2_R0 0xe4008200 /* be,l 0x100(%sr2,%r0),%sr0,%r31 */
+#define INSN_NOP 0x08000240 /* nop */
+/* For debugging */
+#define INSN_DIE_HORRIBLY 0x68000ccc /* stw %r0,0x666(%sr0,%r0) */
+
+static long
+restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
+{
+ long err = 0;
+
+ err |= __copy_from_user(regs->gr, sc->sc_gr, sizeof(regs->gr));
+ err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr));
+ err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq));
+ err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq));
+ err |= __get_user(regs->sar, &sc->sc_sar);
+ DBG(2,"restore_sigcontext: iaoq is 0x%#lx / 0x%#lx\n",
+ regs->iaoq[0],regs->iaoq[1]);
+ DBG(2,"restore_sigcontext: r28 is %ld\n", regs->gr[28]);
+ return err;
+}
+
+void
+sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
+{
+ struct rt_sigframe __user *frame;
+ sigset_t set;
+ unsigned long usp = (regs->gr[30] & ~(0x01UL));
+ unsigned long sigframe_size = PARISC_RT_SIGFRAME_SIZE;
+#ifdef CONFIG_64BIT
+ compat_sigset_t compat_set;
+ struct compat_rt_sigframe __user * compat_frame;
+
+ if (is_compat_task())
+ sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
+#endif
+
+
+ /* Unwind the user stack to get the rt_sigframe structure. */
+ frame = (struct rt_sigframe __user *)
+ (usp - sigframe_size);
+ DBG(2,"sys_rt_sigreturn: frame is %p\n", frame);
+
+#ifdef CONFIG_64BIT
+ compat_frame = (struct compat_rt_sigframe __user *)frame;
+
+ if (is_compat_task()) {
+ DBG(2,"sys_rt_sigreturn: ELF32 process.\n");
+ if (__copy_from_user(&compat_set, &compat_frame->uc.uc_sigmask, sizeof(compat_set)))
+ goto give_sigsegv;
+ sigset_32to64(&set,&compat_set);
+ } else
+#endif
+ {
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto give_sigsegv;
+ }
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ /* Good thing we saved the old gr[30], eh? */
+#ifdef CONFIG_64BIT
+ if (is_compat_task()) {
+ DBG(1,"sys_rt_sigreturn: compat_frame->uc.uc_mcontext 0x%p\n",
+ &compat_frame->uc.uc_mcontext);
+// FIXME: Load upper half from register file
+ if (restore_sigcontext32(&compat_frame->uc.uc_mcontext,
+ &compat_frame->regs, regs))
+ goto give_sigsegv;
+ DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n",
+ usp, &compat_frame->uc.uc_stack);
+ if (do_sigaltstack32(&compat_frame->uc.uc_stack, NULL, usp) == -EFAULT)
+ goto give_sigsegv;
+ } else
+#endif
+ {
+ DBG(1,"sys_rt_sigreturn: frame->uc.uc_mcontext 0x%p\n",
+ &frame->uc.uc_mcontext);
+ if (restore_sigcontext(&frame->uc.uc_mcontext, regs))
+ goto give_sigsegv;
+ DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n",
+ usp, &frame->uc.uc_stack);
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, usp) == -EFAULT)
+ goto give_sigsegv;
+ }
+
+
+
+ /* If we are on the syscall path IAOQ will not be restored, and
+ * if we are on the interrupt path we must not corrupt gr31.
+ */
+ if (in_syscall)
+ regs->gr[31] = regs->iaoq[0];
+#if DEBUG_SIG
+ DBG(1,"sys_rt_sigreturn: returning to %#lx, DUMPING REGS:\n", regs->iaoq[0]);
+ show_regs(regs);
+#endif
+ return;
+
+give_sigsegv:
+ DBG(1,"sys_rt_sigreturn: Sending SIGSEGV\n");
+ force_sig(SIGSEGV, current);
+ return;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
+{
+ /*FIXME: ELF32 and ELF64 have different frame_size values, but since we
+ don't use the parameter it doesn't matter */
+
+ DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n",
+ (unsigned long)ka, sp, frame_size);
+
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
+ sp = current->sas_ss_sp; /* Stacks grow up! */
+
+ DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp);
+ return (void __user *) sp; /* Stacks grow up. Fun. */
+}
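Since the PA-RISC user stack grows upward, get_sigframe() returns the chosen stack pointer unchanged and setup_rt_frame() later raises gr[30] past the frame; a grow-down architecture would subtract the frame size here instead. A trivial sketch of that placement arithmetic with made-up numbers (SIGFRAME_SIZE stands in for PARISC_RT_SIGFRAME_SIZE):

        #include <stdio.h>

        #define SIGFRAME_SIZE 1024UL    /* stand-in for PARISC_RT_SIGFRAME_SIZE */

        int main(void)
        {
                unsigned long sp = 0xfaf00000UL;        /* hypothetical user sp (gr[30]) */

                /* Grow-up stack: the frame starts at sp, and sp advances past it. */
                unsigned long frame  = sp;
                unsigned long new_sp = frame + SIGFRAME_SIZE;

                /* A grow-down stack would instead use: frame = sp - SIGFRAME_SIZE. */
                printf("frame at 0x%lx, new sp 0x%lx\n", frame, new_sp);
                return 0;
        }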
+
+static long
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_syscall)
+
+{
+ unsigned long flags = 0;
+ long err = 0;
+
+ if (on_sig_stack((unsigned long) sc))
+ flags |= PARISC_SC_FLAG_ONSTACK;
+ if (in_syscall) {
+ flags |= PARISC_SC_FLAG_IN_SYSCALL;
+ /* regs->iaoq is undefined in the syscall return path */
+ err |= __put_user(regs->gr[31], &sc->sc_iaoq[0]);
+ err |= __put_user(regs->gr[31]+4, &sc->sc_iaoq[1]);
+ err |= __put_user(regs->sr[3], &sc->sc_iasq[0]);
+ err |= __put_user(regs->sr[3], &sc->sc_iasq[1]);
+ DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (in syscall)\n",
+ regs->gr[31], regs->gr[31]+4);
+ } else {
+ err |= __copy_to_user(sc->sc_iaoq, regs->iaoq, sizeof(regs->iaoq));
+ err |= __copy_to_user(sc->sc_iasq, regs->iasq, sizeof(regs->iasq));
+ DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (not in syscall)\n",
+ regs->iaoq[0], regs->iaoq[1]);
+ }
+
+ err |= __put_user(flags, &sc->sc_flags);
+ err |= __copy_to_user(sc->sc_gr, regs->gr, sizeof(regs->gr));
+ err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr));
+ err |= __put_user(regs->sar, &sc->sc_sar);
+ DBG(1,"setup_sigcontext: r28 is %ld\n", regs->gr[28]);
+
+ return err;
+}
+
+static long
+setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs, int in_syscall)
+{
+ struct rt_sigframe __user *frame;
+ unsigned long rp, usp;
+ unsigned long haddr, sigframe_size;
+ int err = 0;
+#ifdef CONFIG_64BIT
+ compat_int_t compat_val;
+ struct compat_rt_sigframe __user * compat_frame;
+ compat_sigset_t compat_set;
+#endif
+
+ usp = (regs->gr[30] & ~(0x01UL));
+ /*FIXME: frame_size parameter is unused, remove it. */
+ frame = get_sigframe(ka, usp, sizeof(*frame));
+
+ DBG(1,"SETUP_RT_FRAME: START\n");
+ DBG(1,"setup_rt_frame: frame %p info %p\n", frame, info);
+
+
+#ifdef CONFIG_64BIT
+
+ compat_frame = (struct compat_rt_sigframe __user *)frame;
+
+ if (is_compat_task()) {
+ DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &compat_frame->info);
+ err |= copy_siginfo_to_user32(&compat_frame->info, info);
+ DBG(1,"SETUP_RT_FRAME: 1\n");
+ compat_val = (compat_int_t)current->sas_ss_sp;
+ err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_sp);
+ DBG(1,"SETUP_RT_FRAME: 2\n");
+ compat_val = (compat_int_t)current->sas_ss_size;
+ err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_size);
+ DBG(1,"SETUP_RT_FRAME: 3\n");
+ compat_val = sas_ss_flags(regs->gr[30]);
+ err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_flags);
+ DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &compat_frame->uc);
+ DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &compat_frame->uc.uc_mcontext);
+ err |= setup_sigcontext32(&compat_frame->uc.uc_mcontext,
+ &compat_frame->regs, regs, in_syscall);
+ sigset_64to32(&compat_set,set);
+ err |= __copy_to_user(&compat_frame->uc.uc_sigmask, &compat_set, sizeof(compat_set));
+ } else
+#endif
+ {
+ DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &frame->info);
+ err |= copy_siginfo_to_user(&frame->info, info);
+ err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __put_user(sas_ss_flags(regs->gr[30]),
+ &frame->uc.uc_stack.ss_flags);
+ DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &frame->uc);
+ DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &frame->uc.uc_mcontext);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, in_syscall);
+ /* FIXME: Should probably be converted as well for the compat case */
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ }
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. The first words of tramp are used to
+ save the previous sigrestartblock trampoline that might be
+ on the stack. We start the sigreturn trampoline at
+ SIGRESTARTBLOCK_TRAMP+X. */
+ err |= __put_user(in_syscall ? INSN_LDI_R25_1 : INSN_LDI_R25_0,
+ &frame->tramp[SIGRESTARTBLOCK_TRAMP+0]);
+ err |= __put_user(INSN_LDI_R20,
+ &frame->tramp[SIGRESTARTBLOCK_TRAMP+1]);
+ err |= __put_user(INSN_BLE_SR2_R0,
+ &frame->tramp[SIGRESTARTBLOCK_TRAMP+2]);
+ err |= __put_user(INSN_NOP, &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]);
+
+#if DEBUG_SIG
+ /* Assert that we're flushing in the correct space... */
+ {
+ int sid;
+ asm ("mfsp %%sr3,%0" : "=r" (sid));
+ DBG(1,"setup_rt_frame: Flushing 64 bytes at space %#x offset %p\n",
+ sid, frame->tramp);
+ }
+#endif
+
+ flush_user_dcache_range((unsigned long) &frame->tramp[0],
+ (unsigned long) &frame->tramp[TRAMP_SIZE]);
+ flush_user_icache_range((unsigned long) &frame->tramp[0],
+ (unsigned long) &frame->tramp[TRAMP_SIZE]);
+
+ /* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP
+ * TRAMP Words 5-9, Length 4 = SIGRETURN_TRAMP
+ * So the SIGRETURN_TRAMP is at the end of SIGRESTARTBLOCK_TRAMP
+ */
+ rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP];
+
+ if (err)
+ goto give_sigsegv;
+
+ haddr = A(ka->sa.sa_handler);
+ /* The sa_handler may be a pointer to a function descriptor */
+#ifdef CONFIG_64BIT
+ if (is_compat_task()) {
+#endif
+ if (haddr & PA_PLABEL_FDESC) {
+ Elf32_Fdesc fdesc;
+ Elf32_Fdesc __user *ufdesc = (Elf32_Fdesc __user *)A(haddr & ~3);
+
+ err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc));
+
+ if (err)
+ goto give_sigsegv;
+
+ haddr = fdesc.addr;
+ regs->gr[19] = fdesc.gp;
+ }
+#ifdef CONFIG_64BIT
+ } else {
+ Elf64_Fdesc fdesc;
+ Elf64_Fdesc __user *ufdesc = (Elf64_Fdesc __user *)A(haddr & ~3);
+
+ err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc));
+
+ if (err)
+ goto give_sigsegv;
+
+ haddr = fdesc.addr;
+ regs->gr[19] = fdesc.gp;
+ DBG(1,"setup_rt_frame: 64 bit signal, exe=%#lx, r19=%#lx, in_syscall=%d\n",
+ haddr, regs->gr[19], in_syscall);
+ }
+#endif
+
+ /* The syscall return path will create IAOQ values from r31.
+ */
+ sigframe_size = PARISC_RT_SIGFRAME_SIZE;
+#ifdef CONFIG_64BIT
+ if (is_compat_task())
+ sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
+#endif
+ if (in_syscall) {
+ regs->gr[31] = haddr;
+#ifdef CONFIG_64BIT
+ if (!test_thread_flag(TIF_32BIT))
+ sigframe_size |= 1;
+#endif
+ } else {
+ unsigned long psw = USER_PSW;
+#ifdef CONFIG_64BIT
+ if (!test_thread_flag(TIF_32BIT))
+ psw |= PSW_W;
+#endif
+
+ /* If we are singlestepping, arrange a trap to be delivered
+ when we return to userspace. Note the semantics -- we
+ should trap before the first insn in the handler is
+ executed. Ref:
+ http://sources.redhat.com/ml/gdb/2004-11/msg00245.html
+ */
+ if (pa_psw(current)->r) {
+ pa_psw(current)->r = 0;
+ psw |= PSW_R;
+ mtctl(-1, 0);
+ }
+
+ regs->gr[0] = psw;
+ regs->iaoq[0] = haddr | 3;
+ regs->iaoq[1] = regs->iaoq[0] + 4;
+ }
+
+ regs->gr[2] = rp; /* userland return pointer */
+ regs->gr[26] = sig; /* signal number */
+
+#ifdef CONFIG_64BIT
+ if (is_compat_task()) {
+ regs->gr[25] = A(&compat_frame->info); /* siginfo pointer */
+ regs->gr[24] = A(&compat_frame->uc); /* ucontext pointer */
+ } else
+#endif
+ {
+ regs->gr[25] = A(&frame->info); /* siginfo pointer */
+ regs->gr[24] = A(&frame->uc); /* ucontext pointer */
+ }
+
+ DBG(1,"setup_rt_frame: making sigreturn frame: %#lx + %#lx = %#lx\n",
+ regs->gr[30], sigframe_size,
+ regs->gr[30] + sigframe_size);
+ /* Raise the user stack pointer to make a proper call frame. */
+ regs->gr[30] = (A(frame) + sigframe_size);
+
+
+ DBG(1,"setup_rt_frame: sig deliver (%s,%d) frame=0x%p sp=%#lx iaoq=%#lx/%#lx rp=%#lx\n",
+ current->comm, current->pid, frame, regs->gr[30],
+ regs->iaoq[0], regs->iaoq[1], rp);
+
+ return 1;
+
+give_sigsegv:
+ DBG(1,"setup_rt_frame: sending SIGSEGV\n");
+ force_sigsegv(sig, current);
+ return 0;
+}
+
+/*
+ * OK, we're invoking a handler.
+ */
+
+static long
+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+ sigset_t *oldset, struct pt_regs *regs, int in_syscall)
+{
+ DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n",
+ sig, ka, info, oldset, regs);
+
+ /* Set up the stack frame */
+ if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall))
+ return 0;
+
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ tracehook_signal_handler(sig, info, ka, regs,
+ test_thread_flag(TIF_SINGLESTEP) ||
+ test_thread_flag(TIF_BLOCKSTEP));
+
+ return 1;
+}
+
+static inline void
+syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
+{
+ /* Check the return code */
+ switch (regs->gr[28]) {
+ case -ERESTART_RESTARTBLOCK:
+ current_thread_info()->restart_block.fn =
+ do_no_restart_syscall;
+ case -ERESTARTNOHAND:
+ DBG(1,"ERESTARTNOHAND: returning -EINTR\n");
+ regs->gr[28] = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ DBG(1,"ERESTARTSYS: putting -EINTR\n");
+ regs->gr[28] = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ /* A syscall is just a branch, so all
+ * we have to do is fiddle the return pointer.
+ */
+ regs->gr[31] -= 8; /* delayed branching */
+ /* Preserve original r28. */
+ regs->gr[28] = regs->orig_r28;
+ break;
+ }
+}
+
+static inline void
+insert_restart_trampoline(struct pt_regs *regs)
+{
+ switch(regs->gr[28]) {
+ case -ERESTART_RESTARTBLOCK: {
+ /* Restart the system call - no handlers present */
+ unsigned int *usp = (unsigned int *)regs->gr[30];
+
+ /* Setup a trampoline to restart the syscall
+ * with __NR_restart_syscall
+ *
+ * 0: <return address (orig r31)>
+ * 4: <2nd half for 64-bit>
+ * 8: ldw 0(%sp), %r31
+ * 12: be 0x100(%sr2, %r0)
+ * 16: ldi __NR_restart_syscall, %r20
+ */
+#ifdef CONFIG_64BIT
+ put_user(regs->gr[31] >> 32, &usp[0]);
+ put_user(regs->gr[31] & 0xffffffff, &usp[1]);
+ put_user(0x0fc010df, &usp[2]);
+#else
+ put_user(regs->gr[31], &usp[0]);
+ put_user(0x0fc0109f, &usp[2]);
+#endif
+ put_user(0xe0008200, &usp[3]);
+ put_user(0x34140000, &usp[4]);
+
+ /* Stack is 64-byte aligned, and we only need
+ * to flush 1 cache line.
+ * Flushing one cacheline is cheap.
+ * "sync" on bigger (> 4 way) boxes is not.
+ */
+ flush_user_dcache_range(regs->gr[30], regs->gr[30] + 4);
+ flush_user_icache_range(regs->gr[30], regs->gr[30] + 4);
+
+ regs->gr[31] = regs->gr[30] + 8;
+ /* Preserve original r28. */
+ regs->gr[28] = regs->orig_r28;
+
+ return;
+ }
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR: {
+ /* Hooray for delayed branching. We don't
+ * have to restore %r20 (the system call
+ * number) because it gets loaded in the delay
+ * slot of the branch external instruction.
+ */
+ regs->gr[31] -= 8;
+ /* Preserve original r28. */
+ regs->gr[28] = regs->orig_r28;
+
+ return;
+ }
+ default:
+ break;
+ }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * We need to be able to restore the syscall arguments (r21-r26) to
+ * restart syscalls. Thus, the syscall path should save them in the
+ * pt_regs structure (it's okay to do so since they are caller-save
+ * registers). As noted below, the syscall number gets restored for
+ * us due to the magic of delayed branching.
+ */
+asmlinkage void
+do_signal(struct pt_regs *regs, long in_syscall)
+{
+ siginfo_t info;
+ struct k_sigaction ka;
+ int signr;
+ sigset_t *oldset;
+
+ DBG(1,"\ndo_signal: oldset=0x%p, regs=0x%p, sr7 %#lx, in_syscall=%d\n",
+ oldset, regs, regs->sr[7], in_syscall);
+
+ /* Everyone else checks to see if they are in kernel mode at
+ this point and exits if that's the case. I'm not sure why
+ we would be called in that case, but for some reason we
+ are. */
+
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else
+ oldset = &current->blocked;
+
+ DBG(1,"do_signal: oldset %08lx / %08lx\n",
+ oldset->sig[0], oldset->sig[1]);
+
+
+ /* May need to force signal if handle_signal failed to deliver */
+ while (1) {
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]);
+
+ if (signr <= 0)
+ break;
+
+ /* Restart a system call if necessary. */
+ if (in_syscall)
+ syscall_restart(regs, &ka);
+
+ /* Whee! Actually deliver the signal. If the
+ delivery failed, we need to continue to iterate in
+ this loop so we can deliver the SIGSEGV... */
+ if (handle_signal(signr, &info, &ka, oldset,
+ regs, in_syscall)) {
+ DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
+ regs->gr[28]);
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ return;
+ }
+ }
+ /* end of while(1) looping forever if we can't force a signal */
+
+ /* Did we come from a system call? */
+ if (in_syscall)
+ insert_restart_trampoline(regs);
+
+ DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n",
+ regs->gr[28]);
+
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
+
+ return;
+}
+
+void do_notify_resume(struct pt_regs *regs, long in_syscall)
+{
+ if (test_thread_flag(TIF_SIGPENDING) ||
+ test_thread_flag(TIF_RESTORE_SIGMASK))
+ do_signal(regs, in_syscall);
+
+ if (test_thread_flag(TIF_NOTIFY_RESUME)) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
+}
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
new file mode 100644
index 00000000..e1413243
--- /dev/null
+++ b/arch/parisc/kernel/signal32.c
@@ -0,0 +1,520 @@
+/* Signal support for 32-bit kernel builds
+ *
+ * Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2006 Kyle McMartin <kyle at parisc-linux.org>
+ *
+ * Code was mostly borrowed from kernel/signal.c.
+ * See kernel/signal.c for additional Copyrights.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/compat.h>
+#include <linux/module.h>
+#include <linux/unistd.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#include <asm/uaccess.h>
+
+#include "signal32.h"
+#include "sys32.h"
+
+#define DEBUG_COMPAT_SIG 0
+#define DEBUG_COMPAT_SIG_LEVEL 2
+
+#if DEBUG_COMPAT_SIG
+#define DBG(LEVEL, ...) \
+ ((DEBUG_COMPAT_SIG_LEVEL >= LEVEL) \
+ ? printk(__VA_ARGS__) : (void) 0)
+#else
+#define DBG(LEVEL, ...)
+#endif
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
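+/* The compat sigset carries the signal mask as two 32-bit words.  Since
+ * sigset_t is a single 64-bit word on a 64-bit kernel, both words pack
+ * into sig[0]: word 0 (signals 1-32) in the low half, word 1 in the
+ * high half.
+ */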
+inline void
+sigset_32to64(sigset_t *s64, compat_sigset_t *s32)
+{
+ s64->sig[0] = s32->sig[0] | ((unsigned long)s32->sig[1] << 32);
+}
+
+inline void
+sigset_64to32(compat_sigset_t *s32, sigset_t *s64)
+{
+ s32->sig[0] = s64->sig[0] & 0xffffffffUL;
+ s32->sig[1] = (s64->sig[0] >> 32) & 0xffffffffUL;
+}
+
+static int
+put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
+{
+ compat_sigset_t s;
+
+ if (sz != sizeof *set) panic("put_sigset32()");
+ sigset_64to32(&s, set);
+
+ return copy_to_user(up, &s, sizeof s);
+}
+
+static int
+get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
+{
+ compat_sigset_t s;
+ int r;
+
+	if (sz != sizeof *set) panic("get_sigset32()");
+
+ if ((r = copy_from_user(&s, up, sz)) == 0) {
+ sigset_32to64(set, &s);
+ }
+
+ return r;
+}
+
+int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, compat_sigset_t __user *oset,
+ unsigned int sigsetsize)
+{
+ sigset_t old_set, new_set;
+ int ret;
+
+ if (set && get_sigset32(set, &new_set, sigsetsize))
+ return -EFAULT;
+
+ KERNEL_SYSCALL(ret, sys_rt_sigprocmask, how, set ? (sigset_t __user *)&new_set : NULL,
+ oset ? (sigset_t __user *)&old_set : NULL, sigsetsize);
+
+ if (!ret && oset && put_sigset32(oset, &old_set, sigsetsize))
+ return -EFAULT;
+
+ return ret;
+}
+
+
+int sys32_rt_sigpending(compat_sigset_t __user *uset, unsigned int sigsetsize)
+{
+ int ret;
+ sigset_t set;
+
+ KERNEL_SYSCALL(ret, sys_rt_sigpending, (sigset_t __user *)&set, sigsetsize);
+
+ if (!ret && put_sigset32(uset, &set, sigsetsize))
+ return -EFAULT;
+
+ return ret;
+}
+
+long
+sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, struct sigaction32 __user *oact,
+ size_t sigsetsize)
+{
+ struct k_sigaction32 new_sa32, old_sa32;
+ struct k_sigaction new_sa, old_sa;
+ int ret = -EINVAL;
+
+ if (act) {
+ if (copy_from_user(&new_sa32.sa, act, sizeof new_sa32.sa))
+ return -EFAULT;
+ new_sa.sa.sa_handler = (__sighandler_t)(unsigned long)new_sa32.sa.sa_handler;
+ new_sa.sa.sa_flags = new_sa32.sa.sa_flags;
+ sigset_32to64(&new_sa.sa.sa_mask, &new_sa32.sa.sa_mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
+
+ if (!ret && oact) {
+ sigset_64to32(&old_sa32.sa.sa_mask, &old_sa.sa.sa_mask);
+ old_sa32.sa.sa_flags = old_sa.sa.sa_flags;
+ old_sa32.sa.sa_handler = (__sighandler_t32)(unsigned long)old_sa.sa.sa_handler;
+ if (copy_to_user(oact, &old_sa32.sa, sizeof old_sa32.sa))
+ return -EFAULT;
+ }
+ return ret;
+}
+
+int
+do_sigaltstack32 (const compat_stack_t __user *uss32, compat_stack_t __user *uoss32, unsigned long sp)
+{
+ compat_stack_t ss32, oss32;
+ stack_t ss, oss;
+ stack_t *ssp = NULL, *ossp = NULL;
+ int ret;
+
+ if (uss32) {
+ if (copy_from_user(&ss32, uss32, sizeof ss32))
+ return -EFAULT;
+
+ ss.ss_sp = (void __user *)(unsigned long)ss32.ss_sp;
+ ss.ss_flags = ss32.ss_flags;
+ ss.ss_size = ss32.ss_size;
+
+ ssp = &ss;
+ }
+
+ if (uoss32)
+ ossp = &oss;
+
+ KERNEL_SYSCALL(ret, do_sigaltstack, (const stack_t __user *)ssp, (stack_t __user *)ossp, sp);
+
+ if (!ret && uoss32) {
+ oss32.ss_sp = (unsigned int)(unsigned long)oss.ss_sp;
+ oss32.ss_flags = oss.ss_flags;
+ oss32.ss_size = oss.ss_size;
+ if (copy_to_user(uoss32, &oss32, sizeof *uoss32))
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+long
+restore_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf,
+ struct pt_regs *regs)
+{
+ long err = 0;
+ compat_uint_t compat_reg;
+ compat_uint_t compat_regt;
+ int regn;
+
+ /* When loading 32-bit values into 64-bit registers make
+ sure to clear the upper 32-bits */
+ DBG(2,"restore_sigcontext32: PER_LINUX32 process\n");
+ DBG(2,"restore_sigcontext32: sc = 0x%p, rf = 0x%p, regs = 0x%p\n", sc, rf, regs);
+ DBG(2,"restore_sigcontext32: compat_sigcontext is %#lx bytes\n", sizeof(*sc));
+ for(regn=0; regn < 32; regn++){
+ err |= __get_user(compat_reg,&sc->sc_gr[regn]);
+ regs->gr[regn] = compat_reg;
+ /* Load upper half */
+ err |= __get_user(compat_regt,&rf->rf_gr[regn]);
+ regs->gr[regn] = ((u64)compat_regt << 32) | (u64)compat_reg;
+ DBG(3,"restore_sigcontext32: gr%02d = %#lx (%#x / %#x)\n",
+ regn, regs->gr[regn], compat_regt, compat_reg);
+ }
+ DBG(2,"restore_sigcontext32: sc->sc_fr = 0x%p (%#lx)\n",sc->sc_fr, sizeof(sc->sc_fr));
+ /* XXX: BE WARNED FR's are 64-BIT! */
+ err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr));
+
+ /* Better safe than sorry, pass __get_user two things of
+ the same size and let gcc do the upward conversion to
+ 64-bits */
+ err |= __get_user(compat_reg, &sc->sc_iaoq[0]);
+ /* Load upper half */
+ err |= __get_user(compat_regt, &rf->rf_iaoq[0]);
+ regs->iaoq[0] = ((u64)compat_regt << 32) | (u64)compat_reg;
+ DBG(2,"restore_sigcontext32: upper half of iaoq[0] = %#lx\n", compat_regt);
+ DBG(2,"restore_sigcontext32: sc->sc_iaoq[0] = %p => %#x\n",
+ &sc->sc_iaoq[0], compat_reg);
+
+ err |= __get_user(compat_reg, &sc->sc_iaoq[1]);
+ /* Load upper half */
+ err |= __get_user(compat_regt, &rf->rf_iaoq[1]);
+ regs->iaoq[1] = ((u64)compat_regt << 32) | (u64)compat_reg;
+ DBG(2,"restore_sigcontext32: upper half of iaoq[1] = %#lx\n", compat_regt);
+ DBG(2,"restore_sigcontext32: sc->sc_iaoq[1] = %p => %#x\n",
+ &sc->sc_iaoq[1],compat_reg);
+ DBG(2,"restore_sigcontext32: iaoq is %#lx / %#lx\n",
+ regs->iaoq[0],regs->iaoq[1]);
+
+ err |= __get_user(compat_reg, &sc->sc_iasq[0]);
+ /* Load the upper half for iasq */
+ err |= __get_user(compat_regt, &rf->rf_iasq[0]);
+ regs->iasq[0] = ((u64)compat_regt << 32) | (u64)compat_reg;
+ DBG(2,"restore_sigcontext32: upper half of iasq[0] = %#lx\n", compat_regt);
+
+ err |= __get_user(compat_reg, &sc->sc_iasq[1]);
+ /* Load the upper half for iasq */
+ err |= __get_user(compat_regt, &rf->rf_iasq[1]);
+ regs->iasq[1] = ((u64)compat_regt << 32) | (u64)compat_reg;
+ DBG(2,"restore_sigcontext32: upper half of iasq[1] = %#lx\n", compat_regt);
+ DBG(2,"restore_sigcontext32: iasq is %#lx / %#lx\n",
+ regs->iasq[0],regs->iasq[1]);
+
+ err |= __get_user(compat_reg, &sc->sc_sar);
+ /* Load the upper half for sar */
+ err |= __get_user(compat_regt, &rf->rf_sar);
+ regs->sar = ((u64)compat_regt << 32) | (u64)compat_reg;
+ DBG(2,"restore_sigcontext32: upper_half & sar = %#lx\n", compat_regt);
+ DBG(2,"restore_sigcontext32: sar is %#lx\n", regs->sar);
+ DBG(2,"restore_sigcontext32: r28 is %ld\n", regs->gr[28]);
+
+ return err;
+}
+
+/*
+ * Set up the sigcontext structure for this process.
+ * This is not an easy task if the kernel is 64-bit, it will require
+ * that we examine the process personality to determine if we need to
+ * truncate for a 32-bit userspace.
+ */
+long
+setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf,
+ struct pt_regs *regs, int in_syscall)
+{
+ compat_int_t flags = 0;
+ long err = 0;
+ compat_uint_t compat_reg;
+ compat_uint_t compat_regb;
+ int regn;
+
+ if (on_sig_stack((unsigned long) sc))
+ flags |= PARISC_SC_FLAG_ONSTACK;
+
+ if (in_syscall) {
+
+ DBG(1,"setup_sigcontext32: in_syscall\n");
+
+ flags |= PARISC_SC_FLAG_IN_SYSCALL;
+ /* Truncate gr31 */
+ compat_reg = (compat_uint_t)(regs->gr[31]);
+ /* regs->iaoq is undefined in the syscall return path */
+ err |= __put_user(compat_reg, &sc->sc_iaoq[0]);
+ DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n",
+ &sc->sc_iaoq[0], compat_reg);
+
+ /* Store upper half */
+ compat_reg = (compat_uint_t)(regs->gr[31] >> 32);
+ err |= __put_user(compat_reg, &rf->rf_iaoq[0]);
+ DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg);
+
+
+ compat_reg = (compat_uint_t)(regs->gr[31]+4);
+ err |= __put_user(compat_reg, &sc->sc_iaoq[1]);
+ DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n",
+ &sc->sc_iaoq[1], compat_reg);
+ /* Store upper half */
+ compat_reg = (compat_uint_t)((regs->gr[31]+4) >> 32);
+ err |= __put_user(compat_reg, &rf->rf_iaoq[1]);
+ DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg);
+
+ /* Truncate sr3 */
+ compat_reg = (compat_uint_t)(regs->sr[3]);
+ err |= __put_user(compat_reg, &sc->sc_iasq[0]);
+ err |= __put_user(compat_reg, &sc->sc_iasq[1]);
+
+ /* Store upper half */
+ compat_reg = (compat_uint_t)(regs->sr[3] >> 32);
+ err |= __put_user(compat_reg, &rf->rf_iasq[0]);
+ err |= __put_user(compat_reg, &rf->rf_iasq[1]);
+
+ DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg);
+ DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg);
+ DBG(1,"setup_sigcontext32: iaoq %#lx / %#lx\n",
+ regs->gr[31], regs->gr[31]+4);
+
+ } else {
+
+ compat_reg = (compat_uint_t)(regs->iaoq[0]);
+ err |= __put_user(compat_reg, &sc->sc_iaoq[0]);
+ DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n",
+ &sc->sc_iaoq[0], compat_reg);
+ /* Store upper half */
+ compat_reg = (compat_uint_t)(regs->iaoq[0] >> 32);
+ err |= __put_user(compat_reg, &rf->rf_iaoq[0]);
+ DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg);
+
+ compat_reg = (compat_uint_t)(regs->iaoq[1]);
+ err |= __put_user(compat_reg, &sc->sc_iaoq[1]);
+ DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n",
+ &sc->sc_iaoq[1], compat_reg);
+ /* Store upper half */
+ compat_reg = (compat_uint_t)(regs->iaoq[1] >> 32);
+ err |= __put_user(compat_reg, &rf->rf_iaoq[1]);
+ DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg);
+
+
+ compat_reg = (compat_uint_t)(regs->iasq[0]);
+ err |= __put_user(compat_reg, &sc->sc_iasq[0]);
+ DBG(2,"setup_sigcontext32: sc->sc_iasq[0] = %p <= %#x\n",
+ &sc->sc_iasq[0], compat_reg);
+ /* Store upper half */
+ compat_reg = (compat_uint_t)(regs->iasq[0] >> 32);
+ err |= __put_user(compat_reg, &rf->rf_iasq[0]);
+ DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg);
+
+
+ compat_reg = (compat_uint_t)(regs->iasq[1]);
+ err |= __put_user(compat_reg, &sc->sc_iasq[1]);
+ DBG(2,"setup_sigcontext32: sc->sc_iasq[1] = %p <= %#x\n",
+ &sc->sc_iasq[1], compat_reg);
+ /* Store upper half */
+ compat_reg = (compat_uint_t)(regs->iasq[1] >> 32);
+ err |= __put_user(compat_reg, &rf->rf_iasq[1]);
+ DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg);
+
+ /* Print out the IAOQ for debugging */
+		DBG(1,"setup_sigcontext32: iaoq %#lx / %#lx\n",
+ regs->iaoq[0], regs->iaoq[1]);
+ }
+
+ err |= __put_user(flags, &sc->sc_flags);
+
+ DBG(1,"setup_sigcontext32: Truncating general registers.\n");
+
+ for(regn=0; regn < 32; regn++){
+ /* Truncate a general register */
+ compat_reg = (compat_uint_t)(regs->gr[regn]);
+ err |= __put_user(compat_reg, &sc->sc_gr[regn]);
+ /* Store upper half */
+ compat_regb = (compat_uint_t)(regs->gr[regn] >> 32);
+ err |= __put_user(compat_regb, &rf->rf_gr[regn]);
+
+ /* DEBUG: Write out the "upper / lower" register data */
+ DBG(2,"setup_sigcontext32: gr%02d = %#x / %#x\n", regn,
+ compat_regb, compat_reg);
+ }
+
+ /* Copy the floating point registers (same size)
+ XXX: BE WARNED FR's are 64-BIT! */
+ DBG(1,"setup_sigcontext32: Copying from regs to sc, "
+ "sc->sc_fr size = %#lx, regs->fr size = %#lx\n",
+ sizeof(regs->fr), sizeof(sc->sc_fr));
+ err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr));
+
+ compat_reg = (compat_uint_t)(regs->sar);
+ err |= __put_user(compat_reg, &sc->sc_sar);
+ DBG(2,"setup_sigcontext32: sar is %#x\n", compat_reg);
+ /* Store upper half */
+ compat_reg = (compat_uint_t)(regs->sar >> 32);
+ err |= __put_user(compat_reg, &rf->rf_sar);
+ DBG(2,"setup_sigcontext32: upper half sar = %#x\n", compat_reg);
+ DBG(1,"setup_sigcontext32: r28 is %ld\n", regs->gr[28]);
+
+ return err;
+}
+
+int
+copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
+{
+ compat_uptr_t addr;
+ int err;
+
+ if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
+ return -EFAULT;
+
+ err = __get_user(to->si_signo, &from->si_signo);
+ err |= __get_user(to->si_errno, &from->si_errno);
+ err |= __get_user(to->si_code, &from->si_code);
+
+ if (to->si_code < 0)
+ err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
+ else {
+ switch (to->si_code >> 16) {
+ case __SI_CHLD >> 16:
+ err |= __get_user(to->si_utime, &from->si_utime);
+ err |= __get_user(to->si_stime, &from->si_stime);
+ err |= __get_user(to->si_status, &from->si_status);
+ default:
+ err |= __get_user(to->si_pid, &from->si_pid);
+ err |= __get_user(to->si_uid, &from->si_uid);
+ break;
+ case __SI_FAULT >> 16:
+ err |= __get_user(addr, &from->si_addr);
+ to->si_addr = compat_ptr(addr);
+ break;
+ case __SI_POLL >> 16:
+ err |= __get_user(to->si_band, &from->si_band);
+ err |= __get_user(to->si_fd, &from->si_fd);
+ break;
+ case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
+ case __SI_MESGQ >> 16:
+ err |= __get_user(to->si_pid, &from->si_pid);
+ err |= __get_user(to->si_uid, &from->si_uid);
+ err |= __get_user(to->si_int, &from->si_int);
+ break;
+ }
+ }
+ return err;
+}
+
+int
+copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from)
+{
+ compat_uptr_t addr;
+ compat_int_t val;
+ int err;
+
+ if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
+ return -EFAULT;
+
+ /* If you change siginfo_t structure, please be sure
+ this code is fixed accordingly.
+ It should never copy any pad contained in the structure
+ to avoid security leaks, but must copy the generic
+ 3 ints plus the relevant union member.
+ This routine must convert siginfo from 64bit to 32bit as well
+ at the same time. */
+ err = __put_user(from->si_signo, &to->si_signo);
+ err |= __put_user(from->si_errno, &to->si_errno);
+ err |= __put_user((short)from->si_code, &to->si_code);
+ if (from->si_code < 0)
+ err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
+ else {
+ switch (from->si_code >> 16) {
+ case __SI_CHLD >> 16:
+ err |= __put_user(from->si_utime, &to->si_utime);
+ err |= __put_user(from->si_stime, &to->si_stime);
+ err |= __put_user(from->si_status, &to->si_status);
+ default:
+ err |= __put_user(from->si_pid, &to->si_pid);
+ err |= __put_user(from->si_uid, &to->si_uid);
+ break;
+ case __SI_FAULT >> 16:
+ addr = ptr_to_compat(from->si_addr);
+ err |= __put_user(addr, &to->si_addr);
+ break;
+ case __SI_POLL >> 16:
+ err |= __put_user(from->si_band, &to->si_band);
+ err |= __put_user(from->si_fd, &to->si_fd);
+ break;
+ case __SI_TIMER >> 16:
+ err |= __put_user(from->si_tid, &to->si_tid);
+ err |= __put_user(from->si_overrun, &to->si_overrun);
+ val = (compat_int_t)from->si_int;
+ err |= __put_user(val, &to->si_int);
+ break;
+ case __SI_RT >> 16: /* Not generated by the kernel as of now. */
+ case __SI_MESGQ >> 16:
+ err |= __put_user(from->si_uid, &to->si_uid);
+ err |= __put_user(from->si_pid, &to->si_pid);
+ val = (compat_int_t)from->si_int;
+ err |= __put_user(val, &to->si_int);
+ break;
+ }
+ }
+ return err;
+}
+
+asmlinkage long compat_sys_rt_sigqueueinfo(int pid, int sig,
+ struct compat_siginfo __user *uinfo)
+{
+ siginfo_t info;
+
+ if (copy_siginfo_from_user32(&info, uinfo))
+ return -EFAULT;
+
+ /* Not even root can pretend to send signals from the kernel.
+ Nor can they impersonate a kill(), which adds source info. */
+ if (info.si_code >= 0)
+ return -EPERM;
+ info.si_signo = sig;
+
+ /* POSIX.1b doesn't mention process groups. */
+ return kill_proc_info(sig, &info, pid);
+}
+
diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h
new file mode 100644
index 00000000..c7800846
--- /dev/null
+++ b/arch/parisc/kernel/signal32.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2003 Carlos O'Donell <carlos at parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _PARISC64_KERNEL_SIGNAL32_H
+#define _PARISC64_KERNEL_SIGNAL32_H
+
+#include <linux/compat.h>
+
+typedef compat_uptr_t compat_sighandler_t;
+
+typedef struct compat_sigaltstack {
+ compat_uptr_t ss_sp;
+ compat_int_t ss_flags;
+ compat_size_t ss_size;
+} compat_stack_t;
+
+/* Most things should be clean enough to redefine this at will, if care
+ is taken to make libc match. */
+
+struct compat_sigaction {
+ compat_sighandler_t sa_handler;
+ compat_uint_t sa_flags;
+ compat_sigset_t sa_mask; /* mask last for extensibility */
+};
+
+/* 32-bit ucontext as seen from a 64-bit kernel */
+struct compat_ucontext {
+ compat_uint_t uc_flags;
+ compat_uptr_t uc_link;
+ compat_stack_t uc_stack; /* struct compat_sigaltstack (12 bytes)*/
+ /* FIXME: Pad out to get uc_mcontext to start at an 8-byte aligned boundary */
+ compat_uint_t pad[1];
+ struct compat_sigcontext uc_mcontext;
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+/* ELF32 signal handling */
+
+struct k_sigaction32 {
+ struct compat_sigaction sa;
+};
+
+typedef struct compat_siginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ int _pad[((128/sizeof(int)) - 3)];
+
+ /* kill() */
+ struct {
+ unsigned int _pid; /* sender's pid */
+ unsigned int _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ compat_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ char _pad[sizeof(unsigned int) - sizeof(int)];
+ compat_sigval_t _sigval; /* same as below */
+ int _sys_private; /* not to be passed to user */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ unsigned int _pid; /* sender's pid */
+ unsigned int _uid; /* sender's uid */
+ compat_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ unsigned int _pid; /* which child */
+ unsigned int _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_clock_t _utime;
+ compat_clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct {
+ unsigned int _addr; /* faulting insn/memory ref. */
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} compat_siginfo_t;
+
+int copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from);
+int copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from);
+
+/* In a deft move of uber-hackery, we decide to carry the top half of all
+ * 64-bit registers in a non-portable, non-ABI, hidden structure.
+ * Userspace can read the hidden structure if it *wants*, but it is never
+ * guaranteed to be in the same place. In fact the uc_sigmask from the
+ * ucontext_t structure may push the hidden register file downwards
+ */
+struct compat_regfile {
+ /* Upper half of all the 64-bit registers that were truncated
+ on a copy to a 32-bit userspace */
+ compat_int_t rf_gr[32];
+ compat_int_t rf_iasq[2];
+ compat_int_t rf_iaoq[2];
+ compat_int_t rf_sar;
+};
+
+#define COMPAT_SIGRETURN_TRAMP 4
+#define COMPAT_SIGRESTARTBLOCK_TRAMP 5
+#define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + \
+ COMPAT_SIGRESTARTBLOCK_TRAMP)
+
+struct compat_rt_sigframe {
+ /* XXX: Must match trampoline size in arch/parisc/kernel/signal.c
+ Secondary to that it must protect the ERESTART_RESTARTBLOCK
+ trampoline we left on the stack (we were bad and didn't
+ change sp so we could run really fast.) */
+ compat_uint_t tramp[COMPAT_TRAMP_SIZE];
+ compat_siginfo_t info;
+ struct compat_ucontext uc;
+ /* Hidden location of truncated registers, *must* be last. */
+ struct compat_regfile regs;
+};
+
+/*
+ * The 32-bit ABI wants at least 48 bytes for a function call frame:
+ * 16 bytes for arg0-arg3, and 32 bytes for magic (the only part of
+ * which Linux/parisc uses is sp-20 for the saved return pointer...)
+ * Then, the stack pointer must be rounded to a cache line (64 bytes).
+ */
+#define SIGFRAME32 64
+#define FUNCTIONCALLFRAME32 48
+#define PARISC_RT_SIGFRAME_SIZE32 (((sizeof(struct compat_rt_sigframe) + FUNCTIONCALLFRAME32) + SIGFRAME32) & -SIGFRAME32)
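+/* i.e. the compat signal frame plus a 48-byte call frame, rounded up to
+ * a 64-byte (cache line) multiple. */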
+
+void sigset_32to64(sigset_t *s64, compat_sigset_t *s32);
+void sigset_64to32(compat_sigset_t *s32, sigset_t *s64);
+int do_sigaltstack32 (const compat_stack_t __user *uss32,
+ compat_stack_t __user *uoss32, unsigned long sp);
+long restore_sigcontext32(struct compat_sigcontext __user *sc,
+ struct compat_regfile __user *rf,
+ struct pt_regs *regs);
+long setup_sigcontext32(struct compat_sigcontext __user *sc,
+ struct compat_regfile __user *rf,
+ struct pt_regs *regs, int in_syscall);
+
+#endif
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
new file mode 100644
index 00000000..828305f1
--- /dev/null
+++ b/arch/parisc/kernel/smp.c
@@ -0,0 +1,468 @@
+/*
+** SMP Support
+**
+** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
+**
+** Lots of stuff stolen from arch/alpha/kernel/smp.c
+** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
+**
+** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
+** -grant (1/12/2001)
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+*/
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel_stat.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/ftrace.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/current.h>
+#include <asm/delay.h>
+#include <asm/tlbflush.h>
+
+#include <asm/io.h>
+#include <asm/irq.h> /* for CPU_IRQ_REGION and friends */
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/unistd.h>
+#include <asm/cacheflush.h>
+
+#undef DEBUG_SMP
+#ifdef DEBUG_SMP
+static int smp_debug_lvl = 0;
+#define smp_debug(lvl, printargs...)		\
+	do {					\
+		if (lvl >= smp_debug_lvl)	\
+			printk(printargs);	\
+	} while (0)
+#else
+#define smp_debug(lvl, ...) do { } while(0)
+#endif /* DEBUG_SMP */
+
+volatile struct task_struct *smp_init_current_idle_task;
+
+/* track which CPU is booting */
+static volatile int cpu_now_booting __cpuinitdata;
+
+static int parisc_max_cpus __cpuinitdata = 1;
+
+static DEFINE_PER_CPU(spinlock_t, ipi_lock);
+
+enum ipi_message_type {
+ IPI_NOP=0,
+ IPI_RESCHEDULE=1,
+ IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
+ IPI_CPU_START,
+ IPI_CPU_STOP,
+ IPI_CPU_TEST
+};
+
+
+/********** SMP inter processor interrupt and communication routines */
+
+#undef PER_CPU_IRQ_REGION
+#ifdef PER_CPU_IRQ_REGION
+/* XXX REVISIT Ignore for now.
+** *May* need this "hook" to register IPI handler
+** once we have perCPU ExtIntr switch tables.
+*/
+static void
+ipi_init(int cpuid)
+{
+#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region
+
+ if(cpu_online(cpuid) )
+ {
+ switch_to_idle_task(current);
+ }
+
+ return;
+}
+#endif
+
+
+/*
+** Yoink this CPU from the runnable list...
+**
+*/
+static void
+halt_processor(void)
+{
+ /* REVISIT : redirect I/O Interrupts to another CPU? */
+ /* REVISIT : does PM *know* this CPU isn't available? */
+ set_cpu_online(smp_processor_id(), false);
+ local_irq_disable();
+ for (;;)
+ ;
+}
+
+
+irqreturn_t __irq_entry
+ipi_interrupt(int irq, void *dev_id)
+{
+ int this_cpu = smp_processor_id();
+ struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
+ unsigned long ops;
+ unsigned long flags;
+
+ /* Count this now; we may make a call that never returns. */
+ p->ipi_count++;
+
+ mb(); /* Order interrupt and bit testing. */
+
+ for (;;) {
+ spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
+ spin_lock_irqsave(lock, flags);
+ ops = p->pending_ipi;
+ p->pending_ipi = 0;
+ spin_unlock_irqrestore(lock, flags);
+
+ mb(); /* Order bit clearing and data access. */
+
+ if (!ops)
+ break;
+
+ while (ops) {
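+			/* ffz(~ops) yields the index of the lowest set bit in ops */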
+ unsigned long which = ffz(~ops);
+
+ ops &= ~(1 << which);
+
+ switch (which) {
+ case IPI_NOP:
+ smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
+ break;
+
+ case IPI_RESCHEDULE:
+ smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
+ scheduler_ipi();
+ break;
+
+ case IPI_CALL_FUNC:
+ smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
+ smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
+ generic_smp_call_function_single_interrupt();
+ break;
+
+ case IPI_CPU_START:
+ smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
+ break;
+
+ case IPI_CPU_STOP:
+ smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
+ halt_processor();
+ break;
+
+ case IPI_CPU_TEST:
+ smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
+ break;
+
+ default:
+ printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
+ this_cpu, which);
+ return IRQ_NONE;
+ } /* Switch */
+ /* let in any pending interrupts */
+ local_irq_enable();
+ local_irq_disable();
+ } /* while (ops) */
+ }
+ return IRQ_HANDLED;
+}
+
+
+static inline void
+ipi_send(int cpu, enum ipi_message_type op)
+{
+ struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
+ spinlock_t *lock = &per_cpu(ipi_lock, cpu);
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ p->pending_ipi |= 1 << op;
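+	/* Writing the IPI IRQ offset to the target CPU's HPA sets the
+	 * corresponding bit in its EIR and raises the interrupt there. */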
+ gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static void
+send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
+{
+ int cpu;
+
+ for_each_cpu(cpu, mask)
+ ipi_send(cpu, op);
+}
+
+static inline void
+send_IPI_single(int dest_cpu, enum ipi_message_type op)
+{
+ BUG_ON(dest_cpu == NO_PROC_ID);
+
+ ipi_send(dest_cpu, op);
+}
+
+static inline void
+send_IPI_allbutself(enum ipi_message_type op)
+{
+ int i;
+
+ for_each_online_cpu(i) {
+ if (i != smp_processor_id())
+ send_IPI_single(i, op);
+ }
+}
+
+
+inline void
+smp_send_stop(void) { send_IPI_allbutself(IPI_CPU_STOP); }
+
+static inline void
+smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); }
+
+void
+smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
+
+void
+smp_send_all_nop(void)
+{
+ send_IPI_allbutself(IPI_NOP);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ send_IPI_mask(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
+}
+
+/*
+ * Flush all other CPU's tlb and then mine. Do this with on_each_cpu()
+ * as we want to ensure all TLB's flushed before proceeding.
+ */
+
+void
+smp_flush_tlb_all(void)
+{
+ on_each_cpu(flush_tlb_all_local, NULL, 1);
+}
+
+/*
+ * Called by secondaries to update state and initialize CPU registers.
+ */
+static void __init
+smp_cpu_init(int cpunum)
+{
+ extern int init_per_cpu(int); /* arch/parisc/kernel/processor.c */
+ extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */
+ extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */
+
+ /* Set modes and Enable floating point coprocessor */
+ (void) init_per_cpu(cpunum);
+
+ disable_sr_hashing();
+
+ mb();
+
+ /* Well, support 2.4 linux scheme as well. */
+ if (cpu_isset(cpunum, cpu_online_map))
+ {
+ extern void machine_halt(void); /* arch/parisc.../process.c */
+
+ printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
+ machine_halt();
+ }
+ set_cpu_online(cpunum, true);
+
+ /* Initialise the idle task for this CPU */
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ BUG_ON(current->mm);
+ enter_lazy_tlb(&init_mm, current);
+
+ init_IRQ(); /* make sure no IRQs are enabled or pending */
+ start_cpu_itimer();
+}
+
+
+/*
+ * Slaves start using C here. Indirectly called from smp_slave_stext.
+ * Do what start_kernel() and main() do for boot strap processor (aka monarch)
+ */
+void __init smp_callin(void)
+{
+ int slave_id = cpu_now_booting;
+
+ smp_cpu_init(slave_id);
+ preempt_disable();
+
+ flush_cache_all_local(); /* start with known state */
+ flush_tlb_all_local(NULL);
+
+ local_irq_enable(); /* Interrupts have been off until now */
+
+ cpu_idle(); /* Wait for timer to schedule some work */
+
+ /* NOTREACHED */
+ panic("smp_callin() AAAAaaaaahhhh....\n");
+}
+
+/*
+ * Bring one cpu online.
+ */
+int __cpuinit smp_boot_one_cpu(int cpuid)
+{
+ const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
+ struct task_struct *idle;
+ long timeout;
+
+ /*
+	 * Create an idle task for this CPU. Note that the address we give
+ * to kernel_thread is irrelevant -- it's going to start
+ * where OS_BOOT_RENDEVZ vector in SAL says to start. But
+ * this gets all the other task-y sort of data structures set
+ * up like we wish. We need to pull the just created idle task
+ * off the run queue and stuff it into the init_tasks[] array.
+ * Sheesh . . .
+ */
+
+ idle = fork_idle(cpuid);
+ if (IS_ERR(idle))
+ panic("SMP: fork failed for CPU:%d", cpuid);
+
+ task_thread_info(idle)->cpu = cpuid;
+
+ /* Let _start know what logical CPU we're booting
+ ** (offset into init_tasks[],cpu_data[])
+ */
+ cpu_now_booting = cpuid;
+
+ /*
+ ** boot strap code needs to know the task address since
+ ** it also contains the process stack.
+ */
+ smp_init_current_idle_task = idle ;
+ mb();
+
+ printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
+
+ /*
+ ** This gets PDC to release the CPU from a very tight loop.
+ **
+ ** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
+ ** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
+ ** is executed after receiving the rendezvous signal (an interrupt to
+ ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
+ ** contents of memory are valid."
+ */
+ gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
+ mb();
+
+ /*
+ * OK, wait a bit for that CPU to finish staggering about.
+ * Slave will set a bit when it reaches smp_cpu_init().
+ * Once the "monarch CPU" sees the bit change, it can move on.
+ */
+ for (timeout = 0; timeout < 10000; timeout++) {
+ if(cpu_online(cpuid)) {
+ /* Which implies Slave has started up */
+ cpu_now_booting = 0;
+ smp_init_current_idle_task = NULL;
+ goto alive ;
+ }
+ udelay(100);
+ barrier();
+ }
+
+ put_task_struct(idle);
+ idle = NULL;
+
+ printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
+ return -1;
+
+alive:
+ /* Remember the Slave data */
+ smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
+ cpuid, timeout * 100);
+ return 0;
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+ int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
+
+ /* Setup BSP mappings */
+ printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
+
+ set_cpu_online(bootstrap_processor, true);
+ set_cpu_present(bootstrap_processor, true);
+}
+
+
+
+/*
+** inventory.c:do_inventory() hasn't yet been run and thus we
+** don't 'discover' the additional CPUs until later.
+*/
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&per_cpu(ipi_lock, cpu));
+
+ init_cpu_present(cpumask_of(0));
+
+ parisc_max_cpus = max_cpus;
+ if (!max_cpus)
+ printk(KERN_INFO "SMP mode deactivated.\n");
+}
+
+
+void smp_cpus_done(unsigned int cpu_max)
+{
+ return;
+}
+
+
+int __cpuinit __cpu_up(unsigned int cpu)
+{
+ if (cpu != 0 && cpu < parisc_max_cpus)
+ smp_boot_one_cpu(cpu);
+
+ return cpu_online(cpu) ? 0 : -ENOSYS;
+}
+
+#ifdef CONFIG_PROC_FS
+int __init
+setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+#endif
diff --git a/arch/parisc/kernel/stacktrace.c b/arch/parisc/kernel/stacktrace.c
new file mode 100644
index 00000000..2fe914c5
--- /dev/null
+++ b/arch/parisc/kernel/stacktrace.c
@@ -0,0 +1,63 @@
+/*
+ * Stack trace management functions
+ *
+ * Copyright (C) 2009 Helge Deller <deller@gmx.de>
+ * based on arch/x86/kernel/stacktrace.c by Ingo Molnar <mingo@redhat.com>
+ * and parisc unwind functions by Randolph Chung <tausq@debian.org>
+ *
+ * TODO: Userspace stacktrace (CONFIG_USER_STACKTRACE_SUPPORT)
+ */
+#include <linux/module.h>
+#include <linux/stacktrace.h>
+
+#include <asm/unwind.h>
+
+static void dump_trace(struct task_struct *task, struct stack_trace *trace)
+{
+ struct unwind_frame_info info;
+
+ /* initialize unwind info */
+ if (task == current) {
+ unsigned long sp;
+ struct pt_regs r;
+HERE:
+ asm volatile ("copy %%r30, %0" : "=r"(sp));
+ memset(&r, 0, sizeof(struct pt_regs));
+ r.iaoq[0] = (unsigned long)&&HERE;
+ r.gr[2] = (unsigned long)__builtin_return_address(0);
+ r.gr[30] = sp;
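+		/* gr[2] is the return pointer and gr[30] the stack pointer on parisc */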
+ unwind_frame_init(&info, task, &r);
+ } else {
+ unwind_frame_init_from_blocked_task(&info, task);
+ }
+
+ /* unwind stack and save entries in stack_trace struct */
+ trace->nr_entries = 0;
+ while (trace->nr_entries < trace->max_entries) {
+ if (unwind_once(&info) < 0 || info.ip == 0)
+ break;
+
+ if (__kernel_text_address(info.ip))
+ trace->entries[trace->nr_entries++] = info.ip;
+ }
+}
+
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace)
+{
+ dump_trace(current, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ dump_trace(tsk, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/parisc/kernel/sys32.h b/arch/parisc/kernel/sys32.h
new file mode 100644
index 00000000..06c2090c
--- /dev/null
+++ b/arch/parisc/kernel/sys32.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2002 Richard Hirst <rhirst at parisc-linux.org>
+ * Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
+ * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _PARISC64_KERNEL_SYS32_H
+#define _PARISC64_KERNEL_SYS32_H
+
+#include <linux/compat.h>
+
+/* Call a kernel syscall which will use kernel space instead of user
+ * space for its copy_to/from_user.
+ */
+#define KERNEL_SYSCALL(ret, syscall, args...) \
+{ \
+ mm_segment_t old_fs = get_fs(); \
+ set_fs(KERNEL_DS); \
+ ret = syscall(args); \
+ set_fs (old_fs); \
+}
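+/* Typical use (see signal32.c):
+ *	KERNEL_SYSCALL(ret, sys_rt_sigpending, (sigset_t __user *)&set, sigsetsize);
+ * lets a compat wrapper hand a kernel buffer to a syscall that expects a
+ * __user pointer.
+ */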
+
+#ifdef CONFIG_COMPAT
+
+typedef __u32 __sighandler_t32;
+
+struct sigaction32 {
+ __sighandler_t32 sa_handler;
+ unsigned int sa_flags;
+ compat_sigset_t sa_mask; /* mask last for extensibility */
+};
+
+#endif
+
+#endif
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
new file mode 100644
index 00000000..c9b93226
--- /dev/null
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -0,0 +1,236 @@
+
+/*
+ * PARISC specific syscalls
+ *
+ * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/uaccess.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/linkage.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/shm.h>
+#include <linux/syscalls.h>
+#include <linux/utsname.h>
+#include <linux/personality.h>
+
+static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+{
+ struct vm_area_struct *vma;
+
+ addr = PAGE_ALIGN(addr);
+
+ for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+ if (!vma || addr + len <= vma->vm_start)
+ return addr;
+ addr = vma->vm_end;
+ }
+}
+
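+/* Round up to SHMLBA so that shared mappings share a data-cache colour;
+ * parisc caches are virtually indexed, so aliases must line up. */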
+#define DCACHE_ALIGN(addr) (((addr) + (SHMLBA - 1)) &~ (SHMLBA - 1))
+
+/*
+ * We need to know the offset to use. Old scheme was to look for
+ * existing mapping and use the same offset. New scheme is to use the
+ * address of the kernel data structure as the seed for the offset.
+ * We'll see how that works...
+ *
+ * The mapping is cacheline aligned, so there's no information in the bottom
+ * few bits of the address. We're looking for 10 bits (4MB / 4k), so let's
+ * drop the bottom 8 bits and use bits 8-17.
+ */
+static int get_offset(struct address_space *mapping)
+{
+ int offset = (unsigned long) mapping << (PAGE_SHIFT - 8);
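+	/* keep 10 page-aligned bits, i.e. a page offset within a 4MB window */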
+ return offset & 0x3FF000;
+}
+
+static unsigned long get_shared_area(struct address_space *mapping,
+ unsigned long addr, unsigned long len, unsigned long pgoff)
+{
+ struct vm_area_struct *vma;
+ int offset = mapping ? get_offset(mapping) : 0;
+
+ addr = DCACHE_ALIGN(addr - offset) + offset;
+
+ for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+ if (!vma || addr + len <= vma->vm_start)
+ return addr;
+ addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
+ if (addr < vma->vm_end) /* handle wraparound */
+ return -ENOMEM;
+ }
+}
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+ /* Might want to check for cache aliasing issues for MAP_FIXED case
+ * like ARM or MIPS ??? --BenH.
+ */
+ if (flags & MAP_FIXED)
+ return addr;
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+ if (filp) {
+ addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
+ } else if(flags & MAP_SHARED) {
+ addr = get_shared_area(NULL, addr, len, pgoff);
+ } else {
+ addr = get_unshared_area(addr, len);
+ }
+ return addr;
+}
+
+asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long fd,
+ unsigned long pgoff)
+{
+ /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
+ we have. */
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT - 12));
+}
+
+asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long fd,
+ unsigned long offset)
+{
+ if (!(offset & ~PAGE_MASK)) {
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ offset >> PAGE_SHIFT);
+ } else {
+ return -EINVAL;
+ }
+}
+
+/* Fucking broken ABI */
+
+#ifdef CONFIG_64BIT
+asmlinkage long parisc_truncate64(const char __user * path,
+ unsigned int high, unsigned int low)
+{
+ return sys_truncate(path, (long)high << 32 | low);
+}
+
+asmlinkage long parisc_ftruncate64(unsigned int fd,
+ unsigned int high, unsigned int low)
+{
+ return sys_ftruncate(fd, (long)high << 32 | low);
+}
+
+/* stubs for the benefit of the syscall_table since truncate64 and truncate
+ * are identical on LP64 */
+asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
+{
+ return sys_truncate(path, length);
+}
+asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
+{
+ return sys_ftruncate(fd, length);
+}
+asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ return sys_fcntl(fd, cmd, arg);
+}
+#else
+
+asmlinkage long parisc_truncate64(const char __user * path,
+ unsigned int high, unsigned int low)
+{
+ return sys_truncate64(path, (loff_t)high << 32 | low);
+}
+
+asmlinkage long parisc_ftruncate64(unsigned int fd,
+ unsigned int high, unsigned int low)
+{
+ return sys_ftruncate64(fd, (loff_t)high << 32 | low);
+}
+#endif
+
+asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
+ unsigned int high, unsigned int low)
+{
+ return sys_pread64(fd, buf, count, (loff_t)high << 32 | low);
+}
+
+asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
+ size_t count, unsigned int high, unsigned int low)
+{
+ return sys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
+}
+
+asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
+ size_t count)
+{
+ return sys_readahead(fd, (loff_t)high << 32 | low, count);
+}
+
+asmlinkage long parisc_fadvise64_64(int fd,
+ unsigned int high_off, unsigned int low_off,
+ unsigned int high_len, unsigned int low_len, int advice)
+{
+ return sys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
+ (loff_t)high_len << 32 | low_len, advice);
+}
+
+asmlinkage long parisc_sync_file_range(int fd,
+ u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
+ unsigned int flags)
+{
+ return sys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
+ (loff_t)hi_nbytes << 32 | lo_nbytes, flags);
+}
+
+asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag)
+{
+ return -ENOMEM;
+}
+
+asmlinkage int sys_free_hugepages(unsigned long addr)
+{
+ return -EINVAL;
+}
+
+long parisc_personality(unsigned long personality)
+{
+ long err;
+
+ if (personality(current->personality) == PER_LINUX32
+ && personality == PER_LINUX)
+ personality = PER_LINUX32;
+
+ err = sys_personality(personality);
+ if (err == PER_LINUX32)
+ err = PER_LINUX;
+
+ return err;
+}
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
new file mode 100644
index 00000000..dc9a6246
--- /dev/null
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -0,0 +1,238 @@
+/*
+ * sys_parisc32.c: Conversion between 32bit and 64bit native syscalls.
+ *
+ * Copyright (C) 2000-2001 Hewlett Packard Company
+ * Copyright (C) 2000 John Marvin
+ * Copyright (C) 2001 Matthew Wilcox
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * environment. Based heavily on sys_ia32.c and sys_sparc32.c.
+ */
+
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/signal.h>
+#include <linux/resource.h>
+#include <linux/times.h>
+#include <linux/time.h>
+#include <linux/smp.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/ncp_fs.h>
+#include <linux/poll.h>
+#include <linux/personality.h>
+#include <linux/stat.h>
+#include <linux/highmem.h>
+#include <linux/highuid.h>
+#include <linux/mman.h>
+#include <linux/binfmts.h>
+#include <linux/namei.h>
+#include <linux/vfs.h>
+#include <linux/ptrace.h>
+#include <linux/swap.h>
+#include <linux/syscalls.h>
+
+#include <asm/types.h>
+#include <asm/uaccess.h>
+#include <asm/mmu_context.h>
+
+#include "sys32.h"
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x) printk x
+#else
+#define DBG(x)
+#endif
+
+/*
+ * sys32_execve() executes a new program.
+ */
+
+asmlinkage int sys32_execve(struct pt_regs *regs)
+{
+ int error;
+ char *filename;
+
+ DBG(("sys32_execve(%p) r26 = 0x%lx\n", regs, regs->gr[26]));
+ filename = getname((const char __user *) regs->gr[26]);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = compat_do_execve(filename, compat_ptr(regs->gr[25]),
+ compat_ptr(regs->gr[24]), regs);
+ putname(filename);
+out:
+
+ return error;
+}
+
+asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
+ int r22, int r21, int r20)
+{
+ printk(KERN_ERR "%s(%d): Unimplemented 32 on 64 syscall #%d!\n",
+ current->comm, current->pid, r20);
+ return -ENOSYS;
+}
+
+asmlinkage long sys32_sched_rr_get_interval(pid_t pid,
+ struct compat_timespec __user *interval)
+{
+ struct timespec t;
+ int ret;
+
+ KERNEL_SYSCALL(ret, sys_sched_rr_get_interval, pid, (struct timespec __user *)&t);
+ if (put_compat_timespec(&t, interval))
+ return -EFAULT;
+ return ret;
+}
+
+struct msgbuf32 {
+ int mtype;
+ char mtext[1];
+};
+
+asmlinkage long sys32_msgsnd(int msqid,
+ struct msgbuf32 __user *umsgp32,
+ size_t msgsz, int msgflg)
+{
+ struct msgbuf *mb;
+ struct msgbuf32 mb32;
+ int err;
+
+ if ((mb = kmalloc(msgsz + sizeof *mb + 4, GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+
+ err = get_user(mb32.mtype, &umsgp32->mtype);
+ mb->mtype = mb32.mtype;
+ err |= copy_from_user(mb->mtext, &umsgp32->mtext, msgsz);
+
+ if (err)
+ err = -EFAULT;
+ else
+ KERNEL_SYSCALL(err, sys_msgsnd, msqid, (struct msgbuf __user *)mb, msgsz, msgflg);
+
+ kfree(mb);
+ return err;
+}
+
+asmlinkage long sys32_msgrcv(int msqid,
+ struct msgbuf32 __user *umsgp32,
+ size_t msgsz, long msgtyp, int msgflg)
+{
+ struct msgbuf *mb;
+ struct msgbuf32 mb32;
+ int err, len;
+
+ if ((mb = kmalloc(msgsz + sizeof *mb + 4, GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+
+ KERNEL_SYSCALL(err, sys_msgrcv, msqid, (struct msgbuf __user *)mb, msgsz, msgtyp, msgflg);
+
+ if (err >= 0) {
+ len = err;
+ mb32.mtype = mb->mtype;
+ err = put_user(mb32.mtype, &umsgp32->mtype);
+ err |= copy_to_user(&umsgp32->mtext, mb->mtext, len);
+ if (err)
+ err = -EFAULT;
+ else
+ err = len;
+ }
+
+ kfree(mb);
+ return err;
+}
+
+asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count)
+{
+ mm_segment_t old_fs = get_fs();
+ int ret;
+ off_t of;
+
+ if (offset && get_user(of, offset))
+ return -EFAULT;
+
+ set_fs(KERNEL_DS);
+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, count);
+ set_fs(old_fs);
+
+ if (offset && put_user(of, offset))
+ return -EFAULT;
+
+ return ret;
+}
+
+asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
+{
+ mm_segment_t old_fs = get_fs();
+ int ret;
+ loff_t lof;
+
+ if (offset && get_user(lof, offset))
+ return -EFAULT;
+
+ set_fs(KERNEL_DS);
+ ret = sys_sendfile64(out_fd, in_fd, offset ? (loff_t __user *)&lof : NULL, count);
+ set_fs(old_fs);
+
+ if (offset && put_user(lof, offset))
+ return -EFAULT;
+
+ return ret;
+}
+
+
+/* lseek() needs a wrapper because 'offset' can be negative, but the top
+ * half of the argument has been zeroed by syscall.S.
+ */
+
+asmlinkage int sys32_lseek(unsigned int fd, int offset, unsigned int origin)
+{
+ return sys_lseek(fd, offset, origin);
+}
+
+asmlinkage long sys32_semctl(int semid, int semnum, int cmd, union semun arg)
+{
+ union semun u;
+
+ if (cmd == SETVAL) {
+ /* Ugh. arg is a union of int,ptr,ptr,ptr, so is 8 bytes.
+ * The int should be in the first 4, but our argument
+ * frobbing has left it in the last 4.
+ */
+ u.val = *((int *)&arg + 1);
+ return sys_semctl (semid, semnum, cmd, u);
+ }
+ return sys_semctl (semid, semnum, cmd, arg);
+}
+
+long sys32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
+ size_t len)
+{
+ return sys_lookup_dcookie((u64)cookie_high << 32 | cookie_low,
+ buf, len);
+}
+
+asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
+ u32 lenhi, u32 lenlo)
+{
+ return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
+ ((loff_t)lenhi << 32) | lenlo);
+}
+
+asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi,
+ u32 mask_lo, int fd,
+ const char __user *pathname)
+{
+ return sys_fanotify_mark(fan_fd, flags, ((u64)mask_hi << 32) | mask_lo,
+ fd, pathname);
+}
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
new file mode 100644
index 00000000..82a52b2f
--- /dev/null
+++ b/arch/parisc/kernel/syscall.S
@@ -0,0 +1,688 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * System call entry code Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
+ * Licensed under the GNU GPL.
+ * thanks to Philipp Rumpf, Mike Shaver and various others
+ * sorry about the wall, puffin..
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/page.h>
+#include <asm/psw.h>
+#include <asm/thread_info.h>
+#include <asm/assembly.h>
+#include <asm/processor.h>
+
+#include <linux/linkage.h>
+
+ /* We fill the empty parts of the gateway page with
+ * something that will kill the kernel or a
+ * userspace application.
+ */
+#define KILL_INSN break 0,0
+
+ .level LEVEL
+
+ .text
+
+ .import syscall_exit,code
+ .import syscall_exit_rfi,code
+
+ /* Linux gateway page is aliased to virtual page 0 in the kernel
+ * address space. Since it is a gateway page it cannot be
+ * dereferenced, so null pointers will still fault. We start
+ * the actual entry point at 0x100. We put break instructions
+ * at the beginning of the page to trap null indirect function
+ * pointers.
+ */
+
+ .align PAGE_SIZE
+ENTRY(linux_gateway_page)
+
+ /* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
+ .rept 44
+ KILL_INSN
+ .endr
+
+ /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
+ /* Light-weight-syscall entry must always be located at 0xb0 */
+ /* WARNING: Keep this number updated with table size changes */
+#define __NR_lws_entries (2)
+
+lws_entry:
+ gate lws_start, %r0 /* increase privilege */
+ depi 3, 31, 2, %r31 /* Ensure we return into user mode. */
+
+ /* Fill from 0xb8 to 0xe0 */
+ .rept 10
+ KILL_INSN
+ .endr
+
+ /* This function MUST be located at 0xe0 for glibc's threading
+ mechanism to work. DO NOT MOVE THIS CODE EVER! */
+set_thread_pointer:
+ gate .+8, %r0 /* increase privilege */
+ depi 3, 31, 2, %r31 /* Ensure we return into user mode. */
+ be 0(%sr7,%r31) /* return to user space */
+ mtctl %r26, %cr27 /* move arg0 to the control register */
+
+ /* Increase the chance of trapping if random jumps occur to this
+ address, fill from 0xf0 to 0x100 */
+ .rept 4
+ KILL_INSN
+ .endr
+
+/* This address must remain fixed at 0x100 for glibc's syscalls to work */
+ .align 256
+linux_gateway_entry:
+ gate .+8, %r0 /* become privileged */
+ mtsp %r0,%sr4 /* get kernel space into sr4 */
+ mtsp %r0,%sr5 /* get kernel space into sr5 */
+ mtsp %r0,%sr6 /* get kernel space into sr6 */
+ mfsp %sr7,%r1 /* save user sr7 */
+ mtsp %r1,%sr3 /* and store it in sr3 */
+
+#ifdef CONFIG_64BIT
+ /* for now we can *always* set the W bit on entry to the syscall
+ * since we don't support wide userland processes. We could
+ * also save the current SM other than in r0 and restore it on
+ * exit from the syscall, and also use that value to know
+ * whether to do narrow or wide syscalls. -PB
+ */
+ ssm PSW_SM_W, %r1
+ extrd,u %r1,PSW_W_BIT,1,%r1
+ /* sp must be aligned on 4, so deposit the W bit setting into
+ * the bottom of sp temporarily */
+ or,ev %r1,%r30,%r30
+ b,n 1f
+ /* The top halves of argument registers must be cleared on syscall
+	 * entry from a narrow executable.
+ */
+ depdi 0, 31, 32, %r26
+ depdi 0, 31, 32, %r25
+ depdi 0, 31, 32, %r24
+ depdi 0, 31, 32, %r23
+ depdi 0, 31, 32, %r22
+ depdi 0, 31, 32, %r21
+1:
+#endif
+ mfctl %cr30,%r1
+ xor %r1,%r30,%r30 /* ye olde xor trick */
+ xor %r1,%r30,%r1
+ xor %r1,%r30,%r30
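+	/* %r30 now holds the thread_info base from %cr30 and %r1 holds
+	 * the user stack pointer */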
+
+ ldo THREAD_SZ_ALGN+FRAME_SIZE(%r30),%r30 /* set up kernel stack */
+
+ /* N.B.: It is critical that we don't set sr7 to 0 until r30
+ * contains a valid kernel stack pointer. It is also
+ * critical that we don't start using the kernel stack
+ * until after sr7 has been set to 0.
+ */
+
+ mtsp %r0,%sr7 /* get kernel space into sr7 */
+ STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
+ mfctl %cr30,%r1 /* get task ptr in %r1 */
+ LDREG TI_TASK(%r1),%r1
+
+ /* Save some registers for sigcontext and potential task
+ switch (see entry.S for the details of which ones are
+ saved/restored). TASK_PT_PSW is zeroed so we can see whether
+ a process is on a syscall or not. For an interrupt the real
+ PSW value is stored. This is needed for gdb and sys_ptrace. */
+ STREG %r0, TASK_PT_PSW(%r1)
+ STREG %r2, TASK_PT_GR2(%r1) /* preserve rp */
+ STREG %r19, TASK_PT_GR19(%r1)
+
+ LDREGM -FRAME_SIZE(%r30), %r2 /* get users sp back */
+#ifdef CONFIG_64BIT
+ extrd,u %r2,63,1,%r19 /* W hidden in bottom bit */
+#if 0
+ xor %r19,%r2,%r2 /* clear bottom bit */
+ depd,z %r19,1,1,%r19
+ std %r19,TASK_PT_PSW(%r1)
+#endif
+#endif
+ STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */
+
+ STREG %r20, TASK_PT_GR20(%r1) /* Syscall number */
+ STREG %r21, TASK_PT_GR21(%r1)
+ STREG %r22, TASK_PT_GR22(%r1)
+ STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */
+ STREG %r24, TASK_PT_GR24(%r1) /* 3rd argument */
+ STREG %r25, TASK_PT_GR25(%r1) /* 2nd argument */
+ STREG %r26, TASK_PT_GR26(%r1) /* 1st argument */
+ STREG %r27, TASK_PT_GR27(%r1) /* user dp */
+ STREG %r28, TASK_PT_GR28(%r1) /* return value 0 */
+ STREG %r28, TASK_PT_ORIG_R28(%r1) /* return value 0 (saved for signals) */
+ STREG %r29, TASK_PT_GR29(%r1) /* return value 1 */
+ STREG %r31, TASK_PT_GR31(%r1) /* preserve syscall return ptr */
+
+ ldo TASK_PT_FR0(%r1), %r27 /* save fpregs from the kernel */
+ save_fp %r27 /* or potential task switch */
+
+ mfctl %cr11, %r27 /* i.e. SAR */
+ STREG %r27, TASK_PT_SAR(%r1)
+
+ loadgp
+
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+ copy %r19,%r2 /* W bit back to r2 */
+#else
+ /* no need to save these on stack in wide mode because the first 8
+ * args are passed in registers */
+ stw %r22, -52(%r30) /* 5th argument */
+ stw %r21, -56(%r30) /* 6th argument */
+#endif
+
+ /* Are we being ptraced? */
+ mfctl %cr30, %r1
+ LDREG TI_TASK(%r1),%r1
+ ldw TASK_PTRACE(%r1), %r1
+ bb,<,n %r1,31,.Ltracesys
+
+ /* Note! We cannot use the syscall table that is mapped
+ nearby since the gateway page is mapped execute-only. */
+
+#ifdef CONFIG_64BIT
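+	/* %r2 carries the W bit saved above; or,= nullifies the following
+	 * insn when %r2 is zero, so narrow tasks pick sys_call_table and
+	 * wide tasks end up with sys_call_table64. */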
+ ldil L%sys_call_table, %r1
+ or,= %r2,%r2,%r2
+ addil L%(sys_call_table64-sys_call_table), %r1
+ ldo R%sys_call_table(%r1), %r19
+ or,= %r2,%r2,%r2
+ ldo R%sys_call_table64(%r1), %r19
+#else
+ ldil L%sys_call_table, %r1
+ ldo R%sys_call_table(%r1), %r19
+#endif
+ comiclr,>> __NR_Linux_syscalls, %r20, %r0
+ b,n .Lsyscall_nosys
+
+ LDREGX %r20(%r19), %r19
+
+ /* If this is a sys_rt_sigreturn call, and the signal was received
+ * when not in_syscall, then we want to return via syscall_exit_rfi,
+ * not syscall_exit. Signal no. in r20, in_syscall in r25 (see
+ * trampoline code in signal.c).
+ */
+ ldi __NR_rt_sigreturn,%r2
+ comb,= %r2,%r20,.Lrt_sigreturn
+.Lin_syscall:
+ ldil L%syscall_exit,%r2
+ be 0(%sr7,%r19)
+ ldo R%syscall_exit(%r2),%r2
+.Lrt_sigreturn:
+ comib,<> 0,%r25,.Lin_syscall
+ ldil L%syscall_exit_rfi,%r2
+ be 0(%sr7,%r19)
+ ldo R%syscall_exit_rfi(%r2),%r2
+
+ /* Note! Because we are not running where we were linked, any
+ calls to functions external to this file must be indirect. To
+ be safe, we apply the opposite rule to functions within this
+ file, with local labels given to them to ensure correctness. */
+
+.Lsyscall_nosys:
+syscall_nosys:
+ ldil L%syscall_exit,%r1
+ be R%syscall_exit(%sr7,%r1)
+ ldo -ENOSYS(%r0),%r28 /* set errno */
+
+
+/* Warning! This trace code is a virtual duplicate of the code above so be
+ * sure to maintain both! */
+.Ltracesys:
+tracesys:
+ /* Need to save more registers so the debugger can see where we
+ * are. This saves only the lower 8 bits of PSW, so that the C
+ * bit is still clear on syscalls, and the D bit is set if this
+ * full register save path has been executed. We check the D
+ * bit on syscall_return_rfi to determine which registers to
+	 * restore.  An interrupt results in a full PSW saved with the
+	 * C bit set; a non-traced syscall entry results in C and D clear
+	 * in the saved PSW.
+ */
+ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
+ LDREG TI_TASK(%r1), %r1
+ ssm 0,%r2
+ STREG %r2,TASK_PT_PSW(%r1) /* Lower 8 bits only!! */
+ mfsp %sr0,%r2
+ STREG %r2,TASK_PT_SR0(%r1)
+ mfsp %sr1,%r2
+ STREG %r2,TASK_PT_SR1(%r1)
+ mfsp %sr2,%r2
+ STREG %r2,TASK_PT_SR2(%r1)
+ mfsp %sr3,%r2
+ STREG %r2,TASK_PT_SR3(%r1)
+ STREG %r2,TASK_PT_SR4(%r1)
+ STREG %r2,TASK_PT_SR5(%r1)
+ STREG %r2,TASK_PT_SR6(%r1)
+ STREG %r2,TASK_PT_SR7(%r1)
+ STREG %r2,TASK_PT_IASQ0(%r1)
+ STREG %r2,TASK_PT_IASQ1(%r1)
+ LDREG TASK_PT_GR31(%r1),%r2
+ STREG %r2,TASK_PT_IAOQ0(%r1)
+ ldo 4(%r2),%r2
+ STREG %r2,TASK_PT_IAOQ1(%r1)
+ ldo TASK_REGS(%r1),%r2
+ /* reg_save %r2 */
+ STREG %r3,PT_GR3(%r2)
+ STREG %r4,PT_GR4(%r2)
+ STREG %r5,PT_GR5(%r2)
+ STREG %r6,PT_GR6(%r2)
+ STREG %r7,PT_GR7(%r2)
+ STREG %r8,PT_GR8(%r2)
+ STREG %r9,PT_GR9(%r2)
+ STREG %r10,PT_GR10(%r2)
+ STREG %r11,PT_GR11(%r2)
+ STREG %r12,PT_GR12(%r2)
+ STREG %r13,PT_GR13(%r2)
+ STREG %r14,PT_GR14(%r2)
+ STREG %r15,PT_GR15(%r2)
+ STREG %r16,PT_GR16(%r2)
+ STREG %r17,PT_GR17(%r2)
+ STREG %r18,PT_GR18(%r2)
+ /* Finished saving things for the debugger */
+
+ copy %r2,%r26
+ ldil L%do_syscall_trace_enter,%r1
+ ldil L%tracesys_next,%r2
+ be R%do_syscall_trace_enter(%sr7,%r1)
+ ldo R%tracesys_next(%r2),%r2
+
+tracesys_next:
+ /* do_syscall_trace_enter either returned the syscallno, or -1L,
+ * so we skip restoring the PT_GR20 below, since we pulled it from
+ * task->thread.regs.gr[20] above.
+ */
+ copy %ret0,%r20
+ ldil L%sys_call_table,%r1
+ ldo R%sys_call_table(%r1), %r19
+
+ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
+ LDREG TI_TASK(%r1), %r1
+ LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */
+ LDREG TASK_PT_GR25(%r1), %r25
+ LDREG TASK_PT_GR24(%r1), %r24
+ LDREG TASK_PT_GR23(%r1), %r23
+#ifdef CONFIG_64BIT
+ LDREG TASK_PT_GR22(%r1), %r22
+ LDREG TASK_PT_GR21(%r1), %r21
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ comiclr,>>= __NR_Linux_syscalls, %r20, %r0
+ b,n .Lsyscall_nosys
+
+ LDREGX %r20(%r19), %r19
+
+ /* If this is a sys_rt_sigreturn call, and the signal was received
+ * when not in_syscall, then we want to return via syscall_exit_rfi,
+ * not syscall_exit. Signal no. in r20, in_syscall in r25 (see
+ * trampoline code in signal.c).
+ */
+ ldi __NR_rt_sigreturn,%r2
+ comb,= %r2,%r20,.Ltrace_rt_sigreturn
+.Ltrace_in_syscall:
+ ldil L%tracesys_exit,%r2
+ be 0(%sr7,%r19)
+ ldo R%tracesys_exit(%r2),%r2
+
+ /* Do *not* call this function on the gateway page, because it
+ makes a direct call to syscall_trace. */
+
+tracesys_exit:
+ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
+ LDREG TI_TASK(%r1), %r1
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+ ldo TASK_REGS(%r1),%r26
+ bl do_syscall_trace_exit,%r2
+ STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
+ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
+ LDREG TI_TASK(%r1), %r1
+ LDREG TASK_PT_GR28(%r1), %r28 /* Restore return val. */
+
+ ldil L%syscall_exit,%r1
+ be,n R%syscall_exit(%sr7,%r1)
+
+.Ltrace_rt_sigreturn:
+ comib,<> 0,%r25,.Ltrace_in_syscall
+ ldil L%tracesys_sigexit,%r2
+ be 0(%sr7,%r19)
+ ldo R%tracesys_sigexit(%r2),%r2
+
+tracesys_sigexit:
+ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
+ LDREG TI_TASK(%r1), %r1
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+ bl do_syscall_trace_exit,%r2
+ ldo TASK_REGS(%r1),%r26
+
+ ldil L%syscall_exit_rfi,%r1
+ be,n R%syscall_exit_rfi(%sr7,%r1)
+
+
+ /*********************************************************
+ 32/64-bit Light-Weight-Syscall ABI
+
+ * - Indicates a hint for userspace inline asm
+ implementations.
+
+ Syscall number (caller-saves)
+ - %r20
+ * In asm clobber.
+
+ Argument registers (caller-saves)
+ - %r26, %r25, %r24, %r23, %r22
+ * In asm input.
+
+ Return registers (caller-saves)
+ - %r28 (return), %r21 (errno)
+ * In asm output.
+
+ Caller-saves registers
+ - %r1, %r27, %r29
+ - %r2 (return pointer)
+ - %r31 (ble link register)
+ * In asm clobber.
+
+ Callee-saves registers
+ - %r3-%r18
+ - %r30 (stack pointer)
+ * Not in asm clobber.
+
+ If userspace is 32-bit:
+ Callee-saves registers
+ - %r19 (32-bit PIC register)
+
+ Differences from 32-bit calling convention:
+ - Syscall number in %r20
+ - Additional argument register %r22 (arg4)
+ - Callee-saves %r19.
+
+ If userspace is 64-bit:
+ Callee-saves registers
+ - %r27 (64-bit PIC register)
+
+ Differences from 64-bit calling convention:
+ - Syscall number in %r20
+ - Additional argument register %r22 (arg4)
+ - Callee-saves %r27.
+
+ Error codes returned by entry path:
+
+ ENOSYS - r20 was an invalid LWS number.
+
+ *********************************************************/
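+
+	/*********************************************************
+		Illustrative sketch only: one way 32-bit userspace
+		could invoke LWS entry 0 (compare and swap) from C
+		inline assembly.  The gateway offset 0xb0 and the
+		helper name lws_cas32() are assumptions made for this
+		comment, not definitions from this file; register use
+		follows the ABI description above, and a real caller
+		must retry when -EAGAIN comes back in %r21.
+
+		static inline int lws_cas32(volatile int *addr, int old, int new)
+		{
+			int prev, err;
+
+			asm volatile(
+				"copy	%2, %%r26\n\t"		// address
+				"copy	%3, %%r25\n\t"		// expected old value
+				"copy	%4, %%r24\n\t"		// new value
+				"ble	0xb0(%%sr2, %%r0)\n\t"
+				"ldi	0, %%r20\n\t"		// LWS number, in the delay slot
+				"copy	%%r28, %0\n\t"		// previous value
+				"copy	%%r21, %1"		// 0 or negative errno
+				: "=r" (prev), "=r" (err)
+				: "r" (addr), "r" (old), "r" (new)
+				: "r1", "r2", "r20", "r21", "r22", "r23",
+				  "r24", "r25", "r26", "r28", "r29", "r31",
+				  "memory");
+			(void)err;	// a real caller would loop on -EAGAIN
+			return prev;
+		}
+	*********************************************************/
+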
+lws_start:
+
+#ifdef CONFIG_64BIT
+ /* FIXME: If we are a 64-bit kernel just
+ * turn this on unconditionally.
+ */
+ ssm PSW_SM_W, %r1
+ extrd,u %r1,PSW_W_BIT,1,%r1
+ /* sp must be aligned on 4, so deposit the W bit setting into
+ * the bottom of sp temporarily */
+ or,ev %r1,%r30,%r30
+
+ /* Clip LWS number to a 32-bit value always */
+ depdi 0, 31, 32, %r20
+#endif
+
+ /* Is the lws entry number valid? */
+ comiclr,>> __NR_lws_entries, %r20, %r0
+ b,n lws_exit_nosys
+
+ /* WARNING: Trashing sr2 and sr3 */
+ mfsp %sr7,%r1 /* get userspace into sr3 */
+ mtsp %r1,%sr3
+ mtsp %r0,%sr2 /* get kernel space into sr2 */
+
+ /* Load table start */
+ ldil L%lws_table, %r1
+ ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
+ LDREGX %r20(%sr2,r28), %r21 /* Scratch use of r21 */
+
+ /* Jump to lws, lws table pointers already relocated */
+ be,n 0(%sr2,%r21)
+
+lws_exit_nosys:
+ ldo -ENOSYS(%r0),%r21 /* set errno */
+ /* Fall through: Return to userspace */
+
+lws_exit:
+#ifdef CONFIG_64BIT
+ /* decide whether to reset the wide mode bit
+ *
+ * For a syscall, the W bit is stored in the lowest bit
+ * of sp. Extract it and reset W if it is zero */
+ extrd,u,*<> %r30,63,1,%r1
+ rsm PSW_SM_W, %r0
+ /* now reset the lowest bit of sp if it was set */
+ xor %r30,%r1,%r30
+#endif
+ be,n 0(%sr7, %r31)
+
+
+
+ /***************************************************
+ Implementing CAS as an atomic operation:
+
+ %r26 - Address to examine
+ %r25 - Old value to check (old)
+ %r24 - New value to set (new)
+ %r28 - Return prev through this register.
+ %r21 - Kernel error code
+
+ If debugging is DISabled:
+
+ %r21 has the following meanings:
+
+ EAGAIN - CAS is busy, ldcw failed, try again.
+ EFAULT - Read or write failed.
+
+ If debugging is enabled:
+
+ EDEADLOCK - CAS called recursively.
+ EAGAIN && r28 == 1 - CAS is busy. Lock contended.
+ EAGAIN && r28 == 2 - CAS is busy. ldcw failed.
+ EFAULT - Read or write failed.
+
+ Scratch: r20, r28, r1
+
+ ****************************************************/
+
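+	/* As a pseudocode sketch (illustrative only -- the helper names
+	   ldcw_try_acquire() and lws_unlock() are inventions for this
+	   comment; the real implementation is the assembly below):
+
+		lock = lws_lock_start + ((addr >> 4) & 0xf) * 16;
+		if (!ldcw_try_acquire(lock))
+			set %r21 = -EAGAIN and return (userspace retries)
+		prev = *addr;                  (a fault here gives -EFAULT)
+		if (prev == old)
+			*addr = new;           (a fault here gives -EFAULT)
+		lws_unlock(lock);
+		return with %r28 = prev, %r21 = 0
+	*/
+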
+ /* Do not enable LWS debugging */
+#define ENABLE_LWS_DEBUG 0
+
+ /* ELF64 Process entry path */
+lws_compare_and_swap64:
+#ifdef CONFIG_64BIT
+ b,n lws_compare_and_swap
+#else
+ /* If we are not a 64-bit kernel, then we don't
+ * have 64-bit input registers, and calling
+ * the 64-bit LWS CAS returns ENOSYS.
+ */
+ b,n lws_exit_nosys
+#endif
+
+ /* ELF32 Process entry path */
+lws_compare_and_swap32:
+#ifdef CONFIG_64BIT
+ /* Clip all the input registers */
+ depdi 0, 31, 32, %r26
+ depdi 0, 31, 32, %r25
+ depdi 0, 31, 32, %r24
+#endif
+
+lws_compare_and_swap:
+ /* Load start of lock table */
+ ldil L%lws_lock_start, %r20
+ ldo R%lws_lock_start(%r20), %r28
+
+ /* Extract four bits from r26 and hash lock (Bits 4-7) */
+ extru %r26, 27, 4, %r20
+
+	/* Find the lock to use:  the hash is a value from 0 to
+	   15, multiplied by 16 (to keep each lock 16-byte aligned)
+	   and added to the lock table base. */
+ shlw %r20, 4, %r20
+ add %r20, %r28, %r20
+
+# if ENABLE_LWS_DEBUG
+	/*
+		DEBUG, check for deadlock!
+		If the thread register values are the same
+		then we were the one that locked it last and
+		this is a recursive call that will deadlock.
+		We *must* give up this call and fail.
+	*/
+ ldw 4(%sr2,%r20), %r28 /* Load thread register */
+ /* WARNING: If cr27 cycles to the same value we have problems */
+ mfctl %cr27, %r21 /* Get current thread register */
+ cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */
+ b lws_exit /* Return error! */
+ ldo -EDEADLOCK(%r0), %r21
+cas_lock:
+ cmpb,=,n %r0, %r28, cas_nocontend /* Is nobody using it? */
+ ldo 1(%r0), %r28 /* 1st case */
+ b lws_exit /* Contended... */
+ ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
+cas_nocontend:
+# endif
+/* ENABLE_LWS_DEBUG */
+
+ LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
+ cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */
+cas_wouldblock:
+ ldo 2(%r0), %r28 /* 2nd case */
+ b lws_exit /* Contended... */
+ ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
+
+ /*
+ prev = *addr;
+ if ( prev == old )
+ *addr = new;
+ return prev;
+ */
+
+	/* NOTES:
+		This all works because intr_do_signal
+		and schedule both check the return iasq
+		and see that we are on the kernel page
+		so this process is never scheduled off
+		nor ever sent any signal of any sort,
+		thus it is wholly atomic from userspace's
+		perspective
+	*/
+cas_action:
+#if defined CONFIG_SMP && ENABLE_LWS_DEBUG
+ /* DEBUG */
+ mfctl %cr27, %r1
+ stw %r1, 4(%sr2,%r20)
+#endif
+ /* The load and store could fail */
+1: ldw 0(%sr3,%r26), %r28
+ sub,<> %r28, %r25, %r0
+2: stw %r24, 0(%sr3,%r26)
+ /* Free lock */
+ stw %r20, 0(%sr2,%r20)
+#if ENABLE_LWS_DEBUG
+ /* Clear thread register indicator */
+ stw %r0, 4(%sr2,%r20)
+#endif
+ /* Return to userspace, set no error */
+ b lws_exit
+ copy %r0, %r21
+
+3:
+ /* Error occurred on load or store */
+ /* Free lock */
+ stw %r20, 0(%sr2,%r20)
+#if ENABLE_LWS_DEBUG
+ stw %r0, 4(%sr2,%r20)
+#endif
+ b lws_exit
+ ldo -EFAULT(%r0),%r21 /* set errno */
+ nop
+ nop
+ nop
+ nop
+
+ /* Two exception table entries, one for the load,
+ the other for the store. Either return -EFAULT.
+ Each of the entries must be relocated. */
+ .section __ex_table,"aw"
+ ASM_ULONG_INSN (1b - linux_gateway_page), (3b - linux_gateway_page)
+ ASM_ULONG_INSN (2b - linux_gateway_page), (3b - linux_gateway_page)
+ .previous
+
+
+ /* Make sure nothing else is placed on this page */
+ .align PAGE_SIZE
+END(linux_gateway_page)
+ENTRY(end_linux_gateway_page)
+
+ /* Relocate symbols assuming linux_gateway_page is mapped
+ to virtual address 0x0 */
+
+#define LWS_ENTRY(_name_) ASM_ULONG_INSN (lws_##_name_ - linux_gateway_page)
+
+ .section .rodata,"a"
+
+ .align PAGE_SIZE
+ /* Light-weight-syscall table */
+ /* Start of lws table. */
+ENTRY(lws_table)
+ LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */
+ LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */
+END(lws_table)
+ /* End of lws table */
+
+ .align PAGE_SIZE
+ENTRY(sys_call_table)
+#include "syscall_table.S"
+END(sys_call_table)
+
+#ifdef CONFIG_64BIT
+ .align PAGE_SIZE
+ENTRY(sys_call_table64)
+#define SYSCALL_TABLE_64BIT
+#include "syscall_table.S"
+END(sys_call_table64)
+#endif
+
+ /*
+ All light-weight-syscall atomic operations
+ will use this set of locks
+
+ NOTE: The lws_lock_start symbol must be
+ at least 16-byte aligned for safe use
+ with ldcw.
+ */
+ .section .data
+ .align PAGE_SIZE
+ENTRY(lws_lock_start)
+ /* lws locks */
+ .rept 16
+ /* Keep locks aligned at 16-bytes */
+ .word 1
+ .word 0
+ .word 0
+ .word 0
+ .endr
+END(lws_lock_start)
+ .previous
+
+.end
+
+
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
new file mode 100644
index 00000000..e66366fd
--- /dev/null
+++ b/arch/parisc/kernel/syscall_table.S
@@ -0,0 +1,438 @@
+/* System Call Table
+ *
+ * Copyright (C) 1999-2004 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
+ * Copyright (C) 2000 Alan Modra <amodra at parisc-linux.org>
+ * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
+ * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
+ * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
+ * Copyright (C) 2000 Grant Grundler <grundler at parisc-linux.org>
+ * Copyright (C) 2001 Richard Hirst <rhirst with parisc-linux.org>
+ * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
+ * Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org>
+ * Copyright (C) 2000-2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
+ * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ * Copyright (C) 2005-2006 Kyle McMartin <kyle at parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#if defined(CONFIG_64BIT) && !defined(SYSCALL_TABLE_64BIT)
+/* Use ENTRY_SAME for 32-bit syscalls which are the same on wide and
+ * narrow palinux. Use ENTRY_DIFF for those where a 32-bit specific
+ * implementation is required on wide palinux. Use ENTRY_COMP where
+ * the compatibility layer has a useful 32-bit implementation.
+ */
+#define ENTRY_SAME(_name_) .dword sys_##_name_
+#define ENTRY_DIFF(_name_) .dword sys32_##_name_
+#define ENTRY_UHOH(_name_) .dword sys32_##unimplemented
+#define ENTRY_OURS(_name_) .dword parisc_##_name_
+#define ENTRY_COMP(_name_) .dword compat_sys_##_name_
+#elif defined(CONFIG_64BIT) && defined(SYSCALL_TABLE_64BIT)
+#define ENTRY_SAME(_name_) .dword sys_##_name_
+#define ENTRY_DIFF(_name_) .dword sys_##_name_
+#define ENTRY_UHOH(_name_) .dword sys_##_name_
+#define ENTRY_OURS(_name_) .dword sys_##_name_
+#define ENTRY_COMP(_name_) .dword sys_##_name_
+#else
+#define ENTRY_SAME(_name_) .word sys_##_name_
+#define ENTRY_DIFF(_name_) .word sys_##_name_
+#define ENTRY_UHOH(_name_) .word sys_##_name_
+#define ENTRY_OURS(_name_) .word parisc_##_name_
+#define ENTRY_COMP(_name_) .word sys_##_name_
+#endif
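+
+/* Illustrative expansion (not part of the table): on a 64-bit kernel,
+ * ENTRY_COMP(fcntl) emits ".dword compat_sys_fcntl" into the narrow
+ * table (built without SYSCALL_TABLE_64BIT) and ".dword sys_fcntl"
+ * into the wide table (built with it); on a 32-bit kernel it emits
+ * ".word sys_fcntl".  Both tables are generated from this one file,
+ * so the entries stay in lock-step by syscall number.
+ */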
+
+ ENTRY_SAME(restart_syscall) /* 0 */
+ ENTRY_SAME(exit)
+ ENTRY_SAME(fork_wrapper)
+ ENTRY_SAME(read)
+ ENTRY_SAME(write)
+ ENTRY_SAME(open) /* 5 */
+ ENTRY_SAME(close)
+ ENTRY_SAME(waitpid)
+ ENTRY_SAME(creat)
+ ENTRY_SAME(link)
+ ENTRY_SAME(unlink) /* 10 */
+ ENTRY_DIFF(execve_wrapper)
+ ENTRY_SAME(chdir)
+ /* See comments in kernel/time.c!!! Maybe we don't need this? */
+ ENTRY_COMP(time)
+ ENTRY_SAME(mknod)
+ ENTRY_SAME(chmod) /* 15 */
+ ENTRY_SAME(lchown)
+ ENTRY_SAME(socket)
+ /* struct stat is MAYBE identical wide and narrow ?? */
+ ENTRY_COMP(newstat)
+ ENTRY_DIFF(lseek)
+ ENTRY_SAME(getpid) /* 20 */
+ /* the 'void * data' parameter may need re-packing in wide */
+ ENTRY_COMP(mount)
+ /* concerned about struct sockaddr in wide/narrow */
+ /* ---> I think sockaddr is OK unless the compiler packs the struct */
+ /* differently to align the char array */
+ ENTRY_SAME(bind)
+ ENTRY_SAME(setuid)
+ ENTRY_SAME(getuid)
+ ENTRY_COMP(stime) /* 25 */
+ ENTRY_COMP(ptrace)
+ ENTRY_SAME(alarm)
+ /* see stat comment */
+ ENTRY_COMP(newfstat)
+ ENTRY_SAME(pause)
+ /* struct utimbuf uses time_t which might vary */
+ ENTRY_COMP(utime) /* 30 */
+ /* struct sockaddr... */
+ ENTRY_SAME(connect)
+ ENTRY_SAME(listen)
+ ENTRY_SAME(access)
+ ENTRY_SAME(nice)
+ /* struct sockaddr... */
+ ENTRY_SAME(accept) /* 35 */
+ ENTRY_SAME(sync)
+ ENTRY_SAME(kill)
+ ENTRY_SAME(rename)
+ ENTRY_SAME(mkdir)
+ ENTRY_SAME(rmdir) /* 40 */
+ ENTRY_SAME(dup)
+ ENTRY_SAME(pipe)
+ ENTRY_COMP(times)
+ /* struct sockaddr... */
+ ENTRY_SAME(getsockname)
+ /* it seems possible brk() could return a >4G pointer... */
+ ENTRY_SAME(brk) /* 45 */
+ ENTRY_SAME(setgid)
+ ENTRY_SAME(getgid)
+ ENTRY_SAME(signal)
+ ENTRY_SAME(geteuid)
+ ENTRY_SAME(getegid) /* 50 */
+ ENTRY_SAME(acct)
+ ENTRY_SAME(umount)
+ /* struct sockaddr... */
+ ENTRY_SAME(getpeername)
+ ENTRY_COMP(ioctl)
+ ENTRY_COMP(fcntl) /* 55 */
+ ENTRY_SAME(socketpair)
+ ENTRY_SAME(setpgid)
+ ENTRY_SAME(send)
+ ENTRY_SAME(newuname)
+ ENTRY_SAME(umask) /* 60 */
+ ENTRY_SAME(chroot)
+ ENTRY_COMP(ustat)
+ ENTRY_SAME(dup2)
+ ENTRY_SAME(getppid)
+ ENTRY_SAME(getpgrp) /* 65 */
+ ENTRY_SAME(setsid)
+ ENTRY_SAME(pivot_root)
+ /* I don't like this */
+ ENTRY_UHOH(sgetmask)
+ ENTRY_UHOH(ssetmask)
+ ENTRY_SAME(setreuid) /* 70 */
+ ENTRY_SAME(setregid)
+ ENTRY_SAME(mincore)
+ ENTRY_COMP(sigpending)
+ ENTRY_SAME(sethostname)
+ /* Following 3 have linux-common-code structs containing longs -( */
+ ENTRY_COMP(setrlimit) /* 75 */
+ ENTRY_COMP(getrlimit)
+ ENTRY_COMP(getrusage)
+ /* struct timeval and timezone are maybe?? consistent wide and narrow */
+ ENTRY_COMP(gettimeofday)
+ ENTRY_COMP(settimeofday)
+ ENTRY_SAME(getgroups) /* 80 */
+ ENTRY_SAME(setgroups)
+ /* struct socketaddr... */
+ ENTRY_SAME(sendto)
+ ENTRY_SAME(symlink)
+ /* see stat comment */
+ ENTRY_COMP(newlstat)
+ ENTRY_SAME(readlink) /* 85 */
+ ENTRY_SAME(ni_syscall) /* was uselib */
+ ENTRY_SAME(swapon)
+ ENTRY_SAME(reboot)
+ ENTRY_SAME(mmap2)
+ ENTRY_SAME(mmap) /* 90 */
+ ENTRY_SAME(munmap)
+ ENTRY_SAME(truncate)
+ ENTRY_SAME(ftruncate)
+ ENTRY_SAME(fchmod)
+ ENTRY_SAME(fchown) /* 95 */
+ ENTRY_SAME(getpriority)
+ ENTRY_SAME(setpriority)
+ ENTRY_SAME(recv)
+ ENTRY_COMP(statfs)
+ ENTRY_COMP(fstatfs) /* 100 */
+ ENTRY_SAME(stat64)
+ ENTRY_SAME(ni_syscall) /* was socketcall */
+ ENTRY_SAME(syslog)
+ /* even though manpage says struct timeval contains longs, ours has
+ * time_t and suseconds_t -- both of which are safe wide/narrow */
+ ENTRY_COMP(setitimer)
+ ENTRY_COMP(getitimer) /* 105 */
+ ENTRY_SAME(capget)
+ ENTRY_SAME(capset)
+ ENTRY_OURS(pread64)
+ ENTRY_OURS(pwrite64)
+ ENTRY_SAME(getcwd) /* 110 */
+ ENTRY_SAME(vhangup)
+ ENTRY_SAME(fstat64)
+ ENTRY_SAME(vfork_wrapper)
+ /* struct rusage contains longs... */
+ ENTRY_COMP(wait4)
+ ENTRY_SAME(swapoff) /* 115 */
+ ENTRY_COMP(sysinfo)
+ ENTRY_SAME(shutdown)
+ ENTRY_SAME(fsync)
+ ENTRY_SAME(madvise)
+ ENTRY_SAME(clone_wrapper) /* 120 */
+ ENTRY_SAME(setdomainname)
+ ENTRY_DIFF(sendfile)
+ /* struct sockaddr... */
+ ENTRY_SAME(recvfrom)
+ /* struct timex contains longs */
+ ENTRY_COMP(adjtimex)
+ ENTRY_SAME(mprotect) /* 125 */
+ /* old_sigset_t forced to 32 bits. Beware glibc sigset_t */
+ ENTRY_COMP(sigprocmask)
+ ENTRY_SAME(ni_syscall) /* create_module */
+ ENTRY_SAME(init_module)
+ ENTRY_SAME(delete_module)
+ ENTRY_SAME(ni_syscall) /* 130: get_kernel_syms */
+ /* time_t inside struct dqblk */
+ ENTRY_SAME(quotactl)
+ ENTRY_SAME(getpgid)
+ ENTRY_SAME(fchdir)
+ ENTRY_SAME(bdflush)
+ ENTRY_SAME(sysfs) /* 135 */
+ ENTRY_OURS(personality)
+ ENTRY_SAME(ni_syscall) /* for afs_syscall */
+ ENTRY_SAME(setfsuid)
+ ENTRY_SAME(setfsgid)
+ /* I think this might work */
+ ENTRY_SAME(llseek) /* 140 */
+ ENTRY_COMP(getdents)
+ /* it is POSSIBLE that select will be OK because even though fd_set
+ * contains longs, the macros and sizes are clever. */
+ ENTRY_COMP(select)
+ ENTRY_SAME(flock)
+ ENTRY_SAME(msync)
+ /* struct iovec contains pointers */
+ ENTRY_COMP(readv) /* 145 */
+ ENTRY_COMP(writev)
+ ENTRY_SAME(getsid)
+ ENTRY_SAME(fdatasync)
+ /* struct __sysctl_args is a mess */
+ ENTRY_COMP(sysctl)
+ ENTRY_SAME(mlock) /* 150 */
+ ENTRY_SAME(munlock)
+ ENTRY_SAME(mlockall)
+ ENTRY_SAME(munlockall)
+ /* struct sched_param is ok for now */
+ ENTRY_SAME(sched_setparam)
+ ENTRY_SAME(sched_getparam) /* 155 */
+ ENTRY_SAME(sched_setscheduler)
+ ENTRY_SAME(sched_getscheduler)
+ ENTRY_SAME(sched_yield)
+ ENTRY_SAME(sched_get_priority_max)
+ ENTRY_SAME(sched_get_priority_min) /* 160 */
+ /* These 2 would've worked if someone had defined struct timespec
+ * carefully, like timeval for example (which is about the same).
+ * Unfortunately it contains a long :-( */
+ ENTRY_DIFF(sched_rr_get_interval)
+ ENTRY_COMP(nanosleep)
+ ENTRY_SAME(mremap)
+ ENTRY_SAME(setresuid)
+ ENTRY_SAME(getresuid) /* 165 */
+ ENTRY_DIFF(sigaltstack_wrapper)
+ ENTRY_SAME(ni_syscall) /* query_module */
+ ENTRY_SAME(poll)
+ /* structs contain pointers and an in_addr... */
+ ENTRY_COMP(nfsservctl)
+ ENTRY_SAME(setresgid) /* 170 */
+ ENTRY_SAME(getresgid)
+ ENTRY_SAME(prctl)
+ /* signals need a careful review */
+ ENTRY_SAME(rt_sigreturn_wrapper)
+ ENTRY_DIFF(rt_sigaction)
+ ENTRY_DIFF(rt_sigprocmask) /* 175 */
+ ENTRY_DIFF(rt_sigpending)
+ ENTRY_COMP(rt_sigtimedwait)
+ /* even though the struct siginfo_t is different, it appears like
+ * all the paths use values which should be same wide and narrow.
+ * Also the struct is padded to 128 bytes which means we don't have
+ * to worry about faulting trying to copy in a larger 64-bit
+ * struct from a 32-bit user-space app.
+ */
+ ENTRY_COMP(rt_sigqueueinfo)
+ ENTRY_COMP(rt_sigsuspend)
+ ENTRY_SAME(chown) /* 180 */
+ /* setsockopt() used by iptables: SO_SET_REPLACE/SO_SET_ADD_COUNTERS */
+ ENTRY_COMP(setsockopt)
+ ENTRY_COMP(getsockopt)
+ ENTRY_COMP(sendmsg)
+ ENTRY_COMP(recvmsg)
+ ENTRY_SAME(semop) /* 185 */
+ ENTRY_SAME(semget)
+ ENTRY_DIFF(semctl)
+ ENTRY_DIFF(msgsnd)
+ ENTRY_DIFF(msgrcv)
+ ENTRY_SAME(msgget) /* 190 */
+ ENTRY_SAME(msgctl)
+ ENTRY_SAME(shmat)
+ ENTRY_SAME(shmdt)
+ ENTRY_SAME(shmget)
+ ENTRY_SAME(shmctl) /* 195 */
+ ENTRY_SAME(ni_syscall) /* streams1 */
+ ENTRY_SAME(ni_syscall) /* streams2 */
+ ENTRY_SAME(lstat64)
+ ENTRY_OURS(truncate64)
+ ENTRY_OURS(ftruncate64) /* 200 */
+ ENTRY_SAME(getdents64)
+ ENTRY_COMP(fcntl64)
+ ENTRY_SAME(ni_syscall) /* attrctl -- dead */
+ ENTRY_SAME(ni_syscall) /* acl_get -- dead */
+ ENTRY_SAME(ni_syscall) /* 205 (acl_set -- dead) */
+ ENTRY_SAME(gettid)
+ ENTRY_OURS(readahead)
+ ENTRY_SAME(tkill)
+ ENTRY_SAME(sendfile64)
+ ENTRY_COMP(futex) /* 210 */
+ ENTRY_COMP(sched_setaffinity)
+ ENTRY_COMP(sched_getaffinity)
+ ENTRY_SAME(ni_syscall) /* set_thread_area */
+ ENTRY_SAME(ni_syscall) /* get_thread_area */
+ ENTRY_SAME(io_setup) /* 215 */
+ ENTRY_SAME(io_destroy)
+ ENTRY_SAME(io_getevents)
+ ENTRY_SAME(io_submit)
+ ENTRY_SAME(io_cancel)
+ ENTRY_SAME(alloc_hugepages) /* 220 */
+ ENTRY_SAME(free_hugepages)
+ ENTRY_SAME(exit_group)
+ ENTRY_DIFF(lookup_dcookie)
+ ENTRY_SAME(epoll_create)
+ ENTRY_SAME(epoll_ctl) /* 225 */
+ ENTRY_SAME(epoll_wait)
+ ENTRY_SAME(remap_file_pages)
+ ENTRY_SAME(semtimedop)
+ ENTRY_SAME(mq_open)
+ ENTRY_SAME(mq_unlink) /* 230 */
+ ENTRY_SAME(mq_timedsend)
+ ENTRY_SAME(mq_timedreceive)
+ ENTRY_SAME(mq_notify)
+ ENTRY_SAME(mq_getsetattr)
+ ENTRY_COMP(waitid) /* 235 */
+ ENTRY_OURS(fadvise64_64)
+ ENTRY_SAME(set_tid_address)
+ ENTRY_SAME(setxattr)
+ ENTRY_SAME(lsetxattr)
+ ENTRY_SAME(fsetxattr) /* 240 */
+ ENTRY_SAME(getxattr)
+ ENTRY_SAME(lgetxattr)
+ ENTRY_SAME(fgetxattr)
+ ENTRY_SAME(listxattr)
+ ENTRY_SAME(llistxattr) /* 245 */
+ ENTRY_SAME(flistxattr)
+ ENTRY_SAME(removexattr)
+ ENTRY_SAME(lremovexattr)
+ ENTRY_SAME(fremovexattr)
+ ENTRY_COMP(timer_create) /* 250 */
+ ENTRY_COMP(timer_settime)
+ ENTRY_COMP(timer_gettime)
+ ENTRY_SAME(timer_getoverrun)
+ ENTRY_SAME(timer_delete)
+ ENTRY_COMP(clock_settime) /* 255 */
+ ENTRY_COMP(clock_gettime)
+ ENTRY_COMP(clock_getres)
+ ENTRY_COMP(clock_nanosleep)
+ ENTRY_SAME(tgkill)
+ ENTRY_COMP(mbind) /* 260 */
+ ENTRY_COMP(get_mempolicy)
+ ENTRY_COMP(set_mempolicy)
+ ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */
+ ENTRY_SAME(add_key)
+ ENTRY_SAME(request_key) /* 265 */
+ ENTRY_SAME(keyctl)
+ ENTRY_SAME(ioprio_set)
+ ENTRY_SAME(ioprio_get)
+ ENTRY_SAME(inotify_init)
+ ENTRY_SAME(inotify_add_watch) /* 270 */
+ ENTRY_SAME(inotify_rm_watch)
+ ENTRY_SAME(migrate_pages)
+ ENTRY_COMP(pselect6)
+ ENTRY_COMP(ppoll)
+ ENTRY_COMP(openat) /* 275 */
+ ENTRY_SAME(mkdirat)
+ ENTRY_SAME(mknodat)
+ ENTRY_SAME(fchownat)
+ ENTRY_COMP(futimesat)
+ ENTRY_SAME(fstatat64) /* 280 */
+ ENTRY_SAME(unlinkat)
+ ENTRY_SAME(renameat)
+ ENTRY_SAME(linkat)
+ ENTRY_SAME(symlinkat)
+ ENTRY_SAME(readlinkat) /* 285 */
+ ENTRY_SAME(fchmodat)
+ ENTRY_SAME(faccessat)
+ ENTRY_SAME(unshare)
+ ENTRY_COMP(set_robust_list)
+ ENTRY_COMP(get_robust_list) /* 290 */
+ ENTRY_SAME(splice)
+ ENTRY_OURS(sync_file_range)
+ ENTRY_SAME(tee)
+ ENTRY_COMP(vmsplice)
+ ENTRY_COMP(move_pages) /* 295 */
+ ENTRY_SAME(getcpu)
+ ENTRY_SAME(epoll_pwait)
+ ENTRY_COMP(statfs64)
+ ENTRY_COMP(fstatfs64)
+ ENTRY_COMP(kexec_load) /* 300 */
+ ENTRY_COMP(utimensat)
+ ENTRY_COMP(signalfd)
+ ENTRY_SAME(ni_syscall) /* was timerfd */
+ ENTRY_SAME(eventfd)
+ ENTRY_COMP(fallocate) /* 305 */
+ ENTRY_SAME(timerfd_create)
+ ENTRY_COMP(timerfd_settime)
+ ENTRY_COMP(timerfd_gettime)
+ ENTRY_COMP(signalfd4)
+ ENTRY_SAME(eventfd2) /* 310 */
+ ENTRY_SAME(epoll_create1)
+ ENTRY_SAME(dup3)
+ ENTRY_SAME(pipe2)
+ ENTRY_SAME(inotify_init1)
+ ENTRY_COMP(preadv) /* 315 */
+ ENTRY_COMP(pwritev)
+ ENTRY_COMP(rt_tgsigqueueinfo)
+ ENTRY_SAME(perf_event_open)
+ ENTRY_COMP(recvmmsg)
+ ENTRY_SAME(accept4) /* 320 */
+ ENTRY_SAME(prlimit64)
+ ENTRY_SAME(fanotify_init)
+ ENTRY_COMP(fanotify_mark)
+ ENTRY_COMP(clock_adjtime)
+ ENTRY_SAME(name_to_handle_at) /* 325 */
+ ENTRY_COMP(open_by_handle_at)
+ ENTRY_SAME(syncfs)
+ ENTRY_SAME(setns)
+ ENTRY_COMP(sendmmsg)
+
+ /* Nothing yet */
+
+#undef ENTRY_SAME
+#undef ENTRY_DIFF
+#undef ENTRY_UHOH
+#undef ENTRY_COMP
+#undef ENTRY_OURS
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
new file mode 100644
index 00000000..45b7389d
--- /dev/null
+++ b/arch/parisc/kernel/time.c
@@ -0,0 +1,276 @@
+/*
+ * linux/arch/parisc/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ * Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King
+ * Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
+ *
+ * 1994-07-02 Alan Modra
+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1998-12-20 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ */
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/profile.h>
+#include <linux/clocksource.h>
+#include <linux/platform_device.h>
+#include <linux/ftrace.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/param.h>
+#include <asm/pdc.h>
+#include <asm/led.h>
+
+#include <linux/timex.h>
+
+static unsigned long clocktick __read_mostly; /* timer cycles per tick */
+
+/*
+ * We keep time on PA-RISC Linux by using the Interval Timer which is
+ * a pair of registers; one is read-only and one is write-only; both
+ * accessed through CR16. The read-only register is 32 or 64 bits wide,
+ * and increments by 1 every CPU clock tick. The architecture only
+ * guarantees us a rate between 0.5 and 2, but all implementations use a
+ * rate of 1. The write-only register is 32-bits wide. When the lowest
+ * 32 bits of the read-only register compare equal to the write-only
+ * register, it raises a maskable external interrupt. Each processor has
+ * an Interval Timer of its own and they are not synchronised.
+ *
+ * We want to generate an interrupt every 1/HZ seconds. So we program
+ * CR16 to interrupt every @clocktick cycles. The it_value in cpu_data
+ * is programmed with the intended time of the next tick. We can be
+ * held off for an arbitrarily long period of time by interrupts being
+ * disabled, so we may miss one or more ticks.
+ */
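+
+/* Worked example (figures are illustrative, not taken from this code):
+ * a 440 MHz PA-8500 reports PAGE0->mem_10msec of roughly 4,400,000
+ * CR16 cycles per 10 ms, so with HZ=250 time_init() below computes
+ * clocktick = (100 * 4400000) / 250 = 1,760,000 cycles between ticks.
+ */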
+irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
+{
+ unsigned long now, now2;
+ unsigned long next_tick;
+ unsigned long cycles_elapsed, ticks_elapsed = 1;
+ unsigned long cycles_remainder;
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
+
+ /* gcc can optimize for "read-only" case with a local clocktick */
+ unsigned long cpt = clocktick;
+
+ profile_tick(CPU_PROFILING);
+
+ /* Initialize next_tick to the expected tick time. */
+ next_tick = cpuinfo->it_value;
+
+ /* Get current cycle counter (Control Register 16). */
+ now = mfctl(16);
+
+ cycles_elapsed = now - next_tick;
+
+ if ((cycles_elapsed >> 6) < cpt) {
+ /* use "cheap" math (add/subtract) instead
+ * of the more expensive div/mul method
+ */
+ cycles_remainder = cycles_elapsed;
+ while (cycles_remainder > cpt) {
+ cycles_remainder -= cpt;
+ ticks_elapsed++;
+ }
+ } else {
+ /* TODO: Reduce this to one fdiv op */
+ cycles_remainder = cycles_elapsed % cpt;
+ ticks_elapsed += cycles_elapsed / cpt;
+ }
+
+ /* convert from "division remainder" to "remainder of clock tick" */
+ cycles_remainder = cpt - cycles_remainder;
+
+ /* Determine when (in CR16 cycles) next IT interrupt will fire.
+ * We want IT to fire modulo clocktick even if we miss/skip some.
+ * But those interrupts don't in fact get delivered that regularly.
+ */
+ next_tick = now + cycles_remainder;
+
+ cpuinfo->it_value = next_tick;
+
+ /* Program the IT when to deliver the next interrupt.
+ * Only bottom 32-bits of next_tick are writable in CR16!
+ */
+ mtctl(next_tick, 16);
+
+ /* Skip one clocktick on purpose if we missed next_tick.
+ * The new CR16 must be "later" than current CR16 otherwise
+	 * itimer would not fire until CR16 wrapped - e.g. 4 seconds
+	 * later on a 1 GHz processor.  We'll account for the missed
+ * tick on the next timer interrupt.
+ *
+ * "next_tick - now" will always give the difference regardless
+ * if one or the other wrapped. If "now" is "bigger" we'll end up
+ * with a very large unsigned number.
+ */
+ now2 = mfctl(16);
+ if (next_tick - now2 > cpt)
+ mtctl(next_tick+cpt, 16);
+
+#if 1
+/*
+ * GGG: DEBUG code for how many cycles programming CR16 used.
+ */
+ if (unlikely(now2 - now > 0x3000)) /* 12K cycles */
+ printk (KERN_CRIT "timer_interrupt(CPU %d): SLOW! 0x%lx cycles!"
+ " cyc %lX rem %lX "
+ " next/now %lX/%lX\n",
+ cpu, now2 - now, cycles_elapsed, cycles_remainder,
+ next_tick, now );
+#endif
+
+ /* Can we differentiate between "early CR16" (aka Scenario 1) and
+ * "long delay" (aka Scenario 3)? I don't think so.
+ *
+ * Timer_interrupt will be delivered at least a few hundred cycles
+ * after the IT fires. But it's arbitrary how much time passes
+ * before we call it "late". I've picked one second.
+ *
+ * It's important NO printk's are between reading CR16 and
+ * setting up the next value. May introduce huge variance.
+ */
+ if (unlikely(ticks_elapsed > HZ)) {
+ /* Scenario 3: very long delay? bad in any case */
+ printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
+ " cycles %lX rem %lX "
+ " next/now %lX/%lX\n",
+ cpu,
+ cycles_elapsed, cycles_remainder,
+ next_tick, now );
+ }
+
+ /* Done mucking with unreliable delivery of interrupts.
+	 * Go do system housekeeping.
+ */
+
+ if (!--cpuinfo->prof_counter) {
+ cpuinfo->prof_counter = cpuinfo->prof_multiplier;
+ update_process_times(user_mode(get_irq_regs()));
+ }
+
+ if (cpu == 0)
+ xtime_update(ticks_elapsed);
+
+ return IRQ_HANDLED;
+}
+
+
+unsigned long profile_pc(struct pt_regs *regs)
+{
+ unsigned long pc = instruction_pointer(regs);
+
+ if (regs->gr[0] & PSW_N)
+ pc -= 4;
+
+#ifdef CONFIG_SMP
+ if (in_lock_functions(pc))
+ pc = regs->gr[2];
+#endif
+
+ return pc;
+}
+EXPORT_SYMBOL(profile_pc);
+
+
+/* clock source code */
+
+static cycle_t read_cr16(struct clocksource *cs)
+{
+ return get_cycles();
+}
+
+static struct clocksource clocksource_cr16 = {
+ .name = "cr16",
+ .rating = 300,
+ .read = read_cr16,
+ .mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
+ .mult = 0, /* to be set */
+ .shift = 22,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#ifdef CONFIG_SMP
+int update_cr16_clocksource(void)
+{
+ /* since the cr16 cycle counters are not synchronized across CPUs,
+ we'll check if we should switch to a safe clocksource: */
+ if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
+ clocksource_change_rating(&clocksource_cr16, 0);
+ return 1;
+ }
+
+ return 0;
+}
+#else
+int update_cr16_clocksource(void)
+{
+ return 0; /* no change */
+}
+#endif /*CONFIG_SMP*/
+
+void __init start_cpu_itimer(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long next_tick = mfctl(16) + clocktick;
+
+ mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
+
+ per_cpu(cpu_data, cpu).it_value = next_tick;
+}
+
+static struct platform_device rtc_generic_dev = {
+ .name = "rtc-generic",
+ .id = -1,
+};
+
+static int __init rtc_init(void)
+{
+ if (platform_device_register(&rtc_generic_dev) < 0)
+ printk(KERN_ERR "unable to register rtc device...\n");
+
+ /* not necessarily an error */
+ return 0;
+}
+module_init(rtc_init);
+
+void read_persistent_clock(struct timespec *ts)
+{
+ static struct pdc_tod tod_data;
+ if (pdc_tod_read(&tod_data) == 0) {
+ ts->tv_sec = tod_data.tod_sec;
+ ts->tv_nsec = tod_data.tod_usec * 1000;
+ } else {
+ printk(KERN_ERR "Error reading tod clock\n");
+ ts->tv_sec = 0;
+ ts->tv_nsec = 0;
+ }
+}
+
+void __init time_init(void)
+{
+ unsigned long current_cr16_khz;
+
+ clocktick = (100 * PAGE0->mem_10msec) / HZ;
+
+ start_cpu_itimer(); /* get CPU 0 started */
+
+ /* register at clocksource framework */
+ current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
+ clocksource_cr16.mult = clocksource_khz2mult(current_cr16_khz,
+ clocksource_cr16.shift);
+ clocksource_register(&clocksource_cr16);
+}
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
new file mode 100644
index 00000000..f5159381
--- /dev/null
+++ b/arch/parisc/kernel/topology.c
@@ -0,0 +1,37 @@
+/*
+ * arch/parisc/kernel/topology.c - Populate sysfs with topology information
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/cache.h>
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+ int num;
+
+ for_each_present_cpu(num) {
+ register_cpu(&per_cpu(cpu_devices, num), num);
+ }
+ return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
new file mode 100644
index 00000000..8b58bf0b
--- /dev/null
+++ b/arch/parisc/kernel/traps.c
@@ -0,0 +1,885 @@
+/*
+ * linux/arch/parisc/traps.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
+ */
+
+/*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'asm.s'.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/console.h>
+#include <linux/bug.h>
+
+#include <asm/assembly.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/unaligned.h>
+#include <asm/atomic.h>
+#include <asm/smp.h>
+#include <asm/pdc.h>
+#include <asm/pdc_chassis.h>
+#include <asm/unwind.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+#include "../math-emu/math-emu.h" /* for handle_fpe() */
+
+#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
+ /* dumped to the console via printk) */
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+DEFINE_SPINLOCK(pa_dbit_lock);
+#endif
+
+static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
+ struct pt_regs *regs);
+
+static int printbinary(char *buf, unsigned long x, int nbits)
+{
+ unsigned long mask = 1UL << (nbits - 1);
+ while (mask != 0) {
+ *buf++ = (mask & x ? '1' : '0');
+ mask >>= 1;
+ }
+ *buf = '\0';
+
+ return nbits;
+}
+
+#ifdef CONFIG_64BIT
+#define RFMT "%016lx"
+#else
+#define RFMT "%08lx"
+#endif
+#define FFMT "%016llx" /* fpregs are 64-bit always */
+
+#define PRINTREGS(lvl,r,f,fmt,x) \
+ printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n", \
+ lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1], \
+ (r)[(x)+2], (r)[(x)+3])
+
+static void print_gr(char *level, struct pt_regs *regs)
+{
+ int i;
+ char buf[64];
+
+ printk("%s\n", level);
+ printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
+ printbinary(buf, regs->gr[0], 32);
+ printk("%sPSW: %s %s\n", level, buf, print_tainted());
+
+ for (i = 0; i < 32; i += 4)
+ PRINTREGS(level, regs->gr, "r", RFMT, i);
+}
+
+static void print_fr(char *level, struct pt_regs *regs)
+{
+ int i;
+ char buf[64];
+ struct { u32 sw[2]; } s;
+
+ /* FR are 64bit everywhere. Need to use asm to get the content
+ * of fpsr/fper1, and we assume that we won't have a FP Identify
+ * in our way, otherwise we're screwed.
+ * The fldd is used to restore the T-bit if there was one, as the
+ * store clears it anyway.
+ * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
+ asm volatile ("fstd %%fr0,0(%1) \n\t"
+ "fldd 0(%1),%%fr0 \n\t"
+ : "=m" (s) : "r" (&s) : "r0");
+
+ printk("%s\n", level);
+ printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
+ printbinary(buf, s.sw[0], 32);
+ printk("%sFPSR: %s\n", level, buf);
+ printk("%sFPER1: %08x\n", level, s.sw[1]);
+
+ /* here we'll print fr0 again, tho it'll be meaningless */
+ for (i = 0; i < 32; i += 4)
+ PRINTREGS(level, regs->fr, "fr", FFMT, i);
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ int i, user;
+ char *level;
+ unsigned long cr30, cr31;
+
+ user = user_mode(regs);
+ level = user ? KERN_DEBUG : KERN_CRIT;
+
+ print_gr(level, regs);
+
+ for (i = 0; i < 8; i += 4)
+ PRINTREGS(level, regs->sr, "sr", RFMT, i);
+
+ if (user)
+ print_fr(level, regs);
+
+ cr30 = mfctl(30);
+ cr31 = mfctl(31);
+ printk("%s\n", level);
+ printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
+ level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
+ printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
+ level, regs->iir, regs->isr, regs->ior);
+ printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
+ level, current_thread_info()->cpu, cr30, cr31);
+ printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
+
+ if (user) {
+ printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
+ printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
+ printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
+ } else {
+ printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
+ printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
+ printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
+
+ parisc_show_stack(current, NULL, regs);
+ }
+}
+
+
+void dump_stack(void)
+{
+ show_stack(NULL, NULL);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+static void do_show_stack(struct unwind_frame_info *info)
+{
+ int i = 1;
+
+ printk(KERN_CRIT "Backtrace:\n");
+ while (i <= 16) {
+ if (unwind_once(info) < 0 || info->ip == 0)
+ break;
+
+ if (__kernel_text_address(info->ip)) {
+ printk(KERN_CRIT " [<" RFMT ">] %pS\n",
+ info->ip, (void *) info->ip);
+ i++;
+ }
+ }
+ printk(KERN_CRIT "\n");
+}
+
+static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
+ struct pt_regs *regs)
+{
+ struct unwind_frame_info info;
+ struct task_struct *t;
+
+ t = task ? task : current;
+ if (regs) {
+ unwind_frame_init(&info, t, regs);
+ goto show_stack;
+ }
+
+ if (t == current) {
+ unsigned long sp;
+
+HERE:
+ asm volatile ("copy %%r30, %0" : "=r"(sp));
+ {
+ struct pt_regs r;
+
+ memset(&r, 0, sizeof(struct pt_regs));
+ r.iaoq[0] = (unsigned long)&&HERE;
+ r.gr[2] = (unsigned long)__builtin_return_address(0);
+ r.gr[30] = sp;
+
+ unwind_frame_init(&info, current, &r);
+ }
+ } else {
+ unwind_frame_init_from_blocked_task(&info, t);
+ }
+
+show_stack:
+ do_show_stack(&info);
+}
+
+void show_stack(struct task_struct *t, unsigned long *sp)
+{
+ return parisc_show_stack(t, sp, NULL);
+}
+
+int is_valid_bugaddr(unsigned long iaoq)
+{
+ return 1;
+}
+
+void die_if_kernel(char *str, struct pt_regs *regs, long err)
+{
+ if (user_mode(regs)) {
+ if (err == 0)
+ return; /* STFU */
+
+ printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
+ current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
+#ifdef PRINT_USER_FAULTS
+ /* XXX for debugging only */
+ show_regs(regs);
+#endif
+ return;
+ }
+
+ oops_in_progress = 1;
+
+ oops_enter();
+
+ /* Amuse the user in a SPARC fashion */
+ if (err) printk(KERN_CRIT
+ " _______________________________ \n"
+ " < Your System ate a SPARC! Gah! >\n"
+ " ------------------------------- \n"
+ " \\ ^__^\n"
+ " (__)\\ )\\/\\\n"
+ " U ||----w |\n"
+ " || ||\n");
+
+ /* unlock the pdc lock if necessary */
+ pdc_emergency_unlock();
+
+ /* maybe the kernel hasn't booted very far yet and hasn't been able
+ * to initialize the serial or STI console. In that case we should
+ * re-enable the pdc console, so that the user will be able to
+ * identify the problem. */
+ if (!console_drivers)
+ pdc_console_restart();
+
+ if (err)
+ printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
+ current->comm, task_pid_nr(current), str, err);
+
+ /* Wot's wrong wif bein' racy? */
+ if (current->thread.flags & PARISC_KERNEL_DEATH) {
+ printk(KERN_CRIT "%s() recursion detected.\n", __func__);
+ local_irq_enable();
+ while (1);
+ }
+ current->thread.flags |= PARISC_KERNEL_DEATH;
+
+ show_regs(regs);
+ dump_stack();
+ add_taint(TAINT_DIE);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops) {
+ printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+ ssleep(5);
+ panic("Fatal exception");
+ }
+
+ oops_exit();
+ do_exit(SIGSEGV);
+}
+
+int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
+{
+ return syscall(regs);
+}
+
+/* gdb uses break 4,8 */
+#define GDB_BREAK_INSN 0x10004
+static void handle_gdb_break(struct pt_regs *regs, int wot)
+{
+ struct siginfo si;
+
+ si.si_signo = SIGTRAP;
+ si.si_errno = 0;
+ si.si_code = wot;
+ si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
+ force_sig_info(SIGTRAP, &si, current);
+}
+
+static void handle_break(struct pt_regs *regs)
+{
+ unsigned iir = regs->iir;
+
+ if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
+ /* check if a BUG() or WARN() trapped here. */
+ enum bug_trap_type tt;
+ tt = report_bug(regs->iaoq[0] & ~3, regs);
+ if (tt == BUG_TRAP_TYPE_WARN) {
+ regs->iaoq[0] += 4;
+ regs->iaoq[1] += 4;
+ return; /* return to next instruction when WARN_ON(). */
+ }
+ die_if_kernel("Unknown kernel breakpoint", regs,
+ (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
+ }
+
+#ifdef PRINT_USER_FAULTS
+ if (unlikely(iir != GDB_BREAK_INSN)) {
+ printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
+ iir & 31, (iir>>13) & ((1<<13)-1),
+ task_pid_nr(current), current->comm);
+ show_regs(regs);
+ }
+#endif
+
+ /* send standard GDB signal */
+ handle_gdb_break(regs, TRAP_BRKPT);
+}
+
+static void default_trap(int code, struct pt_regs *regs)
+{
+ printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
+ show_regs(regs);
+}
+
+void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
+
+
+void transfer_pim_to_trap_frame(struct pt_regs *regs)
+{
+ register int i;
+ extern unsigned int hpmc_pim_data[];
+ struct pdc_hpmc_pim_11 *pim_narrow;
+ struct pdc_hpmc_pim_20 *pim_wide;
+
+ if (boot_cpu_data.cpu_type >= pcxu) {
+
+ pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
+
+ /*
+ * Note: The following code will probably generate a
+ * bunch of truncation error warnings from the compiler.
+ * Could be handled with an ifdef, but perhaps there
+ * is a better way.
+ */
+
+ regs->gr[0] = pim_wide->cr[22];
+
+ for (i = 1; i < 32; i++)
+ regs->gr[i] = pim_wide->gr[i];
+
+ for (i = 0; i < 32; i++)
+ regs->fr[i] = pim_wide->fr[i];
+
+ for (i = 0; i < 8; i++)
+ regs->sr[i] = pim_wide->sr[i];
+
+ regs->iasq[0] = pim_wide->cr[17];
+ regs->iasq[1] = pim_wide->iasq_back;
+ regs->iaoq[0] = pim_wide->cr[18];
+ regs->iaoq[1] = pim_wide->iaoq_back;
+
+ regs->sar = pim_wide->cr[11];
+ regs->iir = pim_wide->cr[19];
+ regs->isr = pim_wide->cr[20];
+ regs->ior = pim_wide->cr[21];
+ }
+ else {
+ pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
+
+ regs->gr[0] = pim_narrow->cr[22];
+
+ for (i = 1; i < 32; i++)
+ regs->gr[i] = pim_narrow->gr[i];
+
+ for (i = 0; i < 32; i++)
+ regs->fr[i] = pim_narrow->fr[i];
+
+ for (i = 0; i < 8; i++)
+ regs->sr[i] = pim_narrow->sr[i];
+
+ regs->iasq[0] = pim_narrow->cr[17];
+ regs->iasq[1] = pim_narrow->iasq_back;
+ regs->iaoq[0] = pim_narrow->cr[18];
+ regs->iaoq[1] = pim_narrow->iaoq_back;
+
+ regs->sar = pim_narrow->cr[11];
+ regs->iir = pim_narrow->cr[19];
+ regs->isr = pim_narrow->cr[20];
+ regs->ior = pim_narrow->cr[21];
+ }
+
+ /*
+ * The following fields only have meaning if we came through
+ * another path. So just zero them here.
+ */
+
+ regs->ksp = 0;
+ regs->kpc = 0;
+ regs->orig_r28 = 0;
+}
+
+
+/*
+ * This routine is called as a last resort when everything else
+ * has gone clearly wrong. We get called for faults in kernel space,
+ * and HPMC's.
+ */
+void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
+{
+ static DEFINE_SPINLOCK(terminate_lock);
+
+ oops_in_progress = 1;
+
+ set_eiem(0);
+ local_irq_disable();
+ spin_lock(&terminate_lock);
+
+ /* unlock the pdc lock if necessary */
+ pdc_emergency_unlock();
+
+ /* restart pdc console if necessary */
+ if (!console_drivers)
+ pdc_console_restart();
+
+ /* Not all paths will gutter the processor... */
+ switch(code){
+
+ case 1:
+ transfer_pim_to_trap_frame(regs);
+ break;
+
+ default:
+ /* Fall through */
+ break;
+
+ }
+
+ {
+ /* show_stack(NULL, (unsigned long *)regs->gr[30]); */
+ struct unwind_frame_info info;
+ unwind_frame_init(&info, current, regs);
+ do_show_stack(&info);
+ }
+
+ printk("\n");
+ printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
+ msg, code, regs, offset);
+ show_regs(regs);
+
+ spin_unlock(&terminate_lock);
+
+ /* put soft power button back under hardware control;
+ * if the user had pressed it once at any time, the
+ * system will shut down immediately right here. */
+ pdc_soft_power_button(0);
+
+ /* Call kernel panic() so reboot timeouts work properly
+ * FIXME: This function should be on the list of
+ * panic notifiers, and we should call panic
+ * directly from the location that we wish.
+ * e.g. We should not call panic from
+	 *	  parisc_terminate, but rather the other way around.
+ * This hack works, prints the panic message twice,
+ * and it enables reboot timers!
+ */
+ panic(msg);
+}
+
+void notrace handle_interruption(int code, struct pt_regs *regs)
+{
+ unsigned long fault_address = 0;
+ unsigned long fault_space = 0;
+ struct siginfo si;
+
+ if (code == 1)
+ pdc_console_restart(); /* switch back to pdc if HPMC */
+ else
+ local_irq_enable();
+
+ /* Security check:
+ * If the priority level is still user, and the
+ * faulting space is not equal to the active space
+ * then the user is attempting something in a space
+ * that does not belong to them. Kill the process.
+ *
+ * This is normally the situation when the user
+ * attempts to jump into the kernel space at the
+ * wrong offset, be it at the gateway page or a
+ * random location.
+ *
+ * We cannot normally signal the process because it
+ * could *be* on the gateway page, and processes
+ * executing on the gateway page can't have signals
+ * delivered.
+ *
+	 * We merely readjust the address into the user's
+ * space, at a destination address of zero, and
+ * allow processing to continue.
+ */
+ if (((unsigned long)regs->iaoq[0] & 3) &&
+ ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
+ /* Kill the user process later */
+ regs->iaoq[0] = 0 | 3;
+ regs->iaoq[1] = regs->iaoq[0] + 4;
+ regs->iasq[0] = regs->iasq[1] = regs->sr[7];
+ regs->gr[0] &= ~PSW_B;
+ return;
+ }
+
+#if 0
+ printk(KERN_CRIT "Interruption # %d\n", code);
+#endif
+
+ switch(code) {
+
+ case 1:
+ /* High-priority machine check (HPMC) */
+
+ /* set up a new led state on systems shipped with a LED State panel */
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
+
+ parisc_terminate("High Priority Machine Check (HPMC)",
+ regs, code, 0);
+ /* NOT REACHED */
+
+ case 2:
+ /* Power failure interrupt */
+ printk(KERN_CRIT "Power failure interrupt !\n");
+ return;
+
+ case 3:
+ /* Recovery counter trap */
+ regs->gr[0] &= ~PSW_R;
+ if (user_space(regs))
+ handle_gdb_break(regs, TRAP_TRACE);
+ /* else this must be the start of a syscall - just let it run */
+ return;
+
+ case 5:
+ /* Low-priority machine check */
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
+
+ flush_cache_all();
+ flush_tlb_all();
+ cpu_lpmc(5, regs);
+ return;
+
+ case 6:
+ /* Instruction TLB miss fault/Instruction page fault */
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+ break;
+
+ case 8:
+ /* Illegal instruction trap */
+ die_if_kernel("Illegal instruction", regs, code);
+ si.si_code = ILL_ILLOPC;
+ goto give_sigill;
+
+ case 9:
+ /* Break instruction trap */
+ handle_break(regs);
+ return;
+
+ case 10:
+ /* Privileged operation trap */
+ die_if_kernel("Privileged operation", regs, code);
+ si.si_code = ILL_PRVOPC;
+ goto give_sigill;
+
+ case 11:
+ /* Privileged register trap */
+ if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
+
+ /* This is a MFCTL cr26/cr27 to gr instruction.
+ * PCXS traps on this, so we need to emulate it.
+ */
+
+ if (regs->iir & 0x00200000)
+ regs->gr[regs->iir & 0x1f] = mfctl(27);
+ else
+ regs->gr[regs->iir & 0x1f] = mfctl(26);
+
+ regs->iaoq[0] = regs->iaoq[1];
+ regs->iaoq[1] += 4;
+ regs->iasq[0] = regs->iasq[1];
+ return;
+ }
+
+ die_if_kernel("Privileged register usage", regs, code);
+ si.si_code = ILL_PRVREG;
+ give_sigill:
+ si.si_signo = SIGILL;
+ si.si_errno = 0;
+ si.si_addr = (void __user *) regs->iaoq[0];
+ force_sig_info(SIGILL, &si, current);
+ return;
+
+ case 12:
+ /* Overflow Trap, let the userland signal handler do the cleanup */
+ si.si_signo = SIGFPE;
+ si.si_code = FPE_INTOVF;
+ si.si_addr = (void __user *) regs->iaoq[0];
+ force_sig_info(SIGFPE, &si, current);
+ return;
+
+ case 13:
+ /* Conditional Trap
+ The condition succeeds in an instruction which traps
+ on condition */
+ if(user_mode(regs)){
+ si.si_signo = SIGFPE;
+ /* Set to zero, and let the userspace app figure it out from
+ the insn pointed to by si_addr */
+ si.si_code = 0;
+ si.si_addr = (void __user *) regs->iaoq[0];
+ force_sig_info(SIGFPE, &si, current);
+ return;
+ }
+ /* The kernel doesn't want to handle condition codes */
+ break;
+
+ case 14:
+ /* Assist Exception Trap, i.e. floating point exception. */
+ die_if_kernel("Floating point exception", regs, 0); /* quiet */
+ handle_fpe(regs);
+ return;
+
+ case 15:
+ /* Data TLB miss fault/Data page fault */
+ /* Fall through */
+ case 16:
+ /* Non-access instruction TLB miss fault */
+ /* The instruction TLB entry needed for the target address of the FIC
+	   is absent, and hardware can't find it, so we get to clean up */
+ /* Fall through */
+ case 17:
+ /* Non-access data TLB miss fault/Non-access data page fault */
+ /* FIXME:
+ Still need to add slow path emulation code here!
+ If the insn used a non-shadow register, then the tlb
+ handlers could not have their side-effect (e.g. probe
+ writing to a target register) emulated since rfir would
+ erase the changes to said register. Instead we have to
+ setup everything, call this function we are in, and emulate
+ by hand. Technically we need to emulate:
+ fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
+ */
+ fault_address = regs->ior;
+ fault_space = regs->isr;
+ break;
+
+ case 18:
+		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
+ /* Check for unaligned access */
+ if (check_unaligned(regs)) {
+ handle_unaligned(regs);
+ return;
+ }
+ /* Fall Through */
+ case 26:
+ /* PCXL: Data memory access rights trap */
+ fault_address = regs->ior;
+ fault_space = regs->isr;
+ break;
+
+ case 19:
+ /* Data memory break trap */
+ regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
+ /* fall thru */
+ case 21:
+ /* Page reference trap */
+ handle_gdb_break(regs, TRAP_HWBKPT);
+ return;
+
+ case 25:
+ /* Taken branch trap */
+ regs->gr[0] &= ~PSW_T;
+ if (user_space(regs))
+ handle_gdb_break(regs, TRAP_BRANCH);
+ /* else this must be the start of a syscall - just let it
+ * run.
+ */
+ return;
+
+ case 7:
+ /* Instruction access rights */
+ /* PCXL: Instruction memory protection trap */
+
+ /*
+ * This could be caused by either: 1) a process attempting
+ * to execute within a vma that does not have execute
+ * permission, or 2) an access rights violation caused by a
+ * flush only translation set up by ptep_get_and_clear().
+ * So we check the vma permissions to differentiate the two.
+ * If the vma indicates we have execute permission, then
+ * the cause is the latter one. In this case, we need to
+ * call do_page_fault() to fix the problem.
+ */
+
+ if (user_mode(regs)) {
+ struct vm_area_struct *vma;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+ if (vma && (regs->iaoq[0] >= vma->vm_start)
+ && (vma->vm_flags & VM_EXEC)) {
+
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+ up_read(&current->mm->mmap_sem);
+ break; /* call do_page_fault() */
+ }
+ up_read(&current->mm->mmap_sem);
+ }
+ /* Fall Through */
+ case 27:
+ /* Data memory protection ID trap */
+ if (code == 27 && !user_mode(regs) &&
+ fixup_exception(regs))
+ return;
+
+ die_if_kernel("Protection id trap", regs, code);
+ si.si_code = SEGV_MAPERR;
+ si.si_signo = SIGSEGV;
+ si.si_errno = 0;
+ if (code == 7)
+ si.si_addr = (void __user *) regs->iaoq[0];
+ else
+ si.si_addr = (void __user *) regs->ior;
+ force_sig_info(SIGSEGV, &si, current);
+ return;
+
+ case 28:
+ /* Unaligned data reference trap */
+ handle_unaligned(regs);
+ return;
+
+ default:
+ if (user_mode(regs)) {
+#ifdef PRINT_USER_FAULTS
+ printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
+ task_pid_nr(current), current->comm);
+ show_regs(regs);
+#endif
+ /* SIGBUS, for lack of a better one. */
+ si.si_signo = SIGBUS;
+ si.si_code = BUS_OBJERR;
+ si.si_errno = 0;
+ si.si_addr = (void __user *) regs->ior;
+ force_sig_info(SIGBUS, &si, current);
+ return;
+ }
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+
+ parisc_terminate("Unexpected interruption", regs, code, 0);
+ /* NOT REACHED */
+ }
+
+ if (user_mode(regs)) {
+ if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
+#ifdef PRINT_USER_FAULTS
+ if (fault_space == 0)
+ printk(KERN_DEBUG "User Fault on Kernel Space ");
+ else
+ printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
+ code);
+ printk(KERN_CONT "pid=%d command='%s'\n",
+ task_pid_nr(current), current->comm);
+ show_regs(regs);
+#endif
+ si.si_signo = SIGSEGV;
+ si.si_errno = 0;
+ si.si_code = SEGV_MAPERR;
+ si.si_addr = (void __user *) regs->ior;
+ force_sig_info(SIGSEGV, &si, current);
+ return;
+ }
+ }
+ else {
+
+ /*
+ * The kernel should never fault on its own address space.
+ */
+
+ if (fault_space == 0)
+ {
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+ parisc_terminate("Kernel Fault", regs, code, fault_address);
+
+ }
+ }
+
+ do_page_fault(regs, code, fault_address);
+}
+
+
+int __init check_ivt(void *iva)
+{
+ extern u32 os_hpmc_size;
+ extern const u32 os_hpmc[];
+
+ int i;
+ u32 check = 0;
+ u32 *ivap;
+ u32 *hpmcp;
+ u32 length;
+
+ if (strcmp((char *)iva, "cows can fly"))
+ return -1;
+
+ ivap = (u32 *)iva;
+
+ for (i = 0; i < 8; i++)
+ *ivap++ = 0;
+
+ /* Compute Checksum for HPMC handler */
+ length = os_hpmc_size;
+ ivap[7] = length;
+
+ hpmcp = (u32 *)os_hpmc;
+
+ for (i=0; i<length/4; i++)
+ check += *hpmcp++;
+
+ for (i=0; i<8; i++)
+ check += ivap[i];
+
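+ /* The negated sum makes the HPMC vector words plus the handler code
+ * sum to zero; firmware checksums the HPMC vector before it will
+ * branch to the handler. */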
+ ivap[5] = -check;
+
+ return 0;
+}
+
+#ifndef CONFIG_64BIT
+extern const void fault_vector_11;
+#endif
+extern const void fault_vector_20;
+
+void __init trap_init(void)
+{
+ void *iva;
+
+ if (boot_cpu_data.cpu_type >= pcxu)
+ iva = (void *) &fault_vector_20;
+ else
+#ifdef CONFIG_64BIT
+ panic("Can't boot 64-bit OS on PA1.1 processor!");
+#else
+ iva = (void *) &fault_vector_11;
+#endif
+
+ if (check_ivt(iva))
+ panic("IVT invalid");
+}
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
new file mode 100644
index 00000000..234e3682
--- /dev/null
+++ b/arch/parisc/kernel/unaligned.c
@@ -0,0 +1,754 @@
+/*
+ * Unaligned memory access handler
+ *
+ * Copyright (C) 2001 Randolph Chung <tausq@debian.org>
+ * Significantly tweaked by LaMont Jones <lamont@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/ratelimit.h>
+#include <asm/uaccess.h>
+
+/* #define DEBUG_UNALIGNED 1 */
+
+#ifdef DEBUG_UNALIGNED
+#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
+#else
+#define DPRINTF(fmt, args...)
+#endif
+
+#ifdef CONFIG_64BIT
+#define RFMT "%016lx"
+#else
+#define RFMT "%08lx"
+#endif
+
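+/* FIXUP_BRANCH(lbl) emits a long branch back to lbl: ldil/ldo build the
+ * full address of lbl in %r1 and bv,n jumps there, nullifying the delay
+ * slot. */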
+#define FIXUP_BRANCH(lbl) \
+ "\tldil L%%" #lbl ", %%r1\n" \
+ "\tldo R%%" #lbl "(%%r1), %%r1\n" \
+ "\tbv,n %%r0(%%r1)\n"
+/* If you use FIXUP_BRANCH, then you must list this clobber */
+#define FIXUP_BRANCH_CLOBBER "r1"
+
+/* 1111 1100 0000 0000 0001 0011 1100 0000 */
+#define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6)
+#define OPCODE2(a,b) ((a)<<26|(b)<<1)
+#define OPCODE3(a,b) ((a)<<26|(b)<<2)
+#define OPCODE4(a) ((a)<<26)
+#define OPCODE1_MASK OPCODE1(0x3f,1,0xf)
+#define OPCODE2_MASK OPCODE2(0x3f,1)
+#define OPCODE3_MASK OPCODE3(0x3f,1)
+#define OPCODE4_MASK OPCODE4(0x3f)
+
+/* skip LDB - never unaligned (index) */
+#define OPCODE_LDH_I OPCODE1(0x03,0,0x1)
+#define OPCODE_LDW_I OPCODE1(0x03,0,0x2)
+#define OPCODE_LDD_I OPCODE1(0x03,0,0x3)
+#define OPCODE_LDDA_I OPCODE1(0x03,0,0x4)
+#define OPCODE_LDCD_I OPCODE1(0x03,0,0x5)
+#define OPCODE_LDWA_I OPCODE1(0x03,0,0x6)
+#define OPCODE_LDCW_I OPCODE1(0x03,0,0x7)
+/* skip LDB - never unaligned (short) */
+#define OPCODE_LDH_S OPCODE1(0x03,1,0x1)
+#define OPCODE_LDW_S OPCODE1(0x03,1,0x2)
+#define OPCODE_LDD_S OPCODE1(0x03,1,0x3)
+#define OPCODE_LDDA_S OPCODE1(0x03,1,0x4)
+#define OPCODE_LDCD_S OPCODE1(0x03,1,0x5)
+#define OPCODE_LDWA_S OPCODE1(0x03,1,0x6)
+#define OPCODE_LDCW_S OPCODE1(0x03,1,0x7)
+/* skip STB - never unaligned */
+#define OPCODE_STH OPCODE1(0x03,1,0x9)
+#define OPCODE_STW OPCODE1(0x03,1,0xa)
+#define OPCODE_STD OPCODE1(0x03,1,0xb)
+/* skip STBY - never unaligned */
+/* skip STDBY - never unaligned */
+#define OPCODE_STWA OPCODE1(0x03,1,0xe)
+#define OPCODE_STDA OPCODE1(0x03,1,0xf)
+
+#define OPCODE_FLDWX OPCODE1(0x09,0,0x0)
+#define OPCODE_FLDWXR OPCODE1(0x09,0,0x1)
+#define OPCODE_FSTWX OPCODE1(0x09,0,0x8)
+#define OPCODE_FSTWXR OPCODE1(0x09,0,0x9)
+#define OPCODE_FLDWS OPCODE1(0x09,1,0x0)
+#define OPCODE_FLDWSR OPCODE1(0x09,1,0x1)
+#define OPCODE_FSTWS OPCODE1(0x09,1,0x8)
+#define OPCODE_FSTWSR OPCODE1(0x09,1,0x9)
+#define OPCODE_FLDDX OPCODE1(0x0b,0,0x0)
+#define OPCODE_FSTDX OPCODE1(0x0b,0,0x8)
+#define OPCODE_FLDDS OPCODE1(0x0b,1,0x0)
+#define OPCODE_FSTDS OPCODE1(0x0b,1,0x8)
+
+#define OPCODE_LDD_L OPCODE2(0x14,0)
+#define OPCODE_FLDD_L OPCODE2(0x14,1)
+#define OPCODE_STD_L OPCODE2(0x1c,0)
+#define OPCODE_FSTD_L OPCODE2(0x1c,1)
+
+#define OPCODE_LDW_M OPCODE3(0x17,1)
+#define OPCODE_FLDW_L OPCODE3(0x17,0)
+#define OPCODE_FSTW_L OPCODE3(0x1f,0)
+#define OPCODE_STW_M OPCODE3(0x1f,1)
+
+#define OPCODE_LDH_L OPCODE4(0x11)
+#define OPCODE_LDW_L OPCODE4(0x12)
+#define OPCODE_LDWM OPCODE4(0x13)
+#define OPCODE_STH_L OPCODE4(0x19)
+#define OPCODE_STW_L OPCODE4(0x1A)
+#define OPCODE_STWM OPCODE4(0x1B)
+
+#define MAJOR_OP(i) (((i)>>26)&0x3f)
+#define R1(i) (((i)>>21)&0x1f)
+#define R2(i) (((i)>>16)&0x1f)
+#define R3(i) ((i)&0x1f)
+#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
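+/* IM(i,n) assembles an n-bit signed immediate from a PA-RISC low-sign-extended
+ * field: bits n-1..1 hold the magnitude and bit 0 holds the sign. */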
+#define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
+#define IM5_2(i) IM((i)>>16,5)
+#define IM5_3(i) IM((i),5)
+#define IM14(i) IM((i),14)
+
+#define ERR_NOTHANDLED -1
+#define ERR_PAGEFAULT -2
+
+int unaligned_enabled __read_mostly = 1;
+
+void die_if_kernel (char *str, struct pt_regs *regs, long err);
+
+static int emulate_ldh(struct pt_regs *regs, int toreg)
+{
+ unsigned long saddr = regs->ior;
+ unsigned long val = 0;
+ int ret;
+
+ DPRINTF("load " RFMT ":" RFMT " to r%d for 2 bytes\n",
+ regs->isr, regs->ior, toreg);
+
+ __asm__ __volatile__ (
+" mtsp %4, %%sr1\n"
+"1: ldbs 0(%%sr1,%3), %%r20\n"
+"2: ldbs 1(%%sr1,%3), %0\n"
+" depw %%r20, 23, 24, %0\n"
+" copy %%r0, %1\n"
+"3: \n"
+" .section .fixup,\"ax\"\n"
+"4: ldi -2, %1\n"
+ FIXUP_BRANCH(3b)
+" .previous\n"
+ ASM_EXCEPTIONTABLE_ENTRY(1b, 4b)
+ ASM_EXCEPTIONTABLE_ENTRY(2b, 4b)
+ : "=r" (val), "=r" (ret)
+ : "0" (val), "r" (saddr), "r" (regs->isr)
+ : "r20", FIXUP_BRANCH_CLOBBER );
+
+ DPRINTF("val = 0x" RFMT "\n", val);
+
+ if (toreg)
+ regs->gr[toreg] = val;
+
+ return ret;
+}
+
+static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
+{
+ unsigned long saddr = regs->ior;
+ unsigned long val = 0;
+ int ret;
+
+ DPRINTF("load " RFMT ":" RFMT " to r%d for 4 bytes\n",
+ regs->isr, regs->ior, toreg);
+
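+ /* Load the two aligned words that span the datum and extract the
+ * unaligned word with vshd, using the byte offset to set the shift
+ * amount. */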
+ __asm__ __volatile__ (
+" zdep %3,28,2,%%r19\n" /* r19=(ofs&3)*8 */
+" mtsp %4, %%sr1\n"
+" depw %%r0,31,2,%3\n"
+"1: ldw 0(%%sr1,%3),%0\n"
+"2: ldw 4(%%sr1,%3),%%r20\n"
+" subi 32,%%r19,%%r19\n"
+" mtctl %%r19,11\n"
+" vshd %0,%%r20,%0\n"
+" copy %%r0, %1\n"
+"3: \n"
+" .section .fixup,\"ax\"\n"
+"4: ldi -2, %1\n"
+ FIXUP_BRANCH(3b)
+" .previous\n"
+ ASM_EXCEPTIONTABLE_ENTRY(1b, 4b)
+ ASM_EXCEPTIONTABLE_ENTRY(2b, 4b)
+ : "=r" (val), "=r" (ret)
+ : "0" (val), "r" (saddr), "r" (regs->isr)
+ : "r19", "r20", FIXUP_BRANCH_CLOBBER );
+
+ DPRINTF("val = 0x" RFMT "\n", val);
+
+ if (flop)
+ ((__u32*)(regs->fr))[toreg] = val;
+ else if (toreg)
+ regs->gr[toreg] = val;
+
+ return ret;
+}
+static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
+{
+ unsigned long saddr = regs->ior;
+ __u64 val = 0;
+ int ret;
+
+ DPRINTF("load " RFMT ":" RFMT " to r%d for 8 bytes\n",
+ regs->isr, regs->ior, toreg);
+#ifdef CONFIG_PA20
+
+#ifndef CONFIG_64BIT
+ if (!flop)
+ return -1;
+#endif
+ __asm__ __volatile__ (
+" depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */
+" mtsp %4, %%sr1\n"
+" depd %%r0,63,3,%3\n"
+"1: ldd 0(%%sr1,%3),%0\n"
+"2: ldd 8(%%sr1,%3),%%r20\n"
+" subi 64,%%r19,%%r19\n"
+" mtsar %%r19\n"
+" shrpd %0,%%r20,%%sar,%0\n"
+" copy %%r0, %1\n"
+"3: \n"
+" .section .fixup,\"ax\"\n"
+"4: ldi -2, %1\n"
+ FIXUP_BRANCH(3b)
+" .previous\n"
+ ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
+ ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
+ : "=r" (val), "=r" (ret)
+ : "0" (val), "r" (saddr), "r" (regs->isr)
+ : "r19", "r20", FIXUP_BRANCH_CLOBBER );
+#else
+ {
+ unsigned long valh=0,vall=0;
+ __asm__ __volatile__ (
+" zdep %5,29,2,%%r19\n" /* r19=(ofs&3)*8 */
+" mtsp %6, %%sr1\n"
+" dep %%r0,31,2,%5\n"
+"1: ldw 0(%%sr1,%5),%0\n"
+"2: ldw 4(%%sr1,%5),%1\n"
+"3: ldw 8(%%sr1,%5),%%r20\n"
+" subi 32,%%r19,%%r19\n"
+" mtsar %%r19\n"
+" vshd %0,%1,%0\n"
+" vshd %1,%%r20,%1\n"
+" copy %%r0, %2\n"
+"4: \n"
+" .section .fixup,\"ax\"\n"
+"5: ldi -2, %2\n"
+ FIXUP_BRANCH(4b)
+" .previous\n"
+ ASM_EXCEPTIONTABLE_ENTRY(1b,5b)
+ ASM_EXCEPTIONTABLE_ENTRY(2b,5b)
+ ASM_EXCEPTIONTABLE_ENTRY(3b,5b)
+ : "=r" (valh), "=r" (vall), "=r" (ret)
+ : "0" (valh), "1" (vall), "r" (saddr), "r" (regs->isr)
+ : "r19", "r20", FIXUP_BRANCH_CLOBBER );
+ val=((__u64)valh<<32)|(__u64)vall;
+ }
+#endif
+
+ DPRINTF("val = 0x%llx\n", val);
+
+ if (flop)
+ regs->fr[toreg] = val;
+ else if (toreg)
+ regs->gr[toreg] = val;
+
+ return ret;
+}
+
+static int emulate_sth(struct pt_regs *regs, int frreg)
+{
+ unsigned long val = regs->gr[frreg];
+ int ret;
+
+ if (!frreg)
+ val = 0;
+
+ DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg,
+ val, regs->isr, regs->ior);
+
+ __asm__ __volatile__ (
+" mtsp %3, %%sr1\n"
+" extrw,u %1, 23, 8, %%r19\n"
+"1: stb %1, 1(%%sr1, %2)\n"
+"2: stb %%r19, 0(%%sr1, %2)\n"
+" copy %%r0, %0\n"
+"3: \n"
+" .section .fixup,\"ax\"\n"
+"4: ldi -2, %0\n"
+ FIXUP_BRANCH(3b)
+" .previous\n"
+ ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
+ ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
+ : "=r" (ret)
+ : "r" (val), "r" (regs->ior), "r" (regs->isr)
+ : "r19", FIXUP_BRANCH_CLOBBER );
+
+ return ret;
+}
+
+static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
+{
+ unsigned long val;
+ int ret;
+
+ if (flop)
+ val = ((__u32*)(regs->fr))[frreg];
+ else if (frreg)
+ val = regs->gr[frreg];
+ else
+ val = 0;
+
+ DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg,
+ val, regs->isr, regs->ior);
+
+
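+ /* Read-modify-write: load the two aligned words, clear the bytes being
+ * stored with the mask built by depwi, merge in the shifted value and
+ * write both words back. */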
+ __asm__ __volatile__ (
+" mtsp %3, %%sr1\n"
+" zdep %2, 28, 2, %%r19\n"
+" dep %%r0, 31, 2, %2\n"
+" mtsar %%r19\n"
+" depwi,z -2, %%sar, 32, %%r19\n"
+"1: ldw 0(%%sr1,%2),%%r20\n"
+"2: ldw 4(%%sr1,%2),%%r21\n"
+" vshd %%r0, %1, %%r22\n"
+" vshd %1, %%r0, %%r1\n"
+" and %%r20, %%r19, %%r20\n"
+" andcm %%r21, %%r19, %%r21\n"
+" or %%r22, %%r20, %%r20\n"
+" or %%r1, %%r21, %%r21\n"
+" stw %%r20,0(%%sr1,%2)\n"
+" stw %%r21,4(%%sr1,%2)\n"
+" copy %%r0, %0\n"
+"3: \n"
+" .section .fixup,\"ax\"\n"
+"4: ldi -2, %0\n"
+ FIXUP_BRANCH(3b)
+" .previous\n"
+ ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
+ ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
+ : "=r" (ret)
+ : "r" (val), "r" (regs->ior), "r" (regs->isr)
+ : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
+
+ return ret;
+}
+static int emulate_std(struct pt_regs *regs, int frreg, int flop)
+{
+ __u64 val;
+ int ret;
+
+ if (flop)
+ val = regs->fr[frreg];
+ else if (frreg)
+ val = regs->gr[frreg];
+ else
+ val = 0;
+
+ DPRINTF("store r%d (0x%016llx) to " RFMT ":" RFMT " for 8 bytes\n", frreg,
+ val, regs->isr, regs->ior);
+
+#ifdef CONFIG_PA20
+#ifndef CONFIG_64BIT
+ if (!flop)
+ return -1;
+#endif
+ __asm__ __volatile__ (
+" mtsp %3, %%sr1\n"
+" depd,z %2, 60, 3, %%r19\n"
+" depd %%r0, 63, 3, %2\n"
+" mtsar %%r19\n"
+" depdi,z -2, %%sar, 64, %%r19\n"
+"1: ldd 0(%%sr1,%2),%%r20\n"
+"2: ldd 8(%%sr1,%2),%%r21\n"
+" shrpd %%r0, %1, %%sar, %%r22\n"
+" shrpd %1, %%r0, %%sar, %%r1\n"
+" and %%r20, %%r19, %%r20\n"
+" andcm %%r21, %%r19, %%r21\n"
+" or %%r22, %%r20, %%r20\n"
+" or %%r1, %%r21, %%r21\n"
+"3: std %%r20,0(%%sr1,%2)\n"
+"4: std %%r21,8(%%sr1,%2)\n"
+" copy %%r0, %0\n"
+"5: \n"
+" .section .fixup,\"ax\"\n"
+"6: ldi -2, %0\n"
+ FIXUP_BRANCH(5b)
+" .previous\n"
+ ASM_EXCEPTIONTABLE_ENTRY(1b,6b)
+ ASM_EXCEPTIONTABLE_ENTRY(2b,6b)
+ ASM_EXCEPTIONTABLE_ENTRY(3b,6b)
+ ASM_EXCEPTIONTABLE_ENTRY(4b,6b)
+ : "=r" (ret)
+ : "r" (val), "r" (regs->ior), "r" (regs->isr)
+ : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
+#else
+ {
+ unsigned long valh=(val>>32),vall=(val&0xffffffffl);
+ __asm__ __volatile__ (
+" mtsp %4, %%sr1\n"
+" zdep %2, 29, 2, %%r19\n"
+" dep %%r0, 31, 2, %2\n"
+" mtsar %%r19\n"
+" zvdepi -2, 32, %%r19\n"
+"1: ldw 0(%%sr1,%3),%%r20\n"
+"2: ldw 8(%%sr1,%3),%%r21\n"
+" vshd %1, %2, %%r1\n"
+" vshd %%r0, %1, %1\n"
+" vshd %2, %%r0, %2\n"
+" and %%r20, %%r19, %%r20\n"
+" andcm %%r21, %%r19, %%r21\n"
+" or %1, %%r20, %1\n"
+" or %2, %%r21, %2\n"
+"3: stw %1,0(%%sr1,%1)\n"
+"4: stw %%r1,4(%%sr1,%3)\n"
+"5: stw %2,8(%%sr1,%3)\n"
+" copy %%r0, %0\n"
+"6: \n"
+" .section .fixup,\"ax\"\n"
+"7: ldi -2, %0\n"
+ FIXUP_BRANCH(6b)
+" .previous\n"
+ ASM_EXCEPTIONTABLE_ENTRY(1b,7b)
+ ASM_EXCEPTIONTABLE_ENTRY(2b,7b)
+ ASM_EXCEPTIONTABLE_ENTRY(3b,7b)
+ ASM_EXCEPTIONTABLE_ENTRY(4b,7b)
+ ASM_EXCEPTIONTABLE_ENTRY(5b,7b)
+ : "=r" (ret)
+ : "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr)
+ : "r19", "r20", "r21", "r1", FIXUP_BRANCH_CLOBBER );
+ }
+#endif
+
+ return ret;
+}
+
+void handle_unaligned(struct pt_regs *regs)
+{
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+ unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0;
+ int modify = 0;
+ int ret = ERR_NOTHANDLED;
+ struct siginfo si;
+ register int flop=0; /* true if this is a flop */
+
+ /* log a message with pacing */
+ if (user_mode(regs)) {
+ if (current->thread.flags & PARISC_UAC_SIGBUS) {
+ goto force_sigbus;
+ }
+
+ if (!(current->thread.flags & PARISC_UAC_NOPRINT) &&
+ __ratelimit(&ratelimit)) {
+ char buf[256];
+ sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n",
+ current->comm, task_pid_nr(current), regs->ior, regs->iaoq[0]);
+ printk(KERN_WARNING "%s", buf);
+#ifdef DEBUG_UNALIGNED
+ show_regs(regs);
+#endif
+ }
+
+ if (!unaligned_enabled)
+ goto force_sigbus;
+ }
+
+ /* handle modification - OK, it's ugly, see the instruction manual */
+ switch (MAJOR_OP(regs->iir))
+ {
+ case 0x03:
+ case 0x09:
+ case 0x0b:
+ if (regs->iir&0x20)
+ {
+ modify = 1;
+ if (regs->iir&0x1000) /* short loads */
+ if (regs->iir&0x200)
+ newbase += IM5_3(regs->iir);
+ else
+ newbase += IM5_2(regs->iir);
+ else if (regs->iir&0x2000) /* scaled indexed */
+ {
+ int shift=0;
+ switch (regs->iir & OPCODE1_MASK)
+ {
+ case OPCODE_LDH_I:
+ shift= 1; break;
+ case OPCODE_LDW_I:
+ shift= 2; break;
+ case OPCODE_LDD_I:
+ case OPCODE_LDDA_I:
+ shift= 3; break;
+ }
+ newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0)<<shift;
+ } else /* simple indexed */
+ newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0);
+ }
+ break;
+ case 0x13:
+ case 0x1b:
+ modify = 1;
+ newbase += IM14(regs->iir);
+ break;
+ case 0x14:
+ case 0x1c:
+ if (regs->iir&8)
+ {
+ modify = 1;
+ newbase += IM14(regs->iir&~0xe);
+ }
+ break;
+ case 0x16:
+ case 0x1e:
+ modify = 1;
+ newbase += IM14(regs->iir&6);
+ break;
+ case 0x17:
+ case 0x1f:
+ if (regs->iir&4)
+ {
+ modify = 1;
+ newbase += IM14(regs->iir&~4);
+ }
+ break;
+ }
+
+ /* TODO: make this cleaner... */
+ switch (regs->iir & OPCODE1_MASK)
+ {
+ case OPCODE_LDH_I:
+ case OPCODE_LDH_S:
+ ret = emulate_ldh(regs, R3(regs->iir));
+ break;
+
+ case OPCODE_LDW_I:
+ case OPCODE_LDWA_I:
+ case OPCODE_LDW_S:
+ case OPCODE_LDWA_S:
+ ret = emulate_ldw(regs, R3(regs->iir),0);
+ break;
+
+ case OPCODE_STH:
+ ret = emulate_sth(regs, R2(regs->iir));
+ break;
+
+ case OPCODE_STW:
+ case OPCODE_STWA:
+ ret = emulate_stw(regs, R2(regs->iir),0);
+ break;
+
+#ifdef CONFIG_PA20
+ case OPCODE_LDD_I:
+ case OPCODE_LDDA_I:
+ case OPCODE_LDD_S:
+ case OPCODE_LDDA_S:
+ ret = emulate_ldd(regs, R3(regs->iir),0);
+ break;
+
+ case OPCODE_STD:
+ case OPCODE_STDA:
+ ret = emulate_std(regs, R2(regs->iir),0);
+ break;
+#endif
+
+ case OPCODE_FLDWX:
+ case OPCODE_FLDWS:
+ case OPCODE_FLDWXR:
+ case OPCODE_FLDWSR:
+ flop=1;
+ ret = emulate_ldw(regs,FR3(regs->iir),1);
+ break;
+
+ case OPCODE_FLDDX:
+ case OPCODE_FLDDS:
+ flop=1;
+ ret = emulate_ldd(regs,R3(regs->iir),1);
+ break;
+
+ case OPCODE_FSTWX:
+ case OPCODE_FSTWS:
+ case OPCODE_FSTWXR:
+ case OPCODE_FSTWSR:
+ flop=1;
+ ret = emulate_stw(regs,FR3(regs->iir),1);
+ break;
+
+ case OPCODE_FSTDX:
+ case OPCODE_FSTDS:
+ flop=1;
+ ret = emulate_std(regs,R3(regs->iir),1);
+ break;
+
+ case OPCODE_LDCD_I:
+ case OPCODE_LDCW_I:
+ case OPCODE_LDCD_S:
+ case OPCODE_LDCW_S:
+ ret = ERR_NOTHANDLED; /* "undefined", but let's kill them. */
+ break;
+ }
+#ifdef CONFIG_PA20
+ switch (regs->iir & OPCODE2_MASK)
+ {
+ case OPCODE_FLDD_L:
+ flop=1;
+ ret = emulate_ldd(regs,R2(regs->iir),1);
+ break;
+ case OPCODE_FSTD_L:
+ flop=1;
+ ret = emulate_std(regs, R2(regs->iir),1);
+ break;
+ case OPCODE_LDD_L:
+ ret = emulate_ldd(regs, R2(regs->iir),0);
+ break;
+ case OPCODE_STD_L:
+ ret = emulate_std(regs, R2(regs->iir),0);
+ break;
+ }
+#endif
+ switch (regs->iir & OPCODE3_MASK)
+ {
+ case OPCODE_FLDW_L:
+ flop=1;
+ ret = emulate_ldw(regs, R2(regs->iir),1);
+ break;
+ case OPCODE_LDW_M:
+ ret = emulate_ldw(regs, R2(regs->iir),0);
+ break;
+
+ case OPCODE_FSTW_L:
+ flop=1;
+ ret = emulate_stw(regs, R2(regs->iir),1);
+ break;
+ case OPCODE_STW_M:
+ ret = emulate_stw(regs, R2(regs->iir),0);
+ break;
+ }
+ switch (regs->iir & OPCODE4_MASK)
+ {
+ case OPCODE_LDH_L:
+ ret = emulate_ldh(regs, R2(regs->iir));
+ break;
+ case OPCODE_LDW_L:
+ case OPCODE_LDWM:
+ ret = emulate_ldw(regs, R2(regs->iir),0);
+ break;
+ case OPCODE_STH_L:
+ ret = emulate_sth(regs, R2(regs->iir));
+ break;
+ case OPCODE_STW_L:
+ case OPCODE_STWM:
+ ret = emulate_stw(regs, R2(regs->iir),0);
+ break;
+ }
+
+ if (modify && R1(regs->iir))
+ regs->gr[R1(regs->iir)] = newbase;
+
+
+ if (ret == ERR_NOTHANDLED)
+ printk(KERN_CRIT "Not-handled unaligned insn 0x%08lx\n", regs->iir);
+
+ DPRINTF("ret = %d\n", ret);
+
+ if (ret)
+ {
+ printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
+ die_if_kernel("Unaligned data reference", regs, 28);
+
+ if (ret == ERR_PAGEFAULT)
+ {
+ si.si_signo = SIGSEGV;
+ si.si_errno = 0;
+ si.si_code = SEGV_MAPERR;
+ si.si_addr = (void __user *)regs->ior;
+ force_sig_info(SIGSEGV, &si, current);
+ }
+ else
+ {
+force_sigbus:
+ /* couldn't handle it ... */
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+ si.si_code = BUS_ADRALN;
+ si.si_addr = (void __user *)regs->ior;
+ force_sig_info(SIGBUS, &si, current);
+ }
+
+ return;
+ }
+
+ /* else we handled it, let life go on. */
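+ /* gr[0] holds the saved PSW; setting the N (nullify) bit makes the
+ * interrupted instruction be skipped on return, since it has already
+ * been emulated. */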
+ regs->gr[0]|=PSW_N;
+}
+
+/*
+ * NB: check_unaligned() is only used for PCXS processors right
+ * now, so we only check for PA1.1 encodings at this point.
+ */
+
+int
+check_unaligned(struct pt_regs *regs)
+{
+ unsigned long align_mask;
+
+ /* Get alignment mask */
+
+ align_mask = 0UL;
+ switch (regs->iir & OPCODE1_MASK) {
+
+ case OPCODE_LDH_I:
+ case OPCODE_LDH_S:
+ case OPCODE_STH:
+ align_mask = 1UL;
+ break;
+
+ case OPCODE_LDW_I:
+ case OPCODE_LDWA_I:
+ case OPCODE_LDW_S:
+ case OPCODE_LDWA_S:
+ case OPCODE_STW:
+ case OPCODE_STWA:
+ align_mask = 3UL;
+ break;
+
+ default:
+ switch (regs->iir & OPCODE4_MASK) {
+ case OPCODE_LDH_L:
+ case OPCODE_STH_L:
+ align_mask = 1UL;
+ break;
+ case OPCODE_LDW_L:
+ case OPCODE_LDWM:
+ case OPCODE_STW_L:
+ case OPCODE_STWM:
+ align_mask = 3UL;
+ break;
+ }
+ break;
+ }
+
+ return (int)(regs->ior & align_mask);
+}
+
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
new file mode 100644
index 00000000..76ed62ed
--- /dev/null
+++ b/arch/parisc/kernel/unwind.c
@@ -0,0 +1,444 @@
+/*
+ * Kernel unwinding support
+ *
+ * (c) 2002-2004 Randolph Chung <tausq@debian.org>
+ *
+ * Derived partially from the IA64 implementation. The PA-RISC
+ * Runtime Architecture Document is also a useful reference to
+ * understand what is happening here
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/kallsyms.h>
+#include <linux/sort.h>
+
+#include <asm/uaccess.h>
+#include <asm/assembly.h>
+#include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
+
+#include <asm/unwind.h>
+
+/* #define DEBUG 1 */
+#ifdef DEBUG
+#define dbg(x...) printk(x)
+#else
+#define dbg(x...)
+#endif
+
+#define KERNEL_START (KERNEL_BINARY_TEXT_START)
+
+extern struct unwind_table_entry __start___unwind[];
+extern struct unwind_table_entry __stop___unwind[];
+
+static spinlock_t unwind_lock;
+/*
+ * the kernel unwind block is not dynamically allocated so that
+ * we can call unwind_init as early in the bootup process as
+ * possible (before the slab allocator is initialized)
+ */
+static struct unwind_table kernel_unwind_table __read_mostly;
+static LIST_HEAD(unwind_tables);
+
+static inline const struct unwind_table_entry *
+find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
+{
+ const struct unwind_table_entry *e = NULL;
+ unsigned long lo, hi, mid;
+
+ lo = 0;
+ hi = table->length - 1;
+
+ while (lo <= hi) {
+ mid = (hi - lo) / 2 + lo;
+ e = &table->table[mid];
+ if (addr < e->region_start)
+ hi = mid - 1;
+ else if (addr > e->region_end)
+ lo = mid + 1;
+ else
+ return e;
+ }
+
+ return NULL;
+}
+
+static const struct unwind_table_entry *
+find_unwind_entry(unsigned long addr)
+{
+ struct unwind_table *table;
+ const struct unwind_table_entry *e = NULL;
+
+ if (addr >= kernel_unwind_table.start &&
+ addr <= kernel_unwind_table.end)
+ e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
+ else
+ list_for_each_entry(table, &unwind_tables, list) {
+ if (addr >= table->start &&
+ addr <= table->end)
+ e = find_unwind_entry_in_table(table, addr);
+ if (e) {
+ /* Move-to-front to exploit common traces */
+ list_move(&table->list, &unwind_tables);
+ break;
+ }
+ }
+
+ return e;
+}
+
+static void
+unwind_table_init(struct unwind_table *table, const char *name,
+ unsigned long base_addr, unsigned long gp,
+ void *table_start, void *table_end)
+{
+ struct unwind_table_entry *start = table_start;
+ struct unwind_table_entry *end =
+ (struct unwind_table_entry *)table_end - 1;
+
+ table->name = name;
+ table->base_addr = base_addr;
+ table->gp = gp;
+ table->start = base_addr + start->region_start;
+ table->end = base_addr + end->region_end;
+ table->table = (struct unwind_table_entry *)table_start;
+ table->length = end - start + 1;
+ INIT_LIST_HEAD(&table->list);
+
+ for (; start <= end; start++) {
+ if (start < end &&
+ start->region_end > (start+1)->region_start) {
+ printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
+ }
+
+ start->region_start += base_addr;
+ start->region_end += base_addr;
+ }
+}
+
+static int cmp_unwind_table_entry(const void *a, const void *b)
+{
+ return ((const struct unwind_table_entry *)a)->region_start
+ - ((const struct unwind_table_entry *)b)->region_start;
+}
+
+static void
+unwind_table_sort(struct unwind_table_entry *start,
+ struct unwind_table_entry *finish)
+{
+ sort(start, finish - start, sizeof(struct unwind_table_entry),
+ cmp_unwind_table_entry, NULL);
+}
+
+struct unwind_table *
+unwind_table_add(const char *name, unsigned long base_addr,
+ unsigned long gp,
+ void *start, void *end)
+{
+ struct unwind_table *table;
+ unsigned long flags;
+ struct unwind_table_entry *s = (struct unwind_table_entry *)start;
+ struct unwind_table_entry *e = (struct unwind_table_entry *)end;
+
+ unwind_table_sort(s, e);
+
+ table = kmalloc(sizeof(struct unwind_table), GFP_USER);
+ if (table == NULL)
+ return NULL;
+ unwind_table_init(table, name, base_addr, gp, start, end);
+ spin_lock_irqsave(&unwind_lock, flags);
+ list_add_tail(&table->list, &unwind_tables);
+ spin_unlock_irqrestore(&unwind_lock, flags);
+
+ return table;
+}
+
+void unwind_table_remove(struct unwind_table *table)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&unwind_lock, flags);
+ list_del(&table->list);
+ spin_unlock_irqrestore(&unwind_lock, flags);
+
+ kfree(table);
+}
+
+/* Called from setup_arch to import the kernel unwind info */
+int unwind_init(void)
+{
+ long start, stop;
+ register unsigned long gp __asm__ ("r27");
+
+ start = (long)&__start___unwind[0];
+ stop = (long)&__stop___unwind[0];
+
+ spin_lock_init(&unwind_lock);
+
+ printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
+ start, stop,
+ (stop - start) / sizeof(struct unwind_table_entry));
+
+ unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
+ gp,
+ &__start___unwind[0], &__stop___unwind[0]);
+#if 0
+ {
+ int i;
+ for (i = 0; i < 10; i++)
+ {
+ printk("region 0x%x-0x%x\n",
+ __start___unwind[i].region_start,
+ __start___unwind[i].region_end);
+ }
+ }
+#endif
+ return 0;
+}
+
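+/* On 64-bit parisc a function pointer refers to a function descriptor rather
+ * than directly to code, so the entry address must be read out of the
+ * descriptor; 32-bit function pointers reference the code itself. */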
+#ifdef CONFIG_64BIT
+#define get_func_addr(fptr) fptr[2]
+#else
+#define get_func_addr(fptr) fptr[0]
+#endif
+
+static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
+{
+ extern void handle_interruption(int, struct pt_regs *);
+ static unsigned long *hi = (unsigned long *)&handle_interruption;
+
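+ /* handle_interruption() has a struct pt_regs saved just below its
+ * frame; recover the interrupted context's sp and iaoq from it instead
+ * of decoding the frame itself. */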
+ if (pc == get_func_addr(hi)) {
+ struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
+ dbg("Unwinding through handle_interruption()\n");
+ info->prev_sp = regs->gr[30];
+ info->prev_ip = regs->iaoq[0];
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static void unwind_frame_regs(struct unwind_frame_info *info)
+{
+ const struct unwind_table_entry *e;
+ unsigned long npc;
+ unsigned int insn;
+ long frame_size = 0;
+ int looking_for_rp, rpoffset = 0;
+
+ e = find_unwind_entry(info->ip);
+ if (e == NULL) {
+ unsigned long sp;
+ extern char _stext[], _etext[];
+
+ dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
+
+#ifdef CONFIG_KALLSYMS
+ /* Handle some frequent special cases.... */
+ {
+ char symname[KSYM_NAME_LEN];
+ char *modname;
+
+ kallsyms_lookup(info->ip, NULL, NULL, &modname,
+ symname);
+
+ dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
+
+ if (strcmp(symname, "_switch_to_ret") == 0) {
+ info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
+ info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
+ dbg("_switch_to_ret @ %lx - setting "
+ "prev_sp=%lx prev_ip=%lx\n",
+ info->ip, info->prev_sp,
+ info->prev_ip);
+ return;
+ } else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
+ strcmp(symname, "syscall_exit") == 0) {
+ info->prev_ip = info->prev_sp = 0;
+ return;
+ }
+ }
+#endif
+
+ /* Since we are doing the unwinding blind, we don't know if
+ we are adjusting the stack correctly or extracting the rp
+ correctly. The rp is checked to see if it belongs to the
+ kernel text section; if not, we assume we don't have a
+ correct stack frame and continue to unwind the stack.
+ This is not quite correct, and will fail for loadable
+ modules. */
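+ /* The PA-RISC runtime architecture keeps the stack pointer 64-byte
+ aligned, so probe candidate frames at 64-byte intervals. */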
+ sp = info->sp & ~63;
+ do {
+ unsigned long tmp;
+
+ info->prev_sp = sp - 64;
+ info->prev_ip = 0;
+ if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
+ break;
+ info->prev_ip = tmp;
+ sp = info->prev_sp;
+ } while (info->prev_ip < (unsigned long)_stext ||
+ info->prev_ip > (unsigned long)_etext);
+
+ info->rp = 0;
+
+ dbg("analyzing func @ %lx with no unwind info, setting "
+ "prev_sp=%lx prev_ip=%lx\n", info->ip,
+ info->prev_sp, info->prev_ip);
+ } else {
+ dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
+ "Save_RP = %d, Millicode = %d size = %u\n",
+ e->region_start, e->region_end, e->Save_SP, e->Save_RP,
+ e->Millicode, e->Total_frame_size);
+
+ looking_for_rp = e->Save_RP;
+
+ for (npc = e->region_start;
+ (frame_size < (e->Total_frame_size << 3) ||
+ looking_for_rp) &&
+ npc < info->ip;
+ npc += 4) {
+
+ insn = *(unsigned int *)npc;
+
+ if ((insn & 0xffffc000) == 0x37de0000 ||
+ (insn & 0xffe00000) == 0x6fc00000) {
+ /* ldo X(sp), sp, or stwm X,D(sp) */
+ frame_size += (insn & 0x1 ? -1 << 13 : 0) |
+ ((insn & 0x3fff) >> 1);
+ dbg("analyzing func @ %lx, insn=%08x @ "
+ "%lx, frame_size = %ld\n", info->ip,
+ insn, npc, frame_size);
+ } else if ((insn & 0xffe00008) == 0x73c00008) {
+ /* std,ma X,D(sp) */
+ frame_size += (insn & 0x1 ? -1 << 13 : 0) |
+ (((insn >> 4) & 0x3ff) << 3);
+ dbg("analyzing func @ %lx, insn=%08x @ "
+ "%lx, frame_size = %ld\n", info->ip,
+ insn, npc, frame_size);
+ } else if (insn == 0x6bc23fd9) {
+ /* stw rp,-20(sp) */
+ rpoffset = 20;
+ looking_for_rp = 0;
+ dbg("analyzing func @ %lx, insn=stw rp,"
+ "-20(sp) @ %lx\n", info->ip, npc);
+ } else if (insn == 0x0fc212c1) {
+ /* std rp,-16(sr0,sp) */
+ rpoffset = 16;
+ looking_for_rp = 0;
+ dbg("analyzing func @ %lx, insn=std rp,"
+ "-16(sp) @ %lx\n", info->ip, npc);
+ }
+ }
+
+ if (!unwind_special(info, e->region_start, frame_size)) {
+ info->prev_sp = info->sp - frame_size;
+ if (e->Millicode)
+ info->rp = info->r31;
+ else if (rpoffset)
+ info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
+ info->prev_ip = info->rp;
+ info->rp = 0;
+ }
+
+ dbg("analyzing func @ %lx, setting prev_sp=%lx "
+ "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
+ info->prev_ip, npc);
+ }
+}
+
+void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
+ struct pt_regs *regs)
+{
+ memset(info, 0, sizeof(struct unwind_frame_info));
+ info->t = t;
+ info->sp = regs->gr[30];
+ info->ip = regs->iaoq[0];
+ info->rp = regs->gr[2];
+ info->r31 = regs->gr[31];
+
+ dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
+ t ? (int)t->pid : -1, info->sp, info->ip);
+}
+
+void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
+{
+ struct pt_regs *r = &t->thread.regs;
+ struct pt_regs *r2;
+
+ r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
+ if (!r2)
+ return;
+ *r2 = *r;
+ r2->gr[30] = r->ksp;
+ r2->iaoq[0] = r->kpc;
+ unwind_frame_init(info, t, r2);
+ kfree(r2);
+}
+
+void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
+{
+ unwind_frame_init(info, current, regs);
+}
+
+int unwind_once(struct unwind_frame_info *next_frame)
+{
+ unwind_frame_regs(next_frame);
+
+ if (next_frame->prev_sp == 0 ||
+ next_frame->prev_ip == 0)
+ return -1;
+
+ next_frame->sp = next_frame->prev_sp;
+ next_frame->ip = next_frame->prev_ip;
+ next_frame->prev_sp = 0;
+ next_frame->prev_ip = 0;
+
+ dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
+ next_frame->t ? (int)next_frame->t->pid : -1,
+ next_frame->sp, next_frame->ip);
+
+ return 0;
+}
+
+int unwind_to_user(struct unwind_frame_info *info)
+{
+ int ret;
+
+ do {
+ ret = unwind_once(info);
+ } while (!ret && !(info->ip & 3));
+
+ return ret;
+}
+
+unsigned long return_address(unsigned int level)
+{
+ struct unwind_frame_info info;
+ struct pt_regs r;
+ unsigned long sp;
+
+ /* initialize unwind info */
+ asm volatile ("copy %%r30, %0" : "=r"(sp));
+ memset(&r, 0, sizeof(struct pt_regs));
+ r.iaoq[0] = (unsigned long) current_text_addr();
+ r.gr[2] = (unsigned long) __builtin_return_address(0);
+ r.gr[30] = sp;
+ unwind_frame_init(&info, current, &r);
+
+ /* unwind stack */
+ ++level;
+ do {
+ if (unwind_once(&info) < 0 || info.ip == 0)
+ return 0;
+ if (!__kernel_text_address(info.ip)) {
+ return 0;
+ }
+ } while (info.ip && level--);
+
+ return info.ip;
+}
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
new file mode 100644
index 00000000..64a99988
--- /dev/null
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -0,0 +1,178 @@
+/* Kernel link layout for various "sections"
+ *
+ * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
+ * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
+ * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ * Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
+ * Copyright (C) 2006 Helge Deller <deller@gmx.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <asm-generic/vmlinux.lds.h>
+/* needed for the processor-specific cache alignment size */
+#include <asm/cache.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+/* ld script to make hppa Linux kernel */
+#ifndef CONFIG_64BIT
+OUTPUT_FORMAT("elf32-hppa-linux")
+OUTPUT_ARCH(hppa)
+#else
+OUTPUT_FORMAT("elf64-hppa-linux")
+OUTPUT_ARCH(hppa:hppa2.0w)
+#endif
+
+ENTRY(_stext)
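+/* parisc is big-endian, so on 32-bit the low-order word of jiffies_64 is at
+ * offset 4 and jiffies must alias that word. */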
+#ifndef CONFIG_64BIT
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
+SECTIONS
+{
+ . = KERNEL_BINARY_TEXT_START;
+
+ _text = .; /* Text and read-only data */
+ .head ALIGN(16) : {
+ HEAD_TEXT
+ } = 0
+ .text ALIGN(16) : {
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ IRQENTRY_TEXT
+ *(.text.do_softirq)
+ *(.text.sys_exit)
+ *(.text.do_sigaltstack)
+ *(.text.do_fork)
+ *(.text.*)
+ *(.fixup)
+ *(.lock.text) /* out-of-line lock text */
+ *(.gnu.warning)
+ }
+ /* End of text section */
+ _etext = .;
+
+ /* Start of data section */
+ _sdata = .;
+
+ RODATA
+
+ /* writeable */
+ /* Make sure this is page aligned so
+ * that we can properly leave these
+ * as writable
+ */
+ . = ALIGN(PAGE_SIZE);
+ data_start = .;
+
+ /* unwind info */
+ .PARISC.unwind : {
+ __start___unwind = .;
+ *(.PARISC.unwind)
+ __stop___unwind = .;
+ }
+
+ EXCEPTION_TABLE(16)
+ NOTES
+
+ /* Data */
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+
+ /* PA-RISC locks require 16-byte alignment */
+ . = ALIGN(16);
+ .data..lock_aligned : {
+ *(.data..lock_aligned)
+ }
+
+ /* End of data section */
+ _edata = .;
+
+ /* BSS */
+ __bss_start = .;
+ /* page table entries need to be PAGE_SIZE aligned */
+ . = ALIGN(PAGE_SIZE);
+ .data..vmpages : {
+ *(.data..vm0.pmd)
+ *(.data..vm0.pgd)
+ *(.data..vm0.pte)
+ }
+ .bss : {
+ *(.bss)
+ *(COMMON)
+ }
+ __bss_stop = .;
+
+#ifdef CONFIG_64BIT
+ . = ALIGN(16);
+ /* Linkage tables */
+ .opd : {
+ *(.opd)
+ } PROVIDE (__gp = .);
+ .plt : {
+ *(.plt)
+ }
+ .dlt : {
+ *(.dlt)
+ }
+#endif
+
+ /* reserve space for interrupt stack by aligning __init* to 16k */
+ . = ALIGN(16384);
+ __init_begin = .;
+ INIT_TEXT_SECTION(16384)
+ . = ALIGN(PAGE_SIZE);
+ INIT_DATA_SECTION(16)
+ /* we have to discard exit text and such at runtime, not link time */
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
+ .exit.data :
+ {
+ EXIT_DATA
+ }
+
+ PERCPU_SECTION(L1_CACHE_BYTES)
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+ /* freed after init ends here */
+ _end = . ;
+
+ STABS_DEBUG
+ .note 0 : { *(.note) }
+
+ /* Sections to be discarded */
+ DISCARDS
+ /DISCARD/ : {
+#ifdef CONFIG_64BIT
+ /* temporary hack until binutils is fixed to not emit these
+ * for static binaries
+ */
+ *(.interp)
+ *(.dynsym)
+ *(.dynstr)
+ *(.dynamic)
+ *(.hash)
+ *(.gnu.hash)
+#endif
+ }
+}
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
new file mode 100644
index 00000000..5f2e6904
--- /dev/null
+++ b/arch/parisc/lib/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for parisc-specific library files
+#
+
+lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o
+
+obj-y := iomap.o
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
new file mode 100644
index 00000000..353963d4
--- /dev/null
+++ b/arch/parisc/lib/bitops.c
@@ -0,0 +1,83 @@
+/*
+ * bitops.c: atomic operations which got too long to be inlined all over
+ * the place.
+ *
+ * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
+ * Copyright 2000 Grant Grundler (grundler@cup.hp.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+
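+/* PA-RISC has no atomic read-modify-write instruction other than ldcw, so
+ * atomic and cmpxchg operations are serialized through a hashed array of
+ * spinlocks indexed by the operand's address. */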
+#ifdef CONFIG_SMP
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+ [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
+};
+#endif
+
+#ifdef CONFIG_64BIT
+unsigned long __xchg64(unsigned long x, unsigned long *ptr)
+{
+ unsigned long temp, flags;
+
+ _atomic_spin_lock_irqsave(ptr, flags);
+ temp = *ptr;
+ *ptr = x;
+ _atomic_spin_unlock_irqrestore(ptr, flags);
+ return temp;
+}
+#endif
+
+unsigned long __xchg32(int x, int *ptr)
+{
+ unsigned long flags;
+ long temp;
+
+ _atomic_spin_lock_irqsave(ptr, flags);
+ temp = (long) *ptr; /* XXX - sign extension wanted? */
+ *ptr = x;
+ _atomic_spin_unlock_irqrestore(ptr, flags);
+ return (unsigned long)temp;
+}
+
+
+unsigned long __xchg8(char x, char *ptr)
+{
+ unsigned long flags;
+ long temp;
+
+ _atomic_spin_lock_irqsave(ptr, flags);
+ temp = (long) *ptr; /* XXX - sign extension wanted? */
+ *ptr = x;
+ _atomic_spin_unlock_irqrestore(ptr, flags);
+ return (unsigned long)temp;
+}
+
+
+#ifdef CONFIG_64BIT
+unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new)
+{
+ unsigned long flags;
+ unsigned long prev;
+
+ _atomic_spin_lock_irqsave(ptr, flags);
+ if ((prev = *ptr) == old)
+ *ptr = new;
+ _atomic_spin_unlock_irqrestore(ptr, flags);
+ return prev;
+}
+#endif
+
+unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
+{
+ unsigned long flags;
+ unsigned int prev;
+
+ _atomic_spin_lock_irqsave(ptr, flags);
+ if ((prev = *ptr) == old)
+ *ptr = new;
+ _atomic_spin_unlock_irqrestore(ptr, flags);
+ return (unsigned long)prev;
+}
diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c
new file mode 100644
index 00000000..ae66d31f
--- /dev/null
+++ b/arch/parisc/lib/checksum.c
@@ -0,0 +1,149 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * MIPS specific IP/TCP/UDP checksumming routines
+ *
+ * Authors: Ralf Baechle, <ralf@waldorf-gmbh.de>
+ * Lots of code moved from tcp.c and ip.c; see those files
+ * for more names.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <net/checksum.h>
+#include <asm/byteorder.h>
+#include <asm/string.h>
+#include <asm/uaccess.h>
+
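+/* Add _r into the running sum _t and fold the carry straight back in
+ * (end-around carry), as the ones'-complement Internet checksum requires. */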
+#define addc(_t,_r) \
+ __asm__ __volatile__ ( \
+" add %0, %1, %0\n" \
+" addc %0, %%r0, %0\n" \
+ : "=r"(_t) \
+ : "r"(_r), "0"(_t));
+
+static inline unsigned short from32to16(unsigned int x)
+{
+ /* 32 bits --> 16 bits + carry */
+ x = (x & 0xffff) + (x >> 16);
+ /* 16 bits + carry --> 16 bits including carry */
+ x = (x & 0xffff) + (x >> 16);
+ return (unsigned short)x;
+}
+
+static inline unsigned int do_csum(const unsigned char * buff, int len)
+{
+ int odd, count;
+ unsigned int result = 0;
+
+ if (len <= 0)
+ goto out;
+ odd = 1 & (unsigned long) buff;
+ if (odd) {
+ result = be16_to_cpu(*buff);
+ len--;
+ buff++;
+ }
+ count = len >> 1; /* nr of 16-bit words.. */
+ if (count) {
+ if (2 & (unsigned long) buff) {
+ result += *(unsigned short *) buff;
+ count--;
+ len -= 2;
+ buff += 2;
+ }
+ count >>= 1; /* nr of 32-bit words.. */
+ if (count) {
+ while (count >= 4) {
+ unsigned int r1, r2, r3, r4;
+ r1 = *(unsigned int *)(buff + 0);
+ r2 = *(unsigned int *)(buff + 4);
+ r3 = *(unsigned int *)(buff + 8);
+ r4 = *(unsigned int *)(buff + 12);
+ addc(result, r1);
+ addc(result, r2);
+ addc(result, r3);
+ addc(result, r4);
+ count -= 4;
+ buff += 16;
+ }
+ while (count) {
+ unsigned int w = *(unsigned int *) buff;
+ count--;
+ buff += 4;
+ addc(result, w);
+ }
+ result = (result & 0xffff) + (result >> 16);
+ }
+ if (len & 2) {
+ result += *(unsigned short *) buff;
+ buff += 2;
+ }
+ }
+ if (len & 1)
+ result += le16_to_cpu(*buff);
+ result = from32to16(result);
+ if (odd)
+ result = swab16(result);
+out:
+ return result;
+}
+
+/*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+/*
+ * why bother folding?
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum)
+{
+ unsigned int result = do_csum(buff, len);
+ addc(result, sum);
+ return (__force __wsum)from32to16(result);
+}
+
+EXPORT_SYMBOL(csum_partial);
+
+/*
+ * copy while checksumming, otherwise like csum_partial
+ */
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
+{
+ /*
+ * It's 2:30 am and I don't feel like doing it real ...
+ * This is lots slower than the real thing (tm)
+ */
+ sum = csum_partial(src, len, sum);
+ memcpy(dst, src, len);
+
+ return sum;
+}
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+
+/*
+ * Copy from userspace and compute checksum. If we catch an exception
+ * then zero the rest of the buffer.
+ */
+__wsum csum_partial_copy_from_user(const void __user *src,
+ void *dst, int len,
+ __wsum sum, int *err_ptr)
+{
+ int missing;
+
+ missing = copy_from_user(dst, src, len);
+ if (missing) {
+ memset(dst + len - missing, 0, missing);
+ *err_ptr = -EFAULT;
+ }
+
+ return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
new file mode 100644
index 00000000..f8c45cc2
--- /dev/null
+++ b/arch/parisc/lib/fixup.S
@@ -0,0 +1,92 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Fixup routines for kernel exception handling.
+ */
+#include <asm/asm-offsets.h>
+#include <asm/assembly.h>
+#include <asm/errno.h>
+#include <linux/linkage.h>
+
+#ifdef CONFIG_SMP
+ .macro get_fault_ip t1 t2
+ addil LT%__per_cpu_offset,%r27
+ LDREG RT%__per_cpu_offset(%r1),\t1
+ /* t2 = smp_processor_id() */
+ mfctl 30,\t2
+ ldw TI_CPU(\t2),\t2
+#ifdef CONFIG_64BIT
+ extrd,u \t2,63,32,\t2
+#endif
+ /* t2 = &__per_cpu_offset[smp_processor_id()]; */
+ LDREGX \t2(\t1),\t2
+ addil LT%exception_data,%r27
+ LDREG RT%exception_data(%r1),\t1
+ /* t1 = &__get_cpu_var(exception_data) */
+ add,l \t1,\t2,\t1
+ /* t1 = t1->fault_ip */
+ LDREG EXCDATA_IP(\t1), \t1
+ .endm
+#else
+ .macro get_fault_ip t1 t2
+ /* t1 = &__get_cpu_var(exception_data) */
+ addil LT%exception_data,%r27
+ LDREG RT%exception_data(%r1),\t2
+ /* t1 = t2->fault_ip */
+ LDREG EXCDATA_IP(\t2), \t1
+ .endm
+#endif
+
+ .level LEVEL
+
+ .text
+ .section .fixup, "ax"
+
+ /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
+ENTRY(fixup_get_user_skip_1)
+ get_fault_ip %r1,%r8
+ ldo 4(%r1), %r1
+ ldi -EFAULT, %r8
+ bv %r0(%r1)
+ copy %r0, %r9
+ENDPROC(fixup_get_user_skip_1)
+
+ENTRY(fixup_get_user_skip_2)
+ get_fault_ip %r1,%r8
+ ldo 8(%r1), %r1
+ ldi -EFAULT, %r8
+ bv %r0(%r1)
+ copy %r0, %r9
+ENDPROC(fixup_get_user_skip_2)
+
+ /* put_user() fixups, store -EFAULT in r8 */
+ENTRY(fixup_put_user_skip_1)
+ get_fault_ip %r1,%r8
+ ldo 4(%r1), %r1
+ bv %r0(%r1)
+ ldi -EFAULT, %r8
+ENDPROC(fixup_put_user_skip_1)
+
+ENTRY(fixup_put_user_skip_2)
+ get_fault_ip %r1,%r8
+ ldo 8(%r1), %r1
+ bv %r0(%r1)
+ ldi -EFAULT, %r8
+ENDPROC(fixup_put_user_skip_2)
+
diff --git a/arch/parisc/lib/io.c b/arch/parisc/lib/io.c
new file mode 100644
index 00000000..7c1406ff
--- /dev/null
+++ b/arch/parisc/lib/io.c
@@ -0,0 +1,488 @@
+/*
+ * arch/parisc/lib/io.c
+ *
+ * Copyright (c) Matthew Wilcox 2001 for Hewlett-Packard
+ * Copyright (c) Randolph Chung 2001 <tausq@debian.org>
+ *
+ * IO accessing functions which shouldn't be inlined because they're too big
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+/* Copies a block of memory to a device in an efficient manner.
+ * Assumes the device can cope with 32-bit transfers. If it can't,
+ * don't use this function.
+ */
+void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
+{
+ if (((unsigned long)dst & 3) != ((unsigned long)src & 3))
+ goto bytecopy;
+ while ((unsigned long)dst & 3) {
+ writeb(*(char *)src, dst++);
+ src++;
+ count--;
+ }
+ while (count > 3) {
+ __raw_writel(*(u32 *)src, dst);
+ src += 4;
+ dst += 4;
+ count -= 4;
+ }
+ bytecopy:
+ while (count--) {
+ writeb(*(char *)src, dst++);
+ src++;
+ }
+}
+
+/*
+** Copies a block of memory from a device in an efficient manner.
+** Assumes the device can cope with 32-bit transfers. If it can't,
+** don't use this function.
+**
+** CR16 counts on C3000 reading 256 bytes from Symbios 896 RAM:
+** 27341/64 = 427 cyc per int
+** 61311/128 = 478 cyc per short
+** 122637/256 = 479 cyc per byte
+** Ergo, bus latencies are dominant (not transfer size).
+** Minimize total number of transfers at cost of CPU cycles.
+** TODO: only look at src alignment and adjust the stores to dest.
+*/
+void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
+{
+ /* first compare alignment of src/dst */
+ if ( (((unsigned long)dst ^ (unsigned long)src) & 1) || (count < 2) )
+ goto bytecopy;
+
+ if ( (((unsigned long)dst ^ (unsigned long)src) & 2) || (count < 4) )
+ goto shortcopy;
+
+ /* Then check for misaligned start address */
+ if ((unsigned long)src & 1) {
+ *(u8 *)dst = readb(src);
+ src++;
+ dst++;
+ count--;
+ if (count < 2) goto bytecopy;
+ }
+
+ if ((unsigned long)src & 2) {
+ *(u16 *)dst = __raw_readw(src);
+ src += 2;
+ dst += 2;
+ count -= 2;
+ }
+
+ while (count > 3) {
+ *(u32 *)dst = __raw_readl(src);
+ dst += 4;
+ src += 4;
+ count -= 4;
+ }
+
+ shortcopy:
+ while (count > 1) {
+ *(u16 *)dst = __raw_readw(src);
+ src += 2;
+ dst += 2;
+ count -= 2;
+ }
+
+ bytecopy:
+ while (count--) {
+ *(char *)dst = readb(src);
+ src++;
+ dst++;
+ }
+}
+
+/* Sets a block of memory on a device to a given value.
+ * Assumes the device can cope with 32-bit transfers. If it can't,
+ * don't use this function.
+ */
+void memset_io(volatile void __iomem *addr, unsigned char val, int count)
+{
+ u32 val32 = (val << 24) | (val << 16) | (val << 8) | val;
+ while ((unsigned long)addr & 3) {
+ writeb(val, addr++);
+ count--;
+ }
+ while (count > 3) {
+ __raw_writel(val32, addr);
+ addr += 4;
+ count -= 4;
+ }
+ while (count--) {
+ writeb(val, addr++);
+ }
+}
+
+/*
+ * Read COUNT 8-bit bytes from port PORT into memory starting at
+ * DST.
+ */
+void insb (unsigned long port, void *dst, unsigned long count)
+{
+ unsigned char *p;
+
+ p = (unsigned char *)dst;
+
+ while (((unsigned long)p) & 0x3) {
+ if (!count)
+ return;
+ count--;
+ *p = inb(port);
+ p++;
+ }
+
+ while (count >= 4) {
+ unsigned int w;
+ count -= 4;
+ w = inb(port) << 24;
+ w |= inb(port) << 16;
+ w |= inb(port) << 8;
+ w |= inb(port);
+ *(unsigned int *) p = w;
+ p += 4;
+ }
+
+ while (count) {
+ --count;
+ *p = inb(port);
+ p++;
+ }
+}
+
+
+/*
+ * Read COUNT 16-bit words from port PORT into memory starting at
+ * DST. DST must be at least short aligned. This is used by the
+ * IDE driver to read disk sectors. Performance is important, but
+ * the interface seems to be slow: just using the inlined version
+ * of inw() breaks things.
+ */
+void insw (unsigned long port, void *dst, unsigned long count)
+{
+ unsigned int l = 0, l2;
+ unsigned char *p;
+
+ p = (unsigned char *)dst;
+
+ if (!count)
+ return;
+
+ switch (((unsigned long)p) & 0x3)
+ {
+ case 0x00: /* Buffer 32-bit aligned */
+ while (count>=2) {
+
+ count -= 2;
+ l = cpu_to_le16(inw(port)) << 16;
+ l |= cpu_to_le16(inw(port));
+ *(unsigned int *)p = l;
+ p += 4;
+ }
+ if (count) {
+ *(unsigned short *)p = cpu_to_le16(inw(port));
+ }
+ break;
+
+ case 0x02: /* Buffer 16-bit aligned */
+ *(unsigned short *)p = cpu_to_le16(inw(port));
+ p += 2;
+ count--;
+ while (count>=2) {
+
+ count -= 2;
+ l = cpu_to_le16(inw(port)) << 16;
+ l |= cpu_to_le16(inw(port));
+ *(unsigned int *)p = l;
+ p += 4;
+ }
+ if (count) {
+ *(unsigned short *)p = cpu_to_le16(inw(port));
+ }
+ break;
+
+ case 0x01: /* Buffer 8-bit aligned */
+ case 0x03:
+ /* I don't bother with 32-bit transfers
+ * in this case; 16-bit will have to do -- DE */
+ --count;
+
+ l = cpu_to_le16(inw(port));
+ *p = l >> 8;
+ p++;
+ while (count--)
+ {
+ l2 = cpu_to_le16(inw(port));
+ *(unsigned short *)p = (l & 0xff) << 8 | (l2 >> 8);
+ p += 2;
+ l = l2;
+ }
+ *p = l & 0xff;
+ break;
+ }
+}
+
+
+
+/*
+ * Read COUNT 32-bit words from port PORT into memory starting at
+ * DST. Now works with any alignment in DST. Performance is important,
+ * but the interface seems to be slow: just using the inlined version
+ * of inl() breaks things.
+ */
+void insl (unsigned long port, void *dst, unsigned long count)
+{
+ unsigned int l = 0, l2;
+ unsigned char *p;
+
+ p = (unsigned char *)dst;
+
+ if (!count)
+ return;
+
+ switch (((unsigned long) dst) & 0x3)
+ {
+ case 0x00: /* Buffer 32-bit aligned */
+ while (count--)
+ {
+ *(unsigned int *)p = cpu_to_le32(inl(port));
+ p += 4;
+ }
+ break;
+
+ case 0x02: /* Buffer 16-bit aligned */
+ --count;
+
+ l = cpu_to_le32(inl(port));
+ *(unsigned short *)p = l >> 16;
+ p += 2;
+
+ while (count--)
+ {
+ l2 = cpu_to_le32(inl(port));
+ *(unsigned int *)p = (l & 0xffff) << 16 | (l2 >> 16);
+ p += 4;
+ l = l2;
+ }
+ *(unsigned short *)p = l & 0xffff;
+ break;
+ case 0x01: /* Buffer 8-bit aligned */
+ --count;
+
+ l = cpu_to_le32(inl(port));
+ *(unsigned char *)p = l >> 24;
+ p++;
+ *(unsigned short *)p = (l >> 8) & 0xffff;
+ p += 2;
+ while (count--)
+ {
+ l2 = cpu_to_le32(inl(port));
+ *(unsigned int *)p = (l & 0xff) << 24 | (l2 >> 8);
+ p += 4;
+ l = l2;
+ }
+ *p = l & 0xff;
+ break;
+ case 0x03: /* Buffer 8-bit aligned */
+ --count;
+
+ l = cpu_to_le32(inl(port));
+ *p = l >> 24;
+ p++;
+ while (count--)
+ {
+ l2 = cpu_to_le32(inl(port));
+ *(unsigned int *)p = (l & 0xffffff) << 8 | l2 >> 24;
+ p += 4;
+ l = l2;
+ }
+ *(unsigned short *)p = (l >> 8) & 0xffff;
+ p += 2;
+ *p = l & 0xff;
+ break;
+ }
+}
+
+
+/*
+ * Like insb but in the opposite direction.
+ * Don't worry as much about doing aligned memory transfers:
+ * doing byte reads the "slow" way isn't nearly as slow as
+ * doing byte writes the slow way (no r-m-w cycle).
+ */
+void outsb(unsigned long port, const void * src, unsigned long count)
+{
+ const unsigned char *p;
+
+ p = (const unsigned char *)src;
+ while (count) {
+ count--;
+ outb(*p, port);
+ p++;
+ }
+}
+
+/*
+ * Like insw but in the opposite direction. This is used by the IDE
+ * driver to write disk sectors. Performance is important, but the
+ * interface seems to be slow: just using the inlined version of
+ * outw() breaks things.
+ */
+void outsw (unsigned long port, const void *src, unsigned long count)
+{
+ unsigned int l = 0, l2;
+ const unsigned char *p;
+
+ p = (const unsigned char *)src;
+
+ if (!count)
+ return;
+
+ switch (((unsigned long)p) & 0x3)
+ {
+ case 0x00: /* Buffer 32-bit aligned */
+ while (count>=2) {
+ count -= 2;
+ l = *(unsigned int *)p;
+ p += 4;
+ outw(le16_to_cpu(l >> 16), port);
+ outw(le16_to_cpu(l & 0xffff), port);
+ }
+ if (count) {
+ outw(le16_to_cpu(*(unsigned short*)p), port);
+ }
+ break;
+
+ case 0x02: /* Buffer 16-bit aligned */
+
+ outw(le16_to_cpu(*(unsigned short*)p), port);
+ p += 2;
+ count--;
+
+ while (count>=2) {
+ count -= 2;
+ l = *(unsigned int *)p;
+ p += 4;
+ outw(le16_to_cpu(l >> 16), port);
+ outw(le16_to_cpu(l & 0xffff), port);
+ }
+ if (count) {
+ outw(le16_to_cpu(*(unsigned short *)p), port);
+ }
+ break;
+
+ case 0x01: /* Buffer 8-bit aligned */
+ /* I don't bother with 32-bit transfers
+ * in this case; 16-bit will have to do -- DE */
+
+ l = *p << 8;
+ p++;
+ count--;
+ while (count)
+ {
+ count--;
+ l2 = *(unsigned short *)p;
+ p += 2;
+ outw(le16_to_cpu(l | l2 >> 8), port);
+ l = l2 << 8;
+ }
+ l2 = *(unsigned char *)p;
+ outw (le16_to_cpu(l | l2>>8), port);
+ break;
+
+ }
+}
+
+
+/*
+ * Like insl but in the opposite direction. This is used by the IDE
+ * driver to write disk sectors. Works with any alignment in SRC.
+ * Performance is important, but the interface seems to be slow:
+ * just using the inlined version of outl() breaks things.
+ */
+void outsl (unsigned long port, const void *src, unsigned long count)
+{
+ unsigned int l = 0, l2;
+ const unsigned char *p;
+
+ p = (const unsigned char *)src;
+
+ if (!count)
+ return;
+
+ switch (((unsigned long)p) & 0x3)
+ {
+ case 0x00: /* Buffer 32-bit aligned */
+ while (count--)
+ {
+ outl(le32_to_cpu(*(unsigned int *)p), port);
+ p += 4;
+ }
+ break;
+
+ case 0x02: /* Buffer 16-bit aligned */
+ --count;
+
+ l = *(unsigned short *)p;
+ p += 2;
+
+ while (count--)
+ {
+ l2 = *(unsigned int *)p;
+ p += 4;
+ outl (le32_to_cpu(l << 16 | l2 >> 16), port);
+ l = l2;
+ }
+ l2 = *(unsigned short *)p;
+ outl (le32_to_cpu(l << 16 | l2), port);
+ break;
+ case 0x01: /* Buffer 8-bit aligned */
+ --count;
+
+ l = *p << 24;
+ p++;
+ l |= *(unsigned short *)p << 8;
+ p += 2;
+
+ while (count--)
+ {
+ l2 = *(unsigned int *)p;
+ p += 4;
+ outl (le32_to_cpu(l | l2 >> 24), port);
+ l = l2 << 8;
+ }
+ l2 = *p;
+ outl (le32_to_cpu(l | l2), port);
+ break;
+ case 0x03: /* Buffer 8-bit aligned */
+ --count;
+
+ l = *p << 24;
+ p++;
+
+ while (count--)
+ {
+ l2 = *(unsigned int *)p;
+ p += 4;
+ outl (le32_to_cpu(l | l2 >> 8), port);
+ l = l2 << 24;
+ }
+ l2 = *(unsigned short *)p << 16;
+ p += 2;
+ l2 |= *p;
+ outl (le32_to_cpu(l | l2), port);
+ break;
+ }
+}
+
+EXPORT_SYMBOL(insb);
+EXPORT_SYMBOL(insw);
+EXPORT_SYMBOL(insl);
+EXPORT_SYMBOL(outsb);
+EXPORT_SYMBOL(outsw);
+EXPORT_SYMBOL(outsl);
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
new file mode 100644
index 00000000..5069e8b2
--- /dev/null
+++ b/arch/parisc/lib/iomap.c
@@ -0,0 +1,486 @@
+/*
+ * iomap.c - Implement iomap interface for PA-RISC
+ * Copyright (c) 2004 Matthew Wilcox
+ */
+
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+/*
+ * The iomap space on 32-bit PA-RISC is intended to look like this:
+ * 00000000-7fffffff virtual mapped IO
+ * 80000000-8fffffff ISA/EISA port space that can't be virtually mapped
+ * 90000000-9fffffff Dino port space
+ * a0000000-afffffff Astro port space
+ * b0000000-bfffffff PAT port space
+ * c0000000-cfffffff non-swapped memory IO
+ * f0000000-ffffffff legacy IO memory pointers
+ *
+ * For the moment, here's what it looks like:
+ * 80000000-8fffffff All ISA/EISA port space
+ * f0000000-ffffffff legacy IO memory pointers
+ *
+ * On 64-bit, everything is extended, so:
+ * 8000000000000000-8fffffffffffffff All ISA/EISA port space
+ * f000000000000000-ffffffffffffffff legacy IO memory pointers
+ */
+
+/*
+ * Technically, this should be 'if (VMALLOC_START < addr < VMALLOC_END)',
+ * but that's slow and we know it'll be within the first 2GB.
+ */
+#ifdef CONFIG_64BIT
+#define INDIRECT_ADDR(addr) (((unsigned long)(addr) & 1UL<<63) != 0)
+#define ADDR_TO_REGION(addr) (((unsigned long)addr >> 60) & 7)
+#define IOPORT_MAP_BASE (8UL << 60)
+#else
+#define INDIRECT_ADDR(addr) (((unsigned long)(addr) & 1UL<<31) != 0)
+#define ADDR_TO_REGION(addr) (((unsigned long)addr >> 28) & 7)
+#define IOPORT_MAP_BASE (8UL << 28)
+#endif
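+/* Cookies returned by ioport_map() have the top address bit set, so
+ * INDIRECT_ADDR() lets the ioread/iowrite helpers dispatch them to the port
+ * ops below instead of treating them as plain MMIO addresses. */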
+
+struct iomap_ops {
+ unsigned int (*read8)(void __iomem *);
+ unsigned int (*read16)(void __iomem *);
+ unsigned int (*read16be)(void __iomem *);
+ unsigned int (*read32)(void __iomem *);
+ unsigned int (*read32be)(void __iomem *);
+ void (*write8)(u8, void __iomem *);
+ void (*write16)(u16, void __iomem *);
+ void (*write16be)(u16, void __iomem *);
+ void (*write32)(u32, void __iomem *);
+ void (*write32be)(u32, void __iomem *);
+ void (*read8r)(void __iomem *, void *, unsigned long);
+ void (*read16r)(void __iomem *, void *, unsigned long);
+ void (*read32r)(void __iomem *, void *, unsigned long);
+ void (*write8r)(void __iomem *, const void *, unsigned long);
+ void (*write16r)(void __iomem *, const void *, unsigned long);
+ void (*write32r)(void __iomem *, const void *, unsigned long);
+};
+
+/* Generic ioport ops. To be replaced later by specific dino/elroy/wax code */
+
+#define ADDR2PORT(addr) ((unsigned long __force)(addr) & 0xffffff)
+
+static unsigned int ioport_read8(void __iomem *addr)
+{
+ return inb(ADDR2PORT(addr));
+}
+
+static unsigned int ioport_read16(void __iomem *addr)
+{
+ return inw(ADDR2PORT(addr));
+}
+
+static unsigned int ioport_read32(void __iomem *addr)
+{
+ return inl(ADDR2PORT(addr));
+}
+
+static void ioport_write8(u8 datum, void __iomem *addr)
+{
+ outb(datum, ADDR2PORT(addr));
+}
+
+static void ioport_write16(u16 datum, void __iomem *addr)
+{
+ outw(datum, ADDR2PORT(addr));
+}
+
+static void ioport_write32(u32 datum, void __iomem *addr)
+{
+ outl(datum, ADDR2PORT(addr));
+}
+
+static void ioport_read8r(void __iomem *addr, void *dst, unsigned long count)
+{
+ insb(ADDR2PORT(addr), dst, count);
+}
+
+static void ioport_read16r(void __iomem *addr, void *dst, unsigned long count)
+{
+ insw(ADDR2PORT(addr), dst, count);
+}
+
+static void ioport_read32r(void __iomem *addr, void *dst, unsigned long count)
+{
+ insl(ADDR2PORT(addr), dst, count);
+}
+
+static void ioport_write8r(void __iomem *addr, const void *s, unsigned long n)
+{
+ outsb(ADDR2PORT(addr), s, n);
+}
+
+static void ioport_write16r(void __iomem *addr, const void *s, unsigned long n)
+{
+ outsw(ADDR2PORT(addr), s, n);
+}
+
+static void ioport_write32r(void __iomem *addr, const void *s, unsigned long n)
+{
+ outsl(ADDR2PORT(addr), s, n);
+}
+
+static const struct iomap_ops ioport_ops = {
+ ioport_read8,
+ ioport_read16,
+ ioport_read16,
+ ioport_read32,
+ ioport_read32,
+ ioport_write8,
+ ioport_write16,
+ ioport_write16,
+ ioport_write32,
+ ioport_write32,
+ ioport_read8r,
+ ioport_read16r,
+ ioport_read32r,
+ ioport_write8r,
+ ioport_write16r,
+ ioport_write32r,
+};
+
+/* Legacy I/O memory ops */
+
+static unsigned int iomem_read8(void __iomem *addr)
+{
+ return readb(addr);
+}
+
+static unsigned int iomem_read16(void __iomem *addr)
+{
+ return readw(addr);
+}
+
+static unsigned int iomem_read16be(void __iomem *addr)
+{
+ return __raw_readw(addr);
+}
+
+static unsigned int iomem_read32(void __iomem *addr)
+{
+ return readl(addr);
+}
+
+static unsigned int iomem_read32be(void __iomem *addr)
+{
+ return __raw_readl(addr);
+}
+
+static void iomem_write8(u8 datum, void __iomem *addr)
+{
+ writeb(datum, addr);
+}
+
+static void iomem_write16(u16 datum, void __iomem *addr)
+{
+ writew(datum, addr);
+}
+
+static void iomem_write16be(u16 datum, void __iomem *addr)
+{
+ __raw_writew(datum, addr);
+}
+
+static void iomem_write32(u32 datum, void __iomem *addr)
+{
+ writel(datum, addr);
+}
+
+static void iomem_write32be(u32 datum, void __iomem *addr)
+{
+ __raw_writel(datum, addr);
+}
+
+static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count)
+{
+ while (count--) {
+ *(u8 *)dst = __raw_readb(addr);
+ dst++;
+ }
+}
+
+static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count)
+{
+ while (count--) {
+ *(u16 *)dst = __raw_readw(addr);
+ dst += 2;
+ }
+}
+
+static void iomem_read32r(void __iomem *addr, void *dst, unsigned long count)
+{
+ while (count--) {
+ *(u32 *)dst = __raw_readl(addr);
+ dst += 4;
+ }
+}
+
+static void iomem_write8r(void __iomem *addr, const void *s, unsigned long n)
+{
+ while (n--) {
+ __raw_writeb(*(u8 *)s, addr);
+ s++;
+ }
+}
+
+static void iomem_write16r(void __iomem *addr, const void *s, unsigned long n)
+{
+ while (n--) {
+ __raw_writew(*(u16 *)s, addr);
+ s += 2;
+ }
+}
+
+static void iomem_write32r(void __iomem *addr, const void *s, unsigned long n)
+{
+ while (n--) {
+ __raw_writel(*(u32 *)s, addr);
+ s += 4;
+ }
+}
+
+static const struct iomap_ops iomem_ops = {
+ iomem_read8,
+ iomem_read16,
+ iomem_read16be,
+ iomem_read32,
+ iomem_read32be,
+ iomem_write8,
+ iomem_write16,
+ iomem_write16be,
+ iomem_write32,
+ iomem_write32be,
+ iomem_read8r,
+ iomem_read16r,
+ iomem_read32r,
+ iomem_write8r,
+ iomem_write16r,
+ iomem_write32r,
+};
+
+static const struct iomap_ops *iomap_ops[8] = {
+ [0] = &ioport_ops,
+ [7] = &iomem_ops
+};
+
+
+unsigned int ioread8(void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr)))
+ return iomap_ops[ADDR_TO_REGION(addr)]->read8(addr);
+ return *((u8 *)addr);
+}
+
+unsigned int ioread16(void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr)))
+ return iomap_ops[ADDR_TO_REGION(addr)]->read16(addr);
+ return le16_to_cpup((u16 *)addr);
+}
+
+unsigned int ioread16be(void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr)))
+ return iomap_ops[ADDR_TO_REGION(addr)]->read16be(addr);
+ return *((u16 *)addr);
+}
+
+unsigned int ioread32(void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr)))
+ return iomap_ops[ADDR_TO_REGION(addr)]->read32(addr);
+ return le32_to_cpup((u32 *)addr);
+}
+
+unsigned int ioread32be(void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr)))
+ return iomap_ops[ADDR_TO_REGION(addr)]->read32be(addr);
+ return *((u32 *)addr);
+}
+
+void iowrite8(u8 datum, void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->write8(datum, addr);
+ } else {
+ *((u8 *)addr) = datum;
+ }
+}
+
+void iowrite16(u16 datum, void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->write16(datum, addr);
+ } else {
+ *((u16 *)addr) = cpu_to_le16(datum);
+ }
+}
+
+void iowrite16be(u16 datum, void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->write16be(datum, addr);
+ } else {
+ *((u16 *)addr) = datum;
+ }
+}
+
+void iowrite32(u32 datum, void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->write32(datum, addr);
+ } else {
+ *((u32 *)addr) = cpu_to_le32(datum);
+ }
+}
+
+void iowrite32be(u32 datum, void __iomem *addr)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->write32be(datum, addr);
+ } else {
+ *((u32 *)addr) = datum;
+ }
+}
+
+/* Repeating interfaces */
+
+void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->read8r(addr, dst, count);
+ } else {
+ while (count--) {
+ *(u8 *)dst = *(u8 *)addr;
+ dst++;
+ }
+ }
+}
+
+void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->read16r(addr, dst, count);
+ } else {
+ while (count--) {
+ *(u16 *)dst = *(u16 *)addr;
+ dst += 2;
+ }
+ }
+}
+
+void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->read32r(addr, dst, count);
+ } else {
+ while (count--) {
+ *(u32 *)dst = *(u32 *)addr;
+ dst += 4;
+ }
+ }
+}
+
+void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->write8r(addr, src, count);
+ } else {
+ while (count--) {
+ *(u8 *)addr = *(u8 *)src;
+ src++;
+ }
+ }
+}
+
+void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->write16r(addr, src, count);
+ } else {
+ while (count--) {
+ *(u16 *)addr = *(u16 *)src;
+ src += 2;
+ }
+ }
+}
+
+void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+ if (unlikely(INDIRECT_ADDR(addr))) {
+ iomap_ops[ADDR_TO_REGION(addr)]->write32r(addr, src, count);
+ } else {
+ while (count--) {
+ *(u32 *)addr = *(u32 *)src;
+ src += 4;
+ }
+ }
+}
+
+/* Mapping interfaces */
+
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return (void __iomem *)(IOPORT_MAP_BASE | port);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+ if (!INDIRECT_ADDR(addr)) {
+ iounmap(addr);
+ }
+}
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (!len || !start)
+ return NULL;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+ if (flags & IORESOURCE_IO)
+ return ioport_map(start, len);
+ if (flags & IORESOURCE_MEM) {
+ if (flags & IORESOURCE_CACHEABLE)
+ return ioremap(start, len);
+ return ioremap_nocache(start, len);
+ }
+ /* What? */
+ return NULL;
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+ if (!INDIRECT_ADDR(addr)) {
+ iounmap(addr);
+ }
+}
+
+EXPORT_SYMBOL(ioread8);
+EXPORT_SYMBOL(ioread16);
+EXPORT_SYMBOL(ioread16be);
+EXPORT_SYMBOL(ioread32);
+EXPORT_SYMBOL(ioread32be);
+EXPORT_SYMBOL(iowrite8);
+EXPORT_SYMBOL(iowrite16);
+EXPORT_SYMBOL(iowrite16be);
+EXPORT_SYMBOL(iowrite32);
+EXPORT_SYMBOL(iowrite32be);
+EXPORT_SYMBOL(ioread8_rep);
+EXPORT_SYMBOL(ioread16_rep);
+EXPORT_SYMBOL(ioread32_rep);
+EXPORT_SYMBOL(iowrite8_rep);
+EXPORT_SYMBOL(iowrite16_rep);
+EXPORT_SYMBOL(iowrite32_rep);
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
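
For context, a minimal sketch of how a PCI driver would typically consume this interface; the helper name, BAR number and register offset are made up, and setup such as pci_enable_device() and fuller error handling are omitted:

	#include <linux/pci.h>
	#include <linux/io.h>

	#define EXAMPLE_BAR        0     /* hypothetical BAR */
	#define EXAMPLE_STATUS_REG 0x10  /* hypothetical register offset */

	static int example_read_status(struct pci_dev *dev, u32 *status)
	{
		void __iomem *base;

		/* pci_iomap() hands back either an encoded port cookie (IO BAR) or a
		 * real ioremap()ed pointer (memory BAR); ioread32() works on both. */
		base = pci_iomap(dev, EXAMPLE_BAR, 0);
		if (!base)
			return -ENOMEM;

		*status = ioread32(base + EXAMPLE_STATUS_REG);
		pci_iounmap(dev, base);
		return 0;
	}
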
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
new file mode 100644
index 00000000..1bd23cce
--- /dev/null
+++ b/arch/parisc/lib/lusercopy.S
@@ -0,0 +1,180 @@
+/*
+ * User Space Access Routines
+ *
+ * Copyright (C) 2000-2002 Hewlett-Packard (John Marvin)
+ * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
+ * Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
+ * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * These routines still have plenty of room for optimization
+ * (word & doubleword load/store, dual issue, store hints, etc.).
+ */
+
+/*
+ * The following routines assume that space register 3 (sr3) contains
+ * the space id associated with the current user's address space.
+ */
+
+
+ .text
+
+#include <asm/assembly.h>
+#include <asm/errno.h>
+#include <linux/linkage.h>
+
+ /*
+ * get_sr gets the appropriate space value into
+ * sr1 for kernel/user space access, depending
+ * on the flag stored in the task structure.
+ */
+
+ .macro get_sr
+ mfctl %cr30,%r1
+ ldw TI_SEGMENT(%r1),%r22
+ mfsp %sr3,%r1
+ or,<> %r22,%r0,%r0
+ copy %r0,%r1
+ mtsp %r1,%sr1
+ .endm
+
+ .macro fixup_branch lbl
+ ldil L%\lbl, %r1
+ ldo R%\lbl(%r1), %r1
+ bv %r0(%r1)
+ .endm
+
+ /*
+ * long lstrncpy_from_user(char *dst, const char *src, long n)
+ *
+ * Returns -EFAULT if exception before terminator,
+ * N if the entire buffer filled,
+ * otherwise strlen (i.e. excludes zero byte)
+ */
+
+ENTRY(lstrncpy_from_user)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+ comib,= 0,%r24,$lsfu_done
+ copy %r24,%r23
+ get_sr
+1: ldbs,ma 1(%sr1,%r25),%r1
+$lsfu_loop:
+ stbs,ma %r1,1(%r26)
+ comib,=,n 0,%r1,$lsfu_done
+ addib,<>,n -1,%r24,$lsfu_loop
+2: ldbs,ma 1(%sr1,%r25),%r1
+$lsfu_done:
+ sub %r23,%r24,%r28
+$lsfu_exit:
+ bv %r0(%r2)
+ nop
+ .exit
+ENDPROC(lstrncpy_from_user)
+
+ .section .fixup,"ax"
+3: fixup_branch $lsfu_exit
+ ldi -EFAULT,%r28
+ .previous
+
+ .section __ex_table,"aw"
+ ASM_ULONG_INSN 1b,3b
+ ASM_ULONG_INSN 2b,3b
+ .previous
+
+ .procend
+
+ /*
+ * unsigned long lclear_user(void *to, unsigned long n)
+ *
+ * Returns 0 for success;
+ * otherwise, returns the number of bytes not transferred.
+ */
+
+ENTRY(lclear_user)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+ comib,=,n 0,%r25,$lclu_done
+ get_sr
+$lclu_loop:
+ addib,<> -1,%r25,$lclu_loop
+1: stbs,ma %r0,1(%sr1,%r26)
+
+$lclu_done:
+ bv %r0(%r2)
+ copy %r25,%r28
+ .exit
+ENDPROC(lclear_user)
+
+ .section .fixup,"ax"
+2: fixup_branch $lclu_done
+ ldo 1(%r25),%r25
+ .previous
+
+ .section __ex_table,"aw"
+ ASM_ULONG_INSN 1b,2b
+ .previous
+
+ .procend
+
+ /*
+ * long lstrnlen_user(char *s, long n)
+ *
+ * Returns 0 if exception before zero byte or reaching N,
+ * N+1 if N would be exceeded,
+ * else strlen + 1 (i.e. includes zero byte).
+ */
+
+ENTRY(lstrnlen_user)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+ comib,= 0,%r25,$lslen_nzero
+ copy %r26,%r24
+ get_sr
+1: ldbs,ma 1(%sr1,%r26),%r1
+$lslen_loop:
+ comib,=,n 0,%r1,$lslen_done
+ addib,<> -1,%r25,$lslen_loop
+2: ldbs,ma 1(%sr1,%r26),%r1
+$lslen_done:
+ bv %r0(%r2)
+ sub %r26,%r24,%r28
+ .exit
+
+$lslen_nzero:
+ b $lslen_done
+ ldo 1(%r26),%r26 /* special case for N == 0 */
+ENDPROC(lstrnlen_user)
+
+ .section .fixup,"ax"
+3: fixup_branch $lslen_done
+ copy %r24,%r26 /* reset r26 so 0 is returned on fault */
+ .previous
+
+ .section __ex_table,"aw"
+ ASM_ULONG_INSN 1b,3b
+ ASM_ULONG_INSN 2b,3b
+ .previous
+
+ .procend
+
+ .end
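
A plain-C model of the return-value contract documented above for lstrnlen_user() may help (illustration only; the real routine additionally returns 0 when the access faults, which this model does not show):

	/* Reference model: length of the string at s including the NUL, capped at n.
	 * Returns n + 1 when no NUL terminator is found within the first n bytes. */
	static long lstrnlen_model(const char *s, long n)
	{
		long i;

		for (i = 0; i < n; i++)
			if (s[i] == '\0')
				return i + 1;	/* strlen + 1, i.e. includes the zero byte */
		return n + 1;			/* N would be exceeded */
	}
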
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
new file mode 100644
index 00000000..1dbca5c3
--- /dev/null
+++ b/arch/parisc/lib/memcpy.c
@@ -0,0 +1,506 @@
+/*
+ * Optimized memory copy routines.
+ *
+ * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Portions derived from the GNU C Library
+ * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
+ *
+ * Several strategies are used to try to get the best performance for various
+ * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using
+ * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using
+ * general registers. Unaligned copies are handled either by aligning the
+ * destination and then using shift-and-write method, or in a few cases by
+ * falling back to a byte-at-a-time copy.
+ *
+ * I chose to implement this in C because it is easier to maintain and debug,
+ * and in my experiments it appears that the C code generated by gcc (3.3/3.4
+ * at the time of writing) is fairly optimal. Unfortunately some of the
+ * semantics of the copy routine (exception handling) are difficult to express
+ * in C, so we have to play some tricks to get it to work.
+ *
+ * All the loads and stores are done via explicit asm() code in order to use
+ * the right space registers.
+ *
+ * Testing with various alignments and buffer sizes shows that this code is
+ * often >10x faster than a simple byte-at-a-time copy, even for strangely
+ * aligned operands. It is interesting to note that the glibc version
+ * of memcpy (written in C) is actually quite fast already. This routine is
+ * able to beat it by 30-40% for aligned copies because of the loop unrolling,
+ * but in some cases the glibc version is still slightly faster. This lends
+ * more credibility to the idea that gcc can generate very good code as long
+ * as we are careful.
+ *
+ * TODO:
+ * - cache prefetching needs more experimentation to get optimal settings
+ * - try not to use the post-increment address modifiers; they create additional
+ * interlocks
+ * - replace byte-copy loops with stbys sequences
+ */
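
A small stand-alone model of the shift-and-merge step described above: on big-endian PA-RISC, the shrpw-based MERGE() used further down extracts a realigned 32-bit window from two neighbouring source words. The sketch assumes 0 < sh_1 < 32 and sh_1 + sh_2 == 32, as in the unaligned-copy path below:

	#include <stdint.h>
	#include <stdio.h>

	/* Model of MERGE(w0, sh_1, w1, sh_2): a 32-bit window starting sh_1/8
	 * bytes into w0 and ending sh_1/8 bytes into w1 (big-endian byte order). */
	static uint32_t merge_model(uint32_t w0, int sh_1, uint32_t w1, int sh_2)
	{
		return (w0 << sh_1) | (w1 >> sh_2);
	}

	int main(void)
	{
		/* Aligned words hold bytes [00 AA BB CC] [DD EE FF 00]; the source
		 * logically starts one byte in, so sh_1 = 8 and sh_2 = 24. */
		printf("0x%08x\n", (unsigned)merge_model(0x00AABBCC, 8, 0xDDEEFF00, 24));
		/* prints 0xaabbccdd -- the word-aligned view of the misaligned source */
		return 0;
	}
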
+
+#ifdef __KERNEL__
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <asm/uaccess.h>
+#define s_space "%%sr1"
+#define d_space "%%sr2"
+#else
+#include "memcpy.h"
+#define s_space "%%sr0"
+#define d_space "%%sr0"
+#define pa_memcpy new2_copy
+#endif
+
+DECLARE_PER_CPU(struct exception_data, exception_data);
+
+#define preserve_branch(label) do { \
+ volatile int dummy; \
+ /* The following branch is never taken, it's just here to */ \
+ /* prevent gcc from optimizing away our exception code. */ \
+ if (unlikely(dummy != dummy)) \
+ goto label; \
+} while (0)
+
+#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
+#define get_kernel_space() (0)
+
+#define MERGE(w0, sh_1, w1, sh_2) ({ \
+ unsigned int _r; \
+ asm volatile ( \
+ "mtsar %3\n" \
+ "shrpw %1, %2, %%sar, %0\n" \
+ : "=r"(_r) \
+ : "r"(w0), "r"(w1), "r"(sh_2) \
+ ); \
+ _r; \
+})
+#define THRESHOLD 16
+
+#ifdef DEBUG_MEMCPY
+#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
+#else
+#define DPRINTF(fmt, args...)
+#endif
+
+#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
+ __asm__ __volatile__ ( \
+ "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
+ : _tt(_t), "+r"(_a) \
+ : \
+ : "r8")
+
+#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
+ __asm__ __volatile__ ( \
+ "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
+ : "+r"(_a) \
+ : _tt(_t) \
+ : "r8")
+
+#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e)
+#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e)
+#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e)
+#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e)
+#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e)
+#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e)
+
+#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \
+ __asm__ __volatile__ ( \
+ "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
+ : _tt(_t) \
+ : "r"(_a) \
+ : "r8")
+
+#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \
+ __asm__ __volatile__ ( \
+ "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
+ : \
+ : _tt(_t), "r"(_a) \
+ : "r8")
+
+#define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e)
+#define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e)
+
+#ifdef CONFIG_PREFETCH
+static inline void prefetch_src(const void *addr)
+{
+ __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
+}
+
+static inline void prefetch_dst(const void *addr)
+{
+ __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
+}
+#else
+#define prefetch_src(addr) do { } while(0)
+#define prefetch_dst(addr) do { } while(0)
+#endif
+
+/* Copy from an unaligned src to an aligned dst, using shifts. Handles 4 words
+ * per loop. This code is derived from glibc.
+ */
+static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src, unsigned long len, unsigned long o_dst, unsigned long o_src, unsigned long o_len)
+{
+ /* gcc complains that a2 and a3 may be uninitialized, but actually
+ * they cannot be. Initialize a2/a3 to shut gcc up.
+ */
+ register unsigned int a0, a1, a2 = 0, a3 = 0;
+ int sh_1, sh_2;
+ struct exception_data *d;
+
+ /* prefetch_src((const void *)src); */
+
+ /* Calculate how to shift a word read at the memory operation
+ aligned srcp to make it aligned for copy. */
+ sh_1 = 8 * (src % sizeof(unsigned int));
+ sh_2 = 8 * sizeof(unsigned int) - sh_1;
+
+ /* Make src aligned by rounding it down. */
+ src &= -sizeof(unsigned int);
+
+ switch (len % 4)
+ {
+ case 2:
+ /* a1 = ((unsigned int *) src)[0];
+ a2 = ((unsigned int *) src)[1]; */
+ ldw(s_space, 0, src, a1, cda_ldw_exc);
+ ldw(s_space, 4, src, a2, cda_ldw_exc);
+ src -= 1 * sizeof(unsigned int);
+ dst -= 3 * sizeof(unsigned int);
+ len += 2;
+ goto do1;
+ case 3:
+ /* a0 = ((unsigned int *) src)[0];
+ a1 = ((unsigned int *) src)[1]; */
+ ldw(s_space, 0, src, a0, cda_ldw_exc);
+ ldw(s_space, 4, src, a1, cda_ldw_exc);
+ src -= 0 * sizeof(unsigned int);
+ dst -= 2 * sizeof(unsigned int);
+ len += 1;
+ goto do2;
+ case 0:
+ if (len == 0)
+ return 0;
+ /* a3 = ((unsigned int *) src)[0];
+ a0 = ((unsigned int *) src)[1]; */
+ ldw(s_space, 0, src, a3, cda_ldw_exc);
+ ldw(s_space, 4, src, a0, cda_ldw_exc);
+ src -=-1 * sizeof(unsigned int);
+ dst -= 1 * sizeof(unsigned int);
+ len += 0;
+ goto do3;
+ case 1:
+ /* a2 = ((unsigned int *) src)[0];
+ a3 = ((unsigned int *) src)[1]; */
+ ldw(s_space, 0, src, a2, cda_ldw_exc);
+ ldw(s_space, 4, src, a3, cda_ldw_exc);
+ src -=-2 * sizeof(unsigned int);
+ dst -= 0 * sizeof(unsigned int);
+ len -= 1;
+ if (len == 0)
+ goto do0;
+ goto do4; /* No-op. */
+ }
+
+ do
+ {
+ /* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */
+do4:
+ /* a0 = ((unsigned int *) src)[0]; */
+ ldw(s_space, 0, src, a0, cda_ldw_exc);
+ /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
+ stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
+do3:
+ /* a1 = ((unsigned int *) src)[1]; */
+ ldw(s_space, 4, src, a1, cda_ldw_exc);
+ /* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */
+ stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc);
+do2:
+ /* a2 = ((unsigned int *) src)[2]; */
+ ldw(s_space, 8, src, a2, cda_ldw_exc);
+ /* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */
+ stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc);
+do1:
+ /* a3 = ((unsigned int *) src)[3]; */
+ ldw(s_space, 12, src, a3, cda_ldw_exc);
+ /* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */
+ stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc);
+
+ src += 4 * sizeof(unsigned int);
+ dst += 4 * sizeof(unsigned int);
+ len -= 4;
+ }
+ while (len != 0);
+
+do0:
+ /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
+ stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
+
+ preserve_branch(handle_load_error);
+ preserve_branch(handle_store_error);
+
+ return 0;
+
+handle_load_error:
+ __asm__ __volatile__ ("cda_ldw_exc:\n");
+ d = &__get_cpu_var(exception_data);
+ DPRINTF("cda_ldw_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
+ o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
+ return o_len * 4 - d->fault_addr + o_src;
+
+handle_store_error:
+ __asm__ __volatile__ ("cda_stw_exc:\n");
+ d = &__get_cpu_var(exception_data);
+ DPRINTF("cda_stw_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
+ o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
+ return o_len * 4 - d->fault_addr + o_dst;
+}
+
+
+/* Returns 0 for success; otherwise, returns the number of bytes not transferred. */
+static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+{
+ register unsigned long src, dst, t1, t2, t3;
+ register unsigned char *pcs, *pcd;
+ register unsigned int *pws, *pwd;
+ register double *pds, *pdd;
+ unsigned long ret = 0;
+ unsigned long o_dst, o_src, o_len;
+ struct exception_data *d;
+
+ src = (unsigned long)srcp;
+ dst = (unsigned long)dstp;
+ pcs = (unsigned char *)srcp;
+ pcd = (unsigned char *)dstp;
+
+ o_dst = dst; o_src = src; o_len = len;
+
+ /* prefetch_src((const void *)srcp); */
+
+ if (len < THRESHOLD)
+ goto byte_copy;
+
+ /* Check alignment */
+ t1 = (src ^ dst);
+ if (unlikely(t1 & (sizeof(double)-1)))
+ goto unaligned_copy;
+
+ /* src and dst have same alignment. */
+
+ /* Copy bytes till we are double-aligned. */
+ t2 = src & (sizeof(double) - 1);
+ if (unlikely(t2 != 0)) {
+ t2 = sizeof(double) - t2;
+ while (t2 && len) {
+ /* *pcd++ = *pcs++; */
+ ldbma(s_space, pcs, t3, pmc_load_exc);
+ len--;
+ stbma(d_space, t3, pcd, pmc_store_exc);
+ t2--;
+ }
+ }
+
+ pds = (double *)pcs;
+ pdd = (double *)pcd;
+
+#if 0
+ /* Copy 8 doubles at a time */
+ while (len >= 8*sizeof(double)) {
+ register double r1, r2, r3, r4, r5, r6, r7, r8;
+ /* prefetch_src((char *)pds + L1_CACHE_BYTES); */
+ flddma(s_space, pds, r1, pmc_load_exc);
+ flddma(s_space, pds, r2, pmc_load_exc);
+ flddma(s_space, pds, r3, pmc_load_exc);
+ flddma(s_space, pds, r4, pmc_load_exc);
+ fstdma(d_space, r1, pdd, pmc_store_exc);
+ fstdma(d_space, r2, pdd, pmc_store_exc);
+ fstdma(d_space, r3, pdd, pmc_store_exc);
+ fstdma(d_space, r4, pdd, pmc_store_exc);
+
+#if 0
+ if (L1_CACHE_BYTES <= 32)
+ prefetch_src((char *)pds + L1_CACHE_BYTES);
+#endif
+ flddma(s_space, pds, r5, pmc_load_exc);
+ flddma(s_space, pds, r6, pmc_load_exc);
+ flddma(s_space, pds, r7, pmc_load_exc);
+ flddma(s_space, pds, r8, pmc_load_exc);
+ fstdma(d_space, r5, pdd, pmc_store_exc);
+ fstdma(d_space, r6, pdd, pmc_store_exc);
+ fstdma(d_space, r7, pdd, pmc_store_exc);
+ fstdma(d_space, r8, pdd, pmc_store_exc);
+ len -= 8*sizeof(double);
+ }
+#endif
+
+ pws = (unsigned int *)pds;
+ pwd = (unsigned int *)pdd;
+
+word_copy:
+ while (len >= 8*sizeof(unsigned int)) {
+ register unsigned int r1,r2,r3,r4,r5,r6,r7,r8;
+ /* prefetch_src((char *)pws + L1_CACHE_BYTES); */
+ ldwma(s_space, pws, r1, pmc_load_exc);
+ ldwma(s_space, pws, r2, pmc_load_exc);
+ ldwma(s_space, pws, r3, pmc_load_exc);
+ ldwma(s_space, pws, r4, pmc_load_exc);
+ stwma(d_space, r1, pwd, pmc_store_exc);
+ stwma(d_space, r2, pwd, pmc_store_exc);
+ stwma(d_space, r3, pwd, pmc_store_exc);
+ stwma(d_space, r4, pwd, pmc_store_exc);
+
+ ldwma(s_space, pws, r5, pmc_load_exc);
+ ldwma(s_space, pws, r6, pmc_load_exc);
+ ldwma(s_space, pws, r7, pmc_load_exc);
+ ldwma(s_space, pws, r8, pmc_load_exc);
+ stwma(d_space, r5, pwd, pmc_store_exc);
+ stwma(d_space, r6, pwd, pmc_store_exc);
+ stwma(d_space, r7, pwd, pmc_store_exc);
+ stwma(d_space, r8, pwd, pmc_store_exc);
+ len -= 8*sizeof(unsigned int);
+ }
+
+ while (len >= 4*sizeof(unsigned int)) {
+ register unsigned int r1,r2,r3,r4;
+ ldwma(s_space, pws, r1, pmc_load_exc);
+ ldwma(s_space, pws, r2, pmc_load_exc);
+ ldwma(s_space, pws, r3, pmc_load_exc);
+ ldwma(s_space, pws, r4, pmc_load_exc);
+ stwma(d_space, r1, pwd, pmc_store_exc);
+ stwma(d_space, r2, pwd, pmc_store_exc);
+ stwma(d_space, r3, pwd, pmc_store_exc);
+ stwma(d_space, r4, pwd, pmc_store_exc);
+ len -= 4*sizeof(unsigned int);
+ }
+
+ pcs = (unsigned char *)pws;
+ pcd = (unsigned char *)pwd;
+
+byte_copy:
+ while (len) {
+ /* *pcd++ = *pcs++; */
+ ldbma(s_space, pcs, t3, pmc_load_exc);
+ stbma(d_space, t3, pcd, pmc_store_exc);
+ len--;
+ }
+
+ return 0;
+
+unaligned_copy:
+ /* possibly we are aligned on a word, but not on a double... */
+ if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
+ t2 = src & (sizeof(unsigned int) - 1);
+
+ if (unlikely(t2 != 0)) {
+ t2 = sizeof(unsigned int) - t2;
+ while (t2) {
+ /* *pcd++ = *pcs++; */
+ ldbma(s_space, pcs, t3, pmc_load_exc);
+ stbma(d_space, t3, pcd, pmc_store_exc);
+ len--;
+ t2--;
+ }
+ }
+
+ pws = (unsigned int *)pcs;
+ pwd = (unsigned int *)pcd;
+ goto word_copy;
+ }
+
+ /* Align the destination. */
+ if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) {
+ t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1));
+ while (t2) {
+ /* *pcd++ = *pcs++; */
+ ldbma(s_space, pcs, t3, pmc_load_exc);
+ stbma(d_space, t3, pcd, pmc_store_exc);
+ len--;
+ t2--;
+ }
+ dst = (unsigned long)pcd;
+ src = (unsigned long)pcs;
+ }
+
+ ret = copy_dstaligned(dst, src, len / sizeof(unsigned int),
+ o_dst, o_src, o_len);
+ if (ret)
+ return ret;
+
+ pcs += (len & -sizeof(unsigned int));
+ pcd += (len & -sizeof(unsigned int));
+ len %= sizeof(unsigned int);
+
+ preserve_branch(handle_load_error);
+ preserve_branch(handle_store_error);
+
+ goto byte_copy;
+
+handle_load_error:
+ __asm__ __volatile__ ("pmc_load_exc:\n");
+ d = &__get_cpu_var(exception_data);
+ DPRINTF("pmc_load_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
+ o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
+ return o_len - d->fault_addr + o_src;
+
+handle_store_error:
+ __asm__ __volatile__ ("pmc_store_exc:\n");
+ d = &__get_cpu_var(exception_data);
+ DPRINTF("pmc_store_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
+ o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
+ return o_len - d->fault_addr + o_dst;
+}
+
+#ifdef __KERNEL__
+unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len)
+{
+ mtsp(get_kernel_space(), 1);
+ mtsp(get_user_space(), 2);
+ return pa_memcpy((void __force *)dst, src, len);
+}
+
+EXPORT_SYMBOL(__copy_from_user);
+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len)
+{
+ mtsp(get_user_space(), 1);
+ mtsp(get_kernel_space(), 2);
+ return pa_memcpy(dst, (void __force *)src, len);
+}
+
+unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len)
+{
+ mtsp(get_user_space(), 1);
+ mtsp(get_user_space(), 2);
+ return pa_memcpy((void __force *)dst, (void __force *)src, len);
+}
+
+
+void * memcpy(void * dst,const void *src, size_t count)
+{
+ mtsp(get_kernel_space(), 1);
+ mtsp(get_kernel_space(), 2);
+ pa_memcpy(dst, src, count);
+ return dst;
+}
+
+EXPORT_SYMBOL(copy_to_user);
+EXPORT_SYMBOL(copy_from_user);
+EXPORT_SYMBOL(copy_in_user);
+EXPORT_SYMBOL(memcpy);
+#endif
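
Since pa_memcpy() reports the number of bytes it could not transfer, the exported helpers follow the usual uaccess contract; a hypothetical caller sketch:

	#include <linux/errno.h>
	#include <linux/uaccess.h>

	/* Hypothetical helper: pull a fixed-size request out of user space. */
	static int example_fetch(void *buf, const void __user *uptr, unsigned long len)
	{
		unsigned long left = copy_from_user(buf, uptr, len);

		/* non-zero means a fault; 'left' bytes at the tail were not copied */
		return left ? -EFAULT : 0;
	}
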
diff --git a/arch/parisc/lib/memset.c b/arch/parisc/lib/memset.c
new file mode 100644
index 00000000..1d7929bd
--- /dev/null
+++ b/arch/parisc/lib/memset.c
@@ -0,0 +1,91 @@
+/* Copyright (C) 1991, 1997 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* Slight modifications for pa-risc linux - Paul Bame <bame@debian.org> */
+
+#include <linux/types.h>
+#include <asm/string.h>
+
+#define OPSIZ (BITS_PER_LONG/8)
+typedef unsigned long op_t;
+
+void *
+memset (void *dstpp, int sc, size_t len)
+{
+ unsigned int c = sc;
+ long int dstp = (long int) dstpp;
+
+ if (len >= 8)
+ {
+ size_t xlen;
+ op_t cccc;
+
+ cccc = (unsigned char) c;
+ cccc |= cccc << 8;
+ cccc |= cccc << 16;
+ if (OPSIZ > 4)
+ /* Do the shift in two steps to avoid warning if long has 32 bits. */
+ cccc |= (cccc << 16) << 16;
+
+ /* There are at least some bytes to set.
+ No need to test for LEN == 0 in this alignment loop. */
+ while (dstp % OPSIZ != 0)
+ {
+ ((unsigned char *) dstp)[0] = c;
+ dstp += 1;
+ len -= 1;
+ }
+
+ /* Write 8 `op_t' per iteration until less than 8 `op_t' remain. */
+ xlen = len / (OPSIZ * 8);
+ while (xlen > 0)
+ {
+ ((op_t *) dstp)[0] = cccc;
+ ((op_t *) dstp)[1] = cccc;
+ ((op_t *) dstp)[2] = cccc;
+ ((op_t *) dstp)[3] = cccc;
+ ((op_t *) dstp)[4] = cccc;
+ ((op_t *) dstp)[5] = cccc;
+ ((op_t *) dstp)[6] = cccc;
+ ((op_t *) dstp)[7] = cccc;
+ dstp += 8 * OPSIZ;
+ xlen -= 1;
+ }
+ len %= OPSIZ * 8;
+
+ /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain. */
+ xlen = len / OPSIZ;
+ while (xlen > 0)
+ {
+ ((op_t *) dstp)[0] = cccc;
+ dstp += OPSIZ;
+ xlen -= 1;
+ }
+ len %= OPSIZ;
+ }
+
+ /* Write the last few bytes. */
+ while (len > 0)
+ {
+ ((unsigned char *) dstp)[0] = c;
+ dstp += 1;
+ len -= 1;
+ }
+
+ return dstpp;
+}
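
The fill pattern above is built by replicating the byte across an op_t; a stand-alone illustration (the final step only matters when unsigned long is 64-bit):

	#include <stdio.h>

	int main(void)
	{
		unsigned long cccc = (unsigned char)0xab;

		cccc |= cccc << 8;		/* 0xabab */
		cccc |= cccc << 16;		/* 0xabababab */
		cccc |= (cccc << 16) << 16;	/* 0xabababababababab when long is 64-bit */
		printf("%#lx\n", cccc);
		return 0;
	}
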
diff --git a/arch/parisc/math-emu/Makefile b/arch/parisc/math-emu/Makefile
new file mode 100644
index 00000000..0bd63b08
--- /dev/null
+++ b/arch/parisc/math-emu/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for the linux/parisc floating point code
+#
+
+# See arch/parisc/math-emu/README
+ccflags-y := -Wno-parentheses -Wno-implicit-function-declaration \
+ -Wno-uninitialized -Wno-strict-prototypes -Wno-return-type \
+ -Wno-implicit-int
+
+obj-y := frnd.o driver.o decode_exc.o fpudispatch.o denormal.o \
+ dfmpy.o sfmpy.o sfsqrt.o dfsqrt.o dfadd.o fmpyfadd.o \
+ sfadd.o dfsub.o sfsub.o fcnvfxt.o fcnvff.o fcnvxf.o \
+ fcnvfx.o fcnvuf.o fcnvfu.o fcnvfut.o dfdiv.o sfdiv.o \
+ dfrem.o sfrem.o dfcmp.o sfcmp.o
+
+# Math emulation code beyond the FRND is required for 712/80i and
+# other very old or stripped-down PA-RISC CPUs -- not currently supported
+
+obj-$(CONFIG_MATH_EMULATION) += unimplemented-math-emulation.o
diff --git a/arch/parisc/math-emu/README b/arch/parisc/math-emu/README
new file mode 100644
index 00000000..1a0124ef
--- /dev/null
+++ b/arch/parisc/math-emu/README
@@ -0,0 +1,11 @@
+All files except driver.c are snapshots from the HP-UX kernel. They've
+been modified as little as possible. Even though they don't fit the
+Linux coding style, please leave them in their funny format just in case
+someone in the future, with access to HP-UX source code, is generous
+enough to update our copies with later changes from HP-UX -- it'll
+make their 'diff' job easier if our code is relatively unmodified.
+
+Required Disclaimer: Hewlett-Packard makes no implied or expressed
+warranties about this code nor any promises to maintain or test it
+in any way. This copy of this snapshot is no longer the property
+of Hewlett-Packard.
diff --git a/arch/parisc/math-emu/cnv_float.h b/arch/parisc/math-emu/cnv_float.h
new file mode 100644
index 00000000..9071e093
--- /dev/null
+++ b/arch/parisc/math-emu/cnv_float.h
@@ -0,0 +1,377 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifdef __NO_PA_HDRS
+ PA header file -- do not include this header file for non-PA builds.
+#endif
+
+/*
+ * Some more constants
+ */
+#define SGL_FX_MAX_EXP 30
+#define DBL_FX_MAX_EXP 62
+#define QUAD_FX_MAX_EXP 126
+
+#define Dintp1(object) (object)
+#define Dintp2(object) (object)
+
+#define Duintp1(object) (object)
+#define Duintp2(object) (object)
+
+#define Qintp0(object) (object)
+#define Qintp1(object) (object)
+#define Qintp2(object) (object)
+#define Qintp3(object) (object)
+
+
+/*
+ * These macros will be used specifically by the convert instructions.
+ *
+ *
+ * Single format macros
+ */
+
+#define Sgl_to_dbl_exponent(src_exponent,dest) \
+ Deposit_dexponent(dest,src_exponent+(DBL_BIAS-SGL_BIAS))
+
+#define Sgl_to_dbl_mantissa(src_mantissa,destA,destB) \
+ Deposit_dmantissap1(destA,src_mantissa>>3); \
+ Dmantissap2(destB) = src_mantissa << 29
+
+#define Sgl_isinexact_to_fix(sgl_value,exponent) \
+ ((exponent < (SGL_P - 1)) ? \
+ (Sall(sgl_value) << (SGL_EXP_LENGTH + 1 + exponent)) : FALSE)
+
+#define Int_isinexact_to_sgl(int_value) (int_value << 33 - SGL_EXP_LENGTH)
+
+#define Sgl_roundnearest_from_int(int_value,sgl_value) \
+ if (int_value & 1<<(SGL_EXP_LENGTH - 2)) /* round bit */ \
+ if ((int_value << 34 - SGL_EXP_LENGTH) || Slow(sgl_value)) \
+ Sall(sgl_value)++
+
+#define Dint_isinexact_to_sgl(dint_valueA,dint_valueB) \
+ ((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) || Dintp2(dint_valueB))
+
+#define Sgl_roundnearest_from_dint(dint_valueA,dint_valueB,sgl_value) \
+ if (Dintp1(dint_valueA) & 1<<(SGL_EXP_LENGTH - 2)) \
+ if ((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) || \
+ Dintp2(dint_valueB) || Slow(sgl_value)) Sall(sgl_value)++
+
+#define Dint_isinexact_to_dbl(dint_value) \
+ (Dintp2(dint_value) << 33 - DBL_EXP_LENGTH)
+
+#define Dbl_roundnearest_from_dint(dint_opndB,dbl_opndA,dbl_opndB) \
+ if (Dintp2(dint_opndB) & 1<<(DBL_EXP_LENGTH - 2)) \
+ if ((Dintp2(dint_opndB) << 34 - DBL_EXP_LENGTH) || Dlowp2(dbl_opndB)) \
+ if ((++Dallp2(dbl_opndB))==0) Dallp1(dbl_opndA)++
+
+#define Sgl_isone_roundbit(sgl_value,exponent) \
+ ((Sall(sgl_value) << (SGL_EXP_LENGTH + 1 + exponent)) >> 31)
+
+#define Sgl_isone_stickybit(sgl_value,exponent) \
+ (exponent < (SGL_P - 2) ? \
+ Sall(sgl_value) << (SGL_EXP_LENGTH + 2 + exponent) : FALSE)
+
+
+/*
+ * Double format macros
+ */
+
+#define Dbl_to_sgl_exponent(src_exponent,dest) \
+ dest = src_exponent + (SGL_BIAS - DBL_BIAS)
+
+#define Dbl_to_sgl_mantissa(srcA,srcB,dest,inexact,guard,sticky,odd) \
+ Shiftdouble(Dmantissap1(srcA),Dmantissap2(srcB),29,dest); \
+ guard = Dbit3p2(srcB); \
+ sticky = Dallp2(srcB)<<4; \
+ inexact = guard | sticky; \
+ odd = Dbit2p2(srcB)
+
+#define Dbl_to_sgl_denormalized(srcA,srcB,exp,dest,inexact,guard,sticky,odd,tiny) \
+ Deposit_dexponent(srcA,1); \
+ tiny = TRUE; \
+ if (exp >= -2) { \
+ if (exp == 0) { \
+ inexact = Dallp2(srcB) << 3; \
+ guard = inexact >> 31; \
+ sticky = inexact << 1; \
+ Shiftdouble(Dmantissap1(srcA),Dmantissap2(srcB),29,dest); \
+ odd = dest << 31; \
+ if (inexact) { \
+ switch(Rounding_mode()) { \
+ case ROUNDPLUS: \
+ if (Dbl_iszero_sign(srcA)) { \
+ dest++; \
+ if (Sgl_isone_hidden(dest)) \
+ tiny = FALSE; \
+ dest--; \
+ } \
+ break; \
+ case ROUNDMINUS: \
+ if (Dbl_isone_sign(srcA)) { \
+ dest++; \
+ if (Sgl_isone_hidden(dest)) \
+ tiny = FALSE; \
+ dest--; \
+ } \
+ break; \
+ case ROUNDNEAREST: \
+ if (guard && (sticky || odd)) { \
+ dest++; \
+ if (Sgl_isone_hidden(dest)) \
+ tiny = FALSE; \
+ dest--; \
+ } \
+ break; \
+ } \
+ } \
+ /* shift right by one to get correct result */ \
+ guard = odd; \
+ sticky = inexact; \
+ inexact |= guard; \
+ dest >>= 1; \
+ Deposit_dsign(srcA,0); \
+ Shiftdouble(Dallp1(srcA),Dallp2(srcB),30,dest); \
+ odd = dest << 31; \
+ } \
+ else { \
+ inexact = Dallp2(srcB) << (2 + exp); \
+ guard = inexact >> 31; \
+ sticky = inexact << 1; \
+ Deposit_dsign(srcA,0); \
+ if (exp == -2) dest = Dallp1(srcA); \
+ else Variable_shift_double(Dallp1(srcA),Dallp2(srcB),30-exp,dest); \
+ odd = dest << 31; \
+ } \
+ } \
+ else { \
+ Deposit_dsign(srcA,0); \
+ if (exp > (1 - SGL_P)) { \
+ dest = Dallp1(srcA) >> (- 2 - exp); \
+ inexact = Dallp1(srcA) << (34 + exp); \
+ guard = inexact >> 31; \
+ sticky = (inexact << 1) | Dallp2(srcB); \
+ inexact |= Dallp2(srcB); \
+ odd = dest << 31; \
+ } \
+ else { \
+ dest = 0; \
+ inexact = Dallp1(srcA) | Dallp2(srcB); \
+ if (exp == (1 - SGL_P)) { \
+ guard = Dhidden(srcA); \
+ sticky = Dmantissap1(srcA) | Dallp2(srcB); \
+ } \
+ else { \
+ guard = 0; \
+ sticky = inexact; \
+ } \
+ odd = 0; \
+ } \
+ } \
+ exp = 0
+
+#define Dbl_isinexact_to_fix(dbl_valueA,dbl_valueB,exponent) \
+ (exponent < (DBL_P-33) ? \
+ Dallp2(dbl_valueB) || Dallp1(dbl_valueA) << (DBL_EXP_LENGTH+1+exponent) : \
+ (exponent < (DBL_P-1) ? Dallp2(dbl_valueB) << (exponent + (33-DBL_P)) : \
+ FALSE))
+
+#define Dbl_isoverflow_to_int(exponent,dbl_valueA,dbl_valueB) \
+ ((exponent > SGL_FX_MAX_EXP + 1) || Dsign(dbl_valueA)==0 || \
+ Dmantissap1(dbl_valueA)!=0 || (Dallp2(dbl_valueB)>>21)!=0 )
+
+#define Dbl_isone_roundbit(dbl_valueA,dbl_valueB,exponent) \
+ ((exponent < (DBL_P - 33) ? \
+ Dallp1(dbl_valueA) >> ((30 - DBL_EXP_LENGTH) - exponent) : \
+ Dallp2(dbl_valueB) >> ((DBL_P - 2) - exponent)) & 1)
+
+#define Dbl_isone_stickybit(dbl_valueA,dbl_valueB,exponent) \
+ (exponent < (DBL_P-34) ? \
+ (Dallp2(dbl_valueB) || Dallp1(dbl_valueA)<<(DBL_EXP_LENGTH+2+exponent)) : \
+ (exponent<(DBL_P-2) ? (Dallp2(dbl_valueB) << (exponent + (34-DBL_P))) : \
+ FALSE))
+
+
+/* Int macros */
+
+#define Int_from_sgl_mantissa(sgl_value,exponent) \
+ Sall(sgl_value) = \
+ (unsigned)(Sall(sgl_value) << SGL_EXP_LENGTH)>>(31 - exponent)
+
+#define Int_from_dbl_mantissa(dbl_valueA,dbl_valueB,exponent) \
+ Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),22,Dallp1(dbl_valueA)); \
+ if (exponent < 31) Dallp1(dbl_valueA) >>= 30 - exponent; \
+ else Dallp1(dbl_valueA) <<= 1
+
+#define Int_negate(int_value) int_value = -int_value
+
+
+/* Dint macros */
+
+#define Dint_from_sgl_mantissa(sgl_value,exponent,dresultA,dresultB) \
+ {Sall(sgl_value) <<= SGL_EXP_LENGTH; /* left-justify */ \
+ if (exponent <= 31) { \
+ Dintp1(dresultA) = 0; \
+ Dintp2(dresultB) = (unsigned)Sall(sgl_value) >> (31 - exponent); \
+ } \
+ else { \
+ Dintp1(dresultA) = Sall(sgl_value) >> (63 - exponent); \
+ Dintp2(dresultB) = Sall(sgl_value) << (exponent - 31); \
+ }}
+
+
+#define Dint_from_dbl_mantissa(dbl_valueA,dbl_valueB,exponent,destA,destB) \
+ {if (exponent < 32) { \
+ Dintp1(destA) = 0; \
+ if (exponent <= 20) \
+ Dintp2(destB) = Dallp1(dbl_valueA) >> 20-exponent; \
+ else Variable_shift_double(Dallp1(dbl_valueA),Dallp2(dbl_valueB), \
+ 52-exponent,Dintp2(destB)); \
+ } \
+ else { \
+ if (exponent <= 52) { \
+ Dintp1(destA) = Dallp1(dbl_valueA) >> 52-exponent; \
+ if (exponent == 52) Dintp2(destB) = Dallp2(dbl_valueB); \
+ else Variable_shift_double(Dallp1(dbl_valueA),Dallp2(dbl_valueB), \
+ 52-exponent,Dintp2(destB)); \
+ } \
+ else { \
+ Variable_shift_double(Dallp1(dbl_valueA),Dallp2(dbl_valueB), \
+ 84-exponent,Dintp1(destA)); \
+ Dintp2(destB) = Dallp2(dbl_valueB) << exponent-52; \
+ } \
+ }}
+
+#define Dint_setzero(dresultA,dresultB) \
+ Dintp1(dresultA) = 0; \
+ Dintp2(dresultB) = 0
+
+#define Dint_setone_sign(dresultA,dresultB) \
+ Dintp1(dresultA) = ~Dintp1(dresultA); \
+ if ((Dintp2(dresultB) = -Dintp2(dresultB)) == 0) Dintp1(dresultA)++
+
+#define Dint_set_minint(dresultA,dresultB) \
+ Dintp1(dresultA) = (unsigned int)1<<31; \
+ Dintp2(dresultB) = 0
+
+#define Dint_isone_lowp2(dresultB) (Dintp2(dresultB) & 01)
+
+#define Dint_increment(dresultA,dresultB) \
+ if ((++Dintp2(dresultB))==0) Dintp1(dresultA)++
+
+#define Dint_decrement(dresultA,dresultB) \
+ if ((Dintp2(dresultB)--)==0) Dintp1(dresultA)--
+
+#define Dint_negate(dresultA,dresultB) \
+ Dintp1(dresultA) = ~Dintp1(dresultA); \
+ if ((Dintp2(dresultB) = -Dintp2(dresultB))==0) Dintp1(dresultA)++
+
+#define Dint_copyfromptr(src,destA,destB) \
+ Dintp1(destA) = src->wd0; \
+ Dintp2(destB) = src->wd1
+#define Dint_copytoptr(srcA,srcB,dest) \
+ dest->wd0 = Dintp1(srcA); \
+ dest->wd1 = Dintp2(srcB)
+
+
+/* other macros */
+
+#define Find_ms_one_bit(value, position) \
+ { \
+ int var; \
+ for (var=8; var >=1; var >>= 1) { \
+ if (value >> 32 - position) \
+ position -= var; \
+ else position += var; \
+ } \
+ if ((value >> 32 - position) == 0) \
+ position--; \
+ else position -= 2; \
+ }
+
+
+/*
+ * Unsigned int macros
+ */
+#define Duint_copyfromptr(src,destA,destB) \
+ Dint_copyfromptr(src,destA,destB)
+#define Duint_copytoptr(srcA,srcB,dest) \
+ Dint_copytoptr(srcA,srcB,dest)
+
+#define Suint_isinexact_to_sgl(int_value) \
+ (int_value << 32 - SGL_EXP_LENGTH)
+
+#define Sgl_roundnearest_from_suint(suint_value,sgl_value) \
+ if (suint_value & 1<<(SGL_EXP_LENGTH - 1)) /* round bit */ \
+ if ((suint_value << 33 - SGL_EXP_LENGTH) || Slow(sgl_value)) \
+ Sall(sgl_value)++
+
+#define Duint_isinexact_to_sgl(duint_valueA,duint_valueB) \
+ ((Duintp1(duint_valueA) << 32 - SGL_EXP_LENGTH) || Duintp2(duint_valueB))
+
+#define Sgl_roundnearest_from_duint(duint_valueA,duint_valueB,sgl_value) \
+ if (Duintp1(duint_valueA) & 1<<(SGL_EXP_LENGTH - 1)) \
+ if ((Duintp1(duint_valueA) << 33 - SGL_EXP_LENGTH) || \
+ Duintp2(duint_valueB) || Slow(sgl_value)) Sall(sgl_value)++
+
+#define Duint_isinexact_to_dbl(duint_value) \
+ (Duintp2(duint_value) << 32 - DBL_EXP_LENGTH)
+
+#define Dbl_roundnearest_from_duint(duint_opndB,dbl_opndA,dbl_opndB) \
+ if (Duintp2(duint_opndB) & 1<<(DBL_EXP_LENGTH - 1)) \
+ if ((Duintp2(duint_opndB) << 33 - DBL_EXP_LENGTH) || Dlowp2(dbl_opndB)) \
+ if ((++Dallp2(dbl_opndB))==0) Dallp1(dbl_opndA)++
+
+#define Suint_from_sgl_mantissa(src,exponent,result) \
+ Sall(result) = (unsigned)(Sall(src) << SGL_EXP_LENGTH)>>(31 - exponent)
+
+#define Sgl_isinexact_to_unsigned(sgl_value,exponent) \
+ Sgl_isinexact_to_fix(sgl_value,exponent)
+
+#define Duint_from_sgl_mantissa(sgl_value,exponent,dresultA,dresultB) \
+ {Sall(sgl_value) <<= SGL_EXP_LENGTH; /* left-justify */ \
+ if (exponent <= 31) { \
+ Dintp1(dresultA) = 0; \
+ Dintp2(dresultB) = (unsigned)Sall(sgl_value) >> (31 - exponent); \
+ } \
+ else { \
+ Dintp1(dresultA) = Sall(sgl_value) >> (63 - exponent); \
+ Dintp2(dresultB) = Sall(sgl_value) << (exponent - 31); \
+ } \
+ Sall(sgl_value) >>= SGL_EXP_LENGTH; /* return to original */ \
+ }
+
+#define Duint_setzero(dresultA,dresultB) \
+ Dint_setzero(dresultA,dresultB)
+
+#define Duint_increment(dresultA,dresultB) Dint_increment(dresultA,dresultB)
+
+#define Duint_isone_lowp2(dresultB) Dint_isone_lowp2(dresultB)
+
+#define Suint_from_dbl_mantissa(srcA,srcB,exponent,dest) \
+ Shiftdouble(Dallp1(srcA),Dallp2(srcB),21,dest); \
+ dest = (unsigned)dest >> 31 - exponent
+
+#define Dbl_isinexact_to_unsigned(dbl_valueA,dbl_valueB,exponent) \
+ Dbl_isinexact_to_fix(dbl_valueA,dbl_valueB,exponent)
+
+#define Duint_from_dbl_mantissa(dbl_valueA,dbl_valueB,exponent,destA,destB) \
+ Dint_from_dbl_mantissa(dbl_valueA,dbl_valueB,exponent,destA,destB)
diff --git a/arch/parisc/math-emu/dbl_float.h b/arch/parisc/math-emu/dbl_float.h
new file mode 100644
index 00000000..0c2fa9a9
--- /dev/null
+++ b/arch/parisc/math-emu/dbl_float.h
@@ -0,0 +1,847 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifdef __NO_PA_HDRS
+ PA header file -- do not include this header file for non-PA builds.
+#endif
+
+/* 32-bit word grabbing functions */
+#define Dbl_firstword(value) Dallp1(value)
+#define Dbl_secondword(value) Dallp2(value)
+#define Dbl_thirdword(value) dummy_location
+#define Dbl_fourthword(value) dummy_location
+
+#define Dbl_sign(object) Dsign(object)
+#define Dbl_exponent(object) Dexponent(object)
+#define Dbl_signexponent(object) Dsignexponent(object)
+#define Dbl_mantissap1(object) Dmantissap1(object)
+#define Dbl_mantissap2(object) Dmantissap2(object)
+#define Dbl_exponentmantissap1(object) Dexponentmantissap1(object)
+#define Dbl_allp1(object) Dallp1(object)
+#define Dbl_allp2(object) Dallp2(object)
+
+/* dbl_and_signs ANDs the sign bits of each argument and puts the result
+ * into the first argument. dbl_or_signs ors those same sign bits */
+#define Dbl_and_signs( src1dst, src2) \
+ Dallp1(src1dst) = (Dallp1(src2)|~((unsigned int)1<<31)) & Dallp1(src1dst)
+#define Dbl_or_signs( src1dst, src2) \
+ Dallp1(src1dst) = (Dallp1(src2)&((unsigned int)1<<31)) | Dallp1(src1dst)
+
+/* The hidden bit is always the low bit of the exponent */
+#define Dbl_clear_exponent_set_hidden(srcdst) Deposit_dexponent(srcdst,1)
+#define Dbl_clear_signexponent_set_hidden(srcdst) \
+ Deposit_dsignexponent(srcdst,1)
+#define Dbl_clear_sign(srcdst) Dallp1(srcdst) &= ~((unsigned int)1<<31)
+#define Dbl_clear_signexponent(srcdst) \
+ Dallp1(srcdst) &= Dmantissap1((unsigned int)-1)
+
+/* Exponent field for doubles has already been cleared and may be
+ * included in the shift. Here we need to generate two double width
+ * variable shifts. The insignificant bits can be ignored.
+ * MTSAR f(varamount)
+ * VSHD srcdst.high,srcdst.low => srcdst.low
+ * VSHD 0,srcdst.high => srcdst.high
+ * This is very difficult to model with C expressions since the shift amount
+ * could exceed 32. */
+/* varamount must be less than 64 */
+#define Dbl_rightshift(srcdstA, srcdstB, varamount) \
+ {if((varamount) >= 32) { \
+ Dallp2(srcdstB) = Dallp1(srcdstA) >> (varamount-32); \
+ Dallp1(srcdstA)=0; \
+ } \
+ else if(varamount > 0) { \
+ Variable_shift_double(Dallp1(srcdstA), Dallp2(srcdstB), \
+ (varamount), Dallp2(srcdstB)); \
+ Dallp1(srcdstA) >>= varamount; \
+ } }
+/* varamount must be less than 64 */
+#define Dbl_rightshift_exponentmantissa(srcdstA, srcdstB, varamount) \
+ {if((varamount) >= 32) { \
+ Dallp2(srcdstB) = Dexponentmantissap1(srcdstA) >> (varamount-32); \
+ Dallp1(srcdstA) &= ((unsigned int)1<<31); /* clear expmant field */ \
+ } \
+ else if(varamount > 0) { \
+ Variable_shift_double(Dexponentmantissap1(srcdstA), Dallp2(srcdstB), \
+ (varamount), Dallp2(srcdstB)); \
+ Deposit_dexponentmantissap1(srcdstA, \
+ (Dexponentmantissap1(srcdstA)>>varamount)); \
+ } }
+/* varamount must be less than 64 */
+#define Dbl_leftshift(srcdstA, srcdstB, varamount) \
+ {if((varamount) >= 32) { \
+ Dallp1(srcdstA) = Dallp2(srcdstB) << (varamount-32); \
+ Dallp2(srcdstB)=0; \
+ } \
+ else { \
+ if ((varamount) > 0) { \
+ Dallp1(srcdstA) = (Dallp1(srcdstA) << (varamount)) | \
+ (Dallp2(srcdstB) >> (32-(varamount))); \
+ Dallp2(srcdstB) <<= varamount; \
+ } \
+ } }
+#define Dbl_leftshiftby1_withextent(lefta,leftb,right,resulta,resultb) \
+ Shiftdouble(Dallp1(lefta), Dallp2(leftb), 31, Dallp1(resulta)); \
+ Shiftdouble(Dallp2(leftb), Extall(right), 31, Dallp2(resultb))
+
+#define Dbl_rightshiftby1_withextent(leftb,right,dst) \
+ Extall(dst) = (Dallp2(leftb) << 31) | ((unsigned int)Extall(right) >> 1) | \
+ Extlow(right)
+
+#define Dbl_arithrightshiftby1(srcdstA,srcdstB) \
+ Shiftdouble(Dallp1(srcdstA),Dallp2(srcdstB),1,Dallp2(srcdstB));\
+ Dallp1(srcdstA) = (int)Dallp1(srcdstA) >> 1
+
+/* Sign extend the sign bit with an integer destination */
+#define Dbl_signextendedsign(value) Dsignedsign(value)
+
+#define Dbl_isone_hidden(dbl_value) (Is_dhidden(dbl_value)!=0)
+/* Singles and doubles may include the sign and exponent fields. The
+ * hidden bit and the hidden overflow must be included. */
+#define Dbl_increment(dbl_valueA,dbl_valueB) \
+ if( (Dallp2(dbl_valueB) += 1) == 0 ) Dallp1(dbl_valueA) += 1
+#define Dbl_increment_mantissa(dbl_valueA,dbl_valueB) \
+ if( (Dmantissap2(dbl_valueB) += 1) == 0 ) \
+ Deposit_dmantissap1(dbl_valueA,dbl_valueA+1)
+#define Dbl_decrement(dbl_valueA,dbl_valueB) \
+ if( Dallp2(dbl_valueB) == 0 ) Dallp1(dbl_valueA) -= 1; \
+ Dallp2(dbl_valueB) -= 1
+
+#define Dbl_isone_sign(dbl_value) (Is_dsign(dbl_value)!=0)
+#define Dbl_isone_hiddenoverflow(dbl_value) (Is_dhiddenoverflow(dbl_value)!=0)
+#define Dbl_isone_lowmantissap1(dbl_valueA) (Is_dlowp1(dbl_valueA)!=0)
+#define Dbl_isone_lowmantissap2(dbl_valueB) (Is_dlowp2(dbl_valueB)!=0)
+#define Dbl_isone_signaling(dbl_value) (Is_dsignaling(dbl_value)!=0)
+#define Dbl_is_signalingnan(dbl_value) (Dsignalingnan(dbl_value)==0xfff)
+#define Dbl_isnotzero(dbl_valueA,dbl_valueB) \
+ (Dallp1(dbl_valueA) || Dallp2(dbl_valueB))
+#define Dbl_isnotzero_hiddenhigh7mantissa(dbl_value) \
+ (Dhiddenhigh7mantissa(dbl_value)!=0)
+#define Dbl_isnotzero_exponent(dbl_value) (Dexponent(dbl_value)!=0)
+#define Dbl_isnotzero_mantissa(dbl_valueA,dbl_valueB) \
+ (Dmantissap1(dbl_valueA) || Dmantissap2(dbl_valueB))
+#define Dbl_isnotzero_mantissap1(dbl_valueA) (Dmantissap1(dbl_valueA)!=0)
+#define Dbl_isnotzero_mantissap2(dbl_valueB) (Dmantissap2(dbl_valueB)!=0)
+#define Dbl_isnotzero_exponentmantissa(dbl_valueA,dbl_valueB) \
+ (Dexponentmantissap1(dbl_valueA) || Dmantissap2(dbl_valueB))
+#define Dbl_isnotzero_low4p2(dbl_value) (Dlow4p2(dbl_value)!=0)
+#define Dbl_iszero(dbl_valueA,dbl_valueB) (Dallp1(dbl_valueA)==0 && \
+ Dallp2(dbl_valueB)==0)
+#define Dbl_iszero_allp1(dbl_value) (Dallp1(dbl_value)==0)
+#define Dbl_iszero_allp2(dbl_value) (Dallp2(dbl_value)==0)
+#define Dbl_iszero_hidden(dbl_value) (Is_dhidden(dbl_value)==0)
+#define Dbl_iszero_hiddenoverflow(dbl_value) (Is_dhiddenoverflow(dbl_value)==0)
+#define Dbl_iszero_hiddenhigh3mantissa(dbl_value) \
+ (Dhiddenhigh3mantissa(dbl_value)==0)
+#define Dbl_iszero_hiddenhigh7mantissa(dbl_value) \
+ (Dhiddenhigh7mantissa(dbl_value)==0)
+#define Dbl_iszero_sign(dbl_value) (Is_dsign(dbl_value)==0)
+#define Dbl_iszero_exponent(dbl_value) (Dexponent(dbl_value)==0)
+#define Dbl_iszero_mantissa(dbl_valueA,dbl_valueB) \
+ (Dmantissap1(dbl_valueA)==0 && Dmantissap2(dbl_valueB)==0)
+#define Dbl_iszero_exponentmantissa(dbl_valueA,dbl_valueB) \
+ (Dexponentmantissap1(dbl_valueA)==0 && Dmantissap2(dbl_valueB)==0)
+#define Dbl_isinfinity_exponent(dbl_value) \
+ (Dexponent(dbl_value)==DBL_INFINITY_EXPONENT)
+#define Dbl_isnotinfinity_exponent(dbl_value) \
+ (Dexponent(dbl_value)!=DBL_INFINITY_EXPONENT)
+#define Dbl_isinfinity(dbl_valueA,dbl_valueB) \
+ (Dexponent(dbl_valueA)==DBL_INFINITY_EXPONENT && \
+ Dmantissap1(dbl_valueA)==0 && Dmantissap2(dbl_valueB)==0)
+#define Dbl_isnan(dbl_valueA,dbl_valueB) \
+ (Dexponent(dbl_valueA)==DBL_INFINITY_EXPONENT && \
+ (Dmantissap1(dbl_valueA)!=0 || Dmantissap2(dbl_valueB)!=0))
+#define Dbl_isnotnan(dbl_valueA,dbl_valueB) \
+ (Dexponent(dbl_valueA)!=DBL_INFINITY_EXPONENT || \
+ (Dmantissap1(dbl_valueA)==0 && Dmantissap2(dbl_valueB)==0))
+
+#define Dbl_islessthan(dbl_op1a,dbl_op1b,dbl_op2a,dbl_op2b) \
+ (Dallp1(dbl_op1a) < Dallp1(dbl_op2a) || \
+ (Dallp1(dbl_op1a) == Dallp1(dbl_op2a) && \
+ Dallp2(dbl_op1b) < Dallp2(dbl_op2b)))
+#define Dbl_isgreaterthan(dbl_op1a,dbl_op1b,dbl_op2a,dbl_op2b) \
+ (Dallp1(dbl_op1a) > Dallp1(dbl_op2a) || \
+ (Dallp1(dbl_op1a) == Dallp1(dbl_op2a) && \
+ Dallp2(dbl_op1b) > Dallp2(dbl_op2b)))
+#define Dbl_isnotlessthan(dbl_op1a,dbl_op1b,dbl_op2a,dbl_op2b) \
+ (Dallp1(dbl_op1a) > Dallp1(dbl_op2a) || \
+ (Dallp1(dbl_op1a) == Dallp1(dbl_op2a) && \
+ Dallp2(dbl_op1b) >= Dallp2(dbl_op2b)))
+#define Dbl_isnotgreaterthan(dbl_op1a,dbl_op1b,dbl_op2a,dbl_op2b) \
+ (Dallp1(dbl_op1a) < Dallp1(dbl_op2a) || \
+ (Dallp1(dbl_op1a) == Dallp1(dbl_op2a) && \
+ Dallp2(dbl_op1b) <= Dallp2(dbl_op2b)))
+#define Dbl_isequal(dbl_op1a,dbl_op1b,dbl_op2a,dbl_op2b) \
+ ((Dallp1(dbl_op1a) == Dallp1(dbl_op2a)) && \
+ (Dallp2(dbl_op1b) == Dallp2(dbl_op2b)))
+
+#define Dbl_leftshiftby8(dbl_valueA,dbl_valueB) \
+ Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),24,Dallp1(dbl_valueA)); \
+ Dallp2(dbl_valueB) <<= 8
+#define Dbl_leftshiftby7(dbl_valueA,dbl_valueB) \
+ Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),25,Dallp1(dbl_valueA)); \
+ Dallp2(dbl_valueB) <<= 7
+#define Dbl_leftshiftby4(dbl_valueA,dbl_valueB) \
+ Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),28,Dallp1(dbl_valueA)); \
+ Dallp2(dbl_valueB) <<= 4
+#define Dbl_leftshiftby3(dbl_valueA,dbl_valueB) \
+ Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),29,Dallp1(dbl_valueA)); \
+ Dallp2(dbl_valueB) <<= 3
+#define Dbl_leftshiftby2(dbl_valueA,dbl_valueB) \
+ Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),30,Dallp1(dbl_valueA)); \
+ Dallp2(dbl_valueB) <<= 2
+#define Dbl_leftshiftby1(dbl_valueA,dbl_valueB) \
+ Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),31,Dallp1(dbl_valueA)); \
+ Dallp2(dbl_valueB) <<= 1
+
+#define Dbl_rightshiftby8(dbl_valueA,dbl_valueB) \
+ Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),8,Dallp2(dbl_valueB)); \
+ Dallp1(dbl_valueA) >>= 8
+#define Dbl_rightshiftby4(dbl_valueA,dbl_valueB) \
+ Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),4,Dallp2(dbl_valueB)); \
+ Dallp1(dbl_valueA) >>= 4
+#define Dbl_rightshiftby2(dbl_valueA,dbl_valueB) \
+ Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),2,Dallp2(dbl_valueB)); \
+ Dallp1(dbl_valueA) >>= 2
+#define Dbl_rightshiftby1(dbl_valueA,dbl_valueB) \
+ Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),1,Dallp2(dbl_valueB)); \
+ Dallp1(dbl_valueA) >>= 1
+
+/* This magnitude comparison uses the signless first words and
+ * the regular part2 words. The comparison is graphically:
+ *
+ * 1st greater? -------------
+ * |
+ * 1st less?-----------------+---------
+ * | |
+ * 2nd greater or equal----->| |
+ * False True
+ */
+#define Dbl_ismagnitudeless(leftB,rightB,signlessleft,signlessright) \
+ ((signlessleft <= signlessright) && \
+ ( (signlessleft < signlessright) || (Dallp2(leftB)<Dallp2(rightB)) ))
+
+#define Dbl_copytoint_exponentmantissap1(src,dest) \
+ dest = Dexponentmantissap1(src)
+
+/* A quiet NaN has the high mantissa bit clear and at least on other (in this
+ * case the adjacent bit) bit set. */
+#define Dbl_set_quiet(dbl_value) Deposit_dhigh2mantissa(dbl_value,1)
+#define Dbl_set_exponent(dbl_value, exp) Deposit_dexponent(dbl_value,exp)
+
+#define Dbl_set_mantissa(desta,destb,valuea,valueb) \
+ Deposit_dmantissap1(desta,valuea); \
+ Dmantissap2(destb) = Dmantissap2(valueb)
+#define Dbl_set_mantissap1(desta,valuea) \
+ Deposit_dmantissap1(desta,valuea)
+#define Dbl_set_mantissap2(destb,valueb) \
+ Dmantissap2(destb) = Dmantissap2(valueb)
+
+#define Dbl_set_exponentmantissa(desta,destb,valuea,valueb) \
+ Deposit_dexponentmantissap1(desta,valuea); \
+ Dmantissap2(destb) = Dmantissap2(valueb)
+#define Dbl_set_exponentmantissap1(dest,value) \
+ Deposit_dexponentmantissap1(dest,value)
+
+#define Dbl_copyfromptr(src,desta,destb) \
+ Dallp1(desta) = src->wd0; \
+ Dallp2(destb) = src->wd1
+#define Dbl_copytoptr(srca,srcb,dest) \
+ dest->wd0 = Dallp1(srca); \
+ dest->wd1 = Dallp2(srcb)
+
+/* An infinity is represented with the max exponent and a zero mantissa */
+#define Dbl_setinfinity_exponent(dbl_value) \
+ Deposit_dexponent(dbl_value,DBL_INFINITY_EXPONENT)
+#define Dbl_setinfinity_exponentmantissa(dbl_valueA,dbl_valueB) \
+ Deposit_dexponentmantissap1(dbl_valueA, \
+ (DBL_INFINITY_EXPONENT << (32-(1+DBL_EXP_LENGTH)))); \
+ Dmantissap2(dbl_valueB) = 0
+#define Dbl_setinfinitypositive(dbl_valueA,dbl_valueB) \
+ Dallp1(dbl_valueA) \
+ = (DBL_INFINITY_EXPONENT << (32-(1+DBL_EXP_LENGTH))); \
+ Dmantissap2(dbl_valueB) = 0
+#define Dbl_setinfinitynegative(dbl_valueA,dbl_valueB) \
+ Dallp1(dbl_valueA) = ((unsigned int)1<<31) | \
+ (DBL_INFINITY_EXPONENT << (32-(1+DBL_EXP_LENGTH))); \
+ Dmantissap2(dbl_valueB) = 0
+#define Dbl_setinfinity(dbl_valueA,dbl_valueB,sign) \
+ Dallp1(dbl_valueA) = ((unsigned int)sign << 31) | \
+ (DBL_INFINITY_EXPONENT << (32-(1+DBL_EXP_LENGTH))); \
+ Dmantissap2(dbl_valueB) = 0
+
+#define Dbl_sethigh4bits(dbl_value, extsign) Deposit_dhigh4p1(dbl_value,extsign)
+#define Dbl_set_sign(dbl_value,sign) Deposit_dsign(dbl_value,sign)
+#define Dbl_invert_sign(dbl_value) Deposit_dsign(dbl_value,~Dsign(dbl_value))
+#define Dbl_setone_sign(dbl_value) Deposit_dsign(dbl_value,1)
+#define Dbl_setone_lowmantissap2(dbl_value) Deposit_dlowp2(dbl_value,1)
+#define Dbl_setzero_sign(dbl_value) Dallp1(dbl_value) &= 0x7fffffff
+#define Dbl_setzero_exponent(dbl_value) \
+ Dallp1(dbl_value) &= 0x800fffff
+#define Dbl_setzero_mantissa(dbl_valueA,dbl_valueB) \
+ Dallp1(dbl_valueA) &= 0xfff00000; \
+ Dallp2(dbl_valueB) = 0
+#define Dbl_setzero_mantissap1(dbl_value) Dallp1(dbl_value) &= 0xfff00000
+#define Dbl_setzero_mantissap2(dbl_value) Dallp2(dbl_value) = 0
+#define Dbl_setzero_exponentmantissa(dbl_valueA,dbl_valueB) \
+ Dallp1(dbl_valueA) &= 0x80000000; \
+ Dallp2(dbl_valueB) = 0
+#define Dbl_setzero_exponentmantissap1(dbl_valueA) \
+ Dallp1(dbl_valueA) &= 0x80000000
+#define Dbl_setzero(dbl_valueA,dbl_valueB) \
+ Dallp1(dbl_valueA) = 0; Dallp2(dbl_valueB) = 0
+#define Dbl_setzerop1(dbl_value) Dallp1(dbl_value) = 0
+#define Dbl_setzerop2(dbl_value) Dallp2(dbl_value) = 0
+#define Dbl_setnegativezero(dbl_value) \
+ Dallp1(dbl_value) = (unsigned int)1 << 31; Dallp2(dbl_value) = 0
+#define Dbl_setnegativezerop1(dbl_value) Dallp1(dbl_value) = (unsigned int)1<<31
+
+/* Use the following macro for both overflow & underflow conditions */
+#define ovfl -
+#define unfl +
+#define Dbl_setwrapped_exponent(dbl_value,exponent,op) \
+ Deposit_dexponent(dbl_value,(exponent op DBL_WRAP))
+
+#define Dbl_setlargestpositive(dbl_valueA,dbl_valueB) \
+ Dallp1(dbl_valueA) = ((DBL_EMAX+DBL_BIAS) << (32-(1+DBL_EXP_LENGTH))) \
+ | ((1<<(32-(1+DBL_EXP_LENGTH))) - 1 ); \
+ Dallp2(dbl_valueB) = 0xFFFFFFFF
+#define Dbl_setlargestnegative(dbl_valueA,dbl_valueB) \
+ Dallp1(dbl_valueA) = ((DBL_EMAX+DBL_BIAS) << (32-(1+DBL_EXP_LENGTH))) \
+ | ((1<<(32-(1+DBL_EXP_LENGTH))) - 1 ) \
+ | ((unsigned int)1<<31); \
+ Dallp2(dbl_valueB) = 0xFFFFFFFF
+#define Dbl_setlargest_exponentmantissa(dbl_valueA,dbl_valueB) \
+ Deposit_dexponentmantissap1(dbl_valueA, \
+ (((DBL_EMAX+DBL_BIAS) << (32-(1+DBL_EXP_LENGTH))) \
+ | ((1<<(32-(1+DBL_EXP_LENGTH))) - 1 ))); \
+ Dallp2(dbl_valueB) = 0xFFFFFFFF
+
+#define Dbl_setnegativeinfinity(dbl_valueA,dbl_valueB) \
+ Dallp1(dbl_valueA) = ((1<<DBL_EXP_LENGTH) | DBL_INFINITY_EXPONENT) \
+ << (32-(1+DBL_EXP_LENGTH)) ; \
+ Dallp2(dbl_valueB) = 0
+#define Dbl_setlargest(dbl_valueA,dbl_valueB,sign) \
+ Dallp1(dbl_valueA) = ((unsigned int)sign << 31) | \
+ ((DBL_EMAX+DBL_BIAS) << (32-(1+DBL_EXP_LENGTH))) | \
+ ((1 << (32-(1+DBL_EXP_LENGTH))) - 1 ); \
+ Dallp2(dbl_valueB) = 0xFFFFFFFF
+
+
+/* The high bit is always zero so arithmetic or logical shifts will work. */
+#define Dbl_right_align(srcdstA,srcdstB,shift,extent) \
+ if( shift >= 32 ) \
+ { \
+ /* Big shift requires examining the portion shifted off \
+ the end to properly set inexact. */ \
+ if(shift < 64) \
+ { \
+ if(shift > 32) \
+ { \
+ Variable_shift_double(Dallp1(srcdstA),Dallp2(srcdstB), \
+ shift-32, Extall(extent)); \
+ if(Dallp2(srcdstB) << 64 - (shift)) Ext_setone_low(extent); \
+ } \
+ else Extall(extent) = Dallp2(srcdstB); \
+ Dallp2(srcdstB) = Dallp1(srcdstA) >> (shift - 32); \
+ } \
+ else \
+ { \
+ Extall(extent) = Dallp1(srcdstA); \
+ if(Dallp2(srcdstB)) Ext_setone_low(extent); \
+ Dallp2(srcdstB) = 0; \
+ } \
+ Dallp1(srcdstA) = 0; \
+ } \
+ else \
+ { \
+ /* Small alignment is simpler. Extension is easily set. */ \
+ if (shift > 0) \
+ { \
+ Extall(extent) = Dallp2(srcdstB) << 32 - (shift); \
+ Variable_shift_double(Dallp1(srcdstA),Dallp2(srcdstB),shift, \
+ Dallp2(srcdstB)); \
+ Dallp1(srcdstA) >>= shift; \
+ } \
+ else Extall(extent) = 0; \
+ }
+
+/*
+ * Here we need to shift the result right to correct for an overshift
+ * (due to the exponent becoming negative) during normalization.
+ */
+#define Dbl_fix_overshift(srcdstA,srcdstB,shift,extent) \
+ Extall(extent) = Dallp2(srcdstB) << 32 - (shift); \
+ Dallp2(srcdstB) = (Dallp1(srcdstA) << 32 - (shift)) | \
+ (Dallp2(srcdstB) >> (shift)); \
+ Dallp1(srcdstA) = Dallp1(srcdstA) >> shift
+
+#define Dbl_hiddenhigh3mantissa(dbl_value) Dhiddenhigh3mantissa(dbl_value)
+#define Dbl_hidden(dbl_value) Dhidden(dbl_value)
+#define Dbl_lowmantissap2(dbl_value) Dlowp2(dbl_value)
+
+/* The left argument is never smaller than the right argument */
+#define Dbl_subtract(lefta,leftb,righta,rightb,resulta,resultb) \
+ if( Dallp2(rightb) > Dallp2(leftb) ) Dallp1(lefta)--; \
+ Dallp2(resultb) = Dallp2(leftb) - Dallp2(rightb); \
+ Dallp1(resulta) = Dallp1(lefta) - Dallp1(righta)
+
+/* Subtract right augmented with extension from left augmented with zeros and
+ * store into result and extension. */
+#define Dbl_subtract_withextension(lefta,leftb,righta,rightb,extent,resulta,resultb) \
+ Dbl_subtract(lefta,leftb,righta,rightb,resulta,resultb); \
+ if( (Extall(extent) = 0-Extall(extent)) ) \
+ { \
+ if((Dallp2(resultb)--) == 0) Dallp1(resulta)--; \
+ }
+
+#define Dbl_addition(lefta,leftb,righta,rightb,resulta,resultb) \
+ /* If the sum of the low words is less than either source, then \
+ * an overflow into the next word occurred. */ \
+ Dallp1(resulta) = Dallp1(lefta) + Dallp1(righta); \
+ if((Dallp2(resultb) = Dallp2(leftb) + Dallp2(rightb)) < Dallp2(rightb)) \
+ Dallp1(resulta)++
+
+#define Dbl_xortointp1(left,right,result) \
+ result = Dallp1(left) XOR Dallp1(right)
+
+#define Dbl_xorfromintp1(left,right,result) \
+ Dallp1(result) = left XOR Dallp1(right)
+
+#define Dbl_swap_lower(left,right) \
+ Dallp2(left) = Dallp2(left) XOR Dallp2(right); \
+ Dallp2(right) = Dallp2(left) XOR Dallp2(right); \
+ Dallp2(left) = Dallp2(left) XOR Dallp2(right)
+
+/* Need to Initialize */
+#define Dbl_makequietnan(desta,destb) \
+ Dallp1(desta) = ((DBL_EMAX+DBL_BIAS)+1)<< (32-(1+DBL_EXP_LENGTH)) \
+ | (1<<(32-(1+DBL_EXP_LENGTH+2))); \
+ Dallp2(destb) = 0
+#define Dbl_makesignalingnan(desta,destb) \
+ Dallp1(desta) = ((DBL_EMAX+DBL_BIAS)+1)<< (32-(1+DBL_EXP_LENGTH)) \
+ | (1<<(32-(1+DBL_EXP_LENGTH+1))); \
+ Dallp2(destb) = 0
+
+#define Dbl_normalize(dbl_opndA,dbl_opndB,exponent) \
+ while(Dbl_iszero_hiddenhigh7mantissa(dbl_opndA)) { \
+ Dbl_leftshiftby8(dbl_opndA,dbl_opndB); \
+ exponent -= 8; \
+ } \
+ if(Dbl_iszero_hiddenhigh3mantissa(dbl_opndA)) { \
+ Dbl_leftshiftby4(dbl_opndA,dbl_opndB); \
+ exponent -= 4; \
+ } \
+ while(Dbl_iszero_hidden(dbl_opndA)) { \
+ Dbl_leftshiftby1(dbl_opndA,dbl_opndB); \
+ exponent -= 1; \
+ }
+
+#define Twoword_add(src1dstA,src1dstB,src2A,src2B) \
+ /* \
+ * want this macro to generate: \
+ * ADD src1dstB,src2B,src1dstB; \
+ * ADDC src1dstA,src2A,src1dstA; \
+ */ \
+ if ((src1dstB) + (src2B) < (src1dstB)) Dallp1(src1dstA)++; \
+ Dallp1(src1dstA) += (src2A); \
+ Dallp2(src1dstB) += (src2B)
+
+#define Twoword_subtract(src1dstA,src1dstB,src2A,src2B) \
+ /* \
+ * want this macro to generate: \
+ * SUB src1dstB,src2B,src1dstB; \
+ * SUBB src1dstA,src2A,src1dstA; \
+ */ \
+ if ((src1dstB) < (src2B)) Dallp1(src1dstA)--; \
+ Dallp1(src1dstA) -= (src2A); \
+ Dallp2(src1dstB) -= (src2B)
+
+#define Dbl_setoverflow(resultA,resultB) \
+ /* set result to infinity or largest number */ \
+ switch (Rounding_mode()) { \
+ case ROUNDPLUS: \
+ if (Dbl_isone_sign(resultA)) { \
+ Dbl_setlargestnegative(resultA,resultB); \
+ } \
+ else { \
+ Dbl_setinfinitypositive(resultA,resultB); \
+ } \
+ break; \
+ case ROUNDMINUS: \
+ if (Dbl_iszero_sign(resultA)) { \
+ Dbl_setlargestpositive(resultA,resultB); \
+ } \
+ else { \
+ Dbl_setinfinitynegative(resultA,resultB); \
+ } \
+ break; \
+ case ROUNDNEAREST: \
+ Dbl_setinfinity_exponentmantissa(resultA,resultB); \
+ break; \
+ case ROUNDZERO: \
+ Dbl_setlargest_exponentmantissa(resultA,resultB); \
+ }
+
+#define Dbl_denormalize(opndp1,opndp2,exponent,guard,sticky,inexact) \
+ Dbl_clear_signexponent_set_hidden(opndp1); \
+ if (exponent >= (1-DBL_P)) { \
+ if (exponent >= -31) { \
+ guard = (Dallp2(opndp2) >> -exponent) & 1; \
+ if (exponent < 0) sticky |= Dallp2(opndp2) << (32+exponent); \
+ if (exponent > -31) { \
+ Variable_shift_double(opndp1,opndp2,1-exponent,opndp2); \
+ Dallp1(opndp1) >>= 1-exponent; \
+ } \
+ else { \
+ Dallp2(opndp2) = Dallp1(opndp1); \
+ Dbl_setzerop1(opndp1); \
+ } \
+ } \
+ else { \
+ guard = (Dallp1(opndp1) >> -32-exponent) & 1; \
+ if (exponent == -32) sticky |= Dallp2(opndp2); \
+ else sticky |= (Dallp2(opndp2) | Dallp1(opndp1) << 64+exponent); \
+ Dallp2(opndp2) = Dallp1(opndp1) >> -31-exponent; \
+ Dbl_setzerop1(opndp1); \
+ } \
+ inexact = guard | sticky; \
+ } \
+ else { \
+ guard = 0; \
+ sticky |= (Dallp1(opndp1) | Dallp2(opndp2)); \
+ Dbl_setzero(opndp1,opndp2); \
+ inexact = sticky; \
+ }
+
+/*
+ * The fused multiply add instruction requires a double extended format,
+ * with 106 bits of mantissa.
+ */
+#define DBLEXT_THRESHOLD 106
+
+#define Dblext_setzero(valA,valB,valC,valD) \
+ Dextallp1(valA) = 0; Dextallp2(valB) = 0; \
+ Dextallp3(valC) = 0; Dextallp4(valD) = 0
+
+
+#define Dblext_isnotzero_mantissap3(valC) (Dextallp3(valC)!=0)
+#define Dblext_isnotzero_mantissap4(valD) (Dextallp3(valD)!=0)
+#define Dblext_isone_lowp2(val) (Dextlowp2(val)!=0)
+#define Dblext_isone_highp3(val) (Dexthighp3(val)!=0)
+#define Dblext_isnotzero_low31p3(val) (Dextlow31p3(val)!=0)
+#define Dblext_iszero(valA,valB,valC,valD) (Dextallp1(valA)==0 && \
+ Dextallp2(valB)==0 && Dextallp3(valC)==0 && Dextallp4(valD)==0)
+
+#define Dblext_copy(srca,srcb,srcc,srcd,desta,destb,destc,destd) \
+ Dextallp1(desta) = Dextallp4(srca); \
+ Dextallp2(destb) = Dextallp4(srcb); \
+ Dextallp3(destc) = Dextallp4(srcc); \
+ Dextallp4(destd) = Dextallp4(srcd)
+
+#define Dblext_swap_lower(leftp2,leftp3,leftp4,rightp2,rightp3,rightp4) \
+ Dextallp2(leftp2) = Dextallp2(leftp2) XOR Dextallp2(rightp2); \
+ Dextallp2(rightp2) = Dextallp2(leftp2) XOR Dextallp2(rightp2); \
+ Dextallp2(leftp2) = Dextallp2(leftp2) XOR Dextallp2(rightp2); \
+ Dextallp3(leftp3) = Dextallp3(leftp3) XOR Dextallp3(rightp3); \
+ Dextallp3(rightp3) = Dextallp3(leftp3) XOR Dextallp3(rightp3); \
+ Dextallp3(leftp3) = Dextallp3(leftp3) XOR Dextallp3(rightp3); \
+ Dextallp4(leftp4) = Dextallp4(leftp4) XOR Dextallp4(rightp4); \
+ Dextallp4(rightp4) = Dextallp4(leftp4) XOR Dextallp4(rightp4); \
+ Dextallp4(leftp4) = Dextallp4(leftp4) XOR Dextallp4(rightp4)
+
+#define Dblext_setone_lowmantissap4(dbl_value) Deposit_dextlowp4(dbl_value,1)
+
+/* The high bit is always zero so arithmetic or logical shifts will work. */
+#define Dblext_right_align(srcdstA,srcdstB,srcdstC,srcdstD,shift) \
+ {int shiftamt, sticky; \
+ shiftamt = shift % 32; \
+ sticky = 0; \
+ switch (shift/32) { \
+ case 0: if (shiftamt > 0) { \
+ sticky = Dextallp4(srcdstD) << 32 - (shiftamt); \
+ Variable_shift_double(Dextallp3(srcdstC), \
+ Dextallp4(srcdstD),shiftamt,Dextallp4(srcdstD)); \
+ Variable_shift_double(Dextallp2(srcdstB), \
+ Dextallp3(srcdstC),shiftamt,Dextallp3(srcdstC)); \
+ Variable_shift_double(Dextallp1(srcdstA), \
+ Dextallp2(srcdstB),shiftamt,Dextallp2(srcdstB)); \
+ Dextallp1(srcdstA) >>= shiftamt; \
+ } \
+ break; \
+ case 1: if (shiftamt > 0) { \
+ sticky = (Dextallp3(srcdstC) << 31 - shiftamt) | \
+ Dextallp4(srcdstD); \
+ Variable_shift_double(Dextallp2(srcdstB), \
+ Dextallp3(srcdstC),shiftamt,Dextallp4(srcdstD)); \
+ Variable_shift_double(Dextallp1(srcdstA), \
+ Dextallp2(srcdstB),shiftamt,Dextallp3(srcdstC)); \
+ } \
+ else { \
+ sticky = Dextallp4(srcdstD); \
+ Dextallp4(srcdstD) = Dextallp3(srcdstC); \
+ Dextallp3(srcdstC) = Dextallp2(srcdstB); \
+ } \
+ Dextallp2(srcdstB) = Dextallp1(srcdstA) >> shiftamt; \
+ Dextallp1(srcdstA) = 0; \
+ break; \
+ case 2: if (shiftamt > 0) { \
+ sticky = (Dextallp2(srcdstB) << 31 - shiftamt) | \
+ Dextallp3(srcdstC) | Dextallp4(srcdstD); \
+ Variable_shift_double(Dextallp1(srcdstA), \
+ Dextallp2(srcdstB),shiftamt,Dextallp4(srcdstD)); \
+ } \
+ else { \
+ sticky = Dextallp3(srcdstC) | Dextallp4(srcdstD); \
+ Dextallp4(srcdstD) = Dextallp2(srcdstB); \
+ } \
+ Dextallp3(srcdstC) = Dextallp1(srcdstA) >> shiftamt; \
+ Dextallp1(srcdstA) = Dextallp2(srcdstB) = 0; \
+ break; \
+ case 3: if (shiftamt > 0) { \
+ sticky = (Dextallp1(srcdstA) << 31 - shiftamt) | \
+ Dextallp2(srcdstB) | Dextallp3(srcdstC) | \
+ Dextallp4(srcdstD); \
+ } \
+ else { \
+ sticky = Dextallp2(srcdstB) | Dextallp3(srcdstC) | \
+ Dextallp4(srcdstD); \
+ } \
+ Dextallp4(srcdstD) = Dextallp1(srcdstA) >> shiftamt; \
+ Dextallp1(srcdstA) = Dextallp2(srcdstB) = 0; \
+ Dextallp3(srcdstC) = 0; \
+ break; \
+ } \
+ if (sticky) Dblext_setone_lowmantissap4(srcdstD); \
+ }
+
+/* The left argument is never smaller than the right argument */
+#define Dblext_subtract(lefta,leftb,leftc,leftd,righta,rightb,rightc,rightd,resulta,resultb,resultc,resultd) \
+ if( Dextallp4(rightd) > Dextallp4(leftd) ) \
+ if( (Dextallp3(leftc)--) == 0) \
+ if( (Dextallp2(leftb)--) == 0) Dextallp1(lefta)--; \
+ Dextallp4(resultd) = Dextallp4(leftd) - Dextallp4(rightd); \
+ if( Dextallp3(rightc) > Dextallp3(leftc) ) \
+ if( (Dextallp2(leftb)--) == 0) Dextallp1(lefta)--; \
+ Dextallp3(resultc) = Dextallp3(leftc) - Dextallp3(rightc); \
+ if( Dextallp2(rightb) > Dextallp2(leftb) ) Dextallp1(lefta)--; \
+ Dextallp2(resultb) = Dextallp2(leftb) - Dextallp2(rightb); \
+ Dextallp1(resulta) = Dextallp1(lefta) - Dextallp1(righta)
+
+#define Dblext_addition(lefta,leftb,leftc,leftd,righta,rightb,rightc,rightd,resulta,resultb,resultc,resultd) \
+ /* If the sum of the low words is less than either source, then \
+ * an overflow into the next word occurred. */ \
+ if ((Dextallp4(resultd) = Dextallp4(leftd)+Dextallp4(rightd)) < \
+ Dextallp4(rightd)) \
+ if((Dextallp3(resultc) = Dextallp3(leftc)+Dextallp3(rightc)+1) <= \
+ Dextallp3(rightc)) \
+ if((Dextallp2(resultb) = Dextallp2(leftb)+Dextallp2(rightb)+1) \
+ <= Dextallp2(rightb)) \
+ Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta)+1; \
+ else Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta); \
+ else \
+ if ((Dextallp2(resultb) = Dextallp2(leftb)+Dextallp2(rightb)) < \
+ Dextallp2(rightb)) \
+ Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta)+1; \
+ else Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta); \
+ else \
+ if ((Dextallp3(resultc) = Dextallp3(leftc)+Dextallp3(rightc)) < \
+ Dextallp3(rightc)) \
+ if ((Dextallp2(resultb) = Dextallp2(leftb)+Dextallp2(rightb)+1) \
+ <= Dextallp2(rightb)) \
+ Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta)+1; \
+ else Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta); \
+ else \
+ if ((Dextallp2(resultb) = Dextallp2(leftb)+Dextallp2(rightb)) < \
+ Dextallp2(rightb)) \
+ Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta)+1; \
+ else Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta)
+
+
+#define Dblext_arithrightshiftby1(srcdstA,srcdstB,srcdstC,srcdstD) \
+ Shiftdouble(Dextallp3(srcdstC),Dextallp4(srcdstD),1,Dextallp4(srcdstD)); \
+ Shiftdouble(Dextallp2(srcdstB),Dextallp3(srcdstC),1,Dextallp3(srcdstC)); \
+ Shiftdouble(Dextallp1(srcdstA),Dextallp2(srcdstB),1,Dextallp2(srcdstB)); \
+ Dextallp1(srcdstA) = (int)Dextallp1(srcdstA) >> 1
+
+#define Dblext_leftshiftby8(valA,valB,valC,valD) \
+ Shiftdouble(Dextallp1(valA),Dextallp2(valB),24,Dextallp1(valA)); \
+ Shiftdouble(Dextallp2(valB),Dextallp3(valC),24,Dextallp2(valB)); \
+ Shiftdouble(Dextallp3(valC),Dextallp4(valD),24,Dextallp3(valC)); \
+ Dextallp4(valD) <<= 8
+#define Dblext_leftshiftby4(valA,valB,valC,valD) \
+ Shiftdouble(Dextallp1(valA),Dextallp2(valB),28,Dextallp1(valA)); \
+ Shiftdouble(Dextallp2(valB),Dextallp3(valC),28,Dextallp2(valB)); \
+ Shiftdouble(Dextallp3(valC),Dextallp4(valD),28,Dextallp3(valC)); \
+ Dextallp4(valD) <<= 4
+#define Dblext_leftshiftby3(valA,valB,valC,valD) \
+ Shiftdouble(Dextallp1(valA),Dextallp2(valB),29,Dextallp1(valA)); \
+ Shiftdouble(Dextallp2(valB),Dextallp3(valC),29,Dextallp2(valB)); \
+ Shiftdouble(Dextallp3(valC),Dextallp4(valD),29,Dextallp3(valC)); \
+ Dextallp4(valD) <<= 3
+#define Dblext_leftshiftby2(valA,valB,valC,valD) \
+ Shiftdouble(Dextallp1(valA),Dextallp2(valB),30,Dextallp1(valA)); \
+ Shiftdouble(Dextallp2(valB),Dextallp3(valC),30,Dextallp2(valB)); \
+ Shiftdouble(Dextallp3(valC),Dextallp4(valD),30,Dextallp3(valC)); \
+ Dextallp4(valD) <<= 2
+#define Dblext_leftshiftby1(valA,valB,valC,valD) \
+ Shiftdouble(Dextallp1(valA),Dextallp2(valB),31,Dextallp1(valA)); \
+ Shiftdouble(Dextallp2(valB),Dextallp3(valC),31,Dextallp2(valB)); \
+ Shiftdouble(Dextallp3(valC),Dextallp4(valD),31,Dextallp3(valC)); \
+ Dextallp4(valD) <<= 1
+
+#define Dblext_rightshiftby4(valueA,valueB,valueC,valueD) \
+ Shiftdouble(Dextallp3(valueC),Dextallp4(valueD),4,Dextallp4(valueD)); \
+ Shiftdouble(Dextallp2(valueB),Dextallp3(valueC),4,Dextallp3(valueC)); \
+ Shiftdouble(Dextallp1(valueA),Dextallp2(valueB),4,Dextallp2(valueB)); \
+ Dextallp1(valueA) >>= 4
+#define Dblext_rightshiftby1(valueA,valueB,valueC,valueD) \
+ Shiftdouble(Dextallp3(valueC),Dextallp4(valueD),1,Dextallp4(valueD)); \
+ Shiftdouble(Dextallp2(valueB),Dextallp3(valueC),1,Dextallp3(valueC)); \
+ Shiftdouble(Dextallp1(valueA),Dextallp2(valueB),1,Dextallp2(valueB)); \
+ Dextallp1(valueA) >>= 1
+
+#define Dblext_xortointp1(left,right,result) Dbl_xortointp1(left,right,result)
+
+#define Dblext_xorfromintp1(left,right,result) \
+ Dbl_xorfromintp1(left,right,result)
+
+#define Dblext_copytoint_exponentmantissap1(src,dest) \
+ Dbl_copytoint_exponentmantissap1(src,dest)
+
+#define Dblext_ismagnitudeless(leftB,rightB,signlessleft,signlessright) \
+ Dbl_ismagnitudeless(leftB,rightB,signlessleft,signlessright)
+
+#define Dbl_copyto_dblext(src1,src2,dest1,dest2,dest3,dest4) \
+ Dextallp1(dest1) = Dallp1(src1); Dextallp2(dest2) = Dallp2(src2); \
+ Dextallp3(dest3) = 0; Dextallp4(dest4) = 0
+
+#define Dblext_set_sign(dbl_value,sign) Dbl_set_sign(dbl_value,sign)
+#define Dblext_clear_signexponent_set_hidden(srcdst) \
+ Dbl_clear_signexponent_set_hidden(srcdst)
+#define Dblext_clear_signexponent(srcdst) Dbl_clear_signexponent(srcdst)
+#define Dblext_clear_sign(srcdst) Dbl_clear_sign(srcdst)
+#define Dblext_isone_hidden(dbl_value) Dbl_isone_hidden(dbl_value)
+
+/*
+ * The Fourword_add() macro assumes that integers are 4 bytes in size.
+ * It will break if this is not the case.
+ */
+
+#define Fourword_add(src1dstA,src1dstB,src1dstC,src1dstD,src2A,src2B,src2C,src2D) \
+ /* \
+ * want this macro to generate: \
+ * ADD src1dstD,src2D,src1dstD; \
+ * ADDC src1dstC,src2C,src1dstC; \
+ * ADDC src1dstB,src2B,src1dstB; \
+ * ADDC src1dstA,src2A,src1dstA; \
+ */ \
+ if ((unsigned int)(src1dstD += (src2D)) < (unsigned int)(src2D)) { \
+ if ((unsigned int)(src1dstC += (src2C) + 1) <= \
+ (unsigned int)(src2C)) { \
+ if ((unsigned int)(src1dstB += (src2B) + 1) <= \
+ (unsigned int)(src2B)) src1dstA++; \
+ } \
+ else if ((unsigned int)(src1dstB += (src2B)) < \
+ (unsigned int)(src2B)) src1dstA++; \
+ } \
+ else { \
+ if ((unsigned int)(src1dstC += (src2C)) < \
+ (unsigned int)(src2C)) { \
+ if ((unsigned int)(src1dstB += (src2B) + 1) <= \
+ (unsigned int)(src2B)) src1dstA++; \
+ } \
+ else if ((unsigned int)(src1dstB += (src2B)) < \
+ (unsigned int)(src2B)) src1dstA++; \
+ } \
+ src1dstA += (src2A)
+
+#define Dblext_denormalize(opndp1,opndp2,opndp3,opndp4,exponent,is_tiny) \
+ {int shiftamt, sticky; \
+ is_tiny = TRUE; \
+ if (exponent == 0 && (Dextallp3(opndp3) || Dextallp4(opndp4))) { \
+ switch (Rounding_mode()) { \
+ case ROUNDPLUS: \
+ if (Dbl_iszero_sign(opndp1)) { \
+ Dbl_increment(opndp1,opndp2); \
+ if (Dbl_isone_hiddenoverflow(opndp1)) \
+ is_tiny = FALSE; \
+ Dbl_decrement(opndp1,opndp2); \
+ } \
+ break; \
+ case ROUNDMINUS: \
+ if (Dbl_isone_sign(opndp1)) { \
+ Dbl_increment(opndp1,opndp2); \
+ if (Dbl_isone_hiddenoverflow(opndp1)) \
+ is_tiny = FALSE; \
+ Dbl_decrement(opndp1,opndp2); \
+ } \
+ break; \
+ case ROUNDNEAREST: \
+ if (Dblext_isone_highp3(opndp3) && \
+ (Dblext_isone_lowp2(opndp2) || \
+ Dblext_isnotzero_low31p3(opndp3))) { \
+ Dbl_increment(opndp1,opndp2); \
+ if (Dbl_isone_hiddenoverflow(opndp1)) \
+ is_tiny = FALSE; \
+ Dbl_decrement(opndp1,opndp2); \
+ } \
+ break; \
+ } \
+ } \
+ Dblext_clear_signexponent_set_hidden(opndp1); \
+ if (exponent >= (1-QUAD_P)) { \
+ shiftamt = (1-exponent) % 32; \
+ switch((1-exponent)/32) { \
+ case 0: sticky = Dextallp4(opndp4) << 32-(shiftamt); \
+ Variableshiftdouble(opndp3,opndp4,shiftamt,opndp4); \
+ Variableshiftdouble(opndp2,opndp3,shiftamt,opndp3); \
+ Variableshiftdouble(opndp1,opndp2,shiftamt,opndp2); \
+ Dextallp1(opndp1) >>= shiftamt; \
+ break; \
+ case 1: sticky = (Dextallp3(opndp3) << 32-(shiftamt)) | \
+ Dextallp4(opndp4); \
+ Variableshiftdouble(opndp2,opndp3,shiftamt,opndp4); \
+ Variableshiftdouble(opndp1,opndp2,shiftamt,opndp3); \
+ Dextallp2(opndp2) = Dextallp1(opndp1) >> shiftamt; \
+ Dextallp1(opndp1) = 0; \
+ break; \
+ case 2: sticky = (Dextallp2(opndp2) << 32-(shiftamt)) | \
+ Dextallp3(opndp3) | Dextallp4(opndp4); \
+ Variableshiftdouble(opndp1,opndp2,shiftamt,opndp4); \
+ Dextallp3(opndp3) = Dextallp1(opndp1) >> shiftamt; \
+ Dextallp1(opndp1) = Dextallp2(opndp2) = 0; \
+ break; \
+ case 3: sticky = (Dextallp1(opndp1) << 32-(shiftamt)) | \
+ Dextallp2(opndp2) | Dextallp3(opndp3) | \
+ Dextallp4(opndp4); \
+ Dextallp4(opndp4) = Dextallp1(opndp1) >> shiftamt; \
+ Dextallp1(opndp1) = Dextallp2(opndp2) = 0; \
+ Dextallp3(opndp3) = 0; \
+ break; \
+ } \
+ } \
+ else { \
+ sticky = Dextallp1(opndp1) | Dextallp2(opndp2) | \
+ Dextallp3(opndp3) | Dextallp4(opndp4); \
+ Dblext_setzero(opndp1,opndp2,opndp3,opndp4); \
+ } \
+ if (sticky) Dblext_setone_lowmantissap4(opndp4); \
+ exponent = 0; \
+ }
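As an aside (not part of this patch), a minimal standalone sketch of the carry-propagation idiom that the Dbl_addition()/Twoword_add() macros above rely on: a 64-bit quantity is held in two 32-bit words, and a carry out of the low word is detected by the unsigned sum wrapping below one of its operands. The helper and values below are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Add (addhi:addlo) into (*hi:*lo), propagating the carry by hand. */
static void twoword_add(uint32_t *hi, uint32_t *lo, uint32_t addhi, uint32_t addlo)
{
	uint32_t oldlo = *lo;

	*lo += addlo;
	*hi += addhi;
	if (*lo < oldlo)	/* low word wrapped, so carry into the high word */
		(*hi)++;
}

int main(void)
{
	uint32_t hi = 0x00000001, lo = 0xffffffff;

	twoword_add(&hi, &lo, 0, 1);		/* 0x1ffffffff + 1 */
	printf("%08x %08x\n", hi, lo);		/* prints: 00000002 00000000 */
	return 0;
}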
diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c
new file mode 100644
index 00000000..27a7492d
--- /dev/null
+++ b/arch/parisc/math-emu/decode_exc.c
@@ -0,0 +1,370 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/fp/decode_exc.c $ Revision: $
+ *
+ * Purpose:
+ * <<please update with a synopsis of the functionality provided by this file>>
+ *
+ * External Interfaces:
+ * <<the following list was autogenerated, please review>>
+ * decode_fpu(Fpu_register, trap_counts)
+ *
+ * Internal Interfaces:
+ * <<please update>>
+ *
+ * Theory:
+ * <<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+#include <linux/kernel.h>
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+/* #include "types.h" */
+#include <asm/signal.h>
+#include <asm/siginfo.h>
+/* #include <machine/sys/mdep_private.h> */
+
+#undef Fpustatus_register
+#define Fpustatus_register Fpu_register[0]
+
+/* General definitions */
+#define DOESTRAP 1
+#define NOTRAP 0
+#define SIGNALCODE(signal, code) ((signal) << 24 | (code));
+#define copropbit 1<<31-2 /* bit position 2 */
+#define opclass 9 /* bits 21 & 22 */
+#define fmt 11 /* bits 19 & 20 */
+#define df 13 /* bits 17 & 18 */
+#define twobits 3 /* mask low-order 2 bits */
+#define fivebits 31 /* mask low-order 5 bits */
+#define MAX_EXCP_REG 7 /* number of exception registers to check */
+
+/* Exception register definitions */
+#define Excp_type(index) Exceptiontype(Fpu_register[index])
+#define Excp_instr(index) Instructionfield(Fpu_register[index])
+#define Clear_excp_register(index) Allexception(Fpu_register[index]) = 0
+#define Excp_format() \
+ (current_ir >> ((current_ir>>opclass & twobits)==1 ? df : fmt) & twobits)
+
+/* Miscellaneous definitions */
+#define Fpu_sgl(index) Fpu_register[index*2]
+
+#define Fpu_dblp1(index) Fpu_register[index*2]
+#define Fpu_dblp2(index) Fpu_register[(index*2)+1]
+
+#define Fpu_quadp1(index) Fpu_register[index*2]
+#define Fpu_quadp2(index) Fpu_register[(index*2)+1]
+#define Fpu_quadp3(index) Fpu_register[(index*2)+2]
+#define Fpu_quadp4(index) Fpu_register[(index*2)+3]
+
+/* Single precision floating-point definitions */
+#ifndef Sgl_decrement
+# define Sgl_decrement(sgl_value) Sall(sgl_value)--
+#endif
+
+/* Double precision floating-point definitions */
+#ifndef Dbl_decrement
+# define Dbl_decrement(dbl_valuep1,dbl_valuep2) \
+ if ((Dallp2(dbl_valuep2)--) == 0) Dallp1(dbl_valuep1)--
+#endif
+
+
+#define update_trap_counts(Fpu_register, aflags, bflags, trap_counts) { \
+ aflags=(Fpu_register[0])>>27; /* assumes zero fill. 32 bit */ \
+ Fpu_register[0] |= bflags; \
+}
+
+u_int
+decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
+{
+ unsigned int current_ir, excp;
+ int target, exception_index = 1;
+ boolean inexact;
+ unsigned int aflags;
+ unsigned int bflags;
+ unsigned int excptype;
+
+
+ /* Keep stats on how many floating point exceptions (based on type)
+ * happen. Want to keep this overhead low, but still provide
+ * some information to the customer. All exits from this routine
+ * need to restore Fpu_register[0]
+ */
+
+ bflags=(Fpu_register[0] & 0xf8000000);
+ Fpu_register[0] &= 0x07ffffff;
+
+ /* exception_index is used to index the exception register queue. It
+ * always points at the last register that contains a valid exception. A
+ * zero value implies no exceptions (also the initialized value). Setting
+ * the T-bit resets the exception_index to zero.
+ */
+
+ /*
+ * Check for reserved-op exception. A reserved-op exception does not
+ * set any exception registers nor does it set the T-bit. If the T-bit
+ * is not set then a reserved-op exception occurred.
+ *
+ * At some point, we may want to report reserved op exceptions as
+ * illegal instructions.
+ */
+
+ if (!Is_tbit_set()) {
+ update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+ return SIGNALCODE(SIGILL, ILL_COPROC);
+ }
+
+ /*
+ * Is a coprocessor op.
+ *
+ * Now we need to determine what type of exception occurred.
+ */
+ for (exception_index=1; exception_index<=MAX_EXCP_REG; exception_index++) {
+ current_ir = Excp_instr(exception_index);
+ /*
+ * On PA89: there are 5 different unimplemented exception
+ * codes: 0x1, 0x9, 0xb, 0x3, and 0x23. PA-RISC 2.0 adds
+ * another, 0x2b. Only these have the low order bit set.
+ */
+ excptype = Excp_type(exception_index);
+ if (excptype & UNIMPLEMENTEDEXCEPTION) {
+ /*
+ * Clear T-bit and exception register so that
+ * we can tell if a trap really occurs while
+ * emulating the instruction.
+ */
+ Clear_tbit();
+ Clear_excp_register(exception_index);
+ /*
+ * Now emulate this instruction. If a trap occurs,
+ * fpudispatch will return a non-zero number
+ */
+ excp = fpudispatch(current_ir,excptype,0,Fpu_register);
+ /* accumulate the status flags, don't lose them as in hpux */
+ if (excp) {
+ /*
+ * We now need to make sure that the T-bit and the
+ * exception register contain the correct values
+ * before continuing.
+ */
+ /*
+ * Set t-bit since it might still be needed for a
+ * subsequent real trap (I don't understand fully -PB)
+ */
+ Set_tbit();
+ /* some of the following code uses
+ * Excp_type(exception_index) so fix that up */
+ Set_exceptiontype_and_instr_field(excp,current_ir,
+ Fpu_register[exception_index]);
+ if (excp == UNIMPLEMENTEDEXCEPTION) {
+ /*
+ * it is really unimplemented, so restore the
+ * TIMEX extended unimplemented exception code
+ */
+ excp = excptype;
+ update_trap_counts(Fpu_register, aflags, bflags,
+ trap_counts);
+ return SIGNALCODE(SIGILL, ILL_COPROC);
+ }
+ /* some of the following code uses excptype, so
+ * fix that up too */
+ excptype = excp;
+ }
+ /* handle exceptions other than the real UNIMPLEMENTED the
+ * same way as if the hardware had caused them */
+ if (excp == NOEXCEPTION)
+ /* For now use 'break', should technically be 'continue' */
+ break;
+ }
+
+ /*
+ * In PA89, the underflow exception has been extended to encode
+ * additional information. The exception looks like pp01x0,
+ * where x is 1 if inexact and pp represent the inexact bit (I)
+ * and the round away bit (RA)
+ */
+ if (excptype & UNDERFLOWEXCEPTION) {
+ /* check for underflow trap enabled */
+ if (Is_underflowtrap_enabled()) {
+ update_trap_counts(Fpu_register, aflags, bflags,
+ trap_counts);
+ return SIGNALCODE(SIGFPE, FPE_FLTUND);
+ } else {
+ /*
+ * Isn't a real trap; we need to
+ * return the default value.
+ */
+ target = current_ir & fivebits;
+#ifndef lint
+ if (Ibit(Fpu_register[exception_index])) inexact = TRUE;
+ else inexact = FALSE;
+#endif
+ switch (Excp_format()) {
+ case SGL:
+ /*
+ * If ra (round-away) is set, will
+ * want to undo the rounding done
+ * by the hardware.
+ */
+ if (Rabit(Fpu_register[exception_index]))
+ Sgl_decrement(Fpu_sgl(target));
+
+ /* now denormalize */
+ sgl_denormalize(&Fpu_sgl(target),&inexact,Rounding_mode());
+ break;
+ case DBL:
+ /*
+ * If ra (round-away) is set, will
+ * want to undo the rounding done
+ * by the hardware.
+ */
+ if (Rabit(Fpu_register[exception_index]))
+ Dbl_decrement(Fpu_dblp1(target),Fpu_dblp2(target));
+
+ /* now denormalize */
+ dbl_denormalize(&Fpu_dblp1(target),&Fpu_dblp2(target),
+ &inexact,Rounding_mode());
+ break;
+ }
+ if (inexact) Set_underflowflag();
+ /*
+ * Underflow can generate an inexact
+ * exception. If inexact trap is enabled,
+ * want to do an inexact trap, otherwise
+ * set inexact flag.
+ */
+ if (inexact && Is_inexacttrap_enabled()) {
+ /*
+ * Set exception field of exception register
+ * to inexact, parm field to zero.
+ * Underflow bit should be cleared.
+ */
+ Set_exceptiontype(Fpu_register[exception_index],
+ INEXACTEXCEPTION);
+ Set_parmfield(Fpu_register[exception_index],0);
+ update_trap_counts(Fpu_register, aflags, bflags,
+ trap_counts);
+ return SIGNALCODE(SIGFPE, FPE_FLTRES);
+ }
+ else {
+ /*
+ * Exception register needs to be cleared.
+ * Inexact flag needs to be set if inexact.
+ */
+ Clear_excp_register(exception_index);
+ if (inexact) Set_inexactflag();
+ }
+ }
+ continue;
+ }
+ switch(Excp_type(exception_index)) {
+ case OVERFLOWEXCEPTION:
+ case OVERFLOWEXCEPTION | INEXACTEXCEPTION:
+ /* check for overflow trap enabled */
+ update_trap_counts(Fpu_register, aflags, bflags,
+ trap_counts);
+ if (Is_overflowtrap_enabled()) {
+ update_trap_counts(Fpu_register, aflags, bflags,
+ trap_counts);
+ return SIGNALCODE(SIGFPE, FPE_FLTOVF);
+ } else {
+ /*
+ * Isn't a real trap; we need to
+ * return the default value.
+ */
+ target = current_ir & fivebits;
+ switch (Excp_format()) {
+ case SGL:
+ Sgl_setoverflow(Fpu_sgl(target));
+ break;
+ case DBL:
+ Dbl_setoverflow(Fpu_dblp1(target),Fpu_dblp2(target));
+ break;
+ }
+ Set_overflowflag();
+ /*
+ * Overflow always generates an inexact
+ * exception. If inexact trap is enabled,
+ * want to do an inexact trap, otherwise
+ * set inexact flag.
+ */
+ if (Is_inexacttrap_enabled()) {
+ /*
+ * Set exception field of exception
+ * register to inexact. Overflow
+ * bit should be cleared.
+ */
+ Set_exceptiontype(Fpu_register[exception_index],
+ INEXACTEXCEPTION);
+ update_trap_counts(Fpu_register, aflags, bflags,
+ trap_counts);
+ return SIGNALCODE(SIGFPE, FPE_FLTRES);
+ }
+ else {
+ /*
+ * Exception register needs to be cleared.
+ * Inexact flag needs to be set.
+ */
+ Clear_excp_register(exception_index);
+ Set_inexactflag();
+ }
+ }
+ break;
+ case INVALIDEXCEPTION:
+ case OPC_2E_INVALIDEXCEPTION:
+ update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+ return SIGNALCODE(SIGFPE, FPE_FLTINV);
+ case DIVISIONBYZEROEXCEPTION:
+ update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+ Clear_excp_register(exception_index);
+ return SIGNALCODE(SIGFPE, FPE_FLTDIV);
+ case INEXACTEXCEPTION:
+ update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+ return SIGNALCODE(SIGFPE, FPE_FLTRES);
+ default:
+ update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+ printk("%s(%d) Unknown FPU exception 0x%x\n", __FILE__,
+ __LINE__, Excp_type(exception_index));
+ return SIGNALCODE(SIGILL, ILL_COPROC);
+ case NOEXCEPTION: /* no exception */
+ /*
+ * Clear exception register in case
+ * other fields are non-zero.
+ */
+ Clear_excp_register(exception_index);
+ break;
+ }
+ }
+ /*
+ * No real exceptions occurred.
+ */
+ Clear_tbit();
+ update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+ return(NOTRAP);
+}
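As an illustrative aside (not part of the patch), the return value of decode_fpu() packs a signal number and an si_code into one word via the SIGNALCODE() macro above: signal in the top 8 bits, code in the low 24 bits. The sketch below shows how a hypothetical caller could split it back apart; the numeric values are only examples.

#include <stdio.h>

#define SIGNALCODE(signal, code)	((signal) << 24 | (code))

int main(void)
{
	/* 8 and 6 stand in for SIGFPE and FPE_FLTRES; values are illustrative. */
	unsigned int ret = SIGNALCODE(8, 6);
	unsigned int signal = ret >> 24;		/* high byte: signal number */
	unsigned int code = ret & 0x00ffffff;		/* low 24 bits: si_code */

	printf("signal=%u code=%u\n", signal, code);	/* signal=8 code=6 */
	return 0;
}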
diff --git a/arch/parisc/math-emu/denormal.c b/arch/parisc/math-emu/denormal.c
new file mode 100644
index 00000000..60687e13
--- /dev/null
+++ b/arch/parisc/math-emu/denormal.c
@@ -0,0 +1,135 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/fp/denormal.c $ Revision: $
+ *
+ * Purpose:
+ * <<please update with a synopsis of the functionality provided by this file>>
+ *
+ * External Interfaces:
+ * <<the following list was autogenerated, please review>>
+ * dbl_denormalize(dbl_opndp1,dbl_opndp2,inexactflag,rmode)
+ * sgl_denormalize(sgl_opnd,inexactflag,rmode)
+ *
+ * Internal Interfaces:
+ * <<please update>>
+ *
+ * Theory:
+ * <<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "hppa.h"
+#include <linux/kernel.h>
+/* #include <machine/sys/mdep_private.h> */
+
+#undef Fpustatus_register
+#define Fpustatus_register Fpu_register[0]
+
+void
+sgl_denormalize(unsigned int *sgl_opnd, boolean *inexactflag, int rmode)
+{
+ unsigned int opnd;
+ int sign, exponent;
+ boolean guardbit = FALSE, stickybit, inexact;
+
+ opnd = *sgl_opnd;
+ stickybit = *inexactflag;
+ exponent = Sgl_exponent(opnd) - SGL_WRAP;
+ sign = Sgl_sign(opnd);
+ Sgl_denormalize(opnd,exponent,guardbit,stickybit,inexact);
+ if (inexact) {
+ switch (rmode) {
+ case ROUNDPLUS:
+ if (sign == 0) {
+ Sgl_increment(opnd);
+ }
+ break;
+ case ROUNDMINUS:
+ if (sign != 0) {
+ Sgl_increment(opnd);
+ }
+ break;
+ case ROUNDNEAREST:
+ if (guardbit && (stickybit ||
+ Sgl_isone_lowmantissa(opnd))) {
+ Sgl_increment(opnd);
+ }
+ break;
+ }
+ }
+ Sgl_set_sign(opnd,sign);
+ *sgl_opnd = opnd;
+ *inexactflag = inexact;
+ return;
+}
+
+void
+dbl_denormalize(unsigned int *dbl_opndp1,
+ unsigned int * dbl_opndp2,
+ boolean *inexactflag,
+ int rmode)
+{
+ unsigned int opndp1, opndp2;
+ int sign, exponent;
+ boolean guardbit = FALSE, stickybit, inexact;
+
+ opndp1 = *dbl_opndp1;
+ opndp2 = *dbl_opndp2;
+ stickybit = *inexactflag;
+ exponent = Dbl_exponent(opndp1) - DBL_WRAP;
+ sign = Dbl_sign(opndp1);
+ Dbl_denormalize(opndp1,opndp2,exponent,guardbit,stickybit,inexact);
+ if (inexact) {
+ switch (rmode) {
+ case ROUNDPLUS:
+ if (sign == 0) {
+ Dbl_increment(opndp1,opndp2);
+ }
+ break;
+ case ROUNDMINUS:
+ if (sign != 0) {
+ Dbl_increment(opndp1,opndp2);
+ }
+ break;
+ case ROUNDNEAREST:
+ if (guardbit && (stickybit ||
+ Dbl_isone_lowmantissap2(opndp2))) {
+ Dbl_increment(opndp1,opndp2);
+ }
+ break;
+ }
+ }
+ Dbl_set_sign(opndp1,sign);
+ *dbl_opndp1 = opndp1;
+ *dbl_opndp2 = opndp2;
+ *inexactflag = inexact;
+ return;
+}
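A brief aside (not part of the patch): in the ROUNDNEAREST branch of sgl_denormalize()/dbl_denormalize() above, the result is bumped up only when the guard bit is set and either some sticky bit was lost or the remaining value is already odd — i.e. round-to-nearest, ties to even. A minimal sketch of that decision, with illustrative names:

#include <stdio.h>

static unsigned int round_nearest_even(unsigned int value, int guard, int sticky)
{
	if (guard && (sticky || (value & 1)))
		value++;	/* above half way, or exactly half way and odd */
	return value;
}

int main(void)
{
	printf("%u\n", round_nearest_even(4, 1, 0));	/* tie, even: stays 4 */
	printf("%u\n", round_nearest_even(5, 1, 0));	/* tie, odd: rounds to 6 */
	printf("%u\n", round_nearest_even(4, 1, 1));	/* above half: rounds to 5 */
	return 0;
}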
diff --git a/arch/parisc/math-emu/dfadd.c b/arch/parisc/math-emu/dfadd.c
new file mode 100644
index 00000000..d37e2d2c
--- /dev/null
+++ b/arch/parisc/math-emu/dfadd.c
@@ -0,0 +1,524 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/dfadd.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Double_add: add two double precision values.
+ *
+ * External Interfaces:
+ * dbl_fadd(leftptr, rightptr, dstptr, status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ * <<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "dbl_float.h"
+
+/*
+ * Double_add: add two double precision values.
+ */
+dbl_fadd(
+ dbl_floating_point *leftptr,
+ dbl_floating_point *rightptr,
+ dbl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register unsigned int signless_upper_left, signless_upper_right, save;
+ register unsigned int leftp1, leftp2, rightp1, rightp2, extent;
+ register unsigned int resultp1 = 0, resultp2 = 0;
+
+ register int result_exponent, right_exponent, diff_exponent;
+ register int sign_save, jumpsize;
+ register boolean inexact = FALSE;
+ register boolean underflowtrap;
+
+ /* Create local copies of the numbers */
+ Dbl_copyfromptr(leftptr,leftp1,leftp2);
+ Dbl_copyfromptr(rightptr,rightp1,rightp2);
+
+ /* A zero "save" helps discover equal operands (for later), *
+ * and is used in swapping operands (if needed). */
+ Dbl_xortointp1(leftp1,rightp1,/*to*/save);
+
+ /*
+ * check first operand for NaN's or infinity
+ */
+ if ((result_exponent = Dbl_exponent(leftp1)) == DBL_INFINITY_EXPONENT)
+ {
+ if (Dbl_iszero_mantissa(leftp1,leftp2))
+ {
+ if (Dbl_isnotnan(rightp1,rightp2))
+ {
+ if (Dbl_isinfinity(rightp1,rightp2) && save!=0)
+ {
+ /*
+ * invalid since operands are opposite signed infinity's
+ */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return infinity
+ */
+ Dbl_copytoptr(leftp1,leftp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else
+ {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(leftp1))
+ {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(leftp1);
+ }
+ /*
+ * is second operand a signaling NaN?
+ */
+ else if (Dbl_is_signalingnan(rightp1))
+ {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(rightp1);
+ Dbl_copytoptr(rightp1,rightp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(leftp1,leftp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ } /* End left NaN or Infinity processing */
+ /*
+ * check second operand for NaN's or infinity
+ */
+ if (Dbl_isinfinity_exponent(rightp1))
+ {
+ if (Dbl_iszero_mantissa(rightp1,rightp2))
+ {
+ /* return infinity */
+ Dbl_copytoptr(rightp1,rightp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(rightp1))
+ {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(rightp1);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(rightp1,rightp2,dstptr);
+ return(NOEXCEPTION);
+ } /* End right NaN or Infinity processing */
+
+ /* Invariant: Must be dealing with finite numbers */
+
+ /* Compare operands by removing the sign */
+ Dbl_copytoint_exponentmantissap1(leftp1,signless_upper_left);
+ Dbl_copytoint_exponentmantissap1(rightp1,signless_upper_right);
+
+ /* sign difference selects add or sub operation. */
+ if(Dbl_ismagnitudeless(leftp2,rightp2,signless_upper_left,signless_upper_right))
+ {
+ /* Set the left operand to the larger one by XOR swap *
+ * First finish the first word using "save" */
+ Dbl_xorfromintp1(save,rightp1,/*to*/rightp1);
+ Dbl_xorfromintp1(save,leftp1,/*to*/leftp1);
+ Dbl_swap_lower(leftp2,rightp2);
+ result_exponent = Dbl_exponent(leftp1);
+ }
+ /* Invariant: left is not smaller than right. */
+
+ if((right_exponent = Dbl_exponent(rightp1)) == 0)
+ {
+ /* Denormalized operands. First look for zeroes */
+ if(Dbl_iszero_mantissa(rightp1,rightp2))
+ {
+ /* right is zero */
+ if(Dbl_iszero_exponentmantissa(leftp1,leftp2))
+ {
+ /* Both operands are zeros */
+ if(Is_rounding_mode(ROUNDMINUS))
+ {
+ Dbl_or_signs(leftp1,/*with*/rightp1);
+ }
+ else
+ {
+ Dbl_and_signs(leftp1,/*with*/rightp1);
+ }
+ }
+ else
+ {
+ /* Left is not a zero and must be the result. Trapped
+ * underflows are signaled if left is denormalized. Result
+ * is always exact. */
+ if( (result_exponent == 0) && Is_underflowtrap_enabled() )
+ {
+ /* need to normalize results mantissa */
+ sign_save = Dbl_signextendedsign(leftp1);
+ Dbl_leftshiftby1(leftp1,leftp2);
+ Dbl_normalize(leftp1,leftp2,result_exponent);
+ Dbl_set_sign(leftp1,/*using*/sign_save);
+ Dbl_setwrapped_exponent(leftp1,result_exponent,unfl);
+ Dbl_copytoptr(leftp1,leftp2,dstptr);
+ /* inexact = FALSE */
+ return(UNDERFLOWEXCEPTION);
+ }
+ }
+ Dbl_copytoptr(leftp1,leftp2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /* Neither are zeroes */
+ Dbl_clear_sign(rightp1); /* Exponent is already cleared */
+ if(result_exponent == 0 )
+ {
+ /* Both operands are denormalized. The result must be exact
+ * and is simply calculated. A sum could become normalized and a
+ * difference could cancel to a true zero. */
+ if( (/*signed*/int) save < 0 )
+ {
+ Dbl_subtract(leftp1,leftp2,/*minus*/rightp1,rightp2,
+ /*into*/resultp1,resultp2);
+ if(Dbl_iszero_mantissa(resultp1,resultp2))
+ {
+ if(Is_rounding_mode(ROUNDMINUS))
+ {
+ Dbl_setone_sign(resultp1);
+ }
+ else
+ {
+ Dbl_setzero_sign(resultp1);
+ }
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else
+ {
+ Dbl_addition(leftp1,leftp2,rightp1,rightp2,
+ /*into*/resultp1,resultp2);
+ if(Dbl_isone_hidden(resultp1))
+ {
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ if(Is_underflowtrap_enabled())
+ {
+ /* need to normalize result */
+ sign_save = Dbl_signextendedsign(resultp1);
+ Dbl_leftshiftby1(resultp1,resultp2);
+ Dbl_normalize(resultp1,resultp2,result_exponent);
+ Dbl_set_sign(resultp1,/*using*/sign_save);
+ Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ /* inexact = FALSE */
+ return(UNDERFLOWEXCEPTION);
+ }
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ right_exponent = 1; /* Set exponent to reflect different bias
+ * with denormalized numbers. */
+ }
+ else
+ {
+ Dbl_clear_signexponent_set_hidden(rightp1);
+ }
+ Dbl_clear_exponent_set_hidden(leftp1);
+ diff_exponent = result_exponent - right_exponent;
+
+ /*
+ * Special case alignment of operands that would force alignment
+ * beyond the extent of the extension. A further optimization
+ * could special case this but only reduces the path length for this
+ * infrequent case.
+ */
+ if(diff_exponent > DBL_THRESHOLD)
+ {
+ diff_exponent = DBL_THRESHOLD;
+ }
+
+ /* Align right operand by shifting to right */
+ Dbl_right_align(/*operand*/rightp1,rightp2,/*shifted by*/diff_exponent,
+ /*and lower to*/extent);
+
+ /* Treat sum and difference of the operands separately. */
+ if( (/*signed*/int) save < 0 )
+ {
+ /*
+ * Difference of the two operands. There can be no overflow. A
+ * borrow can occur out of the hidden bit and force a post
+ * normalization phase.
+ */
+ Dbl_subtract_withextension(leftp1,leftp2,/*minus*/rightp1,rightp2,
+ /*with*/extent,/*into*/resultp1,resultp2);
+ if(Dbl_iszero_hidden(resultp1))
+ {
+ /* Handle normalization */
+ /* A straightforward algorithm would now shift the result
+ * and extension left until the hidden bit becomes one. Not
+ * all of the extension bits need participate in the shift.
+ * Only the two most significant bits (round and guard) are
+ * needed. If only a single shift is needed then the guard
+ * bit becomes a significant low order bit and the extension
+ * must participate in the rounding. If more than a single
+ * shift is needed, then all bits to the right of the guard
+ * bit are zeros, and the guard bit may or may not be zero. */
+ sign_save = Dbl_signextendedsign(resultp1);
+ Dbl_leftshiftby1_withextent(resultp1,resultp2,extent,resultp1,resultp2);
+
+ /* Need to check for a zero result. The sign and exponent
+ * fields have already been zeroed. The more efficient test
+ * of the full object can be used.
+ */
+ if(Dbl_iszero(resultp1,resultp2))
+ /* Must have been "x-x" or "x+(-x)". */
+ {
+ if(Is_rounding_mode(ROUNDMINUS)) Dbl_setone_sign(resultp1);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ result_exponent--;
+ /* Look to see if normalization is finished. */
+ if(Dbl_isone_hidden(resultp1))
+ {
+ if(result_exponent==0)
+ {
+ /* Denormalized, exponent should be zero. Left operand *
+ * was normalized, so extent (guard, round) was zero */
+ goto underflow;
+ }
+ else
+ {
+ /* No further normalization is needed. */
+ Dbl_set_sign(resultp1,/*using*/sign_save);
+ Ext_leftshiftby1(extent);
+ goto round;
+ }
+ }
+
+ /* Check for denormalized, exponent should be zero. Left *
+ * operand was normalized, so extent (guard, round) was zero */
+ if(!(underflowtrap = Is_underflowtrap_enabled()) &&
+ result_exponent==0) goto underflow;
+
+ /* Shift extension to complete one bit of normalization and
+ * update exponent. */
+ Ext_leftshiftby1(extent);
+
+ /* Discover first one bit to determine shift amount. Use a
+ * modified binary search. We have already shifted the result
+ * one position right and still not found a one so the remainder
+ * of the extension must be zero and simplifies rounding. */
+ /* Scan bytes */
+ while(Dbl_iszero_hiddenhigh7mantissa(resultp1))
+ {
+ Dbl_leftshiftby8(resultp1,resultp2);
+ if((result_exponent -= 8) <= 0 && !underflowtrap)
+ goto underflow;
+ }
+ /* Now narrow it down to the nibble */
+ if(Dbl_iszero_hiddenhigh3mantissa(resultp1))
+ {
+ /* The lower nibble contains the normalizing one */
+ Dbl_leftshiftby4(resultp1,resultp2);
+ if((result_exponent -= 4) <= 0 && !underflowtrap)
+ goto underflow;
+ }
+ /* Select case where first bit is set (already normalized)
+ * otherwise select the proper shift. */
+ if((jumpsize = Dbl_hiddenhigh3mantissa(resultp1)) > 7)
+ {
+ /* Already normalized */
+ if(result_exponent <= 0) goto underflow;
+ Dbl_set_sign(resultp1,/*using*/sign_save);
+ Dbl_set_exponent(resultp1,/*using*/result_exponent);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ Dbl_sethigh4bits(resultp1,/*using*/sign_save);
+ switch(jumpsize)
+ {
+ case 1:
+ {
+ Dbl_leftshiftby3(resultp1,resultp2);
+ result_exponent -= 3;
+ break;
+ }
+ case 2:
+ case 3:
+ {
+ Dbl_leftshiftby2(resultp1,resultp2);
+ result_exponent -= 2;
+ break;
+ }
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ {
+ Dbl_leftshiftby1(resultp1,resultp2);
+ result_exponent -= 1;
+ break;
+ }
+ }
+ if(result_exponent > 0)
+ {
+ Dbl_set_exponent(resultp1,/*using*/result_exponent);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION); /* Sign bit is already set */
+ }
+ /* Fixup potential underflows */
+ underflow:
+ if(Is_underflowtrap_enabled())
+ {
+ Dbl_set_sign(resultp1,sign_save);
+ Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ /* inexact = FALSE */
+ return(UNDERFLOWEXCEPTION);
+ }
+ /*
+ * Since we cannot get an inexact denormalized result,
+ * we can now return.
+ */
+ Dbl_fix_overshift(resultp1,resultp2,(1-result_exponent),extent);
+ Dbl_clear_signexponent(resultp1);
+ Dbl_set_sign(resultp1,sign_save);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ } /* end if(hidden...)... */
+ /* Fall through and round */
+ } /* end if(save < 0)... */
+ else
+ {
+ /* Add magnitudes */
+ Dbl_addition(leftp1,leftp2,rightp1,rightp2,/*to*/resultp1,resultp2);
+ if(Dbl_isone_hiddenoverflow(resultp1))
+ {
+ /* Prenormalization required. */
+ Dbl_rightshiftby1_withextent(resultp2,extent,extent);
+ Dbl_arithrightshiftby1(resultp1,resultp2);
+ result_exponent++;
+ } /* end if hiddenoverflow... */
+ } /* end else ...add magnitudes... */
+
+ /* Round the result. If the extension is all zeros, then the result is
+ * exact. Otherwise round in the correct direction. No underflow is
+ * possible. If a postnormalization is necessary, then the mantissa is
+ * all zeros so no shift is needed. */
+ round:
+ if(Ext_isnotzero(extent))
+ {
+ inexact = TRUE;
+ switch(Rounding_mode())
+ {
+ case ROUNDNEAREST: /* The default. */
+ if(Ext_isone_sign(extent))
+ {
+ /* at least 1/2 ulp */
+ if(Ext_isnotzero_lower(extent) ||
+ Dbl_isone_lowmantissap2(resultp2))
+ {
+ /* either exactly half way and odd or more than 1/2ulp */
+ Dbl_increment(resultp1,resultp2);
+ }
+ }
+ break;
+
+ case ROUNDPLUS:
+ if(Dbl_iszero_sign(resultp1))
+ {
+ /* Round up positive results */
+ Dbl_increment(resultp1,resultp2);
+ }
+ break;
+
+ case ROUNDMINUS:
+ if(Dbl_isone_sign(resultp1))
+ {
+ /* Round down negative results */
+ Dbl_increment(resultp1,resultp2);
+ }
+
+ case ROUNDZERO:;
+ /* truncate is simple */
+ } /* end switch... */
+ if(Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
+ }
+ if(result_exponent == DBL_INFINITY_EXPONENT)
+ {
+ /* Overflow */
+ if(Is_overflowtrap_enabled())
+ {
+ Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(OVERFLOWEXCEPTION);
+ }
+ else
+ {
+ inexact = TRUE;
+ Set_overflowflag();
+ Dbl_setoverflow(resultp1,resultp2);
+ }
+ }
+ else Dbl_set_exponent(resultp1,result_exponent);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if(inexact)
+ if(Is_inexacttrap_enabled())
+ return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(NOEXCEPTION);
+}
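As an aside (not part of the patch), the "save" variable in dbl_fadd() above is the XOR of the operands' upper words: its top bit is set exactly when the operand signs differ, so a signed "< 0" test selects magnitude subtraction rather than addition. A small standalone sketch of that test, with illustrative bit patterns:

#include <stdint.h>
#include <stdio.h>

static const char *effective_op(uint32_t left_upper, uint32_t right_upper)
{
	uint32_t save = left_upper ^ right_upper;

	return ((int32_t)save < 0) ? "subtract magnitudes" : "add magnitudes";
}

int main(void)
{
	/* Upper words of +1.0 and -2.0 / +2.0 in IEEE-754 double format. */
	printf("%s\n", effective_op(0x3ff00000, 0xc0000000));	/* subtract */
	printf("%s\n", effective_op(0x3ff00000, 0x40000000));	/* add */
	return 0;
}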
diff --git a/arch/parisc/math-emu/dfcmp.c b/arch/parisc/math-emu/dfcmp.c
new file mode 100644
index 00000000..59521267
--- /dev/null
+++ b/arch/parisc/math-emu/dfcmp.c
@@ -0,0 +1,181 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/dfcmp.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * dbl_cmp: compare two values
+ *
+ * External Interfaces:
+ * dbl_fcmp(leftptr, rightptr, cond, status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ * <<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+
+#include "float.h"
+#include "dbl_float.h"
+
+/*
+ * dbl_cmp: compare two values
+ */
+int
+dbl_fcmp (dbl_floating_point * leftptr, dbl_floating_point * rightptr,
+ unsigned int cond, unsigned int *status)
+
+ /* The predicate to be tested */
+
+ {
+ register unsigned int leftp1, leftp2, rightp1, rightp2;
+ register int xorresult;
+
+ /* Create local copies of the numbers */
+ Dbl_copyfromptr(leftptr,leftp1,leftp2);
+ Dbl_copyfromptr(rightptr,rightp1,rightp2);
+ /*
+ * Test for NaN
+ */
+ if( (Dbl_exponent(leftp1) == DBL_INFINITY_EXPONENT)
+ || (Dbl_exponent(rightp1) == DBL_INFINITY_EXPONENT) )
+ {
+ /* Check if a NaN is involved. Signal an invalid exception when
+ * comparing a signaling NaN or when comparing quiet NaNs and the
+ * low bit of the condition is set */
+ if( ((Dbl_exponent(leftp1) == DBL_INFINITY_EXPONENT)
+ && Dbl_isnotzero_mantissa(leftp1,leftp2)
+ && (Exception(cond) || Dbl_isone_signaling(leftp1)))
+ ||
+ ((Dbl_exponent(rightp1) == DBL_INFINITY_EXPONENT)
+ && Dbl_isnotzero_mantissa(rightp1,rightp2)
+ && (Exception(cond) || Dbl_isone_signaling(rightp1))) )
+ {
+ if( Is_invalidtrap_enabled() ) {
+ Set_status_cbit(Unordered(cond));
+ return(INVALIDEXCEPTION);
+ }
+ else Set_invalidflag();
+ Set_status_cbit(Unordered(cond));
+ return(NOEXCEPTION);
+ }
+ /* All the exceptional conditions are handled, now special case
+ NaN compares */
+ else if( ((Dbl_exponent(leftp1) == DBL_INFINITY_EXPONENT)
+ && Dbl_isnotzero_mantissa(leftp1,leftp2))
+ ||
+ ((Dbl_exponent(rightp1) == DBL_INFINITY_EXPONENT)
+ && Dbl_isnotzero_mantissa(rightp1,rightp2)) )
+ {
+ /* NaNs always compare unordered. */
+ Set_status_cbit(Unordered(cond));
+ return(NOEXCEPTION);
+ }
+ /* infinities will drop down to the normal compare mechanisms */
+ }
+ /* First compare for unequal signs => less or greater or
+ * special equal case */
+ Dbl_xortointp1(leftp1,rightp1,xorresult);
+ if( xorresult < 0 )
+ {
+ /* left negative => less, left positive => greater.
+ * equal is possible if both operands are zeros. */
+ if( Dbl_iszero_exponentmantissa(leftp1,leftp2)
+ && Dbl_iszero_exponentmantissa(rightp1,rightp2) )
+ {
+ Set_status_cbit(Equal(cond));
+ }
+ else if( Dbl_isone_sign(leftp1) )
+ {
+ Set_status_cbit(Lessthan(cond));
+ }
+ else
+ {
+ Set_status_cbit(Greaterthan(cond));
+ }
+ }
+ /* Signs are the same. Treat negative numbers separately
+ * from the positives because of the reversed sense. */
+ else if(Dbl_isequal(leftp1,leftp2,rightp1,rightp2))
+ {
+ Set_status_cbit(Equal(cond));
+ }
+ else if( Dbl_iszero_sign(leftp1) )
+ {
+ /* Positive compare */
+ if( Dbl_allp1(leftp1) < Dbl_allp1(rightp1) )
+ {
+ Set_status_cbit(Lessthan(cond));
+ }
+ else if( Dbl_allp1(leftp1) > Dbl_allp1(rightp1) )
+ {
+ Set_status_cbit(Greaterthan(cond));
+ }
+ else
+ {
+ /* Equal first parts. Now we must use unsigned compares to
+ * resolve the two possibilities. */
+ if( Dbl_allp2(leftp2) < Dbl_allp2(rightp2) )
+ {
+ Set_status_cbit(Lessthan(cond));
+ }
+ else
+ {
+ Set_status_cbit(Greaterthan(cond));
+ }
+ }
+ }
+ else
+ {
+ /* Negative compare. Signed or unsigned compares
+ * both work the same. That distinction is only
+ * important when the sign bits differ. */
+ if( Dbl_allp1(leftp1) > Dbl_allp1(rightp1) )
+ {
+ Set_status_cbit(Lessthan(cond));
+ }
+ else if( Dbl_allp1(leftp1) < Dbl_allp1(rightp1) )
+ {
+ Set_status_cbit(Greaterthan(cond));
+ }
+ else
+ {
+ /* Equal first parts. Now we must use unsigned compares to
+ * resolve the two possibilities. */
+ if( Dbl_allp2(leftp2) > Dbl_allp2(rightp2) )
+ {
+ Set_status_cbit(Lessthan(cond));
+ }
+ else
+ {
+ Set_status_cbit(Greaterthan(cond));
+ }
+ }
+ }
+ return(NOEXCEPTION);
+ }
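
For readers tracing the compare logic above: any NaN operand makes the relation unordered, the two signed zeros compare equal, and otherwise sign and magnitude select less, equal, or greater. The fragment below is only a sketch of those semantics in plain C, using the host double type rather than the Dbl_* word-pair macros and the PA-RISC condition encoding used by the patch.

    /* Relation selection modeled on dbl_fcmp (illustrative sketch only). */
    enum fcmp_rel { FCMP_LESS, FCMP_EQUAL, FCMP_GREATER, FCMP_UNORDERED };

    static enum fcmp_rel fcmp_relation(double a, double b)
    {
            if (a != a || b != b)           /* either operand is a NaN */
                    return FCMP_UNORDERED;
            if (a < b)
                    return FCMP_LESS;
            if (a > b)
                    return FCMP_GREATER;
            return FCMP_EQUAL;              /* note: -0.0 and +0.0 compare equal */
    }
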
diff --git a/arch/parisc/math-emu/dfdiv.c b/arch/parisc/math-emu/dfdiv.c
new file mode 100644
index 00000000..d7d4bec0
--- /dev/null
+++ b/arch/parisc/math-emu/dfdiv.c
@@ -0,0 +1,400 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/dfdiv.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Double Precision Floating-point Divide
+ *
+ * External Interfaces:
+ * dbl_fdiv(srcptr1,srcptr2,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "dbl_float.h"
+
+/*
+ * Double Precision Floating-point Divide
+ */
+
+int
+dbl_fdiv (dbl_floating_point * srcptr1, dbl_floating_point * srcptr2,
+ dbl_floating_point * dstptr, unsigned int *status)
+{
+ register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2;
+ register unsigned int opnd3p1, opnd3p2, resultp1, resultp2;
+ register int dest_exponent, count;
+ register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
+ boolean is_tiny;
+
+ Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2);
+ Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2);
+ /*
+ * set sign bit of result
+ */
+ if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1))
+ Dbl_setnegativezerop1(resultp1);
+ else Dbl_setzerop1(resultp1);
+ /*
+ * check first operand for NaN's or infinity
+ */
+ if (Dbl_isinfinity_exponent(opnd1p1)) {
+ if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+ if (Dbl_isnotnan(opnd2p1,opnd2p2)) {
+ if (Dbl_isinfinity(opnd2p1,opnd2p2)) {
+ /*
+ * invalid since both operands
+ * are infinity
+ */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return infinity
+ */
+ Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(opnd1p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd1p1);
+ }
+ /*
+ * is second operand a signaling NaN?
+ */
+ else if (Dbl_is_signalingnan(opnd2p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd2p1);
+ Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * check second operand for NaN's or infinity
+ */
+ if (Dbl_isinfinity_exponent(opnd2p1)) {
+ if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
+ /*
+ * return zero
+ */
+ Dbl_setzero_exponentmantissa(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(opnd2p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd2p1);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * check for division by zero
+ */
+ if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
+ if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
+ /* invalid since both operands are zero */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ if (Is_divisionbyzerotrap_enabled())
+ return(DIVISIONBYZEROEXCEPTION);
+ Set_divisionbyzeroflag();
+ Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate exponent
+ */
+ dest_exponent = Dbl_exponent(opnd1p1) - Dbl_exponent(opnd2p1) + DBL_BIAS;
+
+ /*
+ * Generate mantissa
+ */
+ if (Dbl_isnotzero_exponent(opnd1p1)) {
+ /* set hidden bit */
+ Dbl_clear_signexponent_set_hidden(opnd1p1);
+ }
+ else {
+ /* check for zero */
+ if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+ Dbl_setzero_exponentmantissa(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /* is denormalized, want to normalize */
+ Dbl_clear_signexponent(opnd1p1);
+ Dbl_leftshiftby1(opnd1p1,opnd1p2);
+ Dbl_normalize(opnd1p1,opnd1p2,dest_exponent);
+ }
+ /* opnd2 needs to have hidden bit set with msb in hidden bit */
+ if (Dbl_isnotzero_exponent(opnd2p1)) {
+ Dbl_clear_signexponent_set_hidden(opnd2p1);
+ }
+ else {
+ /* is denormalized; want to normalize */
+ Dbl_clear_signexponent(opnd2p1);
+ Dbl_leftshiftby1(opnd2p1,opnd2p2);
+ while (Dbl_iszero_hiddenhigh7mantissa(opnd2p1)) {
+ dest_exponent+=8;
+ Dbl_leftshiftby8(opnd2p1,opnd2p2);
+ }
+ if (Dbl_iszero_hiddenhigh3mantissa(opnd2p1)) {
+ dest_exponent+=4;
+ Dbl_leftshiftby4(opnd2p1,opnd2p2);
+ }
+ while (Dbl_iszero_hidden(opnd2p1)) {
+ dest_exponent++;
+ Dbl_leftshiftby1(opnd2p1,opnd2p2);
+ }
+ }
+
+ /* Divide the source mantissas */
+
+ /*
+ * A non-restoring divide algorithm is used.
+ */
+ Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
+ Dbl_setzero(opnd3p1,opnd3p2);
+ for (count=1; count <= DBL_P && (opnd1p1 || opnd1p2); count++) {
+ Dbl_leftshiftby1(opnd1p1,opnd1p2);
+ Dbl_leftshiftby1(opnd3p1,opnd3p2);
+ if (Dbl_iszero_sign(opnd1p1)) {
+ Dbl_setone_lowmantissap2(opnd3p2);
+ Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
+ }
+ else {
+ Twoword_add(opnd1p1, opnd1p2, opnd2p1, opnd2p2);
+ }
+ }
+ if (count <= DBL_P) {
+ Dbl_leftshiftby1(opnd3p1,opnd3p2);
+ Dbl_setone_lowmantissap2(opnd3p2);
+ Dbl_leftshift(opnd3p1,opnd3p2,(DBL_P-count));
+ if (Dbl_iszero_hidden(opnd3p1)) {
+ Dbl_leftshiftby1(opnd3p1,opnd3p2);
+ dest_exponent--;
+ }
+ }
+ else {
+ if (Dbl_iszero_hidden(opnd3p1)) {
+ /* need to get one more bit of result */
+ Dbl_leftshiftby1(opnd1p1,opnd1p2);
+ Dbl_leftshiftby1(opnd3p1,opnd3p2);
+ if (Dbl_iszero_sign(opnd1p1)) {
+ Dbl_setone_lowmantissap2(opnd3p2);
+ Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
+ }
+ else {
+ Twoword_add(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
+ }
+ dest_exponent--;
+ }
+ if (Dbl_iszero_sign(opnd1p1)) guardbit = TRUE;
+ stickybit = Dbl_allp1(opnd1p1) || Dbl_allp2(opnd1p2);
+ }
+ inexact = guardbit | stickybit;
+
+ /*
+ * round result
+ */
+ if (inexact && (dest_exponent > 0 || Is_underflowtrap_enabled())) {
+ Dbl_clear_signexponent(opnd3p1);
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(resultp1))
+ Dbl_increment(opnd3p1,opnd3p2);
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(resultp1))
+ Dbl_increment(opnd3p1,opnd3p2);
+ break;
+ case ROUNDNEAREST:
+ if (guardbit && (stickybit ||
+ Dbl_isone_lowmantissap2(opnd3p2))) {
+ Dbl_increment(opnd3p1,opnd3p2);
+ }
+ }
+ if (Dbl_isone_hidden(opnd3p1)) dest_exponent++;
+ }
+ Dbl_set_mantissa(resultp1,resultp2,opnd3p1,opnd3p2);
+
+ /*
+ * Test for overflow
+ */
+ if (dest_exponent >= DBL_INFINITY_EXPONENT) {
+ /* trap if OVERFLOWTRAP enabled */
+ if (Is_overflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Dbl_setwrapped_exponent(resultp1,dest_exponent,ovfl);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(OVERFLOWEXCEPTION);
+ }
+ Set_overflowflag();
+ /* set result to infinity or largest number */
+ Dbl_setoverflow(resultp1,resultp2);
+ inexact = TRUE;
+ }
+ /*
+ * Test for underflow
+ */
+ else if (dest_exponent <= 0) {
+ /* trap if UNDERFLOWTRAP enabled */
+ if (Is_underflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(UNDERFLOWEXCEPTION);
+ }
+
+ /* Determine if should set underflow flag */
+ is_tiny = TRUE;
+ if (dest_exponent == 0 && inexact) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(resultp1)) {
+ Dbl_increment(opnd3p1,opnd3p2);
+ if (Dbl_isone_hiddenoverflow(opnd3p1))
+ is_tiny = FALSE;
+ Dbl_decrement(opnd3p1,opnd3p2);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(resultp1)) {
+ Dbl_increment(opnd3p1,opnd3p2);
+ if (Dbl_isone_hiddenoverflow(opnd3p1))
+ is_tiny = FALSE;
+ Dbl_decrement(opnd3p1,opnd3p2);
+ }
+ break;
+ case ROUNDNEAREST:
+ if (guardbit && (stickybit ||
+ Dbl_isone_lowmantissap2(opnd3p2))) {
+ Dbl_increment(opnd3p1,opnd3p2);
+ if (Dbl_isone_hiddenoverflow(opnd3p1))
+ is_tiny = FALSE;
+ Dbl_decrement(opnd3p1,opnd3p2);
+ }
+ break;
+ }
+ }
+
+ /*
+ * denormalize result or set to signed zero
+ */
+ stickybit = inexact;
+ Dbl_denormalize(opnd3p1,opnd3p2,dest_exponent,guardbit,
+ stickybit,inexact);
+
+ /* return rounded number */
+ if (inexact) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(resultp1)) {
+ Dbl_increment(opnd3p1,opnd3p2);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(resultp1)) {
+ Dbl_increment(opnd3p1,opnd3p2);
+ }
+ break;
+ case ROUNDNEAREST:
+ if (guardbit && (stickybit ||
+ Dbl_isone_lowmantissap2(opnd3p2))) {
+ Dbl_increment(opnd3p1,opnd3p2);
+ }
+ break;
+ }
+ if (is_tiny) Set_underflowflag();
+ }
+ Dbl_set_exponentmantissa(resultp1,resultp2,opnd3p1,opnd3p2);
+ }
+ else Dbl_set_exponent(resultp1,dest_exponent);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+
+ /* check for inexact */
+ if (inexact) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ return(NOEXCEPTION);
+}
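
The mantissa loop in dbl_fdiv above is a non-restoring divide: the partial remainder is shifted left once per quotient bit, and the divisor is subtracted when the remainder is non-negative or added back when it is negative, so no per-step restore is required. The same recurrence on ordinary integers, as a minimal sketch in plain C (not the kernel's Dbl_* two-word form; assumes d != 0):

    /* Non-restoring division sketch: returns n / d and stores n % d in *r. */
    static unsigned int nr_div(unsigned int n, unsigned int d, unsigned int *r)
    {
            long long rem = 0;
            unsigned int q = 0;
            int i;

            for (i = 31; i >= 0; i--) {
                    int was_negative = (rem < 0);

                    rem = rem * 2 + ((n >> i) & 1);         /* shift in next dividend bit */
                    rem = was_negative ? rem + d : rem - d; /* add back or subtract */
                    q <<= 1;
                    if (rem >= 0)
                            q |= 1;                         /* quotient bit for this step */
            }
            if (rem < 0)                                    /* final remainder correction */
                    rem += d;
            *r = (unsigned int)rem;
            return q;
    }
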
diff --git a/arch/parisc/math-emu/dfmpy.c b/arch/parisc/math-emu/dfmpy.c
new file mode 100644
index 00000000..4380f5a6
--- /dev/null
+++ b/arch/parisc/math-emu/dfmpy.c
@@ -0,0 +1,394 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/dfmpy.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Double Precision Floating-point Multiply
+ *
+ * External Interfaces:
+ * dbl_fmpy(srcptr1,srcptr2,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "dbl_float.h"
+
+/*
+ * Double Precision Floating-point Multiply
+ */
+
+int
+dbl_fmpy(
+ dbl_floating_point *srcptr1,
+ dbl_floating_point *srcptr2,
+ dbl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2;
+ register unsigned int opnd3p1, opnd3p2, resultp1, resultp2;
+ register int dest_exponent, count;
+ register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
+ boolean is_tiny;
+
+ Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2);
+ Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2);
+
+ /*
+ * set sign bit of result
+ */
+ if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1))
+ Dbl_setnegativezerop1(resultp1);
+ else Dbl_setzerop1(resultp1);
+ /*
+ * check first operand for NaN's or infinity
+ */
+ if (Dbl_isinfinity_exponent(opnd1p1)) {
+ if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+ if (Dbl_isnotnan(opnd2p1,opnd2p2)) {
+ if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
+ /*
+ * invalid since operands are infinity
+ * and zero
+ */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return infinity
+ */
+ Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(opnd1p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd1p1);
+ }
+ /*
+ * is second operand a signaling NaN?
+ */
+ else if (Dbl_is_signalingnan(opnd2p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd2p1);
+ Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * check second operand for NaN's or infinity
+ */
+ if (Dbl_isinfinity_exponent(opnd2p1)) {
+ if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
+ if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
+ /* invalid since operands are zero & infinity */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(opnd2p1,opnd2p2);
+ Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return infinity
+ */
+ Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(opnd2p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd2p1);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate exponent
+ */
+ dest_exponent = Dbl_exponent(opnd1p1) + Dbl_exponent(opnd2p1) -DBL_BIAS;
+
+ /*
+ * Generate mantissa
+ */
+ if (Dbl_isnotzero_exponent(opnd1p1)) {
+ /* set hidden bit */
+ Dbl_clear_signexponent_set_hidden(opnd1p1);
+ }
+ else {
+ /* check for zero */
+ if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+ Dbl_setzero_exponentmantissa(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /* is denormalized, adjust exponent */
+ Dbl_clear_signexponent(opnd1p1);
+ Dbl_leftshiftby1(opnd1p1,opnd1p2);
+ Dbl_normalize(opnd1p1,opnd1p2,dest_exponent);
+ }
+ /* opnd2 needs to have hidden bit set with msb in hidden bit */
+ if (Dbl_isnotzero_exponent(opnd2p1)) {
+ Dbl_clear_signexponent_set_hidden(opnd2p1);
+ }
+ else {
+ /* check for zero */
+ if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
+ Dbl_setzero_exponentmantissa(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /* is denormalized; want to normalize */
+ Dbl_clear_signexponent(opnd2p1);
+ Dbl_leftshiftby1(opnd2p1,opnd2p2);
+ Dbl_normalize(opnd2p1,opnd2p2,dest_exponent);
+ }
+
+ /* Multiply two source mantissas together */
+
+ /* make room for guard bits */
+ Dbl_leftshiftby7(opnd2p1,opnd2p2);
+ Dbl_setzero(opnd3p1,opnd3p2);
+ /*
+ * Four bits at a time are inspected in each loop, and a
+ * simple shift and add multiply algorithm is used.
+ */
+ for (count=1;count<=DBL_P;count+=4) {
+ stickybit |= Dlow4p2(opnd3p2);
+ Dbl_rightshiftby4(opnd3p1,opnd3p2);
+ if (Dbit28p2(opnd1p2)) {
+ /* Twoword_add should be an ADDC followed by an ADD. */
+ Twoword_add(opnd3p1, opnd3p2, opnd2p1<<3 | opnd2p2>>29,
+ opnd2p2<<3);
+ }
+ if (Dbit29p2(opnd1p2)) {
+ Twoword_add(opnd3p1, opnd3p2, opnd2p1<<2 | opnd2p2>>30,
+ opnd2p2<<2);
+ }
+ if (Dbit30p2(opnd1p2)) {
+ Twoword_add(opnd3p1, opnd3p2, opnd2p1<<1 | opnd2p2>>31,
+ opnd2p2<<1);
+ }
+ if (Dbit31p2(opnd1p2)) {
+ Twoword_add(opnd3p1, opnd3p2, opnd2p1, opnd2p2);
+ }
+ Dbl_rightshiftby4(opnd1p1,opnd1p2);
+ }
+ if (Dbit3p1(opnd3p1)==0) {
+ Dbl_leftshiftby1(opnd3p1,opnd3p2);
+ }
+ else {
+ /* result mantissa >= 2. */
+ dest_exponent++;
+ }
+ /* check for denormalized result */
+ while (Dbit3p1(opnd3p1)==0) {
+ Dbl_leftshiftby1(opnd3p1,opnd3p2);
+ dest_exponent--;
+ }
+ /*
+ * check for guard, sticky and inexact bits
+ */
+ stickybit |= Dallp2(opnd3p2) << 25;
+ guardbit = (Dallp2(opnd3p2) << 24) >> 31;
+ inexact = guardbit | stickybit;
+
+ /* align result mantissa */
+ Dbl_rightshiftby8(opnd3p1,opnd3p2);
+
+ /*
+ * round result
+ */
+ if (inexact && (dest_exponent>0 || Is_underflowtrap_enabled())) {
+ Dbl_clear_signexponent(opnd3p1);
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(resultp1))
+ Dbl_increment(opnd3p1,opnd3p2);
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(resultp1))
+ Dbl_increment(opnd3p1,opnd3p2);
+ break;
+ case ROUNDNEAREST:
+ if (guardbit) {
+ if (stickybit || Dbl_isone_lowmantissap2(opnd3p2))
+ Dbl_increment(opnd3p1,opnd3p2);
+ }
+ }
+ if (Dbl_isone_hidden(opnd3p1)) dest_exponent++;
+ }
+ Dbl_set_mantissa(resultp1,resultp2,opnd3p1,opnd3p2);
+
+ /*
+ * Test for overflow
+ */
+ if (dest_exponent >= DBL_INFINITY_EXPONENT) {
+ /* trap if OVERFLOWTRAP enabled */
+ if (Is_overflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Dbl_setwrapped_exponent(resultp1,dest_exponent,ovfl);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return (OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return (OVERFLOWEXCEPTION);
+ }
+ inexact = TRUE;
+ Set_overflowflag();
+ /* set result to infinity or largest number */
+ Dbl_setoverflow(resultp1,resultp2);
+ }
+ /*
+ * Test for underflow
+ */
+ else if (dest_exponent <= 0) {
+ /* trap if UNDERFLOWTRAP enabled */
+ if (Is_underflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return (UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return (UNDERFLOWEXCEPTION);
+ }
+
+ /* Determine if should set underflow flag */
+ is_tiny = TRUE;
+ if (dest_exponent == 0 && inexact) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(resultp1)) {
+ Dbl_increment(opnd3p1,opnd3p2);
+ if (Dbl_isone_hiddenoverflow(opnd3p1))
+ is_tiny = FALSE;
+ Dbl_decrement(opnd3p1,opnd3p2);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(resultp1)) {
+ Dbl_increment(opnd3p1,opnd3p2);
+ if (Dbl_isone_hiddenoverflow(opnd3p1))
+ is_tiny = FALSE;
+ Dbl_decrement(opnd3p1,opnd3p2);
+ }
+ break;
+ case ROUNDNEAREST:
+ if (guardbit && (stickybit ||
+ Dbl_isone_lowmantissap2(opnd3p2))) {
+ Dbl_increment(opnd3p1,opnd3p2);
+ if (Dbl_isone_hiddenoverflow(opnd3p1))
+ is_tiny = FALSE;
+ Dbl_decrement(opnd3p1,opnd3p2);
+ }
+ break;
+ }
+ }
+
+ /*
+ * denormalize result or set to signed zero
+ */
+ stickybit = inexact;
+ Dbl_denormalize(opnd3p1,opnd3p2,dest_exponent,guardbit,
+ stickybit,inexact);
+
+ /* return zero or smallest number */
+ if (inexact) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(resultp1)) {
+ Dbl_increment(opnd3p1,opnd3p2);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(resultp1)) {
+ Dbl_increment(opnd3p1,opnd3p2);
+ }
+ break;
+ case ROUNDNEAREST:
+ if (guardbit && (stickybit ||
+ Dbl_isone_lowmantissap2(opnd3p2))) {
+ Dbl_increment(opnd3p1,opnd3p2);
+ }
+ break;
+ }
+ if (is_tiny) Set_underflowflag();
+ }
+ Dbl_set_exponentmantissa(resultp1,resultp2,opnd3p1,opnd3p2);
+ }
+ else Dbl_set_exponent(resultp1,dest_exponent);
+ /* check for inexact */
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ return(NOEXCEPTION);
+}
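
The multiply loop in dbl_fmpy above is a plain shift-and-add scheme that inspects four multiplier bits per pass while the partial product is shifted right, which is why scaled copies of opnd2 are added at offsets 3 down to 0. A narrower stand-in in plain C, computing a 32x32 -> 64 bit product with the same structure (a sketch only, not the Dbl_*/Twoword_* macros):

    /* Shift-and-add multiply, four multiplier bits per pass (sketch). */
    static unsigned long long mpy_4bits_per_pass(unsigned int a, unsigned int b)
    {
            unsigned long long acc = 0;
            int pass;

            for (pass = 0; pass < 32; pass += 4) {
                    acc >>= 4;      /* make room for the more significant partial sums */
                    if (b & (1u << pass))
                            acc += (unsigned long long)a << 28;
                    if (b & (1u << (pass + 1)))
                            acc += (unsigned long long)a << 29;
                    if (b & (1u << (pass + 2)))
                            acc += (unsigned long long)a << 30;
                    if (b & (1u << (pass + 3)))
                            acc += (unsigned long long)a << 31;
            }
            return acc;             /* equals (unsigned long long)a * b */
    }
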
diff --git a/arch/parisc/math-emu/dfrem.c b/arch/parisc/math-emu/dfrem.c
new file mode 100644
index 00000000..b9837853
--- /dev/null
+++ b/arch/parisc/math-emu/dfrem.c
@@ -0,0 +1,297 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/dfrem.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Double Precision Floating-point Remainder
+ *
+ * External Interfaces:
+ * dbl_frem(srcptr1,srcptr2,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+
+#include "float.h"
+#include "dbl_float.h"
+
+/*
+ * Double Precision Floating-point Remainder
+ */
+
+int
+dbl_frem (dbl_floating_point * srcptr1, dbl_floating_point * srcptr2,
+ dbl_floating_point * dstptr, unsigned int *status)
+{
+ register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2;
+ register unsigned int resultp1, resultp2;
+ register int opnd1_exponent, opnd2_exponent, dest_exponent, stepcount;
+ register boolean roundup = FALSE;
+
+ Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2);
+ Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2);
+ /*
+ * check first operand for NaN's or infinity
+ */
+ if ((opnd1_exponent = Dbl_exponent(opnd1p1)) == DBL_INFINITY_EXPONENT) {
+ if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+ if (Dbl_isnotnan(opnd2p1,opnd2p2)) {
+ /* invalid since first operand is infinity */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(opnd1p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd1p1);
+ }
+ /*
+ * is second operand a signaling NaN?
+ */
+ else if (Dbl_is_signalingnan(opnd2p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd2p1);
+ Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * check second operand for NaN's or infinity
+ */
+ if ((opnd2_exponent = Dbl_exponent(opnd2p1)) == DBL_INFINITY_EXPONENT) {
+ if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
+ /*
+ * return first operand
+ */
+ Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(opnd2p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd2p1);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * check second operand for zero
+ */
+ if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
+ /* invalid since second operand is zero */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * get sign of result
+ */
+ resultp1 = opnd1p1;
+
+ /*
+ * check for denormalized operands
+ */
+ if (opnd1_exponent == 0) {
+ /* check for zero */
+ if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+ Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /* normalize, then continue */
+ opnd1_exponent = 1;
+ Dbl_normalize(opnd1p1,opnd1p2,opnd1_exponent);
+ }
+ else {
+ Dbl_clear_signexponent_set_hidden(opnd1p1);
+ }
+ if (opnd2_exponent == 0) {
+ /* normalize, then continue */
+ opnd2_exponent = 1;
+ Dbl_normalize(opnd2p1,opnd2p2,opnd2_exponent);
+ }
+ else {
+ Dbl_clear_signexponent_set_hidden(opnd2p1);
+ }
+
+ /* find result exponent and divide step loop count */
+ dest_exponent = opnd2_exponent - 1;
+ stepcount = opnd1_exponent - opnd2_exponent;
+
+ /*
+ * check for opnd1/opnd2 < 1
+ */
+ if (stepcount < 0) {
+ /*
+ * check for opnd1/opnd2 > 1/2
+ *
+ * In this case n will round to 1, so
+ * r = opnd1 - opnd2
+ */
+ if (stepcount == -1 &&
+ Dbl_isgreaterthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
+ /* set sign */
+ Dbl_allp1(resultp1) = ~Dbl_allp1(resultp1);
+ /* align opnd2 with opnd1 */
+ Dbl_leftshiftby1(opnd2p1,opnd2p2);
+ Dbl_subtract(opnd2p1,opnd2p2,opnd1p1,opnd1p2,
+ opnd2p1,opnd2p2);
+ /* now normalize */
+ while (Dbl_iszero_hidden(opnd2p1)) {
+ Dbl_leftshiftby1(opnd2p1,opnd2p2);
+ dest_exponent--;
+ }
+ Dbl_set_exponentmantissa(resultp1,resultp2,opnd2p1,opnd2p2);
+ goto testforunderflow;
+ }
+ /*
+ * opnd1/opnd2 <= 1/2
+ *
+ * In this case n will round to zero, so
+ * r = opnd1
+ */
+ Dbl_set_exponentmantissa(resultp1,resultp2,opnd1p1,opnd1p2);
+ dest_exponent = opnd1_exponent;
+ goto testforunderflow;
+ }
+
+ /*
+ * Generate result
+ *
+ * Do iterative subtract until remainder is less than operand 2.
+ */
+ while (stepcount-- > 0 && (Dbl_allp1(opnd1p1) || Dbl_allp2(opnd1p2))) {
+ if (Dbl_isnotlessthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
+ Dbl_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2,opnd1p1,opnd1p2);
+ }
+ Dbl_leftshiftby1(opnd1p1,opnd1p2);
+ }
+ /*
+ * Do last subtract, then determine which way to round if remainder
+ * is exactly 1/2 of opnd2
+ */
+ if (Dbl_isnotlessthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
+ Dbl_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2,opnd1p1,opnd1p2);
+ roundup = TRUE;
+ }
+ if (stepcount > 0 || Dbl_iszero(opnd1p1,opnd1p2)) {
+ /* division is exact, remainder is zero */
+ Dbl_setzero_exponentmantissa(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * Check for cases where opnd1/opnd2 < n
+ *
+ * In this case the result's sign will be opposite that of
+ * opnd1. The mantissa also needs some correction.
+ */
+ Dbl_leftshiftby1(opnd1p1,opnd1p2);
+ if (Dbl_isgreaterthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
+ Dbl_invert_sign(resultp1);
+ Dbl_leftshiftby1(opnd2p1,opnd2p2);
+ Dbl_subtract(opnd2p1,opnd2p2,opnd1p1,opnd1p2,opnd1p1,opnd1p2);
+ }
+ /* check for remainder being exactly 1/2 of opnd2 */
+ else if (Dbl_isequal(opnd1p1,opnd1p2,opnd2p1,opnd2p2) && roundup) {
+ Dbl_invert_sign(resultp1);
+ }
+
+ /* normalize result's mantissa */
+ while (Dbl_iszero_hidden(opnd1p1)) {
+ dest_exponent--;
+ Dbl_leftshiftby1(opnd1p1,opnd1p2);
+ }
+ Dbl_set_exponentmantissa(resultp1,resultp2,opnd1p1,opnd1p2);
+
+ /*
+ * Test for underflow
+ */
+ testforunderflow:
+ if (dest_exponent <= 0) {
+ /* trap if UNDERFLOWTRAP enabled */
+ if (Is_underflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl);
+ /* frem is always exact */
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(UNDERFLOWEXCEPTION);
+ }
+ /*
+ * denormalize result or set to signed zero
+ */
+ if (dest_exponent >= (1 - DBL_P)) {
+ Dbl_rightshift_exponentmantissa(resultp1,resultp2,
+ 1-dest_exponent);
+ }
+ else {
+ Dbl_setzero_exponentmantissa(resultp1,resultp2);
+ }
+ }
+ else Dbl_set_exponent(resultp1,dest_exponent);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+}
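
dbl_frem above implements the IEEE-754 remainder, in which the implicit quotient is rounded to the nearest integer rather than truncated toward zero; that is why the code may flip the result's sign and subtract once more when the remainder exceeds half of opnd2. The semantic difference from fmod(), sketched with the host libm (illustrative only; the emulation itself works exactly, bit by bit, on the mantissas):

    #include <math.h>

    /* remainder(x, y) rounds the quotient to nearest (ties to even), so its
     * result lies in [-|y|/2, +|y|/2] and can be negative even for positive x;
     * fmod(x, y) truncates the quotient instead. */
    static void rem_vs_fmod(void)
    {
            double r1 = remainder(7.0, 4.0);        /* -1.0: quotient rounds to 2 */
            double r2 = fmod(7.0, 4.0);             /*  3.0: quotient truncates to 1 */

            (void)r1;
            (void)r2;
    }
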
diff --git a/arch/parisc/math-emu/dfsqrt.c b/arch/parisc/math-emu/dfsqrt.c
new file mode 100644
index 00000000..9542c6d2
--- /dev/null
+++ b/arch/parisc/math-emu/dfsqrt.c
@@ -0,0 +1,195 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/dfsqrt.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Double Floating-point Square Root
+ *
+ * External Interfaces:
+ * dbl_fsqrt(srcptr,nullptr,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "dbl_float.h"
+
+/*
+ * Double Floating-point Square Root
+ */
+
+/*ARGSUSED*/
+unsigned int
+dbl_fsqrt(
+ dbl_floating_point *srcptr,
+ unsigned int *nullptr,
+ dbl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register unsigned int srcp1, srcp2, resultp1, resultp2;
+ register unsigned int newbitp1, newbitp2, sump1, sump2;
+ register int src_exponent;
+ register boolean guardbit = FALSE, even_exponent;
+
+ Dbl_copyfromptr(srcptr,srcp1,srcp2);
+ /*
+ * check source operand for NaN or infinity
+ */
+ if ((src_exponent = Dbl_exponent(srcp1)) == DBL_INFINITY_EXPONENT) {
+ /*
+ * is signaling NaN?
+ */
+ if (Dbl_isone_signaling(srcp1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(srcp1);
+ }
+ /*
+ * Return quiet NaN or positive infinity.
+ * Fall through to negative test if negative infinity.
+ */
+ if (Dbl_iszero_sign(srcp1) ||
+ Dbl_isnotzero_mantissa(srcp1,srcp2)) {
+ Dbl_copytoptr(srcp1,srcp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * check for zero source operand
+ */
+ if (Dbl_iszero_exponentmantissa(srcp1,srcp2)) {
+ Dbl_copytoptr(srcp1,srcp2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * check for negative source operand
+ */
+ if (Dbl_isone_sign(srcp1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_makequietnan(srcp1,srcp2);
+ Dbl_copytoptr(srcp1,srcp2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * Generate result
+ */
+ if (src_exponent > 0) {
+ even_exponent = Dbl_hidden(srcp1);
+ Dbl_clear_signexponent_set_hidden(srcp1);
+ }
+ else {
+ /* normalize operand */
+ Dbl_clear_signexponent(srcp1);
+ src_exponent++;
+ Dbl_normalize(srcp1,srcp2,src_exponent);
+ even_exponent = src_exponent & 1;
+ }
+ if (even_exponent) {
+ /* exponent is even */
+ /* Add comment here. Explain why odd exponent needs correction */
+ Dbl_leftshiftby1(srcp1,srcp2);
+ }
+ /*
+ * Add comment here. Explain following algorithm.
+ *
+ * Trust me, it works.
+ *
+ */
+ Dbl_setzero(resultp1,resultp2);
+ Dbl_allp1(newbitp1) = 1 << (DBL_P - 32);
+ Dbl_setzero_mantissap2(newbitp2);
+ while (Dbl_isnotzero(newbitp1,newbitp2) && Dbl_isnotzero(srcp1,srcp2)) {
+ Dbl_addition(resultp1,resultp2,newbitp1,newbitp2,sump1,sump2);
+ if(Dbl_isnotgreaterthan(sump1,sump2,srcp1,srcp2)) {
+ Dbl_leftshiftby1(newbitp1,newbitp2);
+ /* update result */
+ Dbl_addition(resultp1,resultp2,newbitp1,newbitp2,
+ resultp1,resultp2);
+ Dbl_subtract(srcp1,srcp2,sump1,sump2,srcp1,srcp2);
+ Dbl_rightshiftby2(newbitp1,newbitp2);
+ }
+ else {
+ Dbl_rightshiftby1(newbitp1,newbitp2);
+ }
+ Dbl_leftshiftby1(srcp1,srcp2);
+ }
+ /* correct exponent for pre-shift */
+ if (even_exponent) {
+ Dbl_rightshiftby1(resultp1,resultp2);
+ }
+
+ /* check for inexact */
+ if (Dbl_isnotzero(srcp1,srcp2)) {
+ if (!even_exponent && Dbl_islessthan(resultp1,resultp2,srcp1,srcp2)) {
+ Dbl_increment(resultp1,resultp2);
+ }
+ guardbit = Dbl_lowmantissap2(resultp2);
+ Dbl_rightshiftby1(resultp1,resultp2);
+
+ /* now round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ Dbl_increment(resultp1,resultp2);
+ break;
+ case ROUNDNEAREST:
+ /* stickybit is always true, so guardbit
+ * is enough to determine rounding */
+ if (guardbit) {
+ Dbl_increment(resultp1,resultp2);
+ }
+ break;
+ }
+ /* increment result exponent by 1 if mantissa overflowed */
+ if (Dbl_isone_hiddenoverflow(resultp1)) src_exponent+=2;
+
+ if (Is_inexacttrap_enabled()) {
+ Dbl_set_exponent(resultp1,
+ ((src_exponent-DBL_BIAS)>>1)+DBL_BIAS);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(INEXACTEXCEPTION);
+ }
+ else Set_inexactflag();
+ }
+ else {
+ Dbl_rightshiftby1(resultp1,resultp2);
+ }
+ Dbl_set_exponent(resultp1,((src_exponent-DBL_BIAS)>>1)+DBL_BIAS);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+}
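
The undocumented loop in dbl_fsqrt above is the classic bit-by-bit (digit-recurrence) square root: each pass proposes the next result bit, keeps it only if the running square still fits under the radicand, and subtracts exactly the amount that accepting the bit adds to that square. The integer form of the same recurrence is compact enough to show whole (a sketch, not the Dbl_* word-pair code):

    /* Bit-by-bit integer square root: returns floor(sqrt(x)). */
    static unsigned int isqrt_sketch(unsigned long long x)
    {
            unsigned long long rem = x, result = 0;
            unsigned long long bit = 1ULL << 62;    /* highest power of four in 64 bits */

            while (bit != 0) {
                    if (rem >= result + bit) {
                            rem -= result + bit;    /* accept this result bit */
                            result = (result >> 1) + bit;
                    } else {
                            result >>= 1;           /* reject this result bit */
                    }
                    bit >>= 2;
            }
            return (unsigned int)result;
    }
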
diff --git a/arch/parisc/math-emu/dfsub.c b/arch/parisc/math-emu/dfsub.c
new file mode 100644
index 00000000..2e8b5a79
--- /dev/null
+++ b/arch/parisc/math-emu/dfsub.c
@@ -0,0 +1,526 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/dfsub.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Double_subtract: subtract two double precision values.
+ *
+ * External Interfaces:
+ * dbl_fsub(leftptr, rightptr, dstptr, status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "dbl_float.h"
+
+/*
+ * Double_subtract: subtract two double precision values.
+ */
+int
+dbl_fsub(
+ dbl_floating_point *leftptr,
+ dbl_floating_point *rightptr,
+ dbl_floating_point *dstptr,
+ unsigned int *status)
+ {
+ register unsigned int signless_upper_left, signless_upper_right, save;
+ register unsigned int leftp1, leftp2, rightp1, rightp2, extent;
+ register unsigned int resultp1 = 0, resultp2 = 0;
+
+ register int result_exponent, right_exponent, diff_exponent;
+ register int sign_save, jumpsize;
+ register boolean inexact = FALSE, underflowtrap;
+
+ /* Create local copies of the numbers */
+ Dbl_copyfromptr(leftptr,leftp1,leftp2);
+ Dbl_copyfromptr(rightptr,rightp1,rightp2);
+
+ /* A zero "save" helps discover equal operands (for later), *
+ * and is used in swapping operands (if needed). */
+ Dbl_xortointp1(leftp1,rightp1,/*to*/save);
+
+ /*
+ * check first operand for NaN's or infinity
+ */
+ if ((result_exponent = Dbl_exponent(leftp1)) == DBL_INFINITY_EXPONENT)
+ {
+ if (Dbl_iszero_mantissa(leftp1,leftp2))
+ {
+ if (Dbl_isnotnan(rightp1,rightp2))
+ {
+ if (Dbl_isinfinity(rightp1,rightp2) && save==0)
+ {
+ /*
+ * invalid since operands are same signed infinity's
+ */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return infinity
+ */
+ Dbl_copytoptr(leftp1,leftp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else
+ {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(leftp1))
+ {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(leftp1);
+ }
+ /*
+ * is second operand a signaling NaN?
+ */
+ else if (Dbl_is_signalingnan(rightp1))
+ {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(rightp1);
+ Dbl_copytoptr(rightp1,rightp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(leftp1,leftp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ } /* End left NaN or Infinity processing */
+ /*
+ * check second operand for NaN's or infinity
+ */
+ if (Dbl_isinfinity_exponent(rightp1))
+ {
+ if (Dbl_iszero_mantissa(rightp1,rightp2))
+ {
+ /* return infinity */
+ Dbl_invert_sign(rightp1);
+ Dbl_copytoptr(rightp1,rightp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(rightp1))
+ {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(rightp1);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(rightp1,rightp2,dstptr);
+ return(NOEXCEPTION);
+ } /* End right NaN or Infinity processing */
+
+ /* Invariant: Must be dealing with finite numbers */
+
+ /* Compare operands by removing the sign */
+ Dbl_copytoint_exponentmantissap1(leftp1,signless_upper_left);
+ Dbl_copytoint_exponentmantissap1(rightp1,signless_upper_right);
+
+ /* sign difference selects add or sub operation. */
+ if(Dbl_ismagnitudeless(leftp2,rightp2,signless_upper_left,signless_upper_right))
+ {
+ /* Set the left operand to the larger one by XOR swap *
+ * First finish the first word using "save" */
+ Dbl_xorfromintp1(save,rightp1,/*to*/rightp1);
+ Dbl_xorfromintp1(save,leftp1,/*to*/leftp1);
+ Dbl_swap_lower(leftp2,rightp2);
+ result_exponent = Dbl_exponent(leftp1);
+ Dbl_invert_sign(leftp1);
+ }
+ /* Invariant: left is not smaller than right. */
+
+ if((right_exponent = Dbl_exponent(rightp1)) == 0)
+ {
+ /* Denormalized operands. First look for zeroes */
+ if(Dbl_iszero_mantissa(rightp1,rightp2))
+ {
+ /* right is zero */
+ if(Dbl_iszero_exponentmantissa(leftp1,leftp2))
+ {
+ /* Both operands are zeros */
+ Dbl_invert_sign(rightp1);
+ if(Is_rounding_mode(ROUNDMINUS))
+ {
+ Dbl_or_signs(leftp1,/*with*/rightp1);
+ }
+ else
+ {
+ Dbl_and_signs(leftp1,/*with*/rightp1);
+ }
+ }
+ else
+ {
+ /* Left is not a zero and must be the result. Trapped
+ * underflows are signaled if left is denormalized. Result
+ * is always exact. */
+ if( (result_exponent == 0) && Is_underflowtrap_enabled() )
+ {
+ /* need to normalize results mantissa */
+ sign_save = Dbl_signextendedsign(leftp1);
+ Dbl_leftshiftby1(leftp1,leftp2);
+ Dbl_normalize(leftp1,leftp2,result_exponent);
+ Dbl_set_sign(leftp1,/*using*/sign_save);
+ Dbl_setwrapped_exponent(leftp1,result_exponent,unfl);
+ Dbl_copytoptr(leftp1,leftp2,dstptr);
+ /* inexact = FALSE */
+ return(UNDERFLOWEXCEPTION);
+ }
+ }
+ Dbl_copytoptr(leftp1,leftp2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /* Neither are zeroes */
+ Dbl_clear_sign(rightp1); /* Exponent is already cleared */
+ if(result_exponent == 0 )
+ {
+ /* Both operands are denormalized. The result must be exact
+ * and is simply calculated. A sum could become normalized and a
+ * difference could cancel to a true zero. */
+ if( (/*signed*/int) save >= 0 )
+ {
+ Dbl_subtract(leftp1,leftp2,/*minus*/rightp1,rightp2,
+ /*into*/resultp1,resultp2);
+ if(Dbl_iszero_mantissa(resultp1,resultp2))
+ {
+ if(Is_rounding_mode(ROUNDMINUS))
+ {
+ Dbl_setone_sign(resultp1);
+ }
+ else
+ {
+ Dbl_setzero_sign(resultp1);
+ }
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else
+ {
+ Dbl_addition(leftp1,leftp2,rightp1,rightp2,
+ /*into*/resultp1,resultp2);
+ if(Dbl_isone_hidden(resultp1))
+ {
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ if(Is_underflowtrap_enabled())
+ {
+ /* need to normalize result */
+ sign_save = Dbl_signextendedsign(resultp1);
+ Dbl_leftshiftby1(resultp1,resultp2);
+ Dbl_normalize(resultp1,resultp2,result_exponent);
+ Dbl_set_sign(resultp1,/*using*/sign_save);
+ Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ /* inexact = FALSE */
+ return(UNDERFLOWEXCEPTION);
+ }
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ right_exponent = 1; /* Set exponent to reflect different bias
+ * with denormalized numbers. */
+ }
+ else
+ {
+ Dbl_clear_signexponent_set_hidden(rightp1);
+ }
+ Dbl_clear_exponent_set_hidden(leftp1);
+ diff_exponent = result_exponent - right_exponent;
+
+ /*
+ * Special case alignment of operands that would force alignment
+ * beyond the extent of the extension. A further optimization
+ * could special case this but only reduces the path length for this
+ * infrequent case.
+ */
+ if(diff_exponent > DBL_THRESHOLD)
+ {
+ diff_exponent = DBL_THRESHOLD;
+ }
+
+ /* Align right operand by shifting to right */
+ Dbl_right_align(/*operand*/rightp1,rightp2,/*shifted by*/diff_exponent,
+ /*and lower to*/extent);
+
+ /* Treat sum and difference of the operands separately. */
+ if( (/*signed*/int) save >= 0 )
+ {
+ /*
+ * Difference of the two operands. There can be no overflow. A
+ * borrow can occur out of the hidden bit and force a post
+ * normalization phase.
+ */
+ Dbl_subtract_withextension(leftp1,leftp2,/*minus*/rightp1,rightp2,
+ /*with*/extent,/*into*/resultp1,resultp2);
+ if(Dbl_iszero_hidden(resultp1))
+ {
+ /* Handle normalization */
+ /* A straightforward algorithm would now shift the result
+ * and extension left until the hidden bit becomes one. Not
+ * all of the extension bits need participate in the shift.
+ * Only the two most significant bits (round and guard) are
+ * needed. If only a single shift is needed then the guard
+ * bit becomes a significant low order bit and the extension
+ * must participate in the rounding. If more than a single
+ * shift is needed, then all bits to the right of the guard
+ * bit are zeros, and the guard bit may or may not be zero. */
+ sign_save = Dbl_signextendedsign(resultp1);
+ Dbl_leftshiftby1_withextent(resultp1,resultp2,extent,resultp1,resultp2);
+
+ /* Need to check for a zero result. The sign and exponent
+ * fields have already been zeroed. The more efficient test
+ * of the full object can be used.
+ */
+ if(Dbl_iszero(resultp1,resultp2))
+ /* Must have been "x-x" or "x+(-x)". */
+ {
+ if(Is_rounding_mode(ROUNDMINUS)) Dbl_setone_sign(resultp1);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ result_exponent--;
+ /* Look to see if normalization is finished. */
+ if(Dbl_isone_hidden(resultp1))
+ {
+ if(result_exponent==0)
+ {
+ /* Denormalized, exponent should be zero. Left operand *
+ * was normalized, so extent (guard, round) was zero */
+ goto underflow;
+ }
+ else
+ {
+ /* No further normalization is needed. */
+ Dbl_set_sign(resultp1,/*using*/sign_save);
+ Ext_leftshiftby1(extent);
+ goto round;
+ }
+ }
+
+ /* Check for denormalized, exponent should be zero. Left *
+ * operand was normalized, so extent (guard, round) was zero */
+ if(!(underflowtrap = Is_underflowtrap_enabled()) &&
+ result_exponent==0) goto underflow;
+
+ /* Shift extension to complete one bit of normalization and
+ * update exponent. */
+ Ext_leftshiftby1(extent);
+
+ /* Discover first one bit to determine shift amount. Use a
+ * modified binary search. We have already shifted the result
+ * one position right and still not found a one so the remainder
+ * of the extension must be zero and simplifies rounding. */
+ /* Scan bytes */
+ while(Dbl_iszero_hiddenhigh7mantissa(resultp1))
+ {
+ Dbl_leftshiftby8(resultp1,resultp2);
+ if((result_exponent -= 8) <= 0 && !underflowtrap)
+ goto underflow;
+ }
+ /* Now narrow it down to the nibble */
+ if(Dbl_iszero_hiddenhigh3mantissa(resultp1))
+ {
+ /* The lower nibble contains the normalizing one */
+ Dbl_leftshiftby4(resultp1,resultp2);
+ if((result_exponent -= 4) <= 0 && !underflowtrap)
+ goto underflow;
+ }
+ /* Select the case where the first bit is set (already normalized);
+ * otherwise select the proper shift. */
+ if((jumpsize = Dbl_hiddenhigh3mantissa(resultp1)) > 7)
+ {
+ /* Already normalized */
+ if(result_exponent <= 0) goto underflow;
+ Dbl_set_sign(resultp1,/*using*/sign_save);
+ Dbl_set_exponent(resultp1,/*using*/result_exponent);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ Dbl_sethigh4bits(resultp1,/*using*/sign_save);
+ switch(jumpsize)
+ {
+ case 1:
+ {
+ Dbl_leftshiftby3(resultp1,resultp2);
+ result_exponent -= 3;
+ break;
+ }
+ case 2:
+ case 3:
+ {
+ Dbl_leftshiftby2(resultp1,resultp2);
+ result_exponent -= 2;
+ break;
+ }
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ {
+ Dbl_leftshiftby1(resultp1,resultp2);
+ result_exponent -= 1;
+ break;
+ }
+ }
+ if(result_exponent > 0)
+ {
+ Dbl_set_exponent(resultp1,/*using*/result_exponent);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION); /* Sign bit is already set */
+ }
+ /* Fixup potential underflows */
+ underflow:
+ if(Is_underflowtrap_enabled())
+ {
+ Dbl_set_sign(resultp1,sign_save);
+ Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ /* inexact = FALSE */
+ return(UNDERFLOWEXCEPTION);
+ }
+ /*
+ * Since we cannot get an inexact denormalized result,
+ * we can now return.
+ */
+ Dbl_fix_overshift(resultp1,resultp2,(1-result_exponent),extent);
+ Dbl_clear_signexponent(resultp1);
+ Dbl_set_sign(resultp1,sign_save);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ } /* end if(hidden...)... */
+ /* Fall through and round */
+ } /* end if(save >= 0)... */
+ else
+ {
+ /* Signs differ, so add the magnitudes */
+ Dbl_addition(leftp1,leftp2,rightp1,rightp2,/*to*/resultp1,resultp2);
+ if(Dbl_isone_hiddenoverflow(resultp1))
+ {
+ /* Prenormalization required. */
+ Dbl_rightshiftby1_withextent(resultp2,extent,extent);
+ Dbl_arithrightshiftby1(resultp1,resultp2);
+ result_exponent++;
+ } /* end if hiddenoverflow... */
+ } /* end else ...subtract magnitudes... */
+
+ /* Round the result. If the extension is all zeros, then the result is
+ * exact. Otherwise round in the correct direction. No underflow is
+ * possible. If a postnormalization is necessary, then the mantissa is
+ * all zeros so no shift is needed. */
+ round:
+ if(Ext_isnotzero(extent))
+ {
+ inexact = TRUE;
+ switch(Rounding_mode())
+ {
+ case ROUNDNEAREST: /* The default. */
+ if(Ext_isone_sign(extent))
+ {
+ /* at least 1/2 ulp */
+ if(Ext_isnotzero_lower(extent) ||
+ Dbl_isone_lowmantissap2(resultp2))
+ {
+ /* either exactly halfway and odd, or more than 1/2 ulp */
+ Dbl_increment(resultp1,resultp2);
+ }
+ }
+ break;
+
+ case ROUNDPLUS:
+ if(Dbl_iszero_sign(resultp1))
+ {
+ /* Round up positive results */
+ Dbl_increment(resultp1,resultp2);
+ }
+ break;
+
+ case ROUNDMINUS:
+ if(Dbl_isone_sign(resultp1))
+ {
+ /* Round down negative results */
+ Dbl_increment(resultp1,resultp2);
+ }
+
+ case ROUNDZERO:;
+ /* truncate is simple */
+ } /* end switch... */
+ if(Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
+ }
+ if(result_exponent == DBL_INFINITY_EXPONENT)
+ {
+ /* Overflow */
+ if(Is_overflowtrap_enabled())
+ {
+ Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(OVERFLOWEXCEPTION);
+ }
+ else
+ {
+ inexact = TRUE;
+ Set_overflowflag();
+ Dbl_setoverflow(resultp1,resultp2);
+ }
+ }
+ else Dbl_set_exponent(resultp1,result_exponent);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if(inexact)
+ if(Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(NOEXCEPTION);
+ }
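
Two idioms in dbl_fsub above are worth spelling out: the XOR of the operands' upper words ("save") has its top bit set exactly when the operands' signs differ, which selects magnitude subtraction versus magnitude addition, and the same XOR value is later reused to swap the upper words without a temporary. A stand-alone illustration on 32-bit words (assuming only that the sign occupies bit 31, as in IEEE-754):

    /* Sign-difference test and XOR swap of the upper words (sketch). */
    static int sign_diff_and_swap(unsigned int *leftp1, unsigned int *rightp1)
    {
            unsigned int save = *leftp1 ^ *rightp1;

            /* swap the two words using only 'save', no temporary needed */
            *rightp1 ^= save;       /* now holds the old *leftp1 */
            *leftp1 ^= save;        /* now holds the old *rightp1 */

            return (save >> 31) != 0;       /* 1 iff the sign bits differed */
    }
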
diff --git a/arch/parisc/math-emu/driver.c b/arch/parisc/math-emu/driver.c
new file mode 100644
index 00000000..09ef4136
--- /dev/null
+++ b/arch/parisc/math-emu/driver.c
@@ -0,0 +1,128 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * linux/arch/parisc/math-emu/driver.c
+ *
+ * decodes and dispatches unimplemented FPU instructions
+ *
+ * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 2001 Hewlett-Packard <bame@debian.org>
+ */
+
+#include <linux/sched.h>
+#include "float.h"
+#include "math-emu.h"
+
+
+#define fptpos 31
+#define fpr1pos 10
+#define extru(r,pos,len) (((r) >> (31-(pos))) & (( 1 << (len)) - 1))
+
+#define FPUDEBUG 0
+
+/* Format of the floating-point exception registers. */
+struct exc_reg {
+ unsigned int exception : 6;
+ unsigned int ei : 26;
+};
+
+/* Macros for grabbing bits of the instruction format from the 'ei'
+ field above. */
+/* Major opcode 0c and 0e */
+#define FP0CE_UID(i) (((i) >> 6) & 3)
+#define FP0CE_CLASS(i) (((i) >> 9) & 3)
+#define FP0CE_SUBOP(i) (((i) >> 13) & 7)
+#define FP0CE_SUBOP1(i) (((i) >> 15) & 7) /* Class 1 subopcode */
+#define FP0C_FORMAT(i) (((i) >> 11) & 3)
+#define FP0E_FORMAT(i) (((i) >> 11) & 1)
+
+/* Major opcode 0c, uid 2 (performance monitoring) */
+#define FPPM_SUBOP(i) (((i) >> 9) & 0x1f)
+
+/* Major opcode 2e (fused operations). */
+#define FP2E_SUBOP(i) (((i) >> 5) & 1)
+#define FP2E_FORMAT(i) (((i) >> 11) & 1)
+
+/* Major opcode 26 (FMPYSUB) */
+/* Major opcode 06 (FMPYADD) */
+#define FPx6_FORMAT(i) ((i) & 0x1f)
+
+/* Flags and enable bits of the status word. */
+#define FPSW_FLAGS(w) ((w) >> 27)
+#define FPSW_ENABLE(w) ((w) & 0x1f)
+#define FPSW_V (1<<4)
+#define FPSW_Z (1<<3)
+#define FPSW_O (1<<2)
+#define FPSW_U (1<<1)
+#define FPSW_I (1<<0)
+
+/* Handle a floating point exception. Return zero if the faulting
+ instruction can be completed successfully. */
+int
+handle_fpe(struct pt_regs *regs)
+{
+ extern void printbinary(unsigned long x, int nbits);
+ struct siginfo si;
+ unsigned int orig_sw, sw;
+ int signalcode;
+ /* need an intermediate copy of float regs because FPU emulation
+ * code expects an artificial last entry which contains zero
+ *
+ * also, the passed in fr registers contain one word that defines
+ * the fpu type. the fpu type information is constructed
+ * inside the emulation code
+ */
+ __u64 frcopy[36];
+
+ memcpy(frcopy, regs->fr, sizeof regs->fr);
+ frcopy[32] = 0;
+
+ memcpy(&orig_sw, frcopy, sizeof(orig_sw));
+
+ if (FPUDEBUG) {
+ printk(KERN_DEBUG "FP VZOUICxxxxCQCQCQCQCQCRMxxTDVZOUI ->\n ");
+ printbinary(orig_sw, 32);
+ printk(KERN_DEBUG "\n");
+ }
+
+ signalcode = decode_fpu(frcopy, 0x666);
+
+ /* Status word = FR0L. */
+ memcpy(&sw, frcopy, sizeof(sw));
+ if (FPUDEBUG) {
+ printk(KERN_DEBUG "VZOUICxxxxCQCQCQCQCQCRMxxTDVZOUI decode_fpu returns %d|0x%x\n",
+ signalcode >> 24, signalcode & 0xffffff);
+ printbinary(sw, 32);
+ printk(KERN_DEBUG "\n");
+ }
+
+ memcpy(regs->fr, frcopy, sizeof regs->fr);
+ if (signalcode != 0) {
+ si.si_signo = signalcode >> 24;
+ si.si_errno = 0;
+ si.si_code = signalcode & 0xffffff;
+ si.si_addr = (void __user *) regs->iaoq[0];
+ force_sig_info(si.si_signo, &si, current);
+ return -1;
+ }
+
+ return signalcode ? -1 : 0;
+}
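
The extru() macro defined near the top of driver.c mirrors the PA-RISC EXTRU instruction: it extracts a len-bit field whose rightmost bit sits at position pos, with bits numbered 0..31 from the most significant end. Two worked values under that numbering (illustrative only):

    /* extru(r, pos, len) == ((r) >> (31 - (pos))) & ((1 << (len)) - 1)
     *
     * For r = 0xABCD0000:
     *   extru(r, 7, 8)  == 0xAB   (top byte: the field ends at bit 7)
     *   extru(r, 15, 8) == 0xCD   (next byte: the field ends at bit 15)
     */
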
diff --git a/arch/parisc/math-emu/fcnvff.c b/arch/parisc/math-emu/fcnvff.c
new file mode 100644
index 00000000..76c063f7
--- /dev/null
+++ b/arch/parisc/math-emu/fcnvff.c
@@ -0,0 +1,309 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/fcnvff.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Single Floating-point to Double Floating-point
+ * Double Floating-point to Single Floating-point
+ *
+ * External Interfaces:
+ * dbl_to_sgl_fcnvff(srcptr,nullptr,dstptr,status)
+ * sgl_to_dbl_fcnvff(srcptr,nullptr,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/*
+ * Single Floating-point to Double Floating-point
+ */
+/*ARGSUSED*/
+int
+sgl_to_dbl_fcnvff(
+ sgl_floating_point *srcptr,
+ unsigned int *nullptr,
+ dbl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register unsigned int src, resultp1, resultp2;
+ register int src_exponent;
+
+ src = *srcptr;
+ src_exponent = Sgl_exponent(src);
+ Dbl_allp1(resultp1) = Sgl_all(src); /* set sign of result */
+ /*
+ * Test for NaN or infinity
+ */
+ if (src_exponent == SGL_INFINITY_EXPONENT) {
+ /*
+ * determine if NaN or infinity
+ */
+ if (Sgl_iszero_mantissa(src)) {
+ /*
+ * is infinity; want to return double infinity
+ */
+ Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(src)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ else {
+ Set_invalidflag();
+ Sgl_set_quiet(src);
+ }
+ }
+ /*
+ * NaN is quiet, return as double NaN
+ */
+ Dbl_setinfinity_exponent(resultp1);
+ Sgl_to_dbl_mantissa(src,resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * Test for zero or denormalized
+ */
+ if (src_exponent == 0) {
+ /*
+ * determine if zero or denormalized
+ */
+ if (Sgl_isnotzero_mantissa(src)) {
+ /*
+ * is denormalized; want to normalize
+ */
+ Sgl_clear_signexponent(src);
+ Sgl_leftshiftby1(src);
+ Sgl_normalize(src,src_exponent);
+ Sgl_to_dbl_exponent(src_exponent,resultp1);
+ Sgl_to_dbl_mantissa(src,resultp1,resultp2);
+ }
+ else {
+ Dbl_setzero_exponentmantissa(resultp1,resultp2);
+ }
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * No special cases, just complete the conversion
+ */
+ Sgl_to_dbl_exponent(src_exponent, resultp1);
+ Sgl_to_dbl_mantissa(Sgl_mantissa(src), resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Floating-point to Single Floating-point
+ */
+/*ARGSUSED*/
+int
+dbl_to_sgl_fcnvff(
+ dbl_floating_point *srcptr,
+ unsigned int *nullptr,
+ sgl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register unsigned int srcp1, srcp2, result;
+ register int src_exponent, dest_exponent, dest_mantissa;
+ register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
+ register boolean lsb_odd = FALSE;
+ boolean is_tiny;
+
+ Dbl_copyfromptr(srcptr,srcp1,srcp2);
+ src_exponent = Dbl_exponent(srcp1);
+ Sgl_all(result) = Dbl_allp1(srcp1); /* set sign of result */
+ /*
+ * Test for NaN or infinity
+ */
+ if (src_exponent == DBL_INFINITY_EXPONENT) {
+ /*
+ * determine if NaN or infinity
+ */
+ if (Dbl_iszero_mantissa(srcp1,srcp2)) {
+ /*
+ * is infinity; want to return single infinity
+ */
+ Sgl_setinfinity_exponentmantissa(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(srcp1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ else {
+ Set_invalidflag();
+ /* make NaN quiet */
+ Dbl_set_quiet(srcp1);
+ }
+ }
+ /*
+ * NaN is quiet, return as single NaN
+ */
+ Sgl_setinfinity_exponent(result);
+ Sgl_set_mantissa(result,Dallp1(srcp1)<<3 | Dallp2(srcp2)>>29);
+ if (Sgl_iszero_mantissa(result)) Sgl_set_quiet(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate result
+ */
+ Dbl_to_sgl_exponent(src_exponent,dest_exponent);
+ if (dest_exponent > 0) {
+ Dbl_to_sgl_mantissa(srcp1,srcp2,dest_mantissa,inexact,guardbit,
+ stickybit,lsb_odd);
+ }
+ else {
+ if (Dbl_iszero_exponentmantissa(srcp1,srcp2)){
+ Sgl_setzero_exponentmantissa(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ if (Is_underflowtrap_enabled()) {
+ Dbl_to_sgl_mantissa(srcp1,srcp2,dest_mantissa,inexact,
+ guardbit,stickybit,lsb_odd);
+ }
+ else {
+ /* compute result, determine inexact info,
+ * and set Underflowflag if appropriate
+ */
+ Dbl_to_sgl_denormalized(srcp1,srcp2,dest_exponent,
+ dest_mantissa,inexact,guardbit,stickybit,lsb_odd,
+ is_tiny);
+ }
+ }
+ /*
+ * Now round result if not exact
+ */
+ if (inexact) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(result)) dest_mantissa++;
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(result)) dest_mantissa++;
+ break;
+ case ROUNDNEAREST:
+ if (guardbit) {
+ if (stickybit || lsb_odd) dest_mantissa++;
+ }
+ }
+ }
+ Sgl_set_exponentmantissa(result,dest_mantissa);
+
+ /*
+ * check for mantissa overflow after rounding
+ */
+ if ((dest_exponent>0 || Is_underflowtrap_enabled()) &&
+ Sgl_isone_hidden(result)) dest_exponent++;
+
+ /*
+ * Test for overflow
+ */
+ if (dest_exponent >= SGL_INFINITY_EXPONENT) {
+ /* trap if OVERFLOWTRAP enabled */
+ if (Is_overflowtrap_enabled()) {
+ /*
+ * Check for gross overflow
+ */
+ if (dest_exponent >= SGL_INFINITY_EXPONENT+SGL_WRAP)
+ return(UNIMPLEMENTEDEXCEPTION);
+
+ /*
+ * Adjust bias of result
+ */
+ Sgl_setwrapped_exponent(result,dest_exponent,ovfl);
+ *dstptr = result;
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return(OVERFLOWEXCEPTION|INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(OVERFLOWEXCEPTION);
+ }
+ Set_overflowflag();
+ inexact = TRUE;
+ /* set result to infinity or largest number */
+ Sgl_setoverflow(result);
+ }
+ /*
+ * Test for underflow
+ */
+ else if (dest_exponent <= 0) {
+ /* trap if UNDERFLOWTRAP enabled */
+ if (Is_underflowtrap_enabled()) {
+ /*
+ * Check for gross underflow
+ */
+ if (dest_exponent <= -(SGL_WRAP))
+ return(UNIMPLEMENTEDEXCEPTION);
+ /*
+ * Adjust bias of result
+ */
+ Sgl_setwrapped_exponent(result,dest_exponent,unfl);
+ *dstptr = result;
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return(UNDERFLOWEXCEPTION|INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(UNDERFLOWEXCEPTION);
+ }
+ /*
+ * result is denormalized or signed zero
+ */
+ if (inexact && is_tiny) Set_underflowflag();
+
+ }
+ else Sgl_set_exponent(result,dest_exponent);
+ *dstptr = result;
+ /*
+ * Trap if inexact trap is enabled
+ */
+ if (inexact)
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(NOEXCEPTION);
+}
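As background for the conversions above: a single has an 8-bit exponent biased by 127 and a 23-bit fraction, a double an 11-bit exponent biased by 1023 and a 52-bit fraction, so the Sgl_to_dbl_* macros rebias by 1023 - 127 = 896 and left-justify the fraction by 29 bits. A user-space sketch of the same widening for a normal, finite, nonzero input (my illustration, not the macros from this file):

	#include <stdint.h>

	/* Widen the bit pattern of a normal IEEE-754 single to a double.
	 * Valid only for normal numbers; zero, denormals, infinity and NaN
	 * take the special-case paths handled in sgl_to_dbl_fcnvff() above. */
	static uint64_t sgl_bits_to_dbl_bits(uint32_t s)
	{
		uint64_t sign = (uint64_t)(s >> 31) << 63;
		uint64_t exp  = (uint64_t)(((s >> 23) & 0xffu) + 896) << 52;	/* rebias */
		uint64_t mant = (uint64_t)(s & 0x7fffffu) << 29;		/* widen  */
		return sign | exp | mant;
	}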
diff --git a/arch/parisc/math-emu/fcnvfu.c b/arch/parisc/math-emu/fcnvfu.c
new file mode 100644
index 00000000..7e856553
--- /dev/null
+++ b/arch/parisc/math-emu/fcnvfu.c
@@ -0,0 +1,536 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/fcnvfu.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Floating-point to Unsigned Fixed-point Converts
+ *
+ * External Interfaces:
+ * dbl_to_dbl_fcnvfu(srcptr,nullptr,dstptr,status)
+ * dbl_to_sgl_fcnvfu(srcptr,nullptr,dstptr,status)
+ * sgl_to_dbl_fcnvfu(srcptr,nullptr,dstptr,status)
+ * sgl_to_sgl_fcnvfu(srcptr,nullptr,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ *		<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/************************************************************************
+ * Floating-point to Unsigned Fixed-point Converts *
+ ************************************************************************/
+
+/*
+ * Single Floating-point to Single Unsigned Fixed
+ */
+/*ARGSUSED*/
+int
+sgl_to_sgl_fcnvfu(
+ sgl_floating_point *srcptr,
+ unsigned int *nullptr,
+ unsigned int *dstptr,
+ unsigned int *status)
+{
+ register unsigned int src, result;
+ register int src_exponent;
+ register boolean inexact = FALSE;
+
+ src = *srcptr;
+ src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > SGL_FX_MAX_EXP + 1) {
+ if (Sgl_isone_sign(src)) {
+ result = 0;
+ } else {
+ result = 0xffffffff;
+ }
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ /*
+ * Check sign.
+ * If negative, trap unimplemented.
+ */
+ if (Sgl_isone_sign(src)) {
+ result = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ Sgl_clear_signexponent_set_hidden(src);
+ Suint_from_sgl_mantissa(src,src_exponent,result);
+
+ /* check for inexact */
+ if (Sgl_isinexact_to_unsigned(src,src_exponent)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ result++;
+ break;
+ case ROUNDMINUS: /* never negative */
+ break;
+ case ROUNDNEAREST:
+ if (Sgl_isone_roundbit(src,src_exponent) &&
+ (Sgl_isone_stickybit(src,src_exponent) ||
+ (result & 1))) {
+ result++;
+ }
+ break;
+ }
+ }
+ } else {
+ result = 0;
+
+ /* check for inexact */
+ if (Sgl_isnotzero_exponentmantissa(src)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(src)) {
+ result++;
+ }
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(src)) {
+ result = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ inexact = FALSE;
+ }
+ break;
+ case ROUNDNEAREST:
+ if (src_exponent == -1 &&
+ Sgl_isnotzero_mantissa(src)) {
+ if (Sgl_isone_sign(src)) {
+ result = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ inexact = FALSE;
+ }
+ else result++;
+ }
+ break;
+ }
+ }
+ }
+ *dstptr = result;
+ if (inexact) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ return(NOEXCEPTION);
+}
+
+/*
+ * Single Floating-point to Double Unsigned Fixed
+ */
+/*ARGSUSED*/
+int
+sgl_to_dbl_fcnvfu(
+ sgl_floating_point *srcptr,
+ unsigned int *nullptr,
+ dbl_unsigned *dstptr,
+ unsigned int *status)
+{
+ register int src_exponent;
+ register unsigned int src, resultp1, resultp2;
+ register boolean inexact = FALSE;
+
+ src = *srcptr;
+ src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > DBL_FX_MAX_EXP + 1) {
+ if (Sgl_isone_sign(src)) {
+ resultp1 = resultp2 = 0;
+ } else {
+ resultp1 = resultp2 = 0xffffffff;
+ }
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ Duint_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ /*
+ * Check sign.
+ * If negative, trap unimplemented.
+ */
+ if (Sgl_isone_sign(src)) {
+ resultp1 = resultp2 = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ Duint_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ Sgl_clear_signexponent_set_hidden(src);
+ Duint_from_sgl_mantissa(src,src_exponent,resultp1,resultp2);
+
+ /* check for inexact */
+ if (Sgl_isinexact_to_unsigned(src,src_exponent)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ Duint_increment(resultp1,resultp2);
+ break;
+ case ROUNDMINUS: /* never negative */
+ break;
+ case ROUNDNEAREST:
+ if (Sgl_isone_roundbit(src,src_exponent) &&
+ (Sgl_isone_stickybit(src,src_exponent) ||
+ Duint_isone_lowp2(resultp2))) {
+ Duint_increment(resultp1,resultp2);
+ }
+ break;
+ }
+ }
+ } else {
+ Duint_setzero(resultp1,resultp2);
+
+ /* check for inexact */
+ if (Sgl_isnotzero_exponentmantissa(src)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(src)) {
+ Duint_increment(resultp1,resultp2);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(src)) {
+ resultp1 = resultp2 = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ inexact = FALSE;
+ }
+ break;
+ case ROUNDNEAREST:
+ if (src_exponent == -1 &&
+ Sgl_isnotzero_mantissa(src)) {
+ if (Sgl_isone_sign(src)) {
+ resultp1 = 0;
+ resultp2 = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ inexact = FALSE;
+ }
+ else Duint_increment(resultp1,resultp2);
+ }
+ }
+ }
+ }
+ Duint_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Floating-point to Single Unsigned Fixed
+ */
+/*ARGSUSED*/
+int
+dbl_to_sgl_fcnvfu (dbl_floating_point * srcptr, unsigned int *nullptr,
+ unsigned int *dstptr, unsigned int *status)
+{
+ register unsigned int srcp1, srcp2, result;
+ register int src_exponent;
+ register boolean inexact = FALSE;
+
+ Dbl_copyfromptr(srcptr,srcp1,srcp2);
+ src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > SGL_FX_MAX_EXP + 1) {
+ if (Dbl_isone_sign(srcp1)) {
+ result = 0;
+ } else {
+ result = 0xffffffff;
+ }
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ /*
+ * Check sign.
+ * If negative, trap unimplemented.
+ */
+ if (Dbl_isone_sign(srcp1)) {
+ result = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ Dbl_clear_signexponent_set_hidden(srcp1);
+ Suint_from_dbl_mantissa(srcp1,srcp2,src_exponent,result);
+
+ /* check for inexact */
+ if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ result++;
+ break;
+ case ROUNDMINUS: /* never negative */
+ break;
+ case ROUNDNEAREST:
+ if(Dbl_isone_roundbit(srcp1,srcp2,src_exponent) &&
+ (Dbl_isone_stickybit(srcp1,srcp2,src_exponent)||
+ result&1))
+ result++;
+ break;
+ }
+ /* check for overflow */
+ if (result == 0) {
+ result = 0xffffffff;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ } else {
+ result = 0;
+
+ /* check for inexact */
+ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(srcp1)) result++;
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(srcp1)) {
+ result = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ inexact = FALSE;
+ }
+ break;
+ case ROUNDNEAREST:
+ if (src_exponent == -1 &&
+ Dbl_isnotzero_mantissa(srcp1,srcp2))
+ if (Dbl_isone_sign(srcp1)) {
+ result = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ inexact = FALSE;
+ }
+ else result++;
+ }
+ }
+ }
+ *dstptr = result;
+ if (inexact) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Floating-point to Double Unsigned Fixed
+ */
+/*ARGSUSED*/
+int
+dbl_to_dbl_fcnvfu (dbl_floating_point * srcptr, unsigned int *nullptr,
+ dbl_unsigned * dstptr, unsigned int *status)
+{
+ register int src_exponent;
+ register unsigned int srcp1, srcp2, resultp1, resultp2;
+ register boolean inexact = FALSE;
+
+ Dbl_copyfromptr(srcptr,srcp1,srcp2);
+ src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > DBL_FX_MAX_EXP + 1) {
+ if (Dbl_isone_sign(srcp1)) {
+ resultp1 = resultp2 = 0;
+ } else {
+ resultp1 = resultp2 = 0xffffffff;
+ }
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ Duint_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ /*
+ * Check sign.
+ * If negative, trap unimplemented.
+ */
+ if (Dbl_isone_sign(srcp1)) {
+ resultp1 = resultp2 = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ Duint_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ Dbl_clear_signexponent_set_hidden(srcp1);
+ Duint_from_dbl_mantissa(srcp1,srcp2,src_exponent,resultp1,
+ resultp2);
+
+ /* check for inexact */
+ if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ Duint_increment(resultp1,resultp2);
+ break;
+ case ROUNDMINUS: /* never negative */
+ break;
+ case ROUNDNEAREST:
+ if(Dbl_isone_roundbit(srcp1,srcp2,src_exponent))
+ if(Dbl_isone_stickybit(srcp1,srcp2,src_exponent) ||
+ Duint_isone_lowp2(resultp2))
+ Duint_increment(resultp1,resultp2);
+ }
+ }
+ } else {
+ Duint_setzero(resultp1,resultp2);
+
+ /* check for inexact */
+ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(srcp1)) {
+ Duint_increment(resultp1,resultp2);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(srcp1)) {
+ resultp1 = resultp2 = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ inexact = FALSE;
+ }
+ break;
+ case ROUNDNEAREST:
+ if (src_exponent == -1 &&
+ Dbl_isnotzero_mantissa(srcp1,srcp2))
+ if (Dbl_iszero_sign(srcp1)) {
+ Duint_increment(resultp1,resultp2);
+ } else {
+ resultp1 = 0;
+ resultp2 = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ inexact = FALSE;
+ }
+ }
+ }
+ }
+ Duint_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ return(NOEXCEPTION);
+}
+
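Every ROUNDNEAREST branch above reduces to the same guard/sticky/odd-lsb test (see, for example, the sgl_to_sgl case). A stand-alone restatement of that decision with illustrative names, not the Sgl_/Dbl_ macros:

	#include <stdbool.h>

	/* Round-to-nearest-even: increment the truncated result when the first
	 * discarded bit (guard) is set and either further discarded bits remain
	 * (sticky) or the kept result is already odd (ties go to even). */
	static bool rne_should_increment(bool guard, bool sticky, bool lsb_odd)
	{
		return guard && (sticky || lsb_odd);
	}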
diff --git a/arch/parisc/math-emu/fcnvfut.c b/arch/parisc/math-emu/fcnvfut.c
new file mode 100644
index 00000000..4176a44e
--- /dev/null
+++ b/arch/parisc/math-emu/fcnvfut.c
@@ -0,0 +1,332 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/fcnvfut.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Floating-point to Unsigned Fixed-point Converts with Truncation
+ *
+ * External Interfaces:
+ * dbl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status)
+ * dbl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status)
+ * sgl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status)
+ * sgl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ *		<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/************************************************************************
+ * Floating-point to Unsigned Fixed-point Converts with Truncation *
+ ************************************************************************/
+
+/*
+ * Convert single floating-point to single fixed-point format
+ * with truncated result
+ */
+/*ARGSUSED*/
+int
+sgl_to_sgl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr,
+ unsigned int *dstptr, unsigned int *status)
+{
+ register unsigned int src, result;
+ register int src_exponent;
+
+ src = *srcptr;
+ src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > SGL_FX_MAX_EXP + 1) {
+ if (Sgl_isone_sign(src)) {
+ result = 0;
+ } else {
+ result = 0xffffffff;
+ }
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ /*
+ * Check sign.
+ * If negative, trap unimplemented.
+ */
+ if (Sgl_isone_sign(src)) {
+ result = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ Sgl_clear_signexponent_set_hidden(src);
+ Suint_from_sgl_mantissa(src,src_exponent,result);
+ *dstptr = result;
+
+ /* check for inexact */
+ if (Sgl_isinexact_to_unsigned(src,src_exponent)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ else {
+ *dstptr = 0;
+
+ /* check for inexact */
+ if (Sgl_isnotzero_exponentmantissa(src)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ return(NOEXCEPTION);
+}
+
+/*
+ * Single Floating-point to Double Unsigned Fixed
+ */
+/*ARGSUSED*/
+int
+sgl_to_dbl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr,
+ dbl_unsigned * dstptr, unsigned int *status)
+{
+ register int src_exponent;
+ register unsigned int src, resultp1, resultp2;
+
+ src = *srcptr;
+ src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > DBL_FX_MAX_EXP + 1) {
+ if (Sgl_isone_sign(src)) {
+ resultp1 = resultp2 = 0;
+ } else {
+ resultp1 = resultp2 = 0xffffffff;
+ }
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ Duint_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ /*
+ * Check sign.
+ * If negative, trap unimplemented.
+ */
+ if (Sgl_isone_sign(src)) {
+ resultp1 = resultp2 = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ Duint_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ Sgl_clear_signexponent_set_hidden(src);
+ Duint_from_sgl_mantissa(src,src_exponent,resultp1,resultp2);
+ Duint_copytoptr(resultp1,resultp2,dstptr);
+
+ /* check for inexact */
+ if (Sgl_isinexact_to_unsigned(src,src_exponent)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ else {
+ Duint_setzero(resultp1,resultp2);
+ Duint_copytoptr(resultp1,resultp2,dstptr);
+
+ /* check for inexact */
+ if (Sgl_isnotzero_exponentmantissa(src)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Floating-point to Single Unsigned Fixed
+ */
+/*ARGSUSED*/
+int
+dbl_to_sgl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr,
+ unsigned int *dstptr, unsigned int *status)
+{
+ register unsigned int srcp1, srcp2, result;
+ register int src_exponent;
+
+ Dbl_copyfromptr(srcptr,srcp1,srcp2);
+ src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > SGL_FX_MAX_EXP + 1) {
+ if (Dbl_isone_sign(srcp1)) {
+ result = 0;
+ } else {
+ result = 0xffffffff;
+ }
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ /*
+ * Check sign.
+ * If negative, trap unimplemented.
+ */
+ if (Dbl_isone_sign(srcp1)) {
+ result = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ Dbl_clear_signexponent_set_hidden(srcp1);
+ Suint_from_dbl_mantissa(srcp1,srcp2,src_exponent,result);
+ *dstptr = result;
+
+ /* check for inexact */
+ if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ else {
+ *dstptr = 0;
+
+ /* check for inexact */
+ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Floating-point to Double Unsigned Fixed
+ */
+/*ARGSUSED*/
+int
+dbl_to_dbl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr,
+ dbl_unsigned * dstptr, unsigned int *status)
+{
+ register int src_exponent;
+ register unsigned int srcp1, srcp2, resultp1, resultp2;
+
+ Dbl_copyfromptr(srcptr,srcp1,srcp2);
+ src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > DBL_FX_MAX_EXP + 1) {
+ if (Dbl_isone_sign(srcp1)) {
+ resultp1 = resultp2 = 0;
+ } else {
+ resultp1 = resultp2 = 0xffffffff;
+ }
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ Duint_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ /*
+ * Check sign.
+ * If negative, trap unimplemented.
+ */
+ if (Dbl_isone_sign(srcp1)) {
+ resultp1 = resultp2 = 0;
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ Duint_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ Dbl_clear_signexponent_set_hidden(srcp1);
+ Duint_from_dbl_mantissa(srcp1,srcp2,src_exponent,
+ resultp1,resultp2);
+ Duint_copytoptr(resultp1,resultp2,dstptr);
+
+ /* check for inexact */
+ if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ else {
+ Duint_setzero(resultp1,resultp2);
+ Duint_copytoptr(resultp1,resultp2,dstptr);
+
+ /* check for inexact */
+ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ return(NOEXCEPTION);
+}
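For orientation, the truncating (FCNV,T) unsigned conversions above behave like a plain C cast for in-range values, plus the inexact bookkeeping a cast does not give you; a tiny user-space check (my example, not from the patch):

	#include <assert.h>

	int main(void)
	{
		/* C truncates toward zero when converting to an unsigned integer,
		 * which matches the result the fcnvfut routines store. */
		assert((unsigned int)3.9f == 3u);
		assert((unsigned int)0.9f == 0u);
		return 0;
	}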
diff --git a/arch/parisc/math-emu/fcnvfx.c b/arch/parisc/math-emu/fcnvfx.c
new file mode 100644
index 00000000..d6475bdd
--- /dev/null
+++ b/arch/parisc/math-emu/fcnvfx.c
@@ -0,0 +1,501 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/fcnvfx.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Single Floating-point to Single Fixed-point
+ * Single Floating-point to Double Fixed-point
+ * Double Floating-point to Single Fixed-point
+ * Double Floating-point to Double Fixed-point
+ *
+ * External Interfaces:
+ * dbl_to_dbl_fcnvfx(srcptr,nullptr,dstptr,status)
+ * dbl_to_sgl_fcnvfx(srcptr,nullptr,dstptr,status)
+ * sgl_to_dbl_fcnvfx(srcptr,nullptr,dstptr,status)
+ * sgl_to_sgl_fcnvfx(srcptr,nullptr,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ *		<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/*
+ * Single Floating-point to Single Fixed-point
+ */
+/*ARGSUSED*/
+int
+sgl_to_sgl_fcnvfx(
+ sgl_floating_point *srcptr,
+ sgl_floating_point *nullptr,
+ int *dstptr,
+ sgl_floating_point *status)
+{
+ register unsigned int src, temp;
+ register int src_exponent, result;
+ register boolean inexact = FALSE;
+
+ src = *srcptr;
+ src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > SGL_FX_MAX_EXP) {
+ /* check for MININT */
+ if ((src_exponent > SGL_FX_MAX_EXP + 1) ||
+ Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
+ if (Sgl_iszero_sign(src)) result = 0x7fffffff;
+ else result = 0x80000000;
+
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ temp = src;
+ Sgl_clear_signexponent_set_hidden(temp);
+ Int_from_sgl_mantissa(temp,src_exponent);
+ if (Sgl_isone_sign(src)) result = -Sgl_all(temp);
+ else result = Sgl_all(temp);
+
+ /* check for inexact */
+ if (Sgl_isinexact_to_fix(src,src_exponent)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(src)) result++;
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(src)) result--;
+ break;
+ case ROUNDNEAREST:
+ if (Sgl_isone_roundbit(src,src_exponent)) {
+ if (Sgl_isone_stickybit(src,src_exponent)
+ || (Sgl_isone_lowmantissa(temp)))
+ if (Sgl_iszero_sign(src)) result++;
+ else result--;
+ }
+ }
+ }
+ }
+ else {
+ result = 0;
+
+ /* check for inexact */
+ if (Sgl_isnotzero_exponentmantissa(src)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(src)) result++;
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(src)) result--;
+ break;
+ case ROUNDNEAREST:
+ if (src_exponent == -1)
+ if (Sgl_isnotzero_mantissa(src))
+ if (Sgl_iszero_sign(src)) result++;
+ else result--;
+ }
+ }
+ }
+ *dstptr = result;
+ if (inexact) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ return(NOEXCEPTION);
+}
+
+/*
+ * Single Floating-point to Double Fixed-point
+ */
+/*ARGSUSED*/
+int
+sgl_to_dbl_fcnvfx(
+ sgl_floating_point *srcptr,
+ unsigned int *nullptr,
+ dbl_integer *dstptr,
+ unsigned int *status)
+{
+ register int src_exponent, resultp1;
+ register unsigned int src, temp, resultp2;
+ register boolean inexact = FALSE;
+
+ src = *srcptr;
+ src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > DBL_FX_MAX_EXP) {
+ /* check for MININT */
+ if ((src_exponent > DBL_FX_MAX_EXP + 1) ||
+ Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
+ if (Sgl_iszero_sign(src)) {
+ resultp1 = 0x7fffffff;
+ resultp2 = 0xffffffff;
+ }
+ else {
+ resultp1 = 0x80000000;
+ resultp2 = 0;
+ }
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ Dint_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ Dint_set_minint(resultp1,resultp2);
+ Dint_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ temp = src;
+ Sgl_clear_signexponent_set_hidden(temp);
+ Dint_from_sgl_mantissa(temp,src_exponent,resultp1,resultp2);
+ if (Sgl_isone_sign(src)) {
+ Dint_setone_sign(resultp1,resultp2);
+ }
+
+ /* check for inexact */
+ if (Sgl_isinexact_to_fix(src,src_exponent)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(src)) {
+ Dint_increment(resultp1,resultp2);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(src)) {
+ Dint_decrement(resultp1,resultp2);
+ }
+ break;
+ case ROUNDNEAREST:
+ if (Sgl_isone_roundbit(src,src_exponent))
+ if (Sgl_isone_stickybit(src,src_exponent) ||
+ (Dint_isone_lowp2(resultp2)))
+ if (Sgl_iszero_sign(src)) {
+ Dint_increment(resultp1,resultp2);
+ }
+ else {
+ Dint_decrement(resultp1,resultp2);
+ }
+ }
+ }
+ }
+ else {
+ Dint_setzero(resultp1,resultp2);
+
+ /* check for inexact */
+ if (Sgl_isnotzero_exponentmantissa(src)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(src)) {
+ Dint_increment(resultp1,resultp2);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(src)) {
+ Dint_decrement(resultp1,resultp2);
+ }
+ break;
+ case ROUNDNEAREST:
+ if (src_exponent == -1)
+ if (Sgl_isnotzero_mantissa(src))
+ if (Sgl_iszero_sign(src)) {
+ Dint_increment(resultp1,resultp2);
+ }
+ else {
+ Dint_decrement(resultp1,resultp2);
+ }
+ }
+ }
+ }
+ Dint_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Floating-point to Single Fixed-point
+ */
+/*ARGSUSED*/
+int
+dbl_to_sgl_fcnvfx(
+ dbl_floating_point *srcptr,
+ unsigned int *nullptr,
+ int *dstptr,
+ unsigned int *status)
+{
+ register unsigned int srcp1,srcp2, tempp1,tempp2;
+ register int src_exponent, result;
+ register boolean inexact = FALSE;
+
+ Dbl_copyfromptr(srcptr,srcp1,srcp2);
+ src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > SGL_FX_MAX_EXP) {
+ /* check for MININT */
+ if (Dbl_isoverflow_to_int(src_exponent,srcp1,srcp2)) {
+ if (Dbl_iszero_sign(srcp1)) result = 0x7fffffff;
+ else result = 0x80000000;
+
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ tempp1 = srcp1;
+ tempp2 = srcp2;
+ Dbl_clear_signexponent_set_hidden(tempp1);
+ Int_from_dbl_mantissa(tempp1,tempp2,src_exponent);
+ if (Dbl_isone_sign(srcp1) && (src_exponent <= SGL_FX_MAX_EXP))
+ result = -Dbl_allp1(tempp1);
+ else result = Dbl_allp1(tempp1);
+
+ /* check for inexact */
+ if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(srcp1)) result++;
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(srcp1)) result--;
+ break;
+ case ROUNDNEAREST:
+ if (Dbl_isone_roundbit(srcp1,srcp2,src_exponent))
+ if (Dbl_isone_stickybit(srcp1,srcp2,src_exponent) ||
+ (Dbl_isone_lowmantissap1(tempp1)))
+ if (Dbl_iszero_sign(srcp1)) result++;
+ else result--;
+ }
+ /* check for overflow */
+ if ((Dbl_iszero_sign(srcp1) && result < 0) ||
+ (Dbl_isone_sign(srcp1) && result > 0)) {
+
+ if (Dbl_iszero_sign(srcp1)) result = 0x7fffffff;
+ else result = 0x80000000;
+
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ }
+ else {
+ result = 0;
+
+ /* check for inexact */
+ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(srcp1)) result++;
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(srcp1)) result--;
+ break;
+ case ROUNDNEAREST:
+ if (src_exponent == -1)
+ if (Dbl_isnotzero_mantissa(srcp1,srcp2))
+ if (Dbl_iszero_sign(srcp1)) result++;
+ else result--;
+ }
+ }
+ }
+ *dstptr = result;
+ if (inexact) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Floating-point to Double Fixed-point
+ */
+/*ARGSUSED*/
+int
+dbl_to_dbl_fcnvfx(
+ dbl_floating_point *srcptr,
+ unsigned int *nullptr,
+ dbl_integer *dstptr,
+ unsigned int *status)
+{
+ register int src_exponent, resultp1;
+ register unsigned int srcp1, srcp2, tempp1, tempp2, resultp2;
+ register boolean inexact = FALSE;
+
+ Dbl_copyfromptr(srcptr,srcp1,srcp2);
+ src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > DBL_FX_MAX_EXP) {
+ /* check for MININT */
+ if ((src_exponent > DBL_FX_MAX_EXP + 1) ||
+ Dbl_isnotzero_mantissa(srcp1,srcp2) || Dbl_iszero_sign(srcp1)) {
+ if (Dbl_iszero_sign(srcp1)) {
+ resultp1 = 0x7fffffff;
+ resultp2 = 0xffffffff;
+ }
+ else {
+ resultp1 = 0x80000000;
+ resultp2 = 0;
+ }
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ Dint_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ tempp1 = srcp1;
+ tempp2 = srcp2;
+ Dbl_clear_signexponent_set_hidden(tempp1);
+ Dint_from_dbl_mantissa(tempp1,tempp2,src_exponent,resultp1,
+ resultp2);
+ if (Dbl_isone_sign(srcp1)) {
+ Dint_setone_sign(resultp1,resultp2);
+ }
+
+ /* check for inexact */
+ if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(srcp1)) {
+ Dint_increment(resultp1,resultp2);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(srcp1)) {
+ Dint_decrement(resultp1,resultp2);
+ }
+ break;
+ case ROUNDNEAREST:
+ if (Dbl_isone_roundbit(srcp1,srcp2,src_exponent))
+ if (Dbl_isone_stickybit(srcp1,srcp2,src_exponent) ||
+ (Dint_isone_lowp2(resultp2)))
+ if (Dbl_iszero_sign(srcp1)) {
+ Dint_increment(resultp1,resultp2);
+ }
+ else {
+ Dint_decrement(resultp1,resultp2);
+ }
+ }
+ }
+ }
+ else {
+ Dint_setzero(resultp1,resultp2);
+
+ /* check for inexact */
+ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(srcp1)) {
+ Dint_increment(resultp1,resultp2);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(srcp1)) {
+ Dint_decrement(resultp1,resultp2);
+ }
+ break;
+ case ROUNDNEAREST:
+ if (src_exponent == -1)
+ if (Dbl_isnotzero_mantissa(srcp1,srcp2))
+ if (Dbl_iszero_sign(srcp1)) {
+ Dint_increment(resultp1,resultp2);
+ }
+ else {
+ Dint_decrement(resultp1,resultp2);
+ }
+ }
+ }
+ }
+ Dint_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ return(NOEXCEPTION);
+}
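The "check for MININT" tests above exist because -2^31 has an exponent one past the fixed-point maximum yet still fits in a 32-bit two's-complement result, so it must not be reported as invalid. A user-space illustration of that corner case (assumes 32-bit int, IEEE single, default rounding):

	#include <assert.h>
	#include <limits.h>

	int main(void)
	{
		/* -2^31 is exactly representable as a float and converts back to
		 * INT_MIN without overflow; any other magnitude >= 2^31 overflows. */
		assert((float)INT_MIN == -2147483648.0f);
		assert((int)-2147483648.0f == INT_MIN);
		return 0;
	}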
diff --git a/arch/parisc/math-emu/fcnvfxt.c b/arch/parisc/math-emu/fcnvfxt.c
new file mode 100644
index 00000000..8b9010c4
--- /dev/null
+++ b/arch/parisc/math-emu/fcnvfxt.c
@@ -0,0 +1,328 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/fcnvfxt.c $Revision: 1.1 $
+ *
+ * Purpose:
+ *	Single Floating-point to Single Fixed-point w/ truncated result
+ *	Single Floating-point to Double Fixed-point w/ truncated result
+ *	Double Floating-point to Single Fixed-point w/ truncated result
+ *	Double Floating-point to Double Fixed-point w/ truncated result
+ *
+ * External Interfaces:
+ * dbl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status)
+ * dbl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status)
+ * sgl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status)
+ * sgl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ *		<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/*
+ * Convert single floating-point to single fixed-point format
+ * with truncated result
+ */
+/*ARGSUSED*/
+int
+sgl_to_sgl_fcnvfxt(
+ sgl_floating_point *srcptr,
+ unsigned int *nullptr,
+ int *dstptr,
+ unsigned int *status)
+{
+ register unsigned int src, temp;
+ register int src_exponent, result;
+
+ src = *srcptr;
+ src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > SGL_FX_MAX_EXP) {
+ /* check for MININT */
+ if ((src_exponent > SGL_FX_MAX_EXP + 1) ||
+ Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
+ if (Sgl_iszero_sign(src)) result = 0x7fffffff;
+ else result = 0x80000000;
+
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ temp = src;
+ Sgl_clear_signexponent_set_hidden(temp);
+ Int_from_sgl_mantissa(temp,src_exponent);
+ if (Sgl_isone_sign(src)) result = -Sgl_all(temp);
+ else result = Sgl_all(temp);
+ *dstptr = result;
+
+ /* check for inexact */
+ if (Sgl_isinexact_to_fix(src,src_exponent)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ else {
+ *dstptr = 0;
+
+ /* check for inexact */
+ if (Sgl_isnotzero_exponentmantissa(src)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ return(NOEXCEPTION);
+}
+
+/*
+ * Single Floating-point to Double Fixed-point
+ */
+/*ARGSUSED*/
+int
+sgl_to_dbl_fcnvfxt(
+ sgl_floating_point *srcptr,
+ unsigned int *nullptr,
+ dbl_integer *dstptr,
+ unsigned int *status)
+{
+ register int src_exponent, resultp1;
+ register unsigned int src, temp, resultp2;
+
+ src = *srcptr;
+ src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > DBL_FX_MAX_EXP) {
+ /* check for MININT */
+ if ((src_exponent > DBL_FX_MAX_EXP + 1) ||
+ Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
+ if (Sgl_iszero_sign(src)) {
+ resultp1 = 0x7fffffff;
+ resultp2 = 0xffffffff;
+ }
+ else {
+ resultp1 = 0x80000000;
+ resultp2 = 0;
+ }
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ Dint_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ Dint_set_minint(resultp1,resultp2);
+ Dint_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ temp = src;
+ Sgl_clear_signexponent_set_hidden(temp);
+ Dint_from_sgl_mantissa(temp,src_exponent,resultp1,resultp2);
+ if (Sgl_isone_sign(src)) {
+ Dint_setone_sign(resultp1,resultp2);
+ }
+ Dint_copytoptr(resultp1,resultp2,dstptr);
+
+ /* check for inexact */
+ if (Sgl_isinexact_to_fix(src,src_exponent)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ else {
+ Dint_setzero(resultp1,resultp2);
+ Dint_copytoptr(resultp1,resultp2,dstptr);
+
+ /* check for inexact */
+ if (Sgl_isnotzero_exponentmantissa(src)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Floating-point to Single Fixed-point
+ */
+/*ARGSUSED*/
+int
+dbl_to_sgl_fcnvfxt(
+ dbl_floating_point *srcptr,
+ unsigned int *nullptr,
+ int *dstptr,
+ unsigned int *status)
+{
+ register unsigned int srcp1, srcp2, tempp1, tempp2;
+ register int src_exponent, result;
+
+ Dbl_copyfromptr(srcptr,srcp1,srcp2);
+ src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > SGL_FX_MAX_EXP) {
+ /* check for MININT */
+ if (Dbl_isoverflow_to_int(src_exponent,srcp1,srcp2)) {
+ if (Dbl_iszero_sign(srcp1)) result = 0x7fffffff;
+ else result = 0x80000000;
+
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ tempp1 = srcp1;
+ tempp2 = srcp2;
+ Dbl_clear_signexponent_set_hidden(tempp1);
+ Int_from_dbl_mantissa(tempp1,tempp2,src_exponent);
+ if (Dbl_isone_sign(srcp1) && (src_exponent <= SGL_FX_MAX_EXP))
+ result = -Dbl_allp1(tempp1);
+ else result = Dbl_allp1(tempp1);
+ *dstptr = result;
+
+ /* check for inexact */
+ if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ else {
+ *dstptr = 0;
+
+ /* check for inexact */
+ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Floating-point to Double Fixed-point
+ */
+/*ARGSUSED*/
+int
+dbl_to_dbl_fcnvfxt(
+ dbl_floating_point *srcptr,
+ unsigned int *nullptr,
+ dbl_integer *dstptr,
+ unsigned int *status)
+{
+ register int src_exponent, resultp1;
+ register unsigned int srcp1, srcp2, tempp1, tempp2, resultp2;
+
+ Dbl_copyfromptr(srcptr,srcp1,srcp2);
+ src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+ /*
+ * Test for overflow
+ */
+ if (src_exponent > DBL_FX_MAX_EXP) {
+ /* check for MININT */
+ if ((src_exponent > DBL_FX_MAX_EXP + 1) ||
+ Dbl_isnotzero_mantissa(srcp1,srcp2) || Dbl_iszero_sign(srcp1)) {
+ if (Dbl_iszero_sign(srcp1)) {
+ resultp1 = 0x7fffffff;
+ resultp2 = 0xffffffff;
+ }
+ else {
+ resultp1 = 0x80000000;
+ resultp2 = 0;
+ }
+ if (Is_invalidtrap_enabled()) {
+ return(INVALIDEXCEPTION);
+ }
+ Set_invalidflag();
+ Dint_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ tempp1 = srcp1;
+ tempp2 = srcp2;
+ Dbl_clear_signexponent_set_hidden(tempp1);
+ Dint_from_dbl_mantissa(tempp1,tempp2,src_exponent,
+ resultp1,resultp2);
+ if (Dbl_isone_sign(srcp1)) {
+ Dint_setone_sign(resultp1,resultp2);
+ }
+ Dint_copytoptr(resultp1,resultp2,dstptr);
+
+ /* check for inexact */
+ if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ else {
+ Dint_setzero(resultp1,resultp2);
+ Dint_copytoptr(resultp1,resultp2,dstptr);
+
+ /* check for inexact */
+ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ }
+ return(NOEXCEPTION);
+}
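The signed truncating conversions above likewise round toward zero for both signs, matching C cast semantics; a quick check (illustrative, not part of the patch):

	#include <assert.h>

	int main(void)
	{
		/* Truncation discards the fraction toward zero for both signs. */
		assert((int)3.7f == 3);
		assert((int)-3.7f == -3);
		return 0;
	}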
diff --git a/arch/parisc/math-emu/fcnvuf.c b/arch/parisc/math-emu/fcnvuf.c
new file mode 100644
index 00000000..5e68189f
--- /dev/null
+++ b/arch/parisc/math-emu/fcnvuf.c
@@ -0,0 +1,318 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/fcnvuf.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Fixed point to Floating-point Converts
+ *
+ * External Interfaces:
+ * dbl_to_dbl_fcnvuf(srcptr,nullptr,dstptr,status)
+ * dbl_to_sgl_fcnvuf(srcptr,nullptr,dstptr,status)
+ * sgl_to_dbl_fcnvuf(srcptr,nullptr,dstptr,status)
+ * sgl_to_sgl_fcnvuf(srcptr,nullptr,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ *		<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/************************************************************************
+ * Fixed point to Floating-point Converts *
+ ************************************************************************/
+
+/*
+ * Convert Single Unsigned Fixed to Single Floating-point format
+ */
+
+int
+sgl_to_sgl_fcnvuf(
+ unsigned int *srcptr,
+ unsigned int *nullptr,
+ sgl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register unsigned int src, result = 0;
+ register int dst_exponent;
+
+ src = *srcptr;
+
+ /* Check for zero */
+ if (src == 0) {
+ Sgl_setzero(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate exponent and normalized mantissa
+ */
+ dst_exponent = 16; /* initialize for normalization */
+ /*
+ * Check word for most significant bit set. Returns
+ * a value in dst_exponent indicating the bit position,
+ * between -1 and 30.
+ */
+ Find_ms_one_bit(src,dst_exponent);
+ /* left justify source, with msb at bit position 0 */
+ src <<= dst_exponent+1;
+ Sgl_set_mantissa(result, src >> SGL_EXP_LENGTH);
+ Sgl_set_exponent(result, 30+SGL_BIAS - dst_exponent);
+
+ /* check for inexact */
+ if (Suint_isinexact_to_sgl(src)) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ Sgl_increment(result);
+ break;
+ case ROUNDMINUS: /* never negative */
+ break;
+ case ROUNDNEAREST:
+ Sgl_roundnearest_from_suint(src,result);
+ break;
+ }
+ if (Is_inexacttrap_enabled()) {
+ *dstptr = result;
+ return(INEXACTEXCEPTION);
+ }
+ else Set_inexactflag();
+ }
+ *dstptr = result;
+ return(NOEXCEPTION);
+}
+
+/*
+ * Single Unsigned Fixed to Double Floating-point
+ */
+
+int
+sgl_to_dbl_fcnvuf(
+ unsigned int *srcptr,
+ unsigned int *nullptr,
+ dbl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register int dst_exponent;
+ register unsigned int src, resultp1 = 0, resultp2 = 0;
+
+ src = *srcptr;
+
+ /* Check for zero */
+ if (src == 0) {
+ Dbl_setzero(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate exponent and normalized mantissa
+ */
+ dst_exponent = 16; /* initialize for normalization */
+ /*
+ * Check word for most significant bit set. Returns
+ * a value in dst_exponent indicating the bit position,
+ * between -1 and 30.
+ */
+ Find_ms_one_bit(src,dst_exponent);
+ /* left justify source, with msb at bit position 0 */
+ src <<= dst_exponent+1;
+ Dbl_set_mantissap1(resultp1, src >> DBL_EXP_LENGTH);
+ Dbl_set_mantissap2(resultp2, src << (32-DBL_EXP_LENGTH));
+ Dbl_set_exponent(resultp1, (30+DBL_BIAS) - dst_exponent);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Unsigned Fixed to Single Floating-point
+ */
+
+int
+dbl_to_sgl_fcnvuf(
+ dbl_unsigned *srcptr,
+ unsigned int *nullptr,
+ sgl_floating_point *dstptr,
+ unsigned int *status)
+{
+ int dst_exponent;
+ unsigned int srcp1, srcp2, result = 0;
+
+ Duint_copyfromptr(srcptr,srcp1,srcp2);
+
+ /* Check for zero */
+ if (srcp1 == 0 && srcp2 == 0) {
+ Sgl_setzero(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate exponent and normalized mantissa
+ */
+ dst_exponent = 16; /* initialize for normalization */
+ if (srcp1 == 0) {
+ /*
+ * Check word for most significant bit set. Returns
+ * a value in dst_exponent indicating the bit position,
+ * between -1 and 30.
+ */
+ Find_ms_one_bit(srcp2,dst_exponent);
+ /* left justify source, with msb at bit position 0 */
+ srcp1 = srcp2 << dst_exponent+1;
+ srcp2 = 0;
+ /*
+ * since msb set is in second word, need to
+ * adjust bit position count
+ */
+ dst_exponent += 32;
+ }
+ else {
+ /*
+ * Check word for most significant bit set. Returns
+ * a value in dst_exponent indicating the bit position,
+ * between -1 and 30.
+ *
+ */
+ Find_ms_one_bit(srcp1,dst_exponent);
+ /* left justify source, with msb at bit position 0 */
+ if (dst_exponent >= 0) {
+ Variable_shift_double(srcp1,srcp2,(31-dst_exponent),
+ srcp1);
+ srcp2 <<= dst_exponent+1;
+ }
+ }
+ Sgl_set_mantissa(result, srcp1 >> SGL_EXP_LENGTH);
+ Sgl_set_exponent(result, (62+SGL_BIAS) - dst_exponent);
+
+ /* check for inexact */
+ if (Duint_isinexact_to_sgl(srcp1,srcp2)) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ Sgl_increment(result);
+ break;
+ case ROUNDMINUS: /* never negative */
+ break;
+ case ROUNDNEAREST:
+ Sgl_roundnearest_from_duint(srcp1,srcp2,result);
+ break;
+ }
+ if (Is_inexacttrap_enabled()) {
+ *dstptr = result;
+ return(INEXACTEXCEPTION);
+ }
+ else Set_inexactflag();
+ }
+ *dstptr = result;
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Unsigned Fixed to Double Floating-point
+ */
+
+int
+dbl_to_dbl_fcnvuf(
+ dbl_unsigned *srcptr,
+ unsigned int *nullptr,
+ dbl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register int dst_exponent;
+ register unsigned int srcp1, srcp2, resultp1 = 0, resultp2 = 0;
+
+ Duint_copyfromptr(srcptr,srcp1,srcp2);
+
+ /* Check for zero */
+ if (srcp1 == 0 && srcp2 ==0) {
+ Dbl_setzero(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate exponent and normalized mantissa
+ */
+ dst_exponent = 16; /* initialize for normalization */
+ if (srcp1 == 0) {
+ /*
+ * Check word for most significant bit set. Returns
+ * a value in dst_exponent indicating the bit position,
+ * between -1 and 30.
+ */
+ Find_ms_one_bit(srcp2,dst_exponent);
+ /* left justify source, with msb at bit position 0 */
+ srcp1 = srcp2 << dst_exponent+1;
+ srcp2 = 0;
+ /*
+ * since msb set is in second word, need to
+ * adjust bit position count
+ */
+ dst_exponent += 32;
+ }
+ else {
+ /*
+ * Check word for most significant bit set. Returns
+ * a value in dst_exponent indicating the bit position,
+ * between -1 and 30.
+ */
+ Find_ms_one_bit(srcp1,dst_exponent);
+ /* left justify source, with msb at bit position 0 */
+ if (dst_exponent >= 0) {
+ Variable_shift_double(srcp1,srcp2,(31-dst_exponent),
+ srcp1);
+ srcp2 <<= dst_exponent+1;
+ }
+ }
+ Dbl_set_mantissap1(resultp1, srcp1 >> DBL_EXP_LENGTH);
+ Shiftdouble(srcp1,srcp2,DBL_EXP_LENGTH,resultp2);
+ Dbl_set_exponent(resultp1, (62+DBL_BIAS) - dst_exponent);
+
+ /* check for inexact */
+ if (Duint_isinexact_to_dbl(srcp2)) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ Dbl_increment(resultp1,resultp2);
+ break;
+ case ROUNDMINUS: /* never negative */
+ break;
+ case ROUNDNEAREST:
+ Dbl_roundnearest_from_duint(srcp2,resultp1,
+ resultp2);
+ break;
+ }
+ if (Is_inexacttrap_enabled()) {
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(INEXACTEXCEPTION);
+ }
+ else Set_inexactflag();
+ }
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+}
+
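The Find_ms_one_bit step above is a count-leading-zeros normalization: the position of the most significant set bit becomes the unbiased exponent and the bits below it become the fraction. A user-space sketch using GCC's builtin (illustrative names; it truncates where the routine above also rounds and raises inexact):

	#include <stdint.h>

	/* Build IEEE single bits from a nonzero 32-bit unsigned value, truncating. */
	static uint32_t uint_to_sgl_bits_truncated(uint32_t u)
	{
		int msb = 31 - __builtin_clz(u);		/* leading-one position */
		uint32_t exp  = (uint32_t)(msb + 127) << 23;	/* rebias the exponent  */
		uint32_t frac = (msb >= 23) ? (u >> (msb - 23)) & 0x7fffffu
					    : (u << (23 - msb)) & 0x7fffffu;
		return exp | frac;				/* sign is always zero  */
	}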
diff --git a/arch/parisc/math-emu/fcnvxf.c b/arch/parisc/math-emu/fcnvxf.c
new file mode 100644
index 00000000..05c7fadb
--- /dev/null
+++ b/arch/parisc/math-emu/fcnvxf.c
@@ -0,0 +1,386 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/fcnvxf.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Single Fixed-point to Single Floating-point
+ * Single Fixed-point to Double Floating-point
+ * Double Fixed-point to Single Floating-point
+ * Double Fixed-point to Double Floating-point
+ *
+ * External Interfaces:
+ * dbl_to_dbl_fcnvxf(srcptr,nullptr,dstptr,status)
+ * dbl_to_sgl_fcnvxf(srcptr,nullptr,dstptr,status)
+ * sgl_to_dbl_fcnvxf(srcptr,nullptr,dstptr,status)
+ * sgl_to_sgl_fcnvxf(srcptr,nullptr,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ *		<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/*
+ * Convert single fixed-point to single floating-point format
+ */
+
+int
+sgl_to_sgl_fcnvxf(
+ int *srcptr,
+ unsigned int *nullptr,
+ sgl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register int src, dst_exponent;
+ register unsigned int result = 0;
+
+ src = *srcptr;
+ /*
+ * set sign bit of result and get magnitude of source
+ */
+ if (src < 0) {
+ Sgl_setone_sign(result);
+ Int_negate(src);
+ }
+ else {
+ Sgl_setzero_sign(result);
+ /* Check for zero */
+ if (src == 0) {
+ Sgl_setzero(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * Generate exponent and normalized mantissa
+ */
+ dst_exponent = 16; /* initialize for normalization */
+ /*
+ * Check word for most significant bit set. Returns
+ * a value in dst_exponent indicating the bit position,
+ * between -1 and 30.
+ */
+ Find_ms_one_bit(src,dst_exponent);
+ /* left justify source, with msb at bit position 1 */
+ if (dst_exponent >= 0) src <<= dst_exponent;
+ else src = 1 << 30;
+ Sgl_set_mantissa(result, src >> (SGL_EXP_LENGTH-1));
+ Sgl_set_exponent(result, 30+SGL_BIAS - dst_exponent);
+
+ /* check for inexact */
+ if (Int_isinexact_to_sgl(src)) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(result))
+ Sgl_increment(result);
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(result))
+ Sgl_increment(result);
+ break;
+ case ROUNDNEAREST:
+ Sgl_roundnearest_from_int(src,result);
+ }
+ if (Is_inexacttrap_enabled()) {
+ *dstptr = result;
+ return(INEXACTEXCEPTION);
+ }
+ else Set_inexactflag();
+ }
+ *dstptr = result;
+ return(NOEXCEPTION);
+}
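A concrete instance of the inexact path just above: 2^31 - 1 needs 31 significant bits but a single keeps only 24, so the conversion rounds; under round-to-nearest it lands on 2^31 exactly (user-space illustration, default rounding assumed):

	#include <assert.h>
	#include <limits.h>

	int main(void)
	{
		/* INT_MAX is inexact in single precision and rounds up to 2^31. */
		assert((float)INT_MAX == 2147483648.0f);
		return 0;
	}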
+
+/*
+ * Single Fixed-point to Double Floating-point
+ */
+
+int
+sgl_to_dbl_fcnvxf(
+ int *srcptr,
+ unsigned int *nullptr,
+ dbl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register int src, dst_exponent;
+ register unsigned int resultp1 = 0, resultp2 = 0;
+
+ src = *srcptr;
+ /*
+ * set sign bit of result and get magnitude of source
+ */
+ if (src < 0) {
+ Dbl_setone_sign(resultp1);
+ Int_negate(src);
+ }
+ else {
+ Dbl_setzero_sign(resultp1);
+ /* Check for zero */
+ if (src == 0) {
+ Dbl_setzero(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * Generate exponent and normalized mantissa
+ */
+ dst_exponent = 16; /* initialize for normalization */
+ /*
+ * Check word for most significant bit set. Returns
+ * a value in dst_exponent indicating the bit position,
+ * between -1 and 30.
+ */
+ Find_ms_one_bit(src,dst_exponent);
+ /* left justify source, with msb at bit position 1 */
+ if (dst_exponent >= 0) src <<= dst_exponent;
+ else src = 1 << 30;
+	Dbl_set_mantissap1(resultp1, src >> (DBL_EXP_LENGTH-1));
+ Dbl_set_mantissap2(resultp2, src << (33-DBL_EXP_LENGTH));
+ Dbl_set_exponent(resultp1, (30+DBL_BIAS) - dst_exponent);
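+	/*
+	 * Unlike the single-precision case above, no inexact check is needed
+	 * here: a 32-bit integer always fits in the 52-bit double-precision
+	 * mantissa, so this conversion is always exact.
+	 */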
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Fixed-point to Single Floating-point
+ */
+
+int
+dbl_to_sgl_fcnvxf(
+ dbl_integer *srcptr,
+ unsigned int *nullptr,
+ sgl_floating_point *dstptr,
+ unsigned int *status)
+{
+ int dst_exponent, srcp1;
+ unsigned int result = 0, srcp2;
+
+ Dint_copyfromptr(srcptr,srcp1,srcp2);
+ /*
+ * set sign bit of result and get magnitude of source
+ */
+ if (srcp1 < 0) {
+ Sgl_setone_sign(result);
+ Dint_negate(srcp1,srcp2);
+ }
+ else {
+ Sgl_setzero_sign(result);
+ /* Check for zero */
+ if (srcp1 == 0 && srcp2 == 0) {
+ Sgl_setzero(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * Generate exponent and normalized mantissa
+ */
+ dst_exponent = 16; /* initialize for normalization */
+ if (srcp1 == 0) {
+ /*
+ * Check word for most significant bit set. Returns
+ * a value in dst_exponent indicating the bit position,
+ * between -1 and 30.
+ */
+ Find_ms_one_bit(srcp2,dst_exponent);
+ /* left justify source, with msb at bit position 1 */
+ if (dst_exponent >= 0) {
+ srcp1 = srcp2 << dst_exponent;
+ srcp2 = 0;
+ }
+ else {
+ srcp1 = srcp2 >> 1;
+ srcp2 <<= 31;
+ }
+ /*
+ * since msb set is in second word, need to
+ * adjust bit position count
+ */
+ dst_exponent += 32;
+ }
+ else {
+ /*
+ * Check word for most significant bit set. Returns
+ * a value in dst_exponent indicating the bit position,
+ * between -1 and 30.
+ *
+ */
+ Find_ms_one_bit(srcp1,dst_exponent);
+ /* left justify source, with msb at bit position 1 */
+ if (dst_exponent > 0) {
+ Variable_shift_double(srcp1,srcp2,(32-dst_exponent),
+ srcp1);
+ srcp2 <<= dst_exponent;
+ }
+ /*
+ * If dst_exponent = 0, we don't need to shift anything.
+ * If dst_exponent = -1, src = - 2**63 so we won't need to
+ * shift srcp2.
+ */
+ else srcp1 >>= -(dst_exponent);
+ }
+	Sgl_set_mantissa(result, srcp1 >> (SGL_EXP_LENGTH-1));
+ Sgl_set_exponent(result, (62+SGL_BIAS) - dst_exponent);
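+
+	/*
+	 * The constant 62 plays the same role as the 30 in the single-word
+	 * converters above: e.g. for a source value of 1 the combined shift
+	 * count is 62 (30 from the low word plus the 32-bit adjustment), so
+	 * the biased exponent is SGL_BIAS, i.e. 1.0. Since only 24 bits of
+	 * precision are available, the result may be inexact and is rounded
+	 * below.
+	 */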
+
+ /* check for inexact */
+ if (Dint_isinexact_to_sgl(srcp1,srcp2)) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(result))
+ Sgl_increment(result);
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(result))
+ Sgl_increment(result);
+ break;
+ case ROUNDNEAREST:
+ Sgl_roundnearest_from_dint(srcp1,srcp2,result);
+ }
+ if (Is_inexacttrap_enabled()) {
+ *dstptr = result;
+ return(INEXACTEXCEPTION);
+ }
+ else Set_inexactflag();
+ }
+ *dstptr = result;
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Fixed-point to Double Floating-point
+ */
+
+int
+dbl_to_dbl_fcnvxf(
+ dbl_integer *srcptr,
+ unsigned int *nullptr,
+ dbl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register int srcp1, dst_exponent;
+ register unsigned int srcp2, resultp1 = 0, resultp2 = 0;
+
+ Dint_copyfromptr(srcptr,srcp1,srcp2);
+ /*
+ * set sign bit of result and get magnitude of source
+ */
+ if (srcp1 < 0) {
+ Dbl_setone_sign(resultp1);
+ Dint_negate(srcp1,srcp2);
+ }
+ else {
+ Dbl_setzero_sign(resultp1);
+ /* Check for zero */
+ if (srcp1 == 0 && srcp2 ==0) {
+ Dbl_setzero(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * Generate exponent and normalized mantissa
+ */
+ dst_exponent = 16; /* initialize for normalization */
+ if (srcp1 == 0) {
+ /*
+ * Check word for most significant bit set. Returns
+ * a value in dst_exponent indicating the bit position,
+ * between -1 and 30.
+ */
+ Find_ms_one_bit(srcp2,dst_exponent);
+ /* left justify source, with msb at bit position 1 */
+ if (dst_exponent >= 0) {
+ srcp1 = srcp2 << dst_exponent;
+ srcp2 = 0;
+ }
+ else {
+ srcp1 = srcp2 >> 1;
+ srcp2 <<= 31;
+ }
+ /*
+ * since msb set is in second word, need to
+ * adjust bit position count
+ */
+ dst_exponent += 32;
+ }
+ else {
+ /*
+ * Check word for most significant bit set. Returns
+ * a value in dst_exponent indicating the bit position,
+ * between -1 and 30.
+ */
+ Find_ms_one_bit(srcp1,dst_exponent);
+ /* left justify source, with msb at bit position 1 */
+ if (dst_exponent > 0) {
+ Variable_shift_double(srcp1,srcp2,(32-dst_exponent),
+ srcp1);
+ srcp2 <<= dst_exponent;
+ }
+ /*
+ * If dst_exponent = 0, we don't need to shift anything.
+ * If dst_exponent = -1, src = - 2**63 so we won't need to
+ * shift srcp2.
+ */
+ else srcp1 >>= -(dst_exponent);
+ }
+ Dbl_set_mantissap1(resultp1, srcp1 >> (DBL_EXP_LENGTH-1));
+ Shiftdouble(srcp1,srcp2,DBL_EXP_LENGTH-1,resultp2);
+ Dbl_set_exponent(resultp1, (62+DBL_BIAS) - dst_exponent);
+
+ /* check for inexact */
+ if (Dint_isinexact_to_dbl(srcp2)) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(resultp1)) {
+ Dbl_increment(resultp1,resultp2);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(resultp1)) {
+ Dbl_increment(resultp1,resultp2);
+ }
+ break;
+ case ROUNDNEAREST:
+ Dbl_roundnearest_from_dint(srcp2,resultp1,
+ resultp2);
+ }
+ if (Is_inexacttrap_enabled()) {
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(INEXACTEXCEPTION);
+ }
+ else Set_inexactflag();
+ }
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+}
diff --git a/arch/parisc/math-emu/float.h b/arch/parisc/math-emu/float.h
new file mode 100644
index 00000000..ce76f6df
--- /dev/null
+++ b/arch/parisc/math-emu/float.h
@@ -0,0 +1,582 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/float.h $Revision: 1.1 $
+ *
+ * Purpose:
+ *	<<please update with a synopsis of the functionality provided by this file>>
+ *
+ * BE header: no
+ *
+ * Shipped: yes
+ * /usr/conf/pa/spmath/float.h
+ *
+ * END_DESC
+*/
+
+#ifdef __NO_PA_HDRS
+ PA header file -- do not include this header file for non-PA builds.
+#endif
+
+#include "fpbits.h"
+#include "hppa.h"
+/*
+ * Want to pick up the FPU capability flags, not the PDC structures.
+ * 'LOCORE' isn't really true in this case, but we don't want the C structures,
+ * so defining it suits our purposes.
+ */
+#define LOCORE
+#include "fpu.h"
+
+/*
+ * Declare the basic structures for the 3 different
+ * floating-point precisions.
+ *
+ * Single number
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |s| exp | mantissa |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
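+
+/*
+ * Note on bit numbering: the Bitfield_* macros below use PA-RISC
+ * big-endian bit numbering, in which bit 0 is the most significant
+ * bit of the word (see fpbits.h). Ssign therefore extracts the IEEE
+ * sign bit, Sexponent the 8-bit exponent field, and Smantissa the
+ * 23-bit fraction.
+ */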
+#define Sall(object) (object)
+#define Ssign(object) Bitfield_extract( 0, 1,object)
+#define Ssignedsign(object) Bitfield_signed_extract( 0, 1,object)
+#define Sexponent(object) Bitfield_extract( 1, 8,object)
+#define Smantissa(object) Bitfield_mask( 9, 23,object)
+#define Ssignaling(object) Bitfield_extract( 9, 1,object)
+#define Ssignalingnan(object) Bitfield_extract( 1, 9,object)
+#define Shigh2mantissa(object) Bitfield_extract( 9, 2,object)
+#define Sexponentmantissa(object) Bitfield_mask( 1, 31,object)
+#define Ssignexponent(object) Bitfield_extract( 0, 9,object)
+#define Shidden(object) Bitfield_extract( 8, 1,object)
+#define Shiddenoverflow(object) Bitfield_extract( 7, 1,object)
+#define Shiddenhigh7mantissa(object) Bitfield_extract( 8, 8,object)
+#define Shiddenhigh3mantissa(object) Bitfield_extract( 8, 4,object)
+#define Slow(object) Bitfield_mask( 31, 1,object)
+#define Slow4(object) Bitfield_mask( 28, 4,object)
+#define Slow31(object) Bitfield_mask( 1, 31,object)
+#define Shigh31(object) Bitfield_extract( 0, 31,object)
+#define Ssignedhigh31(object) Bitfield_signed_extract( 0, 31,object)
+#define Shigh4(object) Bitfield_extract( 0, 4,object)
+#define Sbit24(object) Bitfield_extract( 24, 1,object)
+#define Sbit28(object) Bitfield_extract( 28, 1,object)
+#define Sbit29(object) Bitfield_extract( 29, 1,object)
+#define Sbit30(object) Bitfield_extract( 30, 1,object)
+#define Sbit31(object) Bitfield_mask( 31, 1,object)
+
+#define Deposit_ssign(object,value) Bitfield_deposit(value,0,1,object)
+#define Deposit_sexponent(object,value) Bitfield_deposit(value,1,8,object)
+#define Deposit_smantissa(object,value) Bitfield_deposit(value,9,23,object)
+#define Deposit_shigh2mantissa(object,value) Bitfield_deposit(value,9,2,object)
+#define Deposit_sexponentmantissa(object,value) \
+ Bitfield_deposit(value,1,31,object)
+#define Deposit_ssignexponent(object,value) Bitfield_deposit(value,0,9,object)
+#define Deposit_slow(object,value) Bitfield_deposit(value,31,1,object)
+#define Deposit_shigh4(object,value) Bitfield_deposit(value,0,4,object)
+
+#define Is_ssign(object) Bitfield_mask( 0, 1,object)
+#define Is_ssignaling(object) Bitfield_mask( 9, 1,object)
+#define Is_shidden(object) Bitfield_mask( 8, 1,object)
+#define Is_shiddenoverflow(object) Bitfield_mask( 7, 1,object)
+#define Is_slow(object) Bitfield_mask( 31, 1,object)
+#define Is_sbit24(object) Bitfield_mask( 24, 1,object)
+#define Is_sbit28(object) Bitfield_mask( 28, 1,object)
+#define Is_sbit29(object) Bitfield_mask( 29, 1,object)
+#define Is_sbit30(object) Bitfield_mask( 30, 1,object)
+#define Is_sbit31(object) Bitfield_mask( 31, 1,object)
+
+/*
+ * Double number.
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |s| exponent | mantissa part 1 |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * | mantissa part 2 |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+#define Dallp1(object) (object)
+#define Dsign(object) Bitfield_extract( 0, 1,object)
+#define Dsignedsign(object) Bitfield_signed_extract( 0, 1,object)
+#define Dexponent(object) Bitfield_extract( 1, 11,object)
+#define Dmantissap1(object) Bitfield_mask( 12, 20,object)
+#define Dsignaling(object) Bitfield_extract( 12, 1,object)
+#define Dsignalingnan(object) Bitfield_extract( 1, 12,object)
+#define Dhigh2mantissa(object) Bitfield_extract( 12, 2,object)
+#define Dexponentmantissap1(object) Bitfield_mask( 1, 31,object)
+#define Dsignexponent(object) Bitfield_extract( 0, 12,object)
+#define Dhidden(object) Bitfield_extract( 11, 1,object)
+#define Dhiddenoverflow(object) Bitfield_extract( 10, 1,object)
+#define Dhiddenhigh7mantissa(object) Bitfield_extract( 11, 8,object)
+#define Dhiddenhigh3mantissa(object) Bitfield_extract( 11, 4,object)
+#define Dlowp1(object) Bitfield_mask( 31, 1,object)
+#define Dlow31p1(object) Bitfield_mask( 1, 31,object)
+#define Dhighp1(object) Bitfield_extract( 0, 1,object)
+#define Dhigh4p1(object) Bitfield_extract( 0, 4,object)
+#define Dhigh31p1(object) Bitfield_extract( 0, 31,object)
+#define Dsignedhigh31p1(object) Bitfield_signed_extract( 0, 31,object)
+#define Dbit3p1(object) Bitfield_extract( 3, 1,object)
+
+#define Deposit_dsign(object,value) Bitfield_deposit(value,0,1,object)
+#define Deposit_dexponent(object,value) Bitfield_deposit(value,1,11,object)
+#define Deposit_dmantissap1(object,value) Bitfield_deposit(value,12,20,object)
+#define Deposit_dhigh2mantissa(object,value) Bitfield_deposit(value,12,2,object)
+#define Deposit_dexponentmantissap1(object,value) \
+ Bitfield_deposit(value,1,31,object)
+#define Deposit_dsignexponent(object,value) Bitfield_deposit(value,0,12,object)
+#define Deposit_dlowp1(object,value) Bitfield_deposit(value,31,1,object)
+#define Deposit_dhigh4p1(object,value) Bitfield_deposit(value,0,4,object)
+
+#define Is_dsign(object) Bitfield_mask( 0, 1,object)
+#define Is_dsignaling(object) Bitfield_mask( 12, 1,object)
+#define Is_dhidden(object) Bitfield_mask( 11, 1,object)
+#define Is_dhiddenoverflow(object) Bitfield_mask( 10, 1,object)
+#define Is_dlowp1(object) Bitfield_mask( 31, 1,object)
+#define Is_dhighp1(object) Bitfield_mask( 0, 1,object)
+#define Is_dbit3p1(object) Bitfield_mask( 3, 1,object)
+
+#define Dallp2(object) (object)
+#define Dmantissap2(object) (object)
+#define Dlowp2(object) Bitfield_mask( 31, 1,object)
+#define Dlow4p2(object) Bitfield_mask( 28, 4,object)
+#define Dlow31p2(object) Bitfield_mask( 1, 31,object)
+#define Dhighp2(object) Bitfield_extract( 0, 1,object)
+#define Dhigh31p2(object) Bitfield_extract( 0, 31,object)
+#define Dbit2p2(object) Bitfield_extract( 2, 1,object)
+#define Dbit3p2(object) Bitfield_extract( 3, 1,object)
+#define Dbit21p2(object) Bitfield_extract( 21, 1,object)
+#define Dbit28p2(object) Bitfield_extract( 28, 1,object)
+#define Dbit29p2(object) Bitfield_extract( 29, 1,object)
+#define Dbit30p2(object) Bitfield_extract( 30, 1,object)
+#define Dbit31p2(object) Bitfield_mask( 31, 1,object)
+
+#define Deposit_dlowp2(object,value) Bitfield_deposit(value,31,1,object)
+
+#define Is_dlowp2(object) Bitfield_mask( 31, 1,object)
+#define Is_dhighp2(object) Bitfield_mask( 0, 1,object)
+#define Is_dbit2p2(object) Bitfield_mask( 2, 1,object)
+#define Is_dbit3p2(object) Bitfield_mask( 3, 1,object)
+#define Is_dbit21p2(object) Bitfield_mask( 21, 1,object)
+#define Is_dbit28p2(object) Bitfield_mask( 28, 1,object)
+#define Is_dbit29p2(object) Bitfield_mask( 29, 1,object)
+#define Is_dbit30p2(object) Bitfield_mask( 30, 1,object)
+#define Is_dbit31p2(object) Bitfield_mask( 31, 1,object)
+
+/*
+ * Quad number.
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |s| exponent | mantissa part 1 |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * | mantissa part 2 |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * | mantissa part 3 |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * | mantissa part 4 |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+typedef struct
+ {
+ union
+ {
+ struct { unsigned qallp1; } u_qallp1;
+/* Not needed for now...
+ Bitfield_extract( 0, 1,u_qsign,qsign)
+ Bitfield_signed_extract( 0, 1,u_qsignedsign,qsignedsign)
+ Bitfield_extract( 1, 15,u_qexponent,qexponent)
+ Bitfield_extract(16, 16,u_qmantissap1,qmantissap1)
+ Bitfield_extract(16, 1,u_qsignaling,qsignaling)
+ Bitfield_extract(1, 16,u_qsignalingnan,qsignalingnan)
+ Bitfield_extract(16, 2,u_qhigh2mantissa,qhigh2mantissa)
+ Bitfield_extract( 1, 31,u_qexponentmantissap1,qexponentmantissap1)
+ Bitfield_extract( 0, 16,u_qsignexponent,qsignexponent)
+ Bitfield_extract(15, 1,u_qhidden,qhidden)
+ Bitfield_extract(14, 1,u_qhiddenoverflow,qhiddenoverflow)
+ Bitfield_extract(15, 8,u_qhiddenhigh7mantissa,qhiddenhigh7mantissa)
+ Bitfield_extract(15, 4,u_qhiddenhigh3mantissa,qhiddenhigh3mantissa)
+ Bitfield_extract(31, 1,u_qlowp1,qlowp1)
+ Bitfield_extract( 1, 31,u_qlow31p1,qlow31p1)
+ Bitfield_extract( 0, 1,u_qhighp1,qhighp1)
+ Bitfield_extract( 0, 4,u_qhigh4p1,qhigh4p1)
+ Bitfield_extract( 0, 31,u_qhigh31p1,qhigh31p1)
+ */
+ } quad_u1;
+ union
+ {
+ struct { unsigned qallp2; } u_qallp2;
+ /* Not needed for now...
+ Bitfield_extract(31, 1,u_qlowp2,qlowp2)
+ Bitfield_extract( 1, 31,u_qlow31p2,qlow31p2)
+ Bitfield_extract( 0, 1,u_qhighp2,qhighp2)
+ Bitfield_extract( 0, 31,u_qhigh31p2,qhigh31p2)
+ */
+ } quad_u2;
+ union
+ {
+ struct { unsigned qallp3; } u_qallp3;
+ /* Not needed for now...
+ Bitfield_extract(31, 1,u_qlowp3,qlowp3)
+ Bitfield_extract( 1, 31,u_qlow31p3,qlow31p3)
+ Bitfield_extract( 0, 1,u_qhighp3,qhighp3)
+ Bitfield_extract( 0, 31,u_qhigh31p3,qhigh31p3)
+ */
+ } quad_u3;
+ union
+ {
+ struct { unsigned qallp4; } u_qallp4;
+	/* Not needed for now...
+ Bitfield_extract(31, 1,u_qlowp4,qlowp4)
+ Bitfield_extract( 1, 31,u_qlow31p4,qlow31p4)
+ Bitfield_extract( 0, 1,u_qhighp4,qhighp4)
+ Bitfield_extract( 0, 31,u_qhigh31p4,qhigh31p4)
+ */
+ } quad_u4;
+ } quad_floating_point;
+
+/* Extension - An additional structure to hold the guard, round and
+ * sticky bits during computations.
+ */
+#define Extall(object) (object)
+#define Extsign(object) Bitfield_extract( 0, 1,object)
+#define Exthigh31(object) Bitfield_extract( 0, 31,object)
+#define Extlow31(object) Bitfield_extract( 1, 31,object)
+#define Extlow(object) Bitfield_extract( 31, 1,object)
+
+/*
+ * Single extended - The upper word is just like single precision,
+ * but one additional word of mantissa is needed.
+ */
+#define Sextallp1(object) (object)
+#define Sextallp2(object) (object)
+#define Sextlowp1(object) Bitfield_extract( 31, 1,object)
+#define Sexthighp2(object) Bitfield_extract( 0, 1,object)
+#define Sextlow31p2(object) Bitfield_extract( 1, 31,object)
+#define Sexthiddenoverflow(object) Bitfield_extract( 4, 1,object)
+#define Is_sexthiddenoverflow(object) Bitfield_mask( 4, 1,object)
+
+/*
+ * Double extended - The upper two words are just like double precision,
+ * but two additional words of mantissa are needed.
+ */
+#define Dextallp1(object) (object)
+#define Dextallp2(object) (object)
+#define Dextallp3(object) (object)
+#define Dextallp4(object) (object)
+#define Dextlowp2(object) Bitfield_extract( 31, 1,object)
+#define Dexthighp3(object) Bitfield_extract( 0, 1,object)
+#define Dextlow31p3(object) Bitfield_extract( 1, 31,object)
+#define Dexthiddenoverflow(object) Bitfield_extract( 10, 1,object)
+#define Is_dexthiddenoverflow(object) Bitfield_mask( 10, 1,object)
+#define Deposit_dextlowp4(object,value) Bitfield_deposit(value,31,1,object)
+
+/*
+ * Declare the basic structures for the 3 different
+ * fixed-point precisions.
+ *
+ * Single number
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |s| integer |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+typedef int sgl_integer;
+
+/*
+ * Double number.
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |s| high integer |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * | low integer |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+struct dint {
+ int wd0;
+ unsigned int wd1;
+};
+
+struct dblwd {
+ unsigned int wd0;
+ unsigned int wd1;
+};
+
+/*
+ * Quad number.
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |s| integer part1 |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * | integer part 2 |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * | integer part 3 |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * | integer part 4 |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+
+struct quadwd {
+ int wd0;
+ unsigned int wd1;
+ unsigned int wd2;
+ unsigned int wd3;
+};
+
+typedef struct quadwd quad_integer;
+
+
+/* useful typedefs */
+typedef unsigned int sgl_floating_point;
+typedef struct dblwd dbl_floating_point;
+typedef struct dint dbl_integer;
+typedef struct dblwd dbl_unsigned;
+
+/*
+ * Define the different precisions' parameters.
+ */
+#define SGL_BITLENGTH 32
+#define SGL_EMAX 127
+#define SGL_EMIN (-126)
+#define SGL_BIAS 127
+#define SGL_WRAP 192
+#define SGL_INFINITY_EXPONENT (SGL_EMAX+SGL_BIAS+1)
+#define SGL_THRESHOLD 32
+#define SGL_EXP_LENGTH 8
+#define SGL_P 24
+
+#define DBL_BITLENGTH 64
+#define DBL_EMAX 1023
+#define DBL_EMIN (-1022)
+#define DBL_BIAS 1023
+#define DBL_WRAP 1536
+#define DBL_INFINITY_EXPONENT (DBL_EMAX+DBL_BIAS+1)
+#define DBL_THRESHOLD 64
+#define DBL_EXP_LENGTH 11
+#define DBL_P 53
+
+#define QUAD_BITLENGTH 128
+#define QUAD_EMAX 16383
+#define QUAD_EMIN (-16382)
+#define QUAD_BIAS 16383
+#define QUAD_WRAP 24576
+#define QUAD_INFINITY_EXPONENT (QUAD_EMAX+QUAD_BIAS+1)
+#define QUAD_P 113
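+
+/*
+ * With these parameters the all-ones exponent encodings work out to
+ * SGL_INFINITY_EXPONENT == 255, DBL_INFINITY_EXPONENT == 2047 and
+ * QUAD_INFINITY_EXPONENT == 32767, the usual IEEE 754 encodings used
+ * for infinities and NaNs.
+ */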
+
+/* Boolean Values etc. */
+#define FALSE 0
+#define TRUE (!FALSE)
+#define NOT !
+#define XOR ^
+
+/* other constants */
+#undef NULL
+#define NULL 0
+#define NIL 0
+#define SGL 0
+#define DBL 1
+#define BADFMT 2
+#define QUAD 3
+
+
+/* Types */
+typedef int boolean;
+typedef int FORMAT;
+typedef int VOID;
+
+
+/* Declare a status register layout equivalent to the FPU architecture.
+ *
+ * 0 1 2 3 4 5 6 7 8 910 1 2 3 4 5 6 7 8 920 1 2 3 4 5 6 7 8 930 1
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |V|Z|O|U|I|C| rsv | model | version |RM |rsv|T|r|V|Z|O|U|I|
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+#define Cbit(object) Bitfield_extract( 5, 1,object)
+#define Tbit(object) Bitfield_extract( 25, 1,object)
+#define Roundingmode(object) Bitfield_extract( 21, 2,object)
+#define Invalidtrap(object) Bitfield_extract( 27, 1,object)
+#define Divisionbyzerotrap(object) Bitfield_extract( 28, 1,object)
+#define Overflowtrap(object) Bitfield_extract( 29, 1,object)
+#define Underflowtrap(object) Bitfield_extract( 30, 1,object)
+#define Inexacttrap(object) Bitfield_extract( 31, 1,object)
+#define Invalidflag(object) Bitfield_extract( 0, 1,object)
+#define Divisionbyzeroflag(object) Bitfield_extract( 1, 1,object)
+#define Overflowflag(object) Bitfield_extract( 2, 1,object)
+#define Underflowflag(object) Bitfield_extract( 3, 1,object)
+#define Inexactflag(object) Bitfield_extract( 4, 1,object)
+#define Allflags(object) Bitfield_extract( 0, 5,object)
+
+/* Definitions relevant to the status register */
+
+/* Rounding Modes */
+#define ROUNDNEAREST 0
+#define ROUNDZERO 1
+#define ROUNDPLUS 2
+#define ROUNDMINUS 3
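+
+/*
+ * These values are the encodings of the two-bit RM field (bits 21-22
+ * of the status register) extracted by Roundingmode() above.
+ */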
+
+/* Exceptions */
+#define NOEXCEPTION 0x0
+#define INVALIDEXCEPTION 0x20
+#define DIVISIONBYZEROEXCEPTION 0x10
+#define OVERFLOWEXCEPTION 0x08
+#define UNDERFLOWEXCEPTION 0x04
+#define INEXACTEXCEPTION 0x02
+#define UNIMPLEMENTEDEXCEPTION 0x01
+
+/* New exceptions for the 2E Opcode */
+#define OPC_2E_INVALIDEXCEPTION 0x30
+#define OPC_2E_OVERFLOWEXCEPTION 0x18
+#define OPC_2E_UNDERFLOWEXCEPTION 0x0c
+#define OPC_2E_INEXACTEXCEPTION 0x12
+
+/* Declare exception registers equivalent to the FPU architecture
+ *
+ * 0 1 2 3 4 5 6 7 8 910 1 2 3 4 5 6 7 8 920 1 2 3 4 5 6 7 8 930 1
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |excepttype | r1 | r2/ext | operation |parm |n| t/cond |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+#define Allexception(object) (object)
+#define Exceptiontype(object) Bitfield_extract( 0, 6,object)
+#define Instructionfield(object) Bitfield_mask( 6,26,object)
+#define Parmfield(object) Bitfield_extract( 23, 3,object)
+#define Rabit(object) Bitfield_extract( 24, 1,object)
+#define Ibit(object) Bitfield_extract( 25, 1,object)
+
+#define Set_exceptiontype(object,value) Bitfield_deposit(value, 0, 6,object)
+#define Set_parmfield(object,value) Bitfield_deposit(value, 23, 3,object)
+#define Set_exceptiontype_and_instr_field(exception,instruction,object) \
+ object = exception << 26 | instruction
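+
+/*
+ * (Shifting the exception code left by 26 places the 6-bit exception
+ * type into bits 0..5 of the word, the same field Exceptiontype()
+ * extracts; the instruction occupies the remaining 26 bits, matching
+ * Instructionfield().)
+ */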
+
+/* Declare the condition field
+ *
+ * 0 1 2 3 4 5 6 7 8 910 1 2 3 4 5 6 7 8 920 1 2 3 4 5 6 7 8 930 1
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * | |G|L|E|U|X|
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+#define Allexception(object) (object)
+#define Greaterthanbit(object) Bitfield_extract( 27, 1,object)
+#define Lessthanbit(object) Bitfield_extract( 28, 1,object)
+#define Equalbit(object) Bitfield_extract( 29, 1,object)
+#define Unorderedbit(object) Bitfield_extract( 30, 1,object)
+#define Exceptionbit(object) Bitfield_extract( 31, 1,object)
+
+/* An alias name for the status register */
+#define Fpustatus_register (*status)
+
+/**************************************************
+ * Status register referencing and manipulation. *
+ **************************************************/
+
+/* Rounding mode */
+#define Rounding_mode() Roundingmode(Fpustatus_register)
+#define Is_rounding_mode(rmode) \
+ (Roundingmode(Fpustatus_register) == rmode)
+#define Set_rounding_mode(value) \
+ Bitfield_deposit(value,21,2,Fpustatus_register)
+
+/* Boolean testing of the trap enable bits */
+#define Is_invalidtrap_enabled() Invalidtrap(Fpustatus_register)
+#define Is_divisionbyzerotrap_enabled() Divisionbyzerotrap(Fpustatus_register)
+#define Is_overflowtrap_enabled() Overflowtrap(Fpustatus_register)
+#define Is_underflowtrap_enabled() Underflowtrap(Fpustatus_register)
+#define Is_inexacttrap_enabled() Inexacttrap(Fpustatus_register)
+
+/* Set the indicated flags in the status register */
+#define Set_invalidflag() Bitfield_deposit(1,0,1,Fpustatus_register)
+#define Set_divisionbyzeroflag() Bitfield_deposit(1,1,1,Fpustatus_register)
+#define Set_overflowflag() Bitfield_deposit(1,2,1,Fpustatus_register)
+#define Set_underflowflag() Bitfield_deposit(1,3,1,Fpustatus_register)
+#define Set_inexactflag() Bitfield_deposit(1,4,1,Fpustatus_register)
+
+#define Clear_all_flags() Bitfield_deposit(0,0,5,Fpustatus_register)
+
+/* Manipulate the trap and condition code bits (tbit and cbit) */
+#define Set_tbit() Bitfield_deposit(1,25,1,Fpustatus_register)
+#define Clear_tbit() Bitfield_deposit(0,25,1,Fpustatus_register)
+#define Is_tbit_set() Tbit(Fpustatus_register)
+#define Is_cbit_set() Cbit(Fpustatus_register)
+
+#define Set_status_cbit(value) \
+ Bitfield_deposit(value,5,1,Fpustatus_register)
+
+/*******************************
+ * Condition field referencing *
+ *******************************/
+#define Unordered(cond) Unorderedbit(cond)
+#define Equal(cond) Equalbit(cond)
+#define Lessthan(cond) Lessthanbit(cond)
+#define Greaterthan(cond) Greaterthanbit(cond)
+#define Exception(cond) Exceptionbit(cond)
+
+
+/* Defines for the extension */
+#define Ext_isone_sign(extent) (Extsign(extent))
+#define Ext_isnotzero(extent) \
+ (Extall(extent))
+#define Ext_isnotzero_lower(extent) \
+ (Extlow31(extent))
+#define Ext_leftshiftby1(extent) \
+ Extall(extent) <<= 1
+#define Ext_negate(extent) \
+    Extall(extent) = 0 - (int )Extall(extent)
+#define Ext_setone_low(extent) Bitfield_deposit(1,31,1,extent)
+#define Ext_setzero(extent) Extall(extent) = 0
+
+typedef int operation;
+
+/* error messages */
+
+#define NONE 0
+#define UNDEFFPINST 1
+
+/* Function definitions: opcode, opclass */
+#define FTEST (1<<2) | 0
+#define FCPY (2<<2) | 0
+#define FABS (3<<2) | 0
+#define FSQRT (4<<2) | 0
+#define FRND (5<<2) | 0
+
+#define FCNVFF (0<<2) | 1
+#define FCNVXF (1<<2) | 1
+#define FCNVFX (2<<2) | 1
+#define FCNVFXT (3<<2) | 1
+
+#define FCMP (0<<2) | 2
+
+#define FADD (0<<2) | 3
+#define FSUB (1<<2) | 3
+#define FMPY (2<<2) | 3
+#define FDIV (3<<2) | 3
+#define FREM (4<<2) | 3
+
diff --git a/arch/parisc/math-emu/fmpyfadd.c b/arch/parisc/math-emu/fmpyfadd.c
new file mode 100644
index 00000000..b067c45c
--- /dev/null
+++ b/arch/parisc/math-emu/fmpyfadd.c
@@ -0,0 +1,2655 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/fmpyfadd.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Double Floating-point Multiply Fused Add
+ * Double Floating-point Multiply Negate Fused Add
+ * Single Floating-point Multiply Fused Add
+ * Single Floating-point Multiply Negate Fused Add
+ *
+ * External Interfaces:
+ * dbl_fmpyfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
+ * dbl_fmpynfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
+ * sgl_fmpyfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
+ * sgl_fmpynfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+
+
+/*
+ * Double Floating-point Multiply Fused Add
+ */
+
+int
+dbl_fmpyfadd(
+ dbl_floating_point *src1ptr,
+ dbl_floating_point *src2ptr,
+ dbl_floating_point *src3ptr,
+ unsigned int *status,
+ dbl_floating_point *dstptr)
+{
+ unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2, opnd3p1, opnd3p2;
+ register unsigned int tmpresp1, tmpresp2, tmpresp3, tmpresp4;
+ unsigned int rightp1, rightp2, rightp3, rightp4;
+ unsigned int resultp1, resultp2 = 0, resultp3 = 0, resultp4 = 0;
+ register int mpy_exponent, add_exponent, count;
+ boolean inexact = FALSE, is_tiny = FALSE;
+
+ unsigned int signlessleft1, signlessright1, save;
+ register int result_exponent, diff_exponent;
+ int sign_save, jumpsize;
+
+ Dbl_copyfromptr(src1ptr,opnd1p1,opnd1p2);
+ Dbl_copyfromptr(src2ptr,opnd2p1,opnd2p2);
+ Dbl_copyfromptr(src3ptr,opnd3p1,opnd3p2);
+
+ /*
+ * set sign bit of result of multiply
+ */
+ if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1))
+ Dbl_setnegativezerop1(resultp1);
+ else Dbl_setzerop1(resultp1);
+
+ /*
+ * Generate multiply exponent
+ */
+ mpy_exponent = Dbl_exponent(opnd1p1) + Dbl_exponent(opnd2p1) - DBL_BIAS;
+
+ /*
+ * check first operand for NaN's or infinity
+ */
+ if (Dbl_isinfinity_exponent(opnd1p1)) {
+ if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+ if (Dbl_isnotnan(opnd2p1,opnd2p2) &&
+ Dbl_isnotnan(opnd3p1,opnd3p2)) {
+ if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
+ /*
+ * invalid since operands are infinity
+ * and zero
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Check third operand for infinity with a
+ * sign opposite of the multiply result
+ */
+ if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
+ (Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
+ /*
+ * invalid since attempting a magnitude
+ * subtraction of infinities
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * return infinity
+ */
+ Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(opnd1p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd1p1);
+ }
+ /*
+ * is second operand a signaling NaN?
+ */
+ else if (Dbl_is_signalingnan(opnd2p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd2p1);
+ Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * is third operand a signaling NaN?
+ */
+ else if (Dbl_is_signalingnan(opnd3p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd3p1);
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * check second operand for NaN's or infinity
+ */
+ if (Dbl_isinfinity_exponent(opnd2p1)) {
+ if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
+ if (Dbl_isnotnan(opnd3p1,opnd3p2)) {
+ if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
+ /*
+ * invalid since multiply operands are
+ * zero & infinity
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(opnd2p1,opnd2p2);
+ Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * Check third operand for infinity with a
+ * sign opposite of the multiply result
+ */
+ if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
+ (Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
+ /*
+ * invalid since attempting a magnitude
+ * subtraction of infinities
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * return infinity
+ */
+ Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(opnd2p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd2p1);
+ }
+ /*
+ * is third operand a signaling NaN?
+ */
+ else if (Dbl_is_signalingnan(opnd3p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd3p1);
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * check third operand for NaN's or infinity
+ */
+ if (Dbl_isinfinity_exponent(opnd3p1)) {
+ if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
+ /* return infinity */
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ return(NOEXCEPTION);
+ } else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(opnd3p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd3p1);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * Generate multiply mantissa
+ */
+ if (Dbl_isnotzero_exponent(opnd1p1)) {
+ /* set hidden bit */
+ Dbl_clear_signexponent_set_hidden(opnd1p1);
+ }
+ else {
+ /* check for zero */
+ if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+ /*
+ * Perform the add opnd3 with zero here.
+ */
+ if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
+ if (Is_rounding_mode(ROUNDMINUS)) {
+ Dbl_or_signs(opnd3p1,resultp1);
+ } else {
+ Dbl_and_signs(opnd3p1,resultp1);
+ }
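+				/*
+				 * (With both the product and opnd3 being zero,
+				 * the sum keeps a negative sign only when both
+				 * zeros are negative, except in round-toward-
+				 * minus-infinity mode where -0 + +0 is also -0;
+				 * the AND/OR of the sign bits above implements
+				 * that rule.)
+				 */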
+ }
+ /*
+ * Now let's check for trapped underflow case.
+ */
+ else if (Dbl_iszero_exponent(opnd3p1) &&
+ Is_underflowtrap_enabled()) {
+ /* need to normalize results mantissa */
+ sign_save = Dbl_signextendedsign(opnd3p1);
+ result_exponent = 0;
+ Dbl_leftshiftby1(opnd3p1,opnd3p2);
+ Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
+ Dbl_set_sign(opnd3p1,/*using*/sign_save);
+ Dbl_setwrapped_exponent(opnd3p1,result_exponent,
+ unfl);
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ /* inexact = FALSE */
+ return(OPC_2E_UNDERFLOWEXCEPTION);
+ }
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /* is denormalized, adjust exponent */
+ Dbl_clear_signexponent(opnd1p1);
+ Dbl_leftshiftby1(opnd1p1,opnd1p2);
+ Dbl_normalize(opnd1p1,opnd1p2,mpy_exponent);
+ }
+ /* opnd2 needs to have hidden bit set with msb in hidden bit */
+ if (Dbl_isnotzero_exponent(opnd2p1)) {
+ Dbl_clear_signexponent_set_hidden(opnd2p1);
+ }
+ else {
+ /* check for zero */
+ if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
+ /*
+ * Perform the add opnd3 with zero here.
+ */
+ if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
+ if (Is_rounding_mode(ROUNDMINUS)) {
+ Dbl_or_signs(opnd3p1,resultp1);
+ } else {
+ Dbl_and_signs(opnd3p1,resultp1);
+ }
+ }
+ /*
+ * Now let's check for trapped underflow case.
+ */
+ else if (Dbl_iszero_exponent(opnd3p1) &&
+ Is_underflowtrap_enabled()) {
+ /* need to normalize results mantissa */
+ sign_save = Dbl_signextendedsign(opnd3p1);
+ result_exponent = 0;
+ Dbl_leftshiftby1(opnd3p1,opnd3p2);
+ Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
+ Dbl_set_sign(opnd3p1,/*using*/sign_save);
+ Dbl_setwrapped_exponent(opnd3p1,result_exponent,
+ unfl);
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ /* inexact = FALSE */
+ return(OPC_2E_UNDERFLOWEXCEPTION);
+ }
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /* is denormalized; want to normalize */
+ Dbl_clear_signexponent(opnd2p1);
+ Dbl_leftshiftby1(opnd2p1,opnd2p2);
+ Dbl_normalize(opnd2p1,opnd2p2,mpy_exponent);
+ }
+
+ /* Multiply the first two source mantissas together */
+
+ /*
+ * The intermediate result will be kept in tmpres,
+ * which needs enough room for 106 bits of mantissa,
+	 * so let's call it a double extended.
+ */
+ Dblext_setzero(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
+
+ /*
+ * Four bits at a time are inspected in each loop, and a
+ * simple shift and add multiply algorithm is used.
+ */
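+	/*
+	 * Each iteration shifts the 128-bit accumulator right by four and
+	 * conditionally adds opnd2 scaled by 8, 4, 2 and 1 according to the
+	 * four least-significant mantissa bits of opnd1, which is itself
+	 * shifted right by four afterwards; after ceil(DBL_P/4) iterations
+	 * the full 106-bit product has been accumulated.
+	 */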
+ for (count = DBL_P-1; count >= 0; count -= 4) {
+ Dblext_rightshiftby4(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
+ if (Dbit28p2(opnd1p2)) {
+ /* Fourword_add should be an ADD followed by 3 ADDC's */
+ Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
+ opnd2p1<<3 | opnd2p2>>29, opnd2p2<<3, 0, 0);
+ }
+ if (Dbit29p2(opnd1p2)) {
+ Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
+ opnd2p1<<2 | opnd2p2>>30, opnd2p2<<2, 0, 0);
+ }
+ if (Dbit30p2(opnd1p2)) {
+ Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
+ opnd2p1<<1 | opnd2p2>>31, opnd2p2<<1, 0, 0);
+ }
+ if (Dbit31p2(opnd1p2)) {
+ Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
+ opnd2p1, opnd2p2, 0, 0);
+ }
+ Dbl_rightshiftby4(opnd1p1,opnd1p2);
+ }
+ if (Is_dexthiddenoverflow(tmpresp1)) {
+ /* result mantissa >= 2 (mantissa overflow) */
+ mpy_exponent++;
+ Dblext_rightshiftby1(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
+ }
+
+ /*
+ * Restore the sign of the mpy result which was saved in resultp1.
+ * The exponent will continue to be kept in mpy_exponent.
+ */
+ Dblext_set_sign(tmpresp1,Dbl_sign(resultp1));
+
+ /*
+ * No rounding is required, since the result of the multiply
+ * is exact in the extended format.
+ */
+
+ /*
+ * Now we are ready to perform the add portion of the operation.
+ *
+ * The exponents need to be kept as integers for now, since the
+ * multiply result might not fit into the exponent field. We
+ * can't overflow or underflow because of this yet, since the
+ * add could bring the final result back into range.
+ */
+ add_exponent = Dbl_exponent(opnd3p1);
+
+ /*
+ * Check for denormalized or zero add operand.
+ */
+ if (add_exponent == 0) {
+ /* check for zero */
+ if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
+ /* right is zero */
+		/* Left can't be zero, so it must be the result.
+ *
+ * The final result is now in tmpres and mpy_exponent,
+ * and needs to be rounded and squeezed back into
+ * double precision format from double extended.
+ */
+ result_exponent = mpy_exponent;
+ Dblext_copy(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
+ resultp1,resultp2,resultp3,resultp4);
+ sign_save = Dbl_signextendedsign(resultp1);/*save sign*/
+ goto round;
+ }
+
+ /*
+ * Neither are zeroes.
+ * Adjust exponent and normalize add operand.
+ */
+ sign_save = Dbl_signextendedsign(opnd3p1); /* save sign */
+ Dbl_clear_signexponent(opnd3p1);
+ Dbl_leftshiftby1(opnd3p1,opnd3p2);
+ Dbl_normalize(opnd3p1,opnd3p2,add_exponent);
+ Dbl_set_sign(opnd3p1,sign_save); /* restore sign */
+ } else {
+ Dbl_clear_exponent_set_hidden(opnd3p1);
+ }
+ /*
+ * Copy opnd3 to the double extended variable called right.
+ */
+ Dbl_copyto_dblext(opnd3p1,opnd3p2,rightp1,rightp2,rightp3,rightp4);
+
+ /*
+ * A zero "save" helps discover equal operands (for later),
+ * and is used in swapping operands (if needed).
+ */
+ Dblext_xortointp1(tmpresp1,rightp1,/*to*/save);
+
+ /*
+ * Compare magnitude of operands.
+ */
+ Dblext_copytoint_exponentmantissap1(tmpresp1,signlessleft1);
+ Dblext_copytoint_exponentmantissap1(rightp1,signlessright1);
+	if (mpy_exponent < add_exponent || (mpy_exponent == add_exponent &&
+	    Dblext_ismagnitudeless(tmpresp2,rightp2,signlessleft1,signlessright1))) {
+ /*
+ * Set the left operand to the larger one by XOR swap.
+ * First finish the first word "save".
+ */
+ Dblext_xorfromintp1(save,rightp1,/*to*/rightp1);
+ Dblext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
+ Dblext_swap_lower(tmpresp2,tmpresp3,tmpresp4,
+ rightp2,rightp3,rightp4);
+ /* also setup exponents used in rest of routine */
+ diff_exponent = add_exponent - mpy_exponent;
+ result_exponent = add_exponent;
+ } else {
+ /* also setup exponents used in rest of routine */
+ diff_exponent = mpy_exponent - add_exponent;
+ result_exponent = mpy_exponent;
+ }
+ /* Invariant: left is not smaller than right. */
+
+ /*
+	 * Special-case alignment of operands that would force alignment
+	 * beyond the extent of the extension. A further optimization
+	 * could special-case this, but it would only reduce the path
+	 * length for this infrequent case.
+ */
+ if (diff_exponent > DBLEXT_THRESHOLD) {
+ diff_exponent = DBLEXT_THRESHOLD;
+ }
+
+ /* Align right operand by shifting it to the right */
+ Dblext_clear_sign(rightp1);
+ Dblext_right_align(rightp1,rightp2,rightp3,rightp4,
+ /*shifted by*/diff_exponent);
+
+ /* Treat sum and difference of the operands separately. */
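+	/*
+	 * ("save" is the XOR of the two operands' first words, so its sign
+	 * bit is set exactly when the operand signs differ and a magnitude
+	 * subtraction is required.)
+	 */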
+ if ((int)save < 0) {
+ /*
+ * Difference of the two operands. Overflow can occur if the
+ * multiply overflowed. A borrow can occur out of the hidden
+ * bit and force a post normalization phase.
+ */
+ Dblext_subtract(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
+ rightp1,rightp2,rightp3,rightp4,
+ resultp1,resultp2,resultp3,resultp4);
+ sign_save = Dbl_signextendedsign(resultp1);
+ if (Dbl_iszero_hidden(resultp1)) {
+ /* Handle normalization */
+ /* A straightforward algorithm would now shift the
+ * result and extension left until the hidden bit
+ * becomes one. Not all of the extension bits need
+ * participate in the shift. Only the two most
+ * significant bits (round and guard) are needed.
+ * If only a single shift is needed then the guard
+ * bit becomes a significant low order bit and the
+ * extension must participate in the rounding.
+ * If more than a single shift is needed, then all
+ * bits to the right of the guard bit are zeros,
+ * and the guard bit may or may not be zero. */
+ Dblext_leftshiftby1(resultp1,resultp2,resultp3,
+ resultp4);
+
+ /* Need to check for a zero result. The sign and
+ * exponent fields have already been zeroed. The more
+ * efficient test of the full object can be used.
+ */
+ if(Dblext_iszero(resultp1,resultp2,resultp3,resultp4)){
+ /* Must have been "x-x" or "x+(-x)". */
+ if (Is_rounding_mode(ROUNDMINUS))
+ Dbl_setone_sign(resultp1);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ result_exponent--;
+
+ /* Look to see if normalization is finished. */
+ if (Dbl_isone_hidden(resultp1)) {
+ /* No further normalization is needed */
+ goto round;
+ }
+
+ /* Discover first one bit to determine shift amount.
+ * Use a modified binary search. We have already
+ * shifted the result one position right and still
+ * not found a one so the remainder of the extension
+ * must be zero and simplifies rounding. */
+ /* Scan bytes */
+ while (Dbl_iszero_hiddenhigh7mantissa(resultp1)) {
+ Dblext_leftshiftby8(resultp1,resultp2,resultp3,resultp4);
+ result_exponent -= 8;
+ }
+ /* Now narrow it down to the nibble */
+ if (Dbl_iszero_hiddenhigh3mantissa(resultp1)) {
+ /* The lower nibble contains the
+ * normalizing one */
+ Dblext_leftshiftby4(resultp1,resultp2,resultp3,resultp4);
+ result_exponent -= 4;
+ }
+ /* Select case where first bit is set (already
+ * normalized) otherwise select the proper shift. */
+ jumpsize = Dbl_hiddenhigh3mantissa(resultp1);
+ if (jumpsize <= 7) switch(jumpsize) {
+ case 1:
+ Dblext_leftshiftby3(resultp1,resultp2,resultp3,
+ resultp4);
+ result_exponent -= 3;
+ break;
+ case 2:
+ case 3:
+ Dblext_leftshiftby2(resultp1,resultp2,resultp3,
+ resultp4);
+ result_exponent -= 2;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ Dblext_leftshiftby1(resultp1,resultp2,resultp3,
+ resultp4);
+ result_exponent -= 1;
+ break;
+ }
+ } /* end if (hidden...)... */
+ /* Fall through and round */
+ } /* end if (save < 0)... */
+ else {
+ /* Add magnitudes */
+ Dblext_addition(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
+ rightp1,rightp2,rightp3,rightp4,
+ /*to*/resultp1,resultp2,resultp3,resultp4);
+ sign_save = Dbl_signextendedsign(resultp1);
+ if (Dbl_isone_hiddenoverflow(resultp1)) {
+ /* Prenormalization required. */
+ Dblext_arithrightshiftby1(resultp1,resultp2,resultp3,
+ resultp4);
+ result_exponent++;
+ } /* end if hiddenoverflow... */
+ } /* end else ...add magnitudes... */
+
+ /* Round the result. If the extension and lower two words are
+ * all zeros, then the result is exact. Otherwise round in the
+ * correct direction. Underflow is possible. If a postnormalization
+ * is necessary, then the mantissa is all zeros so no shift is needed.
+ */
+ round:
+ if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
+ Dblext_denormalize(resultp1,resultp2,resultp3,resultp4,
+ result_exponent,is_tiny);
+ }
+ Dbl_set_sign(resultp1,/*using*/sign_save);
+ if (Dblext_isnotzero_mantissap3(resultp3) ||
+ Dblext_isnotzero_mantissap4(resultp4)) {
+ inexact = TRUE;
+ switch(Rounding_mode()) {
+ case ROUNDNEAREST: /* The default. */
+ if (Dblext_isone_highp3(resultp3)) {
+ /* at least 1/2 ulp */
+ if (Dblext_isnotzero_low31p3(resultp3) ||
+ Dblext_isnotzero_mantissap4(resultp4) ||
+ Dblext_isone_lowp2(resultp2)) {
+ /* either exactly half way and odd or
+ * more than 1/2ulp */
+ Dbl_increment(resultp1,resultp2);
+ }
+ }
+ break;
+
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(resultp1)) {
+ /* Round up positive results */
+ Dbl_increment(resultp1,resultp2);
+ }
+ break;
+
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(resultp1)) {
+ /* Round down negative results */
+ Dbl_increment(resultp1,resultp2);
+ }
+
+ case ROUNDZERO:;
+ /* truncate is simple */
+ } /* end switch... */
+ if (Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
+ }
+ if (result_exponent >= DBL_INFINITY_EXPONENT) {
+ /* trap if OVERFLOWTRAP enabled */
+ if (Is_overflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return (OPC_2E_OVERFLOWEXCEPTION |
+ OPC_2E_INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return (OPC_2E_OVERFLOWEXCEPTION);
+ }
+ inexact = TRUE;
+ Set_overflowflag();
+ /* set result to infinity or largest number */
+ Dbl_setoverflow(resultp1,resultp2);
+
+ } else if (result_exponent <= 0) { /* underflow case */
+ if (Is_underflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return (OPC_2E_UNDERFLOWEXCEPTION |
+ OPC_2E_INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(OPC_2E_UNDERFLOWEXCEPTION);
+ }
+ else if (inexact && is_tiny) Set_underflowflag();
+ }
+ else Dbl_set_exponent(resultp1,result_exponent);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Floating-point Multiply Negate Fused Add
+ */
+
+int
+dbl_fmpynfadd(
+	    dbl_floating_point *src1ptr,
+	    dbl_floating_point *src2ptr,
+	    dbl_floating_point *src3ptr,
+	    unsigned int *status,
+	    dbl_floating_point *dstptr)
+{
+ unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2, opnd3p1, opnd3p2;
+ register unsigned int tmpresp1, tmpresp2, tmpresp3, tmpresp4;
+ unsigned int rightp1, rightp2, rightp3, rightp4;
+ unsigned int resultp1, resultp2 = 0, resultp3 = 0, resultp4 = 0;
+ register int mpy_exponent, add_exponent, count;
+ boolean inexact = FALSE, is_tiny = FALSE;
+
+ unsigned int signlessleft1, signlessright1, save;
+ register int result_exponent, diff_exponent;
+ int sign_save, jumpsize;
+
+ Dbl_copyfromptr(src1ptr,opnd1p1,opnd1p2);
+ Dbl_copyfromptr(src2ptr,opnd2p1,opnd2p2);
+ Dbl_copyfromptr(src3ptr,opnd3p1,opnd3p2);
+
+ /*
+ * set sign bit of result of multiply
+ */
+ if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1))
+ Dbl_setzerop1(resultp1);
+ else
+ Dbl_setnegativezerop1(resultp1);
+
+ /*
+ * Generate multiply exponent
+ */
+ mpy_exponent = Dbl_exponent(opnd1p1) + Dbl_exponent(opnd2p1) - DBL_BIAS;
+
+ /*
+ * check first operand for NaN's or infinity
+ */
+ if (Dbl_isinfinity_exponent(opnd1p1)) {
+ if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+ if (Dbl_isnotnan(opnd2p1,opnd2p2) &&
+ Dbl_isnotnan(opnd3p1,opnd3p2)) {
+ if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
+ /*
+ * invalid since operands are infinity
+ * and zero
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Check third operand for infinity with a
+ * sign opposite of the multiply result
+ */
+ if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
+ (Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
+ /*
+ * invalid since attempting a magnitude
+ * subtraction of infinities
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * return infinity
+ */
+ Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(opnd1p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd1p1);
+ }
+ /*
+ * is second operand a signaling NaN?
+ */
+ else if (Dbl_is_signalingnan(opnd2p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd2p1);
+ Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * is third operand a signaling NaN?
+ */
+ else if (Dbl_is_signalingnan(opnd3p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd3p1);
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * check second operand for NaN's or infinity
+ */
+ if (Dbl_isinfinity_exponent(opnd2p1)) {
+ if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
+ if (Dbl_isnotnan(opnd3p1,opnd3p2)) {
+ if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
+ /*
+ * invalid since multiply operands are
+ * zero & infinity
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(opnd2p1,opnd2p2);
+ Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * Check third operand for infinity with a
+ * sign opposite of the multiply result
+ */
+ if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
+ (Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
+ /*
+ * invalid since attempting a magnitude
+ * subtraction of infinities
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Dbl_makequietnan(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * return infinity
+ */
+ Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(opnd2p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd2p1);
+ }
+ /*
+ * is third operand a signaling NaN?
+ */
+ else if (Dbl_is_signalingnan(opnd3p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd3p1);
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * check third operand for NaN's or infinity
+ */
+ if (Dbl_isinfinity_exponent(opnd3p1)) {
+ if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
+ /* return infinity */
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ return(NOEXCEPTION);
+ } else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Dbl_isone_signaling(opnd3p1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(opnd3p1);
+ }
+ /*
+ * return quiet NaN
+ */
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * Generate multiply mantissa
+ */
+ if (Dbl_isnotzero_exponent(opnd1p1)) {
+ /* set hidden bit */
+ Dbl_clear_signexponent_set_hidden(opnd1p1);
+ }
+ else {
+ /* check for zero */
+ if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+ /*
+ * Perform the add opnd3 with zero here.
+ */
+ if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
+ if (Is_rounding_mode(ROUNDMINUS)) {
+ Dbl_or_signs(opnd3p1,resultp1);
+ } else {
+ Dbl_and_signs(opnd3p1,resultp1);
+ }
+ }
+ /*
+ * Now let's check for trapped underflow case.
+ */
+ else if (Dbl_iszero_exponent(opnd3p1) &&
+ Is_underflowtrap_enabled()) {
+ /* need to normalize results mantissa */
+ sign_save = Dbl_signextendedsign(opnd3p1);
+ result_exponent = 0;
+ Dbl_leftshiftby1(opnd3p1,opnd3p2);
+ Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
+ Dbl_set_sign(opnd3p1,/*using*/sign_save);
+ Dbl_setwrapped_exponent(opnd3p1,result_exponent,
+ unfl);
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ /* inexact = FALSE */
+ return(OPC_2E_UNDERFLOWEXCEPTION);
+ }
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /* is denormalized, adjust exponent */
+ Dbl_clear_signexponent(opnd1p1);
+ Dbl_leftshiftby1(opnd1p1,opnd1p2);
+ Dbl_normalize(opnd1p1,opnd1p2,mpy_exponent);
+ }
+ /* opnd2 needs to have hidden bit set with msb in hidden bit */
+ if (Dbl_isnotzero_exponent(opnd2p1)) {
+ Dbl_clear_signexponent_set_hidden(opnd2p1);
+ }
+ else {
+ /* check for zero */
+ if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
+ /*
+ * Perform the add opnd3 with zero here.
+ */
+ if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
+ if (Is_rounding_mode(ROUNDMINUS)) {
+ Dbl_or_signs(opnd3p1,resultp1);
+ } else {
+ Dbl_and_signs(opnd3p1,resultp1);
+ }
+ }
+ /*
+ * Now let's check for trapped underflow case.
+ */
+ else if (Dbl_iszero_exponent(opnd3p1) &&
+ Is_underflowtrap_enabled()) {
+ /* need to normalize results mantissa */
+ sign_save = Dbl_signextendedsign(opnd3p1);
+ result_exponent = 0;
+ Dbl_leftshiftby1(opnd3p1,opnd3p2);
+ Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
+ Dbl_set_sign(opnd3p1,/*using*/sign_save);
+ Dbl_setwrapped_exponent(opnd3p1,result_exponent,
+ unfl);
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ /* inexact = FALSE */
+ return(OPC_2E_UNDERFLOWEXCEPTION);
+ }
+ Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /* is denormalized; want to normalize */
+ Dbl_clear_signexponent(opnd2p1);
+ Dbl_leftshiftby1(opnd2p1,opnd2p2);
+ Dbl_normalize(opnd2p1,opnd2p2,mpy_exponent);
+ }
+
+ /* Multiply the first two source mantissas together */
+
+ /*
+ * The intermediate result will be kept in tmpres,
+ * which needs enough room for 106 bits of mantissa,
+ * so let's call it a Double extended.
+ */
+ Dblext_setzero(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
+
+ /*
+ * Four bits at a time are inspected in each loop, and a
+ * simple shift and add multiply algorithm is used.
+ */
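+ /*
+ * In effect each pass multiplies by one hexadecimal digit of the
+ * multiplier: the running sum is shifted right four places, then the
+ * four conditional adds below accumulate opnd2 scaled by 8, 4, 2 and 1
+ * according to bits 28-31 of opnd1p2, before opnd1 is shifted down.
+ */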
+ for (count = DBL_P-1; count >= 0; count -= 4) {
+ Dblext_rightshiftby4(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
+ if (Dbit28p2(opnd1p2)) {
+ /* Fourword_add should be an ADD followed by 3 ADDC's */
+ Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
+ opnd2p1<<3 | opnd2p2>>29, opnd2p2<<3, 0, 0);
+ }
+ if (Dbit29p2(opnd1p2)) {
+ Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
+ opnd2p1<<2 | opnd2p2>>30, opnd2p2<<2, 0, 0);
+ }
+ if (Dbit30p2(opnd1p2)) {
+ Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
+ opnd2p1<<1 | opnd2p2>>31, opnd2p2<<1, 0, 0);
+ }
+ if (Dbit31p2(opnd1p2)) {
+ Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
+ opnd2p1, opnd2p2, 0, 0);
+ }
+ Dbl_rightshiftby4(opnd1p1,opnd1p2);
+ }
+ if (Is_dexthiddenoverflow(tmpresp1)) {
+ /* result mantissa >= 2 (mantissa overflow) */
+ mpy_exponent++;
+ Dblext_rightshiftby1(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
+ }
+
+ /*
+ * Restore the sign of the mpy result which was saved in resultp1.
+ * The exponent will continue to be kept in mpy_exponent.
+ */
+ Dblext_set_sign(tmpresp1,Dbl_sign(resultp1));
+
+ /*
+ * No rounding is required, since the result of the multiply
+ * is exact in the extended format.
+ */
+
+ /*
+ * Now we are ready to perform the add portion of the operation.
+ *
+ * The exponents need to be kept as integers for now, since the
+ * multiply result might not fit into the exponent field. We
+ * can't overflow or underflow because of this yet, since the
+ * add could bring the final result back into range.
+ */
+ add_exponent = Dbl_exponent(opnd3p1);
+
+ /*
+ * Check for denormalized or zero add operand.
+ */
+ if (add_exponent == 0) {
+ /* check for zero */
+ if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
+ /* right is zero */
+ /* Left can't be zero and must be result.
+ *
+ * The final result is now in tmpres and mpy_exponent,
+ * and needs to be rounded and squeezed back into
+ * double precision format from double extended.
+ */
+ result_exponent = mpy_exponent;
+ Dblext_copy(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
+ resultp1,resultp2,resultp3,resultp4);
+ sign_save = Dbl_signextendedsign(resultp1);/*save sign*/
+ goto round;
+ }
+
+ /*
+ * Neither are zeroes.
+ * Adjust exponent and normalize add operand.
+ */
+ sign_save = Dbl_signextendedsign(opnd3p1); /* save sign */
+ Dbl_clear_signexponent(opnd3p1);
+ Dbl_leftshiftby1(opnd3p1,opnd3p2);
+ Dbl_normalize(opnd3p1,opnd3p2,add_exponent);
+ Dbl_set_sign(opnd3p1,sign_save); /* restore sign */
+ } else {
+ Dbl_clear_exponent_set_hidden(opnd3p1);
+ }
+ /*
+ * Copy opnd3 to the double extended variable called right.
+ */
+ Dbl_copyto_dblext(opnd3p1,opnd3p2,rightp1,rightp2,rightp3,rightp4);
+
+ /*
+ * A zero "save" helps discover equal operands (for later),
+ * and is used in swapping operands (if needed).
+ */
+ Dblext_xortointp1(tmpresp1,rightp1,/*to*/save);
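+ /*
+ * The sign bit of "save" is set exactly when the two first words have
+ * opposite signs, so the later test "(int)save < 0" selects the
+ * magnitude-subtract path; a save of zero means the first words of
+ * the operands were identical.
+ */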
+
+ /*
+ * Compare magnitude of operands.
+ */
+ Dblext_copytoint_exponentmantissap1(tmpresp1,signlessleft1);
+ Dblext_copytoint_exponentmantissap1(rightp1,signlessright1);
+ if (mpy_exponent < add_exponent || mpy_exponent == add_exponent &&
+ Dblext_ismagnitudeless(tmpresp2,rightp2,signlessleft1,signlessright1)){
+ /*
+ * Set the left operand to the larger one by XOR swap.
+ * First finish the first word "save".
+ */
+ Dblext_xorfromintp1(save,rightp1,/*to*/rightp1);
+ Dblext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
+ Dblext_swap_lower(tmpresp2,tmpresp3,tmpresp4,
+ rightp2,rightp3,rightp4);
+ /* also setup exponents used in rest of routine */
+ diff_exponent = add_exponent - mpy_exponent;
+ result_exponent = add_exponent;
+ } else {
+ /* also setup exponents used in rest of routine */
+ diff_exponent = mpy_exponent - add_exponent;
+ result_exponent = mpy_exponent;
+ }
+ /* Invariant: left is not smaller than right. */
+
+ /*
+ * Special case alignment of operands that would force alignment
+ * beyond the extent of the extension. A further optimization
+ * could special case this but only reduces the path length for
+ * this infrequent case.
+ */
+ if (diff_exponent > DBLEXT_THRESHOLD) {
+ diff_exponent = DBLEXT_THRESHOLD;
+ }
+
+ /* Align right operand by shifting it to the right */
+ Dblext_clear_sign(rightp1);
+ Dblext_right_align(rightp1,rightp2,rightp3,rightp4,
+ /*shifted by*/diff_exponent);
+
+ /* Treat sum and difference of the operands separately. */
+ if ((int)save < 0) {
+ /*
+ * Difference of the two operands. Overflow can occur if the
+ * multiply overflowed. A borrow can occur out of the hidden
+ * bit and force a post normalization phase.
+ */
+ Dblext_subtract(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
+ rightp1,rightp2,rightp3,rightp4,
+ resultp1,resultp2,resultp3,resultp4);
+ sign_save = Dbl_signextendedsign(resultp1);
+ if (Dbl_iszero_hidden(resultp1)) {
+ /* Handle normalization */
+ /* A straightforward algorithm would now shift the
+ * result and extension left until the hidden bit
+ * becomes one. Not all of the extension bits need
+ * participate in the shift. Only the two most
+ * significant bits (round and guard) are needed.
+ * If only a single shift is needed then the guard
+ * bit becomes a significant low order bit and the
+ * extension must participate in the rounding.
+ * If more than a single shift is needed, then all
+ * bits to the right of the guard bit are zeros,
+ * and the guard bit may or may not be zero. */
+ Dblext_leftshiftby1(resultp1,resultp2,resultp3,
+ resultp4);
+
+ /* Need to check for a zero result. The sign and
+ * exponent fields have already been zeroed. The more
+ * efficient test of the full object can be used.
+ */
+ if (Dblext_iszero(resultp1,resultp2,resultp3,resultp4)) {
+ /* Must have been "x-x" or "x+(-x)". */
+ if (Is_rounding_mode(ROUNDMINUS))
+ Dbl_setone_sign(resultp1);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ result_exponent--;
+
+ /* Look to see if normalization is finished. */
+ if (Dbl_isone_hidden(resultp1)) {
+ /* No further normalization is needed */
+ goto round;
+ }
+
+ /* Discover first one bit to determine shift amount.
+ * Use a modified binary search. We have already
+ * shifted the result one position right and still
+ * not found a one so the remainder of the extension
+ * must be zero and simplifies rounding. */
+ /* Scan bytes */
+ while (Dbl_iszero_hiddenhigh7mantissa(resultp1)) {
+ Dblext_leftshiftby8(resultp1,resultp2,resultp3,resultp4);
+ result_exponent -= 8;
+ }
+ /* Now narrow it down to the nibble */
+ if (Dbl_iszero_hiddenhigh3mantissa(resultp1)) {
+ /* The lower nibble contains the
+ * normalizing one */
+ Dblext_leftshiftby4(resultp1,resultp2,resultp3,resultp4);
+ result_exponent -= 4;
+ }
+ /* Select case where first bit is set (already
+ * normalized) otherwise select the proper shift. */
+ jumpsize = Dbl_hiddenhigh3mantissa(resultp1);
+ if (jumpsize <= 7) switch(jumpsize) {
+ case 1:
+ Dblext_leftshiftby3(resultp1,resultp2,resultp3,
+ resultp4);
+ result_exponent -= 3;
+ break;
+ case 2:
+ case 3:
+ Dblext_leftshiftby2(resultp1,resultp2,resultp3,
+ resultp4);
+ result_exponent -= 2;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ Dblext_leftshiftby1(resultp1,resultp2,resultp3,
+ resultp4);
+ result_exponent -= 1;
+ break;
+ }
+ } /* end if (hidden...)... */
+ /* Fall through and round */
+ } /* end if (save < 0)... */
+ else {
+ /* Add magnitudes */
+ Dblext_addition(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
+ rightp1,rightp2,rightp3,rightp4,
+ /*to*/resultp1,resultp2,resultp3,resultp4);
+ sign_save = Dbl_signextendedsign(resultp1);
+ if (Dbl_isone_hiddenoverflow(resultp1)) {
+ /* Prenormalization required. */
+ Dblext_arithrightshiftby1(resultp1,resultp2,resultp3,
+ resultp4);
+ result_exponent++;
+ } /* end if hiddenoverflow... */
+ } /* end else ...add magnitudes... */
+
+ /* Round the result. If the extension and lower two words are
+ * all zeros, then the result is exact. Otherwise round in the
+ * correct direction. Underflow is possible. If a postnormalization
+ * is necessary, then the mantissa is all zeros so no shift is needed.
+ */
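+ /*
+ * For ROUNDNEAREST the extension words supply the round and sticky
+ * information: the result is incremented when more than half an ulp
+ * remains, and on an exact tie only when the low bit of the kept
+ * mantissa is one, i.e. the usual round-to-nearest-even rule.
+ */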
+ round:
+ if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
+ Dblext_denormalize(resultp1,resultp2,resultp3,resultp4,
+ result_exponent,is_tiny);
+ }
+ Dbl_set_sign(resultp1,/*using*/sign_save);
+ if (Dblext_isnotzero_mantissap3(resultp3) ||
+ Dblext_isnotzero_mantissap4(resultp4)) {
+ inexact = TRUE;
+ switch(Rounding_mode()) {
+ case ROUNDNEAREST: /* The default. */
+ if (Dblext_isone_highp3(resultp3)) {
+ /* at least 1/2 ulp */
+ if (Dblext_isnotzero_low31p3(resultp3) ||
+ Dblext_isnotzero_mantissap4(resultp4) ||
+ Dblext_isone_lowp2(resultp2)) {
+ /* either exactly half way and odd or
+ * more than 1/2ulp */
+ Dbl_increment(resultp1,resultp2);
+ }
+ }
+ break;
+
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(resultp1)) {
+ /* Round up positive results */
+ Dbl_increment(resultp1,resultp2);
+ }
+ break;
+
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(resultp1)) {
+ /* Round down negative results */
+ Dbl_increment(resultp1,resultp2);
+ }
+
+ case ROUNDZERO:;
+ /* truncate is simple */
+ } /* end switch... */
+ if (Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
+ }
+ if (result_exponent >= DBL_INFINITY_EXPONENT) {
+ /* Overflow */
+ if (Is_overflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return (OPC_2E_OVERFLOWEXCEPTION |
+ OPC_2E_INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return (OPC_2E_OVERFLOWEXCEPTION);
+ }
+ inexact = TRUE;
+ Set_overflowflag();
+ Dbl_setoverflow(resultp1,resultp2);
+ } else if (result_exponent <= 0) { /* underflow case */
+ if (Is_underflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return (OPC_2E_UNDERFLOWEXCEPTION |
+ OPC_2E_INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(OPC_2E_UNDERFLOWEXCEPTION);
+ }
+ else if (inexact && is_tiny) Set_underflowflag();
+ }
+ else Dbl_set_exponent(resultp1,result_exponent);
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(NOEXCEPTION);
+}
+
+/*
+ * Single Floating-point Multiply Fused Add
+ */
+
+sgl_fmpyfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
+
+sgl_floating_point *src1ptr, *src2ptr, *src3ptr, *dstptr;
+unsigned int *status;
+{
+ unsigned int opnd1, opnd2, opnd3;
+ register unsigned int tmpresp1, tmpresp2;
+ unsigned int rightp1, rightp2;
+ unsigned int resultp1, resultp2 = 0;
+ register int mpy_exponent, add_exponent, count;
+ boolean inexact = FALSE, is_tiny = FALSE;
+
+ unsigned int signlessleft1, signlessright1, save;
+ register int result_exponent, diff_exponent;
+ int sign_save, jumpsize;
+
+ Sgl_copyfromptr(src1ptr,opnd1);
+ Sgl_copyfromptr(src2ptr,opnd2);
+ Sgl_copyfromptr(src3ptr,opnd3);
+
+ /*
+ * set sign bit of result of multiply
+ */
+ if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2))
+ Sgl_setnegativezero(resultp1);
+ else Sgl_setzero(resultp1);
+
+ /*
+ * Generate multiply exponent
+ */
+ mpy_exponent = Sgl_exponent(opnd1) + Sgl_exponent(opnd2) - SGL_BIAS;
+
+ /*
+ * check first operand for NaN's or infinity
+ */
+ if (Sgl_isinfinity_exponent(opnd1)) {
+ if (Sgl_iszero_mantissa(opnd1)) {
+ if (Sgl_isnotnan(opnd2) && Sgl_isnotnan(opnd3)) {
+ if (Sgl_iszero_exponentmantissa(opnd2)) {
+ /*
+ * invalid since operands are infinity
+ * and zero
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(resultp1);
+ Sgl_copytoptr(resultp1,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Check third operand for infinity with a
+ * sign opposite of the multiply result
+ */
+ if (Sgl_isinfinity(opnd3) &&
+ (Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
+ /*
+ * invalid since attempting a magnitude
+ * subtraction of infinities
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(resultp1);
+ Sgl_copytoptr(resultp1,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * return infinity
+ */
+ Sgl_setinfinity_exponentmantissa(resultp1);
+ Sgl_copytoptr(resultp1,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(opnd1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd1);
+ }
+ /*
+ * is second operand a signaling NaN?
+ */
+ else if (Sgl_is_signalingnan(opnd2)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd2);
+ Sgl_copytoptr(opnd2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * is third operand a signaling NaN?
+ */
+ else if (Sgl_is_signalingnan(opnd3)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd3);
+ Sgl_copytoptr(opnd3,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ Sgl_copytoptr(opnd1,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * check second operand for NaN's or infinity
+ */
+ if (Sgl_isinfinity_exponent(opnd2)) {
+ if (Sgl_iszero_mantissa(opnd2)) {
+ if (Sgl_isnotnan(opnd3)) {
+ if (Sgl_iszero_exponentmantissa(opnd1)) {
+ /*
+ * invalid since multiply operands are
+ * zero & infinity
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(opnd2);
+ Sgl_copytoptr(opnd2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * Check third operand for infinity with a
+ * sign opposite of the multiply result
+ */
+ if (Sgl_isinfinity(opnd3) &&
+ (Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
+ /*
+ * invalid since attempting a magnitude
+ * subtraction of infinities
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(resultp1);
+ Sgl_copytoptr(resultp1,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * return infinity
+ */
+ Sgl_setinfinity_exponentmantissa(resultp1);
+ Sgl_copytoptr(resultp1,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(opnd2)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd2);
+ }
+ /*
+ * is third operand a signaling NaN?
+ */
+ else if (Sgl_is_signalingnan(opnd3)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd3);
+ Sgl_copytoptr(opnd3,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ Sgl_copytoptr(opnd2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * check third operand for NaN's or infinity
+ */
+ if (Sgl_isinfinity_exponent(opnd3)) {
+ if (Sgl_iszero_mantissa(opnd3)) {
+ /* return infinity */
+ Sgl_copytoptr(opnd3,dstptr);
+ return(NOEXCEPTION);
+ } else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(opnd3)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd3);
+ }
+ /*
+ * return quiet NaN
+ */
+ Sgl_copytoptr(opnd3,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * Generate multiply mantissa
+ */
+ if (Sgl_isnotzero_exponent(opnd1)) {
+ /* set hidden bit */
+ Sgl_clear_signexponent_set_hidden(opnd1);
+ }
+ else {
+ /* check for zero */
+ if (Sgl_iszero_mantissa(opnd1)) {
+ /*
+ * Perform the add opnd3 with zero here.
+ */
+ if (Sgl_iszero_exponentmantissa(opnd3)) {
+ if (Is_rounding_mode(ROUNDMINUS)) {
+ Sgl_or_signs(opnd3,resultp1);
+ } else {
+ Sgl_and_signs(opnd3,resultp1);
+ }
+ }
+ /*
+ * Now let's check for trapped underflow case.
+ */
+ else if (Sgl_iszero_exponent(opnd3) &&
+ Is_underflowtrap_enabled()) {
+ /* need to normalize result's mantissa */
+ sign_save = Sgl_signextendedsign(opnd3);
+ result_exponent = 0;
+ Sgl_leftshiftby1(opnd3);
+ Sgl_normalize(opnd3,result_exponent);
+ Sgl_set_sign(opnd3,/*using*/sign_save);
+ Sgl_setwrapped_exponent(opnd3,result_exponent,
+ unfl);
+ Sgl_copytoptr(opnd3,dstptr);
+ /* inexact = FALSE */
+ return(OPC_2E_UNDERFLOWEXCEPTION);
+ }
+ Sgl_copytoptr(opnd3,dstptr);
+ return(NOEXCEPTION);
+ }
+ /* is denormalized, adjust exponent */
+ Sgl_clear_signexponent(opnd1);
+ Sgl_leftshiftby1(opnd1);
+ Sgl_normalize(opnd1,mpy_exponent);
+ }
+ /* opnd2 needs to have hidden bit set with msb in hidden bit */
+ if (Sgl_isnotzero_exponent(opnd2)) {
+ Sgl_clear_signexponent_set_hidden(opnd2);
+ }
+ else {
+ /* check for zero */
+ if (Sgl_iszero_mantissa(opnd2)) {
+ /*
+ * Perform the add opnd3 with zero here.
+ */
+ if (Sgl_iszero_exponentmantissa(opnd3)) {
+ if (Is_rounding_mode(ROUNDMINUS)) {
+ Sgl_or_signs(opnd3,resultp1);
+ } else {
+ Sgl_and_signs(opnd3,resultp1);
+ }
+ }
+ /*
+ * Now let's check for trapped underflow case.
+ */
+ else if (Sgl_iszero_exponent(opnd3) &&
+ Is_underflowtrap_enabled()) {
+ /* need to normalize result's mantissa */
+ sign_save = Sgl_signextendedsign(opnd3);
+ result_exponent = 0;
+ Sgl_leftshiftby1(opnd3);
+ Sgl_normalize(opnd3,result_exponent);
+ Sgl_set_sign(opnd3,/*using*/sign_save);
+ Sgl_setwrapped_exponent(opnd3,result_exponent,
+ unfl);
+ Sgl_copytoptr(opnd3,dstptr);
+ /* inexact = FALSE */
+ return(OPC_2E_UNDERFLOWEXCEPTION);
+ }
+ Sgl_copytoptr(opnd3,dstptr);
+ return(NOEXCEPTION);
+ }
+ /* is denormalized; want to normalize */
+ Sgl_clear_signexponent(opnd2);
+ Sgl_leftshiftby1(opnd2);
+ Sgl_normalize(opnd2,mpy_exponent);
+ }
+
+ /* Multiply the first two source mantissas together */
+
+ /*
+ * The intermediate result will be kept in tmpres,
+ * which needs enough room for 48 bits of mantissa,
+ * so let's call it a Single extended.
+ */
+ Sglext_setzero(tmpresp1,tmpresp2);
+
+ /*
+ * Four bits at a time are inspected in each loop, and a
+ * simple shift and add multiply algorithm is used.
+ */
+ for (count = SGL_P-1; count >= 0; count -= 4) {
+ Sglext_rightshiftby4(tmpresp1,tmpresp2);
+ if (Sbit28(opnd1)) {
+ /* Twoword_add should be an ADD followed by an ADDC */
+ Twoword_add(tmpresp1, tmpresp2, opnd2<<3, 0);
+ }
+ if (Sbit29(opnd1)) {
+ Twoword_add(tmpresp1, tmpresp2, opnd2<<2, 0);
+ }
+ if (Sbit30(opnd1)) {
+ Twoword_add(tmpresp1, tmpresp2, opnd2<<1, 0);
+ }
+ if (Sbit31(opnd1)) {
+ Twoword_add(tmpresp1, tmpresp2, opnd2, 0);
+ }
+ Sgl_rightshiftby4(opnd1);
+ }
+ if (Is_sexthiddenoverflow(tmpresp1)) {
+ /* result mantissa >= 2 (mantissa overflow) */
+ mpy_exponent++;
+ Sglext_rightshiftby4(tmpresp1,tmpresp2);
+ } else {
+ Sglext_rightshiftby3(tmpresp1,tmpresp2);
+ }
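+ /*
+ * Unlike the double case, the 24x24-bit product does not fill the
+ * two-word extended result, so a fixed alignment shift (3 places, or
+ * 4 when the product overflowed past 2.0) is applied here before the
+ * add step.
+ */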
+
+ /*
+ * Restore the sign of the mpy result which was saved in resultp1.
+ * The exponent will continue to be kept in mpy_exponent.
+ */
+ Sglext_set_sign(tmpresp1,Sgl_sign(resultp1));
+
+ /*
+ * No rounding is required, since the result of the multiply
+ * is exact in the extended format.
+ */
+
+ /*
+ * Now we are ready to perform the add portion of the operation.
+ *
+ * The exponents need to be kept as integers for now, since the
+ * multiply result might not fit into the exponent field. We
+ * can't overflow or underflow because of this yet, since the
+ * add could bring the final result back into range.
+ */
+ add_exponent = Sgl_exponent(opnd3);
+
+ /*
+ * Check for denormalized or zero add operand.
+ */
+ if (add_exponent == 0) {
+ /* check for zero */
+ if (Sgl_iszero_mantissa(opnd3)) {
+ /* right is zero */
+ /* Left can't be zero and must be result.
+ *
+ * The final result is now in tmpres and mpy_exponent,
+ * and needs to be rounded and squeezed back into
+ * single precision format from single extended.
+ */
+ result_exponent = mpy_exponent;
+ Sglext_copy(tmpresp1,tmpresp2,resultp1,resultp2);
+ sign_save = Sgl_signextendedsign(resultp1);/*save sign*/
+ goto round;
+ }
+
+ /*
+ * Neither are zeroes.
+ * Adjust exponent and normalize add operand.
+ */
+ sign_save = Sgl_signextendedsign(opnd3); /* save sign */
+ Sgl_clear_signexponent(opnd3);
+ Sgl_leftshiftby1(opnd3);
+ Sgl_normalize(opnd3,add_exponent);
+ Sgl_set_sign(opnd3,sign_save); /* restore sign */
+ } else {
+ Sgl_clear_exponent_set_hidden(opnd3);
+ }
+ /*
+ * Copy opnd3 to the single extended variable called right.
+ */
+ Sgl_copyto_sglext(opnd3,rightp1,rightp2);
+
+ /*
+ * A zero "save" helps discover equal operands (for later),
+ * and is used in swapping operands (if needed).
+ */
+ Sglext_xortointp1(tmpresp1,rightp1,/*to*/save);
+
+ /*
+ * Compare magnitude of operands.
+ */
+ Sglext_copytoint_exponentmantissa(tmpresp1,signlessleft1);
+ Sglext_copytoint_exponentmantissa(rightp1,signlessright1);
+ if (mpy_exponent < add_exponent || mpy_exponent == add_exponent &&
+ Sglext_ismagnitudeless(signlessleft1,signlessright1)) {
+ /*
+ * Set the left operand to the larger one by XOR swap.
+ * First finish the first word "save".
+ */
+ Sglext_xorfromintp1(save,rightp1,/*to*/rightp1);
+ Sglext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
+ Sglext_swap_lower(tmpresp2,rightp2);
+ /* also setup exponents used in rest of routine */
+ diff_exponent = add_exponent - mpy_exponent;
+ result_exponent = add_exponent;
+ } else {
+ /* also setup exponents used in rest of routine */
+ diff_exponent = mpy_exponent - add_exponent;
+ result_exponent = mpy_exponent;
+ }
+ /* Invariant: left is not smaller than right. */
+
+ /*
+ * Special case alignment of operands that would force alignment
+ * beyond the extent of the extension. A further optimization
+ * could special case this but only reduces the path length for
+ * this infrequent case.
+ */
+ if (diff_exponent > SGLEXT_THRESHOLD) {
+ diff_exponent = SGLEXT_THRESHOLD;
+ }
+
+ /* Align right operand by shifting it to the right */
+ Sglext_clear_sign(rightp1);
+ Sglext_right_align(rightp1,rightp2,/*shifted by*/diff_exponent);
+
+ /* Treat sum and difference of the operands separately. */
+ if ((int)save < 0) {
+ /*
+ * Difference of the two operands. Overflow can occur if the
+ * multiply overflowed. A borrow can occur out of the hidden
+ * bit and force a post normalization phase.
+ */
+ Sglext_subtract(tmpresp1,tmpresp2, rightp1,rightp2,
+ resultp1,resultp2);
+ sign_save = Sgl_signextendedsign(resultp1);
+ if (Sgl_iszero_hidden(resultp1)) {
+ /* Handle normalization */
+ /* A straightforward algorithm would now shift the
+ * result and extension left until the hidden bit
+ * becomes one. Not all of the extension bits need
+ * participate in the shift. Only the two most
+ * significant bits (round and guard) are needed.
+ * If only a single shift is needed then the guard
+ * bit becomes a significant low order bit and the
+ * extension must participate in the rounding.
+ * If more than a single shift is needed, then all
+ * bits to the right of the guard bit are zeros,
+ * and the guard bit may or may not be zero. */
+ Sglext_leftshiftby1(resultp1,resultp2);
+
+ /* Need to check for a zero result. The sign and
+ * exponent fields have already been zeroed. The more
+ * efficient test of the full object can be used.
+ */
+ if (Sglext_iszero(resultp1,resultp2)) {
+ /* Must have been "x-x" or "x+(-x)". */
+ if (Is_rounding_mode(ROUNDMINUS))
+ Sgl_setone_sign(resultp1);
+ Sgl_copytoptr(resultp1,dstptr);
+ return(NOEXCEPTION);
+ }
+ result_exponent--;
+
+ /* Look to see if normalization is finished. */
+ if (Sgl_isone_hidden(resultp1)) {
+ /* No further normalization is needed */
+ goto round;
+ }
+
+ /* Discover first one bit to determine shift amount.
+ * Use a modified binary search. We have already
+ * shifted the result one position right and still
+ * not found a one so the remainder of the extension
+ * must be zero and simplifies rounding. */
+ /* Scan bytes */
+ while (Sgl_iszero_hiddenhigh7mantissa(resultp1)) {
+ Sglext_leftshiftby8(resultp1,resultp2);
+ result_exponent -= 8;
+ }
+ /* Now narrow it down to the nibble */
+ if (Sgl_iszero_hiddenhigh3mantissa(resultp1)) {
+ /* The lower nibble contains the
+ * normalizing one */
+ Sglext_leftshiftby4(resultp1,resultp2);
+ result_exponent -= 4;
+ }
+ /* Select case where first bit is set (already
+ * normalized) otherwise select the proper shift. */
+ jumpsize = Sgl_hiddenhigh3mantissa(resultp1);
+ if (jumpsize <= 7) switch(jumpsize) {
+ case 1:
+ Sglext_leftshiftby3(resultp1,resultp2);
+ result_exponent -= 3;
+ break;
+ case 2:
+ case 3:
+ Sglext_leftshiftby2(resultp1,resultp2);
+ result_exponent -= 2;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ Sglext_leftshiftby1(resultp1,resultp2);
+ result_exponent -= 1;
+ break;
+ }
+ } /* end if (hidden...)... */
+ /* Fall through and round */
+ } /* end if (save < 0)... */
+ else {
+ /* Add magnitudes */
+ Sglext_addition(tmpresp1,tmpresp2,
+ rightp1,rightp2, /*to*/resultp1,resultp2);
+ sign_save = Sgl_signextendedsign(resultp1);
+ if (Sgl_isone_hiddenoverflow(resultp1)) {
+ /* Prenormalization required. */
+ Sglext_arithrightshiftby1(resultp1,resultp2);
+ result_exponent++;
+ } /* end if hiddenoverflow... */
+ } /* end else ...add magnitudes... */
+
+ /* Round the result. If the extension word is
+ * all zeros, then the result is exact. Otherwise round in the
+ * correct direction. Underflow is possible. If a postnormalization
+ * is necessary, then the mantissa is all zeros so no shift is needed.
+ */
+ round:
+ if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
+ Sglext_denormalize(resultp1,resultp2,result_exponent,is_tiny);
+ }
+ Sgl_set_sign(resultp1,/*using*/sign_save);
+ if (Sglext_isnotzero_mantissap2(resultp2)) {
+ inexact = TRUE;
+ switch(Rounding_mode()) {
+ case ROUNDNEAREST: /* The default. */
+ if (Sglext_isone_highp2(resultp2)) {
+ /* at least 1/2 ulp */
+ if (Sglext_isnotzero_low31p2(resultp2) ||
+ Sglext_isone_lowp1(resultp1)) {
+ /* either exactly half way and odd or
+ * more than 1/2ulp */
+ Sgl_increment(resultp1);
+ }
+ }
+ break;
+
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(resultp1)) {
+ /* Round up positive results */
+ Sgl_increment(resultp1);
+ }
+ break;
+
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(resultp1)) {
+ /* Round down negative results */
+ Sgl_increment(resultp1);
+ }
+
+ case ROUNDZERO:;
+ /* truncate is simple */
+ } /* end switch... */
+ if (Sgl_isone_hiddenoverflow(resultp1)) result_exponent++;
+ }
+ if (result_exponent >= SGL_INFINITY_EXPONENT) {
+ /* Overflow */
+ if (Is_overflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Sgl_setwrapped_exponent(resultp1,result_exponent,ovfl);
+ Sgl_copytoptr(resultp1,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return (OPC_2E_OVERFLOWEXCEPTION |
+ OPC_2E_INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return (OPC_2E_OVERFLOWEXCEPTION);
+ }
+ inexact = TRUE;
+ Set_overflowflag();
+ Sgl_setoverflow(resultp1);
+ } else if (result_exponent <= 0) { /* underflow case */
+ if (Is_underflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Sgl_setwrapped_exponent(resultp1,result_exponent,unfl);
+ Sgl_copytoptr(resultp1,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return (OPC_2E_UNDERFLOWEXCEPTION |
+ OPC_2E_INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(OPC_2E_UNDERFLOWEXCEPTION);
+ }
+ else if (inexact && is_tiny) Set_underflowflag();
+ }
+ else Sgl_set_exponent(resultp1,result_exponent);
+ Sgl_copytoptr(resultp1,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(NOEXCEPTION);
+}
+
+/*
+ * Single Floating-point Multiply Negate Fused Add
+ */
+
+sgl_fmpynfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
+
+sgl_floating_point *src1ptr, *src2ptr, *src3ptr, *dstptr;
+unsigned int *status;
+{
+ unsigned int opnd1, opnd2, opnd3;
+ register unsigned int tmpresp1, tmpresp2;
+ unsigned int rightp1, rightp2;
+ unsigned int resultp1, resultp2 = 0;
+ register int mpy_exponent, add_exponent, count;
+ boolean inexact = FALSE, is_tiny = FALSE;
+
+ unsigned int signlessleft1, signlessright1, save;
+ register int result_exponent, diff_exponent;
+ int sign_save, jumpsize;
+
+ Sgl_copyfromptr(src1ptr,opnd1);
+ Sgl_copyfromptr(src2ptr,opnd2);
+ Sgl_copyfromptr(src3ptr,opnd3);
+
+ /*
+ * set sign bit of result of multiply
+ */
+ if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2))
+ Sgl_setzero(resultp1);
+ else
+ Sgl_setnegativezero(resultp1);
+
+ /*
+ * Generate multiply exponent
+ */
+ mpy_exponent = Sgl_exponent(opnd1) + Sgl_exponent(opnd2) - SGL_BIAS;
+
+ /*
+ * check first operand for NaN's or infinity
+ */
+ if (Sgl_isinfinity_exponent(opnd1)) {
+ if (Sgl_iszero_mantissa(opnd1)) {
+ if (Sgl_isnotnan(opnd2) && Sgl_isnotnan(opnd3)) {
+ if (Sgl_iszero_exponentmantissa(opnd2)) {
+ /*
+ * invalid since operands are infinity
+ * and zero
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(resultp1);
+ Sgl_copytoptr(resultp1,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Check third operand for infinity with a
+ * sign opposite of the multiply result
+ */
+ if (Sgl_isinfinity(opnd3) &&
+ (Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
+ /*
+ * invalid since attempting a magnitude
+ * subtraction of infinities
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(resultp1);
+ Sgl_copytoptr(resultp1,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * return infinity
+ */
+ Sgl_setinfinity_exponentmantissa(resultp1);
+ Sgl_copytoptr(resultp1,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(opnd1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd1);
+ }
+ /*
+ * is second operand a signaling NaN?
+ */
+ else if (Sgl_is_signalingnan(opnd2)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd2);
+ Sgl_copytoptr(opnd2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * is third operand a signaling NaN?
+ */
+ else if (Sgl_is_signalingnan(opnd3)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd3);
+ Sgl_copytoptr(opnd3,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ Sgl_copytoptr(opnd1,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * check second operand for NaN's or infinity
+ */
+ if (Sgl_isinfinity_exponent(opnd2)) {
+ if (Sgl_iszero_mantissa(opnd2)) {
+ if (Sgl_isnotnan(opnd3)) {
+ if (Sgl_iszero_exponentmantissa(opnd1)) {
+ /*
+ * invalid since multiply operands are
+ * zero & infinity
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(opnd2);
+ Sgl_copytoptr(opnd2,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * Check third operand for infinity with a
+ * sign opposite of the multiply result
+ */
+ if (Sgl_isinfinity(opnd3) &&
+ (Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
+ /*
+ * invalid since attempting a magnitude
+ * subtraction of infinities
+ */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(resultp1);
+ Sgl_copytoptr(resultp1,dstptr);
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * return infinity
+ */
+ Sgl_setinfinity_exponentmantissa(resultp1);
+ Sgl_copytoptr(resultp1,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(opnd2)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd2);
+ }
+ /*
+ * is third operand a signaling NaN?
+ */
+ else if (Sgl_is_signalingnan(opnd3)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd3);
+ Sgl_copytoptr(opnd3,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ Sgl_copytoptr(opnd2,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * check third operand for NaN's or infinity
+ */
+ if (Sgl_isinfinity_exponent(opnd3)) {
+ if (Sgl_iszero_mantissa(opnd3)) {
+ /* return infinity */
+ Sgl_copytoptr(opnd3,dstptr);
+ return(NOEXCEPTION);
+ } else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(opnd3)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(OPC_2E_INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd3);
+ }
+ /*
+ * return quiet NaN
+ */
+ Sgl_copytoptr(opnd3,dstptr);
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * Generate multiply mantissa
+ */
+ if (Sgl_isnotzero_exponent(opnd1)) {
+ /* set hidden bit */
+ Sgl_clear_signexponent_set_hidden(opnd1);
+ }
+ else {
+ /* check for zero */
+ if (Sgl_iszero_mantissa(opnd1)) {
+ /*
+ * Perform the add opnd3 with zero here.
+ */
+ if (Sgl_iszero_exponentmantissa(opnd3)) {
+ if (Is_rounding_mode(ROUNDMINUS)) {
+ Sgl_or_signs(opnd3,resultp1);
+ } else {
+ Sgl_and_signs(opnd3,resultp1);
+ }
+ }
+ /*
+ * Now let's check for trapped underflow case.
+ */
+ else if (Sgl_iszero_exponent(opnd3) &&
+ Is_underflowtrap_enabled()) {
+ /* need to normalize result's mantissa */
+ sign_save = Sgl_signextendedsign(opnd3);
+ result_exponent = 0;
+ Sgl_leftshiftby1(opnd3);
+ Sgl_normalize(opnd3,result_exponent);
+ Sgl_set_sign(opnd3,/*using*/sign_save);
+ Sgl_setwrapped_exponent(opnd3,result_exponent,
+ unfl);
+ Sgl_copytoptr(opnd3,dstptr);
+ /* inexact = FALSE */
+ return(OPC_2E_UNDERFLOWEXCEPTION);
+ }
+ Sgl_copytoptr(opnd3,dstptr);
+ return(NOEXCEPTION);
+ }
+ /* is denormalized, adjust exponent */
+ Sgl_clear_signexponent(opnd1);
+ Sgl_leftshiftby1(opnd1);
+ Sgl_normalize(opnd1,mpy_exponent);
+ }
+ /* opnd2 needs to have hidden bit set with msb in hidden bit */
+ if (Sgl_isnotzero_exponent(opnd2)) {
+ Sgl_clear_signexponent_set_hidden(opnd2);
+ }
+ else {
+ /* check for zero */
+ if (Sgl_iszero_mantissa(opnd2)) {
+ /*
+ * Perform the add opnd3 with zero here.
+ */
+ if (Sgl_iszero_exponentmantissa(opnd3)) {
+ if (Is_rounding_mode(ROUNDMINUS)) {
+ Sgl_or_signs(opnd3,resultp1);
+ } else {
+ Sgl_and_signs(opnd3,resultp1);
+ }
+ }
+ /*
+ * Now let's check for trapped underflow case.
+ */
+ else if (Sgl_iszero_exponent(opnd3) &&
+ Is_underflowtrap_enabled()) {
+ /* need to normalize result's mantissa */
+ sign_save = Sgl_signextendedsign(opnd3);
+ result_exponent = 0;
+ Sgl_leftshiftby1(opnd3);
+ Sgl_normalize(opnd3,result_exponent);
+ Sgl_set_sign(opnd3,/*using*/sign_save);
+ Sgl_setwrapped_exponent(opnd3,result_exponent,
+ unfl);
+ Sgl_copytoptr(opnd3,dstptr);
+ /* inexact = FALSE */
+ return(OPC_2E_UNDERFLOWEXCEPTION);
+ }
+ Sgl_copytoptr(opnd3,dstptr);
+ return(NOEXCEPTION);
+ }
+ /* is denormalized; want to normalize */
+ Sgl_clear_signexponent(opnd2);
+ Sgl_leftshiftby1(opnd2);
+ Sgl_normalize(opnd2,mpy_exponent);
+ }
+
+ /* Multiply the first two source mantissas together */
+
+ /*
+ * The intermediate result will be kept in tmpres,
+ * which needs enough room for 48 bits of mantissa,
+ * so let's call it a Single extended.
+ */
+ Sglext_setzero(tmpresp1,tmpresp2);
+
+ /*
+ * Four bits at a time are inspected in each loop, and a
+ * simple shift and add multiply algorithm is used.
+ */
+ for (count = SGL_P-1; count >= 0; count -= 4) {
+ Sglext_rightshiftby4(tmpresp1,tmpresp2);
+ if (Sbit28(opnd1)) {
+ /* Twoword_add should be an ADD followed by an ADDC */
+ Twoword_add(tmpresp1, tmpresp2, opnd2<<3, 0);
+ }
+ if (Sbit29(opnd1)) {
+ Twoword_add(tmpresp1, tmpresp2, opnd2<<2, 0);
+ }
+ if (Sbit30(opnd1)) {
+ Twoword_add(tmpresp1, tmpresp2, opnd2<<1, 0);
+ }
+ if (Sbit31(opnd1)) {
+ Twoword_add(tmpresp1, tmpresp2, opnd2, 0);
+ }
+ Sgl_rightshiftby4(opnd1);
+ }
+ if (Is_sexthiddenoverflow(tmpresp1)) {
+ /* result mantissa >= 2 (mantissa overflow) */
+ mpy_exponent++;
+ Sglext_rightshiftby4(tmpresp1,tmpresp2);
+ } else {
+ Sglext_rightshiftby3(tmpresp1,tmpresp2);
+ }
+
+ /*
+ * Restore the sign of the mpy result which was saved in resultp1.
+ * The exponent will continue to be kept in mpy_exponent.
+ */
+ Sglext_set_sign(tmpresp1,Sgl_sign(resultp1));
+
+ /*
+ * No rounding is required, since the result of the multiply
+ * is exact in the extended format.
+ */
+
+ /*
+ * Now we are ready to perform the add portion of the operation.
+ *
+ * The exponents need to be kept as integers for now, since the
+ * multiply result might not fit into the exponent field. We
+ * can't overflow or underflow because of this yet, since the
+ * add could bring the final result back into range.
+ */
+ add_exponent = Sgl_exponent(opnd3);
+
+ /*
+ * Check for denormalized or zero add operand.
+ */
+ if (add_exponent == 0) {
+ /* check for zero */
+ if (Sgl_iszero_mantissa(opnd3)) {
+ /* right is zero */
+ /* Left can't be zero and must be result.
+ *
+ * The final result is now in tmpres and mpy_exponent,
+ * and needs to be rounded and squeezed back into
+ * single precision format from single extended.
+ */
+ result_exponent = mpy_exponent;
+ Sglext_copy(tmpresp1,tmpresp2,resultp1,resultp2);
+ sign_save = Sgl_signextendedsign(resultp1);/*save sign*/
+ goto round;
+ }
+
+ /*
+ * Neither are zeroes.
+ * Adjust exponent and normalize add operand.
+ */
+ sign_save = Sgl_signextendedsign(opnd3); /* save sign */
+ Sgl_clear_signexponent(opnd3);
+ Sgl_leftshiftby1(opnd3);
+ Sgl_normalize(opnd3,add_exponent);
+ Sgl_set_sign(opnd3,sign_save); /* restore sign */
+ } else {
+ Sgl_clear_exponent_set_hidden(opnd3);
+ }
+ /*
+ * Copy opnd3 to the single extended variable called right.
+ */
+ Sgl_copyto_sglext(opnd3,rightp1,rightp2);
+
+ /*
+ * A zero "save" helps discover equal operands (for later),
+ * and is used in swapping operands (if needed).
+ */
+ Sglext_xortointp1(tmpresp1,rightp1,/*to*/save);
+
+ /*
+ * Compare magnitude of operands.
+ */
+ Sglext_copytoint_exponentmantissa(tmpresp1,signlessleft1);
+ Sglext_copytoint_exponentmantissa(rightp1,signlessright1);
+ if (mpy_exponent < add_exponent || mpy_exponent == add_exponent &&
+ Sglext_ismagnitudeless(signlessleft1,signlessright1)) {
+ /*
+ * Set the left operand to the larger one by XOR swap.
+ * First finish the first word "save".
+ */
+ Sglext_xorfromintp1(save,rightp1,/*to*/rightp1);
+ Sglext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
+ Sglext_swap_lower(tmpresp2,rightp2);
+ /* also setup exponents used in rest of routine */
+ diff_exponent = add_exponent - mpy_exponent;
+ result_exponent = add_exponent;
+ } else {
+ /* also setup exponents used in rest of routine */
+ diff_exponent = mpy_exponent - add_exponent;
+ result_exponent = mpy_exponent;
+ }
+ /* Invariant: left is not smaller than right. */
+
+ /*
+ * Special case alignment of operands that would force alignment
+ * beyond the extent of the extension. A further optimization
+ * could special case this but only reduces the path length for
+ * this infrequent case.
+ */
+ if (diff_exponent > SGLEXT_THRESHOLD) {
+ diff_exponent = SGLEXT_THRESHOLD;
+ }
+
+ /* Align right operand by shifting it to the right */
+ Sglext_clear_sign(rightp1);
+ Sglext_right_align(rightp1,rightp2,/*shifted by*/diff_exponent);
+
+ /* Treat sum and difference of the operands separately. */
+ if ((int)save < 0) {
+ /*
+ * Difference of the two operands. Overflow can occur if the
+ * multiply overflowed. A borrow can occur out of the hidden
+ * bit and force a post normalization phase.
+ */
+ Sglext_subtract(tmpresp1,tmpresp2, rightp1,rightp2,
+ resultp1,resultp2);
+ sign_save = Sgl_signextendedsign(resultp1);
+ if (Sgl_iszero_hidden(resultp1)) {
+ /* Handle normalization */
+ /* A straightforward algorithm would now shift the
+ * result and extension left until the hidden bit
+ * becomes one. Not all of the extension bits need
+ * participate in the shift. Only the two most
+ * significant bits (round and guard) are needed.
+ * If only a single shift is needed then the guard
+ * bit becomes a significant low order bit and the
+ * extension must participate in the rounding.
+ * If more than a single shift is needed, then all
+ * bits to the right of the guard bit are zeros,
+ * and the guard bit may or may not be zero. */
+ Sglext_leftshiftby1(resultp1,resultp2);
+
+ /* Need to check for a zero result. The sign and
+ * exponent fields have already been zeroed. The more
+ * efficient test of the full object can be used.
+ */
+ if (Sglext_iszero(resultp1,resultp2)) {
+ /* Must have been "x-x" or "x+(-x)". */
+ if (Is_rounding_mode(ROUNDMINUS))
+ Sgl_setone_sign(resultp1);
+ Sgl_copytoptr(resultp1,dstptr);
+ return(NOEXCEPTION);
+ }
+ result_exponent--;
+
+ /* Look to see if normalization is finished. */
+ if (Sgl_isone_hidden(resultp1)) {
+ /* No further normalization is needed */
+ goto round;
+ }
+
+ /* Discover first one bit to determine shift amount.
+ * Use a modified binary search. We have already
+ * shifted the result one position right and still
+ * not found a one so the remainder of the extension
+ * must be zero and simplifies rounding. */
+ /* Scan bytes */
+ while (Sgl_iszero_hiddenhigh7mantissa(resultp1)) {
+ Sglext_leftshiftby8(resultp1,resultp2);
+ result_exponent -= 8;
+ }
+ /* Now narrow it down to the nibble */
+ if (Sgl_iszero_hiddenhigh3mantissa(resultp1)) {
+ /* The lower nibble contains the
+ * normalizing one */
+ Sglext_leftshiftby4(resultp1,resultp2);
+ result_exponent -= 4;
+ }
+ /* Select case where first bit is set (already
+ * normalized) otherwise select the proper shift. */
+ jumpsize = Sgl_hiddenhigh3mantissa(resultp1);
+ if (jumpsize <= 7) switch(jumpsize) {
+ case 1:
+ Sglext_leftshiftby3(resultp1,resultp2);
+ result_exponent -= 3;
+ break;
+ case 2:
+ case 3:
+ Sglext_leftshiftby2(resultp1,resultp2);
+ result_exponent -= 2;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ Sglext_leftshiftby1(resultp1,resultp2);
+ result_exponent -= 1;
+ break;
+ }
+ } /* end if (hidden...)... */
+ /* Fall through and round */
+ } /* end if (save < 0)... */
+ else {
+ /* Add magnitudes */
+ Sglext_addition(tmpresp1,tmpresp2,
+ rightp1,rightp2, /*to*/resultp1,resultp2);
+ sign_save = Sgl_signextendedsign(resultp1);
+ if (Sgl_isone_hiddenoverflow(resultp1)) {
+ /* Prenormalization required. */
+ Sglext_arithrightshiftby1(resultp1,resultp2);
+ result_exponent++;
+ } /* end if hiddenoverflow... */
+ } /* end else ...add magnitudes... */
+
+ /* Round the result. If the extension word is
+ * all zeros, then the result is exact. Otherwise round in the
+ * correct direction. Underflow is possible. If a postnormalization
+ * is necessary, then the mantissa is all zeros so no shift is needed.
+ */
+ round:
+ if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
+ Sglext_denormalize(resultp1,resultp2,result_exponent,is_tiny);
+ }
+ Sgl_set_sign(resultp1,/*using*/sign_save);
+ if (Sglext_isnotzero_mantissap2(resultp2)) {
+ inexact = TRUE;
+ switch(Rounding_mode()) {
+ case ROUNDNEAREST: /* The default. */
+ if (Sglext_isone_highp2(resultp2)) {
+ /* at least 1/2 ulp */
+ if (Sglext_isnotzero_low31p2(resultp2) ||
+ Sglext_isone_lowp1(resultp1)) {
+ /* either exactly half way and odd or
+ * more than 1/2ulp */
+ Sgl_increment(resultp1);
+ }
+ }
+ break;
+
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(resultp1)) {
+ /* Round up positive results */
+ Sgl_increment(resultp1);
+ }
+ break;
+
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(resultp1)) {
+ /* Round down negative results */
+ Sgl_increment(resultp1);
+ }
+
+ case ROUNDZERO:;
+ /* truncate is simple */
+ } /* end switch... */
+ if (Sgl_isone_hiddenoverflow(resultp1)) result_exponent++;
+ }
+ if (result_exponent >= SGL_INFINITY_EXPONENT) {
+ /* Overflow */
+ if (Is_overflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Sgl_setwrapped_exponent(resultp1,result_exponent,ovfl);
+ Sgl_copytoptr(resultp1,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return (OPC_2E_OVERFLOWEXCEPTION |
+ OPC_2E_INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return (OPC_2E_OVERFLOWEXCEPTION);
+ }
+ inexact = TRUE;
+ Set_overflowflag();
+ Sgl_setoverflow(resultp1);
+ } else if (result_exponent <= 0) { /* underflow case */
+ if (Is_underflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Sgl_setwrapped_exponent(resultp1,result_exponent,unfl);
+ Sgl_copytoptr(resultp1,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return (OPC_2E_UNDERFLOWEXCEPTION |
+ OPC_2E_INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(OPC_2E_UNDERFLOWEXCEPTION);
+ }
+ else if (inexact && is_tiny) Set_underflowflag();
+ }
+ else Sgl_set_exponent(resultp1,result_exponent);
+ Sgl_copytoptr(resultp1,dstptr);
+ if (inexact)
+ if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(NOEXCEPTION);
+}
+
diff --git a/arch/parisc/math-emu/fpbits.h b/arch/parisc/math-emu/fpbits.h
new file mode 100644
index 00000000..cefad064
--- /dev/null
+++ b/arch/parisc/math-emu/fpbits.h
@@ -0,0 +1,65 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifdef __NO_PA_HDRS
+ PA header file -- do not include this header file for non-PA builds.
+#endif
+
+
+/*
+ * These macros are designed to be portable to all machines that have
+ * a wordsize greater than or equal to 32 bits that support the portable
+ * C compiler and the standard C preprocessor. Wordsize (default 32)
+ * and bitfield assignment (default left-to-right, unlike VAX, PDP-11)
+ * should be predefined using the constants HOSTWDSZ and BITFRL and
+ * the C compiler "-D" flag (e.g., -DHOSTWDSZ=36 -DBITFLR for the DEC-20).
+ * Note that the macro arguments assume that the integer being referenced
+ * is a 32-bit integer (right-justified on the 20) and that bit 0 is the
+ * most significant bit.
+ */
+
+#ifndef HOSTWDSZ
+#define HOSTWDSZ 32
+#endif
+
+
+/*########################### Macros ######################################*/
+
+/*-------------------------------------------------------------------------
+ * Bit-field access macros - extract, sign-extract, mask, and deposit a
+ * bit field of a 32-bit word. The "start" argument gives the bit
+ * position of the field's most significant bit (bit 0 being the most
+ * significant bit of the word) and "length"/"len" gives the field
+ * width in bits.
+ *----------------------------------------------------------------------- */
+#define Bitfield_extract(start, length, object) \
+ ((object) >> (HOSTWDSZ - (start) - (length)) & \
+ ((unsigned)-1 >> (HOSTWDSZ - (length))))
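+/*
+ * For example, with HOSTWDSZ == 32, Bitfield_extract(0, 6, op) evaluates
+ * to (op >> 26) & 0x3f, i.e. the six most significant bits of op, since
+ * bit 0 is taken to be the most significant bit.
+ */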
+
+#define Bitfield_signed_extract(start, length, object) \
+ ((int)((object) << start) >> (HOSTWDSZ - (length)))
+
+#define Bitfield_mask(start, len, object) \
+ ((object) & (((unsigned)-1 >> (HOSTWDSZ-len)) << (HOSTWDSZ-start-len)))
+
+#define Bitfield_deposit(value,start,len,object) object = \
+ ((object) & ~(((unsigned)-1 >> (HOSTWDSZ-len)) << (HOSTWDSZ-start-len))) | \
+ (((value) & ((unsigned)-1 >> (HOSTWDSZ-len))) << (HOSTWDSZ-start-len))
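+/*
+ * Similarly, Bitfield_deposit(v, 0, 6, op) clears the six most
+ * significant bits of op and replaces them with the low six bits of v;
+ * the remaining bits of op are left untouched.
+ */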
diff --git a/arch/parisc/math-emu/fpu.h b/arch/parisc/math-emu/fpu.h
new file mode 100644
index 00000000..0af5c3c8
--- /dev/null
+++ b/arch/parisc/math-emu/fpu.h
@@ -0,0 +1,76 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/fp/fpu.h $Revision: 1.1 $
+ *
+ * Purpose:
+ * <<please update with a synopsis of the functionality provided by this file>>
+ *
+ *
+ * END_DESC
+*/
+
+#ifdef __NO_PA_HDRS
+ PA header file -- do not include this header file for non-PA builds.
+#endif
+
+
+#ifndef _MACHINE_FPU_INCLUDED /* allows multiple inclusion */
+#define _MACHINE_FPU_INCLUDED
+
+#if 0
+#ifndef _SYS_STDSYMS_INCLUDED
+# include <sys/stdsyms.h>
+#endif /* _SYS_STDSYMS_INCLUDED */
+#include <machine/pdc/pdc_rqsts.h>
+#endif
+
+#define PA83_FPU_FLAG 0x00000001
+#define PA89_FPU_FLAG 0x00000002
+#define PA2_0_FPU_FLAG 0x00000010
+
+#define TIMEX_EXTEN_FLAG 0x00000004
+
+#define ROLEX_EXTEN_FLAG 0x00000008
+#define COPR_FP 0x00000080 /* Floating point -- Coprocessor 0 */
+#define SFU_MPY_DIVIDE 0x00008000 /* Multiply/Divide __ SFU 0 */
+
+
+#define EM_FPU_TYPE_OFFSET 272
+
+/* version of EMULATION software for COPR,0,0 instruction */
+#define EMULATION_VERSION 4
+
+/*
+ * The only way to differentiate between TIMEX and ROLEX (or PCX-S and PCX-T)
+ * is through the potential type field from the PDC_MODEL call. The
+ * following flags are used to assist this differentiation.
+ */
+
+#define ROLEX_POTENTIAL_KEY_FLAGS PDC_MODEL_CPU_KEY_WORD_TO_IO
+#define TIMEX_POTENTIAL_KEY_FLAGS (PDC_MODEL_CPU_KEY_QUAD_STORE | \
+ PDC_MODEL_CPU_KEY_RECIP_SQRT)
+
+
+#endif /* ! _MACHINE_FPU_INCLUDED */
diff --git a/arch/parisc/math-emu/fpudispatch.c b/arch/parisc/math-emu/fpudispatch.c
new file mode 100644
index 00000000..6e28f9f4
--- /dev/null
+++ b/arch/parisc/math-emu/fpudispatch.c
@@ -0,0 +1,1442 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/fp/fpudispatch.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * <<please update with a synopsis of the functionality provided by this file>>
+ *
+ * External Interfaces:
+ * <<the following list was autogenerated, please review>>
+ * emfpudispatch(ir, dummy1, dummy2, fpregs)
+ * fpudispatch(ir, excp_code, holder, fpregs)
+ *
+ * Internal Interfaces:
+ * <<the following list was autogenerated, please review>>
+ * static u_int decode_06(u_int, u_int *)
+ * static u_int decode_0c(u_int, u_int, u_int, u_int *)
+ * static u_int decode_0e(u_int, u_int, u_int, u_int *)
+ * static u_int decode_26(u_int, u_int *)
+ * static u_int decode_2e(u_int, u_int *)
+ * static void update_status_cbit(u_int *, u_int, u_int, u_int)
+ *
+ * Theory:
+ * <<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+#define FPUDEBUG 0
+
+#include "float.h"
+#include <linux/kernel.h>
+#include <asm/processor.h>
+/* #include <sys/debug.h> */
+/* #include <machine/sys/mdep_private.h> */
+
+#define COPR_INST 0x30000000
+
+/*
+ * definition of extru macro. If pos and len are constants, the compiler
+ * will generate an extru instruction when optimized
+ */
+#define extru(r,pos,len) (((r) >> (31-(pos))) & (( 1 << (len)) - 1))
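+/*
+ * For example, extru(ir, 5, 6) evaluates to (ir >> 26) & 0x3f, the six
+ * most significant bits of ir; this follows the PA-RISC convention that
+ * bit 0 is the most significant bit, and is how get_major() below pulls
+ * the major opcode out of an instruction word.
+ */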
+/* definitions of bit field locations in the instruction */
+#define fpmajorpos 5
+#define fpr1pos 10
+#define fpr2pos 15
+#define fptpos 31
+#define fpsubpos 18
+#define fpclass1subpos 16
+#define fpclasspos 22
+#define fpfmtpos 20
+#define fpdfpos 18
+#define fpnulpos 26
+/*
+ * the following are the extra bits for the 0E major op
+ */
+#define fpxr1pos 24
+#define fpxr2pos 19
+#define fpxtpos 25
+#define fpxpos 23
+#define fp0efmtpos 20
+/*
+ * the following are for the multi-ops
+ */
+#define fprm1pos 10
+#define fprm2pos 15
+#define fptmpos 31
+#define fprapos 25
+#define fptapos 20
+#define fpmultifmt 26
+/*
+ * the following are for the fused FP instructions
+ */
+ /* fprm1pos 10 */
+ /* fprm2pos 15 */
+#define fpraupos 18
+#define fpxrm2pos 19
+ /* fpfmtpos 20 */
+#define fpralpos 23
+#define fpxrm1pos 24
+ /* fpxtpos 25 */
+#define fpfusedsubop 26
+ /* fptpos 31 */
+
+/*
+ * offset to constant zero in the FP emulation registers
+ */
+#define fpzeroreg (32*sizeof(double)/sizeof(u_int))
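+/*
+ * With 8-byte doubles and 4-byte u_ints this evaluates to 64, the u_int
+ * index just past the 32 double-width registers; source references to
+ * %fr0 are redirected here (see decode_0c below) so they read as a
+ * constant zero.
+ */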
+
+/*
+ * extract the major opcode from the instruction
+ */
+#define get_major(op) extru(op,fpmajorpos,6)
+/*
+ * extract the two bit class field from the FP instruction. The class is at bit
+ * positions 21-22
+ */
+#define get_class(op) extru(op,fpclasspos,2)
+/*
+ * extract the 3 bit subop field. For all but class 1 instructions, it is
+ * located at bit positions 16-18
+ */
+#define get_subop(op) extru(op,fpsubpos,3)
+/*
+ * extract the 2 or 3 bit subop field from class 1 instructions. It is located
+ * at bit positions 15-16 (PA1.1) or 14-16 (PA2.0)
+ */
+#define get_subop1_PA1_1(op) extru(op,fpclass1subpos,2) /* PA89 (1.1) fmt */
+#define get_subop1_PA2_0(op) extru(op,fpclass1subpos,3) /* PA 2.0 fmt */
+
+/* definitions of unimplemented exceptions */
+#define MAJOR_0C_EXCP 0x09
+#define MAJOR_0E_EXCP 0x0b
+#define MAJOR_06_EXCP 0x03
+#define MAJOR_26_EXCP 0x23
+#define MAJOR_2E_EXCP 0x2b
+#define PA83_UNIMP_EXCP 0x01
+
+/*
+ * Special Defines for TIMEX specific code
+ */
+
+#define FPU_TYPE_FLAG_POS (EM_FPU_TYPE_OFFSET>>2)
+#define TIMEX_ROLEX_FPU_MASK (TIMEX_EXTEN_FLAG|ROLEX_EXTEN_FLAG)
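+/*
+ * EM_FPU_TYPE_OFFSET (272, from fpu.h) is a byte offset, so shifting it
+ * right by two converts it into an index into the u_int fpregs[] array.
+ */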
+
+/*
+ * Static function definitions
+ */
+#define _PROTOTYPES
+#if defined(_PROTOTYPES) || defined(_lint)
+static u_int decode_0c(u_int, u_int, u_int, u_int *);
+static u_int decode_0e(u_int, u_int, u_int, u_int *);
+static u_int decode_06(u_int, u_int *);
+static u_int decode_26(u_int, u_int *);
+static u_int decode_2e(u_int, u_int *);
+static void update_status_cbit(u_int *, u_int, u_int, u_int);
+#else /* !_PROTOTYPES&&!_lint */
+static u_int decode_0c();
+static u_int decode_0e();
+static u_int decode_06();
+static u_int decode_26();
+static u_int decode_2e();
+static void update_status_cbit();
+#endif /* _PROTOTYPES&&!_lint */
+
+#define VASSERT(x)
+
+static void parisc_linux_get_fpu_type(u_int fpregs[])
+{
+ /* on pa-linux the fpu type is not filled in by the
+ * caller; it is constructed here
+ */
+ if (boot_cpu_data.cpu_type == pcxs)
+ fpregs[FPU_TYPE_FLAG_POS] = TIMEX_EXTEN_FLAG;
+ else if (boot_cpu_data.cpu_type == pcxt ||
+ boot_cpu_data.cpu_type == pcxt_)
+ fpregs[FPU_TYPE_FLAG_POS] = ROLEX_EXTEN_FLAG;
+ else if (boot_cpu_data.cpu_type >= pcxu)
+ fpregs[FPU_TYPE_FLAG_POS] = PA2_0_FPU_FLAG;
+}
+
+/*
+ * this routine will decode the excepting floating point instruction and
+ * call the appropriate emulation routine.
+ * It is called by decode_fpu with the following parameters:
+ * fpudispatch(current_ir, unimplemented_code, 0, &Fpu_register)
+ * where current_ir is the instruction to be emulated,
+ * unimplemented_code is the exception_code that the hardware generated
+ * and &Fpu_register is the address of emulated FP reg 0.
+ */
+u_int
+fpudispatch(u_int ir, u_int excp_code, u_int holder, u_int fpregs[])
+{
+ u_int class, subop;
+ u_int fpu_type_flags;
+
+ /* All FP emulation code assumes that ints are 4-bytes in length */
+ VASSERT(sizeof(int) == 4);
+
+ parisc_linux_get_fpu_type(fpregs);
+
+ fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */
+
+ class = get_class(ir);
+ if (class == 1) {
+ if (fpu_type_flags & PA2_0_FPU_FLAG)
+ subop = get_subop1_PA2_0(ir);
+ else
+ subop = get_subop1_PA1_1(ir);
+ }
+ else
+ subop = get_subop(ir);
+
+ if (FPUDEBUG) printk("class %d subop %d\n", class, subop);
+
+ switch (excp_code) {
+ case MAJOR_0C_EXCP:
+ case PA83_UNIMP_EXCP:
+ return(decode_0c(ir,class,subop,fpregs));
+ case MAJOR_0E_EXCP:
+ return(decode_0e(ir,class,subop,fpregs));
+ case MAJOR_06_EXCP:
+ return(decode_06(ir,fpregs));
+ case MAJOR_26_EXCP:
+ return(decode_26(ir,fpregs));
+ case MAJOR_2E_EXCP:
+ return(decode_2e(ir,fpregs));
+ default:
+ /* "crashme Night Gallery painting nr 2. (asm_crash.s).
+ * This was fixed for multi-user kernels, but
+ * workstation kernels had a panic here. This allowed
+ * any arbitrary user to panic the kernel by executing
+ * setting the FP exception registers to strange values
+ * and generating an emulation trap. The emulation and
+ * exception code must never be able to panic the
+ * kernel.
+ */
+ return(UNIMPLEMENTEDEXCEPTION);
+ }
+}
+
+/*
+ * this routine is called by $emulation_trap to emulate a coprocessor
+ * instruction when no coprocessor hardware is present
+ */
+u_int
+emfpudispatch(u_int ir, u_int dummy1, u_int dummy2, u_int fpregs[])
+{
+ u_int class, subop, major;
+ u_int fpu_type_flags;
+
+ /* All FP emulation code assumes that ints are 4-bytes in length */
+ VASSERT(sizeof(int) == 4);
+
+ fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */
+
+ major = get_major(ir);
+ class = get_class(ir);
+ if (class == 1) {
+ if (fpu_type_flags & PA2_0_FPU_FLAG)
+ subop = get_subop1_PA2_0(ir);
+ else
+ subop = get_subop1_PA1_1(ir);
+ }
+ else
+ subop = get_subop(ir);
+ switch (major) {
+ case 0x0C:
+ return(decode_0c(ir,class,subop,fpregs));
+ case 0x0E:
+ return(decode_0e(ir,class,subop,fpregs));
+ case 0x06:
+ return(decode_06(ir,fpregs));
+ case 0x26:
+ return(decode_26(ir,fpregs));
+ case 0x2E:
+ return(decode_2e(ir,fpregs));
+ default:
+ return(PA83_UNIMP_EXCP);
+ }
+}
+
+
+static u_int
+decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+{
+ u_int r1,r2,t; /* operand register offsets */
+ u_int fmt; /* also sf for class 1 conversions */
+ u_int df; /* for class 1 conversions */
+ u_int *status;
+ u_int retval, local_status;
+ u_int fpu_type_flags;
+
+ if (ir == COPR_INST) {
+ fpregs[0] = EMULATION_VERSION << 11;
+ return(NOEXCEPTION);
+ }
+ status = &fpregs[0]; /* fp status register */
+ local_status = fpregs[0]; /* and local copy */
+ r1 = extru(ir,fpr1pos,5) * sizeof(double)/sizeof(u_int);
+ if (r1 == 0) /* map fr0 source to constant zero */
+ r1 = fpzeroreg;
+ t = extru(ir,fptpos,5) * sizeof(double)/sizeof(u_int);
+ if (t == 0 && class != 2) /* don't allow fr0 as a dest */
+ return(MAJOR_0C_EXCP);
+ fmt = extru(ir,fpfmtpos,2); /* get fmt completer */
+
+ switch (class) {
+ case 0:
+ switch (subop) {
+ case 0: /* COPR 0,0 emulated above */
+ case 1:
+ return(MAJOR_0C_EXCP);
+ case 2: /* FCPY */
+ switch (fmt) {
+ case 2: /* illegal */
+ return(MAJOR_0C_EXCP);
+ case 3: /* quad */
+ t &= ~3; /* force to even reg #s */
+ r1 &= ~3;
+ fpregs[t+3] = fpregs[r1+3];
+ fpregs[t+2] = fpregs[r1+2];
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
+ case 0: /* single */
+ fpregs[t] = fpregs[r1];
+ return(NOEXCEPTION);
+ }
+ case 3: /* FABS */
+ switch (fmt) {
+ case 2: /* illegal */
+ return(MAJOR_0C_EXCP);
+ case 3: /* quad */
+ t &= ~3; /* force to even reg #s */
+ r1 &= ~3;
+ fpregs[t+3] = fpregs[r1+3];
+ fpregs[t+2] = fpregs[r1+2];
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
+ case 0: /* single */
+ /* copy and clear sign bit */
+ fpregs[t] = fpregs[r1] & 0x7fffffff;
+ return(NOEXCEPTION);
+ }
+ case 6: /* FNEG */
+ switch (fmt) {
+ case 2: /* illegal */
+ return(MAJOR_0C_EXCP);
+ case 3: /* quad */
+ t &= ~3; /* force to even reg #s */
+ r1 &= ~3;
+ fpregs[t+3] = fpregs[r1+3];
+ fpregs[t+2] = fpregs[r1+2];
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
+ case 0: /* single */
+ /* copy and invert sign bit */
+ fpregs[t] = fpregs[r1] ^ 0x80000000;
+ return(NOEXCEPTION);
+ }
+ case 7: /* FNEGABS */
+ switch (fmt) {
+ case 2: /* illegal */
+ return(MAJOR_0C_EXCP);
+ case 3: /* quad */
+ t &= ~3; /* force to even reg #s */
+ r1 &= ~3;
+ fpregs[t+3] = fpregs[r1+3];
+ fpregs[t+2] = fpregs[r1+2];
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
+ case 0: /* single */
+ /* copy and set sign bit */
+ fpregs[t] = fpregs[r1] | 0x80000000;
+ return(NOEXCEPTION);
+ }
+ case 4: /* FSQRT */
+ switch (fmt) {
+ case 0:
+ return(sgl_fsqrt(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 1:
+ return(dbl_fsqrt(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2:
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
+ case 5: /* FRND */
+ switch (fmt) {
+ case 0:
+ return(sgl_frnd(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 1:
+ return(dbl_frnd(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2:
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
+ } /* end of switch (subop) */
+
+ case 1: /* class 1 */
+ df = extru(ir,fpdfpos,2); /* get dest format */
+ if ((df & 2) || (fmt & 2)) {
+ /*
+ * fmts 2 and 3 are illegal or not-implemented
+ * quad conversions
+ */
+ return(MAJOR_0C_EXCP);
+ }
+ /*
+ * encode source and dest formats into 2 bits.
+ * high bit is source, low bit is dest.
+ * bit = 1 --> double precision
+ */
+ fmt = (fmt << 1) | df;
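+ /* e.g. fmt == 1 now means a sgl source with a dbl dest ("sgl/dbl" below) */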
+ switch (subop) {
+ case 0: /* FCNVFF */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+ return(MAJOR_0C_EXCP);
+ case 1: /* sgl/dbl */
+ return(sgl_to_dbl_fcnvff(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2: /* dbl/sgl */
+ return(dbl_to_sgl_fcnvff(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 3: /* dbl/dbl */
+ return(MAJOR_0C_EXCP);
+ }
+ case 1: /* FCNVXF */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+ return(sgl_to_sgl_fcnvxf(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 1: /* sgl/dbl */
+ return(sgl_to_dbl_fcnvxf(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2: /* dbl/sgl */
+ return(dbl_to_sgl_fcnvxf(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 3: /* dbl/dbl */
+ return(dbl_to_dbl_fcnvxf(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
+ case 2: /* FCNVFX */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+ return(sgl_to_sgl_fcnvfx(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 1: /* sgl/dbl */
+ return(sgl_to_dbl_fcnvfx(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2: /* dbl/sgl */
+ return(dbl_to_sgl_fcnvfx(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 3: /* dbl/dbl */
+ return(dbl_to_dbl_fcnvfx(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
+ case 3: /* FCNVFXT */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+ return(sgl_to_sgl_fcnvfxt(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 1: /* sgl/dbl */
+ return(sgl_to_dbl_fcnvfxt(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2: /* dbl/sgl */
+ return(dbl_to_sgl_fcnvfxt(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 3: /* dbl/dbl */
+ return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
+ case 5: /* FCNVUF (PA2.0 only) */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+ return(sgl_to_sgl_fcnvuf(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 1: /* sgl/dbl */
+ return(sgl_to_dbl_fcnvuf(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2: /* dbl/sgl */
+ return(dbl_to_sgl_fcnvuf(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 3: /* dbl/dbl */
+ return(dbl_to_dbl_fcnvuf(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
+ case 6: /* FCNVFU (PA2.0 only) */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+ return(sgl_to_sgl_fcnvfu(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 1: /* sgl/dbl */
+ return(sgl_to_dbl_fcnvfu(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2: /* dbl/sgl */
+ return(dbl_to_sgl_fcnvfu(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 3: /* dbl/dbl */
+ return(dbl_to_dbl_fcnvfu(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
+ case 7: /* FCNVFUT (PA2.0 only) */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+ return(sgl_to_sgl_fcnvfut(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 1: /* sgl/dbl */
+ return(sgl_to_dbl_fcnvfut(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2: /* dbl/sgl */
+ return(dbl_to_sgl_fcnvfut(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 3: /* dbl/dbl */
+ return(dbl_to_dbl_fcnvfut(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
+ case 4: /* undefined */
+ return(MAJOR_0C_EXCP);
+ } /* end of switch subop */
+
+ case 2: /* class 2 */
+ fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS];
+ r2 = extru(ir, fpr2pos, 5) * sizeof(double)/sizeof(u_int);
+ if (r2 == 0)
+ r2 = fpzeroreg;
+ if (fpu_type_flags & PA2_0_FPU_FLAG) {
+ /* FTEST if nullify bit set, otherwise FCMP */
+ if (extru(ir, fpnulpos, 1)) { /* FTEST */
+ switch (fmt) {
+ case 0:
+ /*
+ * arg0 is not used
+ * second param is the t field used for
+ * ftest,acc and ftest,rej
+ * third param is the subop (y-field)
+ */
+ BUG();
+ /* Unsupported
+ * return(ftest(0L,extru(ir,fptpos,5),
+ * &fpregs[0],subop));
+ */
+ case 1:
+ case 2:
+ case 3:
+ return(MAJOR_0C_EXCP);
+ }
+ } else { /* FCMP */
+ switch (fmt) {
+ case 0:
+ retval = sgl_fcmp(&fpregs[r1],
+ &fpregs[r2],extru(ir,fptpos,5),
+ &local_status);
+ update_status_cbit(status,local_status,
+ fpu_type_flags, subop);
+ return(retval);
+ case 1:
+ retval = dbl_fcmp(&fpregs[r1],
+ &fpregs[r2],extru(ir,fptpos,5),
+ &local_status);
+ update_status_cbit(status,local_status,
+ fpu_type_flags, subop);
+ return(retval);
+ case 2: /* illegal */
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
+ }
+ } /* end of if for PA2.0 */
+ else { /* PA1.0 & PA1.1 */
+ switch (subop) {
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ return(MAJOR_0C_EXCP);
+ case 0: /* FCMP */
+ switch (fmt) {
+ case 0:
+ retval = sgl_fcmp(&fpregs[r1],
+ &fpregs[r2],extru(ir,fptpos,5),
+ &local_status);
+ update_status_cbit(status,local_status,
+ fpu_type_flags, subop);
+ return(retval);
+ case 1:
+ retval = dbl_fcmp(&fpregs[r1],
+ &fpregs[r2],extru(ir,fptpos,5),
+ &local_status);
+ update_status_cbit(status,local_status,
+ fpu_type_flags, subop);
+ return(retval);
+ case 2: /* illegal */
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
+ case 1: /* FTEST */
+ switch (fmt) {
+ case 0:
+ /*
+ * arg0 is not used
+ * second param is the t field used for
+ * ftest,acc and ftest,rej
+ * third param is the subop (y-field)
+ */
+ BUG();
+ /* unsupported
+ * return(ftest(0L,extru(ir,fptpos,5),
+ * &fpregs[0],subop));
+ */
+ case 1:
+ case 2:
+ case 3:
+ return(MAJOR_0C_EXCP);
+ }
+ } /* end of switch subop */
+ } /* end of else for PA1.0 & PA1.1 */
+ case 3: /* class 3 */
+ r2 = extru(ir,fpr2pos,5) * sizeof(double)/sizeof(u_int);
+ if (r2 == 0)
+ r2 = fpzeroreg;
+ switch (subop) {
+ case 5:
+ case 6:
+ case 7:
+ return(MAJOR_0C_EXCP);
+
+ case 0: /* FADD */
+ switch (fmt) {
+ case 0:
+ return(sgl_fadd(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ case 1:
+ return(dbl_fadd(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ case 2: /* illegal */
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
+ case 1: /* FSUB */
+ switch (fmt) {
+ case 0:
+ return(sgl_fsub(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ case 1:
+ return(dbl_fsub(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ case 2: /* illegal */
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
+ case 2: /* FMPY */
+ switch (fmt) {
+ case 0:
+ return(sgl_fmpy(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ case 1:
+ return(dbl_fmpy(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ case 2: /* illegal */
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
+ case 3: /* FDIV */
+ switch (fmt) {
+ case 0:
+ return(sgl_fdiv(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ case 1:
+ return(dbl_fdiv(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ case 2: /* illegal */
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
+ case 4: /* FREM */
+ switch (fmt) {
+ case 0:
+ return(sgl_frem(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ case 1:
+ return(dbl_frem(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ case 2: /* illegal */
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
+ } /* end of class 3 switch */
+ } /* end of switch(class) */
+
+ /* If we get here, something is really wrong! */
+ return(MAJOR_0C_EXCP);
+}
+
+static u_int
+decode_0e(u_int ir, u_int class, u_int subop, u_int fpregs[])
+{
+ u_int r1,r2,t; /* operand register offsets */
+ u_int fmt; /* also sf for class 1 conversions */
+ u_int df; /* dest format for class 1 conversions */
+ u_int *status;
+ u_int retval, local_status;
+ u_int fpu_type_flags;
+
+ status = &fpregs[0];
+ local_status = fpregs[0];
+ r1 = ((extru(ir,fpr1pos,5)<<1)|(extru(ir,fpxr1pos,1)));
+ if (r1 == 0)
+ r1 = fpzeroreg;
+ t = ((extru(ir,fptpos,5)<<1)|(extru(ir,fpxtpos,1)));
+ if (t == 0 && class != 2)
+ return(MAJOR_0E_EXCP);
+ if (class < 2) /* class 0 or 1 has 2 bit fmt */
+ fmt = extru(ir,fpfmtpos,2);
+ else /* class 2 and 3 have 1 bit fmt */
+ fmt = extru(ir,fp0efmtpos,1);
+ /*
+ * An undefined combination, double precision accessing the
+ * right half of a FPR, can get us into trouble.
+ * Let's just force proper alignment on it.
+ */
+ if (fmt == DBL) {
+ r1 &= ~1;
+ if (class != 1)
+ t &= ~1;
+ }
+
+ switch (class) {
+ case 0:
+ switch (subop) {
+ case 0: /* unimplemented */
+ case 1:
+ return(MAJOR_0E_EXCP);
+ case 2: /* FCPY */
+ switch (fmt) {
+ case 2:
+ case 3:
+ return(MAJOR_0E_EXCP);
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
+ case 0: /* single */
+ fpregs[t] = fpregs[r1];
+ return(NOEXCEPTION);
+ }
+ case 3: /* FABS */
+ switch (fmt) {
+ case 2:
+ case 3:
+ return(MAJOR_0E_EXCP);
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
+ case 0: /* single */
+ fpregs[t] = fpregs[r1] & 0x7fffffff;
+ return(NOEXCEPTION);
+ }
+ case 6: /* FNEG */
+ switch (fmt) {
+ case 2:
+ case 3:
+ return(MAJOR_0E_EXCP);
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
+ case 0: /* single */
+ fpregs[t] = fpregs[r1] ^ 0x80000000;
+ return(NOEXCEPTION);
+ }
+ case 7: /* FNEGABS */
+ switch (fmt) {
+ case 2:
+ case 3:
+ return(MAJOR_0E_EXCP);
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
+ case 0: /* single */
+ fpregs[t] = fpregs[r1] | 0x80000000;
+ return(NOEXCEPTION);
+ }
+ case 4: /* FSQRT */
+ switch (fmt) {
+ case 0:
+ return(sgl_fsqrt(&fpregs[r1],0,
+ &fpregs[t], status));
+ case 1:
+ return(dbl_fsqrt(&fpregs[r1],0,
+ &fpregs[t], status));
+ case 2:
+ case 3:
+ return(MAJOR_0E_EXCP);
+ }
+ case 5: /* FRND */
+ switch (fmt) {
+ case 0:
+ return(sgl_frnd(&fpregs[r1],0,
+ &fpregs[t], status));
+ case 1:
+ return(dbl_frnd(&fpregs[r1],0,
+ &fpregs[t], status));
+ case 2:
+ case 3:
+ return(MAJOR_0E_EXCP);
+ }
+ } /* end of switch (subop) */
+
+ case 1: /* class 1 */
+ df = extru(ir,fpdfpos,2); /* get dest format */
+ /*
+ * Fix Crashme problem (writing to 31R in double precision)
+ * here too.
+ */
+ if (df == DBL) {
+ t &= ~1;
+ }
+ if ((df & 2) || (fmt & 2))
+ return(MAJOR_0E_EXCP);
+
+ fmt = (fmt << 1) | df;
+ switch (subop) {
+ case 0: /* FCNVFF */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+ return(MAJOR_0E_EXCP);
+ case 1: /* sgl/dbl */
+ return(sgl_to_dbl_fcnvff(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2: /* dbl/sgl */
+ return(dbl_to_sgl_fcnvff(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 3: /* dbl/dbl */
+ return(MAJOR_0E_EXCP);
+ }
+ case 1: /* FCNVXF */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+ return(sgl_to_sgl_fcnvxf(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 1: /* sgl/dbl */
+ return(sgl_to_dbl_fcnvxf(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2: /* dbl/sgl */
+ return(dbl_to_sgl_fcnvxf(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 3: /* dbl/dbl */
+ return(dbl_to_dbl_fcnvxf(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
+ case 2: /* FCNVFX */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+ return(sgl_to_sgl_fcnvfx(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 1: /* sgl/dbl */
+ return(sgl_to_dbl_fcnvfx(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2: /* dbl/sgl */
+ return(dbl_to_sgl_fcnvfx(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 3: /* dbl/dbl */
+ return(dbl_to_dbl_fcnvfx(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
+ case 3: /* FCNVFXT */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+ return(sgl_to_sgl_fcnvfxt(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 1: /* sgl/dbl */
+ return(sgl_to_dbl_fcnvfxt(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2: /* dbl/sgl */
+ return(dbl_to_sgl_fcnvfxt(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 3: /* dbl/dbl */
+ return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
+ case 5: /* FCNVUF (PA2.0 only) */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+ return(sgl_to_sgl_fcnvuf(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 1: /* sgl/dbl */
+ return(sgl_to_dbl_fcnvuf(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2: /* dbl/sgl */
+ return(dbl_to_sgl_fcnvuf(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 3: /* dbl/dbl */
+ return(dbl_to_dbl_fcnvuf(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
+ case 6: /* FCNVFU (PA2.0 only) */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+ return(sgl_to_sgl_fcnvfu(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 1: /* sgl/dbl */
+ return(sgl_to_dbl_fcnvfu(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2: /* dbl/sgl */
+ return(dbl_to_sgl_fcnvfu(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 3: /* dbl/dbl */
+ return(dbl_to_dbl_fcnvfu(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
+ case 7: /* FCNVFUT (PA2.0 only) */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+ return(sgl_to_sgl_fcnvfut(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 1: /* sgl/dbl */
+ return(sgl_to_dbl_fcnvfut(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 2: /* dbl/sgl */
+ return(dbl_to_sgl_fcnvfut(&fpregs[r1],0,
+ &fpregs[t],status));
+ case 3: /* dbl/dbl */
+ return(dbl_to_dbl_fcnvfut(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
+ case 4: /* undefined */
+ return(MAJOR_0C_EXCP);
+ } /* end of switch subop */
+ case 2: /* class 2 */
+ /*
+ * Be careful out there.
+ * Crashme can generate cases where FR31R is specified
+ * as the source or target of a double precision operation.
+ * Since we just pass the address of the floating-point
+ * register to the emulation routines, this can cause
+ * corruption of fpzeroreg.
+ */
+ if (fmt == DBL)
+ r2 = (extru(ir,fpr2pos,5)<<1);
+ else
+ r2 = ((extru(ir,fpr2pos,5)<<1)|(extru(ir,fpxr2pos,1)));
+ fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS];
+ if (r2 == 0)
+ r2 = fpzeroreg;
+ if (fpu_type_flags & PA2_0_FPU_FLAG) {
+ /* FTEST if nullify bit set, otherwise FCMP */
+ if (extru(ir, fpnulpos, 1)) { /* FTEST */
+ /* not legal */
+ return(MAJOR_0E_EXCP);
+ } else { /* FCMP */
+ switch (fmt) {
+ /*
+ * fmt is only 1 bit long
+ */
+ case 0:
+ retval = sgl_fcmp(&fpregs[r1],
+ &fpregs[r2],extru(ir,fptpos,5),
+ &local_status);
+ update_status_cbit(status,local_status,
+ fpu_type_flags, subop);
+ return(retval);
+ case 1:
+ retval = dbl_fcmp(&fpregs[r1],
+ &fpregs[r2],extru(ir,fptpos,5),
+ &local_status);
+ update_status_cbit(status,local_status,
+ fpu_type_flags, subop);
+ return(retval);
+ }
+ }
+ } /* end of if for PA2.0 */
+ else { /* PA1.0 & PA1.1 */
+ switch (subop) {
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ return(MAJOR_0E_EXCP);
+ case 0: /* FCMP */
+ switch (fmt) {
+ /*
+ * fmt is only 1 bit long
+ */
+ case 0:
+ retval = sgl_fcmp(&fpregs[r1],
+ &fpregs[r2],extru(ir,fptpos,5),
+ &local_status);
+ update_status_cbit(status,local_status,
+ fpu_type_flags, subop);
+ return(retval);
+ case 1:
+ retval = dbl_fcmp(&fpregs[r1],
+ &fpregs[r2],extru(ir,fptpos,5),
+ &local_status);
+ update_status_cbit(status,local_status,
+ fpu_type_flags, subop);
+ return(retval);
+ }
+ } /* end of switch subop */
+ } /* end of else for PA1.0 & PA1.1 */
+ case 3: /* class 3 */
+ /*
+ * Be careful out there.
+ * Crashme can generate cases where FR31R is specified
+ * as the source or target of a double precision operation.
+ * Since we just pass the address of the floating-point
+ * register to the emulation routines, this can cause
+ * corruption of fpzeroreg.
+ */
+ if (fmt == DBL)
+ r2 = (extru(ir,fpr2pos,5)<<1);
+ else
+ r2 = ((extru(ir,fpr2pos,5)<<1)|(extru(ir,fpxr2pos,1)));
+ if (r2 == 0)
+ r2 = fpzeroreg;
+ switch (subop) {
+ case 5:
+ case 6:
+ case 7:
+ return(MAJOR_0E_EXCP);
+
+ /*
+ * Note that fmt is only 1 bit for class 3
+ */
+ case 0: /* FADD */
+ switch (fmt) {
+ case 0:
+ return(sgl_fadd(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ case 1:
+ return(dbl_fadd(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ }
+ case 1: /* FSUB */
+ switch (fmt) {
+ case 0:
+ return(sgl_fsub(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ case 1:
+ return(dbl_fsub(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ }
+ case 2: /* FMPY or XMPYU */
+ /*
+ * check for integer multiply (x bit set)
+ */
+ if (extru(ir,fpxpos,1)) {
+ /*
+ * emulate XMPYU
+ */
+ switch (fmt) {
+ case 0:
+ /*
+ * bad instruction if t specifies
+ * the right half of a register
+ */
+ if (t & 1)
+ return(MAJOR_0E_EXCP);
+ BUG();
+ /* unsupported
+ * impyu(&fpregs[r1],&fpregs[r2],
+ * &fpregs[t]);
+ */
+ return(NOEXCEPTION);
+ case 1:
+ return(MAJOR_0E_EXCP);
+ }
+ }
+ else { /* FMPY */
+ switch (fmt) {
+ case 0:
+ return(sgl_fmpy(&fpregs[r1],
+ &fpregs[r2],&fpregs[t],status));
+ case 1:
+ return(dbl_fmpy(&fpregs[r1],
+ &fpregs[r2],&fpregs[t],status));
+ }
+ }
+ case 3: /* FDIV */
+ switch (fmt) {
+ case 0:
+ return(sgl_fdiv(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ case 1:
+ return(dbl_fdiv(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ }
+ case 4: /* FREM */
+ switch (fmt) {
+ case 0:
+ return(sgl_frem(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ case 1:
+ return(dbl_frem(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ }
+ } /* end of class 3 switch */
+ } /* end of switch(class) */
+
+ /* If we get here, something is really wrong! */
+ return(MAJOR_0E_EXCP);
+}
+
+
+/*
+ * routine to decode the 06 (FMPYADD and FMPYCFXT) instruction
+ */
+static u_int
+decode_06(u_int ir, u_int fpregs[])
+{
+ u_int rm1, rm2, tm, ra, ta; /* operands */
+ u_int fmt;
+ u_int error = 0;
+ u_int status;
+ u_int fpu_type_flags;
+ union {
+ double dbl;
+ float flt;
+ struct { u_int i1; u_int i2; } ints;
+ } mtmp, atmp;
+
+
+ status = fpregs[0]; /* use a local copy of status reg */
+ fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */
+ fmt = extru(ir, fpmultifmt, 1); /* get sgl/dbl flag */
+ if (fmt == 0) { /* DBL */
+ rm1 = extru(ir, fprm1pos, 5) * sizeof(double)/sizeof(u_int);
+ if (rm1 == 0)
+ rm1 = fpzeroreg;
+ rm2 = extru(ir, fprm2pos, 5) * sizeof(double)/sizeof(u_int);
+ if (rm2 == 0)
+ rm2 = fpzeroreg;
+ tm = extru(ir, fptmpos, 5) * sizeof(double)/sizeof(u_int);
+ if (tm == 0)
+ return(MAJOR_06_EXCP);
+ ra = extru(ir, fprapos, 5) * sizeof(double)/sizeof(u_int);
+ ta = extru(ir, fptapos, 5) * sizeof(double)/sizeof(u_int);
+ if (ta == 0)
+ return(MAJOR_06_EXCP);
+
+ if (fpu_type_flags & TIMEX_ROLEX_FPU_MASK) {
+
+ if (ra == 0) {
+ /* special case FMPYCFXT, see sgl case below */
+ if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],
+ &mtmp.ints.i1,&status))
+ error = 1;
+ if (dbl_to_sgl_fcnvfxt(&fpregs[ta],
+ &atmp.ints.i1,&atmp.ints.i1,&status))
+ error = 1;
+ }
+ else {
+
+ if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,
+ &status))
+ error = 1;
+ if (dbl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,
+ &status))
+ error = 1;
+ }
+ }
+
+ else
+
+ {
+ if (ra == 0)
+ ra = fpzeroreg;
+
+ if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,
+ &status))
+ error = 1;
+ if (dbl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,
+ &status))
+ error = 1;
+
+ }
+
+ if (error)
+ return(MAJOR_06_EXCP);
+ else {
+ /* copy results */
+ fpregs[tm] = mtmp.ints.i1;
+ fpregs[tm+1] = mtmp.ints.i2;
+ fpregs[ta] = atmp.ints.i1;
+ fpregs[ta+1] = atmp.ints.i2;
+ fpregs[0] = status;
+ return(NOEXCEPTION);
+ }
+ }
+ else { /* SGL */
+ /*
+ * calculate offsets for single precision numbers
+ * See table 6-14 in PA-89 architecture for mapping
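+ * (ORing 0x10 into each 4-bit operand field selects one of
+ * fr16..fr31, and the extra instruction bit picks the left or
+ * right word of that register)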
+ */
+ rm1 = (extru(ir,fprm1pos,4) | 0x10 ) << 1; /* get offset */
+ rm1 |= extru(ir,fprm1pos-4,1); /* add right word offset */
+
+ rm2 = (extru(ir,fprm2pos,4) | 0x10 ) << 1; /* get offset */
+ rm2 |= extru(ir,fprm2pos-4,1); /* add right word offset */
+
+ tm = (extru(ir,fptmpos,4) | 0x10 ) << 1; /* get offset */
+ tm |= extru(ir,fptmpos-4,1); /* add right word offset */
+
+ ra = (extru(ir,fprapos,4) | 0x10 ) << 1; /* get offset */
+ ra |= extru(ir,fprapos-4,1); /* add right word offset */
+
+ ta = (extru(ir,fptapos,4) | 0x10 ) << 1; /* get offset */
+ ta |= extru(ir,fptapos-4,1); /* add right word offset */
+
+ if (ra == 0x20 &&(fpu_type_flags & TIMEX_ROLEX_FPU_MASK)) {
+ /* special case FMPYCFXT (really 0)
+ * This instruction is only present on the Timex and
+ * Rolex FPUs, so if this is the special case and we are
+ * running on one of those FPUs, we run the FMPYCFXT instruction
+ */
+ if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,
+ &status))
+ error = 1;
+ if (sgl_to_sgl_fcnvfxt(&fpregs[ta],&atmp.ints.i1,
+ &atmp.ints.i1,&status))
+ error = 1;
+ }
+ else {
+ if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,
+ &status))
+ error = 1;
+ if (sgl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,
+ &status))
+ error = 1;
+ }
+ if (error)
+ return(MAJOR_06_EXCP);
+ else {
+ /* copy results */
+ fpregs[tm] = mtmp.ints.i1;
+ fpregs[ta] = atmp.ints.i1;
+ fpregs[0] = status;
+ return(NOEXCEPTION);
+ }
+ }
+}
+
+/*
+ * routine to decode the 26 (FMPYSUB) instruction
+ */
+static u_int
+decode_26(u_int ir, u_int fpregs[])
+{
+ u_int rm1, rm2, tm, ra, ta; /* operands */
+ u_int fmt;
+ u_int error = 0;
+ u_int status;
+ union {
+ double dbl;
+ float flt;
+ struct { u_int i1; u_int i2; } ints;
+ } mtmp, atmp;
+
+
+ status = fpregs[0];
+ fmt = extru(ir, fpmultifmt, 1); /* get sgl/dbl flag */
+ if (fmt == 0) { /* DBL */
+ rm1 = extru(ir, fprm1pos, 5) * sizeof(double)/sizeof(u_int);
+ if (rm1 == 0)
+ rm1 = fpzeroreg;
+ rm2 = extru(ir, fprm2pos, 5) * sizeof(double)/sizeof(u_int);
+ if (rm2 == 0)
+ rm2 = fpzeroreg;
+ tm = extru(ir, fptmpos, 5) * sizeof(double)/sizeof(u_int);
+ if (tm == 0)
+ return(MAJOR_26_EXCP);
+ ra = extru(ir, fprapos, 5) * sizeof(double)/sizeof(u_int);
+ if (ra == 0)
+ return(MAJOR_26_EXCP);
+ ta = extru(ir, fptapos, 5) * sizeof(double)/sizeof(u_int);
+ if (ta == 0)
+ return(MAJOR_26_EXCP);
+
+ if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,&status))
+ error = 1;
+ if (dbl_fsub(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,&status))
+ error = 1;
+ if (error)
+ return(MAJOR_26_EXCP);
+ else {
+ /* copy results */
+ fpregs[tm] = mtmp.ints.i1;
+ fpregs[tm+1] = mtmp.ints.i2;
+ fpregs[ta] = atmp.ints.i1;
+ fpregs[ta+1] = atmp.ints.i2;
+ fpregs[0] = status;
+ return(NOEXCEPTION);
+ }
+ }
+ else { /* SGL */
+ /*
+ * calculate offsets for single precision numbers
+ * See table 6-14 in PA-89 architecture for mapping
+ */
+ rm1 = (extru(ir,fprm1pos,4) | 0x10 ) << 1; /* get offset */
+ rm1 |= extru(ir,fprm1pos-4,1); /* add right word offset */
+
+ rm2 = (extru(ir,fprm2pos,4) | 0x10 ) << 1; /* get offset */
+ rm2 |= extru(ir,fprm2pos-4,1); /* add right word offset */
+
+ tm = (extru(ir,fptmpos,4) | 0x10 ) << 1; /* get offset */
+ tm |= extru(ir,fptmpos-4,1); /* add right word offset */
+
+ ra = (extru(ir,fprapos,4) | 0x10 ) << 1; /* get offset */
+ ra |= extru(ir,fprapos-4,1); /* add right word offset */
+
+ ta = (extru(ir,fptapos,4) | 0x10 ) << 1; /* get offset */
+ ta |= extru(ir,fptapos-4,1); /* add right word offset */
+
+ if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,&status))
+ error = 1;
+ if (sgl_fsub(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,&status))
+ error = 1;
+ if (error)
+ return(MAJOR_26_EXCP);
+ else {
+ /* copy results */
+ fpregs[tm] = mtmp.ints.i1;
+ fpregs[ta] = atmp.ints.i1;
+ fpregs[0] = status;
+ return(NOEXCEPTION);
+ }
+ }
+
+}
+
+/*
+ * routine to decode the 2E (FMPYFADD,FMPYNFADD) instructions
+ */
+static u_int
+decode_2e(u_int ir, u_int fpregs[])
+{
+ u_int rm1, rm2, ra, t; /* operands */
+ u_int fmt;
+
+ fmt = extru(ir,fpfmtpos,1); /* get fmt completer */
+ if (fmt == DBL) { /* DBL */
+ rm1 = extru(ir,fprm1pos,5) * sizeof(double)/sizeof(u_int);
+ if (rm1 == 0)
+ rm1 = fpzeroreg;
+ rm2 = extru(ir,fprm2pos,5) * sizeof(double)/sizeof(u_int);
+ if (rm2 == 0)
+ rm2 = fpzeroreg;
+ ra = ((extru(ir,fpraupos,3)<<2)|(extru(ir,fpralpos,3)>>1)) *
+ sizeof(double)/sizeof(u_int);
+ if (ra == 0)
+ ra = fpzeroreg;
+ t = extru(ir,fptpos,5) * sizeof(double)/sizeof(u_int);
+ if (t == 0)
+ return(MAJOR_2E_EXCP);
+
+ if (extru(ir,fpfusedsubop,1)) { /* fmpyfadd or fmpynfadd? */
+ return(dbl_fmpynfadd(&fpregs[rm1], &fpregs[rm2],
+ &fpregs[ra], &fpregs[0], &fpregs[t]));
+ } else {
+ return(dbl_fmpyfadd(&fpregs[rm1], &fpregs[rm2],
+ &fpregs[ra], &fpregs[0], &fpregs[t]));
+ }
+ } /* end DBL */
+ else { /* SGL */
+ rm1 = (extru(ir,fprm1pos,5)<<1)|(extru(ir,fpxrm1pos,1));
+ if (rm1 == 0)
+ rm1 = fpzeroreg;
+ rm2 = (extru(ir,fprm2pos,5)<<1)|(extru(ir,fpxrm2pos,1));
+ if (rm2 == 0)
+ rm2 = fpzeroreg;
+ ra = (extru(ir,fpraupos,3)<<3)|extru(ir,fpralpos,3);
+ if (ra == 0)
+ ra = fpzeroreg;
+ t = ((extru(ir,fptpos,5)<<1)|(extru(ir,fpxtpos,1)));
+ if (t == 0)
+ return(MAJOR_2E_EXCP);
+
+ if (extru(ir,fpfusedsubop,1)) { /* fmpyfadd or fmpynfadd? */
+ return(sgl_fmpynfadd(&fpregs[rm1], &fpregs[rm2],
+ &fpregs[ra], &fpregs[0], &fpregs[t]));
+ } else {
+ return(sgl_fmpyfadd(&fpregs[rm1], &fpregs[rm2],
+ &fpregs[ra], &fpregs[0], &fpregs[t]));
+ }
+ } /* end SGL */
+}
+
+/*
+ * update_status_cbit
+ *
+ * This routine returns the correct FP status register value in
+ * *status, based on the C-bit & V-bit returned by the FCMP
+ * emulation routine in new_status. The architecture type
+ * (PA83, PA89 or PA2.0) is available in fpu_type. The y_field
+ * and the architecture type are used to determine what flavor
+ * of FCMP is being emulated.
+ */
+static void
+update_status_cbit(u_int *status, u_int new_status, u_int fpu_type, u_int y_field)
+{
+ /*
+ * For PA89 FPU's which implement the Compare Queue and
+ * for PA2.0 FPU's, update the Compare Queue if the y-field = 0,
+ * otherwise update the specified bit in the Compare Array.
+ * Note that the y-field will always be 0 for non-PA2.0 FPU's.
+ */
+ if ((fpu_type & TIMEX_EXTEN_FLAG) ||
+ (fpu_type & ROLEX_EXTEN_FLAG) ||
+ (fpu_type & PA2_0_FPU_FLAG)) {
+ if (y_field == 0) {
+ *status = ((*status & 0x04000000) >> 5) | /* old Cbit */
+ ((*status & 0x003ff000) >> 1) | /* old CQ */
+ (new_status & 0xffc007ff); /* all other bits*/
+ } else {
+ *status = (*status & 0x04000000) | /* old Cbit */
+ ((new_status & 0x04000000) >> (y_field+4)) |
+ (new_status & ~0x04000000 & /* other bits */
+ ~(0x04000000 >> (y_field+4)));
+ }
+ }
+ /* if PA83, just update the C-bit */
+ else {
+ *status = new_status;
+ }
+}
diff --git a/arch/parisc/math-emu/frnd.c b/arch/parisc/math-emu/frnd.c
new file mode 100644
index 00000000..904b3844
--- /dev/null
+++ b/arch/parisc/math-emu/frnd.c
@@ -0,0 +1,252 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * Purpose:
+ * Single Floating-point Round to Integer
+ * Double Floating-point Round to Integer
+ * Quad Floating-point Round to Integer (returns unimplemented)
+ *
+ * External Interfaces:
+ * dbl_frnd(srcptr,nullptr,dstptr,status)
+ * sgl_frnd(srcptr,nullptr,dstptr,status)
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/*
+ * Single Floating-point Round to Integer
+ */
+
+/*ARGSUSED*/
+int
+sgl_frnd(sgl_floating_point *srcptr,
+ unsigned int *nullptr,
+ sgl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register unsigned int src, result;
+ register int src_exponent;
+ register boolean inexact = FALSE;
+
+ src = *srcptr;
+ /*
+ * check source operand for NaN or infinity
+ */
+ if ((src_exponent = Sgl_exponent(src)) == SGL_INFINITY_EXPONENT) {
+ /*
+ * is signaling NaN?
+ */
+ if (Sgl_isone_signaling(src)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(src);
+ }
+ /*
+ * return quiet NaN or infinity
+ */
+ *dstptr = src;
+ return(NOEXCEPTION);
+ }
+ /*
+ * Need to round?
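+ * (an unbiased exponent of at least SGL_P - 1 means every mantissa
+ * bit is already integral, so the operand is returned unchanged)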
+ */
+ if ((src_exponent -= SGL_BIAS) >= SGL_P - 1) {
+ *dstptr = src;
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ Sgl_clear_exponent_set_hidden(src);
+ result = src;
+ Sgl_rightshift(result,(SGL_P-1) - (src_exponent));
+ /* check for inexact */
+ if (Sgl_isinexact_to_fix(src,src_exponent)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(src)) Sgl_increment(result);
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(src)) Sgl_increment(result);
+ break;
+ case ROUNDNEAREST:
+ if (Sgl_isone_roundbit(src,src_exponent))
+ if (Sgl_isone_stickybit(src,src_exponent)
+ || (Sgl_isone_lowmantissa(result)))
+ Sgl_increment(result);
+ }
+ }
+ Sgl_leftshift(result,(SGL_P-1) - (src_exponent));
+ if (Sgl_isone_hiddenoverflow(result))
+ Sgl_set_exponent(result,src_exponent + (SGL_BIAS+1));
+ else Sgl_set_exponent(result,src_exponent + SGL_BIAS);
+ }
+ else {
+ result = src; /* set sign */
+ Sgl_setzero_exponentmantissa(result);
+ /* check for inexact */
+ if (Sgl_isnotzero_exponentmantissa(src)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(src))
+ Sgl_set_exponent(result,SGL_BIAS);
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(src))
+ Sgl_set_exponent(result,SGL_BIAS);
+ break;
+ case ROUNDNEAREST:
+ if (src_exponent == -1)
+ if (Sgl_isnotzero_mantissa(src))
+ Sgl_set_exponent(result,SGL_BIAS);
+ }
+ }
+ }
+ *dstptr = result;
+ if (inexact) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ return(NOEXCEPTION);
+}
+
+/*
+ * Double Floating-point Round to Integer
+ */
+
+/*ARGSUSED*/
+int
+dbl_frnd(
+ dbl_floating_point *srcptr,
+ unsigned int *nullptr,
+ dbl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register unsigned int srcp1, srcp2, resultp1, resultp2;
+ register int src_exponent;
+ register boolean inexact = FALSE;
+
+ Dbl_copyfromptr(srcptr,srcp1,srcp2);
+ /*
+ * check source operand for NaN or infinity
+ */
+ if ((src_exponent = Dbl_exponent(srcp1)) == DBL_INFINITY_EXPONENT) {
+ /*
+ * is signaling NaN?
+ */
+ if (Dbl_isone_signaling(srcp1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Dbl_set_quiet(srcp1);
+ }
+ /*
+ * return quiet NaN or infinity
+ */
+ Dbl_copytoptr(srcp1,srcp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Need to round?
+ */
+ if ((src_exponent -= DBL_BIAS) >= DBL_P - 1) {
+ Dbl_copytoptr(srcp1,srcp2,dstptr);
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate result
+ */
+ if (src_exponent >= 0) {
+ Dbl_clear_exponent_set_hidden(srcp1);
+ resultp1 = srcp1;
+ resultp2 = srcp2;
+ Dbl_rightshift(resultp1,resultp2,(DBL_P-1) - (src_exponent));
+ /* check for inexact */
+ if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(srcp1))
+ Dbl_increment(resultp1,resultp2);
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(srcp1))
+ Dbl_increment(resultp1,resultp2);
+ break;
+ case ROUNDNEAREST:
+ if (Dbl_isone_roundbit(srcp1,srcp2,src_exponent))
+ if (Dbl_isone_stickybit(srcp1,srcp2,src_exponent)
+ || (Dbl_isone_lowmantissap2(resultp2)))
+ Dbl_increment(resultp1,resultp2);
+ }
+ }
+ Dbl_leftshift(resultp1,resultp2,(DBL_P-1) - (src_exponent));
+ if (Dbl_isone_hiddenoverflow(resultp1))
+ Dbl_set_exponent(resultp1,src_exponent + (DBL_BIAS+1));
+ else Dbl_set_exponent(resultp1,src_exponent + DBL_BIAS);
+ }
+ else {
+ resultp1 = srcp1; /* set sign */
+ Dbl_setzero_exponentmantissa(resultp1,resultp2);
+ /* check for inexact */
+ if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+ inexact = TRUE;
+ /* round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Dbl_iszero_sign(srcp1))
+ Dbl_set_exponent(resultp1,DBL_BIAS);
+ break;
+ case ROUNDMINUS:
+ if (Dbl_isone_sign(srcp1))
+ Dbl_set_exponent(resultp1,DBL_BIAS);
+ break;
+ case ROUNDNEAREST:
+ if (src_exponent == -1)
+ if (Dbl_isnotzero_mantissa(srcp1,srcp2))
+ Dbl_set_exponent(resultp1,DBL_BIAS);
+ }
+ }
+ }
+ Dbl_copytoptr(resultp1,resultp2,dstptr);
+ if (inexact) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ return(NOEXCEPTION);
+}
diff --git a/arch/parisc/math-emu/hppa.h b/arch/parisc/math-emu/hppa.h
new file mode 100644
index 00000000..5d3d52f7
--- /dev/null
+++ b/arch/parisc/math-emu/hppa.h
@@ -0,0 +1,42 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifdef __NO_PA_HDRS
+ PA header file -- do not include this header file for non-PA builds.
+#endif
+
+
+/* amount is assumed to be a constant between 0 and 32 (non-inclusive) */
+#define Shiftdouble(left,right,amount,dest) \
+ /* int left, right, amount, dest; */ \
+ dest = ((left) << (32-(amount))) | ((unsigned int)(right) >> (amount))
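+
+/*
+ * For example, Shiftdouble(hi,lo,4,dest) yields (hi << 28) | (lo >> 4),
+ * i.e. the low 32 bits of the 64-bit quantity hi:lo shifted right by
+ * four bits.
+ */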
+
+/* amount must be less than 32 */
+#define Variableshiftdouble(left,right,amount,dest) \
+ /* unsigned int left, right; int amount, dest; */ \
+ if (amount == 0) dest = right; \
+ else dest = ((((unsigned) left)&0x7fffffff) << (32-(amount))) | \
+ ((unsigned) right >> (amount))
+
+/* amount must be between 0 and 32 (non-inclusive) */
+#define Variable_shift_double(left,right,amount,dest) \
+ /* unsigned int left, right; int amount, dest; */ \
+ dest = (left << (32-(amount))) | ((unsigned) right >> (amount))
diff --git a/arch/parisc/math-emu/math-emu.h b/arch/parisc/math-emu/math-emu.h
new file mode 100644
index 00000000..3a99f592
--- /dev/null
+++ b/arch/parisc/math-emu/math-emu.h
@@ -0,0 +1,27 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _PARISC_MATH_EMU_H
+#define _PARISC_MATH_EMU_H
+
+#include <asm/ptrace.h>
+extern int handle_fpe(struct pt_regs *regs);
+
+#endif
diff --git a/arch/parisc/math-emu/sfadd.c b/arch/parisc/math-emu/sfadd.c
new file mode 100644
index 00000000..f802cd6c
--- /dev/null
+++ b/arch/parisc/math-emu/sfadd.c
@@ -0,0 +1,518 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/sfadd.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Single_add: add two single precision values.
+ *
+ * External Interfaces:
+ * sgl_fadd(leftptr, rightptr, dstptr, status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ * <<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+
+/*
+ * Single_add: add two single precision values.
+ */
+int
+sgl_fadd(
+ sgl_floating_point *leftptr,
+ sgl_floating_point *rightptr,
+ sgl_floating_point *dstptr,
+ unsigned int *status)
+ {
+ register unsigned int left, right, result, extent;
+ register unsigned int signless_upper_left, signless_upper_right, save;
+
+
+ register int result_exponent, right_exponent, diff_exponent;
+ register int sign_save, jumpsize;
+ register boolean inexact = FALSE;
+ register boolean underflowtrap;
+
+ /* Create local copies of the numbers */
+ left = *leftptr;
+ right = *rightptr;
+
+ /* A zero "save" helps discover equal operands (for later), *
+ * and is used in swapping operands (if needed). */
+ Sgl_xortointp1(left,right,/*to*/save);
+
+ /*
+ * check first operand for NaN's or infinity
+ */
+ if ((result_exponent = Sgl_exponent(left)) == SGL_INFINITY_EXPONENT)
+ {
+ if (Sgl_iszero_mantissa(left))
+ {
+ if (Sgl_isnotnan(right))
+ {
+ if (Sgl_isinfinity(right) && save!=0)
+ {
+ /*
+ * invalid since operands are opposite-signed infinities
+ */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * return infinity
+ */
+ *dstptr = left;
+ return(NOEXCEPTION);
+ }
+ }
+ else
+ {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(left))
+ {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(left);
+ }
+ /*
+ * is second operand a signaling NaN?
+ */
+ else if (Sgl_is_signalingnan(right))
+ {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(right);
+ *dstptr = right;
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ *dstptr = left;
+ return(NOEXCEPTION);
+ }
+ } /* End left NaN or Infinity processing */
+ /*
+ * check second operand for NaN's or infinity
+ */
+ if (Sgl_isinfinity_exponent(right))
+ {
+ if (Sgl_iszero_mantissa(right))
+ {
+ /* return infinity */
+ *dstptr = right;
+ return(NOEXCEPTION);
+ }
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(right))
+ {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(right);
+ }
+ /*
+ * return quiet NaN
+ */
+ *dstptr = right;
+ return(NOEXCEPTION);
+ } /* End right NaN or Infinity processing */
+
+ /* Invariant: Must be dealing with finite numbers */
+
+ /* Compare operands by removing the sign */
+ Sgl_copytoint_exponentmantissa(left,signless_upper_left);
+ Sgl_copytoint_exponentmantissa(right,signless_upper_right);
+
+ /* sign difference selects add or sub operation. */
+ if(Sgl_ismagnitudeless(signless_upper_left,signless_upper_right))
+ {
+ /* Set the left operand to the larger one by XOR swap *
+ * First finish the first word using "save" */
+ Sgl_xorfromintp1(save,right,/*to*/right);
+ Sgl_xorfromintp1(save,left,/*to*/left);
+ result_exponent = Sgl_exponent(left);
+ }
+ /* Invariant: left is not smaller than right. */
+
+ if((right_exponent = Sgl_exponent(right)) == 0)
+ {
+ /* Denormalized operands. First look for zeroes */
+ if(Sgl_iszero_mantissa(right))
+ {
+ /* right is zero */
+ if(Sgl_iszero_exponentmantissa(left))
+ {
+ /* Both operands are zeros */
+ if(Is_rounding_mode(ROUNDMINUS))
+ {
+ Sgl_or_signs(left,/*with*/right);
+ }
+ else
+ {
+ Sgl_and_signs(left,/*with*/right);
+ }
+ }
+ else
+ {
+ /* Left is not a zero and must be the result. Trapped
+ * underflows are signaled if left is denormalized. Result
+ * is always exact. */
+ if( (result_exponent == 0) && Is_underflowtrap_enabled() )
+ {
+ /* need to normalize results mantissa */
+ sign_save = Sgl_signextendedsign(left);
+ Sgl_leftshiftby1(left);
+ Sgl_normalize(left,result_exponent);
+ Sgl_set_sign(left,/*using*/sign_save);
+ Sgl_setwrapped_exponent(left,result_exponent,unfl);
+ *dstptr = left;
+ return(UNDERFLOWEXCEPTION);
+ }
+ }
+ *dstptr = left;
+ return(NOEXCEPTION);
+ }
+
+ /* Neither are zeroes */
+ Sgl_clear_sign(right); /* Exponent is already cleared */
+ if(result_exponent == 0 )
+ {
+ /* Both operands are denormalized. The result must be exact
+ * and is simply calculated. A sum could become normalized and a
+ * difference could cancel to a true zero. */
+ if( (/*signed*/int) save < 0 )
+ {
+ Sgl_subtract(left,/*minus*/right,/*into*/result);
+ if(Sgl_iszero_mantissa(result))
+ {
+ if(Is_rounding_mode(ROUNDMINUS))
+ {
+ Sgl_setone_sign(result);
+ }
+ else
+ {
+ Sgl_setzero_sign(result);
+ }
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ else
+ {
+ Sgl_addition(left,right,/*into*/result);
+ if(Sgl_isone_hidden(result))
+ {
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ if(Is_underflowtrap_enabled())
+ {
+ /* need to normalize result */
+ sign_save = Sgl_signextendedsign(result);
+ Sgl_leftshiftby1(result);
+ Sgl_normalize(result,result_exponent);
+ Sgl_set_sign(result,/*using*/sign_save);
+ Sgl_setwrapped_exponent(result,result_exponent,unfl);
+ *dstptr = result;
+ return(UNDERFLOWEXCEPTION);
+ }
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ right_exponent = 1; /* Set exponent to reflect different bias
+ * with denormalized numbers. */
+ }
+ else
+ {
+ Sgl_clear_signexponent_set_hidden(right);
+ }
+ Sgl_clear_exponent_set_hidden(left);
+ diff_exponent = result_exponent - right_exponent;
+
+ /*
+ * Special case alignment of operands that would force alignment
+ * beyond the extent of the extension. A further optimization
+ * could special case this but only reduces the path length for this
+ * infrequent case.
+ */
+ if(diff_exponent > SGL_THRESHOLD)
+ {
+ diff_exponent = SGL_THRESHOLD;
+ }
+
+ /* Align right operand by shifting to right */
+ Sgl_right_align(/*operand*/right,/*shifted by*/diff_exponent,
+ /*and lower to*/extent);
+
+ /* Treat sum and difference of the operands separately. */
+ if( (/*signed*/int) save < 0 )
+ {
+ /*
+ * Difference of the two operands. There can be no overflow. A
+ * borrow can occur out of the hidden bit and force a post
+ * normalization phase.
+ */
+ Sgl_subtract_withextension(left,/*minus*/right,/*with*/extent,/*into*/result);
+ if(Sgl_iszero_hidden(result))
+ {
+ /* Handle normalization */
+ /* A straightforward algorithm would now shift the result
+ * and extension left until the hidden bit becomes one. Not
+ * all of the extension bits need to participate in the shift.
+ * Only the two most significant bits (round and guard) are
+ * needed. If only a single shift is needed then the guard
+ * bit becomes a significant low order bit and the extension
+ * must participate in the rounding. If more than a single
+ * shift is needed, then all bits to the right of the guard
+ * bit are zeros, and the guard bit may or may not be zero. */
+ sign_save = Sgl_signextendedsign(result);
+ Sgl_leftshiftby1_withextent(result,extent,result);
+
+ /* Need to check for a zero result. The sign and exponent
+ * fields have already been zeroed. The more efficient test
+ * of the full object can be used.
+ */
+ if(Sgl_iszero(result))
+ /* Must have been "x-x" or "x+(-x)". */
+ {
+ if(Is_rounding_mode(ROUNDMINUS)) Sgl_setone_sign(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ result_exponent--;
+ /* Look to see if normalization is finished. */
+ if(Sgl_isone_hidden(result))
+ {
+ if(result_exponent==0)
+ {
+ /* Denormalized, exponent should be zero. Left operand *
+ * was normalized, so extent (guard, round) was zero */
+ goto underflow;
+ }
+ else
+ {
+ /* No further normalization is needed. */
+ Sgl_set_sign(result,/*using*/sign_save);
+ Ext_leftshiftby1(extent);
+ goto round;
+ }
+ }
+
+ /* Check for denormalized, exponent should be zero. Left *
+ * operand was normalized, so extent (guard, round) was zero */
+ if(!(underflowtrap = Is_underflowtrap_enabled()) &&
+ result_exponent==0) goto underflow;
+
+ /* Shift extension to complete one bit of normalization and
+ * update exponent. */
+ Ext_leftshiftby1(extent);
+
+ /* Discover first one bit to determine shift amount. Use a
+ * modified binary search. We have already shifted the result
+ * one position right and still not found a one so the remainder
+ * of the extension must be zero and simplifies rounding. */
+ /* Scan bytes */
+ while(Sgl_iszero_hiddenhigh7mantissa(result))
+ {
+ Sgl_leftshiftby8(result);
+ if((result_exponent -= 8) <= 0 && !underflowtrap)
+ goto underflow;
+ }
+ /* Now narrow it down to the nibble */
+ if(Sgl_iszero_hiddenhigh3mantissa(result))
+ {
+ /* The lower nibble contains the normalizing one */
+ Sgl_leftshiftby4(result);
+ if((result_exponent -= 4) <= 0 && !underflowtrap)
+ goto underflow;
+ }
+ /* Select case where the first bit is set (already normalized)
+ * otherwise select the proper shift. */
+ if((jumpsize = Sgl_hiddenhigh3mantissa(result)) > 7)
+ {
+ /* Already normalized */
+ if(result_exponent <= 0) goto underflow;
+ Sgl_set_sign(result,/*using*/sign_save);
+ Sgl_set_exponent(result,/*using*/result_exponent);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ Sgl_sethigh4bits(result,/*using*/sign_save);
+ switch(jumpsize)
+ {
+ case 1:
+ {
+ Sgl_leftshiftby3(result);
+ result_exponent -= 3;
+ break;
+ }
+ case 2:
+ case 3:
+ {
+ Sgl_leftshiftby2(result);
+ result_exponent -= 2;
+ break;
+ }
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ {
+ Sgl_leftshiftby1(result);
+ result_exponent -= 1;
+ break;
+ }
+ }
+ if(result_exponent > 0)
+ {
+ Sgl_set_exponent(result,/*using*/result_exponent);
+ *dstptr = result;
+ return(NOEXCEPTION); /* Sign bit is already set */
+ }
+ /* Fixup potential underflows */
+ underflow:
+ if(Is_underflowtrap_enabled())
+ {
+ Sgl_set_sign(result,sign_save);
+ Sgl_setwrapped_exponent(result,result_exponent,unfl);
+ *dstptr = result;
+ /* inexact = FALSE; */
+ return(UNDERFLOWEXCEPTION);
+ }
+ /*
+ * Since we cannot get an inexact denormalized result,
+ * we can now return.
+ */
+ Sgl_right_align(result,/*by*/(1-result_exponent),extent);
+ Sgl_clear_signexponent(result);
+ Sgl_set_sign(result,sign_save);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ } /* end if(hidden...)... */
+ /* Fall through and round */
+ } /* end if(save < 0)... */
+ else
+ {
+ /* Add magnitudes */
+ Sgl_addition(left,right,/*to*/result);
+ if(Sgl_isone_hiddenoverflow(result))
+ {
+ /* Prenormalization required. */
+ Sgl_rightshiftby1_withextent(result,extent,extent);
+ Sgl_arithrightshiftby1(result);
+ result_exponent++;
+ } /* end if hiddenoverflow... */
+ } /* end else ...add magnitudes... */
+
+ /* Round the result. If the extension is all zeros, then the result is
+ * exact. Otherwise round in the correct direction. No underflow is
+ * possible. If a postnormalization is necessary, then the mantissa is
+ * all zeros so no shift is needed. */
+ round:
+ if(Ext_isnotzero(extent))
+ {
+ inexact = TRUE;
+ switch(Rounding_mode())
+ {
+ case ROUNDNEAREST: /* The default. */
+ if(Ext_isone_sign(extent))
+ {
+ /* at least 1/2 ulp */
+ if(Ext_isnotzero_lower(extent) ||
+ Sgl_isone_lowmantissa(result))
+ {
+ /* either exactly half way and odd or more than 1/2ulp */
+ Sgl_increment(result);
+ }
+ }
+ break;
+
+ case ROUNDPLUS:
+ if(Sgl_iszero_sign(result))
+ {
+ /* Round up positive results */
+ Sgl_increment(result);
+ }
+ break;
+
+ case ROUNDMINUS:
+ if(Sgl_isone_sign(result))
+ {
+ /* Round down negative results */
+ Sgl_increment(result);
+ }
+
+ case ROUNDZERO:;
+ /* truncate is simple */
+ } /* end switch... */
+ if(Sgl_isone_hiddenoverflow(result)) result_exponent++;
+ }
+ if(result_exponent == SGL_INFINITY_EXPONENT)
+ {
+ /* Overflow */
+ if(Is_overflowtrap_enabled())
+ {
+ Sgl_setwrapped_exponent(result,result_exponent,ovfl);
+ *dstptr = result;
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(OVERFLOWEXCEPTION);
+ }
+ else
+ {
+ Set_overflowflag();
+ inexact = TRUE;
+ Sgl_setoverflow(result);
+ }
+ }
+ else Sgl_set_exponent(result,result_exponent);
+ *dstptr = result;
+ if(inexact)
+ if(Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(NOEXCEPTION);
+ }
diff --git a/arch/parisc/math-emu/sfcmp.c b/arch/parisc/math-emu/sfcmp.c
new file mode 100644
index 00000000..1466fb46
--- /dev/null
+++ b/arch/parisc/math-emu/sfcmp.c
@@ -0,0 +1,155 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/sfcmp.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * sgl_cmp: compare two values
+ *
+ * External Interfaces:
+ * sgl_fcmp(leftptr, rightptr, cond, status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ * <<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+
+/*
+ * sgl_cmp: compare two values
+ */
+int
+sgl_fcmp (sgl_floating_point * leftptr, sgl_floating_point * rightptr,
+	unsigned int cond, /* The predicate to be tested */
+	unsigned int *status)
+ {
+ register unsigned int left, right;
+ register int xorresult;
+
+ /* Create local copies of the numbers */
+ left = *leftptr;
+ right = *rightptr;
+
+ /*
+ * Test for NaN
+ */
+ if( (Sgl_exponent(left) == SGL_INFINITY_EXPONENT)
+ || (Sgl_exponent(right) == SGL_INFINITY_EXPONENT) )
+ {
+ /* Check if a NaN is involved. Signal an invalid exception when
+ * comparing a signaling NaN or when comparing quiet NaNs and the
+ * low bit of the condition is set */
+ if( ( (Sgl_exponent(left) == SGL_INFINITY_EXPONENT)
+ && Sgl_isnotzero_mantissa(left)
+ && (Exception(cond) || Sgl_isone_signaling(left)))
+ ||
+ ( (Sgl_exponent(right) == SGL_INFINITY_EXPONENT)
+ && Sgl_isnotzero_mantissa(right)
+ && (Exception(cond) || Sgl_isone_signaling(right)) ) )
+ {
+ if( Is_invalidtrap_enabled() ) {
+ Set_status_cbit(Unordered(cond));
+ return(INVALIDEXCEPTION);
+ }
+ else Set_invalidflag();
+ Set_status_cbit(Unordered(cond));
+ return(NOEXCEPTION);
+ }
+ /* All the exceptional conditions are handled, now special case
+ NaN compares */
+ else if( ((Sgl_exponent(left) == SGL_INFINITY_EXPONENT)
+ && Sgl_isnotzero_mantissa(left))
+ ||
+ ((Sgl_exponent(right) == SGL_INFINITY_EXPONENT)
+ && Sgl_isnotzero_mantissa(right)) )
+ {
+ /* NaNs always compare unordered. */
+ Set_status_cbit(Unordered(cond));
+ return(NOEXCEPTION);
+ }
+ /* infinities will drop down to the normal compare mechanisms */
+ }
+ /* First compare for unequal signs => less or greater or
+ * special equal case */
+ Sgl_xortointp1(left,right,xorresult);
+ if( xorresult < 0 )
+ {
+ /* left negative => less, left positive => greater.
+ * equal is possible if both operands are zeros. */
+ if( Sgl_iszero_exponentmantissa(left)
+ && Sgl_iszero_exponentmantissa(right) )
+ {
+ Set_status_cbit(Equal(cond));
+ }
+ else if( Sgl_isone_sign(left) )
+ {
+ Set_status_cbit(Lessthan(cond));
+ }
+ else
+ {
+ Set_status_cbit(Greaterthan(cond));
+ }
+ }
+ /* Signs are the same. Treat negative numbers separately
+ * from the positives because of the reversed sense. */
+ else if( Sgl_all(left) == Sgl_all(right) )
+ {
+ Set_status_cbit(Equal(cond));
+ }
+ else if( Sgl_iszero_sign(left) )
+ {
+ /* Positive compare */
+ if( Sgl_all(left) < Sgl_all(right) )
+ {
+ Set_status_cbit(Lessthan(cond));
+ }
+ else
+ {
+ Set_status_cbit(Greaterthan(cond));
+ }
+ }
+ else
+ {
+ /* Negative compare. Signed or unsigned compares
+ * both work the same. That distinction is only
+ * important when the sign bits differ. */
+ if( Sgl_all(left) > Sgl_all(right) )
+ {
+ Set_status_cbit(Lessthan(cond));
+ }
+ else
+ {
+ Set_status_cbit(Greaterthan(cond));
+ }
+ }
+ return(NOEXCEPTION);
+ }
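
For reference, sgl_fcmp above orders the two operands by their raw bit patterns: unequal signs decide the result directly (with +0 and -0 treated as equal), equal-sign positive values compare in natural unsigned order, and equal-sign negative values compare in reversed order. The standalone sketch below mirrors that sign/magnitude ordering; NaN handling and the condition-bit interface are omitted, and cmp_bits with its -1/0/+1 return convention is an illustrative assumption.

#include <stdio.h>
#include <string.h>

static int cmp_bits(float a, float b)
{
    unsigned int l, r;
    memcpy(&l, &a, sizeof l);
    memcpy(&r, &b, sizeof r);

    if ((l ^ r) & 0x80000000u) {              /* signs differ */
        if (((l | r) & 0x7fffffffu) == 0)     /* +0 and -0 compare equal */
            return 0;
        return (l & 0x80000000u) ? -1 : 1;    /* negative operand is smaller */
    }
    if (l == r)
        return 0;
    if (!(l & 0x80000000u))                   /* both positive: natural order */
        return l < r ? -1 : 1;
    return l > r ? -1 : 1;                    /* both negative: reversed order */
}

int main(void)
{
    /* expect: -1 0 -1 */
    printf("%d %d %d\n", cmp_bits(-1.5f, 2.0f),
           cmp_bits(0.0f, -0.0f), cmp_bits(-3.0f, -2.0f));
    return 0;
}
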
diff --git a/arch/parisc/math-emu/sfdiv.c b/arch/parisc/math-emu/sfdiv.c
new file mode 100644
index 00000000..3e2a4d6d
--- /dev/null
+++ b/arch/parisc/math-emu/sfdiv.c
@@ -0,0 +1,392 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/sfdiv.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Single Precision Floating-point Divide
+ *
+ * External Interfaces:
+ * sgl_fdiv(srcptr1,srcptr2,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ * <<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+
+/*
+ * Single Precision Floating-point Divide
+ */
+
+int
+sgl_fdiv (sgl_floating_point * srcptr1, sgl_floating_point * srcptr2,
+ sgl_floating_point * dstptr, unsigned int *status)
+{
+ register unsigned int opnd1, opnd2, opnd3, result;
+ register int dest_exponent, count;
+ register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
+ boolean is_tiny;
+
+ opnd1 = *srcptr1;
+ opnd2 = *srcptr2;
+ /*
+ * set sign bit of result
+ */
+ if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2)) Sgl_setnegativezero(result);
+ else Sgl_setzero(result);
+ /*
+ * check first operand for NaN's or infinity
+ */
+ if (Sgl_isinfinity_exponent(opnd1)) {
+ if (Sgl_iszero_mantissa(opnd1)) {
+ if (Sgl_isnotnan(opnd2)) {
+ if (Sgl_isinfinity(opnd2)) {
+ /*
+ * invalid since both operands
+ * are infinity
+ */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * return infinity
+ */
+ Sgl_setinfinity_exponentmantissa(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(opnd1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd1);
+ }
+ /*
+ * is second operand a signaling NaN?
+ */
+ else if (Sgl_is_signalingnan(opnd2)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd2);
+ *dstptr = opnd2;
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ *dstptr = opnd1;
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * check second operand for NaN's or infinity
+ */
+ if (Sgl_isinfinity_exponent(opnd2)) {
+ if (Sgl_iszero_mantissa(opnd2)) {
+ /*
+ * return zero
+ */
+ Sgl_setzero_exponentmantissa(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(opnd2)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd2);
+ }
+ /*
+ * return quiet NaN
+ */
+ *dstptr = opnd2;
+ return(NOEXCEPTION);
+ }
+ /*
+ * check for division by zero
+ */
+ if (Sgl_iszero_exponentmantissa(opnd2)) {
+ if (Sgl_iszero_exponentmantissa(opnd1)) {
+ /* invalid since both operands are zero */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ if (Is_divisionbyzerotrap_enabled())
+ return(DIVISIONBYZEROEXCEPTION);
+ Set_divisionbyzeroflag();
+ Sgl_setinfinity_exponentmantissa(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate exponent
+ */
+ dest_exponent = Sgl_exponent(opnd1) - Sgl_exponent(opnd2) + SGL_BIAS;
+
+ /*
+ * Generate mantissa
+ */
+ if (Sgl_isnotzero_exponent(opnd1)) {
+ /* set hidden bit */
+ Sgl_clear_signexponent_set_hidden(opnd1);
+ }
+ else {
+ /* check for zero */
+ if (Sgl_iszero_mantissa(opnd1)) {
+ Sgl_setzero_exponentmantissa(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /* is denormalized; want to normalize */
+ Sgl_clear_signexponent(opnd1);
+ Sgl_leftshiftby1(opnd1);
+ Sgl_normalize(opnd1,dest_exponent);
+ }
+ /* opnd2 needs to have hidden bit set with msb in hidden bit */
+ if (Sgl_isnotzero_exponent(opnd2)) {
+ Sgl_clear_signexponent_set_hidden(opnd2);
+ }
+ else {
+ /* is denormalized; want to normalize */
+ Sgl_clear_signexponent(opnd2);
+ Sgl_leftshiftby1(opnd2);
+ while(Sgl_iszero_hiddenhigh7mantissa(opnd2)) {
+ Sgl_leftshiftby8(opnd2);
+ dest_exponent += 8;
+ }
+ if(Sgl_iszero_hiddenhigh3mantissa(opnd2)) {
+ Sgl_leftshiftby4(opnd2);
+ dest_exponent += 4;
+ }
+ while(Sgl_iszero_hidden(opnd2)) {
+ Sgl_leftshiftby1(opnd2);
+ dest_exponent += 1;
+ }
+ }
+
+ /* Divide the source mantissas */
+
+ /*
+ * A non_restoring divide algorithm is used.
+ */
+ Sgl_subtract(opnd1,opnd2,opnd1);
+ Sgl_setzero(opnd3);
+ for (count=1;count<=SGL_P && Sgl_all(opnd1);count++) {
+ Sgl_leftshiftby1(opnd1);
+ Sgl_leftshiftby1(opnd3);
+ if (Sgl_iszero_sign(opnd1)) {
+ Sgl_setone_lowmantissa(opnd3);
+ Sgl_subtract(opnd1,opnd2,opnd1);
+ }
+ else Sgl_addition(opnd1,opnd2,opnd1);
+ }
+ if (count <= SGL_P) {
+ Sgl_leftshiftby1(opnd3);
+ Sgl_setone_lowmantissa(opnd3);
+ Sgl_leftshift(opnd3,SGL_P-count);
+ if (Sgl_iszero_hidden(opnd3)) {
+ Sgl_leftshiftby1(opnd3);
+ dest_exponent--;
+ }
+ }
+ else {
+ if (Sgl_iszero_hidden(opnd3)) {
+ /* need to get one more bit of result */
+ Sgl_leftshiftby1(opnd1);
+ Sgl_leftshiftby1(opnd3);
+ if (Sgl_iszero_sign(opnd1)) {
+ Sgl_setone_lowmantissa(opnd3);
+ Sgl_subtract(opnd1,opnd2,opnd1);
+ }
+ else Sgl_addition(opnd1,opnd2,opnd1);
+ dest_exponent--;
+ }
+ if (Sgl_iszero_sign(opnd1)) guardbit = TRUE;
+ stickybit = Sgl_all(opnd1);
+ }
+ inexact = guardbit | stickybit;
+
+ /*
+ * round result
+ */
+ if (inexact && (dest_exponent > 0 || Is_underflowtrap_enabled())) {
+ Sgl_clear_signexponent(opnd3);
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(result))
+ Sgl_increment_mantissa(opnd3);
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(result))
+ Sgl_increment_mantissa(opnd3);
+ break;
+ case ROUNDNEAREST:
+ if (guardbit) {
+ if (stickybit || Sgl_isone_lowmantissa(opnd3))
+ Sgl_increment_mantissa(opnd3);
+ }
+ }
+ if (Sgl_isone_hidden(opnd3)) dest_exponent++;
+ }
+ Sgl_set_mantissa(result,opnd3);
+
+ /*
+ * Test for overflow
+ */
+ if (dest_exponent >= SGL_INFINITY_EXPONENT) {
+ /* trap if OVERFLOWTRAP enabled */
+ if (Is_overflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Sgl_setwrapped_exponent(result,dest_exponent,ovfl);
+ *dstptr = result;
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(OVERFLOWEXCEPTION);
+ }
+ Set_overflowflag();
+ /* set result to infinity or largest number */
+ Sgl_setoverflow(result);
+ inexact = TRUE;
+ }
+ /*
+ * Test for underflow
+ */
+ else if (dest_exponent <= 0) {
+ /* trap if UNDERFLOWTRAP enabled */
+ if (Is_underflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Sgl_setwrapped_exponent(result,dest_exponent,unfl);
+ *dstptr = result;
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(UNDERFLOWEXCEPTION);
+ }
+
+ /* Determine if should set underflow flag */
+ is_tiny = TRUE;
+ if (dest_exponent == 0 && inexact) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(result)) {
+ Sgl_increment(opnd3);
+ if (Sgl_isone_hiddenoverflow(opnd3))
+ is_tiny = FALSE;
+ Sgl_decrement(opnd3);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(result)) {
+ Sgl_increment(opnd3);
+ if (Sgl_isone_hiddenoverflow(opnd3))
+ is_tiny = FALSE;
+ Sgl_decrement(opnd3);
+ }
+ break;
+ case ROUNDNEAREST:
+ if (guardbit && (stickybit ||
+ Sgl_isone_lowmantissa(opnd3))) {
+ Sgl_increment(opnd3);
+ if (Sgl_isone_hiddenoverflow(opnd3))
+ is_tiny = FALSE;
+ Sgl_decrement(opnd3);
+ }
+ break;
+ }
+ }
+
+ /*
+ * denormalize result or set to signed zero
+ */
+ stickybit = inexact;
+ Sgl_denormalize(opnd3,dest_exponent,guardbit,stickybit,inexact);
+
+ /* return rounded number */
+ if (inexact) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(result)) {
+ Sgl_increment(opnd3);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(result)) {
+ Sgl_increment(opnd3);
+ }
+ break;
+ case ROUNDNEAREST:
+ if (guardbit && (stickybit ||
+ Sgl_isone_lowmantissa(opnd3))) {
+ Sgl_increment(opnd3);
+ }
+ break;
+ }
+ if (is_tiny) Set_underflowflag();
+ }
+ Sgl_set_exponentmantissa(result,opnd3);
+ }
+ else Sgl_set_exponent(result,dest_exponent);
+ *dstptr = result;
+ /* check for inexact */
+ if (inexact) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ return(NOEXCEPTION);
+}
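
For reference, the quotient loop in sgl_fdiv above is the non-restoring divide its comment names: the partial remainder is shifted every step and the divisor is subtracted or added back according to its sign, with a quotient bit recorded whenever the remainder stays non-negative. The sketch below runs the same recurrence on plain integers; for 0 < num < 2*den (which normalized mantissas guarantee) the recorded bits equal floor(num * 2^(NBITS-1) / den). The kernel loop's early exit on a zero remainder and its guard/sticky handling are omitted, and all names are illustrative.

#include <stdio.h>

#define NBITS 8

static unsigned int nr_divide(unsigned int num, unsigned int den)
{
    int rem = (int)num - (int)den;     /* initial subtraction, as above */
    unsigned int quot = 0;
    int i;

    for (i = 0; i < NBITS; i++) {
        rem *= 2;                      /* shift the partial remainder */
        quot <<= 1;
        if (rem >= 0) {                /* still non-negative: record a 1 */
            quot |= 1;
            rem -= (int)den;
        } else {                       /* negative: add the divisor back */
            rem += (int)den;
        }
    }
    return quot;                       /* floor(num * 2^(NBITS-1) / den) */
}

int main(void)
{
    /* 7/5 with 8 quotient bits: floor(7 * 128 / 5) = 179 */
    printf("%u\n", nr_divide(7u, 5u));
    return 0;
}
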
diff --git a/arch/parisc/math-emu/sfmpy.c b/arch/parisc/math-emu/sfmpy.c
new file mode 100644
index 00000000..afa40698
--- /dev/null
+++ b/arch/parisc/math-emu/sfmpy.c
@@ -0,0 +1,380 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/sfmpy.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Single Precision Floating-point Multiply
+ *
+ * External Interfaces:
+ * sgl_fmpy(srcptr1,srcptr2,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ * <<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+
+/*
+ * Single Precision Floating-point Multiply
+ */
+
+int
+sgl_fmpy(
+ sgl_floating_point *srcptr1,
+ sgl_floating_point *srcptr2,
+ sgl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register unsigned int opnd1, opnd2, opnd3, result;
+ register int dest_exponent, count;
+ register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
+ boolean is_tiny;
+
+ opnd1 = *srcptr1;
+ opnd2 = *srcptr2;
+ /*
+ * set sign bit of result
+ */
+ if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2)) Sgl_setnegativezero(result);
+ else Sgl_setzero(result);
+ /*
+ * check first operand for NaN's or infinity
+ */
+ if (Sgl_isinfinity_exponent(opnd1)) {
+ if (Sgl_iszero_mantissa(opnd1)) {
+ if (Sgl_isnotnan(opnd2)) {
+ if (Sgl_iszero_exponentmantissa(opnd2)) {
+ /*
+ * invalid since operands are infinity
+ * and zero
+ */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * return infinity
+ */
+ Sgl_setinfinity_exponentmantissa(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(opnd1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd1);
+ }
+ /*
+ * is second operand a signaling NaN?
+ */
+ else if (Sgl_is_signalingnan(opnd2)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd2);
+ *dstptr = opnd2;
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ *dstptr = opnd1;
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * check second operand for NaN's or infinity
+ */
+ if (Sgl_isinfinity_exponent(opnd2)) {
+ if (Sgl_iszero_mantissa(opnd2)) {
+ if (Sgl_iszero_exponentmantissa(opnd1)) {
+ /* invalid since operands are zero & infinity */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(opnd2);
+ *dstptr = opnd2;
+ return(NOEXCEPTION);
+ }
+ /*
+ * return infinity
+ */
+ Sgl_setinfinity_exponentmantissa(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(opnd2)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd2);
+ }
+ /*
+ * return quiet NaN
+ */
+ *dstptr = opnd2;
+ return(NOEXCEPTION);
+ }
+ /*
+ * Generate exponent
+ */
+ dest_exponent = Sgl_exponent(opnd1) + Sgl_exponent(opnd2) - SGL_BIAS;
+
+ /*
+ * Generate mantissa
+ */
+ if (Sgl_isnotzero_exponent(opnd1)) {
+ /* set hidden bit */
+ Sgl_clear_signexponent_set_hidden(opnd1);
+ }
+ else {
+ /* check for zero */
+ if (Sgl_iszero_mantissa(opnd1)) {
+ Sgl_setzero_exponentmantissa(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /* is denormalized, adjust exponent */
+ Sgl_clear_signexponent(opnd1);
+ Sgl_leftshiftby1(opnd1);
+ Sgl_normalize(opnd1,dest_exponent);
+ }
+ /* opnd2 needs to have hidden bit set with msb in hidden bit */
+ if (Sgl_isnotzero_exponent(opnd2)) {
+ Sgl_clear_signexponent_set_hidden(opnd2);
+ }
+ else {
+ /* check for zero */
+ if (Sgl_iszero_mantissa(opnd2)) {
+ Sgl_setzero_exponentmantissa(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /* is denormalized; want to normalize */
+ Sgl_clear_signexponent(opnd2);
+ Sgl_leftshiftby1(opnd2);
+ Sgl_normalize(opnd2,dest_exponent);
+ }
+
+ /* Multiply two source mantissas together */
+
+ Sgl_leftshiftby4(opnd2); /* make room for guard bits */
+ Sgl_setzero(opnd3);
+ /*
+ * Four bits at a time are inspected in each loop, and a
+ * simple shift and add multiply algorithm is used.
+ */
+ for (count=1;count<SGL_P;count+=4) {
+ stickybit |= Slow4(opnd3);
+ Sgl_rightshiftby4(opnd3);
+ if (Sbit28(opnd1)) Sall(opnd3) += (Sall(opnd2) << 3);
+ if (Sbit29(opnd1)) Sall(opnd3) += (Sall(opnd2) << 2);
+ if (Sbit30(opnd1)) Sall(opnd3) += (Sall(opnd2) << 1);
+ if (Sbit31(opnd1)) Sall(opnd3) += Sall(opnd2);
+ Sgl_rightshiftby4(opnd1);
+ }
+ /* make sure result is left-justified */
+ if (Sgl_iszero_sign(opnd3)) {
+ Sgl_leftshiftby1(opnd3);
+ }
+ else {
+ /* result mantissa >= 2. */
+ dest_exponent++;
+ }
+ /* check for denormalized result */
+ while (Sgl_iszero_sign(opnd3)) {
+ Sgl_leftshiftby1(opnd3);
+ dest_exponent--;
+ }
+ /*
+ * check for guard, sticky and inexact bits
+ */
+ stickybit |= Sgl_all(opnd3) << (SGL_BITLENGTH - SGL_EXP_LENGTH + 1);
+ guardbit = Sbit24(opnd3);
+ inexact = guardbit | stickybit;
+
+ /* re-align mantissa */
+ Sgl_rightshiftby8(opnd3);
+
+ /*
+ * round result
+ */
+ if (inexact && (dest_exponent>0 || Is_underflowtrap_enabled())) {
+ Sgl_clear_signexponent(opnd3);
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(result))
+ Sgl_increment(opnd3);
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(result))
+ Sgl_increment(opnd3);
+ break;
+ case ROUNDNEAREST:
+ if (guardbit) {
+ if (stickybit || Sgl_isone_lowmantissa(opnd3))
+ Sgl_increment(opnd3);
+ }
+ }
+ if (Sgl_isone_hidden(opnd3)) dest_exponent++;
+ }
+ Sgl_set_mantissa(result,opnd3);
+
+ /*
+ * Test for overflow
+ */
+ if (dest_exponent >= SGL_INFINITY_EXPONENT) {
+ /* trap if OVERFLOWTRAP enabled */
+ if (Is_overflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Sgl_setwrapped_exponent(result,dest_exponent,ovfl);
+ *dstptr = result;
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(OVERFLOWEXCEPTION);
+ }
+ inexact = TRUE;
+ Set_overflowflag();
+ /* set result to infinity or largest number */
+ Sgl_setoverflow(result);
+ }
+ /*
+ * Test for underflow
+ */
+ else if (dest_exponent <= 0) {
+ /* trap if UNDERFLOWTRAP enabled */
+ if (Is_underflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Sgl_setwrapped_exponent(result,dest_exponent,unfl);
+ *dstptr = result;
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(UNDERFLOWEXCEPTION);
+ }
+
+ /* Determine if should set underflow flag */
+ is_tiny = TRUE;
+ if (dest_exponent == 0 && inexact) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(result)) {
+ Sgl_increment(opnd3);
+ if (Sgl_isone_hiddenoverflow(opnd3))
+ is_tiny = FALSE;
+ Sgl_decrement(opnd3);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(result)) {
+ Sgl_increment(opnd3);
+ if (Sgl_isone_hiddenoverflow(opnd3))
+ is_tiny = FALSE;
+ Sgl_decrement(opnd3);
+ }
+ break;
+ case ROUNDNEAREST:
+ if (guardbit && (stickybit ||
+ Sgl_isone_lowmantissa(opnd3))) {
+ Sgl_increment(opnd3);
+ if (Sgl_isone_hiddenoverflow(opnd3))
+ is_tiny = FALSE;
+ Sgl_decrement(opnd3);
+ }
+ break;
+ }
+ }
+
+ /*
+ * denormalize result or set to signed zero
+ */
+ stickybit = inexact;
+ Sgl_denormalize(opnd3,dest_exponent,guardbit,stickybit,inexact);
+
+ /* return zero or smallest number */
+ if (inexact) {
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ if (Sgl_iszero_sign(result)) {
+ Sgl_increment(opnd3);
+ }
+ break;
+ case ROUNDMINUS:
+ if (Sgl_isone_sign(result)) {
+ Sgl_increment(opnd3);
+ }
+ break;
+ case ROUNDNEAREST:
+ if (guardbit && (stickybit ||
+ Sgl_isone_lowmantissa(opnd3))) {
+ Sgl_increment(opnd3);
+ }
+ break;
+ }
+ if (is_tiny) Set_underflowflag();
+ }
+ Sgl_set_exponentmantissa(result,opnd3);
+ }
+ else Sgl_set_exponent(result,dest_exponent);
+ *dstptr = result;
+
+ /* check for inexact */
+ if (inexact) {
+ if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ }
+ return(NOEXCEPTION);
+}
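
For reference, the multiply loop in sgl_fmpy above is the simple shift-and-add scheme its comment describes, consuming four multiplier bits per iteration and accumulating shifted copies of the other mantissa, with the low bits folded into a sticky indication. The sketch below performs the same nibble-at-a-time accumulation of the full 48-bit product, but into a single 64-bit accumulator for clarity; mul24 and the test values are illustrative assumptions.

#include <stdio.h>
#include <stdint.h>

static uint64_t mul24(uint32_t a, uint32_t b)
{
    uint64_t prod = 0;
    int i;

    for (i = 0; i < 24; i += 4) {
        uint32_t nibble = (a >> i) & 0xf;        /* next four multiplier bits */
        prod += (uint64_t)nibble * b << i;       /* add the shifted partial product */
    }
    return prod;                                  /* full 48-bit result */
}

int main(void)
{
    uint32_t a = 0x800000, b = 0xc00000;          /* mantissas of 1.0 and 1.5 */
    uint64_t p = mul24(a, b);
    printf("%llx (expect %llx)\n",
           (unsigned long long)p, (unsigned long long)a * b);
    return 0;
}
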
diff --git a/arch/parisc/math-emu/sfrem.c b/arch/parisc/math-emu/sfrem.c
new file mode 100644
index 00000000..3a1b7a34
--- /dev/null
+++ b/arch/parisc/math-emu/sfrem.c
@@ -0,0 +1,290 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/sfrem.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Single Precision Floating-point Remainder
+ *
+ * External Interfaces:
+ * sgl_frem(srcptr1,srcptr2,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ * <<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+
+#include "float.h"
+#include "sgl_float.h"
+
+/*
+ * Single Precision Floating-point Remainder
+ */
+
+int
+sgl_frem (sgl_floating_point * srcptr1, sgl_floating_point * srcptr2,
+ sgl_floating_point * dstptr, unsigned int *status)
+{
+ register unsigned int opnd1, opnd2, result;
+ register int opnd1_exponent, opnd2_exponent, dest_exponent, stepcount;
+ register boolean roundup = FALSE;
+
+ opnd1 = *srcptr1;
+ opnd2 = *srcptr2;
+ /*
+ * check first operand for NaN's or infinity
+ */
+ if ((opnd1_exponent = Sgl_exponent(opnd1)) == SGL_INFINITY_EXPONENT) {
+ if (Sgl_iszero_mantissa(opnd1)) {
+ if (Sgl_isnotnan(opnd2)) {
+ /* invalid since first operand is infinity */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ else {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(opnd1)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd1);
+ }
+ /*
+ * is second operand a signaling NaN?
+ */
+ else if (Sgl_is_signalingnan(opnd2)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled())
+ return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd2);
+ *dstptr = opnd2;
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ *dstptr = opnd1;
+ return(NOEXCEPTION);
+ }
+ }
+ /*
+ * check second operand for NaN's or infinity
+ */
+ if ((opnd2_exponent = Sgl_exponent(opnd2)) == SGL_INFINITY_EXPONENT) {
+ if (Sgl_iszero_mantissa(opnd2)) {
+ /*
+ * return first operand
+ */
+ *dstptr = opnd1;
+ return(NOEXCEPTION);
+ }
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(opnd2)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(opnd2);
+ }
+ /*
+ * return quiet NaN
+ */
+ *dstptr = opnd2;
+ return(NOEXCEPTION);
+ }
+ /*
+ * check second operand for zero
+ */
+ if (Sgl_iszero_exponentmantissa(opnd2)) {
+ /* invalid since second operand is zero */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * get sign of result
+ */
+ result = opnd1;
+
+ /*
+ * check for denormalized operands
+ */
+ if (opnd1_exponent == 0) {
+ /* check for zero */
+ if (Sgl_iszero_mantissa(opnd1)) {
+ *dstptr = opnd1;
+ return(NOEXCEPTION);
+ }
+ /* normalize, then continue */
+ opnd1_exponent = 1;
+ Sgl_normalize(opnd1,opnd1_exponent);
+ }
+ else {
+ Sgl_clear_signexponent_set_hidden(opnd1);
+ }
+ if (opnd2_exponent == 0) {
+ /* normalize, then continue */
+ opnd2_exponent = 1;
+ Sgl_normalize(opnd2,opnd2_exponent);
+ }
+ else {
+ Sgl_clear_signexponent_set_hidden(opnd2);
+ }
+
+ /* find result exponent and divide step loop count */
+ dest_exponent = opnd2_exponent - 1;
+ stepcount = opnd1_exponent - opnd2_exponent;
+
+ /*
+ * check for opnd1/opnd2 < 1
+ */
+ if (stepcount < 0) {
+ /*
+ * check for opnd1/opnd2 > 1/2
+ *
+ * In this case n will round to 1, so
+ * r = opnd1 - opnd2
+ */
+ if (stepcount == -1 && Sgl_isgreaterthan(opnd1,opnd2)) {
+ Sgl_all(result) = ~Sgl_all(result); /* set sign */
+ /* align opnd2 with opnd1 */
+ Sgl_leftshiftby1(opnd2);
+ Sgl_subtract(opnd2,opnd1,opnd2);
+ /* now normalize */
+ while (Sgl_iszero_hidden(opnd2)) {
+ Sgl_leftshiftby1(opnd2);
+ dest_exponent--;
+ }
+ Sgl_set_exponentmantissa(result,opnd2);
+ goto testforunderflow;
+ }
+ /*
+ * opnd1/opnd2 <= 1/2
+ *
+ * In this case n will round to zero, so
+ * r = opnd1
+ */
+ Sgl_set_exponentmantissa(result,opnd1);
+ dest_exponent = opnd1_exponent;
+ goto testforunderflow;
+ }
+
+ /*
+ * Generate result
+ *
+ * Do iterative subtract until remainder is less than operand 2.
+ */
+ while (stepcount-- > 0 && Sgl_all(opnd1)) {
+ if (Sgl_isnotlessthan(opnd1,opnd2))
+ Sgl_subtract(opnd1,opnd2,opnd1);
+ Sgl_leftshiftby1(opnd1);
+ }
+ /*
+ * Do last subtract, then determine which way to round if remainder
+ * is exactly 1/2 of opnd2
+ */
+ if (Sgl_isnotlessthan(opnd1,opnd2)) {
+ Sgl_subtract(opnd1,opnd2,opnd1);
+ roundup = TRUE;
+ }
+ if (stepcount > 0 || Sgl_iszero(opnd1)) {
+ /* division is exact, remainder is zero */
+ Sgl_setzero_exponentmantissa(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * Check for cases where opnd1/opnd2 < n
+ *
+ * In this case the result's sign will be opposite that of
+ * opnd1. The mantissa also needs some correction.
+ */
+ Sgl_leftshiftby1(opnd1);
+ if (Sgl_isgreaterthan(opnd1,opnd2)) {
+ Sgl_invert_sign(result);
+ Sgl_subtract((opnd2<<1),opnd1,opnd1);
+ }
+ /* check for remainder being exactly 1/2 of opnd2 */
+ else if (Sgl_isequal(opnd1,opnd2) && roundup) {
+ Sgl_invert_sign(result);
+ }
+
+ /* normalize result's mantissa */
+ while (Sgl_iszero_hidden(opnd1)) {
+ dest_exponent--;
+ Sgl_leftshiftby1(opnd1);
+ }
+ Sgl_set_exponentmantissa(result,opnd1);
+
+ /*
+ * Test for underflow
+ */
+ testforunderflow:
+ if (dest_exponent <= 0) {
+ /* trap if UNDERFLOWTRAP enabled */
+ if (Is_underflowtrap_enabled()) {
+ /*
+ * Adjust bias of result
+ */
+ Sgl_setwrapped_exponent(result,dest_exponent,unfl);
+ *dstptr = result;
+ /* frem is always exact */
+ return(UNDERFLOWEXCEPTION);
+ }
+ /*
+ * denormalize result or set to signed zero
+ */
+ if (dest_exponent >= (1 - SGL_P)) {
+ Sgl_rightshift_exponentmantissa(result,1-dest_exponent);
+ }
+ else {
+ Sgl_setzero_exponentmantissa(result);
+ }
+ }
+ else Sgl_set_exponent(result,dest_exponent);
+ *dstptr = result;
+ return(NOEXCEPTION);
+}
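
For reference, sgl_frem above reduces the dividend by conditionally subtracting the divisor and doubling, once per bit of exponent difference, and afterwards adjusts the sign so the implicit quotient is rounded to nearest rather than truncated. The sketch below shows only the reduction step, the classic binary long-division remainder on plain integers; the round-to-nearest quotient correction is not modelled, and the names are illustrative.

#include <stdio.h>
#include <stdint.h>

static uint32_t long_division_rem(uint32_t a, uint32_t b)
{
    uint64_t shifted;
    int k;

    for (k = 31; k >= 0; k--) {
        shifted = (uint64_t)b << k;      /* divisor aligned k places up */
        if (shifted <= a)
            a -= (uint32_t)shifted;      /* it fits: subtract it out */
    }
    return a;                            /* what is left is a % b */
}

int main(void)
{
    /* both values printed should be 6 */
    printf("%u %u\n", long_division_rem(1000u, 7u), 1000u % 7u);
    return 0;
}
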
diff --git a/arch/parisc/math-emu/sfsqrt.c b/arch/parisc/math-emu/sfsqrt.c
new file mode 100644
index 00000000..4657a12c
--- /dev/null
+++ b/arch/parisc/math-emu/sfsqrt.c
@@ -0,0 +1,187 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/sfsqrt.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Single Floating-point Square Root
+ *
+ * External Interfaces:
+ * sgl_fsqrt(srcptr,nullptr,dstptr,status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ * <<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+
+/*
+ * Single Floating-point Square Root
+ */
+
+/*ARGSUSED*/
+unsigned int
+sgl_fsqrt(
+ sgl_floating_point *srcptr,
+ unsigned int *nullptr,
+ sgl_floating_point *dstptr,
+ unsigned int *status)
+{
+ register unsigned int src, result;
+ register int src_exponent;
+ register unsigned int newbit, sum;
+ register boolean guardbit = FALSE, even_exponent;
+
+ src = *srcptr;
+ /*
+ * check source operand for NaN or infinity
+ */
+ if ((src_exponent = Sgl_exponent(src)) == SGL_INFINITY_EXPONENT) {
+ /*
+ * is signaling NaN?
+ */
+ if (Sgl_isone_signaling(src)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(src);
+ }
+ /*
+ * Return quiet NaN or positive infinity.
+ * Fall through to negative test if negative infinity.
+ */
+ if (Sgl_iszero_sign(src) || Sgl_isnotzero_mantissa(src)) {
+ *dstptr = src;
+ return(NOEXCEPTION);
+ }
+ }
+
+ /*
+ * check for zero source operand
+ */
+ if (Sgl_iszero_exponentmantissa(src)) {
+ *dstptr = src;
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * check for negative source operand
+ */
+ if (Sgl_isone_sign(src)) {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_makequietnan(src);
+ *dstptr = src;
+ return(NOEXCEPTION);
+ }
+
+ /*
+ * Generate result
+ */
+ if (src_exponent > 0) {
+ even_exponent = Sgl_hidden(src);
+ Sgl_clear_signexponent_set_hidden(src);
+ }
+ else {
+ /* normalize operand */
+ Sgl_clear_signexponent(src);
+ src_exponent++;
+ Sgl_normalize(src,src_exponent);
+ even_exponent = src_exponent & 1;
+ }
+ if (even_exponent) {
+ /* exponent is even */
+ /* Add comment here. Explain why odd exponent needs correction */
+ Sgl_leftshiftby1(src);
+ }
+ /*
+ * Add comment here. Explain following algorithm.
+ *
+ * Trust me, it works.
+ *
+ */
+ Sgl_setzero(result);
+ newbit = 1 << SGL_P;
+ while (newbit && Sgl_isnotzero(src)) {
+ Sgl_addition(result,newbit,sum);
+ if(sum <= Sgl_all(src)) {
+ /* update result */
+ Sgl_addition(result,(newbit<<1),result);
+ Sgl_subtract(src,sum,src);
+ }
+ Sgl_rightshiftby1(newbit);
+ Sgl_leftshiftby1(src);
+ }
+ /* correct exponent for pre-shift */
+ if (even_exponent) {
+ Sgl_rightshiftby1(result);
+ }
+
+ /* check for inexact */
+ if (Sgl_isnotzero(src)) {
+ if (!even_exponent && Sgl_islessthan(result,src))
+ Sgl_increment(result);
+ guardbit = Sgl_lowmantissa(result);
+ Sgl_rightshiftby1(result);
+
+ /* now round result */
+ switch (Rounding_mode()) {
+ case ROUNDPLUS:
+ Sgl_increment(result);
+ break;
+ case ROUNDNEAREST:
+ /* stickybit is always true, so guardbit
+ * is enough to determine rounding */
+ if (guardbit) {
+ Sgl_increment(result);
+ }
+ break;
+ }
+ /* increment result exponent by 1 if mantissa overflowed */
+ if (Sgl_isone_hiddenoverflow(result)) src_exponent+=2;
+
+ if (Is_inexacttrap_enabled()) {
+ Sgl_set_exponent(result,
+ ((src_exponent-SGL_BIAS)>>1)+SGL_BIAS);
+ *dstptr = result;
+ return(INEXACTEXCEPTION);
+ }
+ else Set_inexactflag();
+ }
+ else {
+ Sgl_rightshiftby1(result);
+ }
+ Sgl_set_exponent(result,((src_exponent-SGL_BIAS)>>1)+SGL_BIAS);
+ *dstptr = result;
+ return(NOEXCEPTION);
+}
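
For reference, the undocumented loop in sgl_fsqrt above appears to be the classic bit-at-a-time square-root recurrence: each iteration decides one result bit by a trial subtraction against the running root, and a nonzero final remainder is what makes the result inexact. The standard integer form of that recurrence is sketched below; isqrt and its test values are illustrative and not taken from the emulator sources.

#include <stdio.h>
#include <stdint.h>

static uint32_t isqrt(uint32_t num)
{
    uint32_t res = 0;
    uint32_t bit = 1u << 30;          /* highest power of four in 32 bits */

    while (bit > num)
        bit >>= 2;
    while (bit != 0) {
        if (num >= res + bit) {       /* trial subtraction succeeds */
            num -= res + bit;
            res = (res >> 1) + bit;   /* set this result bit */
        } else {
            res >>= 1;                /* leave this result bit clear */
        }
        bit >>= 2;
    }
    return res;                       /* floor(sqrt(original num)) */
}

int main(void)
{
    /* expect: 5 5 46340 */
    printf("%u %u %u\n", isqrt(25u), isqrt(26u), isqrt(2147395600u));
    return 0;
}
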
diff --git a/arch/parisc/math-emu/sfsub.c b/arch/parisc/math-emu/sfsub.c
new file mode 100644
index 00000000..5f90d0f3
--- /dev/null
+++ b/arch/parisc/math-emu/sfsub.c
@@ -0,0 +1,521 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ * File:
+ * @(#) pa/spmath/sfsub.c $Revision: 1.1 $
+ *
+ * Purpose:
+ * Single_subtract: subtract two single precision values.
+ *
+ * External Interfaces:
+ * sgl_fsub(leftptr, rightptr, dstptr, status)
+ *
+ * Internal Interfaces:
+ *
+ * Theory:
+ * <<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+
+/*
+ * Single_subtract: subtract two single precision values.
+ */
+int
+sgl_fsub(
+ sgl_floating_point *leftptr,
+ sgl_floating_point *rightptr,
+ sgl_floating_point *dstptr,
+ unsigned int *status)
+ {
+ register unsigned int left, right, result, extent;
+ register unsigned int signless_upper_left, signless_upper_right, save;
+
+ register int result_exponent, right_exponent, diff_exponent;
+ register int sign_save, jumpsize;
+ register boolean inexact = FALSE, underflowtrap;
+
+ /* Create local copies of the numbers */
+ left = *leftptr;
+ right = *rightptr;
+
+ /* A zero "save" helps discover equal operands (for later), *
+ * and is used in swapping operands (if needed). */
+ Sgl_xortointp1(left,right,/*to*/save);
+
+ /*
+ * check first operand for NaN's or infinity
+ */
+ if ((result_exponent = Sgl_exponent(left)) == SGL_INFINITY_EXPONENT)
+ {
+ if (Sgl_iszero_mantissa(left))
+ {
+ if (Sgl_isnotnan(right))
+ {
+ if (Sgl_isinfinity(right) && save==0)
+ {
+ /*
+ * invalid since operands are same signed infinity's
+ */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ Set_invalidflag();
+ Sgl_makequietnan(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ /*
+ * return infinity
+ */
+ *dstptr = left;
+ return(NOEXCEPTION);
+ }
+ }
+ else
+ {
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(left))
+ {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(left);
+ }
+ /*
+ * is second operand a signaling NaN?
+ */
+ else if (Sgl_is_signalingnan(right))
+ {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(right);
+ *dstptr = right;
+ return(NOEXCEPTION);
+ }
+ /*
+ * return quiet NaN
+ */
+ *dstptr = left;
+ return(NOEXCEPTION);
+ }
+ } /* End left NaN or Infinity processing */
+ /*
+ * check second operand for NaN's or infinity
+ */
+ if (Sgl_isinfinity_exponent(right))
+ {
+ if (Sgl_iszero_mantissa(right))
+ {
+ /* return infinity */
+ Sgl_invert_sign(right);
+ *dstptr = right;
+ return(NOEXCEPTION);
+ }
+ /*
+ * is NaN; signaling or quiet?
+ */
+ if (Sgl_isone_signaling(right))
+ {
+ /* trap if INVALIDTRAP enabled */
+ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+ /* make NaN quiet */
+ Set_invalidflag();
+ Sgl_set_quiet(right);
+ }
+ /*
+ * return quiet NaN
+ */
+ *dstptr = right;
+ return(NOEXCEPTION);
+ } /* End right NaN or Infinity processing */
+
+ /* Invariant: Must be dealing with finite numbers */
+
+ /* Compare operands by removing the sign */
+ Sgl_copytoint_exponentmantissa(left,signless_upper_left);
+ Sgl_copytoint_exponentmantissa(right,signless_upper_right);
+
+ /* sign difference selects sub or add operation. */
+ if(Sgl_ismagnitudeless(signless_upper_left,signless_upper_right))
+ {
+ /* Set the left operand to the larger one by XOR swap *
+ * First finish the first word using "save" */
+ Sgl_xorfromintp1(save,right,/*to*/right);
+ Sgl_xorfromintp1(save,left,/*to*/left);
+ result_exponent = Sgl_exponent(left);
+ Sgl_invert_sign(left);
+ }
+ /* Invariant: left is not smaller than right. */
+
+ if((right_exponent = Sgl_exponent(right)) == 0)
+ {
+ /* Denormalized operands. First look for zeroes */
+ if(Sgl_iszero_mantissa(right))
+ {
+ /* right is zero */
+ if(Sgl_iszero_exponentmantissa(left))
+ {
+ /* Both operands are zeros */
+ Sgl_invert_sign(right);
+ if(Is_rounding_mode(ROUNDMINUS))
+ {
+ Sgl_or_signs(left,/*with*/right);
+ }
+ else
+ {
+ Sgl_and_signs(left,/*with*/right);
+ }
+ }
+ else
+ {
+ /* Left is not a zero and must be the result. Trapped
+ * underflows are signaled if left is denormalized. Result
+ * is always exact. */
+ if( (result_exponent == 0) && Is_underflowtrap_enabled() )
+ {
+ /* need to normalize the result's mantissa */
+ sign_save = Sgl_signextendedsign(left);
+ Sgl_leftshiftby1(left);
+ Sgl_normalize(left,result_exponent);
+ Sgl_set_sign(left,/*using*/sign_save);
+ Sgl_setwrapped_exponent(left,result_exponent,unfl);
+ *dstptr = left;
+ /* inexact = FALSE */
+ return(UNDERFLOWEXCEPTION);
+ }
+ }
+ *dstptr = left;
+ return(NOEXCEPTION);
+ }
+
+ /* Neither are zeroes */
+ Sgl_clear_sign(right); /* Exponent is already cleared */
+ if(result_exponent == 0 )
+ {
+ /* Both operands are denormalized. The result must be exact
+ * and is simply calculated. A sum could become normalized and a
+ * difference could cancel to a true zero. */
+ if( (/*signed*/int) save >= 0 )
+ {
+ Sgl_subtract(left,/*minus*/right,/*into*/result);
+ if(Sgl_iszero_mantissa(result))
+ {
+ if(Is_rounding_mode(ROUNDMINUS))
+ {
+ Sgl_setone_sign(result);
+ }
+ else
+ {
+ Sgl_setzero_sign(result);
+ }
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ else
+ {
+ Sgl_addition(left,right,/*into*/result);
+ if(Sgl_isone_hidden(result))
+ {
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ }
+ if(Is_underflowtrap_enabled())
+ {
+ /* need to normalize result */
+ sign_save = Sgl_signextendedsign(result);
+ Sgl_leftshiftby1(result);
+ Sgl_normalize(result,result_exponent);
+ Sgl_set_sign(result,/*using*/sign_save);
+ Sgl_setwrapped_exponent(result,result_exponent,unfl);
+ *dstptr = result;
+ /* inexact = FALSE */
+ return(UNDERFLOWEXCEPTION);
+ }
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ right_exponent = 1; /* Set exponent to reflect different bias
+ * with denormalized numbers. */
+ }
+ else
+ {
+ Sgl_clear_signexponent_set_hidden(right);
+ }
+ Sgl_clear_exponent_set_hidden(left);
+ diff_exponent = result_exponent - right_exponent;
+
+ /*
+ * Special case alignment of operands that would force alignment
+ * beyond the extent of the extension. A further optimization
+ * could special case this but only reduces the path length for this
+ * infrequent case.
+ */
+ if(diff_exponent > SGL_THRESHOLD)
+ {
+ diff_exponent = SGL_THRESHOLD;
+ }
+
+ /* Align right operand by shifting to right */
+ Sgl_right_align(/*operand*/right,/*shifted by*/diff_exponent,
+ /*and lower to*/extent);
+
+ /* Treat sum and difference of the operands separately. */
+ if( (/*signed*/int) save >= 0 )
+ {
+ /*
+ * Difference of the two operands. There can be no overflow. A
+ * borrow can occur out of the hidden bit and force a post
+ * normalization phase.
+ */
+ Sgl_subtract_withextension(left,/*minus*/right,/*with*/extent,/*into*/result);
+ if(Sgl_iszero_hidden(result))
+ {
+ /* Handle normalization */
+ /* A straightforward algorithm would now shift the result
+ * and extension left until the hidden bit becomes one. Not
+ * all of the extension bits need participate in the shift.
+ * Only the two most significant bits (round and guard) are
+ * needed. If only a single shift is needed then the guard
+ * bit becomes a significant low order bit and the extension
+ * must participate in the rounding. If more than a single
+ * shift is needed, then all bits to the right of the guard
+ * bit are zeros, and the guard bit may or may not be zero. */
+ sign_save = Sgl_signextendedsign(result);
+ Sgl_leftshiftby1_withextent(result,extent,result);
+
+ /* Need to check for a zero result. The sign and exponent
+ * fields have already been zeroed. The more efficient test
+ * of the full object can be used.
+ */
+ if(Sgl_iszero(result))
+ /* Must have been "x-x" or "x+(-x)". */
+ {
+ if(Is_rounding_mode(ROUNDMINUS)) Sgl_setone_sign(result);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ result_exponent--;
+ /* Look to see if normalization is finished. */
+ if(Sgl_isone_hidden(result))
+ {
+ if(result_exponent==0)
+ {
+ /* Denormalized, exponent should be zero. Left operand *
+ * was normalized, so extent (guard, round) was zero */
+ goto underflow;
+ }
+ else
+ {
+ /* No further normalization is needed. */
+ Sgl_set_sign(result,/*using*/sign_save);
+ Ext_leftshiftby1(extent);
+ goto round;
+ }
+ }
+
+ /* Check for denormalized, exponent should be zero. Left *
+ * operand was normalized, so extent (guard, round) was zero */
+ if(!(underflowtrap = Is_underflowtrap_enabled()) &&
+ result_exponent==0) goto underflow;
+
+ /* Shift extension to complete one bit of normalization and
+ * update exponent. */
+ Ext_leftshiftby1(extent);
+
+ /* Discover first one bit to determine shift amount. Use a
+ * modified binary search. We have already shifted the result
+ * one position right and still not found a one so the remainder
+ * of the extension must be zero and simplifies rounding. */
+ /* Scan bytes */
+ while(Sgl_iszero_hiddenhigh7mantissa(result))
+ {
+ Sgl_leftshiftby8(result);
+ if((result_exponent -= 8) <= 0 && !underflowtrap)
+ goto underflow;
+ }
+ /* Now narrow it down to the nibble */
+ if(Sgl_iszero_hiddenhigh3mantissa(result))
+ {
+ /* The lower nibble contains the normalizing one */
+ Sgl_leftshiftby4(result);
+ if((result_exponent -= 4) <= 0 && !underflowtrap)
+ goto underflow;
+ }
+ /* Select case where the first bit is set (already normalized),
+ * otherwise select the proper shift. */
+ if((jumpsize = Sgl_hiddenhigh3mantissa(result)) > 7)
+ {
+ /* Already normalized */
+ if(result_exponent <= 0) goto underflow;
+ Sgl_set_sign(result,/*using*/sign_save);
+ Sgl_set_exponent(result,/*using*/result_exponent);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ }
+ Sgl_sethigh4bits(result,/*using*/sign_save);
+ switch(jumpsize)
+ {
+ case 1:
+ {
+ Sgl_leftshiftby3(result);
+ result_exponent -= 3;
+ break;
+ }
+ case 2:
+ case 3:
+ {
+ Sgl_leftshiftby2(result);
+ result_exponent -= 2;
+ break;
+ }
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ {
+ Sgl_leftshiftby1(result);
+ result_exponent -= 1;
+ break;
+ }
+ }
+ if(result_exponent > 0)
+ {
+ Sgl_set_exponent(result,/*using*/result_exponent);
+ *dstptr = result; /* Sign bit is already set */
+ return(NOEXCEPTION);
+ }
+ /* Fixup potential underflows */
+ underflow:
+ if(Is_underflowtrap_enabled())
+ {
+ Sgl_set_sign(result,sign_save);
+ Sgl_setwrapped_exponent(result,result_exponent,unfl);
+ *dstptr = result;
+ /* inexact = FALSE */
+ return(UNDERFLOWEXCEPTION);
+ }
+ /*
+ * Since we cannot get an inexact denormalized result,
+ * we can now return.
+ */
+ Sgl_right_align(result,/*by*/(1-result_exponent),extent);
+ Sgl_clear_signexponent(result);
+ Sgl_set_sign(result,sign_save);
+ *dstptr = result;
+ return(NOEXCEPTION);
+ } /* end if(hidden...)... */
+ /* Fall through and round */
+ } /* end if(save >= 0)... */
+ else
+ {
+ /* Add magnitudes */
+ Sgl_addition(left,right,/*to*/result);
+ if(Sgl_isone_hiddenoverflow(result))
+ {
+ /* Prenormalization required. */
+ Sgl_rightshiftby1_withextent(result,extent,extent);
+ Sgl_arithrightshiftby1(result);
+ result_exponent++;
+ } /* end if hiddenoverflow... */
+ } /* end else ...sub magnitudes... */
+
+ /* Round the result. If the extension is all zeros, then the result is
+ * exact. Otherwise round in the correct direction. No underflow is
+ * possible. If a postnormalization is necessary, then the mantissa is
+ * all zeros so no shift is needed. */
+ round:
+ if(Ext_isnotzero(extent))
+ {
+ inexact = TRUE;
+ switch(Rounding_mode())
+ {
+ case ROUNDNEAREST: /* The default. */
+ if(Ext_isone_sign(extent))
+ {
+ /* at least 1/2 ulp */
+ if(Ext_isnotzero_lower(extent) ||
+ Sgl_isone_lowmantissa(result))
+ {
+ /* either more than 1/2 ulp, or exactly 1/2 ulp with an odd low bit */
+ Sgl_increment(result);
+ }
+ }
+ break;
+
+ case ROUNDPLUS:
+ if(Sgl_iszero_sign(result))
+ {
+ /* Round up positive results */
+ Sgl_increment(result);
+ }
+ break;
+
+ case ROUNDMINUS:
+ if(Sgl_isone_sign(result))
+ {
+ /* Round down negative results */
+ Sgl_increment(result);
+ }
+
+ case ROUNDZERO:;
+ /* truncate is simple */
+ } /* end switch... */
+ if(Sgl_isone_hiddenoverflow(result)) result_exponent++;
+ }
+ if(result_exponent == SGL_INFINITY_EXPONENT)
+ {
+ /* Overflow */
+ if(Is_overflowtrap_enabled())
+ {
+ Sgl_setwrapped_exponent(result,result_exponent,ovfl);
+ *dstptr = result;
+ if (inexact)
+ if (Is_inexacttrap_enabled())
+ return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(OVERFLOWEXCEPTION);
+ }
+ else
+ {
+ Set_overflowflag();
+ inexact = TRUE;
+ Sgl_setoverflow(result);
+ }
+ }
+ else Sgl_set_exponent(result,result_exponent);
+ *dstptr = result;
+ if(inexact)
+ if(Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+ else Set_inexactflag();
+ return(NOEXCEPTION);
+ }
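
For reference, when the subtraction above cancels high-order bits the result is renormalized by a coarse-to-fine search: byte-sized shifts while the top eight significand bits are clear, then a nibble shift, then the jumpsize switch, with the exponent reduced by the same amount (the Sgl_normalize macro in sgl_float.h below finishes bit by bit instead of switching). The sketch applies the same byte/nibble/bit search to a bare 24-bit significand; HIDDEN_BIT, normalize and the sample values are illustrative assumptions.

#include <stdio.h>
#include <stdint.h>

#define HIDDEN_BIT (1u << 23)

static uint32_t normalize(uint32_t mant, int *exponent)
{
    if (mant == 0)
        return 0;                          /* a true zero never normalizes */
    while ((mant & 0xff0000u) == 0) {      /* top byte clear: shift by 8 */
        mant <<= 8;
        *exponent -= 8;
    }
    if ((mant & 0xf00000u) == 0) {         /* top nibble clear: shift by 4 */
        mant <<= 4;
        *exponent -= 4;
    }
    while ((mant & HIDDEN_BIT) == 0) {     /* finish one bit at a time */
        mant <<= 1;
        *exponent -= 1;
    }
    return mant;
}

int main(void)
{
    int exp = 120;
    uint32_t m = normalize(0x000003u, &exp);
    printf("%06x %d\n", (unsigned)m, exp); /* expect c00000 and 98 */
    return 0;
}
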
diff --git a/arch/parisc/math-emu/sgl_float.h b/arch/parisc/math-emu/sgl_float.h
new file mode 100644
index 00000000..4ee4cc95
--- /dev/null
+++ b/arch/parisc/math-emu/sgl_float.h
@@ -0,0 +1,486 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifdef __NO_PA_HDRS
+ PA header file -- do not include this header file for non-PA builds.
+#endif
+
+/* 32-bit word grabbing functions */
+#define Sgl_firstword(value) Sall(value)
+#define Sgl_secondword(value) dummy_location
+#define Sgl_thirdword(value) dummy_location
+#define Sgl_fourthword(value) dummy_location
+
+#define Sgl_sign(object) Ssign(object)
+#define Sgl_exponent(object) Sexponent(object)
+#define Sgl_signexponent(object) Ssignexponent(object)
+#define Sgl_mantissa(object) Smantissa(object)
+#define Sgl_exponentmantissa(object) Sexponentmantissa(object)
+#define Sgl_all(object) Sall(object)
+
+/* sgl_and_signs ANDs the sign bits of each argument and puts the result
+ * into the first argument. sgl_or_signs ors those same sign bits */
+#define Sgl_and_signs( src1dst, src2) \
+ Sall(src1dst) = (Sall(src2)|~((unsigned int)1<<31)) & Sall(src1dst)
+#define Sgl_or_signs( src1dst, src2) \
+ Sall(src1dst) = (Sall(src2)&((unsigned int)1<<31)) | Sall(src1dst)
+
+/* The hidden bit is always the low bit of the exponent */
+#define Sgl_clear_exponent_set_hidden(srcdst) Deposit_sexponent(srcdst,1)
+#define Sgl_clear_signexponent_set_hidden(srcdst) \
+ Deposit_ssignexponent(srcdst,1)
+#define Sgl_clear_sign(srcdst) Sall(srcdst) &= ~((unsigned int)1<<31)
+#define Sgl_clear_signexponent(srcdst) Sall(srcdst) &= 0x007fffff
+
+/* varamount must be less than 32 for the next three functions */
+#define Sgl_rightshift(srcdst, varamount) \
+ Sall(srcdst) >>= varamount
+#define Sgl_leftshift(srcdst, varamount) \
+ Sall(srcdst) <<= varamount
+#define Sgl_rightshift_exponentmantissa(srcdst, varamount) \
+ Sall(srcdst) = \
+ (Sexponentmantissa(srcdst) >> varamount) | \
+ (Sall(srcdst) & ((unsigned int)1<<31))
+
+#define Sgl_leftshiftby1_withextent(left,right,result) \
+ Shiftdouble(Sall(left),Extall(right),31,Sall(result))
+
+#define Sgl_rightshiftby1_withextent(left,right,dst) \
+ Shiftdouble(Sall(left),Extall(right),1,Extall(right))
+#define Sgl_arithrightshiftby1(srcdst) \
+ Sall(srcdst) = (int)Sall(srcdst) >> 1
+
+/* Sign extend the sign bit with an integer destination */
+#define Sgl_signextendedsign(value) Ssignedsign(value)
+
+#define Sgl_isone_hidden(sgl_value) (Shidden(sgl_value))
+#define Sgl_increment(sgl_value) Sall(sgl_value) += 1
+#define Sgl_increment_mantissa(sgl_value) \
+ Deposit_smantissa(sgl_value,sgl_value+1)
+#define Sgl_decrement(sgl_value) Sall(sgl_value) -= 1
+
+#define Sgl_isone_sign(sgl_value) (Is_ssign(sgl_value)!=0)
+#define Sgl_isone_hiddenoverflow(sgl_value) \
+ (Is_shiddenoverflow(sgl_value)!=0)
+#define Sgl_isone_lowmantissa(sgl_value) (Is_slow(sgl_value)!=0)
+#define Sgl_isone_signaling(sgl_value) (Is_ssignaling(sgl_value)!=0)
+#define Sgl_is_signalingnan(sgl_value) (Ssignalingnan(sgl_value)==0x1ff)
+#define Sgl_isnotzero(sgl_value) (Sall(sgl_value)!=0)
+#define Sgl_isnotzero_hiddenhigh7mantissa(sgl_value) \
+ (Shiddenhigh7mantissa(sgl_value)!=0)
+#define Sgl_isnotzero_low4(sgl_value) (Slow4(sgl_value)!=0)
+#define Sgl_isnotzero_exponent(sgl_value) (Sexponent(sgl_value)!=0)
+#define Sgl_isnotzero_mantissa(sgl_value) (Smantissa(sgl_value)!=0)
+#define Sgl_isnotzero_exponentmantissa(sgl_value) \
+ (Sexponentmantissa(sgl_value)!=0)
+#define Sgl_iszero(sgl_value) (Sall(sgl_value)==0)
+#define Sgl_iszero_signaling(sgl_value) (Is_ssignaling(sgl_value)==0)
+#define Sgl_iszero_hidden(sgl_value) (Is_shidden(sgl_value)==0)
+#define Sgl_iszero_hiddenoverflow(sgl_value) \
+ (Is_shiddenoverflow(sgl_value)==0)
+#define Sgl_iszero_hiddenhigh3mantissa(sgl_value) \
+ (Shiddenhigh3mantissa(sgl_value)==0)
+#define Sgl_iszero_hiddenhigh7mantissa(sgl_value) \
+ (Shiddenhigh7mantissa(sgl_value)==0)
+#define Sgl_iszero_sign(sgl_value) (Is_ssign(sgl_value)==0)
+#define Sgl_iszero_exponent(sgl_value) (Sexponent(sgl_value)==0)
+#define Sgl_iszero_mantissa(sgl_value) (Smantissa(sgl_value)==0)
+#define Sgl_iszero_exponentmantissa(sgl_value) \
+ (Sexponentmantissa(sgl_value)==0)
+#define Sgl_isinfinity_exponent(sgl_value) \
+ (Sgl_exponent(sgl_value)==SGL_INFINITY_EXPONENT)
+#define Sgl_isnotinfinity_exponent(sgl_value) \
+ (Sgl_exponent(sgl_value)!=SGL_INFINITY_EXPONENT)
+#define Sgl_isinfinity(sgl_value) \
+ (Sgl_exponent(sgl_value)==SGL_INFINITY_EXPONENT && \
+ Sgl_mantissa(sgl_value)==0)
+#define Sgl_isnan(sgl_value) \
+ (Sgl_exponent(sgl_value)==SGL_INFINITY_EXPONENT && \
+ Sgl_mantissa(sgl_value)!=0)
+#define Sgl_isnotnan(sgl_value) \
+ (Sgl_exponent(sgl_value)!=SGL_INFINITY_EXPONENT || \
+ Sgl_mantissa(sgl_value)==0)
+#define Sgl_islessthan(sgl_op1,sgl_op2) \
+ (Sall(sgl_op1) < Sall(sgl_op2))
+#define Sgl_isgreaterthan(sgl_op1,sgl_op2) \
+ (Sall(sgl_op1) > Sall(sgl_op2))
+#define Sgl_isnotlessthan(sgl_op1,sgl_op2) \
+ (Sall(sgl_op1) >= Sall(sgl_op2))
+#define Sgl_isequal(sgl_op1,sgl_op2) \
+ (Sall(sgl_op1) == Sall(sgl_op2))
+
+#define Sgl_leftshiftby8(sgl_value) \
+ Sall(sgl_value) <<= 8
+#define Sgl_leftshiftby4(sgl_value) \
+ Sall(sgl_value) <<= 4
+#define Sgl_leftshiftby3(sgl_value) \
+ Sall(sgl_value) <<= 3
+#define Sgl_leftshiftby2(sgl_value) \
+ Sall(sgl_value) <<= 2
+#define Sgl_leftshiftby1(sgl_value) \
+ Sall(sgl_value) <<= 1
+#define Sgl_rightshiftby1(sgl_value) \
+ Sall(sgl_value) >>= 1
+#define Sgl_rightshiftby4(sgl_value) \
+ Sall(sgl_value) >>= 4
+#define Sgl_rightshiftby8(sgl_value) \
+ Sall(sgl_value) >>= 8
+
+#define Sgl_ismagnitudeless(signlessleft,signlessright) \
+/* unsigned int signlessleft, signlessright; */ \
+ (signlessleft < signlessright)
+
+
+#define Sgl_copytoint_exponentmantissa(source,dest) \
+ dest = Sexponentmantissa(source)
+
+/* A quiet NaN has the high mantissa bit clear and at least one other bit set
+ * (in this case the adjacent bit). */
+#define Sgl_set_quiet(sgl_value) Deposit_shigh2mantissa(sgl_value,1)
+#define Sgl_set_exponent(sgl_value,exp) Deposit_sexponent(sgl_value,exp)
+
+#define Sgl_set_mantissa(dest,value) Deposit_smantissa(dest,value)
+#define Sgl_set_exponentmantissa(dest,value) \
+ Deposit_sexponentmantissa(dest,value)
+
+/* An infinity is represented with the max exponent and a zero mantissa */
+#define Sgl_setinfinity_exponent(sgl_value) \
+ Deposit_sexponent(sgl_value,SGL_INFINITY_EXPONENT)
+#define Sgl_setinfinity_exponentmantissa(sgl_value) \
+ Deposit_sexponentmantissa(sgl_value, \
+ (SGL_INFINITY_EXPONENT << (32-(1+SGL_EXP_LENGTH))))
+#define Sgl_setinfinitypositive(sgl_value) \
+ Sall(sgl_value) = (SGL_INFINITY_EXPONENT << (32-(1+SGL_EXP_LENGTH)))
+#define Sgl_setinfinitynegative(sgl_value) \
+ Sall(sgl_value) = (SGL_INFINITY_EXPONENT << (32-(1+SGL_EXP_LENGTH))) \
+ | ((unsigned int)1<<31)
+#define Sgl_setinfinity(sgl_value,sign) \
+ Sall(sgl_value) = (SGL_INFINITY_EXPONENT << (32-(1+SGL_EXP_LENGTH))) | \
+ ((unsigned int)sign << 31)
+#define Sgl_sethigh4bits(sgl_value, extsign) \
+ Deposit_shigh4(sgl_value,extsign)
+#define Sgl_set_sign(sgl_value,sign) Deposit_ssign(sgl_value,sign)
+#define Sgl_invert_sign(sgl_value) \
+ Deposit_ssign(sgl_value,~Ssign(sgl_value))
+#define Sgl_setone_sign(sgl_value) Deposit_ssign(sgl_value,1)
+#define Sgl_setone_lowmantissa(sgl_value) Deposit_slow(sgl_value,1)
+#define Sgl_setzero_sign(sgl_value) Sall(sgl_value) &= 0x7fffffff
+#define Sgl_setzero_exponent(sgl_value) Sall(sgl_value) &= 0x807fffff
+#define Sgl_setzero_mantissa(sgl_value) Sall(sgl_value) &= 0xff800000
+#define Sgl_setzero_exponentmantissa(sgl_value) Sall(sgl_value) &= 0x80000000
+#define Sgl_setzero(sgl_value) Sall(sgl_value) = 0
+#define Sgl_setnegativezero(sgl_value) Sall(sgl_value) = (unsigned int)1 << 31
+
+/* Use following macro for both overflow & underflow conditions */
+#define ovfl -
+#define unfl +
+#define Sgl_setwrapped_exponent(sgl_value,exponent,op) \
+ Deposit_sexponent(sgl_value,(exponent op SGL_WRAP))
+
+#define Sgl_setlargestpositive(sgl_value) \
+ Sall(sgl_value) = ((SGL_EMAX+SGL_BIAS) << (32-(1+SGL_EXP_LENGTH))) \
+ | ((1<<(32-(1+SGL_EXP_LENGTH))) - 1 )
+#define Sgl_setlargestnegative(sgl_value) \
+ Sall(sgl_value) = ((SGL_EMAX+SGL_BIAS) << (32-(1+SGL_EXP_LENGTH))) \
+ | ((1<<(32-(1+SGL_EXP_LENGTH))) - 1 ) \
+ | ((unsigned int)1<<31)
+
+#define Sgl_setnegativeinfinity(sgl_value) \
+ Sall(sgl_value) = \
+ ((1<<SGL_EXP_LENGTH) | SGL_INFINITY_EXPONENT) << (32-(1+SGL_EXP_LENGTH))
+#define Sgl_setlargest(sgl_value,sign) \
+ Sall(sgl_value) = (unsigned int)sign << 31 | \
+ (((SGL_EMAX+SGL_BIAS) << (32-(1+SGL_EXP_LENGTH))) \
+ | ((1 << (32-(1+SGL_EXP_LENGTH))) - 1 ))
+#define Sgl_setlargest_exponentmantissa(sgl_value) \
+ Sall(sgl_value) = Sall(sgl_value) & ((unsigned int)1<<31) | \
+ (((SGL_EMAX+SGL_BIAS) << (32-(1+SGL_EXP_LENGTH))) \
+ | ((1 << (32-(1+SGL_EXP_LENGTH))) - 1 ))
+
+/* The high bit is always zero so arithmetic or logical shifts will work. */
+#define Sgl_right_align(srcdst,shift,extent) \
+ /* sgl_floating_point srcdst; int shift; extension extent */ \
+ if (shift < 32) { \
+ Extall(extent) = Sall(srcdst) << (32-(shift)); \
+ Sall(srcdst) >>= shift; \
+ } \
+ else { \
+ Extall(extent) = Sall(srcdst); \
+ Sall(srcdst) = 0; \
+ }
+#define Sgl_hiddenhigh3mantissa(sgl_value) Shiddenhigh3mantissa(sgl_value)
+#define Sgl_hidden(sgl_value) Shidden(sgl_value)
+#define Sgl_lowmantissa(sgl_value) Slow(sgl_value)
+
+/* The left argument is never smaller than the right argument */
+#define Sgl_subtract(sgl_left,sgl_right,sgl_result) \
+ Sall(sgl_result) = Sall(sgl_left) - Sall(sgl_right)
+
+/* Subtract right augmented with extension from left augmented with zeros and
+ * store into result and extension. */
+#define Sgl_subtract_withextension(left,right,extent,result) \
+ /* sgl_floating_point left,right,result; extension extent */ \
+ Sgl_subtract(left,right,result); \
+ if((Extall(extent) = 0-Extall(extent))) \
+ Sall(result) = Sall(result)-1
+
+#define Sgl_addition(sgl_left,sgl_right,sgl_result) \
+ Sall(sgl_result) = Sall(sgl_left) + Sall(sgl_right)
+
+#define Sgl_xortointp1(left,right,result) \
+ result = Sall(left) XOR Sall(right);
+
+#define Sgl_xorfromintp1(left,right,result) \
+ Sall(result) = left XOR Sall(right)
+
+/* Need to Initialize */
+#define Sgl_makequietnan(dest) \
+ Sall(dest) = ((SGL_EMAX+SGL_BIAS)+1)<< (32-(1+SGL_EXP_LENGTH)) \
+ | (1<<(32-(1+SGL_EXP_LENGTH+2)))
+#define Sgl_makesignalingnan(dest) \
+ Sall(dest) = ((SGL_EMAX+SGL_BIAS)+1)<< (32-(1+SGL_EXP_LENGTH)) \
+ | (1<<(32-(1+SGL_EXP_LENGTH+1)))
+
+#define Sgl_normalize(sgl_opnd,exponent) \
+ while(Sgl_iszero_hiddenhigh7mantissa(sgl_opnd)) { \
+ Sgl_leftshiftby8(sgl_opnd); \
+ exponent -= 8; \
+ } \
+ if(Sgl_iszero_hiddenhigh3mantissa(sgl_opnd)) { \
+ Sgl_leftshiftby4(sgl_opnd); \
+ exponent -= 4; \
+ } \
+ while(Sgl_iszero_hidden(sgl_opnd)) { \
+ Sgl_leftshiftby1(sgl_opnd); \
+ exponent -= 1; \
+ }
+
+#define Sgl_setoverflow(sgl_opnd) \
+ /* set result to infinity or largest number */ \
+ switch (Rounding_mode()) { \
+ case ROUNDPLUS: \
+ if (Sgl_isone_sign(sgl_opnd)) { \
+ Sgl_setlargestnegative(sgl_opnd); \
+ } \
+ else { \
+ Sgl_setinfinitypositive(sgl_opnd); \
+ } \
+ break; \
+ case ROUNDMINUS: \
+ if (Sgl_iszero_sign(sgl_opnd)) { \
+ Sgl_setlargestpositive(sgl_opnd); \
+ } \
+ else { \
+ Sgl_setinfinitynegative(sgl_opnd); \
+ } \
+ break; \
+ case ROUNDNEAREST: \
+ Sgl_setinfinity_exponentmantissa(sgl_opnd); \
+ break; \
+ case ROUNDZERO: \
+ Sgl_setlargest_exponentmantissa(sgl_opnd); \
+ }
+
+#define Sgl_denormalize(opnd,exponent,guard,sticky,inexact) \
+ Sgl_clear_signexponent_set_hidden(opnd); \
+ if (exponent >= (1 - SGL_P)) { \
+ guard = (Sall(opnd) >> -exponent) & 1; \
+ if (exponent < 0) sticky |= Sall(opnd) << (32+exponent); \
+ inexact = guard | sticky; \
+ Sall(opnd) >>= (1-exponent); \
+ } \
+ else { \
+ guard = 0; \
+ sticky |= Sall(opnd); \
+ inexact = sticky; \
+ Sgl_setzero(opnd); \
+ }
+
+/*
+ * The fused multiply-add instructions require a single extended format
+ * with 48 bits of mantissa.
+ */
+#define SGLEXT_THRESHOLD 48
+
+#define Sglext_setzero(valA,valB) \
+ Sextallp1(valA) = 0; Sextallp2(valB) = 0
+
+#define Sglext_isnotzero_mantissap2(valB) (Sextallp2(valB)!=0)
+#define Sglext_isone_lowp1(val) (Sextlowp1(val)!=0)
+#define Sglext_isone_highp2(val) (Sexthighp2(val)!=0)
+#define Sglext_isnotzero_low31p2(val) (Sextlow31p2(val)!=0)
+#define Sglext_iszero(valA,valB) (Sextallp1(valA)==0 && Sextallp2(valB)==0)
+
+#define Sgl_copytoptr(src,destptr) *destptr = src
+#define Sgl_copyfromptr(srcptr,dest) dest = *srcptr
+#define Sglext_copy(srca,srcb,desta,destb) \
+ Sextallp1(desta) = Sextallp1(srca); \
+ Sextallp2(destb) = Sextallp2(srcb)
+#define Sgl_copyto_sglext(src1,dest1,dest2) \
+ Sextallp1(dest1) = Sall(src1); Sextallp2(dest2) = 0
+
+#define Sglext_swap_lower(leftp2,rightp2) \
+ Sextallp2(leftp2) = Sextallp2(leftp2) XOR Sextallp2(rightp2); \
+ Sextallp2(rightp2) = Sextallp2(leftp2) XOR Sextallp2(rightp2); \
+ Sextallp2(leftp2) = Sextallp2(leftp2) XOR Sextallp2(rightp2)
+
+#define Sglext_setone_lowmantissap2(value) Deposit_dlowp2(value,1)
+
+/* The high bit is always zero so arithmetic or logical shifts will work. */
+#define Sglext_right_align(srcdstA,srcdstB,shift) \
+ {int shiftamt, sticky; \
+ shiftamt = shift % 32; \
+ sticky = 0; \
+ switch (shift/32) { \
+ case 0: if (shiftamt > 0) { \
+ sticky = Sextallp2(srcdstB) << 32 - (shiftamt); \
+ Variable_shift_double(Sextallp1(srcdstA), \
+ Sextallp2(srcdstB),shiftamt,Sextallp2(srcdstB)); \
+ Sextallp1(srcdstA) >>= shiftamt; \
+ } \
+ break; \
+ case 1: if (shiftamt > 0) { \
+ sticky = (Sextallp1(srcdstA) << 32 - (shiftamt)) | \
+ Sextallp2(srcdstB); \
+ } \
+ else { \
+ sticky = Sextallp2(srcdstB); \
+ } \
+ Sextallp2(srcdstB) = Sextallp1(srcdstA) >> shiftamt; \
+ Sextallp1(srcdstA) = 0; \
+ break; \
+ } \
+ if (sticky) Sglext_setone_lowmantissap2(srcdstB); \
+ }
+
+/* The left argument is never smaller than the right argument */
+#define Sglext_subtract(lefta,leftb,righta,rightb,resulta,resultb) \
+ if( Sextallp2(rightb) > Sextallp2(leftb) ) Sextallp1(lefta)--; \
+ Sextallp2(resultb) = Sextallp2(leftb) - Sextallp2(rightb); \
+ Sextallp1(resulta) = Sextallp1(lefta) - Sextallp1(righta)
+
+#define Sglext_addition(lefta,leftb,righta,rightb,resulta,resultb) \
+ /* If the sum of the low words is less than either source, then \
+ * an overflow into the next word occurred. */ \
+ if ((Sextallp2(resultb) = Sextallp2(leftb)+Sextallp2(rightb)) < \
+ Sextallp2(rightb)) \
+ Sextallp1(resulta) = Sextallp1(lefta)+Sextallp1(righta)+1; \
+ else Sextallp1(resulta) = Sextallp1(lefta)+Sextallp1(righta)
+
+
+#define Sglext_arithrightshiftby1(srcdstA,srcdstB) \
+ Shiftdouble(Sextallp1(srcdstA),Sextallp2(srcdstB),1,Sextallp2(srcdstB)); \
+ Sextallp1(srcdstA) = (int)Sextallp1(srcdstA) >> 1
+
+#define Sglext_leftshiftby8(valA,valB) \
+ Shiftdouble(Sextallp1(valA),Sextallp2(valB),24,Sextallp1(valA)); \
+ Sextallp2(valB) <<= 8
+#define Sglext_leftshiftby4(valA,valB) \
+ Shiftdouble(Sextallp1(valA),Sextallp2(valB),28,Sextallp1(valA)); \
+ Sextallp2(valB) <<= 4
+#define Sglext_leftshiftby3(valA,valB) \
+ Shiftdouble(Sextallp1(valA),Sextallp2(valB),29,Sextallp1(valA)); \
+ Sextallp2(valB) <<= 3
+#define Sglext_leftshiftby2(valA,valB) \
+ Shiftdouble(Sextallp1(valA),Sextallp2(valB),30,Sextallp1(valA)); \
+ Sextallp2(valB) <<= 2
+#define Sglext_leftshiftby1(valA,valB) \
+ Shiftdouble(Sextallp1(valA),Sextallp2(valB),31,Sextallp1(valA)); \
+ Sextallp2(valB) <<= 1
+
+#define Sglext_rightshiftby4(valueA,valueB) \
+ Shiftdouble(Sextallp1(valueA),Sextallp2(valueB),4,Sextallp2(valueB)); \
+ Sextallp1(valueA) >>= 4
+#define Sglext_rightshiftby3(valueA,valueB) \
+ Shiftdouble(Sextallp1(valueA),Sextallp2(valueB),3,Sextallp2(valueB)); \
+ Sextallp1(valueA) >>= 3
+#define Sglext_rightshiftby1(valueA,valueB) \
+ Shiftdouble(Sextallp1(valueA),Sextallp2(valueB),1,Sextallp2(valueB)); \
+ Sextallp1(valueA) >>= 1
+
+#define Sglext_xortointp1(left,right,result) Sgl_xortointp1(left,right,result)
+#define Sglext_xorfromintp1(left,right,result) \
+ Sgl_xorfromintp1(left,right,result)
+#define Sglext_copytoint_exponentmantissa(src,dest) \
+ Sgl_copytoint_exponentmantissa(src,dest)
+#define Sglext_ismagnitudeless(signlessleft,signlessright) \
+ Sgl_ismagnitudeless(signlessleft,signlessright)
+
+#define Sglext_set_sign(dbl_value,sign) Sgl_set_sign(dbl_value,sign)
+#define Sglext_clear_signexponent_set_hidden(srcdst) \
+ Sgl_clear_signexponent_set_hidden(srcdst)
+#define Sglext_clear_signexponent(srcdst) Sgl_clear_signexponent(srcdst)
+#define Sglext_clear_sign(srcdst) Sgl_clear_sign(srcdst)
+#define Sglext_isone_hidden(dbl_value) Sgl_isone_hidden(dbl_value)
+
+#define Sglext_denormalize(opndp1,opndp2,exponent,is_tiny) \
+ {int sticky; \
+ is_tiny = TRUE; \
+ if (exponent == 0 && Sextallp2(opndp2)) { \
+ switch (Rounding_mode()) { \
+ case ROUNDPLUS: \
+ if (Sgl_iszero_sign(opndp1)) \
+ if (Sgl_isone_hiddenoverflow(opndp1 + 1)) \
+ is_tiny = FALSE; \
+ break; \
+ case ROUNDMINUS: \
+ if (Sgl_isone_sign(opndp1)) { \
+ if (Sgl_isone_hiddenoverflow(opndp1 + 1)) \
+ is_tiny = FALSE; \
+ } \
+ break; \
+ case ROUNDNEAREST: \
+ if (Sglext_isone_highp2(opndp2) && \
+ (Sglext_isone_lowp1(opndp1) || \
+ Sglext_isnotzero_low31p2(opndp2))) \
+ if (Sgl_isone_hiddenoverflow(opndp1 + 1)) \
+ is_tiny = FALSE; \
+ break; \
+ } \
+ } \
+ Sglext_clear_signexponent_set_hidden(opndp1); \
+ if (exponent >= (1-DBL_P)) { \
+ if (exponent >= -31) { \
+ if (exponent > -31) { \
+ sticky = Sextallp2(opndp2) << 31+exponent; \
+ Variable_shift_double(opndp1,opndp2,1-exponent,opndp2); \
+ Sextallp1(opndp1) >>= 1-exponent; \
+ } \
+ else { \
+ sticky = Sextallp2(opndp2); \
+ Sextallp2(opndp2) = Sextallp1(opndp1); \
+ Sextallp1(opndp1) = 0; \
+ } \
+ } \
+ else { \
+ sticky = (Sextallp1(opndp1) << 31+exponent) | \
+ Sextallp2(opndp2); \
+ Sextallp2(opndp2) = Sextallp1(opndp1) >> -31-exponent; \
+ Sextallp1(opndp1) = 0; \
+ } \
+ } \
+ else { \
+ sticky = Sextallp1(opndp1) | Sextallp2(opndp2); \
+ Sglext_setzero(opndp1,opndp2); \
+ } \
+ if (sticky) Sglext_setone_lowmantissap2(opndp2); \
+ exponent = 0; \
+ }
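
For reference, a minimal hosted-C sketch (not part of the patch) of the bit
patterns that Sgl_setinfinity(), Sgl_makequietnan() and Sgl_makesignalingnan()
above construct, assuming the standard single-precision parameters
SGL_EXP_LENGTH = 8, SGL_BIAS = SGL_EMAX = 127 and SGL_INFINITY_EXPONENT = 0xff
(assumed here; the real values are defined in the accompanying headers):

#include <stdio.h>

#define EXP_LEN 8				/* assumed SGL_EXP_LENGTH */
#define INF_EXP 0xffu				/* assumed SGL_INFINITY_EXPONENT */

int main(void)
{
	unsigned int pos_inf = INF_EXP << (32 - (1 + EXP_LEN));		/* 0x7f800000 */
	unsigned int neg_inf = pos_inf | (1u << 31);			/* 0xff800000 */
	unsigned int qnan = pos_inf | (1u << (32 - (1 + EXP_LEN + 2)));	/* 0x7fa00000 */
	unsigned int snan = pos_inf | (1u << (32 - (1 + EXP_LEN + 1)));	/* 0x7fc00000 */

	printf("+inf %08x  -inf %08x  qNaN %08x  sNaN %08x\n",
	       pos_inf, neg_inf, qnan, snan);
	return 0;
}

Note the PA-RISC NaN convention visible in these macros: the most significant
mantissa bit set marks a signaling NaN, the reverse of what most other
architectures use.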
diff --git a/arch/parisc/mm/Makefile b/arch/parisc/mm/Makefile
new file mode 100644
index 00000000..758ceefb
--- /dev/null
+++ b/arch/parisc/mm/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for arch/parisc/mm
+#
+
+obj-y := init.o fault.o ioremap.o
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
new file mode 100644
index 00000000..18162ce4
--- /dev/null
+++ b/arch/parisc/mm/fault.c
@@ -0,0 +1,270 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
+ * Copyright 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
+ * Copyright 1999 Hewlett Packard Co.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include <asm/uaccess.h>
+#include <asm/traps.h>
+
+#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
+ /* dumped to the console via printk) */
+
+
+/* Various important other fields */
+#define bit22set(x) (x & 0x00000200)
+#define bits23_25set(x) (x & 0x000001c0)
+#define isGraphicsFlushRead(x) ((x & 0xfc003fdf) == 0x04001a80)
+ /* extended opcode is 0x6a */
+
+#define BITSSET 0x1c0 /* for identifying LDCW */
+
+
+DEFINE_PER_CPU(struct exception_data, exception_data);
+
+/*
+ * parisc_acctyp(unsigned int inst) --
+ * Given a PA-RISC memory access instruction, determine if the
+ * instruction would perform a memory read or memory write
+ * operation.
+ *
+ * This function assumes that the given instruction is a memory access
+ * instruction (i.e. you should really only call it if you know that
+ * the instruction has generated some sort of a memory access fault).
+ *
+ * Returns:
+ * VM_READ if read operation
+ * VM_WRITE if write operation
+ * VM_EXEC if execute operation
+ */
+static unsigned long
+parisc_acctyp(unsigned long code, unsigned int inst)
+{
+ if (code == 6 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+ case 0x40000000: /* load */
+ case 0x50000000: /* new load */
+ return VM_READ;
+
+ case 0x60000000: /* store */
+ case 0x70000000: /* new store */
+ return VM_WRITE;
+
+ case 0x20000000: /* coproc */
+ case 0x30000000: /* coproc2 */
+ if (bit22set(inst))
+ return VM_WRITE;
+
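+		/* bit 22 clear: fall through */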
+ case 0x0: /* indexed/memory management */
+ if (bit22set(inst)) {
+ /*
+ * Check for the 'Graphics Flush Read' instruction.
+ * It resembles an FDC instruction, except for bits
+ * 20 and 21. Any combination other than zero will
+ * utilize the block mover functionality on some
+ * older PA-RISC platforms. The case where a block
+ * move is performed from VM to graphics IO space
+ * should be treated as a READ.
+ *
+ * The significance of bits 20,21 in the FDC
+ * instruction is:
+ *
+ * 00 Flush data cache (normal instruction behavior)
+ * 01 Graphics flush write (IO space -> VM)
+ * 10 Graphics flush read (VM -> IO space)
+ * 11 Graphics flush read/write (VM <-> IO space)
+ */
+ if (isGraphicsFlushRead(inst))
+ return VM_READ;
+ return VM_WRITE;
+ } else {
+ /*
+ * Check for LDCWX and LDCWS (semaphore instructions).
+ * If bits 23 through 25 are all 1's it is one of
+ * the above two instructions and is a write.
+ *
+ * Note: With the limited bits we are looking at,
+ * this will also catch PROBEW and PROBEWI. However,
+ * these should never get in here because they don't
+ * generate exceptions of the type:
+ * Data TLB miss fault/data page fault
+ * Data memory protection trap
+ */
+ if (bits23_25set(inst) == BITSSET)
+ return VM_WRITE;
+ }
+ return VM_READ; /* Default */
+ }
+ return VM_READ; /* Default */
+}
+
+#undef bit22set
+#undef bits23_25set
+#undef isGraphicsFlushRead
+#undef BITSSET
+
+
+#if 0
+/* This is the treewalk to find a vma which is the highest that has
+ * a start < addr. We're using find_vma_prev instead right now, but
+ * we might want to use this at some point in the future. Probably
+ * not, but I want it committed to CVS so I don't lose it :-)
+ */
+ while (tree != vm_avl_empty) {
+ if (tree->vm_start > addr) {
+ tree = tree->vm_avl_left;
+ } else {
+ prev = tree;
+ if (prev->vm_next == NULL)
+ break;
+ if (prev->vm_next->vm_start > addr)
+ break;
+ tree = tree->vm_avl_right;
+ }
+ }
+#endif
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fix;
+
+ fix = search_exception_tables(regs->iaoq[0]);
+ if (fix) {
+ struct exception_data *d;
+ d = &__get_cpu_var(exception_data);
+ d->fault_ip = regs->iaoq[0];
+ d->fault_space = regs->isr;
+ d->fault_addr = regs->ior;
+
+ regs->iaoq[0] = ((fix->fixup) & ~3);
+ /*
+ * NOTE: In some cases the faulting instruction
+ * may be in the delay slot of a branch. We
+ * don't want to take the branch, so we don't
+ * increment iaoq[1], instead we set it to be
+ * iaoq[0]+4, and clear the B bit in the PSW
+ */
+ regs->iaoq[1] = regs->iaoq[0] + 4;
+ regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
+
+ return 1;
+ }
+
+ return 0;
+}
+
+void do_page_fault(struct pt_regs *regs, unsigned long code,
+ unsigned long address)
+{
+ struct vm_area_struct *vma, *prev_vma;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ unsigned long acc_type;
+ int fault;
+
+ if (in_atomic() || !mm)
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma_prev(mm, address, &prev_vma);
+ if (!vma || address < vma->vm_start)
+ goto check_expansion;
+/*
+ * Ok, we have a good vm_area for this memory access. We still need to
+ * check the access permissions.
+ */
+
+good_area:
+
+ acc_type = parisc_acctyp(code,regs->iir);
+
+ if ((vma->vm_flags & acc_type) != acc_type)
+ goto bad_area;
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+ * sure we exit gracefully rather than endlessly redo the
+ * fault.
+ */
+
+ fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ /*
+ * We hit a shared mapping outside of the file, or some
+ * other thing happened to us that made us unable to
+ * handle the page fault gracefully.
+ */
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto bad_area;
+ BUG();
+ }
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ up_read(&mm->mmap_sem);
+ return;
+
+check_expansion:
+ vma = prev_vma;
+ if (vma && (expand_stack(vma, address) == 0))
+ goto good_area;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
+ struct siginfo si;
+
+#ifdef PRINT_USER_FAULTS
+ printk(KERN_DEBUG "\n");
+ printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
+ task_pid_nr(tsk), tsk->comm, code, address);
+ if (vma) {
+ printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
+ vma->vm_start, vma->vm_end);
+ }
+ show_regs(regs);
+#endif
+ /* FIXME: actually we need to get the signo and code correct */
+ si.si_signo = SIGSEGV;
+ si.si_errno = 0;
+ si.si_code = SEGV_MAPERR;
+ si.si_addr = (void __user *) address;
+ force_sig_info(SIGSEGV, &si, current);
+ return;
+ }
+
+no_context:
+
+ if (!user_mode(regs) && fixup_exception(regs)) {
+ return;
+ }
+
+ parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
+
+ out_of_memory:
+ up_read(&mm->mmap_sem);
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+}
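
As a quick reference (not part of the patch), a hosted-C sketch of the
major-opcode test parisc_acctyp() applies above: the top four bits of the
instruction word select the access class, loads in the 0x4/0x5 groups and
stores in the 0x6/0x7 groups. The sample words below are arbitrary and chosen
only for their top nibble.

#include <stdio.h>

static const char *acctyp(unsigned int inst)
{
	switch (inst & 0xf0000000) {
	case 0x40000000:			/* load */
	case 0x50000000:			/* new load */
		return "VM_READ";
	case 0x60000000:			/* store */
	case 0x70000000:			/* new store */
		return "VM_WRITE";
	default:				/* coproc/indexed forms need more decoding */
		return "needs further decoding";
	}
}

int main(void)
{
	printf("%s\n", acctyp(0x48000000));	/* top nibble 0x4 -> read  */
	printf("%s\n", acctyp(0x68000000));	/* top nibble 0x6 -> write */
	return 0;
}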
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
new file mode 100644
index 00000000..82f364e2
--- /dev/null
+++ b/arch/parisc/mm/init.c
@@ -0,0 +1,1109 @@
+/*
+ * linux/arch/parisc/mm/init.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright 1999 SuSE GmbH
+ * changed by Philipp Rumpf
+ * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
+ * Copyright 2004 Randolph Chung (tausq@debian.org)
+ * Copyright 2006-2007 Helge Deller (deller@gmx.de)
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h> /* for hppa_dma_ops and pcxl_dma_ops */
+#include <linux/initrd.h>
+#include <linux/swap.h>
+#include <linux/unistd.h>
+#include <linux/nodemask.h> /* for node_online_map */
+#include <linux/pagemap.h> /* for release_pages and page_cache_release */
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+#include <asm/pdc_chassis.h>
+#include <asm/mmzone.h>
+#include <asm/sections.h>
+
+extern int data_start;
+
+#ifdef CONFIG_DISCONTIGMEM
+struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
+unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
+#endif
+
+static struct resource data_resource = {
+ .name = "Kernel data",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+static struct resource code_resource = {
+ .name = "Kernel code",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+static struct resource pdcdata_resource = {
+ .name = "PDC data (Page Zero)",
+ .start = 0,
+ .end = 0x9ff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
+
+/* The following array is initialized from the firmware specific
+ * information retrieved in kernel/inventory.c.
+ */
+
+physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
+int npmem_ranges __read_mostly;
+
+#ifdef CONFIG_64BIT
+#define MAX_MEM (~0UL)
+#else /* !CONFIG_64BIT */
+#define MAX_MEM (3584U*1024U*1024U)
+#endif /* !CONFIG_64BIT */
+
+static unsigned long mem_limit __read_mostly = MAX_MEM;
+
+static void __init mem_limit_func(void)
+{
+ char *cp, *end;
+ unsigned long limit;
+
+ /* We need this before __setup() functions are called */
+
+ limit = MAX_MEM;
+ for (cp = boot_command_line; *cp; ) {
+ if (memcmp(cp, "mem=", 4) == 0) {
+ cp += 4;
+ limit = memparse(cp, &end);
+ if (end != cp)
+ break;
+ cp = end;
+ } else {
+ while (*cp != ' ' && *cp)
+ ++cp;
+ while (*cp == ' ')
+ ++cp;
+ }
+ }
+
+ if (limit < mem_limit)
+ mem_limit = limit;
+}
+
+#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
+
+static void __init setup_bootmem(void)
+{
+ unsigned long bootmap_size;
+ unsigned long mem_max;
+ unsigned long bootmap_pages;
+ unsigned long bootmap_start_pfn;
+ unsigned long bootmap_pfn;
+#ifndef CONFIG_DISCONTIGMEM
+ physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
+ int npmem_holes;
+#endif
+ int i, sysram_resource_count;
+
+ disable_sr_hashing(); /* Turn off space register hashing */
+
+ /*
+ * Sort the ranges. Since the number of ranges is typically
+ * small, and performance is not an issue here, just do
+ * a simple insertion sort.
+ */
+
+ for (i = 1; i < npmem_ranges; i++) {
+ int j;
+
+ for (j = i; j > 0; j--) {
+ unsigned long tmp;
+
+ if (pmem_ranges[j-1].start_pfn <
+ pmem_ranges[j].start_pfn) {
+
+ break;
+ }
+ tmp = pmem_ranges[j-1].start_pfn;
+ pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
+ pmem_ranges[j].start_pfn = tmp;
+ tmp = pmem_ranges[j-1].pages;
+ pmem_ranges[j-1].pages = pmem_ranges[j].pages;
+ pmem_ranges[j].pages = tmp;
+ }
+ }
+
+#ifndef CONFIG_DISCONTIGMEM
+ /*
+ * Throw out ranges that are too far apart (controlled by
+ * MAX_GAP).
+ */
+
+ for (i = 1; i < npmem_ranges; i++) {
+ if (pmem_ranges[i].start_pfn -
+ (pmem_ranges[i-1].start_pfn +
+ pmem_ranges[i-1].pages) > MAX_GAP) {
+ npmem_ranges = i;
+ printk("Large gap in memory detected (%ld pages). "
+ "Consider turning on CONFIG_DISCONTIGMEM\n",
+ pmem_ranges[i].start_pfn -
+ (pmem_ranges[i-1].start_pfn +
+ pmem_ranges[i-1].pages));
+ break;
+ }
+ }
+#endif
+
+ if (npmem_ranges > 1) {
+
+ /* Print the memory ranges */
+
+ printk(KERN_INFO "Memory Ranges:\n");
+
+ for (i = 0; i < npmem_ranges; i++) {
+ unsigned long start;
+ unsigned long size;
+
+ size = (pmem_ranges[i].pages << PAGE_SHIFT);
+ start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
+ printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
+ i,start, start + (size - 1), size >> 20);
+ }
+ }
+
+ sysram_resource_count = npmem_ranges;
+ for (i = 0; i < sysram_resource_count; i++) {
+ struct resource *res = &sysram_resources[i];
+ res->name = "System RAM";
+ res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
+ res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ request_resource(&iomem_resource, res);
+ }
+
+ /*
+ * For 32 bit kernels we limit the amount of memory we can
+ * support, in order to preserve enough kernel address space
+ * for other purposes. For 64 bit kernels we don't normally
+ * limit the memory, but this mechanism can be used to
+ * artificially limit the amount of memory (and it is written
+ * to work with multiple memory ranges).
+ */
+
+ mem_limit_func(); /* check for "mem=" argument */
+
+ mem_max = 0;
+ num_physpages = 0;
+ for (i = 0; i < npmem_ranges; i++) {
+ unsigned long rsize;
+
+ rsize = pmem_ranges[i].pages << PAGE_SHIFT;
+ if ((mem_max + rsize) > mem_limit) {
+ printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
+ if (mem_max == mem_limit)
+ npmem_ranges = i;
+ else {
+ pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
+ - (mem_max >> PAGE_SHIFT);
+ npmem_ranges = i + 1;
+ mem_max = mem_limit;
+ }
+ num_physpages += pmem_ranges[i].pages;
+ break;
+ }
+ num_physpages += pmem_ranges[i].pages;
+ mem_max += rsize;
+ }
+
+ printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);
+
+#ifndef CONFIG_DISCONTIGMEM
+ /* Merge the ranges, keeping track of the holes */
+
+ {
+ unsigned long end_pfn;
+ unsigned long hole_pages;
+
+ npmem_holes = 0;
+ end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
+ for (i = 1; i < npmem_ranges; i++) {
+
+ hole_pages = pmem_ranges[i].start_pfn - end_pfn;
+ if (hole_pages) {
+ pmem_holes[npmem_holes].start_pfn = end_pfn;
+ pmem_holes[npmem_holes++].pages = hole_pages;
+ end_pfn += hole_pages;
+ }
+ end_pfn += pmem_ranges[i].pages;
+ }
+
+ pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
+ npmem_ranges = 1;
+ }
+#endif
+
+ bootmap_pages = 0;
+ for (i = 0; i < npmem_ranges; i++)
+ bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);
+
+ bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;
+
+#ifdef CONFIG_DISCONTIGMEM
+ for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
+ memset(NODE_DATA(i), 0, sizeof(pg_data_t));
+ NODE_DATA(i)->bdata = &bootmem_node_data[i];
+ }
+ memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
+
+ for (i = 0; i < npmem_ranges; i++) {
+ node_set_state(i, N_NORMAL_MEMORY);
+ node_set_online(i);
+ }
+#endif
+
+ /*
+ * Initialize and free the full range of memory in each range.
+ * Note that the only writing these routines do is to the bootmap,
+ * and we've made sure to locate the bootmap properly so that they
+ * won't be writing over anything important.
+ */
+
+ bootmap_pfn = bootmap_start_pfn;
+ max_pfn = 0;
+ for (i = 0; i < npmem_ranges; i++) {
+ unsigned long start_pfn;
+ unsigned long npages;
+
+ start_pfn = pmem_ranges[i].start_pfn;
+ npages = pmem_ranges[i].pages;
+
+ bootmap_size = init_bootmem_node(NODE_DATA(i),
+ bootmap_pfn,
+ start_pfn,
+ (start_pfn + npages) );
+ free_bootmem_node(NODE_DATA(i),
+ (start_pfn << PAGE_SHIFT),
+ (npages << PAGE_SHIFT) );
+ bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if ((start_pfn + npages) > max_pfn)
+ max_pfn = start_pfn + npages;
+ }
+
+ /* IOMMU is always used to access "high mem" on those boxes
+ * that can support enough mem that a PCI device couldn't
+ * directly DMA to any physical addresses.
+ * ISA DMA support will need to revisit this.
+ */
+ max_low_pfn = max_pfn;
+
+ /* bootmap sizing messed up? */
+ BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);
+
+ /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
+
+#define PDC_CONSOLE_IO_IODC_SIZE 32768
+
+ reserve_bootmem_node(NODE_DATA(0), 0UL,
+ (unsigned long)(PAGE0->mem_free +
+ PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
+ reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
+ (unsigned long)(_end - _text), BOOTMEM_DEFAULT);
+ reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
+ ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
+ BOOTMEM_DEFAULT);
+
+#ifndef CONFIG_DISCONTIGMEM
+
+ /* reserve the holes */
+
+ for (i = 0; i < npmem_holes; i++) {
+ reserve_bootmem_node(NODE_DATA(0),
+ (pmem_holes[i].start_pfn << PAGE_SHIFT),
+ (pmem_holes[i].pages << PAGE_SHIFT),
+ BOOTMEM_DEFAULT);
+ }
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start) {
+ printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
+ if (__pa(initrd_start) < mem_max) {
+ unsigned long initrd_reserve;
+
+ if (__pa(initrd_end) > mem_max) {
+ initrd_reserve = mem_max - __pa(initrd_start);
+ } else {
+ initrd_reserve = initrd_end - initrd_start;
+ }
+ initrd_below_start_ok = 1;
+ printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
+
+ reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
+ initrd_reserve, BOOTMEM_DEFAULT);
+ }
+ }
+#endif
+
+ data_resource.start = virt_to_phys(&data_start);
+ data_resource.end = virt_to_phys(_end) - 1;
+ code_resource.start = virt_to_phys(_text);
+ code_resource.end = virt_to_phys(&data_start)-1;
+
+ /* We don't know which region the kernel will be in, so try
+ * all of them.
+ */
+ for (i = 0; i < sysram_resource_count; i++) {
+ struct resource *res = &sysram_resources[i];
+ request_resource(res, &code_resource);
+ request_resource(res, &data_resource);
+ }
+ request_resource(&sysram_resources[0], &pdcdata_resource);
+}
+
+static void __init map_pages(unsigned long start_vaddr,
+ unsigned long start_paddr, unsigned long size,
+ pgprot_t pgprot, int force)
+{
+ pgd_t *pg_dir;
+ pmd_t *pmd;
+ pte_t *pg_table;
+ unsigned long end_paddr;
+ unsigned long start_pmd;
+ unsigned long start_pte;
+ unsigned long tmp1;
+ unsigned long tmp2;
+ unsigned long address;
+ unsigned long vaddr;
+ unsigned long ro_start;
+ unsigned long ro_end;
+ unsigned long fv_addr;
+ unsigned long gw_addr;
+ extern const unsigned long fault_vector_20;
+ extern void * const linux_gateway_page;
+
+ ro_start = __pa((unsigned long)_text);
+ ro_end = __pa((unsigned long)&data_start);
+ fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
+ gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
+
+ end_paddr = start_paddr + size;
+
+ pg_dir = pgd_offset_k(start_vaddr);
+
+#if PTRS_PER_PMD == 1
+ start_pmd = 0;
+#else
+ start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
+#endif
+ start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+
+ address = start_paddr;
+ vaddr = start_vaddr;
+ while (address < end_paddr) {
+#if PTRS_PER_PMD == 1
+ pmd = (pmd_t *)__pa(pg_dir);
+#else
+ pmd = (pmd_t *)pgd_address(*pg_dir);
+
+ /*
+ * pmd is physical at this point
+ */
+
+ if (!pmd) {
+ pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
+ pmd = (pmd_t *) __pa(pmd);
+ }
+
+ pgd_populate(NULL, pg_dir, __va(pmd));
+#endif
+ pg_dir++;
+
+ /* now change pmd to kernel virtual addresses */
+
+ pmd = (pmd_t *)__va(pmd) + start_pmd;
+ for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
+
+ /*
+ * pg_table is physical at this point
+ */
+
+ pg_table = (pte_t *)pmd_address(*pmd);
+ if (!pg_table) {
+ pg_table = (pte_t *)
+ alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
+ pg_table = (pte_t *) __pa(pg_table);
+ }
+
+ pmd_populate_kernel(NULL, pmd, __va(pg_table));
+
+ /* now change pg_table to kernel virtual addresses */
+
+ pg_table = (pte_t *) __va(pg_table) + start_pte;
+ for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
+ pte_t pte;
+
+ /*
+ * Map the fault vector writable so we can
+ * write the HPMC checksum.
+ */
+ if (force)
+ pte = __mk_pte(address, pgprot);
+ else if (core_kernel_text(vaddr) &&
+ address != fv_addr)
+ pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+ else
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+ if (address >= ro_start && address < ro_end
+ && address != fv_addr
+ && address != gw_addr)
+ pte = __mk_pte(address, PAGE_KERNEL_RO);
+ else
+#endif
+ pte = __mk_pte(address, pgprot);
+
+ if (address >= end_paddr) {
+ if (force)
+ break;
+ else
+ pte_val(pte) = 0;
+ }
+
+ set_pte(pg_table, pte);
+
+ address += PAGE_SIZE;
+ vaddr += PAGE_SIZE;
+ }
+ start_pte = 0;
+
+ if (address >= end_paddr)
+ break;
+ }
+ start_pmd = 0;
+ }
+}
+
+void free_initmem(void)
+{
+ unsigned long addr;
+ unsigned long init_begin = (unsigned long)__init_begin;
+ unsigned long init_end = (unsigned long)__init_end;
+
+ /* The init text pages are marked R-X. We have to
+ * flush the icache and mark them RW-
+ *
+ * This is tricky, because map_pages is in the init section.
+ * Do a dummy remap of the data section first (the data
+ * section is already PAGE_KERNEL) to pull in the TLB entries
+ * for map_kernel */
+ map_pages(init_begin, __pa(init_begin), init_end - init_begin,
+ PAGE_KERNEL_RWX, 1);
+ /* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
+ * map_pages */
+ map_pages(init_begin, __pa(init_begin), init_end - init_begin,
+ PAGE_KERNEL, 1);
+
+ /* force the kernel to see the new TLB entries */
+ __flush_tlb_range(0, init_begin, init_end);
+ /* Attempt to catch anyone trying to execute code here
+ * by filling the page with BRK insns.
+ */
+ memset((void *)init_begin, 0x00, init_end - init_begin);
+ /* finally dump all the instructions which were cached, since the
+ * pages are no longer executable */
+ flush_icache_range(init_begin, init_end);
+
+ for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ init_page_count(virt_to_page(addr));
+ free_page(addr);
+ num_physpages++;
+ totalram_pages++;
+ }
+
+	/* set up a new LED state on systems shipped with an LED state panel */
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
+
+ printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
+ (init_end - init_begin) >> 10);
+}
+
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+ /* rodata memory was already mapped with KERNEL_RO access rights by
+ pagetable_init() and map_pages(). No need to do additional stuff here */
+ printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+ (unsigned long)(__end_rodata - __start_rodata) >> 10);
+}
+#endif
+
+
+/*
+ * Just an arbitrary offset to serve as a "hole" between mapping areas
+ * (between top of physical memory and a potential pcxl dma mapping
+ * area, and below the vmalloc mapping area).
+ *
+ * The current 32K value just means that there will be a 32K "hole"
+ * between mapping areas. That means that any out-of-bounds memory
+ * accesses will hopefully be caught. The vmalloc() routines leave
+ * a hole of 4kB between each vmalloced area for the same reason.
+ */
+
+ /* Leave room for gateway page expansion */
+#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
+#error KERNEL_MAP_START is in gateway reserved region
+#endif
+#define MAP_START (KERNEL_MAP_START)
+
+#define VM_MAP_OFFSET (32*1024)
+#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
+ & ~(VM_MAP_OFFSET-1)))
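
A standalone sketch (not part of the patch) of what SET_MAP_OFFSET() computes,
with the pointer cast dropped for simplicity: it always advances to the next
32K boundary, so even an already aligned start address ends up with a full
32K hole in front of the following mapping area.

#include <stdio.h>

#define VM_MAP_OFFSET	(32 * 1024UL)
#define SET_MAP_OFFSET(x) (((unsigned long)(x) + VM_MAP_OFFSET) \
				& ~(VM_MAP_OFFSET - 1))

int main(void)
{
	printf("%#lx -> %#lx\n", 0x40000000UL, SET_MAP_OFFSET(0x40000000UL));	/* +32K */
	printf("%#lx -> %#lx\n", 0x40001234UL, SET_MAP_OFFSET(0x40001234UL));	/* next boundary */
	return 0;
}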
+
+void *parisc_vmalloc_start __read_mostly;
+EXPORT_SYMBOL(parisc_vmalloc_start);
+
+#ifdef CONFIG_PA11
+unsigned long pcxl_dma_start __read_mostly;
+#endif
+
+void __init mem_init(void)
+{
+ int codesize, reservedpages, datasize, initsize;
+
+ /* Do sanity checks on page table constants */
+ BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
+ BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
+ BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
+ BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
+ > BITS_PER_LONG);
+
+ high_memory = __va((max_pfn << PAGE_SHIFT));
+
+#ifndef CONFIG_DISCONTIGMEM
+ max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
+ totalram_pages += free_all_bootmem();
+#else
+ {
+ int i;
+
+ for (i = 0; i < npmem_ranges; i++)
+ totalram_pages += free_all_bootmem_node(NODE_DATA(i));
+ }
+#endif
+
+ codesize = (unsigned long)_etext - (unsigned long)_text;
+ datasize = (unsigned long)_edata - (unsigned long)_etext;
+ initsize = (unsigned long)__init_end - (unsigned long)__init_begin;
+
+ reservedpages = 0;
+{
+ unsigned long pfn;
+#ifdef CONFIG_DISCONTIGMEM
+ int i;
+
+ for (i = 0; i < npmem_ranges; i++) {
+ for (pfn = node_start_pfn(i); pfn < node_end_pfn(i); pfn++) {
+ if (PageReserved(pfn_to_page(pfn)))
+ reservedpages++;
+ }
+ }
+#else /* !CONFIG_DISCONTIGMEM */
+ for (pfn = 0; pfn < max_pfn; pfn++) {
+ /*
+ * Only count reserved RAM pages
+ */
+ if (PageReserved(pfn_to_page(pfn)))
+ reservedpages++;
+ }
+#endif
+}
+
+#ifdef CONFIG_PA11
+ if (hppa_dma_ops == &pcxl_dma_ops) {
+ pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
+ parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
+ + PCXL_DMA_MAP_SIZE);
+ } else {
+ pcxl_dma_start = 0;
+ parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
+ }
+#else
+ parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
+#endif
+
+ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
+ nr_free_pages() << (PAGE_SHIFT-10),
+ num_physpages << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10
+ );
+
+#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
+ printk("virtual kernel memory layout:\n"
+ " vmalloc : 0x%p - 0x%p (%4ld MB)\n"
+ " memory : 0x%p - 0x%p (%4ld MB)\n"
+ " .init : 0x%p - 0x%p (%4ld kB)\n"
+ " .data : 0x%p - 0x%p (%4ld kB)\n"
+ " .text : 0x%p - 0x%p (%4ld kB)\n",
+
+ (void*)VMALLOC_START, (void*)VMALLOC_END,
+ (VMALLOC_END - VMALLOC_START) >> 20,
+
+ __va(0), high_memory,
+ ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
+
+ __init_begin, __init_end,
+ ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,
+
+ _etext, _edata,
+ ((unsigned long)_edata - (unsigned long)_etext) >> 10,
+
+ _text, _etext,
+ ((unsigned long)_etext - (unsigned long)_text) >> 10);
+#endif
+}
+
+unsigned long *empty_zero_page __read_mostly;
+EXPORT_SYMBOL(empty_zero_page);
+
+void show_mem(unsigned int filter)
+{
+ int i,free = 0,total = 0,reserved = 0;
+ int shared = 0, cached = 0;
+
+ printk(KERN_INFO "Mem-info:\n");
+ show_free_areas(filter);
+#ifndef CONFIG_DISCONTIGMEM
+ i = max_mapnr;
+ while (i-- > 0) {
+ total++;
+ if (PageReserved(mem_map+i))
+ reserved++;
+ else if (PageSwapCache(mem_map+i))
+ cached++;
+ else if (!page_count(&mem_map[i]))
+ free++;
+ else
+ shared += page_count(&mem_map[i]) - 1;
+ }
+#else
+ for (i = 0; i < npmem_ranges; i++) {
+ int j;
+
+ for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
+ struct page *p;
+ unsigned long flags;
+
+ pgdat_resize_lock(NODE_DATA(i), &flags);
+ p = nid_page_nr(i, j) - node_start_pfn(i);
+
+ total++;
+ if (PageReserved(p))
+ reserved++;
+ else if (PageSwapCache(p))
+ cached++;
+ else if (!page_count(p))
+ free++;
+ else
+ shared += page_count(p) - 1;
+ pgdat_resize_unlock(NODE_DATA(i), &flags);
+ }
+ }
+#endif
+ printk(KERN_INFO "%d pages of RAM\n", total);
+ printk(KERN_INFO "%d reserved pages\n", reserved);
+ printk(KERN_INFO "%d pages shared\n", shared);
+ printk(KERN_INFO "%d pages swap cached\n", cached);
+
+
+#ifdef CONFIG_DISCONTIGMEM
+ {
+ struct zonelist *zl;
+ int i, j;
+
+ for (i = 0; i < npmem_ranges; i++) {
+ zl = node_zonelist(i, 0);
+ for (j = 0; j < MAX_NR_ZONES; j++) {
+ struct zoneref *z;
+ struct zone *zone;
+
+ printk("Zone list for zone %d on node %d: ", j, i);
+ for_each_zone_zonelist(zone, z, zl, j)
+ printk("[%d/%s] ", zone_to_nid(zone),
+ zone->name);
+ printk("\n");
+ }
+ }
+ }
+#endif
+}
+
+/*
+ * pagetable_init() sets up the page tables
+ *
+ * Note that gateway_init() places the Linux gateway page at page 0.
+ * Since gateway pages cannot be dereferenced this has the desirable
+ * side effect of trapping those pesky NULL-reference errors in the
+ * kernel.
+ */
+static void __init pagetable_init(void)
+{
+ int range;
+
+ /* Map each physical memory range to its kernel vaddr */
+
+ for (range = 0; range < npmem_ranges; range++) {
+ unsigned long start_paddr;
+ unsigned long end_paddr;
+ unsigned long size;
+
+ start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
+ end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
+ size = pmem_ranges[range].pages << PAGE_SHIFT;
+
+ map_pages((unsigned long)__va(start_paddr), start_paddr,
+ size, PAGE_KERNEL, 0);
+ }
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_end && initrd_end > mem_limit) {
+ printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
+ map_pages(initrd_start, __pa(initrd_start),
+ initrd_end - initrd_start, PAGE_KERNEL, 0);
+ }
+#endif
+
+ empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+ memset(empty_zero_page, 0, PAGE_SIZE);
+}
+
+static void __init gateway_init(void)
+{
+ unsigned long linux_gateway_page_addr;
+ /* FIXME: This is 'const' in order to trick the compiler
+ into not treating it as DP-relative data. */
+ extern void * const linux_gateway_page;
+
+ linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;
+
+ /*
+ * Setup Linux Gateway page.
+ *
+ * The Linux gateway page will reside in kernel space (on virtual
+ * page 0), so it doesn't need to be aliased into user space.
+ */
+
+ map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
+ PAGE_SIZE, PAGE_GATEWAY, 1);
+}
+
+#ifdef CONFIG_HPUX
+void
+map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
+{
+ pgd_t *pg_dir;
+ pmd_t *pmd;
+ pte_t *pg_table;
+ unsigned long start_pmd;
+ unsigned long start_pte;
+ unsigned long address;
+ unsigned long hpux_gw_page_addr;
+ /* FIXME: This is 'const' in order to trick the compiler
+ into not treating it as DP-relative data. */
+ extern void * const hpux_gateway_page;
+
+ hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;
+
+ /*
+ * Setup HP-UX Gateway page.
+ *
+ * The HP-UX gateway page resides in the user address space,
+ * so it needs to be aliased into each process.
+ */
+
+ pg_dir = pgd_offset(mm,hpux_gw_page_addr);
+
+#if PTRS_PER_PMD == 1
+ start_pmd = 0;
+#else
+ start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
+#endif
+ start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+
+ address = __pa(&hpux_gateway_page);
+#if PTRS_PER_PMD == 1
+ pmd = (pmd_t *)__pa(pg_dir);
+#else
+ pmd = (pmd_t *) pgd_address(*pg_dir);
+
+ /*
+ * pmd is physical at this point
+ */
+
+ if (!pmd) {
+ pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
+ pmd = (pmd_t *) __pa(pmd);
+ }
+
+ __pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
+#endif
+ /* now change pmd to kernel virtual addresses */
+
+ pmd = (pmd_t *)__va(pmd) + start_pmd;
+
+ /*
+ * pg_table is physical at this point
+ */
+
+ pg_table = (pte_t *) pmd_address(*pmd);
+ if (!pg_table)
+ pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));
+
+ __pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);
+
+ /* now change pg_table to kernel virtual addresses */
+
+ pg_table = (pte_t *) __va(pg_table) + start_pte;
+ set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
+}
+EXPORT_SYMBOL(map_hpux_gateway_page);
+#endif
+
+void __init paging_init(void)
+{
+ int i;
+
+ setup_bootmem();
+ pagetable_init();
+ gateway_init();
+ flush_cache_all_local(); /* start with known state */
+ flush_tlb_all_local(NULL);
+
+ for (i = 0; i < npmem_ranges; i++) {
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+
+ zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;
+
+#ifdef CONFIG_DISCONTIGMEM
+ /* Need to initialize the pfnnid_map before we can initialize
+ the zone */
+ {
+ int j;
+ for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
+ j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
+ j++) {
+ pfnnid_map[j] = i;
+ }
+ }
+#endif
+
+ free_area_init_node(i, zones_size,
+ pmem_ranges[i].start_pfn, NULL);
+ }
+}
+
+#ifdef CONFIG_PA20
+
+/*
+ * Currently, all PA20 chips have 18 bit protection IDs, which is the
+ * limiting factor (space ids are 32 bits).
+ */
+
+#define NR_SPACE_IDS 262144
+
+#else
+
+/*
+ * Currently we have a one-to-one relationship between space IDs and
+ * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
+ * support 15 bit protection IDs, so that is the limiting factor.
+ * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
+ * probably not worth the effort for a special case here.
+ */
+
+#define NR_SPACE_IDS 32768
+
+#endif /* !CONFIG_PA20 */
+
+#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
+#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
+
+static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
+static unsigned long dirty_space_id[SID_ARRAY_SIZE];
+static unsigned long space_id_index;
+static unsigned long free_space_ids = NR_SPACE_IDS - 1;
+static unsigned long dirty_space_ids = 0;
+
+static DEFINE_SPINLOCK(sid_lock);
+
+unsigned long alloc_sid(void)
+{
+ unsigned long index;
+
+ spin_lock(&sid_lock);
+
+ if (free_space_ids == 0) {
+ if (dirty_space_ids != 0) {
+ spin_unlock(&sid_lock);
+ flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
+ spin_lock(&sid_lock);
+ }
+ BUG_ON(free_space_ids == 0);
+ }
+
+ free_space_ids--;
+
+ index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
+ space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
+ space_id_index = index;
+
+ spin_unlock(&sid_lock);
+
+ return index << SPACEID_SHIFT;
+}
+
+void free_sid(unsigned long spaceid)
+{
+ unsigned long index = spaceid >> SPACEID_SHIFT;
+ unsigned long *dirty_space_offset;
+
+ dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
+ index &= (BITS_PER_LONG - 1);
+
+ spin_lock(&sid_lock);
+
+ BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */
+
+ *dirty_space_offset |= (1L << index);
+ dirty_space_ids++;
+
+ spin_unlock(&sid_lock);
+}
+
+
+#ifdef CONFIG_SMP
+static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
+{
+ int i;
+
+ /* NOTE: sid_lock must be held upon entry */
+
+ *ndirtyptr = dirty_space_ids;
+ if (dirty_space_ids != 0) {
+ for (i = 0; i < SID_ARRAY_SIZE; i++) {
+ dirty_array[i] = dirty_space_id[i];
+ dirty_space_id[i] = 0;
+ }
+ dirty_space_ids = 0;
+ }
+
+ return;
+}
+
+static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
+{
+ int i;
+
+ /* NOTE: sid_lock must be held upon entry */
+
+ if (ndirty != 0) {
+ for (i = 0; i < SID_ARRAY_SIZE; i++) {
+ space_id[i] ^= dirty_array[i];
+ }
+
+ free_space_ids += ndirty;
+ space_id_index = 0;
+ }
+}
+
+#else /* CONFIG_SMP */
+
+static void recycle_sids(void)
+{
+ int i;
+
+ /* NOTE: sid_lock must be held upon entry */
+
+ if (dirty_space_ids != 0) {
+ for (i = 0; i < SID_ARRAY_SIZE; i++) {
+ space_id[i] ^= dirty_space_id[i];
+ dirty_space_id[i] = 0;
+ }
+
+ free_space_ids += dirty_space_ids;
+ dirty_space_ids = 0;
+ space_id_index = 0;
+ }
+}
+#endif
+
+/*
+ * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
+ * purged, we can safely reuse the space ids that were released but
+ * not flushed from the tlb.
+ */
+
+#ifdef CONFIG_SMP
+
+static unsigned long recycle_ndirty;
+static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
+static unsigned int recycle_inuse;
+
+void flush_tlb_all(void)
+{
+ int do_recycle;
+
+ do_recycle = 0;
+ spin_lock(&sid_lock);
+ if (dirty_space_ids > RECYCLE_THRESHOLD) {
+ BUG_ON(recycle_inuse); /* FIXME: Use a semaphore/wait queue here */
+ get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
+ recycle_inuse++;
+ do_recycle++;
+ }
+ spin_unlock(&sid_lock);
+ on_each_cpu(flush_tlb_all_local, NULL, 1);
+ if (do_recycle) {
+ spin_lock(&sid_lock);
+ recycle_sids(recycle_ndirty,recycle_dirty_array);
+ recycle_inuse = 0;
+ spin_unlock(&sid_lock);
+ }
+}
+#else
+void flush_tlb_all(void)
+{
+ spin_lock(&sid_lock);
+ flush_tlb_all_local(NULL);
+ recycle_sids();
+ spin_unlock(&sid_lock);
+}
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (start >= end)
+ return;
+ printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(start));
+ init_page_count(virt_to_page(start));
+ free_page(start);
+ num_physpages++;
+ totalram_pages++;
+ }
+}
+#endif
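
A simplified single-threaded model (not part of the patch, locking and sizing
omitted) of the space-id recycling scheme above: alloc_sid() sets a bit in the
live bitmap, free_sid() only marks the id dirty, and recycle_sids() clears the
dirty ids out of the live bitmap once the whole TLB has been flushed.

#include <stdint.h>
#include <stdio.h>

#define NR_IDS 64

static uint64_t live = 1;			/* id 0 reserved, like space_id[] */
static uint64_t dirty;

static int alloc_id(void)
{
	for (int i = 1; i < NR_IDS; i++) {
		if (!(live & ((uint64_t)1 << i))) {
			live |= (uint64_t)1 << i;
			return i;
		}
	}
	return -1;				/* the kernel BUG()s out instead */
}

static void free_id(int id)
{
	dirty |= (uint64_t)1 << id;		/* defer the release until the next flush */
}

static void recycle_ids(void)
{
	live ^= dirty;				/* what flush_tlb_all() does via recycle_sids() */
	dirty = 0;
}

int main(void)
{
	int a = alloc_id();
	alloc_id();
	free_id(a);
	printf("before flush: %d\n", alloc_id());	/* 3: freed id still live */
	recycle_ids();
	printf("after flush:  %d\n", alloc_id());	/* 1: freed id reusable  */
	return 0;
}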
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
new file mode 100644
index 00000000..838d0259
--- /dev/null
+++ b/arch/parisc/mm/ioremap.c
@@ -0,0 +1,99 @@
+/*
+ * arch/parisc/mm/ioremap.c
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <asm/pgalloc.h>
+
+/*
+ * Generic mapping function (not visible outside):
+ */
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+{
+ void __iomem *addr;
+ struct vm_struct *area;
+ unsigned long offset, last_addr;
+ pgprot_t pgprot;
+
+#ifdef CONFIG_EISA
+ unsigned long end = phys_addr + size - 1;
+ /* Support EISA addresses */
+ if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
+ (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
+ phys_addr |= F_EXTEND(0xfc000000);
+ flags |= _PAGE_NO_CACHE;
+ }
+#endif
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+ if (phys_addr < virt_to_phys(high_memory)) {
+ char *t_addr, *t_end;
+ struct page *page;
+
+ t_addr = __va(phys_addr);
+ t_end = t_addr + (size - 1);
+
+ for (page = virt_to_page(t_addr);
+ page <= virt_to_page(t_end); page++) {
+ if(!PageReserved(page))
+ return NULL;
+ }
+ }
+
+ pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
+ _PAGE_ACCESSED | flags);
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+
+ addr = (void __iomem *) area->addr;
+ if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+ phys_addr, pgprot)) {
+ vfree(addr);
+ return NULL;
+ }
+
+ return (void __iomem *) (offset + (char __iomem *)addr);
+}
+EXPORT_SYMBOL(__ioremap);
+
+void iounmap(const volatile void __iomem *addr)
+{
+ if (addr > high_memory)
+ return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
+}
+EXPORT_SYMBOL(iounmap);
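
For reference (not part of the patch), a small hosted-C sketch of the
alignment arithmetic __ioremap() uses above: the mapping itself is page
aligned and covers whole pages, and the sub-page offset is added back onto
the returned pointer. A 4 kB PAGE_SIZE is assumed here.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long phys_addr = 0xf0001234UL;
	unsigned long size = 0x100UL;
	unsigned long last_addr = phys_addr + size - 1;

	unsigned long offset = phys_addr & ~PAGE_MASK;		/* 0x234 */
	phys_addr &= PAGE_MASK;					/* 0xf0001000 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;		/* one full page */

	printf("map %#lx + %#lx, return mapping base + %#lx\n",
	       phys_addr, size, offset);
	return 0;
}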
diff --git a/arch/parisc/nm b/arch/parisc/nm
new file mode 100644
index 00000000..c788308d
--- /dev/null
+++ b/arch/parisc/nm
@@ -0,0 +1,6 @@
+#!/bin/sh
+##
+# Hack to have an nm which removes the local symbols. We also rely
+# on this nm being hidden out of the ordinary executable path
+##
+${CROSS_COMPILE}nm $* | grep -v '.LC*[0-9]*$'
diff --git a/arch/parisc/oprofile/Makefile b/arch/parisc/oprofile/Makefile
new file mode 100644
index 00000000..e9feca1c
--- /dev/null
+++ b/arch/parisc/oprofile/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+ oprof.o cpu_buffer.o buffer_sync.o \
+ event_buffer.o oprofile_files.o \
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
+oprofile-y := $(DRIVER_OBJS) init.o
diff --git a/arch/parisc/oprofile/init.c b/arch/parisc/oprofile/init.c
new file mode 100644
index 00000000..026cba2a
--- /dev/null
+++ b/arch/parisc/oprofile/init.c
@@ -0,0 +1,23 @@
+/**
+ * @file init.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/oprofile.h>
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ return -ENODEV;
+}
+
+
+void oprofile_arch_exit(void)
+{
+}