author    root <root@artemis.panaceas.org>  2015-12-25 04:40:36 +0000
committer root <root@artemis.panaceas.org>  2015-12-25 04:40:36 +0000
commit    849369d6c66d3054688672f97d31fceb8e8230fb (patch)
tree      6135abc790ca67dedbe07c39806591e70eda81ce /drivers/char
initial_commit
Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig  665
-rw-r--r--  drivers/char/Makefile  70
-rw-r--r--  drivers/char/agp/Kconfig  157
-rw-r--r--  drivers/char/agp/Makefile  22
-rw-r--r--  drivers/char/agp/agp.h  286
-rw-r--r--  drivers/char/agp/ali-agp.c  423
-rw-r--r--  drivers/char/agp/alpha-agp.c  222
-rw-r--r--  drivers/char/agp/amd-k7-agp.c  568
-rw-r--r--  drivers/char/agp/amd64-agp.c  822
-rw-r--r--  drivers/char/agp/ati-agp.c  586
-rw-r--r--  drivers/char/agp/backend.c  367
-rw-r--r--  drivers/char/agp/compat_ioctl.c  287
-rw-r--r--  drivers/char/agp/compat_ioctl.h  106
-rw-r--r--  drivers/char/agp/efficeon-agp.c  479
-rw-r--r--  drivers/char/agp/frontend.c  1081
-rw-r--r--  drivers/char/agp/generic.c  1438
-rw-r--r--  drivers/char/agp/hp-agp.c  551
-rw-r--r--  drivers/char/agp/i460-agp.c  659
-rw-r--r--  drivers/char/agp/intel-agp.c  941
-rw-r--r--  drivers/char/agp/intel-agp.h  242
-rw-r--r--  drivers/char/agp/intel-gtt.c  1519
-rw-r--r--  drivers/char/agp/isoch.c  470
-rw-r--r--  drivers/char/agp/nvidia-agp.c  475
-rw-r--r--  drivers/char/agp/parisc-agp.c  424
-rw-r--r--  drivers/char/agp/sgi-agp.c  341
-rw-r--r--  drivers/char/agp/sis-agp.c  454
-rw-r--r--  drivers/char/agp/sworks-agp.c  569
-rw-r--r--  drivers/char/agp/uninorth-agp.c  722
-rw-r--r--  drivers/char/agp/via-agp.c  600
-rw-r--r--  drivers/char/apm-emulation.c  746
-rw-r--r--  drivers/char/applicom.c  843
-rw-r--r--  drivers/char/applicom.h  85
-rw-r--r--  drivers/char/bfin-otp.c  275
-rw-r--r--  drivers/char/briq_panel.c  266
-rw-r--r--  drivers/char/bsr.c  356
-rw-r--r--  drivers/char/dcc_tty.c  326
-rw-r--r--  drivers/char/ds1302.c  358
-rw-r--r--  drivers/char/ds1620.c  431
-rw-r--r--  drivers/char/dsp56k.c  534
-rw-r--r--  drivers/char/dtlk.c  663
-rw-r--r--  drivers/char/efirtc.c  412
-rwxr-xr-x  drivers/char/fsl_otp.c  261
-rwxr-xr-x  drivers/char/fsl_otp.h  369
-rw-r--r--  drivers/char/generic_nvram.c  170
-rw-r--r--  drivers/char/genrtc.c  542
-rw-r--r--  drivers/char/hangcheck-timer.c  211
-rw-r--r--  drivers/char/hpet.c  1100
-rw-r--r--  drivers/char/hw_random/Kconfig  224
-rw-r--r--  drivers/char/hw_random/Makefile  23
-rw-r--r--  drivers/char/hw_random/amd-rng.c  169
-rw-r--r--  drivers/char/hw_random/core.c  372
-rw-r--r--  drivers/char/hw_random/fsl-rngc.c  414
-rw-r--r--  drivers/char/hw_random/geode-rng.c  139
-rw-r--r--  drivers/char/hw_random/intel-rng.c  419
-rw-r--r--  drivers/char/hw_random/ixp4xx-rng.c  72
-rw-r--r--  drivers/char/hw_random/mxc-rnga.c  247
-rw-r--r--  drivers/char/hw_random/n2-asm.S  79
-rw-r--r--  drivers/char/hw_random/n2-drv.c  779
-rw-r--r--  drivers/char/hw_random/n2rng.h  118
-rw-r--r--  drivers/char/hw_random/nomadik-rng.c  120
-rw-r--r--  drivers/char/hw_random/octeon-rng.c  148
-rw-r--r--  drivers/char/hw_random/omap-rng.c  229
-rw-r--r--  drivers/char/hw_random/pasemi-rng.c  165
-rw-r--r--  drivers/char/hw_random/picoxcell-rng.c  208
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c  168
-rw-r--r--  drivers/char/hw_random/tx4939-rng.c  185
-rw-r--r--  drivers/char/hw_random/via-rng.c  224
-rw-r--r--  drivers/char/hw_random/virtio-rng.c  139
-rw-r--r--  drivers/char/i8k.c  768
-rw-r--r--  drivers/char/ipmi/Kconfig  64
-rw-r--r--  drivers/char/ipmi/Makefile  11
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c  705
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c  976
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c  550
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c  4549
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c  749
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c  3587
-rw-r--r--  drivers/char/ipmi/ipmi_si_sm.h  141
-rw-r--r--  drivers/char/ipmi/ipmi_smic_sm.c  600
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c  1371
-rw-r--r--  drivers/char/lp.c  1062
-rw-r--r--  drivers/char/mbcs.c  853
-rw-r--r--  drivers/char/mbcs.h  553
-rw-r--r--  drivers/char/mem.c  956
-rw-r--r--  drivers/char/misc.c  297
-rw-r--r--  drivers/char/mmtimer.c  859
-rw-r--r--  drivers/char/msm_smd_pkt.c  466
-rw-r--r--  drivers/char/mspec.c  451
-rw-r--r--  drivers/char/mwave/3780i.c  740
-rw-r--r--  drivers/char/mwave/3780i.h  358
-rw-r--r--  drivers/char/mwave/Makefile  15
-rw-r--r--  drivers/char/mwave/README  47
-rw-r--r--  drivers/char/mwave/mwavedd.c  697
-rw-r--r--  drivers/char/mwave/mwavedd.h  152
-rw-r--r--  drivers/char/mwave/mwavepub.h  89
-rw-r--r--  drivers/char/mwave/smapi.c  570
-rw-r--r--  drivers/char/mwave/smapi.h  80
-rw-r--r--  drivers/char/mwave/tp3780i.c  579
-rw-r--r--  drivers/char/mwave/tp3780i.h  103
-rw-r--r--  drivers/char/mxc_iim.c  644
-rw-r--r--  drivers/char/mxc_iim.h  67
-rw-r--r--  drivers/char/mxs_viim.c  175
-rw-r--r--  drivers/char/nsc_gpio.c  139
-rw-r--r--  drivers/char/nvram.c  723
-rw-r--r--  drivers/char/nwbutton.c  244
-rw-r--r--  drivers/char/nwbutton.h  40
-rw-r--r--  drivers/char/nwflash.c  689
-rw-r--r--  drivers/char/pc8736x_gpio.c  353
-rw-r--r--  drivers/char/pcmcia/Kconfig  56
-rw-r--r--  drivers/char/pcmcia/Makefile  9
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c  1924
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c  687
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.h  47
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c  4311
-rw-r--r--  drivers/char/ppdev.c  814
-rw-r--r--  drivers/char/ps3flash.c  472
-rw-r--r--  drivers/char/ramoops.c  195
-rw-r--r--  drivers/char/random.c  1349
-rw-r--r--  drivers/char/raw.c  374
-rwxr-xr-x  drivers/char/regs-ocotp-v2.h  239
-rw-r--r--  drivers/char/regs-ocotp-v3.h  367
-rw-r--r--  drivers/char/rtc.c  1430
-rw-r--r--  drivers/char/scc.h  613
-rw-r--r--  drivers/char/scx200_gpio.c  132
-rw-r--r--  drivers/char/snsc.c  466
-rw-r--r--  drivers/char/snsc.h  92
-rw-r--r--  drivers/char/snsc_event.c  304
-rw-r--r--  drivers/char/sonypi.c  1569
-rw-r--r--  drivers/char/tb0219.c  372
-rw-r--r--  drivers/char/tlclk.c  935
-rw-r--r--  drivers/char/toshiba.c  546
-rw-r--r--  drivers/char/tpm/Kconfig  63
-rw-r--r--  drivers/char/tpm/Makefile  11
-rw-r--r--  drivers/char/tpm/tpm.c  1278
-rw-r--r--  drivers/char/tpm/tpm.h  302
-rw-r--r--  drivers/char/tpm/tpm_atmel.c  252
-rw-r--r--  drivers/char/tpm/tpm_atmel.h  131
-rw-r--r--  drivers/char/tpm/tpm_bios.c  556
-rw-r--r--  drivers/char/tpm/tpm_infineon.c  677
-rw-r--r--  drivers/char/tpm/tpm_nsc.c  412
-rw-r--r--  drivers/char/tpm/tpm_tis.c  777
-rw-r--r--  drivers/char/ttyprintk.c  225
-rw-r--r--  drivers/char/uv_mmtimer.c  220
-rw-r--r--  drivers/char/viotape.c  1041
-rw-r--r--  drivers/char/virtio_console.c  1842
-rw-r--r--  drivers/char/xilinx_hwicap/Makefile  7
-rw-r--r--  drivers/char/xilinx_hwicap/buffer_icap.c  365
-rw-r--r--  drivers/char/xilinx_hwicap/buffer_icap.h  54
-rw-r--r--  drivers/char/xilinx_hwicap/fifo_icap.c  393
-rw-r--r--  drivers/char/xilinx_hwicap/fifo_icap.h  59
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c  866
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.h  213
152 files changed, 81254 insertions, 0 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
new file mode 100644
index 00000000..00061c2e
--- /dev/null
+++ b/drivers/char/Kconfig
@@ -0,0 +1,665 @@
+#
+# Character device configuration
+#
+
+menu "Character devices"
+
+source "drivers/tty/Kconfig"
+
+config DEVMEM
+ bool "Memory device driver"
+ default y
+ help
+ The memory driver provides two character devices, mem and kmem, which
+ provide access to the system's memory. The mem device is a view of
+ physical memory, and each byte in the device corresponds to the
+ matching physical address. The kmem device is the same as mem, but
+ the addresses correspond to the kernel's virtual address space rather
+ than physical memory. These devices are standard parts of a Linux
+ system and most users should say Y here. You might say N if you are
+ very security conscious or if memory is tight.
+
+config DEVKMEM
+ bool "/dev/kmem virtual device support"
+ default y
+ help
+ Say Y here if you want to support the /dev/kmem device. The
+ /dev/kmem device is rarely used, but can be used for certain
+ kinds of kernel debugging operations.
+ When in doubt, say "N".
+
+config STALDRV
+ bool "Stallion multiport serial support"
+ depends on SERIAL_NONSTANDARD
+ help
+ Stallion cards give you many serial ports. You would need something
+ like this to connect more than two modems to your Linux box, for
+ instance in order to become a dial-in server. If you say Y here,
+ you will be asked for your specific card model in the next
+ questions. Make sure to read <file:Documentation/serial/stallion.txt>
+ in this case. If you have never heard about all this, it's safe to
+ say N.
+
+config SGI_SNSC
+ bool "SGI Altix system controller communication support"
+ depends on (IA64_SGI_SN2 || IA64_GENERIC)
+ help
+ If you have an SGI Altix and you want to enable system
+ controller communication from user space (you want this!),
+ say Y. Otherwise, say N.
+
+config SGI_TIOCX
+ bool "SGI TIO CX driver support"
+ depends on (IA64_SGI_SN2 || IA64_GENERIC)
+ help
+ If you have an SGI Altix and you have fpga devices attached
+ to your TIO, say Y here, otherwise say N.
+
+config SGI_MBCS
+ tristate "SGI FPGA Core Services driver support"
+ depends on SGI_TIOCX
+ help
+ If you have an SGI Altix with an attached SABrick
+ say Y or M here, otherwise say N.
+
+source "drivers/tty/serial/Kconfig"
+
+config TTY_PRINTK
+ bool "TTY driver to output user messages via printk"
+ depends on EXPERT
+ default n
+ ---help---
+ If you say Y here, support for writing user messages (i.e.
+ console messages) via printk becomes available.
+
+ This feature is useful for interleaving user messages with kernel
+ messages.
+ In order to use this feature, you should output user messages
+ to /dev/ttyprintk or redirect console to this TTY.
+
+ If unsure, say N.
+
+config BRIQ_PANEL
+ tristate 'Total Impact briQ front panel driver'
+ depends on PPC_CHRP
+ ---help---
+ The briQ is a small-footprint CHRP computer with a front-panel VFD, a
+ tri-state LED and two switches. It is the size of a CDROM drive.
+
+ If you have one and want anything shown on the VFD, then you
+ must answer Y here.
+
+ To compile this driver as a module, choose M here: the
+ module will be called briq_panel.
+
+ It's safe to say N here.
+
+config BFIN_OTP
+ tristate "Blackfin On-Chip OTP Memory Support"
+ depends on BLACKFIN && (BF51x || BF52x || BF54x)
+ default y
+ help
+ If you say Y here, you will get support for a character device
+ interface into the One Time Programmable memory pages that are
+ stored on the Blackfin processor. This will not get you access
+ to the secure memory pages however. You will need to write your
+ own secure code and reader for that.
+
+ To compile this driver as a module, choose M here: the module
+ will be called bfin-otp.
+
+ If unsure, it is safe to say Y.
+
+config BFIN_OTP_WRITE_ENABLE
+ bool "Enable writing support of OTP pages"
+ depends on BFIN_OTP
+ default n
+ help
+ If you say Y here, you will enable support for writing of the
+ OTP pages. This is dangerous by nature as you can only program
+ the pages once, so only enable this option when you actually
+ need it so as to not inadvertently clobber data.
+
+ If unsure, say N.
+
+config FSL_OTP
+ tristate "Freescale On-Chip OTP Memory Support"
+ depends on (ARCH_MX23 || ARCH_MX28 || ARCH_MX50 || ARCH_MX6)
+ default n
+ help
+ If you say Y here, you will get support for a character device
+ interface into the One Time Programmable memory pages that are
+ stored on the i.MX23/28/50 processors. This will not get you access
+ to the secure memory pages however. You will need to write your
+ own secure code and reader for that.
+
+ To compile this driver as a module, choose M here: the module
+ will be called fsl_otp.
+
+ If unsure, it is safe to say Y.
+
+config PRINTER
+ tristate "Parallel printer support"
+ depends on PARPORT
+ ---help---
+ If you intend to attach a printer to the parallel port of your Linux
+ box (as opposed to using a serial printer; if the connector at the
+ printer has 9 or 25 holes ["female"], then it's serial), say Y.
+ Also read the Printing-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ It is possible to share one parallel port among several devices
+ (e.g. printer and ZIP drive) and it is safe to compile the
+ corresponding drivers into the kernel.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/parport.txt>. The module will be called lp.
+
+ If you have several parallel ports, you can specify which ports to
+ use with the "lp" kernel command line option. (Try "man bootparam"
+ or see the documentation of your boot loader (lilo or loadlin) about
+ how to pass options to the kernel at boot time.) The syntax of the
+ "lp" command line option can be found in <file:drivers/char/lp.c>.
+
+ If you have more than 8 printers, you need to increase the LP_NO
+ macro in lp.c and the PARPORT_MAX macro in parport.h.
+
+config LP_CONSOLE
+ bool "Support for console on line printer"
+ depends on PRINTER
+ ---help---
+ If you want kernel messages to be printed out as they occur, you
+ can have a console on the printer. This option adds support for
+ doing that; to actually get it to happen you need to pass the
+ option "console=lp0" to the kernel at boot time.
+
+ If the printer is out of paper (or off, or unplugged, or too
+ busy..) the kernel will stall until the printer is ready again.
+ By defining CONSOLE_LP_STRICT to 0 (at your own risk) you
+ can make the kernel continue when this happens,
+ but it'll lose the kernel messages.
+
+ If unsure, say N.
+
+config PPDEV
+ tristate "Support for user-space parallel port device drivers"
+ depends on PARPORT
+ ---help---
+ Saying Y to this adds support for /dev/parport device nodes. This
+ is needed for programs that want portable access to the parallel
+ port, for instance deviceid (which displays Plug-and-Play device
+ IDs).
+
+ This is the parallel port equivalent of SCSI generic support (sg).
+ It is safe to say N to this -- it is not needed for normal printing
+ or parallel port CD-ROM/disk support.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ppdev.
+
+ If unsure, say N.
+
+source "drivers/tty/hvc/Kconfig"
+
+config VIRTIO_CONSOLE
+ tristate "Virtio console"
+ depends on VIRTIO
+ select HVC_DRIVER
+ help
+ Virtio console for use with lguest and other hypervisors.
+
+ Also serves as a general-purpose serial device for data
+ transfer between the guest and host. Character devices at
+ /dev/vportNpn will be created when corresponding ports are
+ found, where N is the device number and n is the port number
+ within that device. If specified by the host, a sysfs
+ attribute called 'name' will be populated with a name for
+ the port which can be used by udev scripts to create a
+ symlink to the device.
+
+config IBM_BSR
+ tristate "IBM POWER Barrier Synchronization Register support"
+ depends on PPC_PSERIES
+ help
+ This device exposes a hardware mechanism for fast synchronization
+ of threads across a large system, which avoids bouncing a cacheline
+ between several cores on a system.
+
+source "drivers/char/ipmi/Kconfig"
+
+config DS1620
+ tristate "NetWinder thermometer support"
+ depends on ARCH_NETWINDER
+ help
+ Say Y here to include support for the thermal management hardware
+ found in the NetWinder. This driver allows the user to control the
+ temperature set points and to read the current temperature.
+
+ It is also possible to say M here to build it as a module (ds1620).
+ It is recommended for use on a NetWinder, but it is not a
+ necessity.
+
+config NWBUTTON
+ tristate "NetWinder Button"
+ depends on ARCH_NETWINDER
+ ---help---
+ If you say Y here and create a character device node /dev/nwbutton
+ with major and minor numbers 10 and 158 ("man mknod"), then every
+ time the orange button is pressed a number of times, the number of
+ times the button was pressed will be written to that device.
+
+ This is most useful for applications, as yet unwritten, which
+ perform actions based on how many times the button is pressed in a
+ row.
+
+ Do not hold the button down for too long, as the driver does not
+ alter the behaviour of the hardware reset circuitry attached to the
+ button; it will still execute a hard reset if the button is held
+ down for longer than approximately five seconds.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nwbutton.
+
+ Most people will answer Y to this question and "Reboot Using Button"
+ below to be able to initiate a system shutdown from the button.
+
+config NWBUTTON_REBOOT
+ bool "Reboot Using Button"
+ depends on NWBUTTON
+ help
+ If you say Y here, then you will be able to initiate a system
+ shutdown and reboot by pressing the orange button a number of times.
+ The number of presses to initiate the shutdown is two by default,
+ but this can be altered by modifying the value of NUM_PRESSES_REBOOT
+ in nwbutton.h and recompiling the driver or, if you compile the
+ driver as a module, you can specify the number of presses at load
+ time with "insmod button reboot_count=<something>".
+
+config NWFLASH
+ tristate "NetWinder flash support"
+ depends on ARCH_NETWINDER
+ ---help---
+ If you say Y here and create a character device /dev/flash with
+ major 10 and minor 160 you can manipulate the flash ROM containing
+ the NetWinder firmware. Be careful as accidentally overwriting the
+ flash contents can render your computer unbootable. On no account
+ allow random users access to this device. :-)
+
+ To compile this driver as a module, choose M here: the
+ module will be called nwflash.
+
+ If you're not sure, say N.
+
+source "drivers/char/hw_random/Kconfig"
+
+config NVRAM
+ tristate "/dev/nvram support"
+ depends on ATARI || X86 || (ARM && RTC_DRV_CMOS) || GENERIC_NVRAM
+ ---help---
+ If you say Y here and create a character special file /dev/nvram
+ with major number 10 and minor number 144 using mknod ("man mknod"),
+ you get read and write access to the extra bytes of non-volatile
+ memory in the real time clock (RTC), which is contained in every PC
+ and most Ataris. The actual number of bytes varies, depending on the
+ nvram in the system, but is usually 114 (128-14 for the RTC).
+
+ This memory is conventionally called "CMOS RAM" on PCs and "NVRAM"
+ on Ataris. /dev/nvram may be used to view settings there, or to
+ change them (with some utility). It could also be used to frequently
+ save a few bits of very important data that must not be lost over
+ power-off and for which writing to disk is too insecure. Note
+ however that most NVRAM space in a PC belongs to the BIOS and you
+ should NEVER idly tamper with it. See Ralf Brown's interrupt list
+ for a guide to the use of CMOS bytes by your BIOS.
+
+ On Atari machines, /dev/nvram is always configured and does not need
+ to be selected.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nvram.
+
+#
+# These legacy RTC drivers just cause too many conflicts with the generic
+# RTC framework ... let's not even try to coexist any more.
+#
+if RTC_LIB=n
+
+config RTC
+ tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
+ depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \
+ && !ARM && !SUPERH && !S390 && !AVR32 && !BLACKFIN
+ ---help---
+ If you say Y here and create a character special file /dev/rtc with
+ major number 10 and minor number 135 using mknod ("man mknod"), you
+ will get access to the real time clock (or hardware clock) built
+ into your computer.
+
+ Every PC has such a clock built in. It can be used to generate
+ signals from as low as 1Hz up to 8192Hz, and can also be used
+ as a 24 hour alarm. It reports status information via the file
+ /proc/driver/rtc and its behaviour is set by various ioctls on
+ /dev/rtc.
+
+ If you run Linux on a multiprocessor machine and said Y to
+ "Symmetric Multi Processing" above, you should say Y here to read
+ and set the RTC in an SMP compatible fashion.
+
+ If you think you have a use for such a device (such as periodic data
+ sampling), then say Y here, and read <file:Documentation/rtc.txt>
+ for details.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rtc.
+
+config JS_RTC
+ tristate "Enhanced Real Time Clock Support"
+ depends on SPARC32 && PCI
+ ---help---
+ If you say Y here and create a character special file /dev/rtc with
+ major number 10 and minor number 135 using mknod ("man mknod"), you
+ will get access to the real time clock (or hardware clock) built
+ into your computer.
+
+ Every PC has such a clock built in. It can be used to generate
+ signals from as low as 1Hz up to 8192Hz, and can also be used
+ as a 24 hour alarm. It reports status information via the file
+ /proc/driver/rtc and its behaviour is set by various ioctls on
+ /dev/rtc.
+
+ If you think you have a use for such a device (such as periodic data
+ sampling), then say Y here, and read <file:Documentation/rtc.txt>
+ for details.
+
+ To compile this driver as a module, choose M here: the
+ module will be called js-rtc.
+
+config GEN_RTC
+ tristate "Generic /dev/rtc emulation"
+ depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32 && !BLACKFIN
+ ---help---
+ If you say Y here and create a character special file /dev/rtc with
+ major number 10 and minor number 135 using mknod ("man mknod"), you
+ will get access to the real time clock (or hardware clock) built
+ into your computer.
+
+ It reports status information via the file /proc/driver/rtc and its
+ behaviour is set by various ioctls on /dev/rtc. If you enable the
+ "extended RTC operation" below it will also provide an emulation
+ for RTC_UIE which is required by some programs and may improve
+ precision in some cases.
+
+ To compile this driver as a module, choose M here: the
+ module will be called genrtc.
+
+config GEN_RTC_X
+ bool "Extended RTC operation"
+ depends on GEN_RTC
+ help
+ Provides an emulation for RTC_UIE which is required by some programs
+ and may improve precision of the generic RTC support in some cases.
+
+config EFI_RTC
+ bool "EFI Real Time Clock Services"
+ depends on IA64
+
+config DS1302
+ tristate "DS1302 RTC support"
+ depends on M32R && (PLAT_M32700UT || PLAT_OPSPUT)
+ help
+ If you say Y here and create a character special file /dev/rtc with
+ major number 121 and minor number 0 using mknod ("man mknod"), you
+ will get access to the real time clock (or hardware clock) built
+ into your computer.
+
+endif # RTC_LIB
+
+config DTLK
+ tristate "Double Talk PC internal speech card support"
+ depends on ISA
+ help
+ This driver is for the DoubleTalk PC, a speech synthesizer
+ manufactured by RC Systems (<http://www.rcsys.com/>). It is also
+ called the `internal DoubleTalk'.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dtlk.
+
+config XILINX_HWICAP
+ tristate "Xilinx HWICAP Support"
+ depends on XILINX_VIRTEX || MICROBLAZE
+ help
+ This option enables support for Xilinx Internal Configuration
+ Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex
+ FPGA platforms to partially reconfigure the FPGA at runtime.
+
+ If unsure, say N.
+
+config R3964
+ tristate "Siemens R3964 line discipline"
+ ---help---
+ This driver allows synchronous communication with devices using the
+ Siemens R3964 packet protocol. Unless you are dealing with special
+ hardware like PLCs, you are unlikely to need this.
+
+ To compile this driver as a module, choose M here: the
+ module will be called n_r3964.
+
+ If unsure, say N.
+
+config APPLICOM
+ tristate "Applicom intelligent fieldbus card support"
+ depends on PCI
+ ---help---
+ This driver provides the kernel-side support for the intelligent
+ fieldbus cards made by Applicom International. More information
+ about these cards can be found on the WWW at the address
+ <http://www.applicom-int.com/>, or by email from David Woodhouse
+ <dwmw2@infradead.org>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called applicom.
+
+ If unsure, say N.
+
+config SONYPI
+ tristate "Sony Vaio Programmable I/O Control Device support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && X86 && PCI && INPUT && !64BIT
+ ---help---
+ This driver enables access to the Sony Programmable I/O Control
+ Device which can be found in many (all ?) Sony Vaio laptops.
+
+ If you have one of those laptops, read
+ <file:Documentation/laptops/sonypi.txt>, and say Y or M here.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sonypi.
+
+config GPIO_TB0219
+ tristate "TANBAC TB0219 GPIO support"
+ depends on TANBAC_TB022X
+ select GPIO_VR41XX
+
+source "drivers/char/pcmcia/Kconfig"
+
+config MWAVE
+ tristate "ACP Modem (Mwave) support"
+ depends on X86
+ select SERIAL_8250
+ ---help---
+ The ACP modem (Mwave) for Linux is a WinModem. It is composed of a
+ kernel driver and a user level application. Together these components
+ support direct attachment to public switched telephone networks (PSTNs)
+ and support selected world wide countries.
+
+ This version of the ACP Modem driver supports the IBM Thinkpad 600E,
+ 600, and 770 that include on board ACP modem hardware.
+
+ The modem also supports the standard communications port interface
+ (ttySx) and is compatible with the Hayes AT Command Set.
+
+ The user level application needed to use this driver can be found at
+ the IBM Linux Technology Center (LTC) web site:
+ <http://www.ibm.com/linux/ltc/>.
+
+ If you own one of the above IBM Thinkpads which has the Mwave chipset
+ in it, say Y.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mwave.
+
+config SCx200_GPIO
+ tristate "NatSemi SCx200 GPIO Support"
+ depends on SCx200
+ select NSC_GPIO
+ help
+ Give userspace access to the GPIO pins on the National
+ Semiconductor SCx200 processors.
+
+ If compiled as a module, it will be called scx200_gpio.
+
+config PC8736x_GPIO
+ tristate "NatSemi PC8736x GPIO Support"
+ depends on X86_32
+ default SCx200_GPIO # mostly N
+ select NSC_GPIO # needed for support routines
+ help
+ Give userspace access to the GPIO pins on the National
+ Semiconductor PC-8736x (x=[03456]) SuperIO chip. The chip
+ has multiple functional units, including several managed by the
+ hwmon/pc87360 driver. Tested with the PC-87366.
+
+ If compiled as a module, it will be called pc8736x_gpio.
+
+config NSC_GPIO
+ tristate "NatSemi Base GPIO Support"
+ depends on X86_32
+ # selected by SCx200_GPIO and PC8736x_GPIO
+ # what about 2 selectors differing: m != y
+ help
+ Common support used (and needed) by scx200_gpio and
+ pc8736x_gpio drivers. If those drivers are built as
+ modules, this one will be too, named nsc_gpio.
+
+config RAW_DRIVER
+ tristate "RAW driver (/dev/raw/rawN)"
+ depends on BLOCK
+ help
+ The raw driver permits block devices to be bound to /dev/raw/rawN.
+ Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O.
+ See the raw(8) manpage for more details.
+
+ Applications should preferably open the device (eg /dev/hda1)
+ with the O_DIRECT flag.
+
+config MAX_RAW_DEVS
+ int "Maximum number of RAW devices to support (1-65536)"
+ depends on RAW_DRIVER
+ default "256"
+ help
+ The maximum number of RAW devices that are supported.
+ Default is 256. Increase this number in case you need lots of
+ raw devices.
+
+config HPET
+ bool "HPET - High Precision Event Timer" if (X86 || IA64)
+ default n
+ depends on ACPI
+ help
+ If you say Y here, you will have a miscdevice named "/dev/hpet/". Each
+ open selects one of the timers supported by the HPET. The timers are
+ non-periodic and/or periodic.
+
+config HPET_MMAP
+ bool "Allow mmap of HPET"
+ default y
+ depends on HPET
+ help
+ If you say Y here, user applications will be able to mmap
+ the HPET registers.
+
+ In some hardware implementations, the page containing HPET
+ registers may also contain other things that shouldn't be
+ exposed to the user. If this applies to your hardware,
+ say N here.
+
+config HANGCHECK_TIMER
+ tristate "Hangcheck timer"
+ depends on X86 || IA64 || PPC64 || S390
+ help
+ The hangcheck-timer module detects when the system has gone
+ out to lunch past a certain margin. It can reboot the system
+ or merely print a warning.
+
+config MMTIMER
+ tristate "MMTIMER Memory mapped RTC for SGI Altix"
+ depends on IA64_GENERIC || IA64_SGI_SN2
+ default y
+ help
+ The mmtimer device allows direct userspace access to the
+ Altix system timer.
+
+config UV_MMTIMER
+ tristate "UV_MMTIMER Memory mapped RTC for SGI UV"
+ depends on X86_UV
+ default m
+ help
+ The uv_mmtimer device allows direct userspace access to the
+ UV system timer.
+
+source "drivers/char/tpm/Kconfig"
+
+config TELCLOCK
+ tristate "Telecom clock driver for ATCA SBC"
+ depends on EXPERIMENTAL && X86
+ default n
+ help
+ The telecom clock device is specific to the MPCBL0010 and MPCBL0050
+ ATCA computers and allows direct userspace access to the
+ configuration of the telecom clock configuration settings. This
+ device is used for hardware synchronization across the ATCA backplane
+ fabric. Upon loading, the driver exports a sysfs directory,
+ /sys/devices/platform/telco_clock, with a number of files for
+ controlling the behavior of this hardware.
+
+config DEVPORT
+ bool
+ depends on !M68K
+ depends on ISA || PCI
+ default y
+
+config DCC_TTY
+ tristate "DCC tty driver"
+ depends on ARM
+
+source "drivers/s390/char/Kconfig"
+
+config RAMOOPS
+ tristate "Log panic/oops to a RAM buffer"
+ depends on HAS_IOMEM
+ default n
+ help
+ This enables panic and oops messages to be logged to a circular
+ buffer in RAM, where they can be read back at some later point.
+
+config MSM_SMD_PKT
+ bool "Enable device interface for some SMD packet ports"
+ default n
+ depends on MSM_SMD
+ help
+ Enables userspace clients to read and write to some packet SMD
+ ports via a device interface on MSM chipsets.
+
+config MXC_IIM
+ tristate "MXC IIM device driver"
+ depends on (ARCH_MX51 || ARCH_MX53)
+ help
+ Support for access to the MXC IIM device. Most people should say N here.
+
+config MXS_VIIM
+ tristate "MXS Virtual IIM device driver"
+ depends on (ARCH_MX50 || ARCH_MX6)
+ help
+ Support for access to the MXS Virtual IIM device. Most people should say N here.
+
+endmenu
+
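The DEVMEM help text above describes /dev/mem as a byte-for-byte view of
physical memory. The following user-space sketch shows what that access
pattern looks like; it assumes root privileges, the physical address is a
hypothetical placeholder, and kernels built with STRICT_DEVMEM may refuse
the mapping entirely.

/* Minimal sketch: map one page of physical memory through /dev/mem. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#define PHYS_ADDR 0x000F0000UL	/* hypothetical: legacy BIOS area on x86 */

int main(void)
{
	int fd = open("/dev/mem", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	/* mmap offsets must be page-aligned; PHYS_ADDR already is */
	uint32_t *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, PHYS_ADDR);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	printf("word at 0x%lx = 0x%08x\n", PHYS_ADDR, p[0]);
	munmap(p, 4096);
	close(fd);
	return 0;
}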
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
new file mode 100644
index 00000000..fc3e61b1
--- /dev/null
+++ b/drivers/char/Makefile
@@ -0,0 +1,70 @@
+#
+# Makefile for the kernel character device drivers.
+#
+
+obj-y += mem.o random.o
+obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
+obj-y += misc.o
+obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
+obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
+obj-$(CONFIG_RAW_DRIVER) += raw.o
+obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
+obj-$(CONFIG_MSM_SMD_PKT) += msm_smd_pkt.o
+obj-$(CONFIG_MSPEC) += mspec.o
+obj-$(CONFIG_MMTIMER) += mmtimer.o
+obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
+obj-$(CONFIG_VIOTAPE) += viotape.o
+obj-$(CONFIG_IBM_BSR) += bsr.o
+obj-$(CONFIG_SGI_MBCS) += mbcs.o
+obj-$(CONFIG_BRIQ_PANEL) += briq_panel.o
+obj-$(CONFIG_BFIN_OTP) += bfin-otp.o
+obj-$(CONFIG_FSL_OTP) += fsl_otp.o
+
+obj-$(CONFIG_PRINTER) += lp.o
+
+obj-$(CONFIG_APM_EMULATION) += apm-emulation.o
+
+obj-$(CONFIG_DTLK) += dtlk.o
+obj-$(CONFIG_APPLICOM) += applicom.o
+obj-$(CONFIG_SONYPI) += sonypi.o
+obj-$(CONFIG_RTC) += rtc.o
+obj-$(CONFIG_HPET) += hpet.o
+obj-$(CONFIG_GEN_RTC) += genrtc.o
+obj-$(CONFIG_EFI_RTC) += efirtc.o
+obj-$(CONFIG_DS1302) += ds1302.o
+obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/
+ifeq ($(CONFIG_GENERIC_NVRAM),y)
+ obj-$(CONFIG_NVRAM) += generic_nvram.o
+else
+ obj-$(CONFIG_NVRAM) += nvram.o
+endif
+obj-$(CONFIG_TOSHIBA) += toshiba.o
+obj-$(CONFIG_I8K) += i8k.o
+obj-$(CONFIG_DS1620) += ds1620.o
+obj-$(CONFIG_HW_RANDOM) += hw_random/
+obj-$(CONFIG_PPDEV) += ppdev.o
+obj-$(CONFIG_NWBUTTON) += nwbutton.o
+obj-$(CONFIG_NWFLASH) += nwflash.o
+obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
+obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
+obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
+obj-$(CONFIG_GPIO_TB0219) += tb0219.o
+obj-$(CONFIG_TELCLOCK) += tlclk.o
+
+obj-$(CONFIG_MWAVE) += mwave/
+obj-$(CONFIG_AGP) += agp/
+obj-$(CONFIG_PCMCIA) += pcmcia/
+obj-$(CONFIG_IPMI_HANDLER) += ipmi/
+
+obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
+obj-$(CONFIG_TCG_TPM) += tpm/
+
+obj-$(CONFIG_DCC_TTY) += dcc_tty.o
+obj-$(CONFIG_PS3_FLASH) += ps3flash.o
+obj-$(CONFIG_RAMOOPS) += ramoops.o
+
+obj-$(CONFIG_MXC_IIM) += mxc_iim.o
+obj-$(CONFIG_MXS_VIIM) += mxs_viim.o
+
+obj-$(CONFIG_JS_RTC) += js-rtc.o
+js-rtc-y = rtc.o
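Each "obj-$(CONFIG_FOO) += foo.o" line above resolves to obj-y (built in),
obj-m (built as a module) or nothing, depending on the Kconfig answer; the
final two lines build rtc.o a second time under the module name js-rtc.
For reference, a sketch of the C side that such a line builds — CONFIG_FOO
and the names are hypothetical, the boilerplate is the standard shape:

/* foo.c - minimal module skeleton behind "obj-$(CONFIG_FOO) += foo.o" */
#include <linux/module.h>
#include <linux/init.h>

static int __init foo_init(void)
{
	pr_info("foo: loaded\n");
	return 0;		/* a negative errno here aborts the load */
}

static void __exit foo_exit(void)
{
	pr_info("foo: unloaded\n");
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");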
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
new file mode 100644
index 00000000..d8b1b576
--- /dev/null
+++ b/drivers/char/agp/Kconfig
@@ -0,0 +1,157 @@
+menuconfig AGP
+ tristate "/dev/agpgart (AGP Support)"
+ depends on ALPHA || IA64 || PARISC || PPC || X86
+ depends on PCI
+ ---help---
+ AGP (Accelerated Graphics Port) is a bus system mainly used to
+ connect graphics cards to the rest of the system.
+
+ If you have an AGP system and you say Y here, it will be possible to
+ use the AGP features of your 3D rendering video card. This code acts
+ as a sort of "AGP driver" for the motherboard's chipset.
+
+ If you need more texture memory than you can get with the AGP GART
+ (theoretically up to 256 MB, but in practice usually 64 or 128 MB
+ due to kernel allocation issues), you could use PCI accesses
+ and have up to a couple gigs of texture space.
+
+ Note that this is the only means to have X/GLX use
+ write-combining with MTRR support on the AGP bus. Without it, OpenGL
+ direct rendering will be a lot slower but still faster than PIO.
+
+ To compile this driver as a module, choose M here: the
+ module will be called agpgart.
+
+ You should say Y here if you want to use GLX or DRI.
+
+ If unsure, say N.
+
+config AGP_ALI
+ tristate "ALI chipset support"
+ depends on AGP && X86_32
+ ---help---
+ This option gives you AGP support for the GLX component of
+ X on the following ALi chipsets. The supported chipsets
+ include M1541, M1621, M1631, M1632, M1641, M1647, and M1651.
+ For the ALi-chipset question, ALi suggests you refer to
+ <http://www.ali.com.tw/>.
+
+ The M1541 chipset can do AGP 1x and 2x, but note that there is an
+ acknowledged incompatibility with Matrox G200 cards. Due to
+ timing issues, this chipset cannot do AGP 2x with the G200.
+ This is a hardware limitation. AGP 1x seems to be fine, though.
+
+config AGP_ATI
+ tristate "ATI chipset support"
+ depends on AGP && X86_32
+ ---help---
+ This option gives you AGP support for the GLX component of
+ X on the ATI RadeonIGP family of chipsets.
+
+config AGP_AMD
+ tristate "AMD Irongate, 761, and 762 chipset support"
+ depends on AGP && X86_32
+ help
+ This option gives you AGP support for the GLX component of
+ X on AMD Irongate, 761, and 762 chipsets.
+
+config AGP_AMD64
+ tristate "AMD Opteron/Athlon64 on-CPU GART support"
+ depends on AGP && X86 && AMD_NB
+ help
+ This option gives you AGP support for the GLX component of
+ X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
+ You still need an external AGP bridge like the AMD 8151, VIA
+ K8T400M, SiS755. It may also support other AGP bridges when loaded
+ with agp_try_unsupported=1.
+
+config AGP_INTEL
+ tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
+ depends on AGP && X86
+ help
+ This option gives you AGP support for the GLX component of X
+ on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
+ E7205 and E7505 chipsets and full support for the 810, 815, 830M,
+ 845G, 852GM, 855GM, 865G and I915 integrated graphics chipsets.
+
+
+
+config AGP_NVIDIA
+ tristate "NVIDIA nForce/nForce2 chipset support"
+ depends on AGP && X86_32
+ help
+ This option gives you AGP support for the GLX component of
+ X on NVIDIA chipsets including nForce and nForce2.
+
+config AGP_SIS
+ tristate "SiS chipset support"
+ depends on AGP && X86
+ help
+ This option gives you AGP support for the GLX component of
+ X on Silicon Integrated Systems [SiS] chipsets.
+
+ Note that 5591/5592 AGP chipsets are NOT supported.
+
+
+config AGP_SWORKS
+ tristate "Serverworks LE/HE chipset support"
+ depends on AGP && X86_32
+ help
+ Say Y here to support the Serverworks AGP card. See
+ <http://www.serverworks.com/> for product descriptions and images.
+
+config AGP_VIA
+ tristate "VIA chipset support"
+ depends on AGP && X86
+ help
+ This option gives you AGP support for the GLX component of
+ X on VIA MVP3/Apollo Pro chipsets.
+
+config AGP_I460
+ tristate "Intel 460GX chipset support"
+ depends on AGP && (IA64_DIG || IA64_GENERIC)
+ help
+ This option gives you AGP GART support for the Intel 460GX chipset
+ for IA64 processors.
+
+config AGP_HP_ZX1
+ tristate "HP ZX1 chipset AGP support"
+ depends on AGP && (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC)
+ help
+ This option gives you AGP GART support for the HP ZX1 chipset
+ for IA64 processors.
+
+config AGP_PARISC
+ tristate "HP Quicksilver AGP support"
+ depends on AGP && PARISC && 64BIT
+ help
+ This option gives you AGP GART support for the HP Quicksilver
+ AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000
+ workstation...)
+
+config AGP_ALPHA_CORE
+ tristate "Alpha AGP support"
+ depends on AGP && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL)
+ default AGP
+
+config AGP_UNINORTH
+ tristate "Apple UniNorth & U3 AGP support"
+ depends on AGP && PPC_PMAC
+ help
+ This option gives you AGP support for Apple machines with a
+ UniNorth or U3 (Apple G5) bridge.
+
+config AGP_EFFICEON
+ tristate "Transmeta Efficeon support"
+ depends on AGP && X86_32
+ help
+ This option gives you AGP support for the Transmeta Efficeon
+ series processors with integrated northbridges.
+
+config AGP_SGI_TIOCA
+ tristate "SGI TIO chipset AGP support"
+ depends on AGP && (IA64_SGI_SN2 || IA64_GENERIC)
+ help
+ This option gives you AGP GART support for the SGI TIO chipset
+ for IA64 processors.
+
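The AGP options above all funnel through the agpgart core, whose frontend
(frontend.c in the diffstat) exposes /dev/agpgart to user space. A minimal
sketch of that interface, assuming the agpgart module is loaded, using the
types from <linux/agpgart.h>, and run as root:

/* Query the agpgart frontend for bridge and aperture information. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/agpgart.h>

int main(void)
{
	agp_info info;
	int fd = open("/dev/agpgart", O_RDWR);

	if (fd < 0) {
		perror("open /dev/agpgart");
		return 1;
	}
	memset(&info, 0, sizeof(info));

	if (ioctl(fd, AGPIOC_ACQUIRE) < 0) {
		perror("AGPIOC_ACQUIRE");
		close(fd);
		return 1;
	}
	if (ioctl(fd, AGPIOC_INFO, &info) == 0)
		printf("bridge %08x, aperture %zu MB at %#lx, mode %#x\n",
		       info.bridge_id, info.aper_size, info.aper_base,
		       info.agp_mode);
	ioctl(fd, AGPIOC_RELEASE);
	close(fd);
	return 0;
}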
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
new file mode 100644
index 00000000..8eb56e27
--- /dev/null
+++ b/drivers/char/agp/Makefile
@@ -0,0 +1,22 @@
+agpgart-y := backend.o frontend.o generic.o isoch.o
+
+agpgart-$(CONFIG_COMPAT) += compat_ioctl.o
+
+obj-$(CONFIG_AGP) += agpgart.o
+obj-$(CONFIG_AGP_ALI) += ali-agp.o
+obj-$(CONFIG_AGP_ATI) += ati-agp.o
+obj-$(CONFIG_AGP_AMD) += amd-k7-agp.o
+obj-$(CONFIG_AGP_AMD64) += amd64-agp.o
+obj-$(CONFIG_AGP_ALPHA_CORE) += alpha-agp.o
+obj-$(CONFIG_AGP_EFFICEON) += efficeon-agp.o
+obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
+obj-$(CONFIG_AGP_PARISC) += parisc-agp.o
+obj-$(CONFIG_AGP_I460) += i460-agp.o
+obj-$(CONFIG_AGP_INTEL) += intel-agp.o
+obj-$(CONFIG_AGP_INTEL) += intel-gtt.o
+obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
+obj-$(CONFIG_AGP_SGI_TIOCA) += sgi-agp.o
+obj-$(CONFIG_AGP_SIS) += sis-agp.o
+obj-$(CONFIG_AGP_SWORKS) += sworks-agp.o
+obj-$(CONFIG_AGP_UNINORTH) += uninorth-agp.o
+obj-$(CONFIG_AGP_VIA) += via-agp.o
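Note how compat_ioctl.o is linked into the agpgart module only when
CONFIG_COMPAT is set ("agpgart-$(CONFIG_COMPAT) += compat_ioctl.o" above).
On the C side this usually pairs with a conditionally populated
file_operations slot; a sketch with hypothetical handler names:

#include <linux/fs.h>
#include <linux/module.h>

static long my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;		/* placeholder: real handlers dispatch on cmd */
}

#ifdef CONFIG_COMPAT
/* only compiled (and only linked) on kernels with 32-bit compat support */
static long my_compat_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return my_ioctl(file, cmd, arg);
}
#endif

static const struct file_operations my_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= my_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= my_compat_ioctl,
#endif
};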
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
new file mode 100644
index 00000000..923f99df
--- /dev/null
+++ b/drivers/char/agp/agp.h
@@ -0,0 +1,286 @@
+/*
+ * AGPGART
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2004 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AGP_BACKEND_PRIV_H
+#define _AGP_BACKEND_PRIV_H 1
+
+#include <asm/agp.h> /* for flush_agp_cache() */
+
+#define PFX "agpgart: "
+
+//#define AGP_DEBUG 1
+#ifdef AGP_DEBUG
+#define DBG(x,y...) printk (KERN_DEBUG PFX "%s: " x "\n", __func__ , ## y)
+#else
+#define DBG(x,y...) do { } while (0)
+#endif
+
+extern struct agp_bridge_data *agp_bridge;
+
+enum aper_size_type {
+ U8_APER_SIZE,
+ U16_APER_SIZE,
+ U32_APER_SIZE,
+ LVL2_APER_SIZE,
+ FIXED_APER_SIZE
+};
+
+struct gatt_mask {
+ unsigned long mask;
+ u32 type;
+ /* totally device specific, for integrated chipsets that
+ * might have different types of memory masks. For other
+ * devices this will probably be ignored */
+};
+
+#define AGP_PAGE_DESTROY_UNMAP 1
+#define AGP_PAGE_DESTROY_FREE 2
+
+struct aper_size_info_8 {
+ int size;
+ int num_entries;
+ int page_order;
+ u8 size_value;
+};
+
+struct aper_size_info_16 {
+ int size;
+ int num_entries;
+ int page_order;
+ u16 size_value;
+};
+
+struct aper_size_info_32 {
+ int size;
+ int num_entries;
+ int page_order;
+ u32 size_value;
+};
+
+struct aper_size_info_lvl2 {
+ int size;
+ int num_entries;
+ u32 size_value;
+};
+
+struct aper_size_info_fixed {
+ int size;
+ int num_entries;
+ int page_order;
+};
+
+struct agp_bridge_driver {
+ struct module *owner;
+ const void *aperture_sizes;
+ int num_aperture_sizes;
+ enum aper_size_type size_type;
+ bool cant_use_aperture;
+ bool needs_scratch_page;
+ const struct gatt_mask *masks;
+ int (*fetch_size)(void);
+ int (*configure)(void);
+ void (*agp_enable)(struct agp_bridge_data *, u32);
+ void (*cleanup)(void);
+ void (*tlb_flush)(struct agp_memory *);
+ unsigned long (*mask_memory)(struct agp_bridge_data *, dma_addr_t, int);
+ void (*cache_flush)(void);
+ int (*create_gatt_table)(struct agp_bridge_data *);
+ int (*free_gatt_table)(struct agp_bridge_data *);
+ int (*insert_memory)(struct agp_memory *, off_t, int);
+ int (*remove_memory)(struct agp_memory *, off_t, int);
+ struct agp_memory *(*alloc_by_type) (size_t, int);
+ void (*free_by_type)(struct agp_memory *);
+ struct page *(*agp_alloc_page)(struct agp_bridge_data *);
+ int (*agp_alloc_pages)(struct agp_bridge_data *, struct agp_memory *, size_t);
+ void (*agp_destroy_page)(struct page *, int flags);
+ void (*agp_destroy_pages)(struct agp_memory *);
+ int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
+};
+
+struct agp_bridge_data {
+ const struct agp_version *version;
+ const struct agp_bridge_driver *driver;
+ const struct vm_operations_struct *vm_ops;
+ void *previous_size;
+ void *current_size;
+ void *dev_private_data;
+ struct pci_dev *dev;
+ u32 __iomem *gatt_table;
+ u32 *gatt_table_real;
+ unsigned long scratch_page;
+ struct page *scratch_page_page;
+ dma_addr_t scratch_page_dma;
+ unsigned long gart_bus_addr;
+ unsigned long gatt_bus_addr;
+ u32 mode;
+ enum chipset_type type;
+ unsigned long *key_list;
+ atomic_t current_memory_agp;
+ atomic_t agp_in_use;
+ int max_memory_agp; /* in number of pages */
+ int aperture_size_idx;
+ int capndx;
+ int flags;
+ char major_version;
+ char minor_version;
+ struct list_head list;
+ u32 apbase_config;
+ /* list of agp_memory mapped to the aperture */
+ struct list_head mapped_list;
+ spinlock_t mapped_lock;
+};
+
+#define KB(x) ((x) * 1024)
+#define MB(x) (KB (KB (x)))
+#define GB(x) (MB (KB (x)))
+
+#define A_SIZE_8(x) ((struct aper_size_info_8 *) x)
+#define A_SIZE_16(x) ((struct aper_size_info_16 *) x)
+#define A_SIZE_32(x) ((struct aper_size_info_32 *) x)
+#define A_SIZE_LVL2(x) ((struct aper_size_info_lvl2 *) x)
+#define A_SIZE_FIX(x) ((struct aper_size_info_fixed *) x)
+#define A_IDX8(bridge) (A_SIZE_8((bridge)->driver->aperture_sizes) + i)
+#define A_IDX16(bridge) (A_SIZE_16((bridge)->driver->aperture_sizes) + i)
+#define A_IDX32(bridge) (A_SIZE_32((bridge)->driver->aperture_sizes) + i)
+#define MAXKEY (4096 * 32)
+
+#define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page)
+
+
+struct agp_device_ids {
+ unsigned short device_id; /* first, to make table easier to read */
+ enum chipset_type chipset;
+ const char *chipset_name;
+ int (*chipset_setup) (struct pci_dev *pdev); /* used to override generic */
+};
+
+/* Driver registration */
+struct agp_bridge_data *agp_alloc_bridge(void);
+void agp_put_bridge(struct agp_bridge_data *bridge);
+int agp_add_bridge(struct agp_bridge_data *bridge);
+void agp_remove_bridge(struct agp_bridge_data *bridge);
+
+/* Frontend routines. */
+int agp_frontend_initialize(void);
+void agp_frontend_cleanup(void);
+
+/* Generic routines. */
+void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
+int agp_generic_create_gatt_table(struct agp_bridge_data *bridge);
+int agp_generic_free_gatt_table(struct agp_bridge_data *bridge);
+struct agp_memory *agp_create_memory(int scratch_pages);
+int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type);
+int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
+struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
+void agp_generic_free_by_type(struct agp_memory *curr);
+struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge);
+int agp_generic_alloc_pages(struct agp_bridge_data *agp_bridge,
+ struct agp_memory *memory, size_t page_count);
+void agp_generic_destroy_page(struct page *page, int flags);
+void agp_generic_destroy_pages(struct agp_memory *memory);
+void agp_free_key(int key);
+int agp_num_entries(void);
+u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 mode, u32 command);
+void agp_device_command(u32 command, bool agp_v3);
+int agp_3_5_enable(struct agp_bridge_data *bridge);
+void global_cache_flush(void);
+void get_agp_version(struct agp_bridge_data *bridge);
+unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
+ dma_addr_t phys, int type);
+int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
+ int type);
+struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
+
+/* generic functions for user-populated AGP memory types */
+struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
+void agp_alloc_page_array(size_t size, struct agp_memory *mem);
+void agp_free_page_array(struct agp_memory *mem);
+
+
+/* generic routines for agp>=3 */
+int agp3_generic_fetch_size(void);
+void agp3_generic_tlbflush(struct agp_memory *mem);
+int agp3_generic_configure(void);
+void agp3_generic_cleanup(void);
+
+/* aperture sizes have been standardised since v3 */
+#define AGP_GENERIC_SIZES_ENTRIES 11
+extern const struct aper_size_info_16 agp3_generic_sizes[];
+
+extern int agp_off;
+extern int agp_try_unsupported_boot;
+
+long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+/* Chipset independent registers (from AGP Spec) */
+#define AGP_APBASE 0x10
+
+#define AGPSTAT 0x4
+#define AGPCMD 0x8
+#define AGPNISTAT 0xc
+#define AGPCTRL 0x10
+#define AGPAPSIZE 0x14
+#define AGPNEPG 0x16
+#define AGPGARTLO 0x18
+#define AGPGARTHI 0x1c
+#define AGPNICMD 0x20
+
+#define AGP_MAJOR_VERSION_SHIFT (20)
+#define AGP_MINOR_VERSION_SHIFT (16)
+
+#define AGPSTAT_RQ_DEPTH (0xff000000)
+#define AGPSTAT_RQ_DEPTH_SHIFT 24
+
+#define AGPSTAT_CAL_MASK (1<<12|1<<11|1<<10)
+#define AGPSTAT_ARQSZ (1<<15|1<<14|1<<13)
+#define AGPSTAT_ARQSZ_SHIFT 13
+
+#define AGPSTAT_SBA (1<<9)
+#define AGPSTAT_AGP_ENABLE (1<<8)
+#define AGPSTAT_FW (1<<4)
+#define AGPSTAT_MODE_3_0 (1<<3)
+
+#define AGPSTAT2_1X (1<<0)
+#define AGPSTAT2_2X (1<<1)
+#define AGPSTAT2_4X (1<<2)
+
+#define AGPSTAT3_RSVD (1<<2)
+#define AGPSTAT3_8X (1<<1)
+#define AGPSTAT3_4X (1)
+
+#define AGPCTRL_APERENB (1<<8)
+#define AGPCTRL_GTLBEN (1<<7)
+
+#define AGP2_RESERVED_MASK 0x00fffcc8
+#define AGP3_RESERVED_MASK 0x00ff00c4
+
+#define AGP_ERRATA_FASTWRITES 1<<0
+#define AGP_ERRATA_SBA 1<<1
+#define AGP_ERRATA_1X 1<<2
+
+#endif /* _AGP_BACKEND_PRIV_H */
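The AGPSTAT bit definitions at the end of the header drive mode negotiation
in agp_collect_device_status() and agp_device_command(). A stand-alone
sketch of how those masks decode a status word (the defines are repeated
locally so the example compiles on its own; the sample value is made up):

#include <stdio.h>

#define AGPSTAT_SBA		(1<<9)
#define AGPSTAT_FW		(1<<4)
#define AGPSTAT_MODE_3_0	(1<<3)
#define AGPSTAT2_1X		(1<<0)
#define AGPSTAT2_2X		(1<<1)
#define AGPSTAT2_4X		(1<<2)
#define AGPSTAT3_8X		(1<<1)
#define AGPSTAT3_4X		(1)

int main(void)
{
	unsigned int stat = 0x1f000217;	/* hypothetical AGPSTAT reading */

	if (stat & AGPSTAT_MODE_3_0)	/* bit 3: AGP 3.0 signalling */
		printf("AGP 3.0, %sx\n",
		       (stat & AGPSTAT3_8X) ? "8" : "4");
	else				/* AGP 2.0: rate bits are 1x/2x/4x */
		printf("AGP 2.0, rates:%s%s%s\n",
		       (stat & AGPSTAT2_1X) ? " 1x" : "",
		       (stat & AGPSTAT2_2X) ? " 2x" : "",
		       (stat & AGPSTAT2_4X) ? " 4x" : "");

	printf("sideband addressing %s, fast writes %s\n",
	       (stat & AGPSTAT_SBA) ? "yes" : "no",
	       (stat & AGPSTAT_FW) ? "yes" : "no");
	return 0;
}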
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
new file mode 100644
index 00000000..fd793519
--- /dev/null
+++ b/drivers/char/agp/ali-agp.c
@@ -0,0 +1,423 @@
+/*
+ * ALi AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <asm/page.h> /* PAGE_SIZE */
+#include "agp.h"
+
+#define ALI_AGPCTRL 0xb8
+#define ALI_ATTBASE 0xbc
+#define ALI_TLBCTRL 0xc0
+#define ALI_TAGCTRL 0xc4
+#define ALI_CACHE_FLUSH_CTRL 0xD0
+#define ALI_CACHE_FLUSH_ADDR_MASK 0xFFFFF000
+#define ALI_CACHE_FLUSH_EN 0x100
+
+static int ali_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ struct aper_size_info_32 *values;
+
+ pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
+ temp &= ~(0xfffffff0);
+ values = A_SIZE_32(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static void ali_tlbflush(struct agp_memory *mem)
+{
+ u32 temp;
+
+ pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+ temp &= 0xfffffff0;
+ temp |= (1<<0 | 1<<1);
+ pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, temp);
+}
+
+static void ali_cleanup(void)
+{
+ struct aper_size_info_32 *previous_size;
+ u32 temp;
+
+ previous_size = A_SIZE_32(agp_bridge->previous_size);
+
+ pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+// clear tag
+ pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL,
+ ((temp & 0xffffff00) | 0x00000001|0x00000002));
+
+ pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE,
+ ((temp & 0x00000ff0) | previous_size->size_value));
+}
+
+static int ali_configure(void)
+{
+ u32 temp;
+ struct aper_size_info_32 *current_size;
+
+ current_size = A_SIZE_32(agp_bridge->current_size);
+
+ /* aperture size and gatt addr */
+ pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
+ temp = (((temp & 0x00000ff0) | (agp_bridge->gatt_bus_addr & 0xfffff000))
+ | (current_size->size_value & 0xf));
+ pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, temp);
+
+ /* tlb control */
+ pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010));
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+#if 0
+ if (agp_bridge->type == ALI_M1541) {
+ u32 nlvm_addr = 0;
+
+ switch (current_size->size_value) {
+ case 0: break;
+ case 1: nlvm_addr = 0x100000;break;
+ case 2: nlvm_addr = 0x200000;break;
+ case 3: nlvm_addr = 0x400000;break;
+ case 4: nlvm_addr = 0x800000;break;
+ case 6: nlvm_addr = 0x1000000;break;
+ case 7: nlvm_addr = 0x2000000;break;
+ case 8: nlvm_addr = 0x4000000;break;
+ case 9: nlvm_addr = 0x8000000;break;
+ case 10: nlvm_addr = 0x10000000;break;
+ default: break;
+ }
+ nlvm_addr--;
+ nlvm_addr&=0xfff00000;
+
+ nlvm_addr+= agp_bridge->gart_bus_addr;
+ nlvm_addr|=(agp_bridge->gart_bus_addr>>12);
+ dev_info(&agp_bridge->dev->dev, "nlvm top &base = %8x\n",
+ nlvm_addr);
+ }
+#endif
+
+ pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
+ temp &= 0xffffff7f; //enable TLB
+ pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, temp);
+
+ return 0;
+}
+
+
+static void m1541_cache_flush(void)
+{
+ int i, page_count;
+ u32 temp;
+
+ global_cache_flush();
+
+ page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order;
+ for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {
+ pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+ &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+ (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+ (agp_bridge->gatt_bus_addr + i)) |
+ ALI_CACHE_FLUSH_EN));
+ }
+}
+
+static struct page *m1541_alloc_page(struct agp_bridge_data *bridge)
+{
+ struct page *page = agp_generic_alloc_page(agp_bridge);
+ u32 temp;
+
+ if (!page)
+ return NULL;
+
+ pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+ (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+ page_to_phys(page)) | ALI_CACHE_FLUSH_EN ));
+ return page;
+}
+
+static void ali_destroy_page(struct page *page, int flags)
+{
+ if (page) {
+ if (flags & AGP_PAGE_DESTROY_UNMAP) {
+ global_cache_flush(); /* is this really needed? --hch */
+ agp_generic_destroy_page(page, flags);
+ } else
+ agp_generic_destroy_page(page, flags);
+ }
+}
+
+static void m1541_destroy_page(struct page *page, int flags)
+{
+ u32 temp;
+
+ if (page == NULL)
+ return;
+
+ if (flags & AGP_PAGE_DESTROY_UNMAP) {
+ global_cache_flush();
+
+ pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
+ (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+ page_to_phys(page)) | ALI_CACHE_FLUSH_EN));
+ }
+ agp_generic_destroy_page(page, flags);
+}
+
+
+/* Setup function */
+
+static const struct aper_size_info_32 ali_generic_sizes[7] =
+{
+ {256, 65536, 6, 10},
+ {128, 32768, 5, 9},
+ {64, 16384, 4, 8},
+ {32, 8192, 3, 7},
+ {16, 4096, 2, 6},
+ {8, 2048, 1, 4},
+ {4, 1024, 0, 3}
+};
+
+static const struct agp_bridge_driver ali_generic_bridge = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = ali_generic_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = ali_configure,
+ .fetch_size = ali_fetch_size,
+ .cleanup = ali_cleanup,
+ .tlb_flush = ali_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = ali_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver ali_m1541_bridge = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = ali_generic_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = ali_configure,
+ .fetch_size = ali_fetch_size,
+ .cleanup = ali_cleanup,
+ .tlb_flush = ali_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = m1541_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = m1541_alloc_page,
+ .agp_destroy_page = m1541_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+
+static struct agp_device_ids ali_agp_device_ids[] __devinitdata =
+{
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1541,
+ .chipset_name = "M1541",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1621,
+ .chipset_name = "M1621",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1631,
+ .chipset_name = "M1631",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1632,
+ .chipset_name = "M1632",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1641,
+ .chipset_name = "M1641",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1644,
+ .chipset_name = "M1644",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1647,
+ .chipset_name = "M1647",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1651,
+ .chipset_name = "M1651",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1671,
+ .chipset_name = "M1671",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1681,
+ .chipset_name = "M1681",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AL_M1683,
+ .chipset_name = "M1683",
+ },
+
+ { }, /* dummy final entry, always present */
+};
+
+static int __devinit agp_ali_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_device_ids *devs = ali_agp_device_ids;
+ struct agp_bridge_data *bridge;
+ u8 hidden_1621_id, cap_ptr;
+ int j;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ /* probe for known chipsets */
+ for (j = 0; devs[j].chipset_name; j++) {
+ if (pdev->device == devs[j].device_id)
+ goto found;
+ }
+
+ dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
+ return -ENODEV;
+
+
+found:
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_AL_M1541:
+ bridge->driver = &ali_m1541_bridge;
+ break;
+ case PCI_DEVICE_ID_AL_M1621:
+ pci_read_config_byte(pdev, 0xFB, &hidden_1621_id);
+ switch (hidden_1621_id) {
+ case 0x31:
+ devs[j].chipset_name = "M1631";
+ break;
+ case 0x32:
+ devs[j].chipset_name = "M1632";
+ break;
+ case 0x41:
+ devs[j].chipset_name = "M1641";
+ break;
+ case 0x43:
+ devs[j].chipset_name = "M1621";
+ break;
+ case 0x47:
+ devs[j].chipset_name = "M1647";
+ break;
+ case 0x51:
+ devs[j].chipset_name = "M1651";
+ break;
+ default:
+ break;
+ }
+ /*FALLTHROUGH*/
+ default:
+ bridge->driver = &ali_generic_bridge;
+ }
+
+ dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_ali_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static struct pci_device_id agp_ali_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AL,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_ali_pci_table);
+
+static struct pci_driver agp_ali_pci_driver = {
+ .name = "agpgart-ali",
+ .id_table = agp_ali_pci_table,
+ .probe = agp_ali_probe,
+ .remove = agp_ali_remove,
+};
+
+static int __init agp_ali_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_ali_pci_driver);
+}
+
+static void __exit agp_ali_cleanup(void)
+{
+ pci_unregister_driver(&agp_ali_pci_driver);
+}
+
+module_init(agp_ali_init);
+module_exit(agp_ali_cleanup);
+
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
new file mode 100644
index 00000000..dd84af4d
--- /dev/null
+++ b/drivers/char/agp/alpha-agp.c
@@ -0,0 +1,222 @@
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/machvec.h>
+#include <asm/agp_backend.h>
+#include "../../../arch/alpha/kernel/pci_impl.h"
+
+#include "agp.h"
+
+static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ dma_addr_t dma_addr;
+ unsigned long pa;
+ struct page *page;
+
+ dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start
+ + agp->aperture.bus_base;
+ pa = agp->ops->translate(agp, dma_addr);
+
+ if (pa == (unsigned long)-EINVAL)
+ return VM_FAULT_SIGBUS; /* no translation */
+
+ /*
+ * Get the page, inc the use count, and return it
+ */
+ page = virt_to_page(__va(pa));
+ get_page(page);
+ vmf->page = page;
+ return 0;
+}
+
+static struct aper_size_info_fixed alpha_core_agp_sizes[] =
+{
+ { 0, 0, 0 }, /* filled in by alpha_core_agp_setup */
+};
+
+static const struct vm_operations_struct alpha_core_agp_vm_ops = {
+ .fault = alpha_core_agp_vm_fault,
+};
+
+
+static int alpha_core_agp_fetch_size(void)
+{
+ return alpha_core_agp_sizes[0].size;
+}
+
+static int alpha_core_agp_configure(void)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ agp_bridge->gart_bus_addr = agp->aperture.bus_base;
+ return 0;
+}
+
+static void alpha_core_agp_cleanup(void)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+
+ agp->ops->cleanup(agp);
+}
+
+static void alpha_core_agp_tlbflush(struct agp_memory *mem)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ alpha_mv.mv_pci_tbi(agp->hose, 0, -1);
+}
+
+static void alpha_core_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ alpha_agp_info *agp = bridge->dev_private_data;
+
+ agp->mode.lw = agp_collect_device_status(bridge, mode,
+ agp->capability.lw);
+
+ agp->mode.bits.enable = 1;
+ agp->ops->configure(agp);
+
+ agp_device_command(agp->mode.lw, false);
+}
+
+static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ int num_entries, status;
+ void *temp;
+
+ if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+ return -EINVAL;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ status = agp->ops->bind(agp, pg_start, mem);
+ mb();
+ alpha_core_agp_tlbflush(mem);
+
+ return status;
+}
+
+static int alpha_core_agp_remove_memory(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ alpha_agp_info *agp = agp_bridge->dev_private_data;
+ int status;
+
+ status = agp->ops->unbind(agp, pg_start, mem);
+ alpha_core_agp_tlbflush(mem);
+ return status;
+}
+
+static int alpha_core_agp_create_free_gatt_table(struct agp_bridge_data *a)
+{
+ return 0;
+}
+
+struct agp_bridge_driver alpha_core_agp_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = alpha_core_agp_sizes,
+ .num_aperture_sizes = 1,
+ .size_type = FIXED_APER_SIZE,
+ .cant_use_aperture = true,
+ .masks = NULL,
+
+ .fetch_size = alpha_core_agp_fetch_size,
+ .configure = alpha_core_agp_configure,
+ .agp_enable = alpha_core_agp_enable,
+ .cleanup = alpha_core_agp_cleanup,
+ .tlb_flush = alpha_core_agp_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = alpha_core_agp_create_free_gatt_table,
+ .free_gatt_table = alpha_core_agp_create_free_gatt_table,
+ .insert_memory = alpha_core_agp_insert_memory,
+ .remove_memory = alpha_core_agp_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+struct agp_bridge_data *alpha_bridge;
+
+int __init
+alpha_core_agp_setup(void)
+{
+ alpha_agp_info *agp = alpha_mv.agp_info();
+ struct pci_dev *pdev; /* faked */
+ struct aper_size_info_fixed *aper_size;
+
+ if (!agp)
+ return -ENODEV;
+ if (agp->ops->setup(agp))
+ return -ENODEV;
+
+ /*
+ * Build the aperture size descriptor
+ */
+ aper_size = alpha_core_agp_sizes;
+ aper_size->size = agp->aperture.size / (1024 * 1024);
+ aper_size->num_entries = agp->aperture.size / PAGE_SIZE;
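+ /* num_entries is a power of two, so __ffs() yields log2(num_entries / 1024) */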
+ aper_size->page_order = __ffs(aper_size->num_entries / 1024);
+
+ /*
+ * Build a fake pci_dev struct
+ */
+ pdev = alloc_pci_dev();
+ if (!pdev)
+ return -ENOMEM;
+ pdev->vendor = 0xffff;
+ pdev->device = 0xffff;
+ pdev->sysdata = agp->hose;
+
+ alpha_bridge = agp_alloc_bridge();
+ if (!alpha_bridge)
+ goto fail;
+
+ alpha_bridge->driver = &alpha_core_agp_driver;
+ alpha_bridge->vm_ops = &alpha_core_agp_vm_ops;
+ alpha_bridge->current_size = aper_size; /* only 1 size */
+ alpha_bridge->dev_private_data = agp;
+ alpha_bridge->dev = pdev;
+ alpha_bridge->mode = agp->capability.lw;
+
+ printk(KERN_INFO PFX "Detected AGP on hose %d\n", agp->hose->index);
+ return agp_add_bridge(alpha_bridge);
+
+ fail:
+ kfree(pdev);
+ return -ENOMEM;
+}
+
+static int __init agp_alpha_core_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ if (alpha_mv.agp_info)
+ return alpha_core_agp_setup();
+ return -ENODEV;
+}
+
+static void __exit agp_alpha_core_cleanup(void)
+{
+ agp_remove_bridge(alpha_bridge);
+ agp_put_bridge(alpha_bridge);
+}
+
+module_init(agp_alpha_core_init);
+module_exit(agp_alpha_core_cleanup);
+
+MODULE_AUTHOR("Jeff Wiedemeier <Jeff.Wiedemeier@hp.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
new file mode 100644
index 00000000..f7e88787
--- /dev/null
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -0,0 +1,568 @@
+/*
+ * AMD K7 AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include "agp.h"
+
+#define AMD_MMBASE 0x14
+#define AMD_APSIZE 0xac
+#define AMD_MODECNTL 0xb0
+#define AMD_MODECNTL2 0xb2
+#define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */
+#define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */
+#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */
+#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */
+
+static struct pci_device_id agp_amdk7_pci_table[];
+
+struct amd_page_map {
+ unsigned long *real;
+ unsigned long __iomem *remapped;
+};
+
+static struct _amd_irongate_private {
+ volatile u8 __iomem *registers;
+ struct amd_page_map **gatt_pages;
+ int num_tables;
+} amd_irongate_private;
+
+static int amd_create_page_map(struct amd_page_map *page_map)
+{
+ int i;
+
+ page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+ if (page_map->real == NULL)
+ return -ENOMEM;
+
+ set_memory_uc((unsigned long)page_map->real, 1);
+ page_map->remapped = page_map->real;
+
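+ /* Point every entry at the scratch page so stray GART accesses hit a harmless page. */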
+ for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+ writel(agp_bridge->scratch_page, page_map->remapped+i);
+ readl(page_map->remapped+i); /* PCI Posting. */
+ }
+
+ return 0;
+}
+
+static void amd_free_page_map(struct amd_page_map *page_map)
+{
+ set_memory_wb((unsigned long)page_map->real, 1);
+ free_page((unsigned long) page_map->real);
+}
+
+static void amd_free_gatt_pages(void)
+{
+ int i;
+ struct amd_page_map **tables;
+ struct amd_page_map *entry;
+
+ tables = amd_irongate_private.gatt_pages;
+ for (i = 0; i < amd_irongate_private.num_tables; i++) {
+ entry = tables[i];
+ if (entry != NULL) {
+ if (entry->real != NULL)
+ amd_free_page_map(entry);
+ kfree(entry);
+ }
+ }
+ kfree(tables);
+ amd_irongate_private.gatt_pages = NULL;
+}
+
+static int amd_create_gatt_pages(int nr_tables)
+{
+ struct amd_page_map **tables;
+ struct amd_page_map *entry;
+ int retval = 0;
+ int i;
+
+ tables = kzalloc((nr_tables + 1) * sizeof(struct amd_page_map *), GFP_KERNEL);
+ if (tables == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_tables; i++) {
+ entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL);
+ tables[i] = entry;
+ if (entry == NULL) {
+ retval = -ENOMEM;
+ break;
+ }
+ retval = amd_create_page_map(entry);
+ if (retval != 0)
+ break;
+ }
+ amd_irongate_private.num_tables = i;
+ amd_irongate_private.gatt_pages = tables;
+
+ if (retval != 0)
+ amd_free_gatt_pages();
+
+ return retval;
+}
+
+/* Since we don't need contiguous memory we just try
+ * to get the gatt table once
+ */
+
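+/*
+ * Two-level GATT: bits [31:22] of an aperture address index the page
+ * directory (each GATT page maps 4 MB), and bits [21:12] index one of
+ * the 1024 entries within that GATT page.
+ */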
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
+ GET_PAGE_DIR_IDX(addr)]->remapped)
+
+static int amd_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct aper_size_info_lvl2 *value;
+ struct amd_page_map page_dir;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+ int retval;
+ u32 temp;
+ int i;
+
+ value = A_SIZE_LVL2(agp_bridge->current_size);
+ retval = amd_create_page_map(&page_dir);
+ if (retval != 0)
+ return retval;
+
+ retval = amd_create_gatt_pages(value->num_entries / 1024);
+ if (retval != 0) {
+ amd_free_page_map(&page_dir);
+ return retval;
+ }
+
+ agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+ agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
+ agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+
+ /* Get the address for the gart region.
+ * This is a bus address even on the alpha, b/c it's
+ * used to program the agp master, not the cpu
+ */
+
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = addr;
+
+ /* Calculate the agp offset */
+ for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
+ writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
+ page_dir.remapped+GET_PAGE_DIR_OFF(addr));
+ readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
+ }
+
+ for (i = 0; i < value->num_entries; i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
+ }
+
+ return 0;
+}
+
+static int amd_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct amd_page_map page_dir;
+
+ page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
+ page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
+
+ amd_free_gatt_pages();
+ amd_free_page_map(&page_dir);
+ return 0;
+}
+
+static int amd_irongate_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ struct aper_size_info_lvl2 *values;
+
+ pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
+ temp = (temp & 0x0000000e);
+ values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static int amd_irongate_configure(void)
+{
+ struct aper_size_info_lvl2 *current_size;
+ u32 temp;
+ u16 enable_reg;
+
+ current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+ if (!amd_irongate_private.registers) {
+ /* Get the memory mapped registers */
+ pci_read_config_dword(agp_bridge->dev, AMD_MMBASE, &temp);
+ temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
+ if (!amd_irongate_private.registers)
+ return -ENOMEM;
+ }
+
+ /* Write out the address of the gatt table */
+ writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE);
+ readl(amd_irongate_private.registers+AMD_ATTBASE); /* PCI Posting. */
+
+ /* Write the Sync register */
+ pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80);
+
+ /* Set indexing mode */
+ pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00);
+
+ /* Write the enable register */
+ enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
+ enable_reg = (enable_reg | 0x0004);
+ writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
+ readw(amd_irongate_private.registers+AMD_GARTENABLE); /* PCI Posting. */
+
+ /* Write out the size register */
+ pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
+ temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1);
+ pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
+
+ /* Flush the tlb */
+ writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
+ readl(amd_irongate_private.registers+AMD_TLBFLUSH); /* PCI Posting.*/
+ return 0;
+}
+
+static void amd_irongate_cleanup(void)
+{
+ struct aper_size_info_lvl2 *previous_size;
+ u32 temp;
+ u16 enable_reg;
+
+ previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
+
+ enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
+ enable_reg = (enable_reg & ~(0x0004));
+ writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
+ readw(amd_irongate_private.registers+AMD_GARTENABLE); /* PCI Posting. */
+
+ /* Write back the previous size and disable gart translation */
+ pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
+ temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+ pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
+ iounmap((void __iomem *) amd_irongate_private.registers);
+}
+
+/*
+ * This routine could be implemented by taking the addresses
+ * written to the GATT, and flushing them individually. However,
+ * currently it just flushes the whole table, which is probably
+ * more efficient, since agp_memory blocks can span a large number
+ * of entries.
+ */
+
+static void amd_irongate_tlbflush(struct agp_memory *temp)
+{
+ writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
+ readl(amd_irongate_private.registers+AMD_TLBFLUSH); /* PCI Posting. */
+}
+
+static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i, j, num_entries;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
+ return -EINVAL;
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ j = pg_start;
+ while (j < (pg_start + mem->page_count)) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
+ return -EBUSY;
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_generic_mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]),
+ mem->type),
+ cur_gatt+GET_GATT_OFF(addr));
+ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
+ }
+ amd_irongate_tlbflush(mem);
+ return 0;
+}
+
+static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
+ return -EINVAL;
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
+ }
+
+ amd_irongate_tlbflush(mem);
+ return 0;
+}
+
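+/* {aperture size in MB, GATT entries, APSIZE size_value}; no page order since the table is two-level */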
+static const struct aper_size_info_lvl2 amd_irongate_sizes[7] =
+{
+ {2048, 524288, 0x0000000c},
+ {1024, 262144, 0x0000000a},
+ {512, 131072, 0x00000008},
+ {256, 65536, 0x00000006},
+ {128, 32768, 0x00000004},
+ {64, 16384, 0x00000002},
+ {32, 8192, 0x00000000}
+};
+
+static const struct gatt_mask amd_irongate_masks[] =
+{
+ {.mask = 1, .type = 0}
+};
+
+static const struct agp_bridge_driver amd_irongate_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = amd_irongate_sizes,
+ .size_type = LVL2_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = amd_irongate_configure,
+ .fetch_size = amd_irongate_fetch_size,
+ .cleanup = amd_irongate_cleanup,
+ .tlb_flush = amd_irongate_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = amd_irongate_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = amd_create_gatt_table,
+ .free_gatt_table = amd_free_gatt_table,
+ .insert_memory = amd_insert_memory,
+ .remove_memory = amd_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
+{
+ {
+ .device_id = PCI_DEVICE_ID_AMD_FE_GATE_7006,
+ .chipset_name = "Irongate",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AMD_FE_GATE_700E,
+ .chipset_name = "761",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_AMD_FE_GATE_700C,
+ .chipset_name = "760MP",
+ },
+ { }, /* dummy final entry, always present */
+};
+
+static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+ int j;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ j = ent - agp_amdk7_pci_table;
+ dev_info(&pdev->dev, "AMD %s chipset\n",
+ amd_agp_device_ids[j].chipset_name);
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &amd_irongate_driver;
+ bridge->dev_private_data = &amd_irongate_private;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ /* 751 Errata (22564_B-1.PDF)
+ erratum 20: strobe glitch with Nvidia NV10 GeForce cards.
+ system controller may experience noise due to strong drive strengths
+ */
+ if (pdev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
+ struct pci_dev *gfxcard=NULL;
+
+ cap_ptr = 0;
+ while (!cap_ptr) {
+ gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
+ if (!gfxcard) {
+ dev_info(&pdev->dev, "no AGP VGA controller\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+ cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
+ }
+
+ /* With so many variants of NVidia cards, it's simpler just
+ to blacklist them all, and then whitelist them as needed
+ (if necessary at all). */
+ if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
+ bridge->flags |= AGP_ERRATA_1X;
+ dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n");
+ }
+ pci_dev_put(gfxcard);
+ }
+
+ /* 761 Errata (23613_F.pdf)
+ * Revisions B0/B1 were a disaster.
+ * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X
+ * erratum 45: Timing problem prevents fast writes -- Disable fast write.
+ * erratum 46: Setup violation on AGP SBA pins - Disable side band addressing.
+ * With this lot disabled, we should prevent lockups. */
+ if (pdev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
+ if (pdev->revision == 0x10 || pdev->revision == 0x11) {
+ bridge->flags = AGP_ERRATA_FASTWRITES;
+ bridge->flags |= AGP_ERRATA_SBA;
+ bridge->flags |= AGP_ERRATA_1X;
+ dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n");
+ }
+ }
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_amdk7_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+#ifdef CONFIG_PM
+
+static int agp_amdk7_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int agp_amdk7_resume(struct pci_dev *pdev)
+{
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ return amd_irongate_driver.configure();
+}
+
+#endif /* CONFIG_PM */
+
+/* must be the same order as name table above */
+static struct pci_device_id agp_amdk7_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_FE_GATE_7006,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_FE_GATE_700E,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_FE_GATE_700C,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table);
+
+static struct pci_driver agp_amdk7_pci_driver = {
+ .name = "agpgart-amdk7",
+ .id_table = agp_amdk7_pci_table,
+ .probe = agp_amdk7_probe,
+ .remove = agp_amdk7_remove,
+#ifdef CONFIG_PM
+ .suspend = agp_amdk7_suspend,
+ .resume = agp_amdk7_resume,
+#endif
+};
+
+static int __init agp_amdk7_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_amdk7_pci_driver);
+}
+
+static void __exit agp_amdk7_cleanup(void)
+{
+ pci_unregister_driver(&agp_amdk7_pci_driver);
+}
+
+module_init(agp_amdk7_init);
+module_exit(agp_amdk7_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
new file mode 100644
index 00000000..780498d7
--- /dev/null
+++ b/drivers/char/agp/amd64-agp.c
@@ -0,0 +1,822 @@
+/*
+ * Copyright 2001-2003 SuSE Labs.
+ * Distributed under the GNU public license, v2.
+ *
+ * This is a GART driver for the AMD Opteron/Athlon64 on-CPU northbridge.
+ * It also includes support for the AMD 8151 AGP bridge,
+ * although it doesn't actually do much, as all the real
+ * work is done in the northbridge(s).
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/mmzone.h>
+#include <asm/page.h> /* PAGE_SIZE */
+#include <asm/e820.h>
+#include <asm/amd_nb.h>
+#include <asm/gart.h>
+#include "agp.h"
+
+/* NVIDIA K8 registers */
+#define NVIDIA_X86_64_0_APBASE 0x10
+#define NVIDIA_X86_64_1_APBASE1 0x50
+#define NVIDIA_X86_64_1_APLIMIT1 0x54
+#define NVIDIA_X86_64_1_APSIZE 0xa8
+#define NVIDIA_X86_64_1_APBASE2 0xd8
+#define NVIDIA_X86_64_1_APLIMIT2 0xdc
+
+/* ULi K8 registers */
+#define ULI_X86_64_BASE_ADDR 0x10
+#define ULI_X86_64_HTT_FEA_REG 0x50
+#define ULI_X86_64_ENU_SCR_REG 0x54
+
+static struct resource *aperture_resource;
+static int __initdata agp_try_unsupported = 1;
+static int agp_bridges_found;
+
+static void amd64_tlbflush(struct agp_memory *temp)
+{
+ amd_flush_garts();
+}
+
+static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i, j, num_entries;
+ long long tmp;
+ int mask_type;
+ struct agp_bridge_data *bridge = mem->bridge;
+ u32 pte;
+
+ num_entries = agp_num_entries();
+
+ if (type != mem->type)
+ return -EINVAL;
+ mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+ if (mask_type != 0)
+ return -EINVAL;
+
+
+ /* Make sure we can fit the range in the gatt table. */
+ /* FIXME: could wrap */
+ if (((unsigned long)pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ j = pg_start;
+
+ /* gatt table should be empty. */
+ while (j < (pg_start + mem->page_count)) {
+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
+ return -EBUSY;
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ tmp = agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]),
+ mask_type);
+
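+ /*
+ * Pack the 40-bit physical address into a K8 GART PTE: bits
+ * [39:32] move into PTE bits [11:4], bits [31:12] stay in
+ * place, plus the valid and coherent bits.
+ */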
+ BUG_ON(tmp & 0xffffff0000000ffcULL);
+ pte = (tmp & 0x000000ff00000000ULL) >> 28;
+ pte |= (tmp & 0x00000000fffff000ULL);
+ pte |= GPTE_VALID | GPTE_COHERENT;
+
+ writel(pte, agp_bridge->gatt_table+j);
+ readl(agp_bridge->gatt_table+j); /* PCI Posting. */
+ }
+ amd64_tlbflush(mem);
+ return 0;
+}
+
+/*
+ * This hack alters the order element according
+ * to the size of a long. It sucks. I totally disown this, even
+ * though it does appear to work for the most part.
+ */
+static struct aper_size_info_32 amd64_aperture_sizes[7] =
+{
+ {32, 8192, 3+(sizeof(long)/8), 0 },
+ {64, 16384, 4+(sizeof(long)/8), 1<<1 },
+ {128, 32768, 5+(sizeof(long)/8), 1<<2 },
+ {256, 65536, 6+(sizeof(long)/8), 1<<1 | 1<<2 },
+ {512, 131072, 7+(sizeof(long)/8), 1<<3 },
+ {1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3},
+ {2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3}
+};
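+/* sizeof(long)/8 evaluates to 1 on 64-bit kernels and 0 on 32-bit ones,
+   so each page order above ends up one higher on a 64-bit kernel. */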
+
+
+/*
+ * Get the current Aperture size from the x86-64.
+ * Note that there may be multiple x86-64's, but we just return
+ * the value from the first one we find. The set_size functions
+ * keep the rest coherent anyway. Or at least they should.
+ */
+static int amd64_fetch_size(void)
+{
+ struct pci_dev *dev;
+ int i;
+ u32 temp;
+ struct aper_size_info_32 *values;
+
+ dev = node_to_amd_nb(0)->misc;
+ if (dev == NULL)
+ return 0;
+
+ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp);
+ temp = (temp & 0xe);
+ values = A_SIZE_32(amd64_aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ return 0;
+}
+
+/*
+ * In a multiprocessor x86-64 system, this function gets
+ * called once for each CPU.
+ */
+static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
+{
+ u64 aperturebase;
+ u32 tmp;
+ u64 aper_base;
+
+ /* Address to map to */
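+ /* GARTAPERTUREBASE holds the aperture's physical base in 32 MB units, i.e. address bits [39:25], hence the shift. */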
+ pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
+ aperturebase = (u64)tmp << 25;
+ aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
+
+ enable_gart_translation(hammer, gatt_table);
+
+ return aper_base;
+}
+
+
+static const struct aper_size_info_32 amd_8151_sizes[7] =
+{
+ {2048, 524288, 9, 0x00000000 }, /* 0 0 0 0 0 0 */
+ {1024, 262144, 8, 0x00000400 }, /* 1 0 0 0 0 0 */
+ {512, 131072, 7, 0x00000600 }, /* 1 1 0 0 0 0 */
+ {256, 65536, 6, 0x00000700 }, /* 1 1 1 0 0 0 */
+ {128, 32768, 5, 0x00000720 }, /* 1 1 1 1 0 0 */
+ {64, 16384, 4, 0x00000730 }, /* 1 1 1 1 1 0 */
+ {32, 8192, 3, 0x00000738 } /* 1 1 1 1 1 1 */
+};
+
+static int amd_8151_configure(void)
+{
+ unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
+ int i;
+
+ if (!amd_nb_has_feature(AMD_NB_GART))
+ return 0;
+
+ /* Configure AGP regs in each x86-64 host bridge. */
+ for (i = 0; i < amd_nb_num(); i++) {
+ agp_bridge->gart_bus_addr =
+ amd64_configure(node_to_amd_nb(i)->misc, gatt_bus);
+ }
+ amd_flush_garts();
+ return 0;
+}
+
+
+static void amd64_cleanup(void)
+{
+ u32 tmp;
+ int i;
+
+ if (!amd_nb_has_feature(AMD_NB_GART))
+ return;
+
+ for (i = 0; i < amd_nb_num(); i++) {
+ struct pci_dev *dev = node_to_amd_nb(i)->misc;
+ /* disable gart translation */
+ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
+ tmp &= ~GARTEN;
+ pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
+ }
+}
+
+
+static const struct agp_bridge_driver amd_8151_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = amd_8151_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = amd_8151_configure,
+ .fetch_size = amd64_fetch_size,
+ .cleanup = amd64_cleanup,
+ .tlb_flush = amd64_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = amd64_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+/* Some basic sanity checks for the aperture. */
+static int __devinit agp_aperture_valid(u64 aper, u32 size)
+{
+ if (!aperture_valid(aper, size, 32*1024*1024))
+ return 0;
+
+ /* Request the Aperture. This catches cases when someone else
+ already put a mapping in there - happens with some very broken BIOSes.
+
+ Maybe better to use pci_assign_resource/pci_enable_device instead of
+ trusting the bridges? */
+ if (!aperture_resource &&
+ !(aperture_resource = request_mem_region(aper, size, "aperture"))) {
+ printk(KERN_ERR PFX "Aperture conflicts with PCI mapping.\n");
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * W*s centric BIOSes sometimes only set up the aperture in the AGP
+ * bridge, not the northbridge. On AMD64 this is handled early
+ * in aperture.c, but when the IOMMU is not enabled or we run
+ * on a 32bit kernel this needs to be redone.
+ * Unfortunately it is impossible to fix the aperture here because it's too
+ * late to allocate that much memory. But at least error out cleanly instead
+ * of crashing.
+ */
+static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
+ u16 cap)
+{
+ u32 aper_low, aper_hi;
+ u64 aper, nb_aper;
+ int order = 0;
+ u32 nb_order, nb_base;
+ u16 apsize;
+
+ pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
+ nb_order = (nb_order >> 1) & 7;
+ pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
+ nb_aper = (u64)nb_base << 25;
+
+ /* Northbridge seems to contain crap. Try the AGP bridge. */
+
+ pci_read_config_word(agp, cap+0x14, &apsize);
+ if (apsize == 0xffff) {
+ if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
+ return 0;
+ return -1;
+ }
+
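+ /*
+ * APSIZE is an inverted-mask encoding: every additional set bit
+ * halves the aperture, so order = 7 - popcount and the aperture
+ * spans 32 MB << order.
+ */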
+ apsize &= 0xfff;
+ /* Some BIOS use weird encodings not in the AGPv3 table. */
+ if (apsize & 0xff)
+ apsize |= 0xf00;
+ order = 7 - hweight16(apsize);
+
+ pci_read_config_dword(agp, 0x10, &aper_low);
+ pci_read_config_dword(agp, 0x14, &aper_hi);
+ aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32);
+
+ /*
+ * On some sick chips APSIZE is 0. This means it wants 4G
+ * so let's double-check that order, and trust the AMD NB settings.
+ */
+ if (order >= 0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
+ dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n",
+ 32 << order);
+ order = nb_order;
+ }
+
+ if (nb_order >= order) {
+ if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
+ return 0;
+ }
+
+ dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
+ aper, 32 << order);
+ if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
+ return -1;
+
+ gart_set_size_and_enable(nb, order);
+ pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);
+
+ return 0;
+}
+
+static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
+{
+ int i;
+
+ if (amd_cache_northbridges() < 0)
+ return -ENODEV;
+
+ if (!amd_nb_has_feature(AMD_NB_GART))
+ return -ENODEV;
+
+ for (i = 0; i < amd_nb_num(); i++) {
+ struct pci_dev *dev = node_to_amd_nb(i)->misc;
+ if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
+ dev_err(&dev->dev, "no usable aperture found\n");
+#ifdef __x86_64__
+ /* should port this to i386 */
+ dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n");
+#endif
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/* Handle AMD 8151 quirks */
+static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
+{
+ char *revstring;
+
+ switch (pdev->revision) {
+ case 0x01: revstring="A0"; break;
+ case 0x02: revstring="A1"; break;
+ case 0x11: revstring="B0"; break;
+ case 0x12: revstring="B1"; break;
+ case 0x13: revstring="B2"; break;
+ case 0x14: revstring="B3"; break;
+ default: revstring="??"; break;
+ }
+
+ dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring);
+
+ /*
+ * Work around errata.
+ * Chips before B2 stepping incorrectly reporting v3.5
+ */
+ if (pdev->revision < 0x13) {
+ dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n");
+ bridge->major_version = 3;
+ bridge->minor_version = 0;
+ }
+}
+
+
+static const struct aper_size_info_32 uli_sizes[7] =
+{
+ {256, 65536, 6, 10},
+ {128, 32768, 5, 9},
+ {64, 16384, 4, 8},
+ {32, 8192, 3, 7},
+ {16, 4096, 2, 6},
+ {8, 2048, 1, 4},
+ {4, 1024, 0, 3}
+};
+static int __devinit uli_agp_init(struct pci_dev *pdev)
+{
+ u32 httfea,baseaddr,enuscr;
+ struct pci_dev *dev1;
+ int i, ret;
+ unsigned size = amd64_fetch_size();
+
+ dev_info(&pdev->dev, "setting up ULi AGP\n");
+ dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(0, 0));
+ if (dev1 == NULL) {
+ dev_info(&pdev->dev, "can't find ULi secondary device\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(uli_sizes); i++)
+ if (uli_sizes[i].size == size)
+ break;
+
+ if (i == ARRAY_SIZE(uli_sizes)) {
+ dev_info(&pdev->dev, "no ULi size found for %d\n", size);
+ ret = -ENODEV;
+ goto put;
+ }
+
+ /* shadow x86-64 registers into ULi registers */
+ pci_read_config_dword(node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
+ &httfea);
+
+ /* if x86-64 aperture base is beyond 4G, exit here */
+ if ((httfea & 0x7fff) >> (32 - 25)) {
+ ret = -ENODEV;
+ goto put;
+ }
+
+ httfea = (httfea & 0x7fff) << 25;
+
+ pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr);
+ baseaddr &= ~PCI_BASE_ADDRESS_MEM_MASK;
+ baseaddr |= httfea;
+ pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr);
+
+ enuscr = httfea + (size * 1024 * 1024) - 1;
+ pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
+ pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
+ ret = 0;
+put:
+ pci_dev_put(dev1);
+ return ret;
+}
+
+
+static const struct aper_size_info_32 nforce3_sizes[5] =
+{
+ {512, 131072, 7, 0x00000000 },
+ {256, 65536, 6, 0x00000008 },
+ {128, 32768, 5, 0x0000000C },
+ {64, 16384, 4, 0x0000000E },
+ {32, 8192, 3, 0x0000000F }
+};
+
+/* Handle shadow device of the Nvidia NForce3 */
+/* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */
+static int nforce3_agp_init(struct pci_dev *pdev)
+{
+ u32 tmp, apbase, apbar, aplimit;
+ struct pci_dev *dev1;
+ int i, ret;
+ unsigned size = amd64_fetch_size();
+
+ dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
+
+ dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0));
+ if (dev1 == NULL) {
+ dev_info(&pdev->dev, "can't find Nforce3 secondary device\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++)
+ if (nforce3_sizes[i].size == size)
+ break;
+
+ if (i == ARRAY_SIZE(nforce3_sizes)) {
+ dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
+ ret = -ENODEV;
+ goto put;
+ }
+
+ pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
+ tmp &= ~(0xf);
+ tmp |= nforce3_sizes[i].size_value;
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
+
+ /* shadow x86-64 registers into NVIDIA registers */
+ pci_read_config_dword(node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
+ &apbase);
+
+ /* if x86-64 aperture base is beyond 4G, exit here */
+ if ((apbase & 0x7fff) >> (32 - 25)) {
+ dev_info(&pdev->dev, "aperture base > 4G\n");
+ ret = -ENODEV;
+ goto put;
+ }
+
+ apbase = (apbase & 0x7fff) << 25;
+
+ pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar);
+ apbar &= ~PCI_BASE_ADDRESS_MEM_MASK;
+ apbar |= apbase;
+ pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar);
+
+ aplimit = apbase + (size * 1024 * 1024) - 1;
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase);
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit);
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
+ pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
+
+ ret = 0;
+put:
+ pci_dev_put(dev1);
+
+ return ret;
+}
+
+static int __devinit agp_amd64_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+ int err;
+
+ /* The Highlander principle */
+ if (agp_bridges_found)
+ return -ENODEV;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ /* Could check for AGPv3 here */
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ pdev->device == PCI_DEVICE_ID_AMD_8151_0) {
+ amd8151_init(pdev, bridge);
+ } else {
+ dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
+ }
+
+ bridge->driver = &amd_8151_driver;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
+
+ if (cache_nbs(pdev, cap_ptr) == -1) {
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
+ int ret = nforce3_agp_init(pdev);
+ if (ret) {
+ agp_put_bridge(bridge);
+ return ret;
+ }
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_AL) {
+ int ret = uli_agp_init(pdev);
+ if (ret) {
+ agp_put_bridge(bridge);
+ return ret;
+ }
+ }
+
+ pci_set_drvdata(pdev, bridge);
+ err = agp_add_bridge(bridge);
+ if (err < 0)
+ return err;
+
+ agp_bridges_found++;
+ return 0;
+}
+
+static void __devexit agp_amd64_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ release_mem_region(virt_to_phys(bridge->gatt_table_real),
+ amd64_aperture_sizes[bridge->aperture_size_idx].size);
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+
+ agp_bridges_found--;
+}
+
+#ifdef CONFIG_PM
+
+static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int agp_amd64_resume(struct pci_dev *pdev)
+{
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (pdev->vendor == PCI_VENDOR_ID_NVIDIA)
+ nforce3_agp_init(pdev);
+
+ return amd_8151_configure();
+}
+
+#endif /* CONFIG_PM */
+
+static struct pci_device_id agp_amd64_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_8151_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* ULi M1689 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AL,
+ .device = PCI_DEVICE_ID_AL_M1689,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8T800Pro */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_K8T800PRO_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8T800 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_8385_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8M800 / K8N800 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_8380_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8M890 / K8N890 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_VT3336,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8T890 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_3238_0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* VIA K8T800/K8M800/K8N800 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_838X_1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* NForce3 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NFORCE3,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NFORCE3S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* SIS 755 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_755,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* SIS 760 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_760,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ /* ALI/ULI M1695 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_AL,
+ .device = 0x1695,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
+
+static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
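+ /* class 0 with class_mask 0 matches every PCI device; probe() still requires an AGP capability. */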
+ { PCI_DEVICE_CLASS(0, 0) },
+ { }
+};
+
+static struct pci_driver agp_amd64_pci_driver = {
+ .name = "agpgart-amd64",
+ .id_table = agp_amd64_pci_table,
+ .probe = agp_amd64_probe,
+ .remove = agp_amd64_remove,
+#ifdef CONFIG_PM
+ .suspend = agp_amd64_suspend,
+ .resume = agp_amd64_resume,
+#endif
+};
+
+
+/* Not static due to IOMMU code calling it early. */
+int __init agp_amd64_init(void)
+{
+ int err = 0;
+
+ if (agp_off)
+ return -EINVAL;
+
+ err = pci_register_driver(&agp_amd64_pci_driver);
+ if (err < 0)
+ return err;
+
+ if (agp_bridges_found == 0) {
+ if (!agp_try_unsupported && !agp_try_unsupported_boot) {
+ printk(KERN_INFO PFX "No supported AGP bridge found.\n");
+#ifdef MODULE
+ printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n");
+#else
+ printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
+#endif
+ pci_unregister_driver(&agp_amd64_pci_driver);
+ return -ENODEV;
+ }
+
+ /* First check that we have at least one AMD64 NB */
+ if (!pci_dev_present(amd_nb_misc_ids)) {
+ pci_unregister_driver(&agp_amd64_pci_driver);
+ return -ENODEV;
+ }
+
+ /* Look for any AGP bridge */
+ agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
+ err = driver_attach(&agp_amd64_pci_driver.driver);
+ if (err == 0 && agp_bridges_found == 0) {
+ pci_unregister_driver(&agp_amd64_pci_driver);
+ err = -ENODEV;
+ }
+ }
+ return err;
+}
+
+static int __init agp_amd64_mod_init(void)
+{
+#ifndef MODULE
+ if (gart_iommu_aperture)
+ return agp_bridges_found ? 0 : -ENODEV;
+#endif
+ return agp_amd64_init();
+}
+
+static void __exit agp_amd64_cleanup(void)
+{
+#ifndef MODULE
+ if (gart_iommu_aperture)
+ return;
+#endif
+ if (aperture_resource)
+ release_resource(aperture_resource);
+ pci_unregister_driver(&agp_amd64_pci_driver);
+}
+
+module_init(agp_amd64_mod_init);
+module_exit(agp_amd64_cleanup);
+
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
+module_param(agp_try_unsupported, bool, 0);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
new file mode 100644
index 00000000..dc30e224
--- /dev/null
+++ b/drivers/char/agp/ati-agp.c
@@ -0,0 +1,586 @@
+/*
+ * ATi AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/agp_backend.h>
+#include <asm/agp.h>
+#include "agp.h"
+
+#define ATI_GART_MMBASE_ADDR 0x14
+#define ATI_RS100_APSIZE 0xac
+#define ATI_RS100_IG_AGPMODE 0xb0
+#define ATI_RS300_APSIZE 0xf8
+#define ATI_RS300_IG_AGPMODE 0xfc
+#define ATI_GART_FEATURE_ID 0x00
+#define ATI_GART_BASE 0x04
+#define ATI_GART_CACHE_SZBASE 0x08
+#define ATI_GART_CACHE_CNTRL 0x0c
+#define ATI_GART_CACHE_ENTRY_CNTRL 0x10
+
+
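+/* {aperture size in MB, GATT entries, APSIZE size_value} */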
+static const struct aper_size_info_lvl2 ati_generic_sizes[7] =
+{
+ {2048, 524288, 0x0000000c},
+ {1024, 262144, 0x0000000a},
+ {512, 131072, 0x00000008},
+ {256, 65536, 0x00000006},
+ {128, 32768, 0x00000004},
+ {64, 16384, 0x00000002},
+ {32, 8192, 0x00000000}
+};
+
+static struct gatt_mask ati_generic_masks[] =
+{
+ { .mask = 1, .type = 0}
+};
+
+
+struct ati_page_map {
+ unsigned long *real;
+ unsigned long __iomem *remapped;
+};
+
+static struct _ati_generic_private {
+ volatile u8 __iomem *registers;
+ struct ati_page_map **gatt_pages;
+ int num_tables;
+} ati_generic_private;
+
+static int ati_create_page_map(struct ati_page_map *page_map)
+{
+ int i, err = 0;
+
+ page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+ if (page_map->real == NULL)
+ return -ENOMEM;
+
+ set_memory_uc((unsigned long)page_map->real, 1);
+ err = map_page_into_agp(virt_to_page(page_map->real));
+ page_map->remapped = page_map->real;
+
+ for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+ writel(agp_bridge->scratch_page, page_map->remapped+i);
+ readl(page_map->remapped+i); /* PCI Posting. */
+ }
+
+ return 0;
+}
+
+
+static void ati_free_page_map(struct ati_page_map *page_map)
+{
+ unmap_page_from_agp(virt_to_page(page_map->real));
+ set_memory_wb((unsigned long)page_map->real, 1);
+ free_page((unsigned long) page_map->real);
+}
+
+
+static void ati_free_gatt_pages(void)
+{
+ int i;
+ struct ati_page_map **tables;
+ struct ati_page_map *entry;
+
+ tables = ati_generic_private.gatt_pages;
+ for (i = 0; i < ati_generic_private.num_tables; i++) {
+ entry = tables[i];
+ if (entry != NULL) {
+ if (entry->real != NULL)
+ ati_free_page_map(entry);
+ kfree(entry);
+ }
+ }
+ kfree(tables);
+ ati_generic_private.gatt_pages = NULL;
+}
+
+
+static int ati_create_gatt_pages(int nr_tables)
+{
+ struct ati_page_map **tables;
+ struct ati_page_map *entry;
+ int retval = 0;
+ int i;
+
+ tables = kzalloc((nr_tables + 1) * sizeof(struct ati_page_map *), GFP_KERNEL);
+ if (tables == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_tables; i++) {
+ entry = kzalloc(sizeof(struct ati_page_map), GFP_KERNEL);
+ tables[i] = entry;
+ if (entry == NULL) {
+ retval = -ENOMEM;
+ break;
+ }
+ retval = ati_create_page_map(entry);
+ if (retval != 0)
+ break;
+ }
+ ati_generic_private.num_tables = i;
+ ati_generic_private.gatt_pages = tables;
+
+ if (retval != 0)
+ ati_free_gatt_pages();
+
+ return retval;
+}
+
+static int is_r200(void)
+{
+ if ((agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS100) ||
+ (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200) ||
+ (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200_B) ||
+ (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS250))
+ return 1;
+ return 0;
+}
+
+static int ati_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ struct aper_size_info_lvl2 *values;
+
+ if (is_r200())
+ pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+ else
+ pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+
+ temp = (temp & 0x0000000e);
+ values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static void ati_tlbflush(struct agp_memory * mem)
+{
+ writel(1, ati_generic_private.registers+ATI_GART_CACHE_CNTRL);
+ readl(ati_generic_private.registers+ATI_GART_CACHE_CNTRL); /* PCI Posting. */
+}
+
+static void ati_cleanup(void)
+{
+ struct aper_size_info_lvl2 *previous_size;
+ u32 temp;
+
+ previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
+
+ /* Write back the previous size and disable gart translation */
+ if (is_r200()) {
+ pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+ temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+ pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
+ } else {
+ pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+ temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+ pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
+ }
+ iounmap((volatile u8 __iomem *)ati_generic_private.registers);
+}
+
+
+static int ati_configure(void)
+{
+ u32 temp;
+
+ /* Get the memory mapped registers */
+ pci_read_config_dword(agp_bridge->dev, ATI_GART_MMBASE_ADDR, &temp);
+ temp = (temp & 0xfffff000);
+ ati_generic_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
+
+ if (!ati_generic_private.registers)
+ return -ENOMEM;
+
+ if (is_r200())
+ pci_write_config_dword(agp_bridge->dev, ATI_RS100_IG_AGPMODE, 0x20000);
+ else
+ pci_write_config_dword(agp_bridge->dev, ATI_RS300_IG_AGPMODE, 0x20000);
+
+ /* address to map to */
+ /*
+ pci_read_config_dword(agp_bridge.dev, AGP_APBASE, &temp);
+ agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr);
+ */
+ writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID);
+ readl(ati_generic_private.registers+ATI_GART_FEATURE_ID); /* PCI Posting.*/
+
+ /* SIGNALED_SYSTEM_ERROR @ NB_STATUS */
+ pci_read_config_dword(agp_bridge->dev, 4, &temp);
+ pci_write_config_dword(agp_bridge->dev, 4, temp | (1<<14));
+
+ /* Write out the address of the gatt table */
+ writel(agp_bridge->gatt_bus_addr, ati_generic_private.registers+ATI_GART_BASE);
+ readl(ati_generic_private.registers+ATI_GART_BASE); /* PCI Posting. */
+
+ return 0;
+}
+
+
+#ifdef CONFIG_PM
+static int agp_ati_suspend(struct pci_dev *dev, pm_message_t state)
+{
+ pci_save_state(dev);
+ pci_set_power_state(dev, 3);
+
+ return 0;
+}
+
+static int agp_ati_resume(struct pci_dev *dev)
+{
+ pci_set_power_state(dev, 0);
+ pci_restore_state(dev);
+
+ return ati_configure();
+}
+#endif
+
+/*
+ * Since we don't need contiguous memory we just try
+ * to get the gatt table once
+ */
+
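+/* Same two-level lookup as amd-k7-agp: bits [31:22] pick the GATT page, bits [21:12] the entry within it. */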
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#undef GET_GATT
+#define GET_GATT(addr) (ati_generic_private.gatt_pages[\
+ GET_PAGE_DIR_IDX(addr)]->remapped)
+
+static int ati_insert_memory(struct agp_memory * mem,
+ off_t pg_start, int type)
+{
+ int i, j, num_entries;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+ int mask_type;
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
+ if (mask_type != 0 || type != mem->type)
+ return -EINVAL;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ j = pg_start;
+ while (j < (pg_start + mem->page_count)) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ if (!PGE_EMPTY(agp_bridge,readl(cur_gatt+GET_GATT_OFF(addr))))
+ return -EBUSY;
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ /*CACHE_FLUSH(); */
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]),
+ mem->type),
+ cur_gatt+GET_GATT_OFF(addr));
+ }
+ readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int ati_remove_memory(struct agp_memory * mem, off_t pg_start,
+ int type)
+{
+ int i;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+ int mask_type;
+
+ mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
+ if (mask_type != 0 || type != mem->type)
+ return -EINVAL;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ }
+
+ readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int ati_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct aper_size_info_lvl2 *value;
+ struct ati_page_map page_dir;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+ int retval;
+ u32 temp;
+ int i;
+ struct aper_size_info_lvl2 *current_size;
+
+ value = A_SIZE_LVL2(agp_bridge->current_size);
+ retval = ati_create_page_map(&page_dir);
+ if (retval != 0)
+ return retval;
+
+ retval = ati_create_gatt_pages(value->num_entries / 1024);
+ if (retval != 0) {
+ ati_free_page_map(&page_dir);
+ return retval;
+ }
+
+ agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+ agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
+ agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+
+ /* Write out the size register */
+ current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+ if (is_r200()) {
+ pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+ temp = (((temp & ~(0x0000000e)) | current_size->size_value)
+ | 0x00000001);
+ pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
+ pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
+ } else {
+ pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+ temp = (((temp & ~(0x0000000e)) | current_size->size_value)
+ | 0x00000001);
+ pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
+ pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
+ }
+
+ /*
+ * Get the address for the gart region.
+ * This is a bus address even on the alpha, b/c it's
+ * used to program the agp master, not the cpu
+ */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->gart_bus_addr = addr;
+
+ /* Calculate the agp offset */
+ for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
+ writel(virt_to_phys(ati_generic_private.gatt_pages[i]->real) | 1,
+ page_dir.remapped+GET_PAGE_DIR_OFF(addr));
+ readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
+ }
+
+ for (i = 0; i < value->num_entries; i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ }
+
+ return 0;
+}
+
+static int ati_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct ati_page_map page_dir;
+
+ page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
+ page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
+
+ ati_free_gatt_pages();
+ ati_free_page_map(&page_dir);
+ return 0;
+}
+
+static const struct agp_bridge_driver ati_generic_bridge = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = ati_generic_sizes,
+ .size_type = LVL2_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = ati_configure,
+ .fetch_size = ati_fetch_size,
+ .cleanup = ati_cleanup,
+ .tlb_flush = ati_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = ati_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = ati_create_gatt_table,
+ .free_gatt_table = ati_free_gatt_table,
+ .insert_memory = ati_insert_memory,
+ .remove_memory = ati_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+
+static struct agp_device_ids ati_agp_device_ids[] __devinitdata =
+{
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS100,
+ .chipset_name = "IGP320/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS200,
+ .chipset_name = "IGP330/340/345/350/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS200_B,
+ .chipset_name = "IGP345M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS250,
+ .chipset_name = "IGP7000/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS300_100,
+ .chipset_name = "IGP9100/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS300_133,
+ .chipset_name = "IGP9100/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS300_166,
+ .chipset_name = "IGP9100/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS300_200,
+ .chipset_name = "IGP9100/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS350_133,
+ .chipset_name = "IGP9000/M",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_ATI_RS350_200,
+ .chipset_name = "IGP9100/M",
+ },
+ { }, /* dummy final entry, always present */
+};
+
+static int __devinit agp_ati_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_device_ids *devs = ati_agp_device_ids;
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+ int j;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ /* probe for known chipsets */
+ for (j = 0; devs[j].chipset_name; j++) {
+ if (pdev->device == devs[j].device_id)
+ goto found;
+ }
+
+	dev_err(&pdev->dev, "unsupported ATI chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
+ return -ENODEV;
+
+found:
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ bridge->driver = &ati_generic_bridge;
+
+	dev_info(&pdev->dev, "ATI %s chipset\n", devs[j].chipset_name);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_ati_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static struct pci_device_id agp_ati_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_ATI,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_ati_pci_table);
+
+static struct pci_driver agp_ati_pci_driver = {
+ .name = "agpgart-ati",
+ .id_table = agp_ati_pci_table,
+ .probe = agp_ati_probe,
+ .remove = agp_ati_remove,
+#ifdef CONFIG_PM
+ .suspend = agp_ati_suspend,
+ .resume = agp_ati_resume,
+#endif
+};
+
+static int __init agp_ati_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_ati_pci_driver);
+}
+
+static void __exit agp_ati_cleanup(void)
+{
+ pci_unregister_driver(&agp_ati_pci_driver);
+}
+
+module_init(agp_ati_init);
+module_exit(agp_ati_cleanup);
+
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
new file mode 100644
index 00000000..f27d0d08
--- /dev/null
+++ b/drivers/char/agp/backend.c
@@ -0,0 +1,367 @@
+/*
+ * AGPGART driver backend routines.
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2003 Dave Jones.
+ * Copyright (C) 1999 Jeff Hartmann.
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, DAVE JONES, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO:
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/pm.h>
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include "agp.h"
+
+/* Due to XFree86 brain-damage, we can't go to 1.0 until they
+ * fix some real stupidity. It's only by chance we can bump
+ * past 0.99 at all due to some boolean logic error. */
+#define AGPGART_VERSION_MAJOR 0
+#define AGPGART_VERSION_MINOR 103
+static const struct agp_version agp_current_version =
+{
+ .major = AGPGART_VERSION_MAJOR,
+ .minor = AGPGART_VERSION_MINOR,
+};
+
+struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *) =
+ &agp_generic_find_bridge;
+
+struct agp_bridge_data *agp_bridge;
+LIST_HEAD(agp_bridges);
+EXPORT_SYMBOL(agp_bridge);
+EXPORT_SYMBOL(agp_bridges);
+EXPORT_SYMBOL(agp_find_bridge);
+
+/**
+ * agp_backend_acquire - attempt to acquire an agp backend.
+ *
+ */
+struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge;
+
+ bridge = agp_find_bridge(pdev);
+
+ if (!bridge)
+ return NULL;
+
+ if (atomic_read(&bridge->agp_in_use))
+ return NULL;
+ atomic_inc(&bridge->agp_in_use);
+ return bridge;
+}
+EXPORT_SYMBOL(agp_backend_acquire);
+
+
+/**
+ * agp_backend_release - release the lock on the agp backend.
+ *
+ * The caller must ensure that the graphics aperture translation table
+ * is ready for use by another entity.
+ *
+ * (Ensure that all memory it bound is unbound.)
+ */
+void agp_backend_release(struct agp_bridge_data *bridge)
+{
+
+ if (bridge)
+ atomic_dec(&bridge->agp_in_use);
+}
+EXPORT_SYMBOL(agp_backend_release);
+
+
+static const struct { int mem, agp; } maxes_table[] = {
+ {0, 0},
+ {32, 4},
+ {64, 28},
+ {128, 96},
+ {256, 204},
+ {512, 440},
+ {1024, 942},
+ {2048, 1920},
+ {4096, 3932}
+};
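+
+/*
+ * Each row pairs system RAM (in MB) with the largest amount of AGP
+ * memory (in MB) it is sensible to hand out.  agp_find_max() below
+ * interpolates linearly between the two surrounding rows and then
+ * converts megabytes to pages.  A worked example (illustration only):
+ * with 384 MB of RAM,
+ *
+ *	result = 204 + (384 - 256) * (440 - 204) / (512 - 256) = 322 MB
+ *
+ * which is then shifted into units of pages.
+ */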
+
+static int agp_find_max(void)
+{
+ long memory, index, result;
+
+#if PAGE_SHIFT < 20
+ memory = totalram_pages >> (20 - PAGE_SHIFT);
+#else
+ memory = totalram_pages << (PAGE_SHIFT - 20);
+#endif
+ index = 1;
+
+ while ((memory > maxes_table[index].mem) && (index < 8))
+ index++;
+
+ result = maxes_table[index - 1].agp +
+ ( (memory - maxes_table[index - 1].mem) *
+ (maxes_table[index].agp - maxes_table[index - 1].agp)) /
+ (maxes_table[index].mem - maxes_table[index - 1].mem);
+
+ result = result << (20 - PAGE_SHIFT);
+ return result;
+}
+
+
+static int agp_backend_initialize(struct agp_bridge_data *bridge)
+{
+ int size_value, rc, got_gatt=0, got_keylist=0;
+
+ bridge->max_memory_agp = agp_find_max();
+ bridge->version = &agp_current_version;
+
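+	/* The scratch page is what every unbound GATT entry points at, so
+	 * stray AGP reads and writes land on a harmless page instead of
+	 * whatever memory happens to sit behind a stale translation. */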
+ if (bridge->driver->needs_scratch_page) {
+ struct page *page = bridge->driver->agp_alloc_page(bridge);
+
+ if (!page) {
+ dev_err(&bridge->dev->dev,
+ "can't get memory for scratch page\n");
+ return -ENOMEM;
+ }
+
+ bridge->scratch_page_page = page;
+ bridge->scratch_page_dma = page_to_phys(page);
+
+ bridge->scratch_page = bridge->driver->mask_memory(bridge,
+ bridge->scratch_page_dma, 0);
+ }
+
+ size_value = bridge->driver->fetch_size();
+ if (size_value == 0) {
+ dev_err(&bridge->dev->dev, "can't determine aperture size\n");
+ rc = -EINVAL;
+ goto err_out;
+ }
+ if (bridge->driver->create_gatt_table(bridge)) {
+ dev_err(&bridge->dev->dev,
+ "can't get memory for graphics translation table\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ got_gatt = 1;
+
+ bridge->key_list = vmalloc(PAGE_SIZE * 4);
+ if (bridge->key_list == NULL) {
+ dev_err(&bridge->dev->dev,
+ "can't allocate memory for key lists\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ got_keylist = 1;
+
+ /* FIXME vmalloc'd memory not guaranteed contiguous */
+ memset(bridge->key_list, 0, PAGE_SIZE * 4);
+
+ if (bridge->driver->configure()) {
+ dev_err(&bridge->dev->dev, "error configuring host chipset\n");
+ rc = -EINVAL;
+ goto err_out;
+ }
+ INIT_LIST_HEAD(&bridge->mapped_list);
+ spin_lock_init(&bridge->mapped_lock);
+
+ return 0;
+
+err_out:
+ if (bridge->driver->needs_scratch_page) {
+ void *va = page_address(bridge->scratch_page_page);
+
+ bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
+ bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
+ }
+ if (got_gatt)
+ bridge->driver->free_gatt_table(bridge);
+ if (got_keylist) {
+ vfree(bridge->key_list);
+ bridge->key_list = NULL;
+ }
+ return rc;
+}
+
+/* cannot be __exit because it could be called from __init code */
+static void agp_backend_cleanup(struct agp_bridge_data *bridge)
+{
+ if (bridge->driver->cleanup)
+ bridge->driver->cleanup();
+ if (bridge->driver->free_gatt_table)
+ bridge->driver->free_gatt_table(bridge);
+
+ vfree(bridge->key_list);
+ bridge->key_list = NULL;
+
+ if (bridge->driver->agp_destroy_page &&
+ bridge->driver->needs_scratch_page) {
+ void *va = page_address(bridge->scratch_page_page);
+
+ bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
+ bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
+ }
+}
+
+/* When we remove the global variable agp_bridge from all drivers
+ * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
+ */
+
+struct agp_bridge_data *agp_alloc_bridge(void)
+{
+ struct agp_bridge_data *bridge;
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return NULL;
+
+ atomic_set(&bridge->agp_in_use, 0);
+ atomic_set(&bridge->current_memory_agp, 0);
+
+ if (list_empty(&agp_bridges))
+ agp_bridge = bridge;
+
+ return bridge;
+}
+EXPORT_SYMBOL(agp_alloc_bridge);
+
+
+void agp_put_bridge(struct agp_bridge_data *bridge)
+{
+ kfree(bridge);
+
+ if (list_empty(&agp_bridges))
+ agp_bridge = NULL;
+}
+EXPORT_SYMBOL(agp_put_bridge);
+
+
+int agp_add_bridge(struct agp_bridge_data *bridge)
+{
+ int error;
+
+ if (agp_off) {
+ error = -ENODEV;
+ goto err_put_bridge;
+ }
+
+ if (!bridge->dev) {
+		printk(KERN_DEBUG PFX "Erk, registering with no pci_dev!\n");
+ error = -EINVAL;
+ goto err_put_bridge;
+ }
+
+ /* Grab reference on the chipset driver. */
+ if (!try_module_get(bridge->driver->owner)) {
+ dev_info(&bridge->dev->dev, "can't lock chipset driver\n");
+ error = -EINVAL;
+ goto err_put_bridge;
+ }
+
+ error = agp_backend_initialize(bridge);
+ if (error) {
+ dev_info(&bridge->dev->dev,
+ "agp_backend_initialize() failed\n");
+ goto err_out;
+ }
+
+ if (list_empty(&agp_bridges)) {
+ error = agp_frontend_initialize();
+ if (error) {
+ dev_info(&bridge->dev->dev,
+ "agp_frontend_initialize() failed\n");
+ goto frontend_err;
+ }
+
+ dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n",
+ bridge->driver->fetch_size(), bridge->gart_bus_addr);
+
+ }
+
+ list_add(&bridge->list, &agp_bridges);
+ return 0;
+
+frontend_err:
+ agp_backend_cleanup(bridge);
+err_out:
+ module_put(bridge->driver->owner);
+err_put_bridge:
+ agp_put_bridge(bridge);
+ return error;
+}
+EXPORT_SYMBOL_GPL(agp_add_bridge);
+
+
+void agp_remove_bridge(struct agp_bridge_data *bridge)
+{
+ agp_backend_cleanup(bridge);
+ list_del(&bridge->list);
+ if (list_empty(&agp_bridges))
+ agp_frontend_cleanup();
+ module_put(bridge->driver->owner);
+}
+EXPORT_SYMBOL_GPL(agp_remove_bridge);
+
+int agp_off;
+int agp_try_unsupported_boot;
+EXPORT_SYMBOL(agp_off);
+EXPORT_SYMBOL(agp_try_unsupported_boot);
+
+static int __init agp_init(void)
+{
+ if (!agp_off)
+ printk(KERN_INFO "Linux agpgart interface v%d.%d\n",
+ AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
+ return 0;
+}
+
+static void __exit agp_exit(void)
+{
+}
+
+#ifndef MODULE
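+/*
+ * Boot-time control: "agp=off" disables agpgart entirely, while
+ * "agp=try_unsupported" lets bridge drivers attempt to attach to
+ * chipsets they do not explicitly recognise.
+ */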
+static __init int agp_setup(char *s)
+{
+ if (!strcmp(s,"off"))
+ agp_off = 1;
+ if (!strcmp(s,"try_unsupported"))
+ agp_try_unsupported_boot = 1;
+ return 1;
+}
+__setup("agp=", agp_setup);
+#endif
+
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_DESCRIPTION("AGP GART driver");
+MODULE_LICENSE("GPL and additional rights");
+MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
+
+module_init(agp_init);
+module_exit(agp_exit);
+
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
new file mode 100644
index 00000000..a48e05b3
--- /dev/null
+++ b/drivers/char/agp/compat_ioctl.c
@@ -0,0 +1,287 @@
+/*
+ * AGPGART driver frontend compatibility ioctls
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2003 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/agpgart.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include "agp.h"
+#include "compat_ioctl.h"
+
+static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_info32 userinfo;
+ struct agp_kern_info kerninfo;
+
+ agp_copy_info(agp_bridge, &kerninfo);
+
+ userinfo.version.major = kerninfo.version.major;
+ userinfo.version.minor = kerninfo.version.minor;
+ userinfo.bridge_id = kerninfo.device->vendor |
+ (kerninfo.device->device << 16);
+ userinfo.agp_mode = kerninfo.mode;
+ userinfo.aper_base = (compat_long_t)kerninfo.aper_base;
+ userinfo.aper_size = kerninfo.aper_size;
+ userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
+ userinfo.pg_used = kerninfo.current_memory;
+
+ if (copy_to_user(arg, &userinfo, sizeof(userinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_region32 ureserve;
+ struct agp_region kreserve;
+ struct agp_client *client;
+ struct agp_file_private *client_priv;
+
+ DBG("");
+ if (copy_from_user(&ureserve, arg, sizeof(ureserve)))
+ return -EFAULT;
+
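+	/* Reject seg_count values big enough to overflow the allocation
+	 * sizes computed from it further down. */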
+ if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32))
+ return -EFAULT;
+
+ kreserve.pid = ureserve.pid;
+ kreserve.seg_count = ureserve.seg_count;
+
+ client = agp_find_client_by_pid(kreserve.pid);
+
+ if (kreserve.seg_count == 0) {
+ /* remove a client */
+ client_priv = agp_find_private(kreserve.pid);
+
+ if (client_priv != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+ }
+ if (client == NULL) {
+ /* client is already removed */
+ return 0;
+ }
+ return agp_remove_client(kreserve.pid);
+ } else {
+ struct agp_segment32 *usegment;
+ struct agp_segment *ksegment;
+ int seg;
+
+ if (ureserve.seg_count >= 16384)
+ return -EINVAL;
+
+ usegment = kmalloc(sizeof(*usegment) * ureserve.seg_count, GFP_KERNEL);
+ if (!usegment)
+ return -ENOMEM;
+
+ ksegment = kmalloc(sizeof(*ksegment) * kreserve.seg_count, GFP_KERNEL);
+ if (!ksegment) {
+ kfree(usegment);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
+ sizeof(*usegment) * ureserve.seg_count)) {
+ kfree(usegment);
+ kfree(ksegment);
+ return -EFAULT;
+ }
+
+ for (seg = 0; seg < ureserve.seg_count; seg++) {
+ ksegment[seg].pg_start = usegment[seg].pg_start;
+ ksegment[seg].pg_count = usegment[seg].pg_count;
+ ksegment[seg].prot = usegment[seg].prot;
+ }
+
+ kfree(usegment);
+ kreserve.seg_list = ksegment;
+
+ if (client == NULL) {
+ /* Create the client and add the segment */
+ client = agp_create_client(kreserve.pid);
+
+ if (client == NULL) {
+ kfree(ksegment);
+ return -ENOMEM;
+ }
+ client_priv = agp_find_private(kreserve.pid);
+
+ if (client_priv != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+ }
+ }
+ return agp_create_segment(client, &kreserve);
+ }
+ /* Will never really happen */
+ return -EINVAL;
+}
+
+static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_memory *memory;
+ struct agp_allocate32 alloc;
+
+ DBG("");
+ if (copy_from_user(&alloc, arg, sizeof(alloc)))
+ return -EFAULT;
+
+ memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
+
+ if (memory == NULL)
+ return -ENOMEM;
+
+ alloc.key = memory->key;
+ alloc.physical = memory->physical;
+
+ if (copy_to_user(arg, &alloc, sizeof(alloc))) {
+ agp_free_memory_wrap(memory);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_bind32 bind_info;
+ struct agp_memory *memory;
+
+ DBG("");
+ if (copy_from_user(&bind_info, arg, sizeof(bind_info)))
+ return -EFAULT;
+
+ memory = agp_find_mem_by_key(bind_info.key);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ return agp_bind_memory(memory, bind_info.pg_start);
+}
+
+static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_memory *memory;
+ struct agp_unbind32 unbind;
+
+ DBG("");
+ if (copy_from_user(&unbind, arg, sizeof(unbind)))
+ return -EFAULT;
+
+ memory = agp_find_mem_by_key(unbind.key);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ return agp_unbind_memory(memory);
+}
+
+long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct agp_file_private *curr_priv = file->private_data;
+ int ret_val = -ENOTTY;
+
+ mutex_lock(&(agp_fe.agp_mutex));
+
+ if ((agp_fe.current_controller == NULL) &&
+ (cmd != AGPIOC_ACQUIRE32)) {
+ ret_val = -EINVAL;
+ goto ioctl_out;
+ }
+ if ((agp_fe.backend_acquired != true) &&
+ (cmd != AGPIOC_ACQUIRE32)) {
+ ret_val = -EBUSY;
+ goto ioctl_out;
+ }
+ if (cmd != AGPIOC_ACQUIRE32) {
+ if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
+ ret_val = -EPERM;
+ goto ioctl_out;
+ }
+ /* Use the original pid of the controller,
+ * in case it's threaded */
+
+ if (agp_fe.current_controller->pid != curr_priv->my_pid) {
+ ret_val = -EBUSY;
+ goto ioctl_out;
+ }
+ }
+
+ switch (cmd) {
+ case AGPIOC_INFO32:
+ ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_ACQUIRE32:
+ ret_val = agpioc_acquire_wrap(curr_priv);
+ break;
+
+ case AGPIOC_RELEASE32:
+ ret_val = agpioc_release_wrap(curr_priv);
+ break;
+
+ case AGPIOC_SETUP32:
+ ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_RESERVE32:
+ ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_PROTECT32:
+ ret_val = agpioc_protect_wrap(curr_priv);
+ break;
+
+ case AGPIOC_ALLOCATE32:
+ ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_DEALLOCATE32:
+ ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
+ break;
+
+ case AGPIOC_BIND32:
+ ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_UNBIND32:
+ ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_CHIPSET_FLUSH32:
+ break;
+ }
+
+ioctl_out:
+ DBG("ioctl returns %d\n", ret_val);
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return ret_val;
+}
+
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
new file mode 100644
index 00000000..f30e0fd9
--- /dev/null
+++ b/drivers/char/agp/compat_ioctl.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AGP_COMPAT_IOCTL_H
+#define _AGP_COMPAT_IOCTL_H
+
+#include <linux/compat.h>
+#include <linux/agpgart.h>
+
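+/*
+ * These mirror the native AGPIOC_* commands.  Using compat-sized types
+ * here makes the generated request numbers match the ones a 32-bit
+ * userspace computes, so they can be switched on directly.
+ */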
+#define AGPIOC_INFO32 _IOR (AGPIOC_BASE, 0, compat_uptr_t)
+#define AGPIOC_ACQUIRE32 _IO (AGPIOC_BASE, 1)
+#define AGPIOC_RELEASE32 _IO (AGPIOC_BASE, 2)
+#define AGPIOC_SETUP32 _IOW (AGPIOC_BASE, 3, compat_uptr_t)
+#define AGPIOC_RESERVE32 _IOW (AGPIOC_BASE, 4, compat_uptr_t)
+#define AGPIOC_PROTECT32 _IOW (AGPIOC_BASE, 5, compat_uptr_t)
+#define AGPIOC_ALLOCATE32 _IOWR(AGPIOC_BASE, 6, compat_uptr_t)
+#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t)
+#define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t)
+#define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t)
+#define AGPIOC_CHIPSET_FLUSH32 _IO (AGPIOC_BASE, 10)
+
+struct agp_info32 {
+ struct agp_version version; /* version of the driver */
+ u32 bridge_id; /* bridge vendor/device */
+ u32 agp_mode; /* mode info of bridge */
+ compat_long_t aper_base; /* base of aperture */
+ compat_size_t aper_size; /* size of aperture */
+ compat_size_t pg_total; /* max pages (swap + system) */
+ compat_size_t pg_system; /* max pages (system) */
+ compat_size_t pg_used; /* current pages used */
+};
+
+/*
+ * The "prot" down below needs still a "sleep" flag somehow ...
+ */
+struct agp_segment32 {
+ compat_off_t pg_start; /* starting page to populate */
+ compat_size_t pg_count; /* number of pages */
+ compat_int_t prot; /* prot flags for mmap */
+};
+
+struct agp_region32 {
+ compat_pid_t pid; /* pid of process */
+ compat_size_t seg_count; /* number of segments */
+ struct agp_segment32 *seg_list;
+};
+
+struct agp_allocate32 {
+ compat_int_t key; /* tag of allocation */
+ compat_size_t pg_count; /* number of pages */
+ u32 type; /* 0 == normal, other devspec */
+ u32 physical; /* device specific (some devices
+ * need a phys address of the
+ * actual page behind the gatt
+ * table) */
+};
+
+struct agp_bind32 {
+ compat_int_t key; /* tag of allocation */
+ compat_off_t pg_start; /* starting page to populate */
+};
+
+struct agp_unbind32 {
+ compat_int_t key; /* tag of allocation */
+ u32 priority; /* priority for paging out */
+};
+
+extern struct agp_front_data agp_fe;
+
+int agpioc_acquire_wrap(struct agp_file_private *priv);
+int agpioc_release_wrap(struct agp_file_private *priv);
+int agpioc_protect_wrap(struct agp_file_private *priv);
+int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg);
+int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg);
+struct agp_file_private *agp_find_private(pid_t pid);
+struct agp_client *agp_create_client(pid_t id);
+int agp_remove_client(pid_t id);
+int agp_create_segment(struct agp_client *client, struct agp_region *region);
+void agp_free_memory_wrap(struct agp_memory *memory);
+struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
+struct agp_memory *agp_find_mem_by_key(int key);
+struct agp_client *agp_find_client_by_pid(pid_t id);
+
+#endif /* _AGP_COMPAT_IOCTL_H */
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
new file mode 100644
index 00000000..d607f53d
--- /dev/null
+++ b/drivers/char/agp/efficeon-agp.c
@@ -0,0 +1,479 @@
+/*
+ * Transmeta's Efficeon AGPGART driver.
+ *
+ * Based upon a diff by Linus around November '02.
+ *
+ * Ported to the 2.6 kernel by Carlos Puchol <cpglinux@puchol.com>
+ * and H. Peter Anvin <hpa@transmeta.com>.
+ */
+
+/*
+ * NOTE-cpg-040217:
+ *
+ * - when compiled as a module, it refuses to unload after loading,
+ *   claiming it is in use when it is not.
+ * - no S3 (suspend to RAM) testing.
+ * - tested on the Efficeon integrated northbridge for tens
+ *   of iterations of starting X and glxgears.
+ * - tested with radeon 9000 and radeon mobility m9 cards
+ * - tested with c3/c4 enabled (with the mobility m9 card)
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/gfp.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+#include "agp.h"
+#include "intel-agp.h"
+
+/*
+ * The real difference from the generic AGP code is
+ * in the GART mappings - a two-level setup with the
+ * first level being an on-chip 64-entry table.
+ *
+ * The page array is filled through the ATTPAGE register
+ * (Aperture Translation Table Page Register) at 0xB8. Bits:
+ * 31:20: physical page address
+ * 11:9: Page Attribute Table Index (PATI)
+ * must match the PAT index for the
+ * mapped pages (the 2nd level page table pages
+ * themselves should be just regular WB-cacheable,
+ * so this is normally zero.)
+ * 8: Present
+ * 7:6: reserved, write as zero
+ * 5:0: GATT directory index: which 1st-level entry
+ *
+ * The Efficeon AGP spec requires pages to be WB-cacheable
+ * but to be explicitly CLFLUSH'd after any changes.
+ */
+#define EFFICEON_ATTPAGE 0xb8
+#define EFFICEON_L1_SIZE 64 /* Number of PDE pages */
+
+#define EFFICEON_PATI (0 << 9)
+#define EFFICEON_PRESENT (1 << 8)
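+
+/*
+ * Each write to EFFICEON_ATTPAGE installs one first-level entry.  As
+ * efficeon_create_gatt_table() below shows, the value written is
+ *
+ *	virt_to_phys(pte_page) | EFFICEON_PATI | EFFICEON_PRESENT | index
+ *
+ * i.e. the PTE page's physical address in bits 31:20, the PAT index,
+ * the present bit, and the directory slot in bits 5:0.
+ */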
+
+static struct _efficeon_private {
+ unsigned long l1_table[EFFICEON_L1_SIZE];
+} efficeon_private;
+
+static const struct gatt_mask efficeon_generic_masks[] =
+{
+ {.mask = 0x00000001, .type = 0}
+};
+
+/* This function does the same thing as mask_memory() for this chipset... */
+static inline unsigned long efficeon_mask_memory(struct page *page)
+{
+ unsigned long addr = page_to_phys(page);
+ return addr | 0x00000001;
+}
+
+static const struct aper_size_info_lvl2 efficeon_generic_sizes[4] =
+{
+ {256, 65536, 0},
+ {128, 32768, 32},
+ {64, 16384, 48},
+ {32, 8192, 56}
+};
+
+/*
+ * Control interfaces are largely identical to
+ * the legacy Intel 440BX.
+ */
+
+static int efficeon_fetch_size(void)
+{
+ int i;
+ u16 temp;
+ struct aper_size_info_lvl2 *values;
+
+ pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
+ values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static void efficeon_tlbflush(struct agp_memory * mem)
+{
+ printk(KERN_DEBUG PFX "efficeon_tlbflush()\n");
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+}
+
+static void efficeon_cleanup(void)
+{
+ u16 temp;
+ struct aper_size_info_lvl2 *previous_size;
+
+ printk(KERN_DEBUG PFX "efficeon_cleanup()\n");
+ previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
+ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
+ previous_size->size_value);
+}
+
+static int efficeon_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_lvl2 *current_size;
+
+ printk(KERN_DEBUG PFX "efficeon_configure()\n");
+
+ current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
+ current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+
+ /* paccfg/nbxcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
+ (temp2 & ~(1 << 10)) | (1 << 9) | (1 << 11));
+ /* clear any possible error conditions */
+ pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
+ return 0;
+}
+
+static int efficeon_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ int index, freed = 0;
+
+ for (index = 0; index < EFFICEON_L1_SIZE; index++) {
+ unsigned long page = efficeon_private.l1_table[index];
+ if (page) {
+ efficeon_private.l1_table[index] = 0;
+ ClearPageReserved(virt_to_page((char *)page));
+ free_page(page);
+ freed++;
+ }
+ printk(KERN_DEBUG PFX "efficeon_free_gatt_table(%p, %02x, %08x)\n",
+ agp_bridge->dev, EFFICEON_ATTPAGE, index);
+ pci_write_config_dword(agp_bridge->dev,
+ EFFICEON_ATTPAGE, index);
+ }
+ printk(KERN_DEBUG PFX "efficeon_free_gatt_table() freed %d pages\n", freed);
+ return 0;
+}
+
+
+/*
+ * Since we don't need contiguous memory, we just try
+ * to get the GATT table once.
+ */
+
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#undef GET_GATT
+#define GET_GATT(addr) (efficeon_private.gatt_pages[\
+ GET_PAGE_DIR_IDX(addr)]->remapped)
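+
+/*
+ * A GART bus address thus decomposes as: bits 31:22 select the
+ * first-level (page directory) entry, bits 21:12 the PTE within that
+ * page, and bits 11:0 the offset into the 4 KB page.
+ */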
+
+static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int index;
+ const int pati = EFFICEON_PATI;
+ const int present = EFFICEON_PRESENT;
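+	/* CPUID leaf 1, EBX bits 15:8 report the CLFLUSH line size in
+	 * 8-byte units; shifting left by 3 converts it to bytes. */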
+ const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
+ int num_entries, l1_pages;
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ printk(KERN_DEBUG PFX "efficeon_create_gatt_table(%d)\n", num_entries);
+
+	/* Each PDE page holds 2^10 PTEs */
+ BUG_ON(num_entries & 0x3ff);
+ l1_pages = num_entries >> 10;
+
+ for (index = 0 ; index < l1_pages ; index++) {
+ int offset;
+ unsigned long page;
+ unsigned long value;
+
+ page = efficeon_private.l1_table[index];
+ BUG_ON(page);
+
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page) {
+ efficeon_free_gatt_table(agp_bridge);
+ return -ENOMEM;
+ }
+ SetPageReserved(virt_to_page((char *)page));
+
+ for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
+ clflush((char *)page+offset);
+
+ efficeon_private.l1_table[index] = page;
+
+ value = virt_to_phys((unsigned long *)page) | pati | present | index;
+
+ pci_write_config_dword(agp_bridge->dev,
+ EFFICEON_ATTPAGE, value);
+ }
+
+ return 0;
+}
+
+static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
+{
+ int i, count = mem->page_count, num_entries;
+ unsigned int *page, *last_page;
+ const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
+ const unsigned long clflush_mask = ~(clflush_chunk-1);
+
+ printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count);
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+ if (type != 0 || mem->type != 0)
+ return -EINVAL;
+
+ if (!mem->is_flushed) {
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+
+ last_page = NULL;
+ for (i = 0; i < count; i++) {
+ int index = pg_start + i;
+ unsigned long insert = efficeon_mask_memory(mem->pages[i]);
+
+ page = (unsigned int *) efficeon_private.l1_table[index >> 10];
+
+ if (!page)
+ continue;
+
+ page += (index & 0x3ff);
+ *page = insert;
+
+ /* clflush is slow, so don't clflush until we have to */
+ if (last_page &&
+ (((unsigned long)page^(unsigned long)last_page) &
+ clflush_mask))
+ clflush(last_page);
+
+ last_page = page;
+ }
+
+	if (last_page)
+ clflush(last_page);
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type)
+{
+ int i, count = mem->page_count, num_entries;
+
+ printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count);
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+ if (type != 0 || mem->type != 0)
+ return -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ int index = pg_start + i;
+ unsigned int *page = (unsigned int *) efficeon_private.l1_table[index >> 10];
+
+ if (!page)
+ continue;
+ page += (index & 0x3ff);
+ *page = 0;
+ }
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+
+static const struct agp_bridge_driver efficeon_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = efficeon_generic_sizes,
+ .size_type = LVL2_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .configure = efficeon_configure,
+ .fetch_size = efficeon_fetch_size,
+ .cleanup = efficeon_cleanup,
+ .tlb_flush = efficeon_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = efficeon_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+
+ // Efficeon-specific GATT table setup / populate / teardown
+ .create_gatt_table = efficeon_create_gatt_table,
+ .free_gatt_table = efficeon_free_gatt_table,
+ .insert_memory = efficeon_insert_memory,
+ .remove_memory = efficeon_remove_memory,
+ .cant_use_aperture = false, // true might be faster?
+
+ // Generic
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+ struct resource *r;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ /* Probe for Efficeon controller */
+ if (pdev->device != PCI_DEVICE_ID_EFFICEON) {
+ printk(KERN_ERR PFX "Unsupported Efficeon chipset (device id: %04x)\n",
+ pdev->device);
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO PFX "Detected Transmeta Efficeon TM8000 series chipset\n");
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &efficeon_driver;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ /*
+ * If the device has not been properly setup, the following will catch
+ * the problem and should stop the system from crashing.
+ * 20030610 - hamish@zot.org
+ */
+ if (pci_enable_device(pdev)) {
+		printk(KERN_ERR PFX "Unable to enable PCI device\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
+ /*
+ * The following fixes the case where the BIOS has "forgotten" to
+ * provide an address range for the GART.
+ * 20030610 - hamish@zot.org
+ */
+ r = &pdev->resource[0];
+ if (!r->start && r->end) {
+ if (pci_assign_resource(pdev, 0)) {
+ printk(KERN_ERR PFX "could not assign resource 0\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+ }
+
+ /* Fill in the mode register */
+ if (cap_ptr) {
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+ }
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_efficeon_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+#ifdef CONFIG_PM
+static int agp_efficeon_suspend(struct pci_dev *dev, pm_message_t state)
+{
+ return 0;
+}
+
+static int agp_efficeon_resume(struct pci_dev *pdev)
+{
+ printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
+ return efficeon_configure();
+}
+#endif
+
+static struct pci_device_id agp_efficeon_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_TRANSMETA,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table);
+
+static struct pci_driver agp_efficeon_pci_driver = {
+ .name = "agpgart-efficeon",
+ .id_table = agp_efficeon_pci_table,
+ .probe = agp_efficeon_probe,
+ .remove = agp_efficeon_remove,
+#ifdef CONFIG_PM
+ .suspend = agp_efficeon_suspend,
+ .resume = agp_efficeon_resume,
+#endif
+};
+
+static int __init agp_efficeon_init(void)
+{
+	static int agp_initialised = 0;
+
+ if (agp_off)
+ return -EINVAL;
+
+ if (agp_initialised == 1)
+ return 0;
+	agp_initialised = 1;
+
+ return pci_register_driver(&agp_efficeon_pci_driver);
+}
+
+static void __exit agp_efficeon_cleanup(void)
+{
+ pci_unregister_driver(&agp_efficeon_pci_driver);
+}
+
+module_init(agp_efficeon_init);
+module_exit(agp_efficeon_cleanup);
+
+MODULE_AUTHOR("Carlos Puchol <cpglinux@puchol.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
new file mode 100644
index 00000000..2e044338
--- /dev/null
+++ b/drivers/char/agp/frontend.c
@@ -0,0 +1,1081 @@
+/*
+ * AGPGART driver frontend
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2003 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include "agp.h"
+
+struct agp_front_data agp_fe;
+
+struct agp_memory *agp_find_mem_by_key(int key)
+{
+ struct agp_memory *curr;
+
+ if (agp_fe.current_controller == NULL)
+ return NULL;
+
+ curr = agp_fe.current_controller->pool;
+
+ while (curr != NULL) {
+ if (curr->key == key)
+ break;
+ curr = curr->next;
+ }
+
+ DBG("key=%d -> mem=%p", key, curr);
+ return curr;
+}
+
+static void agp_remove_from_pool(struct agp_memory *temp)
+{
+ struct agp_memory *prev;
+ struct agp_memory *next;
+
+ /* Check to see if this is even in the memory pool */
+
+ DBG("mem=%p", temp);
+ if (agp_find_mem_by_key(temp->key) != NULL) {
+ next = temp->next;
+ prev = temp->prev;
+
+ if (prev != NULL) {
+ prev->next = next;
+ if (next != NULL)
+ next->prev = prev;
+
+ } else {
+ /* This is the first item on the list */
+ if (next != NULL)
+ next->prev = NULL;
+
+ agp_fe.current_controller->pool = next;
+ }
+ }
+}
+
+/*
+ * Routines for managing each client's segment list -
+ * These routines handle adding and removing segments
+ * to each auth'ed client.
+ */
+
+static struct
+agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
+ unsigned long offset,
+ int size, pgprot_t page_prot)
+{
+ struct agp_segment_priv *seg;
+ int num_segments, i;
+ off_t pg_start;
+ size_t pg_count;
+
+ pg_start = offset / 4096;
+ pg_count = size / 4096;
+ seg = *(client->segments);
+ num_segments = client->num_segments;
+
+ for (i = 0; i < client->num_segments; i++) {
+ if ((seg[i].pg_start == pg_start) &&
+ (seg[i].pg_count == pg_count) &&
+ (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
+ return seg + i;
+ }
+ }
+
+ return NULL;
+}
+
+static void agp_remove_seg_from_client(struct agp_client *client)
+{
+ DBG("client=%p", client);
+
+ if (client->segments != NULL) {
+ if (*(client->segments) != NULL) {
+ DBG("Freeing %p from client %p", *(client->segments), client);
+ kfree(*(client->segments));
+ }
+ DBG("Freeing %p from client %p", client->segments, client);
+ kfree(client->segments);
+ client->segments = NULL;
+ }
+}
+
+static void agp_add_seg_to_client(struct agp_client *client,
+ struct agp_segment_priv ** seg, int num_segments)
+{
+ struct agp_segment_priv **prev_seg;
+
+ prev_seg = client->segments;
+
+ if (prev_seg != NULL)
+ agp_remove_seg_from_client(client);
+
+ DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client);
+ client->num_segments = num_segments;
+ client->segments = seg;
+}
+
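+/*
+ * Translate mmap() PROT_* flags into the pgprot_t the mapping will use,
+ * forcing VM_SHARED since aperture mappings are always shared.
+ */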
+static pgprot_t agp_convert_mmap_flags(int prot)
+{
+ unsigned long prot_bits;
+
+ prot_bits = calc_vm_prot_bits(prot) | VM_SHARED;
+ return vm_get_page_prot(prot_bits);
+}
+
+int agp_create_segment(struct agp_client *client, struct agp_region *region)
+{
+ struct agp_segment_priv **ret_seg;
+ struct agp_segment_priv *seg;
+ struct agp_segment *user_seg;
+ size_t i;
+
+ seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL);
+ if (seg == NULL) {
+ kfree(region->seg_list);
+ region->seg_list = NULL;
+ return -ENOMEM;
+ }
+ user_seg = region->seg_list;
+
+ for (i = 0; i < region->seg_count; i++) {
+ seg[i].pg_start = user_seg[i].pg_start;
+ seg[i].pg_count = user_seg[i].pg_count;
+ seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot);
+ }
+ kfree(region->seg_list);
+ region->seg_list = NULL;
+
+ ret_seg = kmalloc(sizeof(void *), GFP_KERNEL);
+ if (ret_seg == NULL) {
+ kfree(seg);
+ return -ENOMEM;
+ }
+ *ret_seg = seg;
+ agp_add_seg_to_client(client, ret_seg, region->seg_count);
+ return 0;
+}
+
+/* End - Routines for managing each client's segment list */
+
+/* This function must only be called when current_controller != NULL */
+static void agp_insert_into_pool(struct agp_memory * temp)
+{
+ struct agp_memory *prev;
+
+ prev = agp_fe.current_controller->pool;
+
+ if (prev != NULL) {
+ prev->prev = temp;
+ temp->next = prev;
+ }
+ agp_fe.current_controller->pool = temp;
+}
+
+
+/* File private list routines */
+
+struct agp_file_private *agp_find_private(pid_t pid)
+{
+ struct agp_file_private *curr;
+
+ curr = agp_fe.file_priv_list;
+
+ while (curr != NULL) {
+ if (curr->my_pid == pid)
+ return curr;
+ curr = curr->next;
+ }
+
+ return NULL;
+}
+
+static void agp_insert_file_private(struct agp_file_private * priv)
+{
+ struct agp_file_private *prev;
+
+ prev = agp_fe.file_priv_list;
+
+ if (prev != NULL)
+ prev->prev = priv;
+ priv->next = prev;
+ agp_fe.file_priv_list = priv;
+}
+
+static void agp_remove_file_private(struct agp_file_private * priv)
+{
+ struct agp_file_private *next;
+ struct agp_file_private *prev;
+
+ next = priv->next;
+ prev = priv->prev;
+
+ if (prev != NULL) {
+ prev->next = next;
+
+ if (next != NULL)
+ next->prev = prev;
+
+ } else {
+ if (next != NULL)
+ next->prev = NULL;
+
+ agp_fe.file_priv_list = next;
+ }
+}
+
+/* End - File private list routines */
+
+/*
+ * Wrappers for agp_free_memory & agp_allocate_memory
+ * These make sure that internal lists are kept updated.
+ */
+void agp_free_memory_wrap(struct agp_memory *memory)
+{
+ agp_remove_from_pool(memory);
+ agp_free_memory(memory);
+}
+
+struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
+{
+ struct agp_memory *memory;
+
+ memory = agp_allocate_memory(agp_bridge, pg_count, type);
+ if (memory == NULL)
+ return NULL;
+
+ agp_insert_into_pool(memory);
+ return memory;
+}
+
+/* Routines for managing the list of controllers -
+ * These routines manage the current controller, and the list of
+ * controllers
+ */
+
+static struct agp_controller *agp_find_controller_by_pid(pid_t id)
+{
+ struct agp_controller *controller;
+
+ controller = agp_fe.controllers;
+
+ while (controller != NULL) {
+ if (controller->pid == id)
+ return controller;
+ controller = controller->next;
+ }
+
+ return NULL;
+}
+
+static struct agp_controller *agp_create_controller(pid_t id)
+{
+ struct agp_controller *controller;
+
+ controller = kzalloc(sizeof(struct agp_controller), GFP_KERNEL);
+ if (controller == NULL)
+ return NULL;
+
+ controller->pid = id;
+ return controller;
+}
+
+static int agp_insert_controller(struct agp_controller *controller)
+{
+ struct agp_controller *prev_controller;
+
+ prev_controller = agp_fe.controllers;
+ controller->next = prev_controller;
+
+ if (prev_controller != NULL)
+ prev_controller->prev = controller;
+
+ agp_fe.controllers = controller;
+
+ return 0;
+}
+
+static void agp_remove_all_clients(struct agp_controller *controller)
+{
+ struct agp_client *client;
+ struct agp_client *temp;
+
+ client = controller->clients;
+
+ while (client) {
+ struct agp_file_private *priv;
+
+ temp = client;
+ agp_remove_seg_from_client(temp);
+ priv = agp_find_private(temp->pid);
+
+ if (priv != NULL) {
+ clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+ }
+ client = client->next;
+ kfree(temp);
+ }
+}
+
+static void agp_remove_all_memory(struct agp_controller *controller)
+{
+ struct agp_memory *memory;
+ struct agp_memory *temp;
+
+ memory = controller->pool;
+
+ while (memory) {
+ temp = memory;
+ memory = memory->next;
+ agp_free_memory_wrap(temp);
+ }
+}
+
+static int agp_remove_controller(struct agp_controller *controller)
+{
+ struct agp_controller *prev_controller;
+ struct agp_controller *next_controller;
+
+ prev_controller = controller->prev;
+ next_controller = controller->next;
+
+ if (prev_controller != NULL) {
+ prev_controller->next = next_controller;
+ if (next_controller != NULL)
+ next_controller->prev = prev_controller;
+
+ } else {
+ if (next_controller != NULL)
+ next_controller->prev = NULL;
+
+ agp_fe.controllers = next_controller;
+ }
+
+ agp_remove_all_memory(controller);
+ agp_remove_all_clients(controller);
+
+ if (agp_fe.current_controller == controller) {
+ agp_fe.current_controller = NULL;
+ agp_fe.backend_acquired = false;
+ agp_backend_release(agp_bridge);
+ }
+ kfree(controller);
+ return 0;
+}
+
+static void agp_controller_make_current(struct agp_controller *controller)
+{
+ struct agp_client *clients;
+
+ clients = controller->clients;
+
+ while (clients != NULL) {
+ struct agp_file_private *priv;
+
+ priv = agp_find_private(clients->pid);
+
+ if (priv != NULL) {
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+ }
+ clients = clients->next;
+ }
+
+ agp_fe.current_controller = controller;
+}
+
+static void agp_controller_release_current(struct agp_controller *controller,
+ struct agp_file_private *controller_priv)
+{
+ struct agp_client *clients;
+
+ clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
+ clients = controller->clients;
+
+ while (clients != NULL) {
+ struct agp_file_private *priv;
+
+ priv = agp_find_private(clients->pid);
+
+ if (priv != NULL)
+ clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
+
+ clients = clients->next;
+ }
+
+ agp_fe.current_controller = NULL;
+ agp_fe.used_by_controller = false;
+ agp_backend_release(agp_bridge);
+}
+
+/*
+ * Routines for managing client lists -
+ * These routines are for managing the list of auth'ed clients.
+ */
+
+static struct agp_client
+*agp_find_client_in_controller(struct agp_controller *controller, pid_t id)
+{
+ struct agp_client *client;
+
+ if (controller == NULL)
+ return NULL;
+
+ client = controller->clients;
+
+ while (client != NULL) {
+ if (client->pid == id)
+ return client;
+ client = client->next;
+ }
+
+ return NULL;
+}
+
+static struct agp_controller *agp_find_controller_for_client(pid_t id)
+{
+ struct agp_controller *controller;
+
+ controller = agp_fe.controllers;
+
+ while (controller != NULL) {
+ if ((agp_find_client_in_controller(controller, id)) != NULL)
+ return controller;
+ controller = controller->next;
+ }
+
+ return NULL;
+}
+
+struct agp_client *agp_find_client_by_pid(pid_t id)
+{
+ struct agp_client *temp;
+
+ if (agp_fe.current_controller == NULL)
+ return NULL;
+
+ temp = agp_find_client_in_controller(agp_fe.current_controller, id);
+ return temp;
+}
+
+static void agp_insert_client(struct agp_client *client)
+{
+ struct agp_client *prev_client;
+
+ prev_client = agp_fe.current_controller->clients;
+ client->next = prev_client;
+
+ if (prev_client != NULL)
+ prev_client->prev = client;
+
+ agp_fe.current_controller->clients = client;
+ agp_fe.current_controller->num_clients++;
+}
+
+struct agp_client *agp_create_client(pid_t id)
+{
+ struct agp_client *new_client;
+
+ new_client = kzalloc(sizeof(struct agp_client), GFP_KERNEL);
+ if (new_client == NULL)
+ return NULL;
+
+ new_client->pid = id;
+ agp_insert_client(new_client);
+ return new_client;
+}
+
+int agp_remove_client(pid_t id)
+{
+ struct agp_client *client;
+ struct agp_client *prev_client;
+ struct agp_client *next_client;
+ struct agp_controller *controller;
+
+ controller = agp_find_controller_for_client(id);
+ if (controller == NULL)
+ return -EINVAL;
+
+ client = agp_find_client_in_controller(controller, id);
+ if (client == NULL)
+ return -EINVAL;
+
+ prev_client = client->prev;
+ next_client = client->next;
+
+ if (prev_client != NULL) {
+ prev_client->next = next_client;
+ if (next_client != NULL)
+ next_client->prev = prev_client;
+
+ } else {
+ if (next_client != NULL)
+ next_client->prev = NULL;
+ controller->clients = next_client;
+ }
+
+ controller->num_clients--;
+ agp_remove_seg_from_client(client);
+ kfree(client);
+ return 0;
+}
+
+/* End - Routines for managing client lists */
+
+/* File Operations */
+
+static int agp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned int size, current_size;
+ unsigned long offset;
+ struct agp_client *client;
+ struct agp_file_private *priv = file->private_data;
+ struct agp_kern_info kerninfo;
+
+ mutex_lock(&(agp_fe.agp_mutex));
+
+ if (agp_fe.backend_acquired != true)
+ goto out_eperm;
+
+ if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags)))
+ goto out_eperm;
+
+ agp_copy_info(agp_bridge, &kerninfo);
+ size = vma->vm_end - vma->vm_start;
+	current_size = kerninfo.aper_size * 0x100000; /* aper_size is in MB */
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+ DBG("%lx:%lx", offset, offset+size);
+
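+	/* Two cases: an authorized client may only map a segment that the
+	 * controller reserved for it, while the controller itself must map
+	 * the aperture in its entirety. */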
+ if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
+ if ((size + offset) > current_size)
+ goto out_inval;
+
+ client = agp_find_client_by_pid(current->pid);
+
+ if (client == NULL)
+ goto out_eperm;
+
+ if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot))
+ goto out_inval;
+
+ DBG("client vm_ops=%p", kerninfo.vm_ops);
+ if (kerninfo.vm_ops) {
+ vma->vm_ops = kerninfo.vm_ops;
+ } else if (io_remap_pfn_range(vma, vma->vm_start,
+ (kerninfo.aper_base + offset) >> PAGE_SHIFT,
+ size, vma->vm_page_prot)) {
+ goto out_again;
+ }
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return 0;
+ }
+
+ if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
+ if (size != current_size)
+ goto out_inval;
+
+ DBG("controller vm_ops=%p", kerninfo.vm_ops);
+ if (kerninfo.vm_ops) {
+ vma->vm_ops = kerninfo.vm_ops;
+ } else if (io_remap_pfn_range(vma, vma->vm_start,
+ kerninfo.aper_base >> PAGE_SHIFT,
+ size, vma->vm_page_prot)) {
+ goto out_again;
+ }
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return 0;
+ }
+
+out_eperm:
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return -EPERM;
+
+out_inval:
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return -EINVAL;
+
+out_again:
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return -EAGAIN;
+}
+
+static int agp_release(struct inode *inode, struct file *file)
+{
+ struct agp_file_private *priv = file->private_data;
+
+ mutex_lock(&(agp_fe.agp_mutex));
+
+ DBG("priv=%p", priv);
+
+ if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
+ struct agp_controller *controller;
+
+ controller = agp_find_controller_by_pid(priv->my_pid);
+
+ if (controller != NULL) {
+ if (controller == agp_fe.current_controller)
+ agp_controller_release_current(controller, priv);
+ agp_remove_controller(controller);
+ controller = NULL;
+ }
+ }
+
+ if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags))
+ agp_remove_client(priv->my_pid);
+
+ agp_remove_file_private(priv);
+ kfree(priv);
+ file->private_data = NULL;
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return 0;
+}
+
+static int agp_open(struct inode *inode, struct file *file)
+{
+ int minor = iminor(inode);
+ struct agp_file_private *priv;
+ struct agp_client *client;
+
+ if (minor != AGPGART_MINOR)
+ return -ENXIO;
+
+ mutex_lock(&(agp_fe.agp_mutex));
+
+ priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL);
+ if (priv == NULL) {
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return -ENOMEM;
+ }
+
+ set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
+ priv->my_pid = current->pid;
+
+ if (capable(CAP_SYS_RAWIO))
+ /* Root priv, can be controller */
+ set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
+
+ client = agp_find_client_by_pid(current->pid);
+
+ if (client != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ }
+ file->private_data = (void *) priv;
+ agp_insert_file_private(priv);
+ DBG("private=%p, client=%p", priv, client);
+
+ mutex_unlock(&(agp_fe.agp_mutex));
+
+ return 0;
+}
+
+
+static ssize_t agp_read(struct file *file, char __user *buf,
+ size_t count, loff_t * ppos)
+{
+ return -EINVAL;
+}
+
+static ssize_t agp_write(struct file *file, const char __user *buf,
+ size_t count, loff_t * ppos)
+{
+ return -EINVAL;
+}
+
+static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_info userinfo;
+ struct agp_kern_info kerninfo;
+
+ agp_copy_info(agp_bridge, &kerninfo);
+
+ userinfo.version.major = kerninfo.version.major;
+ userinfo.version.minor = kerninfo.version.minor;
+ userinfo.bridge_id = kerninfo.device->vendor |
+ (kerninfo.device->device << 16);
+ userinfo.agp_mode = kerninfo.mode;
+ userinfo.aper_base = kerninfo.aper_base;
+ userinfo.aper_size = kerninfo.aper_size;
+ userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
+ userinfo.pg_used = kerninfo.current_memory;
+
+ if (copy_to_user(arg, &userinfo, sizeof(struct agp_info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int agpioc_acquire_wrap(struct agp_file_private *priv)
+{
+ struct agp_controller *controller;
+
+ DBG("");
+
+ if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags)))
+ return -EPERM;
+
+ if (agp_fe.current_controller != NULL)
+ return -EBUSY;
+
+ if (!agp_bridge)
+ return -ENODEV;
+
+ if (atomic_read(&agp_bridge->agp_in_use))
+ return -EBUSY;
+
+ atomic_inc(&agp_bridge->agp_in_use);
+
+ agp_fe.backend_acquired = true;
+
+ controller = agp_find_controller_by_pid(priv->my_pid);
+
+ if (controller != NULL) {
+ agp_controller_make_current(controller);
+ } else {
+ controller = agp_create_controller(priv->my_pid);
+
+ if (controller == NULL) {
+ agp_fe.backend_acquired = false;
+ agp_backend_release(agp_bridge);
+ return -ENOMEM;
+ }
+ agp_insert_controller(controller);
+ agp_controller_make_current(controller);
+ }
+
+ set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &priv->access_flags);
+ return 0;
+}
+
+int agpioc_release_wrap(struct agp_file_private *priv)
+{
+ DBG("");
+ agp_controller_release_current(agp_fe.current_controller, priv);
+ return 0;
+}
+
+int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_setup mode;
+
+ DBG("");
+ if (copy_from_user(&mode, arg, sizeof(struct agp_setup)))
+ return -EFAULT;
+
+ agp_enable(agp_bridge, mode.agp_mode);
+ return 0;
+}
+
+static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_region reserve;
+ struct agp_client *client;
+ struct agp_file_private *client_priv;
+
+ DBG("");
+ if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
+ return -EFAULT;
+
+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
+ return -EFAULT;
+
+ client = agp_find_client_by_pid(reserve.pid);
+
+ if (reserve.seg_count == 0) {
+ /* remove a client */
+ client_priv = agp_find_private(reserve.pid);
+
+ if (client_priv != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+ }
+ if (client == NULL) {
+ /* client is already removed */
+ return 0;
+ }
+ return agp_remove_client(reserve.pid);
+ } else {
+ struct agp_segment *segment;
+
+ if (reserve.seg_count >= 16384)
+ return -EINVAL;
+
+ segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count),
+ GFP_KERNEL);
+
+ if (segment == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(segment, (void __user *) reserve.seg_list,
+ sizeof(struct agp_segment) * reserve.seg_count)) {
+ kfree(segment);
+ return -EFAULT;
+ }
+ reserve.seg_list = segment;
+
+ if (client == NULL) {
+ /* Create the client and add the segment */
+ client = agp_create_client(reserve.pid);
+
+ if (client == NULL) {
+ kfree(segment);
+ return -ENOMEM;
+ }
+ client_priv = agp_find_private(reserve.pid);
+
+ if (client_priv != NULL) {
+ set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
+ set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
+ }
+ }
+ return agp_create_segment(client, &reserve);
+ }
+ /* Will never really happen */
+ return -EINVAL;
+}
+
+int agpioc_protect_wrap(struct agp_file_private *priv)
+{
+ DBG("");
+ /* This function is not currently implemented */
+ return -EINVAL;
+}
+
+static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_memory *memory;
+ struct agp_allocate alloc;
+
+ DBG("");
+ if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate)))
+ return -EFAULT;
+
+ if (alloc.type >= AGP_USER_TYPES)
+ return -EINVAL;
+
+ memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
+
+ if (memory == NULL)
+ return -ENOMEM;
+
+ alloc.key = memory->key;
+ alloc.physical = memory->physical;
+
+ if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) {
+ agp_free_memory_wrap(memory);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
+{
+ struct agp_memory *memory;
+
+ DBG("");
+ memory = agp_find_mem_by_key(arg);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ agp_free_memory_wrap(memory);
+ return 0;
+}
+
+static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_bind bind_info;
+ struct agp_memory *memory;
+
+ DBG("");
+ if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind)))
+ return -EFAULT;
+
+ memory = agp_find_mem_by_key(bind_info.key);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ return agp_bind_memory(memory, bind_info.pg_start);
+}
+
+static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
+{
+ struct agp_memory *memory;
+ struct agp_unbind unbind;
+
+ DBG("");
+ if (copy_from_user(&unbind, arg, sizeof(struct agp_unbind)))
+ return -EFAULT;
+
+ memory = agp_find_mem_by_key(unbind.key);
+
+ if (memory == NULL)
+ return -EINVAL;
+
+ return agp_unbind_memory(memory);
+}
+
+static long agp_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct agp_file_private *curr_priv = file->private_data;
+ int ret_val = -ENOTTY;
+
+ DBG("priv=%p, cmd=%x", curr_priv, cmd);
+ mutex_lock(&(agp_fe.agp_mutex));
+
+ if ((agp_fe.current_controller == NULL) &&
+ (cmd != AGPIOC_ACQUIRE)) {
+ ret_val = -EINVAL;
+ goto ioctl_out;
+ }
+ if ((agp_fe.backend_acquired != true) &&
+ (cmd != AGPIOC_ACQUIRE)) {
+ ret_val = -EBUSY;
+ goto ioctl_out;
+ }
+ if (cmd != AGPIOC_ACQUIRE) {
+ if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
+ ret_val = -EPERM;
+ goto ioctl_out;
+ }
+ /* Use the original pid of the controller,
+ * in case it's threaded */
+
+ if (agp_fe.current_controller->pid != curr_priv->my_pid) {
+ ret_val = -EBUSY;
+ goto ioctl_out;
+ }
+ }
+
+ switch (cmd) {
+ case AGPIOC_INFO:
+ ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_ACQUIRE:
+ ret_val = agpioc_acquire_wrap(curr_priv);
+ break;
+
+ case AGPIOC_RELEASE:
+ ret_val = agpioc_release_wrap(curr_priv);
+ break;
+
+ case AGPIOC_SETUP:
+ ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_RESERVE:
+ ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_PROTECT:
+ ret_val = agpioc_protect_wrap(curr_priv);
+ break;
+
+ case AGPIOC_ALLOCATE:
+ ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_DEALLOCATE:
+ ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
+ break;
+
+ case AGPIOC_BIND:
+ ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_UNBIND:
+ ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
+ break;
+
+ case AGPIOC_CHIPSET_FLUSH:
+ break;
+ }
+
+ioctl_out:
+ DBG("ioctl returns %d\n", ret_val);
+ mutex_unlock(&(agp_fe.agp_mutex));
+ return ret_val;
+}
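+
+/*
+ * Example (hypothetical userspace sketch of the command flow dispatched
+ * above; error handling omitted, fd is an open /dev/agpgart descriptor):
+ *
+ *	struct agp_setup setup = { .agp_mode = mode };
+ *	struct agp_allocate alloc = { .type = 0, .pg_count = 16 };
+ *	struct agp_bind bind;
+ *
+ *	ioctl(fd, AGPIOC_ACQUIRE);		   // become the controller
+ *	ioctl(fd, AGPIOC_SETUP, &setup);	   // program the AGP mode
+ *	ioctl(fd, AGPIOC_ALLOCATE, &alloc);	   // fills in alloc.key
+ *	bind.key = alloc.key;
+ *	bind.pg_start = 0;
+ *	ioctl(fd, AGPIOC_BIND, &bind);		   // write pages into the GATT
+ *	...
+ *	ioctl(fd, AGPIOC_DEALLOCATE, alloc.key);
+ *	ioctl(fd, AGPIOC_RELEASE);
+ *
+ * Every command except AGPIOC_ACQUIRE requires the caller to already be the
+ * current controller, as enforced at the top of agp_ioctl() above.
+ */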
+
+static const struct file_operations agp_fops =
+{
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = agp_read,
+ .write = agp_write,
+ .unlocked_ioctl = agp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_agp_ioctl,
+#endif
+ .mmap = agp_mmap,
+ .open = agp_open,
+ .release = agp_release,
+};
+
+static struct miscdevice agp_miscdev =
+{
+ .minor = AGPGART_MINOR,
+ .name = "agpgart",
+ .fops = &agp_fops
+};
+
+int agp_frontend_initialize(void)
+{
+ memset(&agp_fe, 0, sizeof(struct agp_front_data));
+ mutex_init(&(agp_fe.agp_mutex));
+
+ if (misc_register(&agp_miscdev)) {
+ printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR);
+ return -EIO;
+ }
+ return 0;
+}
+
+void agp_frontend_cleanup(void)
+{
+ misc_deregister(&agp_miscdev);
+}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
new file mode 100644
index 00000000..b072648d
--- /dev/null
+++ b/drivers/char/agp/generic.c
@@ -0,0 +1,1438 @@
+/*
+ * AGPGART driver.
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2005 Dave Jones.
+ * Copyright (C) 1999 Jeff Hartmann.
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * TODO:
+ * - Allocate more than order 0 pages to avoid too much linear map splitting.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/pm.h>
+#include <linux/agp_backend.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include "agp.h"
+
+__u32 *agp_gatt_table;
+int agp_memory_reserved;
+
+/*
+ * Needed by the Nforce GART driver for the time being. Would be
+ * nice to do this some other way instead of needing this export.
+ */
+EXPORT_SYMBOL_GPL(agp_memory_reserved);
+
+/*
+ * Generic routines for handling agp_memory structures -
+ * They use the basic page allocation routines to do the brunt of the work.
+ */
+
+void agp_free_key(int key)
+{
+ if (key < 0)
+ return;
+
+ if (key < MAXKEY)
+ clear_bit(key, agp_bridge->key_list);
+}
+EXPORT_SYMBOL(agp_free_key);
+
+
+static int agp_get_key(void)
+{
+ int bit;
+
+ bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
+ if (bit < MAXKEY) {
+ set_bit(bit, agp_bridge->key_list);
+ return bit;
+ }
+ return -1;
+}
+
+/*
+ * Use kmalloc if possible for the page list. Otherwise fall back to
+ * vmalloc. This speeds things up and also saves memory for small AGP
+ * regions.
+ */
+
+void agp_alloc_page_array(size_t size, struct agp_memory *mem)
+{
+ mem->pages = NULL;
+
+ if (size <= 2*PAGE_SIZE)
+ mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ if (mem->pages == NULL) {
+ mem->pages = vmalloc(size);
+ }
+}
+EXPORT_SYMBOL(agp_alloc_page_array);
+
+void agp_free_page_array(struct agp_memory *mem)
+{
+ if (is_vmalloc_addr(mem->pages)) {
+ vfree(mem->pages);
+ } else {
+ kfree(mem->pages);
+ }
+}
+EXPORT_SYMBOL(agp_free_page_array);
+
+
+static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
+{
+ struct agp_memory *new;
+ unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
+
+ if (INT_MAX/sizeof(struct page *) < num_agp_pages)
+ return NULL;
+
+ new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
+ if (new == NULL)
+ return NULL;
+
+ new->key = agp_get_key();
+
+ if (new->key < 0) {
+ kfree(new);
+ return NULL;
+ }
+
+ agp_alloc_page_array(alloc_size, new);
+
+ if (new->pages == NULL) {
+ agp_free_key(new->key);
+ kfree(new);
+ return NULL;
+ }
+ new->num_scratch_pages = 0;
+ return new;
+}
+
+struct agp_memory *agp_create_memory(int scratch_pages)
+{
+ struct agp_memory *new;
+
+ new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
+ if (new == NULL)
+ return NULL;
+
+ new->key = agp_get_key();
+
+ if (new->key < 0) {
+ kfree(new);
+ return NULL;
+ }
+
+ agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
+
+ if (new->pages == NULL) {
+ agp_free_key(new->key);
+ kfree(new);
+ return NULL;
+ }
+ new->num_scratch_pages = scratch_pages;
+ new->type = AGP_NORMAL_MEMORY;
+ return new;
+}
+EXPORT_SYMBOL(agp_create_memory);
+
+/**
+ * agp_free_memory - free memory associated with an agp_memory pointer.
+ *
+ * @curr: agp_memory pointer to be freed.
+ *
+ * It is the only function that can be called when the backend is not owned
+ * by the caller. (So it can free memory on client death.)
+ */
+void agp_free_memory(struct agp_memory *curr)
+{
+ size_t i;
+
+ if (curr == NULL)
+ return;
+
+ if (curr->is_bound)
+ agp_unbind_memory(curr);
+
+ if (curr->type >= AGP_USER_TYPES) {
+ agp_generic_free_by_type(curr);
+ return;
+ }
+
+ if (curr->type != 0) {
+ curr->bridge->driver->free_by_type(curr);
+ return;
+ }
+ if (curr->page_count != 0) {
+ if (curr->bridge->driver->agp_destroy_pages) {
+ curr->bridge->driver->agp_destroy_pages(curr);
+ } else {
+
+ for (i = 0; i < curr->page_count; i++) {
+ curr->bridge->driver->agp_destroy_page(
+ curr->pages[i],
+ AGP_PAGE_DESTROY_UNMAP);
+ }
+ for (i = 0; i < curr->page_count; i++) {
+ curr->bridge->driver->agp_destroy_page(
+ curr->pages[i],
+ AGP_PAGE_DESTROY_FREE);
+ }
+ }
+ }
+ agp_free_key(curr->key);
+ agp_free_page_array(curr);
+ kfree(curr);
+}
+EXPORT_SYMBOL(agp_free_memory);
+
+#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
+
+/**
+ * agp_allocate_memory - allocate a group of pages of a certain type.
+ *
+ * @bridge: agp_bridge_data pointer for the bridge doing the allocation.
+ * @page_count: size_t argument of the number of pages.
+ * @type: u32 argument of the type of memory to be allocated.
+ *
+ * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY, which
+ * maps to physical RAM. Any other type is device dependent.
+ *
+ * It returns NULL whenever memory is unavailable.
+ */
+struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
+ size_t page_count, u32 type)
+{
+ int scratch_pages;
+ struct agp_memory *new;
+ size_t i;
+ int cur_memory;
+
+ if (!bridge)
+ return NULL;
+
+ cur_memory = atomic_read(&bridge->current_memory_agp);
+ if ((cur_memory + page_count > bridge->max_memory_agp) ||
+ (cur_memory + page_count < page_count))
+ return NULL;
+
+ if (type >= AGP_USER_TYPES) {
+ new = agp_generic_alloc_user(page_count, type);
+ if (new)
+ new->bridge = bridge;
+ return new;
+ }
+
+ if (type != 0) {
+ new = bridge->driver->alloc_by_type(page_count, type);
+ if (new)
+ new->bridge = bridge;
+ return new;
+ }
+
+ scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+
+ new = agp_create_memory(scratch_pages);
+
+ if (new == NULL)
+ return NULL;
+
+ if (bridge->driver->agp_alloc_pages) {
+ if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
+ agp_free_memory(new);
+ return NULL;
+ }
+ new->bridge = bridge;
+ return new;
+ }
+
+ for (i = 0; i < page_count; i++) {
+ struct page *page = bridge->driver->agp_alloc_page(bridge);
+
+ if (page == NULL) {
+ agp_free_memory(new);
+ return NULL;
+ }
+ new->pages[i] = page;
+ new->page_count++;
+ }
+ new->bridge = bridge;
+
+ return new;
+}
+EXPORT_SYMBOL(agp_allocate_memory);
+
+
+/* End - Generic routines for handling agp_memory structures */
+
+
+static int agp_return_size(void)
+{
+ int current_size;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+
+ switch (agp_bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ current_size = A_SIZE_8(temp)->size;
+ break;
+ case U16_APER_SIZE:
+ current_size = A_SIZE_16(temp)->size;
+ break;
+ case U32_APER_SIZE:
+ current_size = A_SIZE_32(temp)->size;
+ break;
+ case LVL2_APER_SIZE:
+ current_size = A_SIZE_LVL2(temp)->size;
+ break;
+ case FIXED_APER_SIZE:
+ current_size = A_SIZE_FIX(temp)->size;
+ break;
+ default:
+ current_size = 0;
+ break;
+ }
+
+ current_size -= (agp_memory_reserved / (1024*1024));
+	if (current_size < 0)
+ current_size = 0;
+ return current_size;
+}
+
+
+int agp_num_entries(void)
+{
+ int num_entries;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+
+ switch (agp_bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ num_entries = A_SIZE_8(temp)->num_entries;
+ break;
+ case U16_APER_SIZE:
+ num_entries = A_SIZE_16(temp)->num_entries;
+ break;
+ case U32_APER_SIZE:
+ num_entries = A_SIZE_32(temp)->num_entries;
+ break;
+ case LVL2_APER_SIZE:
+ num_entries = A_SIZE_LVL2(temp)->num_entries;
+ break;
+ case FIXED_APER_SIZE:
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+ break;
+ default:
+ num_entries = 0;
+ break;
+ }
+
+	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
+	if (num_entries < 0)
+ num_entries = 0;
+ return num_entries;
+}
+EXPORT_SYMBOL_GPL(agp_num_entries);
+
+
+/**
+ * agp_copy_info - copy bridge state information
+ *
+ * @bridge: agp_bridge_data pointer describing the bridge being queried.
+ * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
+ *
+ * This function copies information about the agp bridge device and the state of
+ * the agp backend into an agp_kern_info pointer.
+ */
+int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
+{
+ memset(info, 0, sizeof(struct agp_kern_info));
+ if (!bridge) {
+ info->chipset = NOT_SUPPORTED;
+ return -EIO;
+ }
+
+ info->version.major = bridge->version->major;
+ info->version.minor = bridge->version->minor;
+ info->chipset = SUPPORTED;
+ info->device = bridge->dev;
+ if (bridge->mode & AGPSTAT_MODE_3_0)
+ info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
+ else
+ info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
+ info->aper_base = bridge->gart_bus_addr;
+ info->aper_size = agp_return_size();
+ info->max_memory = bridge->max_memory_agp;
+ info->current_memory = atomic_read(&bridge->current_memory_agp);
+ info->cant_use_aperture = bridge->driver->cant_use_aperture;
+ info->vm_ops = bridge->vm_ops;
+ info->page_mask = ~0UL;
+ return 0;
+}
+EXPORT_SYMBOL(agp_copy_info);
+
+/* End - Routine to copy over information structure */
+
+/*
+ * Routines for handling swapping of agp_memory into the GATT -
+ * These routines take agp_memory and insert them into the GATT.
+ * They call device specific routines to actually write to the GATT.
+ */
+
+/**
+ * agp_bind_memory - Bind an agp_memory structure into the GATT.
+ *
+ * @curr: agp_memory pointer
+ * @pg_start: an offset into the graphics aperture translation table
+ *
+ * It returns -EINVAL if the pointer == NULL.
+ * It returns -EBUSY if the area of the table requested is already in use.
+ */
+int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
+{
+ int ret_val;
+
+ if (curr == NULL)
+ return -EINVAL;
+
+ if (curr->is_bound) {
+ printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
+ return -EINVAL;
+ }
+ if (!curr->is_flushed) {
+ curr->bridge->driver->cache_flush();
+ curr->is_flushed = true;
+ }
+
+ ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
+
+ if (ret_val != 0)
+ return ret_val;
+
+ curr->is_bound = true;
+ curr->pg_start = pg_start;
+ spin_lock(&agp_bridge->mapped_lock);
+ list_add(&curr->mapped_list, &agp_bridge->mapped_list);
+ spin_unlock(&agp_bridge->mapped_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(agp_bind_memory);
+
+
+/**
+ * agp_unbind_memory - Removes an agp_memory structure from the GATT
+ *
+ * @curr: agp_memory pointer to be removed from the GATT.
+ *
+ * It returns -EINVAL if this piece of agp_memory is not currently bound to
+ * the graphics aperture translation table, or if the agp_memory pointer == NULL.
+ */
+int agp_unbind_memory(struct agp_memory *curr)
+{
+ int ret_val;
+
+ if (curr == NULL)
+ return -EINVAL;
+
+ if (!curr->is_bound) {
+ printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
+ return -EINVAL;
+ }
+
+ ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
+
+ if (ret_val != 0)
+ return ret_val;
+
+ curr->is_bound = false;
+ curr->pg_start = 0;
+ spin_lock(&curr->bridge->mapped_lock);
+ list_del(&curr->mapped_list);
+ spin_unlock(&curr->bridge->mapped_lock);
+ return 0;
+}
+EXPORT_SYMBOL(agp_unbind_memory);
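+
+/*
+ * Example (hypothetical in-kernel sketch, assuming a valid bridge and a
+ * free aperture offset pg_start): a caller pairs the routines above
+ * roughly as follows.
+ *
+ *	struct agp_memory *mem;
+ *
+ *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
+ *	if (!mem)
+ *		return -ENOMEM;
+ *	if (agp_bind_memory(mem, pg_start) != 0) {
+ *		agp_free_memory(mem);
+ *		return -EBUSY;
+ *	}
+ *	...
+ *	agp_unbind_memory(mem);
+ *	agp_free_memory(mem);
+ *
+ * Note that agp_free_memory() unbinds a still-bound region itself, so the
+ * explicit agp_unbind_memory() on the teardown path is optional.
+ */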
+
+
+/* End - Routines for handling swapping of agp_memory into the GATT */
+
+
+/* Generic Agp routines - Start */
+static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
+{
+ u32 tmp;
+
+ if (*requested_mode & AGP2_RESERVED_MASK) {
+ printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
+ *requested_mode & AGP2_RESERVED_MASK, *requested_mode);
+ *requested_mode &= ~AGP2_RESERVED_MASK;
+ }
+
+ /*
+ * Some dumb bridges are programmed to disobey the AGP2 spec.
+	 * This is likely BIOS misprogramming rather than a power-on default, or
+ * it would be a lot more common.
+ * https://bugs.freedesktop.org/show_bug.cgi?id=8816
+ * AGPv2 spec 6.1.9 states:
+ * The RATE field indicates the data transfer rates supported by this
+ * device. A.G.P. devices must report all that apply.
+ * Fix them up as best we can.
+ */
+ switch (*bridge_agpstat & 7) {
+ case 4:
+ *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
+ printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
+ "Fixing up support for x2 & x1\n");
+ break;
+ case 2:
+ *bridge_agpstat |= AGPSTAT2_1X;
+ printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
+ "Fixing up support for x1\n");
+ break;
+ default:
+ break;
+ }
+
+ /* Check the speed bits make sense. Only one should be set. */
+ tmp = *requested_mode & 7;
+ switch (tmp) {
+ case 0:
+ printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
+ *requested_mode |= AGPSTAT2_1X;
+ break;
+ case 1:
+ case 2:
+ break;
+ case 3:
+ *requested_mode &= ~(AGPSTAT2_1X); /* rate=2 */
+ break;
+ case 4:
+ break;
+ case 5:
+ case 6:
+ case 7:
+ *requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
+ break;
+ }
+
+ /* disable SBA if it's not supported */
+ if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
+ *bridge_agpstat &= ~AGPSTAT_SBA;
+
+ /* Set rate */
+ if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
+ *bridge_agpstat &= ~AGPSTAT2_4X;
+
+ if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
+ *bridge_agpstat &= ~AGPSTAT2_2X;
+
+ if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
+ *bridge_agpstat &= ~AGPSTAT2_1X;
+
+ /* Now we know what mode it should be, clear out the unwanted bits. */
+ if (*bridge_agpstat & AGPSTAT2_4X)
+ *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X); /* 4X */
+
+ if (*bridge_agpstat & AGPSTAT2_2X)
+ *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X); /* 2X */
+
+ if (*bridge_agpstat & AGPSTAT2_1X)
+ *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X); /* 1X */
+
+ /* Apply any errata. */
+ if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
+ *bridge_agpstat &= ~AGPSTAT_FW;
+
+ if (agp_bridge->flags & AGP_ERRATA_SBA)
+ *bridge_agpstat &= ~AGPSTAT_SBA;
+
+ if (agp_bridge->flags & AGP_ERRATA_1X) {
+ *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
+ *bridge_agpstat |= AGPSTAT2_1X;
+ }
+
+ /* If we've dropped down to 1X, disable fast writes. */
+ if (*bridge_agpstat & AGPSTAT2_1X)
+ *bridge_agpstat &= ~AGPSTAT_FW;
+}
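+
+/*
+ * Worked example of the rate normalization above: a requested_mode with
+ * rate bits 0x3 (AGPSTAT2_1X | AGPSTAT2_2X) has the 1X bit stripped,
+ * leaving x2, and 0x5-0x7 collapse to AGPSTAT2_4X; the subsequent
+ * bridge_agpstat clean-up then leaves exactly one of the three rate
+ * bits set.
+ */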
+
+/*
+ * requested_mode = Mode requested by (typically) X.
+ * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
+ * vga_agpstat = PCI_AGP_STATUS from graphic card.
+ */
+static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
+{
+	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
+ u32 tmp;
+
+ if (*requested_mode & AGP3_RESERVED_MASK) {
+ printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
+ *requested_mode & AGP3_RESERVED_MASK, *requested_mode);
+ *requested_mode &= ~AGP3_RESERVED_MASK;
+ }
+
+ /* Check the speed bits make sense. */
+ tmp = *requested_mode & 7;
+ if (tmp == 0) {
+ printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
+ *requested_mode |= AGPSTAT3_4X;
+ }
+ if (tmp >= 3) {
+ printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
+ *requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
+ }
+
+ /* ARQSZ - Set the value to the maximum one.
+ * Don't allow the mode register to override values. */
+ *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
+ max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));
+
+ /* Calibration cycle.
+ * Don't allow the mode register to override values. */
+ *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
+ min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));
+
+ /* SBA *must* be supported for AGP v3 */
+ *bridge_agpstat |= AGPSTAT_SBA;
+
+ /*
+ * Set speed.
+ * Check for invalid speeds. This can happen when applications
+	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware.
+ */
+ if (*requested_mode & AGPSTAT_MODE_3_0) {
+ /*
+		 * The caller hasn't a clue what it is doing. The bridge is in
+		 * 3.0 mode and has been passed a 3.0 mode, but with 2.x speed
+		 * bits set.
+ * AGP2.x 4x -> AGP3.0 4x.
+ */
+ if (*requested_mode & AGPSTAT2_4X) {
+ printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
+ current->comm, *requested_mode);
+ *requested_mode &= ~AGPSTAT2_4X;
+ *requested_mode |= AGPSTAT3_4X;
+ }
+ } else {
+ /*
+ * The caller doesn't know what they are doing. We are in 3.0 mode,
+ * but have been passed an AGP 2.x mode.
+ * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
+ */
+ printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
+ current->comm, *requested_mode);
+ *requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
+ *requested_mode |= AGPSTAT3_4X;
+ }
+
+ if (*requested_mode & AGPSTAT3_8X) {
+ if (!(*bridge_agpstat & AGPSTAT3_8X)) {
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *bridge_agpstat |= AGPSTAT3_4X;
+ printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
+ return;
+ }
+ if (!(*vga_agpstat & AGPSTAT3_8X)) {
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *bridge_agpstat |= AGPSTAT3_4X;
+ printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
+ return;
+ }
+ /* All set, bridge & device can do AGP x8*/
+ *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+ goto done;
+
+ } else if (*requested_mode & AGPSTAT3_4X) {
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *bridge_agpstat |= AGPSTAT3_4X;
+ goto done;
+
+ } else {
+
+ /*
+		 * If no AGP mode was specified, we check whether both the
+		 * graphics card and the bridge can do x8, and use it if so.
+ * If not, we fall back to x4 mode.
+ */
+ if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
+ printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
+ "supported by bridge & card (x8).\n");
+ *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+ *vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+ } else {
+ printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
+ if (!(*bridge_agpstat & AGPSTAT3_8X)) {
+ printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
+ *bridge_agpstat, origbridge);
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *bridge_agpstat |= AGPSTAT3_4X;
+ }
+ if (!(*vga_agpstat & AGPSTAT3_8X)) {
+ printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
+ *vga_agpstat, origvga);
+ *vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *vga_agpstat |= AGPSTAT3_4X;
+ }
+ }
+ }
+
+done:
+ /* Apply any errata. */
+ if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
+ *bridge_agpstat &= ~AGPSTAT_FW;
+
+ if (agp_bridge->flags & AGP_ERRATA_SBA)
+ *bridge_agpstat &= ~AGPSTAT_SBA;
+
+ if (agp_bridge->flags & AGP_ERRATA_1X) {
+ *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
+ *bridge_agpstat |= AGPSTAT2_1X;
+ }
+}
+
+
+/**
+ * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
+ * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
+ * @requested_mode: requested agp_stat from userspace (Typically from X)
+ * @bridge_agpstat: current agp_stat from AGP bridge.
+ *
+ * This function will hunt for an AGP graphics card, and try to match
+ * the requested mode to the capabilities of both the bridge and the card.
+ */
+u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
+{
+ struct pci_dev *device = NULL;
+ u32 vga_agpstat;
+ u8 cap_ptr;
+
+ for (;;) {
+ device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
+ if (!device) {
+ printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
+ return 0;
+ }
+ cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (cap_ptr)
+ break;
+ }
+
+ /*
+	 * OK, here we have an AGP device. Disable impossible
+	 * settings, and adjust the request queue to the minimum.
+ */
+ pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);
+
+ /* adjust RQ depth */
+ bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
+ min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
+ min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));
+
+ /* disable FW if it's not supported */
+ if (!((bridge_agpstat & AGPSTAT_FW) &&
+ (vga_agpstat & AGPSTAT_FW) &&
+ (requested_mode & AGPSTAT_FW)))
+ bridge_agpstat &= ~AGPSTAT_FW;
+
+ /* Check to see if we are operating in 3.0 mode */
+ if (agp_bridge->mode & AGPSTAT_MODE_3_0)
+ agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
+ else
+ agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
+
+ pci_dev_put(device);
+ return bridge_agpstat;
+}
+EXPORT_SYMBOL(agp_collect_device_status);
+
+
+void agp_device_command(u32 bridge_agpstat, bool agp_v3)
+{
+ struct pci_dev *device = NULL;
+ int mode;
+
+ mode = bridge_agpstat & 0x7;
+ if (agp_v3)
+ mode *= 4;
+
+ for_each_pci_dev(device) {
+ u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (!agp)
+ continue;
+
+ dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
+ agp_v3 ? 3 : 2, mode);
+ pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
+ }
+}
+EXPORT_SYMBOL(agp_device_command);
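+
+/*
+ * Example of the rate reporting above: the low three status bits are
+ * one-hot in AGP 2.x (1 -> x1, 2 -> x2, 4 -> x4) but re-encoded by AGP
+ * 3.0 (1 -> x4, 2 -> x8), which is why agp_device_command() multiplies
+ * by four when agp_v3 is set: a bridge_agpstat of 0x2 is logged as "2x
+ * mode" for a v2 device and "8x mode" for a v3 device.
+ */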
+
+
+void get_agp_version(struct agp_bridge_data *bridge)
+{
+ u32 ncapid;
+
+ /* Exit early if already set by errata workarounds. */
+ if (bridge->major_version != 0)
+ return;
+
+ pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
+ bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
+ bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
+}
+EXPORT_SYMBOL(get_agp_version);
+
+
+void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
+{
+ u32 bridge_agpstat, temp;
+
+ get_agp_version(agp_bridge);
+
+ dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
+ agp_bridge->major_version, agp_bridge->minor_version);
+
+ pci_read_config_dword(agp_bridge->dev,
+ agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
+
+ bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
+ if (bridge_agpstat == 0)
+ /* Something bad happened. FIXME: Return error code? */
+ return;
+
+ bridge_agpstat |= AGPSTAT_AGP_ENABLE;
+
+ /* Do AGP version specific frobbing. */
+ if (bridge->major_version >= 3) {
+ if (bridge->mode & AGPSTAT_MODE_3_0) {
+ /* If we have 3.5, we can do the isoch stuff. */
+ if (bridge->minor_version >= 5)
+ agp_3_5_enable(bridge);
+ agp_device_command(bridge_agpstat, true);
+ return;
+ } else {
+ /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
+ bridge_agpstat &= ~(7<<10) ;
+ pci_read_config_dword(bridge->dev,
+ bridge->capndx+AGPCTRL, &temp);
+ temp |= (1<<9);
+ pci_write_config_dword(bridge->dev,
+ bridge->capndx+AGPCTRL, temp);
+
+ dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
+ }
+ }
+
+ /* AGP v<3 */
+ agp_device_command(bridge_agpstat, false);
+}
+EXPORT_SYMBOL(agp_generic_enable);
+
+
+int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ char *table;
+ char *table_end;
+ int size;
+ int page_order;
+ int num_entries;
+ int i;
+ void *temp;
+ struct page *page;
+
+ /* The generic routines can't handle 2 level gatt's */
+ if (bridge->driver->size_type == LVL2_APER_SIZE)
+ return -EINVAL;
+
+ table = NULL;
+ i = bridge->aperture_size_idx;
+ temp = bridge->current_size;
+ size = page_order = num_entries = 0;
+
+ if (bridge->driver->size_type != FIXED_APER_SIZE) {
+ do {
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ size = A_SIZE_8(temp)->size;
+ page_order =
+ A_SIZE_8(temp)->page_order;
+ num_entries =
+ A_SIZE_8(temp)->num_entries;
+ break;
+ case U16_APER_SIZE:
+ size = A_SIZE_16(temp)->size;
+ page_order = A_SIZE_16(temp)->page_order;
+ num_entries = A_SIZE_16(temp)->num_entries;
+ break;
+ case U32_APER_SIZE:
+ size = A_SIZE_32(temp)->size;
+ page_order = A_SIZE_32(temp)->page_order;
+ num_entries = A_SIZE_32(temp)->num_entries;
+ break;
+			/* These cases will never really happen. */
+ case FIXED_APER_SIZE:
+ case LVL2_APER_SIZE:
+ default:
+ size = page_order = num_entries = 0;
+ break;
+ }
+
+ table = alloc_gatt_pages(page_order);
+
+ if (table == NULL) {
+ i++;
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ bridge->current_size = A_IDX8(bridge);
+ break;
+ case U16_APER_SIZE:
+ bridge->current_size = A_IDX16(bridge);
+ break;
+ case U32_APER_SIZE:
+ bridge->current_size = A_IDX32(bridge);
+ break;
+ /* These cases will never really happen. */
+ case FIXED_APER_SIZE:
+ case LVL2_APER_SIZE:
+ default:
+ break;
+ }
+ temp = bridge->current_size;
+ } else {
+ bridge->aperture_size_idx = i;
+ }
+ } while (!table && (i < bridge->driver->num_aperture_sizes));
+ } else {
+ size = ((struct aper_size_info_fixed *) temp)->size;
+ page_order = ((struct aper_size_info_fixed *) temp)->page_order;
+ num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
+ table = alloc_gatt_pages(page_order);
+ }
+
+ if (table == NULL)
+ return -ENOMEM;
+
+ table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ SetPageReserved(page);
+
+ bridge->gatt_table_real = (u32 *) table;
+ agp_gatt_table = (void *)table;
+
+ bridge->driver->cache_flush();
+#ifdef CONFIG_X86
+ if (set_memory_uc((unsigned long)table, 1 << page_order))
+ printk(KERN_WARNING "Could not set GATT table memory to UC!");
+
+ bridge->gatt_table = (void *)table;
+#else
+ bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
+ (PAGE_SIZE * (1 << page_order)));
+ bridge->driver->cache_flush();
+#endif
+
+ if (bridge->gatt_table == NULL) {
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ ClearPageReserved(page);
+
+ free_gatt_pages(table, page_order);
+
+ return -ENOMEM;
+ }
+ bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
+
+ /* AK: bogus, should encode addresses > 4GB */
+ for (i = 0; i < num_entries; i++) {
+ writel(bridge->scratch_page, bridge->gatt_table+i);
+ readl(bridge->gatt_table+i); /* PCI Posting. */
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_create_gatt_table);
+
+int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ char *table, *table_end;
+ void *temp;
+ struct page *page;
+
+ temp = bridge->current_size;
+
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ page_order = A_SIZE_8(temp)->page_order;
+ break;
+ case U16_APER_SIZE:
+ page_order = A_SIZE_16(temp)->page_order;
+ break;
+ case U32_APER_SIZE:
+ page_order = A_SIZE_32(temp)->page_order;
+ break;
+ case FIXED_APER_SIZE:
+ page_order = A_SIZE_FIX(temp)->page_order;
+ break;
+ case LVL2_APER_SIZE:
+ /* The generic routines can't deal with 2 level gatt's */
+ return -EINVAL;
+ break;
+ default:
+ page_order = 0;
+ break;
+ }
+
+ /* Do not worry about freeing memory, because if this is
+ * called, then all agp memory is deallocated and removed
+ * from the table. */
+
+#ifdef CONFIG_X86
+ set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
+#else
+ iounmap(bridge->gatt_table);
+#endif
+ table = (char *) bridge->gatt_table_real;
+ table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ ClearPageReserved(page);
+
+ free_gatt_pages(bridge->gatt_table_real, page_order);
+
+ agp_gatt_table = NULL;
+ bridge->gatt_table = NULL;
+ bridge->gatt_table_real = NULL;
+ bridge->gatt_bus_addr = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_free_gatt_table);
+
+
+int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
+{
+ int num_entries;
+ size_t i;
+ off_t j;
+ void *temp;
+ struct agp_bridge_data *bridge;
+ int mask_type;
+
+ bridge = mem->bridge;
+ if (!bridge)
+ return -EINVAL;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ temp = bridge->current_size;
+
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ num_entries = A_SIZE_8(temp)->num_entries;
+ break;
+ case U16_APER_SIZE:
+ num_entries = A_SIZE_16(temp)->num_entries;
+ break;
+ case U32_APER_SIZE:
+ num_entries = A_SIZE_32(temp)->num_entries;
+ break;
+ case FIXED_APER_SIZE:
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+ break;
+ case LVL2_APER_SIZE:
+ /* The generic routines can't deal with 2 level gatt's */
+ return -EINVAL;
+ break;
+ default:
+ num_entries = 0;
+ break;
+ }
+
+ num_entries -= agp_memory_reserved/PAGE_SIZE;
+ if (num_entries < 0) num_entries = 0;
+
+ if (type != mem->type)
+ return -EINVAL;
+
+ mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+ if (mask_type != 0) {
+ /* The generic routines know nothing of memory types */
+ return -EINVAL;
+ }
+
+ if (((pg_start + mem->page_count) > num_entries) ||
+ ((pg_start + mem->page_count) < pg_start))
+ return -EINVAL;
+
+ j = pg_start;
+
+ while (j < (pg_start + mem->page_count)) {
+ if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
+ return -EBUSY;
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ bridge->driver->cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(bridge->driver->mask_memory(bridge,
+ page_to_phys(mem->pages[i]),
+ mask_type),
+ bridge->gatt_table+j);
+ }
+ readl(bridge->gatt_table+j-1); /* PCI Posting. */
+
+ bridge->driver->tlb_flush(mem);
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_insert_memory);
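+
+/*
+ * Example of the GATT entry construction above (values hypothetical):
+ * for a page at physical address 0x1234000 on a bridge whose
+ * masks[0].mask is 0x1, agp_generic_mask_memory() below yields the
+ * entry 0x1234001, which is written into the table with writel(); the
+ * trailing readl() forces PCI posting so the writes are visible before
+ * the TLB flush.
+ */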
+
+
+int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ size_t i;
+ struct agp_bridge_data *bridge;
+ int mask_type, num_entries;
+
+ bridge = mem->bridge;
+ if (!bridge)
+ return -EINVAL;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ if (type != mem->type)
+ return -EINVAL;
+
+ num_entries = agp_num_entries();
+ if (((pg_start + mem->page_count) > num_entries) ||
+ ((pg_start + mem->page_count) < pg_start))
+ return -EINVAL;
+
+ mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+ if (mask_type != 0) {
+ /* The generic routines know nothing of memory types */
+ return -EINVAL;
+ }
+
+ /* AK: bogus, should encode addresses > 4GB */
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(bridge->scratch_page, bridge->gatt_table+i);
+ }
+ readl(bridge->gatt_table+i-1); /* PCI Posting. */
+
+ bridge->driver->tlb_flush(mem);
+ return 0;
+}
+EXPORT_SYMBOL(agp_generic_remove_memory);
+
+struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
+{
+ return NULL;
+}
+EXPORT_SYMBOL(agp_generic_alloc_by_type);
+
+void agp_generic_free_by_type(struct agp_memory *curr)
+{
+ agp_free_page_array(curr);
+ agp_free_key(curr->key);
+ kfree(curr);
+}
+EXPORT_SYMBOL(agp_generic_free_by_type);
+
+struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
+{
+ struct agp_memory *new;
+ int i;
+ int pages;
+
+ pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+ new = agp_create_user_memory(page_count);
+ if (new == NULL)
+ return NULL;
+
+ for (i = 0; i < page_count; i++)
+ new->pages[i] = NULL;
+ new->page_count = 0;
+ new->type = type;
+ new->num_scratch_pages = pages;
+
+ return new;
+}
+EXPORT_SYMBOL(agp_generic_alloc_user);
+
+/*
+ * Basic Page Allocation Routines -
+ * These routines handle page allocation and by default they reserve the allocated
+ * memory. They also handle incrementing the current_memory_agp value, which is
+ * checked against a maximum value.
+ */
+
+int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
+{
+ struct page * page;
+ int i, ret = -ENOMEM;
+
+ for (i = 0; i < num_pages; i++) {
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ /* agp_free_memory() needs gart address */
+ if (page == NULL)
+ goto out;
+
+#ifndef CONFIG_X86
+ map_page_into_agp(page);
+#endif
+ get_page(page);
+ atomic_inc(&agp_bridge->current_memory_agp);
+
+ mem->pages[i] = page;
+ mem->page_count++;
+ }
+
+#ifdef CONFIG_X86
+ set_pages_array_uc(mem->pages, num_pages);
+#endif
+ ret = 0;
+out:
+ return ret;
+}
+EXPORT_SYMBOL(agp_generic_alloc_pages);
+
+struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
+{
+ struct page * page;
+
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ if (page == NULL)
+ return NULL;
+
+ map_page_into_agp(page);
+
+ get_page(page);
+ atomic_inc(&agp_bridge->current_memory_agp);
+ return page;
+}
+EXPORT_SYMBOL(agp_generic_alloc_page);
+
+void agp_generic_destroy_pages(struct agp_memory *mem)
+{
+ int i;
+ struct page *page;
+
+ if (!mem)
+ return;
+
+#ifdef CONFIG_X86
+ set_pages_array_wb(mem->pages, mem->page_count);
+#endif
+
+ for (i = 0; i < mem->page_count; i++) {
+ page = mem->pages[i];
+
+#ifndef CONFIG_X86
+ unmap_page_from_agp(page);
+#endif
+ put_page(page);
+ __free_page(page);
+ atomic_dec(&agp_bridge->current_memory_agp);
+ mem->pages[i] = NULL;
+ }
+}
+EXPORT_SYMBOL(agp_generic_destroy_pages);
+
+void agp_generic_destroy_page(struct page *page, int flags)
+{
+ if (page == NULL)
+ return;
+
+ if (flags & AGP_PAGE_DESTROY_UNMAP)
+ unmap_page_from_agp(page);
+
+ if (flags & AGP_PAGE_DESTROY_FREE) {
+ put_page(page);
+ __free_page(page);
+ atomic_dec(&agp_bridge->current_memory_agp);
+ }
+}
+EXPORT_SYMBOL(agp_generic_destroy_page);
+
+/* End Basic Page Allocation Routines */
+
+
+/**
+ * agp_enable - initialise the agp point-to-point connection.
+ *
+ * @bridge: agp_bridge_data pointer for the bridge to enable.
+ * @mode: agp mode register value to configure with.
+ */
+void agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ if (!bridge)
+ return;
+ bridge->driver->agp_enable(bridge, mode);
+}
+EXPORT_SYMBOL(agp_enable);
+
+/* When we remove the global variable agp_bridge from all drivers,
+ * agp_alloc_bridge and agp_generic_find_bridge will need to be updated.
+ */
+
+struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
+{
+ if (list_empty(&agp_bridges))
+ return NULL;
+
+ return agp_bridge;
+}
+
+static void ipi_handler(void *null)
+{
+ flush_agp_cache();
+}
+
+void global_cache_flush(void)
+{
+ if (on_each_cpu(ipi_handler, NULL, 1) != 0)
+ panic(PFX "timed out waiting for the other CPUs!\n");
+}
+EXPORT_SYMBOL(global_cache_flush);
+
+unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
+ dma_addr_t addr, int type)
+{
+ /* memory type is ignored in the generic routine */
+ if (bridge->driver->masks)
+ return addr | bridge->driver->masks[0].mask;
+ else
+ return addr;
+}
+EXPORT_SYMBOL(agp_generic_mask_memory);
+
+int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
+ int type)
+{
+ if (type >= AGP_USER_TYPES)
+ return 0;
+ return type;
+}
+EXPORT_SYMBOL(agp_generic_type_to_mask_type);
+
+/*
+ * These functions are implemented according to the AGPv3 spec,
+ * which covers implementation details that had previously been
+ * left open.
+ */
+
+int agp3_generic_fetch_size(void)
+{
+ u16 temp_size;
+ int i;
+ struct aper_size_info_16 *values;
+
+ pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
+ values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp_size == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(agp3_generic_fetch_size);
+
+void agp3_generic_tlbflush(struct agp_memory *mem)
+{
+ u32 ctrl;
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
+}
+EXPORT_SYMBOL(agp3_generic_tlbflush);
+
+int agp3_generic_configure(void)
+{
+ u32 temp;
+ struct aper_size_info_16 *current_size;
+
+ current_size = A_SIZE_16(agp_bridge->current_size);
+
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* set aperture size */
+ pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
+ /* set gart pointer */
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
+ /* enable aperture and GTLB */
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
+ return 0;
+}
+EXPORT_SYMBOL(agp3_generic_configure);
+
+void agp3_generic_cleanup(void)
+{
+ u32 ctrl;
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
+ pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
+}
+EXPORT_SYMBOL(agp3_generic_cleanup);
+
+const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
+{
+ {4096, 1048576, 10,0x000},
+ {2048, 524288, 9, 0x800},
+ {1024, 262144, 8, 0xc00},
+ { 512, 131072, 7, 0xe00},
+ { 256, 65536, 6, 0xf00},
+ { 128, 32768, 5, 0xf20},
+ { 64, 16384, 4, 0xf30},
+ { 32, 8192, 3, 0xf38},
+ { 16, 4096, 2, 0xf3c},
+ { 8, 2048, 1, 0xf3e},
+ { 4, 1024, 0, 0xf3f}
+};
+EXPORT_SYMBOL(agp3_generic_sizes);
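+
+/*
+ * The rows above are internally consistent: num_entries is the aperture
+ * size in bytes divided by the 4 KB page size (e.g. 4096 MB / 4 KB =
+ * 1048576 entries), and page_order sizes the GATT itself (1048576
+ * four-byte entries = 4 MB = 2^10 pages, hence page_order 10).
+ */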
+
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
new file mode 100644
index 00000000..056b289a
--- /dev/null
+++ b/drivers/char/agp/hp-agp.c
@@ -0,0 +1,551 @@
+/*
+ * HP zx1 AGPGART routines.
+ *
+ * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+
+#include <asm/acpi-ext.h>
+
+#include "agp.h"
+
+#define HP_ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
+
+/* HP ZX1 IOC registers */
+#define HP_ZX1_IBASE 0x300
+#define HP_ZX1_IMASK 0x308
+#define HP_ZX1_PCOM 0x310
+#define HP_ZX1_TCNFG 0x318
+#define HP_ZX1_PDIR_BASE 0x320
+
+#define HP_ZX1_IOVA_BASE GB(1UL)
+#define HP_ZX1_IOVA_SIZE GB(1UL)
+#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2)
+#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
+
+#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL
+#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift)
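+/*
+ * Example: with 4 KB IO pages (io_tlb_shift == 12) and the IOVA region
+ * based at 1 GB, HP_ZX1_IOVA_TO_PDIR(0x40001000) yields PDIR index 1:
+ * (0x40001000 - 0x40000000) >> 12 == 1.
+ */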
+
+#define AGP8X_MODE_BIT 3
+#define AGP8X_MODE (1 << AGP8X_MODE_BIT)
+
+/* The AGP bridge need not be a PCI device, but DRM thinks it is. */
+static struct pci_dev fake_bridge_dev;
+
+static int hp_zx1_gart_found;
+
+static struct aper_size_info_fixed hp_zx1_sizes[] =
+{
+ {0, 0, 0}, /* filled in by hp_zx1_fetch_size() */
+};
+
+static struct gatt_mask hp_zx1_masks[] =
+{
+ {.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0}
+};
+
+static struct _hp_private {
+ volatile u8 __iomem *ioc_regs;
+ volatile u8 __iomem *lba_regs;
+ int lba_cap_offset;
+ u64 *io_pdir; // PDIR for entire IOVA
+ u64 *gatt; // PDIR just for GART (subset of above)
+ u64 gatt_entries;
+ u64 iova_base;
+ u64 gart_base;
+ u64 gart_size;
+ u64 io_pdir_size;
+ int io_pdir_owner; // do we own it, or share it with sba_iommu?
+ int io_page_size;
+ int io_tlb_shift;
+ int io_tlb_ps; // IOC ps config
+ int io_pages_per_kpage;
+} hp_private;
+
+static int __init hp_zx1_ioc_shared(void)
+{
+ struct _hp_private *hp = &hp_private;
+
+ printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");
+
+ /*
+ * IOC already configured by sba_iommu module; just use
+ * its setup. We assume:
+	 * - IOVA space is 1 GB in size
+	 * - first 512 MB is IOMMU, second 512 MB is GART
+ */
+ hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG);
+ switch (hp->io_tlb_ps) {
+ case 0: hp->io_tlb_shift = 12; break;
+ case 1: hp->io_tlb_shift = 13; break;
+ case 2: hp->io_tlb_shift = 14; break;
+ case 3: hp->io_tlb_shift = 16; break;
+ default:
+ printk(KERN_ERR PFX "Invalid IOTLB page size "
+ "configuration 0x%x\n", hp->io_tlb_ps);
+ hp->gatt = NULL;
+ hp->gatt_entries = 0;
+ return -ENODEV;
+ }
+ hp->io_page_size = 1 << hp->io_tlb_shift;
+ hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
+
+ hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1;
+ hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
+
+ hp->gart_size = HP_ZX1_GART_SIZE;
+ hp->gatt_entries = hp->gart_size / hp->io_page_size;
+
+ hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
+ hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
+
+ if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
+ /* Normal case when no AGP device in system */
+ hp->gatt = NULL;
+ hp->gatt_entries = 0;
+ printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
+ "GART disabled\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init
+hp_zx1_ioc_owner (void)
+{
+ struct _hp_private *hp = &hp_private;
+
+ printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");
+
+ /*
+ * Select an IOV page size no larger than system page size.
+ */
+ if (PAGE_SIZE >= KB(64)) {
+ hp->io_tlb_shift = 16;
+ hp->io_tlb_ps = 3;
+ } else if (PAGE_SIZE >= KB(16)) {
+ hp->io_tlb_shift = 14;
+ hp->io_tlb_ps = 2;
+ } else if (PAGE_SIZE >= KB(8)) {
+ hp->io_tlb_shift = 13;
+ hp->io_tlb_ps = 1;
+ } else {
+ hp->io_tlb_shift = 12;
+ hp->io_tlb_ps = 0;
+ }
+ hp->io_page_size = 1 << hp->io_tlb_shift;
+ hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
+
+ hp->iova_base = HP_ZX1_IOVA_BASE;
+ hp->gart_size = HP_ZX1_GART_SIZE;
+ hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;
+
+ hp->gatt_entries = hp->gart_size / hp->io_page_size;
+ hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);
+
+ return 0;
+}
+
+static int __init
+hp_zx1_ioc_init (u64 hpa)
+{
+ struct _hp_private *hp = &hp_private;
+
+ hp->ioc_regs = ioremap(hpa, 1024);
+ if (!hp->ioc_regs)
+ return -ENOMEM;
+
+ /*
+ * If the IOTLB is currently disabled, we can take it over.
+ * Otherwise, we have to share with sba_iommu.
+ */
+ hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0;
+
+ if (hp->io_pdir_owner)
+ return hp_zx1_ioc_owner();
+
+ return hp_zx1_ioc_shared();
+}
+
+static int
+hp_zx1_lba_find_capability (volatile u8 __iomem *hpa, int cap)
+{
+ u16 status;
+ u8 pos, id;
+ int ttl = 48;
+
+ status = readw(hpa+PCI_STATUS);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+ pos = readb(hpa+PCI_CAPABILITY_LIST);
+ while (ttl-- && pos >= 0x40) {
+ pos &= ~3;
+ id = readb(hpa+pos+PCI_CAP_LIST_ID);
+ if (id == 0xff)
+ break;
+ if (id == cap)
+ return pos;
+ pos = readb(hpa+pos+PCI_CAP_LIST_NEXT);
+ }
+ return 0;
+}
+
+static int __init
+hp_zx1_lba_init (u64 hpa)
+{
+ struct _hp_private *hp = &hp_private;
+ int cap;
+
+ hp->lba_regs = ioremap(hpa, 256);
+ if (!hp->lba_regs)
+ return -ENOMEM;
+
+ hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP);
+
+ cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff;
+ if (cap != PCI_CAP_ID_AGP) {
+ printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n",
+ cap, hp->lba_cap_offset);
+ iounmap(hp->lba_regs);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int
+hp_zx1_fetch_size(void)
+{
+ int size;
+
+ size = hp_private.gart_size / MB(1);
+ hp_zx1_sizes[0].size = size;
+ agp_bridge->current_size = (void *) &hp_zx1_sizes[0];
+ return size;
+}
+
+static int
+hp_zx1_configure (void)
+{
+ struct _hp_private *hp = &hp_private;
+
+ agp_bridge->gart_bus_addr = hp->gart_base;
+ agp_bridge->capndx = hp->lba_cap_offset;
+ agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
+
+ if (hp->io_pdir_owner) {
+ writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
+ readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
+ writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
+ readl(hp->ioc_regs+HP_ZX1_TCNFG);
+ writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK);
+ readl(hp->ioc_regs+HP_ZX1_IMASK);
+ writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
+ readl(hp->ioc_regs+HP_ZX1_IBASE);
+ writel(hp->iova_base|ilog2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
+ readl(hp->ioc_regs+HP_ZX1_PCOM);
+ }
+
+ return 0;
+}
+
+static void
+hp_zx1_cleanup (void)
+{
+ struct _hp_private *hp = &hp_private;
+
+ if (hp->ioc_regs) {
+ if (hp->io_pdir_owner) {
+ writeq(0, hp->ioc_regs+HP_ZX1_IBASE);
+ readq(hp->ioc_regs+HP_ZX1_IBASE);
+ }
+ iounmap(hp->ioc_regs);
+ }
+ if (hp->lba_regs)
+ iounmap(hp->lba_regs);
+}
+
+static void
+hp_zx1_tlbflush (struct agp_memory *mem)
+{
+ struct _hp_private *hp = &hp_private;
+
+ writeq(hp->gart_base | ilog2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
+ readq(hp->ioc_regs+HP_ZX1_PCOM);
+}
+
+static int
+hp_zx1_create_gatt_table (struct agp_bridge_data *bridge)
+{
+ struct _hp_private *hp = &hp_private;
+ int i;
+
+ if (hp->io_pdir_owner) {
+ hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
+ get_order(hp->io_pdir_size));
+ if (!hp->io_pdir) {
+ printk(KERN_ERR PFX "Couldn't allocate contiguous "
+ "memory for I/O PDIR\n");
+ hp->gatt = NULL;
+ hp->gatt_entries = 0;
+ return -ENOMEM;
+ }
+ memset(hp->io_pdir, 0, hp->io_pdir_size);
+
+ hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
+ }
+
+ for (i = 0; i < hp->gatt_entries; i++) {
+ hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;
+ }
+
+ return 0;
+}
+
+static int
+hp_zx1_free_gatt_table (struct agp_bridge_data *bridge)
+{
+ struct _hp_private *hp = &hp_private;
+
+ if (hp->io_pdir_owner)
+ free_pages((unsigned long) hp->io_pdir,
+ get_order(hp->io_pdir_size));
+ else
+ hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
+ return 0;
+}
+
+static int
+hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
+{
+ struct _hp_private *hp = &hp_private;
+ int i, k;
+ off_t j, io_pg_start;
+ int io_pg_count;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
+ io_pg_start = hp->io_pages_per_kpage * pg_start;
+ io_pg_count = hp->io_pages_per_kpage * mem->page_count;
+ if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
+ return -EINVAL;
+ }
+
+ j = io_pg_start;
+ while (j < (io_pg_start + io_pg_count)) {
+ if (hp->gatt[j]) {
+ return -EBUSY;
+ }
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
+ unsigned long paddr;
+
+ paddr = page_to_phys(mem->pages[i]);
+ for (k = 0;
+ k < hp->io_pages_per_kpage;
+ k++, j++, paddr += hp->io_page_size) {
+ hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr;
+ }
+ }
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int
+hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
+{
+ struct _hp_private *hp = &hp_private;
+ int i, io_pg_start, io_pg_count;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
+ io_pg_start = hp->io_pages_per_kpage * pg_start;
+ io_pg_count = hp->io_pages_per_kpage * mem->page_count;
+ for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
+ hp->gatt[i] = agp_bridge->scratch_page;
+ }
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static unsigned long
+hp_zx1_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type)
+{
+ return HP_ZX1_PDIR_VALID_BIT | addr;
+}
+
+static void
+hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode)
+{
+ struct _hp_private *hp = &hp_private;
+ u32 command;
+
+ command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
+ command = agp_collect_device_status(bridge, mode, command);
+ command |= 0x00000100;
+
+ writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND);
+
+ agp_device_command(command, (mode & AGP8X_MODE) != 0);
+}
+
+const struct agp_bridge_driver hp_zx1_driver = {
+ .owner = THIS_MODULE,
+ .size_type = FIXED_APER_SIZE,
+ .configure = hp_zx1_configure,
+ .fetch_size = hp_zx1_fetch_size,
+ .cleanup = hp_zx1_cleanup,
+ .tlb_flush = hp_zx1_tlbflush,
+ .mask_memory = hp_zx1_mask_memory,
+ .masks = hp_zx1_masks,
+ .agp_enable = hp_zx1_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = hp_zx1_create_gatt_table,
+ .free_gatt_table = hp_zx1_free_gatt_table,
+ .insert_memory = hp_zx1_insert_memory,
+ .remove_memory = hp_zx1_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ .cant_use_aperture = true,
+};
+
+static int __init
+hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa)
+{
+ struct agp_bridge_data *bridge;
+ int error = 0;
+
+ error = hp_zx1_ioc_init(ioc_hpa);
+ if (error)
+ goto fail;
+
+ error = hp_zx1_lba_init(lba_hpa);
+ if (error)
+ goto fail;
+
+ bridge = agp_alloc_bridge();
+ if (!bridge) {
+ error = -ENOMEM;
+ goto fail;
+ }
+ bridge->driver = &hp_zx1_driver;
+
+ fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
+ fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA;
+ bridge->dev = &fake_bridge_dev;
+
+ error = agp_add_bridge(bridge);
+ fail:
+ if (error)
+ hp_zx1_cleanup();
+ return error;
+}
+
+static acpi_status __init
+zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
+{
+ acpi_handle handle, parent;
+ acpi_status status;
+ struct acpi_device_info *info;
+ u64 lba_hpa, sba_hpa, length;
+ int match;
+
+ status = hp_acpi_csr_space(obj, &lba_hpa, &length);
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* keep looking for another bridge */
+
+ /* Look for an enclosing IOC scope and find its CSR space */
+ handle = obj;
+ do {
+ status = acpi_get_object_info(handle, &info);
+ if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) {
+ /* TBD check _CID also */
+ match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
+ kfree(info);
+ if (match) {
+ status = hp_acpi_csr_space(handle, &sba_hpa, &length);
+ if (ACPI_SUCCESS(status))
+ break;
+ else {
+ printk(KERN_ERR PFX "Detected HP ZX1 "
+ "AGP LBA but no IOC.\n");
+ return AE_OK;
+ }
+ }
+ }
+
+ status = acpi_get_parent(handle, &parent);
+ handle = parent;
+ } while (ACPI_SUCCESS(status));
+
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* found no enclosing IOC */
+
+ if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
+ return AE_OK;
+
+ printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset "
+ "(ioc=%llx, lba=%llx)\n", (char *)context,
+ sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
+
+ hp_zx1_gart_found = 1;
+ return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */
+}
+
+static int __init
+agp_hp_init (void)
+{
+ if (agp_off)
+ return -EINVAL;
+
+ acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003", NULL);
+ if (hp_zx1_gart_found)
+ return 0;
+
+ acpi_get_devices("HWP0007", zx1_gart_probe, "HWP0007", NULL);
+ if (hp_zx1_gart_found)
+ return 0;
+
+ return -ENODEV;
+}
+
+static void __exit
+agp_hp_cleanup (void)
+{
+}
+
+module_init(agp_hp_init);
+module_exit(agp_hp_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
new file mode 100644
index 00000000..75b763cb
--- /dev/null
+++ b/drivers/char/agp/i460-agp.c
@@ -0,0 +1,659 @@
+/*
+ * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of
+ * the "Intel 460GTX Chipset Software Developer's Manual":
+ * http://www.intel.com/design/archives/itanium/downloads/248704.htm
+ */
+/*
+ * 460GX support by Chris Ahna <christopher.j.ahna@intel.com>
+ * Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/agp_backend.h>
+#include <linux/log2.h>
+
+#include "agp.h"
+
+#define INTEL_I460_BAPBASE 0x98
+#define INTEL_I460_GXBCTL 0xa0
+#define INTEL_I460_AGPSIZ 0xa2
+#define INTEL_I460_ATTBASE 0xfe200000
+#define INTEL_I460_GATT_VALID (1UL << 24)
+#define INTEL_I460_GATT_COHERENT (1UL << 25)
+
+/*
+ * The i460 can operate with large (4MB) pages, but there is no sane way to support this
+ * within the current kernel/DRM environment, so we disable the relevant code for now.
+ * See also the comments in i460_alloc_page()...
+ */
+#define I460_LARGE_IO_PAGES 0
+
+#if I460_LARGE_IO_PAGES
+# define I460_IO_PAGE_SHIFT i460.io_page_shift
+#else
+# define I460_IO_PAGE_SHIFT 12
+#endif
+
+#define I460_IOPAGES_PER_KPAGE (PAGE_SIZE >> I460_IO_PAGE_SHIFT)
+#define I460_KPAGES_PER_IOPAGE (1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))
+#define I460_SRAM_IO_DISABLE (1 << 4)
+#define I460_BAPBASE_ENABLE (1 << 3)
+#define I460_AGPSIZ_MASK 0x7
+#define I460_4M_PS (1 << 1)
+
+/* Control bits for Out-Of-GART coherency and Burst Write Combining */
+#define I460_GXBCTL_OOG (1UL << 0)
+#define I460_GXBCTL_BWC (1UL << 2)
+
+/*
+ * gatt_table entries are 32 bits wide on the i460; the generic code ought to declare the
+ * gatt_table and gatt_table_real pointers as "void *"...
+ */
+#define RD_GATT(index) readl((u32 *) i460.gatt + (index))
+#define WR_GATT(index, val) writel((val), (u32 *) i460.gatt + (index))
+/*
+ * The 460 spec says we have to read the last location written to make sure that all
+ * writes have taken effect
+ */
+#define WR_FLUSH_GATT(index) RD_GATT(index)
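+/*
+ * Typical pattern: batch the GATT writes, then force them out with a single
+ * read-back of the last entry written, e.g.
+ *
+ *     for (i = 0; i < n; i++)
+ *             WR_GATT(base + i, pte);
+ *     WR_FLUSH_GATT(base + n - 1);
+ */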
+
+static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
+ dma_addr_t addr, int type);
+
+static struct {
+ void *gatt; /* ioremap'd GATT area */
+
+ /* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */
+ u8 io_page_shift;
+
+ /* BIOS configures chipset to one of 2 possible apbase values: */
+ u8 dynamic_apbase;
+
+ /* structure for tracking partial use of 4MB GART pages: */
+ struct lp_desc {
+ unsigned long *alloced_map; /* bitmap of kernel-pages in use */
+ int refcount; /* number of kernel pages using the large page */
+ u64 paddr; /* physical address of large page */
+ struct page *page; /* page pointer */
+ } *lp_desc;
+} i460;
+
+static const struct aper_size_info_8 i460_sizes[3] =
+{
+ /*
+ * The 32GB aperture is only available with a 4M GART page size. Due to the
+ * dynamic GART page size, we can't figure out page_order or num_entries until
+ * runtime.
+ */
+ {32768, 0, 0, 4},
+ {1024, 0, 0, 2},
+ {256, 0, 0, 1}
+};
+
+static struct gatt_mask i460_masks[] =
+{
+ {
+ .mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
+ .type = 0
+ }
+};
+
+static int i460_fetch_size (void)
+{
+ int i;
+ u8 temp;
+ struct aper_size_info_8 *values;
+
+ /* Determine the GART page size */
+ pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp);
+ i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12;
+ pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift);
+
+ if (i460.io_page_shift != I460_IO_PAGE_SHIFT) {
+ printk(KERN_ERR PFX
+ "I/O (GART) page-size %luKB doesn't match expected "
+ "size %luKB\n",
+ 1UL << (i460.io_page_shift - 10),
+ 1UL << (I460_IO_PAGE_SHIFT - 10));
+ return 0;
+ }
+
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+
+ pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
+
+ /* Exit now if the IO drivers for the GART SRAMS are turned off */
+ if (temp & I460_SRAM_IO_DISABLE) {
+ printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n");
+ printk(KERN_ERR PFX "AGPGART operation not possible\n");
+ return 0;
+ }
+
+ /*
+ * Make sure we don't try to create a 2^23 entry GATT: a 32GB aperture
+ * with 4KB GART pages would need (32768 << 8) = 2^23 entries, i.e. a
+ * 32MB table.
+ */
+ if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) {
+ printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n");
+ return 0;
+ }
+
+ /* Determine the proper APBASE register */
+ if (temp & I460_BAPBASE_ENABLE)
+ i460.dynamic_apbase = INTEL_I460_BAPBASE;
+ else
+ i460.dynamic_apbase = AGP_APBASE;
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ /*
+ * Dynamically calculate the proper num_entries and page_order values for
+ * the defined aperture sizes. Take care not to shift off the end of
+ * values[i].size.
+ */
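+ /*
+ * For example, with 4KB GART pages the 256MB aperture gets
+ * num_entries = (256 << 8) = 65536 and page_order =
+ * ilog2((4 * 65536) >> PAGE_SHIFT) = 6, i.e. a 256KB GATT.
+ */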
+ values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12);
+ values[i].page_order = ilog2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
+ }
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ /* Neglect control bits when matching up size_value */
+ if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) {
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+/* There isn't anything to do here since 460 has no GART TLB. */
+static void i460_tlb_flush (struct agp_memory *mem)
+{
+ return;
+}
+
+/*
+ * This utility function is needed to prevent corruption of the control bits
+ * which are stored along with the aperture size in 460's AGPSIZ register
+ */
+static void i460_write_agpsiz (u8 size_value)
+{
+ u8 temp;
+
+ pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ,
+ ((temp & ~I460_AGPSIZ_MASK) | size_value));
+}
+
+static void i460_cleanup (void)
+{
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ i460_write_agpsiz(previous_size->size_value);
+
+ if (I460_IO_PAGE_SHIFT > PAGE_SHIFT)
+ kfree(i460.lp_desc);
+}
+
+static int i460_configure (void)
+{
+ union {
+ u32 small[2];
+ u64 large;
+ } temp;
+ size_t size;
+ u8 scratch;
+ struct aper_size_info_8 *current_size;
+
+ temp.large = 0;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+ i460_write_agpsiz(current_size->size_value);
+
+ /*
+ * Do the necessary rigmarole to read all eight bytes of APBASE.
+ * This has to be done since the AGP aperture can be above 4GB on
+ * 460 based systems.
+ */
+ pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0]));
+ pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1]));
+
+ /* Clear BAR control bits */
+ agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1);
+
+ pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL,
+ (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);
+
+ /*
+ * Initialize partial allocation trackers if a GART page is bigger than a kernel
+ * page.
+ */
+ if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) {
+ size = current_size->num_entries * sizeof(i460.lp_desc[0]);
+ i460.lp_desc = kzalloc(size, GFP_KERNEL);
+ if (!i460.lp_desc)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int i460_create_gatt_table (struct agp_bridge_data *bridge)
+{
+ int page_order, num_entries, i;
+ void *temp;
+
+ /*
+ * Load up the fixed address of the GART SRAMS which hold our GATT table.
+ */
+ temp = agp_bridge->current_size;
+ page_order = A_SIZE_8(temp)->page_order;
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order);
+ if (!i460.gatt) {
+ printk(KERN_ERR PFX "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ /* These are no good; they should be removed from the agp_bridge structure... */
+ agp_bridge->gatt_table_real = NULL;
+ agp_bridge->gatt_table = NULL;
+ agp_bridge->gatt_bus_addr = 0;
+
+ for (i = 0; i < num_entries; ++i)
+ WR_GATT(i, 0);
+ WR_FLUSH_GATT(i - 1);
+ return 0;
+}
+
+static int i460_free_gatt_table (struct agp_bridge_data *bridge)
+{
+ int num_entries, i;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ for (i = 0; i < num_entries; ++i)
+ WR_GATT(i, 0);
+ WR_FLUSH_GATT(num_entries - 1);
+
+ iounmap(i460.gatt);
+ return 0;
+}
+
+/*
+ * The following functions are called when the I/O (GART) page size is smaller than
+ * PAGE_SIZE.
+ */
+
+static int i460_insert_memory_small_io_page (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ unsigned long paddr, io_pg_start, io_page_size;
+ int i, j, k, num_entries;
+ void *temp;
+
+ pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
+ mem, pg_start, type, page_to_phys(mem->pages[0]));
+
+ if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+ return -EINVAL;
+
+ io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {
+ printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
+ return -EINVAL;
+ }
+
+ j = io_pg_start;
+ while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
+ if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) {
+ pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
+ j, RD_GATT(j));
+ return -EBUSY;
+ }
+ j++;
+ }
+
+ io_page_size = 1UL << I460_IO_PAGE_SHIFT;
+ for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
+ paddr = page_to_phys(mem->pages[i]);
+ for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
+ WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
+ }
+ WR_FLUSH_GATT(j - 1);
+ return 0;
+}
+
+static int i460_remove_memory_small_io_page(struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i;
+
+ pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n",
+ mem, pg_start, type);
+
+ pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
+
+ for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)
+ WR_GATT(i, 0);
+ WR_FLUSH_GATT(i - 1);
+ return 0;
+}
+
+#if I460_LARGE_IO_PAGES
+
+/*
+ * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE.
+ *
+ * This situation is interesting since AGP memory allocations that are smaller than a
+ * single GART page are possible. The i460.lp_desc array tracks partial allocation of the
+ * large GART pages to work around this issue.
+ *
+ * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page
+ * pg_num. i460.lp_desc[pg_num].paddr is the physical address of the large page and
+ * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated).
+ */
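+/*
+ * With 4MB GART pages and 4KB kernel pages, I460_KPAGES_PER_IOPAGE is 1024,
+ * so each alloced_map is a 1024-bit (128-byte) bitmap and refcount counts
+ * between 0 and 1024 kernel pages.
+ */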
+
+static int i460_alloc_large_page (struct lp_desc *lp)
+{
+ unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
+ size_t map_size;
+
+ lp->page = alloc_pages(GFP_KERNEL, order);
+ if (!lp->page) {
+ printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
+ return -ENOMEM;
+ }
+
+ map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
+ lp->alloced_map = kzalloc(map_size, GFP_KERNEL);
+ if (!lp->alloced_map) {
+ __free_pages(lp->page, order);
+ printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
+ return -ENOMEM;
+ }
+
+ lp->paddr = page_to_phys(lp->page);
+ lp->refcount = 0;
+ atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
+ return 0;
+}
+
+static void i460_free_large_page (struct lp_desc *lp)
+{
+ kfree(lp->alloced_map);
+ lp->alloced_map = NULL;
+
+ __free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT);
+ atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
+}
+
+static int i460_insert_memory_large_io_page (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i, start_offset, end_offset, idx, pg, num_entries;
+ struct lp_desc *start, *end, *lp;
+ void *temp;
+
+ if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+ return -EINVAL;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ /* Figure out what pg_start means in terms of our large GART pages */
+ start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
+ end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
+ start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
+ end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
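+ /*
+ * E.g. with 1024 kernel pages per GART page, pg_start = 1000 and
+ * page_count = 100 span lp_desc[0] (offsets 1000-1023) and
+ * lp_desc[1] (offsets 0-75).
+ */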
+
+ if (end > i460.lp_desc + num_entries) {
+ printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
+ return -EINVAL;
+ }
+
+ /* Check if the requested region of the aperture is free */
+ for (lp = start; lp <= end; ++lp) {
+ if (!lp->alloced_map)
+ continue; /* OK, the entire large page is available... */
+
+ for (idx = ((lp == start) ? start_offset : 0);
+ idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
+ idx++)
+ {
+ if (test_bit(idx, lp->alloced_map))
+ return -EBUSY;
+ }
+ }
+
+ for (lp = start, i = 0; lp <= end; ++lp) {
+ if (!lp->alloced_map) {
+ /* Allocate new GART pages... */
+ if (i460_alloc_large_page(lp) < 0)
+ return -ENOMEM;
+ pg = lp - i460.lp_desc;
+ WR_GATT(pg, i460_mask_memory(agp_bridge,
+ lp->paddr, 0));
+ WR_FLUSH_GATT(pg);
+ }
+
+ for (idx = ((lp == start) ? start_offset : 0);
+ idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
+ idx++, i++)
+ {
+ mem->pages[i] = lp->page;
+ __set_bit(idx, lp->alloced_map);
+ ++lp->refcount;
+ }
+ }
+ return 0;
+}
+
+static int i460_remove_memory_large_io_page (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i, pg, start_offset, end_offset, idx, num_entries;
+ struct lp_desc *start, *end, *lp;
+ void *temp;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_8(temp)->num_entries;
+
+ /* Figure out what pg_start means in terms of our large GART pages */
+ start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
+ end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
+ start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
+ end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
+
+ for (i = 0, lp = start; lp <= end; ++lp) {
+ for (idx = ((lp == start) ? start_offset : 0);
+ idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
+ idx++, i++)
+ {
+ mem->pages[i] = NULL;
+ __clear_bit(idx, lp->alloced_map);
+ --lp->refcount;
+ }
+
+ /* Free GART pages if they are unused */
+ if (lp->refcount == 0) {
+ pg = lp - i460.lp_desc;
+ WR_GATT(pg, 0);
+ WR_FLUSH_GATT(pg);
+ i460_free_large_page(lp);
+ }
+ }
+ return 0;
+}
+
+/* Wrapper routines to call the appropriate {small_io_page,large_io_page} function */
+
+static int i460_insert_memory (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
+ return i460_insert_memory_small_io_page(mem, pg_start, type);
+ else
+ return i460_insert_memory_large_io_page(mem, pg_start, type);
+}
+
+static int i460_remove_memory (struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
+ return i460_remove_memory_small_io_page(mem, pg_start, type);
+ else
+ return i460_remove_memory_large_io_page(mem, pg_start, type);
+}
+
+/*
+ * If the I/O (GART) page size is bigger than the kernel page size, we don't want to
+ * allocate memory until we know where it is to be bound in the aperture (a
+ * multi-kernel-page alloc might fit inside of an already allocated GART page).
+ *
+ * Let's just hope nobody counts on the allocated AGP memory being there before bind time
+ * (I don't think current drivers do)...
+ */
+static struct page *i460_alloc_page (struct agp_bridge_data *bridge)
+{
+ void *page;
+
+ if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
+ page = agp_generic_alloc_page(agp_bridge);
+ } else
+ /* Returning NULL would cause problems */
+ /* AK: really dubious code. */
+ page = (void *)~0UL;
+ return page;
+}
+
+static void i460_destroy_page (struct page *page, int flags)
+{
+ if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
+ agp_generic_destroy_page(page, flags);
+ }
+}
+
+#endif /* I460_LARGE_IO_PAGES */
+
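+/*
+ * For example, with 4KB GART pages an addr of 0x12345000 masks to GATT
+ * entry 0x03012345: page frame 0x12345 in bits 0-19, plus the VALID
+ * (bit 24) and COHERENT (bit 25) flags from i460_masks[0].
+ */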
+static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
+ dma_addr_t addr, int type)
+{
+ /* Make sure the returned address is a valid GATT entry */
+ return bridge->driver->masks[0].mask
+ | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
+}
+
+const struct agp_bridge_driver intel_i460_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = i460_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 3,
+ .configure = i460_configure,
+ .fetch_size = i460_fetch_size,
+ .cleanup = i460_cleanup,
+ .tlb_flush = i460_tlb_flush,
+ .mask_memory = i460_mask_memory,
+ .masks = i460_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = i460_create_gatt_table,
+ .free_gatt_table = i460_free_gatt_table,
+#if I460_LARGE_IO_PAGES
+ .insert_memory = i460_insert_memory,
+ .remove_memory = i460_remove_memory,
+ .agp_alloc_page = i460_alloc_page,
+ .agp_destroy_page = i460_destroy_page,
+#else
+ .insert_memory = i460_insert_memory_small_io_page,
+ .remove_memory = i460_remove_memory_small_io_page,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+#endif
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ .cant_use_aperture = true,
+};
+
+static int __devinit agp_intel_i460_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &intel_i460_driver;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ printk(KERN_INFO PFX "Detected Intel 460GX chipset\n");
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_intel_i460_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static struct pci_device_id agp_intel_i460_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_84460GX,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table);
+
+static struct pci_driver agp_intel_i460_pci_driver = {
+ .name = "agpgart-intel-i460",
+ .id_table = agp_intel_i460_pci_table,
+ .probe = agp_intel_i460_probe,
+ .remove = __devexit_p(agp_intel_i460_remove),
+};
+
+static int __init agp_intel_i460_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_intel_i460_pci_driver);
+}
+
+static void __exit agp_intel_i460_cleanup(void)
+{
+ pci_unregister_driver(&agp_intel_i460_pci_driver);
+}
+
+module_init(agp_intel_i460_init);
+module_exit(agp_intel_i460_cleanup);
+
+MODULE_AUTHOR("Chris Ahna <Christopher.J.Ahna@intel.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
new file mode 100644
index 00000000..58b49d1a
--- /dev/null
+++ b/drivers/char/agp/intel-agp.c
@@ -0,0 +1,941 @@
+/*
+ * Intel AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <asm/smp.h>
+#include "agp.h"
+#include "intel-agp.h"
+
+int intel_agp_enabled;
+EXPORT_SYMBOL(intel_agp_enabled);
+
+static int intel_fetch_size(void)
+{
+ int i;
+ u16 temp;
+ struct aper_size_info_16 *values;
+
+ pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
+ values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static int __intel_8xx_fetch_size(u8 temp)
+{
+ int i;
+ struct aper_size_info_8 *values;
+
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ return 0;
+}
+
+static int intel_8xx_fetch_size(void)
+{
+ u8 temp;
+
+ pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
+ return __intel_8xx_fetch_size(temp);
+}
+
+static int intel_815_fetch_size(void)
+{
+ u8 temp;
+
+ /* Intel 815 chipsets have a _weird_ APSIZE register with only
+ * one non-reserved bit, so mask the others out ... */
+ pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
+ temp &= (1 << 3);
+
+ return __intel_8xx_fetch_size(temp);
+}
+
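+/*
+ * Flush the GART TLB by clearing and re-setting AGPCTRL bit 7 (0x80),
+ * presumably the GTLB enable bit: 0x2200 -> 0x2280.
+ */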
+static void intel_tlbflush(struct agp_memory *mem)
+{
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+}
+
+
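+/* Same GTLB toggle as above, but preserving the other AGPCTRL bits. */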
+static void intel_8xx_tlbflush(struct agp_memory *mem)
+{
+ u32 temp;
+ pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp & ~(1 << 7));
+ pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp | (1 << 7));
+}
+
+
+static void intel_cleanup(void)
+{
+ u16 temp;
+ struct aper_size_info_16 *previous_size;
+
+ previous_size = A_SIZE_16(agp_bridge->previous_size);
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
+ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
+}
+
+
+static void intel_8xx_cleanup(void)
+{
+ u16 temp;
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
+}
+
+
+static int intel_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_16 *current_size;
+
+ current_size = A_SIZE_16(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
+
+ /* paccfg/nbxcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
+ (temp2 & ~(1 << 10)) | (1 << 9));
+ /* clear any possible error conditions */
+ pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
+ return 0;
+}
+
+static int intel_815_configure(void)
+{
+ u32 temp, addr;
+ u8 temp2;
+ struct aper_size_info_8 *current_size;
+
+ /* attbase - aperture base */
+ /* the Intel 815 chipset spec says that bits 29-31 of the
+ * ATTBASE register are reserved, so try not to write them */
+ if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) {
+ dev_emerg(&agp_bridge->dev->dev, "gatt bus addr too high\n");
+ return -EINVAL;
+ }
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
+ current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr);
+ addr &= INTEL_815_ATTBASE_MASK;
+ addr |= agp_bridge->gatt_bus_addr;
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* apcont */
+ pci_read_config_byte(agp_bridge->dev, INTEL_815_APCONT, &temp2);
+ pci_write_config_byte(agp_bridge->dev, INTEL_815_APCONT, temp2 | (1 << 1));
+
+ /* clear any possible error conditions */
+ /* Oddness: this chipset seems to have no ERRSTS register! */
+ return 0;
+}
+
+static void intel_820_tlbflush(struct agp_memory *mem)
+{
+ return;
+}
+
+static void intel_820_cleanup(void)
+{
+ u8 temp;
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR,
+ temp & ~(1 << 1));
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
+ previous_size->size_value);
+}
+
+
+static int intel_820_configure(void)
+{
+ u32 temp;
+ u8 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* Globally enable aperture access. Unlike on the i850 chipset,
+ * this flag is not accessed through the MCHCFG register. */
+ pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp2);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR, temp2 | (1 << 1));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I820_ERRSTS, 0x001c);
+ return 0;
+}
+
+static int intel_840_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mcgcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, temp2 | (1 << 9));
+ /* clear any possible error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I840_ERRSTS, 0xc000);
+ return 0;
+}
+
+static int intel_845_configure(void)
+{
+ u32 temp;
+ u8 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ if (agp_bridge->apbase_config != 0) {
+ pci_write_config_dword(agp_bridge->dev, AGP_APBASE,
+ agp_bridge->apbase_config);
+ } else {
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ agp_bridge->apbase_config = temp;
+ }
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* agpm */
+ pci_read_config_byte(agp_bridge->dev, INTEL_I845_AGPM, &temp2);
+ pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
+ /* clear any possible error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);
+ return 0;
+}
+
+static int intel_850_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mcgcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, temp2 | (1 << 9));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I850_ERRSTS, 0x001c);
+ return 0;
+}
+
+static int intel_860_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mcgcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, temp2 | (1 << 9));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I860_ERRSTS, 0xf700);
+ return 0;
+}
+
+static int intel_830mp_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* gmch */
+ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp2 | (1 << 9));
+ /* clear any possible AGP-related error conditions */
+ pci_write_config_word(agp_bridge->dev, INTEL_I830_ERRSTS, 0x1c);
+ return 0;
+}
+
+static int intel_7505_configure(void)
+{
+ u32 temp;
+ u16 temp2;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture base */
+ pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
+
+ /* agpctrl */
+ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
+
+ /* mchcfg */
+ pci_read_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, &temp2);
+ pci_write_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, temp2 | (1 << 9));
+
+ return 0;
+}
+
+/* Setup data: GATT masks and aperture size tables */
+static const struct gatt_mask intel_generic_masks[] =
+{
+ {.mask = 0x00000017, .type = 0}
+};
+
+static const struct aper_size_info_8 intel_815_sizes[2] =
+{
+ {64, 16384, 4, 0},
+ {32, 8192, 3, 8},
+};
+
+static const struct aper_size_info_8 intel_8xx_sizes[7] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 48},
+ {32, 8192, 3, 56},
+ {16, 4096, 2, 60},
+ {8, 2048, 1, 62},
+ {4, 1024, 0, 63}
+};
+
+static const struct aper_size_info_16 intel_generic_sizes[7] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 48},
+ {32, 8192, 3, 56},
+ {16, 4096, 2, 60},
+ {8, 2048, 1, 62},
+ {4, 1024, 0, 63}
+};
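+/*
+ * In the intel_8xx_sizes and intel_generic_sizes tables above, size_value
+ * is the APSIZE mask for that aperture: 0x00 selects the full 256MB window
+ * and each additional bit set halves it, down to 0x3f (63) for 4MB.
+ */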
+
+static const struct aper_size_info_8 intel_830mp_sizes[4] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 48},
+ {32, 8192, 3, 56}
+};
+
+static const struct agp_bridge_driver intel_generic_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_generic_sizes,
+ .size_type = U16_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = intel_configure,
+ .fetch_size = intel_fetch_size,
+ .cleanup = intel_cleanup,
+ .tlb_flush = intel_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_815_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_815_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 2,
+ .needs_scratch_page = true,
+ .configure = intel_815_configure,
+ .fetch_size = intel_815_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_820_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = intel_820_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_820_cleanup,
+ .tlb_flush = intel_820_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_830mp_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_830mp_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_830mp_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_840_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = intel_840_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_845_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = intel_845_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_850_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = intel_850_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_860_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = intel_860_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_7505_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_8xx_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = intel_7505_configure,
+ .fetch_size = intel_8xx_fetch_size,
+ .cleanup = intel_8xx_cleanup,
+ .tlb_flush = intel_8xx_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = intel_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+/* Table describing the Intel GMCH and AGP/PCIE GART drivers. The probe
+ * routine matches the host bridge's PCI device ID against chip_id to pick
+ * the agp_bridge_driver to use.
+ */
+static const struct intel_agp_driver_description {
+ unsigned int chip_id;
+ char *name;
+ const struct agp_bridge_driver *driver;
+} intel_agp_chipsets[] = {
+ { PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver },
+ { PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver },
+ { PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver },
+ { PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver },
+ { PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver },
+ { PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
+ { PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
+ { PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
+ { PCI_DEVICE_ID_INTEL_82845_HB, "i845", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82845G_HB, "845G", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
+ { PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver },
+ { PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver },
+ { PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver },
+ { 0, NULL, NULL }
+};
+
+static int __devinit agp_intel_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr = 0;
+ struct resource *r;
+ int i, err;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->capndx = cap_ptr;
+
+ if (intel_gmch_probe(pdev, bridge))
+ goto found_gmch;
+
+ for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
+ /* Several models of gfx chip may sit on the same host
+ bridge type; matching on the exact device ID makes
+ sure we detect the right IGD. */
+ if (pdev->device == intel_agp_chipsets[i].chip_id) {
+ bridge->driver = intel_agp_chipsets[i].driver;
+ break;
+ }
+ }
+
+ if (!bridge->driver) {
+ if (cap_ptr)
+ dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
+ bridge->dev = pdev;
+ bridge->dev_private_data = NULL;
+
+ dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
+
+ /*
+ * The following fixes the case where the BIOS has "forgotten" to
+ * provide an address range for the GART.
+ * 20030610 - hamish@zot.org
+ * This happens before pci_enable_device() intentionally;
+ * calling pci_enable_device() before assigning the resource
+ * will result in the GART being disabled on machines with such
+ * BIOSs (the GART ends up with a BAR starting at 0, which
+ * conflicts with a lot of other devices).
+ */
+ r = &pdev->resource[0];
+ if (!r->start && r->end) {
+ if (pci_assign_resource(pdev, 0)) {
+ dev_err(&pdev->dev, "can't assign resource 0\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * If the device has not been properly setup, the following will catch
+ * the problem and should stop the system from crashing.
+ * 20030610 - hamish@zot.org
+ */
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "can't enable PCI device\n");
+ agp_put_bridge(bridge);
+ return -ENODEV;
+ }
+
+ /* Fill in the mode register */
+ if (cap_ptr) {
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+ }
+
+found_gmch:
+ pci_set_drvdata(pdev, bridge);
+ err = agp_add_bridge(bridge);
+ if (!err)
+ intel_agp_enabled = 1;
+ return err;
+}
+
+static void __devexit agp_intel_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+
+ intel_gmch_remove(pdev);
+
+ agp_put_bridge(bridge);
+}
+
+#ifdef CONFIG_PM
+static int agp_intel_resume(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ bridge->driver->configure();
+
+ return 0;
+}
+#endif
+
+static struct pci_device_id agp_intel_pci_table[] = {
+#define ID(x) \
+ { \
+ .class = (PCI_CLASS_BRIDGE_HOST << 8), \
+ .class_mask = ~0, \
+ .vendor = PCI_VENDOR_ID_INTEL, \
+ .device = x, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ }
+ ID(PCI_DEVICE_ID_INTEL_82443LX_0),
+ ID(PCI_DEVICE_ID_INTEL_82443BX_0),
+ ID(PCI_DEVICE_ID_INTEL_82443GX_0),
+ ID(PCI_DEVICE_ID_INTEL_82810_MC1),
+ ID(PCI_DEVICE_ID_INTEL_82810_MC3),
+ ID(PCI_DEVICE_ID_INTEL_82810E_MC),
+ ID(PCI_DEVICE_ID_INTEL_82815_MC),
+ ID(PCI_DEVICE_ID_INTEL_82820_HB),
+ ID(PCI_DEVICE_ID_INTEL_82820_UP_HB),
+ ID(PCI_DEVICE_ID_INTEL_82830_HB),
+ ID(PCI_DEVICE_ID_INTEL_82840_HB),
+ ID(PCI_DEVICE_ID_INTEL_82845_HB),
+ ID(PCI_DEVICE_ID_INTEL_82845G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82850_HB),
+ ID(PCI_DEVICE_ID_INTEL_82854_HB),
+ ID(PCI_DEVICE_ID_INTEL_82855PM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82855GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82860_HB),
+ ID(PCI_DEVICE_ID_INTEL_82865_HB),
+ ID(PCI_DEVICE_ID_INTEL_82875_HB),
+ ID(PCI_DEVICE_ID_INTEL_7505_0),
+ ID(PCI_DEVICE_ID_INTEL_7205_0),
+ ID(PCI_DEVICE_ID_INTEL_E7221_HB),
+ ID(PCI_DEVICE_ID_INTEL_82915G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82945G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
+ ID(PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_PINEVIEW_HB),
+ ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
+ ID(PCI_DEVICE_ID_INTEL_82G35_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965GME_HB),
+ ID(PCI_DEVICE_ID_INTEL_G33_HB),
+ ID(PCI_DEVICE_ID_INTEL_Q35_HB),
+ ID(PCI_DEVICE_ID_INTEL_Q33_HB),
+ ID(PCI_DEVICE_ID_INTEL_GM45_HB),
+ ID(PCI_DEVICE_ID_INTEL_EAGLELAKE_HB),
+ ID(PCI_DEVICE_ID_INTEL_Q45_HB),
+ ID(PCI_DEVICE_ID_INTEL_G45_HB),
+ ID(PCI_DEVICE_ID_INTEL_G41_HB),
+ ID(PCI_DEVICE_ID_INTEL_B43_HB),
+ ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
+ ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
+ ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
+ ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
+ ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
+
+static struct pci_driver agp_intel_pci_driver = {
+ .name = "agpgart-intel",
+ .id_table = agp_intel_pci_table,
+ .probe = agp_intel_probe,
+ .remove = __devexit_p(agp_intel_remove),
+#ifdef CONFIG_PM
+ .resume = agp_intel_resume,
+#endif
+};
+
+static int __init agp_intel_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_intel_pci_driver);
+}
+
+static void __exit agp_intel_cleanup(void)
+{
+ pci_unregister_driver(&agp_intel_pci_driver);
+}
+
+module_init(agp_intel_init);
+module_exit(agp_intel_cleanup);
+
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
new file mode 100644
index 00000000..6f246049
--- /dev/null
+++ b/drivers/char/agp/intel-agp.h
@@ -0,0 +1,242 @@
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+#ifndef _INTEL_AGP_H
+#define _INTEL_AGP_H
+
+/* Intel registers */
+#define INTEL_APSIZE 0xb4
+#define INTEL_ATTBASE 0xb8
+#define INTEL_AGPCTRL 0xb0
+#define INTEL_NBXCFG 0x50
+#define INTEL_ERRSTS 0x91
+
+/* Intel i830 registers */
+#define I830_GMCH_CTRL 0x52
+#define I830_GMCH_ENABLED 0x4
+#define I830_GMCH_MEM_MASK 0x1
+#define I830_GMCH_MEM_64M 0x1
+#define I830_GMCH_MEM_128M 0
+#define I830_GMCH_GMS_MASK 0x70
+#define I830_GMCH_GMS_DISABLED 0x00
+#define I830_GMCH_GMS_LOCAL 0x10
+#define I830_GMCH_GMS_STOLEN_512 0x20
+#define I830_GMCH_GMS_STOLEN_1024 0x30
+#define I830_GMCH_GMS_STOLEN_8192 0x40
+#define I830_RDRAM_CHANNEL_TYPE 0x03010
+#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
+#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
+
+/* This one is for I830MP w. an external graphic card */
+#define INTEL_I830_ERRSTS 0x92
+
+/* Intel 855GM/852GM registers */
+#define I855_GMCH_GMS_MASK 0xF0
+#define I855_GMCH_GMS_STOLEN_0M 0x0
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I85X_CAPID 0x44
+#define I85X_VARIANT_MASK 0x7
+#define I85X_VARIANT_SHIFT 5
+#define I855_GME 0x0
+#define I855_GM 0x4
+#define I852_GME 0x2
+#define I852_GM 0x5
+
+/* Intel i845 registers */
+#define INTEL_I845_AGPM 0x51
+#define INTEL_I845_ERRSTS 0xc8
+
+/* Intel i860 registers */
+#define INTEL_I860_MCHCFG 0x50
+#define INTEL_I860_ERRSTS 0xc8
+
+/* Intel i810 registers */
+#define I810_GMADDR 0x10
+#define I810_MMADDR 0x14
+#define I810_PTE_BASE 0x10000
+#define I810_PTE_MAIN_UNCACHED 0x00000000
+#define I810_PTE_LOCAL 0x00000002
+#define I810_PTE_VALID 0x00000001
+#define I830_PTE_SYSTEM_CACHED 0x00000006
+/* GT PTE cache control fields */
+#define GEN6_PTE_UNCACHED 0x00000002
+#define GEN6_PTE_LLC 0x00000004
+#define GEN6_PTE_LLC_MLC 0x00000006
+#define GEN6_PTE_GFDT 0x00000008
+
+#define I810_SMRAM_MISCC 0x70
+#define I810_GFX_MEM_WIN_SIZE 0x00010000
+#define I810_GFX_MEM_WIN_32M 0x00010000
+#define I810_GMS 0x000000c0
+#define I810_GMS_DISABLE 0x00000000
+#define I810_PGETBL_CTL 0x2020
+#define I810_PGETBL_ENABLED 0x00000001
+/* Note: PGETBL_CTL2 has a different offset on G33. */
+#define I965_PGETBL_CTL2 0x20c4
+#define I965_PGETBL_SIZE_MASK 0x0000000e
+#define I965_PGETBL_SIZE_512KB (0 << 1)
+#define I965_PGETBL_SIZE_256KB (1 << 1)
+#define I965_PGETBL_SIZE_128KB (2 << 1)
+#define I965_PGETBL_SIZE_1MB (3 << 1)
+#define I965_PGETBL_SIZE_2MB (4 << 1)
+#define I965_PGETBL_SIZE_1_5MB (5 << 1)
+#define G33_GMCH_SIZE_MASK (3 << 8)
+#define G33_GMCH_SIZE_1M (1 << 8)
+#define G33_GMCH_SIZE_2M (2 << 8)
+#define G4x_GMCH_SIZE_MASK (0xf << 8)
+#define G4x_GMCH_SIZE_1M (0x1 << 8)
+#define G4x_GMCH_SIZE_2M (0x3 << 8)
+#define G4x_GMCH_SIZE_VT_EN (0x8 << 8)
+#define G4x_GMCH_SIZE_VT_1M (G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_1_5M ((0x2 << 8) | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
+
+#define GFX_FLSH_CNTL 0x2170 /* 915+ */
+
+#define I810_DRAM_CTL 0x3000
+#define I810_DRAM_ROW_0 0x00000001
+#define I810_DRAM_ROW_0_SDRAM 0x00000001
+
+/* Intel 815 register */
+#define INTEL_815_APCONT 0x51
+#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
+
+/* Intel i820 registers */
+#define INTEL_I820_RDCR 0x51
+#define INTEL_I820_ERRSTS 0xc8
+
+/* Intel i840 registers */
+#define INTEL_I840_MCHCFG 0x50
+#define INTEL_I840_ERRSTS 0xc8
+
+/* Intel i850 registers */
+#define INTEL_I850_MCHCFG 0x50
+#define INTEL_I850_ERRSTS 0xc8
+
+/* intel 915G registers */
+#define I915_GMADDR 0x18
+#define I915_MMADDR 0x10
+#define I915_PTEADDR 0x1C
+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+
+#define I915_IFPADDR 0x60
+#define I830_HIC 0x70
+
+/* Intel 965G registers */
+#define I965_MSAC 0x62
+#define I965_IFPADDR 0x70
+
+/* Intel 7505 registers */
+#define INTEL_I7505_APSIZE 0x74
+#define INTEL_I7505_NCAPID 0x60
+#define INTEL_I7505_NISTAT 0x6c
+#define INTEL_I7505_ATTBASE 0x78
+#define INTEL_I7505_ERRSTS 0x42
+#define INTEL_I7505_AGPCTRL 0x70
+#define INTEL_I7505_MCHCFG 0x50
+
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
+#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
+#define SNB_GTT_SIZE_0M (0 << 8)
+#define SNB_GTT_SIZE_1M (1 << 8)
+#define SNB_GTT_SIZE_2M (2 << 8)
+#define SNB_GTT_SIZE_MASK (3 << 8)
+
+/* pci devices ids */
+#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
+#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
+#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
+#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
+#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
+#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
+#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
+#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
+#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
+#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
+#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
+#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
+#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
+#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
+#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
+#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
+#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
+#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
+#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
+#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
+#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
+#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
+#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
+#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
+#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90
+#define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92
+#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
+#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
+#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
+#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
+#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
+#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
+#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
+#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
+
+int intel_gmch_probe(struct pci_dev *pdev,
+ struct agp_bridge_data *bridge);
+void intel_gmch_remove(struct pci_dev *pdev);
+#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
new file mode 100644
index 00000000..85151019
--- /dev/null
+++ b/drivers/char/agp/intel-gtt.c
@@ -0,0 +1,1519 @@
+/*
+ * Intel GTT (Graphics Translation Table) routines
+ *
+ * Caveat: This driver implements the linux agp interface, but it is far from
+ * an agp driver! GTT support ended up here for purely historical reasons: the
+ * old userspace intel graphics drivers needed an interface to map memory into
+ * the GTT. And the drm provides a default interface for graphics devices
+ * sitting on an agp port. So it made sense to fake the GTT support as an agp
+ * port to avoid having to create a new api.
+ *
+ * With gem this does not make much sense anymore, it just needlessly
+ * complicates the code. But as long as the old graphics stack is still
+ * supported, it's stuck here.
+ *
+ * /fairy-tale-mode off
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <linux/delay.h>
+#include <asm/smp.h>
+#include "agp.h"
+#include "intel-agp.h"
+#include <drm/intel-gtt.h>
+
+/*
+ * If we have Intel graphics, we're not going to have anything other than
+ * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
+ * on the Intel IOMMU support (CONFIG_DMAR).
+ * Only newer chipsets need to bother with this, of course.
+ */
+#ifdef CONFIG_DMAR
+#define USE_PCI_DMA_API 1
+#else
+#define USE_PCI_DMA_API 0
+#endif
+
+struct intel_gtt_driver {
+ unsigned int gen : 8;
+ unsigned int is_g33 : 1;
+ unsigned int is_pineview : 1;
+ unsigned int is_ironlake : 1;
+ unsigned int has_pgtbl_enable : 1;
+ unsigned int dma_mask_size : 8;
+ /* Chipset specific GTT setup */
+ int (*setup)(void);
+	/* This should undo anything done in ->setup(), except for the
+	 * unmapping of the mmio register file; that is done in the generic code. */
+ void (*cleanup)(void);
+ void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+ /* Flags is a more or less chipset specific opaque value.
+ * For chipsets that need to support old ums (non-gem) code, this
+ * needs to be identical to the various supported agp memory types! */
+ bool (*check_flags)(unsigned int flags);
+ void (*chipset_flush)(void);
+};
+
+static struct _intel_private {
+ struct intel_gtt base;
+ const struct intel_gtt_driver *driver;
+	struct pci_dev *pcidev;	/* the integrated graphics device */
+ struct pci_dev *bridge_dev;
+ u8 __iomem *registers;
+ phys_addr_t gtt_bus_addr;
+ phys_addr_t gma_bus_addr;
+ u32 PGETBL_save;
+ u32 __iomem *gtt; /* I915G */
+ bool clear_fake_agp; /* on first access via agp, fill with scratch */
+ int num_dcache_entries;
+ void __iomem *i9xx_flush_page;
+ char *i81x_gtt_table;
+ struct resource ifp_resource;
+ int resource_valid;
+ struct page *scratch_page;
+ dma_addr_t scratch_page_dma;
+} intel_private;
+
+#define INTEL_GTT_GEN intel_private.driver->gen
+#define IS_G33 intel_private.driver->is_g33
+#define IS_PINEVIEW intel_private.driver->is_pineview
+#define IS_IRONLAKE intel_private.driver->is_ironlake
+#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
+
+int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
+ struct scatterlist **sg_list, int *num_sg)
+{
+ struct sg_table st;
+ struct scatterlist *sg;
+ int i;
+
+ if (*sg_list)
+		return 0; /* already mapped (e.g. on resume) */
+
+ DBG("try mapping %lu pages\n", (unsigned long)num_entries);
+
+ if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
+ goto err;
+
+ *sg_list = sg = st.sgl;
+
+ for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
+ sg_set_page(sg, pages[i], PAGE_SIZE, 0);
+
+ *num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
+ num_entries, PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(!*num_sg))
+ goto err;
+
+ return 0;
+
+err:
+	sg_free_table(&st);
+	*sg_list = NULL;
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(intel_gtt_map_memory);
+
+void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
+{
+ struct sg_table st;
+	DBG("try unmapping %d sg entries\n", num_sg);
+
+ pci_unmap_sg(intel_private.pcidev, sg_list,
+ num_sg, PCI_DMA_BIDIRECTIONAL);
+
+ st.sgl = sg_list;
+ st.orig_nents = st.nents = num_sg;
+
+ sg_free_table(&st);
+}
+EXPORT_SYMBOL(intel_gtt_unmap_memory);
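+/*
+ * Caller-side usage sketch (hypothetical caller, for illustration only):
+ *
+ *	struct scatterlist *sg_list = NULL;
+ *	int num_sg = 0;
+ *
+ *	if (intel_gtt_map_memory(pages, n, &sg_list, &num_sg) == 0) {
+ *		intel_gtt_insert_sg_entries(sg_list, num_sg, first_pte, flags);
+ *		...
+ *		intel_gtt_unmap_memory(sg_list, num_sg);
+ *	}
+ */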
+
+static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ return;
+}
+
+/* Exists to support ARGB cursors */
+static struct page *i8xx_alloc_pages(void)
+{
+ struct page *page;
+
+ page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
+ if (page == NULL)
+ return NULL;
+
+ if (set_pages_uc(page, 4) < 0) {
+ set_pages_wb(page, 4);
+ __free_pages(page, 2);
+ return NULL;
+ }
+ get_page(page);
+ atomic_inc(&agp_bridge->current_memory_agp);
+ return page;
+}
+
+static void i8xx_destroy_pages(struct page *page)
+{
+ if (page == NULL)
+ return;
+
+ set_pages_wb(page, 4);
+ put_page(page);
+ __free_pages(page, 2);
+ atomic_dec(&agp_bridge->current_memory_agp);
+}
+
+#define I810_GTT_ORDER 4
+static int i810_setup(void)
+{
+ u32 reg_addr;
+ char *gtt_table;
+
+	/* i81x does not preallocate the gtt. It's always 64 KB in size. */
+ gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
+ if (gtt_table == NULL)
+ return -ENOMEM;
+ intel_private.i81x_gtt_table = gtt_table;
+
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
+ reg_addr &= 0xfff80000;
+
+	intel_private.registers = ioremap(reg_addr, KB(64));
+	if (!intel_private.registers) {
+		free_gatt_pages(gtt_table, I810_GTT_ORDER);
+		return -ENOMEM;
+	}
+
+ writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
+ intel_private.registers+I810_PGETBL_CTL);
+
+ intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
+
+ if ((readl(intel_private.registers+I810_DRAM_CTL)
+ & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+ dev_info(&intel_private.pcidev->dev,
+ "detected 4MB dedicated video ram\n");
+ intel_private.num_dcache_entries = 1024;
+ }
+
+ return 0;
+}
+
+static void i810_cleanup(void)
+{
+ writel(0, intel_private.registers+I810_PGETBL_CTL);
+ free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
+}
+
+static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+
+ if ((pg_start + mem->page_count)
+ > intel_private.num_dcache_entries)
+ return -EINVAL;
+
+ if (!mem->is_flushed)
+ global_cache_flush();
+
+ for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+ dma_addr_t addr = i << PAGE_SHIFT;
+ intel_private.driver->write_entry(addr,
+ i, type);
+ }
+ readl(intel_private.gtt+i-1);
+
+ return 0;
+}
+
+/*
+ * The i810/i830 requires a physical address to program its mouse
+ * pointer into hardware.
+ * However the X server still writes to it through the agp aperture.
+ */
+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
+{
+ struct agp_memory *new;
+ struct page *page;
+
+ switch (pg_count) {
+ case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
+ break;
+ case 4:
+ /* kludge to get 4 physical pages for ARGB cursor */
+ page = i8xx_alloc_pages();
+ break;
+ default:
+ return NULL;
+ }
+
+ if (page == NULL)
+ return NULL;
+
+ new = agp_create_memory(pg_count);
+ if (new == NULL)
+ return NULL;
+
+ new->pages[0] = page;
+ if (pg_count == 4) {
+ /* kludge to get 4 physical pages for ARGB cursor */
+ new->pages[1] = new->pages[0] + 1;
+ new->pages[2] = new->pages[1] + 1;
+ new->pages[3] = new->pages[2] + 1;
+ }
+ new->page_count = pg_count;
+ new->num_scratch_pages = pg_count;
+ new->type = AGP_PHYS_MEMORY;
+ new->physical = page_to_phys(new->pages[0]);
+ return new;
+}
+
+static void intel_i810_free_by_type(struct agp_memory *curr)
+{
+ agp_free_key(curr->key);
+ if (curr->type == AGP_PHYS_MEMORY) {
+ if (curr->page_count == 4)
+ i8xx_destroy_pages(curr->pages[0]);
+ else {
+ agp_bridge->driver->agp_destroy_page(curr->pages[0],
+ AGP_PAGE_DESTROY_UNMAP);
+ agp_bridge->driver->agp_destroy_page(curr->pages[0],
+ AGP_PAGE_DESTROY_FREE);
+ }
+ agp_free_page_array(curr);
+ }
+ kfree(curr);
+}
+
+static int intel_gtt_setup_scratch_page(void)
+{
+ struct page *page;
+ dma_addr_t dma_addr;
+
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ if (page == NULL)
+ return -ENOMEM;
+ get_page(page);
+ set_pages_uc(page, 1);
+
+ if (intel_private.base.needs_dmar) {
+ dma_addr = pci_map_page(intel_private.pcidev, page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) {
+			set_pages_wb(page, 1);
+			put_page(page);
+			__free_page(page);
+			return -EINVAL;
+		}
+
+ intel_private.scratch_page_dma = dma_addr;
+ } else
+ intel_private.scratch_page_dma = page_to_phys(page);
+
+ intel_private.scratch_page = page;
+
+ return 0;
+}
+
+static void i810_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ u32 pte_flags = I810_PTE_VALID;
+
+ switch (flags) {
+ case AGP_DCACHE_MEMORY:
+ pte_flags |= I810_PTE_LOCAL;
+ break;
+ case AGP_USER_CACHED_MEMORY:
+ pte_flags |= I830_PTE_SYSTEM_CACHED;
+ break;
+ }
+
+ writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
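+/* {aperture size in MB, number of GTT entries, page order} -- e.g. a 256MB
+ * aperture is covered by 65536 4K-page entries */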
+static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
+ {32, 8192, 3},
+ {64, 16384, 4},
+ {128, 32768, 5},
+ {256, 65536, 6},
+ {512, 131072, 7},
+};
+
+static unsigned int intel_gtt_stolen_size(void)
+{
+ u16 gmch_ctrl;
+ u8 rdct;
+ int local = 0;
+ static const int ddt[4] = { 0, 16, 32, 64 };
+ unsigned int stolen_size = 0;
+
+ if (INTEL_GTT_GEN == 1)
+ return 0; /* no stolen mem on i81x */
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+
+ if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+ intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ case I830_GMCH_GMS_STOLEN_512:
+ stolen_size = KB(512);
+ break;
+ case I830_GMCH_GMS_STOLEN_1024:
+ stolen_size = MB(1);
+ break;
+ case I830_GMCH_GMS_STOLEN_8192:
+ stolen_size = MB(8);
+ break;
+ case I830_GMCH_GMS_LOCAL:
+ rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
+ stolen_size = (I830_RDRAM_ND(rdct) + 1) *
+ MB(ddt[I830_RDRAM_DDT(rdct)]);
+ local = 1;
+ break;
+ default:
+ stolen_size = 0;
+ break;
+ }
+ } else if (INTEL_GTT_GEN == 6) {
+ /*
+		 * SandyBridge has a new memory control register at 0x50 (word access)
+ */
+ u16 snb_gmch_ctl;
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
+ case SNB_GMCH_GMS_STOLEN_32M:
+ stolen_size = MB(32);
+ break;
+ case SNB_GMCH_GMS_STOLEN_64M:
+ stolen_size = MB(64);
+ break;
+ case SNB_GMCH_GMS_STOLEN_96M:
+ stolen_size = MB(96);
+ break;
+ case SNB_GMCH_GMS_STOLEN_128M:
+ stolen_size = MB(128);
+ break;
+ case SNB_GMCH_GMS_STOLEN_160M:
+ stolen_size = MB(160);
+ break;
+ case SNB_GMCH_GMS_STOLEN_192M:
+ stolen_size = MB(192);
+ break;
+ case SNB_GMCH_GMS_STOLEN_224M:
+ stolen_size = MB(224);
+ break;
+ case SNB_GMCH_GMS_STOLEN_256M:
+ stolen_size = MB(256);
+ break;
+ case SNB_GMCH_GMS_STOLEN_288M:
+ stolen_size = MB(288);
+ break;
+ case SNB_GMCH_GMS_STOLEN_320M:
+ stolen_size = MB(320);
+ break;
+ case SNB_GMCH_GMS_STOLEN_352M:
+ stolen_size = MB(352);
+ break;
+ case SNB_GMCH_GMS_STOLEN_384M:
+ stolen_size = MB(384);
+ break;
+ case SNB_GMCH_GMS_STOLEN_416M:
+ stolen_size = MB(416);
+ break;
+ case SNB_GMCH_GMS_STOLEN_448M:
+ stolen_size = MB(448);
+ break;
+ case SNB_GMCH_GMS_STOLEN_480M:
+ stolen_size = MB(480);
+ break;
+ case SNB_GMCH_GMS_STOLEN_512M:
+ stolen_size = MB(512);
+ break;
+ }
+ } else {
+ switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+ case I855_GMCH_GMS_STOLEN_1M:
+ stolen_size = MB(1);
+ break;
+ case I855_GMCH_GMS_STOLEN_4M:
+ stolen_size = MB(4);
+ break;
+ case I855_GMCH_GMS_STOLEN_8M:
+ stolen_size = MB(8);
+ break;
+ case I855_GMCH_GMS_STOLEN_16M:
+ stolen_size = MB(16);
+ break;
+ case I855_GMCH_GMS_STOLEN_32M:
+ stolen_size = MB(32);
+ break;
+ case I915_GMCH_GMS_STOLEN_48M:
+ stolen_size = MB(48);
+ break;
+ case I915_GMCH_GMS_STOLEN_64M:
+ stolen_size = MB(64);
+ break;
+ case G33_GMCH_GMS_STOLEN_128M:
+ stolen_size = MB(128);
+ break;
+ case G33_GMCH_GMS_STOLEN_256M:
+ stolen_size = MB(256);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_96M:
+ stolen_size = MB(96);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_160M:
+ stolen_size = MB(160);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_224M:
+ stolen_size = MB(224);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_352M:
+ stolen_size = MB(352);
+ break;
+ default:
+ stolen_size = 0;
+ break;
+ }
+ }
+
+ if (stolen_size > 0) {
+ dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
+ stolen_size / KB(1), local ? "local" : "stolen");
+ } else {
+ dev_info(&intel_private.bridge_dev->dev,
+ "no pre-allocated video memory detected\n");
+ stolen_size = 0;
+ }
+
+ return stolen_size;
+}
+
+static void i965_adjust_pgetbl_size(unsigned int size_flag)
+{
+ u32 pgetbl_ctl, pgetbl_ctl2;
+
+ /* ensure that ppgtt is disabled */
+ pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
+ pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
+ writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
+
+ /* write the new ggtt size */
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+ pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
+ pgetbl_ctl |= size_flag;
+ writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
+}
+
+static unsigned int i965_gtt_total_entries(void)
+{
+ int size;
+ u32 pgetbl_ctl;
+ u16 gmch_ctl;
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctl);
+
+ if (INTEL_GTT_GEN == 5) {
+ switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
+ case G4x_GMCH_SIZE_1M:
+ case G4x_GMCH_SIZE_VT_1M:
+ i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
+ break;
+ case G4x_GMCH_SIZE_VT_1_5M:
+ i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
+ break;
+ case G4x_GMCH_SIZE_2M:
+ case G4x_GMCH_SIZE_VT_2M:
+ i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
+ break;
+ }
+ }
+
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+ case I965_PGETBL_SIZE_128KB:
+ size = KB(128);
+ break;
+ case I965_PGETBL_SIZE_256KB:
+ size = KB(256);
+ break;
+ case I965_PGETBL_SIZE_512KB:
+ size = KB(512);
+ break;
+ /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
+ case I965_PGETBL_SIZE_1MB:
+ size = KB(1024);
+ break;
+ case I965_PGETBL_SIZE_2MB:
+ size = KB(2048);
+ break;
+ case I965_PGETBL_SIZE_1_5MB:
+ size = KB(1024 + 512);
+ break;
+ default:
+ dev_info(&intel_private.pcidev->dev,
+ "unknown page table size, assuming 512KB\n");
+ size = KB(512);
+ }
+
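+	/* each GTT entry (PTE) is 4 bytes, so table size / 4 gives the entry count */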
+ return size/4;
+}
+
+static unsigned int intel_gtt_total_entries(void)
+{
+ int size;
+
+ if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
+ return i965_gtt_total_entries();
+ else if (INTEL_GTT_GEN == 6) {
+ u16 snb_gmch_ctl;
+
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
+ default:
+ case SNB_GTT_SIZE_0M:
+ printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
+ size = MB(0);
+ break;
+ case SNB_GTT_SIZE_1M:
+ size = MB(1);
+ break;
+ case SNB_GTT_SIZE_2M:
+ size = MB(2);
+ break;
+ }
+ return size/4;
+ } else {
+ /* On previous hardware, the GTT size was just what was
+ * required to map the aperture.
+ */
+ return intel_private.base.gtt_mappable_entries;
+ }
+}
+
+static unsigned int intel_gtt_mappable_entries(void)
+{
+ unsigned int aperture_size;
+
+ if (INTEL_GTT_GEN == 1) {
+ u32 smram_miscc;
+
+ pci_read_config_dword(intel_private.bridge_dev,
+ I810_SMRAM_MISCC, &smram_miscc);
+
+ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
+ == I810_GFX_MEM_WIN_32M)
+ aperture_size = MB(32);
+ else
+ aperture_size = MB(64);
+ } else if (INTEL_GTT_GEN == 2) {
+ u16 gmch_ctrl;
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+
+ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
+ aperture_size = MB(64);
+ else
+ aperture_size = MB(128);
+ } else {
+ /* 9xx supports large sizes, just look at the length */
+ aperture_size = pci_resource_len(intel_private.pcidev, 2);
+ }
+
+ return aperture_size >> PAGE_SHIFT;
+}
+
+static void intel_gtt_teardown_scratch_page(void)
+{
+ set_pages_wb(intel_private.scratch_page, 1);
+ pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ put_page(intel_private.scratch_page);
+ __free_page(intel_private.scratch_page);
+}
+
+static void intel_gtt_cleanup(void)
+{
+ intel_private.driver->cleanup();
+
+ iounmap(intel_private.gtt);
+ iounmap(intel_private.registers);
+
+ intel_gtt_teardown_scratch_page();
+}
+
+static int intel_gtt_init(void)
+{
+ u32 gtt_map_size;
+ int ret;
+
+ ret = intel_private.driver->setup();
+ if (ret != 0)
+ return ret;
+
+ intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
+ intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+
+ /* save the PGETBL reg for resume */
+ intel_private.PGETBL_save =
+ readl(intel_private.registers+I810_PGETBL_CTL)
+ & ~I810_PGETBL_ENABLED;
+ /* we only ever restore the register when enabling the PGTBL... */
+ if (HAS_PGTBL_EN)
+ intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
+
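+	/* each GTT entry maps one 4K page, so entries * 4 gives the size in KB */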
+ dev_info(&intel_private.bridge_dev->dev,
+ "detected gtt size: %dK total, %dK mappable\n",
+ intel_private.base.gtt_total_entries * 4,
+ intel_private.base.gtt_mappable_entries * 4);
+
+ gtt_map_size = intel_private.base.gtt_total_entries * 4;
+
+ intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
+ gtt_map_size);
+ if (!intel_private.gtt) {
+ intel_private.driver->cleanup();
+ iounmap(intel_private.registers);
+ return -ENOMEM;
+ }
+
+ global_cache_flush(); /* FIXME: ? */
+
+ intel_private.base.stolen_size = intel_gtt_stolen_size();
+
+ intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+
+ ret = intel_gtt_setup_scratch_page();
+ if (ret != 0) {
+ intel_gtt_cleanup();
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_fake_agp_fetch_size(void)
+{
+ int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
+ unsigned int aper_size;
+ int i;
+
+ aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
+ / MB(1);
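+	/* each mappable entry is one 4K page; shift to bytes, then scale to MB */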
+
+ for (i = 0; i < num_sizes; i++) {
+ if (aper_size == intel_fake_agp_sizes[i].size) {
+ agp_bridge->current_size =
+ (void *) (intel_fake_agp_sizes + i);
+ return aper_size;
+ }
+ }
+
+ return 0;
+}
+
+static void i830_cleanup(void)
+{
+}
+
+/* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU caches all the way to main memory, because the GPU
+ * doesn't snoop those buffers.
+ *
+ * The 8xx series doesn't have the same lovely interface for flushing the
+ * chipset write buffers that the later chips do. According to the 865
+ * specs the write buffer is 64 octwords, or 1KB, so an earlier approach
+ * filled and clflushed a 1KB buffer to push the contents out. The code
+ * below instead evicts the CPU write buffers with wbinvd and then kicks
+ * off a chipset flush via the magic bit in the I830_HIC register.
+ */
+static void i830_chipset_flush(void)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ /* Forcibly evict everything from the CPU write buffers.
+ * clflush appears to be insufficient.
+ */
+ wbinvd_on_all_cpus();
+
+	/* So far we've only seen documentation for this magic bit on the
+	 * 855GM; we hope it exists for the other gen2 chipsets...
+ *
+ * Also works as advertised on my 845G.
+ */
+ writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+ intel_private.registers+I830_HIC);
+
+ while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ udelay(50);
+ }
+}
+
+static void i830_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ u32 pte_flags = I810_PTE_VALID;
+
+ if (flags == AGP_USER_CACHED_MEMORY)
+ pte_flags |= I830_PTE_SYSTEM_CACHED;
+
+ writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
+static bool intel_enable_gtt(void)
+{
+ u32 gma_addr;
+ u8 __iomem *reg;
+
+ if (INTEL_GTT_GEN <= 2)
+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
+ &gma_addr);
+ else
+ pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
+ &gma_addr);
+
+ intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+
+ if (INTEL_GTT_GEN >= 6)
+ return true;
+
+ if (INTEL_GTT_GEN == 2) {
+ u16 gmch_ctrl;
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, gmch_ctrl);
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+ if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
+ dev_err(&intel_private.pcidev->dev,
+ "failed to enable the GTT: GMCH_CTRL=%x\n",
+ gmch_ctrl);
+ return false;
+ }
+ }
+
+ /* On the resume path we may be adjusting the PGTBL value, so
+ * be paranoid and flush all chipset write buffers...
+ */
+ if (INTEL_GTT_GEN >= 3)
+ writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
+ reg = intel_private.registers+I810_PGETBL_CTL;
+ writel(intel_private.PGETBL_save, reg);
+ if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
+ dev_err(&intel_private.pcidev->dev,
+ "failed to enable the GTT: PGETBL=%x [expected %x]\n",
+ readl(reg), intel_private.PGETBL_save);
+ return false;
+ }
+
+ if (INTEL_GTT_GEN >= 3)
+ writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
+ return true;
+}
+
+static int i830_setup(void)
+{
+ u32 reg_addr;
+
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
+ reg_addr &= 0xfff80000;
+
+ intel_private.registers = ioremap(reg_addr, KB(64));
+ if (!intel_private.registers)
+ return -ENOMEM;
+
+ intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
+
+ return 0;
+}
+
+static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ agp_bridge->gatt_table_real = NULL;
+ agp_bridge->gatt_table = NULL;
+ agp_bridge->gatt_bus_addr = 0;
+
+ return 0;
+}
+
+static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ return 0;
+}
+
+static int intel_fake_agp_configure(void)
+{
+ if (!intel_enable_gtt())
+ return -EIO;
+
+ intel_private.clear_fake_agp = true;
+ agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
+
+ return 0;
+}
+
+static bool i830_check_flags(unsigned int flags)
+{
+ switch (flags) {
+ case 0:
+ case AGP_PHYS_MEMORY:
+ case AGP_USER_CACHED_MEMORY:
+ case AGP_USER_MEMORY:
+ return true;
+ }
+
+ return false;
+}
+
+void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+ unsigned int sg_len,
+ unsigned int pg_start,
+ unsigned int flags)
+{
+ struct scatterlist *sg;
+ unsigned int len, m;
+ int i, j;
+
+ j = pg_start;
+
+	/* the sg list may merge pages, but we have to write a separate
+	 * per-page address into the GTT */
+ for_each_sg(sg_list, sg, sg_len, i) {
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (m = 0; m < len; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+ intel_private.driver->write_entry(addr,
+ j, flags);
+ j++;
+ }
+ }
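+	/* posting read of the last written PTE to flush the PCI write buffer */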
+ readl(intel_private.gtt+j-1);
+}
+EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
+
+void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
+ struct page **pages, unsigned int flags)
+{
+ int i, j;
+
+ for (i = 0, j = first_entry; i < num_entries; i++, j++) {
+ dma_addr_t addr = page_to_phys(pages[i]);
+ intel_private.driver->write_entry(addr,
+ j, flags);
+ }
+ readl(intel_private.gtt+j-1);
+}
+EXPORT_SYMBOL(intel_gtt_insert_pages);
+
+static int intel_fake_agp_insert_entries(struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int ret = -EINVAL;
+
+ if (intel_private.clear_fake_agp) {
+ int start = intel_private.base.stolen_size / PAGE_SIZE;
+ int end = intel_private.base.gtt_mappable_entries;
+ intel_gtt_clear_range(start, end - start);
+ intel_private.clear_fake_agp = false;
+ }
+
+ if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
+ return i810_insert_dcache_entries(mem, pg_start, type);
+
+ if (mem->page_count == 0)
+ goto out;
+
+ if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
+ goto out_err;
+
+ if (type != mem->type)
+ goto out_err;
+
+ if (!intel_private.driver->check_flags(type))
+ goto out_err;
+
+ if (!mem->is_flushed)
+ global_cache_flush();
+
+ if (intel_private.base.needs_dmar) {
+ ret = intel_gtt_map_memory(mem->pages, mem->page_count,
+ &mem->sg_list, &mem->num_sg);
+ if (ret != 0)
+ return ret;
+
+ intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
+ pg_start, type);
+ } else
+ intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
+ type);
+
+out:
+ ret = 0;
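+	/* fall through to out_err: success also marks the memory as flushed */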
+out_err:
+ mem->is_flushed = true;
+ return ret;
+}
+
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
+{
+ unsigned int i;
+
+ for (i = first_entry; i < (first_entry + num_entries); i++) {
+ intel_private.driver->write_entry(intel_private.scratch_page_dma,
+ i, 0);
+ }
+ readl(intel_private.gtt+i-1);
+}
+EXPORT_SYMBOL(intel_gtt_clear_range);
+
+static int intel_fake_agp_remove_entries(struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ if (mem->page_count == 0)
+ return 0;
+
+ intel_gtt_clear_range(pg_start, mem->page_count);
+
+ if (intel_private.base.needs_dmar) {
+ intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
+ mem->sg_list = NULL;
+ mem->num_sg = 0;
+ }
+
+ return 0;
+}
+
+static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
+ int type)
+{
+ struct agp_memory *new;
+
+ if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
+ if (pg_count != intel_private.num_dcache_entries)
+ return NULL;
+
+ new = agp_create_memory(1);
+ if (new == NULL)
+ return NULL;
+
+ new->type = AGP_DCACHE_MEMORY;
+ new->page_count = pg_count;
+ new->num_scratch_pages = 0;
+ agp_free_page_array(new);
+ return new;
+ }
+ if (type == AGP_PHYS_MEMORY)
+ return alloc_agpphysmem_i8xx(pg_count, type);
+ /* always return NULL for other allocation types for now */
+ return NULL;
+}
+
+static int intel_alloc_chipset_flush_resource(void)
+{
+ int ret;
+ ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+ PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
+ pcibios_align_resource, intel_private.bridge_dev);
+
+ return ret;
+}
+
+static void intel_i915_setup_chipset_flush(void)
+{
+ int ret;
+ u32 temp;
+
+ pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
+ if (!(temp & 0x1)) {
+ intel_alloc_chipset_flush_resource();
+ intel_private.resource_valid = 1;
+ pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ temp &= ~1;
+
+ intel_private.resource_valid = 1;
+ intel_private.ifp_resource.start = temp;
+ intel_private.ifp_resource.end = temp + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+		/* some BIOSes reserve this area in a pnp resource, some don't */
+ if (ret)
+ intel_private.resource_valid = 0;
+ }
+}
+
+static void intel_i965_g33_setup_chipset_flush(void)
+{
+ u32 temp_hi, temp_lo;
+ int ret;
+
+ pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
+ pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
+
+ if (!(temp_lo & 0x1)) {
+
+ intel_alloc_chipset_flush_resource();
+
+ intel_private.resource_valid = 1;
+ pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
+ upper_32_bits(intel_private.ifp_resource.start));
+ pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ u64 l64;
+
+ temp_lo &= ~0x1;
+ l64 = ((u64)temp_hi << 32) | temp_lo;
+
+ intel_private.resource_valid = 1;
+ intel_private.ifp_resource.start = l64;
+ intel_private.ifp_resource.end = l64 + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+		/* some BIOSes reserve this area in a pnp resource, some don't */
+ if (ret)
+ intel_private.resource_valid = 0;
+ }
+}
+
+static void intel_i9xx_setup_flush(void)
+{
+ /* return if already configured */
+ if (intel_private.ifp_resource.start)
+ return;
+
+ if (INTEL_GTT_GEN == 6)
+ return;
+
+ /* setup a resource for this object */
+ intel_private.ifp_resource.name = "Intel Flush Page";
+ intel_private.ifp_resource.flags = IORESOURCE_MEM;
+
+ /* Setup chipset flush for 915 */
+ if (IS_G33 || INTEL_GTT_GEN >= 4) {
+ intel_i965_g33_setup_chipset_flush();
+ } else {
+ intel_i915_setup_chipset_flush();
+ }
+
+ if (intel_private.ifp_resource.start)
+ intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
+ if (!intel_private.i9xx_flush_page)
+ dev_err(&intel_private.pcidev->dev,
+ "can't ioremap flush page - no chipset flushing\n");
+}
+
+static void i9xx_cleanup(void)
+{
+ if (intel_private.i9xx_flush_page)
+ iounmap(intel_private.i9xx_flush_page);
+ if (intel_private.resource_valid)
+ release_resource(&intel_private.ifp_resource);
+ intel_private.ifp_resource.start = 0;
+ intel_private.resource_valid = 0;
+}
+
+static void i9xx_chipset_flush(void)
+{
+ if (intel_private.i9xx_flush_page)
+ writel(1, intel_private.i9xx_flush_page);
+}
+
+static void i965_write_entry(dma_addr_t addr,
+ unsigned int entry,
+ unsigned int flags)
+{
+ u32 pte_flags;
+
+ pte_flags = I810_PTE_VALID;
+ if (flags == AGP_USER_CACHED_MEMORY)
+ pte_flags |= I830_PTE_SYSTEM_CACHED;
+
+ /* Shift high bits down */
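+	/* e.g. (hypothetical) addr 0x3_0000_0000: bits 35:32 (0x3) land in PTE bits 7:4 as 0x30 */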
+ addr |= (addr >> 28) & 0xf0;
+ writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
+static bool gen6_check_flags(unsigned int flags)
+{
+ return true;
+}
+
+static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+ unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
+ u32 pte_flags;
+
+ if (type_mask == AGP_USER_MEMORY)
+ pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+ else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
+ pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
+ } else { /* set 'normal'/'cached' to LLC by default */
+ pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
+ }
+
+ /* gen6 has bit11-4 for physical addr bit39-32 */
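+	/* e.g. (hypothetical) addr 0x12_0000_0000: bits 39:32 (0x12) land in PTE bits 11:4 as 0x120 */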
+ addr |= (addr >> 28) & 0xff0;
+ writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
+static void gen6_cleanup(void)
+{
+}
+
+static int i9xx_setup(void)
+{
+ u32 reg_addr;
+
+ pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
+
+ reg_addr &= 0xfff80000;
+
+ intel_private.registers = ioremap(reg_addr, 128 * 4096);
+ if (!intel_private.registers)
+ return -ENOMEM;
+
+ if (INTEL_GTT_GEN == 3) {
+ u32 gtt_addr;
+
+ pci_read_config_dword(intel_private.pcidev,
+ I915_PTEADDR, &gtt_addr);
+ intel_private.gtt_bus_addr = gtt_addr;
+ } else {
+ u32 gtt_offset;
+
+ switch (INTEL_GTT_GEN) {
+ case 5:
+ case 6:
+ gtt_offset = MB(2);
+ break;
+ case 4:
+ default:
+ gtt_offset = KB(512);
+ break;
+ }
+ intel_private.gtt_bus_addr = reg_addr + gtt_offset;
+ }
+
+ intel_i9xx_setup_flush();
+
+ return 0;
+}
+
+static const struct agp_bridge_driver intel_fake_agp_driver = {
+ .owner = THIS_MODULE,
+ .size_type = FIXED_APER_SIZE,
+ .aperture_sizes = intel_fake_agp_sizes,
+ .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
+ .configure = intel_fake_agp_configure,
+ .fetch_size = intel_fake_agp_fetch_size,
+ .cleanup = intel_gtt_cleanup,
+ .agp_enable = intel_fake_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_fake_agp_create_gatt_table,
+ .free_gatt_table = intel_fake_agp_free_gatt_table,
+ .insert_memory = intel_fake_agp_insert_entries,
+ .remove_memory = intel_fake_agp_remove_entries,
+ .alloc_by_type = intel_fake_agp_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+};
+
+static const struct intel_gtt_driver i81x_gtt_driver = {
+ .gen = 1,
+ .has_pgtbl_enable = 1,
+ .dma_mask_size = 32,
+ .setup = i810_setup,
+ .cleanup = i810_cleanup,
+ .check_flags = i830_check_flags,
+ .write_entry = i810_write_entry,
+};
+static const struct intel_gtt_driver i8xx_gtt_driver = {
+ .gen = 2,
+ .has_pgtbl_enable = 1,
+ .setup = i830_setup,
+ .cleanup = i830_cleanup,
+ .write_entry = i830_write_entry,
+ .dma_mask_size = 32,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i830_chipset_flush,
+};
+static const struct intel_gtt_driver i915_gtt_driver = {
+ .gen = 3,
+ .has_pgtbl_enable = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ /* i945 is the last gpu to need phys mem (for overlay and cursors). */
+ .write_entry = i830_write_entry,
+ .dma_mask_size = 32,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver g33_gtt_driver = {
+ .gen = 3,
+ .is_g33 = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver pineview_gtt_driver = {
+ .gen = 3,
+ .is_pineview = 1, .is_g33 = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver i965_gtt_driver = {
+ .gen = 4,
+ .has_pgtbl_enable = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver g4x_gtt_driver = {
+ .gen = 5,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver ironlake_gtt_driver = {
+ .gen = 5,
+ .is_ironlake = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver sandybridge_gtt_driver = {
+ .gen = 6,
+ .setup = i9xx_setup,
+ .cleanup = gen6_cleanup,
+ .write_entry = gen6_write_entry,
+ .dma_mask_size = 40,
+ .check_flags = gen6_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+
+/* Table describing the Intel GMCH GTT drivers. find_gmch() matches the
+ * gmch_chip_id against the installed integrated graphics device to select
+ * the gtt_driver to use.
+ */
+static const struct intel_gtt_driver_description {
+ unsigned int gmch_chip_id;
+ char *name;
+ const struct intel_gtt_driver *gtt_driver;
+} intel_gtt_chipsets[] = {
+ { PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
+ &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
+ &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82854_IG, "854",
+ &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
+ &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82865_IG, "865",
+ &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
+ &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
+ &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
+ &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
+ &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
+ &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
+ &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
+ &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
+ &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
+ &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
+ &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
+ &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
+ &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
+ &g33_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
+ &g33_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
+ &g33_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
+ &pineview_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
+ &pineview_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
+ &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
+ &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
+ &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
+ &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
+ &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
+ &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
+ &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
+ "HD Graphics", &ironlake_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+ "HD Graphics", &ironlake_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
+ "Sandybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
+ "Sandybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
+ "Sandybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
+ "Sandybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
+ "Sandybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
+ "Sandybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
+ "Sandybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
+ { 0, NULL, NULL }
+};
+
+static int find_gmch(u16 device)
+{
+ struct pci_dev *gmch_device;
+
+ gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
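+	/* if this landed on a non-zero PCI function, look for another
+	 * instance of the same device id to reach function 0 */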
+ if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
+ gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
+ device, gmch_device);
+ }
+
+ if (!gmch_device)
+ return 0;
+
+ intel_private.pcidev = gmch_device;
+ return 1;
+}
+
+int intel_gmch_probe(struct pci_dev *pdev,
+ struct agp_bridge_data *bridge)
+{
+ int i, mask;
+ intel_private.driver = NULL;
+
+ for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
+ if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
+ intel_private.driver =
+ intel_gtt_chipsets[i].gtt_driver;
+ break;
+ }
+ }
+
+ if (!intel_private.driver)
+ return 0;
+
+ bridge->driver = &intel_fake_agp_driver;
+ bridge->dev_private_data = &intel_private;
+ bridge->dev = pdev;
+
+ intel_private.bridge_dev = pci_dev_get(pdev);
+
+ dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
+
+ mask = intel_private.driver->dma_mask_size;
+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+ dev_err(&intel_private.pcidev->dev,
+			"failed to set %d-bit dma mask on gfx device\n", mask);
+ else
+ pci_set_consistent_dma_mask(intel_private.pcidev,
+ DMA_BIT_MASK(mask));
+
+ /*if (bridge->driver == &intel_810_driver)
+ return 1;*/
+
+ if (intel_gtt_init() != 0)
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL(intel_gmch_probe);
+
+const struct intel_gtt *intel_gtt_get(void)
+{
+ return &intel_private.base;
+}
+EXPORT_SYMBOL(intel_gtt_get);
+
+void intel_gtt_chipset_flush(void)
+{
+ if (intel_private.driver->chipset_flush)
+ intel_private.driver->chipset_flush();
+}
+EXPORT_SYMBOL(intel_gtt_chipset_flush);
+
+void intel_gmch_remove(struct pci_dev *pdev)
+{
+ if (intel_private.pcidev)
+ pci_dev_put(intel_private.pcidev);
+ if (intel_private.bridge_dev)
+ pci_dev_put(intel_private.bridge_dev);
+}
+EXPORT_SYMBOL(intel_gmch_remove);
+
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c
new file mode 100644
index 00000000..c73385cc
--- /dev/null
+++ b/drivers/char/agp/isoch.c
@@ -0,0 +1,470 @@
+/*
+ * Setup routines for AGP 3.5 compliant bridges.
+ */
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/agp_backend.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "agp.h"
+
+/* Generic AGP 3.5 enabling routines */
+
+struct agp_3_5_dev {
+ struct list_head list;
+ u8 capndx;
+ u32 maxbw;
+ struct pci_dev *dev;
+};
+
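+/*
+ * Insert 'new' into the list so it stays sorted by ascending maxbw;
+ * agp_3_5_dev_list_sort() below uses this for an insertion sort.
+ */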
+static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new)
+{
+ struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list);
+ struct list_head *pos;
+
+ list_for_each(pos, head) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ if (cur->maxbw > n->maxbw)
+ break;
+ }
+ list_add_tail(new, pos);
+}
+
+static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs)
+{
+ struct agp_3_5_dev *cur;
+ struct pci_dev *dev;
+ struct list_head *pos, *tmp, *head = &list->list, *start = head->next;
+ u32 nistat;
+
+ INIT_LIST_HEAD(head);
+
+ for (pos=start; pos!=head; ) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ dev = cur->dev;
+
+ pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat);
+ cur->maxbw = (nistat >> 16) & 0xff;
+
+ tmp = pos;
+ pos = pos->next;
+ agp_3_5_dev_list_insert(head, tmp);
+ }
+}
+
+/*
+ * Initialize all isochronous transfer parameters for an AGP 3.0
+ * node (i.e. a host bridge in combination with the adapters
+ * lying behind it...)
+ */
+
+static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
+ struct agp_3_5_dev *dev_list, unsigned int ndevs)
+{
+ /*
+ * Convenience structure to make the calculations clearer
+ * here. The field names come straight from the AGP 3.0 spec.
+ */
+ struct isoch_data {
+ u32 maxbw;
+ u32 n;
+ u32 y;
+ u32 l;
+ u32 rq;
+ struct agp_3_5_dev *dev;
+ };
+
+ struct pci_dev *td = bridge->dev, *dev;
+ struct list_head *head = &dev_list->list, *pos;
+ struct agp_3_5_dev *cur;
+ struct isoch_data *master, target;
+ unsigned int cdev = 0;
+ u32 mnistat, tnistat, tstatus, mcmd;
+ u16 tnicmd, mnicmd;
+ u8 mcapndx;
+ u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
+ u32 step, rem, rem_isoch, rem_async;
+ int ret = 0;
+
+ /*
+ * We'll work with an array of isoch_data's (one for each
+ * device in dev_list) throughout this function.
+ */
+ if ((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto get_out;
+ }
+
+ /*
+ * Sort the device list by maxbw. We need to do this because the
+ * spec suggests that the devices with the smallest requirements
+ * have their resources allocated first, with all remaining resources
+ * falling to the device with the largest requirement.
+ *
+	 * We don't do exactly this; we divide the target resources by ndevs
+	 * and split them amongst the AGP 3.0 devices. The remainder of such
+	 * division operations is given to the last device, roughly as the
+	 * spec says it should be done.
+ *
+ * We can't do this sort when we initially construct the dev_list
+ * because we don't know until this function whether isochronous
+ * transfers are enabled and consequently whether maxbw will mean
+ * anything.
+ */
+ agp_3_5_dev_list_sort(dev_list, ndevs);
+
+ pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
+ pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
+
+ /* Extract power-on defaults from the target */
+ target.maxbw = (tnistat >> 16) & 0xff;
+ target.n = (tnistat >> 8) & 0xff;
+ target.y = (tnistat >> 6) & 0x3;
+ target.l = (tnistat >> 3) & 0x7;
+ target.rq = (tstatus >> 24) & 0xff;
+
+ y_max = target.y;
+
+ /*
+ * Extract power-on defaults for each device in dev_list. Along
+ * the way, calculate the total isochronous bandwidth required
+ * by these devices and the largest requested payload size.
+ */
+ list_for_each(pos, head) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ dev = cur->dev;
+
+ mcapndx = cur->capndx;
+
+ pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);
+
+ master[cdev].maxbw = (mnistat >> 16) & 0xff;
+ master[cdev].n = (mnistat >> 8) & 0xff;
+ master[cdev].y = (mnistat >> 6) & 0x3;
+ master[cdev].dev = cur;
+
+ tot_bw += master[cdev].maxbw;
+ y_max = max(y_max, master[cdev].y);
+
+ cdev++;
+ }
+
+ /* Check if this configuration has any chance of working */
+ if (tot_bw > target.maxbw) {
+ dev_err(&td->dev, "isochronous bandwidth required "
+ "by AGP 3.0 devices exceeds that which is supported by "
+ "the AGP 3.0 bridge!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ target.y = y_max;
+
+ /*
+ * Write the calculated payload size into the target's NICMD
+	 * register. Doing this directly affects the ISOCH_N value
+ * in the target's NISTAT register, so we need to do this now
+ * to get an accurate value for ISOCH_N later.
+ */
+ pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd);
+ tnicmd &= ~(0x3 << 6);
+ tnicmd |= target.y << 6;
+ pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd);
+
+ /* Reread the target's ISOCH_N */
+ pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
+ target.n = (tnistat >> 8) & 0xff;
+
+ /* Calculate the minimum ISOCH_N needed by each master */
+ for (cdev=0; cdev<ndevs; cdev++) {
+ master[cdev].y = target.y;
+ master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1);
+
+ tot_n += master[cdev].n;
+ }
+
+ /* Exit if the minimal ISOCH_N allocation among the masters is more
+ * than the target can handle. */
+ if (tot_n > target.n) {
+ dev_err(&td->dev, "number of isochronous "
+ "transactions per period required by AGP 3.0 devices "
+ "exceeds that which is supported by the AGP 3.0 "
+ "bridge!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ /* Calculate left over ISOCH_N capability in the target. We'll give
+ * this to the hungriest device (as per the spec) */
+ rem = target.n - tot_n;
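+	/* e.g. (hypothetical) target.n = 10 with masters needing n = 3 and 4:
+	 * rem = 3, handed to the last (highest-maxbw) master below */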
+
+ /*
+ * Calculate the minimum isochronous RQ depth needed by each master.
+ * Along the way, distribute the extra ISOCH_N capability calculated
+ * above.
+ */
+ for (cdev=0; cdev<ndevs; cdev++) {
+ /*
+ * This is a little subtle. If ISOCH_Y > 64B, then ISOCH_Y
+ * byte isochronous writes will be broken into 64B pieces.
+ * This means we need to budget more RQ depth to account for
+		 * this kind of write (each isochronous write is actually
+ * many writes on the AGP bus).
+ */
+ master[cdev].rq = master[cdev].n;
+ if (master[cdev].y > 0x1)
+ master[cdev].rq *= (1 << (master[cdev].y - 1));
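+		/* e.g. (hypothetical) y = 2: rq = n * 2^(y-1) = 2 * n */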
+
+ tot_rq += master[cdev].rq;
+ }
+ master[ndevs-1].n += rem;
+
+ /* Figure the number of isochronous and asynchronous RQ slots the
+ * target is providing. */
+ rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n;
+ rq_async = target.rq - rq_isoch;
+
+ /* Exit if the minimal RQ needs of the masters exceeds what the target
+ * can provide. */
+ if (tot_rq > rq_isoch) {
+ dev_err(&td->dev, "number of request queue slots "
+ "required by the isochronous bandwidth requested by "
+ "AGP 3.0 devices exceeds the number provided by the "
+ "AGP 3.0 bridge!\n");
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ /* Calculate asynchronous RQ capability in the target (per master) as
+ * well as the total number of leftover isochronous RQ slots. */
+ step = rq_async / ndevs;
+ rem_async = step + (rq_async % ndevs);
+ rem_isoch = rq_isoch - tot_rq;
+
+ /* Distribute the extra RQ slots calculated above and write our
+ * isochronous settings out to the actual devices. */
+ for (cdev=0; cdev<ndevs; cdev++) {
+ cur = master[cdev].dev;
+ dev = cur->dev;
+
+ mcapndx = cur->capndx;
+
+ master[cdev].rq += (cdev == ndevs - 1)
+ ? (rem_async + rem_isoch) : step;
+
+ pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd);
+ pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd);
+
+ mnicmd &= ~(0xff << 8);
+ mnicmd &= ~(0x3 << 6);
+ mcmd &= ~(0xff << 24);
+
+ mnicmd |= master[cdev].n << 8;
+ mnicmd |= master[cdev].y << 6;
+ mcmd |= master[cdev].rq << 24;
+
+ pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd);
+ pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd);
+ }
+
+free_and_exit:
+ kfree(master);
+
+get_out:
+ return ret;
+}
+
+/*
+ * This function basically allocates request queue slots among the
+ * AGP 3.0 systems in nonisochronous nodes. The algorithm is
+ * pretty stupid, divide the total number of RQ slots provided by the
+ * target by ndevs. Distribute this many slots to each AGP 3.0 device,
+ * giving any left over slots to the last device in dev_list.
+ */
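+/*
+ * For example (hypothetical numbers): trq = 32 slots and ndevs = 3 gives
+ * mrq = 10 and rem = 12, so the three masters get 10, 10 and 12 slots.
+ */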
+static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge,
+ struct agp_3_5_dev *dev_list, unsigned int ndevs)
+{
+ struct agp_3_5_dev *cur;
+ struct list_head *head = &dev_list->list, *pos;
+ u32 tstatus, mcmd;
+ u32 trq, mrq, rem;
+ unsigned int cdev = 0;
+
+ pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus);
+
+ trq = (tstatus >> 24) & 0xff;
+ mrq = trq / ndevs;
+
+ rem = mrq + (trq % ndevs);
+
+ for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+
+ pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd);
+ mcmd &= ~(0xff << 24);
+ mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24;
+ pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd);
+ }
+}
+
+/*
+ * Fully configure and enable an AGP 3.0 host bridge and all the devices
+ * lying behind it.
+ */
+int agp_3_5_enable(struct agp_bridge_data *bridge)
+{
+ struct pci_dev *td = bridge->dev, *dev = NULL;
+ u8 mcapndx;
+ u32 isoch, arqsz;
+ u32 tstatus, mstatus, ncapid;
+ u32 mmajor;
+ u16 mpstat;
+ struct agp_3_5_dev *dev_list, *cur;
+ struct list_head *head, *pos;
+ unsigned int ndevs = 0;
+ int ret = 0;
+
+ /* Extract some power-on defaults from the target */
+ pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
+ isoch = (tstatus >> 17) & 0x1;
+ if (isoch == 0) /* isoch xfers not available, bail out. */
+ return -ENODEV;
+
+ arqsz = (tstatus >> 13) & 0x7;
+
+ /*
+ * Allocate a head for our AGP 3.5 device list
+ * (multiple AGP v3 devices are allowed behind a single bridge).
+ */
+ if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto get_out;
+ }
+ head = &dev_list->list;
+ INIT_LIST_HEAD(head);
+
+ /* Find all AGP devices, and add them to dev_list. */
+ for_each_pci_dev(dev) {
+ mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP);
+ if (mcapndx == 0)
+ continue;
+
+		switch ((dev->class >> 8) & 0xff00) {
+ case 0x0600: /* Bridge */
+ /* Skip bridges. We should call this function for each one. */
+ continue;
+
+ case 0x0001: /* Unclassified device */
+			/* Don't know what this is, but log it for
+			 * investigation (mcapndx is known non-zero here). */
+			dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n",
+				 pci_name(dev),
+				 dev->vendor, dev->device);
+ continue;
+
+ case 0x0300: /* Display controller */
+ case 0x0400: /* Multimedia controller */
+ if ((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto free_and_exit;
+ }
+ cur->dev = dev;
+
+ pos = &cur->list;
+ list_add(pos, head);
+ ndevs++;
+ continue;
+
+ default:
+ continue;
+ }
+ }
+
+ /*
+ * Take an initial pass through the devices lying behind our host
+ * bridge. Make sure each one is actually an AGP 3.0 device, otherwise
+ * exit with an error message. Along the way store the AGP 3.0
+ * cap_ptr for each device
+ */
+ list_for_each(pos, head) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+ dev = cur->dev;
+
+ pci_read_config_word(dev, PCI_STATUS, &mpstat);
+ if ((mpstat & PCI_STATUS_CAP_LIST) == 0)
+ continue;
+
+ pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx);
+ if (mcapndx != 0) {
+ do {
+ pci_read_config_dword(dev, mcapndx, &ncapid);
+ if ((ncapid & 0xff) != 2)
+ mcapndx = (ncapid >> 8) & 0xff;
+			} while (((ncapid & 0xff) != 2) && (mcapndx != 0));
+ }
+
+ if (mcapndx == 0) {
+ dev_err(&td->dev, "woah! Non-AGP device %s on "
+ "secondary bus of AGP 3.5 bridge!\n",
+ pci_name(dev));
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
+ if (mmajor < 3) {
+ dev_err(&td->dev, "woah! AGP 2.0 device %s on "
+ "secondary bus of AGP 3.5 bridge operating "
+ "with AGP 3.0 electricals!\n", pci_name(dev));
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+
+ cur->capndx = mcapndx;
+
+ pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);
+
+ if (((mstatus >> 3) & 0x1) == 0) {
+ dev_err(&td->dev, "woah! AGP 3.x device %s not "
+ "operating in AGP 3.x mode on secondary bus "
+ "of AGP 3.5 bridge operating with AGP 3.0 "
+ "electricals!\n", pci_name(dev));
+ ret = -ENODEV;
+ goto free_and_exit;
+ }
+ }
+
+ /*
+ * Call functions to divide target resources amongst the AGP 3.0
+ * masters. This process is dramatically different depending on
+ * whether isochronous transfers are supported.
+ */
+ if (isoch) {
+ ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
+ if (ret) {
+ dev_info(&td->dev, "something bad happened setting "
+ "up isochronous xfers; falling back to "
+ "non-isochronous xfer mode\n");
+ } else {
+ goto free_and_exit;
+ }
+ }
+ agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs);
+
+free_and_exit:
+ /* Be sure to free the dev_list */
+	for (pos = head->next; pos != head; ) {
+ cur = list_entry(pos, struct agp_3_5_dev, list);
+
+ pos = pos->next;
+ kfree(cur);
+ }
+ kfree(dev_list);
+
+get_out:
+ return ret;
+}
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
new file mode 100644
index 00000000..b9734a97
--- /dev/null
+++ b/drivers/char/agp/nvidia-agp.c
@@ -0,0 +1,475 @@
+/*
+ * Nvidia AGPGART routines.
+ * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
+ * to work in 2.5 by Dave Jones <davej@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+#include <linux/jiffies.h>
+#include "agp.h"
+
+/* NVIDIA registers */
+#define NVIDIA_0_APSIZE 0x80
+#define NVIDIA_1_WBC 0xf0
+#define NVIDIA_2_GARTCTRL 0xd0
+#define NVIDIA_2_APBASE 0xd8
+#define NVIDIA_2_APLIMIT 0xdc
+#define NVIDIA_2_ATTBASE(i) (0xe0 + (i) * 4)
+#define NVIDIA_3_APBASE 0x50
+#define NVIDIA_3_APLIMIT 0x54
+
+
+static struct _nvidia_private {
+ struct pci_dev *dev_1;
+ struct pci_dev *dev_2;
+ struct pci_dev *dev_3;
+ volatile u32 __iomem *aperture;
+ int num_active_entries;
+ off_t pg_offset;
+ u32 wbc_mask;
+} nvidia_private;
+
+
+static int nvidia_fetch_size(void)
+{
+ int i;
+ u8 size_value;
+ struct aper_size_info_8 *values;
+
+ pci_read_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, &size_value);
+ size_value &= 0x0f;
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (size_value == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+#define SYSCFG 0xC0010010
+#define IORR_BASE0 0xC0010016
+#define IORR_MASK0 0xC0010017
+#define AMD_K7_NUM_IORR 2
+
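+/*
+ * Program an AMD K7 I/O Range Register (IORR) so CPU accesses to the
+ * aperture bypass the cache. A sketch of the encoding used below, stated
+ * as an assumption from the AMD K7 documentation rather than from this
+ * file: base MSR bits 4:3 (the 0x18) select the memory-type override,
+ * mask MSR bit 11 (0x800) is the valid bit, and SYSCFG bit 20
+ * (0x00100000) globally enables the IORRs.
+ */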
+static int nvidia_init_iorr(u32 base, u32 size)
+{
+ u32 base_hi, base_lo;
+ u32 mask_hi, mask_lo;
+ u32 sys_hi, sys_lo;
+ u32 iorr_addr, free_iorr_addr;
+
+ /* Find the iorr that is already used for the base */
+ /* If not found, determine the uppermost available iorr */
+ free_iorr_addr = AMD_K7_NUM_IORR;
+ for (iorr_addr = 0; iorr_addr < AMD_K7_NUM_IORR; iorr_addr++) {
+ rdmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
+ rdmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
+
+ if ((base_lo & 0xfffff000) == (base & 0xfffff000))
+ break;
+
+ if ((mask_lo & 0x00000800) == 0)
+ free_iorr_addr = iorr_addr;
+ }
+
+ if (iorr_addr >= AMD_K7_NUM_IORR) {
+ iorr_addr = free_iorr_addr;
+ if (iorr_addr >= AMD_K7_NUM_IORR)
+ return -EINVAL;
+ }
+ base_hi = 0x0;
+ base_lo = (base & ~0xfff) | 0x18;
+ mask_hi = 0xf;
+ mask_lo = ((~(size - 1)) & 0xfffff000) | 0x800;
+ wrmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
+ wrmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
+
+ rdmsr(SYSCFG, sys_lo, sys_hi);
+ sys_lo |= 0x00100000;
+ wrmsr(SYSCFG, sys_lo, sys_hi);
+
+ return 0;
+}
+
+static int nvidia_configure(void)
+{
+ int i, rc, num_dirs;
+ u32 apbase, aplimit;
+ struct aper_size_info_8 *current_size;
+ u32 temp;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
+ current_size->size_value);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &apbase);
+ apbase &= PCI_BASE_ADDRESS_MEM_MASK;
+ agp_bridge->gart_bus_addr = apbase;
+ aplimit = apbase + (current_size->size * 1024 * 1024) - 1;
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase);
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APLIMIT, aplimit);
+ pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APBASE, apbase);
+ pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APLIMIT, aplimit);
+ if (0 != (rc = nvidia_init_iorr(apbase, current_size->size * 1024 * 1024)))
+ return rc;
+
+ /* directory size is 64k */
+ num_dirs = current_size->size / 64;
+ nvidia_private.num_active_entries = current_size->num_entries;
+ nvidia_private.pg_offset = 0;
+ if (num_dirs == 0) {
+ num_dirs = 1;
+ nvidia_private.num_active_entries /= (64 / current_size->size);
+ nvidia_private.pg_offset = (apbase & (64 * 1024 * 1024 - 1) &
+ ~(current_size->size * 1024 * 1024 - 1)) / PAGE_SIZE;
+ }
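+	/*
+	 * Worked example (assumption): with a 32M aperture, num_dirs computes
+	 * to 0 above, so a single 64k directory is used but only
+	 * 16384 / (64/32) = 8192 of its entries; pg_offset then selects
+	 * whichever 32M-aligned half of the surrounding 64M block the
+	 * aperture occupies.
+	 */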
+
+ /* attbase */
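+	/*
+	 * Point each of the eight ATTBASE windows at a 64k GART directory,
+	 * wrapping when fewer than eight directories exist; bit 0 presumably
+	 * marks the entry valid.
+	 */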
+ for (i = 0; i < 8; i++) {
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_ATTBASE(i),
+ (agp_bridge->gatt_bus_addr + (i % num_dirs) * 64 * 1024) | 1);
+ }
+
+ /* gtlb control */
+ pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp);
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp | 0x11);
+
+ /* gart control */
+ pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp);
+ pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp | 0x100);
+
+ /* map aperture */
+ nvidia_private.aperture =
+ (volatile u32 __iomem *) ioremap(apbase, 33 * PAGE_SIZE);
+
+ if (!nvidia_private.aperture)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void nvidia_cleanup(void)
+{
+ struct aper_size_info_8 *previous_size;
+ u32 temp;
+
+ /* gart control */
+ pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp);
+ pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp & ~(0x100));
+
+ /* gtlb control */
+ pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp);
+ pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp & ~(0x11));
+
+ /* unmap aperture */
+ iounmap((void __iomem *) nvidia_private.aperture);
+
+ /* restore previous aperture size */
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
+ previous_size->size_value);
+
+ /* restore iorr for previous aperture size */
+ nvidia_init_iorr(agp_bridge->gart_bus_addr,
+ previous_size->size * 1024 * 1024);
+}
+
+
+/*
+ * Note we can't use the generic routines, even though they are 99% the same.
+ * Aperture sizes <64M still require a full 64k GART directory, but
+ * only use the portion of the TLB entries that corresponds to the
+ * aperture's alignment inside the surrounding 64M block.
+ */
+extern int agp_memory_reserved;
+
+static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i, j;
+ int mask_type;
+
+ mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
+ if (mask_type != 0 || type != mem->type)
+ return -EINVAL;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ if ((pg_start + mem->page_count) >
+ (nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE))
+ return -EINVAL;
+
+ for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j)))
+ return -EBUSY;
+ }
+
+ if (!mem->is_flushed) {
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mask_type),
+ agp_bridge->gatt_table+nvidia_private.pg_offset+j);
+ }
+
+ /* PCI Posting. */
+ readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j - 1);
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+
+static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i;
+
+ int mask_type;
+
+ mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
+ if (mask_type != 0 || type != mem->type)
+ return -EINVAL;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++)
+ writel(agp_bridge->scratch_page, agp_bridge->gatt_table+nvidia_private.pg_offset+i);
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+
+static void nvidia_tlbflush(struct agp_memory *mem)
+{
+ unsigned long end;
+ u32 wbc_reg, temp;
+ int i;
+
+ /* flush chipset */
+ if (nvidia_private.wbc_mask) {
+ pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg);
+ wbc_reg |= nvidia_private.wbc_mask;
+ pci_write_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, wbc_reg);
+
+ end = jiffies + 3*HZ;
+ do {
+ pci_read_config_dword(nvidia_private.dev_1,
+ NVIDIA_1_WBC, &wbc_reg);
+ if (time_before_eq(end, jiffies)) {
+ printk(KERN_ERR PFX
+ "TLB flush took more than 3 seconds.\n");
+ }
+ } while (wbc_reg & nvidia_private.wbc_mask);
+ }
+
+ /* flush TLB entries */
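+	/*
+	 * Read one dword from each of 33 aperture pages, twice: pulling data
+	 * through the aperture forces the GART TLB to refill, evicting stale
+	 * entries (assumption: the TLB holds 32 entries, hence the 32 + 1
+	 * reads, matching the 33-page ioremap in nvidia_configure()).
+	 */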
+ for (i = 0; i < 32 + 1; i++)
+ temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
+ for (i = 0; i < 32 + 1; i++)
+ temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
+}
+
+
+static const struct aper_size_info_8 nvidia_generic_sizes[5] =
+{
+ {512, 131072, 7, 0},
+ {256, 65536, 6, 8},
+ {128, 32768, 5, 12},
+ {64, 16384, 4, 14},
+ /* The 32M mode still requires a 64k gatt */
+ {32, 16384, 4, 15}
+};
+
+
+static const struct gatt_mask nvidia_generic_masks[] =
+{
+ { .mask = 1, .type = 0}
+};
+
+
+static const struct agp_bridge_driver nvidia_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = nvidia_generic_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 5,
+ .needs_scratch_page = true,
+ .configure = nvidia_configure,
+ .fetch_size = nvidia_fetch_size,
+ .cleanup = nvidia_cleanup,
+ .tlb_flush = nvidia_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = nvidia_generic_masks,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = nvidia_insert_memory,
+ .remove_memory = nvidia_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static int __devinit agp_nvidia_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+
+ nvidia_private.dev_1 =
+ pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 1));
+ nvidia_private.dev_2 =
+ pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 2));
+ nvidia_private.dev_3 =
+ pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(30, 0));
+
+ if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) {
+ printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 "
+ "chipset, but could not find the secondary devices.\n");
+ return -ENODEV;
+ }
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_NVIDIA_NFORCE:
+ printk(KERN_INFO PFX "Detected NVIDIA nForce chipset\n");
+ nvidia_private.wbc_mask = 0x00010000;
+ break;
+ case PCI_DEVICE_ID_NVIDIA_NFORCE2:
+ printk(KERN_INFO PFX "Detected NVIDIA nForce2 chipset\n");
+ nvidia_private.wbc_mask = 0x80000000;
+ break;
+ default:
+ printk(KERN_ERR PFX "Unsupported NVIDIA chipset (device id: %04x)\n",
+ pdev->device);
+ return -ENODEV;
+ }
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &nvidia_driver;
+	bridge->dev_private_data = &nvidia_private;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS,
+ &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_nvidia_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+#ifdef CONFIG_PM
+static int agp_nvidia_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int agp_nvidia_resume(struct pci_dev *pdev)
+{
+	/* set power state to D0 and restore PCI config space */
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+ /* reconfigure AGP hardware again */
+ nvidia_configure();
+
+ return 0;
+}
+#endif
+
+
+static struct pci_device_id agp_nvidia_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NFORCE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_NVIDIA,
+ .device = PCI_DEVICE_ID_NVIDIA_NFORCE2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table);
+
+static struct pci_driver agp_nvidia_pci_driver = {
+ .name = "agpgart-nvidia",
+ .id_table = agp_nvidia_pci_table,
+ .probe = agp_nvidia_probe,
+ .remove = agp_nvidia_remove,
+#ifdef CONFIG_PM
+ .suspend = agp_nvidia_suspend,
+ .resume = agp_nvidia_resume,
+#endif
+};
+
+static int __init agp_nvidia_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_nvidia_pci_driver);
+}
+
+static void __exit agp_nvidia_cleanup(void)
+{
+ pci_unregister_driver(&agp_nvidia_pci_driver);
+ pci_dev_put(nvidia_private.dev_1);
+ pci_dev_put(nvidia_private.dev_2);
+ pci_dev_put(nvidia_private.dev_3);
+}
+
+module_init(agp_nvidia_init);
+module_exit(agp_nvidia_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("NVIDIA Corporation");
+
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
new file mode 100644
index 00000000..94821ab0
--- /dev/null
+++ b/drivers/char/agp/parisc-agp.c
@@ -0,0 +1,424 @@
+/*
+ * HP Quicksilver AGP GART routines
+ *
+ * Copyright (c) 2006, Kyle McMartin <kyle@parisc-linux.org>
+ *
+ * Based on drivers/char/agpgart/hp-agp.c which is
+ * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/klist.h>
+#include <linux/agp_backend.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+
+#include <asm/parisc-device.h>
+#include <asm/ropes.h>
+
+#include "agp.h"
+
+#define DRVNAME "quicksilver"
+#define DRVPFX DRVNAME ": "
+
+#define AGP8X_MODE_BIT 3
+#define AGP8X_MODE (1 << AGP8X_MODE_BIT)
+
+static unsigned long
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+ int type);
+
+static struct _parisc_agp_info {
+ void __iomem *ioc_regs;
+ void __iomem *lba_regs;
+
+ int lba_cap_offset;
+
+ u64 *gatt;
+ u64 gatt_entries;
+
+ u64 gart_base;
+ u64 gart_size;
+
+ int io_page_size;
+ int io_pages_per_kpage;
+} parisc_agp_info;
+
+static struct gatt_mask parisc_agp_masks[] =
+{
+ {
+ .mask = SBA_PDIR_VALID_BIT,
+ .type = 0
+ }
+};
+
+static struct aper_size_info_fixed parisc_agp_sizes[] =
+{
+ {0, 0, 0}, /* filled in by parisc_agp_fetch_size() */
+};
+
+static int
+parisc_agp_fetch_size(void)
+{
+ int size;
+
+ size = parisc_agp_info.gart_size / MB(1);
+ parisc_agp_sizes[0].size = size;
+ agp_bridge->current_size = (void *) &parisc_agp_sizes[0];
+
+ return size;
+}
+
+static int
+parisc_agp_configure(void)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+
+ agp_bridge->gart_bus_addr = info->gart_base;
+ agp_bridge->capndx = info->lba_cap_offset;
+ agp_bridge->mode = readl(info->lba_regs+info->lba_cap_offset+PCI_AGP_STATUS);
+
+ return 0;
+}
+
+static void
+parisc_agp_tlbflush(struct agp_memory *mem)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+
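+	/*
+	 * Issue a range purge: the GART base ORed with ilog2(size) encodes the
+	 * span to flush (an assumption, mirroring how sba_iommu drives
+	 * IOC_PCOM); the readq forces the posted write to complete.
+	 */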
+ writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM);
+ readq(info->ioc_regs+IOC_PCOM); /* flush */
+}
+
+static int
+parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+ int i;
+
+ for (i = 0; i < info->gatt_entries; i++) {
+ info->gatt[i] = (unsigned long)agp_bridge->scratch_page;
+ }
+
+ return 0;
+}
+
+static int
+parisc_agp_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+
+ info->gatt[0] = SBA_AGPGART_COOKIE;
+
+ return 0;
+}
+
+static int
+parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+ int i, k;
+ off_t j, io_pg_start;
+ int io_pg_count;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
+ io_pg_start = info->io_pages_per_kpage * pg_start;
+ io_pg_count = info->io_pages_per_kpage * mem->page_count;
+ if ((io_pg_start + io_pg_count) > info->gatt_entries) {
+ return -EINVAL;
+ }
+
+ j = io_pg_start;
+ while (j < (io_pg_start + io_pg_count)) {
+ if (info->gatt[j])
+ return -EBUSY;
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
+ unsigned long paddr;
+
+ paddr = page_to_phys(mem->pages[i]);
+ for (k = 0;
+ k < info->io_pages_per_kpage;
+ k++, j++, paddr += info->io_page_size) {
+ info->gatt[j] =
+ parisc_agp_mask_memory(agp_bridge,
+ paddr, type);
+ }
+ }
+
+ agp_bridge->driver->tlb_flush(mem);
+
+ return 0;
+}
+
+static int
+parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+ int i, io_pg_start, io_pg_count;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
+ io_pg_start = info->io_pages_per_kpage * pg_start;
+ io_pg_count = info->io_pages_per_kpage * mem->page_count;
+ for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
+ info->gatt[i] = agp_bridge->scratch_page;
+ }
+
+ agp_bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static unsigned long
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+ int type)
+{
+ return SBA_PDIR_VALID_BIT | addr;
+}
+
+static void
+parisc_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+ u32 command;
+
+ command = readl(info->lba_regs + info->lba_cap_offset + PCI_AGP_STATUS);
+
+ command = agp_collect_device_status(bridge, mode, command);
+ command |= 0x00000100;
+
+ writel(command, info->lba_regs + info->lba_cap_offset + PCI_AGP_COMMAND);
+
+ agp_device_command(command, (mode & AGP8X_MODE) != 0);
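+	/* The second argument asks agp_device_command() to use AGP 3.x
+	 * semantics whenever 8x mode was requested. */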
+}
+
+static const struct agp_bridge_driver parisc_agp_driver = {
+ .owner = THIS_MODULE,
+ .size_type = FIXED_APER_SIZE,
+ .configure = parisc_agp_configure,
+ .fetch_size = parisc_agp_fetch_size,
+ .tlb_flush = parisc_agp_tlbflush,
+ .mask_memory = parisc_agp_mask_memory,
+ .masks = parisc_agp_masks,
+ .agp_enable = parisc_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = parisc_agp_create_gatt_table,
+ .free_gatt_table = parisc_agp_free_gatt_table,
+ .insert_memory = parisc_agp_insert_memory,
+ .remove_memory = parisc_agp_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ .cant_use_aperture = true,
+};
+
+static int __init
+agp_ioc_init(void __iomem *ioc_regs)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+ u64 iova_base, *io_pdir, io_tlb_ps;
+ int io_tlb_shift;
+
+ printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");
+
+ info->ioc_regs = ioc_regs;
+
+ io_tlb_ps = readq(info->ioc_regs+IOC_TCNFG);
+ switch (io_tlb_ps) {
+ case 0: io_tlb_shift = 12; break;
+ case 1: io_tlb_shift = 13; break;
+ case 2: io_tlb_shift = 14; break;
+ case 3: io_tlb_shift = 16; break;
+ default:
+ printk(KERN_ERR DRVPFX "Invalid IOTLB page size "
+ "configuration 0x%llx\n", io_tlb_ps);
+ info->gatt = NULL;
+ info->gatt_entries = 0;
+ return -ENODEV;
+ }
+ info->io_page_size = 1 << io_tlb_shift;
+ info->io_pages_per_kpage = PAGE_SIZE / info->io_page_size;
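+	/*
+	 * Example (assumption): with 4k kernel pages and io_tlb_ps == 0 (4k IO
+	 * pages), io_pages_per_kpage is 1; larger kernel pages map to several
+	 * IO PDIR entries per kernel page.
+	 */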
+
+ iova_base = readq(info->ioc_regs+IOC_IBASE) & ~0x1;
+ info->gart_base = iova_base + PLUTO_IOVA_SIZE - PLUTO_GART_SIZE;
+
+ info->gart_size = PLUTO_GART_SIZE;
+ info->gatt_entries = info->gart_size / info->io_page_size;
+
+ io_pdir = phys_to_virt(readq(info->ioc_regs+IOC_PDIR_BASE));
+ info->gatt = &io_pdir[(PLUTO_IOVA_SIZE/2) >> PAGE_SHIFT];
+
+ if (info->gatt[0] != SBA_AGPGART_COOKIE) {
+ info->gatt = NULL;
+ info->gatt_entries = 0;
+ printk(KERN_ERR DRVPFX "No reserved IO PDIR entry found; "
+ "GART disabled\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int
+lba_find_capability(int cap)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+ u16 status;
+ u8 pos, id;
+ int ttl = 48;
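+	/* Bound the walk to guard against looped capability lists in broken
+	 * hardware; 48 mirrors the PCI core's find-capability TTL. */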
+
+ status = readw(info->lba_regs + PCI_STATUS);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+ pos = readb(info->lba_regs + PCI_CAPABILITY_LIST);
+ while (ttl-- && pos >= 0x40) {
+ pos &= ~3;
+ id = readb(info->lba_regs + pos + PCI_CAP_LIST_ID);
+ if (id == 0xff)
+ break;
+ if (id == cap)
+ return pos;
+ pos = readb(info->lba_regs + pos + PCI_CAP_LIST_NEXT);
+ }
+ return 0;
+}
+
+static int __init
+agp_lba_init(void __iomem *lba_hpa)
+{
+ struct _parisc_agp_info *info = &parisc_agp_info;
+ int cap;
+
+ info->lba_regs = lba_hpa;
+ info->lba_cap_offset = lba_find_capability(PCI_CAP_ID_AGP);
+
+ cap = readl(lba_hpa + info->lba_cap_offset) & 0xff;
+ if (cap != PCI_CAP_ID_AGP) {
+ printk(KERN_ERR DRVPFX "Invalid capability ID 0x%02x at 0x%x\n",
+ cap, info->lba_cap_offset);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init
+parisc_agp_setup(void __iomem *ioc_hpa, void __iomem *lba_hpa)
+{
+ struct pci_dev *fake_bridge_dev = NULL;
+ struct agp_bridge_data *bridge;
+ int error = 0;
+
+ fake_bridge_dev = alloc_pci_dev();
+ if (!fake_bridge_dev) {
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ error = agp_ioc_init(ioc_hpa);
+ if (error)
+ goto fail;
+
+ error = agp_lba_init(lba_hpa);
+ if (error)
+ goto fail;
+
+ bridge = agp_alloc_bridge();
+ if (!bridge) {
+ error = -ENOMEM;
+ goto fail;
+ }
+ bridge->driver = &parisc_agp_driver;
+
+ fake_bridge_dev->vendor = PCI_VENDOR_ID_HP;
+ fake_bridge_dev->device = PCI_DEVICE_ID_HP_PCIX_LBA;
+ bridge->dev = fake_bridge_dev;
+
+ error = agp_add_bridge(bridge);
+ if (error)
+ goto fail;
+ return 0;
+
+fail:
+ kfree(fake_bridge_dev);
+ return error;
+}
+
+static int
+find_quicksilver(struct device *dev, void *data)
+{
+ struct parisc_device **lba = data;
+ struct parisc_device *padev = to_parisc_device(dev);
+
+ if (IS_QUICKSILVER(padev))
+ *lba = padev;
+
+ return 0;
+}
+
+static int
+parisc_agp_init(void)
+{
+ extern struct sba_device *sba_list;
+
+ int err = -1;
+ struct parisc_device *sba = NULL, *lba = NULL;
+ struct lba_device *lbadev = NULL;
+
+ if (!sba_list)
+ goto out;
+
+ /* Find our parent Pluto */
+ sba = sba_list->dev;
+ if (!IS_PLUTO(sba)) {
+ printk(KERN_INFO DRVPFX "No Pluto found, so no AGPGART for you.\n");
+ goto out;
+ }
+
+ /* Now search our Pluto for our precious AGP device... */
+ device_for_each_child(&sba->dev, &lba, find_quicksilver);
+
+ if (!lba) {
+ printk(KERN_INFO DRVPFX "No AGP devices found.\n");
+ goto out;
+ }
+
+ lbadev = parisc_get_drvdata(lba);
+
+ /* w00t, let's go find our cookies... */
+ parisc_agp_setup(sba_list->ioc[0].ioc_hpa, lbadev->hba.base_addr);
+
+ return 0;
+
+out:
+ return err;
+}
+
+module_init(parisc_agp_init);
+
+MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
new file mode 100644
index 00000000..ffa888cd
--- /dev/null
+++ b/drivers/char/agp/sgi-agp.c
@@ -0,0 +1,341 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * SGI TIOCA AGPGART routines.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/io.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/tioca_provider.h>
+#include "agp.h"
+
+extern int agp_memory_reserved;
+extern uint32_t tioca_gart_found;
+extern struct list_head tioca_list;
+static struct agp_bridge_data **sgi_tioca_agp_bridges;
+
+/*
+ * The aperture size and related information are set up at TIOCA init time.
+ * Values for this table will be extracted and filled in at
+ * sgi_tioca_fetch_size() time.
+ */
+
+static struct aper_size_info_fixed sgi_tioca_sizes[] = {
+ {0, 0, 0},
+};
+
+static struct page *sgi_tioca_alloc_page(struct agp_bridge_data *bridge)
+{
+ struct page *page;
+ int nid;
+ struct tioca_kernel *info =
+ (struct tioca_kernel *)bridge->dev_private_data;
+
+ nid = info->ca_closest_node;
+ page = alloc_pages_node(nid, GFP_KERNEL, 0);
+ if (!page)
+ return NULL;
+
+ get_page(page);
+ atomic_inc(&agp_bridge->current_memory_agp);
+ return page;
+}
+
+/*
+ * Flush GART TLBs. We cannot flush selectively based on memory, so the
+ * mem arg is ignored.
+ */
+
+static void sgi_tioca_tlbflush(struct agp_memory *mem)
+{
+ tioca_tlbflush(mem->bridge->dev_private_data);
+}
+
+/*
+ * Given an address of a host physical page, turn it into a valid gart
+ * entry.
+ */
+static unsigned long
+sgi_tioca_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+ int type)
+{
+ return tioca_physpage_to_gart(addr);
+}
+
+static void sgi_tioca_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ tioca_fastwrite_enable(bridge->dev_private_data);
+}
+
+/*
+ * sgi_tioca_configure() doesn't have anything to do since the base CA driver
+ * has already set up the GART.
+ */
+
+static int sgi_tioca_configure(void)
+{
+ return 0;
+}
+
+/*
+ * Determine the gfx aperture size. This has already been determined by the
+ * CA driver init, so we just need to set the agp_bridge values accordingly.
+ */
+
+static int sgi_tioca_fetch_size(void)
+{
+ struct tioca_kernel *info =
+ (struct tioca_kernel *)agp_bridge->dev_private_data;
+
+ sgi_tioca_sizes[0].size = info->ca_gfxap_size / MB(1);
+ sgi_tioca_sizes[0].num_entries = info->ca_gfxgart_entries;
+
+ return sgi_tioca_sizes[0].size;
+}
+
+static int sgi_tioca_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct tioca_kernel *info =
+ (struct tioca_kernel *)bridge->dev_private_data;
+
+ bridge->gatt_table_real = (u32 *) info->ca_gfxgart;
+ bridge->gatt_table = bridge->gatt_table_real;
+ bridge->gatt_bus_addr = info->ca_gfxgart_base;
+
+ return 0;
+}
+
+static int sgi_tioca_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ return 0;
+}
+
+static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int num_entries;
+ size_t i;
+ off_t j;
+ void *temp;
+ struct agp_bridge_data *bridge;
+ u64 *table;
+
+ bridge = mem->bridge;
+ if (!bridge)
+ return -EINVAL;
+
+ table = (u64 *)bridge->gatt_table;
+
+ temp = bridge->current_size;
+
+ switch (bridge->driver->size_type) {
+ case U8_APER_SIZE:
+ num_entries = A_SIZE_8(temp)->num_entries;
+ break;
+ case U16_APER_SIZE:
+ num_entries = A_SIZE_16(temp)->num_entries;
+ break;
+ case U32_APER_SIZE:
+ num_entries = A_SIZE_32(temp)->num_entries;
+ break;
+ case FIXED_APER_SIZE:
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+ break;
+	case LVL2_APER_SIZE:
+		return -EINVAL;
+ default:
+ num_entries = 0;
+ break;
+ }
+
+ num_entries -= agp_memory_reserved / PAGE_SIZE;
+ if (num_entries < 0)
+ num_entries = 0;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ j = pg_start;
+
+ while (j < (pg_start + mem->page_count)) {
+ if (table[j])
+ return -EBUSY;
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ bridge->driver->cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ table[j] =
+ bridge->driver->mask_memory(bridge,
+ page_to_phys(mem->pages[i]),
+ mem->type);
+ }
+
+ bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static int sgi_tioca_remove_memory(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ size_t i;
+ struct agp_bridge_data *bridge;
+ u64 *table;
+
+ bridge = mem->bridge;
+ if (!bridge)
+ return -EINVAL;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
+ table = (u64 *)bridge->gatt_table;
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ table[i] = 0;
+ }
+
+ bridge->driver->tlb_flush(mem);
+ return 0;
+}
+
+static void sgi_tioca_cache_flush(void)
+{
+}
+
+/*
+ * Cleanup. Nothing to do as the CA driver owns the GART.
+ */
+
+static void sgi_tioca_cleanup(void)
+{
+}
+
+static struct agp_bridge_data *sgi_tioca_find_bridge(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge;
+
+ list_for_each_entry(bridge, &agp_bridges, list) {
+ if (bridge->dev->bus == pdev->bus)
+ break;
+ }
+ return bridge;
+}
+
+const struct agp_bridge_driver sgi_tioca_driver = {
+ .owner = THIS_MODULE,
+ .size_type = U16_APER_SIZE,
+ .configure = sgi_tioca_configure,
+ .fetch_size = sgi_tioca_fetch_size,
+ .cleanup = sgi_tioca_cleanup,
+ .tlb_flush = sgi_tioca_tlbflush,
+ .mask_memory = sgi_tioca_mask_memory,
+ .agp_enable = sgi_tioca_agp_enable,
+ .cache_flush = sgi_tioca_cache_flush,
+ .create_gatt_table = sgi_tioca_create_gatt_table,
+ .free_gatt_table = sgi_tioca_free_gatt_table,
+ .insert_memory = sgi_tioca_insert_memory,
+ .remove_memory = sgi_tioca_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = sgi_tioca_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ .cant_use_aperture = true,
+ .needs_scratch_page = false,
+ .num_aperture_sizes = 1,
+};
+
+static int __devinit agp_sgi_init(void)
+{
+ unsigned int j;
+ struct tioca_kernel *info;
+ struct pci_dev *pdev = NULL;
+
+ if (tioca_gart_found)
+ printk(KERN_INFO PFX "SGI TIO CA GART driver initialized.\n");
+ else
+ return 0;
+
+ sgi_tioca_agp_bridges = kmalloc(tioca_gart_found *
+ sizeof(struct agp_bridge_data *),
+ GFP_KERNEL);
+ if (!sgi_tioca_agp_bridges)
+ return -ENOMEM;
+
+ j = 0;
+ list_for_each_entry(info, &tioca_list, ca_list) {
+ struct list_head *tmp;
+ if (list_empty(info->ca_devices))
+ continue;
+ list_for_each(tmp, info->ca_devices) {
+ u8 cap_ptr;
+ pdev = pci_dev_b(tmp);
+ if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
+ continue;
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ continue;
+ }
+ sgi_tioca_agp_bridges[j] = agp_alloc_bridge();
+ printk(KERN_INFO PFX "bridge %d = 0x%p\n", j,
+ sgi_tioca_agp_bridges[j]);
+ if (sgi_tioca_agp_bridges[j]) {
+ sgi_tioca_agp_bridges[j]->dev = pdev;
+ sgi_tioca_agp_bridges[j]->dev_private_data = info;
+ sgi_tioca_agp_bridges[j]->driver = &sgi_tioca_driver;
+ sgi_tioca_agp_bridges[j]->gart_bus_addr =
+ info->ca_gfxap_base;
+ sgi_tioca_agp_bridges[j]->mode = (0x7D << 24) | /* 126 requests */
+ (0x1 << 9) | /* SBA supported */
+ (0x1 << 5) | /* 64-bit addresses supported */
+ (0x1 << 4) | /* FW supported */
+ (0x1 << 3) | /* AGP 3.0 mode */
+ 0x2; /* 8x transfer only */
+ sgi_tioca_agp_bridges[j]->current_size =
+ sgi_tioca_agp_bridges[j]->previous_size =
+ (void *)&sgi_tioca_sizes[0];
+ agp_add_bridge(sgi_tioca_agp_bridges[j]);
+ }
+ j++;
+ }
+
+ agp_find_bridge = &sgi_tioca_find_bridge;
+ return 0;
+}
+
+static void __devexit agp_sgi_cleanup(void)
+{
+ kfree(sgi_tioca_agp_bridges);
+ sgi_tioca_agp_bridges = NULL;
+}
+
+module_init(agp_sgi_init);
+module_exit(agp_sgi_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
new file mode 100644
index 00000000..29aacd81
--- /dev/null
+++ b/drivers/char/agp/sis-agp.c
@@ -0,0 +1,454 @@
+/*
+ * SiS AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include <linux/delay.h>
+#include "agp.h"
+
+#define SIS_ATTBASE 0x90
+#define SIS_APSIZE 0x94
+#define SIS_TLBCNTRL 0x97
+#define SIS_TLBFLUSH 0x98
+
+#define PCI_DEVICE_ID_SI_662 0x0662
+#define PCI_DEVICE_ID_SI_671 0x0671
+
+static bool __devinitdata agp_sis_force_delay = false;
+static int __devinitdata agp_sis_agp_spec = -1;
+
+static int sis_fetch_size(void)
+{
+ u8 temp_size;
+ int i;
+ struct aper_size_info_8 *values;
+
+ pci_read_config_byte(agp_bridge->dev, SIS_APSIZE, &temp_size);
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
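+	/*
+	 * Match either the exact value or the value with the low three bits
+	 * masked off (assumption: on some SiS revisions those bits carry
+	 * enable/reserved flags rather than size information).
+	 */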
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if ((temp_size == values[i].size_value) ||
+ ((temp_size & ~(0x07)) ==
+ (values[i].size_value & ~(0x07)))) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static void sis_tlbflush(struct agp_memory *mem)
+{
+ pci_write_config_byte(agp_bridge->dev, SIS_TLBFLUSH, 0x02);
+}
+
+static int sis_configure(void)
+{
+ u32 temp;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+ pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05);
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE,
+ agp_bridge->gatt_bus_addr);
+ pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
+ current_size->size_value);
+ return 0;
+}
+
+static void sis_cleanup(void)
+{
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
+ (previous_size->size_value & ~(0x03)));
+}
+
+static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ struct pci_dev *device = NULL;
+ u32 command;
+ int rate;
+
+ dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
+ agp_bridge->major_version, agp_bridge->minor_version);
+
+ pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
+ command = agp_collect_device_status(bridge, mode, command);
+ command |= AGPSTAT_AGP_ENABLE;
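+	/* The AGP 3.0 rate field encodes 1 = 4x and 2 = 8x; shifting left by
+	 * two yields the multiplier printed in the messages below. */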
+ rate = (command & 0x7) << 2;
+
+ for_each_pci_dev(device) {
+ u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (!agp)
+ continue;
+
+ dev_info(&agp_bridge->dev->dev, "putting AGP V3 device at %s into %dx mode\n",
+ pci_name(device), rate);
+
+ pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);
+
+		/*
+		 * Weird: on some SiS chipsets, any rate change in the target
+		 * command register triggers a 5ms screwup during which the
+		 * master cannot be configured.
+		 */
+ if (device->device == bridge->dev->device) {
+ dev_info(&agp_bridge->dev->dev, "SiS delay workaround: giving bridge time to recover\n");
+ msleep(10);
+ }
+ }
+}
+
+static const struct aper_size_info_8 sis_generic_sizes[7] =
+{
+ {256, 65536, 6, 99},
+ {128, 32768, 5, 83},
+ {64, 16384, 4, 67},
+ {32, 8192, 3, 51},
+ {16, 4096, 2, 35},
+ {8, 2048, 1, 19},
+ {4, 1024, 0, 3}
+};
+
+static struct agp_bridge_driver sis_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = sis_generic_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .needs_scratch_page = true,
+ .configure = sis_configure,
+ .fetch_size = sis_fetch_size,
+ .cleanup = sis_cleanup,
+ .tlb_flush = sis_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+/* Chipsets that require the 'delay hack' */
+static int sis_broken_chipsets[] __devinitdata = {
+	PCI_DEVICE_ID_SI_648,
+	PCI_DEVICE_ID_SI_746,
+	0 /* terminator */
+};
+
+static void __devinit sis_get_driver(struct agp_bridge_data *bridge)
+{
+ int i;
+
+	for (i = 0; sis_broken_chipsets[i] != 0; ++i)
+		if (bridge->dev->device == sis_broken_chipsets[i])
+			break;
+
+	if (sis_broken_chipsets[i] || agp_sis_force_delay)
+		sis_driver.agp_enable = sis_delayed_enable;
+
+	/*
+	 * SiS chipsets that report less than AGP 3.5 are not actually fully
+	 * AGP 3 compliant, so only switch to the generic AGP 3 routines for
+	 * 3.5 and above (or when forced via agp_sis_agp_spec).
+	 */
+	if ((agp_bridge->major_version == 3 && agp_bridge->minor_version >= 5
+	     && agp_sis_agp_spec != 0) || agp_sis_agp_spec == 1) {
+ sis_driver.aperture_sizes = agp3_generic_sizes;
+ sis_driver.size_type = U16_APER_SIZE;
+ sis_driver.num_aperture_sizes = AGP_GENERIC_SIZES_ENTRIES;
+ sis_driver.configure = agp3_generic_configure;
+ sis_driver.fetch_size = agp3_generic_fetch_size;
+ sis_driver.cleanup = agp3_generic_cleanup;
+ sis_driver.tlb_flush = agp3_generic_tlbflush;
+ }
+}
+
+
+static int __devinit agp_sis_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ u8 cap_ptr;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+
+ dev_info(&pdev->dev, "SiS chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver = &sis_driver;
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+
+ get_agp_version(bridge);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
+ sis_get_driver(bridge);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_sis_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+#ifdef CONFIG_PM
+
+static int agp_sis_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int agp_sis_resume(struct pci_dev *pdev)
+{
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ return sis_driver.configure();
+}
+
+#endif /* CONFIG_PM */
+
+static struct pci_device_id agp_sis_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_5591,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_530,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_540,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_550,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_620,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_630,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_635,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_645,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_646,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_648,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_650,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_651,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_655,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_661,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_662,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_671,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_730,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_735,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_740,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_741,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_745,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_746,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_sis_pci_table);
+
+static struct pci_driver agp_sis_pci_driver = {
+ .name = "agpgart-sis",
+ .id_table = agp_sis_pci_table,
+ .probe = agp_sis_probe,
+ .remove = agp_sis_remove,
+#ifdef CONFIG_PM
+ .suspend = agp_sis_suspend,
+ .resume = agp_sis_resume,
+#endif
+};
+
+static int __init agp_sis_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_sis_pci_driver);
+}
+
+static void __exit agp_sis_cleanup(void)
+{
+ pci_unregister_driver(&agp_sis_pci_driver);
+}
+
+module_init(agp_sis_init);
+module_exit(agp_sis_cleanup);
+
+module_param(agp_sis_force_delay, bool, 0);
+MODULE_PARM_DESC(agp_sis_force_delay, "forces sis delay hack");
+module_param(agp_sis_agp_spec, int, 0);
+MODULE_PARM_DESC(agp_sis_agp_spec, "0=force sis init, 1=force generic agp3 init, default: autodetect");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
new file mode 100644
index 00000000..f02f9b07
--- /dev/null
+++ b/drivers/char/agp/sworks-agp.c
@@ -0,0 +1,569 @@
+/*
+ * Serverworks AGPGART routines.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+#define SVWRKS_COMMAND 0x04
+#define SVWRKS_APSIZE 0x10
+#define SVWRKS_MMBASE 0x14
+#define SVWRKS_CACHING 0x4b
+#define SVWRKS_AGP_ENABLE 0x60
+#define SVWRKS_FEATURE 0x68
+
+#define SVWRKS_SIZE_MASK 0xfe000000
+
+/* Memory mapped registers */
+#define SVWRKS_GART_CACHE 0x02
+#define SVWRKS_GATTBASE 0x04
+#define SVWRKS_TLBFLUSH 0x10
+#define SVWRKS_POSTFLUSH 0x14
+#define SVWRKS_DIRFLUSH 0x0c
+
+
+struct serverworks_page_map {
+ unsigned long *real;
+ unsigned long __iomem *remapped;
+};
+
+static struct _serverworks_private {
+ struct pci_dev *svrwrks_dev; /* device one */
+ volatile u8 __iomem *registers;
+ struct serverworks_page_map **gatt_pages;
+ int num_tables;
+ struct serverworks_page_map scratch_dir;
+
+ int gart_addr_ofs;
+ int mm_addr_ofs;
+} serverworks_private;
+
+static int serverworks_create_page_map(struct serverworks_page_map *page_map)
+{
+ int i;
+
+ page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+ if (page_map->real == NULL) {
+ return -ENOMEM;
+ }
+
+ set_memory_uc((unsigned long)page_map->real, 1);
+ page_map->remapped = page_map->real;
+
+ for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
+ writel(agp_bridge->scratch_page, page_map->remapped+i);
+ /* Red Pen: Everyone else does pci posting flush here */
+
+ return 0;
+}
+
+static void serverworks_free_page_map(struct serverworks_page_map *page_map)
+{
+ set_memory_wb((unsigned long)page_map->real, 1);
+ free_page((unsigned long) page_map->real);
+}
+
+static void serverworks_free_gatt_pages(void)
+{
+ int i;
+ struct serverworks_page_map **tables;
+ struct serverworks_page_map *entry;
+
+ tables = serverworks_private.gatt_pages;
+ for (i = 0; i < serverworks_private.num_tables; i++) {
+ entry = tables[i];
+ if (entry != NULL) {
+ if (entry->real != NULL) {
+ serverworks_free_page_map(entry);
+ }
+ kfree(entry);
+ }
+ }
+ kfree(tables);
+}
+
+static int serverworks_create_gatt_pages(int nr_tables)
+{
+ struct serverworks_page_map **tables;
+ struct serverworks_page_map *entry;
+ int retval = 0;
+ int i;
+
+ tables = kzalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *),
+ GFP_KERNEL);
+ if (tables == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_tables; i++) {
+ entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
+ if (entry == NULL) {
+ retval = -ENOMEM;
+ break;
+ }
+ tables[i] = entry;
+ retval = serverworks_create_page_map(entry);
+ if (retval != 0) break;
+ }
+ serverworks_private.num_tables = nr_tables;
+ serverworks_private.gatt_pages = tables;
+
+ if (retval != 0) serverworks_free_gatt_pages();
+
+ return retval;
+}
+
+#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
+ GET_PAGE_DIR_IDX(addr)]->remapped)
+
+#ifndef GET_PAGE_DIR_OFF
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#endif
+
+#ifndef GET_PAGE_DIR_IDX
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
+#endif
+
+#ifndef GET_GATT_OFF
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#endif
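+/*
+ * Address decomposition implied by the macros above: bits 31:22 of a
+ * GART bus address select the page-directory entry (each directory slot
+ * covers 4M, i.e. 1024 GATT entries of 4k each), and bits 21:12 select
+ * the GATT entry within that slot.
+ */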
+
+static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct aper_size_info_lvl2 *value;
+ struct serverworks_page_map page_dir;
+ int retval;
+ u32 temp;
+ int i;
+
+ value = A_SIZE_LVL2(agp_bridge->current_size);
+ retval = serverworks_create_page_map(&page_dir);
+ if (retval != 0) {
+ return retval;
+ }
+ retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
+ if (retval != 0) {
+ serverworks_free_page_map(&page_dir);
+ return retval;
+ }
+ /* Create a fake scratch directory */
+ for (i = 0; i < 1024; i++) {
+ writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
+ writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
+ }
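+	/*
+	 * At this point every directory slot points at the scratch directory
+	 * and every scratch-directory slot points at the scratch page, so
+	 * stray translations hit a harmless page until the real GATT pages
+	 * are wired up below.
+	 */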
+
+ retval = serverworks_create_gatt_pages(value->num_entries / 1024);
+ if (retval != 0) {
+ serverworks_free_page_map(&page_dir);
+ serverworks_free_page_map(&serverworks_private.scratch_dir);
+ return retval;
+ }
+
+ agp_bridge->gatt_table_real = (u32 *)page_dir.real;
+ agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
+ agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+
+	/*
+	 * Get the address of the GART region. This is a bus address even on
+	 * the Alpha, because it's used to program the AGP master, not the CPU.
+	 */
+
+	pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* Calculate the agp offset */
+ for (i = 0; i < value->num_entries / 1024; i++)
+ writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
+
+ return 0;
+}
+
+static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ struct serverworks_page_map page_dir;
+
+ page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
+ page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
+
+ serverworks_free_gatt_pages();
+ serverworks_free_page_map(&page_dir);
+ serverworks_free_page_map(&serverworks_private.scratch_dir);
+ return 0;
+}
+
+static int serverworks_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ u32 temp2;
+ struct aper_size_info_lvl2 *values;
+
+ values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
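+	/*
+	 * BAR-style size probe: save the register, write the size mask, read
+	 * back which bits stick, then restore the original value; the
+	 * surviving bits identify the aperture size.
+	 */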
+	pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp);
+	pci_write_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs,
+			       SVWRKS_SIZE_MASK);
+	pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp2);
+	pci_write_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, temp);
+ temp2 &= SVWRKS_SIZE_MASK;
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp2 == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This routine could be implemented by taking the addresses written to
+ * the GATT and flushing them individually. However, it currently just
+ * flushes the whole table, which is probably more efficient, since
+ * agp_memory blocks can span a large number of entries.
+ */
+static void serverworks_tlbflush(struct agp_memory *temp)
+{
+ unsigned long timeout;
+
+ writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
+ timeout = jiffies + 3*HZ;
+ while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) {
+ cpu_relax();
+ if (time_after(jiffies, timeout)) {
+ dev_err(&serverworks_private.svrwrks_dev->dev,
+ "TLB post flush took more than 3 seconds\n");
+ break;
+ }
+ }
+
+ writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
+ timeout = jiffies + 3*HZ;
+ while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) {
+ cpu_relax();
+ if (time_after(jiffies, timeout)) {
+ dev_err(&serverworks_private.svrwrks_dev->dev,
+ "TLB Dir flush took more than 3 seconds\n");
+ break;
+ }
+ }
+}
+
+static int serverworks_configure(void)
+{
+ struct aper_size_info_lvl2 *current_size;
+ u32 temp;
+ u8 enable_reg;
+ u16 cap_reg;
+
+ current_size = A_SIZE_LVL2(agp_bridge->current_size);
+
+ /* Get the memory mapped registers */
+ pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
+ temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
+ if (!serverworks_private.registers) {
+ dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp);
+ return -ENOMEM;
+ }
+
+ writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
+ readb(serverworks_private.registers+SVWRKS_GART_CACHE); /* PCI Posting. */
+
+ writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
+ readl(serverworks_private.registers+SVWRKS_GATTBASE); /* PCI Posting. */
+
+ cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
+ cap_reg &= ~0x0007;
+ cap_reg |= 0x4;
+ writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
+ readw(serverworks_private.registers+SVWRKS_COMMAND);
+
+	pci_read_config_byte(serverworks_private.svrwrks_dev, SVWRKS_AGP_ENABLE, &enable_reg);
+	enable_reg |= 0x1; /* AGP Enable bit */
+	pci_write_config_byte(serverworks_private.svrwrks_dev, SVWRKS_AGP_ENABLE, enable_reg);
+ serverworks_tlbflush(NULL);
+
+ agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(serverworks_private.svrwrks_dev,
+ agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);
+
+ pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
+ enable_reg &= ~0x3;
+ pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);
+
+ pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
+ enable_reg |= (1<<6);
+	pci_write_config_byte(agp_bridge->dev, SVWRKS_FEATURE, enable_reg);
+
+ return 0;
+}
+
+static void serverworks_cleanup(void)
+{
+ iounmap((void __iomem *) serverworks_private.registers);
+}
+
+static int serverworks_insert_memory(struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i, j, num_entries;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+
+ num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+ if ((pg_start + mem->page_count) > num_entries) {
+ return -EINVAL;
+ }
+
+ j = pg_start;
+ while (j < (pg_start + mem->page_count)) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
+ return -EBUSY;
+ j++;
+ }
+
+ if (!mem->is_flushed) {
+ global_cache_flush();
+ mem->is_flushed = true;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mem->type),
+ cur_gatt+GET_GATT_OFF(addr));
+ }
+ serverworks_tlbflush(mem);
+ return 0;
+}
+
+static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+ unsigned long __iomem *cur_gatt;
+ unsigned long addr;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
+ global_cache_flush();
+ serverworks_tlbflush(mem);
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ }
+
+ serverworks_tlbflush(mem);
+ return 0;
+}
+
+static const struct gatt_mask serverworks_masks[] =
+{
+ {.mask = 1, .type = 0}
+};
+
+static const struct aper_size_info_lvl2 serverworks_sizes[7] =
+{
+ {2048, 524288, 0x80000000},
+ {1024, 262144, 0xc0000000},
+ {512, 131072, 0xe0000000},
+ {256, 65536, 0xf0000000},
+ {128, 32768, 0xf8000000},
+ {64, 16384, 0xfc000000},
+ {32, 8192, 0xfe000000}
+};
+
+static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ u32 command;
+
+ pci_read_config_dword(serverworks_private.svrwrks_dev,
+ bridge->capndx + PCI_AGP_STATUS,
+ &command);
+
+ command = agp_collect_device_status(bridge, mode, command);
+
+ command &= ~0x10; /* disable FW */
+ command &= ~0x08;
+
+ command |= 0x100;
+
+ pci_write_config_dword(serverworks_private.svrwrks_dev,
+ bridge->capndx + PCI_AGP_COMMAND,
+ command);
+
+ agp_device_command(command, false);
+}
+
+static const struct agp_bridge_driver sworks_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = serverworks_sizes,
+ .size_type = LVL2_APER_SIZE,
+ .num_aperture_sizes = 7,
+ .configure = serverworks_configure,
+ .fetch_size = serverworks_fetch_size,
+ .cleanup = serverworks_cleanup,
+ .tlb_flush = serverworks_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = serverworks_masks,
+ .agp_enable = serverworks_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = serverworks_create_gatt_table,
+ .free_gatt_table = serverworks_free_gatt_table,
+ .insert_memory = serverworks_insert_memory,
+ .remove_memory = serverworks_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_bridge_data *bridge;
+ struct pci_dev *bridge_dev;
+ u32 temp, temp2;
+ u8 cap_ptr = 0;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+
+ switch (pdev->device) {
+ case 0x0006:
+ dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n");
+ return -ENODEV;
+
+ case PCI_DEVICE_ID_SERVERWORKS_HE:
+ case PCI_DEVICE_ID_SERVERWORKS_LE:
+ case 0x0007:
+ break;
+
+ default:
+ if (cap_ptr)
+ dev_err(&pdev->dev, "unsupported Serverworks chipset "
+ "[%04x/%04x]\n", pdev->vendor, pdev->device);
+ return -ENODEV;
+ }
+
+ /* Everything is on function 1 here, so we hardcode it */
+ bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
+ PCI_DEVFN(0, 1));
+ if (!bridge_dev) {
+ dev_info(&pdev->dev, "can't find secondary device\n");
+ return -ENODEV;
+ }
+
+ serverworks_private.svrwrks_dev = bridge_dev;
+ serverworks_private.gart_addr_ofs = 0x10;
+
+ pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
+ if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
+ if (temp2 != 0) {
+ dev_info(&pdev->dev, "64 bit aperture address, "
+ "but top bits are not zero; disabling AGP\n");
+ pci_dev_put(bridge_dev);
+ return -ENODEV;
+ }
+ serverworks_private.mm_addr_ofs = 0x18;
+ } else
+ serverworks_private.mm_addr_ofs = 0x14;
+
+ pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
+ if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ pci_read_config_dword(pdev,
+ serverworks_private.mm_addr_ofs + 4, &temp2);
+ if (temp2 != 0) {
+ dev_info(&pdev->dev, "64 bit MMIO address, but top "
+ "bits are not zero; disabling AGP\n");
+ pci_dev_put(bridge_dev);
+ return -ENODEV;
+ }
+ }
+
+ bridge = agp_alloc_bridge();
+ if (!bridge) {
+ pci_dev_put(bridge_dev);
+ return -ENOMEM;
+ }
+
+ bridge->driver = &sworks_driver;
+ bridge->dev_private_data = &serverworks_private;
+ bridge->dev = pci_dev_get(pdev);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_serverworks_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ pci_dev_put(bridge->dev);
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+ pci_dev_put(serverworks_private.svrwrks_dev);
+ serverworks_private.svrwrks_dev = NULL;
+}
+
+static struct pci_device_id agp_serverworks_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SERVERWORKS,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);
+
+static struct pci_driver agp_serverworks_pci_driver = {
+ .name = "agpgart-serverworks",
+ .id_table = agp_serverworks_pci_table,
+ .probe = agp_serverworks_probe,
+ .remove = agp_serverworks_remove,
+};
+
+static int __init agp_serverworks_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_serverworks_pci_driver);
+}
+
+static void __exit agp_serverworks_cleanup(void)
+{
+ pci_unregister_driver(&agp_serverworks_pci_driver);
+}
+
+module_init(agp_serverworks_init);
+module_exit(agp_serverworks_cleanup);
+
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
new file mode 100644
index 00000000..a32c492b
--- /dev/null
+++ b/drivers/char/agp/uninorth-agp.c
@@ -0,0 +1,722 @@
+/*
+ * UniNorth AGPGART routines.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <asm/uninorth.h>
+#include <asm/pci-bridge.h>
+#include <asm/prom.h>
+#include <asm/pmac_feature.h>
+#include "agp.h"
+
+/*
+ * NOTES on uninorth3 (G5 AGP) support:
+ *
+ * It may also be possible to use a bigger cache line size for
+ * AGP (see pmac_pci.c and look for cache line); someone needs to
+ * investigate this.
+ *
+ * The page size is hardcoded, but this may change; see asm/page.h.
+ *
+ * Jerome Glisse <j.glisse@gmail.com>
+ */
+static int uninorth_rev;
+static int is_u3;
+static u32 scratch_value;
+
+#define DEFAULT_APERTURE_SIZE 256
+#define DEFAULT_APERTURE_STRING "256"
+static char *aperture = NULL;
+
+static int uninorth_fetch_size(void)
+{
+ int i, size = 0;
+ struct aper_size_info_32 *values =
+ A_SIZE_32(agp_bridge->driver->aperture_sizes);
+
+ if (aperture) {
+ char *save = aperture;
+
+ size = memparse(aperture, &aperture) >> 20;
+ aperture = save;
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++)
+ if (size == values[i].size)
+ break;
+
+ if (i == agp_bridge->driver->num_aperture_sizes) {
+ dev_err(&agp_bridge->dev->dev, "invalid aperture size, "
+ "using default\n");
+ size = 0;
+ aperture = NULL;
+ }
+ }
+
+ if (!size) {
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++)
+ if (values[i].size == DEFAULT_APERTURE_SIZE)
+ break;
+ }
+
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *)(values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+}
+
+static void uninorth_tlbflush(struct agp_memory *mem)
+{
+ u32 ctrl = UNI_N_CFG_GART_ENABLE;
+
+ if (is_u3)
+ ctrl |= U3_N_CFG_GART_PERFRD;
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ ctrl | UNI_N_CFG_GART_INVAL);
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl);
+
+ if (!mem && uninorth_rev <= 0x30) {
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ ctrl | UNI_N_CFG_GART_2xRESET);
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ ctrl);
+ }
+}
+
+static void uninorth_cleanup(void)
+{
+ u32 tmp;
+
+ pci_read_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, &tmp);
+ if (!(tmp & UNI_N_CFG_GART_ENABLE))
+ return;
+ tmp |= UNI_N_CFG_GART_INVAL;
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, tmp);
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, 0);
+
+ if (uninorth_rev <= 0x30) {
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ UNI_N_CFG_GART_2xRESET);
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
+ 0);
+ }
+}
+
+static int uninorth_configure(void)
+{
+ struct aper_size_info_32 *current_size;
+
+ current_size = A_SIZE_32(agp_bridge->current_size);
+
+ dev_info(&agp_bridge->dev->dev, "configuring for size idx: %d\n",
+ current_size->size_value);
+
+ /* aperture size and gatt addr */
+ pci_write_config_dword(agp_bridge->dev,
+ UNI_N_CFG_GART_BASE,
+ (agp_bridge->gatt_bus_addr & 0xfffff000)
+ | current_size->size_value);
+
+ /* HACK ALERT
+ * UniNorth seems to be buggy enough not to behave properly when
+ * the AGP aperture isn't mapped at bus physical address 0
+ */
+ agp_bridge->gart_bus_addr = 0;
+#ifdef CONFIG_PPC64
+ /* Assume U3 or later on PPC64 systems */
+ /* high 4 bits of GART physical address go in UNI_N_CFG_AGP_BASE */
+ pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_AGP_BASE,
+ (agp_bridge->gatt_bus_addr >> 32) & 0xf);
+#else
+ pci_write_config_dword(agp_bridge->dev,
+ UNI_N_CFG_AGP_BASE, agp_bridge->gart_bus_addr);
+#endif
+
+ if (is_u3) {
+ pci_write_config_dword(agp_bridge->dev,
+ UNI_N_CFG_GART_DUMMY_PAGE,
+ page_to_phys(agp_bridge->scratch_page_page) >> 12);
+ }
+
+ return 0;
+}
+
+static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ int i, num_entries;
+ void *temp;
+ u32 *gp;
+ int mask_type;
+
+ if (type != mem->type)
+ return -EINVAL;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+ if (mask_type != 0) {
+ /* We know nothing of memory types */
+ return -EINVAL;
+ }
+
+ if (mem->page_count == 0)
+ return 0;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_32(temp)->num_entries;
+
+ if ((pg_start + mem->page_count) > num_entries)
+ return -EINVAL;
+
+ gp = (u32 *) &agp_bridge->gatt_table[pg_start];
+ for (i = 0; i < mem->page_count; ++i) {
+ if (gp[i] != scratch_value) {
+ dev_info(&agp_bridge->dev->dev,
+ "uninorth_insert_memory: entry 0x%x occupied (%x)\n",
+ i, gp[i]);
+ return -EBUSY;
+ }
+ }
+
+ for (i = 0; i < mem->page_count; i++) {
+ if (is_u3)
+ gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL;
+ else
+ gp[i] = cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) |
+ 0x1UL);
+ flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
+ (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
+ }
+ mb();
+ uninorth_tlbflush(mem);
+
+ return 0;
+}
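+
+/*
+ * Illustrative note on the two GART entry formats written above: U3
+ * entries are CPU-endian physical page numbers with bit 31 as the valid
+ * flag, while older UniNorth entries are little-endian 4KB-aligned
+ * physical addresses with bit 0 as the valid flag. A page at physical
+ * address 0x12345000, for example, becomes 0x80012345 on U3 and
+ * cpu_to_le32(0x12345001) otherwise.
+ */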
+
+static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+{
+ size_t i;
+ u32 *gp;
+ int mask_type;
+
+ if (type != mem->type)
+ return -EINVAL;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+ if (mask_type != 0) {
+ /* We know nothing of memory types */
+ return -EINVAL;
+ }
+
+ if (mem->page_count == 0)
+ return 0;
+
+ gp = (u32 *) &agp_bridge->gatt_table[pg_start];
+ for (i = 0; i < mem->page_count; ++i) {
+ gp[i] = scratch_value;
+ }
+ mb();
+ uninorth_tlbflush(mem);
+
+ return 0;
+}
+
+static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ u32 command, scratch, status;
+ int timeout;
+
+ pci_read_config_dword(bridge->dev,
+ bridge->capndx + PCI_AGP_STATUS,
+ &status);
+
+ command = agp_collect_device_status(bridge, mode, status);
+ command |= PCI_AGP_COMMAND_AGP;
+
+ if (uninorth_rev == 0x21) {
+ /*
+ * Darwin disables AGP 4x on this revision, so we
+ * assume it's broken. This is an AGP2 controller.
+ */
+ command &= ~AGPSTAT2_4X;
+ }
+
+ if ((uninorth_rev >= 0x30) && (uninorth_rev <= 0x33)) {
+ /*
+ * We need to set REQ_DEPTH to 7 for U3 versions 1.0, 2.1,
+ * 2.2 and 2.3; Darwin does so.
+ */
+ if ((command >> AGPSTAT_RQ_DEPTH_SHIFT) > 7)
+ command = (command & ~AGPSTAT_RQ_DEPTH)
+ | (7 << AGPSTAT_RQ_DEPTH_SHIFT);
+ }
+
+ uninorth_tlbflush(NULL);
+
+ timeout = 0;
+ do {
+ pci_write_config_dword(bridge->dev,
+ bridge->capndx + PCI_AGP_COMMAND,
+ command);
+ pci_read_config_dword(bridge->dev,
+ bridge->capndx + PCI_AGP_COMMAND,
+ &scratch);
+ } while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000);
+ if ((scratch & PCI_AGP_COMMAND_AGP) == 0)
+ dev_err(&bridge->dev->dev, "can't write UniNorth AGP "
+ "command register\n");
+
+ if (uninorth_rev >= 0x30) {
+ /* This is an AGP V3 */
+ agp_device_command(command, (status & AGPSTAT_MODE_3_0) != 0);
+ } else {
+ /* AGP V2 */
+ agp_device_command(command, false);
+ }
+
+ uninorth_tlbflush(NULL);
+}
+
+#ifdef CONFIG_PM
+/*
+ * These Power Management routines are _not_ called by the normal PCI PM layer,
+ * but directly by the video driver through function pointers in the device
+ * tree.
+ */
+static int agp_uninorth_suspend(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge;
+ u32 cmd;
+ u8 agp;
+ struct pci_dev *device = NULL;
+
+ bridge = agp_find_bridge(pdev);
+ if (bridge == NULL)
+ return -ENODEV;
+
+ /* Only one suspend supported */
+ if (bridge->dev_private_data)
+ return 0;
+
+ /* turn off AGP on the video chip, if it was enabled */
+ for_each_pci_dev(device) {
+ /* Don't touch the bridge yet, device first */
+ if (device == pdev)
+ continue;
+ /* Only deal with devices on the same bus here, no Mac has a P2P
+ * bridge on the AGP port, and mucking around the entire PCI
+ * tree is a source of problems on some machines because of a bug
+ * in some versions of pci_find_capability() when hitting a dead
+ * device
+ */
+ if (device->bus != pdev->bus)
+ continue;
+ agp = pci_find_capability(device, PCI_CAP_ID_AGP);
+ if (!agp)
+ continue;
+ pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd);
+ if (!(cmd & PCI_AGP_COMMAND_AGP))
+ continue;
+ dev_info(&pdev->dev, "disabling AGP on device %s\n",
+ pci_name(device));
+ cmd &= ~PCI_AGP_COMMAND_AGP;
+ pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd);
+ }
+
+ /* turn off AGP on the bridge */
+ agp = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd);
+ bridge->dev_private_data = (void *)(long)cmd;
+ if (cmd & PCI_AGP_COMMAND_AGP) {
+ dev_info(&pdev->dev, "disabling AGP on bridge\n");
+ cmd &= ~PCI_AGP_COMMAND_AGP;
+ pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd);
+ }
+ /* turn off the GART */
+ uninorth_cleanup();
+
+ return 0;
+}
+
+static int agp_uninorth_resume(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge;
+ u32 command;
+
+ bridge = agp_find_bridge(pdev);
+ if (bridge == NULL)
+ return -ENODEV;
+
+ command = (long)bridge->dev_private_data;
+ bridge->dev_private_data = NULL;
+ if (!(command & PCI_AGP_COMMAND_AGP))
+ return 0;
+
+ uninorth_agp_enable(bridge, command);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ char *table;
+ char *table_end;
+ int size;
+ int page_order;
+ int num_entries;
+ int i;
+ void *temp;
+ struct page *page;
+ struct page **pages;
+
+ /* We can't handle 2 level gatt's */
+ if (bridge->driver->size_type == LVL2_APER_SIZE)
+ return -EINVAL;
+
+ table = NULL;
+ i = bridge->aperture_size_idx;
+ temp = bridge->current_size;
+ size = page_order = num_entries = 0;
+
+ do {
+ size = A_SIZE_32(temp)->size;
+ page_order = A_SIZE_32(temp)->page_order;
+ num_entries = A_SIZE_32(temp)->num_entries;
+
+ table = (char *) __get_free_pages(GFP_KERNEL, page_order);
+
+ if (table == NULL) {
+ i++;
+ bridge->current_size = A_IDX32(bridge);
+ } else {
+ bridge->aperture_size_idx = i;
+ }
+ } while (!table && (i < bridge->driver->num_aperture_sizes));
+
+ if (table == NULL)
+ return -ENOMEM;
+
+ pages = kmalloc((1 << page_order) * sizeof(struct page*), GFP_KERNEL);
+ if (pages == NULL)
+ goto enomem;
+
+ table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+ for (page = virt_to_page(table), i = 0; page <= virt_to_page(table_end);
+ page++, i++) {
+ SetPageReserved(page);
+ pages[i] = page;
+ }
+
+ bridge->gatt_table_real = (u32 *) table;
+ /* Need to clear out any dirty data still sitting in caches */
+ flush_dcache_range((unsigned long)table,
+ (unsigned long)table_end + 1);
+ bridge->gatt_table = vmap(pages, (1 << page_order), 0, PAGE_KERNEL_NCG);
+
+ if (bridge->gatt_table == NULL)
+ goto enomem;
+
+ bridge->gatt_bus_addr = virt_to_phys(table);
+
+ if (is_u3)
+ scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL;
+ else
+ scratch_value = cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) |
+ 0x1UL);
+ for (i = 0; i < num_entries; i++)
+ bridge->gatt_table[i] = scratch_value;
+
+ return 0;
+
+enomem:
+ kfree(pages);
+ if (table)
+ free_pages((unsigned long)table, page_order);
+ return -ENOMEM;
+}
+
+static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ char *table, *table_end;
+ void *temp;
+ struct page *page;
+
+ temp = bridge->current_size;
+ page_order = A_SIZE_32(temp)->page_order;
+
+ /* Do not worry about freeing memory, because if this is
+ * called, then all agp memory is deallocated and removed
+ * from the table.
+ */
+
+ vunmap(bridge->gatt_table);
+ table = (char *) bridge->gatt_table_real;
+ table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
+
+ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+ ClearPageReserved(page);
+
+ free_pages((unsigned long) bridge->gatt_table_real, page_order);
+
+ return 0;
+}
+
+static void null_cache_flush(void)
+{
+ mb();
+}
+
+/* Setup function */
+
+static const struct aper_size_info_32 uninorth_sizes[] =
+{
+ {256, 65536, 6, 64},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 16},
+ {32, 8192, 3, 8},
+ {16, 4096, 2, 4},
+ {8, 2048, 1, 2},
+ {4, 1024, 0, 1}
+};
+
+/*
+ * Not sure that U3 supports aperture sizes that high, but it
+ * would be strange if it did not :)
+ */
+static const struct aper_size_info_32 u3_sizes[] =
+{
+ {512, 131072, 7, 128},
+ {256, 65536, 6, 64},
+ {128, 32768, 5, 32},
+ {64, 16384, 4, 16},
+ {32, 8192, 3, 8},
+ {16, 4096, 2, 4},
+ {8, 2048, 1, 2},
+ {4, 1024, 0, 1}
+};
+
+const struct agp_bridge_driver uninorth_agp_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = (void *)uninorth_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = ARRAY_SIZE(uninorth_sizes),
+ .configure = uninorth_configure,
+ .fetch_size = uninorth_fetch_size,
+ .cleanup = uninorth_cleanup,
+ .tlb_flush = uninorth_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .cache_flush = null_cache_flush,
+ .agp_enable = uninorth_agp_enable,
+ .create_gatt_table = uninorth_create_gatt_table,
+ .free_gatt_table = uninorth_free_gatt_table,
+ .insert_memory = uninorth_insert_memory,
+ .remove_memory = uninorth_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ .cant_use_aperture = true,
+ .needs_scratch_page = true,
+};
+
+const struct agp_bridge_driver u3_agp_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = (void *)u3_sizes,
+ .size_type = U32_APER_SIZE,
+ .num_aperture_sizes = ARRAY_SIZE(u3_sizes),
+ .configure = uninorth_configure,
+ .fetch_size = uninorth_fetch_size,
+ .cleanup = uninorth_cleanup,
+ .tlb_flush = uninorth_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .cache_flush = null_cache_flush,
+ .agp_enable = uninorth_agp_enable,
+ .create_gatt_table = uninorth_create_gatt_table,
+ .free_gatt_table = uninorth_free_gatt_table,
+ .insert_memory = uninorth_insert_memory,
+ .remove_memory = uninorth_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ .cant_use_aperture = true,
+ .needs_scratch_page = true,
+};
+
+static struct agp_device_ids uninorth_agp_device_ids[] __devinitdata = {
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP,
+ .chipset_name = "UniNorth",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP_P,
+ .chipset_name = "UniNorth/Pangea",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP15,
+ .chipset_name = "UniNorth 1.5",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP2,
+ .chipset_name = "UniNorth 2",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_U3_AGP,
+ .chipset_name = "U3",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_U3L_AGP,
+ .chipset_name = "U3L",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_U3H_AGP,
+ .chipset_name = "U3H",
+ },
+ {
+ .device_id = PCI_DEVICE_ID_APPLE_IPID2_AGP,
+ .chipset_name = "UniNorth/Intrepid2",
+ },
+ { }, /* sentinel; the probe loop below stops at a NULL chipset_name */
+};
+
+static int __devinit agp_uninorth_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_device_ids *devs = uninorth_agp_device_ids;
+ struct agp_bridge_data *bridge;
+ struct device_node *uninorth_node;
+ u8 cap_ptr;
+ int j;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (cap_ptr == 0)
+ return -ENODEV;
+
+ /* probe for known chipsets */
+ for (j = 0; devs[j].chipset_name != NULL; ++j) {
+ if (pdev->device == devs[j].device_id) {
+ dev_info(&pdev->dev, "Apple %s chipset\n",
+ devs[j].chipset_name);
+ goto found;
+ }
+ }
+
+ dev_err(&pdev->dev, "unsupported Apple chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
+ return -ENODEV;
+
+ found:
+ /* Set revision to 0 if we could not read it. */
+ uninorth_rev = 0;
+ is_u3 = 0;
+ /* Locate core99 Uni-N */
+ uninorth_node = of_find_node_by_name(NULL, "uni-n");
+ /* Locate G5 u3 */
+ if (uninorth_node == NULL) {
+ is_u3 = 1;
+ uninorth_node = of_find_node_by_name(NULL, "u3");
+ }
+ if (uninorth_node) {
+ const int *revprop = of_get_property(uninorth_node,
+ "device-rev", NULL);
+ if (revprop != NULL)
+ uninorth_rev = *revprop & 0x3f;
+ of_node_put(uninorth_node);
+ }
+
+#ifdef CONFIG_PM
+ /* Inform platform of our suspend/resume caps */
+ pmac_register_agp_pm(pdev, agp_uninorth_suspend, agp_uninorth_resume);
+#endif
+
+ /* Allocate & setup our driver */
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ if (is_u3)
+ bridge->driver = &u3_agp_driver;
+ else
+ bridge->driver = &uninorth_agp_driver;
+
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+ bridge->flags = AGP_ERRATA_FASTWRITES;
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev, cap_ptr+PCI_AGP_STATUS, &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_uninorth_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+#ifdef CONFIG_PM
+ /* Inform platform of our suspend/resume caps */
+ pmac_register_agp_pm(pdev, NULL, NULL);
+#endif
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+static struct pci_device_id agp_uninorth_pci_table[] = {
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_APPLE,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_uninorth_pci_table);
+
+static struct pci_driver agp_uninorth_pci_driver = {
+ .name = "agpgart-uninorth",
+ .id_table = agp_uninorth_pci_table,
+ .probe = agp_uninorth_probe,
+ .remove = agp_uninorth_remove,
+};
+
+static int __init agp_uninorth_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_uninorth_pci_driver);
+}
+
+static void __exit agp_uninorth_cleanup(void)
+{
+ pci_unregister_driver(&agp_uninorth_pci_driver);
+}
+
+module_init(agp_uninorth_init);
+module_exit(agp_uninorth_cleanup);
+
+module_param(aperture, charp, 0);
+MODULE_PARM_DESC(aperture,
+ "Aperture size, must be power of two between 4MB and an\n"
+ "\t\tupper limit specific to the UniNorth revision.\n"
+ "\t\tDefault: " DEFAULT_APERTURE_STRING "M");
+
+MODULE_AUTHOR("Ben Herrenschmidt & Paul Mackerras");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
new file mode 100644
index 00000000..8bc38493
--- /dev/null
+++ b/drivers/char/agp/via-agp.c
@@ -0,0 +1,600 @@
+/*
+ * VIA AGPGART routines.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/agp_backend.h>
+#include "agp.h"
+
+static const struct pci_device_id agp_via_pci_table[];
+
+#define VIA_GARTCTRL 0x80
+#define VIA_APSIZE 0x84
+#define VIA_ATTBASE 0x88
+
+#define VIA_AGP3_GARTCTRL 0x90
+#define VIA_AGP3_APSIZE 0x94
+#define VIA_AGP3_ATTBASE 0x98
+#define VIA_AGPSEL 0xfd
+
+static int via_fetch_size(void)
+{
+ int i;
+ u8 temp;
+ struct aper_size_info_8 *values;
+
+ values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
+ pci_read_config_byte(agp_bridge->dev, VIA_APSIZE, &temp);
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ printk(KERN_ERR PFX "Unknown aperture size from AGP bridge (0x%x)\n", temp);
+ return 0;
+}
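+
+/*
+ * Worked example (illustrative): if the VIA_APSIZE byte reads back as
+ * 0xf8 (248), the loop above matches the {8, 2048, 1, 248} entry of
+ * via_generic_sizes (defined below), i.e. an 8MB aperture with 2048
+ * GATT entries.
+ */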
+
+
+static int via_configure(void)
+{
+ u32 temp;
+ struct aper_size_info_8 *current_size;
+
+ current_size = A_SIZE_8(agp_bridge->current_size);
+ /* aperture size */
+ pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
+ current_size->size_value);
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* GART control register */
+ pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000000f);
+
+ /* attbase - aperture GATT base */
+ pci_write_config_dword(agp_bridge->dev, VIA_ATTBASE,
+ (agp_bridge->gatt_bus_addr & 0xfffff000) | 3);
+ return 0;
+}
+
+
+static void via_cleanup(void)
+{
+ struct aper_size_info_8 *previous_size;
+
+ previous_size = A_SIZE_8(agp_bridge->previous_size);
+ pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
+ previous_size->size_value);
+ /* Do not disable by writing 0 to VIA_ATTBASE, it screws things up
+ * during reinitialization.
+ */
+}
+
+
+static void via_tlbflush(struct agp_memory *mem)
+{
+ u32 temp;
+
+ pci_read_config_dword(agp_bridge->dev, VIA_GARTCTRL, &temp);
+ temp |= (1<<7);
+ pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
+ temp &= ~(1<<7);
+ pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
+}
+
+
+static const struct aper_size_info_8 via_generic_sizes[9] =
+{
+ {256, 65536, 6, 0},
+ {128, 32768, 5, 128},
+ {64, 16384, 4, 192},
+ {32, 8192, 3, 224},
+ {16, 4096, 2, 240},
+ {8, 2048, 1, 248},
+ {4, 1024, 0, 252},
+ {2, 512, 0, 254},
+ {1, 256, 0, 255}
+};
+
+
+static int via_fetch_size_agp3(void)
+{
+ int i;
+ u16 temp;
+ struct aper_size_info_16 *values;
+
+ values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
+ pci_read_config_word(agp_bridge->dev, VIA_AGP3_APSIZE, &temp);
+ temp &= 0xfff;
+
+ for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
+ if (temp == values[i].size_value) {
+ agp_bridge->previous_size =
+ agp_bridge->current_size = (void *) (values + i);
+ agp_bridge->aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+ return 0;
+}
+
+
+static int via_configure_agp3(void)
+{
+ u32 temp;
+ struct aper_size_info_16 *current_size;
+
+ current_size = A_SIZE_16(agp_bridge->current_size);
+
+ /* address to map to */
+ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* attbase - aperture GATT base */
+ pci_write_config_dword(agp_bridge->dev, VIA_AGP3_ATTBASE,
+ agp_bridge->gatt_bus_addr & 0xfffff000);
+
+ /* 1. Enable GTLB in RX90<7>, all AGP aperture access needs to fetch
+ * translation table first.
+ * 2. Enable AGP aperture in RX91<0>. This bit controls the enabling of the
+ * graphics AGP aperture for the AGP3.0 port.
+ */
+ pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp | (3<<7));
+ return 0;
+}
+
+
+static void via_cleanup_agp3(void)
+{
+ struct aper_size_info_16 *previous_size;
+
+ previous_size = A_SIZE_16(agp_bridge->previous_size);
+ pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, previous_size->size_value);
+}
+
+
+static void via_tlbflush_agp3(struct agp_memory *mem)
+{
+ u32 temp;
+
+ pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
+ pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp & ~(1<<7));
+ pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp);
+}
+
+
+static const struct agp_bridge_driver via_agp3_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = agp3_generic_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 10,
+ .needs_scratch_page = true,
+ .configure = via_configure_agp3,
+ .fetch_size = via_fetch_size_agp3,
+ .cleanup = via_cleanup_agp3,
+ .tlb_flush = via_tlbflush_agp3,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver via_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = via_generic_sizes,
+ .size_type = U8_APER_SIZE,
+ .num_aperture_sizes = 9,
+ .needs_scratch_page = true,
+ .configure = via_configure,
+ .fetch_size = via_fetch_size,
+ .cleanup = via_cleanup,
+ .tlb_flush = via_tlbflush,
+ .mask_memory = agp_generic_mask_memory,
+ .masks = NULL,
+ .agp_enable = agp_generic_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = agp_generic_insert_memory,
+ .remove_memory = agp_generic_remove_memory,
+ .alloc_by_type = agp_generic_alloc_by_type,
+ .free_by_type = agp_generic_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static struct agp_device_ids via_agp_device_ids[] __devinitdata =
+{
+ {
+ .device_id = PCI_DEVICE_ID_VIA_82C597_0,
+ .chipset_name = "Apollo VP3",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_82C598_0,
+ .chipset_name = "Apollo MVP3",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8501_0,
+ .chipset_name = "Apollo MVP4",
+ },
+
+ /* VT8601 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8601_0,
+ .chipset_name = "Apollo ProMedia/PLE133Ta",
+ },
+
+ /* VT82C693A / VT28C694T */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_82C691_0,
+ .chipset_name = "Apollo Pro 133",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8371_0,
+ .chipset_name = "KX133",
+ },
+
+ /* VT8633 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8633_0,
+ .chipset_name = "Pro 266",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_XN266,
+ .chipset_name = "Apollo Pro266",
+ },
+
+ /* VT8361 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8361,
+ .chipset_name = "KLE133",
+ },
+
+ /* VT8365 / VT8362 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8363_0,
+ .chipset_name = "Twister-K/KT133x/KM133",
+ },
+
+ /* VT8753A */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8753_0,
+ .chipset_name = "P4X266",
+ },
+
+ /* VT8366 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8367_0,
+ .chipset_name = "KT266/KY266x/KT333",
+ },
+
+ /* VT8633 (for CuMine/ Celeron) */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8653_0,
+ .chipset_name = "Pro266T",
+ },
+
+ /* KM266 / PM266 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_XM266,
+ .chipset_name = "PM266/KM266",
+ },
+
+ /* CLE266 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_862X_0,
+ .chipset_name = "CLE266",
+ },
+
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8377_0,
+ .chipset_name = "KT400/KT400A/KT600",
+ },
+
+ /* VT8604 / VT8605 / VT8603
+ * (Apollo Pro133A chipset with S3 Savage4) */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8605_0,
+ .chipset_name = "ProSavage PM133/PL133/PN133"
+ },
+
+ /* P4M266x/P4N266 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8703_51_0,
+ .chipset_name = "P4M266x/P4N266",
+ },
+
+ /* VT8754 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8754C_0,
+ .chipset_name = "PT800",
+ },
+
+ /* P4X600 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8763_0,
+ .chipset_name = "P4X600"
+ },
+
+ /* KM400 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8378_0,
+ .chipset_name = "KM400/KM400A",
+ },
+
+ /* PT880 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_PT880,
+ .chipset_name = "PT880",
+ },
+
+ /* PT880 Ultra */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_PT880ULTRA,
+ .chipset_name = "PT880 Ultra",
+ },
+
+ /* PT890 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_8783_0,
+ .chipset_name = "PT890",
+ },
+
+ /* PM800/PN800/PM880/PN880 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_PX8X0_0,
+ .chipset_name = "PM800/PN800/PM880/PN880",
+ },
+ /* KT880 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_3269_0,
+ .chipset_name = "KT880",
+ },
+ /* KTxxx/Px8xx */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_83_87XX_1,
+ .chipset_name = "VT83xx/VT87xx/KTxxx/Px8xx",
+ },
+ /* P4M800 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_3296_0,
+ .chipset_name = "P4M800",
+ },
+ /* P4M800CE */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_P4M800CE,
+ .chipset_name = "VT3314",
+ },
+ /* VT3324 / CX700 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_VT3324,
+ .chipset_name = "CX700",
+ },
+ /* VT3336 - this is a chipset for AMD Athlon/K8 CPU. Due to K8's unique
+ * architecture, the AGP resource and behavior are different from
+ * the traditional AGP, which resides only in the chipset. AGP is used
+ * by the 3D driver, which wasn't available for the VT3336 and VT3364
+ * generation until now. Unfortunately, testing shows that VT3364 works
+ * but VT3336 doesn't. - explanation from VIA; just leave this as
+ * a placeholder to avoid future patches adding it back in.
+ */
+#if 0
+ {
+ .device_id = PCI_DEVICE_ID_VIA_VT3336,
+ .chipset_name = "VT3336",
+ },
+#endif
+ /* P4M890 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_P4M890,
+ .chipset_name = "P4M890",
+ },
+ /* P4M900 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_VT3364,
+ .chipset_name = "P4M900",
+ },
+ { }, /* dummy final entry, always present */
+};
+
+
+/*
+ * VIA's AGP3 chipsets do magic to make the AGP bridge compliant
+ * with the same standards version as the graphics card.
+ */
+static void check_via_agp3 (struct agp_bridge_data *bridge)
+{
+ u8 reg;
+
+ pci_read_config_byte(bridge->dev, VIA_AGPSEL, &reg);
+ /* If the bridge is not in AGP 2.0 compatibility mode, use AGP3. */
+ if ((reg & (1<<1)) == 0)
+ bridge->driver = &via_agp3_driver;
+}
+
+
+static int __devinit agp_via_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct agp_device_ids *devs = via_agp_device_ids;
+ struct agp_bridge_data *bridge;
+ int j = 0;
+ u8 cap_ptr;
+
+ cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
+ if (!cap_ptr)
+ return -ENODEV;
+
+ j = ent - agp_via_pci_table; /* index into via_agp_device_ids; the tables share the same order */
+ printk(KERN_INFO PFX "Detected VIA %s chipset\n", devs[j].chipset_name);
+
+ bridge = agp_alloc_bridge();
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->dev = pdev;
+ bridge->capndx = cap_ptr;
+ bridge->driver = &via_driver;
+
+ /*
+ * Garg, there are KT400s with KT266 IDs.
+ */
+ if (pdev->device == PCI_DEVICE_ID_VIA_8367_0) {
+ /* Is there a KT400 subsystem ? */
+ if (pdev->subsystem_device == PCI_DEVICE_ID_VIA_8377_0) {
+ printk(KERN_INFO PFX "Found KT400 in disguise as a KT266.\n");
+ check_via_agp3(bridge);
+ }
+ }
+
+ /* If this is an AGP3 bridge, check which mode its in and adjust. */
+ get_agp_version(bridge);
+ if (bridge->major_version >= 3)
+ check_via_agp3(bridge);
+
+ /* Fill in the mode register */
+ pci_read_config_dword(pdev,
+ bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
+
+ pci_set_drvdata(pdev, bridge);
+ return agp_add_bridge(bridge);
+}
+
+static void __devexit agp_via_remove(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ agp_remove_bridge(bridge);
+ agp_put_bridge(bridge);
+}
+
+#ifdef CONFIG_PM
+
+static int agp_via_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int agp_via_resume(struct pci_dev *pdev)
+{
+ struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (bridge->driver == &via_agp3_driver)
+ return via_configure_agp3();
+ else if (bridge->driver == &via_driver)
+ return via_configure();
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+/* must be the same order as name table above */
+static const struct pci_device_id agp_via_pci_table[] = {
+#define ID(x) \
+ { \
+ .class = (PCI_CLASS_BRIDGE_HOST << 8), \
+ .class_mask = ~0, \
+ .vendor = PCI_VENDOR_ID_VIA, \
+ .device = x, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ }
+ ID(PCI_DEVICE_ID_VIA_82C597_0),
+ ID(PCI_DEVICE_ID_VIA_82C598_0),
+ ID(PCI_DEVICE_ID_VIA_8501_0),
+ ID(PCI_DEVICE_ID_VIA_8601_0),
+ ID(PCI_DEVICE_ID_VIA_82C691_0),
+ ID(PCI_DEVICE_ID_VIA_8371_0),
+ ID(PCI_DEVICE_ID_VIA_8633_0),
+ ID(PCI_DEVICE_ID_VIA_XN266),
+ ID(PCI_DEVICE_ID_VIA_8361),
+ ID(PCI_DEVICE_ID_VIA_8363_0),
+ ID(PCI_DEVICE_ID_VIA_8753_0),
+ ID(PCI_DEVICE_ID_VIA_8367_0),
+ ID(PCI_DEVICE_ID_VIA_8653_0),
+ ID(PCI_DEVICE_ID_VIA_XM266),
+ ID(PCI_DEVICE_ID_VIA_862X_0),
+ ID(PCI_DEVICE_ID_VIA_8377_0),
+ ID(PCI_DEVICE_ID_VIA_8605_0),
+ ID(PCI_DEVICE_ID_VIA_8703_51_0),
+ ID(PCI_DEVICE_ID_VIA_8754C_0),
+ ID(PCI_DEVICE_ID_VIA_8763_0),
+ ID(PCI_DEVICE_ID_VIA_8378_0),
+ ID(PCI_DEVICE_ID_VIA_PT880),
+ ID(PCI_DEVICE_ID_VIA_PT880ULTRA),
+ ID(PCI_DEVICE_ID_VIA_8783_0),
+ ID(PCI_DEVICE_ID_VIA_PX8X0_0),
+ ID(PCI_DEVICE_ID_VIA_3269_0),
+ ID(PCI_DEVICE_ID_VIA_83_87XX_1),
+ ID(PCI_DEVICE_ID_VIA_3296_0),
+ ID(PCI_DEVICE_ID_VIA_P4M800CE),
+ ID(PCI_DEVICE_ID_VIA_VT3324),
+ ID(PCI_DEVICE_ID_VIA_P4M890),
+ ID(PCI_DEVICE_ID_VIA_VT3364),
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, agp_via_pci_table);
+
+
+static struct pci_driver agp_via_pci_driver = {
+ .name = "agpgart-via",
+ .id_table = agp_via_pci_table,
+ .probe = agp_via_probe,
+ .remove = agp_via_remove,
+#ifdef CONFIG_PM
+ .suspend = agp_via_suspend,
+ .resume = agp_via_resume,
+#endif
+};
+
+
+static int __init agp_via_init(void)
+{
+ if (agp_off)
+ return -EINVAL;
+ return pci_register_driver(&agp_via_pci_driver);
+}
+
+static void __exit agp_via_cleanup(void)
+{
+ pci_unregister_driver(&agp_via_pci_driver);
+}
+
+module_init(agp_via_init);
+module_exit(agp_via_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
new file mode 100644
index 00000000..548708c4
--- /dev/null
+++ b/drivers/char/apm-emulation.c
@@ -0,0 +1,746 @@
+/*
+ * bios-less APM driver for ARM Linux
+ * Jamey Hicks <jamey@crl.dec.com>
+ * adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
+ *
+ * APM 1.2 Reference:
+ * Intel Corporation, Microsoft Corporation. Advanced Power Management
+ * (APM) BIOS Interface Specification, Revision 1.2, February 1996.
+ *
+ * This document is available from Microsoft at:
+ * http://www.microsoft.com/whdc/archive/amp_12.mspx
+ */
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/apm_bios.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/apm-emulation.h>
+#include <linux/freezer.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+
+/*
+ * The apm_bios device is one of the misc char devices.
+ * This is its minor number.
+ */
+#define APM_MINOR_DEV 134
+
+/*
+ * See Documentation/Config.help for the configuration options.
+ *
+ * Various options can be changed at boot time as follows:
+ * (We allow underscores for compatibility with the modules code)
+ * apm=on/off enable/disable APM
+ */
+
+/*
+ * Maximum number of events stored
+ */
+#define APM_MAX_EVENTS 16
+
+struct apm_queue {
+ unsigned int event_head;
+ unsigned int event_tail;
+ apm_event_t events[APM_MAX_EVENTS];
+};
+
+/*
+ * thread states (for threads using a writable /dev/apm_bios fd):
+ *
+ * SUSPEND_NONE: nothing happening
+ * SUSPEND_PENDING: suspend event queued for thread and pending to be read
+ * SUSPEND_READ: suspend event read, pending acknowledgement
+ * SUSPEND_ACKED: acknowledgement received from thread (via ioctl),
+ * waiting for resume
+ * SUSPEND_ACKTO: acknowledgement timeout
+ * SUSPEND_DONE: thread has acked suspend and is now notified of
+ * resume
+ *
+ * SUSPEND_WAIT: this thread invoked suspend and is waiting for resume
+ *
+ * A thread migrates along one of three paths:
+ * NONE -1-> PENDING -2-> READ -3-> ACKED -4-> DONE -5-> NONE
+ * -6-> ACKTO -7-> NONE
+ * NONE -8-> WAIT -9-> NONE
+ *
+ * While in PENDING or READ, the thread is accounted for in the
+ * suspend_acks_pending counter.
+ *
+ * The transitions are invoked as follows:
+ * 1: suspend event is signalled from the core PM code
+ * 2: the suspend event is read from the fd by the userspace thread
+ * 3: userspace thread issues the APM_IOC_SUSPEND ioctl (as ack)
+ * 4: core PM code signals that we have resumed
+ * 5: APM_IOC_SUSPEND ioctl returns
+ *
+ * 6: the notifier invoked from the core PM code timed out waiting
+ * for all relevant threads to enter ACKED state and puts those
+ * that haven't into ACKTO
+ * 7: those threads issue APM_IOC_SUSPEND ioctl too late,
+ * get an error
+ *
+ * 8: userspace thread issues the APM_IOC_SUSPEND ioctl (to suspend),
+ * ioctl code invokes pm_suspend()
+ * 9: pm_suspend() returns indicating resume
+ */
+enum apm_suspend_state {
+ SUSPEND_NONE,
+ SUSPEND_PENDING,
+ SUSPEND_READ,
+ SUSPEND_ACKED,
+ SUSPEND_ACKTO,
+ SUSPEND_WAIT,
+ SUSPEND_DONE,
+};
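+
+/*
+ * For illustration (not part of the driver): a minimal userspace writer
+ * following path 1-5 above might look like this, using only definitions
+ * from <linux/apm_bios.h>:
+ *
+ *	int fd = open("/dev/apm_bios", O_RDWR);
+ *	apm_event_t ev;
+ *
+ *	read(fd, &ev, sizeof(ev));		// 2: PENDING -> READ
+ *	if (ev == APM_USER_SUSPEND || ev == APM_SYS_SUSPEND)
+ *		ioctl(fd, APM_IOC_SUSPEND);	// 3: READ -> ACKED; returns
+ *						// only after resume (4, 5)
+ */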
+
+/*
+ * The per-file APM data
+ */
+struct apm_user {
+ struct list_head list;
+
+ unsigned int suser: 1;
+ unsigned int writer: 1;
+ unsigned int reader: 1;
+
+ int suspend_result;
+ enum apm_suspend_state suspend_state;
+
+ struct apm_queue queue;
+};
+
+/*
+ * Local variables
+ */
+static atomic_t suspend_acks_pending = ATOMIC_INIT(0);
+static atomic_t userspace_notification_inhibit = ATOMIC_INIT(0);
+static int apm_disabled;
+static struct task_struct *kapmd_tsk;
+
+static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
+static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
+
+/*
+ * This is a list of everyone who has opened /dev/apm_bios
+ */
+static DECLARE_RWSEM(user_list_lock);
+static LIST_HEAD(apm_user_list);
+
+/*
+ * kapmd info.  kapmd provides us with a process context in which to
+ * handle "APM" events - specifically necessary if we're going
+ * to be suspending the system.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
+static DEFINE_SPINLOCK(kapmd_queue_lock);
+static struct apm_queue kapmd_queue;
+
+static DEFINE_MUTEX(state_lock);
+
+static const char driver_version[] = "1.13"; /* no spaces */
+
+
+
+/*
+ * Compatibility cruft until the IPAQ people move over to the new
+ * interface.
+ */
+static void __apm_get_power_status(struct apm_power_info *info)
+{
+}
+
+/*
+ * This allows machines to provide their own "apm get power status" function.
+ */
+void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
+EXPORT_SYMBOL(apm_get_power_status);
+
+
+/*
+ * APM event queue management.
+ */
+static inline int queue_empty(struct apm_queue *q)
+{
+ return q->event_head == q->event_tail;
+}
+
+static inline apm_event_t queue_get_event(struct apm_queue *q)
+{
+ q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+ return q->events[q->event_tail];
+}
+
+static void queue_add_event(struct apm_queue *q, apm_event_t event)
+{
+ q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
+ if (q->event_head == q->event_tail) {
+ static int notified;
+
+ if (notified++ == 0)
+ printk(KERN_ERR "apm: an event queue overflowed\n");
+ q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+ }
+ q->events[q->event_head] = event;
+}
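+
+/*
+ * Worked example of the ring buffer above (illustrative): event_head and
+ * event_tail both start at 0, which queue_empty() reports as empty. Each
+ * add advances event_head modulo APM_MAX_EVENTS and stores at the new
+ * head; each get advances event_tail and reads from the new tail. Since
+ * head == tail means empty, at most APM_MAX_EVENTS - 1 (15) events are
+ * held before the overflow path starts discarding the oldest entry.
+ */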
+
+static void queue_event(apm_event_t event)
+{
+ struct apm_user *as;
+
+ down_read(&user_list_lock);
+ list_for_each_entry(as, &apm_user_list, list) {
+ if (as->reader)
+ queue_add_event(&as->queue, event);
+ }
+ up_read(&user_list_lock);
+ wake_up_interruptible(&apm_waitqueue);
+}
+
+static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
+{
+ struct apm_user *as = fp->private_data;
+ apm_event_t event;
+ int i = count, ret = 0;
+
+ if (count < sizeof(apm_event_t))
+ return -EINVAL;
+
+ if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));
+
+ while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
+ event = queue_get_event(&as->queue);
+
+ ret = -EFAULT;
+ if (copy_to_user(buf, &event, sizeof(event)))
+ break;
+
+ mutex_lock(&state_lock);
+ if (as->suspend_state == SUSPEND_PENDING &&
+ (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND))
+ as->suspend_state = SUSPEND_READ;
+ mutex_unlock(&state_lock);
+
+ buf += sizeof(event);
+ i -= sizeof(event);
+ }
+
+ if (i < count)
+ ret = count - i;
+
+ return ret;
+}
+
+static unsigned int apm_poll(struct file *fp, poll_table * wait)
+{
+ struct apm_user *as = fp->private_data;
+
+ poll_wait(fp, &apm_waitqueue, wait);
+ return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM;
+}
+
+/*
+ * apm_ioctl - handle APM ioctl
+ *
+ * APM_IOC_SUSPEND
+ * This IOCTL is overloaded, and performs two functions. It is used to:
+ * - initiate a suspend
+ * - acknowledge a suspend read from /dev/apm_bios.
+ * Only when everyone who has opened /dev/apm_bios with write permission
+ * has acknowledged does the actual suspend happen.
+ */
+static long
+apm_ioctl(struct file *filp, u_int cmd, u_long arg)
+{
+ struct apm_user *as = filp->private_data;
+ int err = -EINVAL;
+
+ if (!as->suser || !as->writer)
+ return -EPERM;
+
+ switch (cmd) {
+ case APM_IOC_SUSPEND:
+ mutex_lock(&state_lock);
+
+ as->suspend_result = -EINTR;
+
+ switch (as->suspend_state) {
+ case SUSPEND_READ:
+ /*
+ * If we read a suspend command from /dev/apm_bios,
+ * then the corresponding APM_IOC_SUSPEND ioctl is
+ * interpreted as an acknowledge.
+ */
+ as->suspend_state = SUSPEND_ACKED;
+ atomic_dec(&suspend_acks_pending);
+ mutex_unlock(&state_lock);
+
+ /*
+ * suspend_acks_pending changed, the notifier needs to
+ * be woken up for this
+ */
+ wake_up(&apm_suspend_waitqueue);
+
+ /*
+ * Wait for the suspend/resume to complete. If there
+ * are pending acknowledges, we wait here for them.
+ */
+ freezer_do_not_count();
+
+ wait_event(apm_suspend_waitqueue,
+ as->suspend_state == SUSPEND_DONE);
+
+ /*
+ * Since we are waiting until the suspend is done, the
+ * try_to_freeze() in freezer_count() will not trigger
+ */
+ freezer_count();
+ break;
+ case SUSPEND_ACKTO:
+ as->suspend_result = -ETIMEDOUT;
+ mutex_unlock(&state_lock);
+ break;
+ default:
+ as->suspend_state = SUSPEND_WAIT;
+ mutex_unlock(&state_lock);
+
+ /*
+ * Otherwise it is a request to suspend the system.
+ * Just invoke pm_suspend(), we'll handle it from
+ * there via the notifier.
+ */
+ as->suspend_result = pm_suspend(PM_SUSPEND_MEM);
+ }
+
+ mutex_lock(&state_lock);
+ err = as->suspend_result;
+ as->suspend_state = SUSPEND_NONE;
+ mutex_unlock(&state_lock);
+ break;
+ }
+
+ return err;
+}
+
+static int apm_release(struct inode * inode, struct file * filp)
+{
+ struct apm_user *as = filp->private_data;
+
+ filp->private_data = NULL;
+
+ down_write(&user_list_lock);
+ list_del(&as->list);
+ up_write(&user_list_lock);
+
+ /*
+ * We are now unhooked from the chain. As far as new
+ * events are concerned, we no longer exist.
+ */
+ mutex_lock(&state_lock);
+ if (as->suspend_state == SUSPEND_PENDING ||
+ as->suspend_state == SUSPEND_READ)
+ atomic_dec(&suspend_acks_pending);
+ mutex_unlock(&state_lock);
+
+ wake_up(&apm_suspend_waitqueue);
+
+ kfree(as);
+ return 0;
+}
+
+static int apm_open(struct inode * inode, struct file * filp)
+{
+ struct apm_user *as;
+
+ as = kzalloc(sizeof(*as), GFP_KERNEL);
+ if (as) {
+ /*
+ * XXX - this is a tiny bit broken, when we consider BSD
+ * process accounting. If the device is opened by root, we
+ * instantly flag that we used superuser privs. Who knows,
+ * we might close the device immediately without doing a
+ * privileged operation -- cevans
+ */
+ as->suser = capable(CAP_SYS_ADMIN);
+ as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
+ as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;
+
+ down_write(&user_list_lock);
+ list_add(&as->list, &apm_user_list);
+ up_write(&user_list_lock);
+
+ filp->private_data = as;
+ }
+
+ return as ? 0 : -ENOMEM;
+}
+
+static const struct file_operations apm_bios_fops = {
+ .owner = THIS_MODULE,
+ .read = apm_read,
+ .poll = apm_poll,
+ .unlocked_ioctl = apm_ioctl,
+ .open = apm_open,
+ .release = apm_release,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice apm_device = {
+ .minor = APM_MINOR_DEV,
+ .name = "apm_bios",
+ .fops = &apm_bios_fops
+};
+
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Arguments, with symbols from linux/apm_bios.h.
+ *
+ * 0) Linux driver version (this will change if format changes)
+ * 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2.
+ * 2) APM flags from APM Installation Check (0x00):
+ * bit 0: APM_16_BIT_SUPPORT
+ * bit 1: APM_32_BIT_SUPPORT
+ * bit 2: APM_IDLE_SLOWS_CLOCK
+ * bit 3: APM_BIOS_DISABLED
+ * bit 4: APM_BIOS_DISENGAGED
+ * 3) AC line status
+ * 0x00: Off-line
+ * 0x01: On-line
+ * 0x02: On backup power (BIOS >= 1.1 only)
+ * 0xff: Unknown
+ * 4) Battery status
+ * 0x00: High
+ * 0x01: Low
+ * 0x02: Critical
+ * 0x03: Charging
+ * 0x04: Selected battery not present (BIOS >= 1.2 only)
+ * 0xff: Unknown
+ * 5) Battery flag
+ * bit 0: High
+ * bit 1: Low
+ * bit 2: Critical
+ * bit 3: Charging
+ * bit 7: No system battery
+ * 0xff: Unknown
+ * 6) Remaining battery life (percentage of charge):
+ * 0-100: valid
+ * -1: Unknown
+ * 7) Remaining battery life (time units):
+ * Number of remaining minutes or seconds
+ * -1: Unknown
+ * 8) min = minutes; sec = seconds
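+ *
+ * An example of the resulting line (illustrative values only):
+ *   1.13 1.2 0x02 0x01 0x03 0x08 50% -1 ?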
+ */
+static int proc_apm_show(struct seq_file *m, void *v)
+{
+ struct apm_power_info info;
+ char *units;
+
+ info.ac_line_status = 0xff;
+ info.battery_status = 0xff;
+ info.battery_flag = 0xff;
+ info.battery_life = -1;
+ info.time = -1;
+ info.units = -1;
+
+ if (apm_get_power_status)
+ apm_get_power_status(&info);
+
+ switch (info.units) {
+ default: units = "?"; break;
+ case 0: units = "min"; break;
+ case 1: units = "sec"; break;
+ }
+
+ seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
+ driver_version, APM_32_BIT_SUPPORT,
+ info.ac_line_status, info.battery_status,
+ info.battery_flag, info.battery_life,
+ info.time, units);
+
+ return 0;
+}
+
+static int proc_apm_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_apm_show, NULL);
+}
+
+static const struct file_operations apm_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = proc_apm_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static int kapmd(void *arg)
+{
+ do {
+ apm_event_t event;
+
+ wait_event_interruptible(kapmd_wait,
+ !queue_empty(&kapmd_queue) || kthread_should_stop());
+
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irq(&kapmd_queue_lock);
+ event = 0;
+ if (!queue_empty(&kapmd_queue))
+ event = queue_get_event(&kapmd_queue);
+ spin_unlock_irq(&kapmd_queue_lock);
+
+ switch (event) {
+ case 0:
+ break;
+
+ case APM_LOW_BATTERY:
+ case APM_POWER_STATUS_CHANGE:
+ queue_event(event);
+ break;
+
+ case APM_USER_SUSPEND:
+ case APM_SYS_SUSPEND:
+ pm_suspend(PM_SUSPEND_MEM);
+ break;
+
+ case APM_CRITICAL_SUSPEND:
+ atomic_inc(&userspace_notification_inhibit);
+ pm_suspend(PM_SUSPEND_MEM);
+ atomic_dec(&userspace_notification_inhibit);
+ break;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static int apm_suspend_notifier(struct notifier_block *nb,
+ unsigned long event,
+ void *dummy)
+{
+ struct apm_user *as;
+ int err;
+
+ /* short-cut emergency suspends */
+ if (atomic_read(&userspace_notification_inhibit))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ /*
+ * Queue an event to all "writer" users that we want
+ * to suspend and need their ack.
+ */
+ mutex_lock(&state_lock);
+ down_read(&user_list_lock);
+
+ list_for_each_entry(as, &apm_user_list, list) {
+ if (as->suspend_state != SUSPEND_WAIT && as->reader &&
+ as->writer && as->suser) {
+ as->suspend_state = SUSPEND_PENDING;
+ atomic_inc(&suspend_acks_pending);
+ queue_add_event(&as->queue, APM_USER_SUSPEND);
+ }
+ }
+
+ up_read(&user_list_lock);
+ mutex_unlock(&state_lock);
+ wake_up_interruptible(&apm_waitqueue);
+
+ /*
+ * Wait for the suspend_acks_pending variable to drop to
+ * zero, meaning everybody acked the suspend event (or the
+ * process was killed).
+ *
+ * If the app won't answer within a short while we assume it
+ * locked up and ignore it.
+ */
+ err = wait_event_interruptible_timeout(
+ apm_suspend_waitqueue,
+ atomic_read(&suspend_acks_pending) == 0,
+ 5*HZ);
+
+ /* timed out */
+ if (err == 0) {
+ /*
+ * Move anybody who timed out to "ack timeout" state.
+ *
+ * We could time out while userspace does the ACK
+ * right after the timeout but before we enter the
+ * locked section here; that's fine.
+ */
+ mutex_lock(&state_lock);
+ down_read(&user_list_lock);
+ list_for_each_entry(as, &apm_user_list, list) {
+ if (as->suspend_state == SUSPEND_PENDING ||
+ as->suspend_state == SUSPEND_READ) {
+ as->suspend_state = SUSPEND_ACKTO;
+ atomic_dec(&suspend_acks_pending);
+ }
+ }
+ up_read(&user_list_lock);
+ mutex_unlock(&state_lock);
+ }
+
+ /* let suspend proceed */
+ if (err >= 0)
+ return NOTIFY_OK;
+
+ /* interrupted by signal */
+ return NOTIFY_BAD;
+
+ case PM_POST_SUSPEND:
+ /*
+ * Anyone on the APM queues will think we're still suspended.
+ * Send a message so everyone knows we're now awake again.
+ */
+ queue_event(APM_NORMAL_RESUME);
+
+ /*
+ * Finally, wake up anyone who is sleeping on the suspend.
+ */
+ mutex_lock(&state_lock);
+ down_read(&user_list_lock);
+ list_for_each_entry(as, &apm_user_list, list) {
+ if (as->suspend_state == SUSPEND_ACKED) {
+ /*
+ * TODO: maybe grab error code, needs core
+ * changes to push the error to the notifier
+ * chain (could use the second parameter if
+ * implemented)
+ */
+ as->suspend_result = 0;
+ as->suspend_state = SUSPEND_DONE;
+ }
+ }
+ up_read(&user_list_lock);
+ mutex_unlock(&state_lock);
+
+ wake_up(&apm_suspend_waitqueue);
+ return NOTIFY_OK;
+
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct notifier_block apm_notif_block = {
+ .notifier_call = apm_suspend_notifier,
+};
+
+static int __init apm_init(void)
+{
+ int ret;
+
+ if (apm_disabled) {
+ printk(KERN_NOTICE "apm: disabled on user request.\n");
+ return -ENODEV;
+ }
+
+ kapmd_tsk = kthread_create(kapmd, NULL, "kapmd");
+ if (IS_ERR(kapmd_tsk)) {
+ ret = PTR_ERR(kapmd_tsk);
+ kapmd_tsk = NULL;
+ goto out;
+ }
+ wake_up_process(kapmd_tsk);
+
+#ifdef CONFIG_PROC_FS
+ proc_create("apm", 0, NULL, &apm_proc_fops);
+#endif
+
+ ret = misc_register(&apm_device);
+ if (ret)
+ goto out_stop;
+
+ ret = register_pm_notifier(&apm_notif_block);
+ if (ret)
+ goto out_unregister;
+
+ return 0;
+
+ out_unregister:
+ misc_deregister(&apm_device);
+ out_stop:
+ remove_proc_entry("apm", NULL);
+ kthread_stop(kapmd_tsk);
+ out:
+ return ret;
+}
+
+static void __exit apm_exit(void)
+{
+ unregister_pm_notifier(&apm_notif_block);
+ misc_deregister(&apm_device);
+ remove_proc_entry("apm", NULL);
+
+ kthread_stop(kapmd_tsk);
+}
+
+module_init(apm_init);
+module_exit(apm_exit);
+
+MODULE_AUTHOR("Stephen Rothwell");
+MODULE_DESCRIPTION("Advanced Power Management");
+MODULE_LICENSE("GPL");
+
+#ifndef MODULE
+static int __init apm_setup(char *str)
+{
+ while ((str != NULL) && (*str != '\0')) {
+ if (strncmp(str, "off", 3) == 0)
+ apm_disabled = 1;
+ if (strncmp(str, "on", 2) == 0)
+ apm_disabled = 0;
+ str = strchr(str, ',');
+ if (str != NULL)
+ str += strspn(str, ", \t");
+ }
+ return 1;
+}
+
+__setup("apm=", apm_setup);
+#endif
+
+/**
+ * apm_queue_event - queue an APM event for kapmd
+ * @event: APM event
+ *
+ * Queue an APM event for kapmd to process and ultimately take the
+ * appropriate action. Only a subset of events are handled:
+ * %APM_LOW_BATTERY
+ * %APM_POWER_STATUS_CHANGE
+ * %APM_USER_SUSPEND
+ * %APM_SYS_SUSPEND
+ * %APM_CRITICAL_SUSPEND
+ */
+void apm_queue_event(apm_event_t event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kapmd_queue_lock, flags);
+ queue_add_event(&kapmd_queue, event);
+ spin_unlock_irqrestore(&kapmd_queue_lock, flags);
+
+ wake_up_interruptible(&kapmd_wait);
+}
+EXPORT_SYMBOL(apm_queue_event);
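+
+/*
+ * Illustrative caller-side sketch: platform power code that detects a
+ * low-battery condition can simply call
+ *
+ *	apm_queue_event(APM_LOW_BATTERY);
+ *
+ * from any context (the queue lock is taken with irqsave); kapmd then
+ * forwards the event to readers of /dev/apm_bios.
+ */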
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
new file mode 100644
index 00000000..25373df1
--- /dev/null
+++ b/drivers/char/applicom.c
@@ -0,0 +1,843 @@
+/* Derived from Applicom driver ac.c for SCO Unix */
+/* Ported by David Woodhouse, Axiom (Cambridge) Ltd. */
+/* dwmw2@infradead.org 30/8/98 */
+/* $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $ */
+/* This module is for Linux 2.1 and 2.2 series kernels. */
+/*****************************************************************************/
+/* J PAGET 18/02/94: move to V2.4.2; ioctl with code 2 resets the interrupts, */
+/* so that we reset correctly after an unclean exit */
+/* J PAGET 02/05/94: move to V2.4.3; in the interrupt handler, LoopCount was */
+/* not initialised to 0. */
+/* F LAFORSE 04/07/95: version V2.6.0; dummy read after accessing a board, */
+/* to release the bus */
+/* J.PAGET 19/11/95: version V2.6.1; the number, address and IRQ are no longer */
+/* configured and passed as arguments to acinit, but are probed on the bus to */
+/* adapt to the number of boards present on the bus. IOCTL code 6 was still */
+/* reporting V2.4.3 */
+/* F.LAFORSE 28/11/95: creation of acXX.o files with the different base */
+/* addresses of the boards; more complete IOCTL 6 */
+/* J.PAGET 19/08/96: copy of version V2.6 to V2.8.0 with no code change */
+/* other than the text V2.6.1 becoming V2.8.0 */
+/*****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/miscdevice.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include "applicom.h"
+
+
+/* NOTE: We use for loops with {write,read}b() instead of
+ memcpy_{from,to}io throughout this driver. This is because
+ the board doesn't correctly handle word accesses - only
+ bytes.
+*/
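+
+/* For example, a four-byte copy to the board must be issued as four
+ separate writeb() calls rather than a single writel() (an
+ illustrative sketch, not a call site in this driver):
+
+ for (i = 0; i < 4; i++)
+ writeb(src[i], board_base + offset + i);
+*/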
+
+
+#undef DEBUG
+
+#define MAX_BOARD 8 /* maximum number of boards supported */
+#define MAX_ISA_BOARD 4
+#define LEN_RAM_IO 0x800
+#define AC_MINOR 157
+
+#ifndef PCI_VENDOR_ID_APPLICOM
+#define PCI_VENDOR_ID_APPLICOM 0x1389
+#define PCI_DEVICE_ID_APPLICOM_PCIGENERIC 0x0001
+#define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002
+#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003
+#endif
+
+static DEFINE_MUTEX(ac_mutex);
+static char *applicom_pci_devnames[] = {
+ "PCI board",
+ "PCI2000IBS / PCI2000CAN",
+ "PCI2000PFB"
+};
+
+static struct pci_device_id applicom_pci_tbl[] = {
+ { PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCIGENERIC) },
+ { PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN) },
+ { PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000PFB) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, applicom_pci_tbl);
+
+MODULE_AUTHOR("David Woodhouse & Applicom International");
+MODULE_DESCRIPTION("Driver for Applicom Profibus card");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(AC_MINOR);
+
+MODULE_SUPPORTED_DEVICE("ac");
+
+
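+/* Per-board state, indexed by board number - 1; a slot whose RamIO
+ is NULL is unused. */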
+static struct applicom_board {
+ unsigned long PhysIO;
+ void __iomem *RamIO;
+ wait_queue_head_t FlagSleepSend;
+ long irq;
+ spinlock_t mutex;
+} apbs[MAX_BOARD];
+
+static unsigned int irq = 0; /* interrupt number IRQ */
+static unsigned long mem = 0; /* physical segment of board */
+
+module_param(irq, uint, 0);
+MODULE_PARM_DESC(irq, "IRQ of the Applicom board");
+module_param(mem, ulong, 0);
+MODULE_PARM_DESC(mem, "Shared Memory Address of Applicom board");
+
+static unsigned int numboards; /* number of installed boards */
+static volatile unsigned char Dummy;
+static DECLARE_WAIT_QUEUE_HEAD(FlagSleepRec);
+static unsigned int WriteErrorCount; /* number of write errors */
+static unsigned int ReadErrorCount; /* number of read errors */
+static unsigned int DeviceErrorCount; /* number of device errors */
+
+static ssize_t ac_read (struct file *, char __user *, size_t, loff_t *);
+static ssize_t ac_write (struct file *, const char __user *, size_t, loff_t *);
+static long ac_ioctl(struct file *, unsigned int, unsigned long);
+static irqreturn_t ac_interrupt(int, void *);
+
+static const struct file_operations ac_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = ac_read,
+ .write = ac_write,
+ .unlocked_ioctl = ac_ioctl,
+};
+
+static struct miscdevice ac_miscdev = {
+ AC_MINOR,
+ "ac",
+ &ac_fops
+};
+
+static int dummy; /* dev_id for request_irq() */
+
+static int ac_register_board(unsigned long physloc, void __iomem *loc,
+ unsigned char boardno)
+{
+ volatile unsigned char byte_reset_it;
+
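+ /* A live board presents the 00/55/AA/FF signature at CONF_END_TEST;
+ anything else means there is no (working) board at this address. */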
+ if((readb(loc + CONF_END_TEST) != 0x00) ||
+ (readb(loc + CONF_END_TEST + 1) != 0x55) ||
+ (readb(loc + CONF_END_TEST + 2) != 0xAA) ||
+ (readb(loc + CONF_END_TEST + 3) != 0xFF))
+ return 0;
+
+ if (!boardno)
+ boardno = readb(loc + NUMCARD_OWNER_TO_PC);
+
+ if (!boardno || boardno > MAX_BOARD) {
+ printk(KERN_WARNING "Board #%d (at 0x%lx) is out of range (1 <= x <= %d).\n",
+ boardno, physloc, MAX_BOARD);
+ return 0;
+ }
+
+ if (apbs[boardno - 1].RamIO) {
+ printk(KERN_WARNING "Board #%d (at 0x%lx) conflicts with previous board #%d (at 0x%lx)\n",
+ boardno, physloc, boardno, apbs[boardno-1].PhysIO);
+ return 0;
+ }
+
+ boardno--;
+
+ apbs[boardno].PhysIO = physloc;
+ apbs[boardno].RamIO = loc;
+ init_waitqueue_head(&apbs[boardno].FlagSleepSend);
+ spin_lock_init(&apbs[boardno].mutex);
+ byte_reset_it = readb(loc + RAM_IT_TO_PC);
+
+ numboards++;
+ return boardno + 1;
+}
+
+static void __exit applicom_exit(void)
+{
+ unsigned int i;
+
+ misc_deregister(&ac_miscdev);
+
+ for (i = 0; i < MAX_BOARD; i++) {
+
+ if (!apbs[i].RamIO)
+ continue;
+
+ if (apbs[i].irq)
+ free_irq(apbs[i].irq, &dummy);
+
+ iounmap(apbs[i].RamIO);
+ }
+}
+
+static int __init applicom_init(void)
+{
+ int i, numisa = 0;
+ struct pci_dev *dev = NULL;
+ void __iomem *RamIO;
+ int boardno, ret;
+
+ printk(KERN_INFO "Applicom driver: $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $\n");
+
+ /* Check for PCI cards first; ISA boards are probed further below
+ only if mem and irq were supplied */
+
+ while ( (dev = pci_get_class(PCI_CLASS_OTHERS << 16, dev))) {
+
+ if (!pci_match_id(applicom_pci_tbl, dev))
+ continue;
+
+ if (pci_enable_device(dev))
+ return -EIO;
+
+ RamIO = ioremap_nocache(pci_resource_start(dev, 0), LEN_RAM_IO);
+
+ if (!RamIO) {
+ printk(KERN_INFO "ac.o: Failed to ioremap PCI memory "
+ "space at 0x%llx\n",
+ (unsigned long long)pci_resource_start(dev, 0));
+ pci_disable_device(dev);
+ return -EIO;
+ }
+
+ printk(KERN_INFO "Applicom %s found at mem 0x%llx, irq %d\n",
+ applicom_pci_devnames[dev->device-1],
+ (unsigned long long)pci_resource_start(dev, 0),
+ dev->irq);
+
+ boardno = ac_register_board(pci_resource_start(dev, 0),
+ RamIO, 0);
+ if (!boardno) {
+ printk(KERN_INFO "ac.o: PCI Applicom device doesn't have correct signature.\n");
+ iounmap(RamIO);
+ pci_disable_device(dev);
+ continue;
+ }
+
+ if (request_irq(dev->irq, &ac_interrupt, IRQF_SHARED, "Applicom PCI", &dummy)) {
+ printk(KERN_INFO "Could not allocate IRQ %d for PCI Applicom device.\n", dev->irq);
+ iounmap(RamIO);
+ pci_disable_device(dev);
+ apbs[boardno - 1].RamIO = NULL;
+ continue;
+ }
+
+ /* Enable interrupts. */
+
+ writeb(0x40, apbs[boardno - 1].RamIO + RAM_IT_FROM_PC);
+
+ apbs[boardno - 1].irq = dev->irq;
+ }
+
+ /* Finished with PCI cards. If none registered,
+ * and there was no mem/irq specified, exit */
+
+ if (!mem || !irq) {
+ if (numboards)
+ goto fin;
+ else {
+ printk(KERN_INFO "ac.o: No PCI boards found.\n");
+ printk(KERN_INFO "ac.o: For an ISA board you must supply memory and irq parameters.\n");
+ return -ENXIO;
+ }
+ }
+
+ /* Now try the specified ISA cards */
+
+ for (i = 0; i < MAX_ISA_BOARD; i++) {
+ RamIO = ioremap_nocache(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
+
+ if (!RamIO) {
+ printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1);
+ continue;
+ }
+
+ if (!(boardno = ac_register_board((unsigned long)mem+ (LEN_RAM_IO*i),
+ RamIO,i+1))) {
+ iounmap(RamIO);
+ continue;
+ }
+
+ printk(KERN_NOTICE "Applicom ISA card found at mem 0x%lx, irq %d\n", mem + (LEN_RAM_IO*i), irq);
+
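+ /*
+ * All ISA boards share a single interrupt line, so the handler is
+ * registered only for the first board found; later boards record
+ * irq == 0 so that free_irq() is called exactly once on unload.
+ */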
+ if (!numisa) {
+ if (request_irq(irq, &ac_interrupt, IRQF_SHARED, "Applicom ISA", &dummy)) {
+ printk(KERN_WARNING "Could not allocate IRQ %d for ISA Applicom device.\n", irq);
+ iounmap(RamIO);
+ apbs[boardno - 1].RamIO = NULL;
+ }
+ else
+ apbs[boardno - 1].irq = irq;
+ }
+ else
+ apbs[boardno - 1].irq = 0;
+
+ numisa++;
+ }
+
+ if (!numisa)
+ printk(KERN_WARNING "ac.o: No valid ISA Applicom boards found "
+ "at mem 0x%lx\n", mem);
+
+ fin:
+ init_waitqueue_head(&FlagSleepRec);
+
+ WriteErrorCount = 0;
+ ReadErrorCount = 0;
+ DeviceErrorCount = 0;
+
+ if (numboards) {
+ ret = misc_register(&ac_miscdev);
+ if (ret) {
+ printk(KERN_WARNING "ac.o: Unable to register misc device\n");
+ goto out;
+ }
+ for (i = 0; i < MAX_BOARD; i++) {
+ int serial;
+ char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1];
+
+ if (!apbs[i].RamIO)
+ continue;
+
+ for (serial = 0; serial < SERIAL_NUMBER - TYPE_CARD; serial++)
+ boardname[serial] = readb(apbs[i].RamIO + TYPE_CARD + serial);
+
+ boardname[serial] = 0;
+
+
+ printk(KERN_INFO "Applicom board %d: %s, PROM V%d.%d",
+ i+1, boardname,
+ (int)(readb(apbs[i].RamIO + VERS) >> 4),
+ (int)(readb(apbs[i].RamIO + VERS) & 0xF));
+
+ serial = (readb(apbs[i].RamIO + SERIAL_NUMBER) << 16) +
+ (readb(apbs[i].RamIO + SERIAL_NUMBER + 1) << 8) +
+ (readb(apbs[i].RamIO + SERIAL_NUMBER + 2) );
+
+ if (serial != 0)
+ printk(" S/N %d\n", serial);
+ else
+ printk("\n");
+ }
+ return 0;
+ }
+
+ else
+ return -ENXIO;
+
+out:
+ for (i = 0; i < MAX_BOARD; i++) {
+ if (!apbs[i].RamIO)
+ continue;
+ if (apbs[i].irq)
+ free_irq(apbs[i].irq, &dummy);
+ iounmap(apbs[i].RamIO);
+ }
+ pci_disable_device(dev);
+ return ret;
+}
+
+module_init(applicom_init);
+module_exit(applicom_exit);
+
+
+static ssize_t ac_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos)
+{
+ unsigned int NumCard; /* Board number 1 -> 8 */
+ unsigned int IndexCard; /* Index board number 0 -> 7 */
+ unsigned char TicCard; /* Board TIC to send */
+ unsigned long flags; /* saved IRQ state */
+ struct st_ram_io st_loc;
+ struct mailbox tmpmailbox;
+#ifdef DEBUG
+ int c;
+#endif
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) {
+ static int warncount = 5;
+ if (warncount) {
+ printk(KERN_INFO "Hmmm. write() of Applicom card, length %zd != expected %zd\n",
+ count, sizeof(struct st_ram_io) + sizeof(struct mailbox));
+ warncount--;
+ }
+ return -EINVAL;
+ }
+
+ if(copy_from_user(&st_loc, buf, sizeof(struct st_ram_io)))
+ return -EFAULT;
+
+ if(copy_from_user(&tmpmailbox, &buf[sizeof(struct st_ram_io)],
+ sizeof(struct mailbox)))
+ return -EFAULT;
+
+ NumCard = st_loc.num_card; /* board number to send */
+ TicCard = st_loc.tic_des_from_pc; /* tic number to send */
+ IndexCard = NumCard - 1;
+
+ if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
+ return -EINVAL;
+
+#ifdef DEBUG
+ printk("Write to applicom card #%d. struct st_ram_io follows:",
+ IndexCard+1);
+
+ for (c = 0; c < sizeof(struct st_ram_io);) {
+
+ printk("\n%5.5X: %2.2X", c, ((unsigned char *) &st_loc)[c]);
+
+ for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) {
+ printk(" %2.2X", ((unsigned char *) &st_loc)[c]);
+ }
+ }
+
+ printk("\nstruct mailbox follows:");
+
+ for (c = 0; c < sizeof(struct mailbox);) {
+ printk("\n%5.5X: %2.2X", c, ((unsigned char *) &tmpmailbox)[c]);
+
+ for (c++; c % 8 && c < sizeof(struct mailbox); c++) {
+ printk(" %2.2X", ((unsigned char *) &tmpmailbox)[c]);
+ }
+ }
+
+ printk("\n");
+#endif
+
+ spin_lock_irqsave(&apbs[IndexCard].mutex, flags);
+
+ /* Check that the ready byte is in a sane state */
+ if(readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) > 2) {
+ Dummy = readb(apbs[IndexCard].RamIO + VERS);
+ spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
+ printk(KERN_WARNING "APPLICOM driver write error board %d, DataFromPcReady = %d\n",
+ IndexCard,(int)readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY));
+ DeviceErrorCount++;
+ return -EIO;
+ }
+
+ /* Place ourselves on the wait queue */
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait);
+
+ /* Check whether the card is ready for us */
+ while (readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) != 0) {
+ Dummy = readb(apbs[IndexCard].RamIO + VERS);
+ /* It's busy. Sleep. */
+
+ spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
+ schedule();
+ if (signal_pending(current)) {
+ remove_wait_queue(&apbs[IndexCard].FlagSleepSend,
+ &wait);
+ return -EINTR;
+ }
+ spin_lock_irqsave(&apbs[IndexCard].mutex, flags);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+ /* We may not have actually slept */
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait);
+
+ writeb(1, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
+
+ /* Which is best - lock down the pages with rawio and then
+ copy directly, or use bounce buffers? For now we do the latter
+ because it works with 2.2 still */
+ {
+ unsigned char *from = (unsigned char *) &tmpmailbox;
+ void __iomem *to = apbs[IndexCard].RamIO + RAM_FROM_PC;
+ int c;
+
+ for (c = 0; c < sizeof(struct mailbox); c++)
+ writeb(*(from++), to++);
+ }
+
+ writeb(0x20, apbs[IndexCard].RamIO + TIC_OWNER_FROM_PC);
+ writeb(0xff, apbs[IndexCard].RamIO + NUMCARD_OWNER_FROM_PC);
+ writeb(TicCard, apbs[IndexCard].RamIO + TIC_DES_FROM_PC);
+ writeb(NumCard, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC);
+ writeb(2, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
+ writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
+ Dummy = readb(apbs[IndexCard].RamIO + VERS);
+ spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
+ return count;
+}
+
+static int do_ac_read(int IndexCard, char __user *buf,
+ struct st_ram_io *st_loc, struct mailbox *mailbox)
+{
+ void __iomem *from = apbs[IndexCard].RamIO + RAM_TO_PC;
+ unsigned char *to = (unsigned char *)mailbox;
+#ifdef DEBUG
+ int c;
+#endif
+
+ st_loc->tic_owner_to_pc = readb(apbs[IndexCard].RamIO + TIC_OWNER_TO_PC);
+ st_loc->numcard_owner_to_pc = readb(apbs[IndexCard].RamIO + NUMCARD_OWNER_TO_PC);
+
+
+ {
+ int c;
+
+ for (c = 0; c < sizeof(struct mailbox); c++)
+ *(to++) = readb(from++);
+ }
+ writeb(1, apbs[IndexCard].RamIO + ACK_FROM_PC_READY);
+ writeb(1, apbs[IndexCard].RamIO + TYP_ACK_FROM_PC);
+ writeb(IndexCard+1, apbs[IndexCard].RamIO + NUMCARD_ACK_FROM_PC);
+ writeb(readb(apbs[IndexCard].RamIO + TIC_OWNER_TO_PC),
+ apbs[IndexCard].RamIO + TIC_ACK_FROM_PC);
+ writeb(2, apbs[IndexCard].RamIO + ACK_FROM_PC_READY);
+ writeb(0, apbs[IndexCard].RamIO + DATA_TO_PC_READY);
+ writeb(2, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
+ Dummy = readb(apbs[IndexCard].RamIO + VERS);
+
+#ifdef DEBUG
+ printk("Read from applicom card #%d. struct st_ram_io follows:", NumCard);
+
+ for (c = 0; c < sizeof(struct st_ram_io);) {
+ printk("\n%5.5X: %2.2X", c, ((unsigned char *)st_loc)[c]);
+
+ for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) {
+ printk(" %2.2X", ((unsigned char *)st_loc)[c]);
+ }
+ }
+
+ printk("\nstruct mailbox follows:");
+
+ for (c = 0; c < sizeof(struct mailbox);) {
+ printk("\n%5.5X: %2.2X", c, ((unsigned char *)mailbox)[c]);
+
+ for (c++; c % 8 && c < sizeof(struct mailbox); c++) {
+ printk(" %2.2X", ((unsigned char *)mailbox)[c]);
+ }
+ }
+ printk("\n");
+#endif
+ return (sizeof(struct st_ram_io) + sizeof(struct mailbox));
+}
+
+static ssize_t ac_read (struct file *filp, char __user *buf, size_t count, loff_t *ptr)
+{
+ unsigned long flags;
+ unsigned int i;
+ unsigned char tmp;
+ int ret = 0;
+ DECLARE_WAITQUEUE(wait, current);
+#ifdef DEBUG
+ int loopcount=0;
+#endif
+ /* No need to ratelimit this. Only root can trigger it anyway */
+ if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) {
+ printk( KERN_WARNING "Hmmm. read() of Applicom card, length %zd != expected %zd\n",
+ count,sizeof(struct st_ram_io) + sizeof(struct mailbox));
+ return -EINVAL;
+ }
+
+ while(1) {
+ /* Stick ourself on the wait queue */
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&FlagSleepRec, &wait);
+
+ /* Scan each board, looking for one which has a packet for us */
+ for (i=0; i < MAX_BOARD; i++) {
+ if (!apbs[i].RamIO)
+ continue;
+ spin_lock_irqsave(&apbs[i].mutex, flags);
+
+ tmp = readb(apbs[i].RamIO + DATA_TO_PC_READY);
+
+ if (tmp == 2) {
+ struct st_ram_io st_loc;
+ struct mailbox mailbox;
+
+ /* Got a packet for us */
+ memset(&st_loc, 0, sizeof(st_loc));
+ ret = do_ac_read(i, buf, &st_loc, &mailbox);
+ spin_unlock_irqrestore(&apbs[i].mutex, flags);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&FlagSleepRec, &wait);
+
+ if (copy_to_user(buf, &st_loc, sizeof(st_loc)))
+ return -EFAULT;
+ if (copy_to_user(buf + sizeof(st_loc), &mailbox, sizeof(mailbox)))
+ return -EFAULT;
+ return ret;
+ }
+
+ if (tmp > 2) {
+ /* Got an error */
+ Dummy = readb(apbs[i].RamIO + VERS);
+
+ spin_unlock_irqrestore(&apbs[i].mutex, flags);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&FlagSleepRec, &wait);
+
+ printk(KERN_WARNING "APPLICOM driver read error board %d, DataToPcReady = %d\n",
+ i,(int)readb(apbs[i].RamIO + DATA_TO_PC_READY));
+ DeviceErrorCount++;
+ return -EIO;
+ }
+
+ /* Nothing for us. Try the next board */
+ Dummy = readb(apbs[i].RamIO + VERS);
+ spin_unlock_irqrestore(&apbs[i].mutex, flags);
+
+ } /* per board */
+
+ /* OK - No boards had data for us. Sleep now */
+
+ schedule();
+ remove_wait_queue(&FlagSleepRec, &wait);
+
+ if (signal_pending(current))
+ return -EINTR;
+
+#ifdef DEBUG
+ if (loopcount++ > 2) {
+ printk(KERN_DEBUG "Looping in ac_read. loopcount %d\n", loopcount);
+ }
+#endif
+ }
+}
+
+static irqreturn_t ac_interrupt(int vec, void *dev_instance)
+{
+ unsigned int i;
+ unsigned int FlagInt;
+ unsigned int LoopCount;
+ int handled = 0;
+
+ // printk("Applicom interrupt on IRQ %d occurred\n", vec);
+
+ LoopCount = 0;
+
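+ /*
+ * Rescan all boards until two consecutive passes find none
+ * asserting an interrupt; FlagInt resets LoopCount, so the
+ * LoopCount < 2 test guarantees at least one extra clean pass.
+ */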
+ do {
+ FlagInt = 0;
+ for (i = 0; i < MAX_BOARD; i++) {
+
+ /* Skip if this board doesn't exist */
+ if (!apbs[i].RamIO)
+ continue;
+
+ spin_lock(&apbs[i].mutex);
+
+ /* Skip if this board doesn't want attention */
+ if(readb(apbs[i].RamIO + RAM_IT_TO_PC) == 0) {
+ spin_unlock(&apbs[i].mutex);
+ continue;
+ }
+
+ handled = 1;
+ FlagInt = 1;
+ writeb(0, apbs[i].RamIO + RAM_IT_TO_PC);
+
+ if (readb(apbs[i].RamIO + DATA_TO_PC_READY) > 2) {
+ printk(KERN_WARNING "APPLICOM driver interrupt err board %d, DataToPcReady = %d\n",
+ i+1,(int)readb(apbs[i].RamIO + DATA_TO_PC_READY));
+ DeviceErrorCount++;
+ }
+
+ if((readb(apbs[i].RamIO + DATA_FROM_PC_READY) > 2) &&
+ (readb(apbs[i].RamIO + DATA_FROM_PC_READY) != 6)) {
+
+ printk(KERN_WARNING "APPLICOM driver interrupt err board %d, DataFromPcReady = %d\n",
+ i+1,(int)readb(apbs[i].RamIO + DATA_FROM_PC_READY));
+ DeviceErrorCount++;
+ }
+
+ if (readb(apbs[i].RamIO + DATA_TO_PC_READY) == 2) { /* mailbox sent by the card ? */
+ if (waitqueue_active(&FlagSleepRec)) {
+ wake_up_interruptible(&FlagSleepRec);
+ }
+ }
+
+ if (readb(apbs[i].RamIO + DATA_FROM_PC_READY) == 0) { /* ram i/o free for write by pc ? */
+ if (waitqueue_active(&apbs[i].FlagSleepSend)) { /* process sleep during read ? */
+ wake_up_interruptible(&apbs[i].FlagSleepSend);
+ }
+ }
+ Dummy = readb(apbs[i].RamIO + VERS);
+
+ if(readb(apbs[i].RamIO + RAM_IT_TO_PC)) {
+ /* There's another int waiting on this card */
+ spin_unlock(&apbs[i].mutex);
+ i--;
+ } else {
+ spin_unlock(&apbs[i].mutex);
+ }
+ }
+ if (FlagInt)
+ LoopCount = 0;
+ else
+ LoopCount++;
+ } while(LoopCount < 2);
+ return IRQ_RETVAL(handled);
+}
+
+
+
+static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+
+{ /* ADG or ATO depending on the case */
+ int i;
+ unsigned char IndexCard;
+ void __iomem *pmem;
+ int ret = 0;
+ volatile unsigned char byte_reset_it;
+ struct st_ram_io *adgl;
+ void __user *argp = (void __user *)arg;
+
+ /* In general, the device is only openable by root anyway, so we're not
+ particularly concerned that bogus ioctls can flood the console. */
+
+ adgl = memdup_user(argp, sizeof(struct st_ram_io));
+ if (IS_ERR(adgl))
+ return PTR_ERR(adgl);
+
+ mutex_lock(&ac_mutex);
+ IndexCard = adgl->num_card-1;
+
+ if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
+ static int warncount = 10;
+ if (warncount) {
+ printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
+ warncount--;
+ }
+ kfree(adgl);
+ mutex_unlock(&ac_mutex);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+
+ case 0:
+ pmem = apbs[IndexCard].RamIO;
+ for (i = 0; i < sizeof(struct st_ram_io); i++)
+ ((unsigned char *)adgl)[i]=readb(pmem++);
+ if (copy_to_user(argp, adgl, sizeof(struct st_ram_io)))
+ ret = -EFAULT;
+ break;
+ case 1:
+ pmem = apbs[IndexCard].RamIO + CONF_END_TEST;
+ for (i = 0; i < 4; i++)
+ adgl->conf_end_test[i] = readb(pmem++);
+ for (i = 0; i < 2; i++)
+ adgl->error_code[i] = readb(pmem++);
+ for (i = 0; i < 4; i++)
+ adgl->parameter_error[i] = readb(pmem++);
+ pmem = apbs[IndexCard].RamIO + VERS;
+ adgl->vers = readb(pmem);
+ pmem = apbs[IndexCard].RamIO + TYPE_CARD;
+ for (i = 0; i < 20; i++)
+ adgl->reserv1[i] = readb(pmem++);
+ *(int *)&adgl->reserv1[20] =
+ (readb(apbs[IndexCard].RamIO + SERIAL_NUMBER) << 16) +
+ (readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 1) << 8) +
+ (readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 2) );
+
+ if (copy_to_user(argp, adgl, sizeof(struct st_ram_io)))
+ ret = -EFAULT;
+ break;
+ case 2:
+ pmem = apbs[IndexCard].RamIO + CONF_END_TEST;
+ for (i = 0; i < 10; i++)
+ writeb(0xff, pmem++);
+ writeb(adgl->data_from_pc_ready,
+ apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
+
+ writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
+
+ for (i = 0; i < MAX_BOARD; i++) {
+ if (apbs[i].RamIO) {
+ byte_reset_it = readb(apbs[i].RamIO + RAM_IT_TO_PC);
+ }
+ }
+ break;
+ case 3:
+ pmem = apbs[IndexCard].RamIO + TIC_DES_FROM_PC;
+ writeb(adgl->tic_des_from_pc, pmem);
+ break;
+ case 4:
+ pmem = apbs[IndexCard].RamIO + TIC_OWNER_TO_PC;
+ adgl->tic_owner_to_pc = readb(pmem++);
+ adgl->numcard_owner_to_pc = readb(pmem);
+ if (copy_to_user(argp, adgl,sizeof(struct st_ram_io)))
+ ret = -EFAULT;
+ break;
+ case 5:
+ writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_OWNER_TO_PC);
+ writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC);
+ writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_ACK_FROM_PC);
+ writeb(4, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
+ writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
+ break;
+ case 6:
+ printk(KERN_INFO "APPLICOM driver release .... V2.8.0 ($Revision: 1.30 $)\n");
+ printk(KERN_INFO "Number of installed boards . %d\n", (int) numboards);
+ printk(KERN_INFO "Segment of board ........... %X\n", (int) mem);
+ printk(KERN_INFO "Interrupt IRQ number ....... %d\n", (int) irq);
+ for (i = 0; i < MAX_BOARD; i++) {
+ int serial;
+ char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1];
+
+ if (!apbs[i].RamIO)
+ continue;
+
+ for (serial = 0; serial < SERIAL_NUMBER - TYPE_CARD; serial++)
+ boardname[serial] = readb(apbs[i].RamIO + TYPE_CARD + serial);
+ boardname[serial] = 0;
+
+ printk(KERN_INFO "Prom version board %d ....... V%d.%d %s",
+ i+1,
+ (int)(readb(apbs[IndexCard].RamIO + VERS) >> 4),
+ (int)(readb(apbs[IndexCard].RamIO + VERS) & 0xF),
+ boardname);
+
+
+ serial = (readb(apbs[i].RamIO + SERIAL_NUMBER) << 16) +
+ (readb(apbs[i].RamIO + SERIAL_NUMBER + 1) << 8) +
+ (readb(apbs[i].RamIO + SERIAL_NUMBER + 2) );
+
+ if (serial != 0)
+ printk(" S/N %d\n", serial);
+ else
+ printk("\n");
+ }
+ if (DeviceErrorCount != 0)
+ printk(KERN_INFO "DeviceErrorCount ........... %d\n", DeviceErrorCount);
+ if (ReadErrorCount != 0)
+ printk(KERN_INFO "ReadErrorCount ............. %d\n", ReadErrorCount);
+ if (WriteErrorCount != 0)
+ printk(KERN_INFO "WriteErrorCount ............ %d\n", WriteErrorCount);
+ if (waitqueue_active(&FlagSleepRec))
+ printk(KERN_INFO "Process in read pending\n");
+ for (i = 0; i < MAX_BOARD; i++) {
+ if (apbs[i].RamIO && waitqueue_active(&apbs[i].FlagSleepSend))
+ printk(KERN_INFO "Process in write pending board %d\n",i+1);
+ }
+ break;
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+ if (IndexCard < MAX_BOARD && apbs[IndexCard].RamIO)
+ Dummy = readb(apbs[IndexCard].RamIO + VERS);
+ kfree(adgl);
+ mutex_unlock(&ac_mutex);
+ return ret;
+}
+
diff --git a/drivers/char/applicom.h b/drivers/char/applicom.h
new file mode 100644
index 00000000..35530b3d
--- /dev/null
+++ b/drivers/char/applicom.h
@@ -0,0 +1,85 @@
+/* $Id: applicom.h,v 1.2 1999/08/28 15:09:49 dwmw2 Exp $ */
+
+
+#ifndef __LINUX_APPLICOM_H__
+#define __LINUX_APPLICOM_H__
+
+
+#define DATA_TO_PC_READY 0x00
+#define TIC_OWNER_TO_PC 0x01
+#define NUMCARD_OWNER_TO_PC 0x02
+#define TIC_DES_TO_PC 0x03
+#define NUMCARD_DES_TO_PC 0x04
+#define DATA_FROM_PC_READY 0x05
+#define TIC_OWNER_FROM_PC 0x06
+#define NUMCARD_OWNER_FROM_PC 0x07
+#define TIC_DES_FROM_PC 0x08
+#define NUMCARD_DES_FROM_PC 0x09
+#define ACK_FROM_PC_READY 0x0E
+#define TIC_ACK_FROM_PC 0x0F
+#define NUMCARD_ACK_FROM_PC 0x010
+#define TYP_ACK_FROM_PC 0x011
+#define CONF_END_TEST 0x012
+#define ERROR_CODE 0x016
+#define PARAMETER_ERROR 0x018
+#define VERS 0x01E
+#define RAM_TO_PC 0x040
+#define RAM_FROM_PC 0x0170
+#define TYPE_CARD 0x03C0
+#define SERIAL_NUMBER 0x03DA
+#define RAM_IT_FROM_PC 0x03FE
+#define RAM_IT_TO_PC 0x03FF
+
+struct mailbox{
+ u16 stjb_codef; /* offset 00 */
+ s16 stjb_status; /* offset 02 */
+ u16 stjb_ticuser_root; /* offset 04 */
+ u8 stjb_piduser[4]; /* offset 06 */
+ u16 stjb_mode; /* offset 0A */
+ u16 stjb_time; /* offset 0C */
+ u16 stjb_stop; /* offset 0E */
+ u16 stjb_nfonc; /* offset 10 */
+ u16 stjb_ncard; /* offset 12 */
+ u16 stjb_nchan; /* offset 14 */
+ u16 stjb_nes; /* offset 16 */
+ u16 stjb_nb; /* offset 18 */
+ u16 stjb_typvar; /* offset 1A */
+ u32 stjb_adr; /* offset 1C */
+ u16 stjb_ticuser_dispcyc; /* offset 20 */
+ u16 stjb_ticuser_protocol; /* offset 22 */
+ u8 stjb_filler[12]; /* offset 24 */
+ u8 stjb_data[256]; /* offset 30 */
+ };
+
+struct st_ram_io
+{
+ unsigned char data_to_pc_ready;
+ unsigned char tic_owner_to_pc;
+ unsigned char numcard_owner_to_pc;
+ unsigned char tic_des_to_pc;
+ unsigned char numcard_des_to_pc;
+ unsigned char data_from_pc_ready;
+ unsigned char tic_owner_from_pc;
+ unsigned char numcard_owner_from_pc;
+ unsigned char tic_des_from_pc;
+ unsigned char numcard_des_from_pc;
+ unsigned char ack_to_pc_ready;
+ unsigned char tic_ack_to_pc;
+ unsigned char numcard_ack_to_pc;
+ unsigned char typ_ack_to_pc;
+ unsigned char ack_from_pc_ready;
+ unsigned char tic_ack_from_pc;
+ unsigned char numcard_ack_from_pc;
+ unsigned char typ_ack_from_pc;
+ unsigned char conf_end_test[4];
+ unsigned char error_code[2];
+ unsigned char parameter_error[4];
+ unsigned char time_base;
+ unsigned char nul_inc;
+ unsigned char vers;
+ unsigned char num_card;
+ unsigned char reserv1[32];
+};
+
+
+#endif /* __LINUX_APPLICOM_H__ */
diff --git a/drivers/char/bfin-otp.c b/drivers/char/bfin-otp.c
new file mode 100644
index 00000000..44660f1c
--- /dev/null
+++ b/drivers/char/bfin-otp.c
@@ -0,0 +1,275 @@
+/*
+ * Blackfin On-Chip OTP Memory Interface
+ *
+ * Copyright 2007-2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <mtd/mtd-abi.h>
+
+#include <asm/blackfin.h>
+#include <asm/bfrom.h>
+#include <asm/uaccess.h>
+
+#define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args)
+#define stampit() stamp("here i am")
+#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); })
+
+#define DRIVER_NAME "bfin-otp"
+#define PFX DRIVER_NAME ": "
+
+static DEFINE_MUTEX(bfin_otp_lock);
+
+/**
+ * bfin_otp_read - Read OTP pages
+ *
+ * All reads must be in half page chunks (half page == 64 bits).
+ */
+static ssize_t bfin_otp_read(struct file *file, char __user *buff, size_t count, loff_t *pos)
+{
+ ssize_t bytes_done;
+ u32 page, flags, ret;
+ u64 content;
+
+ stampit();
+
+ if (count % sizeof(u64))
+ return -EMSGSIZE;
+
+ if (mutex_lock_interruptible(&bfin_otp_lock))
+ return -ERESTARTSYS;
+
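+ /* An OTP page holds two 64-bit halves, so the page index is
+ *pos / 16 and *pos % 16 selects the upper or lower half below. */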
+ bytes_done = 0;
+ page = *pos / (sizeof(u64) * 2);
+ while (bytes_done < count) {
+ flags = (*pos % (sizeof(u64) * 2) ? OTP_UPPER_HALF : OTP_LOWER_HALF);
+ stamp("processing page %i (0x%x:%s)", page, flags,
+ (flags & OTP_UPPER_HALF ? "upper" : "lower"));
+ ret = bfrom_OtpRead(page, flags, &content);
+ if (ret & OTP_MASTER_ERROR) {
+ stamp("error from otp: 0x%x", ret);
+ bytes_done = -EIO;
+ break;
+ }
+ if (copy_to_user(buff + bytes_done, &content, sizeof(content))) {
+ bytes_done = -EFAULT;
+ break;
+ }
+ if (flags & OTP_UPPER_HALF)
+ ++page;
+ bytes_done += sizeof(content);
+ *pos += sizeof(content);
+ }
+
+ mutex_unlock(&bfin_otp_lock);
+
+ return bytes_done;
+}
+
+#ifdef CONFIG_BFIN_OTP_WRITE_ENABLE
+static bool allow_writes;
+
+/**
+ * bfin_otp_init_timing - setup OTP timing parameters
+ *
+ * Required before doing any write operation. Algorithms from HRM.
+ */
+static u32 bfin_otp_init_timing(void)
+{
+ u32 tp1, tp2, tp3, timing;
+
+ tp1 = get_sclk() / 1000000;
+ tp2 = (2 * get_sclk() / 10000000) << 8;
+ tp3 = (0x1401) << 15;
+ timing = tp1 | tp2 | tp3;
+ if (bfrom_OtpCommand(OTP_INIT, timing))
+ return 0;
+
+ return timing;
+}
+
+/**
+ * bfin_otp_deinit_timing - set timings to only allow reads
+ *
+ * Should be called after all writes are done.
+ */
+static void bfin_otp_deinit_timing(u32 timing)
+{
+ /* mask bits [31:15] so that any attempts to write fail */
+ bfrom_OtpCommand(OTP_CLOSE, 0);
+ bfrom_OtpCommand(OTP_INIT, timing & ~(-1 << 15));
+ bfrom_OtpCommand(OTP_CLOSE, 0);
+}
+
+/**
+ * bfin_otp_write - write OTP pages
+ *
+ * All writes must be in half page chunks (half page == 64 bits).
+ */
+static ssize_t bfin_otp_write(struct file *filp, const char __user *buff, size_t count, loff_t *pos)
+{
+ ssize_t bytes_done;
+ u32 timing, page, base_flags, flags, ret;
+ u64 content;
+
+ if (!allow_writes)
+ return -EACCES;
+
+ if (count % sizeof(u64))
+ return -EMSGSIZE;
+
+ if (mutex_lock_interruptible(&bfin_otp_lock))
+ return -ERESTARTSYS;
+
+ stampit();
+
+ timing = bfin_otp_init_timing();
+ if (timing == 0) {
+ mutex_unlock(&bfin_otp_lock);
+ return -EIO;
+ }
+
+ base_flags = OTP_CHECK_FOR_PREV_WRITE;
+
+ bytes_done = 0;
+ page = *pos / (sizeof(u64) * 2);
+ while (bytes_done < count) {
+ flags = base_flags | (*pos % (sizeof(u64) * 2) ? OTP_UPPER_HALF : OTP_LOWER_HALF);
+ stamp("processing page %i (0x%x:%s) from %p", page, flags,
+ (flags & OTP_UPPER_HALF ? "upper" : "lower"), buff + bytes_done);
+ if (copy_from_user(&content, buff + bytes_done, sizeof(content))) {
+ bytes_done = -EFAULT;
+ break;
+ }
+ ret = bfrom_OtpWrite(page, flags, &content);
+ if (ret & OTP_MASTER_ERROR) {
+ stamp("error from otp: 0x%x", ret);
+ bytes_done = -EIO;
+ break;
+ }
+ if (flags & OTP_UPPER_HALF)
+ ++page;
+ bytes_done += sizeof(content);
+ *pos += sizeof(content);
+ }
+
+ bfin_otp_deinit_timing(timing);
+
+ mutex_unlock(&bfin_otp_lock);
+
+ return bytes_done;
+}
+
+static long bfin_otp_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
+{
+ stampit();
+
+ switch (cmd) {
+ case OTPLOCK: {
+ u32 timing;
+ int ret = -EIO;
+
+ if (!allow_writes)
+ return -EACCES;
+
+ if (mutex_lock_interruptible(&bfin_otp_lock))
+ return -ERESTARTSYS;
+
+ timing = bfin_otp_init_timing();
+ if (timing) {
+ u32 otp_result = bfrom_OtpWrite(arg, OTP_LOCK, NULL);
+ stamp("locking page %lu resulted in 0x%x", arg, otp_result);
+ if (!(otp_result & OTP_MASTER_ERROR))
+ ret = 0;
+
+ bfin_otp_deinit_timing(timing);
+ }
+
+ mutex_unlock(&bfin_otp_lock);
+
+ return ret;
+ }
+
+ case MEMLOCK:
+ allow_writes = false;
+ return 0;
+
+ case MEMUNLOCK:
+ allow_writes = true;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+#else
+# define bfin_otp_write NULL
+# define bfin_otp_ioctl NULL
+#endif
+
+static const struct file_operations bfin_otp_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = bfin_otp_ioctl,
+ .read = bfin_otp_read,
+ .write = bfin_otp_write,
+ .llseek = default_llseek,
+};
+
+static struct miscdevice bfin_otp_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = DRIVER_NAME,
+ .fops = &bfin_otp_fops,
+};
+
+/**
+ * bfin_otp_init - Initialize module
+ *
+ * Registers the device and notifier handler. Actual device
+ * initialization is handled by bfin_otp_open().
+ */
+static int __init bfin_otp_init(void)
+{
+ int ret;
+
+ stampit();
+
+ ret = misc_register(&bfin_otp_misc_device);
+ if (ret) {
+ pr_init(KERN_ERR PFX "unable to register a misc device\n");
+ return ret;
+ }
+
+ pr_init(KERN_INFO PFX "initialized\n");
+
+ return 0;
+}
+
+/**
+ * bfin_otp_exit - Deinitialize module
+ *
+ * Unregisters the device and notifier handler. Actual device
+ * deinitialization is handled by bfin_otp_close().
+ */
+static void __exit bfin_otp_exit(void)
+{
+ stampit();
+
+ misc_deregister(&bfin_otp_misc_device);
+}
+
+module_init(bfin_otp_init);
+module_exit(bfin_otp_exit);
+
+MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
+MODULE_DESCRIPTION("Blackfin OTP Memory Interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
new file mode 100644
index 00000000..095ab905
--- /dev/null
+++ b/drivers/char/briq_panel.c
@@ -0,0 +1,266 @@
+/*
+ * Drivers for the Total Impact PPC based computer "BRIQ"
+ * by Dr. Karsten Jeppesen
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+
+#define BRIQ_PANEL_MINOR 156
+#define BRIQ_PANEL_VFD_IOPORT 0x0390
+#define BRIQ_PANEL_LED_IOPORT 0x0398
+#define BRIQ_PANEL_VER "1.1 (04/20/2002)"
+#define BRIQ_PANEL_MSG0 "Loading Linux"
+
+static int vfd_is_open;
+static unsigned char vfd[40];
+static int vfd_cursor;
+static unsigned char ledpb, led;
+
+static void update_vfd(void)
+{
+ int i;
+
+ /* cursor home */
+ outb(0x02, BRIQ_PANEL_VFD_IOPORT);
+ for (i=0; i<20; i++)
+ outb(vfd[i], BRIQ_PANEL_VFD_IOPORT + 1);
+
+ /* cursor to next line */
+ outb(0xc0, BRIQ_PANEL_VFD_IOPORT);
+ for (i=20; i<40; i++)
+ outb(vfd[i], BRIQ_PANEL_VFD_IOPORT + 1);
+
+}
+
+static void set_led(char state)
+{
+ if (state == 'R')
+ led = 0x01;
+ else if (state == 'G')
+ led = 0x02;
+ else if (state == 'Y')
+ led = 0x03;
+ else if (state == 'X')
+ led = 0x00;
+ outb(led, BRIQ_PANEL_LED_IOPORT);
+}
+
+static int briq_panel_open(struct inode *ino, struct file *filep)
+{
+ tty_lock();
+ /* enforce single access, vfd_is_open is protected by BKL */
+ if (vfd_is_open) {
+ tty_unlock();
+ return -EBUSY;
+ }
+ vfd_is_open = 1;
+
+ tty_unlock();
+ return 0;
+}
+
+static int briq_panel_release(struct inode *ino, struct file *filep)
+{
+ if (!vfd_is_open)
+ return -ENODEV;
+
+ vfd_is_open = 0;
+
+ return 0;
+}
+
+static ssize_t briq_panel_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned short c;
+ unsigned char cp;
+
+ if (!vfd_is_open)
+ return -ENODEV;
+
+ c = (inb(BRIQ_PANEL_LED_IOPORT) & 0x000c) | (ledpb & 0x0003);
+ set_led(' ');
+ /* upper button released */
+ if ((!(ledpb & 0x0004)) && (c & 0x0004)) {
+ cp = ' ';
+ ledpb = c;
+ if (copy_to_user(buf, &cp, 1))
+ return -EFAULT;
+ return 1;
+ }
+ /* lower button released */
+ else if ((!(ledpb & 0x0008)) && (c & 0x0008)) {
+ cp = '\r';
+ ledpb = c;
+ if (copy_to_user(buf, &cp, 1))
+ return -EFAULT;
+ return 1;
+ } else {
+ ledpb = c;
+ return 0;
+ }
+}
+
+static void scroll_vfd( void )
+{
+ int i;
+
+ for (i=0; i<20; i++) {
+ vfd[i] = vfd[i+20];
+ vfd[i+20] = ' ';
+ }
+ vfd_cursor = 20;
+}
+
+static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ size_t indx = len;
+ int i, esc = 0;
+
+ if (!vfd_is_open)
+ return -EBUSY;
+
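+ /*
+ * Tiny control language: ESC <c> sets the LED ('R', 'G', 'Y' or
+ * 'X'), form feed (12) clears the display, newline (10) advances
+ * to the next line (scrolling when needed) and anything else is
+ * a literal character.
+ */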
+ for (;;) {
+ char c;
+ if (!indx)
+ break;
+ if (get_user(c, buf))
+ return -EFAULT;
+ if (esc) {
+ set_led(c);
+ esc = 0;
+ } else if (c == 27) {
+ esc = 1;
+ } else if (c == 12) {
+ /* do a form feed */
+ for (i=0; i<40; i++)
+ vfd[i] = ' ';
+ vfd_cursor = 0;
+ } else if (c == 10) {
+ if (vfd_cursor < 20)
+ vfd_cursor = 20;
+ else if (vfd_cursor < 40)
+ vfd_cursor = 40;
+ else if (vfd_cursor < 60)
+ vfd_cursor = 60;
+ if (vfd_cursor > 59)
+ scroll_vfd();
+ } else {
+ /* just a character */
+ if (vfd_cursor > 39)
+ scroll_vfd();
+ vfd[vfd_cursor++] = c;
+ }
+ indx--;
+ buf++;
+ }
+ update_vfd();
+
+ return len;
+}
+
+static const struct file_operations briq_panel_fops = {
+ .owner = THIS_MODULE,
+ .read = briq_panel_read,
+ .write = briq_panel_write,
+ .open = briq_panel_open,
+ .release = briq_panel_release,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice briq_panel_miscdev = {
+ BRIQ_PANEL_MINOR,
+ "briq_panel",
+ &briq_panel_fops
+};
+
+static int __init briq_panel_init(void)
+{
+ struct device_node *root = of_find_node_by_path("/");
+ const char *machine;
+ int i;
+
+ machine = of_get_property(root, "model", NULL);
+ if (!machine || strncmp(machine, "TotalImpact,BRIQ-1", 18) != 0) {
+ of_node_put(root);
+ return -ENODEV;
+ }
+ of_node_put(root);
+
+ printk(KERN_INFO
+ "briq_panel: v%s Dr. Karsten Jeppesen (kj@totalimpact.com)\n",
+ BRIQ_PANEL_VER);
+
+ if (!request_region(BRIQ_PANEL_VFD_IOPORT, 4, "BRIQ Front Panel"))
+ return -EBUSY;
+
+ if (!request_region(BRIQ_PANEL_LED_IOPORT, 2, "BRIQ Front Panel")) {
+ release_region(BRIQ_PANEL_VFD_IOPORT, 4);
+ return -EBUSY;
+ }
+ ledpb = inb(BRIQ_PANEL_LED_IOPORT) & 0x000c;
+
+ if (misc_register(&briq_panel_miscdev) < 0) {
+ release_region(BRIQ_PANEL_VFD_IOPORT, 4);
+ release_region(BRIQ_PANEL_LED_IOPORT, 2);
+ return -EBUSY;
+ }
+
+ outb(0x38, BRIQ_PANEL_VFD_IOPORT); /* Function set */
+ outb(0x01, BRIQ_PANEL_VFD_IOPORT); /* Clear display */
+ outb(0x0c, BRIQ_PANEL_VFD_IOPORT); /* Display on */
+ outb(0x06, BRIQ_PANEL_VFD_IOPORT); /* Entry normal */
+ for (i=0; i<40; i++)
+ vfd[i]=' ';
+#ifndef MODULE
+ vfd[0] = 'L';
+ vfd[1] = 'o';
+ vfd[2] = 'a';
+ vfd[3] = 'd';
+ vfd[4] = 'i';
+ vfd[5] = 'n';
+ vfd[6] = 'g';
+ vfd[7] = ' ';
+ vfd[8] = '.';
+ vfd[9] = '.';
+ vfd[10] = '.';
+#endif /* !MODULE */
+
+ update_vfd();
+
+ return 0;
+}
+
+static void __exit briq_panel_exit(void)
+{
+ misc_deregister(&briq_panel_miscdev);
+ release_region(BRIQ_PANEL_VFD_IOPORT, 4);
+ release_region(BRIQ_PANEL_LED_IOPORT, 2);
+}
+
+module_init(briq_panel_init);
+module_exit(briq_panel_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Karsten Jeppesen <karsten@jeppesens.com>");
+MODULE_DESCRIPTION("Driver for the Total Impact briQ front panel");
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
new file mode 100644
index 00000000..cf39bc08
--- /dev/null
+++ b/drivers/char/bsr.c
@@ -0,0 +1,356 @@
+/* IBM POWER Barrier Synchronization Register Driver
+ *
+ * Copyright IBM Corporation 2008
+ *
+ * Author: Sonny Rao <sonnyrao@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+
+/*
+ This driver exposes a special register which can be used for fast
+ synchronization across a large SMP machine. The hardware is exposed
+ as an array of bytes where each process will write to one of the bytes to
+ indicate it has finished the current stage and this update is broadcast to
+ all processors without having to bounce a cacheline between them. In
+ POWER5 and POWER6 there is one of these registers per SMP, but it is
+ presented in two forms; first, it is given as a whole and then as a number
+ of smaller registers which alias to parts of the single whole register.
+ This can potentially allow multiple groups of processes to each have their
+ own private synchronization device.
+
+ Note that this hardware *must* be written to using *only* single byte
+ writes. It may be read using 1, 2, 4, or 8 byte loads, which must be
+ aligned since this region is treated as cache-inhibited. Processes should
+ also use a full sync before and after writing to the BSR to ensure that
+ all stores and the BSR update have reached all chips in the system.
+*/
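+
+/*
+ * A hypothetical userspace sketch (not part of this driver): a task
+ * mmap()s its /dev/bsrN_M device and signals completion of a stage
+ * with a single byte store, bracketed by the full syncs noted above:
+ *
+ * unsigned char *bsr = mmap(NULL, len, PROT_READ | PROT_WRITE,
+ * MAP_SHARED, fd, 0);
+ * __asm__ __volatile__("sync");
+ * bsr[my_slot] = stage;
+ * __asm__ __volatile__("sync");
+ */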
+
+/* This is an arbitrary number; up to POWER6 it has been 17 or fewer */
+#define BSR_MAX_DEVS (32)
+
+struct bsr_dev {
+ u64 bsr_addr; /* Real address */
+ u64 bsr_len; /* length of mem region we can map */
+ unsigned bsr_bytes; /* size of the BSR reg itself */
+ unsigned bsr_stride; /* interval at which BSR repeats in the page */
+ unsigned bsr_type; /* maps to enum below */
+ unsigned bsr_num; /* bsr id number for its type */
+ int bsr_minor;
+
+ struct list_head bsr_list;
+
+ dev_t bsr_dev;
+ struct cdev bsr_cdev;
+ struct device *bsr_device;
+ char bsr_name[32];
+
+};
+
+static unsigned total_bsr_devs;
+static LIST_HEAD(bsr_devs);
+static struct class *bsr_class;
+static int bsr_major;
+
+enum {
+ BSR_8 = 0,
+ BSR_16 = 1,
+ BSR_64 = 2,
+ BSR_128 = 3,
+ BSR_4096 = 4,
+ BSR_UNKNOWN = 5,
+ BSR_MAX = 6,
+};
+
+static unsigned bsr_types[BSR_MAX];
+
+static ssize_t
+bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", bsr_dev->bsr_bytes);
+}
+
+static ssize_t
+bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", bsr_dev->bsr_stride);
+}
+
+static ssize_t
+bsr_len_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
+ return sprintf(buf, "%llu\n", bsr_dev->bsr_len);
+}
+
+static struct device_attribute bsr_dev_attrs[] = {
+ __ATTR(bsr_size, S_IRUGO, bsr_size_show, NULL),
+ __ATTR(bsr_stride, S_IRUGO, bsr_stride_show, NULL),
+ __ATTR(bsr_length, S_IRUGO, bsr_len_show, NULL),
+ __ATTR_NULL
+};
+
+static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long size = vma->vm_end - vma->vm_start;
+ struct bsr_dev *dev = filp->private_data;
+ int ret;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* check for the case of a small BSR device and map one 4k page for it*/
+ if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
+ ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
+ vma->vm_page_prot);
+ else if (size <= dev->bsr_len)
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ dev->bsr_addr >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ else
+ return -EINVAL;
+
+ if (ret)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int bsr_open(struct inode * inode, struct file * filp)
+{
+ struct cdev *cdev = inode->i_cdev;
+ struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev);
+
+ filp->private_data = dev;
+ return 0;
+}
+
+static const struct file_operations bsr_fops = {
+ .owner = THIS_MODULE,
+ .mmap = bsr_mmap,
+ .open = bsr_open,
+ .llseek = noop_llseek,
+};
+
+static void bsr_cleanup_devs(void)
+{
+ struct bsr_dev *cur, *n;
+
+ list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) {
+ if (cur->bsr_device) {
+ cdev_del(&cur->bsr_cdev);
+ device_del(cur->bsr_device);
+ }
+ list_del(&cur->bsr_list);
+ kfree(cur);
+ }
+}
+
+static int bsr_add_node(struct device_node *bn)
+{
+ int bsr_stride_len, bsr_bytes_len, num_bsr_devs;
+ const u32 *bsr_stride;
+ const u32 *bsr_bytes;
+ unsigned i;
+ int ret = -ENODEV;
+
+ bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len);
+ bsr_bytes = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len);
+
+ if (!bsr_stride || !bsr_bytes ||
+ (bsr_stride_len != bsr_bytes_len)) {
+ printk(KERN_ERR "bsr of-node has missing/incorrect property\n");
+ return ret;
+ }
+
+ num_bsr_devs = bsr_bytes_len / sizeof(u32);
+
+ for (i = 0 ; i < num_bsr_devs; i++) {
+ struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev),
+ GFP_KERNEL);
+ struct resource res;
+ int result;
+
+ if (!cur) {
+ printk(KERN_ERR "Unable to alloc bsr dev\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ result = of_address_to_resource(bn, i, &res);
+ if (result < 0) {
+ printk(KERN_ERR "bsr of-node has invalid reg property, skipping\n");
+ kfree(cur);
+ continue;
+ }
+
+ cur->bsr_minor = i + total_bsr_devs;
+ cur->bsr_addr = res.start;
+ cur->bsr_len = res.end - res.start + 1;
+ cur->bsr_bytes = bsr_bytes[i];
+ cur->bsr_stride = bsr_stride[i];
+ cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs);
+
+ /* if we have a bsr_len of > 4k and less then PAGE_SIZE (64k pages) */
+ /* we can only map 4k of it, so only advertise the 4k in sysfs */
+ if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
+ cur->bsr_len = 4096;
+
+ switch(cur->bsr_bytes) {
+ case 8:
+ cur->bsr_type = BSR_8;
+ break;
+ case 16:
+ cur->bsr_type = BSR_16;
+ break;
+ case 64:
+ cur->bsr_type = BSR_64;
+ break;
+ case 128:
+ cur->bsr_type = BSR_128;
+ break;
+ case 4096:
+ cur->bsr_type = BSR_4096;
+ break;
+ default:
+ cur->bsr_type = BSR_UNKNOWN;
+ }
+
+ cur->bsr_num = bsr_types[cur->bsr_type];
+ snprintf(cur->bsr_name, 32, "bsr%d_%d",
+ cur->bsr_bytes, cur->bsr_num);
+
+ cdev_init(&cur->bsr_cdev, &bsr_fops);
+ result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1);
+ if (result) {
+ kfree(cur);
+ goto out_err;
+ }
+
+ cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
+ cur, cur->bsr_name);
+ if (IS_ERR(cur->bsr_device)) {
+ printk(KERN_ERR "device_create failed for %s\n",
+ cur->bsr_name);
+ cdev_del(&cur->bsr_cdev);
+ kfree(cur);
+ goto out_err;
+ }
+
+ bsr_types[cur->bsr_type] = cur->bsr_num + 1;
+ list_add_tail(&cur->bsr_list, &bsr_devs);
+ }
+
+ total_bsr_devs += num_bsr_devs;
+
+ return 0;
+
+ out_err:
+
+ bsr_cleanup_devs();
+ return ret;
+}
+
+static int bsr_create_devs(struct device_node *bn)
+{
+ int ret;
+
+ while (bn) {
+ ret = bsr_add_node(bn);
+ if (ret) {
+ of_node_put(bn);
+ return ret;
+ }
+ bn = of_find_compatible_node(bn, NULL, "ibm,bsr");
+ }
+ return 0;
+}
+
+static int __init bsr_init(void)
+{
+ struct device_node *np;
+ dev_t bsr_dev;
+ int ret = -ENODEV;
+ int result;
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,bsr");
+ if (!np)
+ goto out_err;
+
+ bsr_class = class_create(THIS_MODULE, "bsr");
+ if (IS_ERR(bsr_class)) {
+ printk(KERN_ERR "class_create() failed for bsr_class\n");
+ goto out_err_1;
+ }
+ bsr_class->dev_attrs = bsr_dev_attrs;
+
+ result = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
+ bsr_major = MAJOR(bsr_dev);
+ if (result < 0) {
+ printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n");
+ goto out_err_2;
+ }
+
+ if ((ret = bsr_create_devs(np)) < 0) {
+ np = NULL;
+ goto out_err_3;
+ }
+
+ return 0;
+
+ out_err_3:
+ unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS);
+
+ out_err_2:
+ class_destroy(bsr_class);
+
+ out_err_1:
+ of_node_put(np);
+
+ out_err:
+
+ return ret;
+}
+
+static void __exit bsr_exit(void)
+{
+
+ bsr_cleanup_devs();
+
+ if (bsr_class)
+ class_destroy(bsr_class);
+
+ if (bsr_major)
+ unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS);
+}
+
+module_init(bsr_init);
+module_exit(bsr_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>");
diff --git a/drivers/char/dcc_tty.c b/drivers/char/dcc_tty.c
new file mode 100644
index 00000000..a787accd
--- /dev/null
+++ b/drivers/char/dcc_tty.c
@@ -0,0 +1,326 @@
+/* drivers/char/dcc_tty.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/hrtimer.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+MODULE_DESCRIPTION("DCC TTY Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+static DEFINE_SPINLOCK(g_dcc_tty_lock);
+static struct hrtimer g_dcc_timer;
+static char g_dcc_buffer[16];
+static int g_dcc_buffer_head;
+static int g_dcc_buffer_count;
+static unsigned g_dcc_write_delay_usecs = 1;
+static struct tty_driver *g_dcc_tty_driver;
+static struct tty_struct *g_dcc_tty;
+static int g_dcc_tty_open_count;
+
+static void dcc_poll_locked(void)
+{
+ char ch;
+ int rch;
+ int written;
+
+ while (g_dcc_buffer_count) {
+ ch = g_dcc_buffer[g_dcc_buffer_head];
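+ /*
+ * CP14 debug comms channel write: the mrc with r15 as target
+ * moves the DCC status flags into the CPSR, so the carry flag
+ * reflects the TX-full bit; if TX is free, the conditional
+ * mcrcc stores ch in the DCC data register and written is set
+ * to 1, otherwise 0.
+ */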
+ asm(
+ "mrc 14, 0, r15, c0, c1, 0\n"
+ "mcrcc 14, 0, %1, c0, c5, 0\n"
+ "movcc %0, #1\n"
+ "movcs %0, #0\n"
+ : "=r" (written)
+ : "r" (ch)
+ );
+ if (written) {
+ if (ch == '\n')
+ g_dcc_buffer[g_dcc_buffer_head] = '\r';
+ else {
+ g_dcc_buffer_head = (g_dcc_buffer_head + 1) % ARRAY_SIZE(g_dcc_buffer);
+ g_dcc_buffer_count--;
+ if (g_dcc_tty)
+ tty_wakeup(g_dcc_tty);
+ }
+ g_dcc_write_delay_usecs = 1;
+ } else {
+ if (g_dcc_write_delay_usecs > 0x100)
+ break;
+ g_dcc_write_delay_usecs <<= 1;
+ udelay(g_dcc_write_delay_usecs);
+ }
+ }
+
+ if (g_dcc_tty && !test_bit(TTY_THROTTLED, &g_dcc_tty->flags)) {
+ asm(
+ "mrc 14, 0, %0, c0, c1, 0\n"
+ "tst %0, #(1 << 30)\n"
+ "moveq %0, #-1\n"
+ "mrcne 14, 0, %0, c0, c5, 0\n"
+ : "=r" (rch)
+ );
+ if (rch >= 0) {
+ ch = rch;
+ tty_insert_flip_string(g_dcc_tty, &ch, 1);
+ tty_flip_buffer_push(g_dcc_tty);
+ }
+ }
+
+
+ if (g_dcc_buffer_count)
+ hrtimer_start(&g_dcc_timer, ktime_set(0, g_dcc_write_delay_usecs * NSEC_PER_USEC), HRTIMER_MODE_REL);
+ else
+ hrtimer_start(&g_dcc_timer, ktime_set(0, 20 * NSEC_PER_MSEC), HRTIMER_MODE_REL);
+}
+
+static int dcc_tty_open(struct tty_struct * tty, struct file * filp)
+{
+ int ret;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ if (g_dcc_tty == NULL || g_dcc_tty == tty) {
+ g_dcc_tty = tty;
+ g_dcc_tty_open_count++;
+ ret = 0;
+ } else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+
+ printk("dcc_tty_open, tty %p, f_flags %x, returned %d\n", tty, filp->f_flags, ret);
+
+ return ret;
+}
+
+static void dcc_tty_close(struct tty_struct * tty, struct file * filp)
+{
+ printk("dcc_tty_close, tty %p, f_flags %x\n", tty, filp->f_flags);
+ if (g_dcc_tty == tty) {
+ if (--g_dcc_tty_open_count == 0)
+ g_dcc_tty = NULL;
+ }
+}
+
+static int dcc_write(const unsigned char *buf_start, int count)
+{
+ const unsigned char *buf = buf_start;
+ unsigned long irq_flags;
+ int copy_len;
+ int space_left;
+ int tail;
+
+ if (count < 1)
+ return 0;
+
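+ /*
+ * Copy into the ring buffer in at most two chunks (from the
+ * tail to the end of the array, then wrapping to the start),
+ * polling the DCC between chunks while space remains.
+ */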
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ do {
+ tail = (g_dcc_buffer_head + g_dcc_buffer_count) % ARRAY_SIZE(g_dcc_buffer);
+ copy_len = ARRAY_SIZE(g_dcc_buffer) - tail;
+ space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+ if (copy_len > space_left)
+ copy_len = space_left;
+ if (copy_len > count)
+ copy_len = count;
+ memcpy(&g_dcc_buffer[tail], buf, copy_len);
+ g_dcc_buffer_count += copy_len;
+ buf += copy_len;
+ count -= copy_len;
+ if (copy_len < count && copy_len < space_left) {
+ space_left -= copy_len;
+ copy_len = count;
+ if (copy_len > space_left) {
+ copy_len = space_left;
+ }
+ memcpy(g_dcc_buffer, buf, copy_len);
+ buf += copy_len;
+ count -= copy_len;
+ g_dcc_buffer_count += copy_len;
+ }
+ dcc_poll_locked();
+ space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+ } while(count && space_left);
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+ return buf - buf_start;
+}
+
+static int dcc_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
+{
+ int ret;
+ /* printk("dcc_tty_write %p, %d\n", buf, count); */
+ ret = dcc_write(buf, count);
+ if (ret != count)
+ printk("dcc_tty_write %p, %d, returned %d\n", buf, count, ret);
+ return ret;
+}
+
+static int dcc_tty_write_room(struct tty_struct *tty)
+{
+ int space_left;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+ return space_left;
+}
+
+static int dcc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ int ret;
+ asm(
+ "mrc 14, 0, %0, c0, c1, 0\n"
+ "mov %0, %0, LSR #30\n"
+ "and %0, %0, #1\n"
+ : "=r" (ret)
+ );
+ return ret;
+}
+
+static void dcc_tty_unthrottle(struct tty_struct * tty)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ dcc_poll_locked();
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+}
+
+static enum hrtimer_restart dcc_tty_timer_func(struct hrtimer *timer)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+ dcc_poll_locked();
+ spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+ return HRTIMER_NORESTART;
+}
+
+void dcc_console_write(struct console *co, const char *b, unsigned count)
+{
+#if 1
+ dcc_write(b, count);
+#else
+ /* blocking printk */
+ while (count > 0) {
+ int written;
+ written = dcc_write(b, count);
+ if (written) {
+ b += written;
+ count -= written;
+ }
+ }
+#endif
+}
+
+static struct tty_driver *dcc_console_device(struct console *c, int *index)
+{
+ *index = 0;
+ return g_dcc_tty_driver;
+}
+
+static int __init dcc_console_setup(struct console *co, char *options)
+{
+ if (co->index != 0)
+ return -ENODEV;
+ return 0;
+}
+
+
+static struct console dcc_console =
+{
+ .name = "ttyDCC",
+ .write = dcc_console_write,
+ .device = dcc_console_device,
+ .setup = dcc_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static const struct tty_operations dcc_tty_ops = {
+ .open = dcc_tty_open,
+ .close = dcc_tty_close,
+ .write = dcc_tty_write,
+ .write_room = dcc_tty_write_room,
+ .chars_in_buffer = dcc_tty_chars_in_buffer,
+ .unthrottle = dcc_tty_unthrottle,
+};
+
+static int __init dcc_tty_init(void)
+{
+ int ret;
+
+ hrtimer_init(&g_dcc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ g_dcc_timer.function = dcc_tty_timer_func;
+
+ g_dcc_tty_driver = alloc_tty_driver(1);
+ if (!g_dcc_tty_driver) {
+ printk(KERN_ERR "dcc_tty_probe: alloc_tty_driver failed\n");
+ ret = -ENOMEM;
+ goto err_alloc_tty_driver_failed;
+ }
+ g_dcc_tty_driver->owner = THIS_MODULE;
+ g_dcc_tty_driver->driver_name = "dcc";
+ g_dcc_tty_driver->name = "ttyDCC";
+ g_dcc_tty_driver->major = 0; // auto assign
+ g_dcc_tty_driver->minor_start = 0;
+ g_dcc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ g_dcc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ g_dcc_tty_driver->init_termios = tty_std_termios;
+ g_dcc_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(g_dcc_tty_driver, &dcc_tty_ops);
+ ret = tty_register_driver(g_dcc_tty_driver);
+ if (ret) {
+ printk(KERN_ERR "dcc_tty_probe: tty_register_driver failed, %d\n", ret);
+ goto err_tty_register_driver_failed;
+ }
+ tty_register_device(g_dcc_tty_driver, 0, NULL);
+
+ register_console(&dcc_console);
+ hrtimer_start(&g_dcc_timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+ return 0;
+
+err_tty_register_driver_failed:
+ put_tty_driver(g_dcc_tty_driver);
+ g_dcc_tty_driver = NULL;
+err_alloc_tty_driver_failed:
+ return ret;
+}
+
+static void __exit dcc_tty_exit(void)
+{
+ int ret;
+
+ tty_unregister_device(g_dcc_tty_driver, 0);
+ ret = tty_unregister_driver(g_dcc_tty_driver);
+ if (ret < 0) {
+ printk(KERN_ERR "dcc_tty_remove: tty_unregister_driver failed, %d\n", ret);
+ } else {
+ put_tty_driver(g_dcc_tty_driver);
+ }
+ g_dcc_tty_driver = NULL;
+}
+
+module_init(dcc_tty_init);
+module_exit(dcc_tty_exit);
+
+
diff --git a/drivers/char/ds1302.c b/drivers/char/ds1302.c
new file mode 100644
index 00000000..ed8303f9
--- /dev/null
+++ b/drivers/char/ds1302.c
@@ -0,0 +1,358 @@
+/*!***************************************************************************
+*!
+*! FILE NAME : ds1302.c
+*!
+*! DESCRIPTION: Implements an interface for the DS1302 RTC
+*!
+*! Functions exported: ds1302_readreg, ds1302_writereg, ds1302_init, get_rtc_status
+*!
+*! ---------------------------------------------------------------------------
+*!
+*! (C) Copyright 1999, 2000, 2001 Axis Communications AB, LUND, SWEDEN
+*!
+*!***************************************************************************/
+
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/delay.h>
+#include <linux/bcd.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/rtc.h>
+#if defined(CONFIG_M32R)
+#include <asm/m32r.h>
+#endif
+
+#define RTC_MAJOR_NR 121 /* local major, change later */
+
+static DEFINE_MUTEX(rtc_mutex);
+static const char ds1302_name[] = "ds1302";
+
+/* Send 8 bits. */
+static void
+out_byte_rtc(unsigned int reg_addr, unsigned char x)
+{
+ /* drive RST high */
+ outw(0x0001,(unsigned long)PLD_RTCRSTODT);
+ /* write address and data */
+ outw(((x<<8)|(reg_addr&0xff)),(unsigned long)PLD_RTCWRDATA);
+ /* issue the write strobe */
+ outw(0x0002,(unsigned long)PLD_RTCCR);
+ /* wait for the strobe to complete */
+ while(inw((unsigned long)PLD_RTCCR));
+
+ /* drive RST low */
+ outw(0x0000,(unsigned long)PLD_RTCRSTODT);
+
+}
+
+static unsigned char
+in_byte_rtc(unsigned int reg_addr)
+{
+ unsigned char retval;
+
+ /* drive RST high */
+ outw(0x0001,(unsigned long)PLD_RTCRSTODT);
+ /* write the register address */
+ outw((reg_addr&0xff),(unsigned long)PLD_RTCRDDATA);
+ /* issue the read strobe */
+ outw(0x0001,(unsigned long)PLD_RTCCR);
+ /* wait for the strobe to complete */
+ while(inw((unsigned long)PLD_RTCCR));
+
+ /* fetch the data byte */
+ retval=(inw((unsigned long)PLD_RTCRDDATA) & 0xff00)>>8;
+
+ /* drive RST low */
+ outw(0x0000,(unsigned long)PLD_RTCRSTODT);
+
+ return retval;
+}
+
+/* Enable writing. */
+
+static void
+ds1302_wenable(void)
+{
+ out_byte_rtc(0x8e,0x00);
+}
+
+/* Disable writing. */
+
+static void
+ds1302_wdisable(void)
+{
+ out_byte_rtc(0x8e,0x80);
+}
+
+
+
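+/*
+ * DS1302 command byte layout (per the Dallas datasheet): bit 7 is
+ * always set, bit 6 selects RAM (1) or clock/calendar (0), bits 5-1
+ * carry the register address, and bit 0 selects read (1) or write (0).
+ * Hence the (0x81 | (reg << 1)) encoding for reads and
+ * (0x80 | (reg << 1)) for writes below, and 0xc0/0xc1 for RAM byte 0
+ * in the probe routine.
+ */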
+/* Read a byte from the selected register in the DS1302. */
+
+unsigned char
+ds1302_readreg(int reg)
+{
+ unsigned char x;
+
+ x=in_byte_rtc((0x81 | (reg << 1))); /* read register */
+
+ return x;
+}
+
+/* Write a byte to the selected register. */
+
+void
+ds1302_writereg(int reg, unsigned char val)
+{
+ ds1302_wenable();
+ out_byte_rtc((0x80 | (reg << 1)),val);
+ ds1302_wdisable();
+}
+
+void
+get_rtc_time(struct rtc_time *rtc_tm)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS);
+ rtc_tm->tm_min = CMOS_READ(RTC_MINUTES);
+ rtc_tm->tm_hour = CMOS_READ(RTC_HOURS);
+ rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
+ rtc_tm->tm_mon = CMOS_READ(RTC_MONTH);
+ rtc_tm->tm_year = CMOS_READ(RTC_YEAR);
+
+ local_irq_restore(flags);
+
+ rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
+ rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
+ rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
+ rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
+ rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
+ rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
+
+	/*
+	 * Account for differences between how the RTC uses the values
+	 * and how they are defined in a struct rtc_time.
+	 */
+
+ if (rtc_tm->tm_year <= 69)
+ rtc_tm->tm_year += 100;
+
+ rtc_tm->tm_mon--;
+}
+
+static unsigned char days_in_mo[] =
+ {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+
+/* ioctl that supports RTC_RD_TIME and RTC_SET_TIME (read and set time/date). */
+
+static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ unsigned long flags;
+
+ switch(cmd) {
+ case RTC_RD_TIME: /* read the time/date from RTC */
+ {
+ struct rtc_time rtc_tm;
+
+ memset(&rtc_tm, 0, sizeof (struct rtc_time));
+ mutex_lock(&rtc_mutex);
+ get_rtc_time(&rtc_tm);
+ mutex_unlock(&rtc_mutex);
+ if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time)))
+ return -EFAULT;
+ return 0;
+ }
+
+ case RTC_SET_TIME: /* set the RTC */
+ {
+ struct rtc_time rtc_tm;
+ unsigned char mon, day, hrs, min, sec, leap_yr;
+ unsigned int yrs;
+
+ if (!capable(CAP_SYS_TIME))
+ return -EPERM;
+
+ if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, sizeof(struct rtc_time)))
+ return -EFAULT;
+
+ yrs = rtc_tm.tm_year + 1900;
+ mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */
+ day = rtc_tm.tm_mday;
+ hrs = rtc_tm.tm_hour;
+ min = rtc_tm.tm_min;
+ sec = rtc_tm.tm_sec;
+
+
+ if ((yrs < 1970) || (yrs > 2069))
+ return -EINVAL;
+
+ leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
+
+ if ((mon > 12) || (day == 0))
+ return -EINVAL;
+
+ if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
+ return -EINVAL;
+
+ if ((hrs >= 24) || (min >= 60) || (sec >= 60))
+ return -EINVAL;
+
+ if (yrs >= 2000)
+ yrs -= 2000; /* RTC (0, 1, ... 69) */
+ else
+ yrs -= 1900; /* RTC (70, 71, ... 99) */
+
+ sec = bin2bcd(sec);
+ min = bin2bcd(min);
+ hrs = bin2bcd(hrs);
+ day = bin2bcd(day);
+ mon = bin2bcd(mon);
+ yrs = bin2bcd(yrs);
+
+ mutex_lock(&rtc_mutex);
+ local_irq_save(flags);
+ CMOS_WRITE(yrs, RTC_YEAR);
+ CMOS_WRITE(mon, RTC_MONTH);
+ CMOS_WRITE(day, RTC_DAY_OF_MONTH);
+ CMOS_WRITE(hrs, RTC_HOURS);
+ CMOS_WRITE(min, RTC_MINUTES);
+ CMOS_WRITE(sec, RTC_SECONDS);
+ local_irq_restore(flags);
+ mutex_unlock(&rtc_mutex);
+
+ /* Notice that at this point, the RTC is updated but
+ * the kernel is still running with the old time.
+ * You need to set that separately with settimeofday
+ * or adjtimex.
+ */
+ return 0;
+ }
+
+ case RTC_SET_CHARGE: /* set the RTC TRICKLE CHARGE register */
+ {
+ int tcs_val;
+
+ if (!capable(CAP_SYS_TIME))
+ return -EPERM;
+
+ if(copy_from_user(&tcs_val, (int*)arg, sizeof(int)))
+ return -EFAULT;
+
+ mutex_lock(&rtc_mutex);
+ tcs_val = RTC_TCR_PATTERN | (tcs_val & 0x0F);
+ ds1302_writereg(RTC_TRICKLECHARGER, tcs_val);
+ mutex_unlock(&rtc_mutex);
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
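+
+/*
+ * Illustrative only (not part of the driver): a minimal user-space
+ * reader for the ioctl above.  The node name "/dev/rtc" is an
+ * assumption -- a node with major RTC_MAJOR_NR (121) must have been
+ * created on the target system.
+ *
+ *	#include <stdio.h>
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/rtc.h>
+ *
+ *	int main(void)
+ *	{
+ *		struct rtc_time tm;
+ *		int fd = open("/dev/rtc", O_RDONLY);
+ *
+ *		if (fd < 0 || ioctl(fd, RTC_RD_TIME, &tm) < 0)
+ *			return 1;
+ *		printf("%04d-%02d-%02d %02d:%02d:%02d\n",
+ *		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ *		       tm.tm_hour, tm.tm_min, tm.tm_sec);
+ *		return 0;
+ *	}
+ */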
+
+int
+get_rtc_status(char *buf)
+{
+ char *p;
+ struct rtc_time tm;
+
+ p = buf;
+
+ get_rtc_time(&tm);
+
+	/*
+	 * There is no way to tell whether the user has the RTC set for
+	 * local time or for Coordinated Universal Time (UTC).  It is
+	 * probably local time.
+	 */
+
+ p += sprintf(p,
+ "rtc_time\t: %02d:%02d:%02d\n"
+ "rtc_date\t: %04d-%02d-%02d\n",
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
+
+ return p - buf;
+}
+
+
+/* The various file operations we support. */
+
+static const struct file_operations rtc_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = rtc_ioctl,
+ .llseek = noop_llseek,
+};
+
+/* Probe for the chip by writing a byte to its RAM and trying to read it back. */
+
+#define MAGIC_PATTERN 0x42
+
+static int __init
+ds1302_probe(void)
+{
+	int retval, res, baur;
+
+	baur = boot_cpu_data.bus_clock / (2 * 1000 * 1000);
+
+	printk(KERN_INFO "%s: Set PLD_RTCBAUR = %d\n", ds1302_name, baur);
+
+	outw(0x0000, (unsigned long)PLD_RTCCR);
+	outw(0x0000, (unsigned long)PLD_RTCRSTODT);
+	outw(baur, (unsigned long)PLD_RTCBAUR);
+
+ /* Try to talk to timekeeper. */
+
+ ds1302_wenable();
+	/* write a magic pattern to RAM byte 0 */
+	out_byte_rtc(0xc0, MAGIC_PATTERN);
+
+	/* read RAM byte 0 back */
+	if ((res = in_byte_rtc(0xc1)) == MAGIC_PATTERN) {
+		char buf[100];
+		ds1302_wdisable();
+		printk(KERN_INFO "%s: RTC found.\n", ds1302_name);
+		get_rtc_status(buf);
+		printk(KERN_INFO "%s", buf);
+		retval = 1;
+	} else {
+		printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name);
+		retval = 0;
+	}
+
+ return retval;
+}
+
+
+/* Just probe for the RTC and register the device to handle the ioctl needed. */
+
+int __init
+ds1302_init(void)
+{
+	if (!ds1302_probe())
+		return -ENODEV;
+	return 0;
+}
+
+static int __init ds1302_register(void)
+{
+	if (ds1302_init() < 0)
+		return -ENODEV;
+	if (register_chrdev(RTC_MAJOR_NR, ds1302_name, &rtc_fops)) {
+		printk(KERN_INFO "%s: unable to get major %d for rtc\n",
+		       ds1302_name, RTC_MAJOR_NR);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+module_init(ds1302_register);
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
new file mode 100644
index 00000000..aab9605f
--- /dev/null
+++ b/drivers/char/ds1620.c
@@ -0,0 +1,431 @@
+/*
+ * linux/drivers/char/ds1620.c: Dallas Semiconductors DS1620
+ * thermometer driver (as used in the Rebel.com NetWinder)
+ */
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/capability.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/uaccess.h>
+#include <asm/therm.h>
+
+#ifdef CONFIG_PROC_FS
+/* define for /proc interface */
+#define THERM_USE_PROC
+#endif
+
+/* Definitions for DS1620 chip */
+#define THERM_START_CONVERT 0xee
+#define THERM_RESET 0xaf
+#define THERM_READ_CONFIG 0xac
+#define THERM_READ_TEMP 0xaa
+#define THERM_READ_TL 0xa2
+#define THERM_READ_TH 0xa1
+#define THERM_WRITE_CONFIG 0x0c
+#define THERM_WRITE_TL 0x02
+#define THERM_WRITE_TH 0x01
+
+#define CFG_CPU 2
+#define CFG_1SHOT 1
+
+static DEFINE_MUTEX(ds1620_mutex);
+static const char *fan_state[] = { "off", "on", "on (hardwired)" };
+
+/*
+ * Start of NetWinder specifics
+ * Note! We have to hold the gpio lock with IRQs disabled over the
+ * whole of our transaction to the Dallas chip, since there is a
+ * chance that the WaveArtist driver could touch these bits to
+ * enable or disable the speaker.
+ */
+extern unsigned int system_rev;
+
+static inline void netwinder_ds1620_set_clk(int clk)
+{
+ nw_gpio_modify_op(GPIO_DSCLK, clk ? GPIO_DSCLK : 0);
+}
+
+static inline void netwinder_ds1620_set_data(int dat)
+{
+ nw_gpio_modify_op(GPIO_DATA, dat ? GPIO_DATA : 0);
+}
+
+static inline int netwinder_ds1620_get_data(void)
+{
+ return nw_gpio_read() & GPIO_DATA;
+}
+
+static inline void netwinder_ds1620_set_data_dir(int dir)
+{
+ nw_gpio_modify_io(GPIO_DATA, dir ? GPIO_DATA : 0);
+}
+
+static inline void netwinder_ds1620_reset(void)
+{
+ nw_cpld_modify(CPLD_DS_ENABLE, 0);
+ nw_cpld_modify(CPLD_DS_ENABLE, CPLD_DS_ENABLE);
+}
+
+static inline void netwinder_lock(unsigned long *flags)
+{
+ spin_lock_irqsave(&nw_gpio_lock, *flags);
+}
+
+static inline void netwinder_unlock(unsigned long *flags)
+{
+ spin_unlock_irqrestore(&nw_gpio_lock, *flags);
+}
+
+static inline void netwinder_set_fan(int i)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_gpio_modify_op(GPIO_FAN, i ? GPIO_FAN : 0);
+ spin_unlock_irqrestore(&nw_gpio_lock, flags);
+}
+
+static inline int netwinder_get_fan(void)
+{
+ if ((system_rev & 0xf000) == 0x4000)
+ return FAN_ALWAYS_ON;
+
+ return (nw_gpio_read() & GPIO_FAN) ? FAN_ON : FAN_OFF;
+}
+
+/*
+ * End of NetWinder specifics
+ */
+
+static void ds1620_send_bits(int nr, int value)
+{
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ netwinder_ds1620_set_data(value & 1);
+ netwinder_ds1620_set_clk(0);
+ udelay(1);
+ netwinder_ds1620_set_clk(1);
+ udelay(1);
+
+ value >>= 1;
+ }
+}
+
+static unsigned int ds1620_recv_bits(int nr)
+{
+ unsigned int value = 0, mask = 1;
+ int i;
+
+ netwinder_ds1620_set_data(0);
+
+ for (i = 0; i < nr; i++) {
+ netwinder_ds1620_set_clk(0);
+ udelay(1);
+
+ if (netwinder_ds1620_get_data())
+ value |= mask;
+
+ mask <<= 1;
+
+ netwinder_ds1620_set_clk(1);
+ udelay(1);
+ }
+
+ return value;
+}
+
+static void ds1620_out(int cmd, int bits, int value)
+{
+ unsigned long flags;
+
+ netwinder_lock(&flags);
+ netwinder_ds1620_set_clk(1);
+ netwinder_ds1620_set_data_dir(0);
+ netwinder_ds1620_reset();
+
+ udelay(1);
+
+ ds1620_send_bits(8, cmd);
+ if (bits)
+ ds1620_send_bits(bits, value);
+
+ udelay(1);
+
+ netwinder_ds1620_reset();
+ netwinder_unlock(&flags);
+
+ msleep(20);
+}
+
+static unsigned int ds1620_in(int cmd, int bits)
+{
+ unsigned long flags;
+ unsigned int value;
+
+ netwinder_lock(&flags);
+ netwinder_ds1620_set_clk(1);
+ netwinder_ds1620_set_data_dir(0);
+ netwinder_ds1620_reset();
+
+ udelay(1);
+
+ ds1620_send_bits(8, cmd);
+
+ netwinder_ds1620_set_data_dir(1);
+ value = ds1620_recv_bits(bits);
+
+ netwinder_ds1620_reset();
+ netwinder_unlock(&flags);
+
+ return value;
+}
+
+static int cvt_9_to_int(unsigned int val)
+{
+ if (val & 0x100)
+ val |= 0xfffffe00;
+
+ return val;
+}
+
+static void ds1620_write_state(struct therm *therm)
+{
+ ds1620_out(THERM_WRITE_CONFIG, 8, CFG_CPU);
+ ds1620_out(THERM_WRITE_TL, 9, therm->lo);
+ ds1620_out(THERM_WRITE_TH, 9, therm->hi);
+ ds1620_out(THERM_START_CONVERT, 0, 0);
+}
+
+static void ds1620_read_state(struct therm *therm)
+{
+ therm->lo = cvt_9_to_int(ds1620_in(THERM_READ_TL, 9));
+ therm->hi = cvt_9_to_int(ds1620_in(THERM_READ_TH, 9));
+}
+
+static int ds1620_open(struct inode *inode, struct file *file)
+{
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t
+ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr)
+{
+ signed int cur_temp;
+ signed char cur_temp_degF;
+
+ cur_temp = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9)) >> 1;
+
+ /* convert to Fahrenheit, as per wdt.c */
+ cur_temp_degF = (cur_temp * 9) / 5 + 32;
+
+ if (copy_to_user(buf, &cur_temp_degF, 1))
+ return -EFAULT;
+
+ return 1;
+}
+
+static int
+ds1620_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct therm therm;
+ union {
+ struct therm __user *therm;
+ int __user *i;
+ } uarg;
+ int i;
+
+ uarg.i = (int __user *)arg;
+
+ switch(cmd) {
+ case CMD_SET_THERMOSTATE:
+ case CMD_SET_THERMOSTATE2:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (cmd == CMD_SET_THERMOSTATE) {
+ if (get_user(therm.hi, uarg.i))
+ return -EFAULT;
+ therm.lo = therm.hi - 3;
+ } else {
+ if (copy_from_user(&therm, uarg.therm, sizeof(therm)))
+ return -EFAULT;
+ }
+
+ therm.lo <<= 1;
+ therm.hi <<= 1;
+
+ ds1620_write_state(&therm);
+ break;
+
+ case CMD_GET_THERMOSTATE:
+ case CMD_GET_THERMOSTATE2:
+ ds1620_read_state(&therm);
+
+ therm.lo >>= 1;
+ therm.hi >>= 1;
+
+ if (cmd == CMD_GET_THERMOSTATE) {
+ if (put_user(therm.hi, uarg.i))
+ return -EFAULT;
+ } else {
+ if (copy_to_user(uarg.therm, &therm, sizeof(therm)))
+ return -EFAULT;
+ }
+ break;
+
+ case CMD_GET_TEMPERATURE:
+ case CMD_GET_TEMPERATURE2:
+ i = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9));
+
+ if (cmd == CMD_GET_TEMPERATURE)
+ i >>= 1;
+
+ return put_user(i, uarg.i) ? -EFAULT : 0;
+
+ case CMD_GET_STATUS:
+ i = ds1620_in(THERM_READ_CONFIG, 8) & 0xe3;
+
+ return put_user(i, uarg.i) ? -EFAULT : 0;
+
+ case CMD_GET_FAN:
+ i = netwinder_get_fan();
+
+ return put_user(i, uarg.i) ? -EFAULT : 0;
+
+ case CMD_SET_FAN:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(i, uarg.i))
+ return -EFAULT;
+
+ netwinder_set_fan(i);
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return 0;
+}
+
+static long
+ds1620_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&ds1620_mutex);
+ ret = ds1620_ioctl(file, cmd, arg);
+ mutex_unlock(&ds1620_mutex);
+
+ return ret;
+}
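+
+/*
+ * Illustrative only: querying the current temperature from user space
+ * with the CMD_GET_TEMPERATURE ioctl handled above.  "/dev/temp" is an
+ * assumed name for the misc device registered below; the ioctl numbers
+ * come from <asm/therm.h> on the NetWinder.
+ *
+ *	#include <stdio.h>
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <asm/therm.h>
+ *
+ *	int main(void)
+ *	{
+ *		int temp;
+ *		int fd = open("/dev/temp", O_RDONLY);
+ *
+ *		if (fd < 0 || ioctl(fd, CMD_GET_TEMPERATURE, &temp) < 0)
+ *			return 1;
+ *		printf("%d C\n", temp);	// whole degrees, halves dropped
+ *		return 0;
+ *	}
+ */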
+
+#ifdef THERM_USE_PROC
+static int
+proc_therm_ds1620_read(char *buf, char **start, off_t offset,
+ int len, int *eof, void *unused)
+{
+ struct therm th;
+ int temp;
+
+ ds1620_read_state(&th);
+ temp = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9));
+
+ len = sprintf(buf, "Thermostat: HI %i.%i, LOW %i.%i; "
+ "temperature: %i.%i C, fan %s\n",
+ th.hi >> 1, th.hi & 1 ? 5 : 0,
+ th.lo >> 1, th.lo & 1 ? 5 : 0,
+ temp >> 1, temp & 1 ? 5 : 0,
+ fan_state[netwinder_get_fan()]);
+
+ return len;
+}
+
+static struct proc_dir_entry *proc_therm_ds1620;
+#endif
+
+static const struct file_operations ds1620_fops = {
+ .owner = THIS_MODULE,
+ .open = ds1620_open,
+ .read = ds1620_read,
+ .unlocked_ioctl = ds1620_unlocked_ioctl,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice ds1620_miscdev = {
+ TEMP_MINOR,
+ "temp",
+ &ds1620_fops
+};
+
+static int __init ds1620_init(void)
+{
+ int ret;
+ struct therm th, th_start;
+
+ if (!machine_is_netwinder())
+ return -ENODEV;
+
+ ds1620_out(THERM_RESET, 0, 0);
+ ds1620_out(THERM_WRITE_CONFIG, 8, CFG_CPU);
+ ds1620_out(THERM_START_CONVERT, 0, 0);
+
+	/*
+	 * Kick the fan into action by temporarily
+	 * setting the high temperature threshold
+	 * very low.
+	 */
+ ds1620_read_state(&th);
+ th_start.lo = 0;
+ th_start.hi = 1;
+ ds1620_write_state(&th_start);
+
+ msleep(2000);
+
+ ds1620_write_state(&th);
+
+ ret = misc_register(&ds1620_miscdev);
+ if (ret < 0)
+ return ret;
+
+#ifdef THERM_USE_PROC
+ proc_therm_ds1620 = create_proc_entry("therm", 0, NULL);
+ if (proc_therm_ds1620)
+ proc_therm_ds1620->read_proc = proc_therm_ds1620_read;
+ else
+ printk(KERN_ERR "therm: unable to register /proc/therm\n");
+#endif
+
+ ds1620_read_state(&th);
+ ret = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9));
+
+ printk(KERN_INFO "Thermostat: high %i.%i, low %i.%i, "
+ "current %i.%i C, fan %s.\n",
+ th.hi >> 1, th.hi & 1 ? 5 : 0,
+ th.lo >> 1, th.lo & 1 ? 5 : 0,
+ ret >> 1, ret & 1 ? 5 : 0,
+ fan_state[netwinder_get_fan()]);
+
+ return 0;
+}
+
+static void __exit ds1620_exit(void)
+{
+#ifdef THERM_USE_PROC
+ remove_proc_entry("therm", NULL);
+#endif
+ misc_deregister(&ds1620_miscdev);
+}
+
+module_init(ds1620_init);
+module_exit(ds1620_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
new file mode 100644
index 00000000..052797b3
--- /dev/null
+++ b/drivers/char/dsp56k.c
@@ -0,0 +1,534 @@
+/*
+ * The DSP56001 Device Driver, saviour of the Free World(tm)
+ *
+ * Authors: Fredrik Noring <noring@nocrew.org>
+ * lars brinkhoff <lars@nocrew.org>
+ * Tomas Berndtsson <tomas@nocrew.org>
+ *
+ * First version May 1996
+ *
+ * History:
+ * 97-01-29 Tomas Berndtsson,
+ * Integrated with Linux 2.1.21 kernel sources.
+ * 97-02-15 Tomas Berndtsson,
+ * Fixed for kernel 2.1.26
+ *
+ * BUGS:
+ * Hmm... there must be something here :)
+ *
+ * Copyright (C) 1996,1997 Fredrik Noring, lars brinkhoff & Tomas Berndtsson
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/major.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h> /* guess what */
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h> /* For put_user and get_user */
+
+#include <asm/atarihw.h>
+#include <asm/traps.h>
+
+#include <asm/dsp56k.h>
+
+/* minor devices */
+#define DSP56K_DEV_56001 0 /* The only device so far */
+
+#define TIMEOUT 10 /* Host port timeout in number of tries */
+#define MAXIO 2048 /* Maximum number of words before sleep */
+#define DSP56K_MAX_BINARY_LENGTH (3*64*1024)
+
+#define DSP56K_TX_INT_ON dsp56k_host_interface.icr |= DSP56K_ICR_TREQ
+#define DSP56K_RX_INT_ON dsp56k_host_interface.icr |= DSP56K_ICR_RREQ
+#define DSP56K_TX_INT_OFF dsp56k_host_interface.icr &= ~DSP56K_ICR_TREQ
+#define DSP56K_RX_INT_OFF dsp56k_host_interface.icr &= ~DSP56K_ICR_RREQ
+
+#define DSP56K_TRANSMIT (dsp56k_host_interface.isr & DSP56K_ISR_TXDE)
+#define DSP56K_RECEIVE (dsp56k_host_interface.isr & DSP56K_ISR_RXDF)
+
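+/*
+ * Transfer "count" words through the host port, at most "maxio" words
+ * per burst.  For each word, poll (sleeping 20 ms per try, up to
+ * "timeout" tries) until the ENABLE condition reports the port ready,
+ * then execute the statement(s) "f" to move one word.  The macro
+ * expands inside the calling function and makes it return -EIO on
+ * timeout.
+ */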
+#define handshake(count, maxio, timeout, ENABLE, f) \
+{ \
+ long i, t, m; \
+ while (count > 0) { \
+ m = min_t(unsigned long, count, maxio); \
+ for (i = 0; i < m; i++) { \
+ for (t = 0; t < timeout && !ENABLE; t++) \
+ msleep(20); \
+ if(!ENABLE) \
+ return -EIO; \
+ f; \
+ } \
+ count -= m; \
+ if (m == maxio) msleep(20); \
+ } \
+}
+
+#define tx_wait(n) \
+{ \
+ int t; \
+ for(t = 0; t < n && !DSP56K_TRANSMIT; t++) \
+ msleep(10); \
+ if(!DSP56K_TRANSMIT) { \
+ return -EIO; \
+ } \
+}
+
+#define rx_wait(n) \
+{ \
+ int t; \
+ for(t = 0; t < n && !DSP56K_RECEIVE; t++) \
+ msleep(10); \
+ if(!DSP56K_RECEIVE) { \
+ return -EIO; \
+ } \
+}
+
+static DEFINE_MUTEX(dsp56k_mutex);
+static struct dsp56k_device {
+ unsigned long in_use;
+ long maxio, timeout;
+ int tx_wsize, rx_wsize;
+} dsp56k;
+
+static struct class *dsp56k_class;
+
+static int dsp56k_reset(void)
+{
+ u_char status;
+
+ /* Power down the DSP */
+ sound_ym.rd_data_reg_sel = 14;
+ status = sound_ym.rd_data_reg_sel & 0xef;
+ sound_ym.wd_data = status;
+ sound_ym.wd_data = status | 0x10;
+
+ udelay(10);
+
+ /* Power up the DSP */
+ sound_ym.rd_data_reg_sel = 14;
+ sound_ym.wd_data = sound_ym.rd_data_reg_sel & 0xef;
+
+ return 0;
+}
+
+static int dsp56k_upload(u_char __user *bin, int len)
+{
+ struct platform_device *pdev;
+ const struct firmware *fw;
+ const char fw_name[] = "dsp56k/bootstrap.bin";
+ int err;
+ int i;
+
+ dsp56k_reset();
+
+ pdev = platform_device_register_simple("dsp56k", 0, NULL, 0);
+ if (IS_ERR(pdev)) {
+ printk(KERN_ERR "Failed to register device for \"%s\"\n",
+ fw_name);
+ return -EINVAL;
+ }
+ err = request_firmware(&fw, fw_name, &pdev->dev);
+ platform_device_unregister(pdev);
+ if (err) {
+ printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
+ fw_name, err);
+ return err;
+ }
+	if (fw->size % 3) {
+		printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
+		       fw->size, fw_name);
+ release_firmware(fw);
+ return -EINVAL;
+ }
+ for (i = 0; i < fw->size; i = i + 3) {
+ /* tx_wait(10); */
+ dsp56k_host_interface.data.b[1] = fw->data[i];
+ dsp56k_host_interface.data.b[2] = fw->data[i + 1];
+ dsp56k_host_interface.data.b[3] = fw->data[i + 2];
+ }
+ release_firmware(fw);
+ for (; i < 512; i++) {
+ /* tx_wait(10); */
+ dsp56k_host_interface.data.b[1] = 0;
+ dsp56k_host_interface.data.b[2] = 0;
+ dsp56k_host_interface.data.b[3] = 0;
+ }
+
+ for (i = 0; i < len; i++) {
+ tx_wait(10);
+ get_user(dsp56k_host_interface.data.b[1], bin++);
+ get_user(dsp56k_host_interface.data.b[2], bin++);
+ get_user(dsp56k_host_interface.data.b[3], bin++);
+ }
+
+ tx_wait(10);
+ dsp56k_host_interface.data.l = 3; /* Magic execute */
+
+ return 0;
+}
+
+static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ int dev = iminor(inode) & 0x0f;
+
+ switch(dev)
+ {
+ case DSP56K_DEV_56001:
+ {
+
+ long n;
+
+ /* Don't do anything if nothing is to be done */
+ if (!count) return 0;
+
+ n = 0;
+ switch (dsp56k.rx_wsize) {
+ case 1: /* 8 bit */
+ {
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
+ put_user(dsp56k_host_interface.data.b[3], buf+n++));
+ return n;
+ }
+ case 2: /* 16 bit */
+ {
+ short __user *data;
+
+ count /= 2;
+ data = (short __user *) buf;
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
+ put_user(dsp56k_host_interface.data.w[1], data+n++));
+ return 2*n;
+ }
+ case 3: /* 24 bit */
+ {
+ count /= 3;
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
+ put_user(dsp56k_host_interface.data.b[1], buf+n++);
+ put_user(dsp56k_host_interface.data.b[2], buf+n++);
+ put_user(dsp56k_host_interface.data.b[3], buf+n++));
+ return 3*n;
+ }
+ case 4: /* 32 bit */
+ {
+ long __user *data;
+
+ count /= 4;
+ data = (long __user *) buf;
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
+ put_user(dsp56k_host_interface.data.l, data+n++));
+ return 4*n;
+ }
+ }
+ return -EFAULT;
+ }
+
+ default:
+ printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev);
+ return -ENXIO;
+ }
+}
+
+static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ int dev = iminor(inode) & 0x0f;
+
+ switch(dev)
+ {
+ case DSP56K_DEV_56001:
+ {
+ long n;
+
+ /* Don't do anything if nothing is to be done */
+ if (!count) return 0;
+
+ n = 0;
+ switch (dsp56k.tx_wsize) {
+ case 1: /* 8 bit */
+ {
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
+ get_user(dsp56k_host_interface.data.b[3], buf+n++));
+ return n;
+ }
+ case 2: /* 16 bit */
+ {
+ const short __user *data;
+
+ count /= 2;
+ data = (const short __user *)buf;
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
+ get_user(dsp56k_host_interface.data.w[1], data+n++));
+ return 2*n;
+ }
+ case 3: /* 24 bit */
+ {
+ count /= 3;
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
+ get_user(dsp56k_host_interface.data.b[1], buf+n++);
+ get_user(dsp56k_host_interface.data.b[2], buf+n++);
+ get_user(dsp56k_host_interface.data.b[3], buf+n++));
+ return 3*n;
+ }
+ case 4: /* 32 bit */
+ {
+ const long __user *data;
+
+ count /= 4;
+ data = (const long __user *)buf;
+ handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
+ get_user(dsp56k_host_interface.data.l, data+n++));
+ return 4*n;
+ }
+ }
+
+ return -EFAULT;
+ }
+ default:
+ printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev);
+ return -ENXIO;
+ }
+}
+
+static long dsp56k_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int dev = iminor(file->f_path.dentry->d_inode) & 0x0f;
+ void __user *argp = (void __user *)arg;
+
+ switch(dev)
+ {
+ case DSP56K_DEV_56001:
+
+ switch(cmd) {
+ case DSP56K_UPLOAD:
+ {
+ char __user *bin;
+ int r, len;
+ struct dsp56k_upload __user *binary = argp;
+
+ if(get_user(len, &binary->len) < 0)
+ return -EFAULT;
+ if(get_user(bin, &binary->bin) < 0)
+ return -EFAULT;
+
+ if (len == 0) {
+ return -EINVAL; /* nothing to upload?!? */
+ }
+ if (len > DSP56K_MAX_BINARY_LENGTH) {
+ return -EINVAL;
+ }
+ mutex_lock(&dsp56k_mutex);
+ r = dsp56k_upload(bin, len);
+ mutex_unlock(&dsp56k_mutex);
+ if (r < 0) {
+ return r;
+ }
+
+ break;
+ }
+ case DSP56K_SET_TX_WSIZE:
+ if (arg > 4 || arg < 1)
+ return -EINVAL;
+ mutex_lock(&dsp56k_mutex);
+ dsp56k.tx_wsize = (int) arg;
+ mutex_unlock(&dsp56k_mutex);
+ break;
+ case DSP56K_SET_RX_WSIZE:
+ if (arg > 4 || arg < 1)
+ return -EINVAL;
+ mutex_lock(&dsp56k_mutex);
+ dsp56k.rx_wsize = (int) arg;
+ mutex_unlock(&dsp56k_mutex);
+ break;
+ case DSP56K_HOST_FLAGS:
+ {
+ int dir, out, status;
+ struct dsp56k_host_flags __user *hf = argp;
+
+ if(get_user(dir, &hf->dir) < 0)
+ return -EFAULT;
+ if(get_user(out, &hf->out) < 0)
+ return -EFAULT;
+
+ mutex_lock(&dsp56k_mutex);
+ if ((dir & 0x1) && (out & 0x1))
+ dsp56k_host_interface.icr |= DSP56K_ICR_HF0;
+ else if (dir & 0x1)
+ dsp56k_host_interface.icr &= ~DSP56K_ICR_HF0;
+ if ((dir & 0x2) && (out & 0x2))
+ dsp56k_host_interface.icr |= DSP56K_ICR_HF1;
+ else if (dir & 0x2)
+ dsp56k_host_interface.icr &= ~DSP56K_ICR_HF1;
+
+ status = 0;
+ if (dsp56k_host_interface.icr & DSP56K_ICR_HF0) status |= 0x1;
+ if (dsp56k_host_interface.icr & DSP56K_ICR_HF1) status |= 0x2;
+ if (dsp56k_host_interface.isr & DSP56K_ISR_HF2) status |= 0x4;
+ if (dsp56k_host_interface.isr & DSP56K_ISR_HF3) status |= 0x8;
+ mutex_unlock(&dsp56k_mutex);
+ return put_user(status, &hf->status);
+ }
+		case DSP56K_HOST_CMD:
+			if (arg > 31)	/* arg is unsigned, never < 0 */
+ return -EINVAL;
+ mutex_lock(&dsp56k_mutex);
+ dsp56k_host_interface.cvr = (u_char)((arg & DSP56K_CVR_HV_MASK) |
+ DSP56K_CVR_HC);
+ mutex_unlock(&dsp56k_mutex);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+
+ default:
+ printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev);
+ return -ENXIO;
+ }
+}
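+
+/*
+ * Illustrative only: driving DSP56K_UPLOAD from user space.  The node
+ * name "/dev/dsp56k" is an assumption; the "len"/"bin" fields match
+ * the get_user() calls above and are declared in <asm/dsp56k.h>.
+ *
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <asm/dsp56k.h>
+ *
+ *	int upload(char *image, int len)
+ *	{
+ *		struct dsp56k_upload up = { .len = len, .bin = image };
+ *		int fd = open("/dev/dsp56k", O_RDWR);
+ *
+ *		if (fd < 0 || ioctl(fd, DSP56K_UPLOAD, &up) < 0)
+ *			return -1;
+ *		return fd;	// keep it open for read()/write()
+ *	}
+ */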
+
+/* As of 2.1.26 this should be dsp56k_poll,
+ * but how do I then check device minor number?
+ * Do I need this function at all???
+ */
+#if 0
+static unsigned int dsp56k_poll(struct file *file, poll_table *wait)
+{
+ int dev = iminor(file->f_path.dentry->d_inode) & 0x0f;
+
+ switch(dev)
+ {
+ case DSP56K_DEV_56001:
+ /* poll_wait(file, ???, wait); */
+ return POLLIN | POLLRDNORM | POLLOUT;
+
+ default:
+ printk("DSP56k driver: Unknown minor device: %d\n", dev);
+ return 0;
+ }
+}
+#endif
+
+static int dsp56k_open(struct inode *inode, struct file *file)
+{
+ int dev = iminor(inode) & 0x0f;
+ int ret = 0;
+
+ mutex_lock(&dsp56k_mutex);
+ switch(dev)
+ {
+ case DSP56K_DEV_56001:
+
+ if (test_and_set_bit(0, &dsp56k.in_use)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ dsp56k.timeout = TIMEOUT;
+ dsp56k.maxio = MAXIO;
+ dsp56k.rx_wsize = dsp56k.tx_wsize = 4;
+
+ DSP56K_TX_INT_OFF;
+ DSP56K_RX_INT_OFF;
+
+ /* Zero host flags */
+ dsp56k_host_interface.icr &= ~DSP56K_ICR_HF0;
+ dsp56k_host_interface.icr &= ~DSP56K_ICR_HF1;
+
+ break;
+
+ default:
+ ret = -ENODEV;
+ }
+out:
+ mutex_unlock(&dsp56k_mutex);
+ return ret;
+}
+
+static int dsp56k_release(struct inode *inode, struct file *file)
+{
+ int dev = iminor(inode) & 0x0f;
+
+ switch(dev)
+ {
+ case DSP56K_DEV_56001:
+ clear_bit(0, &dsp56k.in_use);
+ break;
+ default:
+ printk(KERN_ERR "DSP56k driver: Unknown minor device: %d\n", dev);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static const struct file_operations dsp56k_fops = {
+ .owner = THIS_MODULE,
+ .read = dsp56k_read,
+ .write = dsp56k_write,
+ .unlocked_ioctl = dsp56k_ioctl,
+ .open = dsp56k_open,
+ .release = dsp56k_release,
+ .llseek = noop_llseek,
+};
+
+
+/****** Init and module functions ******/
+
+static char banner[] __initdata = KERN_INFO "DSP56k driver installed\n";
+
+static int __init dsp56k_init_driver(void)
+{
+ int err = 0;
+
+	if (!MACH_IS_ATARI || !ATARIHW_PRESENT(DSP56K)) {
+		printk(KERN_ERR "DSP56k driver: Hardware not present\n");
+		return -ENODEV;
+	}
+
+	if (register_chrdev(DSP56K_MAJOR, "dsp56k", &dsp56k_fops)) {
+		printk(KERN_ERR "DSP56k driver: Unable to register driver\n");
+		return -ENODEV;
+	}
+ dsp56k_class = class_create(THIS_MODULE, "dsp56k");
+ if (IS_ERR(dsp56k_class)) {
+ err = PTR_ERR(dsp56k_class);
+ goto out_chrdev;
+ }
+ device_create(dsp56k_class, NULL, MKDEV(DSP56K_MAJOR, 0), NULL,
+ "dsp56k");
+
+ printk(banner);
+ goto out;
+
+out_chrdev:
+ unregister_chrdev(DSP56K_MAJOR, "dsp56k");
+out:
+ return err;
+}
+module_init(dsp56k_init_driver);
+
+static void __exit dsp56k_cleanup_driver(void)
+{
+ device_destroy(dsp56k_class, MKDEV(DSP56K_MAJOR, 0));
+ class_destroy(dsp56k_class);
+ unregister_chrdev(DSP56K_MAJOR, "dsp56k");
+}
+module_exit(dsp56k_cleanup_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("dsp56k/bootstrap.bin");
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
new file mode 100644
index 00000000..85156dd0
--- /dev/null
+++ b/drivers/char/dtlk.c
@@ -0,0 +1,663 @@
+/* -*- linux-c -*-
+ * dtlk.c - DoubleTalk PC driver for Linux
+ *
+ * Original author: Chris Pallotta <chris@allmedia.com>
+ * Current maintainer: Jim Van Zandt <jrv@vanzandt.mv.com>
+ *
+ * 2000-03-18 Jim Van Zandt: Fix polling.
+ * Eliminate dtlk_timer_active flag and separate dtlk_stop_timer
+ * function. Don't restart timer in dtlk_timer_tick. Restart timer
+ * in dtlk_poll after every poll. dtlk_poll returns mask (duh).
+ * Eliminate unused function dtlk_write_byte. Misc. code cleanups.
+ */
+
+/* This driver is for the DoubleTalk PC, a speech synthesizer
+ manufactured by RC Systems (http://www.rcsys.com/). It was written
+ based on documentation in their User's Manual file and Developer's
+ Tools disk.
+
+ The DoubleTalk PC contains four voice synthesizers: text-to-speech
+ (TTS), linear predictive coding (LPC), PCM/ADPCM, and CVSD. It
+ also has a tone generator. Output data for LPC are written to the
+ LPC port, and output data for the other modes are written to the
+ TTS port.
+
+ Two kinds of data can be read from the DoubleTalk: status
+ information (in response to the "\001?" interrogation command) is
+ read from the TTS port, and index markers (which mark the progress
+ of the speech) are read from the LPC port. Not all models of the
+ DoubleTalk PC implement index markers. Both the TTS and LPC ports
+ can also display status flags.
+
+ The DoubleTalk PC generates no interrupts.
+
+ These characteristics are mapped into the Unix stream I/O model as
+ follows:
+
+ "write" sends bytes to the TTS port. It is the responsibility of
+ the user program to switch modes among TTS, PCM/ADPCM, and CVSD.
+ This driver was written for use with the text-to-speech
+ synthesizer. If LPC output is needed some day, other minor device
+ numbers can be used to select among output modes.
+
+ "read" gets index markers from the LPC port. If the device does
+ not implement index markers, the read will fail with error EINVAL.
+
+ Status information is available using the DTLK_INTERROGATE ioctl.
+
+ */
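+
+/* Illustrative only, following the model above: "/dev/dtlk" is an
+   assumed node name; DTLK_INTERROGATE and struct dtlk_settings come
+   from <linux/dtlk.h>.
+
+	#include <string.h>
+	#include <fcntl.h>
+	#include <unistd.h>
+	#include <sys/ioctl.h>
+	#include <linux/dtlk.h>
+
+	int speak(const char *text)
+	{
+		struct dtlk_settings st;
+		int fd = open("/dev/dtlk", O_RDWR);
+
+		if (fd < 0)
+			return -1;
+		if (write(fd, text, strlen(text)) < 0 ||   // to the TTS port
+		    ioctl(fd, DTLK_INTERROGATE, &st) < 0) { // status snapshot
+			close(fd);
+			return -1;
+		}
+		close(fd);
+		return st.free_ram;
+	}
+ */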
+
+#include <linux/module.h>
+
+#define KERNEL
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/errno.h> /* for -EBUSY */
+#include <linux/ioport.h> /* for request_region */
+#include <linux/delay.h> /* for loops_per_jiffy */
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */
+#include <asm/uaccess.h> /* for get_user, etc. */
+#include <linux/wait.h> /* for wait_queue */
+#include <linux/init.h> /* for __init, module_{init,exit} */
+#include <linux/poll.h> /* for POLLIN, etc. */
+#include <linux/dtlk.h> /* local header file for DoubleTalk values */
+
+#ifdef TRACING
+#define TRACE_TEXT(str) printk(str);
+#define TRACE_RET printk(")")
+#else /* !TRACING */
+#define TRACE_TEXT(str) ((void) 0)
+#define TRACE_RET ((void) 0)
+#endif /* TRACING */
+
+static DEFINE_MUTEX(dtlk_mutex);
+static void dtlk_timer_tick(unsigned long data);
+
+static int dtlk_major;
+static int dtlk_port_lpc;
+static int dtlk_port_tts;
+static int dtlk_busy;
+static int dtlk_has_indexing;
+static unsigned int dtlk_portlist[] =
+{0x25e, 0x29e, 0x2de, 0x31e, 0x35e, 0x39e, 0};
+static wait_queue_head_t dtlk_process_list;
+static DEFINE_TIMER(dtlk_timer, dtlk_timer_tick, 0, 0);
+
+/* prototypes for file_operations struct */
+static ssize_t dtlk_read(struct file *, char __user *,
+ size_t nbytes, loff_t * ppos);
+static ssize_t dtlk_write(struct file *, const char __user *,
+ size_t nbytes, loff_t * ppos);
+static unsigned int dtlk_poll(struct file *, poll_table *);
+static int dtlk_open(struct inode *, struct file *);
+static int dtlk_release(struct inode *, struct file *);
+static long dtlk_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+
+static const struct file_operations dtlk_fops =
+{
+ .owner = THIS_MODULE,
+ .read = dtlk_read,
+ .write = dtlk_write,
+ .poll = dtlk_poll,
+ .unlocked_ioctl = dtlk_ioctl,
+ .open = dtlk_open,
+ .release = dtlk_release,
+ .llseek = no_llseek,
+};
+
+/* local prototypes */
+static int dtlk_dev_probe(void);
+static struct dtlk_settings *dtlk_interrogate(void);
+static int dtlk_readable(void);
+static char dtlk_read_lpc(void);
+static char dtlk_read_tts(void);
+static int dtlk_writeable(void);
+static char dtlk_write_bytes(const char *buf, int n);
+static char dtlk_write_tts(char);
+/*
+ static void dtlk_handle_error(char, char, unsigned int);
+ */
+
+static ssize_t dtlk_read(struct file *file, char __user *buf,
+ size_t count, loff_t * ppos)
+{
+ unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ char ch;
+ int i = 0, retries;
+
+ TRACE_TEXT("(dtlk_read");
+ /* printk("DoubleTalk PC - dtlk_read()\n"); */
+
+ if (minor != DTLK_MINOR || !dtlk_has_indexing)
+ return -EINVAL;
+
+ for (retries = 0; retries < loops_per_jiffy; retries++) {
+ while (i < count && dtlk_readable()) {
+ ch = dtlk_read_lpc();
+ /* printk("dtlk_read() reads 0x%02x\n", ch); */
+ if (put_user(ch, buf++))
+ return -EFAULT;
+ i++;
+ }
+ if (i)
+ return i;
+ if (file->f_flags & O_NONBLOCK)
+ break;
+ msleep_interruptible(100);
+ }
+ if (retries == loops_per_jiffy)
+ printk(KERN_ERR "dtlk_read times out\n");
+ TRACE_RET;
+ return -EAGAIN;
+}
+
+static ssize_t dtlk_write(struct file *file, const char __user *buf,
+ size_t count, loff_t * ppos)
+{
+ int i = 0, retries = 0, ch;
+
+ TRACE_TEXT("(dtlk_write");
+#ifdef TRACING
+ printk(" \"");
+ {
+ int i, ch;
+ for (i = 0; i < count; i++) {
+ if (get_user(ch, buf + i))
+ return -EFAULT;
+ if (' ' <= ch && ch <= '~')
+ printk("%c", ch);
+ else
+ printk("\\%03o", ch);
+ }
+ printk("\"");
+ }
+#endif
+
+ if (iminor(file->f_path.dentry->d_inode) != DTLK_MINOR)
+ return -EINVAL;
+
+ while (1) {
+ while (i < count && !get_user(ch, buf) &&
+ (ch == DTLK_CLEAR || dtlk_writeable())) {
+ dtlk_write_tts(ch);
+ buf++;
+ i++;
+ if (i % 5 == 0)
+ /* We yield our time until scheduled
+ again. This reduces the transfer
+ rate to 500 bytes/sec, but that's
+ still enough to keep up with the
+ speech synthesizer. */
+ msleep_interruptible(1);
+ else {
+ /* the RDY bit goes zero 2-3 usec
+ after writing, and goes 1 again
+ 180-190 usec later. Here, we wait
+ up to 250 usec for the RDY bit to
+ go nonzero. */
+ for (retries = 0;
+ retries < loops_per_jiffy / (4000/HZ);
+ retries++)
+ if (inb_p(dtlk_port_tts) &
+ TTS_WRITABLE)
+ break;
+ }
+ retries = 0;
+ }
+ if (i == count)
+ return i;
+ if (file->f_flags & O_NONBLOCK)
+ break;
+
+ msleep_interruptible(1);
+
+ if (++retries > 10 * HZ) { /* wait no more than 10 sec
+ from last write */
+ printk("dtlk: write timeout. "
+ "inb_p(dtlk_port_tts) = 0x%02x\n",
+ inb_p(dtlk_port_tts));
+ TRACE_RET;
+ return -EBUSY;
+ }
+ }
+ TRACE_RET;
+ return -EAGAIN;
+}
+
+static unsigned int dtlk_poll(struct file *file, poll_table * wait)
+{
+ int mask = 0;
+ unsigned long expires;
+
+ TRACE_TEXT(" dtlk_poll");
+ /*
+ static long int j;
+ printk(".");
+ printk("<%ld>", jiffies-j);
+ j=jiffies;
+ */
+ poll_wait(file, &dtlk_process_list, wait);
+
+ if (dtlk_has_indexing && dtlk_readable()) {
+ del_timer(&dtlk_timer);
+ mask = POLLIN | POLLRDNORM;
+ }
+ if (dtlk_writeable()) {
+ del_timer(&dtlk_timer);
+ mask |= POLLOUT | POLLWRNORM;
+ }
+ /* there are no exception conditions */
+
+ /* There won't be any interrupts, so we set a timer instead. */
+ expires = jiffies + 3*HZ / 100;
+ mod_timer(&dtlk_timer, expires);
+
+ return mask;
+}
+
+static void dtlk_timer_tick(unsigned long data)
+{
+ TRACE_TEXT(" dtlk_timer_tick");
+ wake_up_interruptible(&dtlk_process_list);
+}
+
+static long dtlk_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ char __user *argp = (char __user *)arg;
+ struct dtlk_settings *sp;
+ char portval;
+ TRACE_TEXT(" dtlk_ioctl");
+
+ switch (cmd) {
+
+ case DTLK_INTERROGATE:
+ mutex_lock(&dtlk_mutex);
+ sp = dtlk_interrogate();
+ mutex_unlock(&dtlk_mutex);
+ if (copy_to_user(argp, sp, sizeof(struct dtlk_settings)))
+			return -EFAULT;
+ return 0;
+
+ case DTLK_STATUS:
+ portval = inb_p(dtlk_port_tts);
+ return put_user(portval, argp);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Note that nobody ever sets dtlk_busy... */
+static int dtlk_open(struct inode *inode, struct file *file)
+{
+ TRACE_TEXT("(dtlk_open");
+
+	switch (iminor(inode)) {
+ case DTLK_MINOR:
+ if (dtlk_busy)
+ return -EBUSY;
+ return nonseekable_open(inode, file);
+
+ default:
+ return -ENXIO;
+ }
+}
+
+static int dtlk_release(struct inode *inode, struct file *file)
+{
+ TRACE_TEXT("(dtlk_release");
+
+ switch (iminor(inode)) {
+ case DTLK_MINOR:
+ break;
+
+ default:
+ break;
+ }
+ TRACE_RET;
+
+ del_timer_sync(&dtlk_timer);
+
+ return 0;
+}
+
+static int __init dtlk_init(void)
+{
+ int err;
+
+ dtlk_port_lpc = 0;
+ dtlk_port_tts = 0;
+ dtlk_busy = 0;
+ dtlk_major = register_chrdev(0, "dtlk", &dtlk_fops);
+ if (dtlk_major < 0) {
+ printk(KERN_ERR "DoubleTalk PC - cannot register device\n");
+ return dtlk_major;
+ }
+ err = dtlk_dev_probe();
+ if (err) {
+ unregister_chrdev(dtlk_major, "dtlk");
+ return err;
+ }
+ printk(", MAJOR %d\n", dtlk_major);
+
+ init_waitqueue_head(&dtlk_process_list);
+
+ return 0;
+}
+
+static void __exit dtlk_cleanup (void)
+{
+ dtlk_write_bytes("goodbye", 8);
+ msleep_interruptible(500); /* nap 0.50 sec but
+ could be awakened
+ earlier by
+ signals... */
+
+ dtlk_write_tts(DTLK_CLEAR);
+ unregister_chrdev(dtlk_major, "dtlk");
+ release_region(dtlk_port_lpc, DTLK_IO_EXTENT);
+}
+
+module_init(dtlk_init);
+module_exit(dtlk_cleanup);
+
+/* ------------------------------------------------------------------------ */
+
+static int dtlk_readable(void)
+{
+#ifdef TRACING
+ printk(" dtlk_readable=%u@%u", inb_p(dtlk_port_lpc) != 0x7f, jiffies);
+#endif
+ return inb_p(dtlk_port_lpc) != 0x7f;
+}
+
+static int dtlk_writeable(void)
+{
+ /* TRACE_TEXT(" dtlk_writeable"); */
+#ifdef TRACINGMORE
+ printk(" dtlk_writeable=%u", (inb_p(dtlk_port_tts) & TTS_WRITABLE)!=0);
+#endif
+ return inb_p(dtlk_port_tts) & TTS_WRITABLE;
+}
+
+static int __init dtlk_dev_probe(void)
+{
+ unsigned int testval = 0;
+ int i = 0;
+ struct dtlk_settings *sp;
+
+	if (dtlk_port_lpc || dtlk_port_tts)
+ return -EBUSY;
+
+ for (i = 0; dtlk_portlist[i]; i++) {
+#if 0
+ printk("DoubleTalk PC - Port %03x = %04x\n",
+ dtlk_portlist[i], (testval = inw_p(dtlk_portlist[i])));
+#endif
+
+ if (!request_region(dtlk_portlist[i], DTLK_IO_EXTENT,
+ "dtlk"))
+ continue;
+ testval = inw_p(dtlk_portlist[i]);
+ if ((testval &= 0xfbff) == 0x107f) {
+ dtlk_port_lpc = dtlk_portlist[i];
+ dtlk_port_tts = dtlk_port_lpc + 1;
+
+ sp = dtlk_interrogate();
+ printk("DoubleTalk PC at %03x-%03x, "
+ "ROM version %s, serial number %u",
+ dtlk_portlist[i], dtlk_portlist[i] +
+ DTLK_IO_EXTENT - 1,
+ sp->rom_version, sp->serial_number);
+
+ /* put LPC port into known state, so
+ dtlk_readable() gives valid result */
+ outb_p(0xff, dtlk_port_lpc);
+
+ /* INIT string and index marker */
+ dtlk_write_bytes("\036\1@\0\0012I\r", 8);
+ /* posting an index takes 18 msec. Here, we
+ wait up to 100 msec to see whether it
+ appears. */
+ msleep_interruptible(100);
+ dtlk_has_indexing = dtlk_readable();
+#ifdef TRACING
+ printk(", indexing %d\n", dtlk_has_indexing);
+#endif
+#ifdef INSCOPE
+ {
+/* This macro records ten samples read from the LPC port, for later display */
+#define LOOK \
+for (i = 0; i < 10; i++) \
+ { \
+ buffer[b++] = inb_p(dtlk_port_lpc); \
+ __delay(loops_per_jiffy/(1000000/HZ)); \
+ }
+ char buffer[1000];
+ int b = 0, i, j;
+
+ LOOK
+ outb_p(0xff, dtlk_port_lpc);
+ buffer[b++] = 0;
+ LOOK
+ dtlk_write_bytes("\0012I\r", 4);
+ buffer[b++] = 0;
+ __delay(50 * loops_per_jiffy / (1000/HZ));
+ outb_p(0xff, dtlk_port_lpc);
+ buffer[b++] = 0;
+ LOOK
+
+ printk("\n");
+ for (j = 0; j < b; j++)
+ printk(" %02x", buffer[j]);
+ printk("\n");
+ }
+#endif /* INSCOPE */
+
+#ifdef OUTSCOPE
+ {
+/* This macro records ten samples read from the TTS port, for later display */
+#define LOOK \
+for (i = 0; i < 10; i++) \
+ { \
+ buffer[b++] = inb_p(dtlk_port_tts); \
+ __delay(loops_per_jiffy/(1000000/HZ)); /* 1 us */ \
+ }
+ char buffer[1000];
+ int b = 0, i, j;
+
+ mdelay(10); /* 10 ms */
+ LOOK
+ outb_p(0x03, dtlk_port_tts);
+ buffer[b++] = 0;
+ LOOK
+ LOOK
+
+ printk("\n");
+ for (j = 0; j < b; j++)
+ printk(" %02x", buffer[j]);
+ printk("\n");
+ }
+#endif /* OUTSCOPE */
+
+ dtlk_write_bytes("Double Talk found", 18);
+
+ return 0;
+ }
+ release_region(dtlk_portlist[i], DTLK_IO_EXTENT);
+ }
+
+ printk(KERN_INFO "DoubleTalk PC - not found\n");
+ return -ENODEV;
+}
+
+/*
+ static void dtlk_handle_error(char op, char rc, unsigned int minor)
+ {
+ printk(KERN_INFO"\nDoubleTalk PC - MINOR: %d, OPCODE: %d, ERROR: %d\n",
+ minor, op, rc);
+ return;
+ }
+ */
+
+/* interrogate the DoubleTalk PC and return its settings */
+static struct dtlk_settings *dtlk_interrogate(void)
+{
+ unsigned char *t;
+ static char buf[sizeof(struct dtlk_settings) + 1];
+ int total, i;
+ static struct dtlk_settings status;
+ TRACE_TEXT("(dtlk_interrogate");
+ dtlk_write_bytes("\030\001?", 3);
+ for (total = 0, i = 0; i < 50; i++) {
+ buf[total] = dtlk_read_tts();
+ if (total > 2 && buf[total] == 0x7f)
+ break;
+ if (total < sizeof(struct dtlk_settings))
+ total++;
+ }
+ /*
+ if (i==50) printk("interrogate() read overrun\n");
+ for (i=0; i<sizeof(buf); i++)
+ printk(" %02x", buf[i]);
+ printk("\n");
+ */
+ t = buf;
+ status.serial_number = t[0] + t[1] * 256; /* serial number is
+ little endian */
+ t += 2;
+
+ i = 0;
+ while (*t != '\r') {
+ status.rom_version[i] = *t;
+ if (i < sizeof(status.rom_version) - 1)
+ i++;
+ t++;
+ }
+ status.rom_version[i] = 0;
+ t++;
+
+ status.mode = *t++;
+ status.punc_level = *t++;
+ status.formant_freq = *t++;
+ status.pitch = *t++;
+ status.speed = *t++;
+ status.volume = *t++;
+ status.tone = *t++;
+ status.expression = *t++;
+ status.ext_dict_loaded = *t++;
+ status.ext_dict_status = *t++;
+ status.free_ram = *t++;
+ status.articulation = *t++;
+ status.reverb = *t++;
+ status.eob = *t++;
+ status.has_indexing = dtlk_has_indexing;
+ TRACE_RET;
+ return &status;
+}
+
+static char dtlk_read_tts(void)
+{
+ int portval, retries = 0;
+ char ch;
+ TRACE_TEXT("(dtlk_read_tts");
+
+ /* verify DT is ready, read char, wait for ACK */
+ do {
+ portval = inb_p(dtlk_port_tts);
+ } while ((portval & TTS_READABLE) == 0 &&
+ retries++ < DTLK_MAX_RETRIES);
+ if (retries > DTLK_MAX_RETRIES)
+ printk(KERN_ERR "dtlk_read_tts() timeout\n");
+
+ ch = inb_p(dtlk_port_tts); /* input from TTS port */
+ ch &= 0x7f;
+ outb_p(ch, dtlk_port_tts);
+
+ retries = 0;
+ do {
+ portval = inb_p(dtlk_port_tts);
+ } while ((portval & TTS_READABLE) != 0 &&
+ retries++ < DTLK_MAX_RETRIES);
+ if (retries > DTLK_MAX_RETRIES)
+ printk(KERN_ERR "dtlk_read_tts() timeout\n");
+
+ TRACE_RET;
+ return ch;
+}
+
+static char dtlk_read_lpc(void)
+{
+ int retries = 0;
+ char ch;
+ TRACE_TEXT("(dtlk_read_lpc");
+
+ /* no need to test -- this is only called when the port is readable */
+
+ ch = inb_p(dtlk_port_lpc); /* input from LPC port */
+
+ outb_p(0xff, dtlk_port_lpc);
+
+ /* acknowledging a read takes 3-4
+ usec. Here, we wait up to 20 usec
+ for the acknowledgement */
+ retries = (loops_per_jiffy * 20) / (1000000/HZ);
+ while (inb_p(dtlk_port_lpc) != 0x7f && --retries > 0);
+ if (retries == 0)
+ printk(KERN_ERR "dtlk_read_lpc() timeout\n");
+
+ TRACE_RET;
+ return ch;
+}
+
+/* write n bytes to tts port */
+static char dtlk_write_bytes(const char *buf, int n)
+{
+ char val = 0;
+ /* printk("dtlk_write_bytes(\"%-*s\", %d)\n", n, buf, n); */
+ TRACE_TEXT("(dtlk_write_bytes");
+ while (n-- > 0)
+ val = dtlk_write_tts(*buf++);
+ TRACE_RET;
+ return val;
+}
+
+static char dtlk_write_tts(char ch)
+{
+ int retries = 0;
+#ifdef TRACINGMORE
+ printk(" dtlk_write_tts(");
+ if (' ' <= ch && ch <= '~')
+ printk("'%c'", ch);
+ else
+ printk("0x%02x", ch);
+#endif
+ if (ch != DTLK_CLEAR) /* no flow control for CLEAR command */
+ while ((inb_p(dtlk_port_tts) & TTS_WRITABLE) == 0 &&
+ retries++ < DTLK_MAX_RETRIES) /* DT ready? */
+ ;
+ if (retries > DTLK_MAX_RETRIES)
+ printk(KERN_ERR "dtlk_write_tts() timeout\n");
+
+ outb_p(ch, dtlk_port_tts); /* output to TTS port */
+ /* the RDY bit goes zero 2-3 usec after writing, and goes
+ 1 again 180-190 usec later. Here, we wait up to 10
+ usec for the RDY bit to go zero. */
+ for (retries = 0; retries < loops_per_jiffy / (100000/HZ); retries++)
+ if ((inb_p(dtlk_port_tts) & TTS_WRITABLE) == 0)
+ break;
+
+#ifdef TRACINGMORE
+ printk(")\n");
+#endif
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
new file mode 100644
index 00000000..53c524e7
--- /dev/null
+++ b/drivers/char/efirtc.c
@@ -0,0 +1,412 @@
+/*
+ * EFI Time Services Driver for Linux
+ *
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * Based on skeleton from the drivers/char/rtc.c driver by P. Gortmaker
+ *
+ * This code provides an architected & portable interface to the real time
+ * clock by using EFI instead of direct bit fiddling. The functionalities are
+ * quite different from the rtc.c driver. The only way to talk to the device
+ * is by using ioctl(). There is a /proc interface which provides the raw
+ * information.
+ *
+ * Please note that we have kept the API as close as possible to the
+ * legacy RTC. The standard /sbin/hwclock program should work normally
+ * when used to get/set the time.
+ *
+ * NOTES:
+ * - Locking is required for safe execution of EFI calls with regards
+ * to interrupts and SMP.
+ *
+ * TODO (December 1999):
+ * - provide the API to set/get the WakeUp Alarm (different from the
+ * rtc.c alarm).
+ * - SMP testing
+ * - Add module support
+ */
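+
+/*
+ * Illustrative only: reading the wakeup alarm through the RTC_WKALM_RD
+ * ioctl implemented below.  "/dev/efirtc" is an assumed node name for
+ * the misc device (minor EFI_RTC_MINOR) registered by this driver.
+ *
+ *	#include <stdio.h>
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/rtc.h>
+ *
+ *	int main(void)
+ *	{
+ *		struct rtc_wkalrm alm;
+ *		int fd = open("/dev/efirtc", O_RDONLY);
+ *
+ *		if (fd < 0 || ioctl(fd, RTC_WKALM_RD, &alm) < 0)
+ *			return 1;
+ *		printf("alarm %sabled, %spending\n",
+ *		       alm.enabled ? "en" : "dis",
+ *		       alm.pending ? "" : "not ");
+ *		return 0;
+ *	}
+ */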
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/proc_fs.h>
+#include <linux/efi.h>
+#include <linux/uaccess.h>
+
+#include <asm/system.h>
+
+#define EFI_RTC_VERSION "0.4"
+
+#define EFI_ISDST (EFI_TIME_ADJUST_DAYLIGHT|EFI_TIME_IN_DAYLIGHT)
+/*
+ * EFI Epoch is 1/1/1998
+ */
+#define EFI_RTC_EPOCH 1998
+
+static DEFINE_SPINLOCK(efi_rtc_lock);
+
+static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+
+#define is_leap(year) \
+ ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
+
+static const unsigned short int __mon_yday[2][13] =
+{
+ /* Normal years. */
+ { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
+ /* Leap years. */
+ { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
+};
+
+/*
+ * returns day of the year [0-365]
+ */
+static inline int
+compute_yday(efi_time_t *eft)
+{
+	/* efi_time_t.month is in the range [1-12], so we need -1 */
+	return __mon_yday[is_leap(eft->year)][eft->month - 1] + eft->day - 1;
+}
+/*
+ * returns day of the week [0-6] 0=Sunday
+ *
+ * Years before 1998 (the EFI epoch) are not supported.
+ */
+static int
+compute_wday(efi_time_t *eft)
+{
+ int y;
+ int ndays = 0;
+
+ if ( eft->year < 1998 ) {
+ printk(KERN_ERR "efirtc: EFI year < 1998, invalid date\n");
+ return -1;
+ }
+
+ for(y=EFI_RTC_EPOCH; y < eft->year; y++ ) {
+ ndays += 365 + (is_leap(y) ? 1 : 0);
+ }
+ ndays += compute_yday(eft);
+
+ /*
+ * 4=1/1/1998 was a Thursday
+ */
+ return (ndays + 4) % 7;
+}
+
+static void
+convert_to_efi_time(struct rtc_time *wtime, efi_time_t *eft)
+{
+
+ eft->year = wtime->tm_year + 1900;
+ eft->month = wtime->tm_mon + 1;
+ eft->day = wtime->tm_mday;
+ eft->hour = wtime->tm_hour;
+ eft->minute = wtime->tm_min;
+ eft->second = wtime->tm_sec;
+ eft->nanosecond = 0;
+ eft->daylight = wtime->tm_isdst ? EFI_ISDST: 0;
+ eft->timezone = EFI_UNSPECIFIED_TIMEZONE;
+}
+
+static void
+convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime)
+{
+ memset(wtime, 0, sizeof(*wtime));
+ wtime->tm_sec = eft->second;
+ wtime->tm_min = eft->minute;
+ wtime->tm_hour = eft->hour;
+ wtime->tm_mday = eft->day;
+ wtime->tm_mon = eft->month - 1;
+ wtime->tm_year = eft->year - 1900;
+
+ /* day of the week [0-6], Sunday=0 */
+ wtime->tm_wday = compute_wday(eft);
+
+	/* day in the year [0-365] */
+ wtime->tm_yday = compute_yday(eft);
+
+
+ switch (eft->daylight & EFI_ISDST) {
+ case EFI_ISDST:
+ wtime->tm_isdst = 1;
+ break;
+ case EFI_TIME_ADJUST_DAYLIGHT:
+ wtime->tm_isdst = 0;
+ break;
+ default:
+ wtime->tm_isdst = -1;
+ }
+}
+
+static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+
+ efi_status_t status;
+ unsigned long flags;
+ efi_time_t eft;
+ efi_time_cap_t cap;
+ struct rtc_time wtime;
+ struct rtc_wkalrm __user *ewp;
+ unsigned char enabled, pending;
+
+ switch (cmd) {
+ case RTC_UIE_ON:
+ case RTC_UIE_OFF:
+ case RTC_PIE_ON:
+ case RTC_PIE_OFF:
+ case RTC_AIE_ON:
+ case RTC_AIE_OFF:
+ case RTC_ALM_SET:
+ case RTC_ALM_READ:
+ case RTC_IRQP_READ:
+ case RTC_IRQP_SET:
+ case RTC_EPOCH_READ:
+ case RTC_EPOCH_SET:
+ return -EINVAL;
+
+ case RTC_RD_TIME:
+ spin_lock_irqsave(&efi_rtc_lock, flags);
+
+ status = efi.get_time(&eft, &cap);
+
+ spin_unlock_irqrestore(&efi_rtc_lock,flags);
+
+ if (status != EFI_SUCCESS) {
+ /* should never happen */
+ printk(KERN_ERR "efitime: can't read time\n");
+ return -EINVAL;
+ }
+
+ convert_from_efi_time(&eft, &wtime);
+
+		return copy_to_user((void __user *)arg, &wtime,
+				    sizeof(struct rtc_time)) ? -EFAULT : 0;
+
+ case RTC_SET_TIME:
+
+ if (!capable(CAP_SYS_TIME)) return -EACCES;
+
+ if (copy_from_user(&wtime, (struct rtc_time __user *)arg,
+ sizeof(struct rtc_time)) )
+ return -EFAULT;
+
+ convert_to_efi_time(&wtime, &eft);
+
+ spin_lock_irqsave(&efi_rtc_lock, flags);
+
+ status = efi.set_time(&eft);
+
+ spin_unlock_irqrestore(&efi_rtc_lock,flags);
+
+ return status == EFI_SUCCESS ? 0 : -EINVAL;
+
+ case RTC_WKALM_SET:
+
+ if (!capable(CAP_SYS_TIME)) return -EACCES;
+
+ ewp = (struct rtc_wkalrm __user *)arg;
+
+ if ( get_user(enabled, &ewp->enabled)
+ || copy_from_user(&wtime, &ewp->time, sizeof(struct rtc_time)) )
+ return -EFAULT;
+
+ convert_to_efi_time(&wtime, &eft);
+
+ spin_lock_irqsave(&efi_rtc_lock, flags);
+ /*
+ * XXX Fixme:
+ * As of EFI 0.92 with the firmware I have on my
+ * machine this call does not seem to work quite
+ * right
+ */
+ status = efi.set_wakeup_time((efi_bool_t)enabled, &eft);
+
+ spin_unlock_irqrestore(&efi_rtc_lock,flags);
+
+ return status == EFI_SUCCESS ? 0 : -EINVAL;
+
+ case RTC_WKALM_RD:
+
+ spin_lock_irqsave(&efi_rtc_lock, flags);
+
+ status = efi.get_wakeup_time((efi_bool_t *)&enabled, (efi_bool_t *)&pending, &eft);
+
+ spin_unlock_irqrestore(&efi_rtc_lock,flags);
+
+ if (status != EFI_SUCCESS) return -EINVAL;
+
+ ewp = (struct rtc_wkalrm __user *)arg;
+
+ if ( put_user(enabled, &ewp->enabled)
+ || put_user(pending, &ewp->pending)) return -EFAULT;
+
+ convert_from_efi_time(&eft, &wtime);
+
+ return copy_to_user(&ewp->time, &wtime,
+ sizeof(struct rtc_time)) ? -EFAULT : 0;
+ }
+ return -ENOTTY;
+}
+
+/*
+ * Open and close do very little here: unlike the legacy RTC driver we
+ * do not enforce a single user, since every operation synchronizes on
+ * efi_rtc_lock.
+ */
+
+static int efi_rtc_open(struct inode *inode, struct file *file)
+{
+ /*
+ * nothing special to do here
+ * We do accept multiple open files at the same time as we
+ * synchronize on the per call operation.
+ */
+ return 0;
+}
+
+static int efi_rtc_close(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/*
+ * The various file operations we support.
+ */
+
+static const struct file_operations efi_rtc_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = efi_rtc_ioctl,
+ .open = efi_rtc_open,
+ .release = efi_rtc_close,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice efi_rtc_dev= {
+ EFI_RTC_MINOR,
+ "efirtc",
+ &efi_rtc_fops
+};
+
+/*
+ * We export RAW EFI information to /proc/driver/efirtc
+ */
+static int
+efi_rtc_get_status(char *buf)
+{
+ efi_time_t eft, alm;
+ efi_time_cap_t cap;
+ char *p = buf;
+ efi_bool_t enabled, pending;
+ unsigned long flags;
+
+ memset(&eft, 0, sizeof(eft));
+ memset(&alm, 0, sizeof(alm));
+ memset(&cap, 0, sizeof(cap));
+
+ spin_lock_irqsave(&efi_rtc_lock, flags);
+
+ efi.get_time(&eft, &cap);
+ efi.get_wakeup_time(&enabled, &pending, &alm);
+
+ spin_unlock_irqrestore(&efi_rtc_lock,flags);
+
+ p += sprintf(p,
+ "Time : %u:%u:%u.%09u\n"
+ "Date : %u-%u-%u\n"
+ "Daylight : %u\n",
+ eft.hour, eft.minute, eft.second, eft.nanosecond,
+ eft.year, eft.month, eft.day,
+ eft.daylight);
+
+ if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
+ p += sprintf(p, "Timezone : unspecified\n");
+ else
+ /* XXX fixme: convert to string? */
+ p += sprintf(p, "Timezone : %u\n", eft.timezone);
+
+
+ p += sprintf(p,
+ "Alarm Time : %u:%u:%u.%09u\n"
+ "Alarm Date : %u-%u-%u\n"
+ "Alarm Daylight : %u\n"
+ "Enabled : %s\n"
+ "Pending : %s\n",
+ alm.hour, alm.minute, alm.second, alm.nanosecond,
+ alm.year, alm.month, alm.day,
+ alm.daylight,
+ enabled == 1 ? "yes" : "no",
+ pending == 1 ? "yes" : "no");
+
+	if (alm.timezone == EFI_UNSPECIFIED_TIMEZONE)
+		p += sprintf(p, "Alarm Timezone : unspecified\n");
+	else
+		/* XXX fixme: convert to string? */
+		p += sprintf(p, "Alarm Timezone : %u\n", alm.timezone);
+
+ /*
+ * now prints the capabilities
+ */
+ p += sprintf(p,
+ "Resolution : %u\n"
+ "Accuracy : %u\n"
+ "SetstoZero : %u\n",
+ cap.resolution, cap.accuracy, cap.sets_to_zero);
+
+ return p - buf;
+}
+
+static int
+efi_rtc_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len = efi_rtc_get_status(page);
+ if (len <= off+count) *eof = 1;
+ *start = page + off;
+ len -= off;
+ if (len>count) len = count;
+ if (len<0) len = 0;
+ return len;
+}
+
+static int __init
+efi_rtc_init(void)
+{
+ int ret;
+ struct proc_dir_entry *dir;
+
+ printk(KERN_INFO "EFI Time Services Driver v%s\n", EFI_RTC_VERSION);
+
+ ret = misc_register(&efi_rtc_dev);
+ if (ret) {
+ printk(KERN_ERR "efirtc: can't misc_register on minor=%d\n",
+ EFI_RTC_MINOR);
+ return ret;
+ }
+
+ dir = create_proc_read_entry ("driver/efirtc", 0, NULL,
+ efi_rtc_read_proc, NULL);
+ if (dir == NULL) {
+ printk(KERN_ERR "efirtc: can't create /proc/driver/efirtc.\n");
+ misc_deregister(&efi_rtc_dev);
+		return -ENOMEM;
+ }
+ return 0;
+}
+
+static void __exit
+efi_rtc_exit(void)
+{
+ /* not yet used */
+}
+
+module_init(efi_rtc_init);
+module_exit(efi_rtc_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/fsl_otp.c b/drivers/char/fsl_otp.c
new file mode 100755
index 00000000..7021c42c
--- /dev/null
+++ b/drivers/char/fsl_otp.c
@@ -0,0 +1,261 @@
+/*
+ * Freescale On-Chip OTP driver
+ *
+ * Copyright (C) 2010-2012 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/fcntl.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
+
+#include "fsl_otp.h"
+
+static DEFINE_MUTEX(otp_mutex);
+static struct kobject *otp_kobj;
+static struct attribute **attrs;
+static struct kobj_attribute *kattr;
+static struct attribute_group attr_group;
+static struct mxc_otp_platform_data *otp_data;
+static struct clk *otp_clk;
+
+static inline unsigned int get_reg_index(struct kobj_attribute *tmp)
+{
+ return tmp - kattr;
+}
+
+static int otp_wait_busy(u32 flags)
+{
+ int count;
+ u32 c;
+
+ for (count = 10000; count >= 0; count--) {
+ c = __raw_readl(REGS_OCOTP_BASE + HW_OCOTP_CTRL);
+ if (!(c & (BM_OCOTP_CTRL_BUSY | BM_OCOTP_CTRL_ERROR | flags)))
+ break;
+ cpu_relax();
+ }
+ if (count < 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+static ssize_t otp_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned int index = get_reg_index(attr);
+ u32 value = 0;
+
+ /* sanity check */
+ if (index >= otp_data->fuse_num)
+ return 0;
+
+ mutex_lock(&otp_mutex);
+
+ if (otp_read_prepare(otp_data)) {
+ mutex_unlock(&otp_mutex);
+ return 0;
+ }
+ value = __raw_readl(REGS_OCOTP_BASE + HW_OCOTP_CUSTn(index));
+ otp_read_post(otp_data);
+
+ mutex_unlock(&otp_mutex);
+ return sprintf(buf, "0x%x\n", value);
+}
+
+static int otp_write_bits(int addr, u32 data, u32 magic)
+{
+ u32 c; /* for control register */
+
+ /* init the control register */
+ c = __raw_readl(REGS_OCOTP_BASE + HW_OCOTP_CTRL);
+ c &= ~BM_OCOTP_CTRL_ADDR;
+ c |= BF(addr, OCOTP_CTRL_ADDR);
+ c |= BF(magic, OCOTP_CTRL_WR_UNLOCK);
+ __raw_writel(c, REGS_OCOTP_BASE + HW_OCOTP_CTRL);
+
+ /* init the data register */
+ __raw_writel(data, REGS_OCOTP_BASE + HW_OCOTP_DATA);
+ otp_wait_busy(0);
+
+ mdelay(2); /* Write Postamble */
+ return 0;
+}
+
+static ssize_t otp_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int index = get_reg_index(attr);
+	unsigned int value;
+
+ /* sanity check */
+	if (index >= otp_data->fuse_num)
+		return -EINVAL;
+
+	if (sscanf(buf, "0x%x", &value) != 1)
+		return -EINVAL;
+
+ mutex_lock(&otp_mutex);
+ if (otp_write_prepare(otp_data)) {
+ mutex_unlock(&otp_mutex);
+ return 0;
+ }
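+	/* 0x3e77 is the WR_UNLOCK key the OCOTP block expects in its
+	 * control register before it will accept a fuse write */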
+ otp_write_bits(index, value, 0x3e77);
+ otp_write_post(otp_data);
+ mutex_unlock(&otp_mutex);
+ return count;
+}
+
+static void free_otp_attr(void)
+{
+ kfree(attrs);
+ attrs = NULL;
+
+ kfree(kattr);
+ kattr = NULL;
+}
+
+static int __init alloc_otp_attr(struct mxc_otp_platform_data *pdata)
+{
+ int i;
+
+ otp_data = pdata; /* get private data */
+
+ /* The last one is NULL, which is used to detect the end */
+ attrs = kzalloc((otp_data->fuse_num + 1) * sizeof(attrs[0]),
+ GFP_KERNEL);
+ kattr = kzalloc(otp_data->fuse_num * sizeof(struct kobj_attribute),
+ GFP_KERNEL);
+
+ if (!attrs || !kattr)
+ goto error_out;
+
+ for (i = 0; i < otp_data->fuse_num; i++) {
+ sysfs_attr_init(&kattr[i].attr);
+ kattr[i].attr.name = pdata->fuse_name[i];
+ kattr[i].attr.mode = 0600;
+ kattr[i].show = otp_show;
+ kattr[i].store = otp_store;
+
+ attrs[i] = &kattr[i].attr;
+ }
+ memset(&attr_group, 0, sizeof(attr_group));
+ attr_group.attrs = attrs;
+ return 0;
+
+error_out:
+ free_otp_attr();
+ return -ENOMEM;
+}
+
+static int __devinit fsl_otp_probe(struct platform_device *pdev)
+{
+ int retval;
+ struct mxc_otp_platform_data *pdata;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata == NULL)
+ return -ENODEV;
+
+	/* Enable clock */
+	if (pdata->clock_name) {
+		otp_clk = clk_get(&pdev->dev, pdata->clock_name);
+		if (IS_ERR(otp_clk))
+			return PTR_ERR(otp_clk);
+		clk_enable(otp_clk);
+	}
+
+ retval = alloc_otp_attr(pdata);
+ if (retval)
+ return retval;
+
+ retval = map_ocotp_addr(pdev);
+ if (retval)
+ goto error;
+
+ otp_kobj = kobject_create_and_add("fsl_otp", NULL);
+ if (!otp_kobj) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ retval = sysfs_create_group(otp_kobj, &attr_group);
+ if (retval)
+ goto error;
+
+ return 0;
+error:
+ kobject_put(otp_kobj);
+ otp_kobj = NULL;
+ free_otp_attr();
+ unmap_ocotp_addr();
+ return retval;
+}
+
+static int otp_remove(struct platform_device *pdev)
+{
+ struct mxc_otp_platform_data *pdata;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata == NULL)
+ return -ENODEV;
+
+ kobject_put(otp_kobj);
+ otp_kobj = NULL;
+
+ free_otp_attr();
+ unmap_ocotp_addr();
+
+ if (pdata->clock_name) {
+ clk_disable(otp_clk);
+ clk_put(otp_clk);
+ }
+
+ return 0;
+}
+
+static struct platform_driver ocotp_driver = {
+ .probe = fsl_otp_probe,
+ .remove = otp_remove,
+ .driver = {
+ .name = "imx-ocotp",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init fsl_otp_init(void)
+{
+ return platform_driver_register(&ocotp_driver);
+}
+
+static void __exit fsl_otp_exit(void)
+{
+ platform_driver_unregister(&ocotp_driver);
+}
+module_init(fsl_otp_init);
+module_exit(fsl_otp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huang Shijie <b32955@freescale.com>");
+MODULE_DESCRIPTION("Common driver for OTP controller");
diff --git a/drivers/char/fsl_otp.h b/drivers/char/fsl_otp.h
new file mode 100755
index 00000000..5f779e53
--- /dev/null
+++ b/drivers/char/fsl_otp.h
@@ -0,0 +1,369 @@
+/*
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __FREESCALE_OTP__
+#define __FREESCALE_OTP__
+
+#define log(a, ...) printk(KERN_INFO "[ %s : %.3d ] "a"\n", \
+ __func__, __LINE__, ## __VA_ARGS__)
+
+static u32 otp_voltage_saved;
+static struct regulator *regu;
+
+static int otp_wait_busy(u32 flags);
+
+/* IMX23 and IMX28 share most of the definitions ======================= */
+#if (defined(CONFIG_ARCH_MX23) || defined(CONFIG_ARCH_MX28))
+
+#include <linux/regulator/consumer.h>
+#include <mach/hardware.h>
+#include <mach/device.h>
+#include <mach/regs-ocotp.h>
+#include <mach/regs-power.h>
+
+#if defined(CONFIG_ARCH_MX23)
+#include <mach/mx23.h>
+#else
+#include <mach/mx28.h>
+#endif
+
+#define REGS_OCOTP_BASE (IO_ADDRESS(OCOTP_PHYS_ADDR))
+#define BF(value, field) (((value) << BP_##field) & BM_##field)
+
+static unsigned long otp_hclk_saved;
+
+/* open the bank for the imx23/imx28 */
+static int otp_read_prepare(struct mxc_otp_platform_data *otp_data)
+{
+ int r;
+
+ /* [1] set the HCLK */
+ /* [2] check BUSY and ERROR bit */
+ r = otp_wait_busy(0);
+ if (r < 0)
+ goto error;
+
+ /* [3] open bank */
+ __raw_writel(BM_OCOTP_CTRL_RD_BANK_OPEN,
+ REGS_OCOTP_BASE + HW_OCOTP_CTRL_SET);
+ udelay(10);
+
+ /* [4] wait for BUSY */
+	return otp_wait_busy(0);
+error:
+ return r;
+}
+
+static int otp_read_post(struct mxc_otp_platform_data *otp_data)
+{
+ /* [5] close bank */
+ __raw_writel(BM_OCOTP_CTRL_RD_BANK_OPEN,
+ REGS_OCOTP_BASE + HW_OCOTP_CTRL_CLR);
+ return 0;
+}
+
+static int otp_write_prepare(struct mxc_otp_platform_data *otp_data)
+{
+ struct clk *hclk;
+ int err = 0;
+
+ /* [1] HCLK to 24MHz. */
+ hclk = clk_get(NULL, otp_data->clock_name);
+ if (IS_ERR(hclk)) {
+ err = PTR_ERR(hclk);
+ goto out;
+ }
+	/*
+	WARNING ACHTUNG UWAGA
+
+	The code below changes the HCLK clock rate to 24M. This is
+	required to write OTP bits (7.2.2 in the STMP378x Target
+	Specification), and might affect LCD operation, for example.
+	Moreover, this hacky code changes VDDIO to 2.8V and restores
+	it only in otp_write_post(). This may affect... anything.
+
+	You are warned now.
+	*/
+ otp_hclk_saved = clk_get_rate(hclk);
+ clk_set_rate(hclk, 24000);
+
+ /* [2] The voltage is set to 2.8V */
+ if (otp_data->regulator_name) {
+ regu = regulator_get(NULL, otp_data->regulator_name);
+ otp_voltage_saved = regulator_get_voltage(regu);
+ regulator_set_voltage(regu, otp_data->min_volt, otp_data->max_volt);
+ }
+
+ /* [3] wait for BUSY and ERROR */
+ err = otp_wait_busy(BM_OCOTP_CTRL_RD_BANK_OPEN);
+out:
+ return err;
+}
+
+static int otp_write_post(struct mxc_otp_platform_data *otp_data)
+{
+ struct clk *hclk;
+
+ hclk = clk_get(NULL, otp_data->clock_name);
+
+ /* restore the clock and voltage */
+ clk_set_rate(hclk, otp_hclk_saved);
+ if (otp_data->regulator_name)
+ regulator_set_voltage(regu, otp_voltage_saved, otp_voltage_saved);
+ otp_wait_busy(0);
+
+ /* reset */
+ __raw_writel(BM_OCOTP_CTRL_RELOAD_SHADOWS,
+ REGS_OCOTP_BASE + HW_OCOTP_CTRL_SET);
+ otp_wait_busy(BM_OCOTP_CTRL_RELOAD_SHADOWS);
+ return 0;
+}
+
+static int __init map_ocotp_addr(struct platform_device *pdev)
+{
+ return 0;
+}
+static void unmap_ocotp_addr(void)
+{
+}
+
+#elif defined(CONFIG_ARCH_MX5) /* IMX5 below ============================= */
+
+#include <mach/hardware.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include "regs-ocotp-v2.h"
+
+static void *otp_base;
+#define REGS_OCOTP_BASE ((unsigned long)otp_base)
+#define HW_OCOTP_CUSTn(n) (0x00000030 + (n) * 0x10)
+#define BF(value, field) (((value) << BP_##field) & BM_##field)
+
+#define DEF_RELEX (15) /* > 10.5ns */
+
+static int set_otp_timing(struct mxc_otp_platform_data *otp_data)
+{
+ struct clk *apb_clk;
+ unsigned long clk_rate = 0;
+ unsigned long relex, sclk_count, rd_busy;
+ u32 timing = 0;
+
+ if (!otp_data->clock_name)
+ return -1;
+
+	/* [1] get the clock. It needs the AHB clock, though the doc says APB. */
+ apb_clk = clk_get(NULL, otp_data->clock_name);
+ if (IS_ERR(apb_clk)) {
+ log("we can not find the clock");
+ return -1;
+ }
+ clk_rate = clk_get_rate(apb_clk);
+
+	/*
+	 * Convert the nanosecond delays into clock cycles; dividing
+	 * 10^9 by the delay first keeps the intermediates within 32 bits.
+	 */
+ relex = clk_rate / (1000000000 / DEF_RELEX) + 1;
+ sclk_count = clk_rate / (1000000000 / 5000) + 1 + DEF_RELEX;
+ rd_busy = clk_rate / (1000000000 / 300) + 1;
+
+ timing = BF(relex, OCOTP_TIMING_RELAX);
+ timing |= BF(sclk_count, OCOTP_TIMING_SCLK_COUNT);
+ timing |= BF(rd_busy, OCOTP_TIMING_RD_BUSY);
+
+ __raw_writel(timing, REGS_OCOTP_BASE + HW_OCOTP_TIMING);
+ return 0;
+}
+
+/* IMX5 does not need to open the bank anymore */
+static int otp_read_prepare(struct mxc_otp_platform_data *otp_data)
+{
+ return set_otp_timing(otp_data);
+}
+static int otp_read_post(struct mxc_otp_platform_data *otp_data)
+{
+ return 0;
+}
+
+static int otp_write_prepare(struct mxc_otp_platform_data *otp_data)
+{
+ int ret = 0;
+
+ /* [1] set timing */
+ ret = set_otp_timing(otp_data);
+ if (ret)
+ return ret;
+
+ /* [2] wait */
+ otp_wait_busy(0);
+ return 0;
+}
+static int otp_write_post(struct mxc_otp_platform_data *otp_data)
+{
+ /* Reload all the shadow registers */
+ __raw_writel(BM_OCOTP_CTRL_RELOAD_SHADOWS,
+ REGS_OCOTP_BASE + HW_OCOTP_CTRL_SET);
+ udelay(1);
+ otp_wait_busy(BM_OCOTP_CTRL_RELOAD_SHADOWS);
+ return 0;
+}
+
+static int __init map_ocotp_addr(struct platform_device *pdev)
+{
+ struct resource *res;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ otp_base = ioremap(res->start, SZ_8K);
+ if (!otp_base) {
+ log("Can not remap the OTP iomem!");
+ return -1;
+ }
+ return 0;
+}
+
+static void unmap_ocotp_addr(void)
+{
+ iounmap(otp_base);
+ otp_base = NULL;
+}
+
+#elif defined(CONFIG_ARCH_MX6) /* IMX6 below ============================= */
+
+#include <mach/hardware.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "regs-ocotp-v3.h"
+
+static void *otp_base;
+
+#define REGS_OCOTP_BASE ((unsigned long)otp_base)
+#define HW_OCOTP_CUSTn(n) (0x00000400 + (n) * 0x10)
+#define BF(value, field) (((value) << BP_##field) & BM_##field)
+
+#define DEF_RELEX (20) /* > 16.5ns */
+
+static int set_otp_timing(struct mxc_otp_platform_data *otp_data)
+{
+ struct clk *ocotp_clk;
+ unsigned long clk_rate = 0;
+ unsigned long strobe_read, relex, strobe_prog;
+ u32 timing = 0;
+
+	/* [1] get the clock. It needs the IPG clock. */
+ if (!otp_data->clock_name)
+ return -1;
+
+ ocotp_clk = clk_get(NULL, otp_data->clock_name);
+ if (IS_ERR(ocotp_clk)) {
+ log("we can not find the clock");
+ return -1;
+ }
+ clk_rate = clk_get_rate(ocotp_clk);
+
+	/*
+	 * Convert the nanosecond delays into clock cycles; dividing
+	 * 10^9 by the delay first keeps the intermediates within 32 bits.
+	 */
+ relex = clk_rate / (1000000000 / DEF_RELEX) - 1;
+ strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELEX + 1) - 1;
+ strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELEX + 1) - 1;
+
+ timing = BF(relex, OCOTP_TIMING_RELAX);
+ timing |= BF(strobe_read, OCOTP_TIMING_STROBE_READ);
+ timing |= BF(strobe_prog, OCOTP_TIMING_STROBE_PROG);
+
+ __raw_writel(timing, REGS_OCOTP_BASE + HW_OCOTP_TIMING);
+
+ return 0;
+}
+
+/* IMX6, like IMX5, does not need to open the bank before reading */
+static int otp_read_prepare(struct mxc_otp_platform_data *otp_data)
+{
+ int ret = 0;
+ /* [1] set the HCLK */
+ set_otp_timing(otp_data);
+
+ /* [2] check BUSY and ERROR bit */
+ ret = otp_wait_busy(0);
+
+ return ret;
+}
+static int otp_read_post(struct mxc_otp_platform_data *otp_data)
+{
+ return 0;
+}
+
+static int otp_write_prepare(struct mxc_otp_platform_data *otp_data)
+{
+ int ret = 0;
+
+ /* [1] set timing */
+ ret = set_otp_timing(otp_data);
+ if (ret)
+ return ret;
+
+ /* [2] wait */
+ ret = otp_wait_busy(0);
+ if (ret < 0)
+ return ret;
+
+ /* [3] The voltage is set to 2.8V, if needed */
+ if (otp_data->regulator_name) {
+ regu = regulator_get(NULL, otp_data->regulator_name);
+ otp_voltage_saved = regulator_get_voltage(regu);
+ regulator_set_voltage(regu, otp_data->min_volt, otp_data->max_volt);
+ /* [4] wait for BUSY and ERROR */
+ ret = otp_wait_busy(0);
+
+ }
+
+ return ret;
+}
+static int otp_write_post(struct mxc_otp_platform_data *otp_data)
+{
+ /* restore the clock and voltage */
+ if (otp_data->regulator_name) {
+ regulator_set_voltage(regu, otp_voltage_saved, otp_voltage_saved);
+ otp_wait_busy(0);
+ }
+
+ /* Reload all the shadow registers */
+ __raw_writel(BM_OCOTP_CTRL_RELOAD_SHADOWS,
+ REGS_OCOTP_BASE + HW_OCOTP_CTRL_SET);
+ udelay(1);
+ otp_wait_busy(BM_OCOTP_CTRL_RELOAD_SHADOWS);
+
+ return 0;
+}
+
+static int __init map_ocotp_addr(struct platform_device *pdev)
+{
+ struct resource *res;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ otp_base = ioremap(res->start, SZ_8K);
+ if (!otp_base) {
+ log("Can not remap the OTP iomem!");
+ return -1;
+ }
+ return 0;
+}
+
+static void unmap_ocotp_addr(void)
+{
+ iounmap(otp_base);
+ otp_base = NULL;
+}
+#endif /* CONFIG_ARCH_MX6 */
+
+#endif
diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c
new file mode 100644
index 00000000..0e941b57
--- /dev/null
+++ b/drivers/char/generic_nvram.c
@@ -0,0 +1,170 @@
+/*
+ * Generic /dev/nvram driver for architectures providing some
+ * "generic" hooks, that is :
+ *
+ * nvram_read_byte, nvram_write_byte, nvram_sync, nvram_get_size
+ *
+ * Note that an additional hook is supported for PowerMac only
+ * for getting the nvram "partition" information.
+ *
+ */
+
+#define NVRAM_VERSION "1.1"
+
+#include <linux/module.h>
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <asm/uaccess.h>
+#include <asm/nvram.h>
+#ifdef CONFIG_PPC_PMAC
+#include <asm/machdep.h>
+#endif
+
+#define NVRAM_SIZE 8192
+
+static DEFINE_MUTEX(nvram_mutex);
+static ssize_t nvram_len;
+
+static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
+{
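+	/* origin: 0 = SEEK_SET, 1 = SEEK_CUR, 2 = SEEK_END */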
+ switch (origin) {
+ case 1:
+ offset += file->f_pos;
+ break;
+ case 2:
+ offset += nvram_len;
+ break;
+ }
+ if (offset < 0)
+ return -EINVAL;
+
+ file->f_pos = offset;
+
+ return file->f_pos;
+}
+
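+/*
+ * Reads and writes go through the architecture's nvram_read_byte() /
+ * nvram_write_byte() hooks one byte at a time; the return value is
+ * the number of bytes transferred before reaching the end.
+ */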
+static ssize_t read_nvram(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int i;
+ char __user *p = buf;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+ if (*ppos >= nvram_len)
+ return 0;
+ for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count)
+ if (__put_user(nvram_read_byte(i), p))
+ return -EFAULT;
+ *ppos = i;
+ return p - buf;
+}
+
+static ssize_t write_nvram(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int i;
+ const char __user *p = buf;
+ char c;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+ if (*ppos >= nvram_len)
+ return 0;
+ for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count) {
+ if (__get_user(c, p))
+ return -EFAULT;
+ nvram_write_byte(c, i);
+ }
+ *ppos = i;
+ return p - buf;
+}
+
+static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch(cmd) {
+#ifdef CONFIG_PPC_PMAC
+ case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
+ printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
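+		/* fall through to the non-obsolete handler below */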
+ case IOC_NVRAM_GET_OFFSET: {
+ int part, offset;
+
+ if (!machine_is(powermac))
+ return -EINVAL;
+ if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
+ return -EFAULT;
+ if (part < pmac_nvram_OF || part > pmac_nvram_NR)
+ return -EINVAL;
+ offset = pmac_get_partition(part);
+ if (copy_to_user((void __user*)arg, &offset, sizeof(offset)) != 0)
+ return -EFAULT;
+ break;
+ }
+#endif /* CONFIG_PPC_PMAC */
+ case IOC_NVRAM_SYNC:
+ nvram_sync();
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&nvram_mutex);
+ ret = nvram_ioctl(file, cmd, arg);
+ mutex_unlock(&nvram_mutex);
+
+ return ret;
+}
+
+const struct file_operations nvram_fops = {
+ .owner = THIS_MODULE,
+ .llseek = nvram_llseek,
+ .read = read_nvram,
+ .write = write_nvram,
+ .unlocked_ioctl = nvram_unlocked_ioctl,
+};
+
+static struct miscdevice nvram_dev = {
+	.minor	= NVRAM_MINOR,
+	.name	= "nvram",
+	.fops	= &nvram_fops,
+};
+
+int __init nvram_init(void)
+{
+ int ret = 0;
+
+ printk(KERN_INFO "Generic non-volatile memory driver v%s\n",
+ NVRAM_VERSION);
+ ret = misc_register(&nvram_dev);
+ if (ret != 0)
+ goto out;
+
+ nvram_len = nvram_get_size();
+ if (nvram_len < 0)
+ nvram_len = NVRAM_SIZE;
+
+out:
+ return ret;
+}
+
+void __exit nvram_cleanup(void)
+{
+	misc_deregister(&nvram_dev);
+}
+
+module_init(nvram_init);
+module_exit(nvram_cleanup);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
new file mode 100644
index 00000000..f773a9dd
--- /dev/null
+++ b/drivers/char/genrtc.c
@@ -0,0 +1,542 @@
+/*
+ * Real Time Clock interface for
+ * - q40 and other m68k machines,
+ * - HP PARISC machines
+ * - PowerPC machines
+ * emulate some RTC irq capabilities in software
+ *
+ * Copyright (C) 1999 Richard Zidlicky
+ *
+ * based on Paul Gortmaker's rtc.c device and
+ * Sam Creasey's generic rtc driver
+ *
+ * This driver allows use of the real time clock (built into
+ * nearly all computers) from user space. It exports the /dev/rtc
+ * interface supporting various ioctl() and also the /proc/driver/rtc
+ * pseudo-file for status information.
+ *
+ * The ioctls can be used to set the interrupt behaviour where
+ * supported.
+ *
+ * The /dev/rtc interface will block on reads until an interrupt
+ * has been received. If an RTC interrupt has already happened,
+ * it will output an unsigned long and then block. The output value
+ * contains the interrupt status in the low byte and the number of
+ * interrupts since the last read in the remaining high bytes. The
+ * /dev/rtc interface can also be used with the select(2) call.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * 1.01 fix for 2.3.X rz@linux-m68k.org
+ * 1.02 merged with code from genrtc.c rz@linux-m68k.org
+ * 1.03 make it more portable zippel@linux-m68k.org
+ * 1.04 removed useless timer code rz@linux-m68k.org
+ * 1.05 portable RTC_UIE emulation rz@linux-m68k.org
+ * 1.06 set_rtc_time can return an error trini@kernel.crashing.org
+ * 1.07 ported to HP PARISC (hppa) Helge Deller <deller@gmx.de>
+ */
+
+#define RTC_VERSION "1.07"
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/fcntl.h>
+
+#include <linux/rtc.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/rtc.h>
+
+/*
+ * We sponge a minor off of the misc major. No need slurping
+ * up another valuable major dev number for this. If you add
+ * an ioctl, make sure you don't conflict with SPARC's RTC
+ * ioctls.
+ */
+
+static DEFINE_MUTEX(gen_rtc_mutex);
+static DECLARE_WAIT_QUEUE_HEAD(gen_rtc_wait);
+
+/*
+ * Bits in gen_rtc_status.
+ */
+
+#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */
+
+static unsigned char gen_rtc_status; /* bitmapped status byte. */
+static unsigned long gen_rtc_irq_data; /* our output to the world */
+
+/* months start at 0 now */
+static unsigned char days_in_mo[] =
+{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+
+static int irq_active;
+
+#ifdef CONFIG_GEN_RTC_X
+static struct work_struct genrtc_task;
+static struct timer_list timer_task;
+
+static unsigned int oldsecs;
+static int lostint;
+static unsigned long tt_exp;
+
+static void gen_rtc_timer(unsigned long data);
+
+static volatile int stask_active; /* schedule_work */
+static volatile int ttask_active; /* timer_task */
+static int stop_rtc_timers; /* don't requeue tasks */
+static DEFINE_SPINLOCK(gen_rtc_lock);
+
+static void gen_rtc_interrupt(unsigned long arg);
+
+/*
+ * Routine to poll the RTC seconds field for a change as often as
+ * possible; after the first RTC_UIE a timer is used instead, to
+ * reduce the polling rate.
+ */
+static void genrtc_troutine(struct work_struct *work)
+{
+ unsigned int tmp = get_rtc_ss();
+
+ if (stop_rtc_timers) {
+ stask_active = 0;
+ return;
+ }
+
+	if (oldsecs != tmp) {
+		oldsecs = tmp;
+
+		timer_task.function = gen_rtc_timer;
+		timer_task.expires = jiffies + HZ - (HZ/10);
+		tt_exp = timer_task.expires;
+		ttask_active = 1;
+		stask_active = 0;
+		add_timer(&timer_task);
+
+ gen_rtc_interrupt(0);
+ } else if (schedule_work(&genrtc_task) == 0)
+ stask_active = 0;
+}
+
+static void gen_rtc_timer(unsigned long data)
+{
+	lostint = get_rtc_ss() - oldsecs;
+	if (lostint < 0)
+		lostint = 60 + lostint;	/* the seconds counter wrapped */
+	if (time_after(jiffies, tt_exp))
+		printk(KERN_INFO "genrtc: timer task delayed by %ld jiffies\n",
+		       jiffies - tt_exp);
+	ttask_active = 0;
+	stask_active = 1;
+	if (schedule_work(&genrtc_task) == 0)
+		stask_active = 0;
+}
+
+/*
+ * call gen_rtc_interrupt function to signal an RTC_UIE,
+ * arg is unused.
+ * Could be invoked either from a real interrupt handler or
+ * from some routine that periodically (eg 100HZ) monitors
+ * whether RTC_SECS changed
+ */
+static void gen_rtc_interrupt(unsigned long arg)
+{
+ /* We store the status in the low byte and the number of
+ * interrupts received since the last read in the remainder
+ * of rtc_irq_data. */
+
+ gen_rtc_irq_data += 0x100;
+ gen_rtc_irq_data &= ~0xff;
+ gen_rtc_irq_data |= RTC_UIE;
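+	/* e.g. three unread update events read back as 0x300 | RTC_UIE */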
+
+	if (lostint) {
+		printk(KERN_WARNING "genrtc: system delaying clock ticks?\n");
+ /* increment count so that userspace knows something is wrong */
+ gen_rtc_irq_data += ((lostint-1)<<8);
+ lostint = 0;
+ }
+
+ wake_up_interruptible(&gen_rtc_wait);
+}
+
+/*
+ * Now all the various file operations that we export.
+ */
+static ssize_t gen_rtc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long data;
+ ssize_t retval;
+
+ if (count != sizeof (unsigned int) && count != sizeof (unsigned long))
+ return -EINVAL;
+
+ if (file->f_flags & O_NONBLOCK && !gen_rtc_irq_data)
+ return -EAGAIN;
+
+ retval = wait_event_interruptible(gen_rtc_wait,
+ (data = xchg(&gen_rtc_irq_data, 0)));
+ if (retval)
+ goto out;
+
+ /* first test allows optimizer to nuke this case for 32-bit machines */
+ if (sizeof (int) != sizeof (long) && count == sizeof (unsigned int)) {
+ unsigned int uidata = data;
+ retval = put_user(uidata, (unsigned int __user *)buf) ?:
+ sizeof(unsigned int);
+	} else {
+ retval = put_user(data, (unsigned long __user *)buf) ?:
+ sizeof(unsigned long);
+ }
+out:
+ return retval;
+}
+
+static unsigned int gen_rtc_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ poll_wait(file, &gen_rtc_wait, wait);
+ if (gen_rtc_irq_data != 0)
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+
+#endif
+
+/*
+ * Used to disable/enable interrupts, only RTC_UIE supported
+ * We also clear out any old irq data after an ioctl() that
+ * meddles with the interrupt enable/disable bits.
+ */
+
+static inline void gen_clear_rtc_irq_bit(unsigned char bit)
+{
+#ifdef CONFIG_GEN_RTC_X
+ stop_rtc_timers = 1;
+	if (ttask_active) {
+ del_timer_sync(&timer_task);
+ ttask_active = 0;
+ }
+ while (stask_active)
+ schedule();
+
+ spin_lock(&gen_rtc_lock);
+ irq_active = 0;
+ spin_unlock(&gen_rtc_lock);
+#endif
+}
+
+static inline int gen_set_rtc_irq_bit(unsigned char bit)
+{
+#ifdef CONFIG_GEN_RTC_X
+ spin_lock(&gen_rtc_lock);
+	if (!irq_active) {
+ irq_active = 1;
+ stop_rtc_timers = 0;
+ lostint = 0;
+ INIT_WORK(&genrtc_task, genrtc_troutine);
+ oldsecs = get_rtc_ss();
+ init_timer(&timer_task);
+
+ stask_active = 1;
+		if (schedule_work(&genrtc_task) == 0) {
+ stask_active = 0;
+ }
+ }
+ spin_unlock(&gen_rtc_lock);
+ gen_rtc_irq_data = 0;
+ return 0;
+#else
+ return -EINVAL;
+#endif
+}
+
+static int gen_rtc_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct rtc_time wtime;
+ struct rtc_pll_info pll;
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+
+ case RTC_PLL_GET:
+ if (get_rtc_pll(&pll))
+ return -EINVAL;
+ else
+ return copy_to_user(argp, &pll, sizeof pll) ? -EFAULT : 0;
+
+ case RTC_PLL_SET:
+ if (!capable(CAP_SYS_TIME))
+ return -EACCES;
+ if (copy_from_user(&pll, argp, sizeof(pll)))
+ return -EFAULT;
+ return set_rtc_pll(&pll);
+
+ case RTC_UIE_OFF: /* disable ints from RTC updates. */
+ gen_clear_rtc_irq_bit(RTC_UIE);
+ return 0;
+
+ case RTC_UIE_ON: /* enable ints for RTC updates. */
+ return gen_set_rtc_irq_bit(RTC_UIE);
+
+ case RTC_RD_TIME: /* Read the time/date from RTC */
+ /* this doesn't get week-day, who cares */
+ memset(&wtime, 0, sizeof(wtime));
+ get_rtc_time(&wtime);
+
+ return copy_to_user(argp, &wtime, sizeof(wtime)) ? -EFAULT : 0;
+
+ case RTC_SET_TIME: /* Set the RTC */
+ {
+ int year;
+ unsigned char leap_yr;
+
+ if (!capable(CAP_SYS_TIME))
+ return -EACCES;
+
+ if (copy_from_user(&wtime, argp, sizeof(wtime)))
+ return -EFAULT;
+
+ year = wtime.tm_year + 1900;
+ leap_yr = ((!(year % 4) && (year % 100)) ||
+ !(year % 400));
+
+		if (wtime.tm_mon < 0 || wtime.tm_mon > 11 || wtime.tm_mday < 1)
+			return -EINVAL;
+
+		if (wtime.tm_mday >
+		    (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr)))
+			return -EINVAL;
+
+ if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 ||
+ wtime.tm_min < 0 || wtime.tm_min >= 60 ||
+ wtime.tm_sec < 0 || wtime.tm_sec >= 60)
+ return -EINVAL;
+
+ return set_rtc_time(&wtime);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static long gen_rtc_unlocked_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&gen_rtc_mutex);
+ ret = gen_rtc_ioctl(file, cmd, arg);
+ mutex_unlock(&gen_rtc_mutex);
+
+ return ret;
+}
+
+/*
+ * We enforce only one user at a time here with the open/close.
+ * Also clear the previous interrupt data on an open, and clean
+ * up things on a close.
+ */
+
+static int gen_rtc_open(struct inode *inode, struct file *file)
+{
+ mutex_lock(&gen_rtc_mutex);
+ if (gen_rtc_status & RTC_IS_OPEN) {
+ mutex_unlock(&gen_rtc_mutex);
+ return -EBUSY;
+ }
+
+ gen_rtc_status |= RTC_IS_OPEN;
+ gen_rtc_irq_data = 0;
+ irq_active = 0;
+ mutex_unlock(&gen_rtc_mutex);
+
+ return 0;
+}
+
+static int gen_rtc_release(struct inode *inode, struct file *file)
+{
+ /*
+ * Turn off all interrupts once the device is no longer
+ * in use and clear the data.
+ */
+
+ gen_clear_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE);
+
+ gen_rtc_status &= ~RTC_IS_OPEN;
+ return 0;
+}
+
+
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Info exported via "/proc/driver/rtc".
+ */
+
+static int gen_rtc_proc_output(char *buf)
+{
+ char *p;
+ struct rtc_time tm;
+ unsigned int flags;
+ struct rtc_pll_info pll;
+
+ p = buf;
+
+ flags = get_rtc_time(&tm);
+
+ p += sprintf(p,
+ "rtc_time\t: %02d:%02d:%02d\n"
+ "rtc_date\t: %04d-%02d-%02d\n"
+ "rtc_epoch\t: %04u\n",
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, 1900);
+
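+	/* alarm read-back is not supported here, so show a zeroed alarm */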
+ tm.tm_hour = tm.tm_min = tm.tm_sec = 0;
+
+ p += sprintf(p, "alarm\t\t: ");
+ if (tm.tm_hour <= 24)
+ p += sprintf(p, "%02d:", tm.tm_hour);
+ else
+ p += sprintf(p, "**:");
+
+ if (tm.tm_min <= 59)
+ p += sprintf(p, "%02d:", tm.tm_min);
+ else
+ p += sprintf(p, "**:");
+
+ if (tm.tm_sec <= 59)
+ p += sprintf(p, "%02d\n", tm.tm_sec);
+ else
+ p += sprintf(p, "**\n");
+
+ p += sprintf(p,
+ "DST_enable\t: %s\n"
+ "BCD\t\t: %s\n"
+ "24hr\t\t: %s\n"
+ "square_wave\t: %s\n"
+ "alarm_IRQ\t: %s\n"
+ "update_IRQ\t: %s\n"
+ "periodic_IRQ\t: %s\n"
+ "periodic_freq\t: %ld\n"
+ "batt_status\t: %s\n",
+ (flags & RTC_DST_EN) ? "yes" : "no",
+ (flags & RTC_DM_BINARY) ? "no" : "yes",
+ (flags & RTC_24H) ? "yes" : "no",
+ (flags & RTC_SQWE) ? "yes" : "no",
+ (flags & RTC_AIE) ? "yes" : "no",
+ irq_active ? "yes" : "no",
+ (flags & RTC_PIE) ? "yes" : "no",
+ 0L /* freq */,
+ (flags & RTC_BATT_BAD) ? "bad" : "okay");
+ if (!get_rtc_pll(&pll))
+ p += sprintf(p,
+ "PLL adjustment\t: %d\n"
+ "PLL max +ve adjustment\t: %d\n"
+ "PLL max -ve adjustment\t: %d\n"
+ "PLL +ve adjustment factor\t: %d\n"
+ "PLL -ve adjustment factor\t: %d\n"
+ "PLL frequency\t: %ld\n",
+ pll.pll_value,
+ pll.pll_max,
+ pll.pll_min,
+ pll.pll_posmult,
+ pll.pll_negmult,
+ pll.pll_clock);
+ return p - buf;
+}
+
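+/*
+ * Legacy read_proc contract: report the full formatted length, point
+ * *start at the window the caller asked for, and set *eof once
+ * off + count covers everything that was produced.
+ */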
+static int gen_rtc_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+	int len = gen_rtc_proc_output(page);
+
+	if (len <= off + count)
+		*eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len > count)
+		len = count;
+	if (len < 0)
+		len = 0;
+	return len;
+}
+
+static int __init gen_rtc_proc_init(void)
+{
+ struct proc_dir_entry *r;
+
+ r = create_proc_read_entry("driver/rtc", 0, NULL, gen_rtc_read_proc, NULL);
+ if (!r)
+ return -ENOMEM;
+ return 0;
+}
+#else
+static inline int gen_rtc_proc_init(void) { return 0; }
+#endif /* CONFIG_PROC_FS */
+
+
+/*
+ * The various file operations we support.
+ */
+
+static const struct file_operations gen_rtc_fops = {
+ .owner = THIS_MODULE,
+#ifdef CONFIG_GEN_RTC_X
+ .read = gen_rtc_read,
+ .poll = gen_rtc_poll,
+#endif
+ .unlocked_ioctl = gen_rtc_unlocked_ioctl,
+ .open = gen_rtc_open,
+ .release = gen_rtc_release,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice rtc_gen_dev =
+{
+ .minor = RTC_MINOR,
+ .name = "rtc",
+ .fops = &gen_rtc_fops,
+};
+
+static int __init rtc_generic_init(void)
+{
+ int retval;
+
+ printk(KERN_INFO "Generic RTC Driver v%s\n", RTC_VERSION);
+
+ retval = misc_register(&rtc_gen_dev);
+ if (retval < 0)
+ return retval;
+
+ retval = gen_rtc_proc_init();
+ if (retval) {
+ misc_deregister(&rtc_gen_dev);
+ return retval;
+ }
+
+ return 0;
+}
+
+static void __exit rtc_generic_exit(void)
+{
+	remove_proc_entry("driver/rtc", NULL);
+ misc_deregister(&rtc_gen_dev);
+}
+
+
+module_init(rtc_generic_init);
+module_exit(rtc_generic_exit);
+
+MODULE_AUTHOR("Richard Zidlicky");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(RTC_MINOR);
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
new file mode 100644
index 00000000..f953c96e
--- /dev/null
+++ b/drivers/char/hangcheck-timer.c
@@ -0,0 +1,211 @@
+/*
+ * hangcheck-timer.c
+ *
+ * Driver for a little I/O fencing timer.
+ *
+ * Copyright (C) 2002, 2003 Oracle. All rights reserved.
+ *
+ * Author: Joel Becker <joel.becker@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+/*
+ * The hangcheck-timer driver uses the TSC to catch delays that
+ * jiffies does not notice. A timer is set. When the timer fires, it
+ * checks whether it was delayed and if that delay exceeds a given
+ * margin of error. The hangcheck_tick module parameter takes the timer
+ * duration in seconds. The hangcheck_margin parameter defines the
+ * margin of error, in seconds. The defaults are 180 seconds for the
+ * timer and 60 seconds for the margin of error, matching the
+ * DEFAULT_IOFENCE_* values below. IOW, a timer is set for 180
+ * seconds. When the timer fires, the callback checks the
+ * actual duration that the timer waited. If the duration exceeds the
+ * allotted time and margin (here 180 + 60, or 240 seconds), the machine
+ * is restarted. A healthy machine will have the duration match the
+ * expected timeout very closely.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <asm/uaccess.h>
+#include <linux/sysrq.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+
+#define VERSION_STR "0.9.1"
+
+#define DEFAULT_IOFENCE_MARGIN 60 /* Default fudge factor, in seconds */
+#define DEFAULT_IOFENCE_TICK 180 /* Default timer timeout, in seconds */
+
+static int hangcheck_tick = DEFAULT_IOFENCE_TICK;
+static int hangcheck_margin = DEFAULT_IOFENCE_MARGIN;
+static int hangcheck_reboot; /* Defaults to not reboot */
+static int hangcheck_dump_tasks; /* Defaults to not dumping SysRQ T */
+
+/* options - modular */
+module_param(hangcheck_tick, int, 0);
+MODULE_PARM_DESC(hangcheck_tick, "Timer delay.");
+module_param(hangcheck_margin, int, 0);
+MODULE_PARM_DESC(hangcheck_margin, "If the hangcheck timer has been delayed more than hangcheck_margin seconds, the driver will fire.");
+module_param(hangcheck_reboot, int, 0);
+MODULE_PARM_DESC(hangcheck_reboot, "If nonzero, the machine will reboot when the timer margin is exceeded.");
+module_param(hangcheck_dump_tasks, int, 0);
+MODULE_PARM_DESC(hangcheck_dump_tasks, "If nonzero, the machine will dump the system task state when the timer margin is exceeded.");
+
+MODULE_AUTHOR("Oracle");
+MODULE_DESCRIPTION("Hangcheck-timer detects when the system has gone out to lunch past a certain margin.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION_STR);
+
+/* options - nonmodular */
+#ifndef MODULE
+
+static int __init hangcheck_parse_tick(char *str)
+{
+ int par;
+ if (get_option(&str,&par))
+ hangcheck_tick = par;
+ return 1;
+}
+
+static int __init hangcheck_parse_margin(char *str)
+{
+ int par;
+ if (get_option(&str,&par))
+ hangcheck_margin = par;
+ return 1;
+}
+
+static int __init hangcheck_parse_reboot(char *str)
+{
+ int par;
+ if (get_option(&str,&par))
+ hangcheck_reboot = par;
+ return 1;
+}
+
+static int __init hangcheck_parse_dump_tasks(char *str)
+{
+ int par;
+ if (get_option(&str,&par))
+ hangcheck_dump_tasks = par;
+ return 1;
+}
+
+__setup("hcheck_tick", hangcheck_parse_tick);
+__setup("hcheck_margin", hangcheck_parse_margin);
+__setup("hcheck_reboot", hangcheck_parse_reboot);
+__setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
+#endif /* not MODULE */
+
+#if defined(CONFIG_S390)
+# define HAVE_MONOTONIC
+#endif
+#define TIMER_FREQ 1000000000ULL
+
+#ifdef HAVE_MONOTONIC
+extern unsigned long long monotonic_clock(void);
+#else
+static inline unsigned long long monotonic_clock(void)
+{
+ struct timespec ts;
+ getrawmonotonic(&ts);
+ return timespec_to_ns(&ts);
+}
+#endif /* HAVE_MONOTONIC */
+
+
+/* Last time scheduled */
+static unsigned long long hangcheck_tsc, hangcheck_tsc_margin;
+
+static void hangcheck_fire(unsigned long);
+
+static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire, 0, 0);
+
+
+static void hangcheck_fire(unsigned long data)
+{
+ unsigned long long cur_tsc, tsc_diff;
+
+ cur_tsc = monotonic_clock();
+
+ if (cur_tsc > hangcheck_tsc)
+ tsc_diff = cur_tsc - hangcheck_tsc;
+ else
+		tsc_diff = cur_tsc + (~0ULL - hangcheck_tsc) + 1; /* counter wrapped */
+
+ if (tsc_diff > hangcheck_tsc_margin) {
+ if (hangcheck_dump_tasks) {
+ printk(KERN_CRIT "Hangcheck: Task state:\n");
+#ifdef CONFIG_MAGIC_SYSRQ
+ handle_sysrq('t');
+#endif /* CONFIG_MAGIC_SYSRQ */
+ }
+ if (hangcheck_reboot) {
+ printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n");
+ emergency_restart();
+ } else {
+ printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
+ }
+ }
+#if 0
+ /*
+ * Enable to investigate delays in detail
+ */
+ printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
+ tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
+#endif
+ mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
+ hangcheck_tsc = monotonic_clock();
+}
+
+
+static int __init hangcheck_init(void)
+{
+ printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
+ VERSION_STR, hangcheck_tick, hangcheck_margin);
+#if defined (HAVE_MONOTONIC)
+ printk("Hangcheck: Using monotonic_clock().\n");
+#else
+ printk("Hangcheck: Using getrawmonotonic().\n");
+#endif /* HAVE_MONOTONIC */
+ hangcheck_tsc_margin =
+ (unsigned long long)(hangcheck_margin + hangcheck_tick);
+ hangcheck_tsc_margin *= (unsigned long long)TIMER_FREQ;
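+	/* with the defaults: (180 + 60) s * 10^9 ns/s, i.e. a 240 s threshold */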
+
+ hangcheck_tsc = monotonic_clock();
+ mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
+
+ return 0;
+}
+
+
+static void __exit hangcheck_exit(void)
+{
+ del_timer_sync(&hangcheck_ticktock);
+ printk("Hangcheck: Stopped hangcheck timer.\n");
+}
+
+module_init(hangcheck_init);
+module_exit(hangcheck_exit);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
new file mode 100644
index 00000000..34d6a1ca
--- /dev/null
+++ b/drivers/char/hpet.c
@@ -0,0 +1,1100 @@
+/*
+ * Intel & MS High Precision Event Timer Implementation.
+ *
+ * Copyright (C) 2003 Intel Corporation
+ * Venki Pallipadi
+ * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
+ * Bob Picco <robert.picco@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/major.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/sysctl.h>
+#include <linux/wait.h>
+#include <linux/bcd.h>
+#include <linux/seq_file.h>
+#include <linux/bitops.h>
+#include <linux/compat.h>
+#include <linux/clocksource.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <asm/current.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <linux/hpet.h>
+
+/*
+ * The High Precision Event Timer driver.
+ * This driver is closely modelled after the rtc.c driver.
+ * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
+ */
+#define HPET_USER_FREQ (64)
+#define HPET_DRIFT (500)
+
+#define HPET_RANGE_SIZE 1024 /* from HPET spec */
+
+
+/* WARNING -- don't get confused. These macros are never used
+ * to write the (single) counter, and rarely to read it.
+ * They're badly named; to fix, someday.
+ */
+#if BITS_PER_LONG == 64
+#define write_counter(V, MC) writeq(V, MC)
+#define read_counter(MC) readq(MC)
+#else
+#define write_counter(V, MC) writel(V, MC)
+#define read_counter(MC) readl(MC)
+#endif
+
+static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
+static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
+
+/* This clocksource driver currently only works on ia64 */
+#ifdef CONFIG_IA64
+static void __iomem *hpet_mctr;
+
+static cycle_t read_hpet(struct clocksource *cs)
+{
+ return (cycle_t)read_counter((void __iomem *)hpet_mctr);
+}
+
+static struct clocksource clocksource_hpet = {
+ .name = "hpet",
+ .rating = 250,
+ .read = read_hpet,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+static struct clocksource *hpet_clocksource;
+#endif
+
+/* A lock for concurrent access by app and isr hpet activity. */
+static DEFINE_SPINLOCK(hpet_lock);
+
+#define HPET_DEV_NAME (7)
+
+struct hpet_dev {
+ struct hpets *hd_hpets;
+ struct hpet __iomem *hd_hpet;
+ struct hpet_timer __iomem *hd_timer;
+ unsigned long hd_ireqfreq;
+ unsigned long hd_irqdata;
+ wait_queue_head_t hd_waitqueue;
+ struct fasync_struct *hd_async_queue;
+ unsigned int hd_flags;
+ unsigned int hd_irq;
+ unsigned int hd_hdwirq;
+ char hd_name[HPET_DEV_NAME];
+};
+
+struct hpets {
+ struct hpets *hp_next;
+ struct hpet __iomem *hp_hpet;
+ unsigned long hp_hpet_phys;
+ struct clocksource *hp_clocksource;
+ unsigned long long hp_tick_freq;
+ unsigned long hp_delta;
+ unsigned int hp_ntimer;
+ unsigned int hp_which;
+ struct hpet_dev hp_dev[1];
+};
+
+static struct hpets *hpets;
+
+#define HPET_OPEN 0x0001
+#define HPET_IE 0x0002 /* interrupt enabled */
+#define HPET_PERIODIC 0x0004
+#define HPET_SHARED_IRQ 0x0008
+
+
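+/*
+ * Fallbacks for platforms without native 64-bit MMIO accessors: these
+ * issue two 32-bit accesses, so a 64-bit read is not atomic with
+ * respect to a carry between the two halves.
+ */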
+#ifndef readq
+static inline unsigned long long readq(void __iomem *addr)
+{
+ return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(unsigned long long v, void __iomem *addr)
+{
+ writel(v & 0xffffffff, addr);
+ writel(v >> 32, addr + 4);
+}
+#endif
+
+static irqreturn_t hpet_interrupt(int irq, void *data)
+{
+ struct hpet_dev *devp;
+ unsigned long isr;
+
+ devp = data;
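+	/* each timer owns the ISR status bit matching its index in hp_dev[] */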
+ isr = 1 << (devp - devp->hd_hpets->hp_dev);
+
+ if ((devp->hd_flags & HPET_SHARED_IRQ) &&
+ !(isr & readl(&devp->hd_hpet->hpet_isr)))
+ return IRQ_NONE;
+
+ spin_lock(&hpet_lock);
+ devp->hd_irqdata++;
+
+ /*
+ * For non-periodic timers, increment the accumulator.
+ * This has the effect of treating non-periodic like periodic.
+ */
+ if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
+ unsigned long m, t, mc, base, k;
+ struct hpet __iomem *hpet = devp->hd_hpet;
+ struct hpets *hpetp = devp->hd_hpets;
+
+ t = devp->hd_ireqfreq;
+ m = read_counter(&devp->hd_timer->hpet_compare);
+ mc = read_counter(&hpet->hpet_mc);
+ /* The time for the next interrupt would logically be t + m,
+ * however, if we are very unlucky and the interrupt is delayed
+ * for longer than t then we will completely miss the next
+ * interrupt if we set t + m and an application will hang.
+ * Therefore we need to make a more complex computation assuming
+ * that there exists a k for which the following is true:
+ * k * t + base < mc + delta
+ * (k + 1) * t + base > mc + delta
+ * where t is the interval in hpet ticks for the given freq,
+ * base is the theoretical start value 0 < base < t,
+ * mc is the main counter value at the time of the interrupt,
+	 * delta is the time it takes to write a value to the
+ * comparator.
+ * k may then be computed as (mc - base + delta) / t .
+ */
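+		/*
+		 * Worked example (illustrative numbers): with t = 1000,
+		 * mc = 5300 and delta = 50, base = 300 and
+		 * k = (5300 - 300 + 50) / 1000 = 5, so the comparator is
+		 * set to 6 * 1000 + 300 = 6300, the first tick of the
+		 * requested period beyond mc + delta.
+		 */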
+ base = mc % t;
+ k = (mc - base + hpetp->hp_delta) / t;
+ write_counter(t * (k + 1) + base,
+ &devp->hd_timer->hpet_compare);
+ }
+
+ if (devp->hd_flags & HPET_SHARED_IRQ)
+ writel(isr, &devp->hd_hpet->hpet_isr);
+ spin_unlock(&hpet_lock);
+
+ wake_up_interruptible(&devp->hd_waitqueue);
+
+ kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);
+
+ return IRQ_HANDLED;
+}
+
+static void hpet_timer_set_irq(struct hpet_dev *devp)
+{
+ unsigned long v;
+ int irq, gsi;
+ struct hpet_timer __iomem *timer;
+
+ spin_lock_irq(&hpet_lock);
+ if (devp->hd_hdwirq) {
+ spin_unlock_irq(&hpet_lock);
+ return;
+ }
+
+ timer = devp->hd_timer;
+
+ /* we prefer level triggered mode */
+ v = readl(&timer->hpet_config);
+ if (!(v & Tn_INT_TYPE_CNF_MASK)) {
+ v |= Tn_INT_TYPE_CNF_MASK;
+ writel(v, &timer->hpet_config);
+ }
+ spin_unlock_irq(&hpet_lock);
+
+ v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >>
+ Tn_INT_ROUTE_CAP_SHIFT;
+
+ /*
+	 * In PIC mode, skip IRQ0-4, IRQ6-9 and IRQ12-15, which are always
+	 * used by legacy devices. In IO APIC mode, we skip all the legacy IRQs.
+ */
+ if (acpi_irq_model == ACPI_IRQ_MODEL_PIC)
+ v &= ~0xf3df;
+ else
+ v &= ~0xffff;
+
+ for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
+ if (irq >= nr_irqs) {
+ irq = HPET_MAX_IRQ;
+ break;
+ }
+
+ gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
+ ACPI_ACTIVE_LOW);
+ if (gsi > 0)
+ break;
+
+ /* FIXME: Setup interrupt source table */
+ }
+
+ if (irq < HPET_MAX_IRQ) {
+ spin_lock_irq(&hpet_lock);
+ v = readl(&timer->hpet_config);
+ v |= irq << Tn_INT_ROUTE_CNF_SHIFT;
+ writel(v, &timer->hpet_config);
+ devp->hd_hdwirq = gsi;
+ spin_unlock_irq(&hpet_lock);
+ }
+ return;
+}
+
+static int hpet_open(struct inode *inode, struct file *file)
+{
+ struct hpet_dev *devp;
+ struct hpets *hpetp;
+ int i;
+
+ if (file->f_mode & FMODE_WRITE)
+ return -EINVAL;
+
+ mutex_lock(&hpet_mutex);
+ spin_lock_irq(&hpet_lock);
+
+ for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
+ for (i = 0; i < hpetp->hp_ntimer; i++)
+ if (hpetp->hp_dev[i].hd_flags & HPET_OPEN)
+ continue;
+ else {
+ devp = &hpetp->hp_dev[i];
+ break;
+ }
+
+ if (!devp) {
+ spin_unlock_irq(&hpet_lock);
+ mutex_unlock(&hpet_mutex);
+ return -EBUSY;
+ }
+
+ file->private_data = devp;
+ devp->hd_irqdata = 0;
+ devp->hd_flags |= HPET_OPEN;
+ spin_unlock_irq(&hpet_lock);
+ mutex_unlock(&hpet_mutex);
+
+ hpet_timer_set_irq(devp);
+
+ return 0;
+}
+
+static ssize_t
+hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long data;
+ ssize_t retval;
+ struct hpet_dev *devp;
+
+ devp = file->private_data;
+ if (!devp->hd_ireqfreq)
+ return -EIO;
+
+ if (count < sizeof(unsigned long))
+ return -EINVAL;
+
+ add_wait_queue(&devp->hd_waitqueue, &wait);
+
+ for ( ; ; ) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_lock_irq(&hpet_lock);
+ data = devp->hd_irqdata;
+ devp->hd_irqdata = 0;
+ spin_unlock_irq(&hpet_lock);
+
+ if (data)
+ break;
+ else if (file->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ goto out;
+ } else if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ goto out;
+ }
+ schedule();
+ }
+
+ retval = put_user(data, (unsigned long __user *)buf);
+ if (!retval)
+ retval = sizeof(unsigned long);
+out:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&devp->hd_waitqueue, &wait);
+
+ return retval;
+}
+
+static unsigned int hpet_poll(struct file *file, poll_table * wait)
+{
+ unsigned long v;
+ struct hpet_dev *devp;
+
+ devp = file->private_data;
+
+ if (!devp->hd_ireqfreq)
+ return 0;
+
+ poll_wait(file, &devp->hd_waitqueue, wait);
+
+ spin_lock_irq(&hpet_lock);
+ v = devp->hd_irqdata;
+ spin_unlock_irq(&hpet_lock);
+
+ if (v != 0)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
+{
+#ifdef CONFIG_HPET_MMAP
+ struct hpet_dev *devp;
+ unsigned long addr;
+
+ if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
+ return -EINVAL;
+
+ devp = file->private_data;
+ addr = devp->hd_hpets->hp_hpet_phys;
+
+ if (addr & (PAGE_SIZE - 1))
+ return -ENOSYS;
+
+ vma->vm_flags |= VM_IO;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot)) {
+ printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ return 0;
+#else
+ return -ENOSYS;
+#endif
+}
+
+static int hpet_fasync(int fd, struct file *file, int on)
+{
+ struct hpet_dev *devp;
+
+ devp = file->private_data;
+
+ if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0)
+ return 0;
+ else
+ return -EIO;
+}
+
+static int hpet_release(struct inode *inode, struct file *file)
+{
+ struct hpet_dev *devp;
+ struct hpet_timer __iomem *timer;
+ int irq = 0;
+
+ devp = file->private_data;
+ timer = devp->hd_timer;
+
+ spin_lock_irq(&hpet_lock);
+
+ writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
+ &timer->hpet_config);
+
+ irq = devp->hd_irq;
+ devp->hd_irq = 0;
+
+ devp->hd_ireqfreq = 0;
+
+ if (devp->hd_flags & HPET_PERIODIC
+ && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
+ unsigned long v;
+
+ v = readq(&timer->hpet_config);
+ v ^= Tn_TYPE_CNF_MASK;
+ writeq(v, &timer->hpet_config);
+ }
+
+ devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
+ spin_unlock_irq(&hpet_lock);
+
+ if (irq)
+ free_irq(irq, devp);
+
+ file->private_data = NULL;
+ return 0;
+}
+
+static int hpet_ioctl_ieon(struct hpet_dev *devp)
+{
+ struct hpet_timer __iomem *timer;
+ struct hpet __iomem *hpet;
+ struct hpets *hpetp;
+ int irq;
+ unsigned long g, v, t, m;
+ unsigned long flags, isr;
+
+ timer = devp->hd_timer;
+ hpet = devp->hd_hpet;
+ hpetp = devp->hd_hpets;
+
+ if (!devp->hd_ireqfreq)
+ return -EIO;
+
+ spin_lock_irq(&hpet_lock);
+
+ if (devp->hd_flags & HPET_IE) {
+ spin_unlock_irq(&hpet_lock);
+ return -EBUSY;
+ }
+
+ devp->hd_flags |= HPET_IE;
+
+ if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK)
+ devp->hd_flags |= HPET_SHARED_IRQ;
+ spin_unlock_irq(&hpet_lock);
+
+ irq = devp->hd_hdwirq;
+
+ if (irq) {
+ unsigned long irq_flags;
+
+ if (devp->hd_flags & HPET_SHARED_IRQ) {
+ /*
+ * To prevent the interrupt handler from seeing an
+ * unwanted interrupt status bit, program the timer
+ * so that it will not fire in the near future ...
+ */
+ writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
+ &timer->hpet_config);
+ write_counter(read_counter(&hpet->hpet_mc),
+ &timer->hpet_compare);
+ /* ... and clear any left-over status. */
+ isr = 1 << (devp - devp->hd_hpets->hp_dev);
+ writel(isr, &hpet->hpet_isr);
+ }
+
+ sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
+ irq_flags = devp->hd_flags & HPET_SHARED_IRQ
+ ? IRQF_SHARED : IRQF_DISABLED;
+ if (request_irq(irq, hpet_interrupt, irq_flags,
+ devp->hd_name, (void *)devp)) {
+ printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
+ irq = 0;
+ }
+ }
+
+ if (irq == 0) {
+ spin_lock_irq(&hpet_lock);
+ devp->hd_flags ^= HPET_IE;
+ spin_unlock_irq(&hpet_lock);
+ return -EIO;
+ }
+
+ devp->hd_irq = irq;
+ t = devp->hd_ireqfreq;
+ v = readq(&timer->hpet_config);
+
+ /* 64-bit comparators are not yet supported through the ioctls,
+ * so force this into 32-bit mode if it supports both modes
+ */
+ g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;
+
+ if (devp->hd_flags & HPET_PERIODIC) {
+ g |= Tn_TYPE_CNF_MASK;
+ v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;
+ writeq(v, &timer->hpet_config);
+ local_irq_save(flags);
+
+ /*
+ * NOTE: First we modify the hidden accumulator
+ * register supported by periodic-capable comparators.
+ * We never want to modify the (single) counter; that
+ * would affect all the comparators. The value written
+ * is the counter value when the first interrupt is due.
+ */
+ m = read_counter(&hpet->hpet_mc);
+ write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
+ /*
+ * Then we modify the comparator, indicating the period
+ * for subsequent interrupt.
+ */
+ write_counter(t, &timer->hpet_compare);
+ } else {
+ local_irq_save(flags);
+ m = read_counter(&hpet->hpet_mc);
+ write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
+ }
+
+ if (devp->hd_flags & HPET_SHARED_IRQ) {
+ isr = 1 << (devp - devp->hd_hpets->hp_dev);
+ writel(isr, &hpet->hpet_isr);
+ }
+ writeq(g, &timer->hpet_config);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+/* converts a frequency in Hz into the corresponding period in timer ticks */
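+/* adding dis/2 before do_div() makes the truncating divide round to nearest */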
+static inline unsigned long hpet_time_div(struct hpets *hpets,
+ unsigned long dis)
+{
+ unsigned long long m;
+
+ m = hpets->hp_tick_freq + (dis >> 1);
+ do_div(m, dis);
+ return (unsigned long)m;
+}
+
+static int
+hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
+ struct hpet_info *info)
+{
+ struct hpet_timer __iomem *timer;
+ struct hpet __iomem *hpet;
+ struct hpets *hpetp;
+ int err;
+ unsigned long v;
+
+ switch (cmd) {
+ case HPET_IE_OFF:
+ case HPET_INFO:
+ case HPET_EPI:
+ case HPET_DPI:
+ case HPET_IRQFREQ:
+ timer = devp->hd_timer;
+ hpet = devp->hd_hpet;
+ hpetp = devp->hd_hpets;
+ break;
+ case HPET_IE_ON:
+ return hpet_ioctl_ieon(devp);
+ default:
+ return -EINVAL;
+ }
+
+ err = 0;
+
+ switch (cmd) {
+ case HPET_IE_OFF:
+ if ((devp->hd_flags & HPET_IE) == 0)
+ break;
+ v = readq(&timer->hpet_config);
+ v &= ~Tn_INT_ENB_CNF_MASK;
+ writeq(v, &timer->hpet_config);
+ if (devp->hd_irq) {
+ free_irq(devp->hd_irq, devp);
+ devp->hd_irq = 0;
+ }
+ devp->hd_flags ^= HPET_IE;
+ break;
+ case HPET_INFO:
+ {
+ memset(info, 0, sizeof(*info));
+ if (devp->hd_ireqfreq)
+ info->hi_ireqfreq =
+ hpet_time_div(hpetp, devp->hd_ireqfreq);
+ info->hi_flags =
+ readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
+ info->hi_hpet = hpetp->hp_which;
+ info->hi_timer = devp - hpetp->hp_dev;
+ break;
+ }
+ case HPET_EPI:
+ v = readq(&timer->hpet_config);
+ if ((v & Tn_PER_INT_CAP_MASK) == 0) {
+ err = -ENXIO;
+ break;
+ }
+ devp->hd_flags |= HPET_PERIODIC;
+ break;
+ case HPET_DPI:
+ v = readq(&timer->hpet_config);
+ if ((v & Tn_PER_INT_CAP_MASK) == 0) {
+ err = -ENXIO;
+ break;
+ }
+ if (devp->hd_flags & HPET_PERIODIC &&
+ readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
+ v = readq(&timer->hpet_config);
+ v ^= Tn_TYPE_CNF_MASK;
+ writeq(v, &timer->hpet_config);
+ }
+ devp->hd_flags &= ~HPET_PERIODIC;
+ break;
+ case HPET_IRQFREQ:
+ if ((arg > hpet_max_freq) &&
+ !capable(CAP_SYS_RESOURCE)) {
+ err = -EACCES;
+ break;
+ }
+
+ if (!arg) {
+ err = -EINVAL;
+ break;
+ }
+
+ devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
+ }
+
+ return err;
+}
+
+static long
+hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct hpet_info info;
+ int err;
+
+ mutex_lock(&hpet_mutex);
+ err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
+ mutex_unlock(&hpet_mutex);
+
+ if ((cmd == HPET_INFO) && !err &&
+ (copy_to_user((void __user *)arg, &info, sizeof(info))))
+ err = -EFAULT;
+
+ return err;
+}
+
+#ifdef CONFIG_COMPAT
+struct compat_hpet_info {
+ compat_ulong_t hi_ireqfreq; /* Hz */
+ compat_ulong_t hi_flags; /* information */
+ unsigned short hi_hpet;
+ unsigned short hi_timer;
+};
+
+static long
+hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct hpet_info info;
+ int err;
+
+ mutex_lock(&hpet_mutex);
+ err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
+ mutex_unlock(&hpet_mutex);
+
+ if ((cmd == HPET_INFO) && !err) {
+ struct compat_hpet_info __user *u = compat_ptr(arg);
+ if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) ||
+ put_user(info.hi_flags, &u->hi_flags) ||
+ put_user(info.hi_hpet, &u->hi_hpet) ||
+ put_user(info.hi_timer, &u->hi_timer))
+ err = -EFAULT;
+ }
+
+ return err;
+}
+#endif
+
+static const struct file_operations hpet_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = hpet_read,
+ .poll = hpet_poll,
+ .unlocked_ioctl = hpet_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = hpet_compat_ioctl,
+#endif
+ .open = hpet_open,
+ .release = hpet_release,
+ .fasync = hpet_fasync,
+ .mmap = hpet_mmap,
+};
+
+static int hpet_is_known(struct hpet_data *hdp)
+{
+ struct hpets *hpetp;
+
+ for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
+ if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
+ return 1;
+
+ return 0;
+}
+
+static ctl_table hpet_table[] = {
+ {
+ .procname = "max-user-freq",
+ .data = &hpet_max_freq,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+
+static ctl_table hpet_root[] = {
+ {
+ .procname = "hpet",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = hpet_table,
+ },
+ {}
+};
+
+static ctl_table dev_root[] = {
+ {
+ .procname = "dev",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = hpet_root,
+ },
+ {}
+};
+
+static struct ctl_table_header *sysctl_header;
+
+/*
+ * Adjustment for when arming the timer with
+ * initial conditions. That is, main counter
+ * ticks expired before interrupts are enabled.
+ */
+#define TICK_CALIBRATE (1000UL)
+
+static unsigned long __hpet_calibrate(struct hpets *hpetp)
+{
+ struct hpet_timer __iomem *timer = NULL;
+ unsigned long t, m, count, i, flags, start;
+ struct hpet_dev *devp;
+ int j;
+ struct hpet __iomem *hpet;
+
+ for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
+ if ((devp->hd_flags & HPET_OPEN) == 0) {
+ timer = devp->hd_timer;
+ break;
+ }
+
+ if (!timer)
+ return 0;
+
+ hpet = hpetp->hp_hpet;
+ t = read_counter(&timer->hpet_compare);
+
+ i = 0;
+ count = hpet_time_div(hpetp, TICK_CALIBRATE);
+
+ local_irq_save(flags);
+
+ start = read_counter(&hpet->hpet_mc);
+
+ do {
+ m = read_counter(&hpet->hpet_mc);
+ write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
+ } while (i++, (m - start) < count);
+
+ local_irq_restore(flags);
+
+ return (m - start) / i;
+}
+
+static unsigned long hpet_calibrate(struct hpets *hpetp)
+{
+ unsigned long ret = -1;
+ unsigned long tmp;
+
+ /*
+	 * Try to calibrate until the return value becomes a stable, small
+	 * value. If an SMI interrupts a calibration pass, that pass returns
+	 * a large value; repeating until the result stops shrinking filters
+	 * such passes out.
+ */
+ for ( ; ; ) {
+ tmp = __hpet_calibrate(hpetp);
+ if (ret <= tmp)
+ break;
+ ret = tmp;
+ }
+
+ return ret;
+}
+
+int hpet_alloc(struct hpet_data *hdp)
+{
+ u64 cap, mcfg;
+ struct hpet_dev *devp;
+ u32 i, ntimer;
+ struct hpets *hpetp;
+ size_t siz;
+ struct hpet __iomem *hpet;
+ static struct hpets *last;
+ unsigned long period;
+ unsigned long long temp;
+ u32 remainder;
+
+ /*
+ * hpet_alloc can be called by platform dependent code.
+ * If platform dependent code has allocated the hpet that
+ * ACPI has also reported, then we catch it here.
+ */
+ if (hpet_is_known(hdp)) {
+ printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
+ __func__);
+ return 0;
+ }
+
+ siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
+ sizeof(struct hpet_dev));
+
+ hpetp = kzalloc(siz, GFP_KERNEL);
+
+ if (!hpetp)
+ return -ENOMEM;
+
+ hpetp->hp_which = hpet_nhpet++;
+ hpetp->hp_hpet = hdp->hd_address;
+ hpetp->hp_hpet_phys = hdp->hd_phys_address;
+
+ hpetp->hp_ntimer = hdp->hd_nirqs;
+
+ for (i = 0; i < hdp->hd_nirqs; i++)
+ hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
+
+ hpet = hpetp->hp_hpet;
+
+ cap = readq(&hpet->hpet_cap);
+
+ ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;
+
+ if (hpetp->hp_ntimer != ntimer) {
+		printk(KERN_WARNING "hpet: number of irqs doesn't agree"
+		       " with number of timers\n");
+ kfree(hpetp);
+ return -ENODEV;
+ }
+
+ if (last)
+ last->hp_next = hpetp;
+ else
+ hpets = hpetp;
+
+ last = hpetp;
+
+ period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
+ HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
+ temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
+ temp += period >> 1; /* round */
+ do_div(temp, period);
+ hpetp->hp_tick_freq = temp; /* ticks per second */
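+	/*
+	 * Worked example: the common 14.31818 MHz HPET advertises a
+	 * period of 69841279 fs, and (10^15 + period/2) / period
+	 * rounds to 14318180 ticks per second.
+	 */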
+
+ printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
+ hpetp->hp_which, hdp->hd_phys_address,
+ hpetp->hp_ntimer > 1 ? "s" : "");
+ for (i = 0; i < hpetp->hp_ntimer; i++)
+ printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
+ printk("\n");
+
+ temp = hpetp->hp_tick_freq;
+ remainder = do_div(temp, 1000000);
+ printk(KERN_INFO
+ "hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n",
+ hpetp->hp_which, hpetp->hp_ntimer,
+ cap & HPET_COUNTER_SIZE_MASK ? 64 : 32,
+ (unsigned) temp, remainder);
+
+ mcfg = readq(&hpet->hpet_config);
+ if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
+ write_counter(0L, &hpet->hpet_mc);
+ mcfg |= HPET_ENABLE_CNF_MASK;
+ writeq(mcfg, &hpet->hpet_config);
+ }
+
+ for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) {
+ struct hpet_timer __iomem *timer;
+
+ timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
+
+ devp->hd_hpets = hpetp;
+ devp->hd_hpet = hpet;
+ devp->hd_timer = timer;
+
+		/*
+		 * If the timer was reserved by platform code, mark it
+		 * unavailable for opens.
+		 */
+ if (hdp->hd_state & (1 << i)) {
+ devp->hd_flags = HPET_OPEN;
+ continue;
+ }
+
+ init_waitqueue_head(&devp->hd_waitqueue);
+ }
+
+ hpetp->hp_delta = hpet_calibrate(hpetp);
+
+/* This clocksource driver currently only works on ia64 */
+#ifdef CONFIG_IA64
+ if (!hpet_clocksource) {
+ hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
+ CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr);
+ clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq);
+ hpetp->hp_clocksource = &clocksource_hpet;
+ hpet_clocksource = &clocksource_hpet;
+ }
+#endif
+
+ return 0;
+}
+
+static acpi_status hpet_resources(struct acpi_resource *res, void *data)
+{
+ struct hpet_data *hdp;
+ acpi_status status;
+ struct acpi_resource_address64 addr;
+
+ hdp = data;
+
+ status = acpi_resource_to_address64(res, &addr);
+
+ if (ACPI_SUCCESS(status)) {
+ hdp->hd_phys_address = addr.minimum;
+ hdp->hd_address = ioremap(addr.minimum, addr.address_length);
+
+ if (hpet_is_known(hdp)) {
+ iounmap(hdp->hd_address);
+ return AE_ALREADY_EXISTS;
+ }
+ } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
+ struct acpi_resource_fixed_memory32 *fixmem32;
+
+ fixmem32 = &res->data.fixed_memory32;
+ if (!fixmem32)
+ return AE_NO_MEMORY;
+
+ hdp->hd_phys_address = fixmem32->address;
+ hdp->hd_address = ioremap(fixmem32->address,
+ HPET_RANGE_SIZE);
+
+ if (hpet_is_known(hdp)) {
+ iounmap(hdp->hd_address);
+ return AE_ALREADY_EXISTS;
+ }
+ } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
+ struct acpi_resource_extended_irq *irqp;
+ int i, irq;
+
+ irqp = &res->data.extended_irq;
+
+ for (i = 0; i < irqp->interrupt_count; i++) {
+ irq = acpi_register_gsi(NULL, irqp->interrupts[i],
+ irqp->triggering, irqp->polarity);
+ if (irq < 0)
+ return AE_ERROR;
+
+ hdp->hd_irq[hdp->hd_nirqs] = irq;
+ hdp->hd_nirqs++;
+ }
+ }
+
+ return AE_OK;
+}
+
+static int hpet_acpi_add(struct acpi_device *device)
+{
+ acpi_status result;
+ struct hpet_data data;
+
+ memset(&data, 0, sizeof(data));
+
+ result =
+ acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+ hpet_resources, &data);
+
+ if (ACPI_FAILURE(result))
+ return -ENODEV;
+
+ if (!data.hd_address || !data.hd_nirqs) {
+ if (data.hd_address)
+ iounmap(data.hd_address);
+		printk(KERN_DEBUG "%s: no address or irqs in _CRS\n",
+		       __func__);
+ return -ENODEV;
+ }
+
+ return hpet_alloc(&data);
+}
+
+static int hpet_acpi_remove(struct acpi_device *device, int type)
+{
+ /* XXX need to unregister clocksource, dealloc mem, etc */
+ return -EINVAL;
+}
+
+static const struct acpi_device_id hpet_device_ids[] = {
+ {"PNP0103", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, hpet_device_ids);
+
+static struct acpi_driver hpet_acpi_driver = {
+ .name = "hpet",
+ .ids = hpet_device_ids,
+ .ops = {
+ .add = hpet_acpi_add,
+ .remove = hpet_acpi_remove,
+ },
+};
+
+static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };
+
+static int __init hpet_init(void)
+{
+ int result;
+
+ result = misc_register(&hpet_misc);
+ if (result < 0)
+ return -ENODEV;
+
+ sysctl_header = register_sysctl_table(dev_root);
+
+ result = acpi_bus_register_driver(&hpet_acpi_driver);
+ if (result < 0) {
+ if (sysctl_header)
+ unregister_sysctl_table(sysctl_header);
+ misc_deregister(&hpet_misc);
+ return result;
+ }
+
+ return 0;
+}
+
+static void __exit hpet_exit(void)
+{
+ acpi_bus_unregister_driver(&hpet_acpi_driver);
+
+ if (sysctl_header)
+ unregister_sysctl_table(sysctl_header);
+ misc_deregister(&hpet_misc);
+}
+
+module_init(hpet_init);
+module_exit(hpet_exit);
+MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
new file mode 100644
index 00000000..8dc927f9
--- /dev/null
+++ b/drivers/char/hw_random/Kconfig
@@ -0,0 +1,224 @@
+#
+# Hardware Random Number Generator (RNG) configuration
+#
+
+config HW_RANDOM
+ tristate "Hardware Random Number Generator Core support"
+ default m
+ ---help---
+ Hardware Random Number Generator Core infrastructure.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rng-core. This provides a device
+ that's usually called /dev/hw_random, and which exposes one
+ of possibly several hardware random number generators.
+
+ These hardware random number generators do not feed directly
+ into the kernel's random number generator. That is usually
+ handled by the "rngd" daemon. Documentation/hw_random.txt
+ has more information.
+
+ If unsure, say Y.
+
+config HW_RANDOM_TIMERIOMEM
+ tristate "Timer IOMEM HW Random Number Generator support"
+ depends on HW_RANDOM && HAS_IOMEM
+ ---help---
+ This driver provides kernel-side support for a generic Random
+ Number Generator used by reading a 'dumb' iomem address that
+ is to be read no faster than, for example, once a second;
+ the default FPGA bitstream on the TS-7800 has such functionality.
+
+ To compile this driver as a module, choose M here: the
+ module will be called timeriomem-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_INTEL
+ tristate "Intel HW Random Number Generator support"
+ depends on HW_RANDOM && (X86 || IA64) && PCI
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Intel i8xx-based motherboards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called intel-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_AMD
+ tristate "AMD HW Random Number Generator support"
+ depends on HW_RANDOM && (X86 || PPC_MAPLE) && PCI
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on AMD 76x-based motherboards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called amd-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_GEODE
+ tristate "AMD Geode HW Random Number Generator support"
+ depends on HW_RANDOM && X86_32 && PCI
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on the AMD Geode LX.
+
+ To compile this driver as a module, choose M here: the
+ module will be called geode-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_N2RNG
+ tristate "Niagara2 Random Number Generator support"
+ depends on HW_RANDOM && SPARC64
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Niagara2 cpus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called n2-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_VIA
+ tristate "VIA HW Random Number Generator support"
+ depends on HW_RANDOM && X86
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on VIA based motherboards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called via-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_IXP4XX
+ tristate "Intel IXP4xx NPU HW Random Number Generator support"
+ depends on HW_RANDOM && ARCH_IXP4XX
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random
+ Number Generator hardware found on the Intel IXP4xx NPU.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ixp4xx-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_OMAP
+ tristate "OMAP Random Number Generator support"
+ depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP2)
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on OMAP16xx and OMAP24xx multimedia
+ processors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called omap-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_OCTEON
+ tristate "Octeon Random Number Generator support"
+ depends on HW_RANDOM && CPU_CAVIUM_OCTEON
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Octeon processors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called octeon-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_PASEMI
+ tristate "PA Semi HW Random Number Generator support"
+ depends on HW_RANDOM && PPC_PASEMI
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on PA Semi PWRficient SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pasemi-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_VIRTIO
+ tristate "VirtIO Random Number Generator support"
+ depends on HW_RANDOM && VIRTIO
+ ---help---
+ This driver provides kernel-side support for the virtual Random Number
+ Generator hardware.
+
+ To compile this driver as a module, choose M here: the
+ module will be called virtio-rng. If unsure, say N.
+
+config HW_RANDOM_TX4939
+ tristate "TX4939 Random Number Generator support"
+ depends on HW_RANDOM && SOC_TX4939
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on TX4939 SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tx4939-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_MXC_RNGA
+ tristate "Freescale i.MX RNGA Random Number Generator"
+ depends on HW_RANDOM && ARCH_HAS_RNGA
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Freescale i.MX processors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mxc-rnga.
+
+ If unsure, say Y.
+
+config HW_RANDOM_FSL_RNGC
+ tristate "Freescale RNGC Random Number Generator"
+ depends on HW_RANDOM && ARCH_HAS_RNGC && !MXC_SECURITY_RNG
+ ---help---
+ This driver provides kernel-side support for the Random Number
+	  Generator (RNGB and RNGC) hardware found on Freescale i.MX processors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fsl-rngc.
+
+ If unsure, say Y.
+
+config HW_RANDOM_NOMADIK
+ tristate "ST-Ericsson Nomadik Random Number Generator support"
+ depends on HW_RANDOM && PLAT_NOMADIK
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on ST-Ericsson SoCs (8815 and 8500).
+
+ To compile this driver as a module, choose M here: the
+ module will be called nomadik-rng.
+
+ If unsure, say Y.
+
+config HW_RANDOM_PICOXCELL
+ tristate "Picochip picoXcell true random number generator support"
+ depends on HW_RANDOM && ARCH_PICOXCELL && PICOXCELL_PC3X3
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Picochip PC3x3 and later devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called picoxcell-rng.
+
+ If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
new file mode 100644
index 00000000..d57e03c5
--- /dev/null
+++ b/drivers/char/hw_random/Makefile
@@ -0,0 +1,23 @@
+#
+# Makefile for HW Random Number Generator (RNG) device drivers.
+#
+
+obj-$(CONFIG_HW_RANDOM) += rng-core.o
+rng-core-y := core.o
+obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
+obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
+obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
+obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
+obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
+n2-rng-y := n2-drv.o n2-asm.o
+obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
+obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
+obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
+obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
+obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
+obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
+obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
+obj-$(CONFIG_HW_RANDOM_FSL_RNGC) += fsl-rngc.o
+obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
+obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
+obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
new file mode 100644
index 00000000..c6af0386
--- /dev/null
+++ b/drivers/char/hw_random/amd-rng.c
@@ -0,0 +1,169 @@
+/*
+ * RNG driver for AMD RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+
+#define PFX KBUILD_MODNAME ": "
+
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE. We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static const struct pci_device_id pci_tbl[] = {
+ { PCI_VDEVICE(AMD, 0x7443), 0, },
+ { PCI_VDEVICE(AMD, 0x746b), 0, },
+ { 0, }, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+static struct pci_dev *amd_pdev;
+
+
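+/*
+ * Poll the data-available bit (PMIO base + 0xF4, bit 0).  When wait is
+ * set, retry up to 20 times with 10us delays, ~200us worst case.
+ */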
+static int amd_rng_data_present(struct hwrng *rng, int wait)
+{
+ u32 pmbase = (u32)rng->priv;
+ int data, i;
+
+ for (i = 0; i < 20; i++) {
+ data = !!(inl(pmbase + 0xF4) & 1);
+ if (data || !wait)
+ break;
+ udelay(10);
+ }
+ return data;
+}
+
+static int amd_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ u32 pmbase = (u32)rng->priv;
+
+ *data = inl(pmbase + 0xF0);
+
+ return 4;
+}
+
+static int amd_rng_init(struct hwrng *rng)
+{
+ u8 rnen;
+
+ pci_read_config_byte(amd_pdev, 0x40, &rnen);
+ rnen |= (1 << 7); /* RNG on */
+ pci_write_config_byte(amd_pdev, 0x40, rnen);
+
+ pci_read_config_byte(amd_pdev, 0x41, &rnen);
+ rnen |= (1 << 7); /* PMIO enable */
+ pci_write_config_byte(amd_pdev, 0x41, rnen);
+
+ return 0;
+}
+
+static void amd_rng_cleanup(struct hwrng *rng)
+{
+ u8 rnen;
+
+ pci_read_config_byte(amd_pdev, 0x40, &rnen);
+ rnen &= ~(1 << 7); /* RNG off */
+ pci_write_config_byte(amd_pdev, 0x40, rnen);
+}
+
+
+static struct hwrng amd_rng = {
+ .name = "amd",
+ .init = amd_rng_init,
+ .cleanup = amd_rng_cleanup,
+ .data_present = amd_rng_data_present,
+ .data_read = amd_rng_data_read,
+};
+
+
+static int __init mod_init(void)
+{
+ int err = -ENODEV;
+ struct pci_dev *pdev = NULL;
+ const struct pci_device_id *ent;
+ u32 pmbase;
+
+ for_each_pci_dev(pdev) {
+ ent = pci_match_id(pci_tbl, pdev);
+ if (ent)
+ goto found;
+ }
+ /* Device not found. */
+ goto out;
+
+found:
+ err = pci_read_config_dword(pdev, 0x58, &pmbase);
+ if (err)
+ goto out;
+ err = -EIO;
+ pmbase &= 0x0000FF00;
+ if (pmbase == 0)
+ goto out;
+ if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG")) {
+ dev_err(&pdev->dev, "AMD HWRNG region 0x%x already in use!\n",
+ pmbase + 0xF0);
+ err = -EBUSY;
+ goto out;
+ }
+ amd_rng.priv = (unsigned long)pmbase;
+ amd_pdev = pdev;
+
+ printk(KERN_INFO "AMD768 RNG detected\n");
+ err = hwrng_register(&amd_rng);
+ if (err) {
+ printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ err);
+ release_region(pmbase + 0xF0, 8);
+ goto out;
+ }
+out:
+ return err;
+}
+
+static void __exit mod_exit(void)
+{
+ u32 pmbase = (unsigned long)amd_rng.priv;
+	/* unregister first so no reader can touch the region after release */
+	hwrng_unregister(&amd_rng);
+	release_region(pmbase + 0xF0, 8);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_AUTHOR("The Linux Kernel team");
+MODULE_DESCRIPTION("H/W RNG driver for AMD chipsets");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
new file mode 100644
index 00000000..2016aad8
--- /dev/null
+++ b/drivers/char/hw_random/core.c
@@ -0,0 +1,372 @@
+/*
+ Added support for the AMD Geode LX RNG
+ (c) Copyright 2004-2005 Advanced Micro Devices, Inc.
+
+ derived from
+
+ Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+
+ derived from
+
+ Hardware driver for the AMD 768 Random Number Generator (RNG)
+ (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
+
+ derived from
+
+ Hardware driver for Intel i810 Random Number Generator (RNG)
+ Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+
+ Added generic RNG API
+ Copyright 2006 Michael Buesch <mbuesch@freenet.de>
+ Copyright 2005 (c) MontaVista Software, Inc.
+
+ Please read Documentation/hw_random.txt for details on use.
+
+ ----------------------------------------------------------
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ */
+
+
+#include <linux/device.h>
+#include <linux/hw_random.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/delay.h>
+#include <asm/uaccess.h>
+
+
+#define RNG_MODULE_NAME "hw_random"
+#define PFX RNG_MODULE_NAME ": "
+#define RNG_MISCDEV_MINOR 183 /* official */
+
+
+static struct hwrng *current_rng;
+static LIST_HEAD(rng_list);
+static DEFINE_MUTEX(rng_mutex);
+static int data_avail;
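+/* staging buffer: at least 32 bytes and cacheline-aligned */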
+static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES]
+ __cacheline_aligned;
+
+static inline int hwrng_init(struct hwrng *rng)
+{
+ if (!rng->init)
+ return 0;
+ return rng->init(rng);
+}
+
+static inline void hwrng_cleanup(struct hwrng *rng)
+{
+ if (rng && rng->cleanup)
+ rng->cleanup(rng);
+}
+
+static int rng_dev_open(struct inode *inode, struct file *filp)
+{
+ /* enforce read-only access to this chrdev */
+ if ((filp->f_mode & FMODE_READ) == 0)
+ return -EINVAL;
+ if (filp->f_mode & FMODE_WRITE)
+ return -EINVAL;
+ return 0;
+}
+
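+/*
+ * Fetch bytes from the backend: prefer the bulk ->read() hook if the
+ * driver provides one; otherwise poll ->data_present() (when present)
+ * and pull up to 4 bytes through ->data_read().
+ */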
+static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+			       int wait)
+{
+ int present;
+
+ if (rng->read)
+ return rng->read(rng, (void *)buffer, size, wait);
+
+ if (rng->data_present)
+ present = rng->data_present(rng, wait);
+ else
+ present = 1;
+
+ if (present)
+ return rng->data_read(rng, (u32 *)buffer);
+
+ return 0;
+}
+
+static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ size_t size, loff_t *offp)
+{
+ ssize_t ret = 0;
+ int err = 0;
+ int bytes_read, len;
+
+ while (size) {
+ if (mutex_lock_interruptible(&rng_mutex)) {
+ err = -ERESTARTSYS;
+ goto out;
+ }
+
+ if (!current_rng) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+
+ if (!data_avail) {
+ bytes_read = rng_get_data(current_rng, rng_buffer,
+ sizeof(rng_buffer),
+ !(filp->f_flags & O_NONBLOCK));
+ if (bytes_read < 0) {
+ err = bytes_read;
+ goto out_unlock;
+ }
+ data_avail = bytes_read;
+ }
+
+ if (!data_avail) {
+ if (filp->f_flags & O_NONBLOCK) {
+ err = -EAGAIN;
+ goto out_unlock;
+ }
+ } else {
+ len = data_avail;
+ if (len > size)
+ len = size;
+
+ data_avail -= len;
+
+ if (copy_to_user(buf + ret, rng_buffer + data_avail,
+ len)) {
+ err = -EFAULT;
+ goto out_unlock;
+ }
+
+ size -= len;
+ ret += len;
+ }
+
+ mutex_unlock(&rng_mutex);
+
+ if (need_resched())
+ schedule_timeout_interruptible(1);
+
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ goto out;
+ }
+ }
+out:
+ return ret ? : err;
+out_unlock:
+ mutex_unlock(&rng_mutex);
+ goto out;
+}
+
+
+static const struct file_operations rng_chrdev_ops = {
+ .owner = THIS_MODULE,
+ .open = rng_dev_open,
+ .read = rng_dev_read,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice rng_miscdev = {
+ .minor = RNG_MISCDEV_MINOR,
+ .name = RNG_MODULE_NAME,
+ .nodename = "hwrng",
+ .fops = &rng_chrdev_ops,
+};
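+/*
+ * Userspace reads raw entropy from this misc device, e.g. (assuming
+ * the usual /dev/hwrng node):
+ *
+ *	dd if=/dev/hwrng bs=4 count=16 | od -An -tx4
+ */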
+
+
+static ssize_t hwrng_attr_current_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ int err;
+ struct hwrng *rng;
+
+ err = mutex_lock_interruptible(&rng_mutex);
+ if (err)
+ return -ERESTARTSYS;
+ err = -ENODEV;
+ list_for_each_entry(rng, &rng_list, list) {
+ if (strcmp(rng->name, buf) == 0) {
+ if (rng == current_rng) {
+ err = 0;
+ break;
+ }
+ err = hwrng_init(rng);
+ if (err)
+ break;
+ hwrng_cleanup(current_rng);
+ current_rng = rng;
+ err = 0;
+ break;
+ }
+ }
+ mutex_unlock(&rng_mutex);
+
+ return err ? : len;
+}
+
+static ssize_t hwrng_attr_current_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err;
+ ssize_t ret;
+ const char *name = "none";
+
+ err = mutex_lock_interruptible(&rng_mutex);
+ if (err)
+ return -ERESTARTSYS;
+ if (current_rng)
+ name = current_rng->name;
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
+ mutex_unlock(&rng_mutex);
+
+ return ret;
+}
+
+static ssize_t hwrng_attr_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err;
+ ssize_t ret = 0;
+ struct hwrng *rng;
+
+ err = mutex_lock_interruptible(&rng_mutex);
+ if (err)
+ return -ERESTARTSYS;
+ buf[0] = '\0';
+ list_for_each_entry(rng, &rng_list, list) {
+ strncat(buf, rng->name, PAGE_SIZE - ret - 1);
+ ret += strlen(rng->name);
+ strncat(buf, " ", PAGE_SIZE - ret - 1);
+ ret++;
+ }
+ strncat(buf, "\n", PAGE_SIZE - ret - 1);
+ ret++;
+ mutex_unlock(&rng_mutex);
+
+ return ret;
+}
+
+static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
+ hwrng_attr_current_show,
+ hwrng_attr_current_store);
+static DEVICE_ATTR(rng_available, S_IRUGO,
+ hwrng_attr_available_show,
+ NULL);
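+/*
+ * The attributes above let userspace inspect and switch the active
+ * backend, typically via:
+ *
+ *	cat /sys/class/misc/hw_random/rng_available
+ *	echo -n intel > /sys/class/misc/hw_random/rng_current
+ */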
+
+
+static void unregister_miscdev(void)
+{
+ device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available);
+ device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
+ misc_deregister(&rng_miscdev);
+}
+
+static int register_miscdev(void)
+{
+ int err;
+
+ err = misc_register(&rng_miscdev);
+ if (err)
+ goto out;
+ err = device_create_file(rng_miscdev.this_device,
+ &dev_attr_rng_current);
+ if (err)
+ goto err_misc_dereg;
+ err = device_create_file(rng_miscdev.this_device,
+ &dev_attr_rng_available);
+ if (err)
+ goto err_remove_current;
+out:
+ return err;
+
+err_remove_current:
+ device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
+err_misc_dereg:
+ misc_deregister(&rng_miscdev);
+ goto out;
+}
+
+int hwrng_register(struct hwrng *rng)
+{
+ int must_register_misc;
+ int err = -EINVAL;
+ struct hwrng *old_rng, *tmp;
+
+ if (rng->name == NULL ||
+ (rng->data_read == NULL && rng->read == NULL))
+ goto out;
+
+ mutex_lock(&rng_mutex);
+
+ /* Must not register two RNGs with the same name. */
+ err = -EEXIST;
+ list_for_each_entry(tmp, &rng_list, list) {
+ if (strcmp(tmp->name, rng->name) == 0)
+ goto out_unlock;
+ }
+
+ must_register_misc = (current_rng == NULL);
+ old_rng = current_rng;
+ if (!old_rng) {
+ err = hwrng_init(rng);
+ if (err)
+ goto out_unlock;
+ current_rng = rng;
+ }
+ err = 0;
+ if (must_register_misc) {
+ err = register_miscdev();
+ if (err) {
+ if (!old_rng) {
+ hwrng_cleanup(rng);
+ current_rng = NULL;
+ }
+ goto out_unlock;
+ }
+ }
+ INIT_LIST_HEAD(&rng->list);
+ list_add_tail(&rng->list, &rng_list);
+out_unlock:
+ mutex_unlock(&rng_mutex);
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(hwrng_register);
+
+void hwrng_unregister(struct hwrng *rng)
+{
+ int err;
+
+ mutex_lock(&rng_mutex);
+
+ list_del(&rng->list);
+ if (current_rng == rng) {
+ hwrng_cleanup(rng);
+ if (list_empty(&rng_list)) {
+ current_rng = NULL;
+ } else {
+ current_rng = list_entry(rng_list.prev, struct hwrng, list);
+ err = hwrng_init(current_rng);
+ if (err)
+ current_rng = NULL;
+ }
+ }
+ if (list_empty(&rng_list))
+ unregister_miscdev();
+
+ mutex_unlock(&rng_mutex);
+}
+EXPORT_SYMBOL_GPL(hwrng_unregister);
+
+
+MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/fsl-rngc.c b/drivers/char/hw_random/fsl-rngc.c
new file mode 100644
index 00000000..0ce32996
--- /dev/null
+++ b/drivers/char/hw_random/fsl-rngc.c
@@ -0,0 +1,414 @@
+/*
+ * RNG driver for Freescale RNGC
+ *
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+
+#define RNGC_VERSION_MAJOR3 3
+
+#define RNGC_VERSION_ID 0x0000
+#define RNGC_COMMAND 0x0004
+#define RNGC_CONTROL 0x0008
+#define RNGC_STATUS 0x000C
+#define RNGC_ERROR 0x0010
+#define RNGC_FIFO 0x0014
+#define RNGC_VERIF_CTRL 0x0020
+#define RNGC_OSC_CTRL_COUNT 0x0028
+#define RNGC_OSC_COUNT 0x002C
+#define RNGC_OSC_COUNT_STATUS 0x0030
+
+#define RNGC_VERID_ZEROS_MASK 0x0f000000
+#define RNGC_VERID_RNG_TYPE_MASK 0xf0000000
+#define RNGC_VERID_RNG_TYPE_SHIFT 28
+#define RNGC_VERID_CHIP_VERSION_MASK 0x00ff0000
+#define RNGC_VERID_CHIP_VERSION_SHIFT 16
+#define RNGC_VERID_VERSION_MAJOR_MASK 0x0000ff00
+#define RNGC_VERID_VERSION_MAJOR_SHIFT 8
+#define RNGC_VERID_VERSION_MINOR_MASK 0x000000ff
+#define RNGC_VERID_VERSION_MINOR_SHIFT 0
+
+#define RNGC_CMD_ZEROS_MASK 0xffffff8c
+#define RNGC_CMD_SW_RST 0x00000040
+#define RNGC_CMD_CLR_ERR 0x00000020
+#define RNGC_CMD_CLR_INT 0x00000010
+#define RNGC_CMD_SEED 0x00000002
+#define RNGC_CMD_SELF_TEST 0x00000001
+
+#define RNGC_CTRL_ZEROS_MASK 0xfffffc8c
+#define RNGC_CTRL_CTL_ACC 0x00000200
+#define RNGC_CTRL_VERIF_MODE 0x00000100
+#define RNGC_CTRL_MASK_ERROR 0x00000040
+
+#define RNGC_CTRL_MASK_DONE 0x00000020
+#define RNGC_CTRL_AUTO_SEED 0x00000010
+#define RNGC_CTRL_FIFO_UFLOW_MASK 0x00000003
+#define RNGC_CTRL_FIFO_UFLOW_SHIFT 0
+
+#define RNGC_CTRL_FIFO_UFLOW_ZEROS_ERROR 0
+#define RNGC_CTRL_FIFO_UFLOW_ZEROS_ERROR2 1
+#define RNGC_CTRL_FIFO_UFLOW_BUS_XFR 2
+#define RNGC_CTRL_FIFO_UFLOW_ZEROS_INTR 3
+
+#define RNGC_STATUS_ST_PF_MASK 0x00c00000
+#define RNGC_STATUS_ST_PF_SHIFT 22
+#define RNGC_STATUS_ST_PF_TRNG 0x00800000
+#define RNGC_STATUS_ST_PF_PRNG 0x00400000
+#define RNGC_STATUS_ERROR 0x00010000
+#define RNGC_STATUS_FIFO_SIZE_MASK 0x0000f000
+#define RNGC_STATUS_FIFO_SIZE_SHIFT 12
+#define RNGC_STATUS_FIFO_LEVEL_MASK 0x00000f00
+#define RNGC_STATUS_FIFO_LEVEL_SHIFT 8
+#define RNGC_STATUS_NEXT_SEED_DONE 0x00000040
+#define RNGC_STATUS_SEED_DONE 0x00000020
+#define RNGC_STATUS_ST_DONE 0x00000010
+#define RNGC_STATUS_RESEED 0x00000008
+#define RNGC_STATUS_SLEEP 0x00000004
+#define RNGC_STATUS_BUSY 0x00000002
+#define RNGC_STATUS_SEC_STATE 0x00000001
+
+#define RNGC_ERROR_STATUS_ZEROS_MASK 0xffffffc0
+#define RNGC_ERROR_STATUS_BAD_KEY 0x00000040
+#define RNGC_ERROR_STATUS_RAND_ERR 0x00000020
+#define RNGC_ERROR_STATUS_FIFO_ERR 0x00000010
+#define RNGC_ERROR_STATUS_STAT_ERR 0x00000008
+#define RNGC_ERROR_STATUS_ST_ERR 0x00000004
+#define RNGC_ERROR_STATUS_OSC_ERR 0x00000002
+#define RNGC_ERROR_STATUS_LFSR_ERR 0x00000001
+
+#define RNG_ADDR_RANGE 0x34
+
+static DECLARE_COMPLETION(rng_self_testing);
+static DECLARE_COMPLETION(rng_seed_done);
+
+static struct platform_device *rng_dev;
+
+static int irq_rng;
+
+static int fsl_rngc_data_present(struct hwrng *rng, int wait)
+{
+ int level;
+ u32 rngc_base = (u32) rng->priv;
+
+	/* how many random numbers are in the FIFO? [0-16] */
+ level = (__raw_readl(rngc_base + RNGC_STATUS) &
+ RNGC_STATUS_FIFO_LEVEL_MASK) >> RNGC_STATUS_FIFO_LEVEL_SHIFT;
+
+ return level > 0 ? 1 : 0;
+}
+
+static int fsl_rngc_data_read(struct hwrng *rng, u32 * data)
+{
+ int err;
+ u32 rngc_base = (u32) rng->priv;
+
+ /* retrieve a random number from FIFO */
+ *data = __raw_readl(rngc_base + RNGC_FIFO);
+
+	/* was there an error while reading this random number? */
+ err = __raw_readl(rngc_base + RNGC_STATUS) & RNGC_STATUS_ERROR;
+
+	/* if an error occurred, don't return the random number */
+ return err ? 0 : 4;
+}
+
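+/*
+ * The RNGC interrupts on seed-done, self-test-done and error; ack the
+ * condition and complete the matching completion so fsl_rngc_init()
+ * can sleep in wait_for_completion().
+ */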
+static irqreturn_t rngc_irq(int irq, void *dev)
+{
+ int handled = 0;
+ u32 rngc_base = (u32) dev;
+
+ /* is the seed creation done? */
+ if (__raw_readl(rngc_base + RNGC_STATUS) & RNGC_STATUS_SEED_DONE) {
+ complete(&rng_seed_done);
+ __raw_writel(RNGC_CMD_CLR_INT | RNGC_CMD_CLR_ERR,
+ rngc_base + RNGC_COMMAND);
+ handled = 1;
+ }
+
+ /* is the self test done? */
+ if (__raw_readl(rngc_base + RNGC_STATUS) & RNGC_STATUS_ST_DONE) {
+ complete(&rng_self_testing);
+ __raw_writel(RNGC_CMD_CLR_INT | RNGC_CMD_CLR_ERR,
+ rngc_base + RNGC_COMMAND);
+ handled = 1;
+ }
+
+ /* is there any error? */
+ if (__raw_readl(rngc_base + RNGC_STATUS) & RNGC_STATUS_ERROR) {
+ /* clear interrupt */
+ __raw_writel(RNGC_CMD_CLR_INT | RNGC_CMD_CLR_ERR,
+ rngc_base + RNGC_COMMAND);
+ handled = 1;
+ }
+
+	return IRQ_RETVAL(handled);
+}
+
+static int fsl_rngc_init(struct hwrng *rng)
+{
+ int err;
+ u32 cmd, ctrl, osc, rngc_base = (u32) rng->priv;
+
+ INIT_COMPLETION(rng_self_testing);
+ INIT_COMPLETION(rng_seed_done);
+
+ err = __raw_readl(rngc_base + RNGC_STATUS) & RNGC_STATUS_ERROR;
+ if (err) {
+		/* is this a bad key error? */
+ if (__raw_readl(rngc_base + RNGC_ERROR) &
+ RNGC_ERROR_STATUS_BAD_KEY) {
+ dev_err(&rng_dev->dev, "Can't start, Bad Keys.\n");
+ return -EIO;
+ }
+ }
+
+ /* mask all interrupts, will be unmasked soon */
+ ctrl = __raw_readl(rngc_base + RNGC_CONTROL);
+ __raw_writel(ctrl | RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR,
+ rngc_base + RNGC_CONTROL);
+
+ /* verify if oscillator is working */
+ osc = __raw_readl(rngc_base + RNGC_ERROR);
+ if (osc & RNGC_ERROR_STATUS_OSC_ERR) {
+ dev_err(&rng_dev->dev, "RNGC Oscillator is dead!\n");
+ return -EIO;
+ }
+
+ err = request_irq(irq_rng, rngc_irq, 0, "fsl_rngc", (void *)rng->priv);
+ if (err) {
+ dev_err(&rng_dev->dev, "Can't get interrupt working.\n");
+ return -EIO;
+ }
+
+	/* run the self test; repeat until it succeeds */
+ do {
+ /* clear error */
+ cmd = __raw_readl(rngc_base + RNGC_COMMAND);
+ __raw_writel(cmd | RNGC_CMD_CLR_ERR, rngc_base + RNGC_COMMAND);
+
+		/* unmask all interrupts */
+ ctrl = __raw_readl(rngc_base + RNGC_CONTROL);
+ __raw_writel(ctrl & ~(RNGC_CTRL_MASK_DONE |
+ RNGC_CTRL_MASK_ERROR), rngc_base + RNGC_CONTROL);
+
+ /* run self test */
+ cmd = __raw_readl(rngc_base + RNGC_COMMAND);
+ __raw_writel(cmd | RNGC_CMD_SELF_TEST,
+ rngc_base + RNGC_COMMAND);
+
+ wait_for_completion(&rng_self_testing);
+
+ } while (__raw_readl(rngc_base + RNGC_ERROR) &
+ RNGC_ERROR_STATUS_ST_ERR);
+
+	/* clear the interrupt (possibly redundant here) */
+ __raw_writel(RNGC_CMD_CLR_INT | RNGC_CMD_CLR_ERR,
+ rngc_base + RNGC_COMMAND);
+
+	/* create a seed; repeat while a statistical error is reported */
+ do {
+ /* clear error */
+ cmd = __raw_readl(rngc_base + RNGC_COMMAND);
+ __raw_writel(cmd | RNGC_CMD_CLR_ERR, rngc_base + RNGC_COMMAND);
+
+ /* seed creation */
+ cmd = __raw_readl(rngc_base + RNGC_COMMAND);
+ __raw_writel(cmd | RNGC_CMD_SEED, rngc_base + RNGC_COMMAND);
+
+ wait_for_completion(&rng_seed_done);
+
+ } while (__raw_readl(rngc_base + RNGC_ERROR) &
+ RNGC_ERROR_STATUS_STAT_ERR);
+
+ err = __raw_readl(rngc_base + RNGC_ERROR) &
+ (RNGC_ERROR_STATUS_STAT_ERR |
+ RNGC_ERROR_STATUS_RAND_ERR |
+ RNGC_ERROR_STATUS_FIFO_ERR |
+ RNGC_ERROR_STATUS_ST_ERR |
+ RNGC_ERROR_STATUS_OSC_ERR |
+ RNGC_ERROR_STATUS_LFSR_ERR);
+
+ if (err) {
+ dev_err(&rng_dev->dev, "FSL RNGC appears inoperable.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static struct hwrng fsl_rngc = {
+ .name = "fsl-rngc",
+ .init = fsl_rngc_init,
+ .data_present = fsl_rngc_data_present,
+ .data_read = fsl_rngc_data_read
+};
+
+static int __init fsl_rngc_probe(struct platform_device *pdev)
+{
+ int err = -ENODEV;
+ struct clk *clk;
+ struct resource *res, *mem;
+ void __iomem *rngc_base = NULL;
+
+ if (rng_dev)
+ return -EBUSY;
+
+ clk = clk_get(&pdev->dev, "rng_clk");
+
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Can not get rng_clk\n");
+ err = PTR_ERR(clk);
+ return err;
+ }
+
+ clk_enable(clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res)
+ return -ENOENT;
+
+	mem = request_mem_region(res->start, resource_size(res), pdev->name);
+
+ if (mem == NULL)
+ return -EBUSY;
+
+	dev_set_drvdata(&pdev->dev, mem);
+	rngc_base = ioremap(res->start, resource_size(res));
+	if (!rngc_base)
+		return -ENOMEM;
+
+ fsl_rngc.priv = (unsigned long)rngc_base;
+
+ irq_rng = platform_get_irq(pdev, 0);
+
+ err = hwrng_register(&fsl_rngc);
+ if (err) {
+ dev_err(&pdev->dev, "FSL RNGC registering failed (%d)\n", err);
+ return err;
+ }
+
+ rng_dev = pdev;
+
+ dev_info(&pdev->dev, "FSL RNGC Registered.\n");
+
+ return 0;
+}
+
+static int __exit fsl_rngc_remove(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct resource *mem = dev_get_drvdata(&pdev->dev);
+ void __iomem *rngc_base = (void __iomem *)fsl_rngc.priv;
+
+ clk = clk_get(&pdev->dev, "rng_clk");
+
+ if (IS_ERR(clk))
+ dev_err(&pdev->dev, "Can not get rng_clk\n");
+ else
+ clk_disable(clk);
+
+ hwrng_unregister(&fsl_rngc);
+
+ release_resource(mem);
+
+ iounmap(rngc_base);
+
+ return 0;
+}
+
+static int fsl_rngc_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+#ifdef CONFIG_PM
+ struct clk *clk = clk_get(&pdev->dev, "rng_clk");
+
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Can not get rng_clk\n");
+ return PTR_ERR(clk);
+ }
+
+ clk_disable(clk);
+#endif
+
+ return 0;
+}
+
+static int fsl_rngc_resume(struct platform_device *pdev)
+{
+#ifdef CONFIG_PM
+ struct clk *clk = clk_get(&pdev->dev, "rng_clk");
+
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Can not get rng_clk\n");
+ return PTR_ERR(clk);
+ }
+
+ clk_enable(clk);
+#endif
+
+ return 0;
+}
+
+static struct platform_driver fsl_rngc_driver = {
+ .driver = {
+ .name = "fsl_rngc",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(fsl_rngc_remove),
+ .suspend = fsl_rngc_suspend,
+ .resume = fsl_rngc_resume,
+};
+
+static int __init mod_init(void)
+{
+ return platform_driver_probe(&fsl_rngc_driver, fsl_rngc_probe);
+}
+
+static void __exit mod_exit(void)
+{
+ platform_driver_unregister(&fsl_rngc_driver);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("H/W RNGC driver for i.MX");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
new file mode 100644
index 00000000..4c4d4e14
--- /dev/null
+++ b/drivers/char/hw_random/geode-rng.c
@@ -0,0 +1,139 @@
+/*
+ * RNG driver for AMD Geode RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+
+#define PFX KBUILD_MODNAME ": "
+
+#define GEODE_RNG_DATA_REG 0x50
+#define GEODE_RNG_STATUS_REG 0x54
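+/*
+ * Both registers sit in the Geode LX security block behind PCI BAR0;
+ * mod_init() maps 0x58 bytes so these offsets are covered.
+ */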
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE. We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static const struct pci_device_id pci_tbl[] = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, },
+ { 0, }, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+
+static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+
+ *data = readl(mem + GEODE_RNG_DATA_REG);
+
+ return 4;
+}
+
+static int geode_rng_data_present(struct hwrng *rng, int wait)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+ int data, i;
+
+ for (i = 0; i < 20; i++) {
+ data = !!(readl(mem + GEODE_RNG_STATUS_REG));
+ if (data || !wait)
+ break;
+ udelay(10);
+ }
+ return data;
+}
+
+
+static struct hwrng geode_rng = {
+ .name = "geode",
+ .data_present = geode_rng_data_present,
+ .data_read = geode_rng_data_read,
+};
+
+
+static int __init mod_init(void)
+{
+ int err = -ENODEV;
+ struct pci_dev *pdev = NULL;
+ const struct pci_device_id *ent;
+ void __iomem *mem;
+ unsigned long rng_base;
+
+ for_each_pci_dev(pdev) {
+ ent = pci_match_id(pci_tbl, pdev);
+ if (ent)
+ goto found;
+ }
+ /* Device not found. */
+ goto out;
+
+found:
+ rng_base = pci_resource_start(pdev, 0);
+ if (rng_base == 0)
+ goto out;
+ err = -ENOMEM;
+ mem = ioremap(rng_base, 0x58);
+ if (!mem)
+ goto out;
+ geode_rng.priv = (unsigned long)mem;
+
+ printk(KERN_INFO "AMD Geode RNG detected\n");
+ err = hwrng_register(&geode_rng);
+ if (err) {
+ printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ err);
+ goto err_unmap;
+ }
+out:
+ return err;
+
+err_unmap:
+ iounmap(mem);
+ goto out;
+}
+
+static void __exit mod_exit(void)
+{
+ void __iomem *mem = (void __iomem *)geode_rng.priv;
+
+ hwrng_unregister(&geode_rng);
+ iounmap(mem);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
new file mode 100644
index 00000000..86fe45c1
--- /dev/null
+++ b/drivers/char/hw_random/intel-rng.c
@@ -0,0 +1,419 @@
+/*
+ * RNG driver for Intel RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/stop_machine.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+
+#define PFX KBUILD_MODNAME ": "
+
+/*
+ * RNG registers
+ */
+#define INTEL_RNG_HW_STATUS 0
+#define INTEL_RNG_PRESENT 0x40
+#define INTEL_RNG_ENABLED 0x01
+#define INTEL_RNG_STATUS 1
+#define INTEL_RNG_DATA_PRESENT 0x01
+#define INTEL_RNG_DATA 2
+
+/*
+ * Magic address at which Intel PCI bridges locate the RNG
+ */
+#define INTEL_RNG_ADDR 0xFFBC015F
+#define INTEL_RNG_ADDR_LEN 3
+
+/*
+ * LPC bridge PCI config space registers
+ */
+#define FWH_DEC_EN1_REG_OLD 0xe3
+#define FWH_DEC_EN1_REG_NEW 0xd9 /* high byte of 16-bit register */
+#define FWH_F8_EN_MASK 0x80
+
+#define BIOS_CNTL_REG_OLD 0x4e
+#define BIOS_CNTL_REG_NEW 0xdc
+#define BIOS_CNTL_WRITE_ENABLE_MASK 0x01
+#define BIOS_CNTL_LOCK_ENABLE_MASK 0x02
+
+/*
+ * Magic address at which Intel Firmware Hubs get accessed
+ */
+#define INTEL_FWH_ADDR 0xffff0000
+#define INTEL_FWH_ADDR_LEN 2
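+/*
+ * Writing the command codes below makes the BIOS image vanish from its
+ * normal mapping while the FWH answers; mod_init() therefore runs the
+ * probe sequence under stop_machine().
+ */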
+
+/*
+ * Intel Firmware Hub command codes (write to any address inside the device)
+ */
+#define INTEL_FWH_RESET_CMD 0xff /* aka READ_ARRAY */
+#define INTEL_FWH_READ_ID_CMD 0x90
+
+/*
+ * Intel Firmware Hub Read ID command result addresses
+ */
+#define INTEL_FWH_MANUFACTURER_CODE_ADDRESS 0x000000
+#define INTEL_FWH_DEVICE_CODE_ADDRESS 0x000001
+
+/*
+ * Intel Firmware Hub Read ID command result values
+ */
+#define INTEL_FWH_MANUFACTURER_CODE 0x89
+#define INTEL_FWH_DEVICE_CODE_8M 0xac
+#define INTEL_FWH_DEVICE_CODE_4M 0xad
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE. We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static const struct pci_device_id pci_tbl[] = {
+/* AA
+ { PCI_DEVICE(0x8086, 0x2418) }, */
+ { PCI_DEVICE(0x8086, 0x2410) }, /* AA */
+/* AB
+ { PCI_DEVICE(0x8086, 0x2428) }, */
+ { PCI_DEVICE(0x8086, 0x2420) }, /* AB */
+/* ??
+ { PCI_DEVICE(0x8086, 0x2430) }, */
+/* BAM, CAM, DBM, FBM, GxM
+ { PCI_DEVICE(0x8086, 0x2448) }, */
+ { PCI_DEVICE(0x8086, 0x244c) }, /* BAM */
+ { PCI_DEVICE(0x8086, 0x248c) }, /* CAM */
+ { PCI_DEVICE(0x8086, 0x24cc) }, /* DBM */
+ { PCI_DEVICE(0x8086, 0x2641) }, /* FBM */
+ { PCI_DEVICE(0x8086, 0x27b9) }, /* GxM */
+ { PCI_DEVICE(0x8086, 0x27bd) }, /* GxM DH */
+/* BA, CA, DB, Ex, 6300, Fx, 631x/632x, Gx
+ { PCI_DEVICE(0x8086, 0x244e) }, */
+ { PCI_DEVICE(0x8086, 0x2440) }, /* BA */
+ { PCI_DEVICE(0x8086, 0x2480) }, /* CA */
+ { PCI_DEVICE(0x8086, 0x24c0) }, /* DB */
+ { PCI_DEVICE(0x8086, 0x24d0) }, /* Ex */
+ { PCI_DEVICE(0x8086, 0x25a1) }, /* 6300 */
+ { PCI_DEVICE(0x8086, 0x2640) }, /* Fx */
+ { PCI_DEVICE(0x8086, 0x2670) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2671) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2672) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2673) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2674) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2675) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2676) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2677) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2678) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x2679) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x267a) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x267b) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x267c) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x267d) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x267e) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x267f) }, /* 631x/632x */
+ { PCI_DEVICE(0x8086, 0x27b8) }, /* Gx */
+/* E
+ { PCI_DEVICE(0x8086, 0x245e) }, */
+ { PCI_DEVICE(0x8086, 0x2450) }, /* E */
+ { 0, }, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+static __initdata int no_fwh_detect;
+module_param(no_fwh_detect, int, 0);
+MODULE_PARM_DESC(no_fwh_detect, "Skip FWH detection:\n"
+ " positive value - skip if FWH space locked read-only\n"
+ " negative value - skip always");
+
+static inline u8 hwstatus_get(void __iomem *mem)
+{
+ return readb(mem + INTEL_RNG_HW_STATUS);
+}
+
+static inline u8 hwstatus_set(void __iomem *mem,
+ u8 hw_status)
+{
+ writeb(hw_status, mem + INTEL_RNG_HW_STATUS);
+ return hwstatus_get(mem);
+}
+
+static int intel_rng_data_present(struct hwrng *rng, int wait)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+ int data, i;
+
+ for (i = 0; i < 20; i++) {
+ data = !!(readb(mem + INTEL_RNG_STATUS) &
+ INTEL_RNG_DATA_PRESENT);
+ if (data || !wait)
+ break;
+ udelay(10);
+ }
+ return data;
+}
+
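+/*
+ * The 82802 yields one random byte per read of the data register, so
+ * only the low byte of *data is filled and 1 is returned.
+ */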
+static int intel_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+
+ *data = readb(mem + INTEL_RNG_DATA);
+
+ return 1;
+}
+
+static int intel_rng_init(struct hwrng *rng)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+ u8 hw_status;
+ int err = -EIO;
+
+ hw_status = hwstatus_get(mem);
+ /* turn RNG h/w on, if it's off */
+ if ((hw_status & INTEL_RNG_ENABLED) == 0)
+ hw_status = hwstatus_set(mem, hw_status | INTEL_RNG_ENABLED);
+ if ((hw_status & INTEL_RNG_ENABLED) == 0) {
+ printk(KERN_ERR PFX "cannot enable RNG, aborting\n");
+ goto out;
+ }
+ err = 0;
+out:
+ return err;
+}
+
+static void intel_rng_cleanup(struct hwrng *rng)
+{
+ void __iomem *mem = (void __iomem *)rng->priv;
+ u8 hw_status;
+
+ hw_status = hwstatus_get(mem);
+ if (hw_status & INTEL_RNG_ENABLED)
+ hwstatus_set(mem, hw_status & ~INTEL_RNG_ENABLED);
+ else
+ printk(KERN_WARNING PFX "unusual: RNG already disabled\n");
+}
+
+
+static struct hwrng intel_rng = {
+ .name = "intel",
+ .init = intel_rng_init,
+ .cleanup = intel_rng_cleanup,
+ .data_present = intel_rng_data_present,
+ .data_read = intel_rng_data_read,
+};
+
+struct intel_rng_hw {
+ struct pci_dev *dev;
+ void __iomem *mem;
+ u8 bios_cntl_off;
+ u8 bios_cntl_val;
+ u8 fwh_dec_en1_off;
+ u8 fwh_dec_en1_val;
+};
+
+static int __init intel_rng_hw_init(void *_intel_rng_hw)
+{
+ struct intel_rng_hw *intel_rng_hw = _intel_rng_hw;
+ u8 mfc, dvc;
+
+ /* interrupts disabled in stop_machine call */
+
+ if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK))
+ pci_write_config_byte(intel_rng_hw->dev,
+ intel_rng_hw->fwh_dec_en1_off,
+ intel_rng_hw->fwh_dec_en1_val |
+ FWH_F8_EN_MASK);
+ if (!(intel_rng_hw->bios_cntl_val & BIOS_CNTL_WRITE_ENABLE_MASK))
+ pci_write_config_byte(intel_rng_hw->dev,
+ intel_rng_hw->bios_cntl_off,
+ intel_rng_hw->bios_cntl_val |
+ BIOS_CNTL_WRITE_ENABLE_MASK);
+
+ writeb(INTEL_FWH_RESET_CMD, intel_rng_hw->mem);
+ writeb(INTEL_FWH_READ_ID_CMD, intel_rng_hw->mem);
+ mfc = readb(intel_rng_hw->mem + INTEL_FWH_MANUFACTURER_CODE_ADDRESS);
+ dvc = readb(intel_rng_hw->mem + INTEL_FWH_DEVICE_CODE_ADDRESS);
+ writeb(INTEL_FWH_RESET_CMD, intel_rng_hw->mem);
+
+ if (!(intel_rng_hw->bios_cntl_val &
+ (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK)))
+ pci_write_config_byte(intel_rng_hw->dev,
+ intel_rng_hw->bios_cntl_off,
+ intel_rng_hw->bios_cntl_val);
+ if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK))
+ pci_write_config_byte(intel_rng_hw->dev,
+ intel_rng_hw->fwh_dec_en1_off,
+ intel_rng_hw->fwh_dec_en1_val);
+
+ if (mfc != INTEL_FWH_MANUFACTURER_CODE ||
+ (dvc != INTEL_FWH_DEVICE_CODE_8M &&
+ dvc != INTEL_FWH_DEVICE_CODE_4M)) {
+ printk(KERN_NOTICE PFX "FWH not detected\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init intel_init_hw_struct(struct intel_rng_hw *intel_rng_hw,
+ struct pci_dev *dev)
+{
+ intel_rng_hw->bios_cntl_val = 0xff;
+ intel_rng_hw->fwh_dec_en1_val = 0xff;
+ intel_rng_hw->dev = dev;
+
+ /* Check for Intel 82802 */
+ if (dev->device < 0x2640) {
+ intel_rng_hw->fwh_dec_en1_off = FWH_DEC_EN1_REG_OLD;
+ intel_rng_hw->bios_cntl_off = BIOS_CNTL_REG_OLD;
+ } else {
+ intel_rng_hw->fwh_dec_en1_off = FWH_DEC_EN1_REG_NEW;
+ intel_rng_hw->bios_cntl_off = BIOS_CNTL_REG_NEW;
+ }
+
+ pci_read_config_byte(dev, intel_rng_hw->fwh_dec_en1_off,
+ &intel_rng_hw->fwh_dec_en1_val);
+ pci_read_config_byte(dev, intel_rng_hw->bios_cntl_off,
+ &intel_rng_hw->bios_cntl_val);
+
+ if ((intel_rng_hw->bios_cntl_val &
+ (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))
+ == BIOS_CNTL_LOCK_ENABLE_MASK) {
+ static __initdata /*const*/ char warning[] =
+ KERN_WARNING
+PFX "Firmware space is locked read-only. If you can't or\n"
+PFX "don't want to disable this in firmware setup, and if\n"
+PFX "you are certain that your system has a functional\n"
+PFX "RNG, try using the 'no_fwh_detect' option.\n";
+
+ if (no_fwh_detect)
+ return -ENODEV;
+ printk(warning);
+ return -EBUSY;
+ }
+
+ intel_rng_hw->mem = ioremap_nocache(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN);
+ if (intel_rng_hw->mem == NULL)
+ return -EBUSY;
+
+ return 0;
+}
+
+
+static int __init mod_init(void)
+{
+ int err = -ENODEV;
+ int i;
+ struct pci_dev *dev = NULL;
+	void __iomem *mem = mem;	/* self-init silences a bogus "uninitialized" warning */
+ u8 hw_status;
+ struct intel_rng_hw *intel_rng_hw;
+
+ for (i = 0; !dev && pci_tbl[i].vendor; ++i)
+ dev = pci_get_device(pci_tbl[i].vendor, pci_tbl[i].device,
+ NULL);
+
+ if (!dev)
+ goto out; /* Device not found. */
+
+ if (no_fwh_detect < 0) {
+ pci_dev_put(dev);
+ goto fwh_done;
+ }
+
+ intel_rng_hw = kmalloc(sizeof(*intel_rng_hw), GFP_KERNEL);
+ if (!intel_rng_hw) {
+ pci_dev_put(dev);
+ goto out;
+ }
+
+ err = intel_init_hw_struct(intel_rng_hw, dev);
+ if (err) {
+ pci_dev_put(dev);
+ kfree(intel_rng_hw);
+ if (err == -ENODEV)
+ goto fwh_done;
+ goto out;
+ }
+
+ /*
+ * Since the BIOS code/data is going to disappear from its normal
+ * location with the Read ID command, all activity on the system
+ * must be stopped until the state is back to normal.
+ *
+ * Use stop_machine because IPIs can be blocked by disabling
+ * interrupts.
+ */
+ err = stop_machine(intel_rng_hw_init, intel_rng_hw, NULL);
+ pci_dev_put(dev);
+ iounmap(intel_rng_hw->mem);
+ kfree(intel_rng_hw);
+ if (err)
+ goto out;
+
+fwh_done:
+ err = -ENOMEM;
+ mem = ioremap(INTEL_RNG_ADDR, INTEL_RNG_ADDR_LEN);
+ if (!mem)
+ goto out;
+ intel_rng.priv = (unsigned long)mem;
+
+ /* Check for Random Number Generator */
+ err = -ENODEV;
+ hw_status = hwstatus_get(mem);
+ if ((hw_status & INTEL_RNG_PRESENT) == 0) {
+ iounmap(mem);
+ goto out;
+ }
+
+ printk(KERN_INFO "Intel 82802 RNG detected\n");
+ err = hwrng_register(&intel_rng);
+ if (err) {
+ printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+ err);
+ iounmap(mem);
+ }
+out:
+ return err;
+
+}
+
+static void __exit mod_exit(void)
+{
+ void __iomem *mem = (void __iomem *)intel_rng.priv;
+
+ hwrng_unregister(&intel_rng);
+ iounmap(mem);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c
new file mode 100644
index 00000000..263567f5
--- /dev/null
+++ b/drivers/char/hw_random/ixp4xx-rng.c
@@ -0,0 +1,72 @@
+/*
+ * drivers/char/hw_random/ixp4xx-rng.c
+ *
+ * RNG driver for Intel IXP4xx family of NPUs
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * Fixes by Michael Buesch
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/hw_random.h>
+
+#include <asm/io.h>
+#include <mach/hardware.h>
+
+
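+/*
+ * The IXP4xx RNG is a single memory-mapped 32-bit register; every read
+ * returns a fresh word.  The fixed physical address is mapped once at
+ * init and stashed in rng->priv.
+ */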
+static int ixp4xx_rng_data_read(struct hwrng *rng, u32 *buffer)
+{
+	void __iomem *rng_base = (void __iomem *)rng->priv;
+
+ *buffer = __raw_readl(rng_base);
+
+ return 4;
+}
+
+static struct hwrng ixp4xx_rng_ops = {
+ .name = "ixp4xx",
+ .data_read = ixp4xx_rng_data_read,
+};
+
+static int __init ixp4xx_rng_init(void)
+{
+	void __iomem *rng_base;
+ int err;
+
+ rng_base = ioremap(0x70002100, 4);
+ if (!rng_base)
+ return -ENOMEM;
+ ixp4xx_rng_ops.priv = (unsigned long)rng_base;
+ err = hwrng_register(&ixp4xx_rng_ops);
+ if (err)
+ iounmap(rng_base);
+
+ return err;
+}
+
+static void __exit ixp4xx_rng_exit(void)
+{
+	void __iomem *rng_base = (void __iomem *)ixp4xx_rng_ops.priv;
+
+ hwrng_unregister(&ixp4xx_rng_ops);
+ iounmap(rng_base);
+}
+
+module_init(ixp4xx_rng_init);
+module_exit(ixp4xx_rng_exit);
+
+MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
+MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for IXP4xx");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
new file mode 100644
index 00000000..187c6be8
--- /dev/null
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -0,0 +1,247 @@
+/*
+ * RNG driver for Freescale RNGA
+ *
+ * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Author: Alan Carvalho de Assis <acassis@gmail.com>
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * This driver is based on other RNG drivers.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+
+/* RNGA Registers */
+#define RNGA_CONTROL 0x00
+#define RNGA_STATUS 0x04
+#define RNGA_ENTROPY 0x08
+#define RNGA_OUTPUT_FIFO 0x0c
+#define RNGA_MODE 0x10
+#define RNGA_VERIFICATION_CONTROL 0x14
+#define RNGA_OSC_CONTROL_COUNTER 0x18
+#define RNGA_OSC1_COUNTER 0x1c
+#define RNGA_OSC2_COUNTER 0x20
+#define RNGA_OSC_COUNTER_STATUS 0x24
+
+/* RNGA Registers Range */
+#define RNG_ADDR_RANGE 0x28
+
+/* RNGA Control Register */
+#define RNGA_CONTROL_SLEEP 0x00000010
+#define RNGA_CONTROL_CLEAR_INT 0x00000008
+#define RNGA_CONTROL_MASK_INTS 0x00000004
+#define RNGA_CONTROL_HIGH_ASSURANCE 0x00000002
+#define RNGA_CONTROL_GO 0x00000001
+
+#define RNGA_STATUS_LEVEL_MASK 0x0000ff00
+
+/* RNGA Status Register */
+#define RNGA_STATUS_OSC_DEAD 0x80000000
+#define RNGA_STATUS_SLEEP 0x00000010
+#define RNGA_STATUS_ERROR_INT 0x00000008
+#define RNGA_STATUS_FIFO_UNDERFLOW 0x00000004
+#define RNGA_STATUS_LAST_READ_STATUS 0x00000002
+#define RNGA_STATUS_SECURITY_VIOLATION 0x00000001
+
+static struct platform_device *rng_dev;
+
+static int mxc_rnga_data_present(struct hwrng *rng, int wait)
+{
+ int level;
+ void __iomem *rng_base = (void __iomem *)rng->priv;
+
+	/* how many random numbers are in the FIFO? [0-16] */
+ level = ((__raw_readl(rng_base + RNGA_STATUS) &
+ RNGA_STATUS_LEVEL_MASK) >> 8);
+
+ return level > 0 ? 1 : 0;
+}
+
+static int mxc_rnga_data_read(struct hwrng *rng, u32 * data)
+{
+ int err;
+ u32 ctrl;
+ void __iomem *rng_base = (void __iomem *)rng->priv;
+
+ /* retrieve a random number from FIFO */
+ *data = __raw_readl(rng_base + RNGA_OUTPUT_FIFO);
+
+	/* was there an error while reading this random number? */
+ err = __raw_readl(rng_base + RNGA_STATUS) & RNGA_STATUS_ERROR_INT;
+
+ /* on error: clear the error interrupt and don't return a random number */
+ if (err) {
+ dev_dbg(&rng_dev->dev, "Error while reading random number!\n");
+ ctrl = __raw_readl(rng_base + RNGA_CONTROL);
+ __raw_writel(ctrl | RNGA_CONTROL_CLEAR_INT,
+ rng_base + RNGA_CONTROL);
+ return 0;
+ } else
+ return 4;
+}
+
+static int mxc_rnga_init(struct hwrng *rng)
+{
+ u32 ctrl, osc;
+ void __iomem *rng_base = (void __iomem *)rng->priv;
+
+ /* wake up */
+ ctrl = __raw_readl(rng_base + RNGA_CONTROL);
+ __raw_writel(ctrl & ~RNGA_CONTROL_SLEEP, rng_base + RNGA_CONTROL);
+
+ /* verify if oscillator is working */
+ osc = __raw_readl(rng_base + RNGA_STATUS);
+ if (osc & RNGA_STATUS_OSC_DEAD) {
+ dev_err(&rng_dev->dev, "RNGA Oscillator is dead!\n");
+ return -ENODEV;
+ }
+
+ /* go running */
+ ctrl = __raw_readl(rng_base + RNGA_CONTROL);
+ __raw_writel(ctrl | RNGA_CONTROL_GO, rng_base + RNGA_CONTROL);
+
+ return 0;
+}
+
+static void mxc_rnga_cleanup(struct hwrng *rng)
+{
+ u32 ctrl;
+ void __iomem *rng_base = (void __iomem *)rng->priv;
+
+ ctrl = __raw_readl(rng_base + RNGA_CONTROL);
+
+ /* stop rnga */
+ __raw_writel(ctrl & ~RNGA_CONTROL_GO, rng_base + RNGA_CONTROL);
+}
+
+static struct hwrng mxc_rnga = {
+ .name = "mxc-rnga",
+ .init = mxc_rnga_init,
+ .cleanup = mxc_rnga_cleanup,
+ .data_present = mxc_rnga_data_present,
+ .data_read = mxc_rnga_data_read
+};
+
+static int __init mxc_rnga_probe(struct platform_device *pdev)
+{
+ int err = -ENODEV;
+ struct clk *clk;
+ struct resource *res, *mem;
+ void __iomem *rng_base = NULL;
+
+ if (rng_dev)
+ return -EBUSY;
+
+ clk = clk_get(&pdev->dev, "rng");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Could not get rng_clk!\n");
+ err = PTR_ERR(clk);
+ goto out;
+ }
+
+ clk_enable(clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -ENOENT;
+ goto err_region;
+ }
+
+ mem = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (mem == NULL) {
+ err = -EBUSY;
+ goto err_region;
+ }
+
+ rng_base = ioremap(res->start, resource_size(res));
+ if (!rng_base) {
+ err = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ mxc_rnga.priv = (unsigned long)rng_base;
+
+ err = hwrng_register(&mxc_rnga);
+ if (err) {
+ dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err);
+ goto err_register;
+ }
+
+ rng_dev = pdev;
+
+ dev_info(&pdev->dev, "MXC RNGA Registered.\n");
+
+ return 0;
+
+err_register:
+ iounmap(rng_base);
+ rng_base = NULL;
+
+err_ioremap:
+ release_mem_region(res->start, resource_size(res));
+
+err_region:
+ clk_disable(clk);
+ clk_put(clk);
+
+out:
+ return err;
+}
+
+static int __exit mxc_rnga_remove(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ void __iomem *rng_base = (void __iomem *)mxc_rnga.priv;
+ struct clk *clk = clk_get(&pdev->dev, "rng");
+
+ hwrng_unregister(&mxc_rnga);
+
+ iounmap(rng_base);
+
+ release_mem_region(res->start, resource_size(res));
+
+ clk_disable(clk);
+ clk_put(clk);
+
+ return 0;
+}
+
+static struct platform_driver mxc_rnga_driver = {
+ .driver = {
+ .name = "mxc_rnga",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(mxc_rnga_remove),
+};
+
+static int __init mod_init(void)
+{
+ return platform_driver_probe(&mxc_rnga_driver, mxc_rnga_probe);
+}
+
+static void __exit mod_exit(void)
+{
+ platform_driver_unregister(&mxc_rnga_driver);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("H/W RNGA driver for i.MX");
+MODULE_LICENSE("GPL");
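
A note on the two hooks above: the hw_random core polls data_present()
(passing a wait flag) before each data_read(), and treats a missing
data_present hook as "always ready". The following is a simplified
paraphrase of that contract, a sketch rather than the literal core.c
source:

#include <linux/hw_random.h>

/* Paraphrased sketch of how drivers/char/hw_random/core.c drives the
 * legacy hook pair; locking and user-copy details omitted. */
static int sketch_get_word(struct hwrng *rng, u32 *word, int wait)
{
        /* a driver without data_present() is assumed always ready */
        if (rng->data_present && !rng->data_present(rng, wait))
                return 0;               /* no data available yet */

        /* returns the number of valid bytes; mxc_rnga_data_read()
         * above returns 0 when the status register flagged an error */
        return rng->data_read(rng, word);
}
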
diff --git a/drivers/char/hw_random/n2-asm.S b/drivers/char/hw_random/n2-asm.S
new file mode 100644
index 00000000..9b6eb5cd
--- /dev/null
+++ b/drivers/char/hw_random/n2-asm.S
@@ -0,0 +1,79 @@
+/* n2-asm.S: Niagara2 RNG hypervisor call assembler.
+ *
+ * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
+ */
+#include <linux/linkage.h>
+#include <asm/hypervisor.h>
+#include "n2rng.h"
+
+ .text
+
+ENTRY(sun4v_rng_get_diag_ctl)
+ mov HV_FAST_RNG_GET_DIAG_CTL, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_rng_get_diag_ctl)
+
+ENTRY(sun4v_rng_ctl_read_v1)
+ mov %o1, %o3
+ mov %o2, %o4
+ mov HV_FAST_RNG_CTL_READ, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o3]
+ retl
+ stx %o2, [%o4]
+ENDPROC(sun4v_rng_ctl_read_v1)
+
+ENTRY(sun4v_rng_ctl_read_v2)
+ save %sp, -192, %sp
+ mov %i0, %o0
+ mov %i1, %o1
+ mov HV_FAST_RNG_CTL_READ, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%i2]
+ stx %o2, [%i3]
+ stx %o3, [%i4]
+ stx %o4, [%i5]
+ ret
+ restore %g0, %o0, %o0
+ENDPROC(sun4v_rng_ctl_read_v2)
+
+ENTRY(sun4v_rng_ctl_write_v1)
+ mov %o3, %o4
+ mov HV_FAST_RNG_CTL_WRITE, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%o4]
+ENDPROC(sun4v_rng_ctl_write_v1)
+
+ENTRY(sun4v_rng_ctl_write_v2)
+ mov HV_FAST_RNG_CTL_WRITE, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_rng_ctl_write_v2)
+
+ENTRY(sun4v_rng_data_read_diag_v1)
+ mov %o2, %o4
+ mov HV_FAST_RNG_DATA_READ_DIAG, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%o4]
+ENDPROC(sun4v_rng_data_read_diag_v1)
+
+ENTRY(sun4v_rng_data_read_diag_v2)
+ mov %o3, %o4
+ mov HV_FAST_RNG_DATA_READ_DIAG, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%o4]
+ENDPROC(sun4v_rng_data_read_diag_v2)
+
+ENTRY(sun4v_rng_data_read)
+ mov %o1, %o4
+ mov HV_FAST_RNG_DATA_READ, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%o4]
+ENDPROC(sun4v_rng_data_read)
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
new file mode 100644
index 00000000..ac6739e0
--- /dev/null
+++ b/drivers/char/hw_random/n2-drv.c
@@ -0,0 +1,779 @@
+/* n2-drv.c: Niagara-2 RNG driver.
+ *
+ * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/preempt.h>
+#include <linux/hw_random.h>
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/hypervisor.h>
+
+#include "n2rng.h"
+
+#define DRV_MODULE_NAME "n2rng"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION "0.1"
+#define DRV_MODULE_RELDATE "May 15, 2008"
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Niagara2 RNG driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+/* The Niagara2 RNG provides a 64-bit read-only random number
+ * register, plus a control register. Access to the RNG is
+ * virtualized through the hypervisor so that both guests and control
+ * nodes can access the device.
+ *
+ * The entropy source consists of raw entropy sources, each
+ * constructed from a voltage controlled oscillator whose phase is
+ * jittered by thermal noise sources.
+ *
+ * The oscillators in each of the three raw entropy sources run at
+ * different frequencies. Normally, all three generator outputs are
+ * gathered, xored together, and fed into a CRC circuit, the output of
+ * which is the 64-bit read-only register.
+ *
+ * Some time is necessary for enough entropy to build up such that a
+ * full 64 bits of entropy are available in the register.
+ * In normal operating mode (RNG_CTL_LFSR is set), the chip implements
+ * an interlock which blocks register reads until sufficient entropy
+ * is available.
+ *
+ * A control register is provided for adjusting various aspects of RNG
+ * operation, and to enable diagnostic modes. Each of the three raw
+ * entropy sources has an enable bit (RNG_CTL_ES{1,2,3}). Also
+ * provided are fields for controlling the minimum time in cycles
+ * between read accesses to the register (RNG_CTL_WAIT, this controls
+ * the interlock described in the previous paragraph).
+ *
+ * The standard setting is to have the mode bit (RNG_CTL_LFSR) set,
+ * all three entropy sources enabled, and the interlock time set
+ * appropriately.
+ *
+ * The CRC polynomial used by the chip is:
+ *
+ * P(X) = x^64 + x^61 + x^57 + x^56 + x^52 + x^51 + x^50 + x^48 + x^47 +
+ * x^46 + x^43 + x^42 + x^41 + x^39 + x^38 + x^37 + x^35 + x^32 + x^28 +
+ * x^25 + x^22 + x^21 + x^17 + x^15 + x^13 + x^12 + x^11 + x^7 + x^5 + x + 1
+ *
+ * The RNG_CTL_VCO value of each noise cell must be programmed
+ * separately. This is why 4 control register values must be provided
+ * to the hypervisor. During a write, the hypervisor writes them all,
+ * one at a time, to the actual RNG_CTL register. The first three
+ * values are used to setup the desired RNG_CTL_VCO for each entropy
+ * source, for example:
+ *
+ * control 0: (1 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES1
+ * control 1: (2 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES2
+ * control 2: (3 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES3
+ *
+ * And then the fourth value sets the final chip state and enables the
+ * desired entropy sources.
+ */
+
+static int n2rng_hv_err_trans(unsigned long hv_err)
+{
+ switch (hv_err) {
+ case HV_EOK:
+ return 0;
+ case HV_EWOULDBLOCK:
+ return -EAGAIN;
+ case HV_ENOACCESS:
+ return -EPERM;
+ case HV_EIO:
+ return -EIO;
+ case HV_EBUSY:
+ return -EBUSY;
+ case HV_EBADALIGN:
+ case HV_ENORADDR:
+ return -EFAULT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned long n2rng_generic_read_control_v2(unsigned long ra,
+ unsigned long unit)
+{
+ unsigned long hv_err, state, ticks, watchdog_delta, watchdog_status;
+ int block = 0, busy = 0;
+
+ while (1) {
+ hv_err = sun4v_rng_ctl_read_v2(ra, unit, &state,
+ &ticks,
+ &watchdog_delta,
+ &watchdog_status);
+ if (hv_err == HV_EOK)
+ break;
+
+ if (hv_err == HV_EBUSY) {
+ if (++busy >= N2RNG_BUSY_LIMIT)
+ break;
+
+ udelay(1);
+ } else if (hv_err == HV_EWOULDBLOCK) {
+ if (++block >= N2RNG_BLOCK_LIMIT)
+ break;
+
+ __delay(ticks);
+ } else
+ break;
+ }
+
+ return hv_err;
+}
+
+/* In multi-socket situations, the hypervisor might need to
+ * queue up the RNG control register write if it's for a unit
+ * that is on a cpu socket other than the one we are executing on.
+ *
+ * We poll here waiting for a successful read of that control
+ * register to make sure the write has been actually performed.
+ */
+static unsigned long n2rng_control_settle_v2(struct n2rng *np, int unit)
+{
+ unsigned long ra = __pa(&np->scratch_control[0]);
+
+ return n2rng_generic_read_control_v2(ra, unit);
+}
+
+static unsigned long n2rng_write_ctl_one(struct n2rng *np, int unit,
+ unsigned long state,
+ unsigned long control_ra,
+ unsigned long watchdog_timeout,
+ unsigned long *ticks)
+{
+ unsigned long hv_err;
+
+ if (np->hvapi_major == 1) {
+ hv_err = sun4v_rng_ctl_write_v1(control_ra, state,
+ watchdog_timeout, ticks);
+ } else {
+ hv_err = sun4v_rng_ctl_write_v2(control_ra, state,
+ watchdog_timeout, unit);
+ if (hv_err == HV_EOK)
+ hv_err = n2rng_control_settle_v2(np, unit);
+ *ticks = N2RNG_ACCUM_CYCLES_DEFAULT;
+ }
+
+ return hv_err;
+}
+
+static int n2rng_generic_read_data(unsigned long data_ra)
+{
+ unsigned long ticks, hv_err;
+ int block = 0, hcheck = 0;
+
+ while (1) {
+ hv_err = sun4v_rng_data_read(data_ra, &ticks);
+ if (hv_err == HV_EOK)
+ return 0;
+
+ if (hv_err == HV_EWOULDBLOCK) {
+ if (++block >= N2RNG_BLOCK_LIMIT)
+ return -EWOULDBLOCK;
+ __delay(ticks);
+ } else if (hv_err == HV_ENOACCESS) {
+ return -EPERM;
+ } else if (hv_err == HV_EIO) {
+ if (++hcheck >= N2RNG_HCHECK_LIMIT)
+ return -EIO;
+ mdelay(10);
+ } else
+ return -ENODEV;
+ }
+}
+
+static unsigned long n2rng_read_diag_data_one(struct n2rng *np,
+ unsigned long unit,
+ unsigned long data_ra,
+ unsigned long data_len,
+ unsigned long *ticks)
+{
+ unsigned long hv_err;
+
+ if (np->hvapi_major == 1) {
+ hv_err = sun4v_rng_data_read_diag_v1(data_ra, data_len, ticks);
+ } else {
+ hv_err = sun4v_rng_data_read_diag_v2(data_ra, data_len,
+ unit, ticks);
+ if (!*ticks)
+ *ticks = N2RNG_ACCUM_CYCLES_DEFAULT;
+ }
+ return hv_err;
+}
+
+static int n2rng_generic_read_diag_data(struct n2rng *np,
+ unsigned long unit,
+ unsigned long data_ra,
+ unsigned long data_len)
+{
+ unsigned long ticks, hv_err;
+ int block = 0;
+
+ while (1) {
+ hv_err = n2rng_read_diag_data_one(np, unit,
+ data_ra, data_len,
+ &ticks);
+ if (hv_err == HV_EOK)
+ return 0;
+
+ if (hv_err == HV_EWOULDBLOCK) {
+ if (++block >= N2RNG_BLOCK_LIMIT)
+ return -EWOULDBLOCK;
+ __delay(ticks);
+ } else if (hv_err == HV_ENOACCESS) {
+ return -EPERM;
+ } else if (hv_err == HV_EIO) {
+ return -EIO;
+ } else
+ return -ENODEV;
+ }
+}
+
+
+static int n2rng_generic_write_control(struct n2rng *np,
+ unsigned long control_ra,
+ unsigned long unit,
+ unsigned long state)
+{
+ unsigned long hv_err, ticks;
+ int block = 0, busy = 0;
+
+ while (1) {
+ hv_err = n2rng_write_ctl_one(np, unit, state, control_ra,
+ np->wd_timeo, &ticks);
+ if (hv_err == HV_EOK)
+ return 0;
+
+ if (hv_err == HV_EWOULDBLOCK) {
+ if (++block >= N2RNG_BLOCK_LIMIT)
+ return -EWOULDBLOCK;
+ __delay(ticks);
+ } else if (hv_err == HV_EBUSY) {
+ if (++busy >= N2RNG_BUSY_LIMIT)
+ return -EBUSY;
+ udelay(1);
+ } else
+ return -ENODEV;
+ }
+}
+
+/* Just try to see if we can successfully access the control register
+ * of the RNG on the domain on which we are currently executing.
+ */
+static int n2rng_try_read_ctl(struct n2rng *np)
+{
+ unsigned long hv_err;
+ unsigned long x;
+
+ if (np->hvapi_major == 1) {
+ hv_err = sun4v_rng_get_diag_ctl();
+ } else {
+ /* We purposefully give invalid arguments, HV_ENOACCESS
+ * is higher priority than the errors we'd get from
+ * these other cases, and that's the error we are
+ * truly interested in.
+ */
+ hv_err = sun4v_rng_ctl_read_v2(0UL, ~0UL, &x, &x, &x, &x);
+ switch (hv_err) {
+ case HV_EWOULDBLOCK:
+ case HV_ENOACCESS:
+ break;
+ default:
+ hv_err = HV_EOK;
+ break;
+ }
+ }
+
+ return n2rng_hv_err_trans(hv_err);
+}
+
+#define CONTROL_DEFAULT_BASE \
+ ((2 << RNG_CTL_ASEL_SHIFT) | \
+ (N2RNG_ACCUM_CYCLES_DEFAULT << RNG_CTL_WAIT_SHIFT) | \
+ RNG_CTL_LFSR)
+
+#define CONTROL_DEFAULT_0 \
+ (CONTROL_DEFAULT_BASE | \
+ (1 << RNG_CTL_VCO_SHIFT) | \
+ RNG_CTL_ES1)
+#define CONTROL_DEFAULT_1 \
+ (CONTROL_DEFAULT_BASE | \
+ (2 << RNG_CTL_VCO_SHIFT) | \
+ RNG_CTL_ES2)
+#define CONTROL_DEFAULT_2 \
+ (CONTROL_DEFAULT_BASE | \
+ (3 << RNG_CTL_VCO_SHIFT) | \
+ RNG_CTL_ES3)
+#define CONTROL_DEFAULT_3 \
+ (CONTROL_DEFAULT_BASE | \
+ RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3)
+
+static void n2rng_control_swstate_init(struct n2rng *np)
+{
+ int i;
+
+ np->flags |= N2RNG_FLAG_CONTROL;
+
+ np->health_check_sec = N2RNG_HEALTH_CHECK_SEC_DEFAULT;
+ np->accum_cycles = N2RNG_ACCUM_CYCLES_DEFAULT;
+ np->wd_timeo = N2RNG_WD_TIMEO_DEFAULT;
+
+ for (i = 0; i < np->num_units; i++) {
+ struct n2rng_unit *up = &np->units[i];
+
+ up->control[0] = CONTROL_DEFAULT_0;
+ up->control[1] = CONTROL_DEFAULT_1;
+ up->control[2] = CONTROL_DEFAULT_2;
+ up->control[3] = CONTROL_DEFAULT_3;
+ }
+
+ np->hv_state = HV_RNG_STATE_UNCONFIGURED;
+}
+
+static int n2rng_grab_diag_control(struct n2rng *np)
+{
+ int i, busy_count, err = -ENODEV;
+
+ busy_count = 0;
+ for (i = 0; i < 100; i++) {
+ err = n2rng_try_read_ctl(np);
+ if (err != -EAGAIN)
+ break;
+
+ if (++busy_count > 100) {
+ dev_err(&np->op->dev,
+ "Grab diag control timeout.\n");
+ return -ENODEV;
+ }
+
+ udelay(1);
+ }
+
+ return err;
+}
+
+static int n2rng_init_control(struct n2rng *np)
+{
+ int err = n2rng_grab_diag_control(np);
+
+ /* Not in the control domain; that's OK, we are only a consumer
+ * of the RNG data and don't need to set up and program it.
+ */
+ if (err == -EPERM)
+ return 0;
+ if (err)
+ return err;
+
+ n2rng_control_swstate_init(np);
+
+ return 0;
+}
+
+static int n2rng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct n2rng *np = (struct n2rng *) rng->priv;
+ unsigned long ra = __pa(&np->test_data);
+ int len;
+
+ if (!(np->flags & N2RNG_FLAG_READY)) {
+ len = 0;
+ } else if (np->flags & N2RNG_FLAG_BUFFER_VALID) {
+ np->flags &= ~N2RNG_FLAG_BUFFER_VALID;
+ *data = np->buffer;
+ len = 4;
+ } else {
+ int err = n2rng_generic_read_data(ra);
+ if (!err) {
+ np->buffer = np->test_data >> 32;
+ *data = np->test_data & 0xffffffff;
+ len = 4;
+ } else {
+ dev_err(&np->op->dev, "RNG error, retesting\n");
+ np->flags &= ~N2RNG_FLAG_READY;
+ if (!(np->flags & N2RNG_FLAG_SHUTDOWN))
+ schedule_delayed_work(&np->work, 0);
+ len = 0;
+ }
+ }
+
+ return len;
+}
+
+/* On a guest node, just make sure we can read random data properly.
+ * If a control node reboots or reloads its n2rng driver, this won't
+ * work during that time. So we have to keep probing until the device
+ * becomes usable.
+ */
+static int n2rng_guest_check(struct n2rng *np)
+{
+ unsigned long ra = __pa(&np->test_data);
+
+ return n2rng_generic_read_data(ra);
+}
+
+static int n2rng_entropy_diag_read(struct n2rng *np, unsigned long unit,
+ u64 *pre_control, u64 pre_state,
+ u64 *buffer, unsigned long buf_len,
+ u64 *post_control, u64 post_state)
+{
+ unsigned long post_ctl_ra = __pa(post_control);
+ unsigned long pre_ctl_ra = __pa(pre_control);
+ unsigned long buffer_ra = __pa(buffer);
+ int err;
+
+ err = n2rng_generic_write_control(np, pre_ctl_ra, unit, pre_state);
+ if (err)
+ return err;
+
+ err = n2rng_generic_read_diag_data(np, unit,
+ buffer_ra, buf_len);
+
+ (void) n2rng_generic_write_control(np, post_ctl_ra, unit,
+ post_state);
+
+ return err;
+}
+
+static u64 advance_polynomial(u64 poly, u64 val, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ int highbit_set = ((s64)val < 0);
+
+ val <<= 1;
+ if (highbit_set)
+ val ^= poly;
+ }
+
+ return val;
+}
+
+static int n2rng_test_buffer_find(struct n2rng *np, u64 val)
+{
+ int i, count = 0;
+
+ /* Purposefully skip over the first word. */
+ for (i = 1; i < SELFTEST_BUFFER_WORDS; i++) {
+ if (np->test_buffer[i] == val)
+ count++;
+ }
+ return count;
+}
+
+static void n2rng_dump_test_buffer(struct n2rng *np)
+{
+ int i;
+
+ for (i = 0; i < SELFTEST_BUFFER_WORDS; i++)
+ dev_err(&np->op->dev, "Test buffer slot %d [0x%016llx]\n",
+ i, np->test_buffer[i]);
+}
+
+static int n2rng_check_selftest_buffer(struct n2rng *np, unsigned long unit)
+{
+ u64 val = SELFTEST_VAL;
+ int err, matches, limit;
+
+ matches = 0;
+ for (limit = 0; limit < SELFTEST_LOOPS_MAX; limit++) {
+ matches += n2rng_test_buffer_find(np, val);
+ if (matches >= SELFTEST_MATCH_GOAL)
+ break;
+ val = advance_polynomial(SELFTEST_POLY, val, 1);
+ }
+
+ err = 0;
+ if (limit >= SELFTEST_LOOPS_MAX) {
+ err = -ENODEV;
+ dev_err(&np->op->dev, "Selftest failed on unit %lu\n", unit);
+ n2rng_dump_test_buffer(np);
+ } else
+ dev_info(&np->op->dev, "Selftest passed on unit %lu\n", unit);
+
+ return err;
+}
+
+static int n2rng_control_selftest(struct n2rng *np, unsigned long unit)
+{
+ int err;
+
+ np->test_control[0] = (0x2 << RNG_CTL_ASEL_SHIFT);
+ np->test_control[1] = (0x2 << RNG_CTL_ASEL_SHIFT);
+ np->test_control[2] = (0x2 << RNG_CTL_ASEL_SHIFT);
+ np->test_control[3] = ((0x2 << RNG_CTL_ASEL_SHIFT) |
+ RNG_CTL_LFSR |
+ ((SELFTEST_TICKS - 2) << RNG_CTL_WAIT_SHIFT));
+
+
+ err = n2rng_entropy_diag_read(np, unit, np->test_control,
+ HV_RNG_STATE_HEALTHCHECK,
+ np->test_buffer,
+ sizeof(np->test_buffer),
+ &np->units[unit].control[0],
+ np->hv_state);
+ if (err)
+ return err;
+
+ return n2rng_check_selftest_buffer(np, unit);
+}
+
+static int n2rng_control_check(struct n2rng *np)
+{
+ int i;
+
+ for (i = 0; i < np->num_units; i++) {
+ int err = n2rng_control_selftest(np, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/* The sanity checks passed, install the final configuration into the
+ * chip, it's ready to use.
+ */
+static int n2rng_control_configure_units(struct n2rng *np)
+{
+ int unit, err;
+
+ err = 0;
+ for (unit = 0; unit < np->num_units; unit++) {
+ struct n2rng_unit *up = &np->units[unit];
+ unsigned long ctl_ra = __pa(&up->control[0]);
+ int esrc;
+ u64 base;
+
+ base = ((np->accum_cycles << RNG_CTL_WAIT_SHIFT) |
+ (2 << RNG_CTL_ASEL_SHIFT) |
+ RNG_CTL_LFSR);
+
+ /* XXX This isn't the best. We should fetch a bunch of
+ * XXX words using each entropy source combined with each
+ * XXX VCO setting, and see which combinations give the
+ * XXX best random data.
+ */
+ for (esrc = 0; esrc < 3; esrc++)
+ up->control[esrc] = base |
+ (esrc << RNG_CTL_VCO_SHIFT) |
+ (RNG_CTL_ES1 << esrc);
+
+ up->control[3] = base |
+ (RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3);
+
+ err = n2rng_generic_write_control(np, ctl_ra, unit,
+ HV_RNG_STATE_CONFIGURED);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static void n2rng_work(struct work_struct *work)
+{
+ struct n2rng *np = container_of(work, struct n2rng, work.work);
+ int err = 0;
+
+ if (!(np->flags & N2RNG_FLAG_CONTROL)) {
+ err = n2rng_guest_check(np);
+ } else {
+ preempt_disable();
+ err = n2rng_control_check(np);
+ preempt_enable();
+
+ if (!err)
+ err = n2rng_control_configure_units(np);
+ }
+
+ if (!err) {
+ np->flags |= N2RNG_FLAG_READY;
+ dev_info(&np->op->dev, "RNG ready\n");
+ }
+
+ if (err && !(np->flags & N2RNG_FLAG_SHUTDOWN))
+ schedule_delayed_work(&np->work, HZ * 2);
+}
+
+static void __devinit n2rng_driver_version(void)
+{
+ static int n2rng_version_printed;
+
+ if (n2rng_version_printed++ == 0)
+ pr_info("%s", version);
+}
+
+static const struct of_device_id n2rng_match[];
+static int __devinit n2rng_probe(struct platform_device *op)
+{
+ const struct of_device_id *match;
+ int victoria_falls;
+ int err = -ENOMEM;
+ struct n2rng *np;
+
+ match = of_match_device(n2rng_match, &op->dev);
+ if (!match)
+ return -EINVAL;
+ victoria_falls = (match->data != NULL);
+
+ n2rng_driver_version();
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
+ if (!np)
+ goto out;
+ np->op = op;
+
+ INIT_DELAYED_WORK(&np->work, n2rng_work);
+
+ if (victoria_falls)
+ np->flags |= N2RNG_FLAG_VF;
+
+ err = -ENODEV;
+ np->hvapi_major = 2;
+ if (sun4v_hvapi_register(HV_GRP_RNG,
+ np->hvapi_major,
+ &np->hvapi_minor)) {
+ np->hvapi_major = 1;
+ if (sun4v_hvapi_register(HV_GRP_RNG,
+ np->hvapi_major,
+ &np->hvapi_minor)) {
+ dev_err(&op->dev, "Cannot register suitable "
+ "HVAPI version.\n");
+ goto out_free;
+ }
+ }
+
+ if (np->flags & N2RNG_FLAG_VF) {
+ if (np->hvapi_major < 2) {
+ dev_err(&op->dev, "VF RNG requires HVAPI major "
+ "version 2 or later, got %lu\n",
+ np->hvapi_major);
+ goto out_hvapi_unregister;
+ }
+ np->num_units = of_getintprop_default(op->dev.of_node,
+ "rng-#units", 0);
+ if (!np->num_units) {
+ dev_err(&op->dev, "VF RNG lacks rng-#units property\n");
+ goto out_hvapi_unregister;
+ }
+ } else
+ np->num_units = 1;
+
+ dev_info(&op->dev, "Registered RNG HVAPI major %lu minor %lu\n",
+ np->hvapi_major, np->hvapi_minor);
+
+ np->units = kzalloc(sizeof(struct n2rng_unit) * np->num_units,
+ GFP_KERNEL);
+ err = -ENOMEM;
+ if (!np->units)
+ goto out_hvapi_unregister;
+
+ err = n2rng_init_control(np);
+ if (err)
+ goto out_free_units;
+
+ dev_info(&op->dev, "Found %s RNG, units: %d\n",
+ ((np->flags & N2RNG_FLAG_VF) ?
+ "Victoria Falls" : "Niagara2"),
+ np->num_units);
+
+ np->hwrng.name = "n2rng";
+ np->hwrng.data_read = n2rng_data_read;
+ np->hwrng.priv = (unsigned long) np;
+
+ err = hwrng_register(&np->hwrng);
+ if (err)
+ goto out_free_units;
+
+ dev_set_drvdata(&op->dev, np);
+
+ schedule_delayed_work(&np->work, 0);
+
+ return 0;
+
+out_free_units:
+ kfree(np->units);
+ np->units = NULL;
+
+out_hvapi_unregister:
+ sun4v_hvapi_unregister(HV_GRP_RNG);
+
+out_free:
+ kfree(np);
+out:
+ return err;
+}
+
+static int __devexit n2rng_remove(struct platform_device *op)
+{
+ struct n2rng *np = dev_get_drvdata(&op->dev);
+
+ np->flags |= N2RNG_FLAG_SHUTDOWN;
+
+ cancel_delayed_work_sync(&np->work);
+
+ hwrng_unregister(&np->hwrng);
+
+ sun4v_hvapi_unregister(HV_GRP_RNG);
+
+ kfree(np->units);
+ np->units = NULL;
+
+ kfree(np);
+
+ dev_set_drvdata(&op->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id n2rng_match[] = {
+ {
+ .name = "random-number-generator",
+ .compatible = "SUNW,n2-rng",
+ },
+ {
+ .name = "random-number-generator",
+ .compatible = "SUNW,vf-rng",
+ .data = (void *) 1,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, n2rng_match);
+
+static struct platform_driver n2rng_driver = {
+ .driver = {
+ .name = "n2rng",
+ .owner = THIS_MODULE,
+ .of_match_table = n2rng_match,
+ },
+ .probe = n2rng_probe,
+ .remove = __devexit_p(n2rng_remove),
+};
+
+static int __init n2rng_init(void)
+{
+ return platform_driver_register(&n2rng_driver);
+}
+
+static void __exit n2rng_exit(void)
+{
+ platform_driver_unregister(&n2rng_driver);
+}
+
+module_init(n2rng_init);
+module_exit(n2rng_exit);
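
The self-test machinery above checks hardware output against a software
model of the chip's CRC: advance_polynomial() is one step of a Galois
LFSR, shifting left and folding SELFTEST_POLY back in whenever the top
bit falls out. A standalone sketch of the same stepping, with the two
constants copied from n2rng.h:

#include <stdint.h>
#include <stdio.h>

#define SELFTEST_POLY 0x231DCEE91262B8A3ULL     /* from n2rng.h */
#define SELFTEST_VAL  0xB8820C7BD387E32CULL     /* from n2rng.h */

/* same operation as advance_polynomial() in n2-drv.c */
static uint64_t advance(uint64_t poly, uint64_t val, int count)
{
        while (count-- > 0) {
                int highbit_set = (val >> 63) & 1;

                val <<= 1;
                if (highbit_set)
                        val ^= poly;    /* fold the CRC polynomial in */
        }
        return val;
}

int main(void)
{
        uint64_t v = SELFTEST_VAL;
        int i;

        /* the driver walks up to SELFTEST_LOOPS_MAX successive states,
         * looking for SELFTEST_MATCH_GOAL hits in the test buffer */
        for (i = 0; i < 4; i++) {
                printf("state %d: 0x%016llx\n", i, (unsigned long long)v);
                v = advance(SELFTEST_POLY, v, 1);
        }
        return 0;
}
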
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
new file mode 100644
index 00000000..4bea07f3
--- /dev/null
+++ b/drivers/char/hw_random/n2rng.h
@@ -0,0 +1,118 @@
+/* n2rng.h: Niagara2 RNG defines.
+ *
+ * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
+ */
+
+#ifndef _N2RNG_H
+#define _N2RNG_H
+
+#define RNG_CTL_WAIT 0x0000000001fffe00ULL /* Minimum wait time */
+#define RNG_CTL_WAIT_SHIFT 9
+#define RNG_CTL_BYPASS 0x0000000000000100ULL /* VCO voltage source */
+#define RNG_CTL_VCO 0x00000000000000c0ULL /* VCO rate control */
+#define RNG_CTL_VCO_SHIFT 6
+#define RNG_CTL_ASEL 0x0000000000000030ULL /* Analog MUX select */
+#define RNG_CTL_ASEL_SHIFT 4
+#define RNG_CTL_LFSR 0x0000000000000008ULL /* Use LFSR or plain shift */
+#define RNG_CTL_ES3 0x0000000000000004ULL /* Enable entropy source 3 */
+#define RNG_CTL_ES2 0x0000000000000002ULL /* Enable entropy source 2 */
+#define RNG_CTL_ES1 0x0000000000000001ULL /* Enable entropy source 1 */
+
+#define HV_FAST_RNG_GET_DIAG_CTL 0x130
+#define HV_FAST_RNG_CTL_READ 0x131
+#define HV_FAST_RNG_CTL_WRITE 0x132
+#define HV_FAST_RNG_DATA_READ_DIAG 0x133
+#define HV_FAST_RNG_DATA_READ 0x134
+
+#define HV_RNG_STATE_UNCONFIGURED 0
+#define HV_RNG_STATE_CONFIGURED 1
+#define HV_RNG_STATE_HEALTHCHECK 2
+#define HV_RNG_STATE_ERROR 3
+
+#define HV_RNG_NUM_CONTROL 4
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_rng_get_diag_ctl(void);
+extern unsigned long sun4v_rng_ctl_read_v1(unsigned long ctl_regs_ra,
+ unsigned long *state,
+ unsigned long *tick_delta);
+extern unsigned long sun4v_rng_ctl_read_v2(unsigned long ctl_regs_ra,
+ unsigned long unit,
+ unsigned long *state,
+ unsigned long *tick_delta,
+ unsigned long *watchdog,
+ unsigned long *write_status);
+extern unsigned long sun4v_rng_ctl_write_v1(unsigned long ctl_regs_ra,
+ unsigned long state,
+ unsigned long write_timeout,
+ unsigned long *tick_delta);
+extern unsigned long sun4v_rng_ctl_write_v2(unsigned long ctl_regs_ra,
+ unsigned long state,
+ unsigned long write_timeout,
+ unsigned long unit);
+extern unsigned long sun4v_rng_data_read_diag_v1(unsigned long data_ra,
+ unsigned long len,
+ unsigned long *tick_delta);
+extern unsigned long sun4v_rng_data_read_diag_v2(unsigned long data_ra,
+ unsigned long len,
+ unsigned long unit,
+ unsigned long *tick_delta);
+extern unsigned long sun4v_rng_data_read(unsigned long data_ra,
+ unsigned long *tick_delta);
+
+struct n2rng_unit {
+ u64 control[HV_RNG_NUM_CONTROL];
+};
+
+struct n2rng {
+ struct platform_device *op;
+
+ unsigned long flags;
+#define N2RNG_FLAG_VF 0x00000001 /* Victoria Falls RNG, else N2 */
+#define N2RNG_FLAG_CONTROL 0x00000002 /* Operating in control domain */
+#define N2RNG_FLAG_READY 0x00000008 /* Ready for hw-rng layer */
+#define N2RNG_FLAG_SHUTDOWN 0x00000010 /* Driver unregistering */
+#define N2RNG_FLAG_BUFFER_VALID 0x00000020 /* u32 buffer holds valid data */
+
+ int num_units;
+ struct n2rng_unit *units;
+
+ struct hwrng hwrng;
+ u32 buffer;
+
+ /* Registered hypervisor group API major and minor version. */
+ unsigned long hvapi_major;
+ unsigned long hvapi_minor;
+
+ struct delayed_work work;
+
+ unsigned long hv_state; /* HV_RNG_STATE_foo */
+
+ unsigned long health_check_sec;
+ unsigned long accum_cycles;
+ unsigned long wd_timeo;
+#define N2RNG_HEALTH_CHECK_SEC_DEFAULT 0
+#define N2RNG_ACCUM_CYCLES_DEFAULT 2048
+#define N2RNG_WD_TIMEO_DEFAULT 0
+
+ u64 scratch_control[HV_RNG_NUM_CONTROL];
+
+#define SELFTEST_TICKS 38859
+#define SELFTEST_VAL ((u64)0xB8820C7BD387E32C)
+#define SELFTEST_POLY ((u64)0x231DCEE91262B8A3)
+#define SELFTEST_MATCH_GOAL 6
+#define SELFTEST_LOOPS_MAX 40000
+#define SELFTEST_BUFFER_WORDS 8
+
+ u64 test_data;
+ u64 test_control[HV_RNG_NUM_CONTROL];
+ u64 test_buffer[SELFTEST_BUFFER_WORDS];
+};
+
+#define N2RNG_BLOCK_LIMIT 60000
+#define N2RNG_BUSY_LIMIT 100
+#define N2RNG_HCHECK_LIMIT 100
+
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* _N2RNG_H */
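
To make the field macros concrete: each 64-bit control word the driver
programs is the OR of a VCO setting, an entropy-source enable, the
analog mux select, the wait interlock and the LFSR mode bit. A hedged
standalone sketch reproducing the shape of CONTROL_DEFAULT_0 from
n2-drv.c (constants duplicated from this header so the example compiles
on its own):

#include <stdint.h>
#include <stdio.h>

/* field positions duplicated from n2rng.h for a standalone example */
#define RNG_CTL_WAIT_SHIFT      9
#define RNG_CTL_VCO_SHIFT       6
#define RNG_CTL_ASEL_SHIFT      4
#define RNG_CTL_LFSR            0x8ULL
#define RNG_CTL_ES1             0x1ULL
#define ACCUM_CYCLES            2048ULL /* N2RNG_ACCUM_CYCLES_DEFAULT */

int main(void)
{
        /* mirrors CONTROL_DEFAULT_0: analog mux 2, default wait
         * interlock, LFSR mode, VCO setting 1, entropy source 1 */
        uint64_t ctl = (2ULL << RNG_CTL_ASEL_SHIFT) |
                       (ACCUM_CYCLES << RNG_CTL_WAIT_SHIFT) |
                       RNG_CTL_LFSR |
                       (1ULL << RNG_CTL_VCO_SHIFT) |
                       RNG_CTL_ES1;

        printf("control word: 0x%016llx\n", (unsigned long long)ctl);
        return 0;
}
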
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
new file mode 100644
index 00000000..dd1d143e
--- /dev/null
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -0,0 +1,120 @@
+/*
+ * Nomadik RNG support
+ * Copyright 2009 Alessandro Rubini
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+static struct clk *rng_clk;
+
+static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ void __iomem *base = (void __iomem *)rng->priv;
+
+ /*
+ * The register is 32 bits and gives 16 random bits (low half).
+ * A subsequent read will delay the core for 400ns, so we just read
+ * once and accept the unlikely, very small delay, even if wait==0.
+ */
+ *(u16 *)data = __raw_readl(base + 8) & 0xffff;
+ return 2;
+}
+
+/* we have at most one RNG per machine, granted */
+static struct hwrng nmk_rng = {
+ .name = "nomadik",
+ .read = nmk_rng_read,
+};
+
+static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
+{
+ void __iomem *base;
+ int ret;
+
+ rng_clk = clk_get(&dev->dev, NULL);
+ if (IS_ERR(rng_clk)) {
+ dev_err(&dev->dev, "could not get rng clock\n");
+ ret = PTR_ERR(rng_clk);
+ return ret;
+ }
+
+ clk_enable(rng_clk);
+
+ ret = amba_request_regions(dev, dev->dev.init_name);
+ if (ret)
+ goto out_clk;
+ ret = -ENOMEM;
+ base = ioremap(dev->res.start, resource_size(&dev->res));
+ if (!base)
+ goto out_release;
+ nmk_rng.priv = (unsigned long)base;
+ ret = hwrng_register(&nmk_rng);
+ if (ret)
+ goto out_unmap;
+ return 0;
+
+out_unmap:
+ iounmap(base);
+out_release:
+ amba_release_regions(dev);
+out_clk:
+ clk_disable(rng_clk);
+ clk_put(rng_clk);
+ return ret;
+}
+
+static int nmk_rng_remove(struct amba_device *dev)
+{
+ void __iomem *base = (void __iomem *)nmk_rng.priv;
+ hwrng_unregister(&nmk_rng);
+ iounmap(base);
+ amba_release_regions(dev);
+ clk_disable(rng_clk);
+ clk_put(rng_clk);
+ return 0;
+}
+
+static struct amba_id nmk_rng_ids[] = {
+ {
+ .id = 0x000805e1,
+ .mask = 0x000fffff, /* top bits are rev and cfg: accept all */
+ },
+ {0, 0},
+};
+
+static struct amba_driver nmk_rng_driver = {
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "rng",
+ },
+ .probe = nmk_rng_probe,
+ .remove = nmk_rng_remove,
+ .id_table = nmk_rng_ids,
+};
+
+static int __init nmk_rng_init(void)
+{
+ return amba_driver_register(&nmk_rng_driver);
+}
+
+static void __exit nmk_rng_exit(void)
+{
+ amba_driver_unregister(&nmk_rng_driver);
+}
+
+module_init(nmk_rng_init);
+module_exit(nmk_rng_exit);
+
+MODULE_LICENSE("GPL");
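
nmk_rng_read() hands back only two bytes per call; it is the hw_random
core that keeps calling the driver until the caller's request is
satisfied. A hedged, standalone simulation of that accumulation loop
(fake_read() is a stand-in for the driver callback, not code from this
file):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for nmk_rng_read(): always yields exactly two bytes */
static int fake_read(void *data, size_t max)
{
        static uint16_t counter = 0xBEEF;

        if (max < sizeof(uint16_t))
                return 0;
        memcpy(data, &counter, sizeof(uint16_t));
        counter = (uint16_t)(counter * 31 + 7); /* arbitrary churn */
        return sizeof(uint16_t);
}

int main(void)
{
        uint8_t buf[8];
        size_t filled = 0;

        /* the core loops until the request is met or the driver stalls */
        while (filled < sizeof(buf)) {
                int got = fake_read(buf + filled, sizeof(buf) - filled);

                if (got <= 0)
                        break;
                filled += (size_t)got;
        }
        printf("collected %zu bytes\n", filled);
        return 0;
}
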
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
new file mode 100644
index 00000000..9cd0feca
--- /dev/null
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -0,0 +1,148 @@
+/*
+ * Hardware Random Number Generator support for Cavium Networks
+ * Octeon processor family.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Cavium Networks
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/gfp.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-rnm-defs.h>
+
+struct octeon_rng {
+ struct hwrng ops;
+ void __iomem *control_status;
+ void __iomem *result;
+};
+
+static int octeon_rng_init(struct hwrng *rng)
+{
+ union cvmx_rnm_ctl_status ctl;
+ struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
+
+ ctl.u64 = 0;
+ ctl.s.ent_en = 1; /* Enable the entropy source. */
+ ctl.s.rng_en = 1; /* Enable the RNG hardware. */
+ cvmx_write_csr((u64)p->control_status, ctl.u64);
+ return 0;
+}
+
+static void octeon_rng_cleanup(struct hwrng *rng)
+{
+ union cvmx_rnm_ctl_status ctl;
+ struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
+
+ ctl.u64 = 0;
+ /* Disable everything. */
+ cvmx_write_csr((u64)p->control_status, ctl.u64);
+}
+
+static int octeon_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
+
+ *data = cvmx_read64_uint32((u64)p->result);
+ return sizeof(u32);
+}
+
+static int __devinit octeon_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res_ports;
+ struct resource *res_result;
+ struct octeon_rng *rng;
+ int ret;
+ struct hwrng ops = {
+ .name = "octeon",
+ .init = octeon_rng_init,
+ .cleanup = octeon_rng_cleanup,
+ .data_read = octeon_rng_data_read
+ };
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ res_ports = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_ports)
+ goto err_ports;
+
+ res_result = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_result)
+ goto err_ports;
+
+ rng->control_status = devm_ioremap_nocache(&pdev->dev,
+ res_ports->start,
+ sizeof(u64));
+ if (!rng->control_status)
+ goto err_ports;
+
+ rng->result = devm_ioremap_nocache(&pdev->dev,
+ res_result->start,
+ sizeof(u64));
+ if (!rng->result)
+ goto err_r;
+
+ rng->ops = ops;
+
+ dev_set_drvdata(&pdev->dev, &rng->ops);
+ ret = hwrng_register(&rng->ops);
+ if (ret)
+ goto err;
+
+ dev_info(&pdev->dev, "Octeon Random Number Generator\n");
+
+ return 0;
+err:
+ devm_iounmap(&pdev->dev, rng->result);
+err_r:
+ devm_iounmap(&pdev->dev, rng->control_status);
+err_ports:
+ devm_kfree(&pdev->dev, rng);
+ return -ENOENT;
+}
+
+static int __exit octeon_rng_remove(struct platform_device *pdev)
+{
+ struct hwrng *rng = dev_get_drvdata(&pdev->dev);
+
+ hwrng_unregister(rng);
+
+ return 0;
+}
+
+static struct platform_driver octeon_rng_driver = {
+ .driver = {
+ .name = "octeon_rng",
+ .owner = THIS_MODULE,
+ },
+ .probe = octeon_rng_probe,
+ .remove = __exit_p(octeon_rng_remove),
+};
+
+static int __init octeon_rng_mod_init(void)
+{
+ return platform_driver_register(&octeon_rng_driver);
+}
+
+static void __exit octeon_rng_mod_exit(void)
+{
+ platform_driver_unregister(&octeon_rng_driver);
+}
+
+module_init(octeon_rng_mod_init);
+module_exit(octeon_rng_mod_exit);
+
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
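
octeon_rng_data_read() recovers its private state with container_of(),
relying on the hwrng being embedded inside struct octeon_rng. A minimal
re-implementation of the idea as a standalone sketch (the kernel's real
macro adds type checking that is omitted here):

#include <stddef.h>
#include <stdio.h>

/* minimal re-implementation of the kernel's container_of() */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_hwrng { const char *name; };

struct fake_octeon_rng {
        struct fake_hwrng ops;  /* embedded, as in struct octeon_rng */
        int csr_dummy;          /* stands in for the iomem pointers */
};

int main(void)
{
        struct fake_octeon_rng rng = {
                .ops = { .name = "octeon" },
                .csr_dummy = 42,
        };
        struct fake_hwrng *p = &rng.ops;

        /* recover the outer structure from the embedded member */
        struct fake_octeon_rng *outer =
                container_of(p, struct fake_octeon_rng, ops);

        printf("csr_dummy = %d\n", outer->csr_dummy);
        return 0;
}
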
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
new file mode 100644
index 00000000..2cc755a6
--- /dev/null
+++ b/drivers/char/hw_random/omap-rng.c
@@ -0,0 +1,229 @@
+/*
+ * omap-rng.c - RNG driver for TI OMAP CPU family
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * Mostly based on original driver:
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+
+#define RNG_OUT_REG 0x00 /* Output register */
+#define RNG_STAT_REG 0x04 /* Status register
+ [0] = STAT_BUSY */
+#define RNG_ALARM_REG 0x24 /* Alarm register
+ [7:0] = ALARM_COUNTER */
+#define RNG_CONFIG_REG 0x28 /* Configuration register
+ [11:6] = RESET_COUNT
+ [5:3] = RING2_DELAY
+ [2:0] = RING1_DELAY */
+#define RNG_REV_REG 0x3c /* Revision register
+ [7:0] = REV_NB */
+#define RNG_MASK_REG 0x40 /* Mask and reset register
+ [2] = IT_EN
+ [1] = SOFTRESET
+ [0] = AUTOIDLE */
+#define RNG_SYSSTATUS 0x44 /* System status
+ [0] = RESETDONE */
+
+static void __iomem *rng_base;
+static struct clk *rng_ick;
+static struct platform_device *rng_dev;
+
+static inline u32 omap_rng_read_reg(int reg)
+{
+ return __raw_readl(rng_base + reg);
+}
+
+static inline void omap_rng_write_reg(int reg, u32 val)
+{
+ __raw_writel(val, rng_base + reg);
+}
+
+static int omap_rng_data_present(struct hwrng *rng, int wait)
+{
+ int data, i;
+
+ for (i = 0; i < 20; i++) {
+ data = omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
+ if (data || !wait)
+ break;
+ /* RNG produces data fast enough (2+ MBit/sec, even
+ * during "rngtest" loads) that these delays don't
+ * seem to trigger. We *could* use the RNG IRQ, but
+ * that'd be higher overhead ... so why bother?
+ */
+ udelay(10);
+ }
+ return data;
+}
+
+static int omap_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ *data = omap_rng_read_reg(RNG_OUT_REG);
+
+ return 4;
+}
+
+static struct hwrng omap_rng_ops = {
+ .name = "omap",
+ .data_present = omap_rng_data_present,
+ .data_read = omap_rng_data_read,
+};
+
+static int __devinit omap_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+
+ /*
+ * A bit ugly, and it will never actually happen but there can
+ * A bit ugly, and it will never actually happen, but there can
+ * be only one RNG and this catches any bork.
+ if (rng_dev)
+ return -EBUSY;
+
+ if (cpu_is_omap24xx()) {
+ rng_ick = clk_get(&pdev->dev, "ick");
+ if (IS_ERR(rng_ick)) {
+ dev_err(&pdev->dev, "Could not get rng_ick\n");
+ return PTR_ERR(rng_ick);
+ }
+ clk_enable(rng_ick);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENOENT;
+ goto err_region;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ ret = -EBUSY;
+ goto err_region;
+ }
+
+ dev_set_drvdata(&pdev->dev, res);
+ rng_base = ioremap(res->start, resource_size(res));
+ if (!rng_base) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ ret = hwrng_register(&omap_rng_ops);
+ if (ret)
+ goto err_register;
+
+ dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
+ omap_rng_read_reg(RNG_REV_REG));
+ omap_rng_write_reg(RNG_MASK_REG, 0x1);
+
+ rng_dev = pdev;
+
+ return 0;
+
+err_register:
+ iounmap(rng_base);
+ rng_base = NULL;
+err_ioremap:
+ release_mem_region(res->start, resource_size(res));
+err_region:
+ if (cpu_is_omap24xx()) {
+ clk_disable(rng_ick);
+ clk_put(rng_ick);
+ }
+ return ret;
+}
+
+static int __exit omap_rng_remove(struct platform_device *pdev)
+{
+ struct resource *res = dev_get_drvdata(&pdev->dev);
+
+ hwrng_unregister(&omap_rng_ops);
+
+ omap_rng_write_reg(RNG_MASK_REG, 0x0);
+
+ iounmap(rng_base);
+
+ if (cpu_is_omap24xx()) {
+ clk_disable(rng_ick);
+ clk_put(rng_ick);
+ }
+
+ release_mem_region(res->start, resource_size(res));
+ rng_base = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int omap_rng_suspend(struct platform_device *pdev, pm_message_t message)
+{
+ omap_rng_write_reg(RNG_MASK_REG, 0x0);
+ return 0;
+}
+
+static int omap_rng_resume(struct platform_device *pdev)
+{
+ omap_rng_write_reg(RNG_MASK_REG, 0x1);
+ return 0;
+}
+
+#else
+
+#define omap_rng_suspend NULL
+#define omap_rng_resume NULL
+
+#endif
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:omap_rng");
+
+static struct platform_driver omap_rng_driver = {
+ .driver = {
+ .name = "omap_rng",
+ .owner = THIS_MODULE,
+ },
+ .probe = omap_rng_probe,
+ .remove = __exit_p(omap_rng_remove),
+ .suspend = omap_rng_suspend,
+ .resume = omap_rng_resume
+};
+
+static int __init omap_rng_init(void)
+{
+ if (!cpu_is_omap16xx() && !cpu_is_omap24xx())
+ return -ENODEV;
+
+ return platform_driver_register(&omap_rng_driver);
+}
+
+static void __exit omap_rng_exit(void)
+{
+ platform_driver_unregister(&omap_rng_driver);
+}
+
+module_init(omap_rng_init);
+module_exit(omap_rng_exit);
+
+MODULE_AUTHOR("Deepak Saxena (and others)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
new file mode 100644
index 00000000..1d504815
--- /dev/null
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient on-chip RNG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/of_platform.h>
+#include <asm/io.h>
+
+#define SDCRNG_CTL_REG 0x00
+#define SDCRNG_CTL_FVLD_M 0x0000f000
+#define SDCRNG_CTL_FVLD_S 12
+#define SDCRNG_CTL_KSZ 0x00000800
+#define SDCRNG_CTL_RSRC_CRG 0x00000010
+#define SDCRNG_CTL_RSRC_RRG 0x00000000
+#define SDCRNG_CTL_CE 0x00000004
+#define SDCRNG_CTL_RE 0x00000002
+#define SDCRNG_CTL_DR 0x00000001
+#define SDCRNG_CTL_SELECT_RRG_RNG (SDCRNG_CTL_RE | SDCRNG_CTL_RSRC_RRG)
+#define SDCRNG_CTL_SELECT_CRG_RNG (SDCRNG_CTL_CE | SDCRNG_CTL_RSRC_CRG)
+#define SDCRNG_VAL_REG 0x20
+
+#define MODULE_NAME "pasemi_rng"
+
+static int pasemi_rng_data_present(struct hwrng *rng, int wait)
+{
+ void __iomem *rng_regs = (void __iomem *)rng->priv;
+ int data, i;
+
+ for (i = 0; i < 20; i++) {
+ data = (in_le32(rng_regs + SDCRNG_CTL_REG)
+ & SDCRNG_CTL_FVLD_M) ? 1 : 0;
+ if (data || !wait)
+ break;
+ udelay(10);
+ }
+ return data;
+}
+
+static int pasemi_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ void __iomem *rng_regs = (void __iomem *)rng->priv;
+ *data = in_le32(rng_regs + SDCRNG_VAL_REG);
+ return 4;
+}
+
+static int pasemi_rng_init(struct hwrng *rng)
+{
+ void __iomem *rng_regs = (void __iomem *)rng->priv;
+ u32 ctl;
+
+ ctl = SDCRNG_CTL_DR | SDCRNG_CTL_SELECT_RRG_RNG | SDCRNG_CTL_KSZ;
+ out_le32(rng_regs + SDCRNG_CTL_REG, ctl);
+ out_le32(rng_regs + SDCRNG_CTL_REG, ctl & ~SDCRNG_CTL_DR);
+
+ return 0;
+}
+
+static void pasemi_rng_cleanup(struct hwrng *rng)
+{
+ void __iomem *rng_regs = (void __iomem *)rng->priv;
+ u32 ctl;
+
+ ctl = SDCRNG_CTL_RE | SDCRNG_CTL_CE;
+ out_le32(rng_regs + SDCRNG_CTL_REG,
+ in_le32(rng_regs + SDCRNG_CTL_REG) & ~ctl);
+}
+
+static struct hwrng pasemi_rng = {
+ .name = MODULE_NAME,
+ .init = pasemi_rng_init,
+ .cleanup = pasemi_rng_cleanup,
+ .data_present = pasemi_rng_data_present,
+ .data_read = pasemi_rng_data_read,
+};
+
+static int __devinit rng_probe(struct platform_device *ofdev)
+{
+ void __iomem *rng_regs;
+ struct device_node *rng_np = ofdev->dev.of_node;
+ struct resource res;
+ int err = 0;
+
+ err = of_address_to_resource(rng_np, 0, &res);
+ if (err)
+ return -ENODEV;
+
+ rng_regs = ioremap(res.start, 0x100);
+
+ if (!rng_regs)
+ return -ENOMEM;
+
+ pasemi_rng.priv = (unsigned long)rng_regs;
+
+ printk(KERN_INFO "Registering PA Semi RNG\n");
+
+ err = hwrng_register(&pasemi_rng);
+
+ if (err)
+ iounmap(rng_regs);
+
+ return err;
+}
+
+static int __devexit rng_remove(struct platform_device *dev)
+{
+ void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv;
+
+ hwrng_unregister(&pasemi_rng);
+ iounmap(rng_regs);
+
+ return 0;
+}
+
+static struct of_device_id rng_match[] = {
+ { .compatible = "1682m-rng", },
+ { .compatible = "pasemi,pwrficient-rng", },
+ { },
+};
+
+static struct platform_driver rng_driver = {
+ .driver = {
+ .name = "pasemi-rng",
+ .owner = THIS_MODULE,
+ .of_match_table = rng_match,
+ },
+ .probe = rng_probe,
+ .remove = rng_remove,
+};
+
+static int __init rng_init(void)
+{
+ return platform_driver_register(&rng_driver);
+}
+module_init(rng_init);
+
+static void __exit rng_exit(void)
+{
+ platform_driver_unregister(&rng_driver);
+}
+module_exit(rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("H/W RNG driver for PA Semi processor");
diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c
new file mode 100644
index 00000000..990d55a5
--- /dev/null
+++ b/drivers/char/hw_random/picoxcell-rng.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * All enquiries to support@picochip.com
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define DATA_REG_OFFSET 0x0200
+#define CSR_REG_OFFSET 0x0278
+#define CSR_OUT_EMPTY_MASK (1 << 24)
+#define CSR_FAULT_MASK (1 << 1)
+#define TRNG_BLOCK_RESET_MASK (1 << 0)
+#define TAI_REG_OFFSET 0x0380
+
+/*
+ * The maximum amount of time in microseconds to spend waiting for data if the
+ * core wants us to wait. The TRNG should generate 32 bits every 320ns so a
+ * timeout of 20us seems reasonable. The TRNG does built-in tests of the data
+ * for randomness so we can't always assume there is data present.
+ */
+#define PICO_TRNG_TIMEOUT 20
+
+static void __iomem *rng_base;
+static struct clk *rng_clk;
+static struct device *rng_dev;
+
+static inline u32 picoxcell_trng_read_csr(void)
+{
+ return __raw_readl(rng_base + CSR_REG_OFFSET);
+}
+
+static inline bool picoxcell_trng_is_empty(void)
+{
+ return picoxcell_trng_read_csr() & CSR_OUT_EMPTY_MASK;
+}
+
+/*
+ * Take the random number generator out of reset and make sure the interrupts
+ * are masked. We shouldn't need to get large amounts of random bytes so just
+ * poll the status register. The hardware generates 32 bits every 320ns so we
+ * shouldn't have to wait long enough to warrant waiting for an IRQ.
+ */
+static void picoxcell_trng_start(void)
+{
+ __raw_writel(0, rng_base + TAI_REG_OFFSET);
+ __raw_writel(0, rng_base + CSR_REG_OFFSET);
+}
+
+static void picoxcell_trng_reset(void)
+{
+ __raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + CSR_REG_OFFSET);
+ __raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + TAI_REG_OFFSET);
+ picoxcell_trng_start();
+}
+
+/*
+ * Get some random data from the random number generator. The hw_random core
+ * layer provides us with locking.
+ */
+static int picoxcell_trng_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ int i;
+
+ /* Wait for some data to become available. */
+ for (i = 0; i < PICO_TRNG_TIMEOUT && picoxcell_trng_is_empty(); ++i) {
+ if (!wait)
+ return 0;
+
+ udelay(1);
+ }
+
+ if (picoxcell_trng_read_csr() & CSR_FAULT_MASK) {
+ dev_err(rng_dev, "fault detected, resetting TRNG\n");
+ picoxcell_trng_reset();
+ return -EIO;
+ }
+
+ if (i == PICO_TRNG_TIMEOUT)
+ return 0;
+
+ *(u32 *)buf = __raw_readl(rng_base + DATA_REG_OFFSET);
+ return sizeof(u32);
+}
+
+static struct hwrng picoxcell_trng = {
+ .name = "picoxcell",
+ .read = picoxcell_trng_read,
+};
+
+static int picoxcell_trng_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!mem) {
+ dev_warn(&pdev->dev, "no memory resource\n");
+ return -ENOMEM;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
+ "picoxcell_trng")) {
+ dev_warn(&pdev->dev, "unable to request io mem\n");
+ return -EBUSY;
+ }
+
+ rng_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ if (!rng_base) {
+ dev_warn(&pdev->dev, "unable to remap io mem\n");
+ return -ENOMEM;
+ }
+
+ rng_clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(rng_clk)) {
+ dev_warn(&pdev->dev, "no clk\n");
+ return PTR_ERR(rng_clk);
+ }
+
+ ret = clk_enable(rng_clk);
+ if (ret) {
+ dev_warn(&pdev->dev, "unable to enable clk\n");
+ goto err_enable;
+ }
+
+ picoxcell_trng_start();
+ ret = hwrng_register(&picoxcell_trng);
+ if (ret)
+ goto err_register;
+
+ rng_dev = &pdev->dev;
+ dev_info(&pdev->dev, "picoxcell random number generator active\n");
+
+ return 0;
+
+err_register:
+ clk_disable(rng_clk);
+err_enable:
+ clk_put(rng_clk);
+
+ return ret;
+}
+
+static int __devexit picoxcell_trng_remove(struct platform_device *pdev)
+{
+ hwrng_unregister(&picoxcell_trng);
+ clk_disable(rng_clk);
+ clk_put(rng_clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int picoxcell_trng_suspend(struct device *dev)
+{
+ clk_disable(rng_clk);
+
+ return 0;
+}
+
+static int picoxcell_trng_resume(struct device *dev)
+{
+ return clk_enable(rng_clk);
+}
+
+static const struct dev_pm_ops picoxcell_trng_pm_ops = {
+ .suspend = picoxcell_trng_suspend,
+ .resume = picoxcell_trng_resume,
+};
+#endif /* CONFIG_PM */
+
+static struct platform_driver picoxcell_trng_driver = {
+ .probe = picoxcell_trng_probe,
+ .remove = __devexit_p(picoxcell_trng_remove),
+ .driver = {
+ .name = "picoxcell-trng",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &picoxcell_trng_pm_ops,
+#endif /* CONFIG_PM */
+ },
+};
+
+static int __init picoxcell_trng_init(void)
+{
+ return platform_driver_register(&picoxcell_trng_driver);
+}
+module_init(picoxcell_trng_init);
+
+static void __exit picoxcell_trng_exit(void)
+{
+ platform_driver_unregister(&picoxcell_trng_driver);
+}
+module_exit(picoxcell_trng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("Picochip picoXcell TRNG driver");
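
A quick check of the timeout arithmetic from the comment at the top of
the file: at 32 bits every 320 ns, the 20 us poll budget covers roughly
62 generation periods, so an empty FIFO should only persist when the
built-in randomness tests keep rejecting output. A trivial sketch of
that arithmetic (both figures are the ones quoted in the comment):

#include <stdio.h>

int main(void)
{
        const long word_ns = 320;       /* 32 bits per 320 ns, per comment */
        const long timeout_ns = 20000;  /* PICO_TRNG_TIMEOUT = 20 us */

        printf("generation periods per timeout: %ld\n",
               timeout_ns / word_ns);
        return 0;
}
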
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
new file mode 100644
index 00000000..a94e9305
--- /dev/null
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -0,0 +1,168 @@
+/*
+ * drivers/char/hw_random/timeriomem-rng.c
+ *
+ * Copyright (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ *
+ * Derived from drivers/char/hw_random/omap-rng.c
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This driver is useful for platforms that have an IO range that provides
+ * periodic random data from a single IO memory address. All the platform
+ * has to do is provide the address and the 'wait time' after which new
+ * data becomes available.
+ *
+ * TODO: add support for reading sizes other than 32bits and masking
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/timeriomem-rng.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/completion.h>
+
+static struct timeriomem_rng_data *timeriomem_rng_data;
+
+static void timeriomem_rng_trigger(unsigned long);
+static DEFINE_TIMER(timeriomem_rng_timer, timeriomem_rng_trigger, 0, 0);
+
+/*
+ * Return 1 if we have data; return 0 if we have nothing.
+ */
+static int timeriomem_rng_data_present(struct hwrng *rng, int wait)
+{
+ if (rng->priv == 0)
+ return 1;
+
+ if (!wait || timeriomem_rng_data->present)
+ return timeriomem_rng_data->present;
+
+ wait_for_completion(&timeriomem_rng_data->completion);
+
+ return 1;
+}
+
+static int timeriomem_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ unsigned long cur;
+ s32 delay;
+
+ *data = readl(timeriomem_rng_data->address);
+
+ if (rng->priv != 0) {
+ cur = jiffies;
+
+ delay = cur - timeriomem_rng_timer.expires;
+ delay = rng->priv - (delay % rng->priv);
+
+ timeriomem_rng_timer.expires = cur + delay;
+ timeriomem_rng_data->present = 0;
+
+ init_completion(&timeriomem_rng_data->completion);
+ add_timer(&timeriomem_rng_timer);
+ }
+
+ return 4;
+}
+
+static void timeriomem_rng_trigger(unsigned long dummy)
+{
+ timeriomem_rng_data->present = 1;
+ complete(&timeriomem_rng_data->completion);
+}
+
+static struct hwrng timeriomem_rng_ops = {
+ .name = "timeriomem",
+ .data_present = timeriomem_rng_data_present,
+ .data_read = timeriomem_rng_data_read,
+ .priv = 0,
+};
+
+static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res)
+ return -ENOENT;
+
+ timeriomem_rng_data = pdev->dev.platform_data;
+
+ timeriomem_rng_data->address = ioremap(res->start, resource_size(res));
+ if (!timeriomem_rng_data->address)
+ return -EIO;
+
+ if (timeriomem_rng_data->period != 0
+ && usecs_to_jiffies(timeriomem_rng_data->period) > 0) {
+ timeriomem_rng_timer.expires = jiffies;
+
+ timeriomem_rng_ops.priv = usecs_to_jiffies(
+ timeriomem_rng_data->period);
+ }
+ timeriomem_rng_data->present = 1;
+
+ ret = hwrng_register(&timeriomem_rng_ops);
+ if (ret)
+ goto failed;
+
+ dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
+ timeriomem_rng_data->address,
+ timeriomem_rng_data->period);
+
+ return 0;
+
+failed:
+ dev_err(&pdev->dev, "problem registering\n");
+ iounmap(timeriomem_rng_data->address);
+
+ return ret;
+}
+
+static int __devexit timeriomem_rng_remove(struct platform_device *pdev)
+{
+ del_timer_sync(&timeriomem_rng_timer);
+ hwrng_unregister(&timeriomem_rng_ops);
+
+ iounmap(timeriomem_rng_data->address);
+
+ return 0;
+}
+
+static struct platform_driver timeriomem_rng_driver = {
+ .driver = {
+ .name = "timeriomem_rng",
+ .owner = THIS_MODULE,
+ },
+ .probe = timeriomem_rng_probe,
+ .remove = __devexit_p(timeriomem_rng_remove),
+};
+
+static int __init timeriomem_rng_init(void)
+{
+ return platform_driver_register(&timeriomem_rng_driver);
+}
+
+static void __exit timeriomem_rng_exit(void)
+{
+ platform_driver_unregister(&timeriomem_rng_driver);
+}
+
+module_init(timeriomem_rng_init);
+module_exit(timeriomem_rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
+MODULE_DESCRIPTION("Timer IOMEM H/W RNG driver");
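
Per the overview comment, a platform supplies only the data register's
physical address (as a mem resource) and the refill period (in
microseconds, via platform_data). A hypothetical board-file sketch; the
device name matches this driver, but the address and period below are
invented for illustration:

#include <linux/platform_device.h>
#include <linux/timeriomem-rng.h>

static struct timeriomem_rng_data board_rng_data = {
        .period = 1000000,      /* new data at most once a second (us) */
};

static struct resource board_rng_resource = {
        .start  = 0x44000000,   /* made-up data register address */
        .end    = 0x44000003,
        .flags  = IORESOURCE_MEM,
};

static struct platform_device board_rng_device = {
        .name           = "timeriomem_rng",
        .id             = -1,
        .num_resources  = 1,
        .resource       = &board_rng_resource,
        .dev            = {
                .platform_data  = &board_rng_data,
        },
};

/* a board init function would then call
 * platform_device_register(&board_rng_device); */
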
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
new file mode 100644
index 00000000..0bc0cb70
--- /dev/null
+++ b/drivers/char/hw_random/tx4939-rng.c
@@ -0,0 +1,185 @@
+/*
+ * RNG driver for TX4939 Random Number Generators (RNG)
+ *
+ * Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/gfp.h>
+
+#define TX4939_RNG_RCSR 0x00000000
+#define TX4939_RNG_ROR(n) (0x00000018 + (n) * 8)
+
+#define TX4939_RNG_RCSR_INTE 0x00000008
+#define TX4939_RNG_RCSR_RST 0x00000004
+#define TX4939_RNG_RCSR_FIN 0x00000002
+#define TX4939_RNG_RCSR_ST 0x00000001
+
+struct tx4939_rng {
+ struct hwrng rng;
+ void __iomem *base;
+ u64 databuf[3];
+ unsigned int data_avail;
+};
+
+static void rng_io_start(void)
+{
+#ifndef CONFIG_64BIT
+ /*
+ * readq reads a 64-bit register using a 64-bit load. On a
+ * 32-bit kernel, however, an interrupt or any other processor
+ * exception would clobber the upper 32 bits of the processor
+ * register, so interrupts need to be disabled.
+ */
+ local_irq_disable();
+#endif
+}
+
+static void rng_io_end(void)
+{
+#ifndef CONFIG_64BIT
+ local_irq_enable();
+#endif
+}
+
+static u64 read_rng(void __iomem *base, unsigned int offset)
+{
+ return ____raw_readq(base + offset);
+}
+
+static void write_rng(u64 val, void __iomem *base, unsigned int offset)
+{
+ ____raw_writeq(val, base + offset);
+}
+
+static int tx4939_rng_data_present(struct hwrng *rng, int wait)
+{
+ struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
+ int i;
+
+ if (rngdev->data_avail)
+ return rngdev->data_avail;
+ for (i = 0; i < 20; i++) {
+ rng_io_start();
+ if (!(read_rng(rngdev->base, TX4939_RNG_RCSR)
+ & TX4939_RNG_RCSR_ST)) {
+ rngdev->databuf[0] =
+ read_rng(rngdev->base, TX4939_RNG_ROR(0));
+ rngdev->databuf[1] =
+ read_rng(rngdev->base, TX4939_RNG_ROR(1));
+ rngdev->databuf[2] =
+ read_rng(rngdev->base, TX4939_RNG_ROR(2));
+ rngdev->data_avail =
+ sizeof(rngdev->databuf) / sizeof(u32);
+ /* Start RNG */
+ write_rng(TX4939_RNG_RCSR_ST,
+ rngdev->base, TX4939_RNG_RCSR);
+ wait = 0;
+ }
+ rng_io_end();
+ if (!wait)
+ break;
+ /* 90 bus clock cycles by default for generation */
+ ndelay(90 * 5);
+ }
+ return rngdev->data_avail;
+}
+
+static int tx4939_rng_data_read(struct hwrng *rng, u32 *buffer)
+{
+ struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
+
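+ /* hand out the buffered words one u32 at a time, highest index first */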
+ rngdev->data_avail--;
+ *buffer = *((u32 *)&rngdev->databuf + rngdev->data_avail);
+ return sizeof(u32);
+}
+
+static int __init tx4939_rng_probe(struct platform_device *dev)
+{
+ struct tx4939_rng *rngdev;
+ struct resource *r;
+ int i;
+
+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -EBUSY;
+ rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
+ if (!rngdev)
+ return -ENOMEM;
+ if (!devm_request_mem_region(&dev->dev, r->start, resource_size(r),
+ dev_name(&dev->dev)))
+ return -EBUSY;
+ rngdev->base = devm_ioremap(&dev->dev, r->start, resource_size(r));
+ if (!rngdev->base)
+ return -EBUSY;
+
+ rngdev->rng.name = dev_name(&dev->dev);
+ rngdev->rng.data_present = tx4939_rng_data_present;
+ rngdev->rng.data_read = tx4939_rng_data_read;
+
+ rng_io_start();
+ /* Reset RNG */
+ write_rng(TX4939_RNG_RCSR_RST, rngdev->base, TX4939_RNG_RCSR);
+ write_rng(0, rngdev->base, TX4939_RNG_RCSR);
+ /* Start RNG */
+ write_rng(TX4939_RNG_RCSR_ST, rngdev->base, TX4939_RNG_RCSR);
+ rng_io_end();
+ /*
+ * Drop first two results. From the datasheet:
+ * The quality of the random numbers generated immediately
+ * after reset can be insufficient. Therefore, do not use
+ * random numbers obtained from the first and second
+ * generations; use the ones from the third or subsequent
+ * generation.
+ */
+ for (i = 0; i < 2; i++) {
+ rngdev->data_avail = 0;
+ if (!tx4939_rng_data_present(&rngdev->rng, 1))
+ return -EIO;
+ }
+
+ platform_set_drvdata(dev, rngdev);
+ return hwrng_register(&rngdev->rng);
+}
+
+static int __exit tx4939_rng_remove(struct platform_device *dev)
+{
+ struct tx4939_rng *rngdev = platform_get_drvdata(dev);
+
+ hwrng_unregister(&rngdev->rng);
+ platform_set_drvdata(dev, NULL);
+ return 0;
+}
+
+static struct platform_driver tx4939_rng_driver = {
+ .driver = {
+ .name = "tx4939-rng",
+ .owner = THIS_MODULE,
+ },
+ /* the remove callback is __exit, so guard the reference */
+ .remove = __exit_p(tx4939_rng_remove),
+};
+
+static int __init tx4939rng_init(void)
+{
+ return platform_driver_probe(&tx4939_rng_driver, tx4939_rng_probe);
+}
+
+static void __exit tx4939rng_exit(void)
+{
+ platform_driver_unregister(&tx4939_rng_driver);
+}
+
+module_init(tx4939rng_init);
+module_exit(tx4939rng_exit);
+
+MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for TX4939");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
new file mode 100644
index 00000000..d0387a84
--- /dev/null
+++ b/drivers/char/hw_random/via-rng.c
@@ -0,0 +1,224 @@
+/*
+ * RNG driver for VIA RNGs
+ *
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ *
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <crypto/padlock.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/msr.h>
+#include <asm/cpufeature.h>
+#include <asm/i387.h>
+
+
+#define PFX KBUILD_MODNAME ": "
+
+enum {
+ VIA_STRFILT_CNT_SHIFT = 16,
+ VIA_STRFILT_FAIL = (1 << 15),
+ VIA_STRFILT_ENABLE = (1 << 14),
+ VIA_RAWBITS_ENABLE = (1 << 13),
+ VIA_RNG_ENABLE = (1 << 6),
+ VIA_NOISESRC1 = (1 << 8),
+ VIA_NOISESRC2 = (1 << 9),
+ VIA_XSTORE_CNT_MASK = 0x0F,
+
+ VIA_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */
+ VIA_RNG_CHUNK_4 = 0x01, /* 32 rand bits, 32 stored bits */
+ VIA_RNG_CHUNK_4_MASK = 0xFFFFFFFF,
+ VIA_RNG_CHUNK_2 = 0x02, /* 16 rand bits, 32 stored bits */
+ VIA_RNG_CHUNK_2_MASK = 0xFFFF,
+ VIA_RNG_CHUNK_1 = 0x03, /* 8 rand bits, 32 stored bits */
+ VIA_RNG_CHUNK_1_MASK = 0xFF,
+};
+
+/*
+ * Investigate using the 'rep' prefix to obtain 32 bits of random data
+ * in one insn. The upside is potentially better performance. The
+ * downside is that the instruction becomes no longer atomic. Due to
+ * this, just like familiar issues with /dev/random itself, the worst
+ * case of a 'rep xstore' could potentially pause a cpu for an
+ * unreasonably long time. In practice, this condition would likely
+ * only occur when the hardware is failing. (or so we hope :))
+ *
+ * Another possible performance boost may come from simply buffering
+ * until we have 4 bytes, thus returning a u32 at a time,
+ * instead of the current u8-at-a-time.
+ *
+ * Padlock instructions can generate a spurious DNA fault, so
+ * we have to call them in the context of irq_ts_save/restore()
+ */
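+/*
+ * A minimal sketch of the buffering idea above (hypothetical, not
+ * built): accumulate four single-byte XSTOREs so data_read() could
+ * hand back a full u32 per call:
+ *
+ *	u32 word = 0, tmp;
+ *	int n = 0;
+ *
+ *	while (n < 4)
+ *		if (xstore(&tmp, VIA_RNG_CHUNK_1) & VIA_XSTORE_CNT_MASK)
+ *			word |= (tmp & VIA_RNG_CHUNK_1_MASK) << (8 * n++);
+ */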
+
+static inline u32 xstore(u32 *addr, u32 edx_in)
+{
+ u32 eax_out;
+ int ts_state;
+
+ ts_state = irq_ts_save();
+
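+ /*
+ * 0x0F 0xA7 0xC0 is the PadLock XSTORE opcode; %edx selects the
+ * chunk size and the returned %eax carries the MSR_VIA_RNG status,
+ * including the stored-byte count in its low nibble.
+ */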
+ asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
+ : "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));
+
+ irq_ts_restore(ts_state);
+ return eax_out;
+}
+
+static int via_rng_data_present(struct hwrng *rng, int wait)
+{
+ char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+ ((aligned(STACK_ALIGN)));
+ u32 *via_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+ u32 bytes_out;
+ int i;
+
+ /* We choose the recommended 1-byte-per-instruction RNG rate,
+ * for greater randomness at the expense of speed. Larger
+ * values 2, 4, or 8 bytes-per-instruction yield greater
+ * speed at lesser randomness.
+ *
+ * If you change this to another VIA_RNG_CHUNK_n, the read path
+ * must be updated to consume the wider chunks (the reference to
+ * rng_vendor_ops[] predates the hw_random split and is stale).
+ * VIA_RNG_CHUNK_8 requires further code changes.
+ *
+ * A copy of MSR_VIA_RNG is placed in eax_out when xstore
+ * completes.
+ */
+
+ for (i = 0; i < 20; i++) {
+ *via_rng_datum = 0; /* paranoia, not really necessary */
+ bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
+ bytes_out &= VIA_XSTORE_CNT_MASK;
+ if (bytes_out || !wait)
+ break;
+ udelay(10);
+ }
+ rng->priv = *via_rng_datum;
+ return bytes_out ? 1 : 0;
+}
+
+static int via_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ u32 via_rng_datum = (u32)rng->priv;
+
+ *data = via_rng_datum;
+
+ return 1;
+}
+
+static int via_rng_init(struct hwrng *rng)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ u32 lo, hi, old_lo;
+
+ /* VIA Nano CPUs don't have the MSR_VIA_RNG anymore. The RNG
+ * is always enabled if CPUID rng_en is set. There is no
+ * longer any RNG configuration to be done through this
+ * register. */
+ if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
+ if (!cpu_has_xstore_enabled) {
+ printk(KERN_ERR PFX "can't enable hardware RNG "
+ "if XSTORE is not enabled\n");
+ return -ENODEV;
+ }
+ return 0;
+ }
+
+ /* Control the RNG via MSR. Tread lightly and pay very close
+ * attention to values written, as the reserved fields
+ * are documented to be "undefined and unpredictable"; but it
+ * does not say to write them as zero, so I make a guess that
+ * we restore the values we find in the register.
+ */
+ rdmsr(MSR_VIA_RNG, lo, hi);
+
+ old_lo = lo;
+ lo &= ~(0x7f << VIA_STRFILT_CNT_SHIFT);
+ lo &= ~VIA_XSTORE_CNT_MASK;
+ lo &= ~(VIA_STRFILT_ENABLE | VIA_STRFILT_FAIL | VIA_RAWBITS_ENABLE);
+ lo |= VIA_RNG_ENABLE;
+ lo |= VIA_NOISESRC1;
+
+ /* Enable secondary noise source on CPUs where it is present. */
+
+ /* Nehemiah stepping 8 and higher */
+ if ((c->x86_model == 9) && (c->x86_mask > 7))
+ lo |= VIA_NOISESRC2;
+
+ /* Esther */
+ if (c->x86_model >= 10)
+ lo |= VIA_NOISESRC2;
+
+ if (lo != old_lo)
+ wrmsr(MSR_VIA_RNG, lo, hi);
+
+ /* perhaps-unnecessary sanity check; remove after testing if
+ unneeded */
+ rdmsr(MSR_VIA_RNG, lo, hi);
+ if ((lo & VIA_RNG_ENABLE) == 0) {
+ printk(KERN_ERR PFX "cannot enable VIA C3 RNG, aborting\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+
+static struct hwrng via_rng = {
+ .name = "via",
+ .init = via_rng_init,
+ .data_present = via_rng_data_present,
+ .data_read = via_rng_data_read,
+};
+
+
+static int __init mod_init(void)
+{
+ int err;
+
+ if (!cpu_has_xstore)
+ return -ENODEV;
+ printk(KERN_INFO "VIA RNG detected\n");
+ err = hwrng_register(&via_rng);
+ if (err)
+ printk(KERN_ERR PFX "RNG registering failed (%d)\n", err);
+ return err;
+}
+
+static void __exit mod_exit(void)
+{
+ hwrng_unregister(&via_rng);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
new file mode 100644
index 00000000..75f1cbd6
--- /dev/null
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -0,0 +1,139 @@
+/*
+ * Randomness driver for virtio
+ * Copyright (C) 2007, 2008 Rusty Russell IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/virtio.h>
+#include <linux/virtio_rng.h>
+
+static struct virtqueue *vq;
+static unsigned int data_avail;
+static DECLARE_COMPLETION(have_data);
+static bool busy;
+
+static void random_recv_done(struct virtqueue *vq)
+{
+ /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
+ if (!virtqueue_get_buf(vq, &data_avail))
+ return;
+
+ complete(&have_data);
+}
+
+/* The host will fill any buffer we give it with sweet, sweet randomness. */
+static void register_buffer(u8 *buf, size_t size)
+{
+ struct scatterlist sg;
+
+ sg_init_one(&sg, buf, size);
+
+ /* There should always be room for one buffer. */
+ if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
+ BUG();
+
+ virtqueue_kick(vq);
+}
+
+static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
+{
+
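+ /* Only one buffer may be outstanding at a time; queue one if idle. */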
+ if (!busy) {
+ busy = true;
+ init_completion(&have_data);
+ register_buffer(buf, size);
+ }
+
+ if (!wait)
+ return 0;
+
+ wait_for_completion(&have_data);
+
+ busy = false;
+
+ return data_avail;
+}
+
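+/*
+ * Called when the hwrng is unregistered: wait for any in-flight buffer
+ * so the host cannot keep writing into memory we no longer own.
+ */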
+static void virtio_cleanup(struct hwrng *rng)
+{
+ if (busy)
+ wait_for_completion(&have_data);
+}
+
+
+static struct hwrng virtio_hwrng = {
+ .name = "virtio",
+ .cleanup = virtio_cleanup,
+ .read = virtio_read,
+};
+
+static int virtrng_probe(struct virtio_device *vdev)
+{
+ int err;
+
+ /* We expect a single virtqueue. */
+ vq = virtio_find_single_vq(vdev, random_recv_done, "input");
+ if (IS_ERR(vq))
+ return PTR_ERR(vq);
+
+ err = hwrng_register(&virtio_hwrng);
+ if (err) {
+ vdev->config->del_vqs(vdev);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __devexit virtrng_remove(struct virtio_device *vdev)
+{
+ vdev->config->reset(vdev);
+ hwrng_unregister(&virtio_hwrng);
+ vdev->config->del_vqs(vdev);
+}
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct virtio_driver virtio_rng_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtrng_probe,
+ .remove = __devexit_p(virtrng_remove),
+};
+
+static int __init init(void)
+{
+ return register_virtio_driver(&virtio_rng_driver);
+}
+
+static void __exit fini(void)
+{
+ unregister_virtio_driver(&virtio_rng_driver);
+}
+module_init(init);
+module_exit(fini);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio random number driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
new file mode 100644
index 00000000..6e40072f
--- /dev/null
+++ b/drivers/char/i8k.c
@@ -0,0 +1,768 @@
+/*
+ * i8k.c -- Linux driver for accessing the SMM BIOS on Dell laptops.
+ * See http://www.debian.org/~dz/i8k/ for more information
+ * and for latest version of this driver.
+ *
+ * Copyright (C) 2001 Massimo Dal Zotto <dz@debian.org>
+ *
+ * Hwmon integration:
+ * Copyright (C) 2011 Jean Delvare <khali@linux-fr.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/dmi.h>
+#include <linux/capability.h>
+#include <linux/mutex.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/i8k.h>
+
+#define I8K_VERSION "1.14 21/02/2005"
+
+#define I8K_SMM_FN_STATUS 0x0025
+#define I8K_SMM_POWER_STATUS 0x0069
+#define I8K_SMM_SET_FAN 0x01a3
+#define I8K_SMM_GET_FAN 0x00a3
+#define I8K_SMM_GET_SPEED 0x02a3
+#define I8K_SMM_GET_TEMP 0x10a3
+#define I8K_SMM_GET_DELL_SIG1 0xfea3
+#define I8K_SMM_GET_DELL_SIG2 0xffa3
+#define I8K_SMM_BIOS_VERSION 0x00a6
+
+#define I8K_FAN_MULT 30
+#define I8K_MAX_TEMP 127
+
+#define I8K_FN_NONE 0x00
+#define I8K_FN_UP 0x01
+#define I8K_FN_DOWN 0x02
+#define I8K_FN_MUTE 0x04
+#define I8K_FN_MASK 0x07
+#define I8K_FN_SHIFT 8
+
+#define I8K_POWER_AC 0x05
+#define I8K_POWER_BATTERY 0x01
+
+#define I8K_TEMPERATURE_BUG 1
+
+static DEFINE_MUTEX(i8k_mutex);
+static char bios_version[4];
+static struct device *i8k_hwmon_dev;
+
+MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
+MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops");
+MODULE_LICENSE("GPL");
+
+static int force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Force loading without checking for supported models");
+
+static int ignore_dmi;
+module_param(ignore_dmi, bool, 0);
+MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
+
+static int restricted;
+module_param(restricted, bool, 0);
+MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
+
+static int power_status;
+module_param(power_status, bool, 0600);
+MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
+
+static int fan_mult = I8K_FAN_MULT;
+module_param(fan_mult, int, 0);
+MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with");
+
+static int i8k_open_fs(struct inode *inode, struct file *file);
+static long i8k_ioctl(struct file *, unsigned int, unsigned long);
+
+static const struct file_operations i8k_fops = {
+ .owner = THIS_MODULE,
+ .open = i8k_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .unlocked_ioctl = i8k_ioctl,
+};
+
+struct smm_regs {
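+ /* field order and packing must match the layout the SMM BIOS expects */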
+ unsigned int eax;
+ unsigned int ebx __attribute__ ((packed));
+ unsigned int ecx __attribute__ ((packed));
+ unsigned int edx __attribute__ ((packed));
+ unsigned int esi __attribute__ ((packed));
+ unsigned int edi __attribute__ ((packed));
+};
+
+static inline const char *i8k_get_dmi_data(int field)
+{
+ const char *dmi_data = dmi_get_system_info(field);
+
+ return dmi_data && *dmi_data ? dmi_data : "?";
+}
+
+/*
+ * Call the System Management Mode BIOS. Code provided by Jonathan Buzzard.
+ */
+static int i8k_smm(struct smm_regs *regs)
+{
+ int rc;
+ int eax = regs->eax;
+
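+ /*
+ * The "out %al,$0xb2" below raises a System Management Interrupt
+ * that the Dell BIOS traps; arguments pass in and out through the
+ * register block addressed by %eax. Bit 0 of rc is the carry flag,
+ * which the BIOS leaves set on failure.
+ */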
+#if defined(CONFIG_X86_64)
+ asm volatile("pushq %%rax\n\t"
+ "movl 0(%%rax),%%edx\n\t"
+ "pushq %%rdx\n\t"
+ "movl 4(%%rax),%%ebx\n\t"
+ "movl 8(%%rax),%%ecx\n\t"
+ "movl 12(%%rax),%%edx\n\t"
+ "movl 16(%%rax),%%esi\n\t"
+ "movl 20(%%rax),%%edi\n\t"
+ "popq %%rax\n\t"
+ "out %%al,$0xb2\n\t"
+ "out %%al,$0x84\n\t"
+ "xchgq %%rax,(%%rsp)\n\t"
+ "movl %%ebx,4(%%rax)\n\t"
+ "movl %%ecx,8(%%rax)\n\t"
+ "movl %%edx,12(%%rax)\n\t"
+ "movl %%esi,16(%%rax)\n\t"
+ "movl %%edi,20(%%rax)\n\t"
+ "popq %%rdx\n\t"
+ "movl %%edx,0(%%rax)\n\t"
+ "pushfq\n\t"
+ "popq %%rax\n\t"
+ "andl $1,%%eax\n"
+ :"=a"(rc)
+ : "a"(regs)
+ : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
+#else
+ asm volatile("pushl %%eax\n\t"
+ "movl 0(%%eax),%%edx\n\t"
+ "push %%edx\n\t"
+ "movl 4(%%eax),%%ebx\n\t"
+ "movl 8(%%eax),%%ecx\n\t"
+ "movl 12(%%eax),%%edx\n\t"
+ "movl 16(%%eax),%%esi\n\t"
+ "movl 20(%%eax),%%edi\n\t"
+ "popl %%eax\n\t"
+ "out %%al,$0xb2\n\t"
+ "out %%al,$0x84\n\t"
+ "xchgl %%eax,(%%esp)\n\t"
+ "movl %%ebx,4(%%eax)\n\t"
+ "movl %%ecx,8(%%eax)\n\t"
+ "movl %%edx,12(%%eax)\n\t"
+ "movl %%esi,16(%%eax)\n\t"
+ "movl %%edi,20(%%eax)\n\t"
+ "popl %%edx\n\t"
+ "movl %%edx,0(%%eax)\n\t"
+ "lahf\n\t"
+ "shrl $8,%%eax\n\t"
+ "andl $1,%%eax\n"
+ :"=a"(rc)
+ : "a"(regs)
+ : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
+#endif
+ if (rc != 0 || (regs->eax & 0xffff) == 0xffff || regs->eax == eax)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Read the bios version. Return the version as an integer corresponding
+ * to the ascii value, for example "A17" is returned as 0x00413137.
+ */
+static int i8k_get_bios_version(void)
+{
+ struct smm_regs regs = { .eax = I8K_SMM_BIOS_VERSION, };
+
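+ /* GNU "x ?: y": the i8k_smm() error code if non-zero, else regs.eax */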
+ return i8k_smm(&regs) ? : regs.eax;
+}
+
+/*
+ * Read the Fn key status.
+ */
+static int i8k_get_fn_status(void)
+{
+ struct smm_regs regs = { .eax = I8K_SMM_FN_STATUS, };
+ int rc;
+
+ if ((rc = i8k_smm(&regs)) < 0)
+ return rc;
+
+ switch ((regs.eax >> I8K_FN_SHIFT) & I8K_FN_MASK) {
+ case I8K_FN_UP:
+ return I8K_VOL_UP;
+ case I8K_FN_DOWN:
+ return I8K_VOL_DOWN;
+ case I8K_FN_MUTE:
+ return I8K_VOL_MUTE;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Read the power status.
+ */
+static int i8k_get_power_status(void)
+{
+ struct smm_regs regs = { .eax = I8K_SMM_POWER_STATUS, };
+ int rc;
+
+ if ((rc = i8k_smm(&regs)) < 0)
+ return rc;
+
+ return (regs.eax & 0xff) == I8K_POWER_AC ? I8K_AC : I8K_BATTERY;
+}
+
+/*
+ * Read the fan status.
+ */
+static int i8k_get_fan_status(int fan)
+{
+ struct smm_regs regs = { .eax = I8K_SMM_GET_FAN, };
+
+ regs.ebx = fan & 0xff;
+ return i8k_smm(&regs) ? : regs.eax & 0xff;
+}
+
+/*
+ * Read the fan speed in RPM.
+ */
+static int i8k_get_fan_speed(int fan)
+{
+ struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
+
+ regs.ebx = fan & 0xff;
+ return i8k_smm(&regs) ? : (regs.eax & 0xffff) * fan_mult;
+}
+
+/*
+ * Set the fan speed (off, low, high). Returns the new fan status.
+ */
+static int i8k_set_fan(int fan, int speed)
+{
+ struct smm_regs regs = { .eax = I8K_SMM_SET_FAN, };
+
+ speed = (speed < 0) ? 0 : ((speed > I8K_FAN_MAX) ? I8K_FAN_MAX : speed);
+ regs.ebx = (fan & 0xff) | (speed << 8);
+
+ return i8k_smm(&regs) ? : i8k_get_fan_status(fan);
+}
+
+/*
+ * Read the cpu temperature.
+ */
+static int i8k_get_temp(int sensor)
+{
+ struct smm_regs regs = { .eax = I8K_SMM_GET_TEMP, };
+ int rc;
+ int temp;
+
+#ifdef I8K_TEMPERATURE_BUG
+ static int prev;
+#endif
+ regs.ebx = sensor & 0xff;
+ if ((rc = i8k_smm(&regs)) < 0)
+ return rc;
+
+ temp = regs.eax & 0xff;
+
+#ifdef I8K_TEMPERATURE_BUG
+ /*
+ * Sometimes the temperature sensor returns 0x99, which is out of range.
+ * In this case we return (once) the previous cached value. For example:
+ # 1003655137 00000058 00005a4b
+ # 1003655138 00000099 00003a80 <--- 0x99 = 153 degrees
+ # 1003655139 00000054 00005c52
+ */
+ if (temp > I8K_MAX_TEMP) {
+ temp = prev;
+ prev = I8K_MAX_TEMP;
+ } else {
+ prev = temp;
+ }
+#endif
+
+ return temp;
+}
+
+static int i8k_get_dell_signature(int req_fn)
+{
+ struct smm_regs regs = { .eax = req_fn, };
+ int rc;
+
+ if ((rc = i8k_smm(&regs)) < 0)
+ return rc;
+
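+ /* 1145651527 is "DIAG" and 1145392204 is "DELL" in ASCII */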
+ return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1;
+}
+
+static int
+i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ int val = 0;
+ int speed;
+ unsigned char buff[16];
+ int __user *argp = (int __user *)arg;
+
+ if (!argp)
+ return -EINVAL;
+
+ switch (cmd) {
+ case I8K_BIOS_VERSION:
+ val = i8k_get_bios_version();
+ break;
+
+ case I8K_MACHINE_ID:
+ memset(buff, 0, 16);
+ strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), sizeof(buff));
+ break;
+
+ case I8K_FN_STATUS:
+ val = i8k_get_fn_status();
+ break;
+
+ case I8K_POWER_STATUS:
+ val = i8k_get_power_status();
+ break;
+
+ case I8K_GET_TEMP:
+ val = i8k_get_temp(0);
+ break;
+
+ case I8K_GET_SPEED:
+ if (copy_from_user(&val, argp, sizeof(int)))
+ return -EFAULT;
+
+ val = i8k_get_fan_speed(val);
+ break;
+
+ case I8K_GET_FAN:
+ if (copy_from_user(&val, argp, sizeof(int)))
+ return -EFAULT;
+
+ val = i8k_get_fan_status(val);
+ break;
+
+ case I8K_SET_FAN:
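+ /* userspace passes two consecutive ints: fan index, then speed */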
+ if (restricted && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&val, argp, sizeof(int)))
+ return -EFAULT;
+
+ if (copy_from_user(&speed, argp + 1, sizeof(int)))
+ return -EFAULT;
+
+ val = i8k_set_fan(val, speed);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (val < 0)
+ return val;
+
+ switch (cmd) {
+ case I8K_BIOS_VERSION:
+ if (copy_to_user(argp, &val, 4))
+ return -EFAULT;
+
+ break;
+ case I8K_MACHINE_ID:
+ if (copy_to_user(argp, buff, 16))
+ return -EFAULT;
+
+ break;
+ default:
+ if (copy_to_user(argp, &val, sizeof(int)))
+ return -EFAULT;
+
+ break;
+ }
+
+ return 0;
+}
+
+static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ mutex_lock(&i8k_mutex);
+ ret = i8k_ioctl_unlocked(fp, cmd, arg);
+ mutex_unlock(&i8k_mutex);
+
+ return ret;
+}
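+
+/*
+ * Hypothetical userspace sketch (not part of this file): reading the
+ * left fan speed through the ioctl interface on /proc/i8k.
+ *
+ *	int fd = open("/proc/i8k", O_RDONLY);
+ *	int arg = I8K_FAN_LEFT;
+ *
+ *	if (fd >= 0 && ioctl(fd, I8K_GET_SPEED, &arg) == 0)
+ *		printf("left fan: %d rpm\n", arg);
+ */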
+
+/*
+ * Print the information for /proc/i8k.
+ */
+static int i8k_proc_show(struct seq_file *seq, void *offset)
+{
+ int fn_key, cpu_temp, ac_power;
+ int left_fan, right_fan, left_speed, right_speed;
+
+ cpu_temp = i8k_get_temp(0); /* 11100 µs */
+ left_fan = i8k_get_fan_status(I8K_FAN_LEFT); /* 580 µs */
+ right_fan = i8k_get_fan_status(I8K_FAN_RIGHT); /* 580 µs */
+ left_speed = i8k_get_fan_speed(I8K_FAN_LEFT); /* 580 µs */
+ right_speed = i8k_get_fan_speed(I8K_FAN_RIGHT); /* 580 µs */
+ fn_key = i8k_get_fn_status(); /* 750 µs */
+ if (power_status)
+ ac_power = i8k_get_power_status(); /* 14700 µs */
+ else
+ ac_power = -1;
+
+ /*
+ * Info:
+ *
+ * 1) Format version (this will change if format changes)
+ * 2) BIOS version
+ * 3) BIOS machine ID
+ * 4) Cpu temperature
+ * 5) Left fan status
+ * 6) Right fan status
+ * 7) Left fan speed
+ * 8) Right fan speed
+ * 9) AC power
+ * 10) Fn Key status
+ */
+ return seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
+ I8K_PROC_FMT,
+ bios_version,
+ i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+ cpu_temp,
+ left_fan, right_fan, left_speed, right_speed,
+ ac_power, fn_key);
+}
+
+static int i8k_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, i8k_proc_show, NULL);
+}
+
+
+/*
+ * Hwmon interface
+ */
+
+static ssize_t i8k_hwmon_show_temp(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int cpu_temp;
+
+ cpu_temp = i8k_get_temp(0);
+ if (cpu_temp < 0)
+ return cpu_temp;
+ return sprintf(buf, "%d\n", cpu_temp * 1000);
+}
+
+static ssize_t i8k_hwmon_show_fan(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ int fan_speed;
+
+ fan_speed = i8k_get_fan_speed(index);
+ if (fan_speed < 0)
+ return fan_speed;
+ return sprintf(buf, "%d\n", fan_speed);
+}
+
+static ssize_t i8k_hwmon_show_label(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ static const char *labels[4] = {
+ "i8k",
+ "CPU",
+ "Left Fan",
+ "Right Fan",
+ };
+ int index = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%s\n", labels[index]);
+}
+
+static DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL);
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
+ I8K_FAN_LEFT);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
+ I8K_FAN_RIGHT);
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, i8k_hwmon_show_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_label, NULL, 3);
+
+static void i8k_hwmon_remove_files(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_temp1_input);
+ device_remove_file(dev, &sensor_dev_attr_fan1_input.dev_attr);
+ device_remove_file(dev, &sensor_dev_attr_fan2_input.dev_attr);
+ device_remove_file(dev, &sensor_dev_attr_temp1_label.dev_attr);
+ device_remove_file(dev, &sensor_dev_attr_fan1_label.dev_attr);
+ device_remove_file(dev, &sensor_dev_attr_fan2_label.dev_attr);
+ device_remove_file(dev, &sensor_dev_attr_name.dev_attr);
+}
+
+static int __init i8k_init_hwmon(void)
+{
+ int err;
+
+ i8k_hwmon_dev = hwmon_device_register(NULL);
+ if (IS_ERR(i8k_hwmon_dev)) {
+ err = PTR_ERR(i8k_hwmon_dev);
+ i8k_hwmon_dev = NULL;
+ printk(KERN_ERR "i8k: hwmon registration failed (%d)\n", err);
+ return err;
+ }
+
+ /* Required name attribute */
+ err = device_create_file(i8k_hwmon_dev,
+ &sensor_dev_attr_name.dev_attr);
+ if (err)
+ goto exit_unregister;
+
+ /* CPU temperature attributes, if temperature reading is OK */
+ err = i8k_get_temp(0);
+ if (err < 0) {
+ dev_dbg(i8k_hwmon_dev,
+ "Not creating temperature attributes (%d)\n", err);
+ } else {
+ err = device_create_file(i8k_hwmon_dev, &dev_attr_temp1_input);
+ if (err)
+ goto exit_remove_files;
+ err = device_create_file(i8k_hwmon_dev,
+ &sensor_dev_attr_temp1_label.dev_attr);
+ if (err)
+ goto exit_remove_files;
+ }
+
+ /* Left fan attributes, if left fan is present */
+ err = i8k_get_fan_status(I8K_FAN_LEFT);
+ if (err < 0) {
+ dev_dbg(i8k_hwmon_dev,
+ "Not creating %s fan attributes (%d)\n", "left", err);
+ } else {
+ err = device_create_file(i8k_hwmon_dev,
+ &sensor_dev_attr_fan1_input.dev_attr);
+ if (err)
+ goto exit_remove_files;
+ err = device_create_file(i8k_hwmon_dev,
+ &sensor_dev_attr_fan1_label.dev_attr);
+ if (err)
+ goto exit_remove_files;
+ }
+
+ /* Right fan attributes, if right fan is present */
+ err = i8k_get_fan_status(I8K_FAN_RIGHT);
+ if (err < 0) {
+ dev_dbg(i8k_hwmon_dev,
+ "Not creating %s fan attributes (%d)\n", "right", err);
+ } else {
+ err = device_create_file(i8k_hwmon_dev,
+ &sensor_dev_attr_fan2_input.dev_attr);
+ if (err)
+ goto exit_remove_files;
+ err = device_create_file(i8k_hwmon_dev,
+ &sensor_dev_attr_fan2_label.dev_attr);
+ if (err)
+ goto exit_remove_files;
+ }
+
+ return 0;
+
+ exit_remove_files:
+ i8k_hwmon_remove_files(i8k_hwmon_dev);
+ exit_unregister:
+ hwmon_device_unregister(i8k_hwmon_dev);
+ return err;
+}
+
+static void __exit i8k_exit_hwmon(void)
+{
+ i8k_hwmon_remove_files(i8k_hwmon_dev);
+ hwmon_device_unregister(i8k_hwmon_dev);
+}
+
+static struct dmi_system_id __initdata i8k_dmi_table[] = {
+ {
+ .ident = "Dell Inspiron",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron"),
+ },
+ },
+ {
+ .ident = "Dell Latitude",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude"),
+ },
+ },
+ {
+ .ident = "Dell Inspiron 2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron"),
+ },
+ },
+ {
+ .ident = "Dell Latitude 2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude"),
+ },
+ },
+ { /* UK Inspiron 6400 */
+ .ident = "Dell Inspiron 3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MM061"),
+ },
+ },
+ {
+ .ident = "Dell Inspiron 3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MP061"),
+ },
+ },
+ {
+ .ident = "Dell Precision",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision"),
+ },
+ },
+ {
+ .ident = "Dell Vostro",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"),
+ },
+ },
+ { }
+};
+
+/*
+ * Probe for the presence of a supported laptop.
+ */
+static int __init i8k_probe(void)
+{
+ char buff[4];
+ int version;
+
+ /*
+ * Get DMI information
+ */
+ if (!dmi_check_system(i8k_dmi_table)) {
+ if (!ignore_dmi && !force)
+ return -ENODEV;
+
+ printk(KERN_INFO "i8k: not running on a supported Dell system.\n");
+ printk(KERN_INFO "i8k: vendor=%s, model=%s, version=%s\n",
+ i8k_get_dmi_data(DMI_SYS_VENDOR),
+ i8k_get_dmi_data(DMI_PRODUCT_NAME),
+ i8k_get_dmi_data(DMI_BIOS_VERSION));
+ }
+
+ strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION), sizeof(bios_version));
+
+ /*
+ * Get SMM Dell signature
+ */
+ if (i8k_get_dell_signature(I8K_SMM_GET_DELL_SIG1) &&
+ i8k_get_dell_signature(I8K_SMM_GET_DELL_SIG2)) {
+ printk(KERN_ERR "i8k: unable to get SMM Dell signature\n");
+ if (!force)
+ return -ENODEV;
+ }
+
+ /*
+ * Get SMM BIOS version.
+ */
+ version = i8k_get_bios_version();
+ if (version <= 0) {
+ printk(KERN_WARNING "i8k: unable to get SMM BIOS version\n");
+ } else {
+ buff[0] = (version >> 16) & 0xff;
+ buff[1] = (version >> 8) & 0xff;
+ buff[2] = (version) & 0xff;
+ buff[3] = '\0';
+ /*
+ * If DMI BIOS version is unknown use SMM BIOS version.
+ */
+ if (!dmi_get_system_info(DMI_BIOS_VERSION))
+ strlcpy(bios_version, buff, sizeof(bios_version));
+
+ /*
+ * Check if the two versions match.
+ */
+ if (strncmp(buff, bios_version, sizeof(bios_version)) != 0)
+ printk(KERN_WARNING "i8k: BIOS version mismatch: %s != %s\n",
+ buff, bios_version);
+ }
+
+ return 0;
+}
+
+static int __init i8k_init(void)
+{
+ struct proc_dir_entry *proc_i8k;
+ int err;
+
+ /* Are we running on a supported laptop? */
+ if (i8k_probe())
+ return -ENODEV;
+
+ /* Register the proc entry */
+ proc_i8k = proc_create("i8k", 0, NULL, &i8k_fops);
+ if (!proc_i8k)
+ return -ENOENT;
+
+ err = i8k_init_hwmon();
+ if (err)
+ goto exit_remove_proc;
+
+ printk(KERN_INFO
+ "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n",
+ I8K_VERSION);
+
+ return 0;
+
+ exit_remove_proc:
+ remove_proc_entry("i8k", NULL);
+ return err;
+}
+
+static void __exit i8k_exit(void)
+{
+ i8k_exit_hwmon();
+ remove_proc_entry("i8k", NULL);
+}
+
+module_init(i8k_init);
+module_exit(i8k_exit);
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
new file mode 100644
index 00000000..0baa8fab
--- /dev/null
+++ b/drivers/char/ipmi/Kconfig
@@ -0,0 +1,64 @@
+#
+# IPMI device configuration
+#
+
+menuconfig IPMI_HANDLER
+ tristate 'IPMI top-level message handler'
+ depends on HAS_IOMEM
+ help
+ This enables the central IPMI message handler, required for IPMI
+ to work.
+
+ IPMI is a standard for managing sensors (temperature,
+ voltage, etc.) in a system.
+
+ See <file:Documentation/IPMI.txt> for more details on the driver.
+
+ If unsure, say N.
+
+if IPMI_HANDLER
+
+config IPMI_PANIC_EVENT
+ bool 'Generate a panic event to all BMCs on a panic'
+ help
+ When a panic occurs, this will cause the IPMI message handler to
+ generate an IPMI event describing the panic to each interface
+ registered with the message handler.
+
+config IPMI_PANIC_STRING
+ bool 'Generate OEM events containing the panic string'
+ depends on IPMI_PANIC_EVENT
+ help
+ When a panic occurs, this will cause the IPMI message handler to
+ generate IPMI OEM type f0 events holding the IPMB address of the
+ panic generator (byte 4 of the event), a sequence number for the
+ string (byte 5 of the event) and part of the string (the rest of the
+ event). Bytes 1, 2, and 3 are the normal usage for an OEM event.
+ You can fetch these events and use the sequence numbers to piece the
+ string together.
+
+config IPMI_DEVICE_INTERFACE
+ tristate 'Device interface for IPMI'
+ help
+ This provides an IOCTL interface to the IPMI message handler so
+ userland processes may use IPMI. It supports poll() and select().
+
+config IPMI_SI
+ tristate 'IPMI System Interface handler'
+ help
+ Provides a driver for System Interfaces (KCS, SMIC, BT),
+ all three of which are built into this module. If
+ you are using IPMI, you should probably say "y" here.
+
+config IPMI_WATCHDOG
+ tristate 'IPMI Watchdog Timer'
+ help
+ This enables the IPMI watchdog timer.
+
+config IPMI_POWEROFF
+ tristate 'IPMI Poweroff'
+ help
+ This enables a function to power off the system with IPMI if
+ the IPMI management controller is capable of this.
+
+endif # IPMI_HANDLER
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
new file mode 100644
index 00000000..16a93648
--- /dev/null
+++ b/drivers/char/ipmi/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the ipmi drivers.
+#
+
+ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
+
+obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
+obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
+obj-$(CONFIG_IPMI_SI) += ipmi_si.o
+obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
+obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
new file mode 100644
index 00000000..3ed20e8a
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -0,0 +1,705 @@
+/*
+ * ipmi_bt_sm.c
+ *
+ * The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part
+ * of the driver architecture at http://sourceforge.net/projects/openipmi
+ *
+ * Author: Rocky Craig <first.last@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi_msgdefs.h> /* for completion codes */
+#include "ipmi_si_sm.h"
+
+#define BT_DEBUG_OFF 0 /* Used in production */
+#define BT_DEBUG_ENABLE 1 /* Generic messages */
+#define BT_DEBUG_MSG 2 /* Prints all request/response buffers */
+#define BT_DEBUG_STATES 4 /* Verbose look at state changes */
+/*
+ * BT_DEBUG_OFF must be zero to correspond to the default uninitialized
+ * value
+ */
+
+static int bt_debug; /* 0 == BT_DEBUG_OFF */
+
+module_param(bt_debug, int, 0644);
+MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+/*
+ * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
+ * and 64 byte buffers. However, one HP implementation wants 255 bytes of
+ * buffer (with a documented message of 160 bytes) so go for the max.
+ * Since the Open IPMI architecture is single-message oriented at this
+ * stage, the queue depth of BT is of no concern.
+ */
+
+#define BT_NORMAL_TIMEOUT 5 /* seconds */
+#define BT_NORMAL_RETRY_LIMIT 2
+#define BT_RESET_DELAY 6 /* seconds after warm reset */
+
+/*
+ * States are written in chronological order and usually cover
+ * multiple rows of the state table discussion in the IPMI spec.
+ */
+
+enum bt_states {
+ BT_STATE_IDLE = 0, /* Order is critical in this list */
+ BT_STATE_XACTION_START,
+ BT_STATE_WRITE_BYTES,
+ BT_STATE_WRITE_CONSUME,
+ BT_STATE_READ_WAIT,
+ BT_STATE_CLEAR_B2H,
+ BT_STATE_READ_BYTES,
+ BT_STATE_RESET1, /* These must come last */
+ BT_STATE_RESET2,
+ BT_STATE_RESET3,
+ BT_STATE_RESTART,
+ BT_STATE_PRINTME,
+ BT_STATE_CAPABILITIES_BEGIN,
+ BT_STATE_CAPABILITIES_END,
+ BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */
+};
+
+/*
+ * Macros seen at the end of state "case" blocks. They help with legibility
+ * and debugging.
+ */
+
+#define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; }
+
+#define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; }
+
+struct si_sm_data {
+ enum bt_states state;
+ unsigned char seq; /* BT sequence number */
+ struct si_sm_io *io;
+ unsigned char write_data[IPMI_MAX_MSG_LENGTH];
+ int write_count;
+ unsigned char read_data[IPMI_MAX_MSG_LENGTH];
+ int read_count;
+ int truncated;
+ long timeout; /* microseconds countdown */
+ int error_retries; /* end of "common" fields */
+ int nonzero_status; /* hung BMCs stay all 0 */
+ enum bt_states complete; /* to divert the state machine */
+ int BT_CAP_outreqs;
+ long BT_CAP_req2rsp;
+ int BT_CAP_retries; /* Recommended retries */
+};
+
+#define BT_CLR_WR_PTR 0x01 /* See IPMI 1.5 table 11.6.4 */
+#define BT_CLR_RD_PTR 0x02
+#define BT_H2B_ATN 0x04
+#define BT_B2H_ATN 0x08
+#define BT_SMS_ATN 0x10
+#define BT_OEM0 0x20
+#define BT_H_BUSY 0x40
+#define BT_B_BUSY 0x80
+
+/*
+ * Some bits are toggled on each write: write once to set it, once
+ * more to clear it; writing a zero does nothing. To absolutely
+ * clear it, check its state and write if set. This avoids the "get
+ * current then use as mask" scheme to modify one bit. Note that the
+ * variable "bt" is hardcoded into these macros.
+ */
+
+#define BT_STATUS bt->io->inputb(bt->io, 0)
+#define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x)
+
+#define BMC2HOST bt->io->inputb(bt->io, 1)
+#define HOST2BMC(x) bt->io->outputb(bt->io, 1, x)
+
+#define BT_INTMASK_R bt->io->inputb(bt->io, 2)
+#define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x)
+
+/*
+ * Convenience routines for debugging. These are not multi-open safe!
+ * Note the macros have hardcoded variables in them.
+ */
+
+static char *state2txt(unsigned char state)
+{
+ switch (state) {
+ case BT_STATE_IDLE: return("IDLE");
+ case BT_STATE_XACTION_START: return("XACTION");
+ case BT_STATE_WRITE_BYTES: return("WR_BYTES");
+ case BT_STATE_WRITE_CONSUME: return("WR_CONSUME");
+ case BT_STATE_READ_WAIT: return("RD_WAIT");
+ case BT_STATE_CLEAR_B2H: return("CLEAR_B2H");
+ case BT_STATE_READ_BYTES: return("RD_BYTES");
+ case BT_STATE_RESET1: return("RESET1");
+ case BT_STATE_RESET2: return("RESET2");
+ case BT_STATE_RESET3: return("RESET3");
+ case BT_STATE_RESTART: return("RESTART");
+ case BT_STATE_LONG_BUSY: return("LONG_BUSY");
+ case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
+ case BT_STATE_CAPABILITIES_END: return("CAP_END");
+ }
+ return("BAD STATE");
+}
+#define STATE2TXT state2txt(bt->state)
+
+static char *status2txt(unsigned char status)
+{
+ /*
+ * This cannot be called by two threads at the same time and
+ * the buffer is always consumed immediately, so the static is
+ * safe to use.
+ */
+ static char buf[40];
+
+ strcpy(buf, "[ ");
+ if (status & BT_B_BUSY)
+ strcat(buf, "B_BUSY ");
+ if (status & BT_H_BUSY)
+ strcat(buf, "H_BUSY ");
+ if (status & BT_OEM0)
+ strcat(buf, "OEM0 ");
+ if (status & BT_SMS_ATN)
+ strcat(buf, "SMS ");
+ if (status & BT_B2H_ATN)
+ strcat(buf, "B2H ");
+ if (status & BT_H2B_ATN)
+ strcat(buf, "H2B ");
+ strcat(buf, "]");
+ return buf;
+}
+#define STATUS2TXT status2txt(status)
+
+/* called externally at insmod time, and internally on cleanup */
+
+static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
+{
+ memset(bt, 0, sizeof(struct si_sm_data));
+ if (bt->io != io) {
+ /* external: one-time only things */
+ bt->io = io;
+ bt->seq = 0;
+ }
+ bt->state = BT_STATE_IDLE; /* start here */
+ bt->complete = BT_STATE_IDLE; /* end here */
+ bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * 1000000;
+ bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
+ /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
+ return 3; /* We claim 3 bytes of space; ought to check SPMI table */
+}
+
+/* Jam a completion code (probably an error) into a response */
+
+static void force_result(struct si_sm_data *bt, unsigned char completion_code)
+{
+ bt->read_data[0] = 4; /* # following bytes */
+ bt->read_data[1] = bt->write_data[1] | 4; /* Odd NetFn/LUN */
+ bt->read_data[2] = bt->write_data[2]; /* seq (ignored) */
+ bt->read_data[3] = bt->write_data[3]; /* Command */
+ bt->read_data[4] = completion_code;
+ bt->read_count = 5;
+}
+
+/* The upper state machine starts here */
+
+static int bt_start_transaction(struct si_sm_data *bt,
+ unsigned char *data,
+ unsigned int size)
+{
+ unsigned int i;
+
+ if (size < 2)
+ return IPMI_REQ_LEN_INVALID_ERR;
+ if (size > IPMI_MAX_MSG_LENGTH)
+ return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+ if (bt->state == BT_STATE_LONG_BUSY)
+ return IPMI_NODE_BUSY_ERR;
+
+ if (bt->state != BT_STATE_IDLE)
+ return IPMI_NOT_IN_MY_STATE_ERR;
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ printk(KERN_WARNING "BT: +++++++++++++++++ New command\n");
+ printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2);
+ for (i = 0; i < size; i ++)
+ printk(" %02x", data[i]);
+ printk("\n");
+ }
+ bt->write_data[0] = size + 1; /* all data plus seq byte */
+ bt->write_data[1] = *data; /* NetFn/LUN */
+ bt->write_data[2] = bt->seq++;
+ memcpy(bt->write_data + 3, data + 1, size - 1);
+ bt->write_count = size + 2;
+ bt->error_retries = 0;
+ bt->nonzero_status = 0;
+ bt->truncated = 0;
+ bt->state = BT_STATE_XACTION_START;
+ bt->timeout = bt->BT_CAP_req2rsp;
+ force_result(bt, IPMI_ERR_UNSPECIFIED);
+ return 0;
+}
+
+/*
+ * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
+ * it calls this. Strip out the length and seq bytes.
+ */
+
+static int bt_get_result(struct si_sm_data *bt,
+ unsigned char *data,
+ unsigned int length)
+{
+ int i, msg_len;
+
+ msg_len = bt->read_count - 2; /* account for length & seq */
+ if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) {
+ force_result(bt, IPMI_ERR_UNSPECIFIED);
+ msg_len = 3;
+ }
+ data[0] = bt->read_data[1];
+ data[1] = bt->read_data[3];
+ if (length < msg_len || bt->truncated) {
+ data[2] = IPMI_ERR_MSG_TRUNCATED;
+ msg_len = 3;
+ } else
+ memcpy(data + 2, bt->read_data + 4, msg_len - 2);
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ printk(KERN_WARNING "BT: result %d bytes:", msg_len);
+ for (i = 0; i < msg_len; i++)
+ printk(" %02x", data[i]);
+ printk("\n");
+ }
+ return msg_len;
+}
+
+/* This bit's functionality is optional */
+#define BT_BMC_HWRST 0x80
+
+static void reset_flags(struct si_sm_data *bt)
+{
+ if (bt_debug)
+ printk(KERN_WARNING "IPMI BT: flag reset %s\n",
+ status2txt(BT_STATUS));
+ if (BT_STATUS & BT_H_BUSY)
+ BT_CONTROL(BT_H_BUSY); /* force clear */
+ BT_CONTROL(BT_CLR_WR_PTR); /* always reset */
+ BT_CONTROL(BT_SMS_ATN); /* always clear */
+ BT_INTMASK_W(BT_BMC_HWRST);
+}
+
+/*
+ * Get rid of an unwanted/stale response. This should only be needed for
+ * BMCs that support multiple outstanding requests.
+ */
+
+static void drain_BMC2HOST(struct si_sm_data *bt)
+{
+ int i, size;
+
+ if (!(BT_STATUS & BT_B2H_ATN)) /* Not signalling a response */
+ return;
+
+ BT_CONTROL(BT_H_BUSY); /* now set */
+ BT_CONTROL(BT_B2H_ATN); /* always clear */
+ BT_STATUS; /* pause */
+ BT_CONTROL(BT_B2H_ATN); /* some BMCs are stubborn */
+ BT_CONTROL(BT_CLR_RD_PTR); /* always reset */
+ if (bt_debug)
+ printk(KERN_WARNING "IPMI BT: stale response %s; ",
+ status2txt(BT_STATUS));
+ size = BMC2HOST;
+ for (i = 0; i < size ; i++)
+ BMC2HOST;
+ BT_CONTROL(BT_H_BUSY); /* now clear */
+ if (bt_debug)
+ printk("drained %d bytes\n", size + 1);
+}
+
+static inline void write_all_bytes(struct si_sm_data *bt)
+{
+ int i;
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
+ bt->write_count, bt->seq);
+ for (i = 0; i < bt->write_count; i++)
+ printk(" %02x", bt->write_data[i]);
+ printk("\n");
+ }
+ for (i = 0; i < bt->write_count; i++)
+ HOST2BMC(bt->write_data[i]);
+}
+
+static inline int read_all_bytes(struct si_sm_data *bt)
+{
+ unsigned char i;
+
+ /*
+ * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+ * Keep layout of first four bytes aligned with write_data[]
+ */
+
+ bt->read_data[0] = BMC2HOST;
+ bt->read_count = bt->read_data[0];
+
+ if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) {
+ if (bt_debug & BT_DEBUG_MSG)
+ printk(KERN_WARNING "BT: bad raw rsp len=%d\n",
+ bt->read_count);
+ bt->truncated = 1;
+ return 1; /* let next XACTION START clean it up */
+ }
+ for (i = 1; i <= bt->read_count; i++)
+ bt->read_data[i] = BMC2HOST;
+ bt->read_count++; /* Account internally for length byte */
+
+ if (bt_debug & BT_DEBUG_MSG) {
+ int max = bt->read_count;
+
+ printk(KERN_WARNING "BT: got %d bytes seq=0x%02X",
+ max, bt->read_data[2]);
+ if (max > 16)
+ max = 16;
+ for (i = 0; i < max; i++)
+ printk(KERN_CONT " %02x", bt->read_data[i]);
+ printk(KERN_CONT "%s\n", bt->read_count == max ? "" : " ...");
+ }
+
+ /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
+ if ((bt->read_data[3] == bt->write_data[3]) &&
+ (bt->read_data[2] == bt->write_data[2]) &&
+ ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
+ return 1;
+
+ if (bt_debug & BT_DEBUG_MSG)
+ printk(KERN_WARNING "IPMI BT: bad packet: "
+ "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
+ bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3],
+ bt->read_data[1], bt->read_data[2], bt->read_data[3]);
+ return 0;
+}
+
+/* Restart if retries are left, or return an error completion code */
+
+static enum si_sm_result error_recovery(struct si_sm_data *bt,
+ unsigned char status,
+ unsigned char cCode)
+{
+ char *reason;
+
+ bt->timeout = bt->BT_CAP_req2rsp;
+
+ switch (cCode) {
+ case IPMI_TIMEOUT_ERR:
+ reason = "timeout";
+ break;
+ default:
+ reason = "internal error";
+ break;
+ }
+
+ printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */
+ reason, STATE2TXT, STATUS2TXT);
+
+ /*
+ * Per the IPMI spec, retries are based on the sequence number
+ * known only to this module, so manage a restart here.
+ */
+ (bt->error_retries)++;
+ if (bt->error_retries < bt->BT_CAP_retries) {
+ printk("%d retries left\n",
+ bt->BT_CAP_retries - bt->error_retries);
+ bt->state = BT_STATE_RESTART;
+ return SI_SM_CALL_WITHOUT_DELAY;
+ }
+
+ printk(KERN_WARNING "failed %d retries, sending error response\n",
+ bt->BT_CAP_retries);
+ if (!bt->nonzero_status)
+ printk(KERN_ERR "IPMI BT: stuck, try power cycle\n");
+
+ /* this is most likely during insmod */
+ else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) {
+ printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n");
+ bt->state = BT_STATE_RESET1;
+ return SI_SM_CALL_WITHOUT_DELAY;
+ }
+
+ /*
+ * Concoct a useful error message, set up the next state, and
+ * be done with this sequence.
+ */
+
+ bt->state = BT_STATE_IDLE;
+ switch (cCode) {
+ case IPMI_TIMEOUT_ERR:
+ if (status & BT_B_BUSY) {
+ cCode = IPMI_NODE_BUSY_ERR;
+ bt->state = BT_STATE_LONG_BUSY;
+ }
+ break;
+ default:
+ break;
+ }
+ force_result(bt, cCode);
+ return SI_SM_TRANSACTION_COMPLETE;
+}
+
+/* Check status and (usually) take action and change this state machine. */
+
+static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
+{
+ unsigned char status, BT_CAP[8];
+ static enum bt_states last_printed = BT_STATE_PRINTME;
+ int i;
+
+ status = BT_STATUS;
+ bt->nonzero_status |= status;
+ if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
+ printk(KERN_WARNING "BT: %s %s TO=%ld - %ld\n",
+ STATE2TXT,
+ STATUS2TXT,
+ bt->timeout,
+ time);
+ last_printed = bt->state;
+ }
+
+ /*
+ * Commands that time out may still (eventually) provide a response.
+ * This stale response will get in the way of a new response so remove
+ * it if possible (hopefully during IDLE). Even if it comes up later
+ * it will be rejected by its (now-forgotten) seq number.
+ */
+
+ if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
+ drain_BMC2HOST(bt);
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ }
+
+ if ((bt->state != BT_STATE_IDLE) &&
+ (bt->state < BT_STATE_PRINTME)) {
+ /* check timeout */
+ bt->timeout -= time;
+ if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
+ return error_recovery(bt,
+ status,
+ IPMI_TIMEOUT_ERR);
+ }
+
+ switch (bt->state) {
+
+ /*
+ * Idle state first checks for asynchronous messages from another
+ * channel, then does some opportunistic housekeeping.
+ */
+
+ case BT_STATE_IDLE:
+ if (status & BT_SMS_ATN) {
+ BT_CONTROL(BT_SMS_ATN); /* clear it */
+ return SI_SM_ATTN;
+ }
+
+ if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
+ BT_CONTROL(BT_H_BUSY);
+
+ /* Read BT capabilities if it hasn't been done yet */
+ if (!bt->BT_CAP_outreqs)
+ BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
+ SI_SM_CALL_WITHOUT_DELAY);
+ bt->timeout = bt->BT_CAP_req2rsp;
+ BT_SI_SM_RETURN(SI_SM_IDLE);
+
+ case BT_STATE_XACTION_START:
+ if (status & (BT_B_BUSY | BT_H2B_ATN))
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ if (BT_STATUS & BT_H_BUSY)
+ BT_CONTROL(BT_H_BUSY); /* force clear */
+ BT_STATE_CHANGE(BT_STATE_WRITE_BYTES,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_WRITE_BYTES:
+ if (status & BT_H_BUSY)
+ BT_CONTROL(BT_H_BUSY); /* clear */
+ BT_CONTROL(BT_CLR_WR_PTR);
+ write_all_bytes(bt);
+ BT_CONTROL(BT_H2B_ATN); /* can clear too fast to catch */
+ BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_WRITE_CONSUME:
+ if (status & (BT_B_BUSY | BT_H2B_ATN))
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ /* Spinning hard can suppress B2H_ATN and force a timeout */
+
+ case BT_STATE_READ_WAIT:
+ if (!(status & BT_B2H_ATN))
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ BT_CONTROL(BT_H_BUSY); /* set */
+
+ /*
+ * Uncached, ordered writes should just proceed serially, but
+ * some BMCs don't clear B2H_ATN with one hit. Fast-path a
+ * workaround without too much penalty to the general case.
+ */
+
+ BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */
+ BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_CLEAR_B2H:
+ if (status & BT_B2H_ATN) {
+ /* keep hitting it */
+ BT_CONTROL(BT_B2H_ATN);
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ }
+ BT_STATE_CHANGE(BT_STATE_READ_BYTES,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_READ_BYTES:
+ if (!(status & BT_H_BUSY))
+ /* check in case of retry */
+ BT_CONTROL(BT_H_BUSY);
+ BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */
+ i = read_all_bytes(bt); /* true == packet seq match */
+ BT_CONTROL(BT_H_BUSY); /* NOW clear */
+ if (!i) /* Not my message */
+ BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+ SI_SM_CALL_WITHOUT_DELAY);
+ bt->state = bt->complete;
+ return bt->state == BT_STATE_IDLE ? /* where to next? */
+ SI_SM_TRANSACTION_COMPLETE : /* normal */
+ SI_SM_CALL_WITHOUT_DELAY; /* Startup magic */
+
+ case BT_STATE_LONG_BUSY: /* For example: after FW update */
+ if (!(status & BT_B_BUSY)) {
+ reset_flags(bt); /* next state is now IDLE */
+ bt_init_data(bt, bt->io);
+ }
+ return SI_SM_CALL_WITH_DELAY; /* No repeat printing */
+
+ case BT_STATE_RESET1:
+ reset_flags(bt);
+ drain_BMC2HOST(bt);
+ BT_STATE_CHANGE(BT_STATE_RESET2,
+ SI_SM_CALL_WITH_DELAY);
+
+ case BT_STATE_RESET2: /* Send a soft reset */
+ BT_CONTROL(BT_CLR_WR_PTR);
+ HOST2BMC(3); /* number of bytes following */
+ HOST2BMC(0x18); /* NetFn/LUN == Application, LUN 0 */
+ HOST2BMC(42); /* Sequence number */
+ HOST2BMC(3); /* Cmd == Soft reset */
+ BT_CONTROL(BT_H2B_ATN);
+ bt->timeout = BT_RESET_DELAY * 1000000;
+ BT_STATE_CHANGE(BT_STATE_RESET3,
+ SI_SM_CALL_WITH_DELAY);
+
+ case BT_STATE_RESET3: /* Hold off everything for a bit */
+ if (bt->timeout > 0)
+ return SI_SM_CALL_WITH_DELAY;
+ drain_BMC2HOST(bt);
+ BT_STATE_CHANGE(BT_STATE_RESTART,
+ SI_SM_CALL_WITH_DELAY);
+
+ case BT_STATE_RESTART: /* don't reset retries or seq! */
+ bt->read_count = 0;
+ bt->nonzero_status = 0;
+ bt->timeout = bt->BT_CAP_req2rsp;
+ BT_STATE_CHANGE(BT_STATE_XACTION_START,
+ SI_SM_CALL_WITH_DELAY);
+
+ /*
+ * Get BT Capabilities, using timing of upper level state machine.
+ * Set outreqs to prevent infinite loop on timeout.
+ */
+ case BT_STATE_CAPABILITIES_BEGIN:
+ bt->BT_CAP_outreqs = 1;
+ {
+ unsigned char GetBT_CAP[] = { 0x18, 0x36 };
+ bt->state = BT_STATE_IDLE;
+ bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
+ }
+ bt->complete = BT_STATE_CAPABILITIES_END;
+ BT_STATE_CHANGE(BT_STATE_XACTION_START,
+ SI_SM_CALL_WITH_DELAY);
+
+ case BT_STATE_CAPABILITIES_END:
+ i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
+ bt_init_data(bt, bt->io);
+ if ((i == 8) && !BT_CAP[2]) {
+ bt->BT_CAP_outreqs = BT_CAP[3];
+ bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000;
+ bt->BT_CAP_retries = BT_CAP[7];
+ } else
+ printk(KERN_WARNING "IPMI BT: using default values\n");
+ if (!bt->BT_CAP_outreqs)
+ bt->BT_CAP_outreqs = 1;
+ printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
+ bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries);
+ bt->timeout = bt->BT_CAP_req2rsp;
+ return SI_SM_CALL_WITHOUT_DELAY;
+
+ default: /* should never occur */
+ return error_recovery(bt,
+ status,
+ IPMI_ERR_UNSPECIFIED);
+ }
+ return SI_SM_CALL_WITH_DELAY;
+}
+
+static int bt_detect(struct si_sm_data *bt)
+{
+ /*
+ * It's impossible for the BT status and interrupt registers to be
+ * all 1's, (assuming a properly functioning, self-initialized BMC)
+ * but that's what you get from reading a bogus address, so we
+ * test that first. The calling routine uses negative logic.
+ */
+
+ if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
+ return 1;
+ reset_flags(bt);
+ return 0;
+}
+
+static void bt_cleanup(struct si_sm_data *bt)
+{
+}
+
+static int bt_size(void)
+{
+ return sizeof(struct si_sm_data);
+}
+
+struct si_sm_handlers bt_smi_handlers = {
+ .init_data = bt_init_data,
+ .start_transaction = bt_start_transaction,
+ .get_result = bt_get_result,
+ .event = bt_event,
+ .detect = bt_detect,
+ .cleanup = bt_cleanup,
+ .size = bt_size,
+};
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
new file mode 100644
index 00000000..2aa3977a
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -0,0 +1,976 @@
+/*
+ * ipmi_devintf.c
+ *
+ * Linux device interface for the IPMI message handler.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <asm/system.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/ipmi.h>
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/compat.h>
+
+struct ipmi_file_private
+{
+ ipmi_user_t user;
+ spinlock_t recv_msg_lock;
+ struct list_head recv_msgs;
+ struct file *file;
+ struct fasync_struct *fasync_queue;
+ wait_queue_head_t wait;
+ struct mutex recv_mutex;
+ int default_retries;
+ unsigned int default_retry_time_ms;
+};
+
+static DEFINE_MUTEX(ipmi_mutex);
+static void file_receive_handler(struct ipmi_recv_msg *msg,
+ void *handler_data)
+{
+ struct ipmi_file_private *priv = handler_data;
+ int was_empty;
+ unsigned long flags;
+
+ spin_lock_irqsave(&(priv->recv_msg_lock), flags);
+
+ was_empty = list_empty(&(priv->recv_msgs));
+ list_add_tail(&(msg->link), &(priv->recv_msgs));
+
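+	/*
+	 * A wakeup is only needed on the empty -> non-empty transition;
+	 * if the list was already non-empty, waiters were notified when
+	 * the first message arrived.
+	 */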
+ if (was_empty) {
+ wake_up_interruptible(&priv->wait);
+ kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
+ }
+
+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+}
+
+static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+{
+ struct ipmi_file_private *priv = file->private_data;
+ unsigned int mask = 0;
+ unsigned long flags;
+
+ poll_wait(file, &priv->wait, wait);
+
+ spin_lock_irqsave(&priv->recv_msg_lock, flags);
+
+ if (!list_empty(&(priv->recv_msgs)))
+ mask |= (POLLIN | POLLRDNORM);
+
+ spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
+
+ return mask;
+}
+
+static int ipmi_fasync(int fd, struct file *file, int on)
+{
+ struct ipmi_file_private *priv = file->private_data;
+ int result;
+
+ mutex_lock(&ipmi_mutex); /* could race against open() otherwise */
+ result = fasync_helper(fd, file, on, &priv->fasync_queue);
+ mutex_unlock(&ipmi_mutex);
+
+	return result;
+}
+
+static struct ipmi_user_hndl ipmi_hndlrs =
+{
+ .ipmi_recv_hndl = file_receive_handler,
+};
+
+static int ipmi_open(struct inode *inode, struct file *file)
+{
+ int if_num = iminor(inode);
+ int rv;
+ struct ipmi_file_private *priv;
+
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_lock(&ipmi_mutex);
+ priv->file = file;
+
+ rv = ipmi_create_user(if_num,
+ &ipmi_hndlrs,
+ priv,
+ &(priv->user));
+ if (rv) {
+ kfree(priv);
+ goto out;
+ }
+
+ file->private_data = priv;
+
+ spin_lock_init(&(priv->recv_msg_lock));
+ INIT_LIST_HEAD(&(priv->recv_msgs));
+ init_waitqueue_head(&priv->wait);
+ priv->fasync_queue = NULL;
+ mutex_init(&priv->recv_mutex);
+
+ /* Use the low-level defaults. */
+ priv->default_retries = -1;
+ priv->default_retry_time_ms = 0;
+
+out:
+ mutex_unlock(&ipmi_mutex);
+ return rv;
+}
+
+static int ipmi_release(struct inode *inode, struct file *file)
+{
+ struct ipmi_file_private *priv = file->private_data;
+ int rv;
+
+ rv = ipmi_destroy_user(priv->user);
+ if (rv)
+ return rv;
+
+ /* FIXME - free the messages in the list. */
+ kfree(priv);
+
+ return 0;
+}
+
+static int handle_send_req(ipmi_user_t user,
+ struct ipmi_req *req,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ int rv;
+ struct ipmi_addr addr;
+ struct kernel_ipmi_msg msg;
+
+ if (req->addr_len > sizeof(struct ipmi_addr))
+ return -EINVAL;
+
+ if (copy_from_user(&addr, req->addr, req->addr_len))
+ return -EFAULT;
+
+ msg.netfn = req->msg.netfn;
+ msg.cmd = req->msg.cmd;
+ msg.data_len = req->msg.data_len;
+ msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!msg.data)
+ return -ENOMEM;
+
+	/* From here out we cannot return; we must jump to "out" for
+	   error exits so that msg.data is freed. */
+
+ rv = ipmi_validate_addr(&addr, req->addr_len);
+ if (rv)
+ goto out;
+
+ if (req->msg.data != NULL) {
+ if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
+ rv = -EMSGSIZE;
+ goto out;
+ }
+
+ if (copy_from_user(msg.data,
+ req->msg.data,
+ req->msg.data_len))
+ {
+ rv = -EFAULT;
+ goto out;
+ }
+ } else {
+ msg.data_len = 0;
+ }
+
+ rv = ipmi_request_settime(user,
+ &addr,
+ req->msgid,
+ &msg,
+ NULL,
+ 0,
+ retries,
+ retry_time_ms);
+ out:
+ kfree(msg.data);
+ return rv;
+}
+
+static int ipmi_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long data)
+{
+ int rv = -EINVAL;
+ struct ipmi_file_private *priv = file->private_data;
+ void __user *arg = (void __user *)data;
+
+ switch (cmd)
+ {
+ case IPMICTL_SEND_COMMAND:
+ {
+ struct ipmi_req req;
+
+ if (copy_from_user(&req, arg, sizeof(req))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = handle_send_req(priv->user,
+ &req,
+ priv->default_retries,
+ priv->default_retry_time_ms);
+ break;
+ }
+
+ case IPMICTL_SEND_COMMAND_SETTIME:
+ {
+ struct ipmi_req_settime req;
+
+ if (copy_from_user(&req, arg, sizeof(req))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = handle_send_req(priv->user,
+ &req.req,
+ req.retries,
+ req.retry_time_ms);
+ break;
+ }
+
+ case IPMICTL_RECEIVE_MSG:
+ case IPMICTL_RECEIVE_MSG_TRUNC:
+ {
+ struct ipmi_recv rsp;
+ int addr_len;
+ struct list_head *entry;
+ struct ipmi_recv_msg *msg;
+ unsigned long flags;
+
+
+ rv = 0;
+ if (copy_from_user(&rsp, arg, sizeof(rsp))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ /* We claim a mutex because we don't want two
+ users getting something from the queue at a time.
+ Since we have to release the spinlock before we can
+ copy the data to the user, it's possible another
+ user will grab something from the queue, too. Then
+ the messages might get out of order if something
+ fails and the message gets put back onto the
+ queue. This mutex prevents that problem. */
+ mutex_lock(&priv->recv_mutex);
+
+ /* Grab the message off the list. */
+ spin_lock_irqsave(&(priv->recv_msg_lock), flags);
+ if (list_empty(&(priv->recv_msgs))) {
+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+ rv = -EAGAIN;
+ goto recv_err;
+ }
+ entry = priv->recv_msgs.next;
+ msg = list_entry(entry, struct ipmi_recv_msg, link);
+ list_del(entry);
+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+
+ addr_len = ipmi_addr_length(msg->addr.addr_type);
+ if (rsp.addr_len < addr_len)
+ {
+ rv = -EINVAL;
+ goto recv_putback_on_err;
+ }
+
+ if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
+ rv = -EFAULT;
+ goto recv_putback_on_err;
+ }
+ rsp.addr_len = addr_len;
+
+ rsp.recv_type = msg->recv_type;
+ rsp.msgid = msg->msgid;
+ rsp.msg.netfn = msg->msg.netfn;
+ rsp.msg.cmd = msg->msg.cmd;
+
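+		/*
+		 * If the user's buffer is too small, the _TRUNC variant
+		 * delivers a truncated payload (rv stays -EMSGSIZE so the
+		 * caller knows); the plain variant puts the message back
+		 * and fails outright.
+		 */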
+ if (msg->msg.data_len > 0) {
+ if (rsp.msg.data_len < msg->msg.data_len) {
+ rv = -EMSGSIZE;
+ if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
+ msg->msg.data_len = rsp.msg.data_len;
+ } else {
+ goto recv_putback_on_err;
+ }
+ }
+
+ if (copy_to_user(rsp.msg.data,
+ msg->msg.data,
+ msg->msg.data_len))
+ {
+ rv = -EFAULT;
+ goto recv_putback_on_err;
+ }
+ rsp.msg.data_len = msg->msg.data_len;
+ } else {
+ rsp.msg.data_len = 0;
+ }
+
+ if (copy_to_user(arg, &rsp, sizeof(rsp))) {
+ rv = -EFAULT;
+ goto recv_putback_on_err;
+ }
+
+ mutex_unlock(&priv->recv_mutex);
+ ipmi_free_recv_msg(msg);
+ break;
+
+ recv_putback_on_err:
+ /* If we got an error, put the message back onto
+ the head of the queue. */
+ spin_lock_irqsave(&(priv->recv_msg_lock), flags);
+ list_add(entry, &(priv->recv_msgs));
+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+ mutex_unlock(&priv->recv_mutex);
+ break;
+
+ recv_err:
+ mutex_unlock(&priv->recv_mutex);
+ break;
+ }
+
+ case IPMICTL_REGISTER_FOR_CMD:
+ {
+ struct ipmi_cmdspec val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
+ IPMI_CHAN_ALL);
+ break;
+ }
+
+ case IPMICTL_UNREGISTER_FOR_CMD:
+ {
+ struct ipmi_cmdspec val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
+ IPMI_CHAN_ALL);
+ break;
+ }
+
+ case IPMICTL_REGISTER_FOR_CMD_CHANS:
+ {
+ struct ipmi_cmdspec_chans val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
+ val.chans);
+ break;
+ }
+
+ case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
+ {
+ struct ipmi_cmdspec_chans val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
+ val.chans);
+ break;
+ }
+
+ case IPMICTL_SET_GETS_EVENTS_CMD:
+ {
+ int val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_set_gets_events(priv->user, val);
+ break;
+ }
+
+ /* The next four are legacy, not per-channel. */
+ case IPMICTL_SET_MY_ADDRESS_CMD:
+ {
+ unsigned int val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_set_my_address(priv->user, 0, val);
+ break;
+ }
+
+ case IPMICTL_GET_MY_ADDRESS_CMD:
+ {
+ unsigned int val;
+ unsigned char rval;
+
+ rv = ipmi_get_my_address(priv->user, 0, &rval);
+ if (rv)
+ break;
+
+ val = rval;
+
+ if (copy_to_user(arg, &val, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case IPMICTL_SET_MY_LUN_CMD:
+ {
+ unsigned int val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_set_my_LUN(priv->user, 0, val);
+ break;
+ }
+
+ case IPMICTL_GET_MY_LUN_CMD:
+ {
+ unsigned int val;
+ unsigned char rval;
+
+ rv = ipmi_get_my_LUN(priv->user, 0, &rval);
+ if (rv)
+ break;
+
+ val = rval;
+
+ if (copy_to_user(arg, &val, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
+ {
+ struct ipmi_channel_lun_address_set val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+		rv = ipmi_set_my_address(priv->user, val.channel, val.value);
+		break;
+ }
+
+ case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
+ {
+ struct ipmi_channel_lun_address_set val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
+ if (rv)
+ break;
+
+ if (copy_to_user(arg, &val, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
+ {
+ struct ipmi_channel_lun_address_set val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
+ break;
+ }
+
+ case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
+ {
+ struct ipmi_channel_lun_address_set val;
+
+ if (copy_from_user(&val, arg, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
+ if (rv)
+ break;
+
+ if (copy_to_user(arg, &val, sizeof(val))) {
+ rv = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case IPMICTL_SET_TIMING_PARMS_CMD:
+ {
+ struct ipmi_timing_parms parms;
+
+ if (copy_from_user(&parms, arg, sizeof(parms))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ priv->default_retries = parms.retries;
+ priv->default_retry_time_ms = parms.retry_time_ms;
+ rv = 0;
+ break;
+ }
+
+ case IPMICTL_GET_TIMING_PARMS_CMD:
+ {
+ struct ipmi_timing_parms parms;
+
+ parms.retries = priv->default_retries;
+ parms.retry_time_ms = priv->default_retry_time_ms;
+
+ if (copy_to_user(arg, &parms, sizeof(parms))) {
+ rv = -EFAULT;
+ break;
+ }
+
+ rv = 0;
+ break;
+ }
+
+ case IPMICTL_GET_MAINTENANCE_MODE_CMD:
+ {
+ int mode;
+
+ mode = ipmi_get_maintenance_mode(priv->user);
+ if (copy_to_user(arg, &mode, sizeof(mode))) {
+ rv = -EFAULT;
+ break;
+ }
+ rv = 0;
+ break;
+ }
+
+ case IPMICTL_SET_MAINTENANCE_MODE_CMD:
+ {
+ int mode;
+
+ if (copy_from_user(&mode, arg, sizeof(mode))) {
+ rv = -EFAULT;
+ break;
+ }
+ rv = ipmi_set_maintenance_mode(priv->user, mode);
+ break;
+ }
+ }
+
+ return rv;
+}
+
+/*
+ * Note: it doesn't make sense to take the BKL here but
+ * not in compat_ipmi_ioctl. -arnd
+ */
+static long ipmi_unlocked_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long data)
+{
+ int ret;
+
+ mutex_lock(&ipmi_mutex);
+ ret = ipmi_ioctl(file, cmd, data);
+ mutex_unlock(&ipmi_mutex);
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
+/*
+ * The following code supports 32-bit compatible ioctls on 64-bit
+ * kernels, allowing 32-bit applications to run on a 64-bit kernel.
+ */
+#define COMPAT_IPMICTL_SEND_COMMAND \
+ _IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
+#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME \
+ _IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
+#define COMPAT_IPMICTL_RECEIVE_MSG \
+ _IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
+#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC \
+ _IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)
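+
+/*
+ * The compat_* mirrors below differ from the native structures only
+ * in pointer and long widths (compat_uptr_t, compat_long_t,
+ * compat_uint_t); the helpers that follow repack the 32-bit layout
+ * into the native one so the regular ioctl paths can be reused.
+ */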
+
+struct compat_ipmi_msg {
+ u8 netfn;
+ u8 cmd;
+ u16 data_len;
+ compat_uptr_t data;
+};
+
+struct compat_ipmi_req {
+ compat_uptr_t addr;
+ compat_uint_t addr_len;
+ compat_long_t msgid;
+ struct compat_ipmi_msg msg;
+};
+
+struct compat_ipmi_recv {
+ compat_int_t recv_type;
+ compat_uptr_t addr;
+ compat_uint_t addr_len;
+ compat_long_t msgid;
+ struct compat_ipmi_msg msg;
+};
+
+struct compat_ipmi_req_settime {
+ struct compat_ipmi_req req;
+ compat_int_t retries;
+ compat_uint_t retry_time_ms;
+};
+
+/*
+ * Define some helper functions for copying IPMI data
+ */
+static long get_compat_ipmi_msg(struct ipmi_msg *p64,
+ struct compat_ipmi_msg __user *p32)
+{
+ compat_uptr_t tmp;
+
+ if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
+ __get_user(p64->netfn, &p32->netfn) ||
+ __get_user(p64->cmd, &p32->cmd) ||
+ __get_user(p64->data_len, &p32->data_len) ||
+ __get_user(tmp, &p32->data))
+ return -EFAULT;
+ p64->data = compat_ptr(tmp);
+ return 0;
+}
+
+static long put_compat_ipmi_msg(struct ipmi_msg *p64,
+ struct compat_ipmi_msg __user *p32)
+{
+ if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
+ __put_user(p64->netfn, &p32->netfn) ||
+ __put_user(p64->cmd, &p32->cmd) ||
+ __put_user(p64->data_len, &p32->data_len))
+ return -EFAULT;
+ return 0;
+}
+
+static long get_compat_ipmi_req(struct ipmi_req *p64,
+ struct compat_ipmi_req __user *p32)
+{
+	compat_uptr_t tmp;
+
+ if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
+ __get_user(tmp, &p32->addr) ||
+ __get_user(p64->addr_len, &p32->addr_len) ||
+ __get_user(p64->msgid, &p32->msgid) ||
+ get_compat_ipmi_msg(&p64->msg, &p32->msg))
+ return -EFAULT;
+ p64->addr = compat_ptr(tmp);
+ return 0;
+}
+
+static long get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
+ struct compat_ipmi_req_settime __user *p32)
+{
+ if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
+ get_compat_ipmi_req(&p64->req, &p32->req) ||
+ __get_user(p64->retries, &p32->retries) ||
+ __get_user(p64->retry_time_ms, &p32->retry_time_ms))
+ return -EFAULT;
+ return 0;
+}
+
+static long get_compat_ipmi_recv(struct ipmi_recv *p64,
+ struct compat_ipmi_recv __user *p32)
+{
+ compat_uptr_t tmp;
+
+ if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
+ __get_user(p64->recv_type, &p32->recv_type) ||
+ __get_user(tmp, &p32->addr) ||
+ __get_user(p64->addr_len, &p32->addr_len) ||
+ __get_user(p64->msgid, &p32->msgid) ||
+ get_compat_ipmi_msg(&p64->msg, &p32->msg))
+ return -EFAULT;
+ p64->addr = compat_ptr(tmp);
+ return 0;
+}
+
+static long put_compat_ipmi_recv(struct ipmi_recv *p64,
+ struct compat_ipmi_recv __user *p32)
+{
+ if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
+ __put_user(p64->recv_type, &p32->recv_type) ||
+ __put_user(p64->addr_len, &p32->addr_len) ||
+ __put_user(p64->msgid, &p32->msgid) ||
+ put_compat_ipmi_msg(&p64->msg, &p32->msg))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * Handle compatibility ioctls
+ */
+static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc;
+ struct ipmi_file_private *priv = filep->private_data;
+
+ switch(cmd) {
+ case COMPAT_IPMICTL_SEND_COMMAND:
+ {
+ struct ipmi_req rp;
+
+ if (get_compat_ipmi_req(&rp, compat_ptr(arg)))
+ return -EFAULT;
+
+ return handle_send_req(priv->user, &rp,
+ priv->default_retries,
+ priv->default_retry_time_ms);
+ }
+ case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
+ {
+ struct ipmi_req_settime sp;
+
+ if (get_compat_ipmi_req_settime(&sp, compat_ptr(arg)))
+ return -EFAULT;
+
+ return handle_send_req(priv->user, &sp.req,
+ sp.retries, sp.retry_time_ms);
+ }
+ case COMPAT_IPMICTL_RECEIVE_MSG:
+ case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
+ {
+ struct ipmi_recv __user *precv64;
+ struct ipmi_recv recv64;
+
+ if (get_compat_ipmi_recv(&recv64, compat_ptr(arg)))
+ return -EFAULT;
+
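+		/*
+		 * Stage a native struct ipmi_recv in user-accessible
+		 * space so the regular ioctl path's copy_{from,to}_user
+		 * calls work unchanged, then translate the result back
+		 * to the 32-bit layout below.
+		 */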
+ precv64 = compat_alloc_user_space(sizeof(recv64));
+ if (copy_to_user(precv64, &recv64, sizeof(recv64)))
+ return -EFAULT;
+
+ rc = ipmi_ioctl(filep,
+ ((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
+ ? IPMICTL_RECEIVE_MSG
+ : IPMICTL_RECEIVE_MSG_TRUNC),
+ (unsigned long) precv64);
+ if (rc != 0)
+ return rc;
+
+ if (copy_from_user(&recv64, precv64, sizeof(recv64)))
+ return -EFAULT;
+
+ if (put_compat_ipmi_recv(&recv64, compat_ptr(arg)))
+ return -EFAULT;
+
+ return rc;
+ }
+ default:
+ return ipmi_ioctl(filep, cmd, arg);
+ }
+}
+#endif
+
+static const struct file_operations ipmi_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_ipmi_ioctl,
+#endif
+ .open = ipmi_open,
+ .release = ipmi_release,
+ .fasync = ipmi_fasync,
+ .poll = ipmi_poll,
+ .llseek = noop_llseek,
+};
+
+#define DEVICE_NAME "ipmidev"
+
+static int ipmi_major;
+module_param(ipmi_major, int, 0);
+MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
+ " default, or if you set it to zero, it will choose the next"
+ " available device. Setting it to -1 will disable the"
+ " interface. Other values will set the major device number"
+ " to that value.");
+
+/* Keep track of the devices that are registered. */
+struct ipmi_reg_list {
+ dev_t dev;
+ struct list_head link;
+};
+static LIST_HEAD(reg_list);
+static DEFINE_MUTEX(reg_list_mutex);
+
+static struct class *ipmi_class;
+
+static void ipmi_new_smi(int if_num, struct device *device)
+{
+ dev_t dev = MKDEV(ipmi_major, if_num);
+ struct ipmi_reg_list *entry;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ printk(KERN_ERR "ipmi_devintf: Unable to create the"
+ " ipmi class device link\n");
+ return;
+ }
+ entry->dev = dev;
+
+ mutex_lock(&reg_list_mutex);
+ device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
+ list_add(&entry->link, &reg_list);
+ mutex_unlock(&reg_list_mutex);
+}
+
+static void ipmi_smi_gone(int if_num)
+{
+ dev_t dev = MKDEV(ipmi_major, if_num);
+ struct ipmi_reg_list *entry;
+
+ mutex_lock(&reg_list_mutex);
+ list_for_each_entry(entry, &reg_list, link) {
+ if (entry->dev == dev) {
+ list_del(&entry->link);
+ kfree(entry);
+ break;
+ }
+ }
+ device_destroy(ipmi_class, dev);
+ mutex_unlock(&reg_list_mutex);
+}
+
+static struct ipmi_smi_watcher smi_watcher =
+{
+ .owner = THIS_MODULE,
+ .new_smi = ipmi_new_smi,
+ .smi_gone = ipmi_smi_gone,
+};
+
+static int __init init_ipmi_devintf(void)
+{
+ int rv;
+
+ if (ipmi_major < 0)
+ return -EINVAL;
+
+ printk(KERN_INFO "ipmi device interface\n");
+
+ ipmi_class = class_create(THIS_MODULE, "ipmi");
+ if (IS_ERR(ipmi_class)) {
+ printk(KERN_ERR "ipmi: can't register device class\n");
+ return PTR_ERR(ipmi_class);
+ }
+
+ rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
+ if (rv < 0) {
+ class_destroy(ipmi_class);
+ printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
+ return rv;
+ }
+
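+	/*
+	 * When a major of 0 is requested, register_chrdev() allocates
+	 * one dynamically and returns it, so record the real major.
+	 */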
+ if (ipmi_major == 0) {
+ ipmi_major = rv;
+ }
+
+ rv = ipmi_smi_watcher_register(&smi_watcher);
+ if (rv) {
+ unregister_chrdev(ipmi_major, DEVICE_NAME);
+ class_destroy(ipmi_class);
+ printk(KERN_WARNING "ipmi: can't register smi watcher\n");
+ return rv;
+ }
+
+ return 0;
+}
+module_init(init_ipmi_devintf);
+
+static void __exit cleanup_ipmi(void)
+{
+ struct ipmi_reg_list *entry, *entry2;
+ mutex_lock(&reg_list_mutex);
+ list_for_each_entry_safe(entry, entry2, &reg_list, link) {
+ list_del(&entry->link);
+ device_destroy(ipmi_class, entry->dev);
+ kfree(entry);
+ }
+ mutex_unlock(&reg_list_mutex);
+ class_destroy(ipmi_class);
+ ipmi_smi_watcher_unregister(&smi_watcher);
+ unregister_chrdev(ipmi_major, DEVICE_NAME);
+}
+module_exit(cleanup_ipmi);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
+MODULE_ALIAS("platform:ipmi_si");
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
new file mode 100644
index 00000000..cf82feda
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -0,0 +1,550 @@
+/*
+ * ipmi_kcs_sm.c
+ *
+ * State machine for handling IPMI KCS interfaces.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * This state machine is taken from the state machine in the IPMI spec,
+ * pretty much verbatim. If you have questions about the states, see
+ * that document.
+ */
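+
+/*
+ * A sketch of the error-free flow as implemented in kcs_event() below:
+ *
+ *   KCS_IDLE -> KCS_START_OP -> KCS_WAIT_WRITE_START
+ *     -> KCS_WAIT_WRITE (one pass per byte) -> KCS_WAIT_WRITE_END
+ *     -> KCS_WAIT_READ -> KCS_IDLE
+ *
+ * Errors divert through KCS_ERROR0..KCS_ERROR3 and, after
+ * MAX_ERROR_RETRIES attempts, land in KCS_HOSED.
+ */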
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/ipmi_msgdefs.h> /* for completion codes */
+#include "ipmi_si_sm.h"
+
+/* kcs_debug is a bit-field
+ * KCS_DEBUG_ENABLE - general debug output (e.g. error-recovery notices)
+ * KCS_DEBUG_MSG - commands and their responses
+ * KCS_DEBUG_STATES - state machine transitions
+ */
+#define KCS_DEBUG_STATES 4
+#define KCS_DEBUG_MSG 2
+#define KCS_DEBUG_ENABLE 1
+
+static int kcs_debug;
+module_param(kcs_debug, int, 0644);
+MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
+
+/* The states the KCS driver may be in. */
+enum kcs_states {
+ /* The KCS interface is currently doing nothing. */
+ KCS_IDLE,
+
+ /*
+ * We are starting an operation. The data is in the output
+ * buffer, but nothing has been done to the interface yet. This
+ * was added to the state machine in the spec to wait for the
+ * initial IBF.
+ */
+ KCS_START_OP,
+
+ /* We have written a write cmd to the interface. */
+ KCS_WAIT_WRITE_START,
+
+ /* We are writing bytes to the interface. */
+ KCS_WAIT_WRITE,
+
+ /*
+ * We have written the write end cmd to the interface, and
+ * still need to write the last byte.
+ */
+ KCS_WAIT_WRITE_END,
+
+ /* We are waiting to read data from the interface. */
+ KCS_WAIT_READ,
+
+ /*
+ * State to transition to the error handler, this was added to
+ * the state machine in the spec to be sure IBF was there.
+ */
+ KCS_ERROR0,
+
+ /*
+ * First stage error handler, wait for the interface to
+ * respond.
+ */
+ KCS_ERROR1,
+
+ /*
+ * The abort cmd has been written, wait for the interface to
+ * respond.
+ */
+ KCS_ERROR2,
+
+ /*
+ * We wrote some data to the interface, wait for it to switch
+ * to read mode.
+ */
+ KCS_ERROR3,
+
+ /* The hardware failed to follow the state machine. */
+ KCS_HOSED
+};
+
+#define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
+#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
+
+/* Timeouts in microseconds. */
+#define IBF_RETRY_TIMEOUT 1000000
+#define OBF_RETRY_TIMEOUT 1000000
+#define MAX_ERROR_RETRIES 10
+#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
+
+struct si_sm_data {
+ enum kcs_states state;
+ struct si_sm_io *io;
+ unsigned char write_data[MAX_KCS_WRITE_SIZE];
+ int write_pos;
+ int write_count;
+ int orig_write_count;
+ unsigned char read_data[MAX_KCS_READ_SIZE];
+ int read_pos;
+ int truncated;
+
+ unsigned int error_retries;
+ long ibf_timeout;
+ long obf_timeout;
+ unsigned long error0_timeout;
+};
+
+static unsigned int init_kcs_data(struct si_sm_data *kcs,
+ struct si_sm_io *io)
+{
+ kcs->state = KCS_IDLE;
+ kcs->io = io;
+ kcs->write_pos = 0;
+ kcs->write_count = 0;
+ kcs->orig_write_count = 0;
+ kcs->read_pos = 0;
+ kcs->error_retries = 0;
+ kcs->truncated = 0;
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+
+ /* Reserve 2 I/O bytes. */
+ return 2;
+}
+
+static inline unsigned char read_status(struct si_sm_data *kcs)
+{
+ return kcs->io->inputb(kcs->io, 1);
+}
+
+static inline unsigned char read_data(struct si_sm_data *kcs)
+{
+ return kcs->io->inputb(kcs->io, 0);
+}
+
+static inline void write_cmd(struct si_sm_data *kcs, unsigned char data)
+{
+ kcs->io->outputb(kcs->io, 1, data);
+}
+
+static inline void write_data(struct si_sm_data *kcs, unsigned char data)
+{
+ kcs->io->outputb(kcs->io, 0, data);
+}
+
+/* Control codes. */
+#define KCS_GET_STATUS_ABORT 0x60
+#define KCS_WRITE_START 0x61
+#define KCS_WRITE_END 0x62
+#define KCS_READ_BYTE 0x68
+
+/* Status bits. */
+#define GET_STATUS_STATE(status) (((status) >> 6) & 0x03)
+#define KCS_IDLE_STATE 0
+#define KCS_READ_STATE 1
+#define KCS_WRITE_STATE 2
+#define KCS_ERROR_STATE 3
+#define GET_STATUS_ATN(status) ((status) & 0x04)
+#define GET_STATUS_IBF(status) ((status) & 0x02)
+#define GET_STATUS_OBF(status) ((status) & 0x01)
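+
+/*
+ * Per the macros above, the status register packs the KCS state in
+ * bits 7:6, ATN in bit 2, IBF (input buffer full) in bit 1, and OBF
+ * (output buffer full) in bit 0.
+ */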
+
+
+static inline void write_next_byte(struct si_sm_data *kcs)
+{
+ write_data(kcs, kcs->write_data[kcs->write_pos]);
+ (kcs->write_pos)++;
+ (kcs->write_count)--;
+}
+
+static inline void start_error_recovery(struct si_sm_data *kcs, char *reason)
+{
+ (kcs->error_retries)++;
+ if (kcs->error_retries > MAX_ERROR_RETRIES) {
+ if (kcs_debug & KCS_DEBUG_ENABLE)
+ printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n",
+ reason);
+ kcs->state = KCS_HOSED;
+ } else {
+ kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES;
+ kcs->state = KCS_ERROR0;
+ }
+}
+
+static inline void read_next_byte(struct si_sm_data *kcs)
+{
+ if (kcs->read_pos >= MAX_KCS_READ_SIZE) {
+ /* Throw the data away and mark it truncated. */
+ read_data(kcs);
+ kcs->truncated = 1;
+ } else {
+ kcs->read_data[kcs->read_pos] = read_data(kcs);
+ (kcs->read_pos)++;
+ }
+ write_data(kcs, KCS_READ_BYTE);
+}
+
+static inline int check_ibf(struct si_sm_data *kcs, unsigned char status,
+ long time)
+{
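+	/*
+	 * 'time' is the microseconds elapsed since the last call; while
+	 * IBF is still set it is charged against the IBF timeout.
+	 */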
+ if (GET_STATUS_IBF(status)) {
+ kcs->ibf_timeout -= time;
+ if (kcs->ibf_timeout < 0) {
+ start_error_recovery(kcs, "IBF not ready in time");
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ return 1;
+ }
+ return 0;
+ }
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ return 1;
+}
+
+static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
+ long time)
+{
+ if (!GET_STATUS_OBF(status)) {
+ kcs->obf_timeout -= time;
+ if (kcs->obf_timeout < 0) {
+ start_error_recovery(kcs, "OBF not ready in time");
+ return 1;
+ }
+ return 0;
+ }
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ return 1;
+}
+
+static void clear_obf(struct si_sm_data *kcs, unsigned char status)
+{
+ if (GET_STATUS_OBF(status))
+ read_data(kcs);
+}
+
+static void restart_kcs_transaction(struct si_sm_data *kcs)
+{
+ kcs->write_count = kcs->orig_write_count;
+ kcs->write_pos = 0;
+ kcs->read_pos = 0;
+ kcs->state = KCS_WAIT_WRITE_START;
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ write_cmd(kcs, KCS_WRITE_START);
+}
+
+static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
+ unsigned int size)
+{
+ unsigned int i;
+
+ if (size < 2)
+ return IPMI_REQ_LEN_INVALID_ERR;
+ if (size > MAX_KCS_WRITE_SIZE)
+ return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+ if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED))
+ return IPMI_NOT_IN_MY_STATE_ERR;
+
+ if (kcs_debug & KCS_DEBUG_MSG) {
+ printk(KERN_DEBUG "start_kcs_transaction -");
+ for (i = 0; i < size; i++)
+ printk(" %02x", (unsigned char) (data [i]));
+ printk("\n");
+ }
+ kcs->error_retries = 0;
+ memcpy(kcs->write_data, data, size);
+ kcs->write_count = size;
+ kcs->orig_write_count = size;
+ kcs->write_pos = 0;
+ kcs->read_pos = 0;
+ kcs->state = KCS_START_OP;
+ kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ return 0;
+}
+
+static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
+ unsigned int length)
+{
+ if (length < kcs->read_pos) {
+ kcs->read_pos = length;
+ kcs->truncated = 1;
+ }
+
+ memcpy(data, kcs->read_data, kcs->read_pos);
+
+ if ((length >= 3) && (kcs->read_pos < 3)) {
+ /* Guarantee that we return at least 3 bytes, with an
+ error in the third byte if it is too short. */
+ data[2] = IPMI_ERR_UNSPECIFIED;
+ kcs->read_pos = 3;
+ }
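+	/*
+	 * (In an IPMI response the third byte is the completion code,
+	 * which is why the padding above writes the error there.)
+	 */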
+ if (kcs->truncated) {
+ /*
+ * Report a truncated error. We might overwrite
+ * another error, but that's too bad, the user needs
+ * to know it was truncated.
+ */
+ data[2] = IPMI_ERR_MSG_TRUNCATED;
+ kcs->truncated = 0;
+ }
+
+ return kcs->read_pos;
+}
+
+/*
+ * This implements the state machine defined in the IPMI manual, see
+ * that for details on how this works. Divide that flowchart into
+ * sections delimited by "Wait for IBF" and this will become clear.
+ */
+static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
+{
+ unsigned char status;
+ unsigned char state;
+
+ status = read_status(kcs);
+
+ if (kcs_debug & KCS_DEBUG_STATES)
+ printk(KERN_DEBUG "KCS: State = %d, %x\n", kcs->state, status);
+
+ /* All states wait for ibf, so just do it here. */
+ if (!check_ibf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+
+ /* Just about everything looks at the KCS state, so grab that, too. */
+ state = GET_STATUS_STATE(status);
+
+ switch (kcs->state) {
+ case KCS_IDLE:
+		/* If there's an interrupt source, turn it off. */
+ clear_obf(kcs, status);
+
+ if (GET_STATUS_ATN(status))
+ return SI_SM_ATTN;
+ else
+ return SI_SM_IDLE;
+
+ case KCS_START_OP:
+ if (state != KCS_IDLE_STATE) {
+ start_error_recovery(kcs,
+ "State machine not idle at start");
+ break;
+ }
+
+ clear_obf(kcs, status);
+ write_cmd(kcs, KCS_WRITE_START);
+ kcs->state = KCS_WAIT_WRITE_START;
+ break;
+
+ case KCS_WAIT_WRITE_START:
+ if (state != KCS_WRITE_STATE) {
+ start_error_recovery(
+ kcs,
+ "Not in write state at write start");
+ break;
+ }
+ read_data(kcs);
+ if (kcs->write_count == 1) {
+ write_cmd(kcs, KCS_WRITE_END);
+ kcs->state = KCS_WAIT_WRITE_END;
+ } else {
+ write_next_byte(kcs);
+ kcs->state = KCS_WAIT_WRITE;
+ }
+ break;
+
+ case KCS_WAIT_WRITE:
+ if (state != KCS_WRITE_STATE) {
+ start_error_recovery(kcs,
+ "Not in write state for write");
+ break;
+ }
+ clear_obf(kcs, status);
+ if (kcs->write_count == 1) {
+ write_cmd(kcs, KCS_WRITE_END);
+ kcs->state = KCS_WAIT_WRITE_END;
+ } else {
+ write_next_byte(kcs);
+ }
+ break;
+
+ case KCS_WAIT_WRITE_END:
+ if (state != KCS_WRITE_STATE) {
+ start_error_recovery(kcs,
+ "Not in write state"
+ " for write end");
+ break;
+ }
+ clear_obf(kcs, status);
+ write_next_byte(kcs);
+ kcs->state = KCS_WAIT_READ;
+ break;
+
+ case KCS_WAIT_READ:
+ if ((state != KCS_READ_STATE) && (state != KCS_IDLE_STATE)) {
+ start_error_recovery(
+ kcs,
+ "Not in read or idle in read state");
+ break;
+ }
+
+ if (state == KCS_READ_STATE) {
+ if (!check_obf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+ read_next_byte(kcs);
+ } else {
+ /*
+ * We don't implement this exactly like the state
+ * machine in the spec. Some broken hardware
+ * does not write the final dummy byte to the
+ * read register. Thus obf will never go high
+ * here. We just go straight to idle, and we
+ * handle clearing out obf in idle state if it
+ * happens to come in.
+ */
+ clear_obf(kcs, status);
+ kcs->orig_write_count = 0;
+ kcs->state = KCS_IDLE;
+ return SI_SM_TRANSACTION_COMPLETE;
+ }
+ break;
+
+ case KCS_ERROR0:
+ clear_obf(kcs, status);
+ status = read_status(kcs);
+ if (GET_STATUS_OBF(status))
+ /* controller isn't responding */
+ if (time_before(jiffies, kcs->error0_timeout))
+ return SI_SM_CALL_WITH_TICK_DELAY;
+ write_cmd(kcs, KCS_GET_STATUS_ABORT);
+ kcs->state = KCS_ERROR1;
+ break;
+
+ case KCS_ERROR1:
+ clear_obf(kcs, status);
+ write_data(kcs, 0);
+ kcs->state = KCS_ERROR2;
+ break;
+
+ case KCS_ERROR2:
+ if (state != KCS_READ_STATE) {
+ start_error_recovery(kcs,
+ "Not in read state for error2");
+ break;
+ }
+ if (!check_obf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+
+ clear_obf(kcs, status);
+ write_data(kcs, KCS_READ_BYTE);
+ kcs->state = KCS_ERROR3;
+ break;
+
+ case KCS_ERROR3:
+ if (state != KCS_IDLE_STATE) {
+ start_error_recovery(kcs,
+ "Not in idle state for error3");
+ break;
+ }
+
+ if (!check_obf(kcs, status, time))
+ return SI_SM_CALL_WITH_DELAY;
+
+ clear_obf(kcs, status);
+ if (kcs->orig_write_count) {
+ restart_kcs_transaction(kcs);
+ } else {
+ kcs->state = KCS_IDLE;
+ return SI_SM_TRANSACTION_COMPLETE;
+ }
+ break;
+
+ case KCS_HOSED:
+ break;
+ }
+
+ if (kcs->state == KCS_HOSED) {
+ init_kcs_data(kcs, kcs->io);
+ return SI_SM_HOSED;
+ }
+
+ return SI_SM_CALL_WITHOUT_DELAY;
+}
+
+static int kcs_size(void)
+{
+ return sizeof(struct si_sm_data);
+}
+
+static int kcs_detect(struct si_sm_data *kcs)
+{
+ /*
+ * It's impossible for the KCS status register to be all 1's,
+ * (assuming a properly functioning, self-initialized BMC)
+ * but that's what you get from reading a bogus address, so we
+ * test that first.
+ */
+ if (read_status(kcs) == 0xff)
+ return 1;
+
+ return 0;
+}
+
+static void kcs_cleanup(struct si_sm_data *kcs)
+{
+}
+
+struct si_sm_handlers kcs_smi_handlers = {
+ .init_data = init_kcs_data,
+ .start_transaction = start_kcs_transaction,
+ .get_result = get_kcs_result,
+ .event = kcs_event,
+ .detect = kcs_detect,
+ .cleanup = kcs_cleanup,
+ .size = kcs_size,
+};
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
new file mode 100644
index 00000000..58c0e638
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -0,0 +1,4549 @@
+/*
+ * ipmi_msghandler.c
+ *
+ * Incoming and outgoing message routing for an IPMI interface.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <asm/system.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/rcupdate.h>
+
+#define PFX "IPMI message handler: "
+
+#define IPMI_DRIVER_VERSION "39.2"
+
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
+static int ipmi_init_msghandler(void);
+
+static int initialized;
+
+#ifdef CONFIG_PROC_FS
+static struct proc_dir_entry *proc_ipmi_root;
+#endif /* CONFIG_PROC_FS */
+
+/* Remain in auto-maintenance mode for this amount of time (in ms). */
+#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
+
+#define MAX_EVENTS_IN_QUEUE 25
+
+/*
+ * Don't let a message sit in a queue forever; always time it out with
+ * at least the max message timeout. This is in milliseconds.
+ */
+#define MAX_MSG_TIMEOUT 60000
+
+/*
+ * The main "user" data structure.
+ */
+struct ipmi_user {
+ struct list_head link;
+
+ /* Set to "0" when the user is destroyed. */
+ int valid;
+
+ struct kref refcount;
+
+ /* The upper layer that handles receive messages. */
+ struct ipmi_user_hndl *handler;
+ void *handler_data;
+
+ /* The interface this user is bound to. */
+ ipmi_smi_t intf;
+
+ /* Does this interface receive IPMI events? */
+ int gets_events;
+};
+
+struct cmd_rcvr {
+ struct list_head link;
+
+ ipmi_user_t user;
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned int chans;
+
+ /*
+	 * This is used to form a linked list during mass deletion.
+ * Since this is in an RCU list, we cannot use the link above
+ * or change any data until the RCU period completes. So we
+ * use this next variable during mass deletion so we can have
+ * a list and don't have to wait and restart the search on
+ * every individual deletion of a command.
+ */
+ struct cmd_rcvr *next;
+};
+
+struct seq_table {
+ unsigned int inuse : 1;
+ unsigned int broadcast : 1;
+
+ unsigned long timeout;
+ unsigned long orig_timeout;
+ unsigned int retries_left;
+
+ /*
+ * To verify on an incoming send message response that this is
+ * the message that the response is for, we keep a sequence id
+ * and increment it every time we send a message.
+ */
+ long seqid;
+
+ /*
+ * This is held so we can properly respond to the message on a
+ * timeout, and it is used to hold the temporary data for
+ * retransmission, too.
+ */
+ struct ipmi_recv_msg *recv_msg;
+};
+
+/*
+ * Store the information in a msgid (long) to allow us to find a
+ * sequence table entry from the msgid.
+ */
+#define STORE_SEQ_IN_MSGID(seq, seqid) ((((seq) & 0x3f) << 26) | ((seqid) & 0x3fffff))
+
+#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
+ do { \
+ seq = ((msgid >> 26) & 0x3f); \
+ seqid = (msgid & 0x3fffff); \
+ } while (0)
+
+#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
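+
+/*
+ * Worked example (illustrative values): for seq = 5 and seqid = 0x1234,
+ * STORE_SEQ_IN_MSGID yields (5 << 26) | 0x1234 = 0x14001234;
+ * GET_SEQ_FROM_MSGID then recovers seq = 5 from bits 31:26 and
+ * seqid = 0x1234 from the low 22 bits.
+ */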
+
+struct ipmi_channel {
+ unsigned char medium;
+ unsigned char protocol;
+
+ /*
+ * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
+ * but may be changed by the user.
+ */
+ unsigned char address;
+
+ /*
+ * My LUN. This should generally stay the SMS LUN, but just in
+ * case...
+ */
+ unsigned char lun;
+};
+
+#ifdef CONFIG_PROC_FS
+struct ipmi_proc_entry {
+ char *name;
+ struct ipmi_proc_entry *next;
+};
+#endif
+
+struct bmc_device {
+ struct platform_device *dev;
+ struct ipmi_device_id id;
+ unsigned char guid[16];
+ int guid_set;
+
+ struct kref refcount;
+
+ /* bmc device attributes */
+ struct device_attribute device_id_attr;
+ struct device_attribute provides_dev_sdrs_attr;
+ struct device_attribute revision_attr;
+ struct device_attribute firmware_rev_attr;
+ struct device_attribute version_attr;
+ struct device_attribute add_dev_support_attr;
+ struct device_attribute manufacturer_id_attr;
+ struct device_attribute product_id_attr;
+ struct device_attribute guid_attr;
+ struct device_attribute aux_firmware_rev_attr;
+};
+
+/*
+ * Various statistics for IPMI, these index stats[] in the ipmi_smi
+ * structure.
+ */
+enum ipmi_stat_indexes {
+ /* Commands we got from the user that were invalid. */
+ IPMI_STAT_sent_invalid_commands = 0,
+
+ /* Commands we sent to the MC. */
+ IPMI_STAT_sent_local_commands,
+
+ /* Responses from the MC that were delivered to a user. */
+ IPMI_STAT_handled_local_responses,
+
+ /* Responses from the MC that were not delivered to a user. */
+ IPMI_STAT_unhandled_local_responses,
+
+ /* Commands we sent out to the IPMB bus. */
+ IPMI_STAT_sent_ipmb_commands,
+
+ /* Commands sent on the IPMB that had errors on the SEND CMD */
+ IPMI_STAT_sent_ipmb_command_errs,
+
+ /* Each retransmit increments this count. */
+ IPMI_STAT_retransmitted_ipmb_commands,
+
+ /*
+ * When a message times out (runs out of retransmits) this is
+ * incremented.
+ */
+ IPMI_STAT_timed_out_ipmb_commands,
+
+ /*
+ * This is like above, but for broadcasts. Broadcasts are
+ * *not* included in the above count (they are expected to
+ * time out).
+ */
+ IPMI_STAT_timed_out_ipmb_broadcasts,
+
+ /* Responses I have sent to the IPMB bus. */
+ IPMI_STAT_sent_ipmb_responses,
+
+ /* The response was delivered to the user. */
+ IPMI_STAT_handled_ipmb_responses,
+
+ /* The response had invalid data in it. */
+ IPMI_STAT_invalid_ipmb_responses,
+
+ /* The response didn't have anyone waiting for it. */
+ IPMI_STAT_unhandled_ipmb_responses,
+
+	/* Commands we sent out to the LAN. */
+ IPMI_STAT_sent_lan_commands,
+
+	/* Commands sent over the LAN that had errors on the SEND CMD */
+ IPMI_STAT_sent_lan_command_errs,
+
+ /* Each retransmit increments this count. */
+ IPMI_STAT_retransmitted_lan_commands,
+
+ /*
+ * When a message times out (runs out of retransmits) this is
+ * incremented.
+ */
+ IPMI_STAT_timed_out_lan_commands,
+
+	/* Responses I have sent out over the LAN. */
+ IPMI_STAT_sent_lan_responses,
+
+ /* The response was delivered to the user. */
+ IPMI_STAT_handled_lan_responses,
+
+ /* The response had invalid data in it. */
+ IPMI_STAT_invalid_lan_responses,
+
+ /* The response didn't have anyone waiting for it. */
+ IPMI_STAT_unhandled_lan_responses,
+
+ /* The command was delivered to the user. */
+ IPMI_STAT_handled_commands,
+
+ /* The command had invalid data in it. */
+ IPMI_STAT_invalid_commands,
+
+ /* The command didn't have anyone waiting for it. */
+ IPMI_STAT_unhandled_commands,
+
+ /* Invalid data in an event. */
+ IPMI_STAT_invalid_events,
+
+ /* Events that were received with the proper format. */
+ IPMI_STAT_events,
+
+ /* Retransmissions on IPMB that failed. */
+ IPMI_STAT_dropped_rexmit_ipmb_commands,
+
+ /* Retransmissions on LAN that failed. */
+ IPMI_STAT_dropped_rexmit_lan_commands,
+
+ /* This *must* remain last, add new values above this. */
+ IPMI_NUM_STATS
+};
+
+
+#define IPMI_IPMB_NUM_SEQ 64
+#define IPMI_MAX_CHANNELS 16
+struct ipmi_smi {
+ /* What interface number are we? */
+ int intf_num;
+
+ struct kref refcount;
+
+ /* Used for a list of interfaces. */
+ struct list_head link;
+
+ /*
+ * The list of upper layers that are using me. seq_lock
+ * protects this.
+ */
+ struct list_head users;
+
+ /* Information to supply to users. */
+ unsigned char ipmi_version_major;
+ unsigned char ipmi_version_minor;
+
+ /* Used for wake ups at startup. */
+ wait_queue_head_t waitq;
+
+ struct bmc_device *bmc;
+ char *my_dev_name;
+ char *sysfs_name;
+
+ /*
+ * This is the lower-layer's sender routine. Note that you
+ * must either be holding the ipmi_interfaces_mutex or be in
+ * an umpreemptible region to use this. You must fetch the
+ * value into a local variable and make sure it is not NULL.
+ */
+ struct ipmi_smi_handlers *handlers;
+ void *send_info;
+
+#ifdef CONFIG_PROC_FS
+ /* A list of proc entries for this interface. */
+ struct mutex proc_entry_lock;
+ struct ipmi_proc_entry *proc_entries;
+#endif
+
+ /* Driver-model device for the system interface. */
+ struct device *si_dev;
+
+ /*
+ * A table of sequence numbers for this interface. We use the
+ * sequence numbers for IPMB messages that go out of the
+ * interface to match them up with their responses. A routine
+ * is called periodically to time the items in this list.
+ */
+ spinlock_t seq_lock;
+ struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
+ int curr_seq;
+
+ /*
+ * Messages that were delayed for some reason (out of memory,
+ * for instance), will go in here to be processed later in a
+ * periodic timer interrupt.
+ */
+ spinlock_t waiting_msgs_lock;
+ struct list_head waiting_msgs;
+
+ /*
+ * The list of command receivers that are registered for commands
+ * on this interface.
+ */
+ struct mutex cmd_rcvrs_mutex;
+ struct list_head cmd_rcvrs;
+
+ /*
+	 * Events that were queued because no one was there to receive
+ * them.
+ */
+ spinlock_t events_lock; /* For dealing with event stuff. */
+ struct list_head waiting_events;
+ unsigned int waiting_events_count; /* How many events in queue? */
+ char delivering_events;
+ char event_msg_printed;
+
+ /*
+ * The event receiver for my BMC, only really used at panic
+ * shutdown as a place to store this.
+ */
+ unsigned char event_receiver;
+ unsigned char event_receiver_lun;
+ unsigned char local_sel_device;
+ unsigned char local_event_generator;
+
+ /* For handling of maintenance mode. */
+ int maintenance_mode;
+ int maintenance_mode_enable;
+ int auto_maintenance_timeout;
+ spinlock_t maintenance_mode_lock; /* Used in a timer... */
+
+ /*
+ * A cheap hack, if this is non-null and a message to an
+ * interface comes in with a NULL user, call this routine with
+ * it. Note that the message will still be freed by the
+ * caller. This only works on the system interface.
+ */
+ void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
+
+ /*
+ * When we are scanning the channels for an SMI, this will
+ * tell which channel we are scanning.
+ */
+ int curr_channel;
+
+ /* Channel information */
+ struct ipmi_channel channels[IPMI_MAX_CHANNELS];
+
+ /* Proc FS stuff. */
+ struct proc_dir_entry *proc_dir;
+ char proc_dir_name[10];
+
+ atomic_t stats[IPMI_NUM_STATS];
+
+ /*
+	 * A duplicate of run_to_completion from the smb_info, smi_info,
+	 * and ipmi_serial_info structures. Used to decrease the number
+	 * of parameters passed by the "low" level IPMI code.
+ */
+ int run_to_completion;
+};
+#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
+
+/*
+ * The driver model view of the IPMI messaging driver.
+ */
+static struct platform_driver ipmidriver = {
+ .driver = {
+ .name = "ipmi",
+ .bus = &platform_bus_type
+ }
+};
+static DEFINE_MUTEX(ipmidriver_mutex);
+
+static LIST_HEAD(ipmi_interfaces);
+static DEFINE_MUTEX(ipmi_interfaces_mutex);
+
+/*
+ * List of watchers that want to know when smi's are added and deleted.
+ */
+static LIST_HEAD(smi_watchers);
+static DEFINE_MUTEX(smi_watchers_mutex);
+
+
+#define ipmi_inc_stat(intf, stat) \
+ atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
+#define ipmi_get_stat(intf, stat) \
+ ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
+
+static int is_lan_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_LAN_ADDR_TYPE;
+}
+
+static int is_ipmb_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
+}
+
+static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
+}
+
+static void free_recv_msg_list(struct list_head *q)
+{
+ struct ipmi_recv_msg *msg, *msg2;
+
+ list_for_each_entry_safe(msg, msg2, q, link) {
+ list_del(&msg->link);
+ ipmi_free_recv_msg(msg);
+ }
+}
+
+static void free_smi_msg_list(struct list_head *q)
+{
+ struct ipmi_smi_msg *msg, *msg2;
+
+ list_for_each_entry_safe(msg, msg2, q, link) {
+ list_del(&msg->link);
+ ipmi_free_smi_msg(msg);
+ }
+}
+
+static void clean_up_interface_data(ipmi_smi_t intf)
+{
+ int i;
+ struct cmd_rcvr *rcvr, *rcvr2;
+ struct list_head list;
+
+ free_smi_msg_list(&intf->waiting_msgs);
+ free_recv_msg_list(&intf->waiting_events);
+
+ /*
+ * Wholesale remove all the entries from the list in the
+ * interface and wait for RCU to know that none are in use.
+ */
+ mutex_lock(&intf->cmd_rcvrs_mutex);
+ INIT_LIST_HEAD(&list);
+ list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
+
+ list_for_each_entry_safe(rcvr, rcvr2, &list, link)
+ kfree(rcvr);
+
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+ if ((intf->seq_table[i].inuse)
+ && (intf->seq_table[i].recv_msg))
+ ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+ }
+}
+
+static void intf_free(struct kref *ref)
+{
+ ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
+
+ clean_up_interface_data(intf);
+ kfree(intf);
+}
+
+struct watcher_entry {
+ int intf_num;
+ ipmi_smi_t intf;
+ struct list_head link;
+};
+
+int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
+{
+ ipmi_smi_t intf;
+ LIST_HEAD(to_deliver);
+ struct watcher_entry *e, *e2;
+
+ mutex_lock(&smi_watchers_mutex);
+
+ mutex_lock(&ipmi_interfaces_mutex);
+
+ /* Build a list of things to deliver. */
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == -1)
+ continue;
+ e = kmalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ goto out_err;
+ kref_get(&intf->refcount);
+ e->intf = intf;
+ e->intf_num = intf->intf_num;
+ list_add_tail(&e->link, &to_deliver);
+ }
+
+ /* We will succeed, so add it to the list. */
+ list_add(&watcher->link, &smi_watchers);
+
+ mutex_unlock(&ipmi_interfaces_mutex);
+
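+	/*
+	 * Deliver the callbacks after dropping ipmi_interfaces_mutex;
+	 * each entry holds a refcount that keeps its interface alive
+	 * until new_smi() has been called for it.
+	 */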
+ list_for_each_entry_safe(e, e2, &to_deliver, link) {
+ list_del(&e->link);
+ watcher->new_smi(e->intf_num, e->intf->si_dev);
+ kref_put(&e->intf->refcount, intf_free);
+ kfree(e);
+ }
+
+ mutex_unlock(&smi_watchers_mutex);
+
+ return 0;
+
+ out_err:
+ mutex_unlock(&ipmi_interfaces_mutex);
+ mutex_unlock(&smi_watchers_mutex);
+ list_for_each_entry_safe(e, e2, &to_deliver, link) {
+ list_del(&e->link);
+ kref_put(&e->intf->refcount, intf_free);
+ kfree(e);
+ }
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(ipmi_smi_watcher_register);
+
+int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
+{
+ mutex_lock(&smi_watchers_mutex);
+ list_del(&(watcher->link));
+ mutex_unlock(&smi_watchers_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
+
+/*
+ * Must be called with smi_watchers_mutex held.
+ */
+static void
+call_smi_watchers(int i, struct device *dev)
+{
+ struct ipmi_smi_watcher *w;
+
+ list_for_each_entry(w, &smi_watchers, link) {
+ if (try_module_get(w->owner)) {
+ w->new_smi(i, dev);
+ module_put(w->owner);
+ }
+ }
+}
+
+static int
+ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
+{
+ if (addr1->addr_type != addr2->addr_type)
+ return 0;
+
+ if (addr1->channel != addr2->channel)
+ return 0;
+
+ if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+ struct ipmi_system_interface_addr *smi_addr1
+ = (struct ipmi_system_interface_addr *) addr1;
+ struct ipmi_system_interface_addr *smi_addr2
+ = (struct ipmi_system_interface_addr *) addr2;
+ return (smi_addr1->lun == smi_addr2->lun);
+ }
+
+ if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
+ struct ipmi_ipmb_addr *ipmb_addr1
+ = (struct ipmi_ipmb_addr *) addr1;
+ struct ipmi_ipmb_addr *ipmb_addr2
+ = (struct ipmi_ipmb_addr *) addr2;
+
+ return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
+ && (ipmb_addr1->lun == ipmb_addr2->lun));
+ }
+
+ if (is_lan_addr(addr1)) {
+ struct ipmi_lan_addr *lan_addr1
+ = (struct ipmi_lan_addr *) addr1;
+ struct ipmi_lan_addr *lan_addr2
+ = (struct ipmi_lan_addr *) addr2;
+
+ return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
+ && (lan_addr1->local_SWID == lan_addr2->local_SWID)
+ && (lan_addr1->session_handle
+ == lan_addr2->session_handle)
+ && (lan_addr1->lun == lan_addr2->lun));
+ }
+
+ return 1;
+}
+
+int ipmi_validate_addr(struct ipmi_addr *addr, int len)
+{
+ if (len < sizeof(struct ipmi_system_interface_addr))
+ return -EINVAL;
+
+ if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+ if (addr->channel != IPMI_BMC_CHANNEL)
+ return -EINVAL;
+ return 0;
+ }
+
+ if ((addr->channel == IPMI_BMC_CHANNEL)
+ || (addr->channel >= IPMI_MAX_CHANNELS)
+ || (addr->channel < 0))
+ return -EINVAL;
+
+ if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
+ if (len < sizeof(struct ipmi_ipmb_addr))
+ return -EINVAL;
+ return 0;
+ }
+
+ if (is_lan_addr(addr)) {
+ if (len < sizeof(struct ipmi_lan_addr))
+ return -EINVAL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ipmi_validate_addr);
+
+unsigned int ipmi_addr_length(int addr_type)
+{
+ if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ return sizeof(struct ipmi_system_interface_addr);
+
+ if ((addr_type == IPMI_IPMB_ADDR_TYPE)
+ || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
+ return sizeof(struct ipmi_ipmb_addr);
+
+ if (addr_type == IPMI_LAN_ADDR_TYPE)
+ return sizeof(struct ipmi_lan_addr);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_addr_length);
+
+static void deliver_response(struct ipmi_recv_msg *msg)
+{
+ if (!msg->user) {
+ ipmi_smi_t intf = msg->user_msg_data;
+
+ /* Special handling for NULL users. */
+ if (intf->null_user_handler) {
+ intf->null_user_handler(intf, msg);
+ ipmi_inc_stat(intf, handled_local_responses);
+ } else {
+ /* No handler, so give up. */
+ ipmi_inc_stat(intf, unhandled_local_responses);
+ }
+ ipmi_free_recv_msg(msg);
+ } else {
+ ipmi_user_t user = msg->user;
+ user->handler->ipmi_recv_hndl(msg, user->handler_data);
+ }
+}
+
+static void
+deliver_err_response(struct ipmi_recv_msg *msg, int err)
+{
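+	/*
+	 * Fake up a one-byte response: in IPMI a response netfn is the
+	 * request netfn with the low bit set, and the first data byte
+	 * is the completion code.
+	 */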
+ msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ msg->msg_data[0] = err;
+ msg->msg.netfn |= 1; /* Convert to a response. */
+ msg->msg.data_len = 1;
+ msg->msg.data = msg->msg_data;
+ deliver_response(msg);
+}
+
+/*
+ * Find the next sequence number not being used and add the given
+ * message with the given timeout to the sequence table. This must be
+ * called with the interface's seq_lock held.
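+ *
+ * The scan starts at curr_seq and wraps once around the 64-entry
+ * table; -EAGAIN is returned only if every entry is in use.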
+ */
+static int intf_next_seq(ipmi_smi_t intf,
+ struct ipmi_recv_msg *recv_msg,
+ unsigned long timeout,
+ int retries,
+ int broadcast,
+ unsigned char *seq,
+ long *seqid)
+{
+ int rv = 0;
+ unsigned int i;
+
+ for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
+ i = (i+1)%IPMI_IPMB_NUM_SEQ) {
+ if (!intf->seq_table[i].inuse)
+ break;
+ }
+
+ if (!intf->seq_table[i].inuse) {
+ intf->seq_table[i].recv_msg = recv_msg;
+
+ /*
+ * Start with the maximum timeout, when the send response
+ * comes in we will start the real timer.
+ */
+ intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
+ intf->seq_table[i].orig_timeout = timeout;
+ intf->seq_table[i].retries_left = retries;
+ intf->seq_table[i].broadcast = broadcast;
+ intf->seq_table[i].inuse = 1;
+ intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
+ *seq = i;
+ *seqid = intf->seq_table[i].seqid;
+ intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+ } else {
+ rv = -EAGAIN;
+ }
+
+ return rv;
+}
+
+/*
+ * Return the receive message for the given sequence number and
+ * release the sequence number so it can be reused. Some other data
+ * is passed in to be sure the message matches up correctly (to help
+ * guard against messages coming in after their timeout and the
+ * sequence number being reused).
+ */
+static int intf_find_seq(ipmi_smi_t intf,
+ unsigned char seq,
+ short channel,
+ unsigned char cmd,
+ unsigned char netfn,
+ struct ipmi_addr *addr,
+ struct ipmi_recv_msg **recv_msg)
+{
+ int rv = -ENODEV;
+ unsigned long flags;
+
+ if (seq >= IPMI_IPMB_NUM_SEQ)
+ return -EINVAL;
+
+ spin_lock_irqsave(&(intf->seq_lock), flags);
+ if (intf->seq_table[seq].inuse) {
+ struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
+
+ if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
+ && (msg->msg.netfn == netfn)
+ && (ipmi_addr_equal(addr, &(msg->addr)))) {
+ *recv_msg = msg;
+ intf->seq_table[seq].inuse = 0;
+ rv = 0;
+ }
+ }
+ spin_unlock_irqrestore(&(intf->seq_lock), flags);
+
+ return rv;
+}
+
+
+/* Start the timer for a specific sequence table entry. */
+static int intf_start_seq_timer(ipmi_smi_t intf,
+ long msgid)
+{
+ int rv = -ENODEV;
+ unsigned long flags;
+ unsigned char seq;
+ unsigned long seqid;
+
+
+ GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+
+ spin_lock_irqsave(&(intf->seq_lock), flags);
+ /*
+ * We do this verification because the user can be deleted
+ * while a message is outstanding.
+ */
+ if ((intf->seq_table[seq].inuse)
+ && (intf->seq_table[seq].seqid == seqid)) {
+ struct seq_table *ent = &(intf->seq_table[seq]);
+ ent->timeout = ent->orig_timeout;
+ rv = 0;
+ }
+ spin_unlock_irqrestore(&(intf->seq_lock), flags);
+
+ return rv;
+}
+
+/* Got an error for the send message for a specific sequence number. */
+static int intf_err_seq(ipmi_smi_t intf,
+ long msgid,
+ unsigned int err)
+{
+ int rv = -ENODEV;
+ unsigned long flags;
+ unsigned char seq;
+ unsigned long seqid;
+ struct ipmi_recv_msg *msg = NULL;
+
+
+ GET_SEQ_FROM_MSGID(msgid, seq, seqid);
+
+ spin_lock_irqsave(&(intf->seq_lock), flags);
+ /*
+ * We do this verification because the user can be deleted
+ * while a message is outstanding.
+ */
+ if ((intf->seq_table[seq].inuse)
+ && (intf->seq_table[seq].seqid == seqid)) {
+ struct seq_table *ent = &(intf->seq_table[seq]);
+
+ ent->inuse = 0;
+ msg = ent->recv_msg;
+ rv = 0;
+ }
+ spin_unlock_irqrestore(&(intf->seq_lock), flags);
+
+ if (msg)
+ deliver_err_response(msg, err);
+
+ return rv;
+}
+
+
+int ipmi_create_user(unsigned int if_num,
+ struct ipmi_user_hndl *handler,
+ void *handler_data,
+ ipmi_user_t *user)
+{
+ unsigned long flags;
+ ipmi_user_t new_user;
+ int rv = 0;
+ ipmi_smi_t intf;
+
+ /*
+ * There is no module usecount here, because it's not
+ * required. Since this can only be used by and called from
+ * other modules, they will implicitly use this module, and
+ * thus this can't be removed unless the other modules are
+ * removed.
+ */
+
+ if (handler == NULL)
+ return -EINVAL;
+
+	/*
+	 * Make sure the driver is actually initialized; this handles
+	 * problems with initialization order.
+	 */
+ if (!initialized) {
+ rv = ipmi_init_msghandler();
+ if (rv)
+ return rv;
+
+		/*
+		 * The init code doesn't return an error if it was turned
+		 * off, but in that case it won't initialize either; check
+		 * for that here.
+		 */
+ if (!initialized)
+ return -ENODEV;
+ }
+
+ new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
+ if (!new_user)
+ return -ENOMEM;
+
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num)
+ goto found;
+ }
+ /* Not found, return an error */
+ rv = -EINVAL;
+ goto out_kfree;
+
+ found:
+ /* Note that each existing user holds a refcount to the interface. */
+ kref_get(&intf->refcount);
+
+ kref_init(&new_user->refcount);
+ new_user->handler = handler;
+ new_user->handler_data = handler_data;
+ new_user->intf = intf;
+ new_user->gets_events = 0;
+
+ if (!try_module_get(intf->handlers->owner)) {
+ rv = -ENODEV;
+ goto out_kref;
+ }
+
+ if (intf->handlers->inc_usecount) {
+ rv = intf->handlers->inc_usecount(intf->send_info);
+ if (rv) {
+ module_put(intf->handlers->owner);
+ goto out_kref;
+ }
+ }
+
+	/*
+	 * We held the interfaces mutex until here so that
+	 * intf->handlers was guaranteed to stay valid while we
+	 * used it above.
+	 */
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ new_user->valid = 1;
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ list_add_rcu(&new_user->link, &intf->users);
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+ *user = new_user;
+ return 0;
+
+out_kref:
+ kref_put(&intf->refcount, intf_free);
+out_kfree:
+ mutex_unlock(&ipmi_interfaces_mutex);
+ kfree(new_user);
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_create_user);
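+
+#if 0	/* Usage sketch only, never compiled; the example_* names are
+	   hypothetical. */
+/*
+ * Minimal in-kernel client of ipmi_create_user().  The receive
+ * handler runs for every message delivered to this user and owns the
+ * message, so it must free it when done.
+ */
+static void example_recv(struct ipmi_recv_msg *msg, void *handler_data)
+{
+	/* Inspect msg->recv_type and msg->msg here. */
+	ipmi_free_recv_msg(msg);
+}
+
+static struct ipmi_user_hndl example_hndl = {
+	.ipmi_recv_hndl = example_recv,
+};
+
+static int example_attach(unsigned int if_num, ipmi_user_t *user)
+{
+	int rv = ipmi_create_user(if_num, &example_hndl, NULL, user);
+
+	/* On teardown, call ipmi_destroy_user(*user). */
+	return rv;
+}
+#endif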
+
+int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
+{
+ int rv = 0;
+ ipmi_smi_t intf;
+ struct ipmi_smi_handlers *handlers;
+
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num)
+ goto found;
+ }
+ /* Not found, return an error */
+ rv = -EINVAL;
+ mutex_unlock(&ipmi_interfaces_mutex);
+ return rv;
+
+found:
+ handlers = intf->handlers;
+ rv = -ENOSYS;
+ if (handlers->get_smi_info)
+ rv = handlers->get_smi_info(intf->send_info, data);
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_get_smi_info);
+
+static void free_user(struct kref *ref)
+{
+ ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
+ kfree(user);
+}
+
+int ipmi_destroy_user(ipmi_user_t user)
+{
+ ipmi_smi_t intf = user->intf;
+ int i;
+ unsigned long flags;
+ struct cmd_rcvr *rcvr;
+ struct cmd_rcvr *rcvrs = NULL;
+
+ user->valid = 0;
+
+ /* Remove the user from the interface's sequence table. */
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ list_del_rcu(&user->link);
+
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+ if (intf->seq_table[i].inuse
+ && (intf->seq_table[i].recv_msg->user == user)) {
+ intf->seq_table[i].inuse = 0;
+ ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+ }
+ }
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+	/*
+	 * Remove the user from the command receiver's table.  First
+	 * we build a list of everything (not using the standard link,
+	 * since other things may be using it until we do
+	 * synchronize_rcu()), then free everything in that list.
+	 */
+ mutex_lock(&intf->cmd_rcvrs_mutex);
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+ if (rcvr->user == user) {
+ list_del_rcu(&rcvr->link);
+ rcvr->next = rcvrs;
+ rcvrs = rcvr;
+ }
+ }
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
+ synchronize_rcu();
+ while (rcvrs) {
+ rcvr = rcvrs;
+ rcvrs = rcvr->next;
+ kfree(rcvr);
+ }
+
+ mutex_lock(&ipmi_interfaces_mutex);
+ if (intf->handlers) {
+ module_put(intf->handlers->owner);
+ if (intf->handlers->dec_usecount)
+ intf->handlers->dec_usecount(intf->send_info);
+ }
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ kref_put(&intf->refcount, intf_free);
+
+ kref_put(&user->refcount, free_user);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_destroy_user);
+
+void ipmi_get_version(ipmi_user_t user,
+ unsigned char *major,
+ unsigned char *minor)
+{
+ *major = user->intf->ipmi_version_major;
+ *minor = user->intf->ipmi_version_minor;
+}
+EXPORT_SYMBOL(ipmi_get_version);
+
+int ipmi_set_my_address(ipmi_user_t user,
+ unsigned int channel,
+ unsigned char address)
+{
+ if (channel >= IPMI_MAX_CHANNELS)
+ return -EINVAL;
+ user->intf->channels[channel].address = address;
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_set_my_address);
+
+int ipmi_get_my_address(ipmi_user_t user,
+ unsigned int channel,
+ unsigned char *address)
+{
+ if (channel >= IPMI_MAX_CHANNELS)
+ return -EINVAL;
+ *address = user->intf->channels[channel].address;
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_get_my_address);
+
+int ipmi_set_my_LUN(ipmi_user_t user,
+ unsigned int channel,
+ unsigned char LUN)
+{
+ if (channel >= IPMI_MAX_CHANNELS)
+ return -EINVAL;
+ user->intf->channels[channel].lun = LUN & 0x3;
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_set_my_LUN);
+
+int ipmi_get_my_LUN(ipmi_user_t user,
+ unsigned int channel,
+ unsigned char *address)
+{
+ if (channel >= IPMI_MAX_CHANNELS)
+ return -EINVAL;
+ *address = user->intf->channels[channel].lun;
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_get_my_LUN);
+
+int ipmi_get_maintenance_mode(ipmi_user_t user)
+{
+ int mode;
+ unsigned long flags;
+
+ spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
+ mode = user->intf->maintenance_mode;
+ spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
+
+ return mode;
+}
+EXPORT_SYMBOL(ipmi_get_maintenance_mode);
+
+static void maintenance_mode_update(ipmi_smi_t intf)
+{
+ if (intf->handlers->set_maintenance_mode)
+ intf->handlers->set_maintenance_mode(
+ intf->send_info, intf->maintenance_mode_enable);
+}
+
+int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
+{
+ int rv = 0;
+ unsigned long flags;
+ ipmi_smi_t intf = user->intf;
+
+ spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+ if (intf->maintenance_mode != mode) {
+ switch (mode) {
+ case IPMI_MAINTENANCE_MODE_AUTO:
+ intf->maintenance_mode = mode;
+ intf->maintenance_mode_enable
+ = (intf->auto_maintenance_timeout > 0);
+ break;
+
+ case IPMI_MAINTENANCE_MODE_OFF:
+ intf->maintenance_mode = mode;
+ intf->maintenance_mode_enable = 0;
+ break;
+
+ case IPMI_MAINTENANCE_MODE_ON:
+ intf->maintenance_mode = mode;
+ intf->maintenance_mode_enable = 1;
+ break;
+
+ default:
+ rv = -EINVAL;
+ goto out_unlock;
+ }
+
+ maintenance_mode_update(intf);
+ }
+ out_unlock:
+ spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_set_maintenance_mode);
+
+int ipmi_set_gets_events(ipmi_user_t user, int val)
+{
+ unsigned long flags;
+ ipmi_smi_t intf = user->intf;
+ struct ipmi_recv_msg *msg, *msg2;
+ struct list_head msgs;
+
+ INIT_LIST_HEAD(&msgs);
+
+ spin_lock_irqsave(&intf->events_lock, flags);
+ user->gets_events = val;
+
+ if (intf->delivering_events)
+ /*
+ * Another thread is delivering events for this, so
+ * let it handle any new events.
+ */
+ goto out;
+
+ /* Deliver any queued events. */
+ while (user->gets_events && !list_empty(&intf->waiting_events)) {
+ list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
+ list_move_tail(&msg->link, &msgs);
+ intf->waiting_events_count = 0;
+ if (intf->event_msg_printed) {
+ printk(KERN_WARNING PFX "Event queue no longer"
+ " full\n");
+ intf->event_msg_printed = 0;
+ }
+
+ intf->delivering_events = 1;
+ spin_unlock_irqrestore(&intf->events_lock, flags);
+
+ list_for_each_entry_safe(msg, msg2, &msgs, link) {
+ msg->user = user;
+ kref_get(&user->refcount);
+ deliver_response(msg);
+ }
+
+ spin_lock_irqsave(&intf->events_lock, flags);
+ intf->delivering_events = 0;
+ }
+
+ out:
+ spin_unlock_irqrestore(&intf->events_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_set_gets_events);
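+
+#if 0	/* Usage sketch only, never compiled; example_* is hypothetical. */
+/*
+ * Opting a user in to events: any queued events are delivered
+ * immediately through the user's ipmi_recv_hndl with recv_type set
+ * to IPMI_ASYNC_EVENT_RECV_TYPE, and new events follow as they
+ * arrive.  "user" is a handle from ipmi_create_user().
+ */
+static int example_enable_events(ipmi_user_t user)
+{
+	return ipmi_set_gets_events(user, 1);
+}
+#endif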
+
+static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
+ unsigned char netfn,
+ unsigned char cmd,
+ unsigned char chan)
+{
+ struct cmd_rcvr *rcvr;
+
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+ if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
+ && (rcvr->chans & (1 << chan)))
+ return rcvr;
+ }
+ return NULL;
+}
+
+static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
+ unsigned char netfn,
+ unsigned char cmd,
+ unsigned int chans)
+{
+ struct cmd_rcvr *rcvr;
+
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+ if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
+ && (rcvr->chans & chans))
+ return 0;
+ }
+ return 1;
+}
+
+int ipmi_register_for_cmd(ipmi_user_t user,
+ unsigned char netfn,
+ unsigned char cmd,
+ unsigned int chans)
+{
+ ipmi_smi_t intf = user->intf;
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+
+
+ rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
+ if (!rcvr)
+ return -ENOMEM;
+ rcvr->cmd = cmd;
+ rcvr->netfn = netfn;
+ rcvr->chans = chans;
+ rcvr->user = user;
+
+ mutex_lock(&intf->cmd_rcvrs_mutex);
+ /* Make sure the command/netfn is not already registered. */
+ if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
+ rv = -EBUSY;
+ goto out_unlock;
+ }
+
+ list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
+
+ out_unlock:
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
+ if (rv)
+ kfree(rcvr);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_register_for_cmd);
+
+int ipmi_unregister_for_cmd(ipmi_user_t user,
+ unsigned char netfn,
+ unsigned char cmd,
+ unsigned int chans)
+{
+ ipmi_smi_t intf = user->intf;
+ struct cmd_rcvr *rcvr;
+ struct cmd_rcvr *rcvrs = NULL;
+ int i, rv = -ENOENT;
+
+ mutex_lock(&intf->cmd_rcvrs_mutex);
+ for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
+ if (((1 << i) & chans) == 0)
+ continue;
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
+ if (rcvr == NULL)
+ continue;
+ if (rcvr->user == user) {
+ rv = 0;
+ rcvr->chans &= ~chans;
+ if (rcvr->chans == 0) {
+ list_del_rcu(&rcvr->link);
+ rcvr->next = rcvrs;
+ rcvrs = rcvr;
+ }
+ }
+ }
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
+ synchronize_rcu();
+ while (rcvrs) {
+ rcvr = rcvrs;
+ rcvrs = rcvr->next;
+ kfree(rcvr);
+ }
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_unregister_for_cmd);
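+
+#if 0	/* Usage sketch only, never compiled; example_* is hypothetical. */
+/*
+ * Claiming a command: register for a netfn/cmd pair on all channels.
+ * Registrations are exclusive per channel, so a second registrant
+ * for the same pair gets -EBUSY from the check above.  Matching
+ * commands then arrive through the user's ipmi_recv_hndl with
+ * recv_type == IPMI_CMD_RECV_TYPE.
+ */
+static int example_claim_cmd(ipmi_user_t user)
+{
+	return ipmi_register_for_cmd(user, IPMI_NETFN_APP_REQUEST,
+				     IPMI_GET_DEVICE_ID_CMD,
+				     IPMI_CHAN_ALL);
+}
+#endif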
+
+static unsigned char
+ipmb_checksum(unsigned char *data, int size)
+{
+ unsigned char csum = 0;
+
+ for (; size > 0; size--, data++)
+ csum += *data;
+
+ return -csum;
+}
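+
+#if 0	/* Property sketch only, never compiled. */
+/*
+ * The 2's-complement checksum above is built so that appending the
+ * returned byte makes the whole buffer sum to zero mod 256, which is
+ * exactly what receivers verify:
+ */
+static int ipmb_checksum_property(void)
+{
+	unsigned char buf[3] = { 0x20, 0x18 };	/* any two bytes */
+
+	buf[2] = ipmb_checksum(buf, 2);
+	return ipmb_checksum(buf, 3) == 0;	/* always true */
+}
+#endif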
+
+static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_ipmb_addr *ipmb_addr,
+ long msgid,
+ unsigned char ipmb_seq,
+ int broadcast,
+ unsigned char source_address,
+ unsigned char source_lun)
+{
+	int i = broadcast;	/* broadcast adds one byte of offset */
+
+ /* Format the IPMB header data. */
+ smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_msg->data[1] = IPMI_SEND_MSG_CMD;
+ smi_msg->data[2] = ipmb_addr->channel;
+ if (broadcast)
+ smi_msg->data[3] = 0;
+ smi_msg->data[i+3] = ipmb_addr->slave_addr;
+ smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
+ smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
+ smi_msg->data[i+6] = source_address;
+ smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
+ smi_msg->data[i+8] = msg->cmd;
+
+ /* Now tack on the data to the message. */
+ if (msg->data_len > 0)
+ memcpy(&(smi_msg->data[i+9]), msg->data,
+ msg->data_len);
+ smi_msg->data_size = msg->data_len + 9;
+
+ /* Now calculate the checksum and tack it on. */
+ smi_msg->data[i+smi_msg->data_size]
+ = ipmb_checksum(&(smi_msg->data[i+6]),
+ smi_msg->data_size-6);
+
+ /*
+ * Add on the checksum size and the offset from the
+ * broadcast.
+ */
+ smi_msg->data_size += 1 + i;
+
+ smi_msg->msgid = msgid;
+}
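+
+/*
+ * For reference, the non-broadcast (i == 0) frame built above is:
+ *
+ *   data[0] (APP netfn << 2)        data[5] checksum over data[3..4]
+ *   data[1] Send Message command    data[6] source (requester) address
+ *   data[2] channel                 data[7] (ipmb_seq << 2) | source LUN
+ *   data[3] target slave address    data[8] command
+ *   data[4] (netfn << 2) | LUN      data[9..] payload, then 2nd checksum
+ *
+ * A broadcast inserts a zero at data[3] and shifts the rest down one.
+ */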
+
+static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_lan_addr *lan_addr,
+ long msgid,
+ unsigned char ipmb_seq,
+ unsigned char source_lun)
+{
+	/* Format the LAN message header data. */
+ smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_msg->data[1] = IPMI_SEND_MSG_CMD;
+ smi_msg->data[2] = lan_addr->channel;
+ smi_msg->data[3] = lan_addr->session_handle;
+ smi_msg->data[4] = lan_addr->remote_SWID;
+ smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
+ smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
+ smi_msg->data[7] = lan_addr->local_SWID;
+ smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
+ smi_msg->data[9] = msg->cmd;
+
+ /* Now tack on the data to the message. */
+ if (msg->data_len > 0)
+ memcpy(&(smi_msg->data[10]), msg->data,
+ msg->data_len);
+ smi_msg->data_size = msg->data_len + 10;
+
+ /* Now calculate the checksum and tack it on. */
+ smi_msg->data[smi_msg->data_size]
+ = ipmb_checksum(&(smi_msg->data[7]),
+ smi_msg->data_size-7);
+
+	/*
+	 * Add on the checksum size; unlike IPMB there is no
+	 * broadcast offset here.
+	 */
+	smi_msg->data_size += 1;
+
+ smi_msg->msgid = msgid;
+}
+
+/*
+ * Separate from ipmi_request so that the user does not have to be
+ * supplied in certain circumstances (mainly at panic time). If
+ * messages are supplied, they will be freed, even if an error
+ * occurs.
+ */
+static int i_ipmi_request(ipmi_user_t user,
+ ipmi_smi_t intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ void *user_msg_data,
+ void *supplied_smi,
+ struct ipmi_recv_msg *supplied_recv,
+ int priority,
+ unsigned char source_address,
+ unsigned char source_lun,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ int rv = 0;
+ struct ipmi_smi_msg *smi_msg;
+ struct ipmi_recv_msg *recv_msg;
+ unsigned long flags;
+ struct ipmi_smi_handlers *handlers;
+
+
+ if (supplied_recv)
+ recv_msg = supplied_recv;
+ else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (recv_msg == NULL)
+ return -ENOMEM;
+ }
+ recv_msg->user_msg_data = user_msg_data;
+
+ if (supplied_smi)
+ smi_msg = (struct ipmi_smi_msg *) supplied_smi;
+ else {
+ smi_msg = ipmi_alloc_smi_msg();
+ if (smi_msg == NULL) {
+ ipmi_free_recv_msg(recv_msg);
+ return -ENOMEM;
+ }
+ }
+
+ rcu_read_lock();
+ handlers = intf->handlers;
+ if (!handlers) {
+ rv = -ENODEV;
+ goto out_err;
+ }
+
+ recv_msg->user = user;
+ if (user)
+ kref_get(&user->refcount);
+ recv_msg->msgid = msgid;
+ /*
+ * Store the message to send in the receive message so timeout
+ * responses can get the proper response data.
+ */
+ recv_msg->msg = *msg;
+
+ if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
+ struct ipmi_system_interface_addr *smi_addr;
+
+ if (msg->netfn & 1) {
+ /* Responses are not allowed to the SMI. */
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ smi_addr = (struct ipmi_system_interface_addr *) addr;
+ if (smi_addr->lun > 3) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
+
+ if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
+ && ((msg->cmd == IPMI_SEND_MSG_CMD)
+ || (msg->cmd == IPMI_GET_MSG_CMD)
+ || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
+ /*
+ * We don't let the user do these, since we manage
+ * the sequence numbers.
+ */
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
+ && ((msg->cmd == IPMI_COLD_RESET_CMD)
+ || (msg->cmd == IPMI_WARM_RESET_CMD)))
+ || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) {
+ spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+ intf->auto_maintenance_timeout
+ = IPMI_MAINTENANCE_MODE_TIMEOUT;
+ if (!intf->maintenance_mode
+ && !intf->maintenance_mode_enable) {
+ intf->maintenance_mode_enable = 1;
+ maintenance_mode_update(intf);
+ }
+ spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+ flags);
+ }
+
+ if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EMSGSIZE;
+ goto out_err;
+ }
+
+ smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
+ smi_msg->data[1] = msg->cmd;
+ smi_msg->msgid = msgid;
+ smi_msg->user_data = recv_msg;
+ if (msg->data_len > 0)
+ memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
+ smi_msg->data_size = msg->data_len + 2;
+ ipmi_inc_stat(intf, sent_local_commands);
+ } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
+ struct ipmi_ipmb_addr *ipmb_addr;
+ unsigned char ipmb_seq;
+ long seqid;
+ int broadcast = 0;
+
+ if (addr->channel >= IPMI_MAX_CHANNELS) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ if (intf->channels[addr->channel].medium
+ != IPMI_CHANNEL_MEDIUM_IPMB) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ if (retries < 0) {
+ if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
+ retries = 0; /* Don't retry broadcasts. */
+ else
+ retries = 4;
+ }
+ if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
+			/*
+			 * A broadcast adds a zero at the beginning of the
+			 * message, but is otherwise the same as an IPMB
+			 * address.
+			 */
+ addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+ broadcast = 1;
+ }
+
+
+ /* Default to 1 second retries. */
+ if (retry_time_ms == 0)
+ retry_time_ms = 1000;
+
+ /*
+ * 9 for the header and 1 for the checksum, plus
+ * possibly one for the broadcast.
+ */
+ if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EMSGSIZE;
+ goto out_err;
+ }
+
+ ipmb_addr = (struct ipmi_ipmb_addr *) addr;
+ if (ipmb_addr->lun > 3) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
+
+ if (recv_msg->msg.netfn & 0x1) {
+ /*
+ * It's a response, so use the user's sequence
+ * from msgid.
+ */
+ ipmi_inc_stat(intf, sent_ipmb_responses);
+ format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
+ msgid, broadcast,
+ source_address, source_lun);
+
+ /*
+ * Save the receive message so we can use it
+ * to deliver the response.
+ */
+ smi_msg->user_data = recv_msg;
+ } else {
+ /* It's a command, so get a sequence for it. */
+
+ spin_lock_irqsave(&(intf->seq_lock), flags);
+
+			/*
+			 * Create a sequence number with the given
+			 * timeout and retry count.
+			 */
+ rv = intf_next_seq(intf,
+ recv_msg,
+ retry_time_ms,
+ retries,
+ broadcast,
+ &ipmb_seq,
+ &seqid);
+ if (rv) {
+ /*
+ * We have used up all the sequence numbers,
+ * probably, so abort.
+ */
+ spin_unlock_irqrestore(&(intf->seq_lock),
+ flags);
+ goto out_err;
+ }
+
+ ipmi_inc_stat(intf, sent_ipmb_commands);
+
+ /*
+ * Store the sequence number in the message,
+ * so that when the send message response
+ * comes back we can start the timer.
+ */
+ format_ipmb_msg(smi_msg, msg, ipmb_addr,
+ STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
+ ipmb_seq, broadcast,
+ source_address, source_lun);
+
+ /*
+ * Copy the message into the recv message data, so we
+ * can retransmit it later if necessary.
+ */
+ memcpy(recv_msg->msg_data, smi_msg->data,
+ smi_msg->data_size);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = smi_msg->data_size;
+
+ /*
+ * We don't unlock until here, because we need
+ * to copy the completed message into the
+ * recv_msg before we release the lock.
+ * Otherwise, race conditions may bite us. I
+ * know that's pretty paranoid, but I prefer
+ * to be correct.
+ */
+ spin_unlock_irqrestore(&(intf->seq_lock), flags);
+ }
+ } else if (is_lan_addr(addr)) {
+ struct ipmi_lan_addr *lan_addr;
+ unsigned char ipmb_seq;
+ long seqid;
+
+ if (addr->channel >= IPMI_MAX_CHANNELS) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ if ((intf->channels[addr->channel].medium
+ != IPMI_CHANNEL_MEDIUM_8023LAN)
+ && (intf->channels[addr->channel].medium
+ != IPMI_CHANNEL_MEDIUM_ASYNC)) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ retries = 4;
+
+ /* Default to 1 second retries. */
+ if (retry_time_ms == 0)
+ retry_time_ms = 1000;
+
+		/* 10 header bytes and 1 checksum (the check keeps a byte of slack). */
+ if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EMSGSIZE;
+ goto out_err;
+ }
+
+ lan_addr = (struct ipmi_lan_addr *) addr;
+ if (lan_addr->lun > 3) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+ memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
+
+ if (recv_msg->msg.netfn & 0x1) {
+ /*
+ * It's a response, so use the user's sequence
+ * from msgid.
+ */
+ ipmi_inc_stat(intf, sent_lan_responses);
+ format_lan_msg(smi_msg, msg, lan_addr, msgid,
+ msgid, source_lun);
+
+ /*
+ * Save the receive message so we can use it
+ * to deliver the response.
+ */
+ smi_msg->user_data = recv_msg;
+ } else {
+ /* It's a command, so get a sequence for it. */
+
+ spin_lock_irqsave(&(intf->seq_lock), flags);
+
+			/*
+			 * Create a sequence number with the given
+			 * timeout and retry count.
+			 */
+ rv = intf_next_seq(intf,
+ recv_msg,
+ retry_time_ms,
+ retries,
+ 0,
+ &ipmb_seq,
+ &seqid);
+ if (rv) {
+ /*
+ * We have used up all the sequence numbers,
+ * probably, so abort.
+ */
+ spin_unlock_irqrestore(&(intf->seq_lock),
+ flags);
+ goto out_err;
+ }
+
+ ipmi_inc_stat(intf, sent_lan_commands);
+
+ /*
+ * Store the sequence number in the message,
+ * so that when the send message response
+ * comes back we can start the timer.
+ */
+ format_lan_msg(smi_msg, msg, lan_addr,
+ STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
+ ipmb_seq, source_lun);
+
+ /*
+ * Copy the message into the recv message data, so we
+ * can retransmit it later if necessary.
+ */
+ memcpy(recv_msg->msg_data, smi_msg->data,
+ smi_msg->data_size);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = smi_msg->data_size;
+
+ /*
+ * We don't unlock until here, because we need
+ * to copy the completed message into the
+ * recv_msg before we release the lock.
+ * Otherwise, race conditions may bite us. I
+ * know that's pretty paranoid, but I prefer
+ * to be correct.
+ */
+ spin_unlock_irqrestore(&(intf->seq_lock), flags);
+ }
+ } else {
+ /* Unknown address type. */
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ rv = -EINVAL;
+ goto out_err;
+ }
+
+#ifdef DEBUG_MSGING
+ {
+ int m;
+ for (m = 0; m < smi_msg->data_size; m++)
+ printk(" %2.2x", smi_msg->data[m]);
+ printk("\n");
+ }
+#endif
+
+ handlers->sender(intf->send_info, smi_msg, priority);
+ rcu_read_unlock();
+
+ return 0;
+
+ out_err:
+ rcu_read_unlock();
+ ipmi_free_smi_msg(smi_msg);
+ ipmi_free_recv_msg(recv_msg);
+ return rv;
+}
+
+static int check_addr(ipmi_smi_t intf,
+ struct ipmi_addr *addr,
+ unsigned char *saddr,
+ unsigned char *lun)
+{
+ if (addr->channel >= IPMI_MAX_CHANNELS)
+ return -EINVAL;
+ *lun = intf->channels[addr->channel].lun;
+ *saddr = intf->channels[addr->channel].address;
+ return 0;
+}
+
+int ipmi_request_settime(ipmi_user_t user,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ void *user_msg_data,
+ int priority,
+ int retries,
+ unsigned int retry_time_ms)
+{
+ unsigned char saddr, lun;
+ int rv;
+
+ if (!user)
+ return -EINVAL;
+ rv = check_addr(user->intf, addr, &saddr, &lun);
+ if (rv)
+ return rv;
+ return i_ipmi_request(user,
+ user->intf,
+ addr,
+ msgid,
+ msg,
+ user_msg_data,
+ NULL, NULL,
+ priority,
+ saddr,
+ lun,
+ retries,
+ retry_time_ms);
+}
+EXPORT_SYMBOL(ipmi_request_settime);
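+
+#if 0	/* Usage sketch only, never compiled; example_* is hypothetical. */
+/*
+ * Sending a Get Device ID request to the local BMC over the system
+ * interface.  The response comes back through the user's
+ * ipmi_recv_hndl carrying the msgid passed here; retries = -1 and
+ * retry_time_ms = 0 request the defaults (retries only matter for
+ * IPMB/LAN targets, not the system interface).
+ */
+static int example_get_device_id(ipmi_user_t user)
+{
+	struct ipmi_system_interface_addr si = {
+		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
+		.channel   = IPMI_BMC_CHANNEL,
+		.lun       = 0,
+	};
+	struct kernel_ipmi_msg msg = {
+		.netfn    = IPMI_NETFN_APP_REQUEST,
+		.cmd      = IPMI_GET_DEVICE_ID_CMD,
+		.data     = NULL,
+		.data_len = 0,
+	};
+
+	return ipmi_request_settime(user, (struct ipmi_addr *) &si,
+				    0x1234 /* arbitrary msgid */, &msg,
+				    NULL, 0, -1, 0);
+}
+#endif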
+
+int ipmi_request_supply_msgs(ipmi_user_t user,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ void *user_msg_data,
+ void *supplied_smi,
+ struct ipmi_recv_msg *supplied_recv,
+ int priority)
+{
+ unsigned char saddr, lun;
+ int rv;
+
+ if (!user)
+ return -EINVAL;
+ rv = check_addr(user->intf, addr, &saddr, &lun);
+ if (rv)
+ return rv;
+ return i_ipmi_request(user,
+ user->intf,
+ addr,
+ msgid,
+ msg,
+ user_msg_data,
+ supplied_smi,
+ supplied_recv,
+ priority,
+ saddr,
+ lun,
+ -1, 0);
+}
+EXPORT_SYMBOL(ipmi_request_supply_msgs);
+
+#ifdef CONFIG_PROC_FS
+static int smi_ipmb_proc_show(struct seq_file *m, void *v)
+{
+ ipmi_smi_t intf = m->private;
+ int i;
+
+ seq_printf(m, "%x", intf->channels[0].address);
+ for (i = 1; i < IPMI_MAX_CHANNELS; i++)
+ seq_printf(m, " %x", intf->channels[i].address);
+ return seq_putc(m, '\n');
+}
+
+static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smi_ipmb_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations smi_ipmb_proc_ops = {
+ .open = smi_ipmb_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int smi_version_proc_show(struct seq_file *m, void *v)
+{
+ ipmi_smi_t intf = m->private;
+
+ return seq_printf(m, "%u.%u\n",
+ ipmi_version_major(&intf->bmc->id),
+ ipmi_version_minor(&intf->bmc->id));
+}
+
+static int smi_version_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smi_version_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations smi_version_proc_ops = {
+ .open = smi_version_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int smi_stats_proc_show(struct seq_file *m, void *v)
+{
+ ipmi_smi_t intf = m->private;
+
+ seq_printf(m, "sent_invalid_commands: %u\n",
+ ipmi_get_stat(intf, sent_invalid_commands));
+ seq_printf(m, "sent_local_commands: %u\n",
+ ipmi_get_stat(intf, sent_local_commands));
+ seq_printf(m, "handled_local_responses: %u\n",
+ ipmi_get_stat(intf, handled_local_responses));
+ seq_printf(m, "unhandled_local_responses: %u\n",
+ ipmi_get_stat(intf, unhandled_local_responses));
+ seq_printf(m, "sent_ipmb_commands: %u\n",
+ ipmi_get_stat(intf, sent_ipmb_commands));
+ seq_printf(m, "sent_ipmb_command_errs: %u\n",
+ ipmi_get_stat(intf, sent_ipmb_command_errs));
+ seq_printf(m, "retransmitted_ipmb_commands: %u\n",
+ ipmi_get_stat(intf, retransmitted_ipmb_commands));
+ seq_printf(m, "timed_out_ipmb_commands: %u\n",
+ ipmi_get_stat(intf, timed_out_ipmb_commands));
+ seq_printf(m, "timed_out_ipmb_broadcasts: %u\n",
+ ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
+ seq_printf(m, "sent_ipmb_responses: %u\n",
+ ipmi_get_stat(intf, sent_ipmb_responses));
+ seq_printf(m, "handled_ipmb_responses: %u\n",
+ ipmi_get_stat(intf, handled_ipmb_responses));
+ seq_printf(m, "invalid_ipmb_responses: %u\n",
+ ipmi_get_stat(intf, invalid_ipmb_responses));
+ seq_printf(m, "unhandled_ipmb_responses: %u\n",
+ ipmi_get_stat(intf, unhandled_ipmb_responses));
+ seq_printf(m, "sent_lan_commands: %u\n",
+ ipmi_get_stat(intf, sent_lan_commands));
+ seq_printf(m, "sent_lan_command_errs: %u\n",
+ ipmi_get_stat(intf, sent_lan_command_errs));
+ seq_printf(m, "retransmitted_lan_commands: %u\n",
+ ipmi_get_stat(intf, retransmitted_lan_commands));
+ seq_printf(m, "timed_out_lan_commands: %u\n",
+ ipmi_get_stat(intf, timed_out_lan_commands));
+ seq_printf(m, "sent_lan_responses: %u\n",
+ ipmi_get_stat(intf, sent_lan_responses));
+ seq_printf(m, "handled_lan_responses: %u\n",
+ ipmi_get_stat(intf, handled_lan_responses));
+ seq_printf(m, "invalid_lan_responses: %u\n",
+ ipmi_get_stat(intf, invalid_lan_responses));
+ seq_printf(m, "unhandled_lan_responses: %u\n",
+ ipmi_get_stat(intf, unhandled_lan_responses));
+ seq_printf(m, "handled_commands: %u\n",
+ ipmi_get_stat(intf, handled_commands));
+ seq_printf(m, "invalid_commands: %u\n",
+ ipmi_get_stat(intf, invalid_commands));
+ seq_printf(m, "unhandled_commands: %u\n",
+ ipmi_get_stat(intf, unhandled_commands));
+ seq_printf(m, "invalid_events: %u\n",
+ ipmi_get_stat(intf, invalid_events));
+ seq_printf(m, "events: %u\n",
+ ipmi_get_stat(intf, events));
+ seq_printf(m, "failed rexmit LAN msgs: %u\n",
+ ipmi_get_stat(intf, dropped_rexmit_lan_commands));
+ seq_printf(m, "failed rexmit IPMB msgs: %u\n",
+ ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
+ return 0;
+}
+
+static int smi_stats_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smi_stats_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations smi_stats_proc_ops = {
+ .open = smi_stats_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* CONFIG_PROC_FS */
+
+int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
+ const struct file_operations *proc_ops,
+ void *data)
+{
+ int rv = 0;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *file;
+ struct ipmi_proc_entry *entry;
+
+ /* Create a list element. */
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
+ if (!entry->name) {
+ kfree(entry);
+ return -ENOMEM;
+ }
+ strcpy(entry->name, name);
+
+ file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
+ if (!file) {
+ kfree(entry->name);
+ kfree(entry);
+ rv = -ENOMEM;
+ } else {
+ mutex_lock(&smi->proc_entry_lock);
+ /* Stick it on the list. */
+ entry->next = smi->proc_entries;
+ smi->proc_entries = entry;
+ mutex_unlock(&smi->proc_entry_lock);
+ }
+#endif /* CONFIG_PROC_FS */
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
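+
+#if 0	/* Usage sketch only, never compiled; example_* is hypothetical. */
+/*
+ * How a low-level driver hangs its own file off /proc/ipmi/<n>/,
+ * mirroring the stats/ipmb/version entries this file creates below.
+ */
+static int example_type_proc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "example\n");
+	return 0;
+}
+
+static int example_type_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, example_type_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations example_type_proc_ops = {
+	.open		= example_type_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int example_add_entry(ipmi_smi_t smi)
+{
+	return ipmi_smi_add_proc_entry(smi, "type",
+				       &example_type_proc_ops, smi);
+}
+#endif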
+
+static int add_proc_entries(ipmi_smi_t smi, int num)
+{
+ int rv = 0;
+
+#ifdef CONFIG_PROC_FS
+ sprintf(smi->proc_dir_name, "%d", num);
+ smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
+ if (!smi->proc_dir)
+ rv = -ENOMEM;
+
+ if (rv == 0)
+ rv = ipmi_smi_add_proc_entry(smi, "stats",
+ &smi_stats_proc_ops,
+ smi);
+
+ if (rv == 0)
+ rv = ipmi_smi_add_proc_entry(smi, "ipmb",
+ &smi_ipmb_proc_ops,
+ smi);
+
+ if (rv == 0)
+ rv = ipmi_smi_add_proc_entry(smi, "version",
+ &smi_version_proc_ops,
+ smi);
+#endif /* CONFIG_PROC_FS */
+
+ return rv;
+}
+
+static void remove_proc_entries(ipmi_smi_t smi)
+{
+#ifdef CONFIG_PROC_FS
+ struct ipmi_proc_entry *entry;
+
+ mutex_lock(&smi->proc_entry_lock);
+ while (smi->proc_entries) {
+ entry = smi->proc_entries;
+ smi->proc_entries = entry->next;
+
+ remove_proc_entry(entry->name, smi->proc_dir);
+ kfree(entry->name);
+ kfree(entry);
+ }
+ mutex_unlock(&smi->proc_entry_lock);
+ remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
+#endif /* CONFIG_PROC_FS */
+}
+
+static int __find_bmc_guid(struct device *dev, void *data)
+{
+ unsigned char *id = data;
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+ return memcmp(bmc->guid, id, 16) == 0;
+}
+
+static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
+ unsigned char *guid)
+{
+ struct device *dev;
+
+ dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
+ if (dev)
+ return dev_get_drvdata(dev);
+ else
+ return NULL;
+}
+
+struct prod_dev_id {
+ unsigned int product_id;
+ unsigned char device_id;
+};
+
+static int __find_bmc_prod_dev_id(struct device *dev, void *data)
+{
+ struct prod_dev_id *id = data;
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return (bmc->id.product_id == id->product_id
+ && bmc->id.device_id == id->device_id);
+}
+
+static struct bmc_device *ipmi_find_bmc_prod_dev_id(
+ struct device_driver *drv,
+ unsigned int product_id, unsigned char device_id)
+{
+ struct prod_dev_id id = {
+ .product_id = product_id,
+ .device_id = device_id,
+ };
+ struct device *dev;
+
+ dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
+ if (dev)
+ return dev_get_drvdata(dev);
+ else
+ return NULL;
+}
+
+static ssize_t device_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 10, "%u\n", bmc->id.device_id);
+}
+
+static ssize_t provides_dev_sdrs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 10, "%u\n",
+ (bmc->id.device_revision & 0x80) >> 7);
+}
+
+static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 20, "%u\n",
+ bmc->id.device_revision & 0x0F);
+}
+
+static ssize_t firmware_rev_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
+ bmc->id.firmware_revision_2);
+}
+
+static ssize_t ipmi_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 20, "%u.%u\n",
+ ipmi_version_major(&bmc->id),
+ ipmi_version_minor(&bmc->id));
+}
+
+static ssize_t add_dev_support_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 10, "0x%02x\n",
+ bmc->id.additional_device_support);
+}
+
+static ssize_t manufacturer_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
+}
+
+static ssize_t product_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
+}
+
+static ssize_t aux_firmware_rev_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ bmc->id.aux_firmware_revision[3],
+ bmc->id.aux_firmware_revision[2],
+ bmc->id.aux_firmware_revision[1],
+ bmc->id.aux_firmware_revision[0]);
+}
+
+static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bmc_device *bmc = dev_get_drvdata(dev);
+
+ return snprintf(buf, 100, "%Lx%Lx\n",
+ (long long) bmc->guid[0],
+ (long long) bmc->guid[8]);
+}
+
+static void remove_files(struct bmc_device *bmc)
+{
+ if (!bmc->dev)
+ return;
+
+ device_remove_file(&bmc->dev->dev,
+ &bmc->device_id_attr);
+ device_remove_file(&bmc->dev->dev,
+ &bmc->provides_dev_sdrs_attr);
+ device_remove_file(&bmc->dev->dev,
+ &bmc->revision_attr);
+ device_remove_file(&bmc->dev->dev,
+ &bmc->firmware_rev_attr);
+ device_remove_file(&bmc->dev->dev,
+ &bmc->version_attr);
+ device_remove_file(&bmc->dev->dev,
+ &bmc->add_dev_support_attr);
+ device_remove_file(&bmc->dev->dev,
+ &bmc->manufacturer_id_attr);
+ device_remove_file(&bmc->dev->dev,
+ &bmc->product_id_attr);
+
+ if (bmc->id.aux_firmware_revision_set)
+ device_remove_file(&bmc->dev->dev,
+ &bmc->aux_firmware_rev_attr);
+ if (bmc->guid_set)
+ device_remove_file(&bmc->dev->dev,
+ &bmc->guid_attr);
+}
+
+static void
+cleanup_bmc_device(struct kref *ref)
+{
+ struct bmc_device *bmc;
+
+ bmc = container_of(ref, struct bmc_device, refcount);
+
+ remove_files(bmc);
+ platform_device_unregister(bmc->dev);
+ kfree(bmc);
+}
+
+static void ipmi_bmc_unregister(ipmi_smi_t intf)
+{
+ struct bmc_device *bmc = intf->bmc;
+
+ if (intf->sysfs_name) {
+ sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
+ kfree(intf->sysfs_name);
+ intf->sysfs_name = NULL;
+ }
+ if (intf->my_dev_name) {
+ sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
+ kfree(intf->my_dev_name);
+ intf->my_dev_name = NULL;
+ }
+
+ mutex_lock(&ipmidriver_mutex);
+ kref_put(&bmc->refcount, cleanup_bmc_device);
+ intf->bmc = NULL;
+ mutex_unlock(&ipmidriver_mutex);
+}
+
+static int create_files(struct bmc_device *bmc)
+{
+ int err;
+
+ bmc->device_id_attr.attr.name = "device_id";
+ bmc->device_id_attr.attr.mode = S_IRUGO;
+ bmc->device_id_attr.show = device_id_show;
+ sysfs_attr_init(&bmc->device_id_attr.attr);
+
+ bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
+ bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
+ bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
+ sysfs_attr_init(&bmc->provides_dev_sdrs_attr.attr);
+
+ bmc->revision_attr.attr.name = "revision";
+ bmc->revision_attr.attr.mode = S_IRUGO;
+ bmc->revision_attr.show = revision_show;
+ sysfs_attr_init(&bmc->revision_attr.attr);
+
+ bmc->firmware_rev_attr.attr.name = "firmware_revision";
+ bmc->firmware_rev_attr.attr.mode = S_IRUGO;
+ bmc->firmware_rev_attr.show = firmware_rev_show;
+ sysfs_attr_init(&bmc->firmware_rev_attr.attr);
+
+ bmc->version_attr.attr.name = "ipmi_version";
+ bmc->version_attr.attr.mode = S_IRUGO;
+ bmc->version_attr.show = ipmi_version_show;
+ sysfs_attr_init(&bmc->version_attr.attr);
+
+ bmc->add_dev_support_attr.attr.name = "additional_device_support";
+ bmc->add_dev_support_attr.attr.mode = S_IRUGO;
+ bmc->add_dev_support_attr.show = add_dev_support_show;
+ sysfs_attr_init(&bmc->add_dev_support_attr.attr);
+
+ bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
+ bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
+ bmc->manufacturer_id_attr.show = manufacturer_id_show;
+ sysfs_attr_init(&bmc->manufacturer_id_attr.attr);
+
+ bmc->product_id_attr.attr.name = "product_id";
+ bmc->product_id_attr.attr.mode = S_IRUGO;
+ bmc->product_id_attr.show = product_id_show;
+ sysfs_attr_init(&bmc->product_id_attr.attr);
+
+ bmc->guid_attr.attr.name = "guid";
+ bmc->guid_attr.attr.mode = S_IRUGO;
+ bmc->guid_attr.show = guid_show;
+ sysfs_attr_init(&bmc->guid_attr.attr);
+
+ bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
+ bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
+ bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
+ sysfs_attr_init(&bmc->aux_firmware_rev_attr.attr);
+
+ err = device_create_file(&bmc->dev->dev,
+ &bmc->device_id_attr);
+ if (err)
+ goto out;
+ err = device_create_file(&bmc->dev->dev,
+ &bmc->provides_dev_sdrs_attr);
+ if (err)
+ goto out_devid;
+ err = device_create_file(&bmc->dev->dev,
+ &bmc->revision_attr);
+ if (err)
+ goto out_sdrs;
+ err = device_create_file(&bmc->dev->dev,
+ &bmc->firmware_rev_attr);
+ if (err)
+ goto out_rev;
+ err = device_create_file(&bmc->dev->dev,
+ &bmc->version_attr);
+ if (err)
+ goto out_firm;
+ err = device_create_file(&bmc->dev->dev,
+ &bmc->add_dev_support_attr);
+ if (err)
+ goto out_version;
+ err = device_create_file(&bmc->dev->dev,
+ &bmc->manufacturer_id_attr);
+ if (err)
+ goto out_add_dev;
+ err = device_create_file(&bmc->dev->dev,
+ &bmc->product_id_attr);
+ if (err)
+ goto out_manu;
+ if (bmc->id.aux_firmware_revision_set) {
+ err = device_create_file(&bmc->dev->dev,
+ &bmc->aux_firmware_rev_attr);
+ if (err)
+ goto out_prod_id;
+ }
+ if (bmc->guid_set) {
+ err = device_create_file(&bmc->dev->dev,
+ &bmc->guid_attr);
+ if (err)
+ goto out_aux_firm;
+ }
+
+ return 0;
+
+out_aux_firm:
+ if (bmc->id.aux_firmware_revision_set)
+ device_remove_file(&bmc->dev->dev,
+ &bmc->aux_firmware_rev_attr);
+out_prod_id:
+ device_remove_file(&bmc->dev->dev,
+ &bmc->product_id_attr);
+out_manu:
+ device_remove_file(&bmc->dev->dev,
+ &bmc->manufacturer_id_attr);
+out_add_dev:
+ device_remove_file(&bmc->dev->dev,
+ &bmc->add_dev_support_attr);
+out_version:
+ device_remove_file(&bmc->dev->dev,
+ &bmc->version_attr);
+out_firm:
+ device_remove_file(&bmc->dev->dev,
+ &bmc->firmware_rev_attr);
+out_rev:
+ device_remove_file(&bmc->dev->dev,
+ &bmc->revision_attr);
+out_sdrs:
+ device_remove_file(&bmc->dev->dev,
+ &bmc->provides_dev_sdrs_attr);
+out_devid:
+ device_remove_file(&bmc->dev->dev,
+ &bmc->device_id_attr);
+out:
+ return err;
+}
+
+static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
+ const char *sysfs_name)
+{
+ int rv;
+ struct bmc_device *bmc = intf->bmc;
+ struct bmc_device *old_bmc;
+ int size;
+ char dummy[1];
+
+ mutex_lock(&ipmidriver_mutex);
+
+	/*
+	 * See if there is already a bmc_device struct
+	 * representing this BMC.
+	 */
+ if (bmc->guid_set)
+ old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, bmc->guid);
+ else
+ old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
+ bmc->id.product_id,
+ bmc->id.device_id);
+
+	/*
+	 * If there is already a bmc_device, free the new one;
+	 * otherwise register the new BMC device.
+	 */
+ if (old_bmc) {
+ kfree(bmc);
+ intf->bmc = old_bmc;
+ bmc = old_bmc;
+
+ kref_get(&bmc->refcount);
+ mutex_unlock(&ipmidriver_mutex);
+
+ printk(KERN_INFO
+ "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
+ " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ bmc->id.manufacturer_id,
+ bmc->id.product_id,
+ bmc->id.device_id);
+ } else {
+ char name[14];
+ unsigned char orig_dev_id = bmc->id.device_id;
+ int warn_printed = 0;
+
+ snprintf(name, sizeof(name),
+ "ipmi_bmc.%4.4x", bmc->id.product_id);
+
+ while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
+ bmc->id.product_id,
+ bmc->id.device_id)) {
+ if (!warn_printed) {
+ printk(KERN_WARNING PFX
+ "This machine has two different BMCs"
+ " with the same product id and device"
+ " id. This is an error in the"
+ " firmware, but incrementing the"
+ " device id to work around the problem."
+ " Prod ID = 0x%x, Dev ID = 0x%x\n",
+ bmc->id.product_id, bmc->id.device_id);
+ warn_printed = 1;
+ }
+ bmc->id.device_id++; /* Wraps at 255 */
+ if (bmc->id.device_id == orig_dev_id) {
+ printk(KERN_ERR PFX
+ "Out of device ids!\n");
+ break;
+ }
+ }
+
+ bmc->dev = platform_device_alloc(name, bmc->id.device_id);
+ if (!bmc->dev) {
+ mutex_unlock(&ipmidriver_mutex);
+ printk(KERN_ERR
+ "ipmi_msghandler:"
+ " Unable to allocate platform device\n");
+ return -ENOMEM;
+ }
+ bmc->dev->dev.driver = &ipmidriver.driver;
+ dev_set_drvdata(&bmc->dev->dev, bmc);
+ kref_init(&bmc->refcount);
+
+ rv = platform_device_add(bmc->dev);
+ mutex_unlock(&ipmidriver_mutex);
+ if (rv) {
+ platform_device_put(bmc->dev);
+ bmc->dev = NULL;
+ printk(KERN_ERR
+ "ipmi_msghandler:"
+ " Unable to register bmc device: %d\n",
+ rv);
+ /*
+ * Don't go to out_err, you can only do that if
+ * the device is registered already.
+ */
+ return rv;
+ }
+
+ rv = create_files(bmc);
+ if (rv) {
+ mutex_lock(&ipmidriver_mutex);
+ platform_device_unregister(bmc->dev);
+ mutex_unlock(&ipmidriver_mutex);
+
+ return rv;
+ }
+
+ dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
+ "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ bmc->id.manufacturer_id,
+ bmc->id.product_id,
+ bmc->id.device_id);
+ }
+
+	/*
+	 * Create symlinks from the system interface device to the
+	 * bmc device and back.
+	 */
+ intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
+ if (!intf->sysfs_name) {
+ rv = -ENOMEM;
+ printk(KERN_ERR
+ "ipmi_msghandler: allocate link to BMC: %d\n",
+ rv);
+ goto out_err;
+ }
+
+ rv = sysfs_create_link(&intf->si_dev->kobj,
+ &bmc->dev->dev.kobj, intf->sysfs_name);
+ if (rv) {
+ kfree(intf->sysfs_name);
+ intf->sysfs_name = NULL;
+ printk(KERN_ERR
+ "ipmi_msghandler: Unable to create bmc symlink: %d\n",
+ rv);
+ goto out_err;
+ }
+
+ size = snprintf(dummy, 0, "ipmi%d", ifnum);
+ intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
+ if (!intf->my_dev_name) {
+ kfree(intf->sysfs_name);
+ intf->sysfs_name = NULL;
+ rv = -ENOMEM;
+ printk(KERN_ERR
+ "ipmi_msghandler: allocate link from BMC: %d\n",
+ rv);
+ goto out_err;
+ }
+ snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
+
+ rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
+ intf->my_dev_name);
+ if (rv) {
+ kfree(intf->sysfs_name);
+ intf->sysfs_name = NULL;
+ kfree(intf->my_dev_name);
+ intf->my_dev_name = NULL;
+ printk(KERN_ERR
+ "ipmi_msghandler:"
+ " Unable to create symlink to bmc: %d\n",
+ rv);
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ ipmi_bmc_unregister(intf);
+ return rv;
+}
+
+static int
+send_guid_cmd(ipmi_smi_t intf, int chan)
+{
+ struct kernel_ipmi_msg msg;
+ struct ipmi_system_interface_addr si;
+
+ si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si.channel = IPMI_BMC_CHANNEL;
+ si.lun = 0;
+
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+ return i_ipmi_request(NULL,
+ intf,
+ (struct ipmi_addr *) &si,
+ 0,
+ &msg,
+ intf,
+ NULL,
+ NULL,
+ 0,
+ intf->channels[0].address,
+ intf->channels[0].lun,
+ -1, 0);
+}
+
+static void
+guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
+{
+ if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
+ || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
+ /* Not for me */
+ return;
+
+ if (msg->msg.data[0] != 0) {
+ /* Error from getting the GUID, the BMC doesn't have one. */
+ intf->bmc->guid_set = 0;
+ goto out;
+ }
+
+ if (msg->msg.data_len < 17) {
+ intf->bmc->guid_set = 0;
+ printk(KERN_WARNING PFX
+ "guid_handler: The GUID response from the BMC was too"
+ " short, it was %d but should have been 17. Assuming"
+ " GUID is not available.\n",
+ msg->msg.data_len);
+ goto out;
+ }
+
+ memcpy(intf->bmc->guid, msg->msg.data, 16);
+ intf->bmc->guid_set = 1;
+ out:
+ wake_up(&intf->waitq);
+}
+
+static void
+get_guid(ipmi_smi_t intf)
+{
+ int rv;
+
+	intf->bmc->guid_set = 2;	/* 2 == GUID fetch in progress */
+ intf->null_user_handler = guid_handler;
+ rv = send_guid_cmd(intf, 0);
+ if (rv)
+ /* Send failed, no GUID available. */
+ intf->bmc->guid_set = 0;
+ wait_event(intf->waitq, intf->bmc->guid_set != 2);
+ intf->null_user_handler = NULL;
+}
+
+static int
+send_channel_info_cmd(ipmi_smi_t intf, int chan)
+{
+ struct kernel_ipmi_msg msg;
+ unsigned char data[1];
+ struct ipmi_system_interface_addr si;
+
+ si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si.channel = IPMI_BMC_CHANNEL;
+ si.lun = 0;
+
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
+ msg.data = data;
+ msg.data_len = 1;
+ data[0] = chan;
+ return i_ipmi_request(NULL,
+ intf,
+ (struct ipmi_addr *) &si,
+ 0,
+ &msg,
+ intf,
+ NULL,
+ NULL,
+ 0,
+ intf->channels[0].address,
+ intf->channels[0].lun,
+ -1, 0);
+}
+
+static void
+channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
+{
+ int rv = 0;
+ int chan;
+
+ if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
+ && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
+ /* It's the one we want */
+ if (msg->msg.data[0] != 0) {
+ /* Got an error from the channel, just go on. */
+
+ if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
+ /*
+ * If the MC does not support this
+ * command, that is legal. We just
+ * assume it has one IPMB at channel
+ * zero.
+ */
+ intf->channels[0].medium
+ = IPMI_CHANNEL_MEDIUM_IPMB;
+ intf->channels[0].protocol
+ = IPMI_CHANNEL_PROTOCOL_IPMB;
+ rv = -ENOSYS;
+
+ intf->curr_channel = IPMI_MAX_CHANNELS;
+ wake_up(&intf->waitq);
+ goto out;
+ }
+ goto next_channel;
+ }
+ if (msg->msg.data_len < 4) {
+ /* Message not big enough, just go on. */
+ goto next_channel;
+ }
+ chan = intf->curr_channel;
+ intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
+ intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
+
+ next_channel:
+ intf->curr_channel++;
+ if (intf->curr_channel >= IPMI_MAX_CHANNELS)
+ wake_up(&intf->waitq);
+ else
+ rv = send_channel_info_cmd(intf, intf->curr_channel);
+
+ if (rv) {
+ /* Got an error somehow, just give up. */
+ intf->curr_channel = IPMI_MAX_CHANNELS;
+ wake_up(&intf->waitq);
+
+ printk(KERN_WARNING PFX
+ "Error sending channel information: %d\n",
+ rv);
+ }
+ }
+ out:
+ return;
+}
+
+void ipmi_poll_interface(ipmi_user_t user)
+{
+ ipmi_smi_t intf = user->intf;
+
+ if (intf->handlers->poll)
+ intf->handlers->poll(intf->send_info);
+}
+EXPORT_SYMBOL(ipmi_poll_interface);
+
+int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
+ void *send_info,
+ struct ipmi_device_id *device_id,
+ struct device *si_dev,
+ const char *sysfs_name,
+ unsigned char slave_addr)
+{
+ int i, j;
+ int rv;
+ ipmi_smi_t intf;
+ ipmi_smi_t tintf;
+ struct list_head *link;
+
+	/*
+	 * Make sure the driver is actually initialized; this handles
+	 * problems with initialization order.
+	 */
+ if (!initialized) {
+ rv = ipmi_init_msghandler();
+ if (rv)
+ return rv;
+		/*
+		 * The init code doesn't return an error if it was turned
+		 * off, but in that case it won't initialize either; check
+		 * for that here.
+		 */
+ if (!initialized)
+ return -ENODEV;
+ }
+
+ intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+ if (!intf)
+ return -ENOMEM;
+
+ intf->ipmi_version_major = ipmi_version_major(device_id);
+ intf->ipmi_version_minor = ipmi_version_minor(device_id);
+
+ intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
+ if (!intf->bmc) {
+ kfree(intf);
+ return -ENOMEM;
+ }
+ intf->intf_num = -1; /* Mark it invalid for now. */
+ kref_init(&intf->refcount);
+ intf->bmc->id = *device_id;
+ intf->si_dev = si_dev;
+ for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
+ intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
+ intf->channels[j].lun = 2;
+ }
+ if (slave_addr != 0)
+ intf->channels[0].address = slave_addr;
+ INIT_LIST_HEAD(&intf->users);
+ intf->handlers = handlers;
+ intf->send_info = send_info;
+ spin_lock_init(&intf->seq_lock);
+ for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
+ intf->seq_table[j].inuse = 0;
+ intf->seq_table[j].seqid = 0;
+ }
+ intf->curr_seq = 0;
+#ifdef CONFIG_PROC_FS
+ mutex_init(&intf->proc_entry_lock);
+#endif
+ spin_lock_init(&intf->waiting_msgs_lock);
+ INIT_LIST_HEAD(&intf->waiting_msgs);
+ spin_lock_init(&intf->events_lock);
+ INIT_LIST_HEAD(&intf->waiting_events);
+ intf->waiting_events_count = 0;
+ mutex_init(&intf->cmd_rcvrs_mutex);
+ spin_lock_init(&intf->maintenance_mode_lock);
+ INIT_LIST_HEAD(&intf->cmd_rcvrs);
+ init_waitqueue_head(&intf->waitq);
+ for (i = 0; i < IPMI_NUM_STATS; i++)
+ atomic_set(&intf->stats[i], 0);
+
+ intf->proc_dir = NULL;
+
+ mutex_lock(&smi_watchers_mutex);
+ mutex_lock(&ipmi_interfaces_mutex);
+ /* Look for a hole in the numbers. */
+ i = 0;
+ link = &ipmi_interfaces;
+ list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
+ if (tintf->intf_num != i) {
+ link = &tintf->link;
+ break;
+ }
+ i++;
+ }
+ /* Add the new interface in numeric order. */
+ if (i == 0)
+ list_add_rcu(&intf->link, &ipmi_interfaces);
+ else
+ list_add_tail_rcu(&intf->link, link);
+
+ rv = handlers->start_processing(send_info, intf);
+ if (rv)
+ goto out;
+
+ get_guid(intf);
+
+ if ((intf->ipmi_version_major > 1)
+ || ((intf->ipmi_version_major == 1)
+ && (intf->ipmi_version_minor >= 5))) {
+ /*
+ * Start scanning the channels to see what is
+ * available.
+ */
+ intf->null_user_handler = channel_handler;
+ intf->curr_channel = 0;
+ rv = send_channel_info_cmd(intf, 0);
+ if (rv)
+ goto out;
+
+ /* Wait for the channel info to be read. */
+ wait_event(intf->waitq,
+ intf->curr_channel >= IPMI_MAX_CHANNELS);
+ intf->null_user_handler = NULL;
+ } else {
+ /* Assume a single IPMB channel at zero. */
+ intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
+ intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
+ intf->curr_channel = IPMI_MAX_CHANNELS;
+ }
+
+ if (rv == 0)
+ rv = add_proc_entries(intf, i);
+
+ rv = ipmi_bmc_register(intf, i, sysfs_name);
+
+ out:
+ if (rv) {
+ if (intf->proc_dir)
+ remove_proc_entries(intf);
+ intf->handlers = NULL;
+ list_del_rcu(&intf->link);
+ mutex_unlock(&ipmi_interfaces_mutex);
+ mutex_unlock(&smi_watchers_mutex);
+ synchronize_rcu();
+ kref_put(&intf->refcount, intf_free);
+ } else {
+ /*
+ * Keep memory order straight for RCU readers. Make
+ * sure everything else is committed to memory before
+ * setting intf_num to mark the interface valid.
+ */
+ smp_wmb();
+ intf->intf_num = i;
+ mutex_unlock(&ipmi_interfaces_mutex);
+ /* After this point the interface is legal to use. */
+ call_smi_watchers(i, intf->si_dev);
+ mutex_unlock(&smi_watchers_mutex);
+ }
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_register_smi);
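+
+#if 0	/* Registration sketch only, never compiled; the example_* names
+	   and send_info layout are hypothetical. */
+/*
+ * Shape of a low-level (SMI) driver registration.  start_processing
+ * and sender are called unconditionally by this file, so they must be
+ * provided; several optional hooks (poll, set_maintenance_mode,
+ * get_smi_info, inc_usecount, ...) are checked for NULL before use.
+ */
+static int example_start_processing(void *send_info, ipmi_smi_t intf)
+{
+	return 0;	/* Kick off the driver's message thread here. */
+}
+
+static void example_sender(void *send_info, struct ipmi_smi_msg *msg,
+			   int priority)
+{
+	/* Queue msg to the hardware; hand the result back with
+	   ipmi_smi_msg_received() when the exchange completes. */
+}
+
+static struct ipmi_smi_handlers example_handlers = {
+	.owner		  = THIS_MODULE,
+	.start_processing = example_start_processing,
+	.sender		  = example_sender,
+};
+
+static int example_register(void *send_info, struct ipmi_device_id *id,
+			    struct device *dev)
+{
+	return ipmi_register_smi(&example_handlers, send_info, id, dev,
+				 "bmc", 0 /* default slave address */);
+}
+#endif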
+
+static void cleanup_smi_msgs(ipmi_smi_t intf)
+{
+ int i;
+ struct seq_table *ent;
+
+ /* No need for locks, the interface is down. */
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+ ent = &(intf->seq_table[i]);
+ if (!ent->inuse)
+ continue;
+ deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
+ }
+}
+
+int ipmi_unregister_smi(ipmi_smi_t intf)
+{
+ struct ipmi_smi_watcher *w;
+ int intf_num = intf->intf_num;
+
+ ipmi_bmc_unregister(intf);
+
+ mutex_lock(&smi_watchers_mutex);
+ mutex_lock(&ipmi_interfaces_mutex);
+ intf->intf_num = -1;
+ intf->handlers = NULL;
+ list_del_rcu(&intf->link);
+ mutex_unlock(&ipmi_interfaces_mutex);
+ synchronize_rcu();
+
+ cleanup_smi_msgs(intf);
+
+ remove_proc_entries(intf);
+
+ /*
+ * Call all the watcher interfaces to tell them that
+ * an interface is gone.
+ */
+ list_for_each_entry(w, &smi_watchers, link)
+ w->smi_gone(intf_num);
+ mutex_unlock(&smi_watchers_mutex);
+
+ kref_put(&intf->refcount, intf_free);
+ return 0;
+}
+EXPORT_SYMBOL(ipmi_unregister_smi);
+
+static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_ipmb_addr ipmb_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+ /*
+ * This is 11, not 10, because the response must contain a
+ * completion code.
+ */
+ if (msg->rsp_size < 11) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_ipmb_responses);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr.slave_addr = msg->rsp[6];
+ ipmb_addr.channel = msg->rsp[3] & 0x0f;
+ ipmb_addr.lun = msg->rsp[7] & 3;
+
+ /*
+ * It's a response from a remote entity. Look up the sequence
+ * number and handle the response.
+ */
+ if (intf_find_seq(intf,
+ msg->rsp[7] >> 2,
+ msg->rsp[3] & 0x0f,
+ msg->rsp[8],
+ (msg->rsp[4] >> 2) & (~1),
+ (struct ipmi_addr *) &(ipmb_addr),
+ &recv_msg)) {
+ /*
+ * We were unable to find the sequence number,
+ * so just nuke the message.
+ */
+ ipmi_inc_stat(intf, unhandled_ipmb_responses);
+ return 0;
+ }
+
+ memcpy(recv_msg->msg_data,
+ &(msg->rsp[9]),
+ msg->rsp_size - 9);
+ /*
+ * The other fields matched, so no need to set them, except
+ * for netfn, which needs to be the response that was
+ * returned, not the request value.
+ */
+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 10;
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ ipmi_inc_stat(intf, handled_ipmb_responses);
+ deliver_response(recv_msg);
+
+ return 0;
+}
+
+static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned char chan;
+ ipmi_user_t user = NULL;
+ struct ipmi_ipmb_addr *ipmb_addr;
+ struct ipmi_recv_msg *recv_msg;
+ struct ipmi_smi_handlers *handlers;
+
+ if (msg->rsp_size < 10) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_commands);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ netfn = msg->rsp[4] >> 2;
+ cmd = msg->rsp[8];
+ chan = msg->rsp[3] & 0xf;
+
+ rcu_read_lock();
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, deliver an error response. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_SEND_MSG_CMD;
+ msg->data[2] = msg->rsp[3];
+ msg->data[3] = msg->rsp[6];
+ msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
+ msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
+ msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
+ /* rqseq/lun */
+ msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
+ msg->data[8] = msg->rsp[8]; /* cmd */
+ msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
+ msg->data_size = 11;
+
+#ifdef DEBUG_MSGING
+ {
+ int m;
+ printk("Invalid command:");
+ for (m = 0; m < msg->data_size; m++)
+ printk(" %2.2x", msg->data[m]);
+ printk("\n");
+ }
+#endif
+ rcu_read_lock();
+ handlers = intf->handlers;
+ if (handlers) {
+ handlers->sender(intf->send_info, msg, 0);
+ /*
+ * We used the message, so return the value
+ * that causes it to not be freed or
+ * queued.
+ */
+ rv = -1;
+ }
+ rcu_read_unlock();
+ } else {
+ /* Deliver the message to the user. */
+ ipmi_inc_stat(intf, handled_commands);
+
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /* Extract the source address from the data. */
+ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+ ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr->slave_addr = msg->rsp[6];
+ ipmb_addr->lun = msg->rsp[7] & 3;
+ ipmb_addr->channel = msg->rsp[3] & 0xf;
+
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->user = user;
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[7] >> 2;
+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
+ recv_msg->msg.cmd = msg->rsp[8];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+			/*
+			 * We chop off 10 bytes, not 9, because the
+			 * trailing checksum also needs to be removed.
+			 */
+ recv_msg->msg.data_len = msg->rsp_size - 10;
+ memcpy(recv_msg->msg_data,
+ &(msg->rsp[9]),
+ msg->rsp_size - 10);
+ deliver_response(recv_msg);
+ }
+ }
+
+ return rv;
+}
+
+static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_lan_addr lan_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+
+ /*
+ * This is 13, not 12, because the response must contain a
+ * completion code.
+ */
+ if (msg->rsp_size < 13) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_lan_responses);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
+ lan_addr.session_handle = msg->rsp[4];
+ lan_addr.remote_SWID = msg->rsp[8];
+ lan_addr.local_SWID = msg->rsp[5];
+ lan_addr.channel = msg->rsp[3] & 0x0f;
+ lan_addr.privilege = msg->rsp[3] >> 4;
+ lan_addr.lun = msg->rsp[9] & 3;
+
+ /*
+ * It's a response from a remote entity. Look up the sequence
+ * number and handle the response.
+ */
+ if (intf_find_seq(intf,
+ msg->rsp[9] >> 2,
+ msg->rsp[3] & 0x0f,
+ msg->rsp[10],
+ (msg->rsp[6] >> 2) & (~1),
+ (struct ipmi_addr *) &(lan_addr),
+ &recv_msg)) {
+ /*
+ * We were unable to find the sequence number,
+ * so just nuke the message.
+ */
+ ipmi_inc_stat(intf, unhandled_lan_responses);
+ return 0;
+ }
+
+ memcpy(recv_msg->msg_data,
+ &(msg->rsp[11]),
+ msg->rsp_size - 11);
+ /*
+ * The other fields matched, so no need to set them, except
+ * for netfn, which needs to be the response that was
+ * returned, not the request value.
+ */
+ recv_msg->msg.netfn = msg->rsp[6] >> 2;
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 12;
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ ipmi_inc_stat(intf, handled_lan_responses);
+ deliver_response(recv_msg);
+
+ return 0;
+}
+
+static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned char chan;
+ ipmi_user_t user = NULL;
+ struct ipmi_lan_addr *lan_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+ if (msg->rsp_size < 12) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_commands);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ netfn = msg->rsp[6] >> 2;
+ cmd = msg->rsp[10];
+ chan = msg->rsp[3] & 0xf;
+
+ rcu_read_lock();
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, just give up. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
+ /*
+ * Don't do anything with these messages, just allow
+ * them to be freed.
+ */
+ rv = 0;
+ } else {
+ /* Deliver the message to the user. */
+ ipmi_inc_stat(intf, handled_commands);
+
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /* Extract the source address from the data. */
+ lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+ lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+ lan_addr->session_handle = msg->rsp[4];
+ lan_addr->remote_SWID = msg->rsp[8];
+ lan_addr->local_SWID = msg->rsp[5];
+ lan_addr->lun = msg->rsp[9] & 3;
+ lan_addr->channel = msg->rsp[3] & 0xf;
+ lan_addr->privilege = msg->rsp[3] >> 4;
+
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->user = user;
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[9] >> 2;
+ recv_msg->msg.netfn = msg->rsp[6] >> 2;
+ recv_msg->msg.cmd = msg->rsp[10];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ /*
+ * We chop off 12, not 11 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 12;
+ memcpy(recv_msg->msg_data,
+ &(msg->rsp[11]),
+ msg->rsp_size - 12);
+ deliver_response(recv_msg);
+ }
+ }
+
+ return rv;
+}
+
+/*
+ * This routine handles "Get Message" command responses for
+ * channels that use an OEM medium.  The message format belongs to
+ * the OEM. See IPMI 2.0 specification, Chapter 6 and
+ * Chapter 22, sections 22.6 and 22.24 for more details.
+ */
+static int handle_oem_get_msg_cmd(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned char chan;
+ ipmi_user_t user = NULL;
+ struct ipmi_system_interface_addr *smi_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+ /*
+	 * We expect the OEM SW to perform error checking,
+	 * so we just do some basic sanity checks here.
+ */
+ if (msg->rsp_size < 4) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_commands);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ /*
+	 * This is an OEM message, so the OEM needs to know how to
+	 * handle it.  We do no interpretation.
+ */
+ netfn = msg->rsp[0] >> 2;
+ cmd = msg->rsp[1];
+ chan = msg->rsp[3] & 0xf;
+
+ rcu_read_lock();
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, just give up. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
+ /*
+ * Don't do anything with these messages, just allow
+ * them to be freed.
+ */
+
+ rv = 0;
+ } else {
+ /* Deliver the message to the user. */
+ ipmi_inc_stat(intf, handled_commands);
+
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /*
+ * OEM Messages are expected to be delivered via
+			 * the system interface to SMS software.  We may
+			 * need to revisit this depending on OEM
+			 * requirements.
+ */
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &(recv_msg->addr));
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+
+ recv_msg->user = user;
+ recv_msg->user_msg_data = NULL;
+ recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ /*
+ * The message starts at byte 4 which follows the
+			 * The message starts at byte 4, which follows
+			 * the Channel Byte in the "Get Message" response.
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data,
+ &(msg->rsp[4]),
+ msg->rsp_size - 4);
+ deliver_response(recv_msg);
+ }
+ }
+
+ return rv;
+}
+
+static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_system_interface_addr *smi_addr;
+
+ recv_msg->msgid = 0;
+ smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+ recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 3;
+}
+
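+/*
+ * Events are fanned out: one ipmi_recv_msg copy is allocated for
+ * every user that has asked for events, and delivery is all or
+ * nothing.  If any allocation fails, everything built so far is
+ * freed and the SMI message is requeued (rv = 1) so the whole batch
+ * can be retried later.
+ */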
+static int handle_read_event_rsp(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_recv_msg *recv_msg, *recv_msg2;
+ struct list_head msgs;
+ ipmi_user_t user;
+ int rv = 0;
+ int deliver_count = 0;
+ unsigned long flags;
+
+ if (msg->rsp_size < 19) {
+ /* Message is too small to be an IPMB event. */
+ ipmi_inc_stat(intf, invalid_events);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the event, just ignore it. */
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&msgs);
+
+ spin_lock_irqsave(&intf->events_lock, flags);
+
+ ipmi_inc_stat(intf, events);
+
+ /*
+ * Allocate and fill in one message for every user that is
+ * getting events.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(user, &intf->users, link) {
+ if (!user->gets_events)
+ continue;
+
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ rcu_read_unlock();
+ list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
+ link) {
+ list_del(&recv_msg->link);
+ ipmi_free_recv_msg(recv_msg);
+ }
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ goto out;
+ }
+
+ deliver_count++;
+
+ copy_event_into_recv_msg(recv_msg, msg);
+ recv_msg->user = user;
+ kref_get(&user->refcount);
+ list_add_tail(&(recv_msg->link), &msgs);
+ }
+ rcu_read_unlock();
+
+ if (deliver_count) {
+ /* Now deliver all the messages. */
+ list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
+ list_del(&recv_msg->link);
+ deliver_response(recv_msg);
+ }
+ } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
+ /*
+		 * No one is waiting to receive the message, so put it
+		 * in the queue unless the queue is already full.
+ */
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ goto out;
+ }
+
+ copy_event_into_recv_msg(recv_msg, msg);
+ list_add_tail(&(recv_msg->link), &(intf->waiting_events));
+ intf->waiting_events_count++;
+ } else if (!intf->event_msg_printed) {
+ /*
+		 * There are too many things in the queue; discard this
+		 * message.
+ */
+ printk(KERN_WARNING PFX "Event queue full, discarding"
+ " incoming events\n");
+ intf->event_msg_printed = 1;
+ }
+
+ out:
+ spin_unlock_irqrestore(&(intf->events_lock), flags);
+
+ return rv;
+}
+
+static int handle_bmc_rsp(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_recv_msg *recv_msg;
+ struct ipmi_user *user;
+
+ recv_msg = (struct ipmi_recv_msg *) msg->user_data;
+ if (recv_msg == NULL) {
+ printk(KERN_WARNING
+ "IPMI message received with no owner. This\n"
+ "could be because of a malformed message, or\n"
+ "because of a hardware error. Contact your\n"
+		       "hardware vendor for assistance.\n");
+ return 0;
+ }
+
+ user = recv_msg->user;
+ /* Make sure the user still exists. */
+ if (user && !user->valid) {
+ /* The user for the message went away, so give up. */
+ ipmi_inc_stat(intf, unhandled_local_responses);
+ ipmi_free_recv_msg(recv_msg);
+ } else {
+ struct ipmi_system_interface_addr *smi_addr;
+
+ ipmi_inc_stat(intf, handled_local_responses);
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ recv_msg->msgid = msg->msgid;
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &(recv_msg->addr));
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ memcpy(recv_msg->msg_data,
+ &(msg->rsp[2]),
+ msg->rsp_size - 2);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 2;
+ deliver_response(recv_msg);
+ }
+
+ return 0;
+}
+
+/*
+ * Handle a new message. Return 1 if the message should be requeued,
+ * 0 if the message should be freed, or -1 if the message should not
+ * be freed or requeued.
+ */
+static int handle_new_recv_msg(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ int requeue;
+ int chan;
+
+#ifdef DEBUG_MSGING
+ int m;
+ printk("Recv:");
+ for (m = 0; m < msg->rsp_size; m++)
+ printk(" %2.2x", msg->rsp[m]);
+ printk("\n");
+#endif
+ if (msg->rsp_size < 2) {
+ /* Message is too small to be correct. */
+		printk(KERN_WARNING PFX "BMC returned too small a message"
+ " for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+
+ /* Generate an error response for the message. */
+ msg->rsp[0] = msg->data[0] | (1 << 2);
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 3;
+ } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
+ || (msg->rsp[1] != msg->data[1])) {
+ /*
+		 * The NetFN and Command in the response are not even
+ * marginally correct.
+ */
+ printk(KERN_WARNING PFX "BMC returned incorrect response,"
+ " expected netfn %x cmd %x, got netfn %x cmd %x\n",
+ (msg->data[0] >> 2) | 1, msg->data[1],
+ msg->rsp[0] >> 2, msg->rsp[1]);
+
+ /* Generate an error response for the message. */
+ msg->rsp[0] = msg->data[0] | (1 << 2);
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 3;
+ }
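+	/*
+	 * Per IPMI, a response NetFN is the request NetFN with the
+	 * low bit set, hence the "| 1" in the checks above; rsp[0]
+	 * carries the NetFN in its upper six bits with the LUN below.
+	 */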
+
+ if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
+ && (msg->user_data != NULL)) {
+ /*
+ * It's a response to a response we sent. For this we
+ * deliver a send message response to the user.
+ */
+ struct ipmi_recv_msg *recv_msg = msg->user_data;
+
+ requeue = 0;
+ if (msg->rsp_size < 2)
+ /* Message is too small to be correct. */
+ goto out;
+
+ chan = msg->data[2] & 0x0f;
+ if (chan >= IPMI_MAX_CHANNELS)
+ /* Invalid channel number */
+ goto out;
+
+ if (!recv_msg)
+ goto out;
+
+ /* Make sure the user still exists. */
+ if (!recv_msg->user || !recv_msg->user->valid)
+ goto out;
+
+ recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = 1;
+ recv_msg->msg_data[0] = msg->rsp[2];
+ deliver_response(recv_msg);
+ } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
+ /* It's from the receive queue. */
+ chan = msg->rsp[3] & 0xf;
+ if (chan >= IPMI_MAX_CHANNELS) {
+ /* Invalid channel number */
+ requeue = 0;
+ goto out;
+ }
+
+ /*
+ * We need to make sure the channels have been initialized.
+ * The channel_handler routine will set the "curr_channel"
+ * equal to or greater than IPMI_MAX_CHANNELS when all the
+ * channels for this interface have been initialized.
+ */
+ if (intf->curr_channel < IPMI_MAX_CHANNELS) {
+ requeue = 0; /* Throw the message away */
+ goto out;
+ }
+
+ switch (intf->channels[chan].medium) {
+ case IPMI_CHANNEL_MEDIUM_IPMB:
+ if (msg->rsp[4] & 0x04) {
+ /*
+ * It's a response, so find the
+ * requesting message and send it up.
+ */
+ requeue = handle_ipmb_get_msg_rsp(intf, msg);
+ } else {
+ /*
+ * It's a command to the SMS from some other
+ * entity. Handle that.
+ */
+ requeue = handle_ipmb_get_msg_cmd(intf, msg);
+ }
+ break;
+
+ case IPMI_CHANNEL_MEDIUM_8023LAN:
+ case IPMI_CHANNEL_MEDIUM_ASYNC:
+ if (msg->rsp[6] & 0x04) {
+ /*
+ * It's a response, so find the
+ * requesting message and send it up.
+ */
+ requeue = handle_lan_get_msg_rsp(intf, msg);
+ } else {
+ /*
+ * It's a command to the SMS from some other
+ * entity. Handle that.
+ */
+ requeue = handle_lan_get_msg_cmd(intf, msg);
+ }
+ break;
+
+ default:
+			/*
+			 * Check for OEM channels.  Clients had better
+			 * register for these commands.
+			 */
+ if ((intf->channels[chan].medium
+ >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
+ && (intf->channels[chan].medium
+ <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
+ requeue = handle_oem_get_msg_cmd(intf, msg);
+ } else {
+ /*
+ * We don't handle the channel type, so just
+ * free the message.
+ */
+ requeue = 0;
+ }
+ }
+
+ } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
+		/* It's an asynchronous event. */
+ requeue = handle_read_event_rsp(intf, msg);
+ } else {
+ /* It's a response from the local BMC. */
+ requeue = handle_bmc_rsp(intf, msg);
+ }
+
+ out:
+ return requeue;
+}
+
+/* Handle a new message from the lower layer. */
+void ipmi_smi_msg_received(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ unsigned long flags = 0; /* keep us warning-free. */
+ int rv;
+ int run_to_completion;
+
+
+ if ((msg->data_size >= 2)
+ && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
+ && (msg->data[1] == IPMI_SEND_MSG_CMD)
+ && (msg->user_data == NULL)) {
+ /*
+ * This is the local response to a command send, start
+ * the timer for these. The user_data will not be
+ * NULL if this is a response send, and we will let
+ * response sends just go through.
+ */
+
+ /*
+ * Check for errors, if we get certain errors (ones
+ * that mean basically we can try again later), we
+ * ignore them and start the timer. Otherwise we
+ * report the error immediately.
+ */
+ if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
+ && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
+ && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
+ && (msg->rsp[2] != IPMI_BUS_ERR)
+ && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
+ int chan = msg->rsp[3] & 0xf;
+
+ /* Got an error sending the message, handle it. */
+ if (chan >= IPMI_MAX_CHANNELS)
+ ; /* This shouldn't happen */
+ else if ((intf->channels[chan].medium
+ == IPMI_CHANNEL_MEDIUM_8023LAN)
+ || (intf->channels[chan].medium
+ == IPMI_CHANNEL_MEDIUM_ASYNC))
+ ipmi_inc_stat(intf, sent_lan_command_errs);
+ else
+ ipmi_inc_stat(intf, sent_ipmb_command_errs);
+ intf_err_seq(intf, msg->msgid, msg->rsp[2]);
+ } else
+ /* The message was sent, start the timer. */
+ intf_start_seq_timer(intf, msg->msgid);
+
+ ipmi_free_smi_msg(msg);
+ goto out;
+ }
+
+ /*
+ * To preserve message order, if the list is not empty, we
+ * tack this message onto the end of the list.
+ */
+ run_to_completion = intf->run_to_completion;
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+ if (!list_empty(&intf->waiting_msgs)) {
+ list_add_tail(&msg->link, &intf->waiting_msgs);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ goto out;
+ }
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+
+ rv = handle_new_recv_msg(intf, msg);
+ if (rv > 0) {
+ /*
+ * Could not handle the message now, just add it to a
+ * list to handle later.
+ */
+ run_to_completion = intf->run_to_completion;
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+ list_add_tail(&msg->link, &intf->waiting_msgs);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ } else if (rv == 0) {
+ ipmi_free_smi_msg(msg);
+ }
+
+ out:
+ return;
+}
+EXPORT_SYMBOL(ipmi_smi_msg_received);
+
+void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
+{
+ ipmi_user_t user;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(user, &intf->users, link) {
+ if (!user->handler->ipmi_watchdog_pretimeout)
+ continue;
+
+ user->handler->ipmi_watchdog_pretimeout(user->handler_data);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
+
+static struct ipmi_smi_msg *
+smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
+ unsigned char seq, long seqid)
+{
+ struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
+ if (!smi_msg)
+ /*
+		 * If we can't allocate the message, then just return;
+		 * we get 4 retries, so this should be OK.
+ */
+ return NULL;
+
+ memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
+ smi_msg->data_size = recv_msg->msg.data_len;
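+	/*
+	 * The slot (sequence number) and seqid are packed into the
+	 * msgid so the eventual response can be matched back to its
+	 * seq_table entry (see STORE_SEQ_IN_MSGID earlier in this
+	 * file).
+	 */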
+ smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
+
+#ifdef DEBUG_MSGING
+ {
+ int m;
+ printk("Resend: ");
+ for (m = 0; m < smi_msg->data_size; m++)
+ printk(" %2.2x", smi_msg->data[m]);
+ printk("\n");
+ }
+#endif
+ return smi_msg;
+}
+
+static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
+ struct list_head *timeouts, long timeout_period,
+ int slot, unsigned long *flags)
+{
+ struct ipmi_recv_msg *msg;
+ struct ipmi_smi_handlers *handlers;
+
+ if (intf->intf_num == -1)
+ return;
+
+ if (!ent->inuse)
+ return;
+
+ ent->timeout -= timeout_period;
+ if (ent->timeout > 0)
+ return;
+
+ if (ent->retries_left == 0) {
+ /* The message has used all its retries. */
+ ent->inuse = 0;
+ msg = ent->recv_msg;
+ list_add_tail(&msg->link, timeouts);
+ if (ent->broadcast)
+ ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
+ else if (is_lan_addr(&ent->recv_msg->addr))
+ ipmi_inc_stat(intf, timed_out_lan_commands);
+ else
+ ipmi_inc_stat(intf, timed_out_ipmb_commands);
+ } else {
+ struct ipmi_smi_msg *smi_msg;
+ /* More retries, send again. */
+
+ /*
+ * Start with the max timer, set to normal timer after
+ * the message is sent.
+ */
+ ent->timeout = MAX_MSG_TIMEOUT;
+ ent->retries_left--;
+ smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
+ ent->seqid);
+ if (!smi_msg) {
+ if (is_lan_addr(&ent->recv_msg->addr))
+ ipmi_inc_stat(intf,
+ dropped_rexmit_lan_commands);
+ else
+ ipmi_inc_stat(intf,
+ dropped_rexmit_ipmb_commands);
+ return;
+ }
+
+ spin_unlock_irqrestore(&intf->seq_lock, *flags);
+
+ /*
+ * Send the new message. We send with a zero
+		 * priority.  It timed out; I doubt time is that
+ * critical now, and high priority messages are really
+ * only for messages to the local MC, which don't get
+ * resent.
+ */
+ handlers = intf->handlers;
+ if (handlers) {
+ if (is_lan_addr(&ent->recv_msg->addr))
+ ipmi_inc_stat(intf,
+ retransmitted_lan_commands);
+ else
+ ipmi_inc_stat(intf,
+ retransmitted_ipmb_commands);
+
+ intf->handlers->sender(intf->send_info,
+ smi_msg, 0);
+ } else
+ ipmi_free_smi_msg(smi_msg);
+
+ spin_lock_irqsave(&intf->seq_lock, *flags);
+ }
+}
+
+static void ipmi_timeout_handler(long timeout_period)
+{
+ ipmi_smi_t intf;
+ struct list_head timeouts;
+ struct ipmi_recv_msg *msg, *msg2;
+ struct ipmi_smi_msg *smi_msg, *smi_msg2;
+ unsigned long flags;
+ int i;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ /* See if any waiting messages need to be processed. */
+ spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+ list_for_each_entry_safe(smi_msg, smi_msg2,
+ &intf->waiting_msgs, link) {
+ if (!handle_new_recv_msg(intf, smi_msg)) {
+ list_del(&smi_msg->link);
+ ipmi_free_smi_msg(smi_msg);
+ } else {
+ /*
+ * To preserve message order, quit if we
+ * can't handle a message.
+ */
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+
+ /*
+ * Go through the seq table and find any messages that
+ * have timed out, putting them in the timeouts
+ * list.
+ */
+ INIT_LIST_HEAD(&timeouts);
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+ check_msg_timeout(intf, &(intf->seq_table[i]),
+ &timeouts, timeout_period, i,
+ &flags);
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+ list_for_each_entry_safe(msg, msg2, &timeouts, link)
+ deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
+
+ /*
+ * Maintenance mode handling. Check the timeout
+ * optimistically before we claim the lock. It may
+ * mean a timeout gets missed occasionally, but that
+ * only means the timeout gets extended by one period
+ * in that case. No big deal, and it avoids the lock
+ * most of the time.
+ */
+ if (intf->auto_maintenance_timeout > 0) {
+ spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+ if (intf->auto_maintenance_timeout > 0) {
+ intf->auto_maintenance_timeout
+ -= timeout_period;
+ if (!intf->maintenance_mode
+ && (intf->auto_maintenance_timeout <= 0)) {
+ intf->maintenance_mode_enable = 0;
+ maintenance_mode_update(intf);
+ }
+ }
+ spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+ flags);
+ }
+ }
+ rcu_read_unlock();
+}
+
+static void ipmi_request_event(void)
+{
+ ipmi_smi_t intf;
+ struct ipmi_smi_handlers *handlers;
+
+ rcu_read_lock();
+ /*
+	 * Called from the timer, so there is no need to check if
+	 * handlers is valid.
+ */
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ /* No event requests when in maintenance mode. */
+ if (intf->maintenance_mode_enable)
+ continue;
+
+ handlers = intf->handlers;
+ if (handlers)
+ handlers->request_events(intf->send_info);
+ }
+ rcu_read_unlock();
+}
+
+static struct timer_list ipmi_timer;
+
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME 1000
+
+/* How many jiffies does it take to get to the timeout time. */
+#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
+
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIME ticks between event requests).  Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
+#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
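+/*
+ * With IPMI_TIMEOUT_TIME at 1000 ms this evaluates to 1, i.e. an
+ * event request is made on every timer tick, roughly once a second.
+ */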
+
+static atomic_t stop_operation;
+static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+
+static void ipmi_timeout(unsigned long data)
+{
+ if (atomic_read(&stop_operation))
+ return;
+
+ ticks_to_req_ev--;
+ if (ticks_to_req_ev == 0) {
+ ipmi_request_event();
+ ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+ }
+
+ ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
+
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
+
+
+static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
+static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
+
+/* FIXME - convert these to slabs. */
+static void free_smi_msg(struct ipmi_smi_msg *msg)
+{
+ atomic_dec(&smi_msg_inuse_count);
+ kfree(msg);
+}
+
+struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
+{
+ struct ipmi_smi_msg *rv;
+ rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
+ if (rv) {
+ rv->done = free_smi_msg;
+ rv->user_data = NULL;
+ atomic_inc(&smi_msg_inuse_count);
+ }
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_alloc_smi_msg);
+
+static void free_recv_msg(struct ipmi_recv_msg *msg)
+{
+ atomic_dec(&recv_msg_inuse_count);
+ kfree(msg);
+}
+
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+{
+ struct ipmi_recv_msg *rv;
+
+ rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
+ if (rv) {
+ rv->user = NULL;
+ rv->done = free_recv_msg;
+ atomic_inc(&recv_msg_inuse_count);
+ }
+ return rv;
+}
+
+void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
+{
+ if (msg->user)
+ kref_put(&msg->user->refcount, free_user);
+ msg->done(msg);
+}
+EXPORT_SYMBOL(ipmi_free_recv_msg);
+
+#ifdef CONFIG_IPMI_PANIC_EVENT
+
+static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
+{
+}
+
+static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
+{
+}
+
+#ifdef CONFIG_IPMI_PANIC_STRING
+static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
+{
+ if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
+ && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
+ && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
+ /* A get event receiver command, save it. */
+ intf->event_receiver = msg->msg.data[1];
+ intf->event_receiver_lun = msg->msg.data[2] & 0x3;
+ }
+}
+
+static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
+{
+ if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
+ && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
+ && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
+ /*
+		 * A get device id response; record whether we are an
+		 * SEL device and whether we are an event generator.
+ */
+ intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
+ intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
+ }
+}
+#endif
+
+static void send_panic_events(char *str)
+{
+ struct kernel_ipmi_msg msg;
+ ipmi_smi_t intf;
+ unsigned char data[16];
+ struct ipmi_system_interface_addr *si;
+ struct ipmi_addr addr;
+ struct ipmi_smi_msg smi_msg;
+ struct ipmi_recv_msg recv_msg;
+
+ si = (struct ipmi_system_interface_addr *) &addr;
+ si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si->channel = IPMI_BMC_CHANNEL;
+ si->lun = 0;
+
+ /* Fill in an event telling that we have failed. */
+ msg.netfn = 0x04; /* Sensor or Event. */
+ msg.cmd = 2; /* Platform event command. */
+ msg.data = data;
+ msg.data_len = 8;
+ data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
+ data[1] = 0x03; /* This is for IPMI 1.0. */
+ data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
+ data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
+ data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
+
+ /*
+ * Put a few breadcrumbs in. Hopefully later we can add more things
+ * to make the panic events more useful.
+ */
+ if (str) {
+ data[3] = str[0];
+ data[6] = str[1];
+ data[7] = str[2];
+ }
+
+ smi_msg.done = dummy_smi_done_handler;
+ recv_msg.done = dummy_recv_done_handler;
+
+ /* For every registered interface, send the event. */
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (!intf->handlers)
+ /* Interface is not ready. */
+ continue;
+
+ intf->run_to_completion = 1;
+ /* Send the event announcing the panic. */
+ intf->handlers->set_run_to_completion(intf->send_info, 1);
+ i_ipmi_request(NULL,
+ intf,
+ &addr,
+ 0,
+ &msg,
+ intf,
+ &smi_msg,
+ &recv_msg,
+ 0,
+ intf->channels[0].address,
+ intf->channels[0].lun,
+ 0, 1); /* Don't retry, and don't wait. */
+ }
+
+#ifdef CONFIG_IPMI_PANIC_STRING
+ /*
+	 * On every interface, send a series of OEM events holding
+	 * the panic string.
+ */
+ if (!str)
+ return;
+
+ /* For every registered interface, send the event. */
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ char *p = str;
+ struct ipmi_ipmb_addr *ipmb;
+ int j;
+
+ if (intf->intf_num == -1)
+ /* Interface was not ready yet. */
+ continue;
+
+ /*
+	 * intf_num is used as a marker to tell if the
+ * interface is valid. Thus we need a read barrier to
+ * make sure data fetched before checking intf_num
+ * won't be used.
+ */
+ smp_rmb();
+
+ /*
+ * First job here is to figure out where to send the
+ * OEM events. There's no way in IPMI to send OEM
+ * events using an event send command, so we have to
+ * find the SEL to put them in and stick them in
+ * there.
+ */
+
+ /* Get capabilities from the get device id. */
+ intf->local_sel_device = 0;
+ intf->local_event_generator = 0;
+ intf->event_receiver = 0;
+
+ /* Request the device info from the local MC. */
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+ intf->null_user_handler = device_id_fetcher;
+ i_ipmi_request(NULL,
+ intf,
+ &addr,
+ 0,
+ &msg,
+ intf,
+ &smi_msg,
+ &recv_msg,
+ 0,
+ intf->channels[0].address,
+ intf->channels[0].lun,
+ 0, 1); /* Don't retry, and don't wait. */
+
+ if (intf->local_event_generator) {
+ /* Request the event receiver from the local MC. */
+ msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
+ msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+ intf->null_user_handler = event_receiver_fetcher;
+ i_ipmi_request(NULL,
+ intf,
+ &addr,
+ 0,
+ &msg,
+ intf,
+ &smi_msg,
+ &recv_msg,
+ 0,
+ intf->channels[0].address,
+ intf->channels[0].lun,
+ 0, 1); /* no retry, and no wait. */
+ }
+ intf->null_user_handler = NULL;
+
+ /*
+ * Validate the event receiver. The low bit must not
+ * be 1 (it must be a valid IPMB address), it cannot
+ * be zero, and it must not be my address.
+ */
+ if (((intf->event_receiver & 1) == 0)
+ && (intf->event_receiver != 0)
+ && (intf->event_receiver != intf->channels[0].address)) {
+ /*
+ * The event receiver is valid, send an IPMB
+ * message.
+ */
+ ipmb = (struct ipmi_ipmb_addr *) &addr;
+ ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb->channel = 0; /* FIXME - is this right? */
+ ipmb->lun = intf->event_receiver_lun;
+ ipmb->slave_addr = intf->event_receiver;
+ } else if (intf->local_sel_device) {
+ /*
+ * The event receiver was not valid (or was
+ * me), but I am an SEL device, just dump it
+ * in my SEL.
+ */
+ si = (struct ipmi_system_interface_addr *) &addr;
+ si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si->channel = IPMI_BMC_CHANNEL;
+ si->lun = 0;
+ } else
+			continue; /* Nowhere to send the event. */
+
+ msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
+ msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
+ msg.data = data;
+ msg.data_len = 16;
+
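+		/*
+		 * Chop the panic string into 11-byte chunks and add
+		 * each as an untimestamped OEM SEL record (record type
+		 * 0xf0), with a running sequence number so the pieces
+		 * can be reassembled later.
+		 */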
+ j = 0;
+ while (*p) {
+ int size = strlen(p);
+
+ if (size > 11)
+ size = 11;
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0xf0; /* OEM event without timestamp. */
+ data[3] = intf->channels[0].address;
+ data[4] = j++; /* sequence # */
+ /*
+ * Always give 11 bytes, so strncpy will fill
+ * it with zeroes for me.
+ */
+ strncpy(data+5, p, 11);
+ p += size;
+
+ i_ipmi_request(NULL,
+ intf,
+ &addr,
+ 0,
+ &msg,
+ intf,
+ &smi_msg,
+ &recv_msg,
+ 0,
+ intf->channels[0].address,
+ intf->channels[0].lun,
+ 0, 1); /* no retry, and no wait. */
+ }
+ }
+#endif /* CONFIG_IPMI_PANIC_STRING */
+}
+#endif /* CONFIG_IPMI_PANIC_EVENT */
+
+static int has_panicked;
+
+static int panic_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ ipmi_smi_t intf;
+
+ if (has_panicked)
+ return NOTIFY_DONE;
+ has_panicked = 1;
+
+ /* For every registered interface, set it to run to completion. */
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (!intf->handlers)
+ /* Interface is not ready. */
+ continue;
+
+ intf->run_to_completion = 1;
+ intf->handlers->set_run_to_completion(intf->send_info, 1);
+ }
+
+#ifdef CONFIG_IPMI_PANIC_EVENT
+ send_panic_events(ptr);
+#endif
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_block = {
+ .notifier_call = panic_event,
+ .next = NULL,
+ .priority = 200 /* priority: INT_MAX >= x >= 0 */
+};
+
+static int ipmi_init_msghandler(void)
+{
+ int rv;
+
+ if (initialized)
+ return 0;
+
+ rv = driver_register(&ipmidriver.driver);
+ if (rv) {
+ printk(KERN_ERR PFX "Could not register IPMI driver\n");
+ return rv;
+ }
+
+ printk(KERN_INFO "ipmi message handler version "
+ IPMI_DRIVER_VERSION "\n");
+
+#ifdef CONFIG_PROC_FS
+ proc_ipmi_root = proc_mkdir("ipmi", NULL);
+ if (!proc_ipmi_root) {
+		printk(KERN_ERR PFX "Unable to create IPMI proc dir\n");
+ return -ENOMEM;
+ }
+
+#endif /* CONFIG_PROC_FS */
+
+ setup_timer(&ipmi_timer, ipmi_timeout, 0);
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+ initialized = 1;
+
+ return 0;
+}
+
+static int __init ipmi_init_msghandler_mod(void)
+{
+ ipmi_init_msghandler();
+ return 0;
+}
+
+static void __exit cleanup_ipmi(void)
+{
+ int count;
+
+ if (!initialized)
+ return;
+
+ atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
+
+ /*
+ * This can't be called if any interfaces exist, so no worry
+ * about shutting down the interfaces.
+ */
+
+ /*
+ * Tell the timer to stop, then wait for it to stop. This
+ * avoids problems with race conditions removing the timer
+ * here.
+ */
+ atomic_inc(&stop_operation);
+ del_timer_sync(&ipmi_timer);
+
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry(proc_ipmi_root->name, NULL);
+#endif /* CONFIG_PROC_FS */
+
+ driver_unregister(&ipmidriver.driver);
+
+ initialized = 0;
+
+ /* Check for buffer leaks. */
+ count = atomic_read(&smi_msg_inuse_count);
+ if (count != 0)
+ printk(KERN_WARNING PFX "SMI message count %d at exit\n",
+ count);
+ count = atomic_read(&recv_msg_inuse_count);
+ if (count != 0)
+ printk(KERN_WARNING PFX "recv message count %d at exit\n",
+ count);
+}
+module_exit(cleanup_ipmi);
+
+module_init(ipmi_init_msghandler_mod);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
+ " interface.");
+MODULE_VERSION(IPMI_DRIVER_VERSION);
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
new file mode 100644
index 00000000..2efa176b
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -0,0 +1,749 @@
+/*
+ * ipmi_poweroff.c
+ *
+ * MontaVista IPMI Poweroff extension to sys_reboot
+ *
+ * Author: MontaVista Software, Inc.
+ * Steven Dake <sdake@mvista.com>
+ * Corey Minyard <cminyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002,2004 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/pm.h>
+#include <linux/kdev_t.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+
+#define PFX "IPMI poweroff: "
+
+static void ipmi_po_smi_gone(int if_num);
+static void ipmi_po_new_smi(int if_num, struct device *device);
+
+/*
+ * Definitions for controlling power off (if the system supports it).
+ * They conveniently match the IPMI chassis control values.
+ */
+#define IPMI_CHASSIS_POWER_DOWN 0 /* power down, the default. */
+#define IPMI_CHASSIS_POWER_CYCLE 0x02 /* power cycle */
+
+/* the IPMI data command */
+static int poweroff_powercycle;
+
+/* Which interface to use, -1 means the first we see. */
+static int ifnum_to_use = -1;
+
+/* Our local state. */
+static int ready;
+static ipmi_user_t ipmi_user;
+static int ipmi_ifnum;
+static void (*specific_poweroff_func)(ipmi_user_t user);
+
+/* Holds the old poweroff function so we can restore it on removal. */
+static void (*old_poweroff_func)(void);
+
+static int set_param_ifnum(const char *val, struct kernel_param *kp)
+{
+ int rv = param_set_int(val, kp);
+ if (rv)
+ return rv;
+ if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum))
+ return 0;
+
+ ipmi_po_smi_gone(ipmi_ifnum);
+ ipmi_po_new_smi(ifnum_to_use, NULL);
+ return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_ifnum, param_get_int,
+ &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the poweroff "
+		 "code.  Setting to -1 defaults to the first registered "
+ "interface");
+
+/* parameter definition to allow user to flag power cycle */
+module_param(poweroff_powercycle, int, 0644);
+MODULE_PARM_DESC(poweroff_powercycle,
+ " Set to non-zero to enable power cycle instead of power"
+ " down. Power cycle is contingent on hardware support,"
+ " otherwise it defaults back to power down.");
+
+/* Stuff from the get device id command. */
+static unsigned int mfg_id;
+static unsigned int prod_id;
+static unsigned char capabilities;
+static unsigned char ipmi_version;
+
+/*
+ * We use our own messages for this operation, we don't let the system
+ * allocate them, since we may be in a panic situation. The whole
+ * thing is single-threaded, anyway, so multiple messages are not
+ * required.
+ */
+static atomic_t dummy_count = ATOMIC_INIT(0);
+static void dummy_smi_free(struct ipmi_smi_msg *msg)
+{
+ atomic_dec(&dummy_count);
+}
+static void dummy_recv_free(struct ipmi_recv_msg *msg)
+{
+ atomic_dec(&dummy_count);
+}
+static struct ipmi_smi_msg halt_smi_msg = {
+ .done = dummy_smi_free
+};
+static struct ipmi_recv_msg halt_recv_msg = {
+ .done = dummy_recv_free
+};
+
+
+/*
+ * Code to send a message and wait for the response.
+ */
+
+static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
+{
+ struct completion *comp = recv_msg->user_msg_data;
+
+ if (comp)
+ complete(comp);
+}
+
+static struct ipmi_user_hndl ipmi_poweroff_handler = {
+ .ipmi_recv_hndl = receive_handler
+};
+
+
+static int ipmi_request_wait_for_response(ipmi_user_t user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *send_msg)
+{
+ int rv;
+ struct completion comp;
+
+ init_completion(&comp);
+
+ rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, &comp,
+ &halt_smi_msg, &halt_recv_msg, 0);
+ if (rv)
+ return rv;
+
+ wait_for_completion(&comp);
+
+ return halt_recv_msg.msg.data[0];
+}
+
+/* Wait for message to complete, spinning. */
+static int ipmi_request_in_rc_mode(ipmi_user_t user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *send_msg)
+{
+ int rv;
+
+ atomic_set(&dummy_count, 2);
+ rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
+ &halt_smi_msg, &halt_recv_msg, 0);
+ if (rv) {
+ atomic_set(&dummy_count, 0);
+ return rv;
+ }
+
+ /*
+ * Spin until our message is done.
+ */
+ while (atomic_read(&dummy_count) > 0) {
+ ipmi_poll_interface(user);
+ cpu_relax();
+ }
+
+ return halt_recv_msg.msg.data[0];
+}
+
+/*
+ * ATCA Support
+ */
+
+#define IPMI_NETFN_ATCA 0x2c
+#define IPMI_ATCA_SET_POWER_CMD 0x11
+#define IPMI_ATCA_GET_ADDR_INFO_CMD 0x01
+#define IPMI_PICMG_ID 0
+
+#define IPMI_NETFN_OEM 0x2e
+#define IPMI_ATCA_PPS_GRACEFUL_RESTART 0x11
+#define IPMI_ATCA_PPS_IANA "\x00\x40\x0A"
+#define IPMI_MOTOROLA_MANUFACTURER_ID 0x0000A1
+#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID 0x0051
+
+static void (*atca_oem_poweroff_hook)(ipmi_user_t user);
+
+static void pps_poweroff_atca(ipmi_user_t user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+	printk(KERN_INFO PFX "PPS powerdown hook used\n");
+
+ send_msg.netfn = IPMI_NETFN_OEM;
+ send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
+ send_msg.data = IPMI_ATCA_PPS_IANA;
+ send_msg.data_len = 3;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+		printk(KERN_ERR PFX "Unable to send ATCA PPS restart message,"
+ " IPMI error 0x%x\n", rv);
+ }
+ return;
+}
+
+static int ipmi_atca_detect(ipmi_user_t user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[1];
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ /*
+ * Use get address info to check and see if we are ATCA
+ */
+ send_msg.netfn = IPMI_NETFN_ATCA;
+ send_msg.cmd = IPMI_ATCA_GET_ADDR_INFO_CMD;
+ data[0] = IPMI_PICMG_ID;
+ send_msg.data = data;
+ send_msg.data_len = sizeof(data);
+ rv = ipmi_request_wait_for_response(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+
+ printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n",
+ mfg_id, prod_id);
+ if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
+ && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
+ printk(KERN_INFO PFX
+ "Installing Pigeon Point Systems Poweroff Hook\n");
+ atca_oem_poweroff_hook = pps_poweroff_atca;
+ }
+ return !rv;
+}
+
+static void ipmi_poweroff_atca(ipmi_user_t user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[4];
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ printk(KERN_INFO PFX "Powering down via ATCA power command\n");
+
+ /*
+ * Power down
+ */
+ send_msg.netfn = IPMI_NETFN_ATCA;
+ send_msg.cmd = IPMI_ATCA_SET_POWER_CMD;
+ data[0] = IPMI_PICMG_ID;
+ data[1] = 0; /* FRU id */
+ data[2] = 0; /* Power Level */
+ data[3] = 0; /* Don't change saved presets */
+ send_msg.data = data;
+ send_msg.data_len = sizeof(data);
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ /*
+ * At this point, the system may be shutting down, and most
+	 * serial drivers (if used) will have interrupts turned off;
+	 * it may be better to ignore an IPMI_UNKNOWN_ERR_COMPLETION_CODE
+	 * return code.
+ */
+ if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+ printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
+ " IPMI error 0x%x\n", rv);
+ goto out;
+ }
+
+ if (atca_oem_poweroff_hook)
+ atca_oem_poweroff_hook(user);
+ out:
+ return;
+}
+
+/*
+ * CPI1 Support
+ */
+
+#define IPMI_NETFN_OEM_1 0xf8
+#define OEM_GRP_CMD_SET_RESET_STATE 0x84
+#define OEM_GRP_CMD_SET_POWER_STATE 0x82
+#define IPMI_NETFN_OEM_8 0xf8
+#define OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL 0x80
+#define OEM_GRP_CMD_GET_SLOT_GA 0xa3
+#define IPMI_NETFN_SENSOR_EVT 0x10
+#define IPMI_CMD_GET_EVENT_RECEIVER 0x01
+
+#define IPMI_CPI1_PRODUCT_ID 0x000157
+#define IPMI_CPI1_MANUFACTURER_ID 0x0108
+
+static int ipmi_cpi1_detect(ipmi_user_t user)
+{
+ return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID)
+ && (prod_id == IPMI_CPI1_PRODUCT_ID));
+}
+
+static void ipmi_poweroff_cpi1(ipmi_user_t user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct ipmi_ipmb_addr ipmb_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[1];
+ int slot;
+ unsigned char hotswap_ipmb;
+ unsigned char aer_addr;
+ unsigned char aer_lun;
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ printk(KERN_INFO PFX "Powering down via CPI1 power command\n");
+
+ /*
+ * Get IPMI ipmb address
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_GET_SLOT_GA;
+ send_msg.data = NULL;
+ send_msg.data_len = 0;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+ slot = halt_recv_msg.msg.data[1];
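+	/*
+	 * Derive the blade's hotswap IPMB address from its geographic
+	 * slot number; evidently the CPI1 addressing convention, with
+	 * a discontinuity above slot 9.
+	 */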
+ hotswap_ipmb = (slot > 9) ? (0xb0 + 2 * slot) : (0xae + 2 * slot);
+
+ /*
+ * Get active event receiver
+ */
+ send_msg.netfn = IPMI_NETFN_SENSOR_EVT >> 2;
+ send_msg.cmd = IPMI_CMD_GET_EVENT_RECEIVER;
+ send_msg.data = NULL;
+ send_msg.data_len = 0;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+ aer_addr = halt_recv_msg.msg.data[1];
+ aer_lun = halt_recv_msg.msg.data[2];
+
+ /*
+ * Setup IPMB address target instead of local target
+ */
+ ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr.channel = 0;
+ ipmb_addr.slave_addr = aer_addr;
+ ipmb_addr.lun = aer_lun;
+
+ /*
+ * Send request hotswap control to remove blade from dpv
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL;
+ send_msg.data = &hotswap_ipmb;
+ send_msg.data_len = 1;
+ ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &ipmb_addr,
+ &send_msg);
+
+ /*
+ * Set reset asserted
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_SET_RESET_STATE;
+ send_msg.data = data;
+ data[0] = 1; /* Reset asserted state */
+ send_msg.data_len = 1;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+
+ /*
+ * Power down
+ */
+ send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
+ send_msg.cmd = OEM_GRP_CMD_SET_POWER_STATE;
+ send_msg.data = data;
+ data[0] = 1; /* Power down state */
+ send_msg.data_len = 1;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv)
+ goto out;
+
+ out:
+ return;
+}
+
+/*
+ * ipmi_dell_chassis_detect()
+ * Dell systems with IPMI < 1.5 don't set the chassis capability bit
+ * but they can handle a chassis poweroff or powercycle command.
+ */
+
+#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
+static int ipmi_dell_chassis_detect(ipmi_user_t user)
+{
+ const char ipmi_version_major = ipmi_version & 0xF;
+ const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
+ const char mfr[3] = DELL_IANA_MFR_ID;
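+	/*
+	 * Note: this compares the 3-byte IANA ID against the first
+	 * three bytes of mfg_id as it sits in memory, which only
+	 * matches on little-endian machines; presumably fine for the
+	 * x86 Dell systems this targets.
+	 */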
+ if (!memcmp(mfr, &mfg_id, sizeof(mfr)) &&
+ ipmi_version_major <= 1 &&
+ ipmi_version_minor < 5)
+ return 1;
+ return 0;
+}
+
+/*
+ * Standard chassis support
+ */
+
+#define IPMI_NETFN_CHASSIS_REQUEST 0
+#define IPMI_CHASSIS_CONTROL_CMD 0x02
+
+static int ipmi_chassis_detect(ipmi_user_t user)
+{
+ /* Chassis support, use it. */
+ return (capabilities & 0x80);
+}
+
+static void ipmi_poweroff_chassis(ipmi_user_t user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ unsigned char data[1];
+
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ powercyclefailed:
+ printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n",
+ (poweroff_powercycle ? "cycle" : "down"));
+
+ /*
+ * Power down
+ */
+ send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST;
+ send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD;
+ if (poweroff_powercycle)
+ data[0] = IPMI_CHASSIS_POWER_CYCLE;
+ else
+ data[0] = IPMI_CHASSIS_POWER_DOWN;
+ send_msg.data = data;
+ send_msg.data_len = sizeof(data);
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv) {
+ if (poweroff_powercycle) {
+ /* power cycle failed, default to power down */
+ printk(KERN_ERR PFX "Unable to send chassis power " \
+ "cycle message, IPMI error 0x%x\n", rv);
+ poweroff_powercycle = 0;
+ goto powercyclefailed;
+ }
+
+ printk(KERN_ERR PFX "Unable to send chassis power " \
+ "down message, IPMI error 0x%x\n", rv);
+ }
+}
+
+
+/* Table of possible power off functions. */
+struct poweroff_function {
+ char *platform_type;
+ int (*detect)(ipmi_user_t user);
+ void (*poweroff_func)(ipmi_user_t user);
+};
+
+static struct poweroff_function poweroff_functions[] = {
+ { .platform_type = "ATCA",
+ .detect = ipmi_atca_detect,
+ .poweroff_func = ipmi_poweroff_atca },
+ { .platform_type = "CPI1",
+ .detect = ipmi_cpi1_detect,
+ .poweroff_func = ipmi_poweroff_cpi1 },
+ { .platform_type = "chassis",
+ .detect = ipmi_dell_chassis_detect,
+ .poweroff_func = ipmi_poweroff_chassis },
+	/*
+	 * Chassis should generally be last; other things should
+	 * override it.
+	 */
+ { .platform_type = "chassis",
+ .detect = ipmi_chassis_detect,
+ .poweroff_func = ipmi_poweroff_chassis },
+};
+#define NUM_PO_FUNCS (sizeof(poweroff_functions) \
+ / sizeof(struct poweroff_function))
+
+
+/* Called on a powerdown request. */
+static void ipmi_poweroff_function(void)
+{
+ if (!ready)
+ return;
+
+ /* Use run-to-completion mode, since interrupts may be off. */
+ specific_poweroff_func(ipmi_user);
+}
+
+/*
+ * Wait for an IPMI interface to be installed; the first one installed
+ * will be grabbed by this code and used to perform the powerdown.
+ */
+static void ipmi_po_new_smi(int if_num, struct device *device)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ int i;
+
+ if (ready)
+ return;
+
+ if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num))
+ return;
+
+ rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL,
+ &ipmi_user);
+ if (rv) {
+ printk(KERN_ERR PFX "could not create IPMI user, error %d\n",
+ rv);
+ return;
+ }
+
+ ipmi_ifnum = if_num;
+
+ /*
+	 * Do a Get Device ID and store some of the results, since
+	 * they are used by several functions.
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ send_msg.netfn = IPMI_NETFN_APP_REQUEST;
+ send_msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+ send_msg.data = NULL;
+ send_msg.data_len = 0;
+ rv = ipmi_request_wait_for_response(ipmi_user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv) {
+ printk(KERN_ERR PFX "Unable to send IPMI get device id info,"
+ " IPMI error 0x%x\n", rv);
+ goto out_err;
+ }
+
+ if (halt_recv_msg.msg.data_len < 12) {
+		printk(KERN_ERR PFX "(chassis) IPMI get device id info too"
+		       " short, was %d bytes, needed %d bytes\n",
+ halt_recv_msg.msg.data_len, 12);
+ goto out_err;
+ }
+
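+	/*
+	 * Pull out the Get Device ID fields this driver cares about:
+	 * data[5] is the IPMI version (BCD, major in the low nibble),
+	 * data[6] the additional device support byte (bit 7 means
+	 * chassis device), data[7..9] the IANA manufacturer ID (LSB
+	 * first), and data[10..11] the product ID.
+	 */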
+ mfg_id = (halt_recv_msg.msg.data[7]
+ | (halt_recv_msg.msg.data[8] << 8)
+ | (halt_recv_msg.msg.data[9] << 16));
+ prod_id = (halt_recv_msg.msg.data[10]
+ | (halt_recv_msg.msg.data[11] << 8));
+ capabilities = halt_recv_msg.msg.data[6];
+ ipmi_version = halt_recv_msg.msg.data[5];
+
+
+ /* Scan for a poweroff method */
+ for (i = 0; i < NUM_PO_FUNCS; i++) {
+ if (poweroff_functions[i].detect(ipmi_user))
+ goto found;
+ }
+
+ out_err:
+ printk(KERN_ERR PFX "Unable to find a poweroff function that"
+ " will work, giving up\n");
+ ipmi_destroy_user(ipmi_user);
+ return;
+
+ found:
+ printk(KERN_INFO PFX "Found a %s style poweroff function\n",
+ poweroff_functions[i].platform_type);
+ specific_poweroff_func = poweroff_functions[i].poweroff_func;
+ old_poweroff_func = pm_power_off;
+ pm_power_off = ipmi_poweroff_function;
+ ready = 1;
+}
+
+static void ipmi_po_smi_gone(int if_num)
+{
+ if (!ready)
+ return;
+
+ if (ipmi_ifnum != if_num)
+ return;
+
+ ready = 0;
+ ipmi_destroy_user(ipmi_user);
+ pm_power_off = old_poweroff_func;
+}
+
+static struct ipmi_smi_watcher smi_watcher = {
+ .owner = THIS_MODULE,
+ .new_smi = ipmi_po_new_smi,
+ .smi_gone = ipmi_po_smi_gone
+};
+
+
+#ifdef CONFIG_PROC_FS
+#include <linux/sysctl.h>
+
+static ctl_table ipmi_table[] = {
+ { .procname = "poweroff_powercycle",
+ .data = &poweroff_powercycle,
+ .maxlen = sizeof(poweroff_powercycle),
+ .mode = 0644,
+ .proc_handler = proc_dointvec },
+ { }
+};
+
+static ctl_table ipmi_dir_table[] = {
+ { .procname = "ipmi",
+ .mode = 0555,
+ .child = ipmi_table },
+ { }
+};
+
+static ctl_table ipmi_root_table[] = {
+ { .procname = "dev",
+ .mode = 0555,
+ .child = ipmi_dir_table },
+ { }
+};
+
+static struct ctl_table_header *ipmi_table_header;
+#endif /* CONFIG_PROC_FS */
+
+/*
+ * Startup and shutdown functions.
+ */
+static int __init ipmi_poweroff_init(void)
+{
+ int rv;
+
+ printk(KERN_INFO "Copyright (C) 2004 MontaVista Software -"
+ " IPMI Powerdown via sys_reboot.\n");
+
+ if (poweroff_powercycle)
+ printk(KERN_INFO PFX "Power cycle is enabled.\n");
+
+#ifdef CONFIG_PROC_FS
+ ipmi_table_header = register_sysctl_table(ipmi_root_table);
+ if (!ipmi_table_header) {
+ printk(KERN_ERR PFX "Unable to register powercycle sysctl\n");
+ rv = -ENOMEM;
+ goto out_err;
+ }
+#endif
+
+ rv = ipmi_smi_watcher_register(&smi_watcher);
+
+#ifdef CONFIG_PROC_FS
+ if (rv) {
+ unregister_sysctl_table(ipmi_table_header);
+ printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
+ goto out_err;
+ }
+
+ out_err:
+#endif
+ return rv;
+}
+
+#ifdef MODULE
+static void __exit ipmi_poweroff_cleanup(void)
+{
+ int rv;
+
+#ifdef CONFIG_PROC_FS
+ unregister_sysctl_table(ipmi_table_header);
+#endif
+
+ ipmi_smi_watcher_unregister(&smi_watcher);
+
+ if (ready) {
+ rv = ipmi_destroy_user(ipmi_user);
+ if (rv)
+ printk(KERN_ERR PFX "could not cleanup the IPMI"
+ " user: 0x%x\n", rv);
+ pm_power_off = old_poweroff_func;
+ }
+}
+module_exit(ipmi_poweroff_cleanup);
+#endif
+
+module_init(ipmi_poweroff_init);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("IPMI Poweroff extension to sys_reboot");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
new file mode 100644
index 00000000..9397ab49
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -0,0 +1,3587 @@
+/*
+ * ipmi_si.c
+ *
+ * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
+ * BT).
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * This file holds the "policy" for the interface to the SMI state
+ * machine. It does the configuration, handles timers and interrupts,
+ * and drives the real SMI state machine.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <asm/system.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/notifier.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <asm/irq.h>
+#include <linux/interrupt.h>
+#include <linux/rcupdate.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <asm/io.h>
+#include "ipmi_si_sm.h"
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/pnp.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define PFX "ipmi_si: "
+
+/* Measure times between events in the driver. */
+#undef DEBUG_TIMING
+
+/* Call every 10 ms. */
+#define SI_TIMEOUT_TIME_USEC 10000
+#define SI_USEC_PER_JIFFY (1000000/HZ)
+#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
+#define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
+				      short timeout */
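+/*
+ * For example, at HZ=100 a jiffy is 10000 usec and SI_TIMEOUT_JIFFIES
+ * works out to 1; at HZ=1000 it is 10.  Either way the long timeout
+ * fires roughly every 10 ms.
+ */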
+
+enum si_intf_state {
+ SI_NORMAL,
+ SI_GETTING_FLAGS,
+ SI_GETTING_EVENTS,
+ SI_CLEARING_FLAGS,
+ SI_CLEARING_FLAGS_THEN_SET_IRQ,
+ SI_GETTING_MESSAGES,
+ SI_ENABLE_INTERRUPTS1,
+ SI_ENABLE_INTERRUPTS2,
+ SI_DISABLE_INTERRUPTS1,
+ SI_DISABLE_INTERRUPTS2
+ /* FIXME - add watchdog stuff. */
+};
+
+/* Some BT-specific defines we need here. */
+#define IPMI_BT_INTMASK_REG 2
+#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
+#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
+
+enum si_type {
+ SI_KCS, SI_SMIC, SI_BT
+};
+static char *si_to_str[] = { "kcs", "smic", "bt" };
+
+static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
+ "ACPI", "SMBIOS", "PCI",
+ "device-tree", "default" };
+
+#define DEVICE_NAME "ipmi_si"
+
+static struct platform_driver ipmi_driver;
+
+/*
+ * Indexes into stats[] in smi_info below.
+ */
+enum si_stat_indexes {
+ /*
+ * Number of times the driver requested a timer while an operation
+ * was in progress.
+ */
+ SI_STAT_short_timeouts = 0,
+
+ /*
+ * Number of times the driver requested a timer while nothing was in
+ * progress.
+ */
+ SI_STAT_long_timeouts,
+
+ /* Number of times the interface was idle while being polled. */
+ SI_STAT_idles,
+
+ /* Number of interrupts the driver handled. */
+ SI_STAT_interrupts,
+
+	/* Number of times the driver got an ATTN from the hardware. */
+ SI_STAT_attentions,
+
+ /* Number of times the driver requested flags from the hardware. */
+ SI_STAT_flag_fetches,
+
+ /* Number of times the hardware didn't follow the state machine. */
+ SI_STAT_hosed_count,
+
+ /* Number of completed messages. */
+ SI_STAT_complete_transactions,
+
+ /* Number of IPMI events received from the hardware. */
+ SI_STAT_events,
+
+ /* Number of watchdog pretimeouts. */
+ SI_STAT_watchdog_pretimeouts,
+
+	/* Number of asynchronous messages received. */
+ SI_STAT_incoming_messages,
+
+
+ /* This *must* remain last, add new values above this. */
+ SI_NUM_STATS
+};
+
+struct smi_info {
+ int intf_num;
+ ipmi_smi_t intf;
+ struct si_sm_data *si_sm;
+ struct si_sm_handlers *handlers;
+ enum si_type si_type;
+ spinlock_t si_lock;
+ spinlock_t msg_lock;
+ struct list_head xmit_msgs;
+ struct list_head hp_xmit_msgs;
+ struct ipmi_smi_msg *curr_msg;
+ enum si_intf_state si_state;
+
+ /*
+ * Used to handle the various types of I/O that can occur with
+ * IPMI
+ */
+ struct si_sm_io io;
+ int (*io_setup)(struct smi_info *info);
+ void (*io_cleanup)(struct smi_info *info);
+ int (*irq_setup)(struct smi_info *info);
+ void (*irq_cleanup)(struct smi_info *info);
+ unsigned int io_size;
+ enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
+ void (*addr_source_cleanup)(struct smi_info *info);
+ void *addr_source_data;
+
+ /*
+ * Per-OEM handler, called from handle_flags(). Returns 1
+	 * when handle_flags() needs to be re-run, or 0 if it has
+	 * set si_state itself.
+ */
+ int (*oem_data_avail_handler)(struct smi_info *smi_info);
+
+ /*
+ * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+ * is set to hold the flags until we are done handling everything
+ * from the flags.
+ */
+#define RECEIVE_MSG_AVAIL 0x01
+#define EVENT_MSG_BUFFER_FULL 0x02
+#define WDT_PRE_TIMEOUT_INT 0x08
+#define OEM0_DATA_AVAIL 0x20
+#define OEM1_DATA_AVAIL 0x40
+#define OEM2_DATA_AVAIL 0x80
+#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
+ OEM1_DATA_AVAIL | \
+ OEM2_DATA_AVAIL)
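+	/*
+	 * As an example, a flags byte of 0x09 means a message is
+	 * waiting (RECEIVE_MSG_AVAIL) and the watchdog pre-timeout
+	 * fired (WDT_PRE_TIMEOUT_INT).
+	 */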
+ unsigned char msg_flags;
+
+ /* Does the BMC have an event buffer? */
+ char has_event_buffer;
+
+ /*
+ * If set to true, this will request events the next time the
+ * state machine is idle.
+ */
+ atomic_t req_events;
+
+ /*
+ * If true, run the state machine to completion on every send
+ * call. Generally used after a panic to make sure stuff goes
+ * out.
+ */
+ int run_to_completion;
+
+ /* The I/O port of an SI interface. */
+ int port;
+
+ /*
+ * The space between start addresses of the two ports. For
+ * instance, if the first port is 0xca2 and the spacing is 4, then
+ * the second port is 0xca6.
+ */
+ unsigned int spacing;
+
+ /* zero if no irq; */
+ int irq;
+
+ /* The timer for this si. */
+ struct timer_list si_timer;
+
+ /* The time (in jiffies) the last timeout occurred at. */
+ unsigned long last_timeout_jiffies;
+
+ /* Used to gracefully stop the timer without race conditions. */
+ atomic_t stop_operation;
+
+ /*
+ * The driver will disable interrupts when it gets into a
+ * situation where it cannot handle messages due to lack of
+ * memory. Once that situation clears up, it will re-enable
+ * interrupts.
+ */
+ int interrupt_disabled;
+
+ /* From the get device id response... */
+ struct ipmi_device_id device_id;
+
+ /* Driver model stuff. */
+ struct device *dev;
+ struct platform_device *pdev;
+
+ /*
+ * True if we allocated the device, false if it came from
+ * someplace else (like PCI).
+ */
+ int dev_registered;
+
+ /* Slave address, could be reported from DMI. */
+ unsigned char slave_addr;
+
+ /* Counters and things for the proc filesystem. */
+ atomic_t stats[SI_NUM_STATS];
+
+ struct task_struct *thread;
+
+ struct list_head link;
+ union ipmi_smi_info_union addr_info;
+};
+
+#define smi_inc_stat(smi, stat) \
+ atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
+#define smi_get_stat(smi, stat) \
+ ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
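+/*
+ * The stat name is token-pasted onto the SI_STAT_ prefix, so e.g.
+ * smi_inc_stat(smi, attentions) expands to
+ * atomic_inc(&(smi)->stats[SI_STAT_attentions]).
+ */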
+
+#define SI_MAX_PARMS 4
+
+static int force_kipmid[SI_MAX_PARMS];
+static int num_force_kipmid;
+#ifdef CONFIG_PCI
+static int pci_registered;
+#endif
+#ifdef CONFIG_ACPI
+static int pnp_registered;
+#endif
+
+static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
+static int num_max_busy_us;
+
+static int unload_when_empty = 1;
+
+static int add_smi(struct smi_info *smi);
+static int try_smi_init(struct smi_info *smi);
+static void cleanup_one_si(struct smi_info *to_clean);
+static void cleanup_ipmi_si(void);
+
+static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
+static int register_xaction_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&xaction_notifier_list, nb);
+}
+
+static void deliver_recv_msg(struct smi_info *smi_info,
+ struct ipmi_smi_msg *msg)
+{
+	/*
+	 * Deliver the message to the upper layer with the lock
+	 * released.
+	 */
+
+ if (smi_info->run_to_completion) {
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ } else {
+ spin_unlock(&(smi_info->si_lock));
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ spin_lock(&(smi_info->si_lock));
+ }
+}
+
+static void return_hosed_msg(struct smi_info *smi_info, int cCode)
+{
+ struct ipmi_smi_msg *msg = smi_info->curr_msg;
+
+ if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
+ cCode = IPMI_ERR_UNSPECIFIED;
+ /* else use it as is */
+
+ /* Make it a response */
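+	/*
+	 * The response netfn is the request netfn with the low bit set,
+	 * and the netfn lives in bits 2-7 of the first byte, so OR-ing
+	 * in 4 turns e.g. an APP request byte of 0x18 into the matching
+	 * APP response byte 0x1c.
+	 */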
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = cCode;
+ msg->rsp_size = 3;
+
+ smi_info->curr_msg = NULL;
+ deliver_recv_msg(smi_info, msg);
+}
+
+static enum si_sm_result start_next_msg(struct smi_info *smi_info)
+{
+ int rv;
+ struct list_head *entry = NULL;
+#ifdef DEBUG_TIMING
+ struct timeval t;
+#endif
+
+ /*
+	 * No need to save flags, we already have interrupts off and we
+ * already hold the SMI lock.
+ */
+ if (!smi_info->run_to_completion)
+ spin_lock(&(smi_info->msg_lock));
+
+ /* Pick the high priority queue first. */
+ if (!list_empty(&(smi_info->hp_xmit_msgs))) {
+ entry = smi_info->hp_xmit_msgs.next;
+ } else if (!list_empty(&(smi_info->xmit_msgs))) {
+ entry = smi_info->xmit_msgs.next;
+ }
+
+ if (!entry) {
+ smi_info->curr_msg = NULL;
+ rv = SI_SM_IDLE;
+ } else {
+ int err;
+
+ list_del(entry);
+ smi_info->curr_msg = list_entry(entry,
+ struct ipmi_smi_msg,
+ link);
+#ifdef DEBUG_TIMING
+ do_gettimeofday(&t);
+		printk(KERN_DEBUG "**Start2: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
+#endif
+ err = atomic_notifier_call_chain(&xaction_notifier_list,
+ 0, smi_info);
+ if (err & NOTIFY_STOP_MASK) {
+ rv = SI_SM_CALL_WITHOUT_DELAY;
+ goto out;
+ }
+ err = smi_info->handlers->start_transaction(
+ smi_info->si_sm,
+ smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ if (err)
+ return_hosed_msg(smi_info, err);
+
+ rv = SI_SM_CALL_WITHOUT_DELAY;
+ }
+ out:
+ if (!smi_info->run_to_completion)
+ spin_unlock(&(smi_info->msg_lock));
+
+ return rv;
+}
+
+static void start_enable_irq(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+
+ /*
+ * If we are enabling interrupts, we have to tell the
+ * BMC to use them.
+ */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+ smi_info->si_state = SI_ENABLE_INTERRUPTS1;
+}
+
+static void start_disable_irq(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+ smi_info->si_state = SI_DISABLE_INTERRUPTS1;
+}
+
+static void start_clear_flags(struct smi_info *smi_info)
+{
+ unsigned char msg[3];
+
+ /* Make sure the watchdog pre-timeout flag is not set at startup. */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ msg[2] = WDT_PRE_TIMEOUT_INT;
+
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+ smi_info->si_state = SI_CLEARING_FLAGS;
+}
+
+/*
+ * When we have a situation where we run out of memory and cannot
+ * allocate messages, we just leave them in the BMC and run the system
+ * polled until we can allocate some memory. Once we have some
+ * memory, we will re-enable the interrupt.
+ */
+static inline void disable_si_irq(struct smi_info *smi_info)
+{
+ if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+ start_disable_irq(smi_info);
+ smi_info->interrupt_disabled = 1;
+ if (!atomic_read(&smi_info->stop_operation))
+ mod_timer(&smi_info->si_timer,
+ jiffies + SI_TIMEOUT_JIFFIES);
+ }
+}
+
+static inline void enable_si_irq(struct smi_info *smi_info)
+{
+ if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
+ start_enable_irq(smi_info);
+ smi_info->interrupt_disabled = 0;
+ }
+}
+
+static void handle_flags(struct smi_info *smi_info)
+{
+ retry:
+ if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
+ /* Watchdog pre-timeout */
+ smi_inc_stat(smi_info, watchdog_pretimeouts);
+
+ start_clear_flags(smi_info);
+ smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+ spin_unlock(&(smi_info->si_lock));
+ ipmi_smi_watchdog_pretimeout(smi_info->intf);
+ spin_lock(&(smi_info->si_lock));
+ } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
+ /* Messages available. */
+ smi_info->curr_msg = ipmi_alloc_smi_msg();
+ if (!smi_info->curr_msg) {
+ disable_si_irq(smi_info);
+ smi_info->si_state = SI_NORMAL;
+ return;
+ }
+ enable_si_irq(smi_info);
+
+ smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm,
+ smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_MESSAGES;
+ } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
+ /* Events available. */
+ smi_info->curr_msg = ipmi_alloc_smi_msg();
+ if (!smi_info->curr_msg) {
+ disable_si_irq(smi_info);
+ smi_info->si_state = SI_NORMAL;
+ return;
+ }
+ enable_si_irq(smi_info);
+
+ smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm,
+ smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_EVENTS;
+ } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
+ smi_info->oem_data_avail_handler) {
+ if (smi_info->oem_data_avail_handler(smi_info))
+ goto retry;
+ } else
+ smi_info->si_state = SI_NORMAL;
+}
+
+static void handle_transaction_done(struct smi_info *smi_info)
+{
+ struct ipmi_smi_msg *msg;
+#ifdef DEBUG_TIMING
+ struct timeval t;
+
+ do_gettimeofday(&t);
+	printk(KERN_DEBUG "**Done: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
+#endif
+ switch (smi_info->si_state) {
+ case SI_NORMAL:
+ if (!smi_info->curr_msg)
+ break;
+
+ smi_info->curr_msg->rsp_size
+ = smi_info->handlers->get_result(
+ smi_info->si_sm,
+ smi_info->curr_msg->rsp,
+ IPMI_MAX_MSG_LENGTH);
+
+ /*
+		 * Do this here because deliver_recv_msg() releases the
+ * lock, and a new message can be put in during the
+ * time the lock is released.
+ */
+ msg = smi_info->curr_msg;
+ smi_info->curr_msg = NULL;
+ deliver_recv_msg(smi_info, msg);
+ break;
+
+ case SI_GETTING_FLAGS:
+ {
+ unsigned char msg[4];
+ unsigned int len;
+
+ /* We got the flags from the SMI, now handle them. */
+ len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+ if (msg[2] != 0) {
+ /* Error fetching flags, just give up for now. */
+ smi_info->si_state = SI_NORMAL;
+ } else if (len < 4) {
+ /*
+ * Hmm, no flags. That's technically illegal, but
+ * don't use uninitialized data.
+ */
+ smi_info->si_state = SI_NORMAL;
+ } else {
+ smi_info->msg_flags = msg[3];
+ handle_flags(smi_info);
+ }
+ break;
+ }
+
+ case SI_CLEARING_FLAGS:
+ case SI_CLEARING_FLAGS_THEN_SET_IRQ:
+ {
+ unsigned char msg[3];
+
+ /* We cleared the flags. */
+ smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
+ if (msg[2] != 0) {
+ /* Error clearing flags */
+ dev_warn(smi_info->dev,
+ "Error clearing flags: %2.2x\n", msg[2]);
+ }
+ if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
+ start_enable_irq(smi_info);
+ else
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+
+ case SI_GETTING_EVENTS:
+ {
+ smi_info->curr_msg->rsp_size
+ = smi_info->handlers->get_result(
+ smi_info->si_sm,
+ smi_info->curr_msg->rsp,
+ IPMI_MAX_MSG_LENGTH);
+
+ /*
+		 * Do this here because deliver_recv_msg() releases the
+ * lock, and a new message can be put in during the
+ * time the lock is released.
+ */
+ msg = smi_info->curr_msg;
+ smi_info->curr_msg = NULL;
+ if (msg->rsp[2] != 0) {
+ /* Error getting event, probably done. */
+ msg->done(msg);
+
+ /* Take off the event flag. */
+ smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+ handle_flags(smi_info);
+ } else {
+ smi_inc_stat(smi_info, events);
+
+ /*
+ * Do this before we deliver the message
+ * because delivering the message releases the
+ * lock and something else can mess with the
+ * state.
+ */
+ handle_flags(smi_info);
+
+ deliver_recv_msg(smi_info, msg);
+ }
+ break;
+ }
+
+ case SI_GETTING_MESSAGES:
+ {
+ smi_info->curr_msg->rsp_size
+ = smi_info->handlers->get_result(
+ smi_info->si_sm,
+ smi_info->curr_msg->rsp,
+ IPMI_MAX_MSG_LENGTH);
+
+ /*
+		 * Do this here because deliver_recv_msg() releases the
+ * lock, and a new message can be put in during the
+ * time the lock is released.
+ */
+ msg = smi_info->curr_msg;
+ smi_info->curr_msg = NULL;
+ if (msg->rsp[2] != 0) {
+ /* Error getting event, probably done. */
+ msg->done(msg);
+
+ /* Take off the msg flag. */
+ smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+ handle_flags(smi_info);
+ } else {
+ smi_inc_stat(smi_info, incoming_messages);
+
+ /*
+ * Do this before we deliver the message
+ * because delivering the message releases the
+ * lock and something else can mess with the
+ * state.
+ */
+ handle_flags(smi_info);
+
+ deliver_recv_msg(smi_info, msg);
+ }
+ break;
+ }
+
+ case SI_ENABLE_INTERRUPTS1:
+ {
+ unsigned char msg[4];
+
+ /* We got the flags from the SMI, now handle them. */
+ smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+ if (msg[2] != 0) {
+ dev_warn(smi_info->dev, "Could not enable interrupts"
+ ", failed get, using polled mode.\n");
+ smi_info->si_state = SI_NORMAL;
+ } else {
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = (msg[3] |
+ IPMI_BMC_RCV_MSG_INTR |
+ IPMI_BMC_EVT_MSG_INTR);
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm, msg, 3);
+ smi_info->si_state = SI_ENABLE_INTERRUPTS2;
+ }
+ break;
+ }
+
+ case SI_ENABLE_INTERRUPTS2:
+ {
+ unsigned char msg[4];
+
+ /* We got the flags from the SMI, now handle them. */
+ smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+ if (msg[2] != 0)
+ dev_warn(smi_info->dev, "Could not enable interrupts"
+ ", failed set, using polled mode.\n");
+ else
+ smi_info->interrupt_disabled = 0;
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+
+ case SI_DISABLE_INTERRUPTS1:
+ {
+ unsigned char msg[4];
+
+ /* We got the flags from the SMI, now handle them. */
+ smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+ if (msg[2] != 0) {
+ dev_warn(smi_info->dev, "Could not disable interrupts"
+ ", failed get.\n");
+ smi_info->si_state = SI_NORMAL;
+ } else {
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = (msg[3] &
+ ~(IPMI_BMC_RCV_MSG_INTR |
+ IPMI_BMC_EVT_MSG_INTR));
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm, msg, 3);
+ smi_info->si_state = SI_DISABLE_INTERRUPTS2;
+ }
+ break;
+ }
+
+ case SI_DISABLE_INTERRUPTS2:
+ {
+ unsigned char msg[4];
+
+ /* We got the flags from the SMI, now handle them. */
+ smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
+ if (msg[2] != 0) {
+ dev_warn(smi_info->dev, "Could not disable interrupts"
+ ", failed set.\n");
+ }
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+ }
+}
+
+/*
+ * Called on timeouts and events. Timeouts should pass the elapsed
+ * time, interrupts should pass in zero. Must be called with
+ * si_lock held and interrupts disabled.
+ */
+static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
+ int time)
+{
+ enum si_sm_result si_sm_result;
+
+ restart:
+ /*
+ * There used to be a loop here that waited a little while
+ * (around 25us) before giving up. That turned out to be
+ * pointless, the minimum delays I was seeing were in the 300us
+ * range, which is far too long to wait in an interrupt. So
+ * we just run until the state machine tells us something
+ * happened or it needs a delay.
+ */
+ si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
+ time = 0;
+ while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
+ si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+
+ if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
+ smi_inc_stat(smi_info, complete_transactions);
+
+ handle_transaction_done(smi_info);
+ si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+ } else if (si_sm_result == SI_SM_HOSED) {
+ smi_inc_stat(smi_info, hosed_count);
+
+ /*
+		 * Do this before return_hosed_msg(), because that
+		 * releases the lock.
+ */
+ smi_info->si_state = SI_NORMAL;
+ if (smi_info->curr_msg != NULL) {
+ /*
+ * If we were handling a user message, format
+ * a response to send to the upper layer to
+ * tell it about the error.
+ */
+ return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
+ }
+ si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+ }
+
+ /*
+ * We prefer handling attn over new messages. But don't do
+ * this if there is not yet an upper layer to handle anything.
+ */
+ if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
+ unsigned char msg[2];
+
+ smi_inc_stat(smi_info, attentions);
+
+ /*
+		 * Got an attn, send down a get message flags to see
+ * what's causing it. It would be better to handle
+ * this in the upper layer, but due to the way
+ * interrupts work with the SMI, that's not really
+ * possible.
+ */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+ goto restart;
+ }
+
+ /* If we are currently idle, try to start the next message. */
+ if (si_sm_result == SI_SM_IDLE) {
+ smi_inc_stat(smi_info, idles);
+
+ si_sm_result = start_next_msg(smi_info);
+ if (si_sm_result != SI_SM_IDLE)
+ goto restart;
+ }
+
+ if ((si_sm_result == SI_SM_IDLE)
+ && (atomic_read(&smi_info->req_events))) {
+ /*
+ * We are idle and the upper layer requested that I fetch
+ * events, so do so.
+ */
+ atomic_set(&smi_info->req_events, 0);
+
+ smi_info->curr_msg = ipmi_alloc_smi_msg();
+ if (!smi_info->curr_msg)
+ goto out;
+
+ smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm,
+ smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_EVENTS;
+ goto restart;
+ }
+ out:
+ return si_sm_result;
+}
+
+static void sender(void *send_info,
+ struct ipmi_smi_msg *msg,
+ int priority)
+{
+ struct smi_info *smi_info = send_info;
+ enum si_sm_result result;
+ unsigned long flags;
+#ifdef DEBUG_TIMING
+ struct timeval t;
+#endif
+
+ if (atomic_read(&smi_info->stop_operation)) {
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 3;
+ deliver_recv_msg(smi_info, msg);
+ return;
+ }
+
+#ifdef DEBUG_TIMING
+ do_gettimeofday(&t);
+	printk(KERN_DEBUG "**Enqueue: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
+#endif
+
+ /*
+ * last_timeout_jiffies is updated here to avoid
+ * smi_timeout() handler passing very large time_diff
+ * value to smi_event_handler() that causes
+ * the send command to abort.
+ */
+ smi_info->last_timeout_jiffies = jiffies;
+
+ mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+
+ if (smi_info->thread)
+ wake_up_process(smi_info->thread);
+
+ if (smi_info->run_to_completion) {
+ /*
+ * If we are running to completion, then throw it in
+ * the list and run transactions until everything is
+ * clear. Priority doesn't matter here.
+ */
+
+ /*
+ * Run to completion means we are single-threaded, no
+ * need for locks.
+ */
+ list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
+
+ result = smi_event_handler(smi_info, 0);
+ while (result != SI_SM_IDLE) {
+ udelay(SI_SHORT_TIMEOUT_USEC);
+ result = smi_event_handler(smi_info,
+ SI_SHORT_TIMEOUT_USEC);
+ }
+ return;
+ }
+
+ spin_lock_irqsave(&smi_info->msg_lock, flags);
+ if (priority > 0)
+ list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
+ else
+ list_add_tail(&msg->link, &smi_info->xmit_msgs);
+ spin_unlock_irqrestore(&smi_info->msg_lock, flags);
+
+ spin_lock_irqsave(&smi_info->si_lock, flags);
+ if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
+ start_next_msg(smi_info);
+ spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
+static void set_run_to_completion(void *send_info, int i_run_to_completion)
+{
+ struct smi_info *smi_info = send_info;
+ enum si_sm_result result;
+
+ smi_info->run_to_completion = i_run_to_completion;
+ if (i_run_to_completion) {
+ result = smi_event_handler(smi_info, 0);
+ while (result != SI_SM_IDLE) {
+ udelay(SI_SHORT_TIMEOUT_USEC);
+ result = smi_event_handler(smi_info,
+ SI_SHORT_TIMEOUT_USEC);
+ }
+ }
+}
+
+/*
+ * Use -1 as the nsec value of the busy-waiting timespec to indicate that
+ * we are spinning in kipmid looking for something and not delaying
+ * between checks.
+ */
+static inline void ipmi_si_set_not_busy(struct timespec *ts)
+{
+ ts->tv_nsec = -1;
+}
+static inline int ipmi_si_is_busy(struct timespec *ts)
+{
+ return ts->tv_nsec != -1;
+}
+
+static int ipmi_thread_busy_wait(enum si_sm_result smi_result,
+ const struct smi_info *smi_info,
+ struct timespec *busy_until)
+{
+ unsigned int max_busy_us = 0;
+
+ if (smi_info->intf_num < num_max_busy_us)
+ max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
+ if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
+ ipmi_si_set_not_busy(busy_until);
+ else if (!ipmi_si_is_busy(busy_until)) {
+ getnstimeofday(busy_until);
+ timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
+ } else {
+ struct timespec now;
+ getnstimeofday(&now);
+ if (unlikely(timespec_compare(&now, busy_until) > 0)) {
+ ipmi_si_set_not_busy(busy_until);
+ return 0;
+ }
+ }
+ return 1;
+}
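+
+/*
+ * Illustrative timeline, assuming kipmid_max_busy_us=500 for this
+ * interface: the first SI_SM_CALL_WITH_DELAY result stamps busy_until
+ * at now + 500 usec.  Until that deadline passes this returns 1 and
+ * kipmid only yields with schedule(); once it expires this returns 0
+ * and the thread sleeps in schedule_timeout_interruptible() instead.
+ */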
+
+
+/*
+ * A busy-waiting loop for speeding up IPMI operation.
+ *
+ * Lousy hardware makes this hard. This is only enabled for systems
+ * that are not BT and do not have interrupts.  It spins on an
+ * operation until the operation completes or max_busy tells it to stop
+ * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
+ * Documentation/IPMI.txt for details.
+ */
+static int ipmi_thread(void *data)
+{
+ struct smi_info *smi_info = data;
+ unsigned long flags;
+ enum si_sm_result smi_result;
+ struct timespec busy_until;
+
+ ipmi_si_set_not_busy(&busy_until);
+ set_user_nice(current, 19);
+ while (!kthread_should_stop()) {
+ int busy_wait;
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+ smi_result = smi_event_handler(smi_info, 0);
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
+ &busy_until);
+ if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+ ; /* do nothing */
+ else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
+ schedule();
+ else if (smi_result == SI_SM_IDLE)
+ schedule_timeout_interruptible(100);
+ else
+ schedule_timeout_interruptible(1);
+ }
+ return 0;
+}
+
+
+static void poll(void *send_info)
+{
+ struct smi_info *smi_info = send_info;
+ unsigned long flags;
+
+ /*
+ * Make sure there is some delay in the poll loop so we can
+ * drive time forward and timeout things.
+ */
+ udelay(10);
+ spin_lock_irqsave(&smi_info->si_lock, flags);
+ smi_event_handler(smi_info, 10);
+ spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
+static void request_events(void *send_info)
+{
+ struct smi_info *smi_info = send_info;
+
+ if (atomic_read(&smi_info->stop_operation) ||
+ !smi_info->has_event_buffer)
+ return;
+
+ atomic_set(&smi_info->req_events, 1);
+}
+
+static int initialized;
+
+static void smi_timeout(unsigned long data)
+{
+ struct smi_info *smi_info = (struct smi_info *) data;
+ enum si_sm_result smi_result;
+ unsigned long flags;
+ unsigned long jiffies_now;
+ long time_diff;
+ long timeout;
+#ifdef DEBUG_TIMING
+ struct timeval t;
+#endif
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+#ifdef DEBUG_TIMING
+ do_gettimeofday(&t);
+	printk(KERN_DEBUG "**Timer: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
+#endif
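+	/*
+	 * Convert the jiffies elapsed since the last run into usec for
+	 * the state machine; e.g. 2 jiffies at HZ=100 is 20000 usec.
+	 */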
+ jiffies_now = jiffies;
+ time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
+ * SI_USEC_PER_JIFFY);
+ smi_result = smi_event_handler(smi_info, time_diff);
+
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+
+ smi_info->last_timeout_jiffies = jiffies_now;
+
+ if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+ /* Running with interrupts, only do long timeouts. */
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
+ smi_inc_stat(smi_info, long_timeouts);
+ goto do_mod_timer;
+ }
+
+ /*
+ * If the state machine asks for a short delay, then shorten
+ * the timer timeout.
+ */
+ if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ smi_inc_stat(smi_info, short_timeouts);
+ timeout = jiffies + 1;
+ } else {
+ smi_inc_stat(smi_info, long_timeouts);
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
+ }
+
+ do_mod_timer:
+ if (smi_result != SI_SM_IDLE)
+ mod_timer(&(smi_info->si_timer), timeout);
+}
+
+static irqreturn_t si_irq_handler(int irq, void *data)
+{
+ struct smi_info *smi_info = data;
+ unsigned long flags;
+#ifdef DEBUG_TIMING
+ struct timeval t;
+#endif
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+
+ smi_inc_stat(smi_info, interrupts);
+
+#ifdef DEBUG_TIMING
+ do_gettimeofday(&t);
+	printk(KERN_DEBUG "**Interrupt: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
+#endif
+ smi_event_handler(smi_info, 0);
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t si_bt_irq_handler(int irq, void *data)
+{
+ struct smi_info *smi_info = data;
+ /* We need to clear the IRQ flag for the BT interface. */
+ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
+ IPMI_BT_INTMASK_CLEAR_IRQ_BIT
+ | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+ return si_irq_handler(irq, data);
+}
+
+static int smi_start_processing(void *send_info,
+ ipmi_smi_t intf)
+{
+ struct smi_info *new_smi = send_info;
+ int enable = 0;
+
+ new_smi->intf = intf;
+
+ /* Try to claim any interrupts. */
+ if (new_smi->irq_setup)
+ new_smi->irq_setup(new_smi);
+
+ /* Set up the timer that drives the interface. */
+	setup_timer(&new_smi->si_timer, smi_timeout, (unsigned long)new_smi);
+ new_smi->last_timeout_jiffies = jiffies;
+ mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+
+ /*
+ * Check if the user forcefully enabled the daemon.
+ */
+ if (new_smi->intf_num < num_force_kipmid)
+ enable = force_kipmid[new_smi->intf_num];
+ /*
+ * The BT interface is efficient enough to not need a thread,
+ * and there is no need for a thread if we have interrupts.
+ */
+ else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
+ enable = 1;
+
+ if (enable) {
+ new_smi->thread = kthread_run(ipmi_thread, new_smi,
+ "kipmi%d", new_smi->intf_num);
+ if (IS_ERR(new_smi->thread)) {
+ dev_notice(new_smi->dev, "Could not start"
+ " kernel thread due to error %ld, only using"
+ " timers to drive the interface\n",
+ PTR_ERR(new_smi->thread));
+ new_smi->thread = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+ struct smi_info *smi = send_info;
+
+ data->addr_src = smi->addr_source;
+ data->dev = smi->dev;
+ data->addr_info = smi->addr_info;
+ get_device(smi->dev);
+
+ return 0;
+}
+
+static void set_maintenance_mode(void *send_info, int enable)
+{
+ struct smi_info *smi_info = send_info;
+
+ if (!enable)
+ atomic_set(&smi_info->req_events, 0);
+}
+
+static struct ipmi_smi_handlers handlers = {
+ .owner = THIS_MODULE,
+ .start_processing = smi_start_processing,
+ .get_smi_info = get_smi_info,
+ .sender = sender,
+ .request_events = request_events,
+ .set_maintenance_mode = set_maintenance_mode,
+ .set_run_to_completion = set_run_to_completion,
+ .poll = poll,
+};
+
+/*
+ * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
+ * a default IO port, and 1 ACPI/SPMI address.  SI_MAX_PARMS bounds the
+ * per-parameter arrays below.
+ */
+
+static LIST_HEAD(smi_infos);
+static DEFINE_MUTEX(smi_infos_lock);
+static int smi_num; /* Used to sequence the SMIs */
+
+#define DEFAULT_REGSPACING 1
+#define DEFAULT_REGSIZE 1
+
+static int si_trydefaults = 1;
+static char *si_type[SI_MAX_PARMS];
+#define MAX_SI_TYPE_STR 30
+static char si_type_str[MAX_SI_TYPE_STR];
+static unsigned long addrs[SI_MAX_PARMS];
+static unsigned int num_addrs;
+static unsigned int ports[SI_MAX_PARMS];
+static unsigned int num_ports;
+static int irqs[SI_MAX_PARMS];
+static unsigned int num_irqs;
+static int regspacings[SI_MAX_PARMS];
+static unsigned int num_regspacings;
+static int regsizes[SI_MAX_PARMS];
+static unsigned int num_regsizes;
+static int regshifts[SI_MAX_PARMS];
+static unsigned int num_regshifts;
+static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
+static unsigned int num_slave_addrs;
+
+#define IPMI_IO_ADDR_SPACE 0
+#define IPMI_MEM_ADDR_SPACE 1
+static char *addr_space_to_str[] = { "i/o", "mem" };
+
+static int hotmod_handler(const char *val, struct kernel_param *kp);
+
+module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
+MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
+ " Documentation/IPMI.txt in the kernel sources for the"
+ " gory details.");
+
+module_param_named(trydefaults, si_trydefaults, bool, 0);
+MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
+		 " default scan of the KCS and SMIC interfaces at the"
+		 " standard addresses");
+module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
+MODULE_PARM_DESC(type, "Defines the type of each interface, each"
+		 " interface separated by commas.  The types are 'kcs',"
+		 " 'smic', and 'bt'.  For example type=kcs,bt will set"
+		 " the first interface to kcs and the second to bt");
+module_param_array(addrs, ulong, &num_addrs, 0);
+MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
+ " addresses separated by commas. Only use if an interface"
+ " is in memory. Otherwise, set it to zero or leave"
+ " it blank.");
+module_param_array(ports, uint, &num_ports, 0);
+MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
+ " addresses separated by commas. Only use if an interface"
+ " is a port. Otherwise, set it to zero or leave"
+ " it blank.");
+module_param_array(irqs, int, &num_irqs, 0);
+MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
+		 " interrupts separated by commas.  Only use if an interface"
+		 " has an interrupt.  Otherwise, set it to zero or leave"
+		 " it blank.");
+module_param_array(regspacings, int, &num_regspacings, 0);
+MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
+ " and each successive register used by the interface. For"
+ " instance, if the start address is 0xca2 and the spacing"
+ " is 2, then the second address is at 0xca4. Defaults"
+ " to 1.");
+module_param_array(regsizes, int, &num_regsizes, 0);
+MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
+		 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
+		 " 16-bit, 32-bit, or 64-bit register.  Use this if"
+		 " the 8-bit IPMI register has to be read from a larger"
+		 " register.");
+module_param_array(regshifts, int, &num_regshifts, 0);
+MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
+		 " IPMI register, in bits.  For instance, if the data"
+		 " is read from a 32-bit word and the IPMI data is in"
+		 " bits 8-15, then the shift would be 8.");
+module_param_array(slave_addrs, int, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
+ " the controller. Normally this is 0x20, but can be"
+ " overridden by this parm. This is an array indexed"
+ " by interface number.");
+module_param_array(force_kipmid, int, &num_force_kipmid, 0);
+MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
+		 " disabled (0).  Normally the IPMI driver auto-detects"
+		 " this, but the value may be overridden by this parm.");
+module_param(unload_when_empty, int, 0);
+MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
+ " specified or found, default is 1. Setting to 0"
+ " is useful for hot add of devices using hotmod.");
+module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
+MODULE_PARM_DESC(kipmid_max_busy_us,
+ "Max time (in microseconds) to busy-wait for IPMI data before"
+ " sleeping. 0 (default) means to wait forever. Set to 100-500"
+ " if kipmid is using up a lot of CPU time.");
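+
+/*
+ * Illustrative (hypothetical) usage of the parameters above:
+ *
+ *   modprobe ipmi_si type=kcs,bt ports=0xca2,0xe4 regspacings=4,1 irqs=0,9
+ *
+ * sets up a polled KCS interface at I/O port 0xca2 with a register
+ * spacing of 4, and a BT interface at 0xe4 using IRQ 9.
+ */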
+
+
+static void std_irq_cleanup(struct smi_info *info)
+{
+ if (info->si_type == SI_BT)
+ /* Disable the interrupt in the BT interface. */
+ info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
+ free_irq(info->irq, info);
+}
+
+static int std_irq_setup(struct smi_info *info)
+{
+ int rv;
+
+ if (!info->irq)
+ return 0;
+
+ if (info->si_type == SI_BT) {
+ rv = request_irq(info->irq,
+ si_bt_irq_handler,
+ IRQF_SHARED | IRQF_DISABLED,
+ DEVICE_NAME,
+ info);
+ if (!rv)
+ /* Enable the interrupt in the BT interface. */
+ info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
+ IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+ } else
+ rv = request_irq(info->irq,
+ si_irq_handler,
+ IRQF_SHARED | IRQF_DISABLED,
+ DEVICE_NAME,
+ info);
+ if (rv) {
+ dev_warn(info->dev, "%s unable to claim interrupt %d,"
+ " running polled\n",
+ DEVICE_NAME, info->irq);
+ info->irq = 0;
+ } else {
+ info->irq_cleanup = std_irq_cleanup;
+ dev_info(info->dev, "Using irq %d\n", info->irq);
+ }
+
+ return rv;
+}
+
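+/*
+ * Port I/O register access.  Register N of the interface lives at
+ * addr_data + N * regspacing; with an illustrative base of 0xca2 and a
+ * spacing of 4, register 1 is read at 0xca6.  The 16- and 32-bit
+ * variants use regshift to pull the IPMI byte out of the wider word.
+ */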
+static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return inb(addr + (offset * io->regspacing));
+}
+
+static void port_outb(struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outb(b, addr + (offset * io->regspacing));
+}
+
+static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outw(struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outw(b << io->regshift, addr + (offset * io->regspacing));
+}
+
+static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outl(struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outl(b << io->regshift, addr+(offset * io->regspacing));
+}
+
+static void port_cleanup(struct smi_info *info)
+{
+ unsigned int addr = info->io.addr_data;
+ int idx;
+
+ if (addr) {
+ for (idx = 0; idx < info->io_size; idx++)
+ release_region(addr + idx * info->io.regspacing,
+ info->io.regsize);
+ }
+}
+
+static int port_setup(struct smi_info *info)
+{
+ unsigned int addr = info->io.addr_data;
+ int idx;
+
+ if (!addr)
+ return -ENODEV;
+
+ info->io_cleanup = port_cleanup;
+
+ /*
+ * Figure out the actual inb/inw/inl/etc routine to use based
+ * upon the register size.
+ */
+ switch (info->io.regsize) {
+ case 1:
+ info->io.inputb = port_inb;
+ info->io.outputb = port_outb;
+ break;
+ case 2:
+ info->io.inputb = port_inw;
+ info->io.outputb = port_outw;
+ break;
+ case 4:
+ info->io.inputb = port_inl;
+ info->io.outputb = port_outl;
+ break;
+ default:
+ dev_warn(info->dev, "Invalid register size: %d\n",
+ info->io.regsize);
+ return -EINVAL;
+ }
+
+ /*
+ * Some BIOSes reserve disjoint I/O regions in their ACPI
+ * tables. This causes problems when trying to register the
+ * entire I/O region. Therefore we must register each I/O
+ * port separately.
+ */
+ for (idx = 0; idx < info->io_size; idx++) {
+ if (request_region(addr + idx * info->io.regspacing,
+ info->io.regsize, DEVICE_NAME) == NULL) {
+ /* Undo allocations */
+ while (idx--) {
+ release_region(addr + idx * info->io.regspacing,
+ info->io.regsize);
+ }
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
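+/*
+ * Memory-mapped register access.  The layout mirrors the port helpers:
+ * register N is at addr + N * regspacing, and regshift extracts the
+ * IPMI byte from wider registers.
+ */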
+static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
+{
+ return readb((io->addr)+(offset * io->regspacing));
+}
+
+static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeb(b, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
+{
+ return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+	writew(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
+{
+ return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+#ifdef readq
+static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
+{
+ return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void mem_outq(struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+#endif
+
+static void mem_cleanup(struct smi_info *info)
+{
+ unsigned long addr = info->io.addr_data;
+ int mapsize;
+
+ if (info->io.addr) {
+ iounmap(info->io.addr);
+
+ mapsize = ((info->io_size * info->io.regspacing)
+ - (info->io.regspacing - info->io.regsize));
+
+ release_mem_region(addr, mapsize);
+ }
+}
+
+static int mem_setup(struct smi_info *info)
+{
+ unsigned long addr = info->io.addr_data;
+ int mapsize;
+
+ if (!addr)
+ return -ENODEV;
+
+ info->io_cleanup = mem_cleanup;
+
+ /*
+ * Figure out the actual readb/readw/readl/etc routine to use based
+ * upon the register size.
+ */
+ switch (info->io.regsize) {
+ case 1:
+ info->io.inputb = intf_mem_inb;
+ info->io.outputb = intf_mem_outb;
+ break;
+ case 2:
+ info->io.inputb = intf_mem_inw;
+ info->io.outputb = intf_mem_outw;
+ break;
+ case 4:
+ info->io.inputb = intf_mem_inl;
+ info->io.outputb = intf_mem_outl;
+ break;
+#ifdef readq
+ case 8:
+ info->io.inputb = mem_inq;
+ info->io.outputb = mem_outq;
+ break;
+#endif
+ default:
+ dev_warn(info->dev, "Invalid register size: %d\n",
+ info->io.regsize);
+ return -EINVAL;
+ }
+
+ /*
+ * Calculate the total amount of memory to claim. This is an
+ * unusual looking calculation, but it avoids claiming any
+ * more memory than it has to. It will claim everything
+ * between the first address to the end of the last full
+ * register.
+ */
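+	/*
+	 * For example, with io_size = 2, regspacing = 4 and regsize = 1
+	 * this claims 2*4 - (4-1) = 5 bytes: both registers plus the gap
+	 * between them, but nothing beyond the last register.
+	 */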
+ mapsize = ((info->io_size * info->io.regspacing)
+ - (info->io.regspacing - info->io.regsize));
+
+ if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
+ return -EIO;
+
+ info->io.addr = ioremap(addr, mapsize);
+ if (info->io.addr == NULL) {
+ release_mem_region(addr, mapsize);
+ return -EIO;
+ }
+ return 0;
+}
+
+/*
+ * Parms come in as <op1>[:op2[:op3...]]. ops are:
+ * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
+ * Options are:
+ * rsp=<regspacing>
+ * rsi=<regsize>
+ * rsh=<regshift>
+ * irq=<irq>
+ * ipmb=<ipmb addr>
+ */
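+/*
+ * For example (illustrative values):
+ *
+ *   echo "add,kcs,i/o,0xca2,rsp=4,irq=9" \
+ *       > /sys/module/ipmi_si/parameters/hotmod
+ *
+ * adds a KCS interface at I/O port 0xca2 with a register spacing of 4
+ * on IRQ 9, and "remove,kcs,i/o,0xca2" tears it down again.
+ */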
+enum hotmod_op { HM_ADD, HM_REMOVE };
+struct hotmod_vals {
+ char *name;
+ int val;
+};
+static struct hotmod_vals hotmod_ops[] = {
+ { "add", HM_ADD },
+ { "remove", HM_REMOVE },
+ { NULL }
+};
+static struct hotmod_vals hotmod_si[] = {
+ { "kcs", SI_KCS },
+ { "smic", SI_SMIC },
+ { "bt", SI_BT },
+ { NULL }
+};
+static struct hotmod_vals hotmod_as[] = {
+ { "mem", IPMI_MEM_ADDR_SPACE },
+ { "i/o", IPMI_IO_ADDR_SPACE },
+ { NULL }
+};
+
+static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
+{
+ char *s;
+ int i;
+
+ s = strchr(*curr, ',');
+ if (!s) {
+ printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
+ return -EINVAL;
+ }
+ *s = '\0';
+ s++;
+	for (i = 0; v[i].name; i++) {
+ if (strcmp(*curr, v[i].name) == 0) {
+ *val = v[i].val;
+ *curr = s;
+ return 0;
+ }
+ }
+
+ printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
+ return -EINVAL;
+}
+
+static int check_hotmod_int_op(const char *curr, const char *option,
+ const char *name, int *val)
+{
+ char *n;
+
+ if (strcmp(curr, name) == 0) {
+ if (!option) {
+ printk(KERN_WARNING PFX
+ "No option given for '%s'\n",
+ curr);
+ return -EINVAL;
+ }
+ *val = simple_strtoul(option, &n, 0);
+ if ((*n != '\0') || (*option == '\0')) {
+ printk(KERN_WARNING PFX
+ "Bad option given for '%s'\n",
+ curr);
+ return -EINVAL;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+static struct smi_info *smi_info_alloc(void)
+{
+ struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+ if (info) {
+ spin_lock_init(&info->si_lock);
+ spin_lock_init(&info->msg_lock);
+ }
+ return info;
+}
+
+static int hotmod_handler(const char *val, struct kernel_param *kp)
+{
+ char *str = kstrdup(val, GFP_KERNEL);
+ int rv;
+ char *next, *curr, *s, *n, *o;
+ enum hotmod_op op;
+ enum si_type si_type;
+ int addr_space;
+ unsigned long addr;
+ int regspacing;
+ int regsize;
+ int regshift;
+ int irq;
+ int ipmb;
+ int ival;
+ int len;
+ struct smi_info *info;
+
+ if (!str)
+ return -ENOMEM;
+
+ /* Kill any trailing spaces, as we can get a "\n" from echo. */
+ len = strlen(str);
+ ival = len - 1;
+ while ((ival >= 0) && isspace(str[ival])) {
+ str[ival] = '\0';
+ ival--;
+ }
+
+ for (curr = str; curr; curr = next) {
+ regspacing = 1;
+ regsize = 1;
+ regshift = 0;
+ irq = 0;
+ ipmb = 0; /* Choose the default if not specified */
+
+ next = strchr(curr, ':');
+ if (next) {
+ *next = '\0';
+ next++;
+ }
+
+ rv = parse_str(hotmod_ops, &ival, "operation", &curr);
+ if (rv)
+			goto out;
+ op = ival;
+
+ rv = parse_str(hotmod_si, &ival, "interface type", &curr);
+ if (rv)
+			goto out;
+ si_type = ival;
+
+ rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
+ if (rv)
+			goto out;
+
+ s = strchr(curr, ',');
+ if (s) {
+ *s = '\0';
+ s++;
+ }
+ addr = simple_strtoul(curr, &n, 0);
+ if ((*n != '\0') || (*curr == '\0')) {
+			printk(KERN_WARNING PFX "Invalid hotmod address"
+			       " '%s'\n", curr);
+			rv = -EINVAL;
+			goto out;
+ }
+
+ while (s) {
+ curr = s;
+ s = strchr(curr, ',');
+ if (s) {
+ *s = '\0';
+ s++;
+ }
+ o = strchr(curr, '=');
+ if (o) {
+ *o = '\0';
+ o++;
+ }
+ rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "irq", &irq);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+
+ rv = -EINVAL;
+ printk(KERN_WARNING PFX
+ "Invalid hotmod option '%s'\n",
+ curr);
+ goto out;
+ }
+
+ if (op == HM_ADD) {
+ info = smi_info_alloc();
+ if (!info) {
+ rv = -ENOMEM;
+ goto out;
+ }
+
+ info->addr_source = SI_HOTMOD;
+ info->si_type = si_type;
+ info->io.addr_data = addr;
+ info->io.addr_type = addr_space;
+ if (addr_space == IPMI_MEM_ADDR_SPACE)
+ info->io_setup = mem_setup;
+ else
+ info->io_setup = port_setup;
+
+ info->io.addr = NULL;
+ info->io.regspacing = regspacing;
+ if (!info->io.regspacing)
+ info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regsize = regsize;
+ if (!info->io.regsize)
+				info->io.regsize = DEFAULT_REGSIZE;
+ info->io.regshift = regshift;
+ info->irq = irq;
+ if (info->irq)
+ info->irq_setup = std_irq_setup;
+ info->slave_addr = ipmb;
+
+ if (!add_smi(info)) {
+ if (try_smi_init(info))
+ cleanup_one_si(info);
+ } else {
+ kfree(info);
+ }
+ } else {
+ /* remove */
+ struct smi_info *e, *tmp_e;
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
+ if (e->io.addr_type != addr_space)
+ continue;
+ if (e->si_type != si_type)
+ continue;
+ if (e->io.addr_data == addr)
+ cleanup_one_si(e);
+ }
+ mutex_unlock(&smi_infos_lock);
+ }
+ }
+ rv = len;
+ out:
+ kfree(str);
+ return rv;
+}
+
+static int __devinit hardcode_find_bmc(void)
+{
+ int ret = -ENODEV;
+ int i;
+ struct smi_info *info;
+
+ for (i = 0; i < SI_MAX_PARMS; i++) {
+ if (!ports[i] && !addrs[i])
+ continue;
+
+ info = smi_info_alloc();
+ if (!info)
+ return -ENOMEM;
+
+ info->addr_source = SI_HARDCODED;
+ printk(KERN_INFO PFX "probing via hardcoded address\n");
+
+ if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
+ info->si_type = SI_KCS;
+ } else if (strcmp(si_type[i], "smic") == 0) {
+ info->si_type = SI_SMIC;
+ } else if (strcmp(si_type[i], "bt") == 0) {
+ info->si_type = SI_BT;
+ } else {
+			printk(KERN_WARNING PFX "Invalid interface type"
+			       " specified for interface %d: %s\n",
+			       i, si_type[i]);
+ kfree(info);
+ continue;
+ }
+
+ if (ports[i]) {
+ /* An I/O port */
+ info->io_setup = port_setup;
+ info->io.addr_data = ports[i];
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+ } else if (addrs[i]) {
+			/* A memory-mapped address */
+ info->io_setup = mem_setup;
+ info->io.addr_data = addrs[i];
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ } else {
+			printk(KERN_WARNING PFX "Interface type specified"
+			       " for interface %d, but the port and address"
+			       " were not set or were set to zero.\n", i);
+ kfree(info);
+ continue;
+ }
+
+ info->io.addr = NULL;
+ info->io.regspacing = regspacings[i];
+ if (!info->io.regspacing)
+ info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regsize = regsizes[i];
+ if (!info->io.regsize)
+			info->io.regsize = DEFAULT_REGSIZE;
+ info->io.regshift = regshifts[i];
+ info->irq = irqs[i];
+ if (info->irq)
+ info->irq_setup = std_irq_setup;
+ info->slave_addr = slave_addrs[i];
+
+ if (!add_smi(info)) {
+ if (try_smi_init(info))
+ cleanup_one_si(info);
+ ret = 0;
+ } else {
+ kfree(info);
+ }
+ }
+ return ret;
+}
+
+#ifdef CONFIG_ACPI
+
+#include <linux/acpi.h>
+
+/*
+ * Once we get an ACPI failure, we don't try any more, because we go
+ * through the tables sequentially. Once we don't find a table, there
+ * are no more.
+ */
+static int acpi_failure;
+
+/* For GPE-type interrupts. */
+static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
+ u32 gpe_number, void *context)
+{
+ struct smi_info *smi_info = context;
+ unsigned long flags;
+#ifdef DEBUG_TIMING
+ struct timeval t;
+#endif
+
+ spin_lock_irqsave(&(smi_info->si_lock), flags);
+
+ smi_inc_stat(smi_info, interrupts);
+
+#ifdef DEBUG_TIMING
+ do_gettimeofday(&t);
+	printk(KERN_DEBUG "**ACPI_GPE: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
+#endif
+ smi_event_handler(smi_info, 0);
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+
+ return ACPI_INTERRUPT_HANDLED;
+}
+
+static void acpi_gpe_irq_cleanup(struct smi_info *info)
+{
+ if (!info->irq)
+ return;
+
+ acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
+}
+
+static int acpi_gpe_irq_setup(struct smi_info *info)
+{
+ acpi_status status;
+
+ if (!info->irq)
+ return 0;
+
+ /* FIXME - is level triggered right? */
+ status = acpi_install_gpe_handler(NULL,
+ info->irq,
+ ACPI_GPE_LEVEL_TRIGGERED,
+ &ipmi_acpi_gpe,
+ info);
+ if (status != AE_OK) {
+ dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
+ " running polled\n", DEVICE_NAME, info->irq);
+ info->irq = 0;
+ return -EINVAL;
+ } else {
+ info->irq_cleanup = acpi_gpe_irq_cleanup;
+ dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
+ return 0;
+ }
+}
+
+/*
+ * Defined at
+ * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf
+ */
+struct SPMITable {
+ s8 Signature[4];
+ u32 Length;
+ u8 Revision;
+ u8 Checksum;
+ s8 OEMID[6];
+ s8 OEMTableID[8];
+ s8 OEMRevision[4];
+ s8 CreatorID[4];
+ s8 CreatorRevision[4];
+ u8 InterfaceType;
+ u8 IPMIlegacy;
+ s16 SpecificationRevision;
+
+ /*
+ * Bit 0 - SCI interrupt supported
+ * Bit 1 - I/O APIC/SAPIC
+ */
+ u8 InterruptType;
+
+ /*
+ * If bit 0 of InterruptType is set, then this is the SCI
+ * interrupt in the GPEx_STS register.
+ */
+ u8 GPE;
+
+ s16 Reserved;
+
+ /*
+ * If bit 1 of InterruptType is set, then this is the I/O
+ * APIC/SAPIC interrupt.
+ */
+ u32 GlobalSystemInterrupt;
+
+ /* The actual register address. */
+ struct acpi_generic_address addr;
+
+ u8 UID[4];
+
+ s8 spmi_id[1]; /* A '\0' terminated array starts here. */
+};
+
+static int __devinit try_init_spmi(struct SPMITable *spmi)
+{
+ struct smi_info *info;
+
+ if (spmi->IPMIlegacy != 1) {
+ printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+ return -ENODEV;
+ }
+
+ info = smi_info_alloc();
+ if (!info) {
+ printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
+ return -ENOMEM;
+ }
+
+ info->addr_source = SI_SPMI;
+ printk(KERN_INFO PFX "probing via SPMI\n");
+
+ /* Figure out the interface type. */
+ switch (spmi->InterfaceType) {
+ case 1: /* KCS */
+ info->si_type = SI_KCS;
+ break;
+ case 2: /* SMIC */
+ info->si_type = SI_SMIC;
+ break;
+ case 3: /* BT */
+ info->si_type = SI_BT;
+ break;
+ default:
+ printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
+ spmi->InterfaceType);
+ kfree(info);
+ return -EIO;
+ }
+
+ if (spmi->InterruptType & 1) {
+ /* We've got a GPE interrupt. */
+ info->irq = spmi->GPE;
+ info->irq_setup = acpi_gpe_irq_setup;
+ } else if (spmi->InterruptType & 2) {
+ /* We've got an APIC/SAPIC interrupt. */
+ info->irq = spmi->GlobalSystemInterrupt;
+ info->irq_setup = std_irq_setup;
+ } else {
+ /* Use the default interrupt setting. */
+ info->irq = 0;
+ info->irq_setup = NULL;
+ }
+
+ if (spmi->addr.bit_width) {
+ /* A (hopefully) properly formed register bit width. */
+ info->io.regspacing = spmi->addr.bit_width / 8;
+ } else {
+ info->io.regspacing = DEFAULT_REGSPACING;
+ }
+ info->io.regsize = info->io.regspacing;
+ info->io.regshift = spmi->addr.bit_offset;
+
+ if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ info->io_setup = port_setup;
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+ } else {
+ kfree(info);
+ printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
+ return -EIO;
+ }
+ info->io.addr_data = spmi->addr.address;
+
+	pr_info(PFX "SPMI: %s %#lx regsize %d spacing %d irq %d\n",
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ info->io.addr_data, info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ if (add_smi(info))
+ kfree(info);
+
+ return 0;
+}
+
+static void __devinit spmi_find_bmc(void)
+{
+ acpi_status status;
+ struct SPMITable *spmi;
+ int i;
+
+ if (acpi_disabled)
+ return;
+
+ if (acpi_failure)
+ return;
+
+ for (i = 0; ; i++) {
+ status = acpi_get_table(ACPI_SIG_SPMI, i+1,
+ (struct acpi_table_header **)&spmi);
+ if (status != AE_OK)
+ return;
+
+ try_init_spmi(spmi);
+ }
+}
+
+static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *dev_id)
+{
+ struct acpi_device *acpi_dev;
+ struct smi_info *info;
+ struct resource *res, *res_second;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long tmp;
+
+ acpi_dev = pnp_acpi_device(dev);
+ if (!acpi_dev)
+ return -ENODEV;
+
+ info = smi_info_alloc();
+ if (!info)
+ return -ENOMEM;
+
+ info->addr_source = SI_ACPI;
+ printk(KERN_INFO PFX "probing via ACPI\n");
+
+ handle = acpi_dev->handle;
+ info->addr_info.acpi_info.acpi_handle = handle;
+
+ /* _IFT tells us the interface type: KCS, BT, etc */
+ status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ goto err_free;
+
+ switch (tmp) {
+ case 1:
+ info->si_type = SI_KCS;
+ break;
+ case 2:
+ info->si_type = SI_SMIC;
+ break;
+ case 3:
+ info->si_type = SI_BT;
+ break;
+ default:
+		dev_info(&dev->dev, "unknown IPMI type %llu\n", tmp);
+ goto err_free;
+ }
+
+ res = pnp_get_resource(dev, IORESOURCE_IO, 0);
+ if (res) {
+ info->io_setup = port_setup;
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+ } else {
+ res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
+ if (res) {
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ }
+ }
+ if (!res) {
+ dev_err(&dev->dev, "no I/O or memory address\n");
+ goto err_free;
+ }
+ info->io.addr_data = res->start;
+
+ info->io.regspacing = DEFAULT_REGSPACING;
+ res_second = pnp_get_resource(dev,
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
+ IORESOURCE_IO : IORESOURCE_MEM,
+ 1);
+ if (res_second) {
+ if (res_second->start > info->io.addr_data)
+ info->io.regspacing = res_second->start - info->io.addr_data;
+ }
+	info->io.regsize = DEFAULT_REGSIZE;
+ info->io.regshift = 0;
+
+ /* If _GPE exists, use it; otherwise use standard interrupts */
+ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+ if (ACPI_SUCCESS(status)) {
+ info->irq = tmp;
+ info->irq_setup = acpi_gpe_irq_setup;
+ } else if (pnp_irq_valid(dev, 0)) {
+ info->irq = pnp_irq(dev, 0);
+ info->irq_setup = std_irq_setup;
+ }
+
+ info->dev = &dev->dev;
+ pnp_set_drvdata(dev, info);
+
+ dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
+ res, info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ if (add_smi(info))
+ goto err_free;
+
+ return 0;
+
+err_free:
+ kfree(info);
+ return -EINVAL;
+}
+
+static void __devexit ipmi_pnp_remove(struct pnp_dev *dev)
+{
+ struct smi_info *info = pnp_get_drvdata(dev);
+
+ cleanup_one_si(info);
+}
+
+static const struct pnp_device_id pnp_dev_table[] = {
+ {"IPI0001", 0},
+ {"", 0},
+};
+
+static struct pnp_driver ipmi_pnp_driver = {
+ .name = DEVICE_NAME,
+ .probe = ipmi_pnp_probe,
+ .remove = __devexit_p(ipmi_pnp_remove),
+ .id_table = pnp_dev_table,
+};
+#endif
+
+#ifdef CONFIG_DMI
+struct dmi_ipmi_data {
+ u8 type;
+ u8 addr_space;
+ unsigned long base_addr;
+ u8 irq;
+ u8 offset;
+ u8 slave_addr;
+};
+
+static int __devinit decode_dmi(const struct dmi_header *dm,
+ struct dmi_ipmi_data *dmi)
+{
+ const u8 *data = (const u8 *)dm;
+ unsigned long base_addr;
+ u8 reg_spacing;
+ u8 len = dm->length;
+
+ dmi->type = data[4];
+
+ memcpy(&base_addr, data+8, sizeof(unsigned long));
+ if (len >= 0x11) {
+ if (base_addr & 1) {
+ /* I/O */
+ base_addr &= 0xFFFE;
+ dmi->addr_space = IPMI_IO_ADDR_SPACE;
+ } else
+ /* Memory */
+ dmi->addr_space = IPMI_MEM_ADDR_SPACE;
+
+		/*
+		 * If bit 4 of byte 0x10 is set, then the lsb of the
+		 * address is odd.
+		 */
+ dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
+
+ dmi->irq = data[0x11];
+
+ /* The top two bits of byte 0x10 hold the register spacing. */
+ reg_spacing = (data[0x10] & 0xC0) >> 6;
+ switch (reg_spacing) {
+ case 0x00: /* Byte boundaries */
+ dmi->offset = 1;
+ break;
+ case 0x01: /* 32-bit boundaries */
+ dmi->offset = 4;
+ break;
+ case 0x02: /* 16-byte boundaries */
+ dmi->offset = 16;
+ break;
+ default:
+ /* Some other interface, just ignore it. */
+ return -EIO;
+ }
+ } else {
+ /* Old DMI spec. */
+ /*
+ * Note that technically, the lower bit of the base
+ * address should be 1 if the address is I/O and 0 if
+ * the address is in memory. So many systems get that
+ * wrong (and all that I have seen are I/O) so we just
+ * ignore that bit and assume I/O. Systems that use
+ * memory should use the newer spec, anyway.
+ */
+ dmi->base_addr = base_addr & 0xfffe;
+ dmi->addr_space = IPMI_IO_ADDR_SPACE;
+ dmi->offset = 1;
+ }
+
+ dmi->slave_addr = data[6];
+
+ return 0;
+}
+
+static void __devinit try_init_dmi(struct dmi_ipmi_data *ipmi_data)
+{
+ struct smi_info *info;
+
+ info = smi_info_alloc();
+ if (!info) {
+ printk(KERN_ERR PFX "Could not allocate SI data\n");
+ return;
+ }
+
+ info->addr_source = SI_SMBIOS;
+ printk(KERN_INFO PFX "probing via SMBIOS\n");
+
+ switch (ipmi_data->type) {
+ case 0x01: /* KCS */
+ info->si_type = SI_KCS;
+ break;
+ case 0x02: /* SMIC */
+ info->si_type = SI_SMIC;
+ break;
+ case 0x03: /* BT */
+ info->si_type = SI_BT;
+ break;
+ default:
+ kfree(info);
+ return;
+ }
+
+ switch (ipmi_data->addr_space) {
+ case IPMI_MEM_ADDR_SPACE:
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ break;
+
+ case IPMI_IO_ADDR_SPACE:
+ info->io_setup = port_setup;
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+ break;
+
+ default:
+ kfree(info);
+ printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
+ ipmi_data->addr_space);
+ return;
+ }
+ info->io.addr_data = ipmi_data->base_addr;
+
+ info->io.regspacing = ipmi_data->offset;
+ if (!info->io.regspacing)
+ info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regshift = 0;
+
+ info->slave_addr = ipmi_data->slave_addr;
+
+ info->irq = ipmi_data->irq;
+ if (info->irq)
+ info->irq_setup = std_irq_setup;
+
+ pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ info->io.addr_data, info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ if (add_smi(info))
+ kfree(info);
+}
+
+static void __devinit dmi_find_bmc(void)
+{
+ const struct dmi_device *dev = NULL;
+ struct dmi_ipmi_data data;
+ int rv;
+
+ while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
+ memset(&data, 0, sizeof(data));
+ rv = decode_dmi((const struct dmi_header *) dev->device_data,
+ &data);
+ if (!rv)
+ try_init_dmi(&data);
+ }
+}
+#endif /* CONFIG_DMI */
+
+#ifdef CONFIG_PCI
+
+#define PCI_ERMC_CLASSCODE 0x0C0700
+#define PCI_ERMC_CLASSCODE_MASK 0xffffff00
+#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
+#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
+#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
+#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
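+
+/*
+ * Per the PCI class code assignments, base class 0x0c sub-class 0x07
+ * is "IPMI interface"; the low (programming interface) byte selects
+ * SMIC, KCS, or BT, matching the _TYPE_ defines above.
+ */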
+
+#define PCI_HP_VENDOR_ID 0x103C
+#define PCI_MMC_DEVICE_ID 0x121A
+#define PCI_MMC_ADDR_CW 0x10
+
+static void ipmi_pci_cleanup(struct smi_info *info)
+{
+ struct pci_dev *pdev = info->addr_source_data;
+
+ pci_disable_device(pdev);
+}
+
+static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rv;
+ int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
+ struct smi_info *info;
+
+ info = smi_info_alloc();
+ if (!info)
+ return -ENOMEM;
+
+ info->addr_source = SI_PCI;
+	dev_info(&pdev->dev, "probing via PCI\n");
+
+ switch (class_type) {
+ case PCI_ERMC_CLASSCODE_TYPE_SMIC:
+ info->si_type = SI_SMIC;
+ break;
+
+ case PCI_ERMC_CLASSCODE_TYPE_KCS:
+ info->si_type = SI_KCS;
+ break;
+
+ case PCI_ERMC_CLASSCODE_TYPE_BT:
+ info->si_type = SI_BT;
+ break;
+
+ default:
+ kfree(info);
+ dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
+		return -EINVAL;
+ }
+
+ rv = pci_enable_device(pdev);
+ if (rv) {
+ dev_err(&pdev->dev, "couldn't enable PCI device\n");
+ kfree(info);
+ return rv;
+ }
+
+ info->addr_source_cleanup = ipmi_pci_cleanup;
+ info->addr_source_data = pdev;
+
+ if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
+ info->io_setup = port_setup;
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+ } else {
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ }
+ info->io.addr_data = pci_resource_start(pdev, 0);
+
+ info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regshift = 0;
+
+ info->irq = pdev->irq;
+ if (info->irq)
+ info->irq_setup = std_irq_setup;
+
+ info->dev = &pdev->dev;
+ pci_set_drvdata(pdev, info);
+
+ dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+ &pdev->resource[0], info->io.regsize, info->io.regspacing,
+ info->irq);
+
+	if (add_smi(info)) {
+		kfree(info);
+		pci_disable_device(pdev);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
+{
+ struct smi_info *info = pci_get_drvdata(pdev);
+ cleanup_one_si(info);
+}
+
+#ifdef CONFIG_PM
+static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int ipmi_pci_resume(struct pci_dev *pdev)
+{
+ return 0;
+}
+#endif
+
+static struct pci_device_id ipmi_pci_devices[] = {
+ { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
+ { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
+
+static struct pci_driver ipmi_pci_driver = {
+ .name = DEVICE_NAME,
+ .id_table = ipmi_pci_devices,
+ .probe = ipmi_pci_probe,
+ .remove = __devexit_p(ipmi_pci_remove),
+#ifdef CONFIG_PM
+ .suspend = ipmi_pci_suspend,
+ .resume = ipmi_pci_resume,
+#endif
+};
+#endif /* CONFIG_PCI */
+
+static struct of_device_id ipmi_match[];
+static int __devinit ipmi_probe(struct platform_device *dev)
+{
+#ifdef CONFIG_OF
+ const struct of_device_id *match;
+ struct smi_info *info;
+ struct resource resource;
+ const __be32 *regsize, *regspacing, *regshift;
+ struct device_node *np = dev->dev.of_node;
+ int ret;
+ int proplen;
+
+ dev_info(&dev->dev, "probing via device tree\n");
+
+ match = of_match_device(ipmi_match, &dev->dev);
+ if (!match)
+ return -EINVAL;
+
+ ret = of_address_to_resource(np, 0, &resource);
+ if (ret) {
+ dev_warn(&dev->dev, PFX "invalid address from OF\n");
+ return ret;
+ }
+
+ regsize = of_get_property(np, "reg-size", &proplen);
+ if (regsize && proplen != 4) {
+ dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
+ return -EINVAL;
+ }
+
+ regspacing = of_get_property(np, "reg-spacing", &proplen);
+ if (regspacing && proplen != 4) {
+ dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
+ return -EINVAL;
+ }
+
+ regshift = of_get_property(np, "reg-shift", &proplen);
+ if (regshift && proplen != 4) {
+ dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
+ return -EINVAL;
+ }
+
+ info = smi_info_alloc();
+
+ if (!info) {
+ dev_err(&dev->dev,
+ "could not allocate memory for OF probe\n");
+ return -ENOMEM;
+ }
+
+ info->si_type = (enum si_type) match->data;
+ info->addr_source = SI_DEVICETREE;
+ info->irq_setup = std_irq_setup;
+
+ if (resource.flags & IORESOURCE_IO) {
+ info->io_setup = port_setup;
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+ } else {
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ }
+
+ info->io.addr_data = resource.start;
+
+ info->io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
+ info->io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
+ info->io.regshift = regshift ? be32_to_cpup(regshift) : 0;
+
+ info->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
+ info->dev = &dev->dev;
+
+ dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
+ info->io.addr_data, info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ dev_set_drvdata(&dev->dev, info);
+
+ if (add_smi(info)) {
+ kfree(info);
+ return -EBUSY;
+ }
+#endif
+ return 0;
+}
+
+static int __devexit ipmi_remove(struct platform_device *dev)
+{
+#ifdef CONFIG_OF
+ cleanup_one_si(dev_get_drvdata(&dev->dev));
+#endif
+ return 0;
+}
+
+static struct of_device_id ipmi_match[] =
+{
+ { .type = "ipmi", .compatible = "ipmi-kcs",
+ .data = (void *)(unsigned long) SI_KCS },
+ { .type = "ipmi", .compatible = "ipmi-smic",
+ .data = (void *)(unsigned long) SI_SMIC },
+ { .type = "ipmi", .compatible = "ipmi-bt",
+ .data = (void *)(unsigned long) SI_BT },
+ {},
+};
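+
+/*
+ * A device tree node matched by the table above would look roughly
+ * like this (the address and property values are illustrative only):
+ *
+ *	bmc@ca2 {
+ *		device_type = "ipmi";
+ *		compatible = "ipmi-kcs";
+ *		reg = <0xca2 2>;
+ *		reg-size = <1>;
+ *		reg-spacing = <1>;
+ *	};
+ */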
+
+static struct platform_driver ipmi_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = ipmi_match,
+ },
+ .probe = ipmi_probe,
+ .remove = __devexit_p(ipmi_remove),
+};
+
+static int wait_for_msg_done(struct smi_info *smi_info)
+{
+ enum si_sm_result smi_result;
+
+ smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
+ for (;;) {
+ if (smi_result == SI_SM_CALL_WITH_DELAY ||
+ smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+ schedule_timeout_uninterruptible(1);
+ smi_result = smi_info->handlers->event(
+ smi_info->si_sm, 100);
+ } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+ smi_result = smi_info->handlers->event(
+ smi_info->si_sm, 0);
+ } else
+ break;
+ }
+ if (smi_result == SI_SM_HOSED)
+ /*
+ * We couldn't get the state machine to run, so whatever's at
+ * the port is probably not an IPMI SMI interface.
+ */
+ return -ENODEV;
+
+ return 0;
+}
+
+static int try_get_dev_id(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv = 0;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /*
+ * Do a Get Device ID command, since it comes back with some
+ * useful info.
+ */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv)
+ goto out;
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ /* Check and record info from the get device id, in case we need it. */
+ rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
+
+ out:
+ kfree(resp);
+ return rv;
+}
+
+static int try_enable_event_buffer(struct smi_info *smi_info)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv = 0;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+		printk(KERN_WARNING PFX "Error getting response from get"
+		       " global enables command; the event buffer is not"
+		       " enabled.\n");
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 4 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
+ resp[2] != 0) {
+ printk(KERN_WARNING PFX "Invalid return from get global"
+ " enables command, cannot enable the event buffer.\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (resp[3] & IPMI_BMC_EVT_MSG_BUFF)
+ /* buffer is already enabled, nothing to do. */
+ goto out;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+		printk(KERN_WARNING PFX "Error getting response from set"
+		       " global enables command; the event buffer is not"
+		       " enabled.\n");
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 3 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
+		printk(KERN_WARNING PFX "Invalid return from set global"
+		       " enables command, cannot enable the event buffer.\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (resp[2] != 0)
+ /*
+ * An error when setting the event buffer bit means
+ * that the event buffer is not supported.
+ */
+ rv = -ENOENT;
+ out:
+ kfree(resp);
+ return rv;
+}
+
+static int smi_type_proc_show(struct seq_file *m, void *v)
+{
+ struct smi_info *smi = m->private;
+
+ return seq_printf(m, "%s\n", si_to_str[smi->si_type]);
+}
+
+static int smi_type_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smi_type_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations smi_type_proc_ops = {
+ .open = smi_type_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int smi_si_stats_proc_show(struct seq_file *m, void *v)
+{
+ struct smi_info *smi = m->private;
+
+ seq_printf(m, "interrupts_enabled: %d\n",
+ smi->irq && !smi->interrupt_disabled);
+ seq_printf(m, "short_timeouts: %u\n",
+ smi_get_stat(smi, short_timeouts));
+ seq_printf(m, "long_timeouts: %u\n",
+ smi_get_stat(smi, long_timeouts));
+ seq_printf(m, "idles: %u\n",
+ smi_get_stat(smi, idles));
+ seq_printf(m, "interrupts: %u\n",
+ smi_get_stat(smi, interrupts));
+ seq_printf(m, "attentions: %u\n",
+ smi_get_stat(smi, attentions));
+ seq_printf(m, "flag_fetches: %u\n",
+ smi_get_stat(smi, flag_fetches));
+ seq_printf(m, "hosed_count: %u\n",
+ smi_get_stat(smi, hosed_count));
+ seq_printf(m, "complete_transactions: %u\n",
+ smi_get_stat(smi, complete_transactions));
+ seq_printf(m, "events: %u\n",
+ smi_get_stat(smi, events));
+ seq_printf(m, "watchdog_pretimeouts: %u\n",
+ smi_get_stat(smi, watchdog_pretimeouts));
+ seq_printf(m, "incoming_messages: %u\n",
+ smi_get_stat(smi, incoming_messages));
+ return 0;
+}
+
+static int smi_si_stats_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smi_si_stats_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations smi_si_stats_proc_ops = {
+ .open = smi_si_stats_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int smi_params_proc_show(struct seq_file *m, void *v)
+{
+ struct smi_info *smi = m->private;
+
+ return seq_printf(m,
+ "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+ si_to_str[smi->si_type],
+ addr_space_to_str[smi->io.addr_type],
+ smi->io.addr_data,
+ smi->io.regspacing,
+ smi->io.regsize,
+ smi->io.regshift,
+ smi->irq,
+ smi->slave_addr);
+}
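+
+/*
+ * With the format above, a typical line in the "params" file would
+ * read (values are illustrative):
+ *
+ *	kcs,i/o,0xca2,rsp=1,rsi=1,rsh=0,irq=0,ipmb=0
+ */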
+
+static int smi_params_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smi_params_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations smi_params_proc_ops = {
+ .open = smi_params_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * oem_data_avail_to_receive_msg_avail
+ * @info - smi_info structure with msg_flags set
+ *
+ * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
+ * Returns 1 indicating need to re-run handle_flags().
+ */
+static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
+{
+ smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
+ RECEIVE_MSG_AVAIL);
+ return 1;
+}
+
+/*
+ * setup_dell_poweredge_oem_data_handler
+ * @info - smi_info.device_id must be populated
+ *
+ * Systems that match, but have firmware version < 1.40 may assert
+ * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
+ * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
+ * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
+ * as RECEIVE_MSG_AVAIL instead.
+ *
+ * As Dell has no plans to release IPMI 1.5 firmware that will *ever*
+ * assert the OEM[012] bits, and since if it did the driver would have
+ * to change to handle that properly anyway, we don't actually check
+ * the firmware version.
+ * Device ID = 0x20 BMC on PowerEdge 8G servers
+ * Device Revision = 0x80
+ * Firmware Revision1 = 0x01 BMC version 1.40
+ * Firmware Revision2 = 0x40 BCD encoded
+ * IPMI Version = 0x51 IPMI 1.5
+ * Manufacturer ID = A2 02 00 Dell IANA
+ *
+ * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
+ * OEM0_DATA_AVAIL, which likewise needs to be treated as
+ * RECEIVE_MSG_AVAIL.
+ *
+ */
+#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
+#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
+#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
+#define DELL_IANA_MFR_ID 0x0002a2
+static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
+{
+ struct ipmi_device_id *id = &smi_info->device_id;
+ if (id->manufacturer_id == DELL_IANA_MFR_ID) {
+ if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
+ id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
+ id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
+ smi_info->oem_data_avail_handler =
+ oem_data_avail_to_receive_msg_avail;
+ } else if (ipmi_version_major(id) < 1 ||
+ (ipmi_version_major(id) == 1 &&
+ ipmi_version_minor(id) < 5)) {
+ smi_info->oem_data_avail_handler =
+ oem_data_avail_to_receive_msg_avail;
+ }
+ }
+}
+
+#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
+static void return_hosed_msg_badsize(struct smi_info *smi_info)
+{
+ struct ipmi_smi_msg *msg = smi_info->curr_msg;
+
+ /* Make it a response */
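+	/*
+	 * An IPMI response netfn is the request netfn with the low bit
+	 * set; in the (netfn << 2 | lun) encoding used here that is
+	 * bit 2, hence the "| 4".
+	 */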
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
+ msg->rsp_size = 3;
+ smi_info->curr_msg = NULL;
+ deliver_recv_msg(smi_info, msg);
+}
+
+/*
+ * dell_poweredge_bt_xaction_handler
+ * @info - smi_info.device_id must be populated
+ *
+ * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
+ * not respond to a Get SDR command if the length of the data
+ * requested is exactly 0x3A, which leads to command timeouts and no
+ * data returned. This intercepts such commands, and causes userspace
+ * callers to try again with a different-sized buffer, which succeeds.
+ */
+
+#define STORAGE_NETFN 0x0A
+#define STORAGE_CMD_GET_SDR 0x23
+static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
+ unsigned long unused,
+ void *in)
+{
+ struct smi_info *smi_info = in;
+ unsigned char *data = smi_info->curr_msg->data;
+ unsigned int size = smi_info->curr_msg->data_size;
+ if (size >= 8 &&
+ (data[0]>>2) == STORAGE_NETFN &&
+ data[1] == STORAGE_CMD_GET_SDR &&
+ data[7] == 0x3A) {
+ return_hosed_msg_badsize(smi_info);
+ return NOTIFY_STOP;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block dell_poweredge_bt_xaction_notifier = {
+ .notifier_call = dell_poweredge_bt_xaction_handler,
+};
+
+/*
+ * setup_dell_poweredge_bt_xaction_handler
+ * @info - smi_info.device_id must be filled in already
+ *
+ * Registers dell_poweredge_bt_xaction_notifier on matching systems
+ * so that the bad-size Get SDR requests described above are
+ * intercepted.
+ */
+static void
+setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
+{
+ struct ipmi_device_id *id = &smi_info->device_id;
+ if (id->manufacturer_id == DELL_IANA_MFR_ID &&
+ smi_info->si_type == SI_BT)
+ register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
+}
+
+/*
+ * setup_oem_data_handler
+ * @info - smi_info.device_id must be filled in already
+ *
+ * Fills in smi_info.oem_data_avail_handler
+ * when we know what function to use there.
+ */
+
+static void setup_oem_data_handler(struct smi_info *smi_info)
+{
+ setup_dell_poweredge_oem_data_handler(smi_info);
+}
+
+static void setup_xaction_handlers(struct smi_info *smi_info)
+{
+ setup_dell_poweredge_bt_xaction_handler(smi_info);
+}
+
+static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
+{
+ if (smi_info->intf) {
+ /*
+ * The timer and thread are only running if the
+ * interface has been started up and registered.
+ */
+ if (smi_info->thread != NULL)
+ kthread_stop(smi_info->thread);
+ del_timer_sync(&smi_info->si_timer);
+ }
+}
+
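+/*
+ * The legacy default addresses from the IPMI specification: KCS at
+ * I/O port 0xca2, SMIC at 0xca9, and BT at 0xe4.
+ */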
+static __devinitdata struct ipmi_default_vals
+{
+ int type;
+ int port;
+} ipmi_defaults[] =
+{
+ { .type = SI_KCS, .port = 0xca2 },
+ { .type = SI_SMIC, .port = 0xca9 },
+ { .type = SI_BT, .port = 0xe4 },
+ { .port = 0 }
+};
+
+static void __devinit default_find_bmc(void)
+{
+ struct smi_info *info;
+ int i;
+
+ for (i = 0; ; i++) {
+ if (!ipmi_defaults[i].port)
+ break;
+#ifdef CONFIG_PPC
+ if (check_legacy_ioport(ipmi_defaults[i].port))
+ continue;
+#endif
+ info = smi_info_alloc();
+ if (!info)
+ return;
+
+ info->addr_source = SI_DEFAULT;
+
+ info->si_type = ipmi_defaults[i].type;
+ info->io_setup = port_setup;
+ info->io.addr_data = ipmi_defaults[i].port;
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+
+ info->io.addr = NULL;
+ info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regshift = 0;
+
+ if (add_smi(info) == 0) {
+ if ((try_smi_init(info)) == 0) {
+ /* Found one... */
+ printk(KERN_INFO PFX "Found default %s"
+ " state machine at %s address 0x%lx\n",
+ si_to_str[info->si_type],
+ addr_space_to_str[info->io.addr_type],
+ info->io.addr_data);
+ } else
+ cleanup_one_si(info);
+ } else {
+ kfree(info);
+ }
+ }
+}
+
+static int is_new_interface(struct smi_info *info)
+{
+ struct smi_info *e;
+
+ list_for_each_entry(e, &smi_infos, link) {
+ if (e->io.addr_type != info->io.addr_type)
+ continue;
+ if (e->io.addr_data == info->io.addr_data)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int add_smi(struct smi_info *new_smi)
+{
+ int rv = 0;
+
+ printk(KERN_INFO PFX "Adding %s-specified %s state machine",
+ ipmi_addr_src_to_str[new_smi->addr_source],
+ si_to_str[new_smi->si_type]);
+ mutex_lock(&smi_infos_lock);
+ if (!is_new_interface(new_smi)) {
+ printk(KERN_CONT " duplicate interface\n");
+ rv = -EBUSY;
+ goto out_err;
+ }
+
+ printk(KERN_CONT "\n");
+
+ /* So we know not to free it unless we have allocated one. */
+ new_smi->intf = NULL;
+ new_smi->si_sm = NULL;
+ new_smi->handlers = NULL;
+
+ list_add_tail(&new_smi->link, &smi_infos);
+
+out_err:
+ mutex_unlock(&smi_infos_lock);
+ return rv;
+}
+
+static int try_smi_init(struct smi_info *new_smi)
+{
+ int rv = 0;
+ int i;
+
+ printk(KERN_INFO PFX "Trying %s-specified %s state"
+ " machine at %s address 0x%lx, slave address 0x%x,"
+ " irq %d\n",
+ ipmi_addr_src_to_str[new_smi->addr_source],
+ si_to_str[new_smi->si_type],
+ addr_space_to_str[new_smi->io.addr_type],
+ new_smi->io.addr_data,
+ new_smi->slave_addr, new_smi->irq);
+
+ switch (new_smi->si_type) {
+ case SI_KCS:
+ new_smi->handlers = &kcs_smi_handlers;
+ break;
+
+ case SI_SMIC:
+ new_smi->handlers = &smic_smi_handlers;
+ break;
+
+ case SI_BT:
+ new_smi->handlers = &bt_smi_handlers;
+ break;
+
+ default:
+ /* No support for anything else yet. */
+ rv = -EIO;
+ goto out_err;
+ }
+
+ /* Allocate the state machine's data and initialize it. */
+ new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
+ if (!new_smi->si_sm) {
+ printk(KERN_ERR PFX
+ "Could not allocate state machine memory\n");
+ rv = -ENOMEM;
+ goto out_err;
+ }
+ new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
+ &new_smi->io);
+
+ /* Now that we know the I/O size, we can set up the I/O. */
+ rv = new_smi->io_setup(new_smi);
+ if (rv) {
+ printk(KERN_ERR PFX "Could not set up I/O space\n");
+ goto out_err;
+ }
+
+ /* Do low-level detection first. */
+ if (new_smi->handlers->detect(new_smi->si_sm)) {
+ if (new_smi->addr_source)
+ printk(KERN_INFO PFX "Interface detection failed\n");
+ rv = -ENODEV;
+ goto out_err;
+ }
+
+ /*
+ * Attempt a get device id command. If it fails, we probably
+ * don't have a BMC here.
+ */
+ rv = try_get_dev_id(new_smi);
+ if (rv) {
+ if (new_smi->addr_source)
+ printk(KERN_INFO PFX "There appears to be no BMC"
+ " at this location\n");
+ goto out_err;
+ }
+
+ setup_oem_data_handler(new_smi);
+ setup_xaction_handlers(new_smi);
+
+ INIT_LIST_HEAD(&(new_smi->xmit_msgs));
+ INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
+ new_smi->curr_msg = NULL;
+ atomic_set(&new_smi->req_events, 0);
+ new_smi->run_to_completion = 0;
+ for (i = 0; i < SI_NUM_STATS; i++)
+ atomic_set(&new_smi->stats[i], 0);
+
+ new_smi->interrupt_disabled = 1;
+ atomic_set(&new_smi->stop_operation, 0);
+ new_smi->intf_num = smi_num;
+ smi_num++;
+
+ rv = try_enable_event_buffer(new_smi);
+ if (rv == 0)
+ new_smi->has_event_buffer = 1;
+
+ /*
+ * Start clearing the flags before we enable interrupts or the
+ * timer to avoid racing with the timer.
+ */
+ start_clear_flags(new_smi);
+ /* IRQ is defined to be set when non-zero. */
+ if (new_smi->irq)
+ new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
+
+ if (!new_smi->dev) {
+ /*
+ * If we don't already have a device from something
+ * else (like PCI), then register a new one.
+ */
+ new_smi->pdev = platform_device_alloc("ipmi_si",
+ new_smi->intf_num);
+ if (!new_smi->pdev) {
+ printk(KERN_ERR PFX
+ "Unable to allocate platform device\n");
+ goto out_err;
+ }
+ new_smi->dev = &new_smi->pdev->dev;
+ new_smi->dev->driver = &ipmi_driver.driver;
+
+ rv = platform_device_add(new_smi->pdev);
+ if (rv) {
+ printk(KERN_ERR PFX
+ "Unable to register system interface device:"
+ " %d\n",
+ rv);
+ goto out_err;
+ }
+ new_smi->dev_registered = 1;
+ }
+
+ rv = ipmi_register_smi(&handlers,
+ new_smi,
+ &new_smi->device_id,
+ new_smi->dev,
+ "bmc",
+ new_smi->slave_addr);
+ if (rv) {
+ dev_err(new_smi->dev, "Unable to register device: error %d\n",
+ rv);
+ goto out_err_stop_timer;
+ }
+
+ rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
+ &smi_type_proc_ops,
+ new_smi);
+ if (rv) {
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
+ goto out_err_stop_timer;
+ }
+
+ rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
+ &smi_si_stats_proc_ops,
+ new_smi);
+ if (rv) {
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
+ goto out_err_stop_timer;
+ }
+
+ rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
+ &smi_params_proc_ops,
+ new_smi);
+ if (rv) {
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
+ goto out_err_stop_timer;
+ }
+
+ dev_info(new_smi->dev, "IPMI %s interface initialized\n",
+ si_to_str[new_smi->si_type]);
+
+ return 0;
+
+ out_err_stop_timer:
+ atomic_inc(&new_smi->stop_operation);
+ wait_for_timer_and_thread(new_smi);
+
+ out_err:
+ new_smi->interrupt_disabled = 1;
+
+ if (new_smi->intf) {
+ ipmi_unregister_smi(new_smi->intf);
+ new_smi->intf = NULL;
+ }
+
+ if (new_smi->irq_cleanup) {
+ new_smi->irq_cleanup(new_smi);
+ new_smi->irq_cleanup = NULL;
+ }
+
+ /*
+	 * Wait until we know that we are out of any interrupt
+	 * handlers that might have been running before we freed
+	 * the interrupt.
+ */
+ synchronize_sched();
+
+ if (new_smi->si_sm) {
+ if (new_smi->handlers)
+ new_smi->handlers->cleanup(new_smi->si_sm);
+ kfree(new_smi->si_sm);
+ new_smi->si_sm = NULL;
+ }
+ if (new_smi->addr_source_cleanup) {
+ new_smi->addr_source_cleanup(new_smi);
+ new_smi->addr_source_cleanup = NULL;
+ }
+ if (new_smi->io_cleanup) {
+ new_smi->io_cleanup(new_smi);
+ new_smi->io_cleanup = NULL;
+ }
+
+ if (new_smi->dev_registered) {
+ platform_device_unregister(new_smi->pdev);
+ new_smi->dev_registered = 0;
+ }
+
+ return rv;
+}
+
+static int __devinit init_ipmi_si(void)
+{
+ int i;
+ char *str;
+ int rv;
+ struct smi_info *e;
+ enum ipmi_addr_src type = SI_INVALID;
+
+ if (initialized)
+ return 0;
+ initialized = 1;
+
+ rv = platform_driver_register(&ipmi_driver);
+ if (rv) {
+ printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
+ return rv;
+ }
+
+
+ /* Parse out the si_type string into its components. */
+ str = si_type_str;
+ if (*str != '\0') {
+ for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
+ si_type[i] = str;
+ str = strchr(str, ',');
+ if (str) {
+ *str = '\0';
+ str++;
+ } else {
+ break;
+ }
+ }
+ }
+
+ printk(KERN_INFO "IPMI System Interface driver.\n");
+
+ /* If the user gave us a device, they presumably want us to use it */
+ if (!hardcode_find_bmc())
+ return 0;
+
+#ifdef CONFIG_PCI
+ rv = pci_register_driver(&ipmi_pci_driver);
+ if (rv)
+ printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
+ else
+ pci_registered = 1;
+#endif
+
+#ifdef CONFIG_ACPI
+ pnp_register_driver(&ipmi_pnp_driver);
+ pnp_registered = 1;
+#endif
+
+#ifdef CONFIG_DMI
+ dmi_find_bmc();
+#endif
+
+#ifdef CONFIG_ACPI
+ spmi_find_bmc();
+#endif
+
+	/* We prefer devices with interrupts, but in the case of a machine
+	   with multiple BMCs we assume that there will be several instances
+	   of a given type, so if we succeed in registering one device of a
+	   type we also try to register everything else of the same type. */
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry(e, &smi_infos, link) {
+ /* Try to register a device if it has an IRQ and we either
+ haven't successfully registered a device yet or this
+ device has the same type as one we successfully registered */
+ if (e->irq && (!type || e->addr_source == type)) {
+ if (!try_smi_init(e)) {
+ type = e->addr_source;
+ }
+ }
+ }
+
+ /* type will only have been set if we successfully registered an si */
+ if (type) {
+ mutex_unlock(&smi_infos_lock);
+ return 0;
+ }
+
+ /* Fall back to the preferred device */
+
+ list_for_each_entry(e, &smi_infos, link) {
+ if (!e->irq && (!type || e->addr_source == type)) {
+ if (!try_smi_init(e)) {
+ type = e->addr_source;
+ }
+ }
+ }
+ mutex_unlock(&smi_infos_lock);
+
+ if (type)
+ return 0;
+
+ if (si_trydefaults) {
+ mutex_lock(&smi_infos_lock);
+ if (list_empty(&smi_infos)) {
+ /* No BMC was found, try defaults. */
+ mutex_unlock(&smi_infos_lock);
+ default_find_bmc();
+ } else
+ mutex_unlock(&smi_infos_lock);
+ }
+
+ mutex_lock(&smi_infos_lock);
+ if (unload_when_empty && list_empty(&smi_infos)) {
+ mutex_unlock(&smi_infos_lock);
+ cleanup_ipmi_si();
+ printk(KERN_WARNING PFX
+ "Unable to find any System Interface(s)\n");
+ return -ENODEV;
+ } else {
+ mutex_unlock(&smi_infos_lock);
+ return 0;
+ }
+}
+module_init(init_ipmi_si);
+
+static void cleanup_one_si(struct smi_info *to_clean)
+{
+ int rv = 0;
+ unsigned long flags;
+
+ if (!to_clean)
+ return;
+
+ list_del(&to_clean->link);
+
+ /* Tell the driver that we are shutting down. */
+ atomic_inc(&to_clean->stop_operation);
+
+ /*
+ * Make sure the timer and thread are stopped and will not run
+ * again.
+ */
+ wait_for_timer_and_thread(to_clean);
+
+ /*
+ * Timeouts are stopped, now make sure the interrupts are off
+ * for the device. A little tricky with locks to make sure
+ * there are no races.
+ */
+ spin_lock_irqsave(&to_clean->si_lock, flags);
+ while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
+ spin_unlock_irqrestore(&to_clean->si_lock, flags);
+ poll(to_clean);
+ schedule_timeout_uninterruptible(1);
+ spin_lock_irqsave(&to_clean->si_lock, flags);
+ }
+ disable_si_irq(to_clean);
+ spin_unlock_irqrestore(&to_clean->si_lock, flags);
+ while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
+ poll(to_clean);
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Clean up interrupts and make sure that everything is done. */
+ if (to_clean->irq_cleanup)
+ to_clean->irq_cleanup(to_clean);
+ while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
+ poll(to_clean);
+ schedule_timeout_uninterruptible(1);
+ }
+
+ if (to_clean->intf)
+ rv = ipmi_unregister_smi(to_clean->intf);
+
+ if (rv) {
+ printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
+ rv);
+ }
+
+ if (to_clean->handlers)
+ to_clean->handlers->cleanup(to_clean->si_sm);
+
+ kfree(to_clean->si_sm);
+
+ if (to_clean->addr_source_cleanup)
+ to_clean->addr_source_cleanup(to_clean);
+ if (to_clean->io_cleanup)
+ to_clean->io_cleanup(to_clean);
+
+ if (to_clean->dev_registered)
+ platform_device_unregister(to_clean->pdev);
+
+ kfree(to_clean);
+}
+
+static void cleanup_ipmi_si(void)
+{
+ struct smi_info *e, *tmp_e;
+
+ if (!initialized)
+ return;
+
+#ifdef CONFIG_PCI
+ if (pci_registered)
+ pci_unregister_driver(&ipmi_pci_driver);
+#endif
+#ifdef CONFIG_ACPI
+ if (pnp_registered)
+ pnp_unregister_driver(&ipmi_pnp_driver);
+#endif
+
+ platform_driver_unregister(&ipmi_driver);
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
+ cleanup_one_si(e);
+ mutex_unlock(&smi_infos_lock);
+}
+module_exit(cleanup_ipmi_si);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
+ " system interfaces.");
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
new file mode 100644
index 00000000..df89f734
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -0,0 +1,141 @@
+/*
+ * ipmi_si_sm.h
+ *
+ * State machine interface for low-level IPMI system management
+ * interface state machines. This code is the interface between
+ * the ipmi_smi code (that handles the policy of a KCS, SMIC, or
+ * BT interface) and the actual low-level state machine.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * This is defined by the state machines themselves, it is an opaque
+ * data type for them to use.
+ */
+struct si_sm_data;
+
+/*
+ * The structure for doing I/O in the state machine. The state
+ * machine doesn't have the actual I/O routines, they are done through
+ * this interface.
+ */
+struct si_sm_io {
+ unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset);
+ void (*outputb)(struct si_sm_io *io,
+ unsigned int offset,
+ unsigned char b);
+
+ /*
+ * Generic info used by the actual handling routines, the
+ * state machine shouldn't touch these.
+ */
+ void __iomem *addr;
+ int regspacing;
+ int regsize;
+ int regshift;
+ int addr_type;
+ long addr_data;
+};
+
+/* Results of SMI events. */
+enum si_sm_result {
+ SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
+ SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */
+ SI_SM_CALL_WITH_TICK_DELAY,/* Delay >=1 tick before calling again. */
+ SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */
+ SI_SM_IDLE, /* The SM is in idle state. */
+ SI_SM_HOSED, /* The hardware violated the state machine. */
+
+ /*
+ * The hardware is asserting attn and the state machine is
+ * idle.
+ */
+ SI_SM_ATTN
+};
+
+/* Handlers for the SMI state machine. */
+struct si_sm_handlers {
+ /*
+ * Put the version number of the state machine here so the
+ * upper layer can print it.
+ */
+ char *version;
+
+ /*
+ * Initialize the data and return the amount of I/O space to
+ * reserve for the space.
+ */
+ unsigned int (*init_data)(struct si_sm_data *smi,
+ struct si_sm_io *io);
+
+ /*
+ * Start a new transaction in the state machine. This will
+ * return -2 if the state machine is not idle, -1 if the size
+	 * is invalid (too large or too small), or 0 if the transaction
+	 * is successfully started.
+ */
+ int (*start_transaction)(struct si_sm_data *smi,
+ unsigned char *data, unsigned int size);
+
+ /*
+ * Return the results after the transaction. This will return
+ * -1 if the buffer is too small, zero if no transaction is
+ * present, or the actual length of the result data.
+ */
+ int (*get_result)(struct si_sm_data *smi,
+ unsigned char *data, unsigned int length);
+
+ /*
+ * Call this periodically (for a polled interface) or upon
+	 * receiving an interrupt (for an interrupt-driven interface).
+ * If interrupt driven, you should probably poll this
+ * periodically when not in idle state. This should be called
+ * with the time that passed since the last call, if it is
+ * significant. Time is in microseconds.
+ */
+ enum si_sm_result (*event)(struct si_sm_data *smi, long time);
+
+ /*
+ * Attempt to detect an SMI. Returns 0 on success or nonzero
+ * on failure.
+ */
+ int (*detect)(struct si_sm_data *smi);
+
+ /* The interface is shutting down, so clean it up. */
+ void (*cleanup)(struct si_sm_data *smi);
+
+ /* Return the size of the SMI structure in bytes. */
+ int (*size)(void);
+};
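+
+/*
+ * Rough usage sketch (compare wait_for_msg_done() in ipmi_si_intf.c):
+ * the caller starts a transaction and then drives the state machine
+ * by calling event() until it reports completion or failure:
+ *
+ *	rv = smi->handlers->start_transaction(smi->si_sm, msg, msg_len);
+ *	while (!rv) {
+ *		result = smi->handlers->event(smi->si_sm, elapsed_usecs);
+ *		if (result == SI_SM_TRANSACTION_COMPLETE ||
+ *		    result == SI_SM_HOSED)
+ *			break;
+ *	}
+ *	len = smi->handlers->get_result(smi->si_sm, resp, resp_len);
+ *
+ * "smi", "elapsed_usecs", etc. are placeholder names, not identifiers
+ * from this file.
+ */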
+
+/* Current state machines that we can use. */
+extern struct si_sm_handlers kcs_smi_handlers;
+extern struct si_sm_handlers smic_smi_handlers;
+extern struct si_sm_handlers bt_smi_handlers;
+
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
new file mode 100644
index 00000000..faed9297
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -0,0 +1,600 @@
+/*
+ * ipmi_smic_sm.c
+ *
+ * The state-machine driver for an IPMI SMIC driver
+ *
+ * It started as a copy of Corey Minyard's driver for the KCS interface
+ * and the kernel patch "mmcdev-patch-245" by HP
+ *
+ * modified by: Hannes Schulz <schulz@schwaar.com>
+ * ipmi@schwaar.com
+ *
+ *
+ * Corey Minyard's driver for the KCS interface has the following
+ * copyright notice:
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * the kernel patch "mmcdev-patch-245" by HP has the following
+ * copyright notice:
+ * (c) Copyright 2001 Grant Grundler (c) Copyright
+ * 2001 Hewlett-Packard Company
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <linux/kernel.h> /* For printk. */
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi_msgdefs.h> /* for completion codes */
+#include "ipmi_si_sm.h"
+
+/* smic_debug is a bit-field
+ * SMIC_DEBUG_ENABLE - turned on for now
+ * SMIC_DEBUG_MSG - commands and their responses
+ * SMIC_DEBUG_STATES - state machine
+*/
+#define SMIC_DEBUG_STATES 4
+#define SMIC_DEBUG_MSG 2
+#define SMIC_DEBUG_ENABLE 1
+
+static int smic_debug = 1;
+module_param(smic_debug, int, 0644);
+MODULE_PARM_DESC(smic_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
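+/* e.g. loading with smic_debug=5 (1 | 4) enables both the basic and
+   the state-machine traces. */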
+
+enum smic_states {
+ SMIC_IDLE,
+ SMIC_START_OP,
+ SMIC_OP_OK,
+ SMIC_WRITE_START,
+ SMIC_WRITE_NEXT,
+ SMIC_WRITE_END,
+ SMIC_WRITE2READ,
+ SMIC_READ_START,
+ SMIC_READ_NEXT,
+ SMIC_READ_END,
+ SMIC_HOSED
+};
+
+#define MAX_SMIC_READ_SIZE 80
+#define MAX_SMIC_WRITE_SIZE 80
+#define SMIC_MAX_ERROR_RETRIES 3
+
+/* Timeouts in microseconds. */
+#define SMIC_RETRY_TIMEOUT 2000000
+
+/* SMIC Flags Register Bits */
+#define SMIC_RX_DATA_READY 0x80
+#define SMIC_TX_DATA_READY 0x40
+
+/*
+ * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by
+ * a few systems, and then only by Systems Management
+ * Interrupts, not by the OS. Always ignore these bits.
+ *
+ */
+#define SMIC_SMI 0x10
+#define SMIC_EVM_DATA_AVAIL 0x08
+#define SMIC_SMS_DATA_AVAIL 0x04
+#define SMIC_FLAG_BSY 0x01
+
+/* SMIC Error Codes */
+#define EC_NO_ERROR 0x00
+#define EC_ABORTED 0x01
+#define EC_ILLEGAL_CONTROL 0x02
+#define EC_NO_RESPONSE 0x03
+#define EC_ILLEGAL_COMMAND 0x04
+#define EC_BUFFER_FULL 0x05
+
+struct si_sm_data {
+ enum smic_states state;
+ struct si_sm_io *io;
+ unsigned char write_data[MAX_SMIC_WRITE_SIZE];
+ int write_pos;
+ int write_count;
+ int orig_write_count;
+ unsigned char read_data[MAX_SMIC_READ_SIZE];
+ int read_pos;
+ int truncated;
+ unsigned int error_retries;
+ long smic_timeout;
+};
+
+static unsigned int init_smic_data(struct si_sm_data *smic,
+ struct si_sm_io *io)
+{
+ smic->state = SMIC_IDLE;
+ smic->io = io;
+ smic->write_pos = 0;
+ smic->write_count = 0;
+ smic->orig_write_count = 0;
+ smic->read_pos = 0;
+ smic->error_retries = 0;
+ smic->truncated = 0;
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+
+ /* We use 3 bytes of I/O. */
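+	/* (offset 0 = data, 1 = control/status, 2 = flags; see the
+	 *  read_smic_*()/write_smic_*() helpers below) */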
+ return 3;
+}
+
+static int start_smic_transaction(struct si_sm_data *smic,
+ unsigned char *data, unsigned int size)
+{
+ unsigned int i;
+
+ if (size < 2)
+ return IPMI_REQ_LEN_INVALID_ERR;
+ if (size > MAX_SMIC_WRITE_SIZE)
+ return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+ if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED))
+ return IPMI_NOT_IN_MY_STATE_ERR;
+
+ if (smic_debug & SMIC_DEBUG_MSG) {
+ printk(KERN_DEBUG "start_smic_transaction -");
+ for (i = 0; i < size; i++)
+ printk(" %02x", (unsigned char) data[i]);
+ printk("\n");
+ }
+ smic->error_retries = 0;
+ memcpy(smic->write_data, data, size);
+ smic->write_count = size;
+ smic->orig_write_count = size;
+ smic->write_pos = 0;
+ smic->read_pos = 0;
+ smic->state = SMIC_START_OP;
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+ return 0;
+}
+
+static int smic_get_result(struct si_sm_data *smic,
+ unsigned char *data, unsigned int length)
+{
+ int i;
+
+ if (smic_debug & SMIC_DEBUG_MSG) {
+ printk(KERN_DEBUG "smic_get result -");
+ for (i = 0; i < smic->read_pos; i++)
+ printk(" %02x", smic->read_data[i]);
+ printk("\n");
+ }
+ if (length < smic->read_pos) {
+ smic->read_pos = length;
+ smic->truncated = 1;
+ }
+ memcpy(data, smic->read_data, smic->read_pos);
+
+ if ((length >= 3) && (smic->read_pos < 3)) {
+ data[2] = IPMI_ERR_UNSPECIFIED;
+ smic->read_pos = 3;
+ }
+ if (smic->truncated) {
+ data[2] = IPMI_ERR_MSG_TRUNCATED;
+ smic->truncated = 0;
+ }
+ return smic->read_pos;
+}
+
+static inline unsigned char read_smic_flags(struct si_sm_data *smic)
+{
+ return smic->io->inputb(smic->io, 2);
+}
+
+static inline unsigned char read_smic_status(struct si_sm_data *smic)
+{
+ return smic->io->inputb(smic->io, 1);
+}
+
+static inline unsigned char read_smic_data(struct si_sm_data *smic)
+{
+ return smic->io->inputb(smic->io, 0);
+}
+
+static inline void write_smic_flags(struct si_sm_data *smic,
+ unsigned char flags)
+{
+ smic->io->outputb(smic->io, 2, flags);
+}
+
+static inline void write_smic_control(struct si_sm_data *smic,
+ unsigned char control)
+{
+ smic->io->outputb(smic->io, 1, control);
+}
+
+static inline void write_si_sm_data(struct si_sm_data *smic,
+ unsigned char data)
+{
+ smic->io->outputb(smic->io, 0, data);
+}
+
+static inline void start_error_recovery(struct si_sm_data *smic, char *reason)
+{
+ (smic->error_retries)++;
+ if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) {
+ if (smic_debug & SMIC_DEBUG_ENABLE)
+ printk(KERN_WARNING
+ "ipmi_smic_drv: smic hosed: %s\n", reason);
+ smic->state = SMIC_HOSED;
+ } else {
+ smic->write_count = smic->orig_write_count;
+ smic->write_pos = 0;
+ smic->read_pos = 0;
+ smic->state = SMIC_START_OP;
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+ }
+}
+
+static inline void write_next_byte(struct si_sm_data *smic)
+{
+ write_si_sm_data(smic, smic->write_data[smic->write_pos]);
+ (smic->write_pos)++;
+ (smic->write_count)--;
+}
+
+static inline void read_next_byte(struct si_sm_data *smic)
+{
+ if (smic->read_pos >= MAX_SMIC_READ_SIZE) {
+ read_smic_data(smic);
+ smic->truncated = 1;
+ } else {
+ smic->read_data[smic->read_pos] = read_smic_data(smic);
+ smic->read_pos++;
+ }
+}
+
+/* SMIC Control/Status Code Components */
+#define SMIC_GET_STATUS 0x00 /* Control form's name */
+#define SMIC_READY 0x00 /* Status form's name */
+#define SMIC_WR_START 0x01 /* Unified Control/Status names... */
+#define SMIC_WR_NEXT 0x02
+#define SMIC_WR_END 0x03
+#define SMIC_RD_START 0x04
+#define SMIC_RD_NEXT 0x05
+#define SMIC_RD_END 0x06
+#define SMIC_CODE_MASK 0x0f
+
+#define SMIC_CONTROL 0x00
+#define SMIC_STATUS 0x80
+#define SMIC_CS_MASK 0x80
+
+#define SMIC_SMS 0x40
+#define SMIC_SMM 0x60
+#define SMIC_STREAM_MASK 0x60
+
+/* SMIC Control Codes */
+#define SMIC_CC_SMS_GET_STATUS (SMIC_CONTROL|SMIC_SMS|SMIC_GET_STATUS)
+#define SMIC_CC_SMS_WR_START (SMIC_CONTROL|SMIC_SMS|SMIC_WR_START)
+#define SMIC_CC_SMS_WR_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_WR_NEXT)
+#define SMIC_CC_SMS_WR_END (SMIC_CONTROL|SMIC_SMS|SMIC_WR_END)
+#define SMIC_CC_SMS_RD_START (SMIC_CONTROL|SMIC_SMS|SMIC_RD_START)
+#define SMIC_CC_SMS_RD_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_RD_NEXT)
+#define SMIC_CC_SMS_RD_END (SMIC_CONTROL|SMIC_SMS|SMIC_RD_END)
+
+#define SMIC_CC_SMM_GET_STATUS (SMIC_CONTROL|SMIC_SMM|SMIC_GET_STATUS)
+#define SMIC_CC_SMM_WR_START (SMIC_CONTROL|SMIC_SMM|SMIC_WR_START)
+#define SMIC_CC_SMM_WR_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_WR_NEXT)
+#define SMIC_CC_SMM_WR_END (SMIC_CONTROL|SMIC_SMM|SMIC_WR_END)
+#define SMIC_CC_SMM_RD_START (SMIC_CONTROL|SMIC_SMM|SMIC_RD_START)
+#define SMIC_CC_SMM_RD_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_RD_NEXT)
+#define SMIC_CC_SMM_RD_END (SMIC_CONTROL|SMIC_SMM|SMIC_RD_END)
+
+/* SMIC Status Codes */
+#define SMIC_SC_SMS_READY (SMIC_STATUS|SMIC_SMS|SMIC_READY)
+#define SMIC_SC_SMS_WR_START (SMIC_STATUS|SMIC_SMS|SMIC_WR_START)
+#define SMIC_SC_SMS_WR_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_WR_NEXT)
+#define SMIC_SC_SMS_WR_END (SMIC_STATUS|SMIC_SMS|SMIC_WR_END)
+#define SMIC_SC_SMS_RD_START (SMIC_STATUS|SMIC_SMS|SMIC_RD_START)
+#define SMIC_SC_SMS_RD_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_RD_NEXT)
+#define SMIC_SC_SMS_RD_END (SMIC_STATUS|SMIC_SMS|SMIC_RD_END)
+
+#define SMIC_SC_SMM_READY (SMIC_STATUS|SMIC_SMM|SMIC_READY)
+#define SMIC_SC_SMM_WR_START (SMIC_STATUS|SMIC_SMM|SMIC_WR_START)
+#define SMIC_SC_SMM_WR_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_WR_NEXT)
+#define SMIC_SC_SMM_WR_END (SMIC_STATUS|SMIC_SMM|SMIC_WR_END)
+#define SMIC_SC_SMM_RD_START (SMIC_STATUS|SMIC_SMM|SMIC_RD_START)
+#define SMIC_SC_SMM_RD_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_RD_NEXT)
+#define SMIC_SC_SMM_RD_END (SMIC_STATUS|SMIC_SMM|SMIC_RD_END)
+
+/* these are the control/status codes we actually use
+ SMIC_CC_SMS_GET_STATUS 0x40
+ SMIC_CC_SMS_WR_START 0x41
+ SMIC_CC_SMS_WR_NEXT 0x42
+ SMIC_CC_SMS_WR_END 0x43
+ SMIC_CC_SMS_RD_START 0x44
+ SMIC_CC_SMS_RD_NEXT 0x45
+ SMIC_CC_SMS_RD_END 0x46
+
+ SMIC_SC_SMS_READY 0xC0
+ SMIC_SC_SMS_WR_START 0xC1
+ SMIC_SC_SMS_WR_NEXT 0xC2
+ SMIC_SC_SMS_WR_END 0xC3
+ SMIC_SC_SMS_RD_START 0xC4
+ SMIC_SC_SMS_RD_NEXT 0xC5
+ SMIC_SC_SMS_RD_END 0xC6
+*/
+
+static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
+{
+ unsigned char status;
+ unsigned char flags;
+ unsigned char data;
+
+ if (smic->state == SMIC_HOSED) {
+ init_smic_data(smic, smic->io);
+ return SI_SM_HOSED;
+ }
+ if (smic->state != SMIC_IDLE) {
+ if (smic_debug & SMIC_DEBUG_STATES)
+ printk(KERN_DEBUG
+ "smic_event - smic->smic_timeout = %ld,"
+ " time = %ld\n",
+ smic->smic_timeout, time);
+ /*
+ * FIXME: smic_event is sometimes called with time >
+ * SMIC_RETRY_TIMEOUT
+ */
+ if (time < SMIC_RETRY_TIMEOUT) {
+ smic->smic_timeout -= time;
+ if (smic->smic_timeout < 0) {
+ start_error_recovery(smic, "smic timed out.");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ }
+ }
+ flags = read_smic_flags(smic);
+ if (flags & SMIC_FLAG_BSY)
+ return SI_SM_CALL_WITH_DELAY;
+
+ status = read_smic_status(smic);
+ if (smic_debug & SMIC_DEBUG_STATES)
+ printk(KERN_DEBUG
+ "smic_event - state = %d, flags = 0x%02x,"
+ " status = 0x%02x\n",
+ smic->state, flags, status);
+
+ switch (smic->state) {
+ case SMIC_IDLE:
+ /* in IDLE we check for available messages */
+ if (flags & SMIC_SMS_DATA_AVAIL)
+ return SI_SM_ATTN;
+ return SI_SM_IDLE;
+
+ case SMIC_START_OP:
+ /* sanity check whether smic is really idle */
+ write_smic_control(smic, SMIC_CC_SMS_GET_STATUS);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_OP_OK;
+ break;
+
+ case SMIC_OP_OK:
+ if (status != SMIC_SC_SMS_READY) {
+ /* this should not happen */
+ start_error_recovery(smic,
+ "state = SMIC_OP_OK,"
+ " status != SMIC_SC_SMS_READY");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+		/* OK so far; smic is idle, let us start ... */
+ write_smic_control(smic, SMIC_CC_SMS_WR_START);
+ write_next_byte(smic);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_WRITE_START;
+ break;
+
+ case SMIC_WRITE_START:
+ if (status != SMIC_SC_SMS_WR_START) {
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_START, "
+ "status != SMIC_SC_SMS_WR_START");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ /*
+ * we must not issue WR_(NEXT|END) unless
+ * TX_DATA_READY is set
+ * */
+ if (flags & SMIC_TX_DATA_READY) {
+ if (smic->write_count == 1) {
+ /* last byte */
+ write_smic_control(smic, SMIC_CC_SMS_WR_END);
+ smic->state = SMIC_WRITE_END;
+ } else {
+ write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
+ smic->state = SMIC_WRITE_NEXT;
+ }
+ write_next_byte(smic);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+
+ case SMIC_WRITE_NEXT:
+ if (status != SMIC_SC_SMS_WR_NEXT) {
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_NEXT, "
+ "status != SMIC_SC_SMS_WR_NEXT");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ /* this is the same code as in SMIC_WRITE_START */
+ if (flags & SMIC_TX_DATA_READY) {
+ if (smic->write_count == 1) {
+ write_smic_control(smic, SMIC_CC_SMS_WR_END);
+ smic->state = SMIC_WRITE_END;
+ } else {
+ write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
+ smic->state = SMIC_WRITE_NEXT;
+ }
+ write_next_byte(smic);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+
+ case SMIC_WRITE_END:
+ if (status != SMIC_SC_SMS_WR_END) {
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_END, "
+ "status != SMIC_SC_SMS_WR_END");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ /* data register holds an error code */
+ data = read_smic_data(smic);
+ if (data != 0) {
+ if (smic_debug & SMIC_DEBUG_ENABLE)
+ printk(KERN_DEBUG
+ "SMIC_WRITE_END: data = %02x\n", data);
+ start_error_recovery(smic,
+ "state = SMIC_WRITE_END, "
+ "data != SUCCESS");
+ return SI_SM_CALL_WITH_DELAY;
+ } else
+ smic->state = SMIC_WRITE2READ;
+ break;
+
+ case SMIC_WRITE2READ:
+ /*
+ * we must wait for RX_DATA_READY to be set before we
+ * can continue
+ */
+ if (flags & SMIC_RX_DATA_READY) {
+ write_smic_control(smic, SMIC_CC_SMS_RD_START);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_START;
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+
+ case SMIC_READ_START:
+ if (status != SMIC_SC_SMS_RD_START) {
+ start_error_recovery(smic,
+ "state = SMIC_READ_START, "
+ "status != SMIC_SC_SMS_RD_START");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ if (flags & SMIC_RX_DATA_READY) {
+ read_next_byte(smic);
+ write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_NEXT;
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+
+ case SMIC_READ_NEXT:
+ switch (status) {
+ /*
+ * smic tells us that this is the last byte to be read
+ * --> clean up
+ */
+ case SMIC_SC_SMS_RD_END:
+ read_next_byte(smic);
+ write_smic_control(smic, SMIC_CC_SMS_RD_END);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_END;
+ break;
+ case SMIC_SC_SMS_RD_NEXT:
+ if (flags & SMIC_RX_DATA_READY) {
+ read_next_byte(smic);
+ write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
+ write_smic_flags(smic, flags | SMIC_FLAG_BSY);
+ smic->state = SMIC_READ_NEXT;
+ } else
+ return SI_SM_CALL_WITH_DELAY;
+ break;
+ default:
+ start_error_recovery(
+ smic,
+ "state = SMIC_READ_NEXT, "
+ "status != SMIC_SC_SMS_RD_(NEXT|END)");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ break;
+
+ case SMIC_READ_END:
+ if (status != SMIC_SC_SMS_READY) {
+ start_error_recovery(smic,
+ "state = SMIC_READ_END, "
+ "status != SMIC_SC_SMS_READY");
+ return SI_SM_CALL_WITH_DELAY;
+ }
+ data = read_smic_data(smic);
+ /* data register holds an error code */
+ if (data != 0) {
+ if (smic_debug & SMIC_DEBUG_ENABLE)
+ printk(KERN_DEBUG
+ "SMIC_READ_END: data = %02x\n", data);
+ start_error_recovery(smic,
+ "state = SMIC_READ_END, "
+ "data != SUCCESS");
+ return SI_SM_CALL_WITH_DELAY;
+ } else {
+ smic->state = SMIC_IDLE;
+ return SI_SM_TRANSACTION_COMPLETE;
+ }
+
+ case SMIC_HOSED:
+ init_smic_data(smic, smic->io);
+ return SI_SM_HOSED;
+
+ default:
+		if (smic_debug & SMIC_DEBUG_ENABLE)
+			printk(KERN_DEBUG "smic->state = %d\n", smic->state);
+		/* Recover even when debugging is disabled. */
+		start_error_recovery(smic, "state = UNKNOWN");
+		return SI_SM_CALL_WITH_DELAY;
+ }
+ smic->smic_timeout = SMIC_RETRY_TIMEOUT;
+ return SI_SM_CALL_WITHOUT_DELAY;
+}
+
+static int smic_detect(struct si_sm_data *smic)
+{
+ /*
+	 * It's impossible for the SMIC flags register to be all 1's,
+ * (assuming a properly functioning, self-initialized BMC)
+ * but that's what you get from reading a bogus address, so we
+ * test that first.
+ */
+ if (read_smic_flags(smic) == 0xff)
+ return 1;
+
+ return 0;
+}
+
+static void smic_cleanup(struct si_sm_data *smic)
+{
+}
+
+static int smic_size(void)
+{
+ return sizeof(struct si_sm_data);
+}
+
+struct si_sm_handlers smic_smi_handlers = {
+ .init_data = init_smic_data,
+ .start_transaction = start_smic_transaction,
+ .get_result = smic_get_result,
+ .event = smic_event,
+ .detect = smic_detect,
+ .cleanup = smic_cleanup,
+ .size = smic_size,
+};
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
new file mode 100644
index 00000000..320668f4
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -0,0 +1,1371 @@
+/*
+ * ipmi_watchdog.c
+ *
+ * A watchdog timer based upon the IPMI interface.
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ipmi.h>
+#include <linux/ipmi_smi.h>
+#include <linux/mutex.h>
+#include <linux/watchdog.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/kdebug.h>
+#include <linux/rwsem.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <linux/notifier.h>
+#include <linux/nmi.h>
+#include <linux/reboot.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <asm/atomic.h>
+
+#ifdef CONFIG_X86
+/*
+ * This is ugly, but I've determined that x86 is the only architecture
+ * that can reasonably support the IPMI NMI watchdog timeout at this
+ * time. If another architecture adds this capability somehow, it
+ * will have to be a somewhat different mechanism and I have no idea
+ * how it will work. So in the unlikely event that another
+ * architecture supports this, we can figure out a good generic
+ * mechanism for it at that time.
+ */
+#include <asm/kdebug.h>
+#define HAVE_DIE_NMI
+#endif
+
+#define PFX "IPMI Watchdog: "
+
+/*
+ * The IPMI command/response information for the watchdog timer.
+ */
+
+/* values for byte 1 of the set command, byte 2 of the get response. */
+#define WDOG_DONT_LOG (1 << 7)
+#define WDOG_DONT_STOP_ON_SET (1 << 6)
+#define WDOG_SET_TIMER_USE(byte, use) \
+ byte = ((byte) & 0xf8) | ((use) & 0x7)
+#define WDOG_GET_TIMER_USE(byte) ((byte) & 0x7)
+#define WDOG_TIMER_USE_BIOS_FRB2 1
+#define WDOG_TIMER_USE_BIOS_POST 2
+#define WDOG_TIMER_USE_OS_LOAD 3
+#define WDOG_TIMER_USE_SMS_OS 4
+#define WDOG_TIMER_USE_OEM 5
+
+/* values for byte 2 of the set command, byte 3 of the get response. */
+#define WDOG_SET_PRETIMEOUT_ACT(byte, use) \
+ byte = ((byte) & 0x8f) | (((use) & 0x7) << 4)
+#define WDOG_GET_PRETIMEOUT_ACT(byte) (((byte) >> 4) & 0x7)
+#define WDOG_PRETIMEOUT_NONE 0
+#define WDOG_PRETIMEOUT_SMI 1
+#define WDOG_PRETIMEOUT_NMI 2
+#define WDOG_PRETIMEOUT_MSG_INT 3
+
+/* Operations that can be performed on a pretimeout. */
+#define WDOG_PREOP_NONE 0
+#define WDOG_PREOP_PANIC 1
+/* Cause data to be available to read. Doesn't work in NMI mode. */
+#define WDOG_PREOP_GIVE_DATA 2
+
+/* Actions to perform on a full timeout. */
+#define WDOG_SET_TIMEOUT_ACT(byte, use) \
+ byte = ((byte) & 0xf8) | ((use) & 0x7)
+#define WDOG_GET_TIMEOUT_ACT(byte) ((byte) & 0x7)
+#define WDOG_TIMEOUT_NONE 0
+#define WDOG_TIMEOUT_RESET 1
+#define WDOG_TIMEOUT_POWER_DOWN 2
+#define WDOG_TIMEOUT_POWER_CYCLE 3
+
+/*
+ * Byte 3 of the set command, byte 4 of the get response is the
+ * pre-timeout in seconds.
+ */
+
+/* Bits for setting byte 4 of the set command, byte 5 of the get response. */
+#define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1)
+#define WDOG_EXPIRE_CLEAR_BIOS_POST (1 << 2)
+#define WDOG_EXPIRE_CLEAR_OS_LOAD (1 << 3)
+#define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4)
+#define WDOG_EXPIRE_CLEAR_OEM (1 << 5)
+
+/*
+ * Setting/getting the watchdog timer value. This is for bytes 5 and
+ * 6 (the timeout time) of the set command, and bytes 6 and 7 (the
+ * timeout time) and 8 and 9 (the current countdown value) of the
+ * response. The timeout value is given in seconds (in the command it
+ * is 100ms intervals).
+ */
+#define WDOG_SET_TIMEOUT(byte1, byte2, val) \
+ (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8)
+#define WDOG_GET_TIMEOUT(byte1, byte2) \
+ (((byte1) | ((byte2) << 8)) / 10)
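+/*
+ * For example, the default 10-second timeout becomes 10 * 10 = 100
+ * (0x64) intervals, so WDOG_SET_TIMEOUT(b1, b2, 10) yields b1 = 0x64,
+ * b2 = 0x00, and WDOG_GET_TIMEOUT(0x64, 0x00) recovers 10.  Note that
+ * the read direction uses integer division, so a countdown of, say,
+ * 95 intervals (9.5s) reads back as 9 seconds.
+ */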
+
+#define IPMI_WDOG_RESET_TIMER 0x22
+#define IPMI_WDOG_SET_TIMER 0x24
+#define IPMI_WDOG_GET_TIMER 0x25
+
+/* These are here until the real ones get into the watchdog.h interface. */
+#ifndef WDIOC_GETTIMEOUT
+#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
+#endif
+#ifndef WDIOC_SET_PRETIMEOUT
+#define WDIOC_SET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 21, int)
+#endif
+#ifndef WDIOC_GET_PRETIMEOUT
+#define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int)
+#endif
+
+static DEFINE_MUTEX(ipmi_watchdog_mutex);
+static int nowayout = WATCHDOG_NOWAYOUT;
+
+static ipmi_user_t watchdog_user;
+static int watchdog_ifnum;
+
+/* Default the timeout to 10 seconds. */
+static int timeout = 10;
+
+/* The pre-timeout is disabled by default. */
+static int pretimeout;
+
+/* Default action is to reset the board on a timeout. */
+static unsigned char action_val = WDOG_TIMEOUT_RESET;
+
+static char action[16] = "reset";
+
+static unsigned char preaction_val = WDOG_PRETIMEOUT_NONE;
+
+static char preaction[16] = "pre_none";
+
+static unsigned char preop_val = WDOG_PREOP_NONE;
+
+static char preop[16] = "preop_none";
+static DEFINE_SPINLOCK(ipmi_read_lock);
+static char data_to_read;
+static DECLARE_WAIT_QUEUE_HEAD(read_q);
+static struct fasync_struct *fasync_q;
+static char pretimeout_since_last_heartbeat;
+static char expect_close;
+
+static int ifnum_to_use = -1;
+
+/* Parameters to ipmi_set_timeout */
+#define IPMI_SET_TIMEOUT_NO_HB 0
+#define IPMI_SET_TIMEOUT_HB_IF_NECESSARY 1
+#define IPMI_SET_TIMEOUT_FORCE_HB 2
+
+static int ipmi_set_timeout(int do_heartbeat);
+static void ipmi_register_watchdog(int ipmi_intf);
+static void ipmi_unregister_watchdog(int ipmi_intf);
+
+/*
+ * If true, the driver will start running as soon as it is configured
+ * and ready.
+ */
+static int start_now;
+
+static int set_param_timeout(const char *val, const struct kernel_param *kp)
+{
+ char *endp;
+ int l;
+ int rv = 0;
+
+ if (!val)
+ return -EINVAL;
+ l = simple_strtoul(val, &endp, 0);
+ if (endp == val)
+ return -EINVAL;
+
+ *((int *)kp->arg) = l;
+ if (watchdog_user)
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ return rv;
+}
+
+static struct kernel_param_ops param_ops_timeout = {
+ .set = set_param_timeout,
+ .get = param_get_int,
+};
+#define param_check_timeout param_check_int
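+/*
+ * Defining param_ops_timeout and param_check_timeout lets the
+ * module_param(timeout, timeout, 0644) declarations below use this
+ * custom "type": module_param(name, type, perm) pastes the type into
+ * param_ops_##type, so writes to these parameters go through
+ * set_param_timeout() and are pushed to the BMC whenever the watchdog
+ * is already registered.
+ */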
+
+typedef int (*action_fn)(const char *intval, char *outval);
+
+static int action_op(const char *inval, char *outval);
+static int preaction_op(const char *inval, char *outval);
+static int preop_op(const char *inval, char *outval);
+static void check_parms(void);
+
+static int set_param_str(const char *val, const struct kernel_param *kp)
+{
+ action_fn fn = (action_fn) kp->arg;
+ int rv = 0;
+ char valcp[16];
+ char *s;
+
+ strncpy(valcp, val, 16);
+ valcp[15] = '\0';
+
+ s = strstrip(valcp);
+
+ rv = fn(s, NULL);
+ if (rv)
+ goto out;
+
+ check_parms();
+ if (watchdog_user)
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ out:
+ return rv;
+}
+
+static int get_param_str(char *buffer, const struct kernel_param *kp)
+{
+ action_fn fn = (action_fn) kp->arg;
+ int rv;
+
+ rv = fn(NULL, buffer);
+ if (rv)
+ return rv;
+ return strlen(buffer);
+}
+
+
+static int set_param_wdog_ifnum(const char *val, const struct kernel_param *kp)
+{
+ int rv = param_set_int(val, kp);
+ if (rv)
+ return rv;
+ if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum))
+ return 0;
+
+ ipmi_unregister_watchdog(watchdog_ifnum);
+ ipmi_register_watchdog(ifnum_to_use);
+ return 0;
+}
+
+static struct kernel_param_ops param_ops_wdog_ifnum = {
+ .set = set_param_wdog_ifnum,
+ .get = param_get_int,
+};
+
+#define param_check_wdog_ifnum param_check_int
+
+static struct kernel_param_ops param_ops_str = {
+ .set = set_param_str,
+ .get = get_param_str,
+};
+
+module_param(ifnum_to_use, wdog_ifnum, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
+ "timer. Setting to -1 defaults to the first registered "
+ "interface");
+
+module_param(timeout, timeout, 0644);
+MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
+
+module_param(pretimeout, timeout, 0644);
+MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
+
+module_param_cb(action, &param_ops_str, action_op, 0644);
+MODULE_PARM_DESC(action, "Timeout action. One of: "
+ "reset, none, power_cycle, power_off.");
+
+module_param_cb(preaction, &param_ops_str, preaction_op, 0644);
+MODULE_PARM_DESC(preaction, "Pretimeout action. One of: "
+ "pre_none, pre_smi, pre_nmi, pre_int.");
+
+module_param_cb(preop, &param_ops_str, preop_op, 0644);
+MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: "
+ "preop_none, preop_panic, preop_give_data.");
+
+module_param(start_now, int, 0444);
+MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as "
+ "soon as the driver is loaded.");
+
+module_param(nowayout, int, 0644);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=CONFIG_WATCHDOG_NOWAYOUT)");
+
+/* Default state of the timer. */
+static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+
+/* If shutting down via IPMI, we ignore the heartbeat. */
+static int ipmi_ignore_heartbeat;
+
+/* Is someone using the watchdog? Only one user is allowed. */
+static unsigned long ipmi_wdog_open;
+
+/*
+ * If set to 1, the heartbeat command will set the state to reset and
+ * start the timer.  The timer doesn't normally run when the driver is
+ * first opened, only once the heartbeat is set the first time; this
+ * variable is used to accomplish that.
+ */
+static int ipmi_start_timer_on_heartbeat;
+
+/* IPMI version of the BMC. */
+static unsigned char ipmi_version_major;
+static unsigned char ipmi_version_minor;
+
+/* If a pretimeout occurs, this is used to allow only one panic to happen. */
+static atomic_t preop_panic_excl = ATOMIC_INIT(-1);
+
+#ifdef HAVE_DIE_NMI
+static int testing_nmi;
+static int nmi_handler_registered;
+#endif
+
+static int ipmi_heartbeat(void);
+
+/*
+ * We use a mutex to make sure that only one thing can send a set
+ * timeout at one time, because we only have one copy of the data.
+ * The mutex is claimed when the set_timeout is sent and released
+ * when both messages are free.
+ */
+static atomic_t set_timeout_tofree = ATOMIC_INIT(0);
+static DEFINE_MUTEX(set_timeout_lock);
+static DECLARE_COMPLETION(set_timeout_wait);
+static void set_timeout_free_smi(struct ipmi_smi_msg *msg)
+{
+ if (atomic_dec_and_test(&set_timeout_tofree))
+ complete(&set_timeout_wait);
+}
+static void set_timeout_free_recv(struct ipmi_recv_msg *msg)
+{
+ if (atomic_dec_and_test(&set_timeout_tofree))
+ complete(&set_timeout_wait);
+}
+static struct ipmi_smi_msg set_timeout_smi_msg = {
+ .done = set_timeout_free_smi
+};
+static struct ipmi_recv_msg set_timeout_recv_msg = {
+ .done = set_timeout_free_recv
+};
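+/*
+ * The pattern above: set_timeout_tofree is primed to 2 before a
+ * request is issued (one reference for the SMI message, one for the
+ * receive message).  Each ->done handler decrements it, and the one
+ * that drops it to zero fires the completion, so the waiter in
+ * ipmi_set_timeout() cannot resume while the lower layer still holds
+ * a pointer to either statically allocated message.
+ */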
+
+static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ int *send_heartbeat_now)
+{
+ struct kernel_ipmi_msg msg;
+ unsigned char data[6];
+ int rv;
+ struct ipmi_system_interface_addr addr;
+ int hbnow = 0;
+
+
+ /* These can be cleared as we are setting the timeout. */
+ pretimeout_since_last_heartbeat = 0;
+
+ data[0] = 0;
+ WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
+
+ if ((ipmi_version_major > 1)
+ || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
+ /* This is an IPMI 1.5-only feature. */
+ data[0] |= WDOG_DONT_STOP_ON_SET;
+ } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+ /*
+		 * In IPMI 1.0, setting the timer stops the watchdog, so
+		 * we need to start it back up again.
+ */
+ hbnow = 1;
+ }
+
+ data[1] = 0;
+ WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state);
+ if ((pretimeout > 0) && (ipmi_watchdog_state != WDOG_TIMEOUT_NONE)) {
+ WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val);
+ data[2] = pretimeout;
+ } else {
+ WDOG_SET_PRETIMEOUT_ACT(data[1], WDOG_PRETIMEOUT_NONE);
+ data[2] = 0; /* No pretimeout. */
+ }
+ data[3] = 0;
+ WDOG_SET_TIMEOUT(data[4], data[5], timeout);
+
+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ addr.channel = IPMI_BMC_CHANNEL;
+ addr.lun = 0;
+
+ msg.netfn = 0x06;
+ msg.cmd = IPMI_WDOG_SET_TIMER;
+ msg.data = data;
+ msg.data_len = sizeof(data);
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ smi_msg,
+ recv_msg,
+ 1);
+ if (rv) {
+ printk(KERN_WARNING PFX "set timeout error: %d\n",
+ rv);
+ }
+
+ if (send_heartbeat_now)
+ *send_heartbeat_now = hbnow;
+
+ return rv;
+}
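+/*
+ * For reference, the Set Watchdog Timer request assembled above is:
+ *	data[0]	  timer use (SMS/OS), plus the don't-stop flag on
+ *		  IPMI 1.5 and later
+ *	data[1]	  timeout action in the low nibble, pretimeout action
+ *		  in bits 4-6
+ *	data[2]	  pretimeout interval in seconds (0 = disabled)
+ *	data[3]	  timer-use expiration flags to clear (none here)
+ *	data[4,5] timeout count, low byte first, in 100ms intervals
+ */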
+
+static int ipmi_set_timeout(int do_heartbeat)
+{
+ int send_heartbeat_now;
+ int rv;
+
+
+ /* We can only send one of these at a time. */
+ mutex_lock(&set_timeout_lock);
+
+ atomic_set(&set_timeout_tofree, 2);
+
+ rv = i_ipmi_set_timeout(&set_timeout_smi_msg,
+ &set_timeout_recv_msg,
+ &send_heartbeat_now);
+ if (rv) {
+ mutex_unlock(&set_timeout_lock);
+ goto out;
+ }
+
+ wait_for_completion(&set_timeout_wait);
+
+ mutex_unlock(&set_timeout_lock);
+
+ if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
+ || ((send_heartbeat_now)
+ && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
+ rv = ipmi_heartbeat();
+
+out:
+ return rv;
+}
+
+static atomic_t panic_done_count = ATOMIC_INIT(0);
+
+static void panic_smi_free(struct ipmi_smi_msg *msg)
+{
+ atomic_dec(&panic_done_count);
+}
+static void panic_recv_free(struct ipmi_recv_msg *msg)
+{
+ atomic_dec(&panic_done_count);
+}
+
+static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = {
+ .done = panic_smi_free
+};
+static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = {
+ .done = panic_recv_free
+};
+
+static void panic_halt_ipmi_heartbeat(void)
+{
+ struct kernel_ipmi_msg msg;
+ struct ipmi_system_interface_addr addr;
+ int rv;
+
+ /*
+ * Don't reset the timer if we have the timer turned off, that
+ * re-enables the watchdog.
+ */
+ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
+ return;
+
+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ addr.channel = IPMI_BMC_CHANNEL;
+ addr.lun = 0;
+
+ msg.netfn = 0x06;
+ msg.cmd = IPMI_WDOG_RESET_TIMER;
+ msg.data = NULL;
+ msg.data_len = 0;
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ &panic_halt_heartbeat_smi_msg,
+ &panic_halt_heartbeat_recv_msg,
+ 1);
+ if (!rv)
+ atomic_add(2, &panic_done_count);
+}
+
+static struct ipmi_smi_msg panic_halt_smi_msg = {
+ .done = panic_smi_free
+};
+static struct ipmi_recv_msg panic_halt_recv_msg = {
+ .done = panic_recv_free
+};
+
+/*
+ * Special call, doesn't claim any locks. This is only to be called
+ * at panic or halt time, in run-to-completion mode, when the caller
+ * is the only CPU and the only thing that will be going is these IPMI
+ * calls.
+ */
+static void panic_halt_ipmi_set_timeout(void)
+{
+ int send_heartbeat_now;
+ int rv;
+
+ /* Wait for the messages to be free. */
+ while (atomic_read(&panic_done_count) != 0)
+ ipmi_poll_interface(watchdog_user);
+ rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
+ &panic_halt_recv_msg,
+ &send_heartbeat_now);
+ if (!rv) {
+ atomic_add(2, &panic_done_count);
+ if (send_heartbeat_now)
+ panic_halt_ipmi_heartbeat();
+ } else
+ printk(KERN_WARNING PFX
+ "Unable to extend the watchdog timeout.");
+ while (atomic_read(&panic_done_count) != 0)
+ ipmi_poll_interface(watchdog_user);
+}
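+/*
+ * Note the busy-wait on panic_done_count with ipmi_poll_interface():
+ * at panic or halt time interrupts and scheduling may be gone, so
+ * completions cannot be used; polling the interface is the only way
+ * to push the messages to the BMC and get the ->done callbacks run.
+ */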
+
+/*
+ * We use a mutex to make sure that only one thing can send a
+ * heartbeat at one time, because we only have one copy of the data.
+ * The mutex is claimed when the heartbeat is sent and released
+ * when both messages are free.
+ */
+static atomic_t heartbeat_tofree = ATOMIC_INIT(0);
+static DEFINE_MUTEX(heartbeat_lock);
+static DECLARE_COMPLETION(heartbeat_wait);
+static void heartbeat_free_smi(struct ipmi_smi_msg *msg)
+{
+ if (atomic_dec_and_test(&heartbeat_tofree))
+ complete(&heartbeat_wait);
+}
+static void heartbeat_free_recv(struct ipmi_recv_msg *msg)
+{
+ if (atomic_dec_and_test(&heartbeat_tofree))
+ complete(&heartbeat_wait);
+}
+static struct ipmi_smi_msg heartbeat_smi_msg = {
+ .done = heartbeat_free_smi
+};
+static struct ipmi_recv_msg heartbeat_recv_msg = {
+ .done = heartbeat_free_recv
+};
+
+static int ipmi_heartbeat(void)
+{
+ struct kernel_ipmi_msg msg;
+ int rv;
+ struct ipmi_system_interface_addr addr;
+
+ if (ipmi_ignore_heartbeat)
+ return 0;
+
+ if (ipmi_start_timer_on_heartbeat) {
+ ipmi_start_timer_on_heartbeat = 0;
+ ipmi_watchdog_state = action_val;
+ return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ } else if (pretimeout_since_last_heartbeat) {
+ /*
+ * A pretimeout occurred, make sure we set the timeout.
+		 * We don't want to set the action, though; we want to
+		 * leave that alone (thus it can't be combined with the
+		 * above operation).
+ */
+ return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+ }
+
+ mutex_lock(&heartbeat_lock);
+
+ atomic_set(&heartbeat_tofree, 2);
+
+ /*
+ * Don't reset the timer if we have the timer turned off, that
+ * re-enables the watchdog.
+ */
+ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) {
+ mutex_unlock(&heartbeat_lock);
+ return 0;
+ }
+
+ addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ addr.channel = IPMI_BMC_CHANNEL;
+ addr.lun = 0;
+
+ msg.netfn = 0x06;
+ msg.cmd = IPMI_WDOG_RESET_TIMER;
+ msg.data = NULL;
+ msg.data_len = 0;
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ &heartbeat_smi_msg,
+ &heartbeat_recv_msg,
+ 1);
+ if (rv) {
+ mutex_unlock(&heartbeat_lock);
+ printk(KERN_WARNING PFX "heartbeat failure: %d\n",
+ rv);
+ return rv;
+ }
+
+ /* Wait for the heartbeat to be sent. */
+ wait_for_completion(&heartbeat_wait);
+
+ if (heartbeat_recv_msg.msg.data[0] != 0) {
+ /*
+ * Got an error in the heartbeat response. It was already
+ * reported in ipmi_wdog_msg_handler, but we should return
+ * an error here.
+ */
+ rv = -EINVAL;
+ }
+
+ mutex_unlock(&heartbeat_lock);
+
+ return rv;
+}
+
+static struct watchdog_info ident = {
+ .options = 0, /* WDIOF_SETTIMEOUT, */
+ .firmware_version = 1,
+ .identity = "IPMI"
+};
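+/*
+ * Note: with .options = 0, WDIOC_GETSUPPORT reports no optional
+ * features even though WDIOC_SETTIMEOUT is in fact handled below;
+ * uncommenting WDIOF_SETTIMEOUT would advertise it.
+ */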
+
+static int ipmi_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int i;
+ int val;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ i = copy_to_user(argp, &ident, sizeof(ident));
+ return i ? -EFAULT : 0;
+
+ case WDIOC_SETTIMEOUT:
+ i = copy_from_user(&val, argp, sizeof(int));
+ if (i)
+ return -EFAULT;
+ timeout = val;
+ return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ case WDIOC_GETTIMEOUT:
+ i = copy_to_user(argp, &timeout, sizeof(timeout));
+ if (i)
+ return -EFAULT;
+ return 0;
+
+ case WDIOC_SET_PRETIMEOUT:
+ case WDIOC_SETPRETIMEOUT:
+ i = copy_from_user(&val, argp, sizeof(int));
+ if (i)
+ return -EFAULT;
+ pretimeout = val;
+ return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
+
+ case WDIOC_GET_PRETIMEOUT:
+ case WDIOC_GETPRETIMEOUT:
+ i = copy_to_user(argp, &pretimeout, sizeof(pretimeout));
+ if (i)
+ return -EFAULT;
+ return 0;
+
+ case WDIOC_KEEPALIVE:
+ return ipmi_heartbeat();
+
+ case WDIOC_SETOPTIONS:
+ i = copy_from_user(&val, argp, sizeof(int));
+ if (i)
+ return -EFAULT;
+ if (val & WDIOS_DISABLECARD) {
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ ipmi_start_timer_on_heartbeat = 0;
+ }
+
+ if (val & WDIOS_ENABLECARD) {
+ ipmi_watchdog_state = action_val;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ }
+ return 0;
+
+ case WDIOC_GETSTATUS:
+ val = 0;
+ i = copy_to_user(argp, &val, sizeof(val));
+ if (i)
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static long ipmi_unlocked_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&ipmi_watchdog_mutex);
+ ret = ipmi_ioctl(file, cmd, arg);
+ mutex_unlock(&ipmi_watchdog_mutex);
+
+ return ret;
+}
+
+static ssize_t ipmi_write(struct file *file,
+ const char __user *buf,
+ size_t len,
+ loff_t *ppos)
+{
+ int rv;
+
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ /* In case it was set long ago */
+ expect_close = 0;
+
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, buf + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_close = 42;
+ }
+ }
+ rv = ipmi_heartbeat();
+ if (rv)
+ return rv;
+ }
+ return len;
+}
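+/*
+ * A minimal user-space sketch of driving this device (illustrative
+ * only, error handling omitted; work_to_do() stands in for the
+ * application's own loop condition):
+ *
+ *	int fd = open("/dev/watchdog", O_WRONLY);
+ *	int t = 30;
+ *
+ *	ioctl(fd, WDIOC_SETTIMEOUT, &t);	program a 30s timeout
+ *	while (work_to_do())
+ *		write(fd, "\0", 1);		any write is a heartbeat
+ *	write(fd, "V", 1);			magic close character
+ *	close(fd);				stops the timer unless nowayout
+ */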
+
+static ssize_t ipmi_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int rv = 0;
+ wait_queue_t wait;
+
+ if (count <= 0)
+ return 0;
+
+ /*
+ * Reading returns if the pretimeout has gone off, and it only does
+ * it once per pretimeout.
+ */
+ spin_lock(&ipmi_read_lock);
+ if (!data_to_read) {
+ if (file->f_flags & O_NONBLOCK) {
+ rv = -EAGAIN;
+ goto out;
+ }
+
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&read_q, &wait);
+ while (!data_to_read) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock(&ipmi_read_lock);
+ schedule();
+ spin_lock(&ipmi_read_lock);
+ }
+ remove_wait_queue(&read_q, &wait);
+
+ if (signal_pending(current)) {
+ rv = -ERESTARTSYS;
+ goto out;
+ }
+ }
+ data_to_read = 0;
+
+ out:
+ spin_unlock(&ipmi_read_lock);
+
+ if (rv == 0) {
+ if (copy_to_user(buf, &data_to_read, 1))
+ rv = -EFAULT;
+ else
+ rv = 1;
+ }
+
+ return rv;
+}
+
+static int ipmi_open(struct inode *ino, struct file *filep)
+{
+ switch (iminor(ino)) {
+ case WATCHDOG_MINOR:
+ if (test_and_set_bit(0, &ipmi_wdog_open))
+ return -EBUSY;
+
+
+ /*
+ * Don't start the timer now, let it start on the
+ * first heartbeat.
+ */
+ ipmi_start_timer_on_heartbeat = 1;
+ return nonseekable_open(ino, filep);
+
+ default:
+ return (-ENODEV);
+ }
+}
+
+static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+
+ poll_wait(file, &read_q, wait);
+
+ spin_lock(&ipmi_read_lock);
+ if (data_to_read)
+ mask |= (POLLIN | POLLRDNORM);
+ spin_unlock(&ipmi_read_lock);
+
+ return mask;
+}
+
+static int ipmi_fasync(int fd, struct file *file, int on)
+{
+ int result;
+
+ result = fasync_helper(fd, file, on, &fasync_q);
+
+ return (result);
+}
+
+static int ipmi_close(struct inode *ino, struct file *filep)
+{
+ if (iminor(ino) == WATCHDOG_MINOR) {
+ if (expect_close == 42) {
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ } else {
+ printk(KERN_CRIT PFX
+ "Unexpected close, not stopping watchdog!\n");
+ ipmi_heartbeat();
+ }
+ clear_bit(0, &ipmi_wdog_open);
+ }
+
+ expect_close = 0;
+
+ return 0;
+}
+
+static const struct file_operations ipmi_wdog_fops = {
+ .owner = THIS_MODULE,
+ .read = ipmi_read,
+ .poll = ipmi_poll,
+ .write = ipmi_write,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
+ .open = ipmi_open,
+ .release = ipmi_close,
+ .fasync = ipmi_fasync,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice ipmi_wdog_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &ipmi_wdog_fops
+};
+
+static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
+ void *handler_data)
+{
+ if (msg->msg.data[0] != 0) {
+ printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
+ msg->msg.data[0],
+ msg->msg.cmd);
+ }
+
+ ipmi_free_recv_msg(msg);
+}
+
+static void ipmi_wdog_pretimeout_handler(void *handler_data)
+{
+ if (preaction_val != WDOG_PRETIMEOUT_NONE) {
+ if (preop_val == WDOG_PREOP_PANIC) {
+ if (atomic_inc_and_test(&preop_panic_excl))
+ panic("Watchdog pre-timeout");
+ } else if (preop_val == WDOG_PREOP_GIVE_DATA) {
+ spin_lock(&ipmi_read_lock);
+ data_to_read = 1;
+ wake_up_interruptible(&read_q);
+ kill_fasync(&fasync_q, SIGIO, POLL_IN);
+
+ spin_unlock(&ipmi_read_lock);
+ }
+ }
+
+ /*
+ * On some machines, the heartbeat will give an error and not
+ * work unless we re-enable the timer. So do so.
+ */
+ pretimeout_since_last_heartbeat = 1;
+}
+
+static struct ipmi_user_hndl ipmi_hndlrs = {
+ .ipmi_recv_hndl = ipmi_wdog_msg_handler,
+ .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler
+};
+
+static void ipmi_register_watchdog(int ipmi_intf)
+{
+ int rv = -EBUSY;
+
+ if (watchdog_user)
+ goto out;
+
+ if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf))
+ goto out;
+
+ watchdog_ifnum = ipmi_intf;
+
+ rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
+ if (rv < 0) {
+ printk(KERN_CRIT PFX "Unable to register with ipmi\n");
+ goto out;
+ }
+
+ ipmi_get_version(watchdog_user,
+ &ipmi_version_major,
+ &ipmi_version_minor);
+
+ rv = misc_register(&ipmi_wdog_miscdev);
+ if (rv < 0) {
+ ipmi_destroy_user(watchdog_user);
+ watchdog_user = NULL;
+ printk(KERN_CRIT PFX "Unable to register misc device\n");
+ }
+
+#ifdef HAVE_DIE_NMI
+ if (nmi_handler_registered) {
+ int old_pretimeout = pretimeout;
+ int old_timeout = timeout;
+ int old_preop_val = preop_val;
+
+ /*
+ * Set the pretimeout to go off in a second and give
+ * ourselves plenty of time to stop the timer.
+ */
+ ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+ preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */
+ pretimeout = 99;
+ timeout = 100;
+
+ testing_nmi = 1;
+
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ if (rv) {
+ printk(KERN_WARNING PFX "Error starting timer to"
+ " test NMI: 0x%x. The NMI pretimeout will"
+ " likely not work\n", rv);
+ rv = 0;
+ goto out_restore;
+ }
+
+ msleep(1500);
+
+ if (testing_nmi != 2) {
+ printk(KERN_WARNING PFX "IPMI NMI didn't seem to"
+ " occur. The NMI pretimeout will"
+ " likely not work\n");
+ }
+ out_restore:
+ testing_nmi = 0;
+ preop_val = old_preop_val;
+ pretimeout = old_pretimeout;
+ timeout = old_timeout;
+ }
+#endif
+
+ out:
+ if ((start_now) && (rv == 0)) {
+ /* Run from startup, so start the timer now. */
+ start_now = 0; /* Disable this function after first startup. */
+ ipmi_watchdog_state = action_val;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
+ printk(KERN_INFO PFX "Starting now!\n");
+ } else {
+ /* Stop the timer now. */
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ }
+}
+
+static void ipmi_unregister_watchdog(int ipmi_intf)
+{
+ int rv;
+
+ if (!watchdog_user)
+ goto out;
+
+ if (watchdog_ifnum != ipmi_intf)
+ goto out;
+
+ /* Make sure no one can call us any more. */
+ misc_deregister(&ipmi_wdog_miscdev);
+
+ /*
+	 * Wait to make sure the message makes it out.  The lower layer has
+	 * pointers to our buffers; we want to make sure they are done before
+	 * we release our memory.
+ */
+ while (atomic_read(&set_timeout_tofree))
+ schedule_timeout_uninterruptible(1);
+
+ /* Disconnect from IPMI. */
+ rv = ipmi_destroy_user(watchdog_user);
+ if (rv) {
+ printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
+ rv);
+ }
+ watchdog_user = NULL;
+
+ out:
+ return;
+}
+
+#ifdef HAVE_DIE_NMI
+static int
+ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct die_args *args = data;
+
+ if (val != DIE_NMIUNKNOWN)
+ return NOTIFY_OK;
+
+ /* Hack, if it's a memory or I/O error, ignore it. */
+ if (args->err & 0xc0)
+ return NOTIFY_OK;
+
+ /*
+ * If we get here, it's an NMI that's not a memory or I/O
+ * error. We can't truly tell if it's from IPMI or not
+ * without sending a message, and sending a message is almost
+ * impossible because of locking.
+ */
+
+ if (testing_nmi) {
+ testing_nmi = 2;
+ return NOTIFY_STOP;
+ }
+
+ /* If we are not expecting a timeout, ignore it. */
+ if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
+ return NOTIFY_OK;
+
+ if (preaction_val != WDOG_PRETIMEOUT_NMI)
+ return NOTIFY_OK;
+
+ /*
+ * If no one else handled the NMI, we assume it was the IPMI
+ * watchdog.
+ */
+ if (preop_val == WDOG_PREOP_PANIC) {
+ /* On some machines, the heartbeat will give
+ an error and not work unless we re-enable
+ the timer. So do so. */
+ pretimeout_since_last_heartbeat = 1;
+ if (atomic_inc_and_test(&preop_panic_excl))
+ panic(PFX "pre-timeout");
+ }
+
+ return NOTIFY_STOP;
+}
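+/*
+ * The testing_nmi handshake: ipmi_register_watchdog() sets
+ * testing_nmi = 1 and programs timeout=100/pretimeout=99, so the NMI
+ * pretimeout fires about one second after the timer starts.  If this
+ * handler sees the NMI it bumps testing_nmi to 2, which the
+ * msleep(1500) in the registration path then checks to decide whether
+ * NMI pretimeouts actually reach us on this platform.
+ */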
+
+static struct notifier_block ipmi_nmi_handler = {
+ .notifier_call = ipmi_nmi
+};
+#endif
+
+static int wdog_reboot_handler(struct notifier_block *this,
+ unsigned long code,
+ void *unused)
+{
+ static int reboot_event_handled;
+
+ if ((watchdog_user) && (!reboot_event_handled)) {
+ /* Make sure we only do this once. */
+ reboot_event_handled = 1;
+
+ if (code == SYS_POWER_OFF || code == SYS_HALT) {
+ /* Disable the WDT if we are shutting down. */
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ panic_halt_ipmi_set_timeout();
+ } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+			/* Set a long timer to let the reboot happen, or
+			   reboot if it hangs, but only if the watchdog
+ timer was already running. */
+ timeout = 120;
+ pretimeout = 0;
+ ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+ panic_halt_ipmi_set_timeout();
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block wdog_reboot_notifier = {
+ .notifier_call = wdog_reboot_handler,
+ .next = NULL,
+ .priority = 0
+};
+
+static int wdog_panic_handler(struct notifier_block *this,
+ unsigned long event,
+ void *unused)
+{
+ static int panic_event_handled;
+
+	/* On a panic, if the watchdog timer is running, extend it to a
+	   reasonable value so the panic has time to complete.  The
+	   pretimeout is meaningless at panic time, so clear it too. */
+ if (watchdog_user && !panic_event_handled &&
+ ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+ /* Make sure we do this only once. */
+ panic_event_handled = 1;
+
+ timeout = 255;
+ pretimeout = 0;
+ panic_halt_ipmi_set_timeout();
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block wdog_panic_notifier = {
+ .notifier_call = wdog_panic_handler,
+ .next = NULL,
+ .priority = 150 /* priority: INT_MAX >= x >= 0 */
+};
+
+
+static void ipmi_new_smi(int if_num, struct device *device)
+{
+ ipmi_register_watchdog(if_num);
+}
+
+static void ipmi_smi_gone(int if_num)
+{
+ ipmi_unregister_watchdog(if_num);
+}
+
+static struct ipmi_smi_watcher smi_watcher = {
+ .owner = THIS_MODULE,
+ .new_smi = ipmi_new_smi,
+ .smi_gone = ipmi_smi_gone
+};
+
+static int action_op(const char *inval, char *outval)
+{
+ if (outval)
+ strcpy(outval, action);
+
+ if (!inval)
+ return 0;
+
+ if (strcmp(inval, "reset") == 0)
+ action_val = WDOG_TIMEOUT_RESET;
+ else if (strcmp(inval, "none") == 0)
+ action_val = WDOG_TIMEOUT_NONE;
+ else if (strcmp(inval, "power_cycle") == 0)
+ action_val = WDOG_TIMEOUT_POWER_CYCLE;
+ else if (strcmp(inval, "power_off") == 0)
+ action_val = WDOG_TIMEOUT_POWER_DOWN;
+ else
+ return -EINVAL;
+ strcpy(action, inval);
+ return 0;
+}
+
+static int preaction_op(const char *inval, char *outval)
+{
+ if (outval)
+ strcpy(outval, preaction);
+
+ if (!inval)
+ return 0;
+
+ if (strcmp(inval, "pre_none") == 0)
+ preaction_val = WDOG_PRETIMEOUT_NONE;
+ else if (strcmp(inval, "pre_smi") == 0)
+ preaction_val = WDOG_PRETIMEOUT_SMI;
+#ifdef HAVE_DIE_NMI
+ else if (strcmp(inval, "pre_nmi") == 0)
+ preaction_val = WDOG_PRETIMEOUT_NMI;
+#endif
+ else if (strcmp(inval, "pre_int") == 0)
+ preaction_val = WDOG_PRETIMEOUT_MSG_INT;
+ else
+ return -EINVAL;
+ strcpy(preaction, inval);
+ return 0;
+}
+
+static int preop_op(const char *inval, char *outval)
+{
+ if (outval)
+ strcpy(outval, preop);
+
+ if (!inval)
+ return 0;
+
+ if (strcmp(inval, "preop_none") == 0)
+ preop_val = WDOG_PREOP_NONE;
+ else if (strcmp(inval, "preop_panic") == 0)
+ preop_val = WDOG_PREOP_PANIC;
+ else if (strcmp(inval, "preop_give_data") == 0)
+ preop_val = WDOG_PREOP_GIVE_DATA;
+ else
+ return -EINVAL;
+ strcpy(preop, inval);
+ return 0;
+}
+
+static void check_parms(void)
+{
+#ifdef HAVE_DIE_NMI
+ int do_nmi = 0;
+ int rv;
+
+ if (preaction_val == WDOG_PRETIMEOUT_NMI) {
+ do_nmi = 1;
+ if (preop_val == WDOG_PREOP_GIVE_DATA) {
+ printk(KERN_WARNING PFX "Pretimeout op is to give data"
+ " but NMI pretimeout is enabled, setting"
+ " pretimeout op to none\n");
+ preop_op("preop_none", NULL);
+ do_nmi = 0;
+ }
+ }
+ if (do_nmi && !nmi_handler_registered) {
+ rv = register_die_notifier(&ipmi_nmi_handler);
+ if (rv) {
+ printk(KERN_WARNING PFX
+ "Can't register nmi handler\n");
+ return;
+ } else
+ nmi_handler_registered = 1;
+ } else if (!do_nmi && nmi_handler_registered) {
+ unregister_die_notifier(&ipmi_nmi_handler);
+ nmi_handler_registered = 0;
+ }
+#endif
+}
+
+static int __init ipmi_wdog_init(void)
+{
+ int rv;
+
+ if (action_op(action, NULL)) {
+ action_op("reset", NULL);
+ printk(KERN_INFO PFX "Unknown action '%s', defaulting to"
+ " reset\n", action);
+ }
+
+ if (preaction_op(preaction, NULL)) {
+ preaction_op("pre_none", NULL);
+ printk(KERN_INFO PFX "Unknown preaction '%s', defaulting to"
+ " none\n", preaction);
+ }
+
+ if (preop_op(preop, NULL)) {
+ preop_op("preop_none", NULL);
+ printk(KERN_INFO PFX "Unknown preop '%s', defaulting to"
+ " none\n", preop);
+ }
+
+ check_parms();
+
+ register_reboot_notifier(&wdog_reboot_notifier);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &wdog_panic_notifier);
+
+ rv = ipmi_smi_watcher_register(&smi_watcher);
+ if (rv) {
+#ifdef HAVE_DIE_NMI
+ if (nmi_handler_registered)
+ unregister_die_notifier(&ipmi_nmi_handler);
+#endif
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &wdog_panic_notifier);
+ unregister_reboot_notifier(&wdog_reboot_notifier);
+ printk(KERN_WARNING PFX "can't register smi watcher\n");
+ return rv;
+ }
+
+ printk(KERN_INFO PFX "driver initialized\n");
+
+ return 0;
+}
+
+static void __exit ipmi_wdog_exit(void)
+{
+ ipmi_smi_watcher_unregister(&smi_watcher);
+ ipmi_unregister_watchdog(watchdog_ifnum);
+
+#ifdef HAVE_DIE_NMI
+ if (nmi_handler_registered)
+ unregister_die_notifier(&ipmi_nmi_handler);
+#endif
+
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &wdog_panic_notifier);
+ unregister_reboot_notifier(&wdog_reboot_notifier);
+}
+module_exit(ipmi_wdog_exit);
+module_init(ipmi_wdog_init);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+MODULE_DESCRIPTION("watchdog timer based upon the IPMI interface.");
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
new file mode 100644
index 00000000..97c3edb9
--- /dev/null
+++ b/drivers/char/lp.c
@@ -0,0 +1,1062 @@
+/*
+ * Generic parallel printer driver
+ *
+ * Copyright (C) 1992 by Jim Weigand and Linus Torvalds
+ * Copyright (C) 1992,1993 by Michael K. Johnson
+ * - Thanks much to Gunter Windau for pointing out to me where the error
+ * checking ought to be.
+ * Copyright (C) 1993 by Nigel Gamble (added interrupt code)
+ * Copyright (C) 1994 by Alan Cox (Modularised it)
+ * LPCAREFUL, LPABORT, LPGETSTATUS added by Chris Metcalf, metcalf@lcs.mit.edu
+ * Statistics and support for slow printers by Rob Janssen, rob@knoware.nl
+ * "lp=" command line parameters added by Grant Guenther, grant@torque.net
+ * lp_read (Status readback) support added by Carsten Gross,
+ * carsten@sol.wohnheim.uni-ulm.de
+ * Support for parport by Philip Blundell <philb@gnu.org>
+ * Parport sharing hacking by Andrea Arcangeli
+ * Fixed kernel_(to/from)_user memory copy to check for errors
+ * by Riccardo Facchetti <fizban@tin.it>
+ * 22-JAN-1998 Added support for devfs Richard Gooch <rgooch@atnf.csiro.au>
+ * Redesigned interrupt handling to handle printers with a buggy handshake
+ * by Andrea Arcangeli, 11 May 1998
+ * Full, efficient handling of printers with a buggy irq handshake (now I have
+ * understood the meaning of the strange handshake). This is done by sending
+ * new characters if the interrupt has just happened, even if the printer
+ * claims to still be BUSY. This is needed at least with the Epson Stylus
+ * Color. To enable the new TRUST_IRQ mode read the `LP OPTIMIZATION' section
+ * below...
+ * Fixed the irq on the rising edge of the strobe case.
+ * Obsoleted the CAREFUL flag since a printer that doesn't work with
+ * CAREFUL will block a bit later in lp_check_status().
+ * Andrea Arcangeli, 15 Oct 1998
+ * Obsoleted and removed all the lowlevel stuff implemented in the last
+ * month in favour of the IEEE1284 functions (which handle the _new_
+ * compatibility mode fine).
+ */
+
+/* This driver should, in theory, work with any parallel port that has an
+ * appropriate low-level driver; all I/O is done through the parport
+ * abstraction layer.
+ *
+ * If this driver is built into the kernel, you can configure it using the
+ * kernel command-line. For example:
+ *
+ * lp=parport1,none,parport2 (bind lp0 to parport1, disable lp1 and
+ * bind lp2 to parport2)
+ *
+ * lp=auto (assign lp devices to all ports that
+ * have printers attached, as determined
+ * by the IEEE-1284 autoprobe)
+ *
+ * lp=reset (reset the printer during
+ * initialisation)
+ *
+ * lp=off (disable the printer driver entirely)
+ *
+ * If the driver is loaded as a module, similar functionality is available
+ * using module parameters. The equivalent of the above commands would be:
+ *
+ * # insmod lp.o parport=1,none,2
+ *
+ * # insmod lp.o parport=auto
+ *
+ * # insmod lp.o reset=1
+ */
+
+/* COMPATIBILITY WITH OLD KERNELS
+ *
+ * Under Linux 2.0 and previous versions, lp devices were bound to ports at
+ * particular I/O addresses, as follows:
+ *
+ * lp0 0x3bc
+ * lp1 0x378
+ * lp2 0x278
+ *
+ * The new driver, by default, binds lp devices to parport devices as it
+ * finds them. This means that if you only have one port, it will be bound
+ * to lp0 regardless of its I/O address. If you need the old behaviour, you
+ * can force it using the parameters described above.
+ */
+
+/*
+ * The new interrupt handling code takes care of the buggy handshake
+ * of some HP and Epson printers:
+ * ___
+ * ACK _______________ ___________
+ * |__|
+ * ____
+ * BUSY _________ _______
+ * |____________|
+ *
+ * I discovered this using the printer scanner that you can find at:
+ *
+ * ftp://e-mind.com/pub/linux/pscan/
+ *
+ * 11 May 98, Andrea Arcangeli
+ *
+ * A run of my printer scanner on an Epson Stylus Color shows that such
+ * printers generate the irq on the _rising_ edge of the STROBE. Now lp
+ * handles this case fine too.
+ *
+ * 15 Oct 1998, Andrea Arcangeli
+ *
+ * The so-called `buggy' handshake is really the well-documented
+ * compatibility mode IEEE1284 handshake. They changed the well-known
+ * Centronics handshake, acking in the middle of busy, expecting not to
+ * break drivers or legacy applications, while they broke linux lp
+ * until I fixed it by reverse engineering the protocol by hand some
+ * months ago...
+ *
+ * 14 Dec 1998, Andrea Arcangeli
+ *
+ * Copyright (C) 2000 by Tim Waugh (added LPSETTIMEOUT ioctl)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/poll.h>
+#include <linux/console.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/compat.h>
+
+#include <linux/parport.h>
+#undef LP_STATS
+#include <linux/lp.h>
+
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+/* if you have more than 8 printers, remember to increase LP_NO */
+#define LP_NO 8
+
+static DEFINE_MUTEX(lp_mutex);
+static struct lp_struct lp_table[LP_NO];
+
+static unsigned int lp_count = 0;
+static struct class *lp_class;
+
+#ifdef CONFIG_LP_CONSOLE
+static struct parport *console_registered;
+#endif /* CONFIG_LP_CONSOLE */
+
+#undef LP_DEBUG
+
+/* Bits used to manage claiming the parport device */
+#define LP_PREEMPT_REQUEST 1
+#define LP_PARPORT_CLAIMED 2
+
+/* --- low-level port access ----------------------------------- */
+
+#define r_dtr(x) (parport_read_data(lp_table[(x)].dev->port))
+#define r_str(x) (parport_read_status(lp_table[(x)].dev->port))
+#define w_ctr(x,y) do { parport_write_control(lp_table[(x)].dev->port, (y)); } while (0)
+#define w_dtr(x,y) do { parport_write_data(lp_table[(x)].dev->port, (y)); } while (0)
+
+/* Claim the parport or block trying unless we've already claimed it */
+static void lp_claim_parport_or_block(struct lp_struct *this_lp)
+{
+ if (!test_and_set_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) {
+ parport_claim_or_block (this_lp->dev);
+ }
+}
+
+/* Release the parport if we have previously claimed it */
+static void lp_release_parport(struct lp_struct *this_lp)
+{
+ if (test_and_clear_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) {
+ parport_release (this_lp->dev);
+ }
+}
+
+
+
+static int lp_preempt(void *handle)
+{
+ struct lp_struct *this_lp = (struct lp_struct *)handle;
+ set_bit(LP_PREEMPT_REQUEST, &this_lp->bits);
+ return (1);
+}
+
+
+/*
+ * Try to negotiate to a new mode; if unsuccessful negotiate to
+ * compatibility mode. Return the mode we ended up in.
+ */
+static int lp_negotiate(struct parport * port, int mode)
+{
+ if (parport_negotiate (port, mode) != 0) {
+ mode = IEEE1284_MODE_COMPAT;
+ parport_negotiate (port, mode);
+ }
+
+ return (mode);
+}
+
+static int lp_reset(int minor)
+{
+ int retval;
+ lp_claim_parport_or_block (&lp_table[minor]);
+ w_ctr(minor, LP_PSELECP);
+ udelay (LP_DELAY);
+ w_ctr(minor, LP_PSELECP | LP_PINITP);
+ retval = r_str(minor);
+ lp_release_parport (&lp_table[minor]);
+ return retval;
+}
+
+static void lp_error (int minor)
+{
+ DEFINE_WAIT(wait);
+ int polling;
+
+ if (LP_F(minor) & LP_ABORT)
+ return;
+
+ polling = lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE;
+ if (polling) lp_release_parport (&lp_table[minor]);
+ prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE);
+ schedule_timeout(LP_TIMEOUT_POLLED);
+ finish_wait(&lp_table[minor].waitq, &wait);
+ if (polling) lp_claim_parport_or_block (&lp_table[minor]);
+ else parport_yield_blocking (lp_table[minor].dev);
+}
+
+static int lp_check_status(int minor)
+{
+ int error = 0;
+ unsigned int last = lp_table[minor].last_error;
+ unsigned char status = r_str(minor);
+ if ((status & LP_PERRORP) && !(LP_F(minor) & LP_CAREFUL))
+ /* No error. */
+ last = 0;
+ else if ((status & LP_POUTPA)) {
+ if (last != LP_POUTPA) {
+ last = LP_POUTPA;
+ printk(KERN_INFO "lp%d out of paper\n", minor);
+ }
+ error = -ENOSPC;
+ } else if (!(status & LP_PSELECD)) {
+ if (last != LP_PSELECD) {
+ last = LP_PSELECD;
+ printk(KERN_INFO "lp%d off-line\n", minor);
+ }
+ error = -EIO;
+ } else if (!(status & LP_PERRORP)) {
+ if (last != LP_PERRORP) {
+ last = LP_PERRORP;
+ printk(KERN_INFO "lp%d on fire\n", minor);
+ }
+ error = -EIO;
+ } else {
+ last = 0; /* Come here if LP_CAREFUL is set and no
+ errors are reported. */
+ }
+
+ lp_table[minor].last_error = last;
+
+ if (last != 0)
+ lp_error(minor);
+
+ return error;
+}
+
+static int lp_wait_ready(int minor, int nonblock)
+{
+ int error = 0;
+
+ /* If we're not in compatibility mode, we're ready now! */
+ if (lp_table[minor].current_mode != IEEE1284_MODE_COMPAT) {
+ return (0);
+ }
+
+ do {
+ error = lp_check_status (minor);
+ if (error && (nonblock || (LP_F(minor) & LP_ABORT)))
+ break;
+ if (signal_pending (current)) {
+ error = -EINTR;
+ break;
+ }
+ } while (error);
+ return error;
+}
+
+static ssize_t lp_write(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ struct parport *port = lp_table[minor].dev->port;
+ char *kbuf = lp_table[minor].lp_buffer;
+ ssize_t retv = 0;
+ ssize_t written;
+ size_t copy_size = count;
+ int nonblock = ((file->f_flags & O_NONBLOCK) ||
+ (LP_F(minor) & LP_ABORT));
+
+#ifdef LP_STATS
+ if (time_after(jiffies, lp_table[minor].lastcall + LP_TIME(minor)))
+ lp_table[minor].runchars = 0;
+
+ lp_table[minor].lastcall = jiffies;
+#endif
+
+ /* Need to copy the data from user-space. */
+ if (copy_size > LP_BUFFER_SIZE)
+ copy_size = LP_BUFFER_SIZE;
+
+ if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
+ return -EINTR;
+
+ if (copy_from_user (kbuf, buf, copy_size)) {
+ retv = -EFAULT;
+ goto out_unlock;
+ }
+
+ /* Claim Parport or sleep until it becomes available
+ */
+ lp_claim_parport_or_block (&lp_table[minor]);
+ /* Go to the proper mode. */
+ lp_table[minor].current_mode = lp_negotiate (port,
+ lp_table[minor].best_mode);
+
+ parport_set_timeout (lp_table[minor].dev,
+ (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK
+ : lp_table[minor].timeout));
+
+ if ((retv = lp_wait_ready (minor, nonblock)) == 0)
+ do {
+ /* Write the data. */
+ written = parport_write (port, kbuf, copy_size);
+ if (written > 0) {
+ copy_size -= written;
+ count -= written;
+ buf += written;
+ retv += written;
+ }
+
+ if (signal_pending (current)) {
+ if (retv == 0)
+ retv = -EINTR;
+
+ break;
+ }
+
+ if (copy_size > 0) {
+ /* incomplete write -> check error ! */
+ int error;
+
+ parport_negotiate (lp_table[minor].dev->port,
+ IEEE1284_MODE_COMPAT);
+ lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
+
+ error = lp_wait_ready (minor, nonblock);
+
+ if (error) {
+ if (retv == 0)
+ retv = error;
+ break;
+ } else if (nonblock) {
+ if (retv == 0)
+ retv = -EAGAIN;
+ break;
+ }
+
+ parport_yield_blocking (lp_table[minor].dev);
+ lp_table[minor].current_mode
+ = lp_negotiate (port,
+ lp_table[minor].best_mode);
+
+ } else if (need_resched())
+ schedule ();
+
+ if (count) {
+ copy_size = count;
+ if (copy_size > LP_BUFFER_SIZE)
+ copy_size = LP_BUFFER_SIZE;
+
+ if (copy_from_user(kbuf, buf, copy_size)) {
+ if (retv == 0)
+ retv = -EFAULT;
+ break;
+ }
+ }
+ } while (count > 0);
+
+ if (test_and_clear_bit(LP_PREEMPT_REQUEST,
+ &lp_table[minor].bits)) {
+ printk(KERN_INFO "lp%d releasing parport\n", minor);
+ parport_negotiate (lp_table[minor].dev->port,
+ IEEE1284_MODE_COMPAT);
+ lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
+ lp_release_parport (&lp_table[minor]);
+ }
+out_unlock:
+ mutex_unlock(&lp_table[minor].port_mutex);
+
+ return retv;
+}
+
+#ifdef CONFIG_PARPORT_1284
+
+/* Status readback conforming to ieee1284 */
+static ssize_t lp_read(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ DEFINE_WAIT(wait);
+ unsigned int minor=iminor(file->f_path.dentry->d_inode);
+ struct parport *port = lp_table[minor].dev->port;
+ ssize_t retval = 0;
+ char *kbuf = lp_table[minor].lp_buffer;
+ int nonblock = ((file->f_flags & O_NONBLOCK) ||
+ (LP_F(minor) & LP_ABORT));
+
+ if (count > LP_BUFFER_SIZE)
+ count = LP_BUFFER_SIZE;
+
+ if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
+ return -EINTR;
+
+ lp_claim_parport_or_block (&lp_table[minor]);
+
+ parport_set_timeout (lp_table[minor].dev,
+ (nonblock ? PARPORT_INACTIVITY_O_NONBLOCK
+ : lp_table[minor].timeout));
+
+ parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
+ if (parport_negotiate (lp_table[minor].dev->port,
+ IEEE1284_MODE_NIBBLE)) {
+ retval = -EIO;
+ goto out;
+ }
+
+ while (retval == 0) {
+ retval = parport_read (port, kbuf, count);
+
+ if (retval > 0)
+ break;
+
+ if (nonblock) {
+ retval = -EAGAIN;
+ break;
+ }
+
+ /* Wait for data. */
+
+ if (lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE) {
+ parport_negotiate (lp_table[minor].dev->port,
+ IEEE1284_MODE_COMPAT);
+ lp_error (minor);
+ if (parport_negotiate (lp_table[minor].dev->port,
+ IEEE1284_MODE_NIBBLE)) {
+ retval = -EIO;
+ goto out;
+ }
+ } else {
+ prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE);
+ schedule_timeout(LP_TIMEOUT_POLLED);
+ finish_wait(&lp_table[minor].waitq, &wait);
+ }
+
+ if (signal_pending (current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ cond_resched ();
+ }
+ parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
+ out:
+ lp_release_parport (&lp_table[minor]);
+
+ if (retval > 0 && copy_to_user (buf, kbuf, retval))
+ retval = -EFAULT;
+
+ mutex_unlock(&lp_table[minor].port_mutex);
+
+ return retval;
+}
+
+#endif /* IEEE 1284 support */
+
+static int lp_open(struct inode * inode, struct file * file)
+{
+ unsigned int minor = iminor(inode);
+ int ret = 0;
+
+ mutex_lock(&lp_mutex);
+ if (minor >= LP_NO) {
+ ret = -ENXIO;
+ goto out;
+ }
+ if ((LP_F(minor) & LP_EXIST) == 0) {
+ ret = -ENXIO;
+ goto out;
+ }
+ if (test_and_set_bit(LP_BUSY_BIT_POS, &LP_F(minor))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ /* If ABORTOPEN is set and the printer is offline or out of paper,
+ we may still want to open it to perform ioctl()s. Therefore we
+ have commandeered O_NONBLOCK, even though it is being used in
+ a non-standard manner. This is strictly a Linux hack, and
+ should most likely only ever be used by the tunelp application. */
+ if ((LP_F(minor) & LP_ABORTOPEN) && !(file->f_flags & O_NONBLOCK)) {
+ int status;
+ lp_claim_parport_or_block (&lp_table[minor]);
+ status = r_str(minor);
+ lp_release_parport (&lp_table[minor]);
+ if (status & LP_POUTPA) {
+ printk(KERN_INFO "lp%d out of paper\n", minor);
+ LP_F(minor) &= ~LP_BUSY;
+ ret = -ENOSPC;
+ goto out;
+ } else if (!(status & LP_PSELECD)) {
+ printk(KERN_INFO "lp%d off-line\n", minor);
+ LP_F(minor) &= ~LP_BUSY;
+ ret = -EIO;
+ goto out;
+ } else if (!(status & LP_PERRORP)) {
+ printk(KERN_ERR "lp%d printer error\n", minor);
+ LP_F(minor) &= ~LP_BUSY;
+ ret = -EIO;
+ goto out;
+ }
+ }
+ lp_table[minor].lp_buffer = kmalloc(LP_BUFFER_SIZE, GFP_KERNEL);
+ if (!lp_table[minor].lp_buffer) {
+ LP_F(minor) &= ~LP_BUSY;
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* Determine if the peripheral supports ECP mode */
+ lp_claim_parport_or_block (&lp_table[minor]);
+ if ( (lp_table[minor].dev->port->modes & PARPORT_MODE_ECP) &&
+ !parport_negotiate (lp_table[minor].dev->port,
+ IEEE1284_MODE_ECP)) {
+ printk (KERN_INFO "lp%d: ECP mode\n", minor);
+ lp_table[minor].best_mode = IEEE1284_MODE_ECP;
+ } else {
+ lp_table[minor].best_mode = IEEE1284_MODE_COMPAT;
+ }
+ /* Leave peripheral in compatibility mode */
+ parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
+ lp_release_parport (&lp_table[minor]);
+ lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
+out:
+ mutex_unlock(&lp_mutex);
+ return ret;
+}
+
+static int lp_release(struct inode * inode, struct file * file)
+{
+ unsigned int minor = iminor(inode);
+
+ lp_claim_parport_or_block (&lp_table[minor]);
+ parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
+ lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
+ lp_release_parport (&lp_table[minor]);
+ kfree(lp_table[minor].lp_buffer);
+ lp_table[minor].lp_buffer = NULL;
+ LP_F(minor) &= ~LP_BUSY;
+ return 0;
+}
+
+static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
+ unsigned long arg, void __user *argp)
+{
+ int status;
+ int retval = 0;
+
+#ifdef LP_DEBUG
+ printk(KERN_DEBUG "lp%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
+#endif
+ if (minor >= LP_NO)
+ return -ENODEV;
+ if ((LP_F(minor) & LP_EXIST) == 0)
+ return -ENODEV;
+ switch ( cmd ) {
+ case LPTIME:
+ LP_TIME(minor) = arg * HZ/100;
+ break;
+ case LPCHAR:
+ LP_CHAR(minor) = arg;
+ break;
+ case LPABORT:
+ if (arg)
+ LP_F(minor) |= LP_ABORT;
+ else
+ LP_F(minor) &= ~LP_ABORT;
+ break;
+ case LPABORTOPEN:
+ if (arg)
+ LP_F(minor) |= LP_ABORTOPEN;
+ else
+ LP_F(minor) &= ~LP_ABORTOPEN;
+ break;
+ case LPCAREFUL:
+ if (arg)
+ LP_F(minor) |= LP_CAREFUL;
+ else
+ LP_F(minor) &= ~LP_CAREFUL;
+ break;
+ case LPWAIT:
+ LP_WAIT(minor) = arg;
+ break;
+ case LPSETIRQ:
+ return -EINVAL;
+ break;
+ case LPGETIRQ:
+ if (copy_to_user(argp, &LP_IRQ(minor),
+ sizeof(int)))
+ return -EFAULT;
+ break;
+ case LPGETSTATUS:
+ lp_claim_parport_or_block (&lp_table[minor]);
+ status = r_str(minor);
+ lp_release_parport (&lp_table[minor]);
+
+ if (copy_to_user(argp, &status, sizeof(int)))
+ return -EFAULT;
+ break;
+ case LPRESET:
+ lp_reset(minor);
+ break;
+#ifdef LP_STATS
+ case LPGETSTATS:
+ if (copy_to_user(argp, &LP_STAT(minor),
+ sizeof(struct lp_stats)))
+ return -EFAULT;
+ if (capable(CAP_SYS_ADMIN))
+ memset(&LP_STAT(minor), 0,
+ sizeof(struct lp_stats));
+ break;
+#endif
+ case LPGETFLAGS:
+ status = LP_F(minor);
+ if (copy_to_user(argp, &status, sizeof(int)))
+ return -EFAULT;
+ break;
+
+ default:
+ retval = -EINVAL;
+ }
+ return retval;
+}
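+/*
+ * Example user-space use of these ioctls (illustrative only), in the
+ * style of the tunelp utility:
+ *
+ *	int fd = open("/dev/lp0", O_WRONLY | O_NONBLOCK);
+ *	int status;
+ *
+ *	ioctl(fd, LPGETSTATUS, &status);	raw status byte
+ *	ioctl(fd, LPABORT, 0);			keep retrying on errors
+ *
+ * O_NONBLOCK matters here: as noted in lp_open(), it bypasses the
+ * ABORTOPEN checks so a wedged printer can still be interrogated.
+ */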
+
+static int lp_set_timeout(unsigned int minor, struct timeval *par_timeout)
+{
+ long to_jiffies;
+
+ /* Convert to jiffies, place in lp_table */
+ if ((par_timeout->tv_sec < 0) ||
+ (par_timeout->tv_usec < 0)) {
+ return -EINVAL;
+ }
+ to_jiffies = DIV_ROUND_UP(par_timeout->tv_usec, 1000000/HZ);
+ to_jiffies += par_timeout->tv_sec * (long) HZ;
+ if (to_jiffies <= 0) {
+ return -EINVAL;
+ }
+ lp_table[minor].timeout = to_jiffies;
+ return 0;
+}
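+/*
+ * Worked example: with HZ=100 a timeval of { .tv_sec = 5,
+ * .tv_usec = 500000 } converts to DIV_ROUND_UP(500000, 10000) = 50
+ * jiffies plus 5 * 100 = 500, i.e. a 550-jiffy (5.5 second) port
+ * timeout.
+ */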
+
+static long lp_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned int minor;
+ struct timeval par_timeout;
+ int ret;
+
+ minor = iminor(file->f_path.dentry->d_inode);
+ mutex_lock(&lp_mutex);
+ switch (cmd) {
+ case LPSETTIMEOUT:
+ if (copy_from_user(&par_timeout, (void __user *)arg,
+ sizeof (struct timeval))) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = lp_set_timeout(minor, &par_timeout);
+ break;
+ default:
+ ret = lp_do_ioctl(minor, cmd, arg, (void __user *)arg);
+ break;
+ }
+ mutex_unlock(&lp_mutex);
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long lp_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned int minor;
+ struct timeval par_timeout;
+ struct compat_timeval __user *tc;
+ int ret;
+
+ minor = iminor(file->f_path.dentry->d_inode);
+ mutex_lock(&lp_mutex);
+ switch (cmd) {
+ case LPSETTIMEOUT:
+ tc = compat_ptr(arg);
+ if (get_user(par_timeout.tv_sec, &tc->tv_sec) ||
+ get_user(par_timeout.tv_usec, &tc->tv_usec)) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = lp_set_timeout(minor, &par_timeout);
+ break;
+#ifdef LP_STATS
+ case LPGETSTATS:
+ /* FIXME: add an implementation if you set LP_STATS */
+ ret = -EINVAL;
+ break;
+#endif
+ default:
+ ret = lp_do_ioctl(minor, cmd, arg, compat_ptr(arg));
+ break;
+ }
+ mutex_unlock(&lp_mutex);
+
+ return ret;
+}
+#endif
+
+static const struct file_operations lp_fops = {
+ .owner = THIS_MODULE,
+ .write = lp_write,
+ .unlocked_ioctl = lp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lp_compat_ioctl,
+#endif
+ .open = lp_open,
+ .release = lp_release,
+#ifdef CONFIG_PARPORT_1284
+ .read = lp_read,
+#endif
+ .llseek = noop_llseek,
+};
+
+/* --- support for console on the line printer ----------------- */
+
+#ifdef CONFIG_LP_CONSOLE
+
+#define CONSOLE_LP 0
+
+/* If the printer is out of paper, we can either lose the messages or
+ * stall until the printer is happy again. Define CONSOLE_LP_STRICT
+ * non-zero to get the latter behaviour. */
+#define CONSOLE_LP_STRICT 1
+
+/* The console must be locked when we get here. */
+
+static void lp_console_write (struct console *co, const char *s,
+ unsigned count)
+{
+ struct pardevice *dev = lp_table[CONSOLE_LP].dev;
+ struct parport *port = dev->port;
+ ssize_t written;
+
+ if (parport_claim (dev))
+ /* Nothing we can do. */
+ return;
+
+ parport_set_timeout (dev, 0);
+
+ /* Go to compatibility mode. */
+ parport_negotiate (port, IEEE1284_MODE_COMPAT);
+
+ do {
+ /* Write the data, converting LF->CRLF as we go. */
+ ssize_t canwrite = count;
+ char *lf = memchr (s, '\n', count);
+ if (lf)
+ canwrite = lf - s;
+
+ if (canwrite > 0) {
+ written = parport_write (port, s, canwrite);
+
+ if (written <= 0)
+ continue;
+
+ s += written;
+ count -= written;
+ canwrite -= written;
+ }
+
+ if (lf && canwrite <= 0) {
+ const char *crlf = "\r\n";
+ int i = 2;
+
+ /* Dodge the original '\n', and put '\r\n' instead. */
+ s++;
+ count--;
+ do {
+ written = parport_write (port, crlf, i);
+ if (written > 0)
+ i -= written, crlf += written;
+ } while (i > 0 && (CONSOLE_LP_STRICT || written > 0));
+ }
+ } while (count > 0 && (CONSOLE_LP_STRICT || written > 0));
+
+ parport_release (dev);
+}
+
+static struct console lpcons = {
+ .name = "lp",
+ .write = lp_console_write,
+ .flags = CON_PRINTBUFFER,
+};
+
+#endif /* console on line printer */
+
+/* --- initialisation code ------------------------------------- */
+
+static int parport_nr[LP_NO] = { [0 ... LP_NO-1] = LP_PARPORT_UNSPEC };
+static char *parport[LP_NO];
+static int reset;
+
+module_param_array(parport, charp, NULL, 0);
+module_param(reset, bool, 0);
+
+#ifndef MODULE
+static int __init lp_setup (char *str)
+{
+ static int parport_ptr;
+ int x;
+
+ if (get_option(&str, &x)) {
+ if (x == 0) {
+ /* disable driver on "lp=" or "lp=0" */
+ parport_nr[0] = LP_PARPORT_OFF;
+ } else {
+ printk(KERN_WARNING "warning: 'lp=0x%x' is deprecated, ignored\n", x);
+ return 0;
+ }
+ } else if (!strncmp(str, "parport", 7)) {
+ int n = simple_strtoul(str+7, NULL, 10);
+ if (parport_ptr < LP_NO)
+ parport_nr[parport_ptr++] = n;
+ else
+ printk(KERN_INFO "lp: too many ports, %s ignored.\n",
+ str);
+ } else if (!strcmp(str, "auto")) {
+ parport_nr[0] = LP_PARPORT_AUTO;
+ } else if (!strcmp(str, "none")) {
+ parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+ } else if (!strcmp(str, "reset")) {
+ reset = 1;
+ }
+ return 1;
+}
+#endif
+
+static int lp_register(int nr, struct parport *port)
+{
+ lp_table[nr].dev = parport_register_device(port, "lp",
+ lp_preempt, NULL, NULL, 0,
+ (void *) &lp_table[nr]);
+ if (lp_table[nr].dev == NULL)
+ return 1;
+ lp_table[nr].flags |= LP_EXIST;
+
+ if (reset)
+ lp_reset(nr);
+
+ device_create(lp_class, port->dev, MKDEV(LP_MAJOR, nr), NULL,
+ "lp%d", nr);
+
+ printk(KERN_INFO "lp%d: using %s (%s).\n", nr, port->name,
+ (port->irq == PARPORT_IRQ_NONE)?"polling":"interrupt-driven");
+
+#ifdef CONFIG_LP_CONSOLE
+ if (!nr) {
+ if (port->modes & PARPORT_MODE_SAFEININT) {
+ register_console(&lpcons);
+ console_registered = port;
+ printk (KERN_INFO "lp%d: console ready\n", CONSOLE_LP);
+ } else
+ printk (KERN_ERR "lp%d: cannot run console on %s\n",
+ CONSOLE_LP, port->name);
+ }
+#endif
+
+ return 0;
+}
+
+static void lp_attach (struct parport *port)
+{
+ unsigned int i;
+
+ switch (parport_nr[0]) {
+ case LP_PARPORT_UNSPEC:
+ case LP_PARPORT_AUTO:
+ if (parport_nr[0] == LP_PARPORT_AUTO &&
+ port->probe_info[0].class != PARPORT_CLASS_PRINTER)
+ return;
+ if (lp_count == LP_NO) {
+ printk(KERN_INFO "lp: ignoring parallel port (max. %d)\n",LP_NO);
+ return;
+ }
+ if (!lp_register(lp_count, port))
+ lp_count++;
+ break;
+
+ default:
+ for (i = 0; i < LP_NO; i++) {
+ if (port->number == parport_nr[i]) {
+ if (!lp_register(i, port))
+ lp_count++;
+ break;
+ }
+ }
+ break;
+ }
+}
+
+static void lp_detach (struct parport *port)
+{
+ /* Write this some day. */
+#ifdef CONFIG_LP_CONSOLE
+ if (console_registered == port) {
+ unregister_console(&lpcons);
+ console_registered = NULL;
+ }
+#endif /* CONFIG_LP_CONSOLE */
+}
+
+static struct parport_driver lp_driver = {
+ .name = "lp",
+ .attach = lp_attach,
+ .detach = lp_detach,
+};
+
+static int __init lp_init (void)
+{
+ int i, err = 0;
+
+ if (parport_nr[0] == LP_PARPORT_OFF)
+ return 0;
+
+ for (i = 0; i < LP_NO; i++) {
+ lp_table[i].dev = NULL;
+ lp_table[i].flags = 0;
+ lp_table[i].chars = LP_INIT_CHAR;
+ lp_table[i].time = LP_INIT_TIME;
+ lp_table[i].wait = LP_INIT_WAIT;
+ lp_table[i].lp_buffer = NULL;
+#ifdef LP_STATS
+ lp_table[i].lastcall = 0;
+ lp_table[i].runchars = 0;
+ memset (&lp_table[i].stats, 0, sizeof (struct lp_stats));
+#endif
+ lp_table[i].last_error = 0;
+ init_waitqueue_head (&lp_table[i].waitq);
+ init_waitqueue_head (&lp_table[i].dataq);
+ mutex_init(&lp_table[i].port_mutex);
+ lp_table[i].timeout = 10 * HZ;
+ }
+
+ if (register_chrdev (LP_MAJOR, "lp", &lp_fops)) {
+ printk (KERN_ERR "lp: unable to get major %d\n", LP_MAJOR);
+ return -EIO;
+ }
+
+ lp_class = class_create(THIS_MODULE, "printer");
+ if (IS_ERR(lp_class)) {
+ err = PTR_ERR(lp_class);
+ goto out_reg;
+ }
+
+ if (parport_register_driver (&lp_driver)) {
+ printk (KERN_ERR "lp: unable to register with parport\n");
+ err = -EIO;
+ goto out_class;
+ }
+
+ if (!lp_count) {
+ printk (KERN_INFO "lp: driver loaded but no devices found\n");
+#ifndef CONFIG_PARPORT_1284
+ if (parport_nr[0] == LP_PARPORT_AUTO)
+ printk (KERN_INFO "lp: (is IEEE 1284 support enabled?)\n");
+#endif
+ }
+
+ return 0;
+
+out_class:
+ class_destroy(lp_class);
+out_reg:
+ unregister_chrdev(LP_MAJOR, "lp");
+ return err;
+}
+
+static int __init lp_init_module (void)
+{
+ if (parport[0]) {
+ /* The user gave some parameters. Let's see what they were. */
+ if (!strncmp(parport[0], "auto", 4))
+ parport_nr[0] = LP_PARPORT_AUTO;
+ else {
+ int n;
+ for (n = 0; n < LP_NO && parport[n]; n++) {
+ if (!strncmp(parport[n], "none", 4))
+ parport_nr[n] = LP_PARPORT_NONE;
+ else {
+ char *ep;
+ unsigned long r = simple_strtoul(parport[n], &ep, 0);
+ if (ep != parport[n])
+ parport_nr[n] = r;
+ else {
+ printk(KERN_ERR "lp: bad port specifier `%s'\n", parport[n]);
+ return -ENODEV;
+ }
+ }
+ }
+ }
+ }
+
+ return lp_init();
+}
+
+static void lp_cleanup_module (void)
+{
+ unsigned int offset;
+
+ parport_unregister_driver (&lp_driver);
+
+#ifdef CONFIG_LP_CONSOLE
+ unregister_console (&lpcons);
+#endif
+
+ unregister_chrdev(LP_MAJOR, "lp");
+ for (offset = 0; offset < LP_NO; offset++) {
+ if (lp_table[offset].dev == NULL)
+ continue;
+ parport_unregister_device(lp_table[offset].dev);
+ device_destroy(lp_class, MKDEV(LP_MAJOR, offset));
+ }
+ class_destroy(lp_class);
+}
+
+__setup("lp=", lp_setup);
+module_init(lp_init_module);
+module_exit(lp_cleanup_module);
+
+MODULE_ALIAS_CHARDEV_MAJOR(LP_MAJOR);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
new file mode 100644
index 00000000..1aeaaba6
--- /dev/null
+++ b/drivers/char/mbcs.c
@@ -0,0 +1,853 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * MOATB Core Services driver.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/uio.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/tiocx.h>
+#include "mbcs.h"
+
+#define MBCS_DEBUG 0
+#if MBCS_DEBUG
+#define DBG(fmt...) printk(KERN_ALERT fmt)
+#else
+#define DBG(fmt...)
+#endif
+static DEFINE_MUTEX(mbcs_mutex);
+static int mbcs_major;
+
+static LIST_HEAD(soft_list);
+
+/*
+ * file operations
+ */
+static const struct file_operations mbcs_ops = {
+ .open = mbcs_open,
+ .llseek = mbcs_sram_llseek,
+ .read = mbcs_sram_read,
+ .write = mbcs_sram_write,
+ .mmap = mbcs_gscr_mmap,
+};
+
+struct mbcs_callback_arg {
+ int minor;
+ struct cx_dev *cx_dev;
+};
+
+static inline void mbcs_getdma_init(struct getdma *gdma)
+{
+ memset(gdma, 0, sizeof(struct getdma));
+ gdma->DoneIntEnable = 1;
+}
+
+static inline void mbcs_putdma_init(struct putdma *pdma)
+{
+ memset(pdma, 0, sizeof(struct putdma));
+ pdma->DoneIntEnable = 1;
+}
+
+static inline void mbcs_algo_init(struct algoblock *algo_soft)
+{
+ memset(algo_soft, 0, sizeof(struct algoblock));
+}
+
+static inline void mbcs_getdma_set(void *mmr,
+ uint64_t hostAddr,
+ uint64_t localAddr,
+ uint64_t localRamSel,
+ uint64_t numPkts,
+ uint64_t amoEnable,
+ uint64_t intrEnable,
+ uint64_t peerIO,
+ uint64_t amoHostDest,
+ uint64_t amoModType, uint64_t intrHostDest,
+ uint64_t intrVector)
+{
+ union dma_control rdma_control;
+ union dma_amo_dest amo_dest;
+ union intr_dest intr_dest;
+ union dma_localaddr local_addr;
+ union dma_hostaddr host_addr;
+
+ rdma_control.dma_control_reg = 0;
+ amo_dest.dma_amo_dest_reg = 0;
+ intr_dest.intr_dest_reg = 0;
+ local_addr.dma_localaddr_reg = 0;
+ host_addr.dma_hostaddr_reg = 0;
+
+ host_addr.dma_sys_addr = hostAddr;
+ MBCS_MMR_SET(mmr, MBCS_RD_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);
+
+ local_addr.dma_ram_addr = localAddr;
+ local_addr.dma_ram_sel = localRamSel;
+ MBCS_MMR_SET(mmr, MBCS_RD_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);
+
+ rdma_control.dma_op_length = numPkts;
+ rdma_control.done_amo_en = amoEnable;
+ rdma_control.done_int_en = intrEnable;
+ rdma_control.pio_mem_n = peerIO;
+ MBCS_MMR_SET(mmr, MBCS_RD_DMA_CTRL, rdma_control.dma_control_reg);
+
+ amo_dest.dma_amo_sys_addr = amoHostDest;
+ amo_dest.dma_amo_mod_type = amoModType;
+ MBCS_MMR_SET(mmr, MBCS_RD_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);
+
+ intr_dest.address = intrHostDest;
+ intr_dest.int_vector = intrVector;
+ MBCS_MMR_SET(mmr, MBCS_RD_DMA_INT_DEST, intr_dest.intr_dest_reg);
+
+}
+
+static inline void mbcs_putdma_set(void *mmr,
+ uint64_t hostAddr,
+ uint64_t localAddr,
+ uint64_t localRamSel,
+ uint64_t numPkts,
+ uint64_t amoEnable,
+ uint64_t intrEnable,
+ uint64_t peerIO,
+ uint64_t amoHostDest,
+ uint64_t amoModType,
+ uint64_t intrHostDest, uint64_t intrVector)
+{
+ union dma_control wdma_control;
+ union dma_amo_dest amo_dest;
+ union intr_dest intr_dest;
+ union dma_localaddr local_addr;
+ union dma_hostaddr host_addr;
+
+ wdma_control.dma_control_reg = 0;
+ amo_dest.dma_amo_dest_reg = 0;
+ intr_dest.intr_dest_reg = 0;
+ local_addr.dma_localaddr_reg = 0;
+ host_addr.dma_hostaddr_reg = 0;
+
+ host_addr.dma_sys_addr = hostAddr;
+ MBCS_MMR_SET(mmr, MBCS_WR_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);
+
+ local_addr.dma_ram_addr = localAddr;
+ local_addr.dma_ram_sel = localRamSel;
+ MBCS_MMR_SET(mmr, MBCS_WR_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);
+
+ wdma_control.dma_op_length = numPkts;
+ wdma_control.done_amo_en = amoEnable;
+ wdma_control.done_int_en = intrEnable;
+ wdma_control.pio_mem_n = peerIO;
+ MBCS_MMR_SET(mmr, MBCS_WR_DMA_CTRL, wdma_control.dma_control_reg);
+
+ amo_dest.dma_amo_sys_addr = amoHostDest;
+ amo_dest.dma_amo_mod_type = amoModType;
+ MBCS_MMR_SET(mmr, MBCS_WR_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);
+
+ intr_dest.address = intrHostDest;
+ intr_dest.int_vector = intrVector;
+ MBCS_MMR_SET(mmr, MBCS_WR_DMA_INT_DEST, intr_dest.intr_dest_reg);
+
+}
+
+static inline void mbcs_algo_set(void *mmr,
+ uint64_t amoHostDest,
+ uint64_t amoModType,
+ uint64_t intrHostDest,
+ uint64_t intrVector, uint64_t algoStepCount)
+{
+ union dma_amo_dest amo_dest;
+ union intr_dest intr_dest;
+ union algo_step step;
+
+ step.algo_step_reg = 0;
+ intr_dest.intr_dest_reg = 0;
+ amo_dest.dma_amo_dest_reg = 0;
+
+ amo_dest.dma_amo_sys_addr = amoHostDest;
+ amo_dest.dma_amo_mod_type = amoModType;
+ MBCS_MMR_SET(mmr, MBCS_ALG_AMO_DEST, amo_dest.dma_amo_dest_reg);
+
+ intr_dest.address = intrHostDest;
+ intr_dest.int_vector = intrVector;
+ MBCS_MMR_SET(mmr, MBCS_ALG_INT_DEST, intr_dest.intr_dest_reg);
+
+ step.alg_step_cnt = algoStepCount;
+ MBCS_MMR_SET(mmr, MBCS_ALG_STEP, step.algo_step_reg);
+}
+
+static inline int mbcs_getdma_start(struct mbcs_soft *soft)
+{
+ void *mmr_base;
+ struct getdma *gdma;
+ uint64_t numPkts;
+ union cm_control cm_control;
+
+ mmr_base = soft->mmr_base;
+ gdma = &soft->getdma;
+
+	/* check that the host address has been set up */
+ if (!gdma->hostAddr)
+ return -1;
+
+ numPkts =
+ (gdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;
+
+ /* program engine */
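+	/* dma_ram_sel: each 2MB local-address window selects one SRAM bank */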
+ mbcs_getdma_set(mmr_base, tiocx_dma_addr(gdma->hostAddr),
+ gdma->localAddr,
+ (gdma->localAddr < MB2) ? 0 :
+ (gdma->localAddr < MB4) ? 1 :
+ (gdma->localAddr < MB6) ? 2 : 3,
+ numPkts,
+ gdma->DoneAmoEnable,
+ gdma->DoneIntEnable,
+ gdma->peerIO,
+ gdma->amoHostDest,
+ gdma->amoModType,
+ gdma->intrHostDest, gdma->intrVector);
+
+ /* start engine */
+ cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
+ cm_control.rd_dma_go = 1;
+ MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
+
+ return 0;
+
+}
+
+static inline int mbcs_putdma_start(struct mbcs_soft *soft)
+{
+ void *mmr_base;
+ struct putdma *pdma;
+ uint64_t numPkts;
+ union cm_control cm_control;
+
+ mmr_base = soft->mmr_base;
+ pdma = &soft->putdma;
+
+	/* check that the host address has been set up */
+ if (!pdma->hostAddr)
+ return -1;
+
+ numPkts =
+ (pdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;
+
+ /* program engine */
+ mbcs_putdma_set(mmr_base, tiocx_dma_addr(pdma->hostAddr),
+ pdma->localAddr,
+ (pdma->localAddr < MB2) ? 0 :
+ (pdma->localAddr < MB4) ? 1 :
+ (pdma->localAddr < MB6) ? 2 : 3,
+ numPkts,
+ pdma->DoneAmoEnable,
+ pdma->DoneIntEnable,
+ pdma->peerIO,
+ pdma->amoHostDest,
+ pdma->amoModType,
+ pdma->intrHostDest, pdma->intrVector);
+
+ /* start engine */
+ cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
+ cm_control.wr_dma_go = 1;
+ MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
+
+ return 0;
+
+}
+
+static inline int mbcs_algo_start(struct mbcs_soft *soft)
+{
+ struct algoblock *algo_soft = &soft->algo;
+ void *mmr_base = soft->mmr_base;
+ union cm_control cm_control;
+
+ if (mutex_lock_interruptible(&soft->algolock))
+ return -ERESTARTSYS;
+
+ atomic_set(&soft->algo_done, 0);
+
+ mbcs_algo_set(mmr_base,
+ algo_soft->amoHostDest,
+ algo_soft->amoModType,
+ algo_soft->intrHostDest,
+ algo_soft->intrVector, algo_soft->algoStepCount);
+
+ /* start algorithm */
+ cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
+ cm_control.alg_done_int_en = 1;
+ cm_control.alg_go = 1;
+ MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
+
+ mutex_unlock(&soft->algolock);
+
+ return 0;
+}
+
+static inline ssize_t
+do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
+ size_t len, loff_t * off)
+{
+ int rv = 0;
+
+ if (mutex_lock_interruptible(&soft->dmawritelock))
+ return -ERESTARTSYS;
+
+ atomic_set(&soft->dmawrite_done, 0);
+
+ soft->putdma.hostAddr = hostAddr;
+ soft->putdma.localAddr = *off;
+ soft->putdma.bytes = len;
+
+ if (mbcs_putdma_start(soft) < 0) {
+ DBG(KERN_ALERT "do_mbcs_sram_dmawrite: "
+ "mbcs_putdma_start failed\n");
+ rv = -EAGAIN;
+ goto dmawrite_exit;
+ }
+
+ if (wait_event_interruptible(soft->dmawrite_queue,
+ atomic_read(&soft->dmawrite_done))) {
+ rv = -ERESTARTSYS;
+ goto dmawrite_exit;
+ }
+
+ rv = len;
+ *off += len;
+
+dmawrite_exit:
+ mutex_unlock(&soft->dmawritelock);
+
+ return rv;
+}
+
+static inline ssize_t
+do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
+ size_t len, loff_t * off)
+{
+ int rv = 0;
+
+ if (mutex_lock_interruptible(&soft->dmareadlock))
+ return -ERESTARTSYS;
+
+	atomic_set(&soft->dmaread_done, 0);
+
+ soft->getdma.hostAddr = hostAddr;
+ soft->getdma.localAddr = *off;
+ soft->getdma.bytes = len;
+
+ if (mbcs_getdma_start(soft) < 0) {
+ DBG(KERN_ALERT "mbcs_strategy: mbcs_getdma_start failed\n");
+ rv = -EAGAIN;
+ goto dmaread_exit;
+ }
+
+ if (wait_event_interruptible(soft->dmaread_queue,
+ atomic_read(&soft->dmaread_done))) {
+ rv = -ERESTARTSYS;
+ goto dmaread_exit;
+ }
+
+ rv = len;
+ *off += len;
+
+dmaread_exit:
+ mutex_unlock(&soft->dmareadlock);
+
+ return rv;
+}
+
+static int mbcs_open(struct inode *ip, struct file *fp)
+{
+ struct mbcs_soft *soft;
+ int minor;
+
+ mutex_lock(&mbcs_mutex);
+ minor = iminor(ip);
+
+	/* mbcs_mutex serializes opens, but mbcs_probe() adds to soft_list unlocked */
+ list_for_each_entry(soft, &soft_list, list) {
+ if (soft->nasid == minor) {
+ fp->private_data = soft->cxdev;
+ mutex_unlock(&mbcs_mutex);
+ return 0;
+ }
+ }
+
+ mutex_unlock(&mbcs_mutex);
+ return -ENODEV;
+}
+
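+/*
+ * Note the direction inversion: a user read() of SRAM is serviced by the
+ * write-DMA ("put") engine, which pushes SRAM contents out to host memory,
+ * while a write() is serviced by the read-DMA ("get") engine, which pulls
+ * data from host memory into SRAM.  The engines are named from the
+ * corelet's view of host memory.
+ */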
+static ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off)
+{
+ struct cx_dev *cx_dev = fp->private_data;
+ struct mbcs_soft *soft = cx_dev->soft;
+ uint64_t hostAddr;
+ int rv = 0;
+
+ hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
+ if (hostAddr == 0)
+ return -ENOMEM;
+
+ rv = do_mbcs_sram_dmawrite(soft, hostAddr, len, off);
+ if (rv < 0)
+ goto exit;
+
+ if (copy_to_user(buf, (void *)hostAddr, len))
+ rv = -EFAULT;
+
+ exit:
+ free_pages(hostAddr, get_order(len));
+
+ return rv;
+}
+
+static ssize_t
+mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * off)
+{
+ struct cx_dev *cx_dev = fp->private_data;
+ struct mbcs_soft *soft = cx_dev->soft;
+ uint64_t hostAddr;
+ int rv = 0;
+
+ hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
+ if (hostAddr == 0)
+ return -ENOMEM;
+
+ if (copy_from_user((void *)hostAddr, buf, len)) {
+ rv = -EFAULT;
+ goto exit;
+ }
+
+ rv = do_mbcs_sram_dmaread(soft, hostAddr, len, off);
+
+ exit:
+ free_pages(hostAddr, get_order(len));
+
+ return rv;
+}
+
+static loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
+{
+ loff_t newpos;
+
+ switch (whence) {
+ case SEEK_SET:
+ newpos = off;
+ break;
+
+ case SEEK_CUR:
+ newpos = filp->f_pos + off;
+ break;
+
+ case SEEK_END:
+ newpos = MBCS_SRAM_SIZE + off;
+ break;
+
+ default: /* can't happen */
+ return -EINVAL;
+ }
+
+ if (newpos < 0)
+ return -EINVAL;
+
+ filp->f_pos = newpos;
+
+ return newpos;
+}
+
+static uint64_t mbcs_pioaddr(struct mbcs_soft *soft, uint64_t offset)
+{
+ uint64_t mmr_base;
+
+ mmr_base = (uint64_t) (soft->mmr_base + offset);
+
+ return mmr_base;
+}
+
+static void mbcs_debug_pioaddr_set(struct mbcs_soft *soft)
+{
+ soft->debug_addr = mbcs_pioaddr(soft, MBCS_DEBUG_START);
+}
+
+static void mbcs_gscr_pioaddr_set(struct mbcs_soft *soft)
+{
+ soft->gscr_addr = mbcs_pioaddr(soft, MBCS_GSCR_START);
+}
+
+static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ struct cx_dev *cx_dev = fp->private_data;
+ struct mbcs_soft *soft = cx_dev->soft;
+
+ if (vma->vm_pgoff != 0)
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ __pa(soft->gscr_addr) >> PAGE_SHIFT,
+ PAGE_SIZE,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * mbcs_completion_intr_handler - Primary completion handler.
+ * @irq: irq
+ * @arg: soft struct for device
+ *
+ */
+static irqreturn_t
+mbcs_completion_intr_handler(int irq, void *arg)
+{
+ struct mbcs_soft *soft = (struct mbcs_soft *)arg;
+ void *mmr_base;
+ union cm_status cm_status;
+ union cm_control cm_control;
+
+ mmr_base = soft->mmr_base;
+ cm_status.cm_status_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_STATUS);
+
+ if (cm_status.rd_dma_done) {
+ /* stop dma-read engine, clear status */
+ cm_control.cm_control_reg =
+ MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
+ cm_control.rd_dma_clr = 1;
+ MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
+ cm_control.cm_control_reg);
+ atomic_set(&soft->dmaread_done, 1);
+ wake_up(&soft->dmaread_queue);
+ }
+ if (cm_status.wr_dma_done) {
+ /* stop dma-write engine, clear status */
+ cm_control.cm_control_reg =
+ MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
+ cm_control.wr_dma_clr = 1;
+ MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
+ cm_control.cm_control_reg);
+ atomic_set(&soft->dmawrite_done, 1);
+ wake_up(&soft->dmawrite_queue);
+ }
+ if (cm_status.alg_done) {
+ /* clear status */
+ cm_control.cm_control_reg =
+ MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
+ cm_control.alg_done_clr = 1;
+ MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
+ cm_control.cm_control_reg);
+ atomic_set(&soft->algo_done, 1);
+ wake_up(&soft->algo_queue);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * mbcs_intr_alloc - Allocate interrupts.
+ * @dev: device pointer
+ *
+ */
+static int mbcs_intr_alloc(struct cx_dev *dev)
+{
+ struct sn_irq_info *sn_irq;
+ struct mbcs_soft *soft;
+ struct getdma *getdma;
+ struct putdma *putdma;
+ struct algoblock *algo;
+
+ soft = dev->soft;
+ getdma = &soft->getdma;
+ putdma = &soft->putdma;
+ algo = &soft->algo;
+
+ soft->get_sn_irq = NULL;
+ soft->put_sn_irq = NULL;
+ soft->algo_sn_irq = NULL;
+
+ sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
+ if (sn_irq == NULL)
+ return -EAGAIN;
+ soft->get_sn_irq = sn_irq;
+ getdma->intrHostDest = sn_irq->irq_xtalkaddr;
+ getdma->intrVector = sn_irq->irq_irq;
+ if (request_irq(sn_irq->irq_irq,
+			mbcs_completion_intr_handler, IRQF_SHARED,
+			"MBCS get intr", soft)) {
+ tiocx_irq_free(soft->get_sn_irq);
+ return -EAGAIN;
+ }
+
+ sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
+ if (sn_irq == NULL) {
+ free_irq(soft->get_sn_irq->irq_irq, soft);
+ tiocx_irq_free(soft->get_sn_irq);
+ return -EAGAIN;
+ }
+ soft->put_sn_irq = sn_irq;
+ putdma->intrHostDest = sn_irq->irq_xtalkaddr;
+ putdma->intrVector = sn_irq->irq_irq;
+ if (request_irq(sn_irq->irq_irq,
+			mbcs_completion_intr_handler, IRQF_SHARED,
+			"MBCS put intr", soft)) {
+ tiocx_irq_free(soft->put_sn_irq);
+ free_irq(soft->get_sn_irq->irq_irq, soft);
+ tiocx_irq_free(soft->get_sn_irq);
+ return -EAGAIN;
+ }
+
+ sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
+ if (sn_irq == NULL) {
+ free_irq(soft->put_sn_irq->irq_irq, soft);
+ tiocx_irq_free(soft->put_sn_irq);
+ free_irq(soft->get_sn_irq->irq_irq, soft);
+ tiocx_irq_free(soft->get_sn_irq);
+ return -EAGAIN;
+ }
+ soft->algo_sn_irq = sn_irq;
+ algo->intrHostDest = sn_irq->irq_xtalkaddr;
+ algo->intrVector = sn_irq->irq_irq;
+ if (request_irq(sn_irq->irq_irq,
+			mbcs_completion_intr_handler, IRQF_SHARED,
+			"MBCS algo intr", soft)) {
+ tiocx_irq_free(soft->algo_sn_irq);
+ free_irq(soft->put_sn_irq->irq_irq, soft);
+ tiocx_irq_free(soft->put_sn_irq);
+ free_irq(soft->get_sn_irq->irq_irq, soft);
+ tiocx_irq_free(soft->get_sn_irq);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/**
+ * mbcs_intr_dealloc - Remove interrupts.
+ * @dev: device pointer
+ *
+ */
+static void mbcs_intr_dealloc(struct cx_dev *dev)
+{
+ struct mbcs_soft *soft;
+
+ soft = dev->soft;
+
+ free_irq(soft->get_sn_irq->irq_irq, soft);
+ tiocx_irq_free(soft->get_sn_irq);
+ free_irq(soft->put_sn_irq->irq_irq, soft);
+ tiocx_irq_free(soft->put_sn_irq);
+ free_irq(soft->algo_sn_irq->irq_irq, soft);
+ tiocx_irq_free(soft->algo_sn_irq);
+}
+
+static inline int mbcs_hw_init(struct mbcs_soft *soft)
+{
+ void *mmr_base = soft->mmr_base;
+ union cm_control cm_control;
+ union cm_req_timeout cm_req_timeout;
+ uint64_t err_stat;
+
+ cm_req_timeout.cm_req_timeout_reg =
+ MBCS_MMR_GET(mmr_base, MBCS_CM_REQ_TOUT);
+
+ cm_req_timeout.time_out = MBCS_CM_CONTROL_REQ_TOUT_MASK;
+ MBCS_MMR_SET(mmr_base, MBCS_CM_REQ_TOUT,
+ cm_req_timeout.cm_req_timeout_reg);
+
+ mbcs_gscr_pioaddr_set(soft);
+ mbcs_debug_pioaddr_set(soft);
+
+ /* clear errors */
+ err_stat = MBCS_MMR_GET(mmr_base, MBCS_CM_ERR_STAT);
+ MBCS_MMR_SET(mmr_base, MBCS_CM_CLR_ERR_STAT, err_stat);
+ MBCS_MMR_ZERO(mmr_base, MBCS_CM_ERROR_DETAIL1);
+
+ /* enable interrupts */
+ /* turn off 2^23 (INT_EN_PIO_REQ_ADDR_INV) */
+ MBCS_MMR_SET(mmr_base, MBCS_CM_ERR_INT_EN, 0x3ffffff7e00ffUL);
+
+ /* arm status regs and clear engines */
+ cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
+ cm_control.rearm_stat_regs = 1;
+ cm_control.alg_clr = 1;
+ cm_control.wr_dma_clr = 1;
+ cm_control.rd_dma_clr = 1;
+
+ MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
+
+ return 0;
+}
+
+static ssize_t show_algo(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct cx_dev *cx_dev = to_cx_dev(dev);
+ struct mbcs_soft *soft = cx_dev->soft;
+ uint64_t debug0;
+
+ /*
+ * By convention, the first debug register contains the
+ * algorithm number and revision.
+ */
+ debug0 = *(uint64_t *) soft->debug_addr;
+
+ return sprintf(buf, "0x%x 0x%x\n",
+ upper_32_bits(debug0), lower_32_bits(debug0));
+}
+
+static ssize_t store_algo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ int n;
+ struct cx_dev *cx_dev = to_cx_dev(dev);
+ struct mbcs_soft *soft = cx_dev->soft;
+
+	if (count == 0)
+ return 0;
+
+ n = simple_strtoul(buf, NULL, 0);
+
+ if (n == 1) {
+ mbcs_algo_start(soft);
+ if (wait_event_interruptible(soft->algo_queue,
+ atomic_read(&soft->algo_done)))
+ return -ERESTARTSYS;
+ }
+
+ return count;
+}
+
+DEVICE_ATTR(algo, 0644, show_algo, store_algo);
+
+/**
+ * mbcs_probe - Initialize for device
+ * @dev: device pointer
+ * @device_id: id table pointer
+ *
+ */
+static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
+{
+ struct mbcs_soft *soft;
+
+ dev->soft = NULL;
+
+ soft = kzalloc(sizeof(struct mbcs_soft), GFP_KERNEL);
+ if (soft == NULL)
+ return -ENOMEM;
+
+ soft->nasid = dev->cx_id.nasid;
+ list_add(&soft->list, &soft_list);
+ soft->mmr_base = (void *)tiocx_swin_base(dev->cx_id.nasid);
+ dev->soft = soft;
+ soft->cxdev = dev;
+
+ init_waitqueue_head(&soft->dmawrite_queue);
+ init_waitqueue_head(&soft->dmaread_queue);
+ init_waitqueue_head(&soft->algo_queue);
+
+ mutex_init(&soft->dmawritelock);
+ mutex_init(&soft->dmareadlock);
+ mutex_init(&soft->algolock);
+
+ mbcs_getdma_init(&soft->getdma);
+ mbcs_putdma_init(&soft->putdma);
+ mbcs_algo_init(&soft->algo);
+
+ mbcs_hw_init(soft);
+
+	/* Allocate interrupts; unwind the soft state if allocation fails */
+	if (mbcs_intr_alloc(dev) < 0) {
+		list_del(&soft->list);
+		kfree(soft);
+		dev->soft = NULL;
+		return -EAGAIN;
+	}
+
+ device_create_file(&dev->dev, &dev_attr_algo);
+
+ return 0;
+}
+
+static int mbcs_remove(struct cx_dev *dev)
+{
+ if (dev->soft) {
+ mbcs_intr_dealloc(dev);
+ kfree(dev->soft);
+ }
+
+ device_remove_file(&dev->dev, &dev_attr_algo);
+
+ return 0;
+}
+
+static const struct cx_device_id __devinitdata mbcs_id_table[] = {
+ {
+ .part_num = MBCS_PART_NUM,
+ .mfg_num = MBCS_MFG_NUM,
+ },
+ {
+ .part_num = MBCS_PART_NUM_ALG0,
+ .mfg_num = MBCS_MFG_NUM,
+ },
+ {0, 0}
+};
+
+MODULE_DEVICE_TABLE(cx, mbcs_id_table);
+
+static struct cx_drv mbcs_driver = {
+ .name = DEVICE_NAME,
+ .id_table = mbcs_id_table,
+ .probe = mbcs_probe,
+ .remove = mbcs_remove,
+};
+
+static void __exit mbcs_exit(void)
+{
+ unregister_chrdev(mbcs_major, DEVICE_NAME);
+ cx_driver_unregister(&mbcs_driver);
+}
+
+static int __init mbcs_init(void)
+{
+ int rv;
+
+ if (!ia64_platform_is("sn2"))
+ return -ENODEV;
+
+ // Put driver into chrdevs[]. Get major number.
+ rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops);
+ if (rv < 0) {
+ DBG(KERN_ALERT "mbcs_init: can't get major number. %d\n", rv);
+ return rv;
+ }
+ mbcs_major = rv;
+
+ return cx_driver_register(&mbcs_driver);
+}
+
+module_init(mbcs_init);
+module_exit(mbcs_exit);
+
+MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>");
+MODULE_DESCRIPTION("Driver for MOATB Core Services");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/mbcs.h b/drivers/char/mbcs.h
new file mode 100644
index 00000000..1a36884c
--- /dev/null
+++ b/drivers/char/mbcs.h
@@ -0,0 +1,553 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef __MBCS_H__
+#define __MBCS_H__
+
+/*
+ * General macros
+ */
+#define MB (1024*1024)
+#define MB2 (2*MB)
+#define MB4 (4*MB)
+#define MB6 (6*MB)
+
+/*
+ * Offsets and masks
+ */
+#define MBCS_CM_ID 0x0000 /* Identification */
+#define MBCS_CM_STATUS 0x0008 /* Status */
+#define MBCS_CM_ERROR_DETAIL1 0x0010 /* Error Detail1 */
+#define MBCS_CM_ERROR_DETAIL2 0x0018 /* Error Detail2 */
+#define MBCS_CM_CONTROL 0x0020 /* Control */
+#define MBCS_CM_REQ_TOUT 0x0028 /* Request Time-out */
+#define MBCS_CM_ERR_INT_DEST 0x0038 /* Error Interrupt Destination */
+#define MBCS_CM_TARG_FL 0x0050 /* Target Flush */
+#define MBCS_CM_ERR_STAT 0x0060 /* Error Status */
+#define MBCS_CM_CLR_ERR_STAT 0x0068 /* Clear Error Status */
+#define MBCS_CM_ERR_INT_EN 0x0070 /* Error Interrupt Enable */
+#define MBCS_RD_DMA_SYS_ADDR 0x0100 /* Read DMA System Address */
+#define MBCS_RD_DMA_LOC_ADDR 0x0108 /* Read DMA Local Address */
+#define MBCS_RD_DMA_CTRL 0x0110 /* Read DMA Control */
+#define MBCS_RD_DMA_AMO_DEST 0x0118 /* Read DMA AMO Destination */
+#define MBCS_RD_DMA_INT_DEST 0x0120 /* Read DMA Interrupt Destination */
+#define MBCS_RD_DMA_AUX_STAT 0x0130 /* Read DMA Auxiliary Status */
+#define MBCS_WR_DMA_SYS_ADDR 0x0200 /* Write DMA System Address */
+#define MBCS_WR_DMA_LOC_ADDR 0x0208 /* Write DMA Local Address */
+#define MBCS_WR_DMA_CTRL 0x0210 /* Write DMA Control */
+#define MBCS_WR_DMA_AMO_DEST 0x0218 /* Write DMA AMO Destination */
+#define MBCS_WR_DMA_INT_DEST 0x0220 /* Write DMA Interrupt Destination */
+#define MBCS_WR_DMA_AUX_STAT 0x0230 /* Write DMA Auxiliary Status */
+#define MBCS_ALG_AMO_DEST 0x0300 /* Algorithm AMO Destination */
+#define MBCS_ALG_INT_DEST 0x0308 /* Algorithm Interrupt Destination */
+#define MBCS_ALG_OFFSETS 0x0310
+#define MBCS_ALG_STEP 0x0318 /* Algorithm Step */
+
+#define MBCS_GSCR_START 0x0000000
+#define MBCS_DEBUG_START 0x0100000
+#define MBCS_RAM0_START 0x0200000
+#define MBCS_RAM1_START 0x0400000
+#define MBCS_RAM2_START 0x0600000
+
+#define MBCS_CM_CONTROL_REQ_TOUT_MASK 0x0000000000ffffffUL
+//#define PIO_BASE_ADDR_BASE_OFFSET_MASK 0x00fffffffff00000UL
+
+#define MBCS_SRAM_SIZE (1024*1024)
+#define MBCS_CACHELINE_SIZE 128
+
+/*
+ * MMR gets and puts
+ */
+#define MBCS_MMR_ADDR(mmr_base, offset)	((uint64_t *)(mmr_base + offset))
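+/*
+ * The read-back inside MBCS_MMR_SET is deliberate: it flushes the posted
+ * PIO write out to the corelet before the macro returns.
+ */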
+#define MBCS_MMR_SET(mmr_base, offset, value) { \
+ uint64_t *mbcs_mmr_set_u64p, readback; \
+ mbcs_mmr_set_u64p = (uint64_t *)(mmr_base + offset); \
+ *mbcs_mmr_set_u64p = value; \
+ readback = *mbcs_mmr_set_u64p; \
+}
+#define MBCS_MMR_GET(mmr_base, offset) *(uint64_t *)(mmr_base + offset)
+#define MBCS_MMR_ZERO(mmr_base, offset) MBCS_MMR_SET(mmr_base, offset, 0)
+
+/*
+ * MBCS mmr structures
+ */
+union cm_id {
+ uint64_t cm_id_reg;
+ struct {
+ uint64_t always_one:1, // 0
+ mfg_id:11, // 11:1
+ part_num:16, // 27:12
+ bitstream_rev:8, // 35:28
+ :28; // 63:36
+ };
+};
+
+union cm_status {
+ uint64_t cm_status_reg;
+ struct {
+ uint64_t pending_reads:8, // 7:0
+ pending_writes:8, // 15:8
+ ice_rsp_credits:8, // 23:16
+ ice_req_credits:8, // 31:24
+ cm_req_credits:8, // 39:32
+ :1, // 40
+ rd_dma_in_progress:1, // 41
+ rd_dma_done:1, // 42
+ :1, // 43
+ wr_dma_in_progress:1, // 44
+ wr_dma_done:1, // 45
+ alg_waiting:1, // 46
+ alg_pipe_running:1, // 47
+ alg_done:1, // 48
+ :3, // 51:49
+ pending_int_reqs:8, // 59:52
+ :3, // 62:60
+ alg_half_speed_sel:1; // 63
+ };
+};
+
+union cm_error_detail1 {
+ uint64_t cm_error_detail1_reg;
+ struct {
+ uint64_t packet_type:4, // 3:0
+ source_id:2, // 5:4
+ data_size:2, // 7:6
+ tnum:8, // 15:8
+ byte_enable:8, // 23:16
+ gfx_cred:8, // 31:24
+ read_type:2, // 33:32
+ pio_or_memory:1, // 34
+ head_cw_error:1, // 35
+ :12, // 47:36
+ head_error_bit:1, // 48
+ data_error_bit:1, // 49
+ :13, // 62:50
+ valid:1; // 63
+ };
+};
+
+union cm_error_detail2 {
+ uint64_t cm_error_detail2_reg;
+ struct {
+ uint64_t address:56, // 55:0
+ :8; // 63:56
+ };
+};
+
+union cm_control {
+ uint64_t cm_control_reg;
+ struct {
+ uint64_t cm_id:2, // 1:0
+ :2, // 3:2
+ max_trans:5, // 8:4
+ :3, // 11:9
+ address_mode:1, // 12
+ :7, // 19:13
+ credit_limit:8, // 27:20
+ :5, // 32:28
+ rearm_stat_regs:1, // 33
+ prescalar_byp:1, // 34
+ force_gap_war:1, // 35
+ rd_dma_go:1, // 36
+ wr_dma_go:1, // 37
+ alg_go:1, // 38
+ rd_dma_clr:1, // 39
+ wr_dma_clr:1, // 40
+ alg_clr:1, // 41
+ :2, // 43:42
+ alg_wait_step:1, // 44
+ alg_done_amo_en:1, // 45
+ alg_done_int_en:1, // 46
+ :1, // 47
+ alg_sram0_locked:1, // 48
+ alg_sram1_locked:1, // 49
+ alg_sram2_locked:1, // 50
+ alg_done_clr:1, // 51
+ :12; // 63:52
+ };
+};
+
+union cm_req_timeout {
+ uint64_t cm_req_timeout_reg;
+ struct {
+ uint64_t time_out:24, // 23:0
+ :40; // 63:24
+ };
+};
+
+union intr_dest {
+ uint64_t intr_dest_reg;
+ struct {
+ uint64_t address:56, // 55:0
+ int_vector:8; // 63:56
+ };
+};
+
+union cm_error_status {
+ uint64_t cm_error_status_reg;
+ struct {
+ uint64_t ecc_sbe:1, // 0
+ ecc_mbe:1, // 1
+ unsupported_req:1, // 2
+ unexpected_rsp:1, // 3
+ bad_length:1, // 4
+ bad_datavalid:1, // 5
+ buffer_overflow:1, // 6
+ request_timeout:1, // 7
+ :8, // 15:8
+ head_inv_data_size:1, // 16
+ rsp_pactype_inv:1, // 17
+ head_sb_err:1, // 18
+ missing_head:1, // 19
+ head_inv_rd_type:1, // 20
+ head_cmd_err_bit:1, // 21
+ req_addr_align_inv:1, // 22
+ pio_req_addr_inv:1, // 23
+ req_range_dsize_inv:1, // 24
+ early_term:1, // 25
+ early_tail:1, // 26
+ missing_tail:1, // 27
+ data_flit_sb_err:1, // 28
+ cm2hcm_req_cred_of:1, // 29
+ cm2hcm_rsp_cred_of:1, // 30
+ rx_bad_didn:1, // 31
+ rd_dma_err_rsp:1, // 32
+ rd_dma_tnum_tout:1, // 33
+ rd_dma_multi_tnum_tou:1, // 34
+ wr_dma_err_rsp:1, // 35
+ wr_dma_tnum_tout:1, // 36
+ wr_dma_multi_tnum_tou:1, // 37
+ alg_data_overflow:1, // 38
+ alg_data_underflow:1, // 39
+ ram0_access_conflict:1, // 40
+ ram1_access_conflict:1, // 41
+ ram2_access_conflict:1, // 42
+ ram0_perr:1, // 43
+ ram1_perr:1, // 44
+ ram2_perr:1, // 45
+ int_gen_rsp_err:1, // 46
+ int_gen_tnum_tout:1, // 47
+ rd_dma_prog_err:1, // 48
+ wr_dma_prog_err:1, // 49
+ :14; // 63:50
+ };
+};
+
+union cm_clr_error_status {
+ uint64_t cm_clr_error_status_reg;
+ struct {
+ uint64_t clr_ecc_sbe:1, // 0
+ clr_ecc_mbe:1, // 1
+ clr_unsupported_req:1, // 2
+ clr_unexpected_rsp:1, // 3
+ clr_bad_length:1, // 4
+ clr_bad_datavalid:1, // 5
+ clr_buffer_overflow:1, // 6
+ clr_request_timeout:1, // 7
+ :8, // 15:8
+ clr_head_inv_data_siz:1, // 16
+ clr_rsp_pactype_inv:1, // 17
+ clr_head_sb_err:1, // 18
+ clr_missing_head:1, // 19
+ clr_head_inv_rd_type:1, // 20
+ clr_head_cmd_err_bit:1, // 21
+ clr_req_addr_align_in:1, // 22
+ clr_pio_req_addr_inv:1, // 23
+ clr_req_range_dsize_i:1, // 24
+ clr_early_term:1, // 25
+ clr_early_tail:1, // 26
+ clr_missing_tail:1, // 27
+ clr_data_flit_sb_err:1, // 28
+ clr_cm2hcm_req_cred_o:1, // 29
+ clr_cm2hcm_rsp_cred_o:1, // 30
+ clr_rx_bad_didn:1, // 31
+ clr_rd_dma_err_rsp:1, // 32
+ clr_rd_dma_tnum_tout:1, // 33
+ clr_rd_dma_multi_tnum:1, // 34
+ clr_wr_dma_err_rsp:1, // 35
+ clr_wr_dma_tnum_tout:1, // 36
+ clr_wr_dma_multi_tnum:1, // 37
+ clr_alg_data_overflow:1, // 38
+ clr_alg_data_underflo:1, // 39
+ clr_ram0_access_confl:1, // 40
+ clr_ram1_access_confl:1, // 41
+ clr_ram2_access_confl:1, // 42
+ clr_ram0_perr:1, // 43
+ clr_ram1_perr:1, // 44
+ clr_ram2_perr:1, // 45
+ clr_int_gen_rsp_err:1, // 46
+ clr_int_gen_tnum_tout:1, // 47
+ clr_rd_dma_prog_err:1, // 48
+ clr_wr_dma_prog_err:1, // 49
+ :14; // 63:50
+ };
+};
+
+union cm_error_intr_enable {
+ uint64_t cm_error_intr_enable_reg;
+ struct {
+ uint64_t int_en_ecc_sbe:1, // 0
+ int_en_ecc_mbe:1, // 1
+ int_en_unsupported_re:1, // 2
+ int_en_unexpected_rsp:1, // 3
+ int_en_bad_length:1, // 4
+ int_en_bad_datavalid:1, // 5
+ int_en_buffer_overflo:1, // 6
+ int_en_request_timeou:1, // 7
+ :8, // 15:8
+ int_en_head_inv_data_:1, // 16
+ int_en_rsp_pactype_in:1, // 17
+ int_en_head_sb_err:1, // 18
+ int_en_missing_head:1, // 19
+ int_en_head_inv_rd_ty:1, // 20
+ int_en_head_cmd_err_b:1, // 21
+ int_en_req_addr_align:1, // 22
+ int_en_pio_req_addr_i:1, // 23
+ int_en_req_range_dsiz:1, // 24
+ int_en_early_term:1, // 25
+ int_en_early_tail:1, // 26
+ int_en_missing_tail:1, // 27
+ int_en_data_flit_sb_e:1, // 28
+ int_en_cm2hcm_req_cre:1, // 29
+ int_en_cm2hcm_rsp_cre:1, // 30
+ int_en_rx_bad_didn:1, // 31
+ int_en_rd_dma_err_rsp:1, // 32
+ int_en_rd_dma_tnum_to:1, // 33
+ int_en_rd_dma_multi_t:1, // 34
+ int_en_wr_dma_err_rsp:1, // 35
+ int_en_wr_dma_tnum_to:1, // 36
+ int_en_wr_dma_multi_t:1, // 37
+ int_en_alg_data_overf:1, // 38
+ int_en_alg_data_under:1, // 39
+ int_en_ram0_access_co:1, // 40
+ int_en_ram1_access_co:1, // 41
+ int_en_ram2_access_co:1, // 42
+ int_en_ram0_perr:1, // 43
+ int_en_ram1_perr:1, // 44
+ int_en_ram2_perr:1, // 45
+ int_en_int_gen_rsp_er:1, // 46
+ int_en_int_gen_tnum_t:1, // 47
+ int_en_rd_dma_prog_er:1, // 48
+ int_en_wr_dma_prog_er:1, // 49
+ :14; // 63:50
+ };
+};
+
+struct cm_mmr {
+ union cm_id id;
+ union cm_status status;
+ union cm_error_detail1 err_detail1;
+ union cm_error_detail2 err_detail2;
+ union cm_control control;
+ union cm_req_timeout req_timeout;
+ uint64_t reserved1[1];
+ union intr_dest int_dest;
+ uint64_t reserved2[2];
+ uint64_t targ_flush;
+ uint64_t reserved3[1];
+ union cm_error_status err_status;
+ union cm_clr_error_status clr_err_status;
+ union cm_error_intr_enable int_enable;
+};
+
+union dma_hostaddr {
+ uint64_t dma_hostaddr_reg;
+ struct {
+ uint64_t dma_sys_addr:56, // 55:0
+ :8; // 63:56
+ };
+};
+
+union dma_localaddr {
+ uint64_t dma_localaddr_reg;
+ struct {
+ uint64_t dma_ram_addr:21, // 20:0
+ dma_ram_sel:2, // 22:21
+ :41; // 63:23
+ };
+};
+
+union dma_control {
+ uint64_t dma_control_reg;
+ struct {
+ uint64_t dma_op_length:16, // 15:0
+ :18, // 33:16
+ done_amo_en:1, // 34
+ done_int_en:1, // 35
+ :1, // 36
+ pio_mem_n:1, // 37
+ :26; // 63:38
+ };
+};
+
+union dma_amo_dest {
+ uint64_t dma_amo_dest_reg;
+ struct {
+ uint64_t dma_amo_sys_addr:56, // 55:0
+ dma_amo_mod_type:3, // 58:56
+ :5; // 63:59
+ };
+};
+
+union rdma_aux_status {
+ uint64_t rdma_aux_status_reg;
+ struct {
+ uint64_t op_num_pacs_left:17, // 16:0
+ :5, // 21:17
+ lrsp_buff_empty:1, // 22
+ :17, // 39:23
+ pending_reqs_left:6, // 45:40
+ :18; // 63:46
+ };
+};
+
+struct rdma_mmr {
+ union dma_hostaddr host_addr;
+ union dma_localaddr local_addr;
+ union dma_control control;
+ union dma_amo_dest amo_dest;
+ union intr_dest intr_dest;
+ union rdma_aux_status aux_status;
+};
+
+union wdma_aux_status {
+ uint64_t wdma_aux_status_reg;
+ struct {
+ uint64_t op_num_pacs_left:17, // 16:0
+ :4, // 20:17
+ lreq_buff_empty:1, // 21
+ :18, // 39:22
+ pending_reqs_left:6, // 45:40
+ :18; // 63:46
+ };
+};
+
+struct wdma_mmr {
+ union dma_hostaddr host_addr;
+ union dma_localaddr local_addr;
+ union dma_control control;
+ union dma_amo_dest amo_dest;
+ union intr_dest intr_dest;
+ union wdma_aux_status aux_status;
+};
+
+union algo_step {
+ uint64_t algo_step_reg;
+ struct {
+ uint64_t alg_step_cnt:16, // 15:0
+ :48; // 63:16
+ };
+};
+
+struct algo_mmr {
+ union dma_amo_dest amo_dest;
+ union intr_dest intr_dest;
+ union {
+ uint64_t algo_offset_reg;
+ struct {
+ uint64_t sram0_offset:7, // 6:0
+ reserved0:1, // 7
+ sram1_offset:7, // 14:8
+ reserved1:1, // 15
+ sram2_offset:7, // 22:16
+ reserved2:14; // 63:23
+ };
+ } sram_offset;
+ union algo_step step;
+};
+
+struct mbcs_mmr {
+ struct cm_mmr cm;
+ uint64_t reserved1[17];
+ struct rdma_mmr rdDma;
+ uint64_t reserved2[25];
+ struct wdma_mmr wrDma;
+ uint64_t reserved3[25];
+ struct algo_mmr algo;
+ uint64_t reserved4[156];
+};
+
+/*
+ * defines
+ */
+#define DEVICE_NAME "mbcs"
+#define MBCS_PART_NUM 0xfff0
+#define MBCS_PART_NUM_ALG0 0xf001
+#define MBCS_MFG_NUM 0x1
+
+struct algoblock {
+ uint64_t amoHostDest;
+ uint64_t amoModType;
+ uint64_t intrHostDest;
+ uint64_t intrVector;
+ uint64_t algoStepCount;
+};
+
+struct getdma {
+ uint64_t hostAddr;
+ uint64_t localAddr;
+ uint64_t bytes;
+ uint64_t DoneAmoEnable;
+ uint64_t DoneIntEnable;
+ uint64_t peerIO;
+ uint64_t amoHostDest;
+ uint64_t amoModType;
+ uint64_t intrHostDest;
+ uint64_t intrVector;
+};
+
+struct putdma {
+ uint64_t hostAddr;
+ uint64_t localAddr;
+ uint64_t bytes;
+ uint64_t DoneAmoEnable;
+ uint64_t DoneIntEnable;
+ uint64_t peerIO;
+ uint64_t amoHostDest;
+ uint64_t amoModType;
+ uint64_t intrHostDest;
+ uint64_t intrVector;
+};
+
+struct mbcs_soft {
+ struct list_head list;
+ struct cx_dev *cxdev;
+ int major;
+ int nasid;
+ void *mmr_base;
+ wait_queue_head_t dmawrite_queue;
+ wait_queue_head_t dmaread_queue;
+ wait_queue_head_t algo_queue;
+ struct sn_irq_info *get_sn_irq;
+ struct sn_irq_info *put_sn_irq;
+ struct sn_irq_info *algo_sn_irq;
+ struct getdma getdma;
+ struct putdma putdma;
+ struct algoblock algo;
+ uint64_t gscr_addr; // pio addr
+ uint64_t ram0_addr; // pio addr
+ uint64_t ram1_addr; // pio addr
+ uint64_t ram2_addr; // pio addr
+ uint64_t debug_addr; // pio addr
+ atomic_t dmawrite_done;
+ atomic_t dmaread_done;
+ atomic_t algo_done;
+ struct mutex dmawritelock;
+ struct mutex dmareadlock;
+ struct mutex algolock;
+};
+
+static int mbcs_open(struct inode *ip, struct file *fp);
+static ssize_t mbcs_sram_read(struct file *fp, char __user *buf, size_t len,
+ loff_t * off);
+static ssize_t mbcs_sram_write(struct file *fp, const char __user *buf, size_t len,
+ loff_t * off);
+static loff_t mbcs_sram_llseek(struct file *filp, loff_t off, int whence);
+static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma);
+
+#endif // __MBCS_H__
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
new file mode 100644
index 00000000..9b1eb188
--- /dev/null
+++ b/drivers/char/mem.c
@@ -0,0 +1,956 @@
+/*
+ * linux/drivers/char/mem.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Added devfs support.
+ * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
+ * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/raw.h>
+#include <linux/tty.h>
+#include <linux/capability.h>
+#include <linux/ptrace.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/crash_dump.h>
+#include <linux/backing-dev.h>
+#include <linux/bootmem.h>
+#include <linux/splice.h>
+#include <linux/pfn.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_IA64
+# include <linux/efi.h>
+#endif
+
+static inline unsigned long size_inside_page(unsigned long start,
+ unsigned long size)
+{
+ unsigned long sz;
+
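+	/* bytes from 'start' to the end of the page that contains it */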
+ sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
+
+ return min(sz, size);
+}
+
+#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
+{
+ return addr + count <= __pa(high_memory);
+}
+
+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
+{
+ return 1;
+}
+#endif
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
+#ifdef CONFIG_STRICT_DEVMEM
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ u64 from = ((u64)pfn) << PAGE_SHIFT;
+ u64 to = from + size;
+ u64 cursor = from;
+
+ while (cursor < to) {
+ if (!devmem_is_allowed(pfn)) {
+ printk(KERN_INFO
+ "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
+ current->comm, from, to);
+ return 0;
+ }
+ cursor += PAGE_SIZE;
+ pfn++;
+ }
+ return 1;
+}
+#else
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ return 1;
+}
+#endif
+#endif
+
+#ifdef CONFIG_DEVMEM
+void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
+{
+}
+
+/*
+ * This function reads the *physical* memory. The f_pos points directly to the
+ * memory location.
+ */
+static ssize_t read_mem(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t read, sz;
+ char *ptr;
+
+ if (!valid_phys_addr_range(p, count))
+ return -EFAULT;
+ read = 0;
+#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (p < PAGE_SIZE) {
+ sz = size_inside_page(p, count);
+ if (sz > 0) {
+ if (clear_user(buf, sz))
+ return -EFAULT;
+ buf += sz;
+ p += sz;
+ count -= sz;
+ read += sz;
+ }
+ }
+#endif
+
+ while (count > 0) {
+ unsigned long remaining;
+
+ sz = size_inside_page(p, count);
+
+		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+ return -EPERM;
+
+ /*
+ * On ia64 if a page has been mapped somewhere as uncached, then
+ * it must also be accessed uncached by the kernel or data
+ * corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr)
+ return -EFAULT;
+
+ remaining = copy_to_user(buf, ptr, sz);
+ unxlate_dev_mem_ptr(p, ptr);
+ if (remaining)
+ return -EFAULT;
+
+ buf += sz;
+ p += sz;
+ count -= sz;
+ read += sz;
+ }
+
+ *ppos += read;
+ return read;
+}
+
+static ssize_t write_mem(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t written, sz;
+ unsigned long copied;
+ void *ptr;
+
+ if (!valid_phys_addr_range(p, count))
+ return -EFAULT;
+
+ written = 0;
+
+#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (p < PAGE_SIZE) {
+ sz = size_inside_page(p, count);
+ /* Hmm. Do something? */
+ buf += sz;
+ p += sz;
+ count -= sz;
+ written += sz;
+ }
+#endif
+
+ while (count > 0) {
+ sz = size_inside_page(p, count);
+
+ if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+ return -EPERM;
+
+ /*
+ * On ia64 if a page has been mapped somewhere as uncached, then
+ * it must also be accessed uncached by the kernel or data
+ * corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr) {
+ if (written)
+ break;
+ return -EFAULT;
+ }
+
+ copied = copy_from_user(ptr, buf, sz);
+ unxlate_dev_mem_ptr(p, ptr);
+ if (copied) {
+ written += sz - copied;
+ if (written)
+ break;
+ return -EFAULT;
+ }
+
+ buf += sz;
+ p += sz;
+ count -= sz;
+ written += sz;
+ }
+
+ *ppos += written;
+ return written;
+}
+#endif /* CONFIG_DEVMEM */
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
+
+int __weak phys_mem_access_prot_allowed(struct file *file,
+ unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
+{
+ return 1;
+}
+
+#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
+
+/*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+ *
+ */
+#ifdef pgprot_noncached
+static int uncached_access(struct file *file, unsigned long addr)
+{
+#if defined(CONFIG_IA64)
+ /*
+ * On ia64, we ignore O_DSYNC because we cannot tolerate memory
+ * attribute aliases.
+ */
+ return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
+#elif defined(CONFIG_MIPS)
+ {
+ extern int __uncached_access(struct file *file,
+ unsigned long addr);
+
+ return __uncached_access(file, addr);
+ }
+#else
+ /*
+	 * Accessing memory above the top of memory the kernel knows about,
+	 * or through a file pointer that was marked O_DSYNC, will be done
+	 * non-cached.
+ */
+ if (file->f_flags & O_DSYNC)
+ return 1;
+ return addr >= __pa(high_memory);
+#endif
+}
+#endif
+
+static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot)
+{
+#ifdef pgprot_noncached
+ unsigned long offset = pfn << PAGE_SHIFT;
+
+ if (uncached_access(file, offset))
+ return pgprot_noncached(vma_prot);
+#endif
+ return vma_prot;
+}
+#endif
+
+#ifndef CONFIG_MMU
+static unsigned long get_unmapped_area_mem(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ if (!valid_mmap_phys_addr_range(pgoff, len))
+ return (unsigned long) -EINVAL;
+ return pgoff << PAGE_SHIFT;
+}
+
+/* can't do an in-place private mapping if there's no MMU */
+static inline int private_mapping_ok(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & VM_MAYSHARE;
+}
+#else
+#define get_unmapped_area_mem NULL
+
+static inline int private_mapping_ok(struct vm_area_struct *vma)
+{
+ return 1;
+}
+#endif
+
+static const struct vm_operations_struct mmap_mem_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys
+#endif
+};
+
+static int mmap_mem(struct file *file, struct vm_area_struct *vma)
+{
+ size_t size = vma->vm_end - vma->vm_start;
+
+ if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
+ return -EINVAL;
+
+ if (!private_mapping_ok(vma))
+ return -ENOSYS;
+
+ if (!range_is_allowed(vma->vm_pgoff, size))
+ return -EPERM;
+
+ if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
+ &vma->vm_page_prot))
+ return -EINVAL;
+
+ vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
+ size,
+ vma->vm_page_prot);
+
+ vma->vm_ops = &mmap_mem_ops;
+
+ /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ vma->vm_pgoff,
+ size,
+ vma->vm_page_prot)) {
+ return -EAGAIN;
+ }
+ return 0;
+}
+#endif /* CONFIG_DEVMEM || CONFIG_DEVKMEM */
+
+#ifdef CONFIG_DEVKMEM
+static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long pfn;
+
+ /* Turn a kernel-virtual address into a physical page frame */
+ pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
+
+ /*
+ * RED-PEN: on some architectures there is more mapped memory than
+ * available in mem_map which pfn_valid checks for. Perhaps should add a
+ * new macro here.
+ *
+ * RED-PEN: vmalloc is not supported right now.
+ */
+ if (!pfn_valid(pfn))
+ return -EIO;
+
+ vma->vm_pgoff = pfn;
+ return mmap_mem(file, vma);
+}
+#endif
+
+#ifdef CONFIG_CRASH_DUMP
+/*
+ * Read memory corresponding to the old kernel.
+ */
+static ssize_t read_oldmem(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long pfn, offset;
+ size_t read = 0, csize;
+ int rc = 0;
+
+ while (count) {
+ pfn = *ppos / PAGE_SIZE;
+ if (pfn > saved_max_pfn)
+ return read;
+
+ offset = (unsigned long)(*ppos % PAGE_SIZE);
+ if (count > PAGE_SIZE - offset)
+ csize = PAGE_SIZE - offset;
+ else
+ csize = count;
+
+ rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
+ if (rc < 0)
+ return rc;
+ buf += csize;
+ *ppos += csize;
+ read += csize;
+ count -= csize;
+ }
+ return read;
+}
+#endif
+
+#ifdef CONFIG_DEVKMEM
+/*
+ * This function reads the *virtual* memory as seen by the kernel.
+ */
+static ssize_t read_kmem(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t low_count, read, sz;
+ char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+ int err = 0;
+
+ read = 0;
+ if (p < (unsigned long) high_memory) {
+ low_count = count;
+ if (count > (unsigned long)high_memory - p)
+ low_count = (unsigned long)high_memory - p;
+
+#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (p < PAGE_SIZE && low_count > 0) {
+ sz = size_inside_page(p, low_count);
+ if (clear_user(buf, sz))
+ return -EFAULT;
+ buf += sz;
+ p += sz;
+ read += sz;
+ low_count -= sz;
+ count -= sz;
+ }
+#endif
+ while (low_count > 0) {
+ sz = size_inside_page(p, low_count);
+
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur
+ */
+ kbuf = xlate_dev_kmem_ptr((char *)p);
+
+ if (copy_to_user(buf, kbuf, sz))
+ return -EFAULT;
+ buf += sz;
+ p += sz;
+ read += sz;
+ low_count -= sz;
+ count -= sz;
+ }
+ }
+
+ if (count > 0) {
+ kbuf = (char *)__get_free_page(GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+ while (count > 0) {
+ sz = size_inside_page(p, count);
+ if (!is_vmalloc_or_module_addr((void *)p)) {
+ err = -ENXIO;
+ break;
+ }
+ sz = vread(kbuf, (char *)p, sz);
+ if (!sz)
+ break;
+ if (copy_to_user(buf, kbuf, sz)) {
+ err = -EFAULT;
+ break;
+ }
+ count -= sz;
+ buf += sz;
+ read += sz;
+ p += sz;
+ }
+ free_page((unsigned long)kbuf);
+ }
+ *ppos = p;
+ return read ? read : err;
+}
+
+
+static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t written, sz;
+ unsigned long copied;
+
+ written = 0;
+#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (p < PAGE_SIZE) {
+ sz = size_inside_page(p, count);
+ /* Hmm. Do something? */
+ buf += sz;
+ p += sz;
+ count -= sz;
+ written += sz;
+ }
+#endif
+
+ while (count > 0) {
+ char *ptr;
+
+ sz = size_inside_page(p, count);
+
+ /*
+ * On ia64 if a page has been mapped somewhere as uncached, then
+ * it must also be accessed uncached by the kernel or data
+ * corruption may occur.
+ */
+ ptr = xlate_dev_kmem_ptr((char *)p);
+
+ copied = copy_from_user(ptr, buf, sz);
+ if (copied) {
+ written += sz - copied;
+ if (written)
+ break;
+ return -EFAULT;
+ }
+ buf += sz;
+ p += sz;
+ count -= sz;
+ written += sz;
+ }
+
+ *ppos += written;
+ return written;
+}
+
+/*
+ * This function writes to the *virtual* memory as seen by the kernel.
+ */
+static ssize_t write_kmem(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t wrote = 0;
+ ssize_t virtr = 0;
+ char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+ int err = 0;
+
+ if (p < (unsigned long) high_memory) {
+ unsigned long to_write = min_t(unsigned long, count,
+ (unsigned long)high_memory - p);
+ wrote = do_write_kmem(p, buf, to_write, ppos);
+ if (wrote != to_write)
+ return wrote;
+ p += wrote;
+ buf += wrote;
+ count -= wrote;
+ }
+
+ if (count > 0) {
+ kbuf = (char *)__get_free_page(GFP_KERNEL);
+ if (!kbuf)
+ return wrote ? wrote : -ENOMEM;
+ while (count > 0) {
+ unsigned long sz = size_inside_page(p, count);
+ unsigned long n;
+
+ if (!is_vmalloc_or_module_addr((void *)p)) {
+ err = -ENXIO;
+ break;
+ }
+ n = copy_from_user(kbuf, buf, sz);
+ if (n) {
+ err = -EFAULT;
+ break;
+ }
+ vwrite(kbuf, (char *)p, sz);
+ count -= sz;
+ buf += sz;
+ virtr += sz;
+ p += sz;
+ }
+ free_page((unsigned long)kbuf);
+ }
+
+ *ppos = p;
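+	/* GCC "?:" extension: return bytes transferred if any, otherwise err */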
+ return virtr + wrote ? : err;
+}
+#endif
+
+#ifdef CONFIG_DEVPORT
+static ssize_t read_port(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long i = *ppos;
+ char __user *tmp = buf;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
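+	/* the legacy I/O port space spans at most 64K ports (0-65535) */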
+ while (count-- > 0 && i < 65536) {
+ if (__put_user(inb(i), tmp) < 0)
+ return -EFAULT;
+ i++;
+ tmp++;
+ }
+ *ppos = i;
+ return tmp-buf;
+}
+
+static ssize_t write_port(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long i = *ppos;
+ const char __user * tmp = buf;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+ while (count-- > 0 && i < 65536) {
+ char c;
+ if (__get_user(c, tmp)) {
+ if (tmp > buf)
+ break;
+ return -EFAULT;
+ }
+ outb(c, i);
+ i++;
+ tmp++;
+ }
+ *ppos = i;
+ return tmp-buf;
+}
+#endif
+
+static ssize_t read_null(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t write_null(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return count;
+}
+
+static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
+ struct splice_desc *sd)
+{
+ return sd->len;
+}
+
+static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags)
+{
+ return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
+}
+
+static ssize_t read_zero(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ size_t written;
+
+ if (!count)
+ return 0;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ written = 0;
+ while (count) {
+ unsigned long unwritten;
+ size_t chunk = count;
+
+ if (chunk > PAGE_SIZE)
+ chunk = PAGE_SIZE; /* Just for latency reasons */
+ unwritten = __clear_user(buf, chunk);
+ written += chunk - unwritten;
+ if (unwritten)
+ break;
+ if (signal_pending(current))
+ return written ? written : -ERESTARTSYS;
+ buf += chunk;
+ count -= chunk;
+ cond_resched();
+ }
+ return written ? written : -EFAULT;
+}
+
+static int mmap_zero(struct file *file, struct vm_area_struct *vma)
+{
+#ifndef CONFIG_MMU
+ return -ENOSYS;
+#endif
+ if (vma->vm_flags & VM_SHARED)
+ return shmem_zero_setup(vma);
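+	/* private mappings simply fault in anonymous zero-filled pages */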
+ return 0;
+}
+
+static ssize_t write_full(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENOSPC;
+}
+
+/*
+ * Special lseek() function for /dev/null and /dev/zero. Most notably, you
+ * can fopen() both devices with "a" now. This was previously impossible.
+ * -- SRB.
+ */
+static loff_t null_lseek(struct file *file, loff_t offset, int orig)
+{
+ return file->f_pos = 0;
+}
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
+
+/*
+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
+ * check against negative addresses: they are ok. The return value is weird,
+ * though, in that case (0).
+ *
+ * Also note that seeking relative to the "end of file" isn't supported:
+ * it has no meaning, so it returns -EINVAL.
+ */
+static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
+{
+ loff_t ret;
+
+ mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
+ switch (orig) {
+ case SEEK_CUR:
+ offset += file->f_pos;
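+		/* fall through */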
+ case SEEK_SET:
+ /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
+ if ((unsigned long long)offset >= ~0xFFFULL) {
+ ret = -EOVERFLOW;
+ break;
+ }
+ file->f_pos = offset;
+ ret = file->f_pos;
+ force_successful_syscall_return();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
+ return ret;
+}
+
+#endif
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
+static int open_port(struct inode * inode, struct file * filp)
+{
+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+}
+#endif
+
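+/* the trivial memory devices share their lseek/read/write/open methods */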
+#define zero_lseek null_lseek
+#define full_lseek null_lseek
+#define write_zero write_null
+#define read_full read_zero
+#define open_mem open_port
+#define open_kmem open_mem
+#define open_oldmem open_mem
+
+#ifdef CONFIG_DEVMEM
+static const struct file_operations mem_fops = {
+ .llseek = memory_lseek,
+ .read = read_mem,
+ .write = write_mem,
+ .mmap = mmap_mem,
+ .open = open_mem,
+ .get_unmapped_area = get_unmapped_area_mem,
+};
+#endif
+
+#ifdef CONFIG_DEVKMEM
+static const struct file_operations kmem_fops = {
+ .llseek = memory_lseek,
+ .read = read_kmem,
+ .write = write_kmem,
+ .mmap = mmap_kmem,
+ .open = open_kmem,
+ .get_unmapped_area = get_unmapped_area_mem,
+};
+#endif
+
+static const struct file_operations null_fops = {
+ .llseek = null_lseek,
+ .read = read_null,
+ .write = write_null,
+ .splice_write = splice_write_null,
+};
+
+#ifdef CONFIG_DEVPORT
+static const struct file_operations port_fops = {
+ .llseek = memory_lseek,
+ .read = read_port,
+ .write = write_port,
+ .open = open_port,
+};
+#endif
+
+static const struct file_operations zero_fops = {
+ .llseek = zero_lseek,
+ .read = read_zero,
+ .write = write_zero,
+ .mmap = mmap_zero,
+};
+
+/*
+ * capabilities for /dev/zero
+ * - permits private mappings, "copies" are taken of the source of zeros
+ * - no writeback happens
+ */
+static struct backing_dev_info zero_bdi = {
+ .name = "char/mem",
+ .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
+};
+
+static const struct file_operations full_fops = {
+ .llseek = full_lseek,
+ .read = read_full,
+ .write = write_full,
+};
+
+#ifdef CONFIG_CRASH_DUMP
+static const struct file_operations oldmem_fops = {
+ .read = read_oldmem,
+ .open = open_oldmem,
+ .llseek = default_llseek,
+};
+#endif
+
+static ssize_t kmsg_writev(struct kiocb *iocb, const struct iovec *iv,
+ unsigned long count, loff_t pos)
+{
+ char *line, *p;
+ int i;
+ ssize_t ret = -EFAULT;
+ size_t len = iov_length(iv, count);
+
+ line = kmalloc(len + 1, GFP_KERNEL);
+ if (line == NULL)
+ return -ENOMEM;
+
+ /*
+ * copy all vectors into a single string, to ensure we do
+ * not interleave our log line with other printk calls
+ */
+ p = line;
+ for (i = 0; i < count; i++) {
+ if (copy_from_user(p, iv[i].iov_base, iv[i].iov_len))
+ goto out;
+ p += iv[i].iov_len;
+ }
+ p[0] = '\0';
+
+ ret = printk("%s", line);
+ /* printk can add a prefix */
+ if (ret > len)
+ ret = len;
+out:
+ kfree(line);
+ return ret;
+}
+
+static const struct file_operations kmsg_fops = {
+ .aio_write = kmsg_writev,
+ .llseek = noop_llseek,
+};
+
+static const struct memdev {
+ const char *name;
+ mode_t mode;
+ const struct file_operations *fops;
+ struct backing_dev_info *dev_info;
+} devlist[] = {
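+	/* the array index doubles as the device minor under MEM_MAJOR */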
+#ifdef CONFIG_DEVMEM
+ [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
+#endif
+#ifdef CONFIG_DEVKMEM
+ [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
+#endif
+ [3] = { "null", 0666, &null_fops, NULL },
+#ifdef CONFIG_DEVPORT
+ [4] = { "port", 0, &port_fops, NULL },
+#endif
+ [5] = { "zero", 0666, &zero_fops, &zero_bdi },
+ [7] = { "full", 0666, &full_fops, NULL },
+ [8] = { "random", 0666, &random_fops, NULL },
+ [9] = { "urandom", 0666, &urandom_fops, NULL },
+ [11] = { "kmsg", 0, &kmsg_fops, NULL },
+#ifdef CONFIG_CRASH_DUMP
+ [12] = { "oldmem", 0, &oldmem_fops, NULL },
+#endif
+};
+
+static int memory_open(struct inode *inode, struct file *filp)
+{
+ int minor;
+ const struct memdev *dev;
+
+ minor = iminor(inode);
+ if (minor >= ARRAY_SIZE(devlist))
+ return -ENXIO;
+
+ dev = &devlist[minor];
+ if (!dev->fops)
+ return -ENXIO;
+
+ filp->f_op = dev->fops;
+ if (dev->dev_info)
+ filp->f_mapping->backing_dev_info = dev->dev_info;
+
+ /* Is /dev/mem or /dev/kmem ? */
+ if (dev->dev_info == &directly_mappable_cdev_bdi)
+ filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+
+ if (dev->fops->open)
+ return dev->fops->open(inode, filp);
+
+ return 0;
+}
+
+static const struct file_operations memory_fops = {
+ .open = memory_open,
+ .llseek = noop_llseek,
+};
+
+static char *mem_devnode(struct device *dev, mode_t *mode)
+{
+ if (mode && devlist[MINOR(dev->devt)].mode)
+ *mode = devlist[MINOR(dev->devt)].mode;
+ return NULL;
+}
+
+static struct class *mem_class;
+
+static int __init chr_dev_init(void)
+{
+ int minor;
+ int err;
+
+ err = bdi_init(&zero_bdi);
+ if (err)
+ return err;
+
+ if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
+ printk("unable to get major %d for memory devs\n", MEM_MAJOR);
+
+ mem_class = class_create(THIS_MODULE, "mem");
+ if (IS_ERR(mem_class))
+ return PTR_ERR(mem_class);
+
+ mem_class->devnode = mem_devnode;
+ for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
+ if (!devlist[minor].name)
+ continue;
+ device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
+ NULL, devlist[minor].name);
+ }
+
+ return tty_init();
+}
+
+fs_initcall(chr_dev_init);
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
new file mode 100644
index 00000000..778273c9
--- /dev/null
+++ b/drivers/char/misc.c
@@ -0,0 +1,297 @@
+/*
+ * linux/drivers/char/misc.c
+ *
+ * Generic misc open routine by Johan Myreen
+ *
+ * Based on code from Linus
+ *
+ * Teemu Rantanen's Microsoft Busmouse support and Derrick Cole's
+ * changes incorporated into 0.97pl4
+ * by Peter Cervasio (pete%q106fm.uucp@wupost.wustl.edu) (08SEP92)
+ * See busmouse.c for particulars.
+ *
+ * Made things a lot more modular - easy to compile in just one or two
+ * of the misc drivers, as they are now completely independent. Linus.
+ *
+ * Support for loadable modules. 8-Sep-95 Philip Blundell <pjb27@cam.ac.uk>
+ *
+ * Fixed a failing symbol register to free the device registration
+ * Alan Cox <alan@lxorguk.ukuu.org.uk> 21-Jan-96
+ *
+ * Dynamic minors and /proc/mice by Alessandro Rubini. 26-Mar-96
+ *
+ * Renamed to misc and miscdevice to be more accurate. Alan Cox 26-Mar-96
+ *
+ * Handling of mouse minor numbers for kerneld:
+ * Idea by Jacques Gelinas <jack@solucorp.qc.ca>,
+ * adapted by Bjorn Ekwall <bj0rn@blox.se>
+ * corrected by Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * Changes for kmod (from kerneld):
+ * Cyrus Durgin <cider@speakeasy.org>
+ *
+ * Added devfs support. Richard Gooch <rgooch@atnf.csiro.au> 10-Jan-1998
+ */
+
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/mutex.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/tty.h>
+#include <linux/kmod.h>
+#include <linux/gfp.h>
+
+/*
+ * Head entry for the doubly linked miscdevice list
+ */
+static LIST_HEAD(misc_list);
+static DEFINE_MUTEX(misc_mtx);
+
+/*
+ * Assigned numbers, used for dynamic minors
+ */
+#define DYNAMIC_MINORS 64 /* like dynamic majors */
+static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS);
+
+#ifdef CONFIG_PROC_FS
+static void *misc_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ mutex_lock(&misc_mtx);
+ return seq_list_start(&misc_list, *pos);
+}
+
+static void *misc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return seq_list_next(v, &misc_list, pos);
+}
+
+static void misc_seq_stop(struct seq_file *seq, void *v)
+{
+ mutex_unlock(&misc_mtx);
+}
+
+static int misc_seq_show(struct seq_file *seq, void *v)
+{
+ const struct miscdevice *p = list_entry(v, struct miscdevice, list);
+
+ seq_printf(seq, "%3i %s\n", p->minor, p->name ? p->name : "");
+ return 0;
+}
+
+
+static const struct seq_operations misc_seq_ops = {
+ .start = misc_seq_start,
+ .next = misc_seq_next,
+ .stop = misc_seq_stop,
+ .show = misc_seq_show,
+};
+
+static int misc_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &misc_seq_ops);
+}
+
+static const struct file_operations misc_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = misc_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif
+
+static int misc_open(struct inode *inode, struct file *file)
+{
+ int minor = iminor(inode);
+ struct miscdevice *c;
+ int err = -ENODEV;
+ const struct file_operations *old_fops, *new_fops = NULL;
+
+ mutex_lock(&misc_mtx);
+
+ list_for_each_entry(c, &misc_list, list) {
+ if (c->minor == minor) {
+ new_fops = fops_get(c->fops);
+ break;
+ }
+ }
+
+ if (!new_fops) {
+ mutex_unlock(&misc_mtx);
+ request_module("char-major-%d-%d", MISC_MAJOR, minor);
+ mutex_lock(&misc_mtx);
+
+ list_for_each_entry(c, &misc_list, list) {
+ if (c->minor == minor) {
+ new_fops = fops_get(c->fops);
+ break;
+ }
+ }
+ if (!new_fops)
+ goto fail;
+ }
+
+ err = 0;
+ old_fops = file->f_op;
+ file->f_op = new_fops;
+ if (file->f_op->open) {
+ file->private_data = c;
+ err=file->f_op->open(inode,file);
+ if (err) {
+ fops_put(file->f_op);
+ file->f_op = fops_get(old_fops);
+ }
+ }
+ fops_put(old_fops);
+fail:
+ mutex_unlock(&misc_mtx);
+ return err;
+}
+
+static struct class *misc_class;
+
+static const struct file_operations misc_fops = {
+ .owner = THIS_MODULE,
+ .open = misc_open,
+ .llseek = noop_llseek,
+};
+
+/**
+ * misc_register - register a miscellaneous device
+ * @misc: device structure
+ *
+ * Register a miscellaneous device with the kernel. If the minor
+ * number is set to %MISC_DYNAMIC_MINOR a minor number is assigned
+ * and placed in the minor field of the structure. For other cases
+ * the minor number requested is used.
+ *
+ * The structure passed is linked into the kernel and may not be
+ * destroyed until it has been unregistered.
+ *
+ * A zero is returned on success and a negative errno code for
+ * failure.
+ */
+
+int misc_register(struct miscdevice *misc)
+{
+ struct miscdevice *c;
+ dev_t dev;
+ int err = 0;
+
+ INIT_LIST_HEAD(&misc->list);
+
+ mutex_lock(&misc_mtx);
+ list_for_each_entry(c, &misc_list, list) {
+ if (c->minor == misc->minor) {
+ mutex_unlock(&misc_mtx);
+ return -EBUSY;
+ }
+ }
+
+ if (misc->minor == MISC_DYNAMIC_MINOR) {
+ int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);
+ if (i >= DYNAMIC_MINORS) {
+ mutex_unlock(&misc_mtx);
+ return -EBUSY;
+ }
+ misc->minor = DYNAMIC_MINORS - i - 1;
+ set_bit(i, misc_minors);
+ }
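+
+ /*
+  * Editor's note: bit i of misc_minors corresponds to minor
+  * (DYNAMIC_MINORS - i - 1), so dynamic minors are handed out
+  * counting down from 63; the same reverse mapping is applied in
+  * misc_deregister() and in the error path below.
+  */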
+
+ dev = MKDEV(MISC_MAJOR, misc->minor);
+
+ misc->this_device = device_create(misc_class, misc->parent, dev,
+ misc, "%s", misc->name);
+ if (IS_ERR(misc->this_device)) {
+ int i = DYNAMIC_MINORS - misc->minor - 1;
+ if (i < DYNAMIC_MINORS && i >= 0)
+ clear_bit(i, misc_minors);
+ err = PTR_ERR(misc->this_device);
+ goto out;
+ }
+
+ /*
+ * Add it to the front, so that later devices can "override"
+ * earlier defaults
+ */
+ list_add(&misc->list, &misc_list);
+ out:
+ mutex_unlock(&misc_mtx);
+ return err;
+}
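+
+/*
+ * Editor's sketch: a minimal client of this interface, using a
+ * hypothetical driver "foo" with file operations foo_fops.  With
+ * MISC_DYNAMIC_MINOR the minor is picked at registration time and a
+ * /dev/foo node is created through the misc class:
+ *
+ *	static struct miscdevice foo_miscdev = {
+ *		.minor = MISC_DYNAMIC_MINOR,
+ *		.name  = "foo",
+ *		.fops  = &foo_fops,
+ *	};
+ *
+ *	err = misc_register(&foo_miscdev);	// in foo's module init
+ *	...
+ *	misc_deregister(&foo_miscdev);		// in foo's module exit
+ */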
+
+/**
+ * misc_deregister - unregister a miscellaneous device
+ * @misc: device to unregister
+ *
+ * Unregister a miscellaneous device that was previously
+ * successfully registered with misc_register(). Success
+ * is indicated by a zero return, a negative errno code
+ * indicates an error.
+ */
+
+int misc_deregister(struct miscdevice *misc)
+{
+ int i = DYNAMIC_MINORS - misc->minor - 1;
+
+ if (WARN_ON(list_empty(&misc->list)))
+ return -EINVAL;
+
+ mutex_lock(&misc_mtx);
+ list_del(&misc->list);
+ device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor));
+ if (i < DYNAMIC_MINORS && i >= 0)
+ clear_bit(i, misc_minors);
+ mutex_unlock(&misc_mtx);
+ return 0;
+}
+
+EXPORT_SYMBOL(misc_register);
+EXPORT_SYMBOL(misc_deregister);
+
+static char *misc_devnode(struct device *dev, mode_t *mode)
+{
+ struct miscdevice *c = dev_get_drvdata(dev);
+
+ if (mode && c->mode)
+ *mode = c->mode;
+ if (c->nodename)
+ return kstrdup(c->nodename, GFP_KERNEL);
+ return NULL;
+}
+
+static int __init misc_init(void)
+{
+ int err;
+
+#ifdef CONFIG_PROC_FS
+ proc_create("misc", 0, NULL, &misc_proc_fops);
+#endif
+ misc_class = class_create(THIS_MODULE, "misc");
+ err = PTR_ERR(misc_class);
+ if (IS_ERR(misc_class))
+ goto fail_remove;
+
+ err = -EIO;
+ if (register_chrdev(MISC_MAJOR,"misc",&misc_fops))
+ goto fail_printk;
+ misc_class->devnode = misc_devnode;
+ return 0;
+
+fail_printk:
+ printk("unable to get major %d for misc devices\n", MISC_MAJOR);
+ class_destroy(misc_class);
+fail_remove:
+ remove_proc_entry("misc", NULL);
+ return err;
+}
+subsys_initcall(misc_init);
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
new file mode 100644
index 00000000..33dc2298
--- /dev/null
+++ b/drivers/char/mmtimer.c
@@ -0,0 +1,859 @@
+/*
+ * Timer device implementation for SGI SN platforms.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2001-2006 Silicon Graphics, Inc. All rights reserved.
+ *
+ * This driver exports an API that should be supportable by any HPET or IA-PC
+ * multimedia timer. The code below is currently specific to the SGI Altix
+ * SHub RTC, however.
+ *
+ * 11/01/01 - jbarnes - initial revision
+ * 9/10/04 - Christoph Lameter - remove interrupt support for kernel inclusion
+ * 10/1/04 - Christoph Lameter - provide posix clock CLOCK_SGI_CYCLE
+ * 10/13/04 - Christoph Lameter, Dimitri Sivanich - provide timer interrupt
+ * support via the posix timer interface
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/mmtimer.h>
+#include <linux/miscdevice.h>
+#include <linux/posix-timers.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/shubio.h>
+
+MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>");
+MODULE_DESCRIPTION("SGI Altix RTC Timer");
+MODULE_LICENSE("GPL");
+
+/* name of the device, usually in /dev */
+#define MMTIMER_NAME "mmtimer"
+#define MMTIMER_DESC "SGI Altix RTC Timer"
+#define MMTIMER_VERSION "2.1"
+
+#define RTC_BITS 55 /* 55 bits for this implementation */
+
+static struct k_clock sgi_clock;
+
+extern unsigned long sn_rtc_cycles_per_second;
+
+#define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC))
+
+#define rtc_time() (*RTC_COUNTER_ADDR)
+
+static DEFINE_MUTEX(mmtimer_mutex);
+static long mmtimer_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
+
+/*
+ * Period in femtoseconds (10^-15 s)
+ */
+static unsigned long mmtimer_femtoperiod = 0;
+
+static const struct file_operations mmtimer_fops = {
+ .owner = THIS_MODULE,
+ .mmap = mmtimer_mmap,
+ .unlocked_ioctl = mmtimer_ioctl,
+ .llseek = noop_llseek,
+};
+
+/*
+ * We only have comparison registers RTC1-4 currently available per
+ * node. RTC0 is used by SAL.
+ */
+/* Check for an RTC interrupt pending */
+static int mmtimer_int_pending(int comparator)
+{
+ if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) &
+ SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator)
+ return 1;
+ else
+ return 0;
+}
+
+/* Clear the RTC interrupt pending bit */
+static void mmtimer_clr_int_pending(int comparator)
+{
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
+ SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator);
+}
+
+/* Setup timer on comparator RTC1 */
+static void mmtimer_setup_int_0(int cpu, u64 expires)
+{
+ u64 val;
+
+ /* Disable interrupt */
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL);
+
+ /* Initialize comparator value */
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), -1L);
+
+ /* Clear pending bit */
+ mmtimer_clr_int_pending(0);
+
+ val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) |
+ ((u64)cpu_physical_id(cpu) <<
+ SH_RTC1_INT_CONFIG_PID_SHFT);
+
+ /* Set configuration */
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_CONFIG), val);
+
+ /* Enable RTC interrupts */
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 1UL);
+
+ /* Initialize comparator value */
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), expires);
+
+
+}
+
+/* Setup timer on comparator RTC2 */
+static void mmtimer_setup_int_1(int cpu, u64 expires)
+{
+ u64 val;
+
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL);
+
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), -1L);
+
+ mmtimer_clr_int_pending(1);
+
+ val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) |
+ ((u64)cpu_physical_id(cpu) <<
+ SH_RTC2_INT_CONFIG_PID_SHFT);
+
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val);
+
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 1UL);
+
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), expires);
+}
+
+/* Setup timer on comparator RTC3 */
+static void mmtimer_setup_int_2(int cpu, u64 expires)
+{
+ u64 val;
+
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL);
+
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), -1L);
+
+ mmtimer_clr_int_pending(2);
+
+ val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) |
+ ((u64)cpu_physical_id(cpu) <<
+ SH_RTC3_INT_CONFIG_PID_SHFT);
+
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val);
+
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 1UL);
+
+ HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), expires);
+}
+
+/*
+ * This function must be called with interrupts disabled and preemption off
+ * in order to ensure that the setup succeeds in a deterministic time frame.
+ * It will check if the interrupt setup succeeded.
+ */
+static int mmtimer_setup(int cpu, int comparator, unsigned long expires,
+ u64 *set_completion_time)
+{
+ switch (comparator) {
+ case 0:
+ mmtimer_setup_int_0(cpu, expires);
+ break;
+ case 1:
+ mmtimer_setup_int_1(cpu, expires);
+ break;
+ case 2:
+ mmtimer_setup_int_2(cpu, expires);
+ break;
+ }
+ /* We might've missed our expiration time */
+ *set_completion_time = rtc_time();
+ if (*set_completion_time <= expires)
+ return 1;
+
+ /*
+ * If an interrupt is already pending then it's okay;
+ * if not, the setup failed.
+ */
+ return mmtimer_int_pending(comparator);
+}
+
+static int mmtimer_disable_int(long nasid, int comparator)
+{
+ switch (comparator) {
+ case 0:
+ nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE),
+ 0UL) : REMOTE_HUB_S(nasid, SH_RTC1_INT_ENABLE, 0UL);
+ break;
+ case 1:
+ nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE),
+ 0UL) : REMOTE_HUB_S(nasid, SH_RTC2_INT_ENABLE, 0UL);
+ break;
+ case 2:
+ nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE),
+ 0UL) : REMOTE_HUB_S(nasid, SH_RTC3_INT_ENABLE, 0UL);
+ break;
+ default:
+ return -EFAULT;
+ }
+ return 0;
+}
+
+#define COMPARATOR 1 /* The comparator to use */
+
+#define TIMER_OFF 0xbadcabLL /* Timer is not setup */
+#define TIMER_SET 0 /* Comparator is set for this timer */
+
+#define MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT 40
+
+/* There is one of these for each timer */
+struct mmtimer {
+ struct rb_node list;
+ struct k_itimer *timer;
+ int cpu;
+};
+
+struct mmtimer_node {
+ spinlock_t lock ____cacheline_aligned;
+ struct rb_root timer_head;
+ struct rb_node *next;
+ struct tasklet_struct tasklet;
+};
+static struct mmtimer_node *timers;
+
+static unsigned mmtimer_interval_retry_increment =
+ MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT;
+module_param(mmtimer_interval_retry_increment, uint, 0644);
+MODULE_PARM_DESC(mmtimer_interval_retry_increment,
+ "RTC ticks to add to expiration on interval retry (default 40)");
+
+/*
+ * Add a new mmtimer struct to the node's mmtimer list.
+ * This function assumes the struct mmtimer_node is locked.
+ */
+static void mmtimer_add_list(struct mmtimer *n)
+{
+ int nodeid = n->timer->it.mmtimer.node;
+ unsigned long expires = n->timer->it.mmtimer.expires;
+ struct rb_node **link = &timers[nodeid].timer_head.rb_node;
+ struct rb_node *parent = NULL;
+ struct mmtimer *x;
+
+ /*
+ * Find the right place in the rbtree:
+ */
+ while (*link) {
+ parent = *link;
+ x = rb_entry(parent, struct mmtimer, list);
+
+ if (expires < x->timer->it.mmtimer.expires)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+
+ /*
+ * Insert the timer to the rbtree and check whether it
+ * replaces the first pending timer
+ */
+ rb_link_node(&n->list, parent, link);
+ rb_insert_color(&n->list, &timers[nodeid].timer_head);
+
+ if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next,
+ struct mmtimer, list)->timer->it.mmtimer.expires)
+ timers[nodeid].next = &n->list;
+}
+
+/*
+ * Set the comparator for the next timer.
+ * This function assumes the struct mmtimer_node is locked.
+ */
+static void mmtimer_set_next_timer(int nodeid)
+{
+ struct mmtimer_node *n = &timers[nodeid];
+ struct mmtimer *x;
+ struct k_itimer *t;
+ u64 expires, exp, set_completion_time;
+ int i;
+
+restart:
+ if (n->next == NULL)
+ return;
+
+ x = rb_entry(n->next, struct mmtimer, list);
+ t = x->timer;
+ if (!t->it.mmtimer.incr) {
+ /* Not an interval timer */
+ if (!mmtimer_setup(x->cpu, COMPARATOR,
+ t->it.mmtimer.expires,
+ &set_completion_time)) {
+ /* Late setup, fire now */
+ tasklet_schedule(&n->tasklet);
+ }
+ return;
+ }
+
+ /* Interval timer */
+ i = 0;
+ expires = exp = t->it.mmtimer.expires;
+ while (!mmtimer_setup(x->cpu, COMPARATOR, expires,
+ &set_completion_time)) {
+ int to;
+
+ i++;
+ expires = set_completion_time +
+ mmtimer_interval_retry_increment + (1 << i);
+ /* Calculate overruns as we go. */
+ to = ((u64)(expires - exp) / t->it.mmtimer.incr);
+ if (to) {
+ t->it_overrun += to;
+ t->it.mmtimer.expires += t->it.mmtimer.incr * to;
+ exp = t->it.mmtimer.expires;
+ }
+ if (i > 20) {
+ printk(KERN_ALERT "mmtimer: cannot reschedule timer\n");
+ t->it.mmtimer.clock = TIMER_OFF;
+ n->next = rb_next(&x->list);
+ rb_erase(&x->list, &n->timer_head);
+ kfree(x);
+ goto restart;
+ }
+ }
+}
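+
+/*
+ * Editor's note on the retry loop above: each failed setup pushes the
+ * expiration forward by (mmtimer_interval_retry_increment + 2^i) RTC
+ * ticks past the last completion time, so with the default increment
+ * of 40 the successive offsets are 42, 44, 48, 56, ... ticks, giving
+ * an exponential backoff while the skipped intervals are credited to
+ * the timer as overruns.
+ */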
+
+/**
+ * mmtimer_ioctl - ioctl interface for /dev/mmtimer
+ * @file: file structure for the device
+ * @cmd: command to execute
+ * @arg: optional argument to command
+ *
+ * Executes the command specified by @cmd. Returns 0 for success, < 0 for
+ * failure.
+ *
+ * Valid commands:
+ *
+ * %MMTIMER_GETOFFSET - Should return the offset (relative to the start
+ * of the page where the registers are mapped) for the counter in question.
+ *
+ * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15)
+ * seconds
+ *
+ * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address
+ * specified by @arg
+ *
+ * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter
+ *
+ * %MMTIMER_MMAPAVAIL - Returns 1 if the registers can be mmap'd into userspace
+ *
+ * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it
+ * in the address specified by @arg.
+ */
+static long mmtimer_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ mutex_lock(&mmtimer_mutex);
+
+ switch (cmd) {
+ case MMTIMER_GETOFFSET: /* offset of the counter */
+ /*
+ * SN RTC registers are on their own 64k page
+ */
+ if (PAGE_SIZE <= (1 << 16))
+ ret = (((long)RTC_COUNTER_ADDR) & (PAGE_SIZE-1)) / 8;
+ else
+ ret = -ENOSYS;
+ break;
+
+ case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */
+ if (copy_to_user((unsigned long __user *)arg,
+ &mmtimer_femtoperiod, sizeof(unsigned long)))
+ ret = -EFAULT;
+ break;
+
+ case MMTIMER_GETFREQ: /* frequency in Hz */
+ if (copy_to_user((unsigned long __user *)arg,
+ &sn_rtc_cycles_per_second,
+ sizeof(unsigned long)))
+ ret = -EFAULT;
+ break;
+
+ case MMTIMER_GETBITS: /* number of bits in the clock */
+ ret = RTC_BITS;
+ break;
+
+ case MMTIMER_MMAPAVAIL: /* can we mmap the clock into userspace? */
+ ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0;
+ break;
+
+ case MMTIMER_GETCOUNTER:
+ if (copy_to_user((unsigned long __user *)arg,
+ RTC_COUNTER_ADDR, sizeof(unsigned long)))
+ ret = -EFAULT;
+ break;
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+ mutex_unlock(&mmtimer_mutex);
+ return ret;
+}
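+
+/*
+ * Editor's sketch (not part of the driver): a userspace caller could
+ * query the clock like this, assuming <linux/mmtimer.h> supplies the
+ * ioctl numbers and the node is /dev/mmtimer:
+ *
+ *	unsigned long freq, now;
+ *	int fd = open("/dev/mmtimer", O_RDONLY);
+ *
+ *	ioctl(fd, MMTIMER_GETFREQ, &freq);	// ticks per second
+ *	ioctl(fd, MMTIMER_GETCOUNTER, &now);	// current counter value
+ */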
+
+/**
+ * mmtimer_mmap - maps the clock's registers into userspace
+ * @file: file structure for the device
+ * @vma: VMA to map the registers into
+ *
+ * Calls remap_pfn_range() to map the clock's registers into
+ * the calling process' address space.
+ */
+static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long mmtimer_addr;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ if (PAGE_SIZE > (1 << 16))
+ return -ENOSYS;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ mmtimer_addr = __pa(RTC_COUNTER_ADDR);
+ mmtimer_addr &= ~(PAGE_SIZE - 1);
+ mmtimer_addr &= 0xfffffffffffffffUL;
+
+ if (remap_pfn_range(vma, vma->vm_start, mmtimer_addr >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot)) {
+ printk(KERN_ERR "remap_pfn_range failed in mmtimer.c\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
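+
+/*
+ * Editor's sketch: after a successful read-only mmap of the device,
+ * the counter can be sampled without a system call.  MMTIMER_GETOFFSET
+ * returns the offset of the counter, in sizeof(long) units, within the
+ * mapped page:
+ *
+ *	int fd = open("/dev/mmtimer", O_RDONLY);
+ *	int off = ioctl(fd, MMTIMER_GETOFFSET, 0);
+ *	volatile unsigned long *rtc;
+ *
+ *	rtc = (unsigned long *)mmap(NULL, getpagesize(), PROT_READ,
+ *				    MAP_SHARED, fd, 0) + off;
+ *	unsigned long now = *rtc;	// direct counter read
+ */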
+
+static struct miscdevice mmtimer_miscdev = {
+ SGI_MMTIMER,
+ MMTIMER_NAME,
+ &mmtimer_fops
+};
+
+static struct timespec sgi_clock_offset;
+static int sgi_clock_period;
+
+/*
+ * Posix Timer Interface
+ */
+
+static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
+{
+ u64 nsec;
+
+ nsec = rtc_time() * sgi_clock_period
+ + sgi_clock_offset.tv_nsec;
+ *tp = ns_to_timespec(nsec);
+ tp->tv_sec += sgi_clock_offset.tv_sec;
+ return 0;
+};
+
+static int sgi_clock_set(const clockid_t clockid, const struct timespec *tp)
+{
+ u64 nsec;
+ u32 rem;
+
+ nsec = rtc_time() * sgi_clock_period;
+
+ sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem);
+
+ if (rem <= tp->tv_nsec)
+ sgi_clock_offset.tv_nsec = tp->tv_nsec - rem;
+ else {
+ sgi_clock_offset.tv_nsec = tp->tv_nsec + NSEC_PER_SEC - rem;
+ sgi_clock_offset.tv_sec--;
+ }
+ return 0;
+}
+
+/**
+ * mmtimer_interrupt - timer interrupt handler
+ * @irq: irq received
+ * @dev_id: device the irq came from
+ *
+ * Called when one of the comparators matches the counter. This
+ * routine will send signals to processes that have requested
+ * them.
+ *
+ * This interrupt is run in an interrupt context
+ * by the SHUB. It is therefore safe to locally access SHub
+ * registers.
+ */
+static irqreturn_t
+mmtimer_interrupt(int irq, void *dev_id)
+{
+ unsigned long expires = 0;
+ int result = IRQ_NONE;
+ unsigned indx = cpu_to_node(smp_processor_id());
+ struct mmtimer *base;
+
+ spin_lock(&timers[indx].lock);
+ base = rb_entry(timers[indx].next, struct mmtimer, list);
+ if (base == NULL) {
+ spin_unlock(&timers[indx].lock);
+ return result;
+ }
+
+ if (base->cpu == smp_processor_id()) {
+ if (base->timer)
+ expires = base->timer->it.mmtimer.expires;
+ /* expires test won't work with shared irqs */
+ if ((mmtimer_int_pending(COMPARATOR) > 0) ||
+ (expires && (expires <= rtc_time()))) {
+ mmtimer_clr_int_pending(COMPARATOR);
+ tasklet_schedule(&timers[indx].tasklet);
+ result = IRQ_HANDLED;
+ }
+ }
+ spin_unlock(&timers[indx].lock);
+ return result;
+}
+
+static void mmtimer_tasklet(unsigned long data)
+{
+ int nodeid = data;
+ struct mmtimer_node *mn = &timers[nodeid];
+ struct mmtimer *x;
+ struct k_itimer *t;
+ unsigned long flags;
+
+ /* Send signal and deal with periodic signals */
+ spin_lock_irqsave(&mn->lock, flags);
+ if (!mn->next)
+ goto out;
+
+ x = rb_entry(mn->next, struct mmtimer, list);
+ t = x->timer;
+
+ if (t->it.mmtimer.clock == TIMER_OFF)
+ goto out;
+
+ t->it_overrun = 0;
+
+ mn->next = rb_next(&x->list);
+ rb_erase(&x->list, &mn->timer_head);
+
+ if (posix_timer_event(t, 0) != 0)
+ t->it_overrun++;
+
+ if(t->it.mmtimer.incr) {
+ t->it.mmtimer.expires += t->it.mmtimer.incr;
+ mmtimer_add_list(x);
+ } else {
+ /* Ensure we don't false trigger in mmtimer_interrupt */
+ t->it.mmtimer.clock = TIMER_OFF;
+ t->it.mmtimer.expires = 0;
+ kfree(x);
+ }
+ /* Set comparator for next timer, if there is one */
+ mmtimer_set_next_timer(nodeid);
+
+ t->it_overrun_last = t->it_overrun;
+out:
+ spin_unlock_irqrestore(&mn->lock, flags);
+}
+
+static int sgi_timer_create(struct k_itimer *timer)
+{
+ /* Ensure that a newly created timer is off */
+ timer->it.mmtimer.clock = TIMER_OFF;
+ return 0;
+}
+
+/* This does not really delete a timer. It just ensures
+ * that the timer is not active
+ *
+ * Assumption: it_lock is already held with irq's disabled
+ */
+static int sgi_timer_del(struct k_itimer *timr)
+{
+ cnodeid_t nodeid = timr->it.mmtimer.node;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&timers[nodeid].lock, irqflags);
+ if (timr->it.mmtimer.clock != TIMER_OFF) {
+ unsigned long expires = timr->it.mmtimer.expires;
+ struct rb_node *n = timers[nodeid].timer_head.rb_node;
+ struct mmtimer *uninitialized_var(t);
+ int r = 0;
+
+ timr->it.mmtimer.clock = TIMER_OFF;
+ timr->it.mmtimer.expires = 0;
+
+ while (n) {
+ t = rb_entry(n, struct mmtimer, list);
+ if (t->timer == timr)
+ break;
+
+ if (expires < t->timer->it.mmtimer.expires)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+ }
+
+ if (!n) {
+ spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
+ return 0;
+ }
+
+ if (timers[nodeid].next == n) {
+ timers[nodeid].next = rb_next(n);
+ r = 1;
+ }
+
+ rb_erase(n, &timers[nodeid].timer_head);
+ kfree(t);
+
+ if (r) {
+ mmtimer_disable_int(cnodeid_to_nasid(nodeid),
+ COMPARATOR);
+ mmtimer_set_next_timer(nodeid);
+ }
+ }
+ spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
+ return 0;
+}
+
+/* Assumption: it_lock is already held with irq's disabled */
+static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
+{
+ if (timr->it.mmtimer.clock == TIMER_OFF) {
+ cur_setting->it_interval.tv_nsec = 0;
+ cur_setting->it_interval.tv_sec = 0;
+ cur_setting->it_value.tv_nsec = 0;
+ cur_setting->it_value.tv_sec =0;
+ return;
+ }
+
+ cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period);
+ cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period);
+}
+
+
+static int sgi_timer_set(struct k_itimer *timr, int flags,
+ struct itimerspec *new_setting,
+ struct itimerspec *old_setting)
+{
+ unsigned long when, period, irqflags;
+ int err = 0;
+ cnodeid_t nodeid;
+ struct mmtimer *base;
+ struct rb_node *n;
+
+ if (old_setting)
+ sgi_timer_get(timr, old_setting);
+
+ sgi_timer_del(timr);
+ when = timespec_to_ns(&new_setting->it_value);
+ period = timespec_to_ns(&new_setting->it_interval);
+
+ if (when == 0)
+ /* Clear timer */
+ return 0;
+
+ base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL);
+ if (base == NULL)
+ return -ENOMEM;
+
+ if (flags & TIMER_ABSTIME) {
+ struct timespec n;
+ unsigned long now;
+
+ getnstimeofday(&n);
+ now = timespec_to_ns(&n);
+ if (when > now)
+ when -= now;
+ else
+ /* Fire the timer immediately */
+ when = 0;
+ }
+
+ /*
+ * Convert to SGI clock ticks. Keep rtc_time() as close as possible to
+ * the getnstimeofday() call above so the converted value stays as
+ * faithful as possible to the time specified.
+ */
+ when = (when + sgi_clock_period - 1) / sgi_clock_period + rtc_time();
+ period = (period + sgi_clock_period - 1) / sgi_clock_period;
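+ /*
+  * Editor's note: both divisions round up, so a request is never
+  * converted to an earlier tick.  E.g. with a 50 MHz RTC
+  * (sgi_clock_period = 20 ns), when = 25 ns maps to
+  * (25 + 19) / 20 = 2 ticks past the current counter value.
+  */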
+
+ /*
+ * We are allocating a local SHub comparator. If we were moved to
+ * another cpu, a different SHub might be local to us. Prevent that by
+ * disabling preemption.
+ */
+ preempt_disable();
+
+ nodeid = cpu_to_node(smp_processor_id());
+
+ /* Lock the node timer structure */
+ spin_lock_irqsave(&timers[nodeid].lock, irqflags);
+
+ base->timer = timr;
+ base->cpu = smp_processor_id();
+
+ timr->it.mmtimer.clock = TIMER_SET;
+ timr->it.mmtimer.node = nodeid;
+ timr->it.mmtimer.incr = period;
+ timr->it.mmtimer.expires = when;
+
+ n = timers[nodeid].next;
+
+ /* Add the new struct mmtimer to node's timer list */
+ mmtimer_add_list(base);
+
+ if (timers[nodeid].next == n) {
+ /* No need to reprogram comparator for now */
+ spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
+ preempt_enable();
+ return err;
+ }
+
+ /* We need to reprogram the comparator */
+ if (n)
+ mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR);
+
+ mmtimer_set_next_timer(nodeid);
+
+ /* Unlock the node timer structure */
+ spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
+
+ preempt_enable();
+
+ return err;
+}
+
+static int sgi_clock_getres(const clockid_t which_clock, struct timespec *tp)
+{
+ tp->tv_sec = 0;
+ tp->tv_nsec = sgi_clock_period;
+ return 0;
+}
+
+static struct k_clock sgi_clock = {
+ .clock_set = sgi_clock_set,
+ .clock_get = sgi_clock_get,
+ .clock_getres = sgi_clock_getres,
+ .timer_create = sgi_timer_create,
+ .timer_set = sgi_timer_set,
+ .timer_del = sgi_timer_del,
+ .timer_get = sgi_timer_get
+};
+
+/**
+ * mmtimer_init - device initialization routine
+ *
+ * Does initial setup for the mmtimer device.
+ */
+static int __init mmtimer_init(void)
+{
+ cnodeid_t node, maxn = -1;
+
+ if (!ia64_platform_is("sn2"))
+ return 0;
+
+ /*
+ * Sanity check the cycles/sec variable
+ */
+ if (sn_rtc_cycles_per_second < 100000) {
+ printk(KERN_ERR "%s: unable to determine clock frequency\n",
+ MMTIMER_NAME);
+ goto out1;
+ }
+
+ mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second /
+ 2) / sn_rtc_cycles_per_second;
+
+ if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) {
+ printk(KERN_WARNING "%s: unable to allocate interrupt.",
+ MMTIMER_NAME);
+ goto out1;
+ }
+
+ if (misc_register(&mmtimer_miscdev)) {
+ printk(KERN_ERR "%s: failed to register device\n",
+ MMTIMER_NAME);
+ goto out2;
+ }
+
+ /* Get max numbered node, calculate slots needed */
+ for_each_online_node(node) {
+ maxn = node;
+ }
+ maxn++;
+
+ /* Allocate list of node ptrs to mmtimer_t's */
+ timers = kzalloc(sizeof(struct mmtimer_node) * maxn, GFP_KERNEL);
+ if (timers == NULL) {
+ printk(KERN_ERR "%s: failed to allocate memory for device\n",
+ MMTIMER_NAME);
+ goto out3;
+ }
+
+ /* Initialize struct mmtimer's for each online node */
+ for_each_online_node(node) {
+ spin_lock_init(&timers[node].lock);
+ tasklet_init(&timers[node].tasklet, mmtimer_tasklet,
+ (unsigned long) node);
+ }
+
+ sgi_clock_period = NSEC_PER_SEC / sn_rtc_cycles_per_second;
+ posix_timers_register_clock(CLOCK_SGI_CYCLE, &sgi_clock);
+
+ printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION,
+ sn_rtc_cycles_per_second/(unsigned long)1E6);
+
+ return 0;
+
+out3:
+ kfree(timers);
+ misc_deregister(&mmtimer_miscdev);
+out2:
+ free_irq(SGI_MMTIMER_VECTOR, NULL);
+out1:
+ return -1;
+}
+
+module_init(mmtimer_init);
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c
new file mode 100644
index 00000000..b6f8a65c
--- /dev/null
+++ b/drivers/char/msm_smd_pkt.c
@@ -0,0 +1,466 @@
+/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+/*
+ * SMD Packet Driver -- Provides userspace interface to SMD packet ports.
+ */
+
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/poll.h>
+
+#include <mach/msm_smd.h>
+
+#define NUM_SMD_PKT_PORTS 9
+#define DEVICE_NAME "smdpkt"
+#define MAX_BUF_SIZE 2048
+
+struct smd_pkt_dev {
+ struct cdev cdev;
+ struct device *devicep;
+
+ struct smd_channel *ch;
+ int open_count;
+ struct mutex ch_lock;
+ struct mutex rx_lock;
+ struct mutex tx_lock;
+ wait_queue_head_t ch_read_wait_queue;
+ wait_queue_head_t ch_opened_wait_queue;
+
+ int i;
+
+ unsigned char tx_buf[MAX_BUF_SIZE];
+ unsigned char rx_buf[MAX_BUF_SIZE];
+ int remote_open;
+
+} *smd_pkt_devp[NUM_SMD_PKT_PORTS];
+
+struct class *smd_pkt_classp;
+static dev_t smd_pkt_number;
+
+static int msm_smd_pkt_debug_enable;
+module_param_named(debug_enable, msm_smd_pkt_debug_enable,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#ifdef DEBUG
+#define D_DUMP_BUFFER(prestr, cnt, buf) do { \
+ int i; \
+ if (msm_smd_pkt_debug_enable) { \
+ pr_debug("%s", prestr); \
+ for (i = 0; i < cnt; i++) \
+ pr_debug("%.2x", buf[i]); \
+ pr_debug("\n"); \
+ } \
+ } while (0)
+#else
+#define D_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
+#endif
+
+#ifdef DEBUG
+#define DBG(x...) do { \
+ if (msm_smd_pkt_debug_enable) \
+ pr_debug(x); \
+ } while (0)
+#else
+#define DBG(x...) do {} while (0)
+#endif
+
+static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp)
+{
+ int sz;
+
+ if (!smd_pkt_devp || !smd_pkt_devp->ch)
+ return;
+
+ sz = smd_cur_packet_size(smd_pkt_devp->ch);
+ if (sz == 0) {
+ DBG("no packet\n");
+ return;
+ }
+ if (sz > smd_read_avail(smd_pkt_devp->ch)) {
+ DBG("incomplete packet\n");
+ return;
+ }
+
+ DBG("waking up reader\n");
+ wake_up_interruptible(&smd_pkt_devp->ch_read_wait_queue);
+}
+
+static ssize_t smd_pkt_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int r, bytes_read;
+ struct smd_pkt_dev *smd_pkt_devp;
+ struct smd_channel *chl;
+
+ DBG("read %d bytes\n", count);
+ if (count > MAX_BUF_SIZE)
+ return -EINVAL;
+
+ smd_pkt_devp = file->private_data;
+ if (!smd_pkt_devp || !smd_pkt_devp->ch)
+ return -EINVAL;
+
+ chl = smd_pkt_devp->ch;
+wait_for_packet:
+ r = wait_event_interruptible(smd_pkt_devp->ch_read_wait_queue,
+ (smd_cur_packet_size(chl) > 0 &&
+ smd_read_avail(chl) >=
+ smd_cur_packet_size(chl)));
+
+ if (r < 0) {
+ if (r != -ERESTARTSYS)
+ pr_err("wait returned %d\n", r);
+ return r;
+ }
+
+ mutex_lock(&smd_pkt_devp->rx_lock);
+
+ bytes_read = smd_cur_packet_size(smd_pkt_devp->ch);
+ if (bytes_read == 0 ||
+ bytes_read < smd_read_avail(smd_pkt_devp->ch)) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ DBG("Nothing to read\n");
+ goto wait_for_packet;
+ }
+
+ if (bytes_read > count) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ pr_info("packet size %d > buffer size %d", bytes_read, count);
+ return -EINVAL;
+ }
+
+ r = smd_read(smd_pkt_devp->ch, smd_pkt_devp->rx_buf, bytes_read);
+ if (r != bytes_read) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ pr_err("smd_read failed to read %d bytes: %d\n", bytes_read, r);
+ return -EIO;
+ }
+
+ D_DUMP_BUFFER("read: ", bytes_read, smd_pkt_devp->rx_buf);
+ r = copy_to_user(buf, smd_pkt_devp->rx_buf, bytes_read);
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ if (r) {
+ pr_err("copy_to_user failed %d\n", r);
+ return -EFAULT;
+ }
+
+ DBG("read complete %d bytes\n", bytes_read);
+ check_and_wakeup_reader(smd_pkt_devp);
+
+ return bytes_read;
+}
+
+static ssize_t smd_pkt_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int r;
+ struct smd_pkt_dev *smd_pkt_devp;
+
+ if (count > MAX_BUF_SIZE)
+ return -EINVAL;
+
+ DBG("writting %d bytes\n", count);
+
+ smd_pkt_devp = file->private_data;
+ if (!smd_pkt_devp || !smd_pkt_devp->ch)
+ return -EINVAL;
+
+ mutex_lock(&smd_pkt_devp->tx_lock);
+ if (smd_write_avail(smd_pkt_devp->ch) < count) {
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ DBG("Not enough space to write\n");
+ return -ENOMEM;
+ }
+
+ D_DUMP_BUFFER("write: ", count, buf);
+ r = copy_from_user(smd_pkt_devp->tx_buf, buf, count);
+ if (r) {
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ pr_err("copy_from_user failed %d\n", r);
+ return -EFAULT;
+ }
+
+ r = smd_write(smd_pkt_devp->ch, smd_pkt_devp->tx_buf, count);
+ if (r != count) {
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ pr_err("smd_write failed to write %d bytes: %d.\n", count, r);
+ return -EIO;
+ }
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+
+ DBG("wrote %d bytes\n", count);
+ return count;
+}
+
+static unsigned int smd_pkt_poll(struct file *file, poll_table *wait)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned int mask = 0;
+
+ smd_pkt_devp = file->private_data;
+ if (!smd_pkt_devp)
+ return POLLERR;
+
+ DBG("poll waiting\n");
+ poll_wait(file, &smd_pkt_devp->ch_read_wait_queue, wait);
+ if (smd_read_avail(smd_pkt_devp->ch))
+ mask |= POLLIN | POLLRDNORM;
+
+ DBG("poll return\n");
+ return mask;
+}
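+
+/*
+ * Editor's sketch: the poll hook above only signals readability, so a
+ * userspace consumer typically blocks in poll() and then issues a
+ * read() large enough for a whole packet (a buffer smaller than the
+ * pending packet is rejected with -EINVAL).  Given an fd open on,
+ * e.g., /dev/smdcntl0:
+ *
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *	char buf[2048];			// MAX_BUF_SIZE
+ *	ssize_t n;
+ *
+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+ *		n = read(fd, buf, sizeof(buf));
+ */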
+
+static void smd_pkt_ch_notify(void *priv, unsigned event)
+{
+ struct smd_pkt_dev *smd_pkt_devp = priv;
+
+ if (smd_pkt_devp->ch == 0)
+ return;
+
+ switch (event) {
+ case SMD_EVENT_DATA:
+ DBG("data\n");
+ check_and_wakeup_reader(smd_pkt_devp);
+ break;
+
+ case SMD_EVENT_OPEN:
+ DBG("remote open\n");
+ smd_pkt_devp->remote_open = 1;
+ wake_up_interruptible(&smd_pkt_devp->ch_opened_wait_queue);
+ break;
+
+ case SMD_EVENT_CLOSE:
+ smd_pkt_devp->remote_open = 0;
+ pr_info("remote closed\n");
+ break;
+
+ default:
+ pr_err("unknown event %d\n", event);
+ break;
+ }
+}
+
+static char *smd_pkt_dev_name[] = {
+ "smdcntl0",
+ "smdcntl1",
+ "smdcntl2",
+ "smdcntl3",
+ "smdcntl4",
+ "smdcntl5",
+ "smdcntl6",
+ "smdcntl7",
+ "smd22",
+};
+
+static char *smd_ch_name[] = {
+ "DATA5_CNTL",
+ "DATA6_CNTL",
+ "DATA7_CNTL",
+ "DATA8_CNTL",
+ "DATA9_CNTL",
+ "DATA12_CNTL",
+ "DATA13_CNTL",
+ "DATA14_CNTL",
+ "DATA22",
+};
+
+static int smd_pkt_open(struct inode *inode, struct file *file)
+{
+ int r = 0;
+ struct smd_pkt_dev *smd_pkt_devp;
+
+ smd_pkt_devp = container_of(inode->i_cdev, struct smd_pkt_dev, cdev);
+ if (!smd_pkt_devp)
+ return -EINVAL;
+
+ file->private_data = smd_pkt_devp;
+
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ if (smd_pkt_devp->open_count == 0) {
+ r = smd_open(smd_ch_name[smd_pkt_devp->i],
+ &smd_pkt_devp->ch, smd_pkt_devp,
+ smd_pkt_ch_notify);
+ if (r < 0) {
+ pr_err("smd_open failed for %s, %d\n",
+ smd_ch_name[smd_pkt_devp->i], r);
+ goto out;
+ }
+
+ r = wait_event_interruptible_timeout(
+ smd_pkt_devp->ch_opened_wait_queue,
+ smd_pkt_devp->remote_open,
+ 2 * HZ); /* timeout is in jiffies: two seconds */
+ if (r == 0)
+ r = -ETIMEDOUT;
+
+ if (r < 0) {
+ pr_err("wait returned %d\n", r);
+ smd_close(smd_pkt_devp->ch);
+ smd_pkt_devp->ch = 0;
+ } else {
+ smd_pkt_devp->open_count++;
+ r = 0;
+ }
+ }
+out:
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+ return r;
+}
+
+static int smd_pkt_release(struct inode *inode, struct file *file)
+{
+ int r = 0;
+ struct smd_pkt_dev *smd_pkt_devp = file->private_data;
+
+ if (!smd_pkt_devp)
+ return -EINVAL;
+
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ if (--smd_pkt_devp->open_count == 0) {
+ r = smd_close(smd_pkt_devp->ch);
+ smd_pkt_devp->ch = 0;
+ }
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+
+ return r;
+}
+
+static const struct file_operations smd_pkt_fops = {
+ .owner = THIS_MODULE,
+ .open = smd_pkt_open,
+ .release = smd_pkt_release,
+ .read = smd_pkt_read,
+ .write = smd_pkt_write,
+ .poll = smd_pkt_poll,
+};
+
+static int __init smd_pkt_init(void)
+{
+ int i;
+ int r;
+
+ r = alloc_chrdev_region(&smd_pkt_number, 0,
+ NUM_SMD_PKT_PORTS, DEVICE_NAME);
+ if (r) {
+ pr_err("alloc_chrdev_region() failed %d\n", r);
+ return r;
+ }
+
+ smd_pkt_classp = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(smd_pkt_classp)) {
+ r = PTR_ERR(smd_pkt_classp);
+ pr_err("class_create() failed %d\n", r);
+ goto unreg_chardev;
+ }
+
+ for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
+ smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev),
+ GFP_KERNEL);
+ if (!smd_pkt_devp[i]) {
+ r = -ENOMEM;
+ pr_err("kzalloc() failed\n");
+ goto clean_cdevs;
+ }
+
+ smd_pkt_devp[i]->i = i;
+
+ init_waitqueue_head(&smd_pkt_devp[i]->ch_read_wait_queue);
+ smd_pkt_devp[i]->remote_open = 0;
+ init_waitqueue_head(&smd_pkt_devp[i]->ch_opened_wait_queue);
+
+ mutex_init(&smd_pkt_devp[i]->ch_lock);
+ mutex_init(&smd_pkt_devp[i]->rx_lock);
+ mutex_init(&smd_pkt_devp[i]->tx_lock);
+
+ cdev_init(&smd_pkt_devp[i]->cdev, &smd_pkt_fops);
+ smd_pkt_devp[i]->cdev.owner = THIS_MODULE;
+
+ r = cdev_add(&smd_pkt_devp[i]->cdev,
+ (smd_pkt_number + i), 1);
+ if (r) {
+ pr_err("cdev_add() failed %d\n", r);
+ kfree(smd_pkt_devp[i]);
+ goto clean_cdevs;
+ }
+
+ smd_pkt_devp[i]->devicep =
+ device_create(smd_pkt_classp, NULL,
+ (smd_pkt_number + i), NULL,
+ smd_pkt_dev_name[i]);
+ if (IS_ERR(smd_pkt_devp[i]->devicep)) {
+ r = PTR_ERR(smd_pkt_devp[i]->devicep);
+ pr_err("device_create() failed %d\n", r);
+ cdev_del(&smd_pkt_devp[i]->cdev);
+ kfree(smd_pkt_devp[i]);
+ goto clean_cdevs;
+ }
+
+ }
+
+ pr_info("SMD Packet Port Driver Initialized.\n");
+ return 0;
+
+clean_cdevs:
+ if (i > 0) {
+ while (--i >= 0) {
+ mutex_destroy(&smd_pkt_devp[i]->ch_lock);
+ mutex_destroy(&smd_pkt_devp[i]->rx_lock);
+ mutex_destroy(&smd_pkt_devp[i]->tx_lock);
+ cdev_del(&smd_pkt_devp[i]->cdev);
+ kfree(smd_pkt_devp[i]);
+ device_destroy(smd_pkt_classp,
+ MKDEV(MAJOR(smd_pkt_number), i));
+ }
+ }
+
+ class_destroy(smd_pkt_classp);
+unreg_chardev:
+ unregister_chrdev_region(MAJOR(smd_pkt_number), NUM_SMD_PKT_PORTS);
+ return r;
+}
+module_init(smd_pkt_init);
+
+static void __exit smd_pkt_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
+ mutex_destroy(&smd_pkt_devp[i]->ch_lock);
+ mutex_destroy(&smd_pkt_devp[i]->rx_lock);
+ mutex_destroy(&smd_pkt_devp[i]->tx_lock);
+ cdev_del(&smd_pkt_devp[i]->cdev);
+ kfree(smd_pkt_devp[i]);
+ device_destroy(smd_pkt_classp,
+ MKDEV(MAJOR(smd_pkt_number), i));
+ }
+
+ class_destroy(smd_pkt_classp);
+ unregister_chrdev_region(MAJOR(smd_pkt_number), NUM_SMD_PKT_PORTS);
+}
+module_exit(smd_pkt_cleanup);
+
+MODULE_DESCRIPTION("MSM Shared Memory Packet Port");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
new file mode 100644
index 00000000..25d139c9
--- /dev/null
+++ b/drivers/char/mspec.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+/*
+ * SN Platform Special Memory (mspec) Support
+ *
+ * This driver exports the SN special memory (mspec) facility to user
+ * processes.
+ * There are three types of memory made available through this driver:
+ * fetchops, uncached and cached.
+ *
+ * Fetchops are atomic memory operations that are implemented in the
+ * memory controller on SGI SN hardware.
+ *
+ * Uncached regions are used for the memory write-combining feature of
+ * the ia64 cpu.
+ *
+ * Cached regions are used for areas of memory that are accessed as
+ * cached addresses on our partition and as uncached addresses from
+ * other partitions. Due to a design constraint of the SN2 SHub,
+ * processors on the same FSB must not perform both a cached and an
+ * uncached reference to the same cache line. These special memory
+ * cached regions prevent the
+ * kernel from ever dropping in a TLB entry and therefore prevent the
+ * processor from ever speculating a cache line from this page.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/numa.h>
+#include <asm/page.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+#include <asm/tlbflush.h>
+#include <asm/uncached.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/mspec.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/io.h>
+#include <asm/sn/bte.h>
+#include <asm/sn/shubio.h>
+
+
+#define FETCHOP_ID "SGI Fetchop,"
+#define CACHED_ID "Cached,"
+#define UNCACHED_ID "Uncached"
+#define REVISION "4.0"
+#define MSPEC_BASENAME "mspec"
+
+/*
+ * Page types allocated by the device.
+ */
+enum mspec_page_type {
+ MSPEC_FETCHOP = 1,
+ MSPEC_CACHED,
+ MSPEC_UNCACHED
+};
+
+#ifdef CONFIG_SGI_SN
+static int is_sn2;
+#else
+#define is_sn2 0
+#endif
+
+/*
+ * One of these structures is allocated when an mspec region is mmaped. The
+ * structure is pointed to by the vma->vm_private_data field in the vma struct.
+ * This structure is used to record the addresses of the mspec pages.
+ * This structure is shared by all vma's that are split off from the
+ * original vma when split_vma()'s are done.
+ *
+ * The refcnt is incremented atomically because mm->mmap_sem does not
+ * provide protection in the fork case, where multiple tasks share the
+ * vma_data.
+ */
+struct vma_data {
+ atomic_t refcnt; /* Number of vmas sharing the data. */
+ spinlock_t lock; /* Serialize access to this structure. */
+ int count; /* Number of pages allocated. */
+ enum mspec_page_type type; /* Type of pages allocated. */
+ int flags; /* See VMD_xxx below. */
+ unsigned long vm_start; /* Original (unsplit) base. */
+ unsigned long vm_end; /* Original (unsplit) end. */
+ unsigned long maddr[0]; /* Array of MSPEC addresses. */
+};
+
+#define VMD_VMALLOCED 0x1 /* vmalloc'd rather than kmalloc'd */
+
+/* used on shub2 to clear FOP cache in the HUB */
+static unsigned long scratch_page[MAX_NUMNODES];
+#define SH2_AMO_CACHE_ENTRIES 4
+
+static inline int
+mspec_zero_block(unsigned long addr, int len)
+{
+ int status;
+
+ if (is_sn2) {
+ if (is_shub2()) {
+ int nid;
+ void *p;
+ int i;
+
+ nid = nasid_to_cnodeid(get_node_number(__pa(addr)));
+ p = (void *)TO_AMO(scratch_page[nid]);
+
+ for (i=0; i < SH2_AMO_CACHE_ENTRIES; i++) {
+ FETCHOP_LOAD_OP(p, FETCHOP_LOAD);
+ p += FETCHOP_VAR_SIZE;
+ }
+ }
+
+ status = bte_copy(0, addr & ~__IA64_UNCACHED_OFFSET, len,
+ BTE_WACQUIRE | BTE_ZERO_FILL, NULL);
+ } else {
+ memset((char *) addr, 0, len);
+ status = 0;
+ }
+ return status;
+}
+
+/*
+ * mspec_open
+ *
+ * Called when a device mapping is created by a means other than mmap
+ * (via fork, or a partial munmap that splits the vma). Increments the
+ * reference count on the underlying mspec data so it is not freed
+ * prematurely.
+ */
+static void
+mspec_open(struct vm_area_struct *vma)
+{
+ struct vma_data *vdata;
+
+ vdata = vma->vm_private_data;
+ atomic_inc(&vdata->refcnt);
+}
+
+/*
+ * mspec_close
+ *
+ * Called when unmapping a device mapping. Frees all mspec pages
+ * belonging to all the vma's sharing this vma_data structure.
+ */
+static void
+mspec_close(struct vm_area_struct *vma)
+{
+ struct vma_data *vdata;
+ int index, last_index;
+ unsigned long my_page;
+
+ vdata = vma->vm_private_data;
+
+ if (!atomic_dec_and_test(&vdata->refcnt))
+ return;
+
+ last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+ for (index = 0; index < last_index; index++) {
+ if (vdata->maddr[index] == 0)
+ continue;
+ /*
+ * Clear the page before sticking it back
+ * into the pool.
+ */
+ my_page = vdata->maddr[index];
+ vdata->maddr[index] = 0;
+ if (!mspec_zero_block(my_page, PAGE_SIZE))
+ uncached_free_page(my_page, 1);
+ else
+ printk(KERN_WARNING "mspec_close(): "
+ "failed to zero page %ld\n", my_page);
+ }
+
+ if (vdata->flags & VMD_VMALLOCED)
+ vfree(vdata);
+ else
+ kfree(vdata);
+}
+
+/*
+ * mspec_fault
+ *
+ * Creates a mspec page and maps it to user space.
+ */
+static int
+mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ unsigned long paddr, maddr;
+ unsigned long pfn;
+ pgoff_t index = vmf->pgoff;
+ struct vma_data *vdata = vma->vm_private_data;
+
+ maddr = (volatile unsigned long) vdata->maddr[index];
+ if (maddr == 0) {
+ maddr = uncached_alloc_page(numa_node_id(), 1);
+ if (maddr == 0)
+ return VM_FAULT_OOM;
+
+ spin_lock(&vdata->lock);
+ if (vdata->maddr[index] == 0) {
+ vdata->count++;
+ vdata->maddr[index] = maddr;
+ } else {
+ uncached_free_page(maddr, 1);
+ maddr = vdata->maddr[index];
+ }
+ spin_unlock(&vdata->lock);
+ }
+
+ if (vdata->type == MSPEC_FETCHOP)
+ paddr = TO_AMO(maddr);
+ else
+ paddr = maddr & ~__IA64_UNCACHED_OFFSET;
+
+ pfn = paddr >> PAGE_SHIFT;
+
+ /*
+ * vm_insert_pfn can fail with -EBUSY, but in that case it will
+ * be because another thread has installed the pte first, so it
+ * is no problem.
+ */
+ vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+ return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct mspec_vm_ops = {
+ .open = mspec_open,
+ .close = mspec_close,
+ .fault = mspec_fault,
+};
+
+/*
+ * mspec_mmap
+ *
+ * Called when mmapping the device. Initializes the vma with a fault handler
+ * and private data structure necessary to allocate, track, and free the
+ * underlying pages.
+ */
+static int
+mspec_mmap(struct file *file, struct vm_area_struct *vma,
+ enum mspec_page_type type)
+{
+ struct vma_data *vdata;
+ int pages, vdata_size, flags = 0;
+
+ if (vma->vm_pgoff != 0)
+ return -EINVAL;
+
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+
+ if ((vma->vm_flags & VM_WRITE) == 0)
+ return -EPERM;
+
+ pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
+ if (vdata_size <= PAGE_SIZE)
+ vdata = kzalloc(vdata_size, GFP_KERNEL);
+ else {
+ vdata = vzalloc(vdata_size);
+ flags = VMD_VMALLOCED;
+ }
+ if (!vdata)
+ return -ENOMEM;
+
+ vdata->vm_start = vma->vm_start;
+ vdata->vm_end = vma->vm_end;
+ vdata->flags = flags;
+ vdata->type = type;
+ spin_lock_init(&vdata->lock);
+ vdata->refcnt = ATOMIC_INIT(1);
+ vma->vm_private_data = vdata;
+
+ vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
+ if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_ops = &mspec_vm_ops;
+
+ return 0;
+}
+
+static int
+fetchop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return mspec_mmap(file, vma, MSPEC_FETCHOP);
+}
+
+static int
+cached_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return mspec_mmap(file, vma, MSPEC_CACHED);
+}
+
+static int
+uncached_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return mspec_mmap(file, vma, MSPEC_UNCACHED);
+}
+
+static const struct file_operations fetchop_fops = {
+ .owner = THIS_MODULE,
+ .mmap = fetchop_mmap,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice fetchop_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "sgi_fetchop",
+ .fops = &fetchop_fops
+};
+
+static const struct file_operations cached_fops = {
+ .owner = THIS_MODULE,
+ .mmap = cached_mmap,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice cached_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mspec_cached",
+ .fops = &cached_fops
+};
+
+static const struct file_operations uncached_fops = {
+ .owner = THIS_MODULE,
+ .mmap = uncached_mmap,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice uncached_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mspec_uncached",
+ .fops = &uncached_fops
+};
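+
+/*
+ * Editor's sketch: userspace obtains special memory by mmapping one of
+ * the nodes registered above with a shared, writable mapping (both are
+ * required by mspec_mmap()); pages are then allocated lazily by
+ * mspec_fault() on first touch.  For uncached memory, for example:
+ *
+ *	int fd = open("/dev/mspec_uncached", O_RDWR);
+ *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+ *		       MAP_SHARED, fd, 0);
+ */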
+
+/*
+ * mspec_init
+ *
+ * Called at boot time to initialize the mspec facility.
+ */
+static int __init
+mspec_init(void)
+{
+ int ret;
+ int nid;
+
+ /*
+ * The fetchop device only works on SN2 hardware, uncached and cached
+ * memory drivers should both be valid on all ia64 hardware
+ */
+#ifdef CONFIG_SGI_SN
+ if (ia64_platform_is("sn2")) {
+ is_sn2 = 1;
+ if (is_shub2()) {
+ ret = -ENOMEM;
+ for_each_node_state(nid, N_ONLINE) {
+ int actual_nid;
+ int nasid;
+ unsigned long phys;
+
+ scratch_page[nid] = uncached_alloc_page(nid, 1);
+ if (scratch_page[nid] == 0)
+ goto free_scratch_pages;
+ phys = __pa(scratch_page[nid]);
+ nasid = get_node_number(phys);
+ actual_nid = nasid_to_cnodeid(nasid);
+ if (actual_nid != nid)
+ goto free_scratch_pages;
+ }
+ }
+
+ ret = misc_register(&fetchop_miscdev);
+ if (ret) {
+ printk(KERN_ERR
+ "%s: failed to register device %i\n",
+ FETCHOP_ID, ret);
+ goto free_scratch_pages;
+ }
+ }
+#endif
+ ret = misc_register(&cached_miscdev);
+ if (ret) {
+ printk(KERN_ERR "%s: failed to register device %i\n",
+ CACHED_ID, ret);
+ if (is_sn2)
+ misc_deregister(&fetchop_miscdev);
+ goto free_scratch_pages;
+ }
+ ret = misc_register(&uncached_miscdev);
+ if (ret) {
+ printk(KERN_ERR "%s: failed to register device %i\n",
+ UNCACHED_ID, ret);
+ misc_deregister(&cached_miscdev);
+ if (is_sn2)
+ misc_deregister(&fetchop_miscdev);
+ goto free_scratch_pages;
+ }
+
+ printk(KERN_INFO "%s %s initialized devices: %s %s %s\n",
+ MSPEC_BASENAME, REVISION, is_sn2 ? FETCHOP_ID : "",
+ CACHED_ID, UNCACHED_ID);
+
+ return 0;
+
+ free_scratch_pages:
+ for_each_node(nid) {
+ if (scratch_page[nid] != 0)
+ uncached_free_page(scratch_page[nid], 1);
+ }
+ return ret;
+}
+
+static void __exit
+mspec_exit(void)
+{
+ int nid;
+
+ misc_deregister(&uncached_miscdev);
+ misc_deregister(&cached_miscdev);
+ if (is_sn2) {
+ misc_deregister(&fetchop_miscdev);
+
+ for_each_node(nid) {
+ if (scratch_page[nid] != 0)
+ uncached_free_page(scratch_page[nid], 1);
+ }
+ }
+}
+
+module_init(mspec_init);
+module_exit(mspec_exit);
+
+MODULE_AUTHOR("Silicon Graphics, Inc. <linux-altix@sgi.com>");
+MODULE_DESCRIPTION("Driver for SGI SN special memory operations");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c
new file mode 100644
index 00000000..492dbfb2
--- /dev/null
+++ b/drivers/char/mwave/3780i.c
@@ -0,0 +1,740 @@
+/*
+*
+* 3780i.c -- helper routines for the 3780i DSP
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#include <linux/kernel.h>
+#include <linux/unistd.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/sched.h> /* cond_resched() */
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include "smapi.h"
+#include "mwavedd.h"
+#include "3780i.h"
+
+static DEFINE_SPINLOCK(dsp_lock);
+
+static void PaceMsaAccess(unsigned short usDspBaseIO)
+{
+ cond_resched();
+ udelay(100);
+ cond_resched();
+}
+
+unsigned short dsp3780I_ReadMsaCfg(unsigned short usDspBaseIO,
+ unsigned long ulMsaAddr)
+{
+ unsigned long flags;
+ unsigned short val;
+
+ PRINTK_3(TRACE_3780I,
+ "3780i::dsp3780I_ReadMsaCfg entry usDspBaseIO %x ulMsaAddr %lx\n",
+ usDspBaseIO, ulMsaAddr);
+
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr);
+ OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16));
+ val = InWordDsp(DSP_MsaDataDSISHigh);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ PRINTK_2(TRACE_3780I, "3780i::dsp3780I_ReadMsaCfg exit val %x\n", val);
+
+ return val;
+}
+
+void dsp3780I_WriteMsaCfg(unsigned short usDspBaseIO,
+ unsigned long ulMsaAddr, unsigned short usValue)
+{
+ unsigned long flags;
+
+ PRINTK_4(TRACE_3780I,
+ "3780i::dsp3780i_WriteMsaCfg entry usDspBaseIO %x ulMsaAddr %lx usValue %x\n",
+ usDspBaseIO, ulMsaAddr, usValue);
+
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr);
+ OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16));
+ OutWordDsp(DSP_MsaDataDSISHigh, usValue);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+}
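+
+/*
+ * Editor's note: the two helpers above implement the 3780i's indexed
+ * MSA access protocol: the 32-bit MSA address is latched through the
+ * DSP_MsaAddrLow/High register pair, then the data port is read or
+ * written.  Both steps happen under dsp_lock so the address/data
+ * sequence cannot be interleaved by another CPU.
+ */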
+
+static void dsp3780I_WriteGenCfg(unsigned short usDspBaseIO, unsigned uIndex,
+ unsigned char ucValue)
+{
+ DSP_ISA_SLAVE_CONTROL rSlaveControl;
+ DSP_ISA_SLAVE_CONTROL rSlaveControl_Save;
+
+
+ PRINTK_4(TRACE_3780I,
+ "3780i::dsp3780i_WriteGenCfg entry usDspBaseIO %x uIndex %x ucValue %x\n",
+ usDspBaseIO, uIndex, ucValue);
+
+ MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl);
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780i_WriteGenCfg rSlaveControl %x\n",
+ MKBYTE(rSlaveControl));
+
+ rSlaveControl_Save = rSlaveControl;
+ rSlaveControl.ConfigMode = TRUE;
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780i_WriteGenCfg entry rSlaveControl+ConfigMode %x\n",
+ MKBYTE(rSlaveControl));
+
+ OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl));
+ OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex);
+ OutByteDsp(DSP_ConfigData, ucValue);
+ OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save));
+
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_WriteGenCfg exit\n");
+
+
+}
+
+#if 0
+unsigned char dsp3780I_ReadGenCfg(unsigned short usDspBaseIO,
+ unsigned uIndex)
+{
+ DSP_ISA_SLAVE_CONTROL rSlaveControl;
+ DSP_ISA_SLAVE_CONTROL rSlaveControl_Save;
+ unsigned char ucValue;
+
+
+ PRINTK_3(TRACE_3780I,
+ "3780i::dsp3780i_ReadGenCfg entry usDspBaseIO %x uIndex %x\n",
+ usDspBaseIO, uIndex);
+
+ MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl);
+ rSlaveControl_Save = rSlaveControl;
+ rSlaveControl.ConfigMode = TRUE;
+ OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl));
+ OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex);
+ ucValue = InByteDsp(DSP_ConfigData);
+ OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save));
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780i_ReadGenCfg exit ucValue %x\n", ucValue);
+
+
+ return ucValue;
+}
+#endif /* 0 */
+
+int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
+ unsigned short *pIrqMap,
+ unsigned short *pDmaMap)
+{
+ unsigned long flags;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ int i;
+ DSP_UART_CFG_1 rUartCfg1;
+ DSP_UART_CFG_2 rUartCfg2;
+ DSP_HBRIDGE_CFG_1 rHBridgeCfg1;
+ DSP_HBRIDGE_CFG_2 rHBridgeCfg2;
+ DSP_BUSMASTER_CFG_1 rBusmasterCfg1;
+ DSP_BUSMASTER_CFG_2 rBusmasterCfg2;
+ DSP_ISA_PROT_CFG rIsaProtCfg;
+ DSP_POWER_MGMT_CFG rPowerMgmtCfg;
+ DSP_HBUS_TIMER_CFG rHBusTimerCfg;
+ DSP_LBUS_TIMEOUT_DISABLE rLBusTimeoutDisable;
+ DSP_CHIP_RESET rChipReset;
+ DSP_CLOCK_CONTROL_1 rClockControl1;
+ DSP_CLOCK_CONTROL_2 rClockControl2;
+ DSP_ISA_SLAVE_CONTROL rSlaveControl;
+ DSP_HBRIDGE_CONTROL rHBridgeControl;
+ unsigned short ChipID = 0;
+ unsigned short tval;
+
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780I_EnableDSP entry pSettings->bDSPEnabled %x\n",
+ pSettings->bDSPEnabled);
+
+
+ if (!pSettings->bDSPEnabled) {
+ PRINTK_ERROR( KERN_ERR "3780i::dsp3780I_EnableDSP: Error: DSP not enabled. Aborting.\n" );
+ return -EIO;
+ }
+
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780i_EnableDSP entry pSettings->bModemEnabled %x\n",
+ pSettings->bModemEnabled);
+
+ if (pSettings->bModemEnabled) {
+ rUartCfg1.Reserved = rUartCfg2.Reserved = 0;
+ rUartCfg1.IrqActiveLow = pSettings->bUartIrqActiveLow;
+ rUartCfg1.IrqPulse = pSettings->bUartIrqPulse;
+ rUartCfg1.Irq =
+ (unsigned char) pIrqMap[pSettings->usUartIrq];
+ switch (pSettings->usUartBaseIO) {
+ case 0x03F8:
+ rUartCfg1.BaseIO = 0;
+ break;
+ case 0x02F8:
+ rUartCfg1.BaseIO = 1;
+ break;
+ case 0x03E8:
+ rUartCfg1.BaseIO = 2;
+ break;
+ case 0x02E8:
+ rUartCfg1.BaseIO = 3;
+ break;
+ }
+ rUartCfg2.Enable = TRUE;
+ }
+
+ rHBridgeCfg1.Reserved = rHBridgeCfg2.Reserved = 0;
+ rHBridgeCfg1.IrqActiveLow = pSettings->bDspIrqActiveLow;
+ rHBridgeCfg1.IrqPulse = pSettings->bDspIrqPulse;
+ rHBridgeCfg1.Irq = (unsigned char) pIrqMap[pSettings->usDspIrq];
+ rHBridgeCfg1.AccessMode = 1;
+ rHBridgeCfg2.Enable = TRUE;
+
+
+ rBusmasterCfg2.Reserved = 0;
+ rBusmasterCfg1.Dma = (unsigned char) pDmaMap[pSettings->usDspDma];
+ rBusmasterCfg1.NumTransfers =
+ (unsigned char) pSettings->usNumTransfers;
+ rBusmasterCfg1.ReRequest = (unsigned char) pSettings->usReRequest;
+ rBusmasterCfg1.MEMCS16 = pSettings->bEnableMEMCS16;
+ rBusmasterCfg2.IsaMemCmdWidth =
+ (unsigned char) pSettings->usIsaMemCmdWidth;
+
+
+ rIsaProtCfg.Reserved = 0;
+ rIsaProtCfg.GateIOCHRDY = pSettings->bGateIOCHRDY;
+
+ rPowerMgmtCfg.Reserved = 0;
+ rPowerMgmtCfg.Enable = pSettings->bEnablePwrMgmt;
+
+ rHBusTimerCfg.LoadValue =
+ (unsigned char) pSettings->usHBusTimerLoadValue;
+
+ rLBusTimeoutDisable.Reserved = 0;
+ rLBusTimeoutDisable.DisableTimeout =
+ pSettings->bDisableLBusTimeout;
+
+ MKWORD(rChipReset) = ~pSettings->usChipletEnable;
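+	/* A set bit in DSP_ChipReset holds that chiplet in reset, so the
+	 * enable mask is inverted: enabled chiplets come out of reset */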
+
+ rClockControl1.Reserved1 = rClockControl1.Reserved2 = 0;
+ rClockControl1.N_Divisor = pSettings->usN_Divisor;
+ rClockControl1.M_Multiplier = pSettings->usM_Multiplier;
+
+ rClockControl2.Reserved = 0;
+ rClockControl2.PllBypass = pSettings->bPllBypass;
+
+ /* Issue a soft reset to the chip */
+ /* Note: Since we may be coming in with 3780i clocks suspended, we must keep
+ * soft-reset active for 10ms.
+ */
+ rSlaveControl.ClockControl = 0;
+ rSlaveControl.SoftReset = TRUE;
+ rSlaveControl.ConfigMode = FALSE;
+ rSlaveControl.Reserved = 0;
+
+ PRINTK_4(TRACE_3780I,
+ "3780i::dsp3780i_EnableDSP usDspBaseIO %x index %x taddr %x\n",
+ usDspBaseIO, DSP_IsaSlaveControl,
+ usDspBaseIO + DSP_IsaSlaveControl);
+
+ PRINTK_2(TRACE_3780I,
+		 "3780i::dsp3780i_EnableDSP rSlaveControl %x\n",
+ MKWORD(rSlaveControl));
+
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
+ MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl);
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780i_EnableDSP rSlaveControl 2 %x\n", tval);
+
+
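+	/* 11 iterations x 2 ms = 22 ms of soft reset, comfortably beyond
+	 * the 10 ms minimum noted above */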
+ for (i = 0; i < 11; i++)
+ udelay(2000);
+
+ rSlaveControl.SoftReset = FALSE;
+ OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
+
+ MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl);
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780i_EnableDSP rSlaveControl 3 %x\n", tval);
+
+
+ /* Program our general configuration registers */
+ WriteGenCfg(DSP_HBridgeCfg1Index, MKBYTE(rHBridgeCfg1));
+ WriteGenCfg(DSP_HBridgeCfg2Index, MKBYTE(rHBridgeCfg2));
+ WriteGenCfg(DSP_BusMasterCfg1Index, MKBYTE(rBusmasterCfg1));
+ WriteGenCfg(DSP_BusMasterCfg2Index, MKBYTE(rBusmasterCfg2));
+ WriteGenCfg(DSP_IsaProtCfgIndex, MKBYTE(rIsaProtCfg));
+ WriteGenCfg(DSP_PowerMgCfgIndex, MKBYTE(rPowerMgmtCfg));
+ WriteGenCfg(DSP_HBusTimerCfgIndex, MKBYTE(rHBusTimerCfg));
+
+ if (pSettings->bModemEnabled) {
+ WriteGenCfg(DSP_UartCfg1Index, MKBYTE(rUartCfg1));
+ WriteGenCfg(DSP_UartCfg2Index, MKBYTE(rUartCfg2));
+ }
+
+
+ rHBridgeControl.EnableDspInt = FALSE;
+ rHBridgeControl.MemAutoInc = TRUE;
+ rHBridgeControl.IoAutoInc = FALSE;
+ rHBridgeControl.DiagnosticMode = FALSE;
+
+ PRINTK_3(TRACE_3780I,
+ "3780i::dsp3780i_EnableDSP DSP_HBridgeControl %x rHBridgeControl %x\n",
+ DSP_HBridgeControl, MKWORD(rHBridgeControl));
+
+ OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ WriteMsaCfg(DSP_LBusTimeoutDisable, MKWORD(rLBusTimeoutDisable));
+ WriteMsaCfg(DSP_ClockControl_1, MKWORD(rClockControl1));
+ WriteMsaCfg(DSP_ClockControl_2, MKWORD(rClockControl2));
+ WriteMsaCfg(DSP_ChipReset, MKWORD(rChipReset));
+
+ ChipID = ReadMsaCfg(DSP_ChipID);
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780I_EnableDSP exiting bRC=TRUE, ChipID %x\n",
+ ChipID);
+
+ return 0;
+}
+
+int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings)
+{
+ unsigned long flags;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ DSP_ISA_SLAVE_CONTROL rSlaveControl;
+
+
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP entry\n");
+
+ rSlaveControl.ClockControl = 0;
+ rSlaveControl.SoftReset = TRUE;
+ rSlaveControl.ConfigMode = FALSE;
+ rSlaveControl.Reserved = 0;
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
+
+ udelay(5);
+
+ rSlaveControl.ClockControl = 1;
+ OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ udelay(5);
+
+
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP exit\n");
+
+ return 0;
+}
+
+int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings)
+{
+ unsigned long flags;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ DSP_BOOT_DOMAIN rBootDomain;
+ DSP_HBRIDGE_CONTROL rHBridgeControl;
+
+
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset entry\n");
+
+ spin_lock_irqsave(&dsp_lock, flags);
+ /* Mask DSP to PC interrupt */
+ MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
+
+ PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rHBridgeControl %x\n",
+ MKWORD(rHBridgeControl));
+
+ rHBridgeControl.EnableDspInt = FALSE;
+ OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ /* Reset the core via the boot domain register */
+ rBootDomain.ResetCore = TRUE;
+ rBootDomain.Halt = TRUE;
+ rBootDomain.NMI = TRUE;
+ rBootDomain.Reserved = 0;
+
+ PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rBootDomain %x\n",
+ MKWORD(rBootDomain));
+
+ WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
+
+ /* Reset all the chiplets and then reactivate them */
+ WriteMsaCfg(DSP_ChipReset, 0xFFFF);
+ udelay(5);
+ WriteMsaCfg(DSP_ChipReset,
+ (unsigned short) (~pSettings->usChipletEnable));
+
+
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset exit bRC=0\n");
+
+ return 0;
+}
+
+
+int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings)
+{
+ unsigned long flags;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ DSP_BOOT_DOMAIN rBootDomain;
+ DSP_HBRIDGE_CONTROL rHBridgeControl;
+
+
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run entry\n");
+
+
+ /* Transition the core to a running state */
+ rBootDomain.ResetCore = TRUE;
+ rBootDomain.Halt = FALSE;
+ rBootDomain.NMI = TRUE;
+ rBootDomain.Reserved = 0;
+ WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
+
+ udelay(5);
+
+ rBootDomain.ResetCore = FALSE;
+ WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
+ udelay(5);
+
+ rBootDomain.NMI = FALSE;
+ WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
+ udelay(5);
+
+ /* Enable DSP to PC interrupt */
+ spin_lock_irqsave(&dsp_lock, flags);
+ MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
+ rHBridgeControl.EnableDspInt = TRUE;
+
+ PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Run rHBridgeControl %x\n",
+ MKWORD(rHBridgeControl));
+
+ OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+
+ PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run exit bRC=TRUE\n");
+
+ return 0;
+}
+
+
+int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr)
+{
+ unsigned long flags;
+ unsigned short __user *pusBuffer = pvBuffer;
+ unsigned short val;
+
+
+ PRINTK_5(TRACE_3780I,
+ "3780i::dsp3780I_ReadDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+ usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+
+ /* Set the initial MSA address. No adjustments need to be made to data store addresses */
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+ OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ /* Transfer the memory block */
+ while (uCount-- != 0) {
+ spin_lock_irqsave(&dsp_lock, flags);
+ val = InWordDsp(DSP_MsaDataDSISHigh);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ if(put_user(val, pusBuffer++))
+ return -EFAULT;
+
+ PRINTK_3(TRACE_3780I,
+ "3780I::dsp3780I_ReadDStore uCount %x val %x\n",
+ uCount, val);
+
+ PaceMsaAccess(usDspBaseIO);
+ }
+
+
+ PRINTK_1(TRACE_3780I,
+ "3780I::dsp3780I_ReadDStore exit bRC=TRUE\n");
+
+ return 0;
+}
+
+int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
+ void __user *pvBuffer, unsigned uCount,
+ unsigned long ulDSPAddr)
+{
+ unsigned long flags;
+ unsigned short __user *pusBuffer = pvBuffer;
+ unsigned short val;
+
+
+ PRINTK_5(TRACE_3780I,
+		"3780i::dsp3780I_ReadAndClearDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+ usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+
+ /* Set the initial MSA address. No adjustments need to be made to data store addresses */
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+ OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ /* Transfer the memory block */
+ while (uCount-- != 0) {
+ spin_lock_irqsave(&dsp_lock, flags);
+ val = InWordDsp(DSP_ReadAndClear);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ if(put_user(val, pusBuffer++))
+ return -EFAULT;
+
+ PRINTK_3(TRACE_3780I,
+			 "3780I::dsp3780I_ReadAndClearDStore uCount %x val %x\n",
+ uCount, val);
+
+ PaceMsaAccess(usDspBaseIO);
+ }
+
+
+ PRINTK_1(TRACE_3780I,
+ "3780I::dsp3780I_ReadAndClearDStore exit bRC=TRUE\n");
+
+ return 0;
+}
+
+
+int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr)
+{
+ unsigned long flags;
+ unsigned short __user *pusBuffer = pvBuffer;
+
+
+ PRINTK_5(TRACE_3780I,
+		"3780i::dsp3780I_WriteDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+ usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+
+ /* Set the initial MSA address. No adjustments need to be made to data store addresses */
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+ OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ /* Transfer the memory block */
+ while (uCount-- != 0) {
+ unsigned short val;
+ if(get_user(val, pusBuffer++))
+ return -EFAULT;
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaDataDSISHigh, val);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ PRINTK_3(TRACE_3780I,
+ "3780I::dsp3780I_WriteDStore uCount %x val %x\n",
+ uCount, val);
+
+ PaceMsaAccess(usDspBaseIO);
+ }
+
+
+ PRINTK_1(TRACE_3780I,
+		"3780I::dsp3780I_WriteDStore exit bRC=TRUE\n");
+
+ return 0;
+}
+
+
+int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr)
+{
+ unsigned long flags;
+ unsigned short __user *pusBuffer = pvBuffer;
+
+ PRINTK_5(TRACE_3780I,
+ "3780i::dsp3780I_ReadIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+ usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+	/*
+	 * Set the initial MSA address. To convert an instruction-store
+	 * address to an MSA address, shift the address two bits to the
+	 * left and set bit 22.
+	 */
+ ulDSPAddr = (ulDSPAddr << 2) | (1 << 22);
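+	/* e.g. i-store address 0x0100 becomes MSA address 0x00400400 */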
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+ OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ /* Transfer the memory block */
+ while (uCount-- != 0) {
+ unsigned short val_lo, val_hi;
+ spin_lock_irqsave(&dsp_lock, flags);
+ val_lo = InWordDsp(DSP_MsaDataISLow);
+ val_hi = InWordDsp(DSP_MsaDataDSISHigh);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+ if(put_user(val_lo, pusBuffer++))
+ return -EFAULT;
+ if(put_user(val_hi, pusBuffer++))
+ return -EFAULT;
+
+ PRINTK_4(TRACE_3780I,
+ "3780I::dsp3780I_ReadIStore uCount %x val_lo %x val_hi %x\n",
+ uCount, val_lo, val_hi);
+
+ PaceMsaAccess(usDspBaseIO);
+
+ }
+
+ PRINTK_1(TRACE_3780I,
+ "3780I::dsp3780I_ReadIStore exit bRC=TRUE\n");
+
+ return 0;
+}
+
+
+int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr)
+{
+ unsigned long flags;
+ unsigned short __user *pusBuffer = pvBuffer;
+
+ PRINTK_5(TRACE_3780I,
+ "3780i::dsp3780I_WriteIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
+ usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
+
+
+	/*
+	 * Set the initial MSA address. To convert an instruction-store
+	 * address to an MSA address, shift the address two bits to the
+	 * left and set bit 22.
+	 */
+ ulDSPAddr = (ulDSPAddr << 2) | (1 << 22);
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
+ OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulDSPAddr >> 16));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ /* Transfer the memory block */
+ while (uCount-- != 0) {
+ unsigned short val_lo, val_hi;
+ if(get_user(val_lo, pusBuffer++))
+ return -EFAULT;
+ if(get_user(val_hi, pusBuffer++))
+ return -EFAULT;
+ spin_lock_irqsave(&dsp_lock, flags);
+ OutWordDsp(DSP_MsaDataISLow, val_lo);
+ OutWordDsp(DSP_MsaDataDSISHigh, val_hi);
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+ PRINTK_4(TRACE_3780I,
+ "3780I::dsp3780I_WriteIStore uCount %x val_lo %x val_hi %x\n",
+ uCount, val_lo, val_hi);
+
+ PaceMsaAccess(usDspBaseIO);
+
+ }
+
+ PRINTK_1(TRACE_3780I,
+ "3780I::dsp3780I_WriteIStore exit bRC=TRUE\n");
+
+ return 0;
+}
+
+
+int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
+ unsigned short *pusIPCSource)
+{
+ unsigned long flags;
+ DSP_HBRIDGE_CONTROL rHBridgeControl;
+ unsigned short temp;
+
+
+ PRINTK_3(TRACE_3780I,
+ "3780i::dsp3780I_GetIPCSource entry usDspBaseIO %x pusIPCSource %p\n",
+ usDspBaseIO, pusIPCSource);
+
+ /*
+ * Disable DSP to PC interrupts, read the interrupt register,
+ * clear the pending IPC bits, and reenable DSP to PC interrupts
+ */
+ spin_lock_irqsave(&dsp_lock, flags);
+ MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
+ rHBridgeControl.EnableDspInt = FALSE;
+ OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+
+ *pusIPCSource = InWordDsp(DSP_Interrupt);
+ temp = (unsigned short) ~(*pusIPCSource);
+
+ PRINTK_3(TRACE_3780I,
+ "3780i::dsp3780I_GetIPCSource, usIPCSource %x ~ %x\n",
+ *pusIPCSource, temp);
+
+ OutWordDsp(DSP_Interrupt, (unsigned short) ~(*pusIPCSource));
+
+ rHBridgeControl.EnableDspInt = TRUE;
+ OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+ spin_unlock_irqrestore(&dsp_lock, flags);
+
+
+ PRINTK_2(TRACE_3780I,
+ "3780i::dsp3780I_GetIPCSource exit usIPCSource %x\n",
+ *pusIPCSource);
+
+ return 0;
+}
diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h
new file mode 100644
index 00000000..fba6ab11
--- /dev/null
+++ b/drivers/char/mwave/3780i.h
@@ -0,0 +1,358 @@
+/*
+*
+* 3780i.h -- declarations for 3780i.c
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#ifndef _LINUX_3780I_H
+#define _LINUX_3780I_H
+
+#include <asm/io.h>
+
+/* DSP I/O port offsets and definitions */
+#define DSP_IsaSlaveControl 0x0000 /* ISA slave control register */
+#define DSP_IsaSlaveStatus 0x0001 /* ISA slave status register */
+#define DSP_ConfigAddress 0x0002 /* General config address register */
+#define DSP_ConfigData 0x0003 /* General config data register */
+#define DSP_HBridgeControl 0x0002 /* HBridge control register */
+#define DSP_MsaAddrLow 0x0004 /* MSP System Address, low word */
+#define DSP_MsaAddrHigh 0x0006 /* MSP System Address, high word */
+#define DSP_MsaDataDSISHigh 0x0008 /* MSA data register: d-store word or high byte of i-store */
+#define DSP_MsaDataISLow 0x000A /* MSA data register: low word of i-store */
+#define DSP_ReadAndClear 0x000C /* MSA read and clear data register */
+#define DSP_Interrupt 0x000E /* Interrupt register (IPC source) */
+
+typedef struct {
+ unsigned char ClockControl:1; /* RW: Clock control: 0=normal, 1=stop 3780i clocks */
+ unsigned char SoftReset:1; /* RW: Soft reset 0=normal, 1=soft reset active */
+ unsigned char ConfigMode:1; /* RW: Configuration mode, 0=normal, 1=config mode */
+ unsigned char Reserved:5; /* 0: Reserved */
+} DSP_ISA_SLAVE_CONTROL;
+
+
+typedef struct {
+ unsigned short EnableDspInt:1; /* RW: Enable DSP to X86 ISA interrupt 0=mask it, 1=enable it */
+ unsigned short MemAutoInc:1; /* RW: Memory address auto increment, 0=disable, 1=enable */
+ unsigned short IoAutoInc:1; /* RW: I/O address auto increment, 0=disable, 1=enable */
+	unsigned short DiagnosticMode:1;	/* RW: Diagnostic mode 0=normal, 1=diagnostic mode */
+ unsigned short IsaPacingTimer:12; /* R: ISA access pacing timer: count of core cycles stolen */
+} DSP_HBRIDGE_CONTROL;
+
+
+/* DSP register indexes used with the configuration register address (index) register */
+#define DSP_UartCfg1Index 0x0003 /* UART config register 1 */
+#define DSP_UartCfg2Index 0x0004 /* UART config register 2 */
+#define DSP_HBridgeCfg1Index 0x0007 /* HBridge config register 1 */
+#define DSP_HBridgeCfg2Index 0x0008 /* HBridge config register 2 */
+#define DSP_BusMasterCfg1Index 0x0009 /* ISA bus master config register 1 */
+#define DSP_BusMasterCfg2Index 0x000A /* ISA bus master config register 2 */
+#define DSP_IsaProtCfgIndex 0x000F /* ISA protocol control register */
+#define DSP_PowerMgCfgIndex       0x0010	/* Low power suspend/resume enable */
+#define DSP_HBusTimerCfgIndex 0x0011 /* HBUS timer load value */
+
+typedef struct {
+ unsigned char IrqActiveLow:1; /* RW: IRQ active high or low: 0=high, 1=low */
+ unsigned char IrqPulse:1; /* RW: IRQ pulse or level: 0=level, 1=pulse */
+ unsigned char Irq:3; /* RW: IRQ selection */
+ unsigned char BaseIO:2; /* RW: Base I/O selection */
+ unsigned char Reserved:1; /* 0: Reserved */
+} DSP_UART_CFG_1;
+
+typedef struct {
+ unsigned char Enable:1; /* RW: Enable I/O and IRQ: 0=FALSE, 1=TRUE */
+ unsigned char Reserved:7; /* 0: Reserved */
+} DSP_UART_CFG_2;
+
+typedef struct {
+ unsigned char IrqActiveLow:1; /* RW: IRQ active high=0 or low=1 */
+ unsigned char IrqPulse:1; /* RW: IRQ pulse=1 or level=0 */
+ unsigned char Irq:3; /* RW: IRQ selection */
+ unsigned char AccessMode:1; /* RW: 16-bit register access method 0=byte, 1=word */
+ unsigned char Reserved:2; /* 0: Reserved */
+} DSP_HBRIDGE_CFG_1;
+
+typedef struct {
+ unsigned char Enable:1; /* RW: enable I/O and IRQ: 0=FALSE, 1=TRUE */
+ unsigned char Reserved:7; /* 0: Reserved */
+} DSP_HBRIDGE_CFG_2;
+
+
+typedef struct {
+ unsigned char Dma:3; /* RW: DMA channel selection */
+ unsigned char NumTransfers:2; /* RW: Maximum # of transfers once being granted the ISA bus */
+ unsigned char ReRequest:2; /* RW: Minimum delay between releasing the ISA bus and requesting it again */
+ unsigned char MEMCS16:1; /* RW: ISA signal MEMCS16: 0=disabled, 1=enabled */
+} DSP_BUSMASTER_CFG_1;
+
+typedef struct {
+ unsigned char IsaMemCmdWidth:2; /* RW: ISA memory command width */
+ unsigned char Reserved:6; /* 0: Reserved */
+} DSP_BUSMASTER_CFG_2;
+
+
+typedef struct {
+ unsigned char GateIOCHRDY:1; /* RW: Enable IOCHRDY gating: 0=FALSE, 1=TRUE */
+ unsigned char Reserved:7; /* 0: Reserved */
+} DSP_ISA_PROT_CFG;
+
+typedef struct {
+ unsigned char Enable:1; /* RW: Enable low power suspend/resume 0=FALSE, 1=TRUE */
+ unsigned char Reserved:7; /* 0: Reserved */
+} DSP_POWER_MGMT_CFG;
+
+typedef struct {
+ unsigned char LoadValue:8; /* RW: HBUS timer load value */
+} DSP_HBUS_TIMER_CFG;
+
+
+
+/* DSP registers that exist in MSA I/O space */
+#define DSP_ChipID 0x80000000
+#define DSP_MspBootDomain 0x80000580
+#define DSP_LBusTimeoutDisable 0x80000580
+#define DSP_ClockControl_1 0x8000058A
+#define DSP_ClockControl_2 0x8000058C
+#define DSP_ChipReset 0x80000588
+#define DSP_GpioModeControl_15_8 0x80000082
+#define DSP_GpioDriverEnable_15_8 0x80000076
+#define DSP_GpioOutputData_15_8 0x80000072
+
+typedef struct {
+	unsigned short NMI:1;	/* RW: non-maskable interrupt */
+ unsigned short Halt:1; /* RW: Halt MSP clock */
+ unsigned short ResetCore:1; /* RW: Reset MSP core interface */
+ unsigned short Reserved:13; /* 0: Reserved */
+} DSP_BOOT_DOMAIN;
+
+typedef struct {
+ unsigned short DisableTimeout:1; /* RW: Disable LBus timeout */
+ unsigned short Reserved:15; /* 0: Reserved */
+} DSP_LBUS_TIMEOUT_DISABLE;
+
+typedef struct {
+ unsigned short Memory:1; /* RW: Reset memory interface */
+ unsigned short SerialPort1:1; /* RW: Reset serial port 1 interface */
+ unsigned short SerialPort2:1; /* RW: Reset serial port 2 interface */
+ unsigned short SerialPort3:1; /* RW: Reset serial port 3 interface */
+ unsigned short Gpio:1; /* RW: Reset GPIO interface */
+ unsigned short Dma:1; /* RW: Reset DMA interface */
+	unsigned short SoundBlaster:1;	/* RW: Reset Sound Blaster interface */
+ unsigned short Uart:1; /* RW: Reset UART interface */
+ unsigned short Midi:1; /* RW: Reset MIDI interface */
+ unsigned short IsaMaster:1; /* RW: Reset ISA master interface */
+ unsigned short Reserved:6; /* 0: Reserved */
+} DSP_CHIP_RESET;
+
+typedef struct {
+ unsigned short N_Divisor:6; /* RW: (N) PLL output clock divisor */
+ unsigned short Reserved1:2; /* 0: reserved */
+ unsigned short M_Multiplier:6; /* RW: (M) PLL feedback clock multiplier */
+ unsigned short Reserved2:2; /* 0: reserved */
+} DSP_CLOCK_CONTROL_1;
+
+typedef struct {
+ unsigned short PllBypass:1; /* RW: PLL Bypass */
+ unsigned short Reserved:15; /* 0: Reserved */
+} DSP_CLOCK_CONTROL_2;
+
+typedef struct {
+ unsigned short Latch8:1;
+ unsigned short Latch9:1;
+ unsigned short Latch10:1;
+ unsigned short Latch11:1;
+ unsigned short Latch12:1;
+ unsigned short Latch13:1;
+ unsigned short Latch14:1;
+ unsigned short Latch15:1;
+ unsigned short Mask8:1;
+ unsigned short Mask9:1;
+ unsigned short Mask10:1;
+ unsigned short Mask11:1;
+ unsigned short Mask12:1;
+ unsigned short Mask13:1;
+ unsigned short Mask14:1;
+ unsigned short Mask15:1;
+} DSP_GPIO_OUTPUT_DATA_15_8;
+
+typedef struct {
+ unsigned short Enable8:1;
+ unsigned short Enable9:1;
+ unsigned short Enable10:1;
+ unsigned short Enable11:1;
+ unsigned short Enable12:1;
+ unsigned short Enable13:1;
+ unsigned short Enable14:1;
+ unsigned short Enable15:1;
+ unsigned short Mask8:1;
+ unsigned short Mask9:1;
+ unsigned short Mask10:1;
+ unsigned short Mask11:1;
+ unsigned short Mask12:1;
+ unsigned short Mask13:1;
+ unsigned short Mask14:1;
+ unsigned short Mask15:1;
+} DSP_GPIO_DRIVER_ENABLE_15_8;
+
+typedef struct {
+ unsigned short GpioMode8:2;
+ unsigned short GpioMode9:2;
+ unsigned short GpioMode10:2;
+ unsigned short GpioMode11:2;
+ unsigned short GpioMode12:2;
+ unsigned short GpioMode13:2;
+ unsigned short GpioMode14:2;
+ unsigned short GpioMode15:2;
+} DSP_GPIO_MODE_15_8;
+
+/* Component masks that are defined in dspmgr.h */
+#define MW_ADC_MASK 0x0001
+#define MW_AIC2_MASK 0x0006
+#define MW_MIDI_MASK 0x0008
+#define MW_CDDAC_MASK 0x8001
+#define MW_AIC1_MASK 0xE006
+#define MW_UART_MASK 0xE00A
+#define MW_ACI_MASK 0xE00B
+
+/*
+* Definition of 3780i configuration structure. Unless otherwise stated,
+* these values are provided as input to the 3780i support layer. At present,
+* the only values maintained by the 3780i support layer are the saved UART
+* registers.
+*/
+typedef struct _DSP_3780I_CONFIG_SETTINGS {
+
+ /* Location of base configuration register */
+ unsigned short usBaseConfigIO;
+
+ /* Enables for various DSP components */
+ int bDSPEnabled;
+ int bModemEnabled;
+ int bInterruptClaimed;
+
+ /* IRQ, DMA, and Base I/O addresses for various DSP components */
+ unsigned short usDspIrq;
+ unsigned short usDspDma;
+ unsigned short usDspBaseIO;
+ unsigned short usUartIrq;
+ unsigned short usUartBaseIO;
+
+ /* IRQ modes for various DSP components */
+ int bDspIrqActiveLow;
+ int bUartIrqActiveLow;
+ int bDspIrqPulse;
+ int bUartIrqPulse;
+
+ /* Card abilities */
+ unsigned uIps;
+ unsigned uDStoreSize;
+ unsigned uIStoreSize;
+ unsigned uDmaBandwidth;
+
+ /* Adapter specific 3780i settings */
+ unsigned short usNumTransfers;
+ unsigned short usReRequest;
+ int bEnableMEMCS16;
+ unsigned short usIsaMemCmdWidth;
+ int bGateIOCHRDY;
+ int bEnablePwrMgmt;
+ unsigned short usHBusTimerLoadValue;
+ int bDisableLBusTimeout;
+ unsigned short usN_Divisor;
+ unsigned short usM_Multiplier;
+ int bPllBypass;
+ unsigned short usChipletEnable; /* Used with the chip reset register to enable specific chiplets */
+
+ /* Saved UART registers. These are maintained by the 3780i support layer. */
+ int bUartSaved; /* True after a successful save of the UART registers */
+ unsigned char ucIER; /* Interrupt enable register */
+ unsigned char ucFCR; /* FIFO control register */
+ unsigned char ucLCR; /* Line control register */
+ unsigned char ucMCR; /* Modem control register */
+ unsigned char ucSCR; /* Scratch register */
+ unsigned char ucDLL; /* Divisor latch, low byte */
+ unsigned char ucDLM; /* Divisor latch, high byte */
+} DSP_3780I_CONFIG_SETTINGS;
+
+
+/* 3780i support functions */
+int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
+ unsigned short *pIrqMap,
+ unsigned short *pDmaMap);
+int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings);
+int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings);
+int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings);
+int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr);
+int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
+ void __user *pvBuffer, unsigned uCount,
+ unsigned long ulDSPAddr);
+int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr);
+int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr);
+int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
+ unsigned uCount, unsigned long ulDSPAddr);
+unsigned short dsp3780I_ReadMsaCfg(unsigned short usDspBaseIO,
+ unsigned long ulMsaAddr);
+void dsp3780I_WriteMsaCfg(unsigned short usDspBaseIO,
+ unsigned long ulMsaAddr, unsigned short usValue);
+int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
+ unsigned short *pusIPCSource);
+
+/* I/O port access macros */
+#define MKWORD(var) (*((unsigned short *)(&var)))
+#define MKBYTE(var) (*((unsigned char *)(&var)))
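+
+/*
+* Typical use (this pattern appears throughout 3780i.c): overlay a register
+* bitfield structure on a raw word so it can be moved to or from an I/O port
+* in a single access, e.g.
+*
+*   DSP_HBRIDGE_CONTROL rHBridgeControl;
+*   MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
+*   rHBridgeControl.EnableDspInt = TRUE;
+*   OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
+*/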
+
+#define WriteMsaCfg(addr,value) dsp3780I_WriteMsaCfg(usDspBaseIO,addr,value)
+#define ReadMsaCfg(addr) dsp3780I_ReadMsaCfg(usDspBaseIO,addr)
+#define WriteGenCfg(index,value) dsp3780I_WriteGenCfg(usDspBaseIO,index,value)
+#define ReadGenCfg(index) dsp3780I_ReadGenCfg(usDspBaseIO,index)
+
+#define InWordDsp(index) inw(usDspBaseIO+index)
+#define InByteDsp(index) inb(usDspBaseIO+index)
+#define OutWordDsp(index,value) outw(value,usDspBaseIO+index)
+#define OutByteDsp(index,value) outb(value,usDspBaseIO+index)
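+
+/*
+* Note: the configuration and I/O macros above assume a local variable named
+* usDspBaseIO is in scope at each call site.
+*/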
+
+#endif
diff --git a/drivers/char/mwave/Makefile b/drivers/char/mwave/Makefile
new file mode 100644
index 00000000..efa6a82e
--- /dev/null
+++ b/drivers/char/mwave/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for ACP Modem (Mwave).
+#
+# See the README file in this directory for more info. <paulsch@us.ibm.com>
+#
+
+obj-$(CONFIG_MWAVE) += mwave.o
+
+mwave-y := mwavedd.o smapi.o tp3780i.o 3780i.o
+
+# To have the mwave driver disable other uarts if necessary
+# ccflags-y := -DMWAVE_FUTZ_WITH_OTHER_DEVICES
+
+# To compile in lots (~20 KiB) of printk()s that can be enabled at run time
+# for debugging:
+ccflags-y += -DMW_TRACE
diff --git a/drivers/char/mwave/README b/drivers/char/mwave/README
new file mode 100644
index 00000000..c2a58f42
--- /dev/null
+++ b/drivers/char/mwave/README
@@ -0,0 +1,47 @@
+Module options
+--------------
+
+The mwave module takes the following options. Note that these options
+are not saved by the BIOS and so do not persist after unload and reload.
+
+ mwave_debug=value, where value is the bitwise OR of trace flags:
+ 0x0001 mwavedd api tracing
+ 0x0002 smapi api tracing
+ 0x0004 3780i tracing
+ 0x0008 tp3780i tracing
+
+	Tracing only occurs if the driver has been compiled with the
+	MW_TRACE macro #defined (i.e. set ccflags-y += -DMW_TRACE
+	in the Makefile).
+
+ mwave_3780i_irq=5/7/10/11/15
+	If the DSP IRQ has not been set up and stored in the BIOS by the
+	ThinkPad configuration utility, this parameter selects the IRQ
+	used by the DSP.
+
+ mwave_3780i_io=0x130/0x350/0x0070/0xDB0
+	If the DSP I/O range has not been set up and stored in the BIOS by
+	the ThinkPad configuration utility, this parameter selects the I/O
+	range used by the DSP.
+
+ mwave_uart_irq=3/4
+	If the mwave's UART IRQ has not been set up and stored in the BIOS
+	by the ThinkPad configuration utility, this parameter selects the
+	IRQ used by the mwave UART.
+
+ mwave_uart_io=0x3f8/0x2f8/0x3E8/0x2E8
+	If the UART I/O range has not been set up and stored in the BIOS by
+	the ThinkPad configuration utility, this parameter selects the I/O
+	range used by the mwave UART.
+
+Example to enable the 3780i DSP using ttyS1 resources:
+
+ insmod mwave mwave_3780i_irq=10 mwave_3780i_io=0x0130 mwave_uart_irq=3 mwave_uart_io=0x2f8
+
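+Example to enable all driver tracing (effective only if the driver was built
+with the MW_TRACE macro; 0x000f is the OR of all four trace flags listed
+above):
+
+  insmod mwave mwave_debug=0x000f
+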
+Accessing the driver
+--------------------
+
+You must also create a node for the driver:
+ mkdir -p /dev/modems
+ mknod --mode=660 /dev/modems/mwave c 10 219
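+
+(Character major 10 is the misc device major; minor 219 corresponds to
+MWAVE_MINOR, which the driver registers via misc_register().)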
+
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
new file mode 100644
index 00000000..1d82d583
--- /dev/null
+++ b/drivers/char/mwave/mwavedd.c
@@ -0,0 +1,697 @@
+/*
+*
+* mwavedd.c -- mwave device driver
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/major.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/serial.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/serial_8250.h>
+#include "smapi.h"
+#include "mwavedd.h"
+#include "3780i.h"
+#include "tp3780i.h"
+
+MODULE_DESCRIPTION("3780i Advanced Communications Processor (Mwave) driver");
+MODULE_AUTHOR("Mike Sullivan and Paul Schroeder");
+MODULE_LICENSE("GPL");
+
+/*
+* These parameters support the setting of MWave resources. Note that no
+* checks are made against other devices (i.e. the super I/O chip) for
+* conflicts. We depend on users running the tpctl utility to do that for now.
+*/
+static DEFINE_MUTEX(mwave_mutex);
+int mwave_debug = 0;
+int mwave_3780i_irq = 0;
+int mwave_3780i_io = 0;
+int mwave_uart_irq = 0;
+int mwave_uart_io = 0;
+module_param(mwave_debug, int, 0);
+module_param(mwave_3780i_irq, int, 0);
+module_param(mwave_3780i_io, int, 0);
+module_param(mwave_uart_irq, int, 0);
+module_param(mwave_uart_io, int, 0);
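+
+/*
+* A permissions value of 0 means these parameters can only be set at module
+* load time; they are not exposed under /sys/module/mwave/parameters.
+*/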
+
+static int mwave_open(struct inode *inode, struct file *file);
+static int mwave_close(struct inode *inode, struct file *file);
+static long mwave_ioctl(struct file *filp, unsigned int iocmd,
+ unsigned long ioarg);
+
+MWAVE_DEVICE_DATA mwave_s_mdd;
+
+static int mwave_open(struct inode *inode, struct file *file)
+{
+ unsigned int retval = 0;
+
+ PRINTK_3(TRACE_MWAVE,
+ "mwavedd::mwave_open, entry inode %p file %p\n",
+ inode, file);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_open, exit return retval %x\n", retval);
+
+ return retval;
+}
+
+static int mwave_close(struct inode *inode, struct file *file)
+{
+ unsigned int retval = 0;
+
+ PRINTK_3(TRACE_MWAVE,
+ "mwavedd::mwave_close, entry inode %p file %p\n",
+ inode, file);
+
+ PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n",
+ retval);
+
+ return retval;
+}
+
+static long mwave_ioctl(struct file *file, unsigned int iocmd,
+ unsigned long ioarg)
+{
+ unsigned int retval = 0;
+ pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+ void __user *arg = (void __user *)ioarg;
+
+ PRINTK_4(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n",
+ file, iocmd, (int) ioarg);
+
+ switch (iocmd) {
+
+ case IOCTL_MW_RESET:
+ PRINTK_1(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, IOCTL_MW_RESET"
+ " calling tp3780I_ResetDSP\n");
+ mutex_lock(&mwave_mutex);
+ retval = tp3780I_ResetDSP(&pDrvData->rBDData);
+ mutex_unlock(&mwave_mutex);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, IOCTL_MW_RESET"
+ " retval %x from tp3780I_ResetDSP\n",
+ retval);
+ break;
+
+ case IOCTL_MW_RUN:
+ PRINTK_1(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, IOCTL_MW_RUN"
+ " calling tp3780I_StartDSP\n");
+ mutex_lock(&mwave_mutex);
+ retval = tp3780I_StartDSP(&pDrvData->rBDData);
+ mutex_unlock(&mwave_mutex);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, IOCTL_MW_RUN"
+ " retval %x from tp3780I_StartDSP\n",
+ retval);
+ break;
+
+ case IOCTL_MW_DSP_ABILITIES: {
+ MW_ABILITIES rAbilities;
+
+ PRINTK_1(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl,"
+ " IOCTL_MW_DSP_ABILITIES calling"
+ " tp3780I_QueryAbilities\n");
+ mutex_lock(&mwave_mutex);
+ retval = tp3780I_QueryAbilities(&pDrvData->rBDData,
+ &rAbilities);
+ mutex_unlock(&mwave_mutex);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
+ " retval %x from tp3780I_QueryAbilities\n",
+ retval);
+ if (retval == 0) {
+ if( copy_to_user(arg, &rAbilities,
+ sizeof(MW_ABILITIES)) )
+ return -EFAULT;
+ }
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
+ " exit retval %x\n",
+ retval);
+ }
+ break;
+
+ case IOCTL_MW_READ_DATA:
+ case IOCTL_MW_READCLEAR_DATA: {
+ MW_READWRITE rReadData;
+ unsigned short __user *pusBuffer = NULL;
+
+ if( copy_from_user(&rReadData, arg,
+ sizeof(MW_READWRITE)) )
+ return -EFAULT;
+ pusBuffer = (unsigned short __user *) (rReadData.pBuf);
+
+ PRINTK_4(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_READ_DATA,"
+ " size %lx, ioarg %lx pusBuffer %p\n",
+ rReadData.ulDataLength, ioarg, pusBuffer);
+ mutex_lock(&mwave_mutex);
+ retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
+ iocmd,
+ pusBuffer,
+ rReadData.ulDataLength,
+ rReadData.usDspAddress);
+ mutex_unlock(&mwave_mutex);
+ }
+ break;
+
+ case IOCTL_MW_READ_INST: {
+ MW_READWRITE rReadData;
+ unsigned short __user *pusBuffer = NULL;
+
+ if( copy_from_user(&rReadData, arg,
+ sizeof(MW_READWRITE)) )
+ return -EFAULT;
+ pusBuffer = (unsigned short __user *) (rReadData.pBuf);
+
+ PRINTK_4(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_READ_INST,"
+ " size %lx, ioarg %lx pusBuffer %p\n",
+ rReadData.ulDataLength / 2, ioarg,
+ pusBuffer);
+ mutex_lock(&mwave_mutex);
+			retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
+ iocmd, pusBuffer,
+ rReadData.ulDataLength / 2,
+ rReadData.usDspAddress);
+ mutex_unlock(&mwave_mutex);
+ }
+ break;
+
+ case IOCTL_MW_WRITE_DATA: {
+ MW_READWRITE rWriteData;
+ unsigned short __user *pusBuffer = NULL;
+
+ if( copy_from_user(&rWriteData, arg,
+ sizeof(MW_READWRITE)) )
+ return -EFAULT;
+ pusBuffer = (unsigned short __user *) (rWriteData.pBuf);
+
+ PRINTK_4(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA,"
+ " size %lx, ioarg %lx pusBuffer %p\n",
+ rWriteData.ulDataLength, ioarg,
+ pusBuffer);
+ mutex_lock(&mwave_mutex);
+ retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
+ iocmd, pusBuffer,
+ rWriteData.ulDataLength,
+ rWriteData.usDspAddress);
+ mutex_unlock(&mwave_mutex);
+ }
+ break;
+
+ case IOCTL_MW_WRITE_INST: {
+ MW_READWRITE rWriteData;
+ unsigned short __user *pusBuffer = NULL;
+
+ if( copy_from_user(&rWriteData, arg,
+ sizeof(MW_READWRITE)) )
+ return -EFAULT;
+ pusBuffer = (unsigned short __user *)(rWriteData.pBuf);
+
+ PRINTK_4(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST,"
+ " size %lx, ioarg %lx pusBuffer %p\n",
+ rWriteData.ulDataLength, ioarg,
+ pusBuffer);
+ mutex_lock(&mwave_mutex);
+ retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
+ iocmd, pusBuffer,
+ rWriteData.ulDataLength,
+ rWriteData.usDspAddress);
+ mutex_unlock(&mwave_mutex);
+ }
+ break;
+
+ case IOCTL_MW_REGISTER_IPC: {
+ unsigned int ipcnum = (unsigned int) ioarg;
+
+ if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd::mwave_ioctl:"
+ " IOCTL_MW_REGISTER_IPC:"
+ " Error: Invalid ipcnum %x\n",
+ ipcnum);
+ return -EINVAL;
+ }
+ PRINTK_3(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
+ " ipcnum %x entry usIntCount %x\n",
+ ipcnum,
+ pDrvData->IPCs[ipcnum].usIntCount);
+
+ mutex_lock(&mwave_mutex);
+ pDrvData->IPCs[ipcnum].bIsHere = FALSE;
+ pDrvData->IPCs[ipcnum].bIsEnabled = TRUE;
+ mutex_unlock(&mwave_mutex);
+
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
+ " ipcnum %x exit\n",
+ ipcnum);
+ }
+ break;
+
+ case IOCTL_MW_GET_IPC: {
+ unsigned int ipcnum = (unsigned int) ioarg;
+
+ if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd::mwave_ioctl:"
+ " IOCTL_MW_GET_IPC: Error:"
+ " Invalid ipcnum %x\n", ipcnum);
+ return -EINVAL;
+ }
+ PRINTK_3(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
+ " ipcnum %x, usIntCount %x\n",
+ ipcnum,
+ pDrvData->IPCs[ipcnum].usIntCount);
+
+ mutex_lock(&mwave_mutex);
+ if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl, thread for"
+ " ipc %x going to sleep\n",
+ ipcnum);
+ add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
+ pDrvData->IPCs[ipcnum].bIsHere = TRUE;
+ set_current_state(TASK_INTERRUPTIBLE);
+ /* check whether an event was signalled by */
+ /* the interrupt handler while we were gone */
+ if (pDrvData->IPCs[ipcnum].usIntCount == 1) { /* first int has occurred (race condition) */
+ pDrvData->IPCs[ipcnum].usIntCount = 2; /* first int has been handled */
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl"
+ " IOCTL_MW_GET_IPC ipcnum %x"
+ " handling first int\n",
+ ipcnum);
+ } else { /* either 1st int has not yet occurred, or we have already handled the first int */
+ schedule();
+ if (pDrvData->IPCs[ipcnum].usIntCount == 1) {
+ pDrvData->IPCs[ipcnum].usIntCount = 2;
+ }
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl"
+ " IOCTL_MW_GET_IPC ipcnum %x"
+ " woke up and returning to"
+ " application\n",
+ ipcnum);
+ }
+ pDrvData->IPCs[ipcnum].bIsHere = FALSE;
+ remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
+ set_current_state(TASK_RUNNING);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC,"
+ " returning thread for ipc %x"
+ " processing\n",
+ ipcnum);
+ }
+ mutex_unlock(&mwave_mutex);
+ }
+ break;
+
+ case IOCTL_MW_UNREGISTER_IPC: {
+ unsigned int ipcnum = (unsigned int) ioarg;
+
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC"
+ " ipcnum %x\n",
+ ipcnum);
+ if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd::mwave_ioctl:"
+ " IOCTL_MW_UNREGISTER_IPC:"
+ " Error: Invalid ipcnum %x\n",
+ ipcnum);
+ return -EINVAL;
+ }
+ mutex_lock(&mwave_mutex);
+ if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
+ pDrvData->IPCs[ipcnum].bIsEnabled = FALSE;
+ if (pDrvData->IPCs[ipcnum].bIsHere == TRUE) {
+ wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue);
+ }
+ }
+ mutex_unlock(&mwave_mutex);
+ }
+ break;
+
+ default:
+ return -ENOTTY;
+ } /* switch */
+
+ PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval);
+
+ return retval;
+}
+
+
+static ssize_t mwave_read(struct file *file, char __user *buf, size_t count,
+ loff_t * ppos)
+{
+ PRINTK_5(TRACE_MWAVE,
+ "mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n",
+ file, buf, count, ppos);
+
+ return -EINVAL;
+}
+
+
+static ssize_t mwave_write(struct file *file, const char __user *buf,
+ size_t count, loff_t * ppos)
+{
+ PRINTK_5(TRACE_MWAVE,
+ "mwavedd::mwave_write entry file %p, buf %p,"
+ " count %zx ppos %p\n",
+ file, buf, count, ppos);
+
+ return -EINVAL;
+}
+
+
+static int register_serial_portandirq(unsigned int port, int irq)
+{
+ struct uart_port uart;
+
+ switch ( port ) {
+ case 0x3f8:
+ case 0x2f8:
+ case 0x3e8:
+ case 0x2e8:
+ /* OK */
+ break;
+ default:
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd::register_serial_portandirq:"
+ " Error: Illegal port %x\n", port );
+ return -1;
+ } /* switch */
+ /* port is okay */
+
+ switch ( irq ) {
+ case 3:
+ case 4:
+ case 5:
+ case 7:
+ /* OK */
+ break;
+ default:
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd::register_serial_portandirq:"
+ " Error: Illegal irq %x\n", irq );
+ return -1;
+ } /* switch */
+ /* irq is okay */
+
+ memset(&uart, 0, sizeof(struct uart_port));
+
+ uart.uartclk = 1843200;
+ uart.iobase = port;
+ uart.irq = irq;
+ uart.iotype = UPIO_PORT;
+ uart.flags = UPF_SHARE_IRQ;
+ return serial8250_register_port(&uart);
+}
+
+
+static const struct file_operations mwave_fops = {
+ .owner = THIS_MODULE,
+ .read = mwave_read,
+ .write = mwave_write,
+ .unlocked_ioctl = mwave_ioctl,
+ .open = mwave_open,
+ .release = mwave_close,
+ .llseek = default_llseek,
+};
+
+
+static struct miscdevice mwave_misc_dev = { MWAVE_MINOR, "mwave", &mwave_fops };
+
+#if 0 /* totally b0rked */
+/*
+ * sysfs support <paulsch@us.ibm.com>
+ */
+
+struct device mwave_device;
+
+/* Prevent code redundancy, create a macro for mwave_show_* functions. */
+#define mwave_show_function(attr_name, format_string, field) \
+static ssize_t mwave_show_##attr_name(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ DSP_3780I_CONFIG_SETTINGS *pSettings = \
+ &mwave_s_mdd.rBDData.rDspSettings; \
+ return sprintf(buf, format_string, pSettings->field); \
+}
+
+/* All of our attributes are read attributes. */
+#define mwave_dev_rd_attr(attr_name, format_string, field) \
+ mwave_show_function(attr_name, format_string, field) \
+static DEVICE_ATTR(attr_name, S_IRUGO, mwave_show_##attr_name, NULL)
+
+mwave_dev_rd_attr (3780i_dma, "%i\n", usDspDma);
+mwave_dev_rd_attr (3780i_irq, "%i\n", usDspIrq);
+mwave_dev_rd_attr (3780i_io, "%#.4x\n", usDspBaseIO);
+mwave_dev_rd_attr (uart_irq, "%i\n", usUartIrq);
+mwave_dev_rd_attr (uart_io, "%#.4x\n", usUartBaseIO);
+
+static struct device_attribute * const mwave_dev_attrs[] = {
+ &dev_attr_3780i_dma,
+ &dev_attr_3780i_irq,
+ &dev_attr_3780i_io,
+ &dev_attr_uart_irq,
+ &dev_attr_uart_io,
+};
+#endif
+
+/*
+* mwave_init is called on module load
+*
+* mwave_exit is called on module unload
+* mwave_exit is also used to clean up after an aborted mwave_init
+*/
+static void mwave_exit(void)
+{
+ pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+
+ PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit entry\n");
+
+#if 0
+ for (i = 0; i < pDrvData->nr_registered_attrs; i++)
+ device_remove_file(&mwave_device, mwave_dev_attrs[i]);
+ pDrvData->nr_registered_attrs = 0;
+
+ if (pDrvData->device_registered) {
+ device_unregister(&mwave_device);
+ pDrvData->device_registered = FALSE;
+ }
+#endif
+
+ if ( pDrvData->sLine >= 0 ) {
+ serial8250_unregister_port(pDrvData->sLine);
+ }
+ if (pDrvData->bMwaveDevRegistered) {
+ misc_deregister(&mwave_misc_dev);
+ }
+ if (pDrvData->bDSPEnabled) {
+ tp3780I_DisableDSP(&pDrvData->rBDData);
+ }
+ if (pDrvData->bResourcesClaimed) {
+ tp3780I_ReleaseResources(&pDrvData->rBDData);
+ }
+ if (pDrvData->bBDInitialized) {
+ tp3780I_Cleanup(&pDrvData->rBDData);
+ }
+
+ PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit exit\n");
+}
+
+module_exit(mwave_exit);
+
+static int __init mwave_init(void)
+{
+ int i;
+ int retval = 0;
+ pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+
+ PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_init entry\n");
+
+ memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA));
+
+ pDrvData->bBDInitialized = FALSE;
+ pDrvData->bResourcesClaimed = FALSE;
+ pDrvData->bDSPEnabled = FALSE;
+ pDrvData->bDSPReset = FALSE;
+ pDrvData->bMwaveDevRegistered = FALSE;
+ pDrvData->sLine = -1;
+
+ for (i = 0; i < ARRAY_SIZE(pDrvData->IPCs); i++) {
+ pDrvData->IPCs[i].bIsEnabled = FALSE;
+ pDrvData->IPCs[i].bIsHere = FALSE;
+ pDrvData->IPCs[i].usIntCount = 0; /* no ints received yet */
+ init_waitqueue_head(&pDrvData->IPCs[i].ipc_wait_queue);
+ }
+
+ retval = tp3780I_InitializeBoardData(&pDrvData->rBDData);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_init, return from tp3780I_InitializeBoardData"
+ " retval %x\n",
+ retval);
+ if (retval) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd::mwave_init: Error:"
+ " Failed to initialize board data\n");
+ goto cleanup_error;
+ }
+ pDrvData->bBDInitialized = TRUE;
+
+ retval = tp3780I_CalcResources(&pDrvData->rBDData);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_init, return from tp3780I_CalcResources"
+ " retval %x\n",
+ retval);
+ if (retval) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd:mwave_init: Error:"
+ " Failed to calculate resources\n");
+ goto cleanup_error;
+ }
+
+ retval = tp3780I_ClaimResources(&pDrvData->rBDData);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_init, return from tp3780I_ClaimResources"
+ " retval %x\n",
+ retval);
+ if (retval) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd:mwave_init: Error:"
+ " Failed to claim resources\n");
+ goto cleanup_error;
+ }
+ pDrvData->bResourcesClaimed = TRUE;
+
+ retval = tp3780I_EnableDSP(&pDrvData->rBDData);
+ PRINTK_2(TRACE_MWAVE,
+ "mwavedd::mwave_init, return from tp3780I_EnableDSP"
+ " retval %x\n",
+ retval);
+ if (retval) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd:mwave_init: Error:"
+ " Failed to enable DSP\n");
+ goto cleanup_error;
+ }
+ pDrvData->bDSPEnabled = TRUE;
+
+ if (misc_register(&mwave_misc_dev) < 0) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd:mwave_init: Error:"
+ " Failed to register misc device\n");
+ goto cleanup_error;
+ }
+ pDrvData->bMwaveDevRegistered = TRUE;
+
+ pDrvData->sLine = register_serial_portandirq(
+ pDrvData->rBDData.rDspSettings.usUartBaseIO,
+ pDrvData->rBDData.rDspSettings.usUartIrq
+ );
+ if (pDrvData->sLine < 0) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd:mwave_init: Error:"
+ " Failed to register serial driver\n");
+ goto cleanup_error;
+ }
+ /* uart is registered */
+
+#if 0
+ /* sysfs */
+ memset(&mwave_device, 0, sizeof (struct device));
+ dev_set_name(&mwave_device, "mwave");
+
+ if (device_register(&mwave_device))
+ goto cleanup_error;
+ pDrvData->device_registered = TRUE;
+ for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) {
+ if(device_create_file(&mwave_device, mwave_dev_attrs[i])) {
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd:mwave_init: Error:"
+ " Failed to create sysfs file %s\n",
+ mwave_dev_attrs[i]->attr.name);
+ goto cleanup_error;
+ }
+ pDrvData->nr_registered_attrs++;
+ }
+#endif
+
+ /* SUCCESS! */
+ return 0;
+
+cleanup_error:
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "mwavedd::mwave_init: Error:"
+ " Failed to initialize\n");
+ mwave_exit(); /* clean up */
+
+ return -EIO;
+}
+
+module_init(mwave_init);
+
diff --git a/drivers/char/mwave/mwavedd.h b/drivers/char/mwave/mwavedd.h
new file mode 100644
index 00000000..7e0d530e
--- /dev/null
+++ b/drivers/char/mwave/mwavedd.h
@@ -0,0 +1,152 @@
+/*
+*
+* mwavedd.h -- declarations for mwave device driver
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#ifndef _LINUX_MWAVEDD_H
+#define _LINUX_MWAVEDD_H
+#include "3780i.h"
+#include "tp3780i.h"
+#include "smapi.h"
+#include "mwavepub.h"
+#include <linux/ioctl.h>
+#include <asm/uaccess.h>
+#include <linux/wait.h>
+
+extern int mwave_debug;
+extern int mwave_3780i_irq;
+extern int mwave_3780i_io;
+extern int mwave_uart_irq;
+extern int mwave_uart_io;
+
+#define PRINTK_ERROR printk
+#define KERN_ERR_MWAVE KERN_ERR "mwave: "
+
+#define TRACE_MWAVE 0x0001
+#define TRACE_SMAPI 0x0002
+#define TRACE_3780I 0x0004
+#define TRACE_TP3780I 0x0008
+
+#ifdef MW_TRACE
+/* Each macro is wrapped in do { } while (0) so that it expands to a
+   single statement and stays safe in unbraced if/else bodies. */
+#define PRINTK_1(f,s) \
+	do { \
+		if ((f) & mwave_debug) \
+			printk(s); \
+	} while (0)
+
+#define PRINTK_2(f,s,v1) \
+	do { \
+		if ((f) & mwave_debug) \
+			printk(s,v1); \
+	} while (0)
+
+#define PRINTK_3(f,s,v1,v2) \
+	do { \
+		if ((f) & mwave_debug) \
+			printk(s,v1,v2); \
+	} while (0)
+
+#define PRINTK_4(f,s,v1,v2,v3) \
+	do { \
+		if ((f) & mwave_debug) \
+			printk(s,v1,v2,v3); \
+	} while (0)
+
+#define PRINTK_5(f,s,v1,v2,v3,v4) \
+	do { \
+		if ((f) & mwave_debug) \
+			printk(s,v1,v2,v3,v4); \
+	} while (0)
+
+#define PRINTK_6(f,s,v1,v2,v3,v4,v5) \
+	do { \
+		if ((f) & mwave_debug) \
+			printk(s,v1,v2,v3,v4,v5); \
+	} while (0)
+
+#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6) \
+	do { \
+		if ((f) & mwave_debug) \
+			printk(s,v1,v2,v3,v4,v5,v6); \
+	} while (0)
+
+#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7) \
+	do { \
+		if ((f) & mwave_debug) \
+			printk(s,v1,v2,v3,v4,v5,v6,v7); \
+	} while (0)
+
+#else
+#define PRINTK_1(f,s)
+#define PRINTK_2(f,s,v1)
+#define PRINTK_3(f,s,v1,v2)
+#define PRINTK_4(f,s,v1,v2,v3)
+#define PRINTK_5(f,s,v1,v2,v3,v4)
+#define PRINTK_6(f,s,v1,v2,v3,v4,v5)
+#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6)
+#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7)
+#endif
+
+
+typedef struct _MWAVE_IPC {
+ unsigned short usIntCount; /* 0=none, 1=first, 2=greater than 1st */
+ BOOLEAN bIsEnabled;
+ BOOLEAN bIsHere;
+ /* entry spin lock */
+ wait_queue_head_t ipc_wait_queue;
+} MWAVE_IPC;
+
+typedef struct _MWAVE_DEVICE_DATA {
+ THINKPAD_BD_DATA rBDData; /* board driver's data area */
+ unsigned long ulIPCSource_ISR; /* IPC source bits for recently processed intr, set during ISR processing */
+ unsigned long ulIPCSource_DPC; /* IPC source bits for recently processed intr, set during DPC processing */
+ BOOLEAN bBDInitialized;
+ BOOLEAN bResourcesClaimed;
+ BOOLEAN bDSPEnabled;
+ BOOLEAN bDSPReset;
+ MWAVE_IPC IPCs[16];
+ BOOLEAN bMwaveDevRegistered;
+ short sLine;
+ int nr_registered_attrs;
+ int device_registered;
+
+} MWAVE_DEVICE_DATA, *pMWAVE_DEVICE_DATA;
+
+extern MWAVE_DEVICE_DATA mwave_s_mdd;
+
+#endif
diff --git a/drivers/char/mwave/mwavepub.h b/drivers/char/mwave/mwavepub.h
new file mode 100644
index 00000000..60c961ae
--- /dev/null
+++ b/drivers/char/mwave/mwavepub.h
@@ -0,0 +1,89 @@
+/*
+*
+* mwavepub.h -- PUBLIC declarations for the mwave driver
+* and applications using it
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#ifndef _LINUX_MWAVEPUB_H
+#define _LINUX_MWAVEPUB_H
+
+#include <linux/miscdevice.h>
+
+
+typedef struct _MW_ABILITIES {
+ unsigned long instr_per_sec;
+ unsigned long data_size;
+ unsigned long inst_size;
+ unsigned long bus_dma_bw;
+ unsigned short uart_enable;
+ short component_count;
+ unsigned long component_list[7];
+ char mwave_os_name[16];
+ char bios_task_name[16];
+} MW_ABILITIES, *pMW_ABILITIES;
+
+
+typedef struct _MW_READWRITE {
+ unsigned short usDspAddress; /* The dsp address */
+ unsigned long ulDataLength; /* The size in bytes of the data or user buffer */
+ void __user *pBuf; /* Input:variable sized buffer */
+} MW_READWRITE, *pMW_READWRITE;
+
+#define IOCTL_MW_RESET _IO(MWAVE_MINOR,1)
+#define IOCTL_MW_RUN _IO(MWAVE_MINOR,2)
+#define IOCTL_MW_DSP_ABILITIES _IOR(MWAVE_MINOR,3,MW_ABILITIES)
+#define IOCTL_MW_READ_DATA _IOR(MWAVE_MINOR,4,MW_READWRITE)
+#define IOCTL_MW_READCLEAR_DATA _IOR(MWAVE_MINOR,5,MW_READWRITE)
+#define IOCTL_MW_READ_INST _IOR(MWAVE_MINOR,6,MW_READWRITE)
+#define IOCTL_MW_WRITE_DATA _IOW(MWAVE_MINOR,7,MW_READWRITE)
+#define IOCTL_MW_WRITE_INST _IOW(MWAVE_MINOR,8,MW_READWRITE)
+#define IOCTL_MW_REGISTER_IPC _IOW(MWAVE_MINOR,9,int)
+#define IOCTL_MW_UNREGISTER_IPC _IOW(MWAVE_MINOR,10,int)
+#define IOCTL_MW_GET_IPC _IOW(MWAVE_MINOR,11,int)
+#define IOCTL_MW_TRACE _IOR(MWAVE_MINOR,12,MW_READWRITE)
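+
+/*
+ * Illustrative usage only, not part of this header: userspace drives
+ * the DSP through these ioctls on the mwave misc device.  A minimal
+ * sketch (the /dev/mwave node name and the address 0x0100 are
+ * assumptions for illustration):
+ *
+ *	MW_READWRITE rw;
+ *	unsigned short buf[16];
+ *	int fd = open("/dev/mwave", O_RDWR);
+ *
+ *	rw.usDspAddress = 0x0100;
+ *	rw.ulDataLength = sizeof(buf);
+ *	rw.pBuf = buf;
+ *	if (ioctl(fd, IOCTL_MW_READ_DATA, &rw) < 0)
+ *		perror("IOCTL_MW_READ_DATA");
+ */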
+
+
+#endif
diff --git a/drivers/char/mwave/smapi.c b/drivers/char/mwave/smapi.c
new file mode 100644
index 00000000..6187fd14
--- /dev/null
+++ b/drivers/char/mwave/smapi.c
@@ -0,0 +1,570 @@
+/*
+*
+* smapi.c -- SMAPI interface routines
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#include <linux/kernel.h>
+#include <linux/mc146818rtc.h> /* CMOS defines */
+#include "smapi.h"
+#include "mwavedd.h"
+
+static unsigned short g_usSmapiPort = 0;
+
+
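+/*
+ * Issue one SMAPI BIOS call.  As the asm below shows, function code
+ * 0x5380 is loaded into AX, the packed inBX:inCX and inDI:inSI
+ * arguments are unpacked into BX/CX and DI/SI, and the handler is
+ * triggered by writing AL to the SMAPI port (read from CMOS by
+ * smapi_init) and to port 0x4F.  AH == 0 on return indicates
+ * success: the register outputs are latched, usSmapiOK is set to 1
+ * and the function returns 0; anything else maps to -EIO.
+ */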
+static int smapi_request(unsigned short inBX, unsigned short inCX,
+ unsigned short inDI, unsigned short inSI,
+ unsigned short *outAX, unsigned short *outBX,
+ unsigned short *outCX, unsigned short *outDX,
+ unsigned short *outDI, unsigned short *outSI)
+{
+ unsigned short myoutAX = 2, *pmyoutAX = &myoutAX;
+ unsigned short myoutBX = 3, *pmyoutBX = &myoutBX;
+ unsigned short myoutCX = 4, *pmyoutCX = &myoutCX;
+ unsigned short myoutDX = 5, *pmyoutDX = &myoutDX;
+ unsigned short myoutDI = 6, *pmyoutDI = &myoutDI;
+ unsigned short myoutSI = 7, *pmyoutSI = &myoutSI;
+ unsigned short usSmapiOK = -EIO, *pusSmapiOK = &usSmapiOK;
+ unsigned int inBXCX = (inBX << 16) | inCX;
+ unsigned int inDISI = (inDI << 16) | inSI;
+ int retval = 0;
+
+ PRINTK_5(TRACE_SMAPI, "inBX %x inCX %x inDI %x inSI %x\n",
+ inBX, inCX, inDI, inSI);
+
+ __asm__ __volatile__("movw $0x5380,%%ax\n\t"
+ "movl %7,%%ebx\n\t"
+ "shrl $16, %%ebx\n\t"
+ "movw %7,%%cx\n\t"
+ "movl %8,%%edi\n\t"
+ "shrl $16,%%edi\n\t"
+ "movw %8,%%si\n\t"
+ "movw %9,%%dx\n\t"
+ "out %%al,%%dx\n\t"
+ "out %%al,$0x4F\n\t"
+ "cmpb $0x53,%%ah\n\t"
+ "je 2f\n\t"
+ "1:\n\t"
+ "orb %%ah,%%ah\n\t"
+ "jnz 2f\n\t"
+ "movw %%ax,%0\n\t"
+ "movw %%bx,%1\n\t"
+ "movw %%cx,%2\n\t"
+ "movw %%dx,%3\n\t"
+ "movw %%di,%4\n\t"
+ "movw %%si,%5\n\t"
+ "movw $1,%6\n\t"
+ "2:\n\t":"=m"(*(unsigned short *) pmyoutAX),
+ "=m"(*(unsigned short *) pmyoutBX),
+ "=m"(*(unsigned short *) pmyoutCX),
+ "=m"(*(unsigned short *) pmyoutDX),
+ "=m"(*(unsigned short *) pmyoutDI),
+ "=m"(*(unsigned short *) pmyoutSI),
+ "=m"(*(unsigned short *) pusSmapiOK)
+ :"m"(inBXCX), "m"(inDISI), "m"(g_usSmapiPort)
+ :"%eax", "%ebx", "%ecx", "%edx", "%edi",
+ "%esi");
+
+ PRINTK_8(TRACE_SMAPI,
+ "myoutAX %x myoutBX %x myoutCX %x myoutDX %x myoutDI %x myoutSI %x usSmapiOK %x\n",
+ myoutAX, myoutBX, myoutCX, myoutDX, myoutDI, myoutSI,
+ usSmapiOK);
+ *outAX = myoutAX;
+ *outBX = myoutBX;
+ *outCX = myoutCX;
+ *outDX = myoutDX;
+ *outDI = myoutDI;
+ *outSI = myoutSI;
+
+ retval = (usSmapiOK == 1) ? 0 : -EIO;
+ PRINTK_2(TRACE_SMAPI, "smapi::smapi_request exit retval %x\n", retval);
+ return retval;
+}
+
+
+int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
+{
+ int bRC = -EIO;
+ unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
+ unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 };
+ unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
+ unsigned short numDspBases = 8;
+ unsigned short numUartBases = 4;
+
+ PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n");
+
+ bRC = smapi_request(0x1802, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Error: Could not get DSP Settings. Aborting.\n");
+ return bRC;
+ }
+
+ PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n");
+
+ pSettings->bDSPPresent = ((usBX & 0x0100) != 0);
+ pSettings->bDSPEnabled = ((usCX & 0x0001) != 0);
+ pSettings->usDspIRQ = usSI & 0x00FF;
+ pSettings->usDspDMA = (usSI & 0xFF00) >> 8;
+ if ((usDI & 0x00FF) < numDspBases) {
+ pSettings->usDspBaseIO = ausDspBases[usDI & 0x00FF];
+ } else {
+ pSettings->usDspBaseIO = 0;
+ }
+ PRINTK_6(TRACE_SMAPI,
+ "smapi::smapi_query_DSP_cfg get DSP Settings bDSPPresent %x bDSPEnabled %x usDspIRQ %x usDspDMA %x usDspBaseIO %x\n",
+ pSettings->bDSPPresent, pSettings->bDSPEnabled,
+ pSettings->usDspIRQ, pSettings->usDspDMA,
+ pSettings->usDspBaseIO);
+
+ /* check for illegal values */
+ if ( pSettings->usDspBaseIO == 0 )
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP base I/O address is 0\n");
+ if ( pSettings->usDspIRQ == 0 )
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP IRQ line is 0\n");
+
+ bRC = smapi_request(0x1804, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Error: Could not get DSP modem settings. Aborting.\n");
+ return bRC;
+ }
+
+ PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n");
+
+ pSettings->bModemEnabled = ((usCX & 0x0001) != 0);
+ pSettings->usUartIRQ = usSI & 0x000F;
+ if (((usSI & 0xFF00) >> 8) < numUartBases) {
+ pSettings->usUartBaseIO = ausUartBases[(usSI & 0xFF00) >> 8];
+ } else {
+ pSettings->usUartBaseIO = 0;
+ }
+
+ PRINTK_4(TRACE_SMAPI,
+ "smapi::smapi_query_DSP_cfg get DSP modem settings bModemEnabled %x usUartIRQ %x usUartBaseIO %x\n",
+ pSettings->bModemEnabled,
+ pSettings->usUartIRQ,
+ pSettings->usUartBaseIO);
+
+ /* check for illegal values */
+ if ( pSettings->usUartBaseIO == 0 )
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART base I/O address is 0\n");
+ if ( pSettings->usUartIRQ == 0 )
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART IRQ line is 0\n");
+
+ PRINTK_2(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg exit bRC %x\n", bRC);
+
+ return bRC;
+}
+
+
+int smapi_set_DSP_cfg(void)
+{
+ int bRC = -EIO;
+ int i;
+ unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
+ unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 };
+ unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
+ unsigned short ausDspIrqs[] = { 5, 7, 10, 11, 15 };
+ unsigned short ausUartIrqs[] = { 3, 4 };
+
+ unsigned short numDspBases = 8;
+ unsigned short numUartBases = 4;
+ unsigned short numDspIrqs = 5;
+ unsigned short numUartIrqs = 2;
+ unsigned short dspio_index = 0, uartio_index = 0;
+
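+	/*
+	 * Overview: validate any mwave_* module parameters against the
+	 * legal base-address and IRQ tables above, then probe serial
+	 * ports A and B and the IR port for IRQ or base-I/O conflicts
+	 * with the requested UART settings.  When
+	 * MWAVE_FUTZ_WITH_OTHER_DEVICES is defined a conflicting device
+	 * is disabled through SMAPI; otherwise the conflict is fatal.
+	 * Finally the DSP settings (function 0x1803) and UART settings
+	 * (function 0x1805) are written back.
+	 */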
+ PRINTK_5(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg entry mwave_3780i_irq %x mwave_3780i_io %x mwave_uart_irq %x mwave_uart_io %x\n",
+ mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io);
+
+ if (mwave_3780i_io) {
+ for (i = 0; i < numDspBases; i++) {
+ if (mwave_3780i_io == ausDspBases[i])
+ break;
+ }
+ if (i == numDspBases) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. Aborting.\n", mwave_3780i_io);
+ return bRC;
+ }
+ dspio_index = i;
+ }
+
+ if (mwave_3780i_irq) {
+ for (i = 0; i < numDspIrqs; i++) {
+ if (mwave_3780i_irq == ausDspIrqs[i])
+ break;
+ }
+ if (i == numDspIrqs) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq);
+ return bRC;
+ }
+ }
+
+ if (mwave_uart_io) {
+ for (i = 0; i < numUartBases; i++) {
+ if (mwave_uart_io == ausUartBases[i])
+ break;
+ }
+ if (i == numUartBases) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io);
+ return bRC;
+ }
+ uartio_index = i;
+ }
+
+
+ if (mwave_uart_irq) {
+ for (i = 0; i < numUartIrqs; i++) {
+ if (mwave_uart_irq == ausUartIrqs[i])
+ break;
+ }
+ if (i == numUartIrqs) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. Aborting.\n", mwave_uart_irq);
+ return bRC;
+ }
+ }
+
+ if (mwave_uart_irq || mwave_uart_io) {
+
+ /* Check serial port A */
+ bRC = smapi_request(0x1402, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ /* bRC == 0 */
+ if (usBX & 0x0100) { /* serial port A is present */
+ if (usCX & 1) { /* serial port is enabled */
+ if ((usSI & 0xFF) == mwave_uart_irq) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
+#else
+ PRINTK_3(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_1(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg Disabling conflicting serial port\n");
+ bRC = smapi_request(0x1403, 0x0100, 0, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1402, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+#else
+ goto exit_conflict;
+#endif
+ } else {
+ if ((usSI >> 8) == uartio_index) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
+#else
+ PRINTK_3(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_1(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg Disabling conflicting serial port A\n");
+ bRC = smapi_request (0x1403, 0x0100, 0, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request (0x1402, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+#else
+ goto exit_conflict;
+#endif
+ }
+ }
+ }
+ }
+
+ /* Check serial port B */
+ bRC = smapi_request(0x1404, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ /* bRC == 0 */
+ if (usBX & 0x0100) { /* serial port B is present */
+ if (usCX & 1) { /* serial port is enabled */
+ if ((usSI & 0xFF) == mwave_uart_irq) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
+#else
+ PRINTK_3(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_1(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n");
+ bRC = smapi_request(0x1405, 0x0100, 0, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1404, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+#else
+ goto exit_conflict;
+#endif
+ } else {
+ if ((usSI >> 8) == uartio_index) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
+#else
+ PRINTK_3(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_1 (TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n");
+ bRC = smapi_request (0x1405, 0x0100, 0, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request (0x1404, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+#else
+ goto exit_conflict;
+#endif
+ }
+ }
+ }
+ }
+
+ /* Check IR port */
+ bRC = smapi_request(0x1700, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1704, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ /* bRC == 0 */
+ if ((usCX & 0xff) != 0xff) { /* IR port not disabled */
+ if ((usCX & 0xff) == mwave_uart_irq) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq);
+#else
+ PRINTK_3(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_1(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n");
+ bRC = smapi_request(0x1701, 0x0100, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1700, 0, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1705, 0x01ff, 0, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1704, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+#else
+ goto exit_conflict;
+#endif
+ } else {
+ if ((usSI & 0xff) == uartio_index) {
+#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_ERROR(KERN_ERR_MWAVE
+ "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]);
+#else
+ PRINTK_3(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]);
+#endif
+#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
+ PRINTK_1(TRACE_SMAPI,
+ "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n");
+ bRC = smapi_request(0x1701, 0x0100, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1700, 0, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1705, 0x01ff, 0, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+ bRC = smapi_request(0x1704, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+#else
+ goto exit_conflict;
+#endif
+ }
+ }
+ }
+ }
+
+ bRC = smapi_request(0x1802, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+
+ if (mwave_3780i_io) {
+ usDI = dspio_index;
+ }
+ if (mwave_3780i_irq) {
+ usSI = (usSI & 0xff00) | mwave_3780i_irq;
+ }
+
+ bRC = smapi_request(0x1803, 0x0101, usDI, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+
+ bRC = smapi_request(0x1804, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+
+ if (mwave_uart_io) {
+ usSI = (usSI & 0x00ff) | (uartio_index << 8);
+ }
+ if (mwave_uart_irq) {
+ usSI = (usSI & 0xff00) | mwave_uart_irq;
+ }
+ bRC = smapi_request(0x1805, 0x0101, 0, usSI,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+
+ bRC = smapi_request(0x1802, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+
+ bRC = smapi_request(0x1804, 0x0000, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+ if (bRC) goto exit_smapi_request_error;
+
+/* normal exit: */
+ PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg exit\n");
+ return 0;
+
+exit_conflict:
+ /* Message has already been printed */
+ return -EIO;
+
+exit_smapi_request_error:
+ PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg exit on smapi_request error bRC %x\n", bRC);
+ return bRC;
+}
+
+
+int smapi_set_DSP_power_state(BOOLEAN bOn)
+{
+ int bRC = -EIO;
+ unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
+ unsigned short usPowerFunction;
+
+ PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state entry bOn %x\n", bOn);
+
+ usPowerFunction = (bOn) ? 1 : 0;
+
+ bRC = smapi_request(0x4901, 0x0000, 0, usPowerFunction,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+
+ PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state exit bRC %x\n", bRC);
+
+ return bRC;
+}
+
+#if 0
+static int SmapiQuerySystemID(void)
+{
+ int bRC = -EIO;
+ unsigned short usAX = 0xffff, usBX = 0xffff, usCX = 0xffff,
+ usDX = 0xffff, usDI = 0xffff, usSI = 0xffff;
+
+	printk("smapi::SmapiQuerySystemID entry\n");
+ bRC = smapi_request(0x0000, 0, 0, 0,
+ &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
+
+ if (bRC == 0) {
+ printk("AX=%x, BX=%x, CX=%x, DX=%x, DI=%x, SI=%x\n",
+ usAX, usBX, usCX, usDX, usDI, usSI);
+ } else {
+ printk("smapi::SmapiQuerySystemID smapi_request error\n");
+ }
+
+ return bRC;
+}
+#endif /* 0 */
+
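+/*
+ * Probe for SMAPI support: CMOS bytes 0x7C/0x7D hold a signature
+ * word (0x5349) and, when it matches, bytes 0x7E/0x7F hold the I/O
+ * port that smapi_request() writes to reach the BIOS handler.  The
+ * CMOS reads are done under rtc_lock because CMOS_READ shares the
+ * RTC index/data registers.
+ */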
+int smapi_init(void)
+{
+ int retval = -EIO;
+ unsigned short usSmapiID = 0;
+ unsigned long flags;
+
+ PRINTK_1(TRACE_SMAPI, "smapi::smapi_init entry\n");
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ usSmapiID = CMOS_READ(0x7C);
+ usSmapiID |= (CMOS_READ(0x7D) << 8);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ PRINTK_2(TRACE_SMAPI, "smapi::smapi_init usSmapiID %x\n", usSmapiID);
+
+ if (usSmapiID == 0x5349) {
+ spin_lock_irqsave(&rtc_lock, flags);
+ g_usSmapiPort = CMOS_READ(0x7E);
+ g_usSmapiPort |= (CMOS_READ(0x7F) << 8);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ if (g_usSmapiPort == 0) {
+			PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_init, ERROR unable to read from SMAPI port\n");
+ } else {
+ PRINTK_2(TRACE_SMAPI,
+ "smapi::smapi_init, exit TRUE g_usSmapiPort %x\n",
+ g_usSmapiPort);
+ retval = 0;
+			/* SmapiQuerySystemID(); */
+ }
+ } else {
+		PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_init, ERROR invalid usSmapiID\n");
+ retval = -ENXIO;
+ }
+
+ return retval;
+}
diff --git a/drivers/char/mwave/smapi.h b/drivers/char/mwave/smapi.h
new file mode 100644
index 00000000..64b2ec14
--- /dev/null
+++ b/drivers/char/mwave/smapi.h
@@ -0,0 +1,80 @@
+/*
+*
+* smapi.h -- declarations for SMAPI interface routines
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#ifndef _LINUX_SMAPI_H
+#define _LINUX_SMAPI_H
+
+#define TRUE 1
+#define FALSE 0
+#define BOOLEAN int
+
+typedef struct {
+ int bDSPPresent;
+ int bDSPEnabled;
+ int bModemEnabled;
+ int bMIDIEnabled;
+ int bSblstEnabled;
+ unsigned short usDspIRQ;
+ unsigned short usDspDMA;
+ unsigned short usDspBaseIO;
+ unsigned short usUartIRQ;
+ unsigned short usUartBaseIO;
+ unsigned short usMidiIRQ;
+ unsigned short usMidiBaseIO;
+ unsigned short usSndblstIRQ;
+ unsigned short usSndblstDMA;
+ unsigned short usSndblstBaseIO;
+} SMAPI_DSP_SETTINGS;
+
+int smapi_init(void);
+int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings);
+int smapi_set_DSP_cfg(void);
+int smapi_set_DSP_power_state(BOOLEAN bOn);
+
+
+#endif
diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
new file mode 100644
index 00000000..c6896970
--- /dev/null
+++ b/drivers/char/mwave/tp3780i.c
@@ -0,0 +1,579 @@
+/*
+*
+* tp3780i.c -- board driver for 3780i on ThinkPads
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include "smapi.h"
+#include "mwavedd.h"
+#include "tp3780i.h"
+#include "3780i.h"
+#include "mwavepub.h"
+
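+/*
+ * Map ISA IRQ and DMA numbers to the field encodings the 3780i
+ * configuration registers expect.  Entries of 0xFFFF mark lines
+ * that cannot be routed to the DSP on these ThinkPads;
+ * tp3780I_EnableDSP() rejects such settings before handing the
+ * tables to dsp3780I_EnableDSP().
+ */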
+static unsigned short s_ausThinkpadIrqToField[16] =
+ { 0xFFFF, 0xFFFF, 0xFFFF, 0x0001, 0x0002, 0x0003, 0xFFFF, 0x0004,
+ 0xFFFF, 0xFFFF, 0x0005, 0x0006, 0xFFFF, 0xFFFF, 0xFFFF, 0x0007 };
+static unsigned short s_ausThinkpadDmaToField[8] =
+ { 0x0001, 0x0002, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0003, 0x0004 };
+static unsigned short s_numIrqs = 16, s_numDmas = 8;
+
+
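+/*
+ * As the register writes below show, DSP GPIO line 10 is taken out
+ * of its special mode, enabled as a driven output and latched low.
+ * On these ThinkPads this line apparently gates the DSP's external
+ * SRAM, hence the function name.
+ */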
+static void EnableSRAM(THINKPAD_BD_DATA * pBDData)
+{
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ DSP_GPIO_OUTPUT_DATA_15_8 rGpioOutputData;
+ DSP_GPIO_DRIVER_ENABLE_15_8 rGpioDriverEnable;
+ DSP_GPIO_MODE_15_8 rGpioMode;
+
+ PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM, entry\n");
+
+ MKWORD(rGpioMode) = ReadMsaCfg(DSP_GpioModeControl_15_8);
+ rGpioMode.GpioMode10 = 0;
+ WriteMsaCfg(DSP_GpioModeControl_15_8, MKWORD(rGpioMode));
+
+ MKWORD(rGpioDriverEnable) = 0;
+ rGpioDriverEnable.Enable10 = TRUE;
+ rGpioDriverEnable.Mask10 = TRUE;
+ WriteMsaCfg(DSP_GpioDriverEnable_15_8, MKWORD(rGpioDriverEnable));
+
+ MKWORD(rGpioOutputData) = 0;
+ rGpioOutputData.Latch10 = 0;
+ rGpioOutputData.Mask10 = TRUE;
+ WriteMsaCfg(DSP_GpioOutputData_15_8, MKWORD(rGpioOutputData));
+
+ PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM exit\n");
+}
+
+
+static irqreturn_t UartInterrupt(int irq, void *dev_id)
+{
+ PRINTK_3(TRACE_TP3780I,
+ "tp3780i::UartInterrupt entry irq %x dev_id %p\n", irq, dev_id);
+ return IRQ_HANDLED;
+}
+
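+/*
+ * DSP interrupt service routine: read the pending IPC source bits
+ * from the 3780i and walk them lowest bit first.  Each pending IPC
+ * has its interrupt count latched (0 -> 1) and, if a client has
+ * enabled that IPC, its wait queue is woken; otherwise the event is
+ * simply recorded for a later query.
+ */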
+static irqreturn_t DspInterrupt(int irq, void *dev_id)
+{
+ pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pDrvData->rBDData.rDspSettings;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ unsigned short usIPCSource = 0, usIsolationMask, usPCNum;
+
+ PRINTK_3(TRACE_TP3780I,
+ "tp3780i::DspInterrupt entry irq %x dev_id %p\n", irq, dev_id);
+
+ if (dsp3780I_GetIPCSource(usDspBaseIO, &usIPCSource) == 0) {
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::DspInterrupt, return from dsp3780i_GetIPCSource, usIPCSource %x\n",
+ usIPCSource);
+ usIsolationMask = 1;
+ for (usPCNum = 1; usPCNum <= 16; usPCNum++) {
+ if (usIPCSource & usIsolationMask) {
+ usIPCSource &= ~usIsolationMask;
+ PRINTK_3(TRACE_TP3780I,
+ "tp3780i::DspInterrupt usPCNum %x usIPCSource %x\n",
+ usPCNum, usIPCSource);
+ if (pDrvData->IPCs[usPCNum - 1].usIntCount == 0) {
+ pDrvData->IPCs[usPCNum - 1].usIntCount = 1;
+ }
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::DspInterrupt usIntCount %x\n",
+ pDrvData->IPCs[usPCNum - 1].usIntCount);
+ if (pDrvData->IPCs[usPCNum - 1].bIsEnabled == TRUE) {
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::DspInterrupt, waking up usPCNum %x\n",
+ usPCNum - 1);
+ wake_up_interruptible(&pDrvData->IPCs[usPCNum - 1].ipc_wait_queue);
+ } else {
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::DspInterrupt, no one waiting for IPC %x\n",
+ usPCNum - 1);
+ }
+ }
+ if (usIPCSource == 0)
+ break;
+ /* try next IPC */
+ usIsolationMask = usIsolationMask << 1;
+ }
+ } else {
+ PRINTK_1(TRACE_TP3780I,
+ "tp3780i::DspInterrupt, return false from dsp3780i_GetIPCSource\n");
+ }
+ PRINTK_1(TRACE_TP3780I, "tp3780i::DspInterrupt exit\n");
+ return IRQ_HANDLED;
+}
+
+
+int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData entry pBDData %p\n", pBDData);
+
+ pBDData->bDSPEnabled = FALSE;
+ pSettings->bInterruptClaimed = FALSE;
+
+ retval = smapi_init();
+ if (retval) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_InitializeBoardData: Error: SMAPI is not available on this machine\n");
+ } else {
+ if (mwave_3780i_irq || mwave_3780i_io || mwave_uart_irq || mwave_uart_io) {
+ retval = smapi_set_DSP_cfg();
+ }
+ }
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData exit retval %x\n", retval);
+
+ return retval;
+}
+
+int tp3780I_Cleanup(THINKPAD_BD_DATA * pBDData)
+{
+ int retval = 0;
+
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_Cleanup entry and exit pBDData %p\n", pBDData);
+
+ return retval;
+}
+
+int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData)
+{
+ SMAPI_DSP_SETTINGS rSmapiInfo;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_CalcResources entry pBDData %p\n", pBDData);
+
+ if (smapi_query_DSP_cfg(&rSmapiInfo)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Could not query DSP config. Aborting.\n");
+ return -EIO;
+ }
+
+ /* Sanity check */
+ if (
+ ( rSmapiInfo.usDspIRQ == 0 )
+ || ( rSmapiInfo.usDspBaseIO == 0 )
+ || ( rSmapiInfo.usUartIRQ == 0 )
+ || ( rSmapiInfo.usUartBaseIO == 0 )
+ ) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Illegal resource setting. Aborting.\n");
+ return -EIO;
+ }
+
+ pSettings->bDSPEnabled = (rSmapiInfo.bDSPEnabled && rSmapiInfo.bDSPPresent);
+ pSettings->bModemEnabled = rSmapiInfo.bModemEnabled;
+ pSettings->usDspIrq = rSmapiInfo.usDspIRQ;
+ pSettings->usDspDma = rSmapiInfo.usDspDMA;
+ pSettings->usDspBaseIO = rSmapiInfo.usDspBaseIO;
+ pSettings->usUartIrq = rSmapiInfo.usUartIRQ;
+ pSettings->usUartBaseIO = rSmapiInfo.usUartBaseIO;
+
+ pSettings->uDStoreSize = TP_ABILITIES_DATA_SIZE;
+ pSettings->uIStoreSize = TP_ABILITIES_INST_SIZE;
+ pSettings->uIps = TP_ABILITIES_INTS_PER_SEC;
+
+ if (pSettings->bDSPEnabled && pSettings->bModemEnabled && pSettings->usDspIrq == pSettings->usUartIrq) {
+ pBDData->bShareDspIrq = pBDData->bShareUartIrq = 1;
+ } else {
+ pBDData->bShareDspIrq = pBDData->bShareUartIrq = 0;
+ }
+
+ PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_CalcResources exit\n");
+
+ return 0;
+}
+
+
+int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct resource *pres;
+
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_ClaimResources entry pBDData %p\n", pBDData);
+
+ pres = request_region(pSettings->usDspBaseIO, 16, "mwave_3780i");
+ if ( pres == NULL ) retval = -EIO;
+
+ if (retval) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_ClaimResources: Error: Could not claim I/O region starting at %x\n", pSettings->usDspBaseIO);
+ retval = -EIO;
+ }
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ClaimResources exit retval %x\n", retval);
+
+ return retval;
+}
+
+int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_ReleaseResources entry pBDData %p\n", pBDData);
+
+	release_region(pSettings->usDspBaseIO, 16);	/* matches the region claimed in tp3780I_ClaimResources() */
+
+ if (pSettings->bInterruptClaimed) {
+ free_irq(pSettings->usDspIrq, NULL);
+ pSettings->bInterruptClaimed = FALSE;
+ }
+
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_ReleaseResources exit retval %x\n", retval);
+
+ return retval;
+}
+
+
+
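+/*
+ * Bring the DSP up: sanity-check the IRQ, DMA and base-I/O settings
+ * against the ThinkPad routing tables, probe the UART IRQ by
+ * claiming and immediately releasing it, claim the DSP IRQ for
+ * real, power-cycle the DSP through SMAPI, then hand the settings
+ * to dsp3780I_EnableDSP() and enable the SRAM line.  Any failure
+ * unwinds through exit_cleanup below.
+ */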
+int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
+{
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ BOOLEAN bDSPPoweredUp = FALSE, bInterruptAllocated = FALSE;
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP entry pBDData %p\n", pBDData);
+
+ if (pBDData->bDSPEnabled) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: DSP already enabled!\n");
+ goto exit_cleanup;
+ }
+
+ if (!pSettings->bDSPEnabled) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: pSettings->bDSPEnabled not set\n");
+ goto exit_cleanup;
+ }
+
+ if (
+ (pSettings->usDspIrq >= s_numIrqs)
+ || (pSettings->usDspDma >= s_numDmas)
+ || (s_ausThinkpadIrqToField[pSettings->usDspIrq] == 0xFFFF)
+ || (s_ausThinkpadDmaToField[pSettings->usDspDma] == 0xFFFF)
+ ) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: invalid irq %x\n", pSettings->usDspIrq);
+ goto exit_cleanup;
+ }
+
+ if (
+ ((pSettings->usDspBaseIO & 0xF00F) != 0)
+ || (pSettings->usDspBaseIO & 0x0FF0) == 0
+ ) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid DSP base I/O address %x\n", pSettings->usDspBaseIO);
+ goto exit_cleanup;
+ }
+
+ if (pSettings->bModemEnabled) {
+ if (
+ pSettings->usUartIrq >= s_numIrqs
+ || s_ausThinkpadIrqToField[pSettings->usUartIrq] == 0xFFFF
+ ) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid UART IRQ %x\n", pSettings->usUartIrq);
+ goto exit_cleanup;
+ }
+ switch (pSettings->usUartBaseIO) {
+ case 0x03F8:
+ case 0x02F8:
+ case 0x03E8:
+ case 0x02E8:
+ break;
+
+ default:
+			PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid UART base I/O address %x\n", pSettings->usUartBaseIO);
+ goto exit_cleanup;
+ }
+ }
+
+ pSettings->bDspIrqActiveLow = pSettings->bDspIrqPulse = TRUE;
+ pSettings->bUartIrqActiveLow = pSettings->bUartIrqPulse = TRUE;
+
+ if (pBDData->bShareDspIrq) {
+ pSettings->bDspIrqActiveLow = FALSE;
+ }
+ if (pBDData->bShareUartIrq) {
+ pSettings->bUartIrqActiveLow = FALSE;
+ }
+
+ pSettings->usNumTransfers = TP_CFG_NumTransfers;
+ pSettings->usReRequest = TP_CFG_RerequestTimer;
+ pSettings->bEnableMEMCS16 = TP_CFG_MEMCS16;
+ pSettings->usIsaMemCmdWidth = TP_CFG_IsaMemCmdWidth;
+ pSettings->bGateIOCHRDY = TP_CFG_GateIOCHRDY;
+ pSettings->bEnablePwrMgmt = TP_CFG_EnablePwrMgmt;
+ pSettings->usHBusTimerLoadValue = TP_CFG_HBusTimerValue;
+ pSettings->bDisableLBusTimeout = TP_CFG_DisableLBusTimeout;
+ pSettings->usN_Divisor = TP_CFG_N_Divisor;
+ pSettings->usM_Multiplier = TP_CFG_M_Multiplier;
+ pSettings->bPllBypass = TP_CFG_PllBypass;
+ pSettings->usChipletEnable = TP_CFG_ChipletEnable;
+
+ if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", NULL)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Could not get UART IRQ %x\n", pSettings->usUartIrq);
+ goto exit_cleanup;
+ } else { /* no conflict just release */
+ free_irq(pSettings->usUartIrq, NULL);
+ }
+
+ if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", NULL)) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Could not get 3780i IRQ %x\n", pSettings->usDspIrq);
+ goto exit_cleanup;
+ } else {
+ PRINTK_3(TRACE_TP3780I,
+ "tp3780i::tp3780I_EnableDSP, got interrupt %x bShareDspIrq %x\n",
+ pSettings->usDspIrq, pBDData->bShareDspIrq);
+ bInterruptAllocated = TRUE;
+ pSettings->bInterruptClaimed = TRUE;
+ }
+
+ smapi_set_DSP_power_state(FALSE);
+ if (smapi_set_DSP_power_state(TRUE)) {
+ PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: smapi_set_DSP_power_state(TRUE) failed\n");
+ goto exit_cleanup;
+ } else {
+ bDSPPoweredUp = TRUE;
+ }
+
+ if (dsp3780I_EnableDSP(pSettings, s_ausThinkpadIrqToField, s_ausThinkpadDmaToField)) {
+		PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: dsp3780I_EnableDSP() failed\n");
+ goto exit_cleanup;
+ }
+
+ EnableSRAM(pBDData);
+
+ pBDData->bDSPEnabled = TRUE;
+
+ PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP exit\n");
+
+ return 0;
+
+exit_cleanup:
+	PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Cleaning up\n");
+ if (bDSPPoweredUp)
+ smapi_set_DSP_power_state(FALSE);
+ if (bInterruptAllocated) {
+ free_irq(pSettings->usDspIrq, NULL);
+ pSettings->bInterruptClaimed = FALSE;
+ }
+ return -EIO;
+}
+
+
+int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP entry pBDData %p\n", pBDData);
+
+ if (pBDData->bDSPEnabled) {
+ dsp3780I_DisableDSP(&pBDData->rDspSettings);
+ if (pSettings->bInterruptClaimed) {
+ free_irq(pSettings->usDspIrq, NULL);
+ pSettings->bInterruptClaimed = FALSE;
+ }
+ smapi_set_DSP_power_state(FALSE);
+ pBDData->bDSPEnabled = FALSE;
+ }
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP exit retval %x\n", retval);
+
+ return retval;
+}
+
+
+int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP entry pBDData %p\n",
+ pBDData);
+
+ if (dsp3780I_Reset(pSettings) == 0) {
+ EnableSRAM(pBDData);
+ } else {
+ retval = -EIO;
+ }
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP exit retval %x\n", retval);
+
+ return retval;
+}
+
+
+int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP entry pBDData %p\n", pBDData);
+
+ if (dsp3780I_Run(pSettings) == 0) {
+		/* @BUG @TBD EnableSRAM(pBDData); */
+ } else {
+ retval = -EIO;
+ }
+
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP exit retval %x\n", retval);
+
+ return retval;
+}
+
+
+int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities)
+{
+ int retval = 0;
+
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
+
+ /* fill out standard constant fields */
+ pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
+ pAbilities->data_size = pBDData->rDspSettings.uDStoreSize;
+ pAbilities->inst_size = pBDData->rDspSettings.uIStoreSize;
+ pAbilities->bus_dma_bw = pBDData->rDspSettings.uDmaBandwidth;
+
+ /* fill out dynamically determined fields */
+ pAbilities->component_list[0] = 0x00010000 | MW_ADC_MASK;
+ pAbilities->component_list[1] = 0x00010000 | MW_ACI_MASK;
+ pAbilities->component_list[2] = 0x00010000 | MW_AIC1_MASK;
+ pAbilities->component_list[3] = 0x00010000 | MW_AIC2_MASK;
+ pAbilities->component_list[4] = 0x00010000 | MW_CDDAC_MASK;
+ pAbilities->component_list[5] = 0x00010000 | MW_MIDI_MASK;
+ pAbilities->component_list[6] = 0x00010000 | MW_UART_MASK;
+ pAbilities->component_count = 7;
+
+ /* Fill out Mwave OS and BIOS task names */
+
+ memcpy(pAbilities->mwave_os_name, TP_ABILITIES_MWAVEOS_NAME,
+ sizeof(TP_ABILITIES_MWAVEOS_NAME));
+ memcpy(pAbilities->bios_task_name, TP_ABILITIES_BIOSTASK_NAME,
+ sizeof(TP_ABILITIES_BIOSTASK_NAME));
+
+ PRINTK_1(TRACE_TP3780I,
+ "tp3780i::tp3780I_QueryAbilities exit retval=SUCCESSFUL\n");
+
+ return retval;
+}
+
+int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+ void __user *pvBuffer, unsigned int uCount,
+ unsigned long ulDSPAddr)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ BOOLEAN bRC = 0;
+
+ PRINTK_6(TRACE_TP3780I,
+ "tp3780i::tp3780I_ReadWriteDspDStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n",
+ pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr);
+
+ if (pBDData->bDSPEnabled) {
+ switch (uOpcode) {
+ case IOCTL_MW_READ_DATA:
+ bRC = dsp3780I_ReadDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+ break;
+
+ case IOCTL_MW_READCLEAR_DATA:
+ bRC = dsp3780I_ReadAndClearDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+ break;
+
+ case IOCTL_MW_WRITE_DATA:
+ bRC = dsp3780I_WriteDStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+ break;
+ }
+ }
+
+ retval = (bRC) ? -EIO : 0;
+ PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ReadWriteDspDStore exit retval %x\n", retval);
+
+ return retval;
+}
+
+
+int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+ void __user *pvBuffer, unsigned int uCount,
+ unsigned long ulDSPAddr)
+{
+ int retval = 0;
+ DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ unsigned short usDspBaseIO = pSettings->usDspBaseIO;
+ BOOLEAN bRC = 0;
+
+ PRINTK_6(TRACE_TP3780I,
+ "tp3780i::tp3780I_ReadWriteDspIStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n",
+ pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr);
+
+ if (pBDData->bDSPEnabled) {
+ switch (uOpcode) {
+ case IOCTL_MW_READ_INST:
+ bRC = dsp3780I_ReadIStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+ break;
+
+ case IOCTL_MW_WRITE_INST:
+ bRC = dsp3780I_WriteIStore(usDspBaseIO, pvBuffer, uCount, ulDSPAddr);
+ break;
+ }
+ }
+
+ retval = (bRC) ? -EIO : 0;
+
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_ReadWriteDspIStore exit retval %x\n", retval);
+
+ return retval;
+}
+
diff --git a/drivers/char/mwave/tp3780i.h b/drivers/char/mwave/tp3780i.h
new file mode 100644
index 00000000..07685b68
--- /dev/null
+++ b/drivers/char/mwave/tp3780i.h
@@ -0,0 +1,103 @@
+/*
+*
+* tp3780i.h -- declarations for tp3780i.c
+*
+*
+* Written By: Mike Sullivan IBM Corporation
+*
+* Copyright (C) 1999 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* NO WARRANTY
+* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+* solely responsible for determining the appropriateness of using and
+* distributing the Program and assumes all risks associated with its
+* exercise of rights under this Agreement, including but not limited to
+* the risks and costs of program errors, damage to or loss of data,
+* programs or equipment, and unavailability or interruption of operations.
+*
+* DISCLAIMER OF LIABILITY
+* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*
+* 10/23/2000 - Alpha Release
+* First release to the public
+*/
+
+#ifndef _LINUX_TP3780I_H
+#define _LINUX_TP3780I_H
+
+#include <asm/io.h>
+#include "mwavepub.h"
+
+
+/* DSP abilities constants for 3780i based Thinkpads */
+#define TP_ABILITIES_INTS_PER_SEC 39160800
+#define TP_ABILITIES_DATA_SIZE 32768
+#define TP_ABILITIES_INST_SIZE 32768
+#define TP_ABILITIES_MWAVEOS_NAME "mwaveos0700.dsp"
+#define TP_ABILITIES_BIOSTASK_NAME "mwbio701.dsp"
+
+
+/* DSP configuration values for 3780i based Thinkpads */
+#define TP_CFG_NumTransfers 3 /* 16 transfers */
+#define TP_CFG_RerequestTimer 1 /* 2 usec */
+#define TP_CFG_MEMCS16 0 /* Disabled, 16-bit memory assumed */
+#define TP_CFG_IsaMemCmdWidth 3 /* 295 nsec (16-bit) */
+#define TP_CFG_GateIOCHRDY 0 /* No IOCHRDY gating */
+#define TP_CFG_EnablePwrMgmt      1	/* Enable low power suspend/resume */
+#define TP_CFG_HBusTimerValue 255 /* HBus timer load value */
+#define TP_CFG_DisableLBusTimeout 0 /* Enable LBus timeout */
+#define TP_CFG_N_Divisor          32	/* Clock = 39.1608 MHz */
+#define TP_CFG_M_Multiplier 37 /* " */
+#define TP_CFG_PllBypass 0 /* don't bypass */
+#define TP_CFG_ChipletEnable 0xFFFF /* Enable all chiplets */
+
+typedef struct {
+ int bDSPEnabled;
+ int bShareDspIrq;
+ int bShareUartIrq;
+ DSP_3780I_CONFIG_SETTINGS rDspSettings;
+} THINKPAD_BD_DATA;
+
+int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData);
+int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData);
+int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData);
+int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData);
+int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData);
+int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData);
+int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData);
+int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData);
+int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities);
+int tp3780I_Cleanup(THINKPAD_BD_DATA * pBDData);
+int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+ void __user *pvBuffer, unsigned int uCount,
+ unsigned long ulDSPAddr);
+int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+ void __user *pvBuffer, unsigned int uCount,
+ unsigned long ulDSPAddr);
+
+
+#endif
diff --git a/drivers/char/mxc_iim.c b/drivers/char/mxc_iim.c
new file mode 100644
index 00000000..4d1429a9
--- /dev/null
+++ b/drivers/char/mxc_iim.c
@@ -0,0 +1,644 @@
+/*
+ * Copyright (C) 2009-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
+#include "mxc_iim.h"
+
+static struct mxc_iim_platform_data *iim_data;
+
+
+#ifdef MXC_IIM_DEBUG
+static inline void dump_reg(void)
+{
+ struct iim_regs *iim_reg_base =
+ (struct iim_regs *)iim_data->virt_base;
+
+ dev_dbg(iim_data->dev, "stat: 0x%08x\n",
+ __raw_readl(&iim_reg_base->stat));
+ dev_dbg(iim_data->dev, "statm: 0x%08x\n",
+ __raw_readl(&iim_reg_base->statm));
+ dev_dbg(iim_data->dev, "err: 0x%08x\n",
+ __raw_readl(&iim_reg_base->err));
+ dev_dbg(iim_data->dev, "emask: 0x%08x\n",
+ __raw_readl(&iim_reg_base->emask));
+ dev_dbg(iim_data->dev, "fctl: 0x%08x\n",
+ __raw_readl(&iim_reg_base->fctl));
+ dev_dbg(iim_data->dev, "ua: 0x%08x\n",
+ __raw_readl(&iim_reg_base->ua));
+ dev_dbg(iim_data->dev, "la: 0x%08x\n",
+ __raw_readl(&iim_reg_base->la));
+ dev_dbg(iim_data->dev, "sdat: 0x%08x\n",
+ __raw_readl(&iim_reg_base->sdat));
+ dev_dbg(iim_data->dev, "prev: 0x%08x\n",
+ __raw_readl(&iim_reg_base->prev));
+ dev_dbg(iim_data->dev, "srev: 0x%08x\n",
+ __raw_readl(&iim_reg_base->srev));
+ dev_dbg(iim_data->dev, "preg_p: 0x%08x\n",
+ __raw_readl(&iim_reg_base->preg_p));
+ dev_dbg(iim_data->dev, "scs0: 0x%08x\n",
+ __raw_readl(&iim_reg_base->scs0));
+ dev_dbg(iim_data->dev, "scs1: 0x%08x\n",
+ __raw_readl(&iim_reg_base->scs1));
+ dev_dbg(iim_data->dev, "scs2: 0x%08x\n",
+ __raw_readl(&iim_reg_base->scs2));
+ dev_dbg(iim_data->dev, "scs3: 0x%08x\n",
+ __raw_readl(&iim_reg_base->scs3));
+}
+#endif
+
+static inline void mxc_iim_disable_irq(void)
+{
+ struct iim_regs *iim_reg_base = (struct iim_regs *)iim_data->virt_base;
+
+ dev_dbg(iim_data->dev, "=> %s\n", __func__);
+
+ __raw_writel(0x0, &(iim_reg_base->statm));
+ __raw_writel(0x0, &(iim_reg_base->emask));
+
+ dev_dbg(iim_data->dev, "<= %s\n", __func__);
+}
+
+static inline void fuse_op_start(void)
+{
+ struct iim_regs *iim_reg_base = (struct iim_regs *)iim_data->virt_base;
+
+ dev_dbg(iim_data->dev, "=> %s\n", __func__);
+
+#ifdef MXC_IIM_DEBUG
+ dump_reg();
+#endif
+
+ /* Clear the status bits and error bits */
+ __raw_writel(0x3, &(iim_reg_base->stat));
+ __raw_writel(0xfe, &(iim_reg_base->err));
+ /* Generate interrupt */
+ __raw_writel(0x3, &(iim_reg_base->statm));
+ __raw_writel(0xfe, &(iim_reg_base->emask));
+
+ dev_dbg(iim_data->dev, "<= %s\n", __func__);
+}
+
+static u32 sense_fuse(u32 bank, u32 row, u32 bit)
+{
+ u32 addr, addr_l, addr_h;
+ s32 err = 0;
+ struct iim_regs *iim_reg_base = (struct iim_regs *)iim_data->virt_base;
+
+ dev_dbg(iim_data->dev, "=> %s\n", __func__);
+
+ init_completion(&(iim_data->completion));
+
+ iim_data->action = POLL_FUSE_SNSD;
+
+ fuse_op_start();
+
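+	/*
+	 * Fuse address layout, as encoded below: the bank number is
+	 * shifted to bit 11, the row occupies bits [10:3] and the bit
+	 * position within the row occupies bits [2:0].
+	 */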
+ addr = ((bank << 11) | (row << 3) | (bit & 0x7));
+ /* Set IIM Program Upper Address */
+ addr_h = (addr >> 8) & 0x000000FF;
+ /* Set IIM Program Lower Address */
+ addr_l = (addr & 0x000000FF);
+
+ dev_dbg(iim_data->dev, "%s: addr_h=0x%x, addr_l=0x%x\n",
+ __func__, addr_h, addr_l);
+ __raw_writel(addr_h, &(iim_reg_base->ua));
+ __raw_writel(addr_l, &(iim_reg_base->la));
+
+ /* Start sensing */
+#ifdef MXC_IIM_DEBUG
+ dump_reg();
+#endif
+ __raw_writel(0x8, &(iim_reg_base->fctl));
+
+ err = wait_for_completion_timeout(&(iim_data->completion),
+ msecs_to_jiffies(1000));
+ err = (!err) ? -ETIMEDOUT : 0;
+ if (err)
+		dev_dbg(iim_data->dev, "Sense timeout!\n");
+
+ dev_dbg(iim_data->dev, "<= %s\n", __func__);
+
+ return __raw_readl(&(iim_reg_base->sdat));
+}
+
+/* Blow fuses based on the bank, row and bit positions (all 0-based). */
+static s32 fuse_blow_bit(u32 bank, u32 row, u32 bit)
+{
+ int addr, addr_l, addr_h, err;
+ struct iim_regs *iim_reg_base = (struct iim_regs *)iim_data->virt_base;
+
+ dev_dbg(iim_data->dev, "=> %s\n", __func__);
+
+ init_completion(&iim_data->completion);
+
+ iim_data->action = POLL_FUSE_PRGD;
+
+ fuse_op_start();
+
+ /* Disable IIM Program Protect */
+ __raw_writel(0xaa, &(iim_reg_base->preg_p));
+
+ addr = ((bank << 11) | (row << 3) | (bit & 0x7));
+ /* Set IIM Program Upper Address */
+ addr_h = (addr >> 8) & 0x000000FF;
+ /* Set IIM Program Lower Address */
+ addr_l = (addr & 0x000000FF);
+
+ dev_dbg(iim_data->dev, "blowing addr_h=0x%x, addr_l=0x%x\n",
+ addr_h, addr_l);
+
+ __raw_writel(addr_h, &(iim_reg_base->ua));
+ __raw_writel(addr_l, &(iim_reg_base->la));
+
+ /* Start Programming */
+#ifdef MXC_IIM_DEBUG
+ dump_reg();
+#endif
+ __raw_writel(0x31, &(iim_reg_base->fctl));
+ err = wait_for_completion_timeout(&(iim_data->completion),
+ msecs_to_jiffies(1000));
+ err = (!err) ? -ETIMEDOUT : 0;
+ if (err)
+ dev_dbg(iim_data->dev, "Fuse timeout!\n");
+
+ /* Enable IIM Program Protect */
+ __raw_writel(0x0, &(iim_reg_base->preg_p));
+
+ dev_dbg(iim_data->dev, "<= %s\n", __func__);
+
+ return err;
+}
+
+static void fuse_blow_row(u32 bank, u32 row, u32 value)
+{
+ u32 i;
+
+ dev_dbg(iim_data->dev, "=> %s\n", __func__);
+
+ /* Enable fuse blown */
+ if (iim_data->enable_fuse)
+ iim_data->enable_fuse();
+
+ for (i = 0; i < 8; i++) {
+ if (((value >> i) & 0x1) == 0)
+ continue;
+ if (fuse_blow_bit(bank, row, i) != 0) {
+			dev_dbg(iim_data->dev,
+				"fuse_blow_bit(bank: %d, row: %d, "
+				"bit: %d) failed\n",
+				bank, row, i);
+ }
+ }
+
+ if (iim_data->disable_fuse)
+ iim_data->disable_fuse();
+
+ dev_dbg(iim_data->dev, "<= %s\n", __func__);
+}
+
+static irqreturn_t mxc_iim_irq(int irq, void *dev_id)
+{
+ u32 status, error, rtn = 0;
+ ulong flags;
+ struct iim_regs *iim_reg_base = (struct iim_regs *)iim_data->virt_base;
+
+ dev_dbg(iim_data->dev, "=> %s\n", __func__);
+
+ spin_lock_irqsave(&iim_data->lock, flags);
+
+ mxc_iim_disable_irq();
+#ifdef MXC_IIM_DEBUG
+ dump_reg();
+#endif
+ if (iim_data->action != POLL_FUSE_PRGD &&
+ iim_data->action != POLL_FUSE_SNSD) {
+ dev_dbg(iim_data->dev, "%s(%d) invalid operation\n",
+ __func__, iim_data->action);
+ rtn = 1;
+ goto out;
+ }
+
+ /* Test for successful write */
+ status = readl(&(iim_reg_base->stat));
+ error = readl(&(iim_reg_base->err));
+
+	if ((status & iim_data->action) != 0 &&
+	    (error & (iim_data->action >> IIM_ERR_SHIFT)) == 0) {
+ if (error) {
+ printk(KERN_NOTICE "Even though the operation"
+ "seems successful...\n");
+ printk(KERN_NOTICE "There are some error(s) "
+ "at addr=0x%x: 0x%x\n",
+ (u32)&(iim_reg_base->err), error);
+ }
+ __raw_writel(0x3, &(iim_reg_base->stat));
+ rtn = 0;
+ goto out;
+ }
+ printk(KERN_NOTICE "%s(%d) failed\n", __func__, iim_data->action);
+ printk(KERN_NOTICE "status address=0x%x, value=0x%x\n",
+ (u32)&(iim_reg_base->stat), status);
+ printk(KERN_NOTICE "There are some error(s) at addr=0x%x: 0x%x\n",
+ (u32)&(iim_reg_base->err), error);
+#ifdef MXC_IIM_DEBUG
+ dump_reg();
+#endif
+
+out:
+ complete(&(iim_data->completion));
+ spin_unlock_irqrestore(&iim_data->lock, flags);
+
+ dev_dbg(iim_data->dev, "<= %s\n", __func__);
+
+ return IRQ_RETVAL(rtn);
+}
+
+static loff_t mxc_iim_llseek(struct file *filp, loff_t off, int whence)
+{
+	loff_t newpos;
+
+	dev_dbg(iim_data->dev, "=> %s\n", __func__);
+	dev_dbg(iim_data->dev, "off: %lld, whence: %d\n", off, whence);
+
+	switch (whence) {
+	case 0: /* SEEK_SET */
+		newpos = off;
+		break;
+	case 1: /* SEEK_CUR */
+		newpos = filp->f_pos + off;
+		break;
+	case 2: /* SEEK_END */
+		newpos = iim_data->bank_end + off;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * Validate the resulting position rather than the raw offset, so
+	 * that relative seeks (SEEK_CUR/SEEK_END) are range-checked too.
+	 */
+	if (newpos < iim_data->bank_start ||
+	    newpos > iim_data->bank_end) {
+		dev_dbg(iim_data->dev, "invalid seek position: %lld\n", newpos);
+		return -EINVAL;
+	}
+
+	filp->f_pos = newpos;
+
+	dev_dbg(iim_data->dev, "newpos: %lld\n", newpos);
+
+	dev_dbg(iim_data->dev, "<= %s\n", __func__);
+
+	return newpos;
+}
+
+static ssize_t mxc_iim_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ u32 bank, row, fuse_val;
+ ssize_t retval = 0;
+
+ dev_dbg(iim_data->dev, "=> %s\n", __func__);
+ dev_dbg(iim_data->dev, "count: %d f_pos: %lld\n", count, *f_pos);
+
+	if (1 != count)
+		goto invalid_arg_out;
+	if (*f_pos & 0x3)
+		goto invalid_arg_out;
+ if (*f_pos < iim_data->bank_start ||
+ *f_pos > iim_data->bank_end) {
+ dev_dbg(iim_data->dev, "bank_start: 0x%08x, bank_end: 0x%08x\n",
+ iim_data->bank_start, iim_data->bank_end);
+ goto out;
+ }
+
+ bank = (*f_pos - iim_data->bank_start) >> 10;
+ row = ((*f_pos - iim_data->bank_start) & 0x3ff) >> 2;
+
+ dev_dbg(iim_data->dev, "Read fuse at bank:%d row:%d\n",
+ bank, row);
+ mutex_lock(&iim_data->mutex);
+ fuse_val = sense_fuse(bank, row, 0);
+ mutex_unlock(&iim_data->mutex);
+ dev_info(iim_data->dev, "fuses at (bank:%d, row:%d) = 0x%x\n",
+ bank, row, fuse_val);
+	if (copy_to_user(buf, &fuse_val, count)) {
+		retval = -EFAULT;
+		goto out;
+	}
+
+	retval = count;
+out:
+	dev_dbg(iim_data->dev, "<= %s\n", __func__);
+	return retval;
+invalid_arg_out:
+	retval = -EINVAL;
+	return retval;
+}
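+
+/*
+ * Editor's sketch (not part of the driver): reading one fuse byte from
+ * user space.  The driver insists on a single-byte read at a 4-byte
+ * aligned offset inside the platform's bank window; the 0x818 offset
+ * below is made up for illustration.
+ */
+#if 0
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int main(void)
+{
+	unsigned char val = 0;
+	int fd = open("/dev/mxc_iim", O_RDONLY);
+
+	if (fd < 0)
+		return 1;
+	lseek(fd, 0x818, SEEK_SET);	/* 4-byte aligned, in bank range */
+	read(fd, &val, 1);		/* the driver requires count == 1 */
+	printf("fuse byte: 0x%02x\n", val);
+	close(fd);
+	return 0;
+}
+#endif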
+
+static ssize_t mxc_iim_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ u32 bank;
+ ulong fuse_val;
+ u8 row;
+ u8 *tmp_buf = NULL;
+ loff_t file_pos = *f_pos;
+ ssize_t retval = 0;
+
+ dev_dbg(iim_data->dev, "=> %s\n", __func__);
+ dev_dbg(iim_data->dev, "count: %d f_pos: %lld\n", count, file_pos);
+
+	tmp_buf = kzalloc(count + 1, GFP_KERNEL);
+	if (unlikely(!tmp_buf)) {
+		retval = -ENOMEM;
+		goto out;
+	}
+ if (copy_from_user(tmp_buf, buf, count)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+	if (count > 1 && 0 == file_pos) {
+		/* Expect "<hex offset> <hex value>" in tmp_buf */
+		if (sscanf(tmp_buf, "%x %x",
+			(s32 *)&file_pos, (s32 *)&fuse_val) != 2) {
+ dev_info(iim_data->dev,
+ "Invalid input string: %s\n", tmp_buf);
+ retval = -EINVAL;
+ goto out;
+ }
+ dev_dbg(iim_data->dev,
+ "file_pos: 0x%08x, fuse_val: 0x%08x\n",
+ (s32)file_pos, (s32)fuse_val);
+ } else if (1 == count)
+ fuse_val = tmp_buf[0];
+ else {
+		dev_info(iim_data->dev,
+			"Invalid input: %s, count: %zu, file pos: %d\n",
+			tmp_buf, count, (s32)file_pos);
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (fuse_val > 0xff) {
+ dev_info(iim_data->dev,
+ "Invalid blow value: 0x%08x\n", (s32)fuse_val);
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (file_pos & 0x3) {
+ dev_info(iim_data->dev, "file pos is not 4-byte aligned!\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (file_pos < iim_data->bank_start ||
+ file_pos > iim_data->bank_end) {
+ dev_dbg(iim_data->dev,
+ "bank_start: 0x%08x, bank_end: 0x%08x\n",
+ iim_data->bank_start, iim_data->bank_end);
+ dev_info(iim_data->dev,
+ "file pos out of range: 0x%08x\n", (u32)file_pos);
+ retval = -EINVAL;
+ goto out;
+ }
+
+ bank = (file_pos - iim_data->bank_start) >> 10;
+ row = ((file_pos - iim_data->bank_start) & 0x3ff) >> 2;
+
+ dev_info(iim_data->dev, "Blowing fuse at bank:%d row:%d value:%d\n",
+ bank, row, (int)fuse_val);
+ mutex_lock(&iim_data->mutex);
+ fuse_blow_row(bank, row, fuse_val);
+ fuse_val = sense_fuse(bank, row, 0);
+ mutex_unlock(&iim_data->mutex);
+ dev_info(iim_data->dev, "fuses at (bank:%d, row:%d) = 0x%x\n",
+ bank, row, (unsigned int)fuse_val);
+
+ retval = count;
+out:
+	kfree(tmp_buf);
+ dev_dbg(iim_data->dev, "<= %s\n", __func__);
+ return retval;
+}
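+
+/*
+ * Editor's sketch (not part of the driver): blowing a fuse from user
+ * space by writing "<hex offset> <hex value>" at file position 0, the
+ * multi-byte path parsed by sscanf() above.  Blown fuses cannot be
+ * un-blown; the offset below is made up for illustration.
+ */
+#if 0
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+
+int main(void)
+{
+	const char *cmd = "818 1";	/* offset 0x818, value 0x1 (examples) */
+	int fd = open("/dev/mxc_iim", O_WRONLY);
+
+	if (fd < 0)
+		return 1;
+	write(fd, cmd, strlen(cmd));
+	close(fd);
+	return 0;
+}
+#endif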
+
+/*!
+ * MXC IIM interface - memory map function
+ * This function maps IIM registers from IIM base address.
+ *
+ * @param file struct file *
+ * @param vma structure vm_area_struct *
+ *
+ * @return Return 0 on success or negative error code on error
+ */
+static int mxc_iim_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ dev_dbg(iim_data->dev, "=> %s\n", __func__);
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ iim_data->reg_base >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ dev_dbg(iim_data->dev, "<= %s\n", __func__);
+
+ return 0;
+}
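+
+/*
+ * Editor's sketch (not part of the driver): mapping the IIM register
+ * window read-only from user space.  One page is assumed to cover the
+ * registers here; the real window size comes from the platform resource.
+ */
+#if 0
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+int main(void)
+{
+	int fd = open("/dev/mxc_iim", O_RDONLY);
+	void *regs;
+
+	if (fd < 0)
+		return 1;
+	/* the driver marks the mapping non-cached via pgprot_noncached() */
+	regs = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
+	if (regs == MAP_FAILED)
+		return 1;
+	/* ... inspect registers here ... */
+	munmap(regs, 4096);
+	close(fd);
+	return 0;
+}
+#endif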
+
+/*!
+ * MXC IIM interface - open function
+ *
+ * @param inode struct inode *
+ * @param filp struct file *
+ *
+ * @return Return 0 on success or negative error code on error
+ */
+static int mxc_iim_open(struct inode *inode, struct file *filp)
+{
+ dev_dbg(iim_data->dev, "=> %s\n", __func__);
+
+ dev_dbg(iim_data->dev, "iim_data addr: 0x%08x\n", (u32)iim_data);
+
+ iim_data->clk = clk_get(NULL, "iim_clk");
+ if (IS_ERR(iim_data->clk)) {
+ dev_err(iim_data->dev, "No IIM clock defined\n");
+ return -ENODEV;
+ }
+ clk_enable(iim_data->clk);
+
+ mxc_iim_disable_irq();
+
+ dev_dbg(iim_data->dev, "<= %s\n", __func__);
+
+ return 0;
+}
+
+/*!
+ * MXC IIM interface - release function
+ *
+ * @param inode struct inode *
+ * @param filp struct file *
+ *
+ * @return Return 0 on success or negative error code on error
+ */
+static int mxc_iim_release(struct inode *inode, struct file *filp)
+{
+ clk_disable(iim_data->clk);
+ clk_put(iim_data->clk);
+ return 0;
+}
+
+static const struct file_operations mxc_iim_fops = {
+ .read = mxc_iim_read,
+ .write = mxc_iim_write,
+ .llseek = mxc_iim_llseek,
+ .mmap = mxc_iim_mmap,
+ .open = mxc_iim_open,
+ .release = mxc_iim_release,
+};
+
+static struct miscdevice mxc_iim_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mxc_iim",
+ .fops = &mxc_iim_fops,
+};
+
+/*!
+ * This function is called by the driver framework to get iim base/end address
+ * and register iim misc device.
+ *
+ * @param dev The device structure for IIM passed in by the driver
+ * framework.
+ *
+ * @return Returns 0 on success or negative error code on error
+ */
+static __devinit int mxc_iim_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+
+ iim_data = pdev->dev.platform_data;
+ iim_data->dev = &pdev->dev;
+ iim_data->name = mxc_iim_miscdev.name;
+
+ dev_dbg(iim_data->dev, "=> %s\n", __func__);
+
+ dev_dbg(iim_data->dev, "iim_data addr: 0x%08x "
+ "bank_start: 0x%04x bank_end: 0x%04x "
+ "enable_fuse: 0x%08x disable_fuse: 0x%08x\n",
+ (u32)iim_data,
+ iim_data->bank_start, iim_data->bank_end,
+ (u32)iim_data->enable_fuse,
+ (u32)iim_data->disable_fuse);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(iim_data->dev, "Unable to get IIM resource\n");
+		return -ENODEV;
+	}
+
+	iim_data->irq = platform_get_irq(pdev, 0);
+	dev_dbg(iim_data->dev, "irq number: %d\n", iim_data->irq);
+	if (iim_data->irq < 0)
+		return iim_data->irq;
+
+ ret = request_irq(iim_data->irq, mxc_iim_irq, IRQF_DISABLED,
+ iim_data->name, iim_data);
+ if (ret)
+ return ret;
+
+ iim_data->reg_base = res->start;
+ iim_data->reg_end = res->end;
+ iim_data->reg_size =
+ iim_data->reg_end - iim_data->reg_base + 1;
+	iim_data->virt_base =
+		(u32)ioremap(iim_data->reg_base, iim_data->reg_size);
+	if (!iim_data->virt_base) {
+		free_irq(iim_data->irq, iim_data);
+		return -ENOMEM;
+	}
+
+	mutex_init(&(iim_data->mutex));
+	spin_lock_init(&(iim_data->lock));
+
+	ret = misc_register(&mxc_iim_miscdev);
+	if (ret) {
+		iounmap((void *)iim_data->virt_base);
+		free_irq(iim_data->irq, iim_data);
+		return ret;
+	}
+
+ dev_dbg(iim_data->dev, "<= %s\n", __func__);
+
+ return 0;
+}
+
+static int __devexit mxc_iim_remove(struct platform_device *pdev)
+{
+ free_irq(iim_data->irq, iim_data);
+ iounmap((void *)iim_data->virt_base);
+ misc_deregister(&mxc_iim_miscdev);
+ return 0;
+}
+
+static struct platform_driver mxc_iim_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mxc_iim",
+ },
+ .probe = mxc_iim_probe,
+ .remove = mxc_iim_remove,
+};
+
+static int __init mxc_iim_dev_init(void)
+{
+ return platform_driver_register(&mxc_iim_driver);
+}
+
+static void __exit mxc_iim_dev_cleanup(void)
+{
+ platform_driver_unregister(&mxc_iim_driver);
+}
+
+module_init(mxc_iim_dev_init);
+module_exit(mxc_iim_dev_cleanup);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MXC IIM driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
diff --git a/drivers/char/mxc_iim.h b/drivers/char/mxc_iim.h
new file mode 100644
index 00000000..0a081456
--- /dev/null
+++ b/drivers/char/mxc_iim.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __IMX_IIM_H__
+#define __IMX_IIM_H__
+
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+
+/* IIM Control Registers */
+struct iim_regs {
+ u32 stat;
+ u32 statm;
+ u32 err;
+ u32 emask;
+ u32 fctl;
+ u32 ua;
+ u32 la;
+ u32 sdat;
+ u32 prev;
+ u32 srev;
+ u32 preg_p;
+ u32 scs0;
+ u32 scs1;
+ u32 scs2;
+ u32 scs3;
+};
+
+#define IIM_STAT_BUSY (1 << 7)
+#define IIM_STAT_PRGD (1 << 1)
+#define IIM_STAT_SNSD (1 << 0)
+
+#define IIM_ERR_PRGE (1 << 7)
+#define IIM_ERR_WPE (1 << 6)
+#define IIM_ERR_OPE (1 << 5)
+#define IIM_ERR_RPE (1 << 4)
+#define IIM_ERR_WLRE (1 << 3)
+#define IIM_ERR_SNSE (1 << 2)
+#define IIM_ERR_PARITYE (1 << 1)
+
+#define IIM_PROD_REV_SH 3
+#define IIM_PROD_REV_LEN 5
+#define IIM_SREV_REV_SH 4
+#define IIM_SREV_REV_LEN 4
+#define PROD_SIGNATURE_MX51 0x1
+
+#define IIM_ERR_SHIFT 8
+#define POLL_FUSE_PRGD (IIM_STAT_PRGD | (IIM_ERR_PRGE << IIM_ERR_SHIFT))
+#define POLL_FUSE_SNSD (IIM_STAT_SNSD | (IIM_ERR_SNSE << IIM_ERR_SHIFT))
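+
+/*
+ * Editor's note: each POLL_FUSE_* value packs the expected STAT bit in
+ * its low byte and the matching ERR bit shifted up by IIM_ERR_SHIFT, so
+ * a single action word describes both registers.  A handler can then
+ * test completion with, for example:
+ *
+ *	done = (stat & POLL_FUSE_SNSD) &&
+ *	       !(err & (POLL_FUSE_SNSD >> IIM_ERR_SHIFT));
+ */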
+
+#endif
diff --git a/drivers/char/mxs_viim.c b/drivers/char/mxs_viim.c
new file mode 100644
index 00000000..6a20fa9a
--- /dev/null
+++ b/drivers/char/mxs_viim.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2009-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+
+static unsigned long iim_reg_base0, iim_reg_end0, iim_reg_size0;
+static unsigned long iim_reg_base1, iim_reg_end1, iim_reg_size1;
+static struct device *iim_dev;
+
+/*!
+ * MXS Virtual IIM interface - memory map function
+ * This function maps the first VIIM register window from VIIM base
+ * address0; if the requested mapping is larger than that window, the
+ * second window (base address1) is mapped immediately after it.
+ *
+ * @param file struct file *
+ * @param vma structure vm_area_struct *
+ *
+ * @return Return 0 on success or negative error code on error
+ */
+static int mxs_viim_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ size_t size = vma->vm_end - vma->vm_start;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ iim_reg_base0 >> PAGE_SHIFT,
+ iim_reg_size0,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ if (size > iim_reg_size0) {
+ if (remap_pfn_range(vma,
+ vma->vm_start + iim_reg_size0,
+ iim_reg_base1 >> PAGE_SHIFT,
+ iim_reg_size1,
+ vma->vm_page_prot))
+ return -EAGAIN;
+ }
+
+ return 0;
+}
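+
+/*
+ * Editor's sketch (not part of the driver): a user-space mapping that
+ * spans both register windows.  Requesting more than the first window's
+ * size makes the driver append the second window; the 8192 length below
+ * is made up -- real sizes come from the platform resources.
+ */
+#if 0
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+int main(void)
+{
+	int fd = open("/dev/mxs_viim", O_RDWR);
+	void *p;
+
+	if (fd < 0)
+		return 1;
+	p = mmap(NULL, 8192, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	if (p == MAP_FAILED)
+		return 1;
+	munmap(p, 8192);
+	close(fd);
+	return 0;
+}
+#endif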
+
+/*!
+ * MXS Virtual IIM interface - open function
+ *
+ * @param inode struct inode *
+ * @param filp struct file *
+ *
+ * @return Return 0 on success or negative error code on error
+ */
+static int mxs_viim_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/*!
+ * MXS Virtual IIM interface - release function
+ *
+ * @param inode struct inode *
+ * @param filp struct file *
+ *
+ * @return Return 0 on success or negative error code on error
+ */
+static int mxs_viim_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static const struct file_operations mxs_viim_fops = {
+ .mmap = mxs_viim_mmap,
+ .open = mxs_viim_open,
+ .release = mxs_viim_release,
+};
+
+static struct miscdevice mxs_viim_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mxs_viim",
+ .fops = &mxs_viim_fops,
+};
+
+/*!
+ * This function is called by the driver framework to get virtual iim base/end
+ * address and register iim misc device.
+ *
+ * @param dev The device structure for Virtual IIM passed in by the
+ * driver framework.
+ *
+ * @return Returns 0 on success or negative error code on error
+ */
+static int mxs_viim_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+
+ iim_dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+ dev_err(iim_dev, "Unable to get Virtual IIM resource 0\n");
+ return -ENODEV;
+ }
+
+ iim_reg_base0 = res->start;
+ iim_reg_end0 = res->end;
+ iim_reg_size0 = iim_reg_end0 - iim_reg_base0 + 1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res) {
+ dev_err(iim_dev, "Unable to get Virtual IIM resource 1\n");
+ return -ENODEV;
+ }
+
+ iim_reg_base1 = res->start;
+ iim_reg_end1 = res->end;
+ iim_reg_size1 = iim_reg_end1 - iim_reg_base1 + 1;
+
+ ret = misc_register(&mxs_viim_miscdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mxs_viim_remove(struct platform_device *pdev)
+{
+ misc_deregister(&mxs_viim_miscdev);
+ return 0;
+}
+
+static struct platform_driver mxs_viim_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mxs_viim",
+ },
+ .probe = mxs_viim_probe,
+ .remove = mxs_viim_remove,
+};
+
+static int __init mxs_viim_dev_init(void)
+{
+ return platform_driver_register(&mxs_viim_driver);
+}
+
+static void __exit mxs_viim_dev_cleanup(void)
+{
+ platform_driver_unregister(&mxs_viim_driver);
+}
+
+module_init(mxs_viim_dev_init);
+module_exit(mxs_viim_dev_cleanup);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("IMX Virtual IIM driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
diff --git a/drivers/char/nsc_gpio.c b/drivers/char/nsc_gpio.c
new file mode 100644
index 00000000..808d44e9
--- /dev/null
+++ b/drivers/char/nsc_gpio.c
@@ -0,0 +1,139 @@
+/* linux/drivers/char/nsc_gpio.c
+
+ National Semiconductor common GPIO device-file/VFS methods.
+ Allows a user space process to control the GPIO pins.
+
+ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
+ Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com>
+*/
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/nsc_gpio.h>
+#include <linux/platform_device.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#define NAME "nsc_gpio"
+
+void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index)
+{
+ /* retrieve current config w/o changing it */
+ u32 config = amp->gpio_config(index, ~0, 0);
+
+ /* user requested via 'v' command, so its INFO */
+ dev_info(amp->dev, "io%02u: 0x%04x %s %s %s %s %s %s %s\tio:%d/%d\n",
+ index, config,
+ (config & 1) ? "OE" : "TS", /* output-enabled/tristate */
+ (config & 2) ? "PP" : "OD", /* push pull / open drain */
+ (config & 4) ? "PUE" : "PUD", /* pull up enabled/disabled */
+ (config & 8) ? "LOCKED" : "", /* locked / unlocked */
+ (config & 16) ? "LEVEL" : "EDGE",/* level/edge input */
+ (config & 32) ? "HI" : "LO", /* trigger on rise/fall edge */
+ (config & 64) ? "DEBOUNCE" : "", /* debounce */
+
+ amp->gpio_get(index), amp->gpio_current(index));
+}
+
+ssize_t nsc_gpio_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ unsigned m = iminor(file->f_path.dentry->d_inode);
+ struct nsc_gpio_ops *amp = file->private_data;
+ struct device *dev = amp->dev;
+ size_t i;
+ int err = 0;
+
+ for (i = 0; i < len; ++i) {
+ char c;
+ if (get_user(c, data + i))
+ return -EFAULT;
+ switch (c) {
+ case '0':
+ amp->gpio_set(m, 0);
+ break;
+ case '1':
+ amp->gpio_set(m, 1);
+ break;
+ case 'O':
+ dev_dbg(dev, "GPIO%d output enabled\n", m);
+ amp->gpio_config(m, ~1, 1);
+ break;
+ case 'o':
+ dev_dbg(dev, "GPIO%d output disabled\n", m);
+ amp->gpio_config(m, ~1, 0);
+ break;
+ case 'T':
+ dev_dbg(dev, "GPIO%d output is push pull\n", m);
+ amp->gpio_config(m, ~2, 2);
+ break;
+ case 't':
+ dev_dbg(dev, "GPIO%d output is open drain\n", m);
+ amp->gpio_config(m, ~2, 0);
+ break;
+ case 'P':
+ dev_dbg(dev, "GPIO%d pull up enabled\n", m);
+ amp->gpio_config(m, ~4, 4);
+ break;
+ case 'p':
+ dev_dbg(dev, "GPIO%d pull up disabled\n", m);
+ amp->gpio_config(m, ~4, 0);
+ break;
+ case 'v':
+ /* View Current pin settings */
+ amp->gpio_dump(amp, m);
+ break;
+ case '\n':
+ /* end of settings string, do nothing */
+ break;
+ default:
+ dev_err(dev, "io%2d bad setting: chr<0x%2x>\n",
+ m, (int)c);
+ err++;
+ }
+ }
+ if (err)
+ return -EINVAL; /* full string handled, report error */
+
+ return len;
+}
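+
+/*
+ * Editor's sketch (not part of the driver): driving one pin from user
+ * space with the single-character commands handled above.  The device
+ * node name (/dev/gpio10) is platform-specific and only an assumption.
+ */
+#if 0
+#include <fcntl.h>
+#include <unistd.h>
+
+int main(void)
+{
+	int fd = open("/dev/gpio10", O_RDWR);
+	char v;
+
+	if (fd < 0)
+		return 1;
+	write(fd, "O1", 2);	/* 'O': enable output, '1': drive high */
+	read(fd, &v, 1);	/* current value comes back as '0' or '1' */
+	close(fd);
+	return v != '1';
+}
+#endif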
+
+ssize_t nsc_gpio_read(struct file *file, char __user * buf,
+ size_t len, loff_t * ppos)
+{
+ unsigned m = iminor(file->f_path.dentry->d_inode);
+ int value;
+ struct nsc_gpio_ops *amp = file->private_data;
+
+ value = amp->gpio_get(m);
+ if (put_user(value ? '1' : '0', buf))
+ return -EFAULT;
+
+ return 1;
+}
+
+/* common file-ops routines for both scx200_gpio and pc87360_gpio */
+EXPORT_SYMBOL(nsc_gpio_write);
+EXPORT_SYMBOL(nsc_gpio_read);
+EXPORT_SYMBOL(nsc_gpio_dump);
+
+static int __init nsc_gpio_init(void)
+{
+ printk(KERN_DEBUG NAME " initializing\n");
+ return 0;
+}
+
+static void __exit nsc_gpio_cleanup(void)
+{
+ printk(KERN_DEBUG NAME " cleanup\n");
+}
+
+module_init(nsc_gpio_init);
+module_exit(nsc_gpio_cleanup);
+
+MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_DESCRIPTION("NatSemi GPIO Common Methods");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
new file mode 100644
index 00000000..166f1e7a
--- /dev/null
+++ b/drivers/char/nvram.c
@@ -0,0 +1,723 @@
+/*
+ * CMOS/NV-RAM driver for Linux
+ *
+ * Copyright (C) 1997 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
+ * idea by and with help from Richard Jelinek <rj@suse.de>
+ * Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
+ *
+ * This driver allows you to access the contents of the non-volatile memory in
+ * the MC146818 real-time clock. This chip is built into all PCs and into
+ * many Atari machines. In the former it's called "CMOS-RAM", in the latter
+ * "NVRAM" (NV stands for non-volatile).
+ *
+ * The data are supplied as a (seekable) character device, /dev/nvram. The
+ * size of this file depends on the controller; the usual size is 114 bytes,
+ * the number of freely available bytes in the memory (i.e., not used by the
+ * RTC itself).
+ *
+ * Checksums over the NVRAM contents are managed by this driver. In case of a
+ * bad checksum, reads and writes return -EIO. The checksum can be initialized
+ * to a sane state either by ioctl(NVRAM_INIT) (clear whole NVRAM) or
+ * ioctl(NVRAM_SETCKS) (doesn't change contents, just makes checksum valid
+ * again; use with care!)
+ *
+ * This file also provides some functions for other parts of the kernel that
+ * want to access the NVRAM: nvram_{read,write,check_checksum,set_checksum}.
+ * Obviously this can be used only if this driver is always configured into
+ * the kernel and is not a module. Since the functions are used by some Atari
+ * drivers, this is the case on the Atari.
+ *
+ *
+ * 1.1 Cesar Barros: SMP locking fixes
+ * added changelog
+ * 1.2 Erik Gilling: Cobalt Networks support
+ * Tim Hockin: general cleanup, Cobalt support
+ * 1.3 Wim Van Sebroeck: convert PRINT_PROC to seq_file
+ */
+
+#define NVRAM_VERSION "1.3"
+
+#include <linux/module.h>
+#include <linux/nvram.h>
+
+#define PC 1
+#define ATARI 2
+
+/* select machine configuration */
+#if defined(CONFIG_ATARI)
+# define MACH ATARI
+#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) /* and ?? */
+# define MACH PC
+#else
+# error Cannot build nvram driver for this machine configuration.
+#endif
+
+#if MACH == PC
+
+/* RTC in a PC */
+#define CHECK_DRIVER_INIT() 1
+
+/* On PCs, the checksum is built only over bytes 2..31 */
+#define PC_CKS_RANGE_START 2
+#define PC_CKS_RANGE_END 31
+#define PC_CKS_LOC 32
+#define NVRAM_BYTES (128-NVRAM_FIRST_BYTE)
+
+#define mach_check_checksum pc_check_checksum
+#define mach_set_checksum pc_set_checksum
+#define mach_proc_infos pc_proc_infos
+
+#endif
+
+#if MACH == ATARI
+
+/* Special parameters for RTC in Atari machines */
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#define RTC_PORT(x) (TT_RTC_BAS + 2*(x))
+#define CHECK_DRIVER_INIT() (MACH_IS_ATARI && ATARIHW_PRESENT(TT_CLK))
+
+#define NVRAM_BYTES 50
+
+/* On Ataris, the checksum is over all bytes except the checksum bytes
+ * themselves; these are at the very end */
+#define ATARI_CKS_RANGE_START 0
+#define ATARI_CKS_RANGE_END 47
+#define ATARI_CKS_LOC 48
+
+#define mach_check_checksum atari_check_checksum
+#define mach_set_checksum atari_set_checksum
+#define mach_proc_infos atari_proc_infos
+
+#endif
+
+/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
+ * rtc_lock held. Due to the index-port/data-port design of the RTC, we
+ * don't want two different things trying to get to it at once. (e.g. the
+ * periodic 11 min sync from time.c vs. this driver.)
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/mc146818rtc.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+
+#include <asm/system.h>
+
+static DEFINE_MUTEX(nvram_mutex);
+static DEFINE_SPINLOCK(nvram_state_lock);
+static int nvram_open_cnt; /* #times opened */
+static int nvram_open_mode; /* special open modes */
+#define NVRAM_WRITE 1 /* opened for writing (exclusive) */
+#define NVRAM_EXCL 2 /* opened with O_EXCL */
+
+static int mach_check_checksum(void);
+static void mach_set_checksum(void);
+
+#ifdef CONFIG_PROC_FS
+static void mach_proc_infos(unsigned char *contents, struct seq_file *seq,
+ void *offset);
+#endif
+
+/*
+ * These functions are provided to be called internally or by other parts of
+ * the kernel. It's up to the caller to ensure correct checksum before reading
+ * or after writing (needs to be done only once).
+ *
+ * It is worth noting that these functions all access bytes of general
+ * purpose memory in the NVRAM - that is to say, they all add the
+ * NVRAM_FIRST_BYTE offset. Pass them offsets into NVRAM as if you did not
+ * know about the RTC cruft.
+ */
+
+unsigned char __nvram_read_byte(int i)
+{
+ return CMOS_READ(NVRAM_FIRST_BYTE + i);
+}
+EXPORT_SYMBOL(__nvram_read_byte);
+
+unsigned char nvram_read_byte(int i)
+{
+ unsigned long flags;
+ unsigned char c;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ c = __nvram_read_byte(i);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return c;
+}
+EXPORT_SYMBOL(nvram_read_byte);
+
+/* This races nicely with trying to read with checksum checking (nvram_read) */
+void __nvram_write_byte(unsigned char c, int i)
+{
+ CMOS_WRITE(c, NVRAM_FIRST_BYTE + i);
+}
+EXPORT_SYMBOL(__nvram_write_byte);
+
+void nvram_write_byte(unsigned char c, int i)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ __nvram_write_byte(c, i);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+}
+EXPORT_SYMBOL(nvram_write_byte);
+
+int __nvram_check_checksum(void)
+{
+ return mach_check_checksum();
+}
+EXPORT_SYMBOL(__nvram_check_checksum);
+
+int nvram_check_checksum(void)
+{
+ unsigned long flags;
+ int rv;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ rv = __nvram_check_checksum();
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return rv;
+}
+EXPORT_SYMBOL(nvram_check_checksum);
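+
+/*
+ * Editor's sketch (illustration only): a kernel-side caller that needs a
+ * range of bytes should take rtc_lock once and use the __nvram_* variants,
+ * verifying the checksum under the same lock:
+ */
+#if 0
+static int example_nvram_read_range(unsigned char *buf, int len)
+{
+	unsigned long flags;
+	int i, ok;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	ok = __nvram_check_checksum();
+	for (i = 0; ok && i < len; i++)
+		buf[i] = __nvram_read_byte(i);
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
+	return ok ? 0 : -EIO;
+}
+#endif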
+
+static void __nvram_set_checksum(void)
+{
+ mach_set_checksum();
+}
+
+#if 0
+void nvram_set_checksum(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ __nvram_set_checksum();
+ spin_unlock_irqrestore(&rtc_lock, flags);
+}
+#endif /* 0 */
+
+/*
+ * These are the file operation functions for user access to /dev/nvram
+ */
+
+static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
+{
+ switch (origin) {
+ case 0:
+ /* nothing to do */
+ break;
+ case 1:
+ offset += file->f_pos;
+ break;
+ case 2:
+ offset += NVRAM_BYTES;
+ break;
+ }
+
+ return (offset >= 0) ? (file->f_pos = offset) : -EINVAL;
+}
+
+static ssize_t nvram_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned char contents[NVRAM_BYTES];
+ unsigned i = *ppos;
+ unsigned char *tmp;
+
+ spin_lock_irq(&rtc_lock);
+
+ if (!__nvram_check_checksum())
+ goto checksum_err;
+
+ for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp)
+ *tmp = __nvram_read_byte(i);
+
+ spin_unlock_irq(&rtc_lock);
+
+ if (copy_to_user(buf, contents, tmp - contents))
+ return -EFAULT;
+
+ *ppos = i;
+
+ return tmp - contents;
+
+checksum_err:
+ spin_unlock_irq(&rtc_lock);
+ return -EIO;
+}
+
+static ssize_t nvram_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned char contents[NVRAM_BYTES];
+ unsigned i = *ppos;
+ unsigned char *tmp;
+
+ if (i >= NVRAM_BYTES)
+ return 0; /* Past EOF */
+
+ if (count > NVRAM_BYTES - i)
+ count = NVRAM_BYTES - i;
+ if (count > NVRAM_BYTES)
+ return -EFAULT; /* Can't happen, but prove it to gcc */
+
+ if (copy_from_user(contents, buf, count))
+ return -EFAULT;
+
+ spin_lock_irq(&rtc_lock);
+
+ if (!__nvram_check_checksum())
+ goto checksum_err;
+
+ for (tmp = contents; count--; ++i, ++tmp)
+ __nvram_write_byte(*tmp, i);
+
+ __nvram_set_checksum();
+
+ spin_unlock_irq(&rtc_lock);
+
+ *ppos = i;
+
+ return tmp - contents;
+
+checksum_err:
+ spin_unlock_irq(&rtc_lock);
+ return -EIO;
+}
+
+static long nvram_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int i;
+
+ switch (cmd) {
+
+ case NVRAM_INIT:
+ /* initialize NVRAM contents and checksum */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ mutex_lock(&nvram_mutex);
+ spin_lock_irq(&rtc_lock);
+
+ for (i = 0; i < NVRAM_BYTES; ++i)
+ __nvram_write_byte(0, i);
+ __nvram_set_checksum();
+
+ spin_unlock_irq(&rtc_lock);
+ mutex_unlock(&nvram_mutex);
+ return 0;
+
+ case NVRAM_SETCKS:
+ /* just set checksum, contents unchanged (maybe useful after
+ * checksum garbaged somehow...) */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ mutex_lock(&nvram_mutex);
+ spin_lock_irq(&rtc_lock);
+ __nvram_set_checksum();
+ spin_unlock_irq(&rtc_lock);
+ mutex_unlock(&nvram_mutex);
+ return 0;
+
+ default:
+ return -ENOTTY;
+ }
+}
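+
+/*
+ * Editor's sketch (not part of the driver): revalidating the checksum
+ * from user space after editing NVRAM bytes by hand.  Both ioctls
+ * require CAP_SYS_ADMIN.
+ */
+#if 0
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <linux/nvram.h>	/* NVRAM_INIT, NVRAM_SETCKS */
+
+int main(void)
+{
+	int fd = open("/dev/nvram", O_RDWR);
+
+	if (fd < 0)
+		return 1;
+	ioctl(fd, NVRAM_SETCKS, 0);	/* recompute checksum in place */
+	close(fd);
+	return 0;
+}
+#endif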
+
+static int nvram_open(struct inode *inode, struct file *file)
+{
+ spin_lock(&nvram_state_lock);
+
+ if ((nvram_open_cnt && (file->f_flags & O_EXCL)) ||
+ (nvram_open_mode & NVRAM_EXCL) ||
+ ((file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE))) {
+ spin_unlock(&nvram_state_lock);
+ return -EBUSY;
+ }
+
+ if (file->f_flags & O_EXCL)
+ nvram_open_mode |= NVRAM_EXCL;
+ if (file->f_mode & FMODE_WRITE)
+ nvram_open_mode |= NVRAM_WRITE;
+ nvram_open_cnt++;
+
+ spin_unlock(&nvram_state_lock);
+
+ return 0;
+}
+
+static int nvram_release(struct inode *inode, struct file *file)
+{
+ spin_lock(&nvram_state_lock);
+
+ nvram_open_cnt--;
+
+ /* if only one instance is open, clear the EXCL bit */
+ if (nvram_open_mode & NVRAM_EXCL)
+ nvram_open_mode &= ~NVRAM_EXCL;
+ if (file->f_mode & FMODE_WRITE)
+ nvram_open_mode &= ~NVRAM_WRITE;
+
+ spin_unlock(&nvram_state_lock);
+
+ return 0;
+}
+
+#ifndef CONFIG_PROC_FS
+static int nvram_add_proc_fs(void)
+{
+ return 0;
+}
+
+#else
+
+static int nvram_proc_read(struct seq_file *seq, void *offset)
+{
+ unsigned char contents[NVRAM_BYTES];
+ int i = 0;
+
+ spin_lock_irq(&rtc_lock);
+ for (i = 0; i < NVRAM_BYTES; ++i)
+ contents[i] = __nvram_read_byte(i);
+ spin_unlock_irq(&rtc_lock);
+
+ mach_proc_infos(contents, seq, offset);
+
+ return 0;
+}
+
+static int nvram_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nvram_proc_read, NULL);
+}
+
+static const struct file_operations nvram_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = nvram_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int nvram_add_proc_fs(void)
+{
+ if (!proc_create("driver/nvram", 0, NULL, &nvram_proc_fops))
+ return -ENOMEM;
+ return 0;
+}
+
+#endif /* CONFIG_PROC_FS */
+
+static const struct file_operations nvram_fops = {
+ .owner = THIS_MODULE,
+ .llseek = nvram_llseek,
+ .read = nvram_read,
+ .write = nvram_write,
+ .unlocked_ioctl = nvram_ioctl,
+ .open = nvram_open,
+ .release = nvram_release,
+};
+
+static struct miscdevice nvram_dev = {
+ NVRAM_MINOR,
+ "nvram",
+ &nvram_fops
+};
+
+static int __init nvram_init(void)
+{
+ int ret;
+
+ /* First test whether the driver should init at all */
+ if (!CHECK_DRIVER_INIT())
+ return -ENODEV;
+
+ ret = misc_register(&nvram_dev);
+ if (ret) {
+ printk(KERN_ERR "nvram: can't misc_register on minor=%d\n",
+ NVRAM_MINOR);
+ goto out;
+ }
+ ret = nvram_add_proc_fs();
+ if (ret) {
+ printk(KERN_ERR "nvram: can't create /proc/driver/nvram\n");
+ goto outmisc;
+ }
+ ret = 0;
+ printk(KERN_INFO "Non-volatile memory driver v" NVRAM_VERSION "\n");
+out:
+ return ret;
+outmisc:
+ misc_deregister(&nvram_dev);
+ goto out;
+}
+
+static void __exit nvram_cleanup_module(void)
+{
+ remove_proc_entry("driver/nvram", NULL);
+ misc_deregister(&nvram_dev);
+}
+
+module_init(nvram_init);
+module_exit(nvram_cleanup_module);
+
+/*
+ * Machine specific functions
+ */
+
+#if MACH == PC
+
+static int pc_check_checksum(void)
+{
+ int i;
+ unsigned short sum = 0;
+ unsigned short expect;
+
+ for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ expect = __nvram_read_byte(PC_CKS_LOC)<<8 |
+ __nvram_read_byte(PC_CKS_LOC+1);
+ return (sum & 0xffff) == expect;
+}
+
+static void pc_set_checksum(void)
+{
+ int i;
+ unsigned short sum = 0;
+
+ for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ __nvram_write_byte(sum >> 8, PC_CKS_LOC);
+ __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1);
+}
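+
+/*
+ * Editor's note (illustration only): the PC checksum is a 16-bit sum of
+ * bytes 2..31 stored big-endian at bytes 32/33.  The same check over an
+ * in-memory copy of the NVRAM contents:
+ */
+#if 0
+static int example_pc_checksum_valid(const unsigned char *contents)
+{
+	unsigned short sum = 0;
+	int i;
+
+	for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; i++)
+		sum += contents[i];
+	return contents[PC_CKS_LOC] == ((sum >> 8) & 0xff) &&
+	       contents[PC_CKS_LOC + 1] == (sum & 0xff);
+}
+#endif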
+
+#ifdef CONFIG_PROC_FS
+
+static char *floppy_types[] = {
+ "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M",
+ "3.5'' 2.88M", "3.5'' 2.88M"
+};
+
+static char *gfx_types[] = {
+ "EGA, VGA, ... (with BIOS)",
+ "CGA (40 cols)",
+ "CGA (80 cols)",
+ "monochrome",
+};
+
+static void pc_proc_infos(unsigned char *nvram, struct seq_file *seq,
+ void *offset)
+{
+ int checksum;
+ int type;
+
+ spin_lock_irq(&rtc_lock);
+ checksum = __nvram_check_checksum();
+ spin_unlock_irq(&rtc_lock);
+
+ seq_printf(seq, "Checksum status: %svalid\n", checksum ? "" : "not ");
+
+ seq_printf(seq, "# floppies : %d\n",
+ (nvram[6] & 1) ? (nvram[6] >> 6) + 1 : 0);
+ seq_printf(seq, "Floppy 0 type : ");
+ type = nvram[2] >> 4;
+ if (type < ARRAY_SIZE(floppy_types))
+ seq_printf(seq, "%s\n", floppy_types[type]);
+ else
+ seq_printf(seq, "%d (unknown)\n", type);
+ seq_printf(seq, "Floppy 1 type : ");
+ type = nvram[2] & 0x0f;
+ if (type < ARRAY_SIZE(floppy_types))
+ seq_printf(seq, "%s\n", floppy_types[type]);
+ else
+ seq_printf(seq, "%d (unknown)\n", type);
+
+ seq_printf(seq, "HD 0 type : ");
+ type = nvram[4] >> 4;
+ if (type)
+ seq_printf(seq, "%02x\n", type == 0x0f ? nvram[11] : type);
+ else
+ seq_printf(seq, "none\n");
+
+ seq_printf(seq, "HD 1 type : ");
+ type = nvram[4] & 0x0f;
+ if (type)
+ seq_printf(seq, "%02x\n", type == 0x0f ? nvram[12] : type);
+ else
+ seq_printf(seq, "none\n");
+
+ seq_printf(seq, "HD type 48 data: %d/%d/%d C/H/S, precomp %d, lz %d\n",
+ nvram[18] | (nvram[19] << 8),
+ nvram[20], nvram[25],
+ nvram[21] | (nvram[22] << 8), nvram[23] | (nvram[24] << 8));
+ seq_printf(seq, "HD type 49 data: %d/%d/%d C/H/S, precomp %d, lz %d\n",
+ nvram[39] | (nvram[40] << 8),
+ nvram[41], nvram[46],
+ nvram[42] | (nvram[43] << 8), nvram[44] | (nvram[45] << 8));
+
+ seq_printf(seq, "DOS base memory: %d kB\n", nvram[7] | (nvram[8] << 8));
+ seq_printf(seq, "Extended memory: %d kB (configured), %d kB (tested)\n",
+ nvram[9] | (nvram[10] << 8), nvram[34] | (nvram[35] << 8));
+
+ seq_printf(seq, "Gfx adapter : %s\n",
+ gfx_types[(nvram[6] >> 4) & 3]);
+
+ seq_printf(seq, "FPU : %sinstalled\n",
+ (nvram[6] & 2) ? "" : "not ");
+
+ return;
+}
+#endif
+
+#endif /* MACH == PC */
+
+#if MACH == ATARI
+
+static int atari_check_checksum(void)
+{
+ int i;
+ unsigned char sum = 0;
+
+ for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ return (__nvram_read_byte(ATARI_CKS_LOC) == (~sum & 0xff)) &&
+ (__nvram_read_byte(ATARI_CKS_LOC + 1) == (sum & 0xff));
+}
+
+static void atari_set_checksum(void)
+{
+ int i;
+ unsigned char sum = 0;
+
+ for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ __nvram_write_byte(~sum, ATARI_CKS_LOC);
+ __nvram_write_byte(sum, ATARI_CKS_LOC + 1);
+}
+
+#ifdef CONFIG_PROC_FS
+
+static struct {
+ unsigned char val;
+ char *name;
+} boot_prefs[] = {
+ { 0x80, "TOS" },
+ { 0x40, "ASV" },
+ { 0x20, "NetBSD (?)" },
+ { 0x10, "Linux" },
+ { 0x00, "unspecified" }
+};
+
+static char *languages[] = {
+ "English (US)",
+ "German",
+ "French",
+ "English (UK)",
+ "Spanish",
+ "Italian",
+ "6 (undefined)",
+ "Swiss (French)",
+ "Swiss (German)"
+};
+
+static char *dateformat[] = {
+ "MM%cDD%cYY",
+ "DD%cMM%cYY",
+ "YY%cMM%cDD",
+ "YY%cDD%cMM",
+ "4 (undefined)",
+ "5 (undefined)",
+ "6 (undefined)",
+ "7 (undefined)"
+};
+
+static char *colors[] = {
+ "2", "4", "16", "256", "65536", "??", "??", "??"
+};
+
+static void atari_proc_infos(unsigned char *nvram, struct seq_file *seq,
+ void *offset)
+{
+ int checksum = nvram_check_checksum();
+ int i;
+ unsigned vmode;
+
+ seq_printf(seq, "Checksum status : %svalid\n", checksum ? "" : "not ");
+
+ seq_printf(seq, "Boot preference : ");
+ for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i) {
+ if (nvram[1] == boot_prefs[i].val) {
+ seq_printf(seq, "%s\n", boot_prefs[i].name);
+ break;
+ }
+ }
+ if (i < 0)
+ seq_printf(seq, "0x%02x (undefined)\n", nvram[1]);
+
+ seq_printf(seq, "SCSI arbitration : %s\n",
+ (nvram[16] & 0x80) ? "on" : "off");
+ seq_printf(seq, "SCSI host ID : ");
+ if (nvram[16] & 0x80)
+ seq_printf(seq, "%d\n", nvram[16] & 7);
+ else
+ seq_printf(seq, "n/a\n");
+
+ /* the following entries are defined only for the Falcon */
+ if ((atari_mch_cookie >> 16) != ATARI_MCH_FALCON)
+ return;
+
+ seq_printf(seq, "OS language : ");
+ if (nvram[6] < ARRAY_SIZE(languages))
+ seq_printf(seq, "%s\n", languages[nvram[6]]);
+ else
+ seq_printf(seq, "%u (undefined)\n", nvram[6]);
+ seq_printf(seq, "Keyboard language: ");
+ if (nvram[7] < ARRAY_SIZE(languages))
+ seq_printf(seq, "%s\n", languages[nvram[7]]);
+ else
+ seq_printf(seq, "%u (undefined)\n", nvram[7]);
+ seq_printf(seq, "Date format : ");
+ seq_printf(seq, dateformat[nvram[8] & 7],
+ nvram[9] ? nvram[9] : '/', nvram[9] ? nvram[9] : '/');
+ seq_printf(seq, ", %dh clock\n", nvram[8] & 16 ? 24 : 12);
+ seq_printf(seq, "Boot delay : ");
+ if (nvram[10] == 0)
+ seq_printf(seq, "default");
+ else
+ seq_printf(seq, "%ds%s\n", nvram[10],
+ nvram[10] < 8 ? ", no memory test" : "");
+
+	vmode = (nvram[14] << 8) | nvram[15];	/* bitwise, not logical, OR */
+ seq_printf(seq,
+ "Video mode : %s colors, %d columns, %s %s monitor\n",
+ colors[vmode & 7],
+ vmode & 8 ? 80 : 40,
+ vmode & 16 ? "VGA" : "TV", vmode & 32 ? "PAL" : "NTSC");
+ seq_printf(seq, " %soverscan, compat. mode %s%s\n",
+ vmode & 64 ? "" : "no ",
+ vmode & 128 ? "on" : "off",
+ vmode & 256 ?
+ (vmode & 16 ? ", line doubling" : ", half screen") : "");
+
+ return;
+}
+#endif
+
+#endif /* MACH == ATARI */
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(NVRAM_MINOR);
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
new file mode 100644
index 00000000..04a480f8
--- /dev/null
+++ b/drivers/char/nwbutton.c
@@ -0,0 +1,244 @@
+/*
+ * NetWinder Button Driver
+ * Copyright (C) Alex Holden <alex@linuxhacker.org> 1998, 1999.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+
+#define __NWBUTTON_C /* Tell the header file who we are */
+#include "nwbutton.h"
+
+static void button_sequence_finished (unsigned long parameters);
+
+static int button_press_count; /* The count of button presses */
+/* Times for the end of a sequence */
+static DEFINE_TIMER(button_timer, button_sequence_finished, 0, 0);
+static DECLARE_WAIT_QUEUE_HEAD(button_wait_queue); /* Used for blocking read */
+static char button_output_buffer[32]; /* Stores data to write out of device */
+static int bcount; /* The number of bytes in the buffer */
+static int bdelay = BUTTON_DELAY; /* The delay, in jiffies */
+static struct button_callback button_callback_list[32]; /* The callback list */
+static int callback_count; /* The number of callbacks registered */
+static int reboot_count = NUM_PRESSES_REBOOT; /* Number of presses to reboot */
+
+/*
+ * This function is called by other drivers to register a callback function
+ * to be called when a particular number of button presses occurs.
+ * The callback list is a static array of 32 entries (I somehow doubt many
+ * people are ever going to want to register more than 32 different actions
+ * to be performed by the kernel on different numbers of button presses ;).
+ * However, if an attempt is made to register a 33rd entry (perhaps a stuck
+ * loop somewhere registering the same entry over and over?), it will fail
+ * and return -ENOMEM. If an attempt is made to register a null pointer,
+ * it will fail and return -EINVAL.
+ * Because callbacks can be unregistered at random the list can become
+ * fragmented, so we need to search through the list until we find the first
+ * free entry.
+ *
+ * FIXME: Has anyone spotted any locking functions in this code recently ??
+ */
+
+int button_add_callback (void (*callback) (void), int count)
+{
+ int lp = 0;
+ if (callback_count == 32) {
+ return -ENOMEM;
+ }
+ if (!callback) {
+ return -EINVAL;
+ }
+ callback_count++;
+ for (; (button_callback_list [lp].callback); lp++);
+ button_callback_list [lp].callback = callback;
+ button_callback_list [lp].count = count;
+ return 0;
+}
+
+/*
+ * This function is called by other drivers to deregister a callback function.
+ * If you attempt to unregister a callback which does not exist, it will fail
+ * with -EINVAL. If there is more than one entry with the same address,
+ * because it searches the list from end to beginning, it will unregister the
+ * last one to be registered first (FILO - First In, Last Out).
+ * Note that this is not necessarily true if the entries are not submitted
+ * at the same time, because another driver could have unregistered a callback
+ * between the submissions creating a gap earlier in the list, which would
+ * be filled first at submission time.
+ */
+
+int button_del_callback (void (*callback) (void))
+{
+ int lp = 31;
+ if (!callback) {
+ return -EINVAL;
+ }
+	while (lp >= 0) {
+		if ((button_callback_list [lp].callback) == callback) {
+			button_callback_list [lp].callback = NULL;
+			button_callback_list [lp].count = 0;
+			callback_count--;
+			return 0;
+		}
+		lp--;
+	}
+ return -EINVAL;
+}
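+
+/*
+ * Editor's sketch (hypothetical consumer, not in the tree): how another
+ * driver would hook a three-press sequence and clean up again.
+ */
+#if 0
+static void example_triple_press(void)
+{
+	printk(KERN_INFO "button pressed three times\n");
+}
+
+static int __init example_init(void)
+{
+	/* -ENOMEM when all 32 slots are taken, -EINVAL for a NULL pointer */
+	return button_add_callback(example_triple_press, 3);
+}
+
+static void __exit example_exit(void)
+{
+	button_del_callback(example_triple_press);
+}
+#endif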
+
+/*
+ * This function is called by button_sequence_finished to search through the
+ * list of callback functions, and call any of them whose count argument
+ * matches the current count of button presses. It starts at the beginning
+ * of the list and works up to the end. It will refuse to follow a null
+ * pointer (which should never happen anyway).
+ */
+
+static void button_consume_callbacks (int bpcount)
+{
+ int lp = 0;
+ for (; lp <= 31; lp++) {
+ if ((button_callback_list [lp].count) == bpcount) {
+ if (button_callback_list [lp].callback) {
+ button_callback_list[lp].callback();
+ }
+ }
+ }
+}
+
+/*
+ * This function is called when the button_timer times out.
+ * ie. When you don't press the button for bdelay jiffies, this is taken to
+ * mean you have ended the sequence of key presses, and this function is
+ * called to wind things up (write the press_count out to /dev/button, call
+ * any matching registered function callbacks, initiate reboot, etc.).
+ */
+
+static void button_sequence_finished (unsigned long parameters)
+{
+#ifdef CONFIG_NWBUTTON_REBOOT /* Reboot using button is enabled */
+ if (button_press_count == reboot_count)
+ kill_cad_pid(SIGINT, 1); /* Ask init to reboot us */
+#endif /* CONFIG_NWBUTTON_REBOOT */
+ button_consume_callbacks (button_press_count);
+ bcount = sprintf (button_output_buffer, "%d\n", button_press_count);
+ button_press_count = 0; /* Reset the button press counter */
+ wake_up_interruptible (&button_wait_queue);
+}
+
+/*
+ * This handler is called when the orange button is pressed (GPIO 10 of the
+ * SuperIO chip, which maps to logical IRQ 26). If the press_count is 0,
+ * this is the first press, so it starts a timer and increments the counter.
+ * If it is higher than 0, it deletes the old timer, starts a new one, and
+ * increments the counter.
+ */
+
+static irqreturn_t button_handler (int irq, void *dev_id)
+{
+ button_press_count++;
+ mod_timer(&button_timer, jiffies + bdelay);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * This function is called when a user space program attempts to read
+ * /dev/nwbutton. It puts the device to sleep on the wait queue until
+ * button_sequence_finished writes some data to the buffer and flushes
+ * the queue, at which point it writes the data out to the device and
+ * returns the number of characters it has written. This function is
+ * reentrant, so that many processes can be attempting to read from the
+ * device at any one time.
+ */
+
+static int button_read (struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ interruptible_sleep_on (&button_wait_queue);
+ return (copy_to_user (buffer, &button_output_buffer, bcount))
+ ? -EFAULT : bcount;
+}
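+
+/*
+ * Editor's sketch (not part of the driver): a user-space reader that
+ * blocks until a press sequence ends and prints the press count.
+ */
+#if 0
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int main(void)
+{
+	char buf[32];
+	ssize_t n;
+	int fd = open("/dev/nwbutton", O_RDONLY);
+
+	if (fd < 0)
+		return 1;
+	n = read(fd, buf, sizeof(buf) - 1);	/* sleeps until sequence ends */
+	if (n > 0) {
+		buf[n] = '\0';
+		printf("presses: %s", buf);	/* e.g. "2\n" */
+	}
+	close(fd);
+	return 0;
+}
+#endif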
+
+/*
+ * This structure is the file operations structure, which specifies what
+ * callbacks functions the kernel should call when a user mode process
+ * attempts to perform these operations on the device.
+ */
+
+static const struct file_operations button_fops = {
+ .owner = THIS_MODULE,
+ .read = button_read,
+ .llseek = noop_llseek,
+};
+
+/*
+ * This structure is the misc device structure, which specifies the minor
+ * device number (158 in this case), the name of the device (for /proc/misc),
+ * and the address of the above file operations structure.
+ */
+
+static struct miscdevice button_misc_device = {
+ BUTTON_MINOR,
+ "nwbutton",
+ &button_fops,
+};
+
+/*
+ * This function is called to initialise the driver, either from misc.c at
+ * bootup if the driver is compiled into the kernel, or from init_module
+ * below at module insert time. It attempts to register the device node
+ * and the IRQ and fails with a warning message if either fails, though
+ * neither ever should because the device number and IRQ are unique to
+ * this driver.
+ */
+
+static int __init nwbutton_init(void)
+{
+ if (!machine_is_netwinder())
+ return -ENODEV;
+
+ printk (KERN_INFO "NetWinder Button Driver Version %s (C) Alex Holden "
+ "<alex@linuxhacker.org> 1998.\n", VERSION);
+
+ if (misc_register (&button_misc_device)) {
+ printk (KERN_WARNING "nwbutton: Couldn't register device 10, "
+ "%d.\n", BUTTON_MINOR);
+ return -EBUSY;
+ }
+
+ if (request_irq (IRQ_NETWINDER_BUTTON, button_handler, IRQF_DISABLED,
+ "nwbutton", NULL)) {
+ printk (KERN_WARNING "nwbutton: IRQ %d is not free.\n",
+ IRQ_NETWINDER_BUTTON);
+ misc_deregister (&button_misc_device);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void __exit nwbutton_exit (void)
+{
+ free_irq (IRQ_NETWINDER_BUTTON, NULL);
+ misc_deregister (&button_misc_device);
+}
+
+
+MODULE_AUTHOR("Alex Holden");
+MODULE_LICENSE("GPL");
+
+module_init(nwbutton_init);
+module_exit(nwbutton_exit);
diff --git a/drivers/char/nwbutton.h b/drivers/char/nwbutton.h
new file mode 100644
index 00000000..c3ebc16c
--- /dev/null
+++ b/drivers/char/nwbutton.h
@@ -0,0 +1,40 @@
+#ifndef __NWBUTTON_H
+#define __NWBUTTON_H
+
+/*
+ * NetWinder Button Driver
+ * Copyright (C) Alex Holden <alex@linuxhacker.org> 1998, 1999.
+ */
+
+#ifdef __NWBUTTON_C /* Actually compiling the driver itself */
+
+/* Various defines: */
+
+#define NUM_PRESSES_REBOOT 2 /* How many presses to activate shutdown */
+#define BUTTON_DELAY 30 /* How many jiffies for sequence to end */
+#define VERSION "0.3" /* Driver version number */
+#define BUTTON_MINOR 158 /* Major 10, Minor 158, /dev/nwbutton */
+
+/* Structure definitions: */
+
+struct button_callback {
+ void (*callback) (void);
+ int count;
+};
+
+/* Function prototypes: */
+
+static void button_sequence_finished (unsigned long parameters);
+static irqreturn_t button_handler (int irq, void *dev_id);
+int button_init (void);
+int button_add_callback (void (*callback) (void), int count);
+int button_del_callback (void (*callback) (void));
+static void button_consume_callbacks (int bpcount);
+
+#else /* Not compiling the driver itself */
+
+extern int button_add_callback (void (*callback) (void), int count);
+extern int button_del_callback (void (*callback) (void));
+
+#endif /* __NWBUTTON_C */
+#endif /* __NWBUTTON_H */
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
new file mode 100644
index 00000000..a12f5240
--- /dev/null
+++ b/drivers/char/nwflash.c
@@ -0,0 +1,689 @@
+/*
+ * Flash memory interface rev.5 driver for the Intel
+ * Flash chips used on the NetWinder.
+ *
+ * 20/08/2000 RMK use __ioremap to map flash into virtual memory
+ * make a few more places use "volatile"
+ * 22/05/2001 RMK - Lock read against write
+ * - merge printk level changes (with mods) from Alan Cox.
+ * - use *ppos as the file position, not file->f_pos.
+ * - fix check for out of range pos and r/w size
+ *
+ * Please note that we are tampering with the only flash chip in the
+ * machine, which contains the bootup code. We therefore have the
+ * power to convert these machines into doorstops...
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+
+#include <asm/hardware/dec21285.h>
+#include <asm/io.h>
+#include <asm/leds.h>
+#include <asm/mach-types.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+/*****************************************************************************/
+#include <asm/nwflash.h>
+
+#define NWFLASH_VERSION "6.4"
+
+static DEFINE_MUTEX(flash_mutex);
+static void kick_open(void);
+static int get_flash_id(void);
+static int erase_block(int nBlock);
+static int write_block(unsigned long p, const char __user *buf, int count);
+
+#define KFLASH_SIZE	(1024*1024)	/* 1 Meg */
+#define KFLASH_SIZE4	(4*1024*1024)	/* 4 Meg */
+#define KFLASH_ID	0x89A6		/* Intel flash */
+#define KFLASH_ID4	0xB0D4		/* Intel flash 4Meg */
+
+static int flashdebug;	/* if set - we will display progress msgs */
+
+static int gbWriteEnable;
+static int gbWriteBase64Enable;
+static volatile unsigned char *FLASH_BASE;
+static int gbFlashSize = KFLASH_SIZE;
+static DEFINE_MUTEX(nwflash_mutex);
+
+static int get_flash_id(void)
+{
+ volatile unsigned int c1, c2;
+
+ /*
+ * try to get flash chip ID
+ */
+ kick_open();
+ c2 = inb(0x80);
+ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x90;
+ udelay(15);
+ c1 = *(volatile unsigned char *) FLASH_BASE;
+ c2 = inb(0x80);
+
+ /*
+ * on 4 Meg flash the second byte is actually at offset 2...
+ */
+ if (c1 == 0xB0)
+ c2 = *(volatile unsigned char *) (FLASH_BASE + 2);
+ else
+ c2 = *(volatile unsigned char *) (FLASH_BASE + 1);
+
+ c2 += (c1 << 8);
+
+ /*
+ * set it back to read mode
+ */
+ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF;
+
+ if (c2 == KFLASH_ID4)
+ gbFlashSize = KFLASH_SIZE4;
+
+ return c2;
+}
+
+static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ mutex_lock(&flash_mutex);
+ switch (cmd) {
+ case CMD_WRITE_DISABLE:
+ gbWriteBase64Enable = 0;
+ gbWriteEnable = 0;
+ break;
+
+ case CMD_WRITE_ENABLE:
+ gbWriteEnable = 1;
+ break;
+
+ case CMD_WRITE_BASE64K_ENABLE:
+ gbWriteBase64Enable = 1;
+ break;
+
+ default:
+ gbWriteBase64Enable = 0;
+ gbWriteEnable = 0;
+ mutex_unlock(&flash_mutex);
+ return -EINVAL;
+ }
+ mutex_unlock(&flash_mutex);
+ return 0;
+}
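+
+/*
+ * Editor's sketch (not part of the driver): arming and disarming writes
+ * from user space around a flash update.  The /dev/nwflash node name is
+ * an assumption based on the misc device registration.
+ */
+#if 0
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <asm/nwflash.h>	/* CMD_WRITE_ENABLE and friends */
+
+int main(void)
+{
+	int fd = open("/dev/nwflash", O_RDWR);
+
+	if (fd < 0)
+		return 1;
+	ioctl(fd, CMD_WRITE_ENABLE, 0);		/* allow writes above 64K */
+	/* ... lseek()/write() the new image here ... */
+	ioctl(fd, CMD_WRITE_DISABLE, 0);
+	close(fd);
+	return 0;
+}
+#endif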
+
+static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
+ loff_t *ppos)
+{
+ ssize_t ret;
+
+ if (flashdebug)
+ printk(KERN_DEBUG "flash_read: flash_read: offset=0x%llx, "
+ "buffer=%p, count=0x%zx.\n", *ppos, buf, size);
+ /*
+ * We now lock against reads and writes. --rmk
+ */
+ if (mutex_lock_interruptible(&nwflash_mutex))
+ return -ERESTARTSYS;
+
+ ret = simple_read_from_buffer(buf, size, ppos, (void *)FLASH_BASE, gbFlashSize);
+ mutex_unlock(&nwflash_mutex);
+
+ return ret;
+}
+
+static ssize_t flash_write(struct file *file, const char __user *buf,
+ size_t size, loff_t * ppos)
+{
+ unsigned long p = *ppos;
+ unsigned int count = size;
+ int written;
+ int nBlock, temp, rc;
+ int i, j;
+
+ if (flashdebug)
+ printk("flash_write: offset=0x%lX, buffer=0x%p, count=0x%X.\n",
+ p, buf, count);
+
+ if (!gbWriteEnable)
+ return -EINVAL;
+
+ if (p < 64 * 1024 && (!gbWriteBase64Enable))
+ return -EINVAL;
+
+ /*
+ * check for out of range pos or count
+ */
+ if (p >= gbFlashSize)
+ return count ? -ENXIO : 0;
+
+ if (count > gbFlashSize - p)
+ count = gbFlashSize - p;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ /*
+ * We now lock against reads and writes. --rmk
+ */
+ if (mutex_lock_interruptible(&nwflash_mutex))
+ return -ERESTARTSYS;
+
+ written = 0;
+
+ leds_event(led_claim);
+ leds_event(led_green_on);
+
+ nBlock = (int) p >> 16; //block # of 64K bytes
+
+ /*
+ * # of 64K blocks to erase and write
+ */
+ temp = ((int) (p + count) >> 16) - nBlock + 1;
+
+ /*
+ * write ends at exactly 64k boundary?
+ */
+ if (((int) (p + count) & 0xFFFF) == 0)
+ temp -= 1;
+
+ if (flashdebug)
+ printk(KERN_DEBUG "flash_write: writing %d block(s) "
+ "starting at %d.\n", temp, nBlock);
+
+ for (; temp; temp--, nBlock++) {
+ if (flashdebug)
+ printk(KERN_DEBUG "flash_write: erasing block %d.\n", nBlock);
+
+ /*
+ * first we have to erase the block(s), where we will write...
+ */
+ i = 0;
+ j = 0;
+ RetryBlock:
+ do {
+ rc = erase_block(nBlock);
+ i++;
+ } while (rc && i < 10);
+
+ if (rc) {
+ printk(KERN_ERR "flash_write: erase error %x\n", rc);
+ break;
+ }
+ if (flashdebug)
+ printk(KERN_DEBUG "flash_write: writing offset %lX, "
+ "from buf %p, bytes left %X.\n", p, buf,
+ count - written);
+
+ /*
+ * write_block will limit write to space left in this block
+ */
+ rc = write_block(p, buf, count - written);
+ j++;
+
+ /*
+ * if somehow write verify failed? Can't happen??
+ */
+ if (!rc) {
+ /*
+ * retry up to 10 times
+ */
+ if (j < 10)
+ goto RetryBlock;
+ else
+ /*
+ * else quit with error...
+ */
+ rc = -1;
+
+ }
+ if (rc < 0) {
+ printk(KERN_ERR "flash_write: write error %X\n", rc);
+ break;
+ }
+ p += rc;
+ buf += rc;
+ written += rc;
+ *ppos += rc;
+
+ if (flashdebug)
+ printk(KERN_DEBUG "flash_write: written 0x%X bytes OK.\n", written);
+ }
+
+ /*
+ * restore reg on exit
+ */
+ leds_event(led_release);
+
+ mutex_unlock(&nwflash_mutex);
+
+ return written;
+}
+
+
+/*
+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
+ * check against negative addresses: they are ok. The return value is weird,
+ * though, in that case (0).
+ *
+ * also note that seeking relative to the "end of file" isn't supported:
+ * it has no meaning, so it returns -EINVAL.
+ */
+static loff_t flash_llseek(struct file *file, loff_t offset, int orig)
+{
+ loff_t ret;
+
+ mutex_lock(&flash_mutex);
+ if (flashdebug)
+ printk(KERN_DEBUG "flash_llseek: offset=0x%X, orig=0x%X.\n",
+ (unsigned int) offset, orig);
+
+ switch (orig) {
+ case 0:
+ if (offset < 0) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if ((unsigned int) offset > gbFlashSize) {
+ ret = -EINVAL;
+ break;
+ }
+
+ file->f_pos = (unsigned int) offset;
+ ret = file->f_pos;
+ break;
+ case 1:
+ if ((file->f_pos + offset) > gbFlashSize) {
+ ret = -EINVAL;
+ break;
+ }
+ if ((file->f_pos + offset) < 0) {
+ ret = -EINVAL;
+ break;
+ }
+ file->f_pos += offset;
+ ret = file->f_pos;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&flash_mutex);
+ return ret;
+}
+
+
+/*
+ * assume that main Write routine did the parameter checking...
+ * so just go ahead and erase, what requested!
+ */
+
+static int erase_block(int nBlock)
+{
+ volatile unsigned int c1;
+ volatile unsigned char *pWritePtr;
+ unsigned long timeout;
+ int temp, temp1;
+
+ /*
+ * orange LED == erase
+ */
+ leds_event(led_amber_on);
+
+ /*
+ * reset footbridge to the correct offset 0 (...0..3)
+ */
+ *CSR_ROMWRITEREG = 0;
+
+ /*
+ * dummy ROM read
+ */
+ c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);
+
+ kick_open();
+ /*
+ * reset status if old errors
+ */
+ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
+
+ /*
+ * erase a block...
+ * aim at the middle of a current block...
+ */
+ pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + 0x8000 + (nBlock << 16)));
+ /*
+ * dummy read
+ */
+ c1 = *pWritePtr;
+
+ kick_open();
+ /*
+ * erase
+ */
+ *(volatile unsigned char *) pWritePtr = 0x20;
+
+ /*
+ * confirm
+ */
+ *(volatile unsigned char *) pWritePtr = 0xD0;
+
+ /*
+ * wait 10 ms
+ */
+ msleep(10);
+
+ /*
+ * wait while the erase is in progress (up to 10 sec)
+ */
+ timeout = jiffies + 10 * HZ;
+ c1 = 0;
+ while (!(c1 & 0x80) && time_before(jiffies, timeout)) {
+ msleep(10);
+ /*
+ * read any address
+ */
+ c1 = *(volatile unsigned char *) (pWritePtr);
+ /* printk("Flash_erase: status=%X.\n", c1); */
+ }
+
+ /*
+ * set flash for normal read access
+ */
+ kick_open();
+ /* *(volatile unsigned char *)(FLASH_BASE + 0x8000) = 0xFF; */
+ *(volatile unsigned char *) pWritePtr = 0xFF; /* back to normal operation */
+
+ /*
+ * check if erase errors were reported
+ */
+ if (c1 & 0x20) {
+ printk(KERN_ERR "flash_erase: err at %p\n", pWritePtr);
+
+ /*
+ * reset error
+ */
+ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
+ return -2;
+ }
+
+ /*
+ * just to make sure - verify if erased OK...
+ */
+ msleep(10);
+
+ pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + (nBlock << 16)));
+
+ for (temp = 0; temp < 16 * 1024; temp++, pWritePtr += 4) {
+ if ((temp1 = *(volatile unsigned int *) pWritePtr) != 0xFFFFFFFF) {
+ printk(KERN_ERR "flash_erase: verify err at %p = %X\n",
+ pWritePtr, temp1);
+ return -1;
+ }
+ }
+
+ return 0;
+
+}
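+
+/*
+ * Summary of the flash command bytes issued by erase_block above and
+ * write_block below:
+ *
+ *	0x50	clear status register	0x70	read status
+ *	0x20	block erase setup	0xD0	erase confirm
+ *	0x40	byte program setup	0xFF	back to read-array mode
+ *
+ * Status is polled until bit 7 (ready) is set; bit 5 flags an erase
+ * error, bit 4 a programming error.
+ */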
+
+/*
+ * write_block will limit number of bytes written to the space in this block
+ */
+static int write_block(unsigned long p, const char __user *buf, int count)
+{
+ volatile unsigned int c1;
+ volatile unsigned int c2;
+ unsigned char *pWritePtr;
+ unsigned int uAddress;
+ unsigned int offset;
+ unsigned long timeout;
+ unsigned long timeout1;
+
+ /*
+ * red LED == write
+ */
+ leds_event(led_amber_off);
+ leds_event(led_red_on);
+
+ pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));
+
+ /*
+ * check if write will end in this block....
+ */
+ offset = p & 0xFFFF;
+
+ if (offset + count > 0x10000)
+ count = 0x10000 - offset;
+
+ /*
+ * wait up to 30 sec for this block
+ */
+ timeout = jiffies + 30 * HZ;
+
+ for (offset = 0; offset < count; offset++, pWritePtr++) {
+ uAddress = (unsigned int) pWritePtr;
+ uAddress &= 0xFFFFFFFC;
+ if (__get_user(c2, buf + offset))
+ return -EFAULT;
+
+ WriteRetry:
+ /*
+ * dummy read
+ */
+ c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);
+
+ /*
+ * kick open the write gate
+ */
+ kick_open();
+
+ /*
+ * program footbridge to the correct offset...0..3
+ */
+ *CSR_ROMWRITEREG = (unsigned int) pWritePtr & 3;
+
+ /*
+ * write cmd
+ */
+ *(volatile unsigned char *) (uAddress) = 0x40;
+
+ /*
+ * data to write
+ */
+ *(volatile unsigned char *) (uAddress) = c2;
+
+ /*
+ * get status
+ */
+ *(volatile unsigned char *) (FLASH_BASE + 0x10000) = 0x70;
+
+ c1 = 0;
+
+ /*
+ * wait up to 1 sec for this byte
+ */
+ timeout1 = jiffies + 1 * HZ;
+
+ /*
+ * while not ready...
+ */
+ while (!(c1 & 0x80) && time_before(jiffies, timeout1))
+ c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);
+
+ /*
+ * if timeout getting status
+ */
+ if (time_after_eq(jiffies, timeout1)) {
+ kick_open();
+ /*
+ * reset err
+ */
+ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
+
+ goto WriteRetry;
+ }
+ /*
+ * switch on read access, as a default flash operation mode
+ */
+ kick_open();
+ /*
+ * read access
+ */
+ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF;
+
+ /*
+ * if hardware reports an error writing, and not timeout -
+ * reset the chip and retry
+ */
+ if (c1 & 0x10) {
+ kick_open();
+ /*
+ * reset err
+ */
+ *(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
+
+ /*
+ * before timeout?
+ */
+ if (time_before(jiffies, timeout)) {
+ if (flashdebug)
+ printk(KERN_DEBUG "write_block: Retrying write at 0x%X)n",
+ pWritePtr - FLASH_BASE);
+
+ /*
+ * no LED == waiting
+ */
+ leds_event(led_amber_off);
+ /*
+ * wait a couple of ms
+ */
+ msleep(10);
+ /*
+ * red LED == write
+ */
+ leds_event(led_red_on);
+
+ goto WriteRetry;
+ } else {
+ printk(KERN_ERR "write_block: timeout at 0x%X\n",
+ pWritePtr - FLASH_BASE);
+ /*
+ * return error -2
+ */
+ return -2;
+
+ }
+ }
+ }
+
+ /*
+ * green LED == read/verify
+ */
+ leds_event(led_amber_off);
+ leds_event(led_green_on);
+
+ msleep(10);
+
+ pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));
+
+ for (offset = 0; offset < count; offset++) {
+ char c, c1;
+ if (__get_user(c, buf))
+ return -EFAULT;
+ buf++;
+ if ((c1 = *pWritePtr++) != c) {
+ printk(KERN_ERR "write_block: verify error at 0x%X (%02X!=%02X)\n",
+ pWritePtr - FLASH_BASE, c1, c);
+ return 0;
+ }
+ }
+
+ return count;
+}
+
+
+static void kick_open(void)
+{
+ unsigned long flags;
+
+ /*
+ * we want to write a bit pattern XXX1 to Xilinx to enable
+ * the write gate, which will be open for about the next 2ms.
+ */
+ spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
+ spin_unlock_irqrestore(&nw_gpio_lock, flags);
+
+ /*
+ * give the ISA bus time to catch up...
+ */
+ udelay(25);
+}
+
+static const struct file_operations flash_fops =
+{
+ .owner = THIS_MODULE,
+ .llseek = flash_llseek,
+ .read = flash_read,
+ .write = flash_write,
+ .unlocked_ioctl = flash_ioctl,
+};
+
+static struct miscdevice flash_miscdev =
+{
+ FLASH_MINOR,
+ "nwflash",
+ &flash_fops
+};
+
+static int __init nwflash_init(void)
+{
+ int ret = -ENODEV;
+
+ if (machine_is_netwinder()) {
+ int id;
+
+ FLASH_BASE = ioremap(DC21285_FLASH, KFLASH_SIZE4);
+ if (!FLASH_BASE)
+ goto out;
+
+ id = get_flash_id();
+ if ((id != KFLASH_ID) && (id != KFLASH_ID4)) {
+ ret = -ENXIO;
+ iounmap((void *)FLASH_BASE);
+ printk("Flash: incorrect ID 0x%04X.\n", id);
+ goto out;
+ }
+
+ printk("Flash ROM driver v.%s, flash device ID 0x%04X, size %d Mb.\n",
+ NWFLASH_VERSION, id, gbFlashSize / (1024 * 1024));
+
+ ret = misc_register(&flash_miscdev);
+ if (ret < 0) {
+ iounmap((void *)FLASH_BASE);
+ }
+ }
+out:
+ return ret;
+}
+
+static void __exit nwflash_exit(void)
+{
+ misc_deregister(&flash_miscdev);
+ iounmap((void *)FLASH_BASE);
+}
+
+MODULE_LICENSE("GPL");
+
+module_param(flashdebug, bool, 0644);
+
+module_init(nwflash_init);
+module_exit(nwflash_exit);
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
new file mode 100644
index 00000000..b304ec05
--- /dev/null
+++ b/drivers/char/pc8736x_gpio.c
@@ -0,0 +1,353 @@
+/* linux/drivers/char/pc8736x_gpio.c
+
+ National Semiconductor PC8736x GPIO driver. Allows a user space
+ process to play with the GPIO pins.
+
+ Copyright (c) 2005,2006 Jim Cromie <jim.cromie@gmail.com>
+
+ adapted from linux/drivers/char/scx200_gpio.c
+ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>,
+*/
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/mutex.h>
+#include <linux/nsc_gpio.h>
+#include <linux/platform_device.h>
+#include <asm/uaccess.h>
+
+#define DEVNAME "pc8736x_gpio"
+
+MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_DESCRIPTION("NatSemi/Winbond PC-8736x GPIO Pin Driver");
+MODULE_LICENSE("GPL");
+
+static int major; /* default to dynamic major */
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+
+static DEFINE_MUTEX(pc8736x_gpio_config_lock);
+static unsigned pc8736x_gpio_base;
+static u8 pc8736x_gpio_shadow[4];
+
+#define SIO_BASE1 0x2E /* 1st command-reg to check */
+#define SIO_BASE2 0x4E /* alt command-reg to check */
+
+#define SIO_SID 0x20 /* SuperI/O ID Register */
+#define SIO_SID_PC87365 0xe5 /* Expected value in ID Register for PC87365 */
+#define SIO_SID_PC87366 0xe9 /* Expected value in ID Register for PC87366 */
+
+#define SIO_CF1 0x21 /* chip config, bit0 is chip enable */
+
+#define PC8736X_GPIO_RANGE 16 /* ioaddr range */
+#define PC8736X_GPIO_CT 32 /* minors matching 4 8 bit ports */
+
+#define SIO_UNIT_SEL 0x7 /* unit select reg */
+#define SIO_UNIT_ACT 0x30 /* unit enable */
+#define SIO_GPIO_UNIT 0x7 /* unit number of GPIO */
+#define SIO_VLM_UNIT 0x0D
+#define SIO_TMS_UNIT 0x0E
+
+/* config-space addrs to read/write each unit's runtime addr */
+#define SIO_BASE_HADDR 0x60
+#define SIO_BASE_LADDR 0x61
+
+/* GPIO config-space pin-control addresses */
+#define SIO_GPIO_PIN_SELECT 0xF0
+#define SIO_GPIO_PIN_CONFIG 0xF1
+#define SIO_GPIO_PIN_EVENT 0xF2
+
+static unsigned char superio_cmd = 0;
+static unsigned char selected_device = 0xFF; /* bogus start val */
+
+/* GPIO port runtime access, functionality */
+static int port_offset[] = { 0, 4, 8, 10 }; /* non-uniform offsets ! */
+/* static int event_capable[] = { 1, 1, 0, 0 }; ports 2,3 are hobbled */
+
+#define PORT_OUT 0
+#define PORT_IN 1
+#define PORT_EVT_EN 2
+#define PORT_EVT_STST 3
+
+static struct platform_device *pdev; /* use in dev_*() */
+
+static inline void superio_outb(int addr, int val)
+{
+ outb_p(addr, superio_cmd);
+ outb_p(val, superio_cmd + 1);
+}
+
+static inline int superio_inb(int addr)
+{
+ outb_p(addr, superio_cmd);
+ return inb_p(superio_cmd + 1);
+}
+
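+/*
+ * The Super I/O index/data register pair sits at either 0x2E/0x2F or
+ * 0x4E/0x4F; the probe below reads the SID register through each
+ * candidate base and matches it against the PC87365/PC87366 IDs.
+ */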
+static int pc8736x_superio_present(void)
+{
+ int id;
+
+ /* try the 2 possible values, read a hardware reg to verify */
+ superio_cmd = SIO_BASE1;
+ id = superio_inb(SIO_SID);
+ if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366)
+ return superio_cmd;
+
+ superio_cmd = SIO_BASE2;
+ id = superio_inb(SIO_SID);
+ if (id == SIO_SID_PC87365 || id == SIO_SID_PC87366)
+ return superio_cmd;
+
+ return 0;
+}
+
+static void device_select(unsigned devldn)
+{
+ superio_outb(SIO_UNIT_SEL, devldn);
+ selected_device = devldn;
+}
+
+static void select_pin(unsigned iminor)
+{
+ /* select GPIO port/pin from device minor number */
+ device_select(SIO_GPIO_UNIT);
+ superio_outb(SIO_GPIO_PIN_SELECT,
+ ((iminor << 1) & 0xF0) | (iminor & 0x7));
+}
+
+static inline u32 pc8736x_gpio_configure_fn(unsigned index, u32 mask, u32 bits,
+ u32 func_slct)
+{
+ u32 config, new_config;
+
+ mutex_lock(&pc8736x_gpio_config_lock);
+
+ device_select(SIO_GPIO_UNIT);
+ select_pin(index);
+
+ /* read current config value */
+ config = superio_inb(func_slct);
+
+ /* set new config */
+ new_config = (config & mask) | bits;
+ superio_outb(func_slct, new_config);
+
+ mutex_unlock(&pc8736x_gpio_config_lock);
+
+ return config;
+}
+
+static u32 pc8736x_gpio_configure(unsigned index, u32 mask, u32 bits)
+{
+ return pc8736x_gpio_configure_fn(index, mask, bits,
+ SIO_GPIO_PIN_CONFIG);
+}
+
+static int pc8736x_gpio_get(unsigned minor)
+{
+ int port, bit, val;
+
+ port = minor >> 3;
+ bit = minor & 7;
+ val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
+ val >>= bit;
+ val &= 1;
+
+ dev_dbg(&pdev->dev, "_gpio_get(%d from %x bit %d) == val %d\n",
+ minor, pc8736x_gpio_base + port_offset[port] + PORT_IN, bit,
+ val);
+
+ return val;
+}
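+
+/*
+ * Worked example of the minor-number decode used above: minor 11 is
+ * port 1 (11 >> 3), bit 3 (11 & 7), so the input level is read from
+ * pc8736x_gpio_base + port_offset[1] + PORT_IN, i.e. base + 4 + 1.
+ */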
+
+static void pc8736x_gpio_set(unsigned minor, int val)
+{
+ int port, bit, curval;
+
+ minor &= 0x1f;
+ port = minor >> 3;
+ bit = minor & 7;
+ curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+
+ dev_dbg(&pdev->dev, "addr:%x cur:%x bit-pos:%d cur-bit:%x + new:%d -> bit-new:%d\n",
+ pc8736x_gpio_base + port_offset[port] + PORT_OUT,
+ curval, bit, (curval & ~(1 << bit)), val, (val << bit));
+
+ val = (curval & ~(1 << bit)) | (val << bit);
+
+ dev_dbg(&pdev->dev, "gpio_set(minor:%d port:%d bit:%d)"
+ " %2x -> %2x\n", minor, port, bit, curval, val);
+
+ outb_p(val, pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+
+ curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+ val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
+
+ dev_dbg(&pdev->dev, "wrote %x, read: %x\n", curval, val);
+ pc8736x_gpio_shadow[port] = val;
+}
+
+static int pc8736x_gpio_current(unsigned minor)
+{
+ int port, bit;
+ minor &= 0x1f;
+ port = minor >> 3;
+ bit = minor & 7;
+ return ((pc8736x_gpio_shadow[port] >> bit) & 0x01);
+}
+
+static void pc8736x_gpio_change(unsigned index)
+{
+ pc8736x_gpio_set(index, !pc8736x_gpio_current(index));
+}
+
+static struct nsc_gpio_ops pc8736x_gpio_ops = {
+ .owner = THIS_MODULE,
+ .gpio_config = pc8736x_gpio_configure,
+ .gpio_dump = nsc_gpio_dump,
+ .gpio_get = pc8736x_gpio_get,
+ .gpio_set = pc8736x_gpio_set,
+ .gpio_change = pc8736x_gpio_change,
+ .gpio_current = pc8736x_gpio_current
+};
+
+static int pc8736x_gpio_open(struct inode *inode, struct file *file)
+{
+ unsigned m = iminor(inode);
+ file->private_data = &pc8736x_gpio_ops;
+
+ dev_dbg(&pdev->dev, "open %d\n", m);
+
+ if (m >= PC8736X_GPIO_CT)
+ return -EINVAL;
+ return nonseekable_open(inode, file);
+}
+
+static const struct file_operations pc8736x_gpio_fileops = {
+ .owner = THIS_MODULE,
+ .open = pc8736x_gpio_open,
+ .write = nsc_gpio_write,
+ .read = nsc_gpio_read,
+ .llseek = no_llseek,
+};
+
+static void __init pc8736x_init_shadow(void)
+{
+ int port;
+
+ /* read the current values driven on the GPIO signals */
+ for (port = 0; port < 4; ++port)
+ pc8736x_gpio_shadow[port]
+ = inb_p(pc8736x_gpio_base + port_offset[port]
+ + PORT_OUT);
+
+}
+
+static struct cdev pc8736x_gpio_cdev;
+
+static int __init pc8736x_gpio_init(void)
+{
+ int rc;
+ dev_t devid;
+
+ pdev = platform_device_alloc(DEVNAME, 0);
+ if (!pdev)
+ return -ENOMEM;
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ rc = -ENODEV;
+ goto undo_platform_dev_alloc;
+ }
+ dev_info(&pdev->dev, "NatSemi pc8736x GPIO Driver Initializing\n");
+
+ if (!pc8736x_superio_present()) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "no device found\n");
+ goto undo_platform_dev_add;
+ }
+ pc8736x_gpio_ops.dev = &pdev->dev;
+
+ /* Verify that the chip and its GPIO unit are both enabled.
+ My BIOS does this, so I take minimal action here
+ */
+ rc = superio_inb(SIO_CF1);
+ if (!(rc & 0x01)) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "device not enabled\n");
+ goto undo_platform_dev_add;
+ }
+ device_select(SIO_GPIO_UNIT);
+ if (!superio_inb(SIO_UNIT_ACT)) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "GPIO unit not enabled\n");
+ goto undo_platform_dev_add;
+ }
+
+ /* read the GPIO unit base addr that chip responds to */
+ pc8736x_gpio_base = (superio_inb(SIO_BASE_HADDR) << 8
+ | superio_inb(SIO_BASE_LADDR));
+
+ if (!request_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE, DEVNAME)) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "GPIO ioport %x busy\n",
+ pc8736x_gpio_base);
+ goto undo_platform_dev_add;
+ }
+ dev_info(&pdev->dev, "GPIO ioport %x reserved\n", pc8736x_gpio_base);
+
+ if (major) {
+ devid = MKDEV(major, 0);
+ rc = register_chrdev_region(devid, PC8736X_GPIO_CT, DEVNAME);
+ } else {
+ rc = alloc_chrdev_region(&devid, 0, PC8736X_GPIO_CT, DEVNAME);
+ major = MAJOR(devid);
+ }
+
+ if (rc < 0) {
+ dev_err(&pdev->dev, "register-chrdev failed: %d\n", rc);
+ goto undo_request_region;
+ }
+ if (!major) {
+ major = rc;
+ dev_dbg(&pdev->dev, "got dynamic major %d\n", major);
+ }
+
+ pc8736x_init_shadow();
+
+ /* ignore minor errs, and succeed */
+ cdev_init(&pc8736x_gpio_cdev, &pc8736x_gpio_fileops);
+ cdev_add(&pc8736x_gpio_cdev, devid, PC8736X_GPIO_CT);
+
+ return 0;
+
+undo_request_region:
+ release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE);
+undo_platform_dev_add:
+ platform_device_del(pdev);
+undo_platform_dev_alloc:
+ platform_device_put(pdev);
+
+ return rc;
+}
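+
+/*
+ * Hypothetical usage sketch (the node name is an assumption; this
+ * driver does not create device nodes itself): read the major from
+ * /proc/devices, then create a node whose minor selects port and pin:
+ *
+ *	mknod /dev/pc8736x_gpio.11 c $MAJOR 11	- port 1, pin 3
+ */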
+
+static void __exit pc8736x_gpio_cleanup(void)
+{
+ dev_dbg(&pdev->dev, "cleanup\n");
+
+ cdev_del(&pc8736x_gpio_cdev);
+ unregister_chrdev_region(MKDEV(major,0), PC8736X_GPIO_CT);
+ release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE);
+
+ platform_device_del(pdev);
+ platform_device_put(pdev);
+}
+
+module_init(pc8736x_gpio_init);
+module_exit(pc8736x_gpio_cleanup);
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
new file mode 100644
index 00000000..6614416a
--- /dev/null
+++ b/drivers/char/pcmcia/Kconfig
@@ -0,0 +1,56 @@
+#
+# PCMCIA character device configuration
+#
+
+menu "PCMCIA character devices"
+ depends on HOTPLUG && PCMCIA!=n
+
+config SYNCLINK_CS
+ tristate "SyncLink PC Card support"
+ depends on PCMCIA
+ help
+ Enable support for the SyncLink PC Card serial adapter, running
+ asynchronous and HDLC communications up to 512Kbps. The port is
+ selectable for RS-232, V.35, RS-449, RS-530, and X.21.
+
+ This driver may be built as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called synclink_cs. If you want to do that, say M
+ here.
+
+config CARDMAN_4000
+ tristate "Omnikey Cardman 4000 support"
+ depends on PCMCIA
+ select BITREVERSE
+ help
+ Enable support for the Omnikey Cardman 4000 PCMCIA Smartcard
+ reader.
+
+ This kernel driver requires additional userspace support, either
+ by the vendor-provided PC/SC ifd_handler (http://www.omnikey.com/),
+ or via the cm4000 backend of OpenCT (http://www.opensc-project.org/opensc).
+
+config CARDMAN_4040
+ tristate "Omnikey CardMan 4040 support"
+ depends on PCMCIA
+ help
+ Enable support for the Omnikey CardMan 4040 PCMCIA Smartcard
+ reader.
+
+ This card is basically a USB CCID device connected to a FIFO
+ in I/O space. To use the kernel driver, you will need either the
+ PC/SC ifdhandler provided from the Omnikey homepage
+ (http://www.omnikey.com/), or a current development version of OpenCT
+ (http://www.opensc-project.org/opensc).
+
+config IPWIRELESS
+ tristate "IPWireless 3G UMTS PCMCIA card support"
+ depends on PCMCIA && NETDEVICES
+ select PPP
+ help
+ This is a driver for the 3G UMTS PCMCIA card from the IPWireless
+ company. In some countries (for example the Czech Republic, with
+ T-Mobile as the ISP) this card is shipped for a service called UMTS 4G.
+
+endmenu
+
diff --git a/drivers/char/pcmcia/Makefile b/drivers/char/pcmcia/Makefile
new file mode 100644
index 00000000..0aae2098
--- /dev/null
+++ b/drivers/char/pcmcia/Makefile
@@ -0,0 +1,9 @@
+#
+# drivers/char/pcmcia/Makefile
+#
+# Makefile for the Linux PCMCIA char device drivers.
+#
+
+obj-$(CONFIG_SYNCLINK_CS) += synclink_cs.o
+obj-$(CONFIG_CARDMAN_4000) += cm4000_cs.o
+obj-$(CONFIG_CARDMAN_4040) += cm4040_cs.o
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
new file mode 100644
index 00000000..a7584860
--- /dev/null
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -0,0 +1,1924 @@
+ /*
+ * A driver for the PCMCIA Smartcard Reader "Omnikey CardMan Mobile 4000"
+ *
+ * cm4000_cs.c support.linux@omnikey.com
+ *
+ * Tue Oct 23 11:32:43 GMT 2001 herp - cleaned up header files
+ * Sun Jan 20 10:11:15 MET 2002 herp - added modversion header files
+ * Thu Nov 14 16:34:11 GMT 2002 mh - added PPS functionality
+ * Tue Nov 19 16:36:27 GMT 2002 mh - added SUSPEND/RESUME functionality
+ * Wed Jul 28 12:55:01 CEST 2004 mh - kernel 2.6 adjustments
+ *
+ * current version: 2.4.0gm4
+ *
+ * (C) 2000,2001,2002,2003,2004 Omnikey AG
+ *
+ * (C) 2005-2006 Harald Welte <laforge@gnumonks.org>
+ * - Adhere to Kernel CodingStyle
+ * - Port to 2.6.13 "new" style PCMCIA
+ * - Check for copy_{from,to}_user return values
+ * - Use nonseekable_open()
+ * - add class interface for udev device creation
+ *
+ * All rights reserved. Licensed under dual BSD/GPL license.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/bitrev.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+#include <linux/cm4000_cs.h>
+
+/* #define ATR_CSUM */
+
+#define reader_to_dev(x) (&x->p_dev->dev)
+
+/* n (debug level) is ignored */
+/* additional debug output may be enabled by re-compiling with
+ * CM4000_DEBUG set */
+/* #define CM4000_DEBUG */
+#define DEBUGP(n, rdr, x, args...) do { \
+ dev_dbg(reader_to_dev(rdr), "%s:" x, \
+ __func__ , ## args); \
+ } while (0)
+
+static DEFINE_MUTEX(cmm_mutex);
+
+#define T_1SEC (HZ)
+#define T_10MSEC msecs_to_jiffies(10)
+#define T_20MSEC msecs_to_jiffies(20)
+#define T_40MSEC msecs_to_jiffies(40)
+#define T_50MSEC msecs_to_jiffies(50)
+#define T_100MSEC msecs_to_jiffies(100)
+#define T_500MSEC msecs_to_jiffies(500)
+
+static void cm4000_release(struct pcmcia_device *link);
+
+static int major; /* major number we get from the kernel */
+
+/* note: the first state must always be number 0 */
+
+#define M_FETCH_ATR 0
+#define M_TIMEOUT_WAIT 1
+#define M_READ_ATR_LEN 2
+#define M_READ_ATR 3
+#define M_ATR_PRESENT 4
+#define M_BAD_CARD 5
+#define M_CARDOFF 6
+
+#define LOCK_IO 0
+#define LOCK_MONITOR 1
+
+#define IS_AUTOPPS_ACT 6
+#define IS_PROCBYTE_PRESENT 7
+#define IS_INVREV 8
+#define IS_ANY_T0 9
+#define IS_ANY_T1 10
+#define IS_ATR_PRESENT 11
+#define IS_ATR_VALID 12
+#define IS_CMM_ABSENT 13
+#define IS_BAD_LENGTH 14
+#define IS_BAD_CSUM 15
+#define IS_BAD_CARD 16
+
+#define REG_FLAGS0(x) (x + 0)
+#define REG_FLAGS1(x) (x + 1)
+#define REG_NUM_BYTES(x) (x + 2)
+#define REG_BUF_ADDR(x) (x + 3)
+#define REG_BUF_DATA(x) (x + 4)
+#define REG_NUM_SEND(x) (x + 5)
+#define REG_BAUDRATE(x) (x + 6)
+#define REG_STOPBITS(x) (x + 7)
+
+struct cm4000_dev {
+ struct pcmcia_device *p_dev;
+
+ unsigned char atr[MAX_ATR];
+ unsigned char rbuf[512];
+ unsigned char sbuf[512];
+
+ wait_queue_head_t devq; /* when removing cardman must not be
+ zeroed! */
+
+ wait_queue_head_t ioq; /* if IO is locked, wait on this Q */
+ wait_queue_head_t atrq; /* wait for ATR valid */
+ wait_queue_head_t readq; /* used by write to wake blk.read */
+
+ /* warning: do not move these fields;
+ * zero-initialisation depends on their layout - see ZERO_DEV below. */
+ unsigned char atr_csum;
+ unsigned char atr_len_retry;
+ unsigned short atr_len;
+ unsigned short rlen; /* bytes avail. after write */
+ unsigned short rpos; /* latest read pos. write zeroes */
+ unsigned char procbyte; /* T=0 procedure byte */
+ unsigned char mstate; /* state of card monitor */
+ unsigned char cwarn; /* slow down warning */
+ unsigned char flags0; /* cardman IO-flags 0 */
+ unsigned char flags1; /* cardman IO-flags 1 */
+ unsigned int mdelay; /* variable monitor speeds, in jiffies */
+
+ unsigned int baudv; /* baud value for speed */
+ unsigned char ta1;
+ unsigned char proto; /* T=0, T=1, ... */
+ unsigned long flags; /* lock+flags (MONITOR,IO,ATR) * for concurrent
+ access */
+
+ unsigned char pts[4];
+
+ struct timer_list timer; /* used to keep monitor running */
+ int monitor_running;
+};
+
+#define ZERO_DEV(dev) \
+ memset(&dev->atr_csum,0, \
+ sizeof(struct cm4000_dev) - \
+ offsetof(struct cm4000_dev, atr_csum))
+
+static struct pcmcia_device *dev_table[CM4000_MAX_DEV];
+static struct class *cmm_class;
+
+/* This table doesn't use spaces after the comma between fields and thus
+ * violates CodingStyle. However, I don't really think wrapping it around will
+ * make it any clearer to read -HW */
+static unsigned char fi_di_table[10][14] = {
+/*FI 00 01 02 03 04 05 06 07 08 09 10 11 12 13 */
+/*DI */
+/* 0 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11},
+/* 1 */ {0x01,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x91,0x11,0x11,0x11,0x11},
+/* 2 */ {0x02,0x12,0x22,0x32,0x11,0x11,0x11,0x11,0x11,0x92,0xA2,0xB2,0x11,0x11},
+/* 3 */ {0x03,0x13,0x23,0x33,0x43,0x53,0x63,0x11,0x11,0x93,0xA3,0xB3,0xC3,0xD3},
+/* 4 */ {0x04,0x14,0x24,0x34,0x44,0x54,0x64,0x11,0x11,0x94,0xA4,0xB4,0xC4,0xD4},
+/* 5 */ {0x00,0x15,0x25,0x35,0x45,0x55,0x65,0x11,0x11,0x95,0xA5,0xB5,0xC5,0xD5},
+/* 6 */ {0x06,0x16,0x26,0x36,0x46,0x56,0x66,0x11,0x11,0x96,0xA6,0xB6,0xC6,0xD6},
+/* 7 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11},
+/* 8 */ {0x08,0x11,0x28,0x38,0x48,0x58,0x68,0x11,0x11,0x98,0xA8,0xB8,0xC8,0xD8},
+/* 9 */ {0x09,0x19,0x29,0x39,0x49,0x59,0x69,0x11,0x11,0x99,0xA9,0xB9,0xC9,0xD9}
+};
+
+#ifndef CM4000_DEBUG
+#define xoutb outb
+#define xinb inb
+#else
+static inline void xoutb(unsigned char val, unsigned short port)
+{
+ pr_debug("outb(val=%.2x,port=%.4x)\n", val, port);
+ outb(val, port);
+}
+static inline unsigned char xinb(unsigned short port)
+{
+ unsigned char val;
+
+ val = inb(port);
+ pr_debug("%.2x=inb(%.4x)\n", val, port);
+
+ return val;
+}
+#endif
+
+static inline unsigned char invert_revert(unsigned char ch)
+{
+ return bitrev8(~ch);
+}
+
+static void str_invert_revert(unsigned char *b, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ b[i] = invert_revert(b[i]);
+}
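+
+/*
+ * Example: an inverse-convention ATR arrives from the reader with a
+ * raw TS byte of 0x03; invert_revert(0x03) = bitrev8(~0x03) =
+ * bitrev8(0xFC) = 0x3F, the inverse-convention TS byte.  This is why
+ * the monitor below calls str_invert_revert() when atr[0] == 0x03.
+ */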
+
+#define ATRLENCK(dev,pos) \
+ if (pos>=dev->atr_len || pos>=MAX_ATR) \
+ goto return_0;
+
+static unsigned int calc_baudv(unsigned char fidi)
+{
+ unsigned int wcrcf, wbrcf, fi_rfu, di_rfu;
+
+ fi_rfu = 372;
+ di_rfu = 1;
+
+ /* FI */
+ switch ((fidi >> 4) & 0x0F) {
+ case 0x00:
+ wcrcf = 372;
+ break;
+ case 0x01:
+ wcrcf = 372;
+ break;
+ case 0x02:
+ wcrcf = 558;
+ break;
+ case 0x03:
+ wcrcf = 744;
+ break;
+ case 0x04:
+ wcrcf = 1116;
+ break;
+ case 0x05:
+ wcrcf = 1488;
+ break;
+ case 0x06:
+ wcrcf = 1860;
+ break;
+ case 0x07:
+ wcrcf = fi_rfu;
+ break;
+ case 0x08:
+ wcrcf = fi_rfu;
+ break;
+ case 0x09:
+ wcrcf = 512;
+ break;
+ case 0x0A:
+ wcrcf = 768;
+ break;
+ case 0x0B:
+ wcrcf = 1024;
+ break;
+ case 0x0C:
+ wcrcf = 1536;
+ break;
+ case 0x0D:
+ wcrcf = 2048;
+ break;
+ default:
+ wcrcf = fi_rfu;
+ break;
+ }
+
+ /* DI */
+ switch (fidi & 0x0F) {
+ case 0x00:
+ wbrcf = di_rfu;
+ break;
+ case 0x01:
+ wbrcf = 1;
+ break;
+ case 0x02:
+ wbrcf = 2;
+ break;
+ case 0x03:
+ wbrcf = 4;
+ break;
+ case 0x04:
+ wbrcf = 8;
+ break;
+ case 0x05:
+ wbrcf = 16;
+ break;
+ case 0x06:
+ wbrcf = 32;
+ break;
+ case 0x07:
+ wbrcf = di_rfu;
+ break;
+ case 0x08:
+ wbrcf = 12;
+ break;
+ case 0x09:
+ wbrcf = 20;
+ break;
+ default:
+ wbrcf = di_rfu;
+ break;
+ }
+
+ return (wcrcf / wbrcf);
+}
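+
+/*
+ * Worked example: the default TA(1) of 0x11 selects Fi = 372 and
+ * Di = 1, so calc_baudv(0x11) returns 372; a TA(1) of 0x94 would
+ * select Fi = 512, Di = 8 and return 64.
+ */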
+
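+/*
+ * The receive count is nine bits wide: the low eight come from
+ * REG_NUM_BYTES and the ninth (0x100) from a FLAGS0 flag, so the
+ * count is re-read until two consecutive reads agree to guard
+ * against it changing between the two inb()s.
+ */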
+static unsigned short io_read_num_rec_bytes(unsigned int iobase,
+ unsigned short *s)
+{
+ unsigned short tmp;
+
+ tmp = *s = 0;
+ do {
+ *s = tmp;
+ tmp = inb(REG_NUM_BYTES(iobase)) |
+ (inb(REG_FLAGS0(iobase)) & 4 ? 0x100 : 0);
+ } while (tmp != *s);
+
+ return *s;
+}
+
+static int parse_atr(struct cm4000_dev *dev)
+{
+ unsigned char any_t1, any_t0;
+ unsigned char ch, ifno;
+ int ix, done;
+
+ DEBUGP(3, dev, "-> parse_atr: dev->atr_len = %i\n", dev->atr_len);
+
+ if (dev->atr_len < 3) {
+ DEBUGP(5, dev, "parse_atr: atr_len < 3\n");
+ return 0;
+ }
+
+ if (dev->atr[0] == 0x3f)
+ set_bit(IS_INVREV, &dev->flags);
+ else
+ clear_bit(IS_INVREV, &dev->flags);
+ ix = 1;
+ ifno = 1;
+ ch = dev->atr[1];
+ dev->proto = 0; /* XXX PROTO */
+ any_t1 = any_t0 = done = 0;
+ dev->ta1 = 0x11; /* defaults to 9600 baud */
+ do {
+ if (ifno == 1 && (ch & 0x10)) {
+ /* read first interface byte and TA1 is present */
+ dev->ta1 = dev->atr[2];
+ DEBUGP(5, dev, "Card says FiDi is 0x%.2x\n", dev->ta1);
+ ifno++;
+ } else if ((ifno == 2) && (ch & 0x10)) { /* TA(2) */
+ dev->ta1 = 0x11;
+ ifno++;
+ }
+
+ DEBUGP(5, dev, "Yi=%.2x\n", ch & 0xf0);
+ ix += ((ch & 0x10) >> 4) /* no of int.face chars */
+ +((ch & 0x20) >> 5)
+ + ((ch & 0x40) >> 6)
+ + ((ch & 0x80) >> 7);
+ /* ATRLENCK(dev,ix); */
+ if (ch & 0x80) { /* TDi */
+ ch = dev->atr[ix];
+ if ((ch & 0x0f)) {
+ any_t1 = 1;
+ DEBUGP(5, dev, "card is capable of T=1\n");
+ } else {
+ any_t0 = 1;
+ DEBUGP(5, dev, "card is capable of T=0\n");
+ }
+ } else
+ done = 1;
+ } while (!done);
+
+ DEBUGP(5, dev, "ix=%d noHist=%d any_t1=%d\n",
+ ix, dev->atr[1] & 15, any_t1);
+ if (ix + 1 + (dev->atr[1] & 0x0f) + any_t1 != dev->atr_len) {
+ DEBUGP(5, dev, "length error\n");
+ return 0;
+ }
+ if (any_t0)
+ set_bit(IS_ANY_T0, &dev->flags);
+
+ if (any_t1) { /* compute csum */
+ dev->atr_csum = 0;
+#ifdef ATR_CSUM
+ for (i = 1; i < dev->atr_len; i++)
+ dev->atr_csum ^= dev->atr[i];
+ if (dev->atr_csum) {
+ set_bit(IS_BAD_CSUM, &dev->flags);
+ DEBUGP(5, dev, "bad checksum\n");
+ goto return_0;
+ }
+#endif
+ if (any_t0 == 0)
+ dev->proto = 1; /* XXX PROTO */
+ set_bit(IS_ANY_T1, &dev->flags);
+ }
+
+ return 1;
+}
+
+struct card_fixup {
+ char atr[12];
+ u_int8_t atr_len;
+ u_int8_t stopbits;
+};
+
+static struct card_fixup card_fixups[] = {
+ { /* ACOS */
+ .atr = { 0x3b, 0xb3, 0x11, 0x00, 0x00, 0x41, 0x01 },
+ .atr_len = 7,
+ .stopbits = 0x03,
+ },
+ { /* Motorola */
+ .atr = {0x3b, 0x76, 0x13, 0x00, 0x00, 0x80, 0x62, 0x07,
+ 0x41, 0x81, 0x81 },
+ .atr_len = 11,
+ .stopbits = 0x04,
+ },
+};
+
+static void set_cardparameter(struct cm4000_dev *dev)
+{
+ int i;
+ unsigned int iobase = dev->p_dev->resource[0]->start;
+ u_int8_t stopbits = 0x02; /* ISO default */
+
+ DEBUGP(3, dev, "-> set_cardparameter\n");
+
+ dev->flags1 = dev->flags1 | (((dev->baudv - 1) & 0x0100) >> 8);
+ xoutb(dev->flags1, REG_FLAGS1(iobase));
+ DEBUGP(5, dev, "flags1 = 0x%02x\n", dev->flags1);
+
+ /* set baudrate */
+ xoutb((unsigned char)((dev->baudv - 1) & 0xFF), REG_BAUDRATE(iobase));
+
+ DEBUGP(5, dev, "baudv = %i -> write 0x%02x\n", dev->baudv,
+ ((dev->baudv - 1) & 0xFF));
+
+ /* set stopbits */
+ for (i = 0; i < ARRAY_SIZE(card_fixups); i++) {
+ if (!memcmp(dev->atr, card_fixups[i].atr,
+ card_fixups[i].atr_len))
+ stopbits = card_fixups[i].stopbits;
+ }
+ xoutb(stopbits, REG_STOPBITS(iobase));
+
+ DEBUGP(3, dev, "<- set_cardparameter\n");
+}
+
+static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
+{
+
+ unsigned long tmp, i;
+ unsigned short num_bytes_read;
+ unsigned char pts_reply[4];
+ ssize_t rc;
+ unsigned int iobase = dev->p_dev->resource[0]->start;
+
+ rc = 0;
+
+ DEBUGP(3, dev, "-> set_protocol\n");
+ DEBUGP(5, dev, "ptsreq->Protocol = 0x%.8x, ptsreq->Flags=0x%.8x, "
+ "ptsreq->pts1=0x%.2x, ptsreq->pts2=0x%.2x, "
+ "ptsreq->pts3=0x%.2x\n", (unsigned int)ptsreq->protocol,
+ (unsigned int)ptsreq->flags, ptsreq->pts1, ptsreq->pts2,
+ ptsreq->pts3);
+
+ /* Fill PTS structure */
+ dev->pts[0] = 0xff;
+ dev->pts[1] = 0x00;
+ tmp = ptsreq->protocol;
+ while ((tmp = (tmp >> 1)) > 0)
+ dev->pts[1]++;
+ dev->proto = dev->pts[1]; /* Set new protocol */
+ dev->pts[1] = (0x01 << 4) | (dev->pts[1]);
+
+ /* Correct Fi/Di according to CM4000 Fi/Di table */
+ DEBUGP(5, dev, "Ta(1) from ATR is 0x%.2x\n", dev->ta1);
+ /* set Fi/Di according to ATR TA(1) */
+ dev->pts[2] = fi_di_table[dev->ta1 & 0x0F][(dev->ta1 >> 4) & 0x0F];
+
+ /* Calculate PCK character */
+ dev->pts[3] = dev->pts[0] ^ dev->pts[1] ^ dev->pts[2];
+
+ DEBUGP(5, dev, "pts0=%.2x, pts1=%.2x, pts2=%.2x, pts3=%.2x\n",
+ dev->pts[0], dev->pts[1], dev->pts[2], dev->pts[3]);
+
+ /* check card convention */
+ if (test_bit(IS_INVREV, &dev->flags))
+ str_invert_revert(dev->pts, 4);
+
+ /* reset SM */
+ xoutb(0x80, REG_FLAGS0(iobase));
+
+ /* Enable access to the message buffer */
+ DEBUGP(5, dev, "Enable access to the messages buffer\n");
+ dev->flags1 = 0x20 /* T_Active */
+ | (test_bit(IS_INVREV, &dev->flags) ? 0x02 : 0x00) /* inv parity */
+ | ((dev->baudv >> 8) & 0x01); /* MSB-baud */
+ xoutb(dev->flags1, REG_FLAGS1(iobase));
+
+ DEBUGP(5, dev, "Enable message buffer -> flags1 = 0x%.2x\n",
+ dev->flags1);
+
+ /* write challenge to the buffer */
+ DEBUGP(5, dev, "Write challenge to buffer: ");
+ for (i = 0; i < 4; i++) {
+ xoutb(i, REG_BUF_ADDR(iobase));
+ xoutb(dev->pts[i], REG_BUF_DATA(iobase)); /* buf data */
+#ifdef CM4000_DEBUG
+ pr_debug("0x%.2x ", dev->pts[i]);
+ }
+ pr_debug("\n");
+#else
+ }
+#endif
+
+ /* set number of bytes to write */
+ DEBUGP(5, dev, "Set number of bytes to write\n");
+ xoutb(0x04, REG_NUM_SEND(iobase));
+
+ /* Trigger CARDMAN CONTROLLER */
+ xoutb(0x50, REG_FLAGS0(iobase));
+
+ /* Monitor progress */
+ /* wait for xmit done */
+ DEBUGP(5, dev, "Waiting for NumRecBytes getting valid\n");
+
+ for (i = 0; i < 100; i++) {
+ if (inb(REG_FLAGS0(iobase)) & 0x08) {
+ DEBUGP(5, dev, "NumRecBytes is valid\n");
+ break;
+ }
+ mdelay(10);
+ }
+ if (i == 100) {
+ DEBUGP(5, dev, "Timeout waiting for NumRecBytes getting "
+ "valid\n");
+ rc = -EIO;
+ goto exit_setprotocol;
+ }
+
+ DEBUGP(5, dev, "Reading NumRecBytes\n");
+ for (i = 0; i < 100; i++) {
+ io_read_num_rec_bytes(iobase, &num_bytes_read);
+ if (num_bytes_read >= 4) {
+ DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read);
+ break;
+ }
+ mdelay(10);
+ }
+
+ /* a short PTS reply has only 3 bytes, so don't treat it as a timeout */
+ if (num_bytes_read == 3)
+ i = 0;
+
+ if (i == 100) {
+ DEBUGP(5, dev, "Timeout reading num_bytes_read\n");
+ rc = -EIO;
+ goto exit_setprotocol;
+ }
+
+ DEBUGP(5, dev, "Reset the CARDMAN CONTROLLER\n");
+ xoutb(0x80, REG_FLAGS0(iobase));
+
+ /* Read PPS reply */
+ DEBUGP(5, dev, "Read PPS reply\n");
+ for (i = 0; i < num_bytes_read; i++) {
+ xoutb(i, REG_BUF_ADDR(iobase));
+ pts_reply[i] = inb(REG_BUF_DATA(iobase));
+ }
+
+#ifdef CM4000_DEBUG
+ DEBUGP(2, dev, "PTSreply: ");
+ for (i = 0; i < num_bytes_read; i++) {
+ pr_debug("0x%.2x ", pts_reply[i]);
+ }
+ pr_debug("\n");
+#endif /* CM4000_DEBUG */
+
+ DEBUGP(5, dev, "Clear Tactive in Flags1\n");
+ xoutb(0x20, REG_FLAGS1(iobase));
+
+ /* Compare ptsreq and ptsreply */
+ if ((dev->pts[0] == pts_reply[0]) &&
+ (dev->pts[1] == pts_reply[1]) &&
+ (dev->pts[2] == pts_reply[2]) && (dev->pts[3] == pts_reply[3])) {
+ /* setcardparameter according to PPS */
+ dev->baudv = calc_baudv(dev->pts[2]);
+ set_cardparameter(dev);
+ } else if ((dev->pts[0] == pts_reply[0]) &&
+ ((dev->pts[1] & 0xef) == pts_reply[1]) &&
+ ((pts_reply[0] ^ pts_reply[1]) == pts_reply[2])) {
+ /* short PTS reply, set card parameter to default values */
+ dev->baudv = calc_baudv(0x11);
+ set_cardparameter(dev);
+ } else
+ rc = -EIO;
+
+exit_setprotocol:
+ DEBUGP(3, dev, "<- set_protocol\n");
+ return rc;
+}
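+
+/*
+ * Worked example of the PTS frame built above: requesting T=1 with
+ * the default TA(1) of 0x11 gives pts[0] = 0xFF, pts[1] = 0x11,
+ * pts[2] = fi_di_table[1][1] = 0x11 and pts[3] = 0xFF ^ 0x11 ^ 0x11
+ * = 0xFF, i.e. the frame FF 11 11 FF on the wire.
+ */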
+
+static int io_detect_cm4000(unsigned int iobase, struct cm4000_dev *dev)
+{
+
+ /* note: statemachine is assumed to be reset */
+ if (inb(REG_FLAGS0(iobase)) & 8) {
+ clear_bit(IS_ATR_VALID, &dev->flags);
+ set_bit(IS_CMM_ABSENT, &dev->flags);
+ return 0; /* detect CMM = 1 -> failure */
+ }
+ /* xoutb(0x40, REG_FLAGS1(iobase)); detectCMM */
+ xoutb(dev->flags1 | 0x40, REG_FLAGS1(iobase));
+ if ((inb(REG_FLAGS0(iobase)) & 8) == 0) {
+ clear_bit(IS_ATR_VALID, &dev->flags);
+ set_bit(IS_CMM_ABSENT, &dev->flags);
+ return 0; /* detect CMM=0 -> failure */
+ }
+ /* clear detectCMM again by restoring original flags1 */
+ xoutb(dev->flags1, REG_FLAGS1(iobase));
+ return 1;
+}
+
+static void terminate_monitor(struct cm4000_dev *dev)
+{
+
+ /* tell the monitor to stop and wait until
+ * it terminates.
+ */
+ DEBUGP(3, dev, "-> terminate_monitor\n");
+ wait_event_interruptible(dev->devq,
+ test_and_set_bit(LOCK_MONITOR,
+ (void *)&dev->flags));
+
+ /* now, LOCK_MONITOR has been set.
+ * allow a last cycle in the monitor.
+ * the monitor will indicate that it has
+ * finished by clearing this bit.
+ */
+ DEBUGP(5, dev, "Now allow last cycle of monitor!\n");
+ while (test_bit(LOCK_MONITOR, (void *)&dev->flags))
+ msleep(25);
+
+ DEBUGP(5, dev, "Delete timer\n");
+ del_timer_sync(&dev->timer);
+#ifdef CM4000_DEBUG
+ dev->monitor_running = 0;
+#endif
+
+ DEBUGP(3, dev, "<- terminate_monitor\n");
+}
+
+/*
+ * monitor the card every 50msec. as a side-effect, retrieve the
+ * atr once a card is inserted. another side-effect of retrieving the
+ * atr is that the card will be powered on, so there is no need to
+ * power on the card explicitly from the application: the driver
+ * is already doing that for you.
+ */
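+
+/*
+ * State flow of the monitor, as implemented below:
+ *	M_FETCH_ATR  -> M_TIMEOUT_WAIT -> M_READ_ATR_LEN -> M_READ_ATR
+ *	M_READ_ATR   -> M_ATR_PRESENT on a valid ATR, M_BAD_CARD otherwise
+ *	M_BAD_CARD   -> M_FETCH_ATR (retry after warning the user)
+ *	M_CARDOFF    -> M_FETCH_ATR (once power-down has completed)
+ */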
+
+static void monitor_card(unsigned long p)
+{
+ struct cm4000_dev *dev = (struct cm4000_dev *) p;
+ unsigned int iobase = dev->p_dev->resource[0]->start;
+ unsigned short s;
+ struct ptsreq ptsreq;
+ int i, atrc;
+
+ DEBUGP(7, dev, "-> monitor_card\n");
+
+ /* if someone has set the lock for us: we're done! */
+ if (test_and_set_bit(LOCK_MONITOR, &dev->flags)) {
+ DEBUGP(4, dev, "About to stop monitor\n");
+ /* no */
+ dev->rlen =
+ dev->rpos =
+ dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0;
+ dev->mstate = M_FETCH_ATR;
+ clear_bit(LOCK_MONITOR, &dev->flags);
+ /* close et al. are sleeping on devq, so wake it */
+ wake_up_interruptible(&dev->devq);
+ DEBUGP(2, dev, "<- monitor_card (we are done now)\n");
+ return;
+ }
+
+ /* try to lock io: if it is already locked, just add another timer */
+ if (test_and_set_bit(LOCK_IO, (void *)&dev->flags)) {
+ DEBUGP(4, dev, "Couldn't get IO lock\n");
+ goto return_with_timer;
+ }
+
+ /* is a card/a reader inserted at all ? */
+ dev->flags0 = xinb(REG_FLAGS0(iobase));
+ DEBUGP(7, dev, "dev->flags0 = 0x%2x\n", dev->flags0);
+ DEBUGP(7, dev, "smartcard present: %s\n",
+ dev->flags0 & 1 ? "yes" : "no");
+ DEBUGP(7, dev, "cardman present: %s\n",
+ dev->flags0 == 0xff ? "no" : "yes");
+
+ if ((dev->flags0 & 1) == 0 /* no smartcard inserted */
+ || dev->flags0 == 0xff) { /* no cardman inserted */
+ /* no */
+ dev->rlen =
+ dev->rpos =
+ dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0;
+ dev->mstate = M_FETCH_ATR;
+
+ dev->flags &= 0x000000ff; /* only keep IO and MONITOR locks */
+
+ if (dev->flags0 == 0xff) {
+ DEBUGP(4, dev, "set IS_CMM_ABSENT bit\n");
+ set_bit(IS_CMM_ABSENT, &dev->flags);
+ } else if (test_bit(IS_CMM_ABSENT, &dev->flags)) {
+ DEBUGP(4, dev, "clear IS_CMM_ABSENT bit "
+ "(card is removed)\n");
+ clear_bit(IS_CMM_ABSENT, &dev->flags);
+ }
+
+ goto release_io;
+ } else if ((dev->flags0 & 1) && test_bit(IS_CMM_ABSENT, &dev->flags)) {
+ /* cardman and card present but cardman was absent before
+ * (after suspend with inserted card) */
+ DEBUGP(4, dev, "clear IS_CMM_ABSENT bit (card is inserted)\n");
+ clear_bit(IS_CMM_ABSENT, &dev->flags);
+ }
+
+ if (test_bit(IS_ATR_VALID, &dev->flags) == 1) {
+ DEBUGP(7, dev, "believe ATR is already valid (do nothing)\n");
+ goto release_io;
+ }
+
+ switch (dev->mstate) {
+ unsigned char flags0;
+ case M_CARDOFF:
+ DEBUGP(4, dev, "M_CARDOFF\n");
+ flags0 = inb(REG_FLAGS0(iobase));
+ if (flags0 & 0x02) {
+ /* wait until Flags0 indicate power is off */
+ dev->mdelay = T_10MSEC;
+ } else {
+ /* Flags0 indicate power off and no card inserted now;
+ * Reset CARDMAN CONTROLLER */
+ xoutb(0x80, REG_FLAGS0(iobase));
+
+ /* prepare for fetching ATR again: after card off ATR
+ * is read again automatically */
+ dev->rlen =
+ dev->rpos =
+ dev->atr_csum =
+ dev->atr_len_retry = dev->cwarn = 0;
+ dev->mstate = M_FETCH_ATR;
+
+ /* minimal gap between CARDOFF and read ATR is 50msec */
+ dev->mdelay = T_50MSEC;
+ }
+ break;
+ case M_FETCH_ATR:
+ DEBUGP(4, dev, "M_FETCH_ATR\n");
+ xoutb(0x80, REG_FLAGS0(iobase));
+ DEBUGP(4, dev, "Reset BAUDV to 9600\n");
+ dev->baudv = 0x173; /* 9600 */
+ xoutb(0x02, REG_STOPBITS(iobase)); /* stopbits=2 */
+ xoutb(0x73, REG_BAUDRATE(iobase)); /* baud value */
+ xoutb(0x21, REG_FLAGS1(iobase)); /* T_Active=1, baud
+ value */
+ /* warm start vs. power on: */
+ xoutb(dev->flags0 & 2 ? 0x46 : 0x44, REG_FLAGS0(iobase));
+ dev->mdelay = T_40MSEC;
+ dev->mstate = M_TIMEOUT_WAIT;
+ break;
+ case M_TIMEOUT_WAIT:
+ DEBUGP(4, dev, "M_TIMEOUT_WAIT\n");
+ /* numRecBytes */
+ io_read_num_rec_bytes(iobase, &dev->atr_len);
+ dev->mdelay = T_10MSEC;
+ dev->mstate = M_READ_ATR_LEN;
+ break;
+ case M_READ_ATR_LEN:
+ DEBUGP(4, dev, "M_READ_ATR_LEN\n");
+ /* infinite loop possible, since there is no timeout */
+
+#define MAX_ATR_LEN_RETRY 100
+
+ if (dev->atr_len == io_read_num_rec_bytes(iobase, &s)) {
+ if (dev->atr_len_retry++ >= MAX_ATR_LEN_RETRY) { /* + XX msec */
+ dev->mdelay = T_10MSEC;
+ dev->mstate = M_READ_ATR;
+ }
+ } else {
+ dev->atr_len = s;
+ dev->atr_len_retry = 0; /* set new timeout */
+ }
+
+ DEBUGP(4, dev, "Current ATR_LEN = %i\n", dev->atr_len);
+ break;
+ case M_READ_ATR:
+ DEBUGP(4, dev, "M_READ_ATR\n");
+ xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */
+ for (i = 0; i < dev->atr_len; i++) {
+ xoutb(i, REG_BUF_ADDR(iobase));
+ dev->atr[i] = inb(REG_BUF_DATA(iobase));
+ }
+ /* Deactivate T_Active flags */
+ DEBUGP(4, dev, "Deactivate T_Active flags\n");
+ dev->flags1 = 0x01;
+ xoutb(dev->flags1, REG_FLAGS1(iobase));
+
+ /* atr is present (which doesn't mean it's valid) */
+ set_bit(IS_ATR_PRESENT, &dev->flags);
+ if (dev->atr[0] == 0x03)
+ str_invert_revert(dev->atr, dev->atr_len);
+ atrc = parse_atr(dev);
+ if (atrc == 0) { /* atr invalid */
+ dev->mdelay = 0;
+ dev->mstate = M_BAD_CARD;
+ } else {
+ dev->mdelay = T_50MSEC;
+ dev->mstate = M_ATR_PRESENT;
+ set_bit(IS_ATR_VALID, &dev->flags);
+ }
+
+ if (test_bit(IS_ATR_VALID, &dev->flags) == 1) {
+ DEBUGP(4, dev, "monitor_card: ATR valid\n");
+ /* if ta1 == 0x11, no PPS necessary (default values) */
+ /* do not do PPS with multi protocol cards */
+ if ((test_bit(IS_AUTOPPS_ACT, &dev->flags) == 0) &&
+ (dev->ta1 != 0x11) &&
+ !(test_bit(IS_ANY_T0, &dev->flags) &&
+ test_bit(IS_ANY_T1, &dev->flags))) {
+ DEBUGP(4, dev, "Perform AUTOPPS\n");
+ set_bit(IS_AUTOPPS_ACT, &dev->flags);
+ ptsreq.protocol = (0x01 << dev->proto);
+ ptsreq.flags = 0x01;
+ ptsreq.pts1 = 0x00;
+ ptsreq.pts2 = 0x00;
+ ptsreq.pts3 = 0x00;
+ if (set_protocol(dev, &ptsreq) == 0) {
+ DEBUGP(4, dev, "AUTOPPS ret SUCC\n");
+ clear_bit(IS_AUTOPPS_ACT, &dev->flags);
+ wake_up_interruptible(&dev->atrq);
+ } else {
+ DEBUGP(4, dev, "AUTOPPS failed: "
+ "repower using defaults\n");
+ /* prepare for repowering */
+ clear_bit(IS_ATR_PRESENT, &dev->flags);
+ clear_bit(IS_ATR_VALID, &dev->flags);
+ dev->rlen =
+ dev->rpos =
+ dev->atr_csum =
+ dev->atr_len_retry = dev->cwarn = 0;
+ dev->mstate = M_FETCH_ATR;
+
+ dev->mdelay = T_50MSEC;
+ }
+ } else {
+ /* for cards which use slightly different
+ * params (extra guard time) */
+ set_cardparameter(dev);
+ if (test_bit(IS_AUTOPPS_ACT, &dev->flags) == 1)
+ DEBUGP(4, dev, "AUTOPPS already active "
+ "2nd try:use default values\n");
+ if (dev->ta1 == 0x11)
+ DEBUGP(4, dev, "No AUTOPPS necessary "
+ "TA(1)==0x11\n");
+ if (test_bit(IS_ANY_T0, &dev->flags)
+ && test_bit(IS_ANY_T1, &dev->flags))
+ DEBUGP(4, dev, "Do NOT perform AUTOPPS "
+ "with multiprotocol cards\n");
+ clear_bit(IS_AUTOPPS_ACT, &dev->flags);
+ wake_up_interruptible(&dev->atrq);
+ }
+ } else {
+ DEBUGP(4, dev, "ATR invalid\n");
+ wake_up_interruptible(&dev->atrq);
+ }
+ break;
+ case M_BAD_CARD:
+ DEBUGP(4, dev, "M_BAD_CARD\n");
+ /* slow down warning, but prompt immediately after insertion */
+ if (dev->cwarn == 0 || dev->cwarn == 10) {
+ set_bit(IS_BAD_CARD, &dev->flags);
+ dev_warn(&dev->p_dev->dev, MODULE_NAME ": ");
+ if (test_bit(IS_BAD_CSUM, &dev->flags)) {
+ DEBUGP(4, dev, "ATR checksum (0x%.2x, should "
+ "be zero) failed\n", dev->atr_csum);
+ }
+#ifdef CM4000_DEBUG
+ else if (test_bit(IS_BAD_LENGTH, &dev->flags)) {
+ DEBUGP(4, dev, "ATR length error\n");
+ } else {
+ DEBUGP(4, dev, "card damaged or wrong way "
+ "inserted\n");
+ }
+#endif
+ dev->cwarn = 0;
+ wake_up_interruptible(&dev->atrq); /* wake open */
+ }
+ dev->cwarn++;
+ dev->mdelay = T_100MSEC;
+ dev->mstate = M_FETCH_ATR;
+ break;
+ default:
+ DEBUGP(7, dev, "Unknown action\n");
+ break; /* nothing */
+ }
+
+release_io:
+ DEBUGP(7, dev, "release_io\n");
+ clear_bit(LOCK_IO, &dev->flags);
+ wake_up_interruptible(&dev->ioq); /* whoever needs IO */
+
+return_with_timer:
+ DEBUGP(7, dev, "<- monitor_card (returns with timer)\n");
+ mod_timer(&dev->timer, jiffies + dev->mdelay);
+ clear_bit(LOCK_MONITOR, &dev->flags);
+}
+
+/* Interface to userland (file_operations) */
+
+static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct cm4000_dev *dev = filp->private_data;
+ unsigned int iobase = dev->p_dev->resource[0]->start;
+ ssize_t rc;
+ int i, j, k;
+
+ DEBUGP(2, dev, "-> cmm_read(%s,%d)\n", current->comm, current->pid);
+
+ if (count == 0) /* according to manpage */
+ return 0;
+
+ if (!pcmcia_dev_present(dev->p_dev) || /* device removed */
+ test_bit(IS_CMM_ABSENT, &dev->flags))
+ return -ENODEV;
+
+ if (test_bit(IS_BAD_CSUM, &dev->flags))
+ return -EIO;
+
+ /* also see the note about this in cmm_write */
+ if (wait_event_interruptible
+ (dev->atrq,
+ ((filp->f_flags & O_NONBLOCK)
+ || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) {
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ return -ERESTARTSYS;
+ }
+
+ if (test_bit(IS_ATR_VALID, &dev->flags) == 0)
+ return -EIO;
+
+ /* this one implements blocking IO */
+ if (wait_event_interruptible
+ (dev->readq,
+ ((filp->f_flags & O_NONBLOCK) || (dev->rpos < dev->rlen)))) {
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ return -ERESTARTSYS;
+ }
+
+ /* lock io */
+ if (wait_event_interruptible
+ (dev->ioq,
+ ((filp->f_flags & O_NONBLOCK)
+ || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) {
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ return -ERESTARTSYS;
+ }
+
+ rc = 0;
+ dev->flags0 = inb(REG_FLAGS0(iobase));
+ if ((dev->flags0 & 1) == 0 /* no smartcard inserted */
+ || dev->flags0 == 0xff) { /* no cardman inserted */
+ clear_bit(IS_ATR_VALID, &dev->flags);
+ if (dev->flags0 & 1) {
+ set_bit(IS_CMM_ABSENT, &dev->flags);
+ rc = -ENODEV;
+ } else {
+ rc = -EIO;
+ }
+ goto release_io;
+ }
+
+ DEBUGP(4, dev, "begin read answer\n");
+ j = min(count, (size_t)(dev->rlen - dev->rpos));
+ k = dev->rpos;
+ if (k + j > 255)
+ j = 256 - k;
+ DEBUGP(4, dev, "read1 j=%d\n", j);
+ for (i = 0; i < j; i++) {
+ xoutb(k++, REG_BUF_ADDR(iobase));
+ dev->rbuf[i] = xinb(REG_BUF_DATA(iobase));
+ }
+ j = min(count, (size_t)(dev->rlen - dev->rpos));
+ if (k + j > 255) {
+ DEBUGP(4, dev, "read2 j=%d\n", j);
+ dev->flags1 |= 0x10; /* MSB buf addr set */
+ xoutb(dev->flags1, REG_FLAGS1(iobase));
+ for (; i < j; i++) {
+ xoutb(k++, REG_BUF_ADDR(iobase));
+ dev->rbuf[i] = xinb(REG_BUF_DATA(iobase));
+ }
+ }
+
+ if (dev->proto == 0 && count > dev->rlen - dev->rpos && i) {
+ DEBUGP(4, dev, "T=0 and count > buffer\n");
+ dev->rbuf[i] = dev->rbuf[i - 1];
+ dev->rbuf[i - 1] = dev->procbyte;
+ j++;
+ }
+ count = j;
+
+ dev->rpos = dev->rlen + 1;
+
+ /* Clear T1Active */
+ DEBUGP(4, dev, "Clear T1Active\n");
+ dev->flags1 &= 0xdf;
+ xoutb(dev->flags1, REG_FLAGS1(iobase));
+
+ xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */
+ /* last check before exit */
+ if (!io_detect_cm4000(iobase, dev)) {
+ rc = -ENODEV;
+ goto release_io;
+ }
+
+ if (test_bit(IS_INVREV, &dev->flags) && count > 0)
+ str_invert_revert(dev->rbuf, count);
+
+ if (copy_to_user(buf, dev->rbuf, count))
+ rc = -EFAULT;
+
+release_io:
+ clear_bit(LOCK_IO, &dev->flags);
+ wake_up_interruptible(&dev->ioq);
+
+ DEBUGP(2, dev, "<- cmm_read returns: rc = %Zi\n",
+ (rc < 0 ? rc : count));
+ return rc < 0 ? rc : count;
+}
+
+static ssize_t cmm_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct cm4000_dev *dev = filp->private_data;
+ unsigned int iobase = dev->p_dev->resource[0]->start;
+ unsigned short s;
+ unsigned char tmp;
+ unsigned char infolen;
+ unsigned char sendT0;
+ unsigned short nsend;
+ unsigned short nr;
+ ssize_t rc;
+ int i;
+
+ DEBUGP(2, dev, "-> cmm_write(%s,%d)\n", current->comm, current->pid);
+
+ if (count == 0) /* according to manpage */
+ return 0;
+
+ if (dev->proto == 0 && count < 4) {
+ /* T0 must have at least 4 bytes */
+ DEBUGP(4, dev, "T0 short write\n");
+ return -EIO;
+ }
+
+ nr = count & 0x1ff; /* max bytes to write */
+
+ sendT0 = dev->proto ? 0 : nr > 5 ? 0x08 : 0;
+
+ if (!pcmcia_dev_present(dev->p_dev) || /* device removed */
+ test_bit(IS_CMM_ABSENT, &dev->flags))
+ return -ENODEV;
+
+ if (test_bit(IS_BAD_CSUM, &dev->flags)) {
+ DEBUGP(4, dev, "bad csum\n");
+ return -EIO;
+ }
+
+ /*
+ * wait for atr to become valid.
+ * note: it is important to lock this code. if we don't, the monitor
+ * could be run between test_bit and the call to sleep on the
+ * atr-queue. if *then* the monitor detects atr valid, it will wake up
+ * any process on the atr-queue, *but* since we have been interrupted,
+ * we do not yet sleep on this queue. this would result in a missed
+ * wake_up and the calling process would sleep forever (until
+ * interrupted). also, do *not* restore_flags before sleep_on, because
+ * this could result in the same situation!
+ */
+ if (wait_event_interruptible
+ (dev->atrq,
+ ((filp->f_flags & O_NONBLOCK)
+ || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) {
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ return -ERESTARTSYS;
+ }
+
+ if (test_bit(IS_ATR_VALID, &dev->flags) == 0) { /* invalid atr */
+ DEBUGP(4, dev, "invalid ATR\n");
+ return -EIO;
+ }
+
+ /* lock io */
+ if (wait_event_interruptible
+ (dev->ioq,
+ ((filp->f_flags & O_NONBLOCK)
+ || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) {
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ return -ERESTARTSYS;
+ }
+
+ if (copy_from_user(dev->sbuf, buf, ((count > 512) ? 512 : count)))
+ return -EFAULT;
+
+ rc = 0;
+ dev->flags0 = inb(REG_FLAGS0(iobase));
+ if ((dev->flags0 & 1) == 0 /* no smartcard inserted */
+ || dev->flags0 == 0xff) { /* no cardman inserted */
+ clear_bit(IS_ATR_VALID, &dev->flags);
+ if (dev->flags0 & 1) {
+ set_bit(IS_CMM_ABSENT, &dev->flags);
+ rc = -ENODEV;
+ } else {
+ DEBUGP(4, dev, "IO error\n");
+ rc = -EIO;
+ }
+ goto release_io;
+ }
+
+ xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */
+
+ if (!io_detect_cm4000(iobase, dev)) {
+ rc = -ENODEV;
+ goto release_io;
+ }
+
+ /* reflect T=0 send/read mode in flags1 */
+ dev->flags1 |= (sendT0);
+
+ set_cardparameter(dev);
+
+ /* dummy read, reset flag procedure received */
+ tmp = inb(REG_FLAGS1(iobase));
+
+ dev->flags1 = 0x20 /* T_Active */
+ | (sendT0)
+ | (test_bit(IS_INVREV, &dev->flags) ? 2 : 0)/* inverse parity */
+ | (((dev->baudv - 1) & 0x0100) >> 8); /* MSB-Baud */
+ DEBUGP(1, dev, "set dev->flags1 = 0x%.2x\n", dev->flags1);
+ xoutb(dev->flags1, REG_FLAGS1(iobase));
+
+ /* xmit data */
+ DEBUGP(4, dev, "Xmit data\n");
+ for (i = 0; i < nr; i++) {
+ if (i >= 256) {
+ dev->flags1 = 0x20 /* T_Active */
+ | (sendT0) /* SendT0 */
+ /* inverse parity: */
+ | (test_bit(IS_INVREV, &dev->flags) ? 2 : 0)
+ | (((dev->baudv - 1) & 0x0100) >> 8) /* MSB-Baud */
+ | 0x10; /* set address high */
+ DEBUGP(4, dev, "dev->flags = 0x%.2x - set address "
+ "high\n", dev->flags1);
+ xoutb(dev->flags1, REG_FLAGS1(iobase));
+ }
+ if (test_bit(IS_INVREV, &dev->flags)) {
+ DEBUGP(4, dev, "Apply inverse convention for 0x%.2x "
+ "-> 0x%.2x\n", (unsigned char)dev->sbuf[i],
+ invert_revert(dev->sbuf[i]));
+ xoutb(i, REG_BUF_ADDR(iobase));
+ xoutb(invert_revert(dev->sbuf[i]),
+ REG_BUF_DATA(iobase));
+ } else {
+ xoutb(i, REG_BUF_ADDR(iobase));
+ xoutb(dev->sbuf[i], REG_BUF_DATA(iobase));
+ }
+ }
+ DEBUGP(4, dev, "Xmit done\n");
+
+ if (dev->proto == 0) {
+ /* T=0 proto: 0 byte reply */
+ if (nr == 4) {
+ DEBUGP(4, dev, "T=0 assumes 0 byte reply\n");
+ xoutb(i, REG_BUF_ADDR(iobase));
+ if (test_bit(IS_INVREV, &dev->flags))
+ xoutb(0xff, REG_BUF_DATA(iobase));
+ else
+ xoutb(0x00, REG_BUF_DATA(iobase));
+ }
+
+ /* numSendBytes */
+ if (sendT0)
+ nsend = nr;
+ else {
+ if (nr == 4)
+ nsend = 5;
+ else {
+ nsend = 5 + (unsigned char)dev->sbuf[4];
+ if (dev->sbuf[4] == 0)
+ nsend += 0x100;
+ }
+ }
+ } else
+ nsend = nr;
+
+ /* T0: output procedure byte */
+ if (test_bit(IS_INVREV, &dev->flags)) {
+ DEBUGP(4, dev, "T=0 set Procedure byte (inverse-reverse) "
+ "0x%.2x\n", invert_revert(dev->sbuf[1]));
+ xoutb(invert_revert(dev->sbuf[1]), REG_NUM_BYTES(iobase));
+ } else {
+ DEBUGP(4, dev, "T=0 set Procedure byte 0x%.2x\n", dev->sbuf[1]);
+ xoutb(dev->sbuf[1], REG_NUM_BYTES(iobase));
+ }
+
+ DEBUGP(1, dev, "set NumSendBytes = 0x%.2x\n",
+ (unsigned char)(nsend & 0xff));
+ xoutb((unsigned char)(nsend & 0xff), REG_NUM_SEND(iobase));
+
+ DEBUGP(1, dev, "Trigger CARDMAN CONTROLLER (0x%.2x)\n",
+ 0x40 /* SM_Active */
+ | (dev->flags0 & 2 ? 0 : 4) /* power on if needed */
+ |(dev->proto ? 0x10 : 0x08) /* T=1/T=0 */
+ |(nsend & 0x100) >> 8 /* MSB numSendBytes */ );
+ xoutb(0x40 /* SM_Active */
+ | (dev->flags0 & 2 ? 0 : 4) /* power on if needed */
+ |(dev->proto ? 0x10 : 0x08) /* T=1/T=0 */
+ |(nsend & 0x100) >> 8, /* MSB numSendBytes */
+ REG_FLAGS0(iobase));
+
+ /* wait for xmit done */
+ if (dev->proto == 1) {
+ DEBUGP(4, dev, "Wait for xmit done\n");
+ for (i = 0; i < 1000; i++) {
+ if (inb(REG_FLAGS0(iobase)) & 0x08)
+ break;
+ msleep_interruptible(10);
+ }
+ if (i == 1000) {
+ DEBUGP(4, dev, "timeout waiting for xmit done\n");
+ rc = -EIO;
+ goto release_io;
+ }
+ }
+
+ /* T=1: wait for infoLen */
+
+ infolen = 0;
+ if (dev->proto) {
+ /* wait until infoLen is valid */
+ for (i = 0; i < 6000; i++) { /* max waiting time of 1 min */
+ io_read_num_rec_bytes(iobase, &s);
+ if (s >= 3) {
+ infolen = inb(REG_FLAGS1(iobase));
+ DEBUGP(4, dev, "infolen=%d\n", infolen);
+ break;
+ }
+ msleep_interruptible(10);
+ }
+ if (i == 6000) {
+ DEBUGP(4, dev, "timeout waiting for infoLen\n");
+ rc = -EIO;
+ goto release_io;
+ }
+ } else
+ clear_bit(IS_PROCBYTE_PRESENT, &dev->flags);
+
+ /* numRecBytes | bit 9 of numRecBytes */
+ io_read_num_rec_bytes(iobase, &dev->rlen);
+ for (i = 0; i < 600; i++) { /* max waiting time of 2 sec */
+ if (dev->proto) {
+ if (dev->rlen >= infolen + 4)
+ break;
+ }
+ msleep_interruptible(10);
+ /* numRecBytes | bit 9 of numRecBytes */
+ io_read_num_rec_bytes(iobase, &s);
+ if (s > dev->rlen) {
+ DEBUGP(1, dev, "NumRecBytes inc (reset timeout)\n");
+ i = 0; /* reset timeout */
+ dev->rlen = s;
+ }
+ /* T=0: we are done when numRecBytes doesn't
+ * increment any more and NoProcedureByte
+ * is set and numRecBytes == bytes sent + 6
+ * (header bytes + data + 1 for sw2)
+ * except when the card replies an error
+ * which means, no data will be sent back.
+ */
+ else if (dev->proto == 0) {
+ if ((inb(REG_BUF_ADDR(iobase)) & 0x80)) {
+ /* no procedure byte received since last read */
+ DEBUGP(1, dev, "NoProcedure byte set\n");
+ /* i=0; */
+ } else {
+ /* procedure byte received since last read */
+ DEBUGP(1, dev, "NoProcedure byte unset "
+ "(reset timeout)\n");
+ dev->procbyte = inb(REG_FLAGS1(iobase));
+ DEBUGP(1, dev, "Read procedure byte 0x%.2x\n",
+ dev->procbyte);
+ i = 0; /* reset timeout */
+ }
+ if (inb(REG_FLAGS0(iobase)) & 0x08) {
+ DEBUGP(1, dev, "T0Done flag (read reply)\n");
+ break;
+ }
+ }
+ if (dev->proto)
+ infolen = inb(REG_FLAGS1(iobase));
+ }
+ if (i == 600) {
+ DEBUGP(1, dev, "timeout waiting for numRecBytes\n");
+ rc = -EIO;
+ goto release_io;
+ } else {
+ if (dev->proto == 0) {
+ DEBUGP(1, dev, "Wait for T0Done bit to be set\n");
+ for (i = 0; i < 1000; i++) {
+ if (inb(REG_FLAGS0(iobase)) & 0x08)
+ break;
+ msleep_interruptible(10);
+ }
+ if (i == 1000) {
+ DEBUGP(1, dev, "timeout waiting for T0Done\n");
+ rc = -EIO;
+ goto release_io;
+ }
+
+ dev->procbyte = inb(REG_FLAGS1(iobase));
+ DEBUGP(4, dev, "Read procedure byte 0x%.2x\n",
+ dev->procbyte);
+
+ io_read_num_rec_bytes(iobase, &dev->rlen);
+ DEBUGP(4, dev, "Read NumRecBytes = %i\n", dev->rlen);
+
+ }
+ }
+ /* T=1: read offset=zero, T=0: read offset=after challenge */
+ dev->rpos = dev->proto ? 0 : nr == 4 ? 5 : nr > dev->rlen ? 5 : nr;
+ DEBUGP(4, dev, "dev->rlen = %i, dev->rpos = %i, nr = %i\n",
+ dev->rlen, dev->rpos, nr);
+
+release_io:
+ DEBUGP(4, dev, "Reset SM\n");
+ xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */
+
+ if (rc < 0) {
+ DEBUGP(4, dev, "Write failed but clear T_Active\n");
+ dev->flags1 &= 0xdf;
+ xoutb(dev->flags1, REG_FLAGS1(iobase));
+ }
+
+ clear_bit(LOCK_IO, &dev->flags);
+ wake_up_interruptible(&dev->ioq);
+ wake_up_interruptible(&dev->readq); /* tell read we have data */
+
+ /* ITSEC E2: clear write buffer */
+ memset((char *)dev->sbuf, 0, 512);
+
+ /* return error or actually written bytes */
+ DEBUGP(2, dev, "<- cmm_write\n");
+ return rc < 0 ? rc : nr;
+}
+
+static void start_monitor(struct cm4000_dev *dev)
+{
+ DEBUGP(3, dev, "-> start_monitor\n");
+ if (!dev->monitor_running) {
+ DEBUGP(5, dev, "create, init and add timer\n");
+ setup_timer(&dev->timer, monitor_card, (unsigned long)dev);
+ dev->monitor_running = 1;
+ mod_timer(&dev->timer, jiffies);
+ } else
+ DEBUGP(5, dev, "monitor already running\n");
+ DEBUGP(3, dev, "<- start_monitor\n");
+}
+
+static void stop_monitor(struct cm4000_dev *dev)
+{
+ DEBUGP(3, dev, "-> stop_monitor\n");
+ if (dev->monitor_running) {
+ DEBUGP(5, dev, "stopping monitor\n");
+ terminate_monitor(dev);
+ /* reset monitor SM */
+ clear_bit(IS_ATR_VALID, &dev->flags);
+ clear_bit(IS_ATR_PRESENT, &dev->flags);
+ } else
+ DEBUGP(5, dev, "monitor already stopped\n");
+ DEBUGP(3, dev, "<- stop_monitor\n");
+}
+
+static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct cm4000_dev *dev = filp->private_data;
+ unsigned int iobase = dev->p_dev->resource[0]->start;
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ struct pcmcia_device *link;
+ int size;
+ int rc;
+ void __user *argp = (void __user *)arg;
+#ifdef CM4000_DEBUG
+ char *ioctl_names[CM_IOC_MAXNR + 1] = {
+ [_IOC_NR(CM_IOCGSTATUS)] "CM_IOCGSTATUS",
+ [_IOC_NR(CM_IOCGATR)] "CM_IOCGATR",
+ [_IOC_NR(CM_IOCARDOFF)] "CM_IOCARDOFF",
+ [_IOC_NR(CM_IOCSPTS)] "CM_IOCSPTS",
+ [_IOC_NR(CM_IOSDBGLVL)] "CM4000_DBGLVL",
+ };
+ DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode),
+ iminor(inode), ioctl_names[_IOC_NR(cmd)]);
+#endif
+
+ mutex_lock(&cmm_mutex);
+ rc = -ENODEV;
+ link = dev_table[iminor(inode)];
+ if (!pcmcia_dev_present(link)) {
+ DEBUGP(4, dev, "DEV_OK false\n");
+ goto out;
+ }
+
+ if (test_bit(IS_CMM_ABSENT, &dev->flags)) {
+ DEBUGP(4, dev, "CMM_ABSENT flag set\n");
+ goto out;
+ }
+ rc = -EINVAL;
+
+ if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) {
+ DEBUGP(4, dev, "ioctype mismatch\n");
+ goto out;
+ }
+ if (_IOC_NR(cmd) > CM_IOC_MAXNR) {
+ DEBUGP(4, dev, "iocnr mismatch\n");
+ goto out;
+ }
+ size = _IOC_SIZE(cmd);
+ rc = -EFAULT;
+ DEBUGP(4, dev, "iocdir=%.4x iocr=%.4x iocw=%.4x iocsize=%d cmd=%.4x\n",
+ _IOC_DIR(cmd), _IOC_READ, _IOC_WRITE, size, cmd);
+
+ if (_IOC_DIR(cmd) & _IOC_READ) {
+ if (!access_ok(VERIFY_WRITE, argp, size))
+ goto out;
+ }
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ if (!access_ok(VERIFY_READ, argp, size))
+ goto out;
+ }
+ rc = 0;
+
+ switch (cmd) {
+ case CM_IOCGSTATUS:
+ DEBUGP(4, dev, " ... in CM_IOCGSTATUS\n");
+ {
+ int status;
+
+ /* clear other bits, but leave inserted & powered as
+ * they are */
+ status = dev->flags0 & 3;
+ if (test_bit(IS_ATR_PRESENT, &dev->flags))
+ status |= CM_ATR_PRESENT;
+ if (test_bit(IS_ATR_VALID, &dev->flags))
+ status |= CM_ATR_VALID;
+ if (test_bit(IS_CMM_ABSENT, &dev->flags))
+ status |= CM_NO_READER;
+ if (test_bit(IS_BAD_CARD, &dev->flags))
+ status |= CM_BAD_CARD;
+ if (copy_to_user(argp, &status, sizeof(int)))
+ rc = -EFAULT;
+ }
+ break;
+ case CM_IOCGATR:
+ DEBUGP(4, dev, "... in CM_IOCGATR\n");
+ {
+ struct atreq __user *atreq = argp;
+ int tmp;
+			/* allow non-blocking I/O and interruption by signals */
+ if (wait_event_interruptible
+ (dev->atrq,
+ ((filp->f_flags & O_NONBLOCK)
+ || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags)
+ != 0)))) {
+ if (filp->f_flags & O_NONBLOCK)
+ rc = -EAGAIN;
+ else
+ rc = -ERESTARTSYS;
+ break;
+ }
+
+ rc = -EFAULT;
+ if (test_bit(IS_ATR_VALID, &dev->flags) == 0) {
+ tmp = -1;
+ if (copy_to_user(&(atreq->atr_len), &tmp,
+ sizeof(int)))
+ break;
+ } else {
+ if (copy_to_user(atreq->atr, dev->atr,
+ dev->atr_len))
+ break;
+
+ tmp = dev->atr_len;
+ if (copy_to_user(&(atreq->atr_len), &tmp, sizeof(int)))
+ break;
+ }
+ rc = 0;
+ break;
+ }
+ case CM_IOCARDOFF:
+
+#ifdef CM4000_DEBUG
+ DEBUGP(4, dev, "... in CM_IOCARDOFF\n");
+ if (dev->flags0 & 0x01) {
+ DEBUGP(4, dev, " Card inserted\n");
+ } else {
+ DEBUGP(2, dev, " No card inserted\n");
+ }
+ if (dev->flags0 & 0x02) {
+ DEBUGP(4, dev, " Card powered\n");
+ } else {
+ DEBUGP(2, dev, " Card not powered\n");
+ }
+#endif
+
+ /* is a card inserted and powered? */
+ if ((dev->flags0 & 0x01) && (dev->flags0 & 0x02)) {
+
+ /* get IO lock */
+ if (wait_event_interruptible
+ (dev->ioq,
+ ((filp->f_flags & O_NONBLOCK)
+ || (test_and_set_bit(LOCK_IO, (void *)&dev->flags)
+ == 0)))) {
+ if (filp->f_flags & O_NONBLOCK)
+ rc = -EAGAIN;
+ else
+ rc = -ERESTARTSYS;
+ break;
+ }
+ /* Set Flags0 = 0x42 */
+			DEBUGP(4, dev, "Set Flags0=0x42\n");
+ xoutb(0x42, REG_FLAGS0(iobase));
+ clear_bit(IS_ATR_PRESENT, &dev->flags);
+ clear_bit(IS_ATR_VALID, &dev->flags);
+ dev->mstate = M_CARDOFF;
+ clear_bit(LOCK_IO, &dev->flags);
+ if (wait_event_interruptible
+ (dev->atrq,
+ ((filp->f_flags & O_NONBLOCK)
+ || (test_bit(IS_ATR_VALID, (void *)&dev->flags) !=
+ 0)))) {
+ if (filp->f_flags & O_NONBLOCK)
+ rc = -EAGAIN;
+ else
+ rc = -ERESTARTSYS;
+ break;
+ }
+ }
+ /* release lock */
+ clear_bit(LOCK_IO, &dev->flags);
+ wake_up_interruptible(&dev->ioq);
+
+ rc = 0;
+ break;
+ case CM_IOCSPTS:
+ {
+ struct ptsreq krnptsreq;
+
+ if (copy_from_user(&krnptsreq, argp,
+ sizeof(struct ptsreq))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ rc = 0;
+ DEBUGP(4, dev, "... in CM_IOCSPTS\n");
+ /* wait for ATR to get valid */
+ if (wait_event_interruptible
+ (dev->atrq,
+ ((filp->f_flags & O_NONBLOCK)
+ || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags)
+ != 0)))) {
+ if (filp->f_flags & O_NONBLOCK)
+ rc = -EAGAIN;
+ else
+ rc = -ERESTARTSYS;
+ break;
+ }
+ /* get IO lock */
+ if (wait_event_interruptible
+ (dev->ioq,
+ ((filp->f_flags & O_NONBLOCK)
+ || (test_and_set_bit(LOCK_IO, (void *)&dev->flags)
+ == 0)))) {
+ if (filp->f_flags & O_NONBLOCK)
+ rc = -EAGAIN;
+ else
+ rc = -ERESTARTSYS;
+ break;
+ }
+
+ if ((rc = set_protocol(dev, &krnptsreq)) != 0) {
+ /* auto power_on again */
+ dev->mstate = M_FETCH_ATR;
+ clear_bit(IS_ATR_VALID, &dev->flags);
+ }
+ /* release lock */
+ clear_bit(LOCK_IO, &dev->flags);
+ wake_up_interruptible(&dev->ioq);
+
+ }
+ break;
+#ifdef CM4000_DEBUG
+ case CM_IOSDBGLVL:
+ rc = -ENOTTY;
+ break;
+#endif
+ default:
+ DEBUGP(4, dev, "... in default (unknown IOCTL code)\n");
+ rc = -ENOTTY;
+ }
+out:
+ mutex_unlock(&cmm_mutex);
+ return rc;
+}
+
+static int cmm_open(struct inode *inode, struct file *filp)
+{
+ struct cm4000_dev *dev;
+ struct pcmcia_device *link;
+ int minor = iminor(inode);
+ int ret;
+
+ if (minor >= CM4000_MAX_DEV)
+ return -ENODEV;
+
+ mutex_lock(&cmm_mutex);
+ link = dev_table[minor];
+ if (link == NULL || !pcmcia_dev_present(link)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (link->open) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ dev = link->priv;
+ filp->private_data = dev;
+
+ DEBUGP(2, dev, "-> cmm_open(device=%d.%d process=%s,%d)\n",
+ imajor(inode), minor, current->comm, current->pid);
+
+	/* init device variables; they may be "polluted" after a close,
+	 * or the device may never have been closed (i.e. a previous
+	 * open failed)
+	 */
+
+ ZERO_DEV(dev);
+
+	/* opening will always block, since open starts the
+	 * monitor and we then have to wait for the ATR to
+	 * become valid, i.e. block until the ATR is valid
+	 * (or a card is inserted)
+	 */
+ if (filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto out;
+ }
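+	/* i.e. O_NONBLOCK opens are rejected outright with EAGAIN:
+	 * meaningful use of the device requires waiting for a valid
+	 * ATR first (see the comment above). */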
+
+ dev->mdelay = T_50MSEC;
+
+ /* start monitoring the cardstatus */
+ start_monitor(dev);
+
+ link->open = 1; /* only one open per device */
+
+ DEBUGP(2, dev, "<- cmm_open\n");
+ ret = nonseekable_open(inode, filp);
+out:
+ mutex_unlock(&cmm_mutex);
+ return ret;
+}
+
+static int cmm_close(struct inode *inode, struct file *filp)
+{
+ struct cm4000_dev *dev;
+ struct pcmcia_device *link;
+ int minor = iminor(inode);
+
+ if (minor >= CM4000_MAX_DEV)
+ return -ENODEV;
+
+ link = dev_table[minor];
+ if (link == NULL)
+ return -ENODEV;
+
+ dev = link->priv;
+
+ DEBUGP(2, dev, "-> cmm_close(maj/min=%d.%d)\n",
+ imajor(inode), minor);
+
+ stop_monitor(dev);
+
+ ZERO_DEV(dev);
+
+ link->open = 0; /* only one open per device */
+ wake_up(&dev->devq); /* socket removed? */
+
+ DEBUGP(2, dev, "cmm_close\n");
+ return 0;
+}
+
+static void cmm_cm4000_release(struct pcmcia_device * link)
+{
+ struct cm4000_dev *dev = link->priv;
+
+	/* don't terminate the monitor; rather, rely on
+	 * close doing that for us.
+	 */
+ DEBUGP(3, dev, "-> cmm_cm4000_release\n");
+ while (link->open) {
+ printk(KERN_INFO MODULE_NAME ": delaying release until "
+ "process has terminated\n");
+		/* note: don't interrupt us; the applications that
+		 * own the device must be closed _first_!
+		 */
+ wait_event(dev->devq, (link->open == 0));
+ }
+ /* dev->devq=NULL; this cannot be zeroed earlier */
+ DEBUGP(3, dev, "<- cmm_cm4000_release\n");
+ return;
+}
+
+/*==== Interface to PCMCIA Layer =======================================*/
+
+static int cm4000_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ return pcmcia_request_io(p_dev);
+}
+
+static int cm4000_config(struct pcmcia_device * link, int devno)
+{
+ struct cm4000_dev *dev;
+
+ link->config_flags |= CONF_AUTO_SET_IO;
+
+ /* read the config-tuples */
+ if (pcmcia_loop_config(link, cm4000_config_check, NULL))
+ goto cs_release;
+
+ if (pcmcia_enable_device(link))
+ goto cs_release;
+
+ dev = link->priv;
+
+ return 0;
+
+cs_release:
+ cm4000_release(link);
+ return -ENODEV;
+}
+
+static int cm4000_suspend(struct pcmcia_device *link)
+{
+ struct cm4000_dev *dev;
+
+ dev = link->priv;
+ stop_monitor(dev);
+
+ return 0;
+}
+
+static int cm4000_resume(struct pcmcia_device *link)
+{
+ struct cm4000_dev *dev;
+
+ dev = link->priv;
+ if (link->open)
+ start_monitor(dev);
+
+ return 0;
+}
+
+static void cm4000_release(struct pcmcia_device *link)
+{
+ cmm_cm4000_release(link); /* delay release until device closed */
+ pcmcia_disable_device(link);
+}
+
+static int cm4000_probe(struct pcmcia_device *link)
+{
+ struct cm4000_dev *dev;
+ int i, ret;
+
+ for (i = 0; i < CM4000_MAX_DEV; i++)
+ if (dev_table[i] == NULL)
+ break;
+
+ if (i == CM4000_MAX_DEV) {
+ printk(KERN_NOTICE MODULE_NAME ": all devices in use\n");
+ return -ENODEV;
+ }
+
+ /* create a new cm4000_cs device */
+ dev = kzalloc(sizeof(struct cm4000_dev), GFP_KERNEL);
+ if (dev == NULL)
+ return -ENOMEM;
+
+ dev->p_dev = link;
+ link->priv = dev;
+ dev_table[i] = link;
+
+ init_waitqueue_head(&dev->devq);
+ init_waitqueue_head(&dev->ioq);
+ init_waitqueue_head(&dev->atrq);
+ init_waitqueue_head(&dev->readq);
+
+ ret = cm4000_config(link, i);
+ if (ret) {
+ dev_table[i] = NULL;
+ kfree(dev);
+ return ret;
+ }
+
+ device_create(cmm_class, NULL, MKDEV(major, i), NULL, "cmm%d", i);
+
+ return 0;
+}
+
+static void cm4000_detach(struct pcmcia_device *link)
+{
+ struct cm4000_dev *dev = link->priv;
+ int devno;
+
+ /* find device */
+ for (devno = 0; devno < CM4000_MAX_DEV; devno++)
+ if (dev_table[devno] == link)
+ break;
+ if (devno == CM4000_MAX_DEV)
+ return;
+
+ stop_monitor(dev);
+
+ cm4000_release(link);
+
+ dev_table[devno] = NULL;
+ kfree(dev);
+
+ device_destroy(cmm_class, MKDEV(major, devno));
+
+ return;
+}
+
+static const struct file_operations cm4000_fops = {
+ .owner = THIS_MODULE,
+ .read = cmm_read,
+ .write = cmm_write,
+ .unlocked_ioctl = cmm_ioctl,
+ .open = cmm_open,
+	.release = cmm_close,
+ .llseek = no_llseek,
+};
+
+static const struct pcmcia_device_id cm4000_ids[] = {
+ PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0002),
+ PCMCIA_DEVICE_PROD_ID12("CardMan", "4000", 0x2FB368CA, 0xA2BD8C39),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, cm4000_ids);
+
+static struct pcmcia_driver cm4000_driver = {
+ .owner = THIS_MODULE,
+ .name = "cm4000_cs",
+ .probe = cm4000_probe,
+ .remove = cm4000_detach,
+ .suspend = cm4000_suspend,
+ .resume = cm4000_resume,
+ .id_table = cm4000_ids,
+};
+
+static int __init cmm_init(void)
+{
+ int rc;
+
+ cmm_class = class_create(THIS_MODULE, "cardman_4000");
+ if (IS_ERR(cmm_class))
+ return PTR_ERR(cmm_class);
+
+ major = register_chrdev(0, DEVICE_NAME, &cm4000_fops);
+ if (major < 0) {
+ printk(KERN_WARNING MODULE_NAME
+ ": could not get major number\n");
+ class_destroy(cmm_class);
+ return major;
+ }
+
+ rc = pcmcia_register_driver(&cm4000_driver);
+ if (rc < 0) {
+ unregister_chrdev(major, DEVICE_NAME);
+ class_destroy(cmm_class);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void __exit cmm_exit(void)
+{
+ pcmcia_unregister_driver(&cm4000_driver);
+ unregister_chrdev(major, DEVICE_NAME);
+ class_destroy(cmm_class);
+}
+
+module_init(cmm_init);
+module_exit(cmm_exit);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
new file mode 100644
index 00000000..8dd48a2b
--- /dev/null
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -0,0 +1,687 @@
+/*
+ * A driver for the Omnikey PCMCIA smartcard reader CardMan 4040
+ *
+ * (c) 2000-2004 Omnikey AG (http://www.omnikey.com/)
+ *
+ * (C) 2005-2006 Harald Welte <laforge@gnumonks.org>
+ * - add support for poll()
+ * - driver cleanup
+ * - add waitqueues
+ * - adhere to linux kernel coding style and policies
+ * - support 2.6.13 "new style" pcmcia interface
+ * - add class interface for udev device creation
+ *
+ * The device is basically a USB CCID-compliant device that has
+ * been attached to an I/O-mapped FIFO.
+ *
+ * All rights reserved, Dual BSD/GPL Licensed.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+#include "cm4040_cs.h"
+
+
+#define reader_to_dev(x) (&(x)->p_dev->dev)
+
+/* n (debug level) is ignored */
+/* additional debug output may be enabled by re-compiling with
+ * CM4040_DEBUG set */
+/* #define CM4040_DEBUG */
+#define DEBUGP(n, rdr, x, args...) do { \
+ dev_dbg(reader_to_dev(rdr), "%s:" x, \
+ __func__ , ## args); \
+ } while (0)
+
+static DEFINE_MUTEX(cm4040_mutex);
+
+#define CCID_DRIVER_BULK_DEFAULT_TIMEOUT (150*HZ)
+#define CCID_DRIVER_ASYNC_POWERUP_TIMEOUT (35*HZ)
+#define CCID_DRIVER_MINIMUM_TIMEOUT (3*HZ)
+#define READ_WRITE_BUFFER_SIZE 512
+#define POLL_LOOP_COUNT 1000
+
+/* how often to poll for fifo status change */
+#define POLL_PERIOD msecs_to_jiffies(10)
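+/* The reader is driven purely by polling: reader_config() requests no
+ * interrupt, so the buffer status register is sampled every POLL_PERIOD
+ * by poll_timer (armed in cm4040_open, serviced by cm4040_do_poll). */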
+
+static void reader_release(struct pcmcia_device *link);
+
+static int major;
+static struct class *cmx_class;
+
+#define BS_READABLE 0x01
+#define BS_WRITABLE 0x02
+
+struct reader_dev {
+ struct pcmcia_device *p_dev;
+ wait_queue_head_t devq;
+ wait_queue_head_t poll_wait;
+ wait_queue_head_t read_wait;
+ wait_queue_head_t write_wait;
+ unsigned long buffer_status;
+ unsigned long timeout;
+ unsigned char s_buf[READ_WRITE_BUFFER_SIZE];
+ unsigned char r_buf[READ_WRITE_BUFFER_SIZE];
+ struct timer_list poll_timer;
+};
+
+static struct pcmcia_device *dev_table[CM_MAX_DEV];
+
+#ifndef CM4040_DEBUG
+#define xoutb outb
+#define xinb inb
+#else
+static inline void xoutb(unsigned char val, unsigned short port)
+{
+ pr_debug("outb(val=%.2x,port=%.4x)\n", val, port);
+ outb(val, port);
+}
+
+static inline unsigned char xinb(unsigned short port)
+{
+ unsigned char val;
+
+ val = inb(port);
+ pr_debug("%.2x=inb(%.4x)\n", val, port);
+ return val;
+}
+#endif
+
+/* Poll the device FIFO status register; not to be confused with
+ * the poll() syscall. */
+static void cm4040_do_poll(unsigned long dummy)
+{
+ struct reader_dev *dev = (struct reader_dev *) dummy;
+ unsigned int obs = xinb(dev->p_dev->resource[0]->start
+ + REG_OFFSET_BUFFER_STATUS);
+
+ if ((obs & BSR_BULK_IN_FULL)) {
+ set_bit(BS_READABLE, &dev->buffer_status);
+ DEBUGP(4, dev, "waking up read_wait\n");
+ wake_up_interruptible(&dev->read_wait);
+ } else
+ clear_bit(BS_READABLE, &dev->buffer_status);
+
+ if (!(obs & BSR_BULK_OUT_FULL)) {
+ set_bit(BS_WRITABLE, &dev->buffer_status);
+ DEBUGP(4, dev, "waking up write_wait\n");
+ wake_up_interruptible(&dev->write_wait);
+ } else
+ clear_bit(BS_WRITABLE, &dev->buffer_status);
+
+ if (dev->buffer_status)
+ wake_up_interruptible(&dev->poll_wait);
+
+ mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD);
+}
+
+static void cm4040_stop_poll(struct reader_dev *dev)
+{
+ del_timer_sync(&dev->poll_timer);
+}
+
+static int wait_for_bulk_out_ready(struct reader_dev *dev)
+{
+ int i, rc;
+ int iobase = dev->p_dev->resource[0]->start;
+
+ for (i = 0; i < POLL_LOOP_COUNT; i++) {
+ if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS)
+ & BSR_BULK_OUT_FULL) == 0) {
+ DEBUGP(4, dev, "BulkOut empty (i=%d)\n", i);
+ return 1;
+ }
+ }
+
+	DEBUGP(4, dev, "wait_event_interruptible_timeout(timeout=%ld)\n",
+ dev->timeout);
+ rc = wait_event_interruptible_timeout(dev->write_wait,
+ test_and_clear_bit(BS_WRITABLE,
+ &dev->buffer_status),
+ dev->timeout);
+
+ if (rc > 0)
+ DEBUGP(4, dev, "woke up: BulkOut empty\n");
+ else if (rc == 0)
+ DEBUGP(4, dev, "woke up: BulkOut full, returning 0 :(\n");
+ else if (rc < 0)
+ DEBUGP(4, dev, "woke up: signal arrived\n");
+
+ return rc;
+}
+
+/* Write to Sync Control Register */
+static int write_sync_reg(unsigned char val, struct reader_dev *dev)
+{
+ int iobase = dev->p_dev->resource[0]->start;
+ int rc;
+
+ rc = wait_for_bulk_out_ready(dev);
+ if (rc <= 0)
+ return rc;
+
+ xoutb(val, iobase + REG_OFFSET_SYNC_CONTROL);
+ rc = wait_for_bulk_out_ready(dev);
+ if (rc <= 0)
+ return rc;
+
+ return 1;
+}
+
+static int wait_for_bulk_in_ready(struct reader_dev *dev)
+{
+ int i, rc;
+ int iobase = dev->p_dev->resource[0]->start;
+
+ for (i = 0; i < POLL_LOOP_COUNT; i++) {
+ if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS)
+ & BSR_BULK_IN_FULL) == BSR_BULK_IN_FULL) {
+ DEBUGP(3, dev, "BulkIn full (i=%d)\n", i);
+ return 1;
+ }
+ }
+
+	DEBUGP(4, dev, "wait_event_interruptible_timeout(timeout=%ld)\n",
+ dev->timeout);
+ rc = wait_event_interruptible_timeout(dev->read_wait,
+ test_and_clear_bit(BS_READABLE,
+ &dev->buffer_status),
+ dev->timeout);
+ if (rc > 0)
+ DEBUGP(4, dev, "woke up: BulkIn full\n");
+ else if (rc == 0)
+ DEBUGP(4, dev, "woke up: BulkIn not full, returning 0 :(\n");
+ else if (rc < 0)
+ DEBUGP(4, dev, "woke up: signal arrived\n");
+
+ return rc;
+}
+
+static ssize_t cm4040_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct reader_dev *dev = filp->private_data;
+ int iobase = dev->p_dev->resource[0]->start;
+ size_t bytes_to_read;
+ unsigned long i;
+ size_t min_bytes_to_read;
+ int rc;
+ unsigned char uc;
+
+ DEBUGP(2, dev, "-> cm4040_read(%s,%d)\n", current->comm, current->pid);
+
+ if (count == 0)
+ return 0;
+
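+	/* the caller's buffer must be able to hold at least a complete
+	 * 10-byte CCID bulk-in header */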
+ if (count < 10)
+ return -EFAULT;
+
+ if (filp->f_flags & O_NONBLOCK) {
+ DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n");
+ DEBUGP(2, dev, "<- cm4040_read (failure)\n");
+ return -EAGAIN;
+ }
+
+ if (!pcmcia_dev_present(dev->p_dev))
+ return -ENODEV;
+
+ for (i = 0; i < 5; i++) {
+ rc = wait_for_bulk_in_ready(dev);
+ if (rc <= 0) {
+ DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc);
+ DEBUGP(2, dev, "<- cm4040_read (failed)\n");
+ if (rc == -ERESTARTSYS)
+ return rc;
+ return -EIO;
+ }
+ dev->r_buf[i] = xinb(iobase + REG_OFFSET_BULK_IN);
+#ifdef CM4040_DEBUG
+ pr_debug("%lu:%2x ", i, dev->r_buf[i]);
+ }
+ pr_debug("\n");
+#else
+ }
+#endif
+
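+	/* The five bytes read above are the start of the 10-byte CCID
+	 * bulk-in header: r_buf[0] holds the message type, r_buf[1..4]
+	 * the little-endian dwLength of the payload, so another
+	 * 5 (rest of header) + dwLength bytes remain to be read. */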
+ bytes_to_read = 5 + le32_to_cpu(*(__le32 *)&dev->r_buf[1]);
+
+ DEBUGP(6, dev, "BytesToRead=%zu\n", bytes_to_read);
+
+ min_bytes_to_read = min(count, bytes_to_read + 5);
+ min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE);
+
+ DEBUGP(6, dev, "Min=%zu\n", min_bytes_to_read);
+
+ for (i = 0; i < (min_bytes_to_read-5); i++) {
+ rc = wait_for_bulk_in_ready(dev);
+ if (rc <= 0) {
+ DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc);
+ DEBUGP(2, dev, "<- cm4040_read (failed)\n");
+ if (rc == -ERESTARTSYS)
+ return rc;
+ return -EIO;
+ }
+ dev->r_buf[i+5] = xinb(iobase + REG_OFFSET_BULK_IN);
+#ifdef CM4040_DEBUG
+		pr_debug("%lu:%2x ", i, dev->r_buf[i+5]);
+ }
+ pr_debug("\n");
+#else
+ }
+#endif
+
+ *ppos = min_bytes_to_read;
+ if (copy_to_user(buf, dev->r_buf, min_bytes_to_read))
+ return -EFAULT;
+
+ rc = wait_for_bulk_in_ready(dev);
+ if (rc <= 0) {
+ DEBUGP(5, dev, "wait_for_bulk_in_ready rc=%.2x\n", rc);
+ DEBUGP(2, dev, "<- cm4040_read (failed)\n");
+ if (rc == -ERESTARTSYS)
+ return rc;
+ return -EIO;
+ }
+
+ rc = write_sync_reg(SCR_READER_TO_HOST_DONE, dev);
+ if (rc <= 0) {
+		DEBUGP(5, dev, "write_sync_reg rc=%.2x\n", rc);
+ DEBUGP(2, dev, "<- cm4040_read (failed)\n");
+ if (rc == -ERESTARTSYS)
+ return rc;
+ else
+ return -EIO;
+ }
+
+ uc = xinb(iobase + REG_OFFSET_BULK_IN);
+
+ DEBUGP(2, dev, "<- cm4040_read (successfully)\n");
+ return min_bytes_to_read;
+}
+
+static ssize_t cm4040_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct reader_dev *dev = filp->private_data;
+ int iobase = dev->p_dev->resource[0]->start;
+ ssize_t rc;
+ int i;
+ unsigned int bytes_to_write;
+
+ DEBUGP(2, dev, "-> cm4040_write(%s,%d)\n", current->comm, current->pid);
+
+ if (count == 0) {
+		DEBUGP(2, dev, "<- cm4040_write empty write (successfully)\n");
+ return 0;
+ }
+
+ if ((count < 5) || (count > READ_WRITE_BUFFER_SIZE)) {
+		DEBUGP(2, dev, "<- cm4040_write buffersize=%Zd out of range\n", count);
+ return -EIO;
+ }
+
+ if (filp->f_flags & O_NONBLOCK) {
+ DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n");
+ DEBUGP(4, dev, "<- cm4040_write (failure)\n");
+ return -EAGAIN;
+ }
+
+ if (!pcmcia_dev_present(dev->p_dev))
+ return -ENODEV;
+
+ bytes_to_write = count;
+ if (copy_from_user(dev->s_buf, buf, bytes_to_write))
+ return -EFAULT;
+
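+	/* Pick the bulk transfer timeout from the CCID message type in
+	 * byte 0: data-exchange commands get the long default, ICC
+	 * power-on the asynchronous power-up timeout, everything else
+	 * the minimum. */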
+ switch (dev->s_buf[0]) {
+ case CMD_PC_TO_RDR_XFRBLOCK:
+ case CMD_PC_TO_RDR_SECURE:
+ case CMD_PC_TO_RDR_TEST_SECURE:
+ case CMD_PC_TO_RDR_OK_SECURE:
+ dev->timeout = CCID_DRIVER_BULK_DEFAULT_TIMEOUT;
+ break;
+
+ case CMD_PC_TO_RDR_ICCPOWERON:
+ dev->timeout = CCID_DRIVER_ASYNC_POWERUP_TIMEOUT;
+ break;
+
+ case CMD_PC_TO_RDR_GETSLOTSTATUS:
+ case CMD_PC_TO_RDR_ICCPOWEROFF:
+ case CMD_PC_TO_RDR_GETPARAMETERS:
+ case CMD_PC_TO_RDR_RESETPARAMETERS:
+ case CMD_PC_TO_RDR_SETPARAMETERS:
+ case CMD_PC_TO_RDR_ESCAPE:
+ case CMD_PC_TO_RDR_ICCCLOCK:
+ default:
+ dev->timeout = CCID_DRIVER_MINIMUM_TIMEOUT;
+ break;
+ }
+
+ rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
+ if (rc <= 0) {
+		DEBUGP(5, dev, "write_sync_reg rc=%.2Zx\n", rc);
+ DEBUGP(2, dev, "<- cm4040_write (failed)\n");
+ if (rc == -ERESTARTSYS)
+ return rc;
+ else
+ return -EIO;
+ }
+
+	DEBUGP(4, dev, "start\n");
+
+ for (i = 0; i < bytes_to_write; i++) {
+ rc = wait_for_bulk_out_ready(dev);
+ if (rc <= 0) {
+ DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
+ rc);
+ DEBUGP(2, dev, "<- cm4040_write (failed)\n");
+ if (rc == -ERESTARTSYS)
+ return rc;
+ else
+ return -EIO;
+ }
+
+		xoutb(dev->s_buf[i], iobase + REG_OFFSET_BULK_OUT);
+ }
+ DEBUGP(4, dev, "end\n");
+
+ rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
+
+ if (rc <= 0) {
+		DEBUGP(5, dev, "write_sync_reg rc=%.2Zx\n", rc);
+ DEBUGP(2, dev, "<- cm4040_write (failed)\n");
+ if (rc == -ERESTARTSYS)
+ return rc;
+ else
+ return -EIO;
+ }
+
+ DEBUGP(2, dev, "<- cm4040_write (successfully)\n");
+ return count;
+}
+
+static unsigned int cm4040_poll(struct file *filp, poll_table *wait)
+{
+ struct reader_dev *dev = filp->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &dev->poll_wait, wait);
+
+ if (test_and_clear_bit(BS_READABLE, &dev->buffer_status))
+ mask |= POLLIN | POLLRDNORM;
+ if (test_and_clear_bit(BS_WRITABLE, &dev->buffer_status))
+ mask |= POLLOUT | POLLWRNORM;
+
+ DEBUGP(2, dev, "<- cm4040_poll(%u)\n", mask);
+
+ return mask;
+}
+
+static int cm4040_open(struct inode *inode, struct file *filp)
+{
+ struct reader_dev *dev;
+ struct pcmcia_device *link;
+ int minor = iminor(inode);
+ int ret;
+
+ if (minor >= CM_MAX_DEV)
+ return -ENODEV;
+
+ mutex_lock(&cm4040_mutex);
+ link = dev_table[minor];
+ if (link == NULL || !pcmcia_dev_present(link)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (link->open) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ dev = link->priv;
+ filp->private_data = dev;
+
+ if (filp->f_flags & O_NONBLOCK) {
+ DEBUGP(4, dev, "filep->f_flags O_NONBLOCK set\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ link->open = 1;
+
+ dev->poll_timer.data = (unsigned long) dev;
+ mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD);
+
+ DEBUGP(2, dev, "<- cm4040_open (successfully)\n");
+ ret = nonseekable_open(inode, filp);
+out:
+ mutex_unlock(&cm4040_mutex);
+ return ret;
+}
+
+static int cm4040_close(struct inode *inode, struct file *filp)
+{
+ struct reader_dev *dev = filp->private_data;
+ struct pcmcia_device *link;
+ int minor = iminor(inode);
+
+ DEBUGP(2, dev, "-> cm4040_close(maj/min=%d.%d)\n", imajor(inode),
+ iminor(inode));
+
+ if (minor >= CM_MAX_DEV)
+ return -ENODEV;
+
+ link = dev_table[minor];
+ if (link == NULL)
+ return -ENODEV;
+
+ cm4040_stop_poll(dev);
+
+ link->open = 0;
+ wake_up(&dev->devq);
+
+ DEBUGP(2, dev, "<- cm4040_close\n");
+ return 0;
+}
+
+static void cm4040_reader_release(struct pcmcia_device *link)
+{
+ struct reader_dev *dev = link->priv;
+
+ DEBUGP(3, dev, "-> cm4040_reader_release\n");
+ while (link->open) {
+		DEBUGP(3, dev, "delaying release until process has terminated\n");
+ wait_event(dev->devq, (link->open == 0));
+ }
+ DEBUGP(3, dev, "<- cm4040_reader_release\n");
+ return;
+}
+
+static int cm4040_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ return pcmcia_request_io(p_dev);
+}
+
+
+static int reader_config(struct pcmcia_device *link, int devno)
+{
+ struct reader_dev *dev;
+ int fail_rc;
+
+ link->config_flags |= CONF_AUTO_SET_IO;
+
+ if (pcmcia_loop_config(link, cm4040_config_check, NULL))
+ goto cs_release;
+
+ fail_rc = pcmcia_enable_device(link);
+ if (fail_rc != 0) {
+ dev_printk(KERN_INFO, &link->dev,
+ "pcmcia_enable_device failed 0x%x\n",
+ fail_rc);
+ goto cs_release;
+ }
+
+ dev = link->priv;
+
+ DEBUGP(2, dev, "device " DEVICE_NAME "%d at %pR\n", devno,
+ link->resource[0]);
+	DEBUGP(2, dev, "<- reader_config (success)\n");
+
+ return 0;
+
+cs_release:
+ reader_release(link);
+ return -ENODEV;
+}
+
+static void reader_release(struct pcmcia_device *link)
+{
+ cm4040_reader_release(link);
+ pcmcia_disable_device(link);
+}
+
+static int reader_probe(struct pcmcia_device *link)
+{
+ struct reader_dev *dev;
+ int i, ret;
+
+ for (i = 0; i < CM_MAX_DEV; i++) {
+ if (dev_table[i] == NULL)
+ break;
+ }
+
+ if (i == CM_MAX_DEV)
+ return -ENODEV;
+
+ dev = kzalloc(sizeof(struct reader_dev), GFP_KERNEL);
+ if (dev == NULL)
+ return -ENOMEM;
+
+ dev->timeout = CCID_DRIVER_MINIMUM_TIMEOUT;
+ dev->buffer_status = 0;
+
+ link->priv = dev;
+ dev->p_dev = link;
+
+ dev_table[i] = link;
+
+ init_waitqueue_head(&dev->devq);
+ init_waitqueue_head(&dev->poll_wait);
+ init_waitqueue_head(&dev->read_wait);
+ init_waitqueue_head(&dev->write_wait);
+ setup_timer(&dev->poll_timer, cm4040_do_poll, 0);
+
+ ret = reader_config(link, i);
+ if (ret) {
+ dev_table[i] = NULL;
+ kfree(dev);
+ return ret;
+ }
+
+ device_create(cmx_class, NULL, MKDEV(major, i), NULL, "cmx%d", i);
+
+ return 0;
+}
+
+static void reader_detach(struct pcmcia_device *link)
+{
+ struct reader_dev *dev = link->priv;
+ int devno;
+
+ /* find device */
+ for (devno = 0; devno < CM_MAX_DEV; devno++) {
+ if (dev_table[devno] == link)
+ break;
+ }
+ if (devno == CM_MAX_DEV)
+ return;
+
+ reader_release(link);
+
+ dev_table[devno] = NULL;
+ kfree(dev);
+
+ device_destroy(cmx_class, MKDEV(major, devno));
+
+ return;
+}
+
+static const struct file_operations reader_fops = {
+ .owner = THIS_MODULE,
+ .read = cm4040_read,
+ .write = cm4040_write,
+ .open = cm4040_open,
+ .release = cm4040_close,
+ .poll = cm4040_poll,
+ .llseek = no_llseek,
+};
+
+static const struct pcmcia_device_id cm4040_ids[] = {
+ PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0200),
+ PCMCIA_DEVICE_PROD_ID12("OMNIKEY", "CardMan 4040",
+ 0xE32CDD8C, 0x8F23318B),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, cm4040_ids);
+
+static struct pcmcia_driver reader_driver = {
+ .owner = THIS_MODULE,
+ .name = "cm4040_cs",
+ .probe = reader_probe,
+ .remove = reader_detach,
+ .id_table = cm4040_ids,
+};
+
+static int __init cm4040_init(void)
+{
+ int rc;
+
+ cmx_class = class_create(THIS_MODULE, "cardman_4040");
+ if (IS_ERR(cmx_class))
+ return PTR_ERR(cmx_class);
+
+ major = register_chrdev(0, DEVICE_NAME, &reader_fops);
+ if (major < 0) {
+ printk(KERN_WARNING MODULE_NAME
+ ": could not get major number\n");
+ class_destroy(cmx_class);
+ return major;
+ }
+
+ rc = pcmcia_register_driver(&reader_driver);
+ if (rc < 0) {
+ unregister_chrdev(major, DEVICE_NAME);
+ class_destroy(cmx_class);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void __exit cm4040_exit(void)
+{
+ pcmcia_unregister_driver(&reader_driver);
+ unregister_chrdev(major, DEVICE_NAME);
+ class_destroy(cmx_class);
+}
+
+module_init(cm4040_init);
+module_exit(cm4040_exit);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/char/pcmcia/cm4040_cs.h b/drivers/char/pcmcia/cm4040_cs.h
new file mode 100644
index 00000000..9a8b805c
--- /dev/null
+++ b/drivers/char/pcmcia/cm4040_cs.h
@@ -0,0 +1,47 @@
+#ifndef _CM4040_H_
+#define _CM4040_H_
+
+#define CM_MAX_DEV 4
+
+#define DEVICE_NAME "cmx"
+#define MODULE_NAME "cm4040_cs"
+
+#define REG_OFFSET_BULK_OUT 0
+#define REG_OFFSET_BULK_IN 0
+#define REG_OFFSET_BUFFER_STATUS 1
+#define REG_OFFSET_SYNC_CONTROL 2
+
+#define BSR_BULK_IN_FULL 0x02
+#define BSR_BULK_OUT_FULL 0x01
+
+#define SCR_HOST_TO_READER_START 0x80
+#define SCR_ABORT 0x40
+#define SCR_EN_NOTIFY 0x20
+#define SCR_ACK_NOTIFY 0x10
+#define SCR_READER_TO_HOST_DONE 0x08
+#define SCR_HOST_TO_READER_DONE 0x04
+#define SCR_PULSE_INTERRUPT 0x02
+#define SCR_POWER_DOWN 0x01
+
+
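+/* largely the bMessageType values of the PC-to-reader (and, below,
+ * reader-to-PC) messages from the USB CCID specification */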
+#define CMD_PC_TO_RDR_ICCPOWERON 0x62
+#define CMD_PC_TO_RDR_GETSLOTSTATUS 0x65
+#define CMD_PC_TO_RDR_ICCPOWEROFF 0x63
+#define CMD_PC_TO_RDR_SECURE 0x69
+#define CMD_PC_TO_RDR_GETPARAMETERS 0x6C
+#define CMD_PC_TO_RDR_RESETPARAMETERS 0x6D
+#define CMD_PC_TO_RDR_SETPARAMETERS 0x61
+#define CMD_PC_TO_RDR_XFRBLOCK 0x6F
+#define CMD_PC_TO_RDR_ESCAPE 0x6B
+#define CMD_PC_TO_RDR_ICCCLOCK 0x6E
+#define CMD_PC_TO_RDR_TEST_SECURE 0x74
+#define CMD_PC_TO_RDR_OK_SECURE 0x89
+
+
+#define CMD_RDR_TO_PC_SLOTSTATUS 0x81
+#define CMD_RDR_TO_PC_DATABLOCK 0x80
+#define CMD_RDR_TO_PC_PARAMETERS 0x82
+#define CMD_RDR_TO_PC_ESCAPE 0x83
+#define CMD_RDR_TO_PC_OK_SECURE 0x89
+
+#endif /* _CM4040_H_ */
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
new file mode 100644
index 00000000..15781396
--- /dev/null
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -0,0 +1,4311 @@
+/*
+ * linux/drivers/char/pcmcia/synclink_cs.c
+ *
+ * $Id: synclink_cs.c,v 4.34 2005/09/08 13:20:54 paulkf Exp $
+ *
+ * Device driver for Microgate SyncLink PC Card
+ * multiprotocol serial adapter.
+ *
+ * written by Paul Fulghum for Microgate Corporation
+ * paulkf@microgate.com
+ *
+ * Microgate and SyncLink are trademarks of Microgate Corporation
+ *
+ * This code is released under the GNU General Public License (GPL)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define VERSION(ver,rel,seq) (((ver)<<16) | ((rel)<<8) | (seq))
+#if defined(__i386__)
+# define BREAKPOINT() asm(" int $3");
+#else
+# define BREAKPOINT() { }
+#endif
+
+#define MAX_DEVICE_COUNT 4
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/ioctl.h>
+#include <linux/synclink.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <linux/bitops.h>
+#include <asm/types.h>
+#include <linux/termios.h>
+#include <linux/workqueue.h>
+#include <linux/hdlc.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_CS_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
+#endif
+
+#define GET_USER(error,value,addr) error = get_user(value,addr)
+#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
+#define PUT_USER(error,value,addr) error = put_user(value,addr)
+#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
+
+#include <asm/uaccess.h>
+
+static MGSL_PARAMS default_params = {
+ MGSL_MODE_HDLC, /* unsigned long mode */
+ 0, /* unsigned char loopback; */
+ HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
+ HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
+ 0, /* unsigned long clock_speed; */
+ 0xff, /* unsigned char addr_filter; */
+ HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
+ HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
+ HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
+ 9600, /* unsigned long data_rate; */
+ 8, /* unsigned char data_bits; */
+ 1, /* unsigned char stop_bits; */
+ ASYNC_PARITY_NONE /* unsigned char parity; */
+};
+
+typedef struct
+{
+ int count;
+ unsigned char status;
+ char data[1];
+} RXBUF;
+
+/* The queue of BH actions to be performed */
+
+#define BH_RECEIVE 1
+#define BH_TRANSMIT 2
+#define BH_STATUS 4
+
+#define IO_PIN_SHUTDOWN_LIMIT 100
+
+#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
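+/* input-mode flags that affect receive error handling; presumably
+ * compared across termios changes to decide whether the receiver
+ * needs to be reprogrammed */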
+
+struct _input_signal_events {
+ int ri_up;
+ int ri_down;
+ int dsr_up;
+ int dsr_down;
+ int dcd_up;
+ int dcd_down;
+ int cts_up;
+ int cts_down;
+};
+
+
+/*
+ * Device instance data structure
+ */
+
+typedef struct _mgslpc_info {
+ struct tty_port port;
+ void *if_ptr; /* General purpose pointer (used by SPPP) */
+ int magic;
+ int line;
+
+ struct mgsl_icount icount;
+
+ int timeout;
+ int x_char; /* xon/xoff character */
+ unsigned char read_status_mask;
+ unsigned char ignore_status_mask;
+
+ unsigned char *tx_buf;
+ int tx_put;
+ int tx_get;
+ int tx_count;
+
+ /* circular list of fixed length rx buffers */
+
+ unsigned char *rx_buf; /* memory allocated for all rx buffers */
+ int rx_buf_total_size; /* size of memory allocated for rx buffers */
+ int rx_put; /* index of next empty rx buffer */
+ int rx_get; /* index of next full rx buffer */
+ int rx_buf_size; /* size in bytes of single rx buffer */
+ int rx_buf_count; /* total number of rx buffers */
+ int rx_frame_count; /* number of full rx buffers */
+
+ wait_queue_head_t status_event_wait_q;
+ wait_queue_head_t event_wait_q;
+ struct timer_list tx_timer; /* HDLC transmit timeout timer */
+ struct _mgslpc_info *next_device; /* device list link */
+
+ unsigned short imra_value;
+ unsigned short imrb_value;
+ unsigned char pim_value;
+
+ spinlock_t lock;
+ struct work_struct task; /* task structure for scheduling bh */
+
+ u32 max_frame_size;
+
+ u32 pending_bh;
+
+ bool bh_running;
+ bool bh_requested;
+
+ int dcd_chkcount; /* check counts to prevent */
+ int cts_chkcount; /* too many IRQs if a signal */
+ int dsr_chkcount; /* is floating */
+ int ri_chkcount;
+
+ bool rx_enabled;
+ bool rx_overflow;
+
+ bool tx_enabled;
+ bool tx_active;
+ bool tx_aborting;
+ u32 idle_mode;
+
+ int if_mode; /* serial interface selection (RS-232, v.35 etc) */
+
+ char device_name[25]; /* device instance name */
+
+ unsigned int io_base; /* base I/O address of adapter */
+ unsigned int irq_level;
+
+ MGSL_PARAMS params; /* communications parameters */
+
+ unsigned char serial_signals; /* current serial signal states */
+
+ bool irq_occurred; /* for diagnostics use */
+ char testing_irq;
+ unsigned int init_error; /* startup error (DIAGS) */
+
+ char flag_buf[MAX_ASYNC_BUFFER_SIZE];
+ bool drop_rts_on_tx_done;
+
+ struct _input_signal_events input_signal_events;
+
+ /* PCMCIA support */
+ struct pcmcia_device *p_dev;
+ int stop;
+
+ /* SPPP/Cisco HDLC device parts */
+ int netcount;
+ spinlock_t netlock;
+
+#if SYNCLINK_GENERIC_HDLC
+ struct net_device *netdev;
+#endif
+
+} MGSLPC_INFO;
+
+#define MGSLPC_MAGIC 0x5402
+
+/*
+ * The size of the serial xmit buffer is 1 page, or 4096 bytes
+ */
+#define TXBUFSIZE 4096
+
+
+#define CHA 0x00 /* channel A offset */
+#define CHB 0x40 /* channel B offset */
+
+/*
+ * FIXME: PPC has PVR defined in asm/reg.h. For now we just undef it.
+ */
+#undef PVR
+
+#define RXFIFO 0
+#define TXFIFO 0
+#define STAR 0x20
+#define CMDR 0x20
+#define RSTA 0x21
+#define PRE 0x21
+#define MODE 0x22
+#define TIMR 0x23
+#define XAD1 0x24
+#define XAD2 0x25
+#define RAH1 0x26
+#define RAH2 0x27
+#define DAFO 0x27
+#define RAL1 0x28
+#define RFC 0x28
+#define RHCR 0x29
+#define RAL2 0x29
+#define RBCL 0x2a
+#define XBCL 0x2a
+#define RBCH 0x2b
+#define XBCH 0x2b
+#define CCR0 0x2c
+#define CCR1 0x2d
+#define CCR2 0x2e
+#define CCR3 0x2f
+#define VSTR 0x34
+#define BGR 0x34
+#define RLCR 0x35
+#define AML 0x36
+#define AMH 0x37
+#define GIS 0x38
+#define IVA 0x38
+#define IPC 0x39
+#define ISR 0x3a
+#define IMR 0x3a
+#define PVR 0x3c
+#define PIS 0x3d
+#define PIM 0x3d
+#define PCR 0x3e
+#define CCR4 0x3f
+
+// IMR/ISR
+
+#define IRQ_BREAK_ON BIT15 // rx break detected
+#define IRQ_DATAOVERRUN BIT14 // receive data overflow
+#define IRQ_ALLSENT BIT13 // all sent
+#define IRQ_UNDERRUN BIT12 // transmit data underrun
+#define IRQ_TIMER BIT11 // timer interrupt
+#define IRQ_CTS BIT10 // CTS status change
+#define IRQ_TXREPEAT BIT9 // tx message repeat
+#define IRQ_TXFIFO BIT8 // transmit pool ready
+#define IRQ_RXEOM BIT7 // receive message end
+#define IRQ_EXITHUNT BIT6 // receive frame start
+#define IRQ_RXTIME BIT6 // rx char timeout
+#define IRQ_DCD BIT2 // carrier detect status change
+#define IRQ_OVERRUN BIT1 // receive frame overflow
+#define IRQ_RXFIFO BIT0 // receive pool full
+
+// STAR
+
+#define XFW BIT6 // transmit FIFO write enable
+#define CEC BIT2 // command executing
+#define CTS BIT1 // CTS state
+
+#define PVR_DTR BIT0
+#define PVR_DSR BIT1
+#define PVR_RI BIT2
+#define PVR_AUTOCTS BIT3
+#define PVR_RS232 0x20 /* 0010b */
+#define PVR_V35 0xe0 /* 1110b */
+#define PVR_RS422 0x40 /* 0100b */
+
+/* Register access functions */
+
+#define write_reg(info, reg, val) outb((val),(info)->io_base + (reg))
+#define read_reg(info, reg) inb((info)->io_base + (reg))
+
+#define read_reg16(info, reg) inw((info)->io_base + (reg))
+#define write_reg16(info, reg, val) outw((val), (info)->io_base + (reg))
+
+#define set_reg_bits(info, reg, mask) \
+ write_reg(info, (reg), \
+ (unsigned char) (read_reg(info, (reg)) | (mask)))
+#define clear_reg_bits(info, reg, mask) \
+ write_reg(info, (reg), \
+ (unsigned char) (read_reg(info, (reg)) & ~(mask)))
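+/* Usage sketch (register/bit chosen for illustration):
+ * set_reg_bits(info, CHA + CCR0, BIT7) read-modify-writes a single
+ * 8-bit channel register without disturbing its other bits. */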
+/*
+ * interrupt enable/disable routines
+ */
+static void irq_disable(MGSLPC_INFO *info, unsigned char channel, unsigned short mask)
+{
+ if (channel == CHA) {
+ info->imra_value |= mask;
+ write_reg16(info, CHA + IMR, info->imra_value);
+ } else {
+ info->imrb_value |= mask;
+ write_reg16(info, CHB + IMR, info->imrb_value);
+ }
+}
+static void irq_enable(MGSLPC_INFO *info, unsigned char channel, unsigned short mask)
+{
+ if (channel == CHA) {
+ info->imra_value &= ~mask;
+ write_reg16(info, CHA + IMR, info->imra_value);
+ } else {
+ info->imrb_value &= ~mask;
+ write_reg16(info, CHB + IMR, info->imrb_value);
+ }
+}
+
+#define port_irq_disable(info, mask) \
+ { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); }
+
+#define port_irq_enable(info, mask) \
+ { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); }
+
+static void rx_start(MGSLPC_INFO *info);
+static void rx_stop(MGSLPC_INFO *info);
+
+static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty);
+static void tx_stop(MGSLPC_INFO *info);
+static void tx_set_idle(MGSLPC_INFO *info);
+
+static void get_signals(MGSLPC_INFO *info);
+static void set_signals(MGSLPC_INFO *info);
+
+static void reset_device(MGSLPC_INFO *info);
+
+static void hdlc_mode(MGSLPC_INFO *info);
+static void async_mode(MGSLPC_INFO *info);
+
+static void tx_timeout(unsigned long context);
+
+static int carrier_raised(struct tty_port *port);
+static void dtr_rts(struct tty_port *port, int onoff);
+
+#if SYNCLINK_GENERIC_HDLC
+#define dev_to_port(D) (dev_to_hdlc(D)->priv)
+static void hdlcdev_tx_done(MGSLPC_INFO *info);
+static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size);
+static int hdlcdev_init(MGSLPC_INFO *info);
+static void hdlcdev_exit(MGSLPC_INFO *info);
+#endif
+
+static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit);
+
+static bool register_test(MGSLPC_INFO *info);
+static bool irq_test(MGSLPC_INFO *info);
+static int adapter_test(MGSLPC_INFO *info);
+
+static int claim_resources(MGSLPC_INFO *info);
+static void release_resources(MGSLPC_INFO *info);
+static void mgslpc_add_device(MGSLPC_INFO *info);
+static void mgslpc_remove_device(MGSLPC_INFO *info);
+
+static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty);
+static void rx_reset_buffers(MGSLPC_INFO *info);
+static int rx_alloc_buffers(MGSLPC_INFO *info);
+static void rx_free_buffers(MGSLPC_INFO *info);
+
+static irqreturn_t mgslpc_isr(int irq, void *dev_id);
+
+/*
+ * Bottom half interrupt handlers
+ */
+static void bh_handler(struct work_struct *work);
+static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty);
+static void bh_status(MGSLPC_INFO *info);
+
+/*
+ * ioctl handlers
+ */
+static int tiocmget(struct tty_struct *tty);
+static int tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear);
+static int get_stats(MGSLPC_INFO *info, struct mgsl_icount __user *user_icount);
+static int get_params(MGSLPC_INFO *info, MGSL_PARAMS __user *user_params);
+static int set_params(MGSLPC_INFO *info, MGSL_PARAMS __user *new_params, struct tty_struct *tty);
+static int get_txidle(MGSLPC_INFO *info, int __user *idle_mode);
+static int set_txidle(MGSLPC_INFO *info, int idle_mode);
+static int set_txenable(MGSLPC_INFO *info, int enable, struct tty_struct *tty);
+static int tx_abort(MGSLPC_INFO *info);
+static int set_rxenable(MGSLPC_INFO *info, int enable);
+static int wait_events(MGSLPC_INFO *info, int __user *mask);
+
+static MGSLPC_INFO *mgslpc_device_list = NULL;
+static int mgslpc_device_count = 0;
+
+/*
+ * Set this param to non-zero to load eax with the
+ * .text section address and breakpoint on module load.
+ * This is useful for use with gdb and add-symbol-file command.
+ */
+static int break_on_load=0;
+
+/*
+ * Driver major number, defaults to zero to get auto
+ * assigned major number. May be forced as module parameter.
+ */
+static int ttymajor=0;
+
+static int debug_level = 0;
+static int maxframe[MAX_DEVICE_COUNT] = {0,};
+
+module_param(break_on_load, bool, 0);
+module_param(ttymajor, int, 0);
+module_param(debug_level, int, 0);
+module_param_array(maxframe, int, NULL, 0);
+
+MODULE_LICENSE("GPL");
+
+static char *driver_name = "SyncLink PC Card driver";
+static char *driver_version = "$Revision: 4.34 $";
+
+static struct tty_driver *serial_driver;
+
+/* number of characters left in xmit buffer before we ask for more */
+#define WAKEUP_CHARS 256
+
+static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty);
+static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout);
+
+/* PCMCIA prototypes */
+
+static int mgslpc_config(struct pcmcia_device *link);
+static void mgslpc_release(u_long arg);
+static void mgslpc_detach(struct pcmcia_device *p_dev);
+
+/*
+ * 1st function defined in .text section. Calling this function in
+ * init_module() followed by a breakpoint allows a remote debugger
+ * (gdb) to get the .text address for the add-symbol-file command.
+ * This allows remote debugging of dynamically loadable modules.
+ */
+static void* mgslpc_get_text_ptr(void)
+{
+ return mgslpc_get_text_ptr;
+}
+
+/**
+ * line discipline callback wrappers
+ *
+ * The wrappers maintain line discipline references
+ * while calling into the line discipline.
+ *
+ * ldisc_receive_buf - pass receive data to line discipline
+ */
+
+static void ldisc_receive_buf(struct tty_struct *tty,
+ const __u8 *data, char *flags, int count)
+{
+ struct tty_ldisc *ld;
+ if (!tty)
+ return;
+ ld = tty_ldisc_ref(tty);
+ if (ld) {
+ if (ld->ops->receive_buf)
+ ld->ops->receive_buf(tty, data, flags, count);
+ tty_ldisc_deref(ld);
+ }
+}
+
+static const struct tty_port_operations mgslpc_port_ops = {
+ .carrier_raised = carrier_raised,
+ .dtr_rts = dtr_rts
+};
+
+static int mgslpc_probe(struct pcmcia_device *link)
+{
+ MGSLPC_INFO *info;
+ int ret;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("mgslpc_probe\n");
+
+ info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
+ if (!info) {
+		printk("Error: can't allocate device instance data\n");
+ return -ENOMEM;
+ }
+
+ info->magic = MGSLPC_MAGIC;
+ tty_port_init(&info->port);
+ info->port.ops = &mgslpc_port_ops;
+ INIT_WORK(&info->task, bh_handler);
+ info->max_frame_size = 4096;
+ info->port.close_delay = 5*HZ/10;
+ info->port.closing_wait = 30*HZ;
+ init_waitqueue_head(&info->status_event_wait_q);
+ init_waitqueue_head(&info->event_wait_q);
+ spin_lock_init(&info->lock);
+ spin_lock_init(&info->netlock);
+ memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
+ info->idle_mode = HDLC_TXIDLE_FLAGS;
+ info->imra_value = 0xffff;
+ info->imrb_value = 0xffff;
+ info->pim_value = 0xff;
+
+ info->p_dev = link;
+ link->priv = info;
+
+	/* Configure the PCMCIA device */
+
+ ret = mgslpc_config(link);
+ if (ret)
+ return ret;
+
+ mgslpc_add_device(info);
+
+ return 0;
+}
+
+/* Card has been inserted.
+ */
+
+static int mgslpc_ioprobe(struct pcmcia_device *p_dev, void *priv_data)
+{
+ return pcmcia_request_io(p_dev);
+}
+
+static int mgslpc_config(struct pcmcia_device *link)
+{
+ MGSLPC_INFO *info = link->priv;
+ int ret;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("mgslpc_config(0x%p)\n", link);
+
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+
+ ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
+ if (ret != 0)
+ goto failed;
+
+ link->config_index = 8;
+ link->config_regs = PRESENT_OPTION;
+
+ ret = pcmcia_request_irq(link, mgslpc_isr);
+ if (ret)
+ goto failed;
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ info->io_base = link->resource[0]->start;
+ info->irq_level = link->irq;
+ return 0;
+
+failed:
+ mgslpc_release((u_long)link);
+ return -ENODEV;
+}
+
+/* Card has been removed.
+ * Unregister device and release PCMCIA configuration.
+ * If device is open, postpone until it is closed.
+ */
+static void mgslpc_release(u_long arg)
+{
+ struct pcmcia_device *link = (struct pcmcia_device *)arg;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("mgslpc_release(0x%p)\n", link);
+
+ pcmcia_disable_device(link);
+}
+
+static void mgslpc_detach(struct pcmcia_device *link)
+{
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("mgslpc_detach(0x%p)\n", link);
+
+ ((MGSLPC_INFO *)link->priv)->stop = 1;
+ mgslpc_release((u_long)link);
+
+ mgslpc_remove_device((MGSLPC_INFO *)link->priv);
+}
+
+static int mgslpc_suspend(struct pcmcia_device *link)
+{
+ MGSLPC_INFO *info = link->priv;
+
+ info->stop = 1;
+
+ return 0;
+}
+
+static int mgslpc_resume(struct pcmcia_device *link)
+{
+ MGSLPC_INFO *info = link->priv;
+
+ info->stop = 0;
+
+ return 0;
+}
+
+
+static inline bool mgslpc_paranoia_check(MGSLPC_INFO *info,
+ char *name, const char *routine)
+{
+#ifdef MGSLPC_PARANOIA_CHECK
+ static const char *badmagic =
+ "Warning: bad magic number for mgsl struct (%s) in %s\n";
+ static const char *badinfo =
+ "Warning: null mgslpc_info for (%s) in %s\n";
+
+ if (!info) {
+ printk(badinfo, name, routine);
+ return true;
+ }
+ if (info->magic != MGSLPC_MAGIC) {
+ printk(badmagic, name, routine);
+ return true;
+ }
+#else
+ if (!info)
+ return true;
+#endif
+ return false;
+}
+
+
+#define CMD_RXFIFO BIT7 // release current rx FIFO
+#define CMD_RXRESET BIT6 // receiver reset
+#define CMD_RXFIFO_READ BIT5
+#define CMD_START_TIMER BIT4
+#define CMD_TXFIFO BIT3 // release current tx FIFO
+#define CMD_TXEOM BIT1 // transmit end message
+#define CMD_TXRESET BIT0 // transmit reset
+
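+/* Busy-wait on the STAR command-executing (CEC) bit; give up after
+ * roughly a millisecond (1000 x udelay(1)). */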
+static bool wait_command_complete(MGSLPC_INFO *info, unsigned char channel)
+{
+ int i = 0;
+ /* wait for command completion */
+ while (read_reg(info, (unsigned char)(channel+STAR)) & BIT2) {
+ udelay(1);
+ if (i++ == 1000)
+ return false;
+ }
+ return true;
+}
+
+static void issue_command(MGSLPC_INFO *info, unsigned char channel, unsigned char cmd)
+{
+ wait_command_complete(info, channel);
+ write_reg(info, (unsigned char) (channel + CMDR), cmd);
+}
+
+static void tx_pause(struct tty_struct *tty)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (mgslpc_paranoia_check(info, tty->name, "tx_pause"))
+ return;
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("tx_pause(%s)\n",info->device_name);
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (info->tx_enabled)
+ tx_stop(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+static void tx_release(struct tty_struct *tty)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (mgslpc_paranoia_check(info, tty->name, "tx_release"))
+ return;
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("tx_release(%s)\n",info->device_name);
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_enabled)
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+/* Return the next bottom half action to perform,
+ * or 0 if there is nothing to do.
+ */
+static int bh_action(MGSLPC_INFO *info)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ if (info->pending_bh & BH_RECEIVE) {
+ info->pending_bh &= ~BH_RECEIVE;
+ rc = BH_RECEIVE;
+ } else if (info->pending_bh & BH_TRANSMIT) {
+ info->pending_bh &= ~BH_TRANSMIT;
+ rc = BH_TRANSMIT;
+ } else if (info->pending_bh & BH_STATUS) {
+ info->pending_bh &= ~BH_STATUS;
+ rc = BH_STATUS;
+ }
+
+ if (!rc) {
+ /* Mark BH routine as complete */
+ info->bh_running = false;
+ info->bh_requested = false;
+ }
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ return rc;
+}
+
+static void bh_handler(struct work_struct *work)
+{
+ MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
+ struct tty_struct *tty;
+ int action;
+
+ if (!info)
+ return;
+
+ if (debug_level >= DEBUG_LEVEL_BH)
+ printk( "%s(%d):bh_handler(%s) entry\n",
+ __FILE__,__LINE__,info->device_name);
+
+ info->bh_running = true;
+ tty = tty_port_tty_get(&info->port);
+
+ while((action = bh_action(info)) != 0) {
+
+ /* Process work item */
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk( "%s(%d):bh_handler() work item action=%d\n",
+ __FILE__,__LINE__,action);
+
+ switch (action) {
+
+ case BH_RECEIVE:
+ while(rx_get_frame(info, tty));
+ break;
+ case BH_TRANSMIT:
+ bh_transmit(info, tty);
+ break;
+ case BH_STATUS:
+ bh_status(info);
+ break;
+ default:
+ /* unknown work item ID */
+ printk("Unknown work item ID=%08X!\n", action);
+ break;
+ }
+ }
+
+ tty_kref_put(tty);
+ if (debug_level >= DEBUG_LEVEL_BH)
+ printk( "%s(%d):bh_handler(%s) exit\n",
+ __FILE__,__LINE__,info->device_name);
+}
+
+static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+ if (debug_level >= DEBUG_LEVEL_BH)
+ printk("bh_transmit() entry on %s\n", info->device_name);
+
+ if (tty)
+ tty_wakeup(tty);
+}
+
+static void bh_status(MGSLPC_INFO *info)
+{
+ info->ri_chkcount = 0;
+ info->dsr_chkcount = 0;
+ info->dcd_chkcount = 0;
+ info->cts_chkcount = 0;
+}
+
+/* eom: non-zero = end of frame */
+static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
+{
+ unsigned char data[2];
+ unsigned char fifo_count, read_count, i;
+ RXBUF *buf = (RXBUF*)(info->rx_buf + (info->rx_put * info->rx_buf_size));
+
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):rx_ready_hdlc(eom=%d)\n",__FILE__,__LINE__,eom);
+
+ if (!info->rx_enabled)
+ return;
+
+ if (info->rx_frame_count >= info->rx_buf_count) {
+ /* no more free buffers */
+ issue_command(info, CHA, CMD_RXRESET);
+ info->pending_bh |= BH_RECEIVE;
+ info->rx_overflow = true;
+ info->icount.buf_overrun++;
+ return;
+ }
+
+ if (eom) {
+ /* end of frame, get FIFO count from RBCL register */
+ if (!(fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f)))
+ fifo_count = 32;
+ } else
+ fifo_count = 32;
+
+ do {
+ if (fifo_count == 1) {
+ read_count = 1;
+ data[0] = read_reg(info, CHA + RXFIFO);
+ } else {
+ read_count = 2;
+ *((unsigned short *) data) = read_reg16(info, CHA + RXFIFO);
+ }
+ fifo_count -= read_count;
+ if (!fifo_count && eom)
+ buf->status = data[--read_count];
+
+ for (i = 0; i < read_count; i++) {
+ if (buf->count >= info->max_frame_size) {
+ /* frame too large, reset receiver and reset current buffer */
+ issue_command(info, CHA, CMD_RXRESET);
+ buf->count = 0;
+ return;
+ }
+ *(buf->data + buf->count) = data[i];
+ buf->count++;
+ }
+ } while (fifo_count);
+
+ if (eom) {
+ info->pending_bh |= BH_RECEIVE;
+ info->rx_frame_count++;
+ info->rx_put++;
+ if (info->rx_put >= info->rx_buf_count)
+ info->rx_put = 0;
+ }
+ issue_command(info, CHA, CMD_RXFIFO);
+}
+
+static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
+{
+ unsigned char data, status, flag;
+ int fifo_count;
+ int work = 0;
+ struct mgsl_icount *icount = &info->icount;
+
+ if (tcd) {
+ /* early termination, get FIFO count from RBCL register */
+ fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f);
+
+ /* Zero fifo count could mean 0 or 32 bytes available.
+ * If BIT5 of STAR is set then at least 1 byte is available.
+ */
+ if (!fifo_count && (read_reg(info,CHA+STAR) & BIT5))
+ fifo_count = 32;
+ } else
+ fifo_count = 32;
+
+ tty_buffer_request_room(tty, fifo_count);
+ /* Flush received async data to receive data buffer. */
+ while (fifo_count) {
+ data = read_reg(info, CHA + RXFIFO);
+ status = read_reg(info, CHA + RXFIFO);
+ fifo_count -= 2;
+
+ icount->rx++;
+ flag = TTY_NORMAL;
+
+		// if no parity/framing error then save data
+		// BIT7: parity error
+		// BIT6: framing error
+
+ if (status & (BIT7 + BIT6)) {
+ if (status & BIT7)
+ icount->parity++;
+ else
+ icount->frame++;
+
+ /* discard char if tty control flags say so */
+ if (status & info->ignore_status_mask)
+ continue;
+
+ status &= info->read_status_mask;
+
+ if (status & BIT7)
+ flag = TTY_PARITY;
+ else if (status & BIT6)
+ flag = TTY_FRAME;
+ }
+ work += tty_insert_flip_char(tty, data, flag);
+ }
+ issue_command(info, CHA, CMD_RXFIFO);
+
+ if (debug_level >= DEBUG_LEVEL_ISR) {
+ printk("%s(%d):rx_ready_async",
+ __FILE__,__LINE__);
+ printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
+ __FILE__,__LINE__,icount->rx,icount->brk,
+ icount->parity,icount->frame,icount->overrun);
+ }
+
+ if (work)
+ tty_flip_buffer_push(tty);
+}
+
+
+static void tx_done(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+ if (!info->tx_active)
+ return;
+
+ info->tx_active = false;
+ info->tx_aborting = false;
+
+ if (info->params.mode == MGSL_MODE_ASYNC)
+ return;
+
+ info->tx_count = info->tx_put = info->tx_get = 0;
+ del_timer(&info->tx_timer);
+
+ if (info->drop_rts_on_tx_done) {
+ get_signals(info);
+ if (info->serial_signals & SerialSignal_RTS) {
+ info->serial_signals &= ~SerialSignal_RTS;
+ set_signals(info);
+ }
+ info->drop_rts_on_tx_done = false;
+ }
+
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount)
+ hdlcdev_tx_done(info);
+ else
+#endif
+ {
+ if (tty->stopped || tty->hw_stopped) {
+ tx_stop(info);
+ return;
+ }
+ info->pending_bh |= BH_TRANSMIT;
+ }
+}
+
+static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+ unsigned char fifo_count = 32;
+ int c;
+
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):tx_ready(%s)\n", __FILE__,__LINE__,info->device_name);
+
+ if (info->params.mode == MGSL_MODE_HDLC) {
+ if (!info->tx_active)
+ return;
+ } else {
+ if (tty->stopped || tty->hw_stopped) {
+ tx_stop(info);
+ return;
+ }
+ if (!info->tx_count)
+ info->tx_active = false;
+ }
+
+ if (!info->tx_count)
+ return;
+
+ while (info->tx_count && fifo_count) {
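+		/* move at most 2 bytes per iteration (one 16-bit FIFO
+		 * write), bounded by remaining FIFO space, bytes pending,
+		 * and the wrap point of the circular tx buffer */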
+ c = min(2, min_t(int, fifo_count, min(info->tx_count, TXBUFSIZE - info->tx_get)));
+
+ if (c == 1) {
+ write_reg(info, CHA + TXFIFO, *(info->tx_buf + info->tx_get));
+ } else {
+ write_reg16(info, CHA + TXFIFO,
+ *((unsigned short*)(info->tx_buf + info->tx_get)));
+ }
+ info->tx_count -= c;
+ info->tx_get = (info->tx_get + c) & (TXBUFSIZE - 1);
+ fifo_count -= c;
+ }
+
+ if (info->params.mode == MGSL_MODE_ASYNC) {
+ if (info->tx_count < WAKEUP_CHARS)
+ info->pending_bh |= BH_TRANSMIT;
+ issue_command(info, CHA, CMD_TXFIFO);
+ } else {
+ if (info->tx_count)
+ issue_command(info, CHA, CMD_TXFIFO);
+ else
+ issue_command(info, CHA, CMD_TXFIFO + CMD_TXEOM);
+ }
+}
+
+static void cts_change(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+ get_signals(info);
+ if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
+ irq_disable(info, CHB, IRQ_CTS);
+ info->icount.cts++;
+ if (info->serial_signals & SerialSignal_CTS)
+ info->input_signal_events.cts_up++;
+ else
+ info->input_signal_events.cts_down++;
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+
+ if (info->port.flags & ASYNC_CTS_FLOW) {
+ if (tty->hw_stopped) {
+ if (info->serial_signals & SerialSignal_CTS) {
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("CTS tx start...");
+ if (tty)
+ tty->hw_stopped = 0;
+ tx_start(info, tty);
+ info->pending_bh |= BH_TRANSMIT;
+ return;
+ }
+ } else {
+ if (!(info->serial_signals & SerialSignal_CTS)) {
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("CTS tx stop...");
+ if (tty)
+ tty->hw_stopped = 1;
+ tx_stop(info);
+ }
+ }
+ }
+ info->pending_bh |= BH_STATUS;
+}
+
+static void dcd_change(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+ get_signals(info);
+ if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
+ irq_disable(info, CHB, IRQ_DCD);
+ info->icount.dcd++;
+ if (info->serial_signals & SerialSignal_DCD) {
+ info->input_signal_events.dcd_up++;
+ }
+ else
+ info->input_signal_events.dcd_down++;
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount) {
+ if (info->serial_signals & SerialSignal_DCD)
+ netif_carrier_on(info->netdev);
+ else
+ netif_carrier_off(info->netdev);
+ }
+#endif
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+
+ if (info->port.flags & ASYNC_CHECK_CD) {
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s CD now %s...", info->device_name,
+ (info->serial_signals & SerialSignal_DCD) ? "on" : "off");
+ if (info->serial_signals & SerialSignal_DCD)
+ wake_up_interruptible(&info->port.open_wait);
+ else {
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("doing serial hangup...");
+ if (tty)
+ tty_hangup(tty);
+ }
+ }
+ info->pending_bh |= BH_STATUS;
+}
+
+static void dsr_change(MGSLPC_INFO *info)
+{
+ get_signals(info);
+ if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
+ port_irq_disable(info, PVR_DSR);
+ info->icount.dsr++;
+ if (info->serial_signals & SerialSignal_DSR)
+ info->input_signal_events.dsr_up++;
+ else
+ info->input_signal_events.dsr_down++;
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+ info->pending_bh |= BH_STATUS;
+}
+
+static void ri_change(MGSLPC_INFO *info)
+{
+ get_signals(info);
+ if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
+ port_irq_disable(info, PVR_RI);
+ info->icount.rng++;
+ if (info->serial_signals & SerialSignal_RI)
+ info->input_signal_events.ri_up++;
+ else
+ info->input_signal_events.ri_down++;
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+ info->pending_bh |= BH_STATUS;
+}
+
+/* Interrupt service routine entry point.
+ *
+ * Arguments:
+ *
+ * irq interrupt number (unused; the device is identified by dev_id)
+ * dev_id device ID supplied during interrupt registration
+ */
+static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
+{
+ MGSLPC_INFO *info = dev_id;
+ struct tty_struct *tty;
+ unsigned short isr;
+ unsigned char gis, pis;
+ int count=0;
+
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("mgslpc_isr(%d) entry.\n", info->irq_level);
+
+ if (!(info->p_dev->_locked))
+ return IRQ_HANDLED;
+
+ tty = tty_port_tty_get(&info->port);
+
+ spin_lock(&info->lock);
+
+ while ((gis = read_reg(info, CHA + GIS))) {
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("mgslpc_isr %s gis=%04X\n", info->device_name,gis);
+
+ if ((gis & 0x70) || count > 1000) {
+ printk("synclink_cs:hardware failed or ejected\n");
+ break;
+ }
+ count++;
+
+ if (gis & (BIT1 + BIT0)) {
+ isr = read_reg16(info, CHB + ISR);
+ if (isr & IRQ_DCD)
+ dcd_change(info, tty);
+ if (isr & IRQ_CTS)
+ cts_change(info, tty);
+ }
+ if (gis & (BIT3 + BIT2))
+ {
+ isr = read_reg16(info, CHA + ISR);
+ if (isr & IRQ_TIMER) {
+ info->irq_occurred = true;
+ irq_disable(info, CHA, IRQ_TIMER);
+ }
+
+ /* receive IRQs */
+ if (isr & IRQ_EXITHUNT) {
+ info->icount.exithunt++;
+ wake_up_interruptible(&info->event_wait_q);
+ }
+ if (isr & IRQ_BREAK_ON) {
+ info->icount.brk++;
+ if (info->port.flags & ASYNC_SAK)
+ do_SAK(tty);
+ }
+ if (isr & IRQ_RXTIME) {
+ issue_command(info, CHA, CMD_RXFIFO_READ);
+ }
+ if (isr & (IRQ_RXEOM + IRQ_RXFIFO)) {
+ if (info->params.mode == MGSL_MODE_HDLC)
+ rx_ready_hdlc(info, isr & IRQ_RXEOM);
+ else
+ rx_ready_async(info, isr & IRQ_RXEOM, tty);
+ }
+
+ /* transmit IRQs */
+ if (isr & IRQ_UNDERRUN) {
+ if (info->tx_aborting)
+ info->icount.txabort++;
+ else
+ info->icount.txunder++;
+ tx_done(info, tty);
+ }
+ else if (isr & IRQ_ALLSENT) {
+ info->icount.txok++;
+ tx_done(info, tty);
+ }
+ else if (isr & IRQ_TXFIFO)
+ tx_ready(info, tty);
+ }
+ if (gis & BIT7) {
+ pis = read_reg(info, CHA + PIS);
+ if (pis & BIT1)
+ dsr_change(info);
+ if (pis & BIT2)
+ ri_change(info);
+ }
+ }
+
+ /* Request bottom half processing if there's something
+ * for it to do and the bh is not already running
+ */
+
+ if (info->pending_bh && !info->bh_running && !info->bh_requested) {
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s queueing bh task.\n",
+ __FILE__,__LINE__,info->device_name);
+ schedule_work(&info->task);
+ info->bh_requested = true;
+ }
+
+ spin_unlock(&info->lock);
+ tty_kref_put(tty);
+
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):mgslpc_isr(%d)exit.\n",
+ __FILE__, __LINE__, info->irq_level);
+
+ return IRQ_HANDLED;
+}
+
+/* Initialize and start device.
+ */
+static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
+{
+ int retval = 0;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):startup(%s)\n",__FILE__,__LINE__,info->device_name);
+
+ if (info->port.flags & ASYNC_INITIALIZED)
+ return 0;
+
+ if (!info->tx_buf) {
+ /* allocate a page of memory for a transmit buffer */
+ info->tx_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
+ if (!info->tx_buf) {
+ printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
+ __FILE__,__LINE__,info->device_name);
+ return -ENOMEM;
+ }
+ }
+
+ info->pending_bh = 0;
+
+ memset(&info->icount, 0, sizeof(info->icount));
+
+ setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
+
+ /* Allocate and claim adapter resources */
+ retval = claim_resources(info);
+
+ /* perform existence check and diagnostics */
+ if ( !retval )
+ retval = adapter_test(info);
+
+ if ( retval ) {
+ if (capable(CAP_SYS_ADMIN) && tty)
+ set_bit(TTY_IO_ERROR, &tty->flags);
+ release_resources(info);
+ return retval;
+ }
+
+ /* program hardware for current parameters */
+ mgslpc_change_params(info, tty);
+
+ if (tty)
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+
+ info->port.flags |= ASYNC_INITIALIZED;
+
+ return 0;
+}
+
+/* Called by mgslpc_close() and mgslpc_hangup() to shutdown hardware
+ */
+static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ if (!(info->port.flags & ASYNC_INITIALIZED))
+ return;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_shutdown(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ /* clear status wait queue because status changes */
+ /* can't happen after shutting down the hardware */
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+
+ del_timer_sync(&info->tx_timer);
+
+ if (info->tx_buf) {
+ free_page((unsigned long) info->tx_buf);
+ info->tx_buf = NULL;
+ }
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ rx_stop(info);
+ tx_stop(info);
+
+ /* TODO: disable interrupts instead of resetting, to preserve signal states */
+ reset_device(info);
+
+ if (!tty || tty->termios->c_cflag & HUPCL) {
+ info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ set_signals(info);
+ }
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ release_resources(info);
+
+ if (tty)
+ set_bit(TTY_IO_ERROR, &tty->flags);
+
+ info->port.flags &= ~ASYNC_INITIALIZED;
+}
+
+static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ rx_stop(info);
+ tx_stop(info);
+ info->tx_count = info->tx_put = info->tx_get = 0;
+
+ if (info->params.mode == MGSL_MODE_HDLC || info->netcount)
+ hdlc_mode(info);
+ else
+ async_mode(info);
+
+ set_signals(info);
+
+ info->dcd_chkcount = 0;
+ info->cts_chkcount = 0;
+ info->ri_chkcount = 0;
+ info->dsr_chkcount = 0;
+
+ irq_enable(info, CHB, IRQ_DCD | IRQ_CTS);
+ port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI);
+ get_signals(info);
+
+ if (info->netcount || (tty && (tty->termios->c_cflag & CREAD)))
+ rx_start(info);
+
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+/* Reconfigure adapter based on new parameters
+ */
+static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+ unsigned cflag;
+ int bits_per_char;
+
+ if (!tty || !tty->termios)
+ return;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_change_params(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ cflag = tty->termios->c_cflag;
+
+ /* if B0 rate (hangup) specified then negate DTR and RTS */
+ /* otherwise assert DTR and RTS */
+ if (cflag & CBAUD)
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+
+ /* byte size and parity */
+
+ switch (cflag & CSIZE) {
+ case CS5: info->params.data_bits = 5; break;
+ case CS6: info->params.data_bits = 6; break;
+ case CS7: info->params.data_bits = 7; break;
+ case CS8: info->params.data_bits = 8; break;
+ default: info->params.data_bits = 7; break;
+ }
+
+ if (cflag & CSTOPB)
+ info->params.stop_bits = 2;
+ else
+ info->params.stop_bits = 1;
+
+ info->params.parity = ASYNC_PARITY_NONE;
+ if (cflag & PARENB) {
+ if (cflag & PARODD)
+ info->params.parity = ASYNC_PARITY_ODD;
+ else
+ info->params.parity = ASYNC_PARITY_EVEN;
+#ifdef CMSPAR
+ if (cflag & CMSPAR)
+ info->params.parity = ASYNC_PARITY_SPACE;
+#endif
+ }
+
+ /* calculate number of jiffies to transmit a full
+ * FIFO (32 bytes) at specified data rate
+ */
+ bits_per_char = info->params.data_bits +
+ info->params.stop_bits + 1;
+
+ /* if port data rate is set to 460800 or less then
+ * allow tty settings to override, otherwise keep the
+ * current data rate.
+ */
+ if (info->params.data_rate <= 460800) {
+ info->params.data_rate = tty_get_baud_rate(tty);
+ }
+
+ if ( info->params.data_rate ) {
+ info->timeout = (32*HZ*bits_per_char) /
+ info->params.data_rate;
+ }
+ info->timeout += HZ/50; /* Add .02 seconds of slop */
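+ /* e.g. for 8N1 (bits_per_char = 10) at 9600bps with HZ=100:
+ * 32*100*10/9600 = 3 jiffies, plus 2 jiffies of slop, ~50ms
+ */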
+
+ if (cflag & CRTSCTS)
+ info->port.flags |= ASYNC_CTS_FLOW;
+ else
+ info->port.flags &= ~ASYNC_CTS_FLOW;
+
+ if (cflag & CLOCAL)
+ info->port.flags &= ~ASYNC_CHECK_CD;
+ else
+ info->port.flags |= ASYNC_CHECK_CD;
+
+ /* process tty input control flags */
+
+ info->read_status_mask = 0;
+ if (I_INPCK(tty))
+ info->read_status_mask |= BIT7 | BIT6;
+ if (I_IGNPAR(tty))
+ info->ignore_status_mask |= BIT7 | BIT6;
+
+ mgslpc_program_hw(info, tty);
+}
+
+/* Add a character to the transmit buffer
+ */
+static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO) {
+ printk( "%s(%d):mgslpc_put_char(%d) on %s\n",
+ __FILE__,__LINE__,ch,info->device_name);
+ }
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char"))
+ return 0;
+
+ if (!info->tx_buf)
+ return 0;
+
+ spin_lock_irqsave(&info->lock,flags);
+
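+ /* queue the byte unless an HDLC frame is actively being sent;
+ * indexes wrap via the mask, which assumes TXBUFSIZE is a
+ * power of 2
+ */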
+ if (info->params.mode == MGSL_MODE_ASYNC || !info->tx_active) {
+ if (info->tx_count < TXBUFSIZE - 1) {
+ info->tx_buf[info->tx_put++] = ch;
+ info->tx_put &= TXBUFSIZE-1;
+ info->tx_count++;
+ }
+ }
+
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 1;
+}
+
+/* Enable transmitter so remaining characters in the
+ * transmit buffer are sent.
+ */
+static void mgslpc_flush_chars(struct tty_struct *tty)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk( "%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
+ __FILE__,__LINE__,info->device_name,info->tx_count);
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars"))
+ return;
+
+ if (info->tx_count <= 0 || tty->stopped ||
+ tty->hw_stopped || !info->tx_buf)
+ return;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk( "%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
+ __FILE__,__LINE__,info->device_name);
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_active)
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+/* Send a block of data
+ *
+ * Arguments:
+ *
+ * tty pointer to tty information structure
+ * buf pointer to buffer containing send data
+ * count size of send data in bytes
+ *
+ * Returns: number of characters written
+ */
+static int mgslpc_write(struct tty_struct * tty,
+ const unsigned char *buf, int count)
+{
+ int c, ret = 0;
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk( "%s(%d):mgslpc_write(%s) count=%d\n",
+ __FILE__,__LINE__,info->device_name,count);
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") ||
+ !info->tx_buf)
+ goto cleanup;
+
+ if (info->params.mode == MGSL_MODE_HDLC) {
+ if (count > TXBUFSIZE) {
+ ret = -EIO;
+ goto cleanup;
+ }
+ if (info->tx_active)
+ goto cleanup;
+ else if (info->tx_count)
+ goto start;
+ }
+
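+ /* copy in chunks bounded by free space (one byte is always
+ * left unused) and by the contiguous run before the buffer
+ * wrap point
+ */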
+ for (;;) {
+ c = min(count,
+ min(TXBUFSIZE - info->tx_count - 1,
+ TXBUFSIZE - info->tx_put));
+ if (c <= 0)
+ break;
+
+ memcpy(info->tx_buf + info->tx_put, buf, c);
+
+ spin_lock_irqsave(&info->lock,flags);
+ info->tx_put = (info->tx_put + c) & (TXBUFSIZE-1);
+ info->tx_count += c;
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ buf += c;
+ count -= c;
+ ret += c;
+ }
+start:
+ if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_active)
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+cleanup:
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk( "%s(%d):mgslpc_write(%s) returning=%d\n",
+ __FILE__,__LINE__,info->device_name,ret);
+ return ret;
+}
+
+/* Return the count of free bytes in transmit buffer
+ */
+static int mgslpc_write_room(struct tty_struct *tty)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ int ret;
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write_room"))
+ return 0;
+
+ if (info->params.mode == MGSL_MODE_HDLC) {
+ /* HDLC (frame oriented) mode */
+ if (info->tx_active)
+ return 0;
+ else
+ return HDLC_MAX_FRAME_SIZE;
+ } else {
+ ret = TXBUFSIZE - info->tx_count - 1;
+ if (ret < 0)
+ ret = 0;
+ }
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_write_room(%s)=%d\n",
+ __FILE__,__LINE__, info->device_name, ret);
+ return ret;
+}
+
+/* Return the count of bytes in transmit buffer
+ */
+static int mgslpc_chars_in_buffer(struct tty_struct *tty)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ int rc;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_chars_in_buffer(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_chars_in_buffer"))
+ return 0;
+
+ if (info->params.mode == MGSL_MODE_HDLC)
+ rc = info->tx_active ? info->max_frame_size : 0;
+ else
+ rc = info->tx_count;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n",
+ __FILE__,__LINE__, info->device_name, rc);
+
+ return rc;
+}
+
+/* Discard all data in the send buffer
+ */
+static void mgslpc_flush_buffer(struct tty_struct *tty)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_flush_buffer(%s) entry\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_buffer"))
+ return;
+
+ spin_lock_irqsave(&info->lock,flags);
+ info->tx_count = info->tx_put = info->tx_get = 0;
+ del_timer(&info->tx_timer);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ wake_up_interruptible(&tty->write_wait);
+ tty_wakeup(tty);
+}
+
+/* Send a high-priority XON/XOFF character
+ */
+static void mgslpc_send_xchar(struct tty_struct *tty, char ch)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_send_xchar(%s,%d)\n",
+ __FILE__,__LINE__, info->device_name, ch );
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_send_xchar"))
+ return;
+
+ info->x_char = ch;
+ if (ch) {
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_enabled)
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+}
+
+/* Signal remote device to throttle send data (our receive data)
+ */
+static void mgslpc_throttle(struct tty_struct * tty)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_throttle(%s) entry\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_throttle"))
+ return;
+
+ if (I_IXOFF(tty))
+ mgslpc_send_xchar(tty, STOP_CHAR(tty));
+
+ if (tty->termios->c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock,flags);
+ info->serial_signals &= ~SerialSignal_RTS;
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+}
+
+/* Signal remote device to stop throttling send data (our receive data)
+ */
+static void mgslpc_unthrottle(struct tty_struct * tty)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_unthrottle(%s) entry\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_unthrottle"))
+ return;
+
+ if (I_IXOFF(tty)) {
+ if (info->x_char)
+ info->x_char = 0;
+ else
+ mgslpc_send_xchar(tty, START_CHAR(tty));
+ }
+
+ if (tty->termios->c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock,flags);
+ info->serial_signals |= SerialSignal_RTS;
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+}
+
+/* get the current serial statistics
+ */
+static int get_stats(MGSLPC_INFO * info, struct mgsl_icount __user *user_icount)
+{
+ int err;
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("get_params(%s)\n", info->device_name);
+ if (!user_icount) {
+ memset(&info->icount, 0, sizeof(info->icount));
+ } else {
+ COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
+ if (err)
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/* get the current serial parameters
+ */
+static int get_params(MGSLPC_INFO * info, MGSL_PARAMS __user *user_params)
+{
+ int err;
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("get_params(%s)\n", info->device_name);
+ COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
+ if (err)
+ return -EFAULT;
+ return 0;
+}
+
+/* set the serial parameters
+ *
+ * Arguments:
+ *
+ * info pointer to device instance data
+ * new_params user buffer containing new serial params
+ * tty pointer to tty instance data
+ *
+ * Returns: 0 if success, otherwise error code
+ */
+static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty)
+{
+ unsigned long flags;
+ MGSL_PARAMS tmp_params;
+ int err;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):set_params %s\n", __FILE__,__LINE__,
+ info->device_name );
+ COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
+ if (err) {
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):set_params(%s) user buffer copy failed\n",
+ __FILE__,__LINE__,info->device_name);
+ return -EFAULT;
+ }
+
+ spin_lock_irqsave(&info->lock,flags);
+ memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ mgslpc_change_params(info, tty);
+
+ return 0;
+}
+
+static int get_txidle(MGSLPC_INFO * info, int __user *idle_mode)
+{
+ int err;
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("get_txidle(%s)=%d\n", info->device_name, info->idle_mode);
+ COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
+ if (err)
+ return -EFAULT;
+ return 0;
+}
+
+static int set_txidle(MGSLPC_INFO * info, int idle_mode)
+{
+ unsigned long flags;
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("set_txidle(%s,%d)\n", info->device_name, idle_mode);
+ spin_lock_irqsave(&info->lock,flags);
+ info->idle_mode = idle_mode;
+ tx_set_idle(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+static int get_interface(MGSLPC_INFO * info, int __user *if_mode)
+{
+ int err;
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("get_interface(%s)=%d\n", info->device_name, info->if_mode);
+ COPY_TO_USER(err,if_mode, &info->if_mode, sizeof(int));
+ if (err)
+ return -EFAULT;
+ return 0;
+}
+
+static int set_interface(MGSLPC_INFO * info, int if_mode)
+{
+ unsigned long flags;
+ unsigned char val;
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("set_interface(%s,%d)\n", info->device_name, if_mode);
+ spin_lock_irqsave(&info->lock,flags);
+ info->if_mode = if_mode;
+
+ val = read_reg(info, PVR) & 0x0f;
+ switch (info->if_mode)
+ {
+ case MGSL_INTERFACE_RS232: val |= PVR_RS232; break;
+ case MGSL_INTERFACE_V35: val |= PVR_V35; break;
+ case MGSL_INTERFACE_RS422: val |= PVR_RS422; break;
+ }
+ write_reg(info, PVR, val);
+
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("set_txenable(%s,%d)\n", info->device_name, enable);
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (enable) {
+ if (!info->tx_enabled)
+ tx_start(info, tty);
+ } else {
+ if (info->tx_enabled)
+ tx_stop(info);
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+static int tx_abort(MGSLPC_INFO * info)
+{
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("tx_abort(%s)\n", info->device_name);
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (info->tx_active && info->tx_count &&
+ info->params.mode == MGSL_MODE_HDLC) {
+ /* clear data count so the FIFO is not filled on the next IRQ.
+ * This forces an underrun, which aborts the transmission.
+ */
+ info->tx_count = info->tx_put = info->tx_get = 0;
+ info->tx_aborting = true;
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+static int set_rxenable(MGSLPC_INFO * info, int enable)
+{
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("set_rxenable(%s,%d)\n", info->device_name, enable);
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (enable) {
+ if (!info->rx_enabled)
+ rx_start(info);
+ } else {
+ if (info->rx_enabled)
+ rx_stop(info);
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+/* wait for specified event to occur
+ *
+ * Arguments: info pointer to device instance data
+ * mask pointer to bitmask of events to wait for
+ * Return Value: 0 if successful, with the bit mask updated to
+ * the set of events that triggered,
+ * otherwise error code
+ */
+static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
+{
+ unsigned long flags;
+ int s;
+ int rc=0;
+ struct mgsl_icount cprev, cnow;
+ int events;
+ int mask;
+ struct _input_signal_events oldsigs, newsigs;
+ DECLARE_WAITQUEUE(wait, current);
+
+ COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
+ if (rc)
+ return -EFAULT;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("wait_events(%s,%d)\n", info->device_name, mask);
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ /* return immediately if state matches requested events */
+ get_signals(info);
+ s = info->serial_signals;
+ events = mask &
+ ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
+ ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
+ ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
+ ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
+ if (events) {
+ spin_unlock_irqrestore(&info->lock,flags);
+ goto exit;
+ }
+
+ /* save current irq counts */
+ cprev = info->icount;
+ oldsigs = info->input_signal_events;
+
+ if ((info->params.mode == MGSL_MODE_HDLC) &&
+ (mask & MgslEvent_ExitHuntMode))
+ irq_enable(info, CHA, IRQ_EXITHUNT);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&info->event_wait_q, &wait);
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+
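+ /* standard wait-queue loop: TASK_INTERRUPTIBLE is set before
+ * each schedule() so a wakeup arriving between the checks and
+ * the sleep is not lost
+ */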
+ for(;;) {
+ schedule();
+ if (signal_pending(current)) {
+ rc = -ERESTARTSYS;
+ break;
+ }
+
+ /* get current irq counts */
+ spin_lock_irqsave(&info->lock,flags);
+ cnow = info->icount;
+ newsigs = info->input_signal_events;
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ /* if no change, the wait was aborted for some other reason */
+ if (newsigs.dsr_up == oldsigs.dsr_up &&
+ newsigs.dsr_down == oldsigs.dsr_down &&
+ newsigs.dcd_up == oldsigs.dcd_up &&
+ newsigs.dcd_down == oldsigs.dcd_down &&
+ newsigs.cts_up == oldsigs.cts_up &&
+ newsigs.cts_down == oldsigs.cts_down &&
+ newsigs.ri_up == oldsigs.ri_up &&
+ newsigs.ri_down == oldsigs.ri_down &&
+ cnow.exithunt == cprev.exithunt &&
+ cnow.rxidle == cprev.rxidle) {
+ rc = -EIO;
+ break;
+ }
+
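+ /* translate each counter that advanced into its event bit,
+ * keeping only the events the caller asked for
+ */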
+ events = mask &
+ ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
+ (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
+ (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
+ (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
+ (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
+ (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
+ (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
+ (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
+ (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
+ (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
+ if (events)
+ break;
+
+ cprev = cnow;
+ oldsigs = newsigs;
+ }
+
+ remove_wait_queue(&info->event_wait_q, &wait);
+ set_current_state(TASK_RUNNING);
+
+ if (mask & MgslEvent_ExitHuntMode) {
+ spin_lock_irqsave(&info->lock,flags);
+ if (!waitqueue_active(&info->event_wait_q))
+ irq_disable(info, CHA, IRQ_EXITHUNT);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+exit:
+ if (rc == 0)
+ PUT_USER(rc, events, mask_ptr);
+ return rc;
+}
+
+static int modem_input_wait(MGSLPC_INFO *info,int arg)
+{
+ unsigned long flags;
+ int rc;
+ struct mgsl_icount cprev, cnow;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /* save current irq counts */
+ spin_lock_irqsave(&info->lock,flags);
+ cprev = info->icount;
+ add_wait_queue(&info->status_event_wait_q, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ for(;;) {
+ schedule();
+ if (signal_pending(current)) {
+ rc = -ERESTARTSYS;
+ break;
+ }
+
+ /* get new irq counts */
+ spin_lock_irqsave(&info->lock,flags);
+ cnow = info->icount;
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ /* if no change, the wait was aborted for some other reason */
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
+ rc = -EIO;
+ break;
+ }
+
+ /* check for change in caller specified modem input */
+ if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
+ (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
+ (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
+ (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
+ rc = 0;
+ break;
+ }
+
+ cprev = cnow;
+ }
+ remove_wait_queue(&info->status_event_wait_q, &wait);
+ set_current_state(TASK_RUNNING);
+ return rc;
+}
+
+/* return the state of the serial control and status signals
+ */
+static int tiocmget(struct tty_struct *tty)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
+ ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
+ ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
+ ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
+ ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
+ ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s tiocmget() value=%08X\n",
+ __FILE__,__LINE__, info->device_name, result );
+ return result;
+}
+
+/* set modem control signals (DTR/RTS)
+ */
+static int tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s tiocmset(%x,%x)\n",
+ __FILE__,__LINE__,info->device_name, set, clear);
+
+ if (set & TIOCM_RTS)
+ info->serial_signals |= SerialSignal_RTS;
+ if (set & TIOCM_DTR)
+ info->serial_signals |= SerialSignal_DTR;
+ if (clear & TIOCM_RTS)
+ info->serial_signals &= ~SerialSignal_RTS;
+ if (clear & TIOCM_DTR)
+ info->serial_signals &= ~SerialSignal_DTR;
+
+ spin_lock_irqsave(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ return 0;
+}
+
+/* Set or clear transmit break condition
+ *
+ * Arguments: tty pointer to tty instance data
+ * break_state -1=set break condition, 0=clear
+ */
+static int mgslpc_break(struct tty_struct *tty, int break_state)
+{
+ MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_break(%s,%d)\n",
+ __FILE__,__LINE__, info->device_name, break_state);
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_break"))
+ return -EINVAL;
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (break_state == -1)
+ set_reg_bits(info, CHA+DAFO, BIT6);
+ else
+ clear_reg_bits(info, CHA+DAFO, BIT6);
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+static int mgslpc_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+{
+ MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
+ struct mgsl_icount cnow; /* kernel counter temps */
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+ cnow = info->icount;
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ icount->cts = cnow.cts;
+ icount->dsr = cnow.dsr;
+ icount->rng = cnow.rng;
+ icount->dcd = cnow.dcd;
+ icount->rx = cnow.rx;
+ icount->tx = cnow.tx;
+ icount->frame = cnow.frame;
+ icount->overrun = cnow.overrun;
+ icount->parity = cnow.parity;
+ icount->brk = cnow.brk;
+ icount->buf_overrun = cnow.buf_overrun;
+
+ return 0;
+}
+
+/* Service an IOCTL request
+ *
+ * Arguments:
+ *
+ * tty pointer to tty instance data
+ * cmd IOCTL command code
+ * arg command argument/context
+ *
+ * Return Value: 0 if success, otherwise error code
+ */
+static int mgslpc_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
+ void __user *argp = (void __user *)arg;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
+ info->device_name, cmd );
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl"))
+ return -ENODEV;
+
+ if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+ (cmd != TIOCMIWAIT)) {
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case MGSL_IOCGPARAMS:
+ return get_params(info, argp);
+ case MGSL_IOCSPARAMS:
+ return set_params(info, argp, tty);
+ case MGSL_IOCGTXIDLE:
+ return get_txidle(info, argp);
+ case MGSL_IOCSTXIDLE:
+ return set_txidle(info, (int)arg);
+ case MGSL_IOCGIF:
+ return get_interface(info, argp);
+ case MGSL_IOCSIF:
+ return set_interface(info,(int)arg);
+ case MGSL_IOCTXENABLE:
+ return set_txenable(info,(int)arg, tty);
+ case MGSL_IOCRXENABLE:
+ return set_rxenable(info,(int)arg);
+ case MGSL_IOCTXABORT:
+ return tx_abort(info);
+ case MGSL_IOCGSTATS:
+ return get_stats(info, argp);
+ case MGSL_IOCWAITEVENT:
+ return wait_events(info, argp);
+ case TIOCMIWAIT:
+ return modem_input_wait(info,(int)arg);
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+/* Set new termios settings
+ *
+ * Arguments:
+ *
+ * tty pointer to tty structure
+ * old_termios pointer to the termios settings in effect before the change
+ */
+static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_set_termios %s\n", __FILE__,__LINE__,
+ tty->driver->name );
+
+ /* just return if nothing has changed */
+ if ((tty->termios->c_cflag == old_termios->c_cflag)
+ && (RELEVANT_IFLAG(tty->termios->c_iflag)
+ == RELEVANT_IFLAG(old_termios->c_iflag)))
+ return;
+
+ mgslpc_change_params(info, tty);
+
+ /* Handle transition to B0 status */
+ if (old_termios->c_cflag & CBAUD &&
+ !(tty->termios->c_cflag & CBAUD)) {
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ spin_lock_irqsave(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+
+ /* Handle transition away from B0 status */
+ if (!(old_termios->c_cflag & CBAUD) &&
+ tty->termios->c_cflag & CBAUD) {
+ info->serial_signals |= SerialSignal_DTR;
+ if (!(tty->termios->c_cflag & CRTSCTS) ||
+ !test_bit(TTY_THROTTLED, &tty->flags)) {
+ info->serial_signals |= SerialSignal_RTS;
+ }
+ spin_lock_irqsave(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+
+ /* Handle turning off CRTSCTS */
+ if (old_termios->c_cflag & CRTSCTS &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ tx_release(tty);
+ }
+}
+
+static void mgslpc_close(struct tty_struct *tty, struct file * filp)
+{
+ MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
+ struct tty_port *port = &info->port;
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_close"))
+ return;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
+ __FILE__,__LINE__, info->device_name, port->count);
+
+ WARN_ON(!port->count);
+
+ if (tty_port_close_start(port, tty, filp) == 0)
+ goto cleanup;
+
+ if (port->flags & ASYNC_INITIALIZED)
+ mgslpc_wait_until_sent(tty, info->timeout);
+
+ mgslpc_flush_buffer(tty);
+
+ tty_ldisc_flush(tty);
+ shutdown(info, tty);
+
+ tty_port_close_end(port, tty);
+ tty_port_tty_set(port, NULL);
+cleanup:
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
+ tty->driver->name, port->count);
+}
+
+/* Wait until the transmitter is empty.
+ */
+static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
+ unsigned long orig_jiffies, char_time;
+
+ if (!info )
+ return;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_wait_until_sent(%s) entry\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent"))
+ return;
+
+ if (!(info->port.flags & ASYNC_INITIALIZED))
+ goto exit;
+
+ orig_jiffies = jiffies;
+
+ /* Set check interval to 1/5 of estimated time to
+ * send a character, and make it at least 1. The check
+ * interval should also be less than the timeout.
+ * Note: use tight timings here to satisfy the NIST-PCTS.
+ */
+
+ if ( info->params.data_rate ) {
+ char_time = info->timeout/(32 * 5);
+ if (!char_time)
+ char_time++;
+ } else
+ char_time = 1;
+
+ if (timeout)
+ char_time = min_t(unsigned long, char_time, timeout);
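+ /* e.g. the ~5 jiffy 9600bps timeout computed in
+ * mgslpc_change_params() gives 5/160 = 0, so char_time is
+ * bumped up to 1 jiffy
+ */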
+
+ if (info->params.mode == MGSL_MODE_HDLC) {
+ while (info->tx_active) {
+ msleep_interruptible(jiffies_to_msecs(char_time));
+ if (signal_pending(current))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ break;
+ }
+ } else {
+ while ((info->tx_count || info->tx_active) &&
+ info->tx_enabled) {
+ msleep_interruptible(jiffies_to_msecs(char_time));
+ if (signal_pending(current))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ break;
+ }
+ }
+
+exit:
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_wait_until_sent(%s) exit\n",
+ __FILE__,__LINE__, info->device_name );
+}
+
+/* Called by tty_hangup() when a hangup is signaled.
+ * This is the same as closing all open files for the port.
+ */
+static void mgslpc_hangup(struct tty_struct *tty)
+{
+ MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_hangup(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_hangup"))
+ return;
+
+ mgslpc_flush_buffer(tty);
+ shutdown(info, tty);
+ tty_port_hangup(&info->port);
+}
+
+static int carrier_raised(struct tty_port *port)
+{
+ MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ if (info->serial_signals & SerialSignal_DCD)
+ return 1;
+ return 0;
+}
+
+static void dtr_rts(struct tty_port *port, int onoff)
+{
+ MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (onoff)
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+
+static int mgslpc_open(struct tty_struct *tty, struct file * filp)
+{
+ MGSLPC_INFO *info;
+ struct tty_port *port;
+ int retval, line;
+ unsigned long flags;
+
+ /* verify range of specified line number */
+ line = tty->index;
+ if ((line < 0) || (line >= mgslpc_device_count)) {
+ printk("%s(%d):mgslpc_open with invalid line #%d.\n",
+ __FILE__,__LINE__,line);
+ return -ENODEV;
+ }
+
+ /* find the info structure for the specified line */
+ info = mgslpc_device_list;
+ while(info && info->line != line)
+ info = info->next_device;
+ if (mgslpc_paranoia_check(info, tty->name, "mgslpc_open"))
+ return -ENODEV;
+
+ port = &info->port;
+ tty->driver_data = info;
+ tty_port_tty_set(port, tty);
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
+ __FILE__,__LINE__,tty->driver->name, port->count);
+
+ /* If port is closing, signal caller to try again */
+ if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
+ if (port->flags & ASYNC_CLOSING)
+ interruptible_sleep_on(&port->close_wait);
+ retval = ((port->flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS);
+ goto cleanup;
+ }
+
+ tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
+ spin_lock_irqsave(&info->netlock, flags);
+ if (info->netcount) {
+ retval = -EBUSY;
+ spin_unlock_irqrestore(&info->netlock, flags);
+ goto cleanup;
+ }
+ spin_lock(&port->lock);
+ port->count++;
+ spin_unlock(&port->lock);
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ if (port->count == 1) {
+ /* 1st open on this device, init hardware */
+ retval = startup(info, tty);
+ if (retval < 0)
+ goto cleanup;
+ }
+
+ retval = tty_port_block_til_ready(&info->port, tty, filp);
+ if (retval) {
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):block_til_ready(%s) returned %d\n",
+ __FILE__,__LINE__, info->device_name, retval);
+ goto cleanup;
+ }
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgslpc_open(%s) success\n",
+ __FILE__,__LINE__, info->device_name);
+ retval = 0;
+
+cleanup:
+ return retval;
+}
+
+/*
+ * /proc fs routines....
+ */
+
+static inline void line_info(struct seq_file *m, MGSLPC_INFO *info)
+{
+ char stat_buf[30];
+ unsigned long flags;
+
+ seq_printf(m, "%s:io:%04X irq:%d",
+ info->device_name, info->io_base, info->irq_level);
+
+ /* output current serial signal states */
+ spin_lock_irqsave(&info->lock,flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ stat_buf[0] = 0;
+ stat_buf[1] = 0;
+ if (info->serial_signals & SerialSignal_RTS)
+ strcat(stat_buf, "|RTS");
+ if (info->serial_signals & SerialSignal_CTS)
+ strcat(stat_buf, "|CTS");
+ if (info->serial_signals & SerialSignal_DTR)
+ strcat(stat_buf, "|DTR");
+ if (info->serial_signals & SerialSignal_DSR)
+ strcat(stat_buf, "|DSR");
+ if (info->serial_signals & SerialSignal_DCD)
+ strcat(stat_buf, "|CD");
+ if (info->serial_signals & SerialSignal_RI)
+ strcat(stat_buf, "|RI");
+
+ if (info->params.mode == MGSL_MODE_HDLC) {
+ seq_printf(m, " HDLC txok:%d rxok:%d",
+ info->icount.txok, info->icount.rxok);
+ if (info->icount.txunder)
+ seq_printf(m, " txunder:%d", info->icount.txunder);
+ if (info->icount.txabort)
+ seq_printf(m, " txabort:%d", info->icount.txabort);
+ if (info->icount.rxshort)
+ seq_printf(m, " rxshort:%d", info->icount.rxshort);
+ if (info->icount.rxlong)
+ seq_printf(m, " rxlong:%d", info->icount.rxlong);
+ if (info->icount.rxover)
+ seq_printf(m, " rxover:%d", info->icount.rxover);
+ if (info->icount.rxcrc)
+ seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
+ } else {
+ seq_printf(m, " ASYNC tx:%d rx:%d",
+ info->icount.tx, info->icount.rx);
+ if (info->icount.frame)
+ seq_printf(m, " fe:%d", info->icount.frame);
+ if (info->icount.parity)
+ seq_printf(m, " pe:%d", info->icount.parity);
+ if (info->icount.brk)
+ seq_printf(m, " brk:%d", info->icount.brk);
+ if (info->icount.overrun)
+ seq_printf(m, " oe:%d", info->icount.overrun);
+ }
+
+ /* Append serial signal status to end */
+ seq_printf(m, " %s\n", stat_buf+1);
+
+ seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
+ info->tx_active,info->bh_requested,info->bh_running,
+ info->pending_bh);
+}
+
+/* Called to print information about devices
+ */
+static int mgslpc_proc_show(struct seq_file *m, void *v)
+{
+ MGSLPC_INFO *info;
+
+ seq_printf(m, "synclink driver:%s\n", driver_version);
+
+ info = mgslpc_device_list;
+ while( info ) {
+ line_info(m, info);
+ info = info->next_device;
+ }
+ return 0;
+}
+
+static int mgslpc_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mgslpc_proc_show, NULL);
+}
+
+static const struct file_operations mgslpc_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = mgslpc_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int rx_alloc_buffers(MGSLPC_INFO *info)
+{
+ /* each buffer has header and data */
+ info->rx_buf_size = sizeof(RXBUF) + info->max_frame_size;
+
+ /* calculate total allocation size for 8 buffers */
+ info->rx_buf_total_size = info->rx_buf_size * 8;
+
+ /* limit total allocated memory */
+ if (info->rx_buf_total_size > 0x10000)
+ info->rx_buf_total_size = 0x10000;
+
+ /* calculate number of buffers */
+ info->rx_buf_count = info->rx_buf_total_size / info->rx_buf_size;
+
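+ /* e.g. with the default 4096 byte max frame size each buffer
+ * is just over 4KB and eight total roughly 32KB, well under
+ * the 64KB cap, so rx_buf_count stays at 8
+ */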
+ info->rx_buf = kmalloc(info->rx_buf_total_size, GFP_KERNEL);
+ if (info->rx_buf == NULL)
+ return -ENOMEM;
+
+ rx_reset_buffers(info);
+ return 0;
+}
+
+static void rx_free_buffers(MGSLPC_INFO *info)
+{
+ kfree(info->rx_buf);
+ info->rx_buf = NULL;
+}
+
+static int claim_resources(MGSLPC_INFO *info)
+{
+ if (rx_alloc_buffers(info) < 0 ) {
+ printk( "Can't allocate rx buffer %s\n", info->device_name);
+ release_resources(info);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void release_resources(MGSLPC_INFO *info)
+{
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("release_resources(%s)\n", info->device_name);
+ rx_free_buffers(info);
+}
+
+/* Add the specified device instance data structure to the
+ * global linked list of devices and increment the device count.
+ *
+ * Arguments: info pointer to device instance data
+ */
+static void mgslpc_add_device(MGSLPC_INFO *info)
+{
+ info->next_device = NULL;
+ info->line = mgslpc_device_count;
+ sprintf(info->device_name,"ttySLP%d",info->line);
+
+ if (info->line < MAX_DEVICE_COUNT) {
+ if (maxframe[info->line])
+ info->max_frame_size = maxframe[info->line];
+ }
+
+ mgslpc_device_count++;
+
+ if (!mgslpc_device_list)
+ mgslpc_device_list = info;
+ else {
+ MGSLPC_INFO *current_dev = mgslpc_device_list;
+ while( current_dev->next_device )
+ current_dev = current_dev->next_device;
+ current_dev->next_device = info;
+ }
+
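+ /* clamp the (possibly maxframe[] supplied) frame size to the
+ * supported 4096..65535 byte range
+ */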
+ if (info->max_frame_size < 4096)
+ info->max_frame_size = 4096;
+ else if (info->max_frame_size > 65535)
+ info->max_frame_size = 65535;
+
+ printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n",
+ info->device_name, info->io_base, info->irq_level);
+
+#if SYNCLINK_GENERIC_HDLC
+ hdlcdev_init(info);
+#endif
+}
+
+static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
+{
+ MGSLPC_INFO *info = mgslpc_device_list;
+ MGSLPC_INFO *last = NULL;
+
+ while(info) {
+ if (info == remove_info) {
+ if (last)
+ last->next_device = info->next_device;
+ else
+ mgslpc_device_list = info->next_device;
+#if SYNCLINK_GENERIC_HDLC
+ hdlcdev_exit(info);
+#endif
+ release_resources(info);
+ kfree(info);
+ mgslpc_device_count--;
+ return;
+ }
+ last = info;
+ info = info->next_device;
+ }
+}
+
+static const struct pcmcia_device_id mgslpc_ids[] = {
+ PCMCIA_DEVICE_MANF_CARD(0x02c5, 0x0050),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, mgslpc_ids);
+
+static struct pcmcia_driver mgslpc_driver = {
+ .owner = THIS_MODULE,
+ .name = "synclink_cs",
+ .probe = mgslpc_probe,
+ .remove = mgslpc_detach,
+ .id_table = mgslpc_ids,
+ .suspend = mgslpc_suspend,
+ .resume = mgslpc_resume,
+};
+
+static const struct tty_operations mgslpc_ops = {
+ .open = mgslpc_open,
+ .close = mgslpc_close,
+ .write = mgslpc_write,
+ .put_char = mgslpc_put_char,
+ .flush_chars = mgslpc_flush_chars,
+ .write_room = mgslpc_write_room,
+ .chars_in_buffer = mgslpc_chars_in_buffer,
+ .flush_buffer = mgslpc_flush_buffer,
+ .ioctl = mgslpc_ioctl,
+ .throttle = mgslpc_throttle,
+ .unthrottle = mgslpc_unthrottle,
+ .send_xchar = mgslpc_send_xchar,
+ .break_ctl = mgslpc_break,
+ .wait_until_sent = mgslpc_wait_until_sent,
+ .set_termios = mgslpc_set_termios,
+ .stop = tx_pause,
+ .start = tx_release,
+ .hangup = mgslpc_hangup,
+ .tiocmget = tiocmget,
+ .tiocmset = tiocmset,
+ .get_icount = mgslpc_get_icount,
+ .proc_fops = &mgslpc_proc_fops,
+};
+
+static void synclink_cs_cleanup(void)
+{
+ int rc;
+
+ while(mgslpc_device_list)
+ mgslpc_remove_device(mgslpc_device_list);
+
+ if (serial_driver) {
+ if ((rc = tty_unregister_driver(serial_driver)))
+ printk("%s(%d) failed to unregister tty driver err=%d\n",
+ __FILE__,__LINE__,rc);
+ put_tty_driver(serial_driver);
+ }
+
+ pcmcia_unregister_driver(&mgslpc_driver);
+}
+
+static int __init synclink_cs_init(void)
+{
+ int rc;
+
+ if (break_on_load) {
+ mgslpc_get_text_ptr();
+ BREAKPOINT();
+ }
+
+ if ((rc = pcmcia_register_driver(&mgslpc_driver)) < 0)
+ return rc;
+
+ serial_driver = alloc_tty_driver(MAX_DEVICE_COUNT);
+ if (!serial_driver) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ /* Initialize the tty_driver structure */
+
+ serial_driver->owner = THIS_MODULE;
+ serial_driver->driver_name = "synclink_cs";
+ serial_driver->name = "ttySLP";
+ serial_driver->major = ttymajor;
+ serial_driver->minor_start = 64;
+ serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ serial_driver->init_termios = tty_std_termios;
+ serial_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ serial_driver->flags = TTY_DRIVER_REAL_RAW;
+ tty_set_operations(serial_driver, &mgslpc_ops);
+
+ if ((rc = tty_register_driver(serial_driver)) < 0) {
+ printk("%s(%d):Couldn't register serial driver\n",
+ __FILE__,__LINE__);
+ put_tty_driver(serial_driver);
+ serial_driver = NULL;
+ goto error;
+ }
+
+ printk("%s %s, tty major#%d\n",
+ driver_name, driver_version,
+ serial_driver->major);
+
+ return 0;
+
+error:
+ synclink_cs_cleanup();
+ return rc;
+}
+
+static void __exit synclink_cs_exit(void)
+{
+ synclink_cs_cleanup();
+}
+
+module_init(synclink_cs_init);
+module_exit(synclink_cs_exit);
+
+static void mgslpc_set_rate(MGSLPC_INFO *info, unsigned char channel, unsigned int rate)
+{
+ unsigned int M, N;
+ unsigned char val;
+
+ /* note: standard BRG mode is broken in the V3.2 chip,
+ * so enhanced mode is always used
+ */
+
+ if (rate) {
+ N = 3686400 / rate;
+ if (!N)
+ N = 1;
+ N >>= 1;
+ for (M = 1; N > 64 && M < 16; M++)
+ N >>= 1;
+ N--;
+
+ /* BGR[5..0] = N
+ * BGR[9..6] = M
+ * BGR[7..0] contained in BGR register
+ * BGR[9..8] contained in CCR2[7..6]
+ * divisor = (N+1)*2^M
+ *
+ * Note: M *must* not be zero (causes asymmetric duty cycle)
+ */
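+ /* e.g. rate=9600: N=3686400/9600=384 is halved to 192 by the
+ * initial shift; two more shifts leave N=48 with M=3, and N--
+ * gives 47, so (47+1)*2^3 = 384 recovers the full divisor
+ */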
+ write_reg(info, (unsigned char) (channel + BGR),
+ (unsigned char) ((M << 6) + N));
+ val = read_reg(info, (unsigned char) (channel + CCR2)) & 0x3f;
+ val |= ((M << 4) & 0xc0);
+ write_reg(info, (unsigned char) (channel + CCR2), val);
+ }
+}
+
+/* Enable the AUX clock output at the specified frequency.
+ */
+static void enable_auxclk(MGSLPC_INFO *info)
+{
+ unsigned char val;
+
+ /* MODE
+ *
+ * 07..06 MDS[1..0] 10 = transparent HDLC mode
+ * 05 ADM Address Mode, 0 = no addr recognition
+ * 04 TMD Timer Mode, 0 = external
+ * 03 RAC Receiver Active, 0 = inactive
+ * 02 RTS 0=RTS active during xmit, 1=RTS always active
+ * 01 TRS Timer Resolution, 1=512
+ * 00 TLP Test Loop, 0 = no loop
+ *
+ * 1000 0010
+ */
+ val = 0x82;
+
+ /* channel B RTS is used to enable AUXCLK driver on SP505 */
+ if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed)
+ val |= BIT2;
+ write_reg(info, CHB + MODE, val);
+
+ /* CCR0
+ *
+ * 07 PU Power Up, 1=active, 0=power down
+ * 06 MCE Master Clock Enable, 1=enabled
+ * 05 Reserved, 0
+ * 04..02 SC[2..0] Encoding
+ * 01..00 SM[1..0] Serial Mode, 00=HDLC
+ *
+ * 11000000
+ */
+ write_reg(info, CHB + CCR0, 0xc0);
+
+ /* CCR1
+ *
+ * 07 SFLG Shared Flag, 0 = disable shared flags
+ * 06 GALP Go Active On Loop, 0 = not used
+ * 05 GLP Go On Loop, 0 = not used
+ * 04 ODS Output Driver Select, 1=TxD is push-pull output
+ * 03 ITF Interframe Time Fill, 0=mark, 1=flag
+ * 02..00 CM[2..0] Clock Mode
+ *
+ * 0001 0111
+ */
+ write_reg(info, CHB + CCR1, 0x17);
+
+ /* CCR2 (Channel B)
+ *
+ * 07..06 BGR[9..8] Baud rate bits 9..8
+ * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value
+ * 04 SSEL Clock source select, 1=submode b
+ * 03 TOE 0=TxCLK is input, 1=TxCLK is output
+ * 02 RWX Read/Write Exchange 0=disabled
+ * 01 C32, CRC select, 0=CRC-16, 1=CRC-32
+ * 00 DIV, data inversion 0=disabled, 1=enabled
+ *
+ * 0011 1000
+ */
+ if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed)
+ write_reg(info, CHB + CCR2, 0x38);
+ else
+ write_reg(info, CHB + CCR2, 0x30);
+
+ /* CCR4
+ *
+ * 07 MCK4 Master Clock Divide by 4, 1=enabled
+ * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled
+ * 05 TST1 Test Pin, 0=normal operation
+ * 04 ICD Invert Carrier Detect, 1=enabled (active low)
+ * 03..02 Reserved, must be 0
+ * 01..00 RFT[1..0] RxFIFO Threshold 00=32 bytes
+ *
+ * 0101 0000
+ */
+ write_reg(info, CHB + CCR4, 0x50);
+
+ /* if auxclk not enabled, set internal BRG so
+ * CTS transitions can be detected (requires TxC)
+ */
+ if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed)
+ mgslpc_set_rate(info, CHB, info->params.clock_speed);
+ else
+ mgslpc_set_rate(info, CHB, 921600);
+}
+
+static void loopback_enable(MGSLPC_INFO *info)
+{
+ unsigned char val;
+
+ /* CCR1:02..00 CM[2..0] Clock Mode = 111 (clock mode 7) */
+ val = read_reg(info, CHA + CCR1) | (BIT2 + BIT1 + BIT0);
+ write_reg(info, CHA + CCR1, val);
+
+ /* CCR2:04 SSEL Clock source select, 1=submode b */
+ val = read_reg(info, CHA + CCR2) | (BIT4 + BIT5);
+ write_reg(info, CHA + CCR2, val);
+
+ /* set LinkSpeed if available, otherwise default to 1.8432Mbps */
+ if (info->params.clock_speed)
+ mgslpc_set_rate(info, CHA, info->params.clock_speed);
+ else
+ mgslpc_set_rate(info, CHA, 1843200);
+
+ /* MODE:00 TLP Test Loop, 1=loopback enabled */
+ val = read_reg(info, CHA + MODE) | BIT0;
+ write_reg(info, CHA + MODE, val);
+}
+
+static void hdlc_mode(MGSLPC_INFO *info)
+{
+ unsigned char val;
+ unsigned char clkmode, clksubmode;
+
+ /* disable all interrupts */
+ irq_disable(info, CHA, 0xffff);
+ irq_disable(info, CHB, 0xffff);
+ port_irq_disable(info, 0xff);
+
+ /* assume clock mode 0a, rcv=RxC xmt=TxC */
+ clkmode = clksubmode = 0;
+ if (info->params.flags & HDLC_FLAG_RXC_DPLL
+ && info->params.flags & HDLC_FLAG_TXC_DPLL) {
+ /* clock mode 7a, rcv = DPLL, xmt = DPLL */
+ clkmode = 7;
+ } else if (info->params.flags & HDLC_FLAG_RXC_BRG
+ && info->params.flags & HDLC_FLAG_TXC_BRG) {
+ /* clock mode 7b, rcv = BRG, xmt = BRG */
+ clkmode = 7;
+ clksubmode = 1;
+ } else if (info->params.flags & HDLC_FLAG_RXC_DPLL) {
+ if (info->params.flags & HDLC_FLAG_TXC_BRG) {
+ /* clock mode 6b, rcv = DPLL, xmt = BRG/16 */
+ clkmode = 6;
+ clksubmode = 1;
+ } else {
+ /* clock mode 6a, rcv = DPLL, xmt = TxC */
+ clkmode = 6;
+ }
+ } else if (info->params.flags & HDLC_FLAG_TXC_BRG) {
+ /* clock mode 0b, rcv = RxC, xmt = BRG */
+ clksubmode = 1;
+ }
+
+ /* MODE
+ *
+ * 07..06 MDS[1..0] 10 = transparent HDLC mode
+ * 05 ADM Address Mode, 0 = no addr recognition
+ * 04 TMD Timer Mode, 0 = external
+ * 03 RAC Receiver Active, 0 = inactive
+ * 02 RTS 0=RTS active during xmit, 1=RTS always active
+ * 01 TRS Timer Resolution, 1=512
+ * 00 TLP Test Loop, 0 = no loop
+ *
+ * 1000 0010
+ */
+ val = 0x82;
+ if (info->params.loopback)
+ val |= BIT0;
+
+ /* preserve RTS state */
+ if (info->serial_signals & SerialSignal_RTS)
+ val |= BIT2;
+ write_reg(info, CHA + MODE, val);
+
+ /* CCR0
+ *
+ * 07 PU Power Up, 1=active, 0=power down
+ * 06 MCE Master Clock Enable, 1=enabled
+ * 05 Reserved, 0
+ * 04..02 SC[2..0] Encoding
+ * 01..00 SM[1..0] Serial Mode, 00=HDLC
+ *
+ * 11000000
+ */
+ val = 0xc0;
+ switch (info->params.encoding)
+ {
+ case HDLC_ENCODING_NRZI:
+ val |= BIT3;
+ break;
+ case HDLC_ENCODING_BIPHASE_SPACE:
+ val |= BIT4;
+ break; // FM0
+ case HDLC_ENCODING_BIPHASE_MARK:
+ val |= BIT4 + BIT2;
+ break; // FM1
+ case HDLC_ENCODING_BIPHASE_LEVEL:
+ val |= BIT4 + BIT3;
+ break; // Manchester
+ }
+ write_reg(info, CHA + CCR0, val);
+
+ /* CCR1
+ *
+ * 07 SFLG Shared Flag, 0 = disable shared flags
+ * 06 GALP Go Active On Loop, 0 = not used
+ * 05 GLP Go On Loop, 0 = not used
+ * 04 ODS Output Driver Select, 1=TxD is push-pull output
+ * 03 ITF Interframe Time Fill, 0=mark, 1=flag
+ * 02..00 CM[2..0] Clock Mode
+ *
+ * 0001 0000
+ */
+ val = 0x10 + clkmode;
+ write_reg(info, CHA + CCR1, val);
+
+ /* CCR2
+ *
+ * 07..06 BGR[9..8] Baud rate bits 9..8
+ * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value
+ * 04 SSEL Clock source select, 1=submode b
+ * 03 TOE 0=TxCLK is input, 1=TxCLK is output
+ * 02 RWX Read/Write Exchange 0=disabled
+ * 01 C32, CRC select, 0=CRC-16, 1=CRC-32
+ * 00 DIV, data inversion 0=disabled, 1=enabled
+ *
+ * 0000 0000
+ */
+ val = 0x00;
+ if (clkmode == 2 || clkmode == 3 || clkmode == 6
+ || clkmode == 7 || (clkmode == 0 && clksubmode == 1))
+ val |= BIT5;
+ if (clksubmode)
+ val |= BIT4;
+ if (info->params.crc_type == HDLC_CRC_32_CCITT)
+ val |= BIT1;
+ if (info->params.encoding == HDLC_ENCODING_NRZB)
+ val |= BIT0;
+ write_reg(info, CHA + CCR2, val);
+
+ /* CCR3
+ *
+ * 07..06 PRE[1..0] Preamble count 00=1, 01=2, 10=4, 11=8
+ * 05 EPT Enable preamble transmission, 1=enabled
+ * 04 RADD Receive address pushed to FIFO, 0=disabled
+ * 03 CRL CRC Reset Level, 0=FFFF
+ * 02 RCRC Rx CRC 0=On 1=Off
+ * 01 TCRC Tx CRC 0=On 1=Off
+ * 00 PSD DPLL Phase Shift Disable
+ *
+ * 0000 0000
+ */
+ val = 0x00;
+ if (info->params.crc_type == HDLC_CRC_NONE)
+ val |= BIT2 + BIT1;
+ if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE)
+ val |= BIT5;
+ switch (info->params.preamble_length)
+ {
+ case HDLC_PREAMBLE_LENGTH_16BITS:
+ val |= BIT6;
+ break;
+ case HDLC_PREAMBLE_LENGTH_32BITS:
+ val |= BIT7;
+ break;
+ case HDLC_PREAMBLE_LENGTH_64BITS:
+ val |= BIT7 + BIT6;
+ break;
+ }
+ write_reg(info, CHA + CCR3, val);
+
+ /* PRE - Preamble pattern */
+ val = 0;
+ switch (info->params.preamble)
+ {
+ case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break;
+ case HDLC_PREAMBLE_PATTERN_10: val = 0xaa; break;
+ case HDLC_PREAMBLE_PATTERN_01: val = 0x55; break;
+ case HDLC_PREAMBLE_PATTERN_ONES: val = 0xff; break;
+ }
+ write_reg(info, CHA + PRE, val);
+
+ /* CCR4
+ *
+ * 07 MCK4 Master Clock Divide by 4, 1=enabled
+ * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled
+ * 05 TST1 Test Pin, 0=normal operation
+ * 04 ICD Invert Carrier Detect, 1=enabled (active low)
+ * 03..02 Reserved, must be 0
+ * 01..00 RFT[1..0] RxFIFO Threshold 00=32 bytes
+ *
+ * 0101 0000
+ */
+ val = 0x50;
+ write_reg(info, CHA + CCR4, val);
+ if (info->params.flags & HDLC_FLAG_RXC_DPLL)
+ mgslpc_set_rate(info, CHA, info->params.clock_speed * 16);
+ else
+ mgslpc_set_rate(info, CHA, info->params.clock_speed);
+
+ /* RLCR Receive length check register
+ *
+ * 7 1=enable receive length check
+ * 6..0 Max frame length = (RL + 1) * 32
+ */
+ write_reg(info, CHA + RLCR, 0);
+
+ /* XBCH Transmit Byte Count High
+ *
+ * 07 DMA mode, 0 = interrupt driven
+ * 06 NRM, 0=ABM (ignored)
+ * 05 CAS Carrier Auto Start
+ * 04 XC Transmit Continuously (ignored)
+ * 03..00 XBC[10..8] Transmit byte count bits 10..8
+ *
+ * 0000 0000
+ */
+ val = 0x00;
+ if (info->params.flags & HDLC_FLAG_AUTO_DCD)
+ val |= BIT5;
+ write_reg(info, CHA + XBCH, val);
+ enable_auxclk(info);
+ if (info->params.loopback || info->testing_irq)
+ loopback_enable(info);
+ if (info->params.flags & HDLC_FLAG_AUTO_CTS)
+ {
+ irq_enable(info, CHB, IRQ_CTS);
+ /* PVR[3] 1=AUTO CTS active */
+ set_reg_bits(info, CHA + PVR, BIT3);
+ } else
+ clear_reg_bits(info, CHA + PVR, BIT3);
+
+ irq_enable(info, CHA,
+ IRQ_RXEOM + IRQ_RXFIFO + IRQ_ALLSENT +
+ IRQ_UNDERRUN + IRQ_TXFIFO);
+ issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET);
+ wait_command_complete(info, CHA);
+ read_reg16(info, CHA + ISR); /* clear pending IRQs */
+
+ /* Master clock mode enabled above to allow reset commands
+ * to complete even if no data clocks are present.
+ *
+ * Disable master clock mode for normal communications because
+ * V3.2 of the ESCC2 has a bug that prevents the transmit all sent
+ * IRQ when in master clock mode.
+ *
+ * Leave master clock mode enabled for IRQ test because the
+ * timer IRQ used by the test can only happen in master clock mode.
+ */
+ if (!info->testing_irq)
+ clear_reg_bits(info, CHA + CCR0, BIT6);
+
+ tx_set_idle(info);
+
+ tx_stop(info);
+ rx_stop(info);
+}
+
+static void rx_stop(MGSLPC_INFO *info)
+{
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):rx_stop(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ /* MODE:03 RAC Receiver Active, 0=inactive */
+ clear_reg_bits(info, CHA + MODE, BIT3);
+
+ info->rx_enabled = false;
+ info->rx_overflow = false;
+}
+
+static void rx_start(MGSLPC_INFO *info)
+{
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):rx_start(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ rx_reset_buffers(info);
+ info->rx_enabled = false;
+ info->rx_overflow = false;
+
+ /* MODE:03 RAC Receiver Active, 1=active */
+ set_reg_bits(info, CHA + MODE, BIT3);
+
+ info->rx_enabled = true;
+}
+
+static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):tx_start(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (info->tx_count) {
+ /* If auto RTS enabled and RTS is inactive, then assert */
+ /* RTS and set a flag indicating that the driver should */
+ /* negate RTS when the transmission completes. */
+ info->drop_rts_on_tx_done = false;
+
+ if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
+ get_signals(info);
+ if (!(info->serial_signals & SerialSignal_RTS)) {
+ info->serial_signals |= SerialSignal_RTS;
+ set_signals(info);
+ info->drop_rts_on_tx_done = true;
+ }
+ }
+
+ if (info->params.mode == MGSL_MODE_ASYNC) {
+ if (!info->tx_active) {
+ info->tx_active = true;
+ tx_ready(info, tty);
+ }
+ } else {
+ info->tx_active = true;
+ tx_ready(info, tty);
+ mod_timer(&info->tx_timer, jiffies +
+ msecs_to_jiffies(5000));
+ }
+ }
+
+ if (!info->tx_enabled)
+ info->tx_enabled = true;
+}
+
+static void tx_stop(MGSLPC_INFO *info)
+{
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):tx_stop(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ del_timer(&info->tx_timer);
+
+ info->tx_enabled = false;
+ info->tx_active = false;
+}
+
+/* Reset the adapter to a known state and prepare it for further use.
+ */
+static void reset_device(MGSLPC_INFO *info)
+{
+ /* power up both channels (set BIT7) */
+ write_reg(info, CHA + CCR0, 0x80);
+ write_reg(info, CHB + CCR0, 0x80);
+ write_reg(info, CHA + MODE, 0);
+ write_reg(info, CHB + MODE, 0);
+
+ /* disable all interrupts */
+ irq_disable(info, CHA, 0xffff);
+ irq_disable(info, CHB, 0xffff);
+ port_irq_disable(info, 0xff);
+
+ /* PCR Port Configuration Register
+ *
+ * 07..04 DEC[3..0] Serial I/F select outputs
+ * 03 output, 1=AUTO CTS control enabled
+ * 02 RI Ring Indicator input 0=active
+ * 01 DSR input 0=active
+ * 00 DTR output 0=active
+ *
+ * 0000 0110
+ */
+ write_reg(info, PCR, 0x06);
+
+ /* PVR Port Value Register
+ *
+ * 07..04 DEC[3..0] Serial I/F select (0000=disabled)
+ * 03 AUTO CTS output 1=enabled
+ * 02 RI Ring Indicator input
+ * 01 DSR input
+ * 00 DTR output (1=inactive)
+ *
+ * 0000 0001
+ */
+// write_reg(info, PVR, PVR_DTR);
+
+ /* IPC Interrupt Port Configuration
+ *
+ * 07 VIS 1=Masked interrupts visible
+ * 06..05 Reserved, 0
+ * 04..03 SLA Slave address, 00 ignored
+ * 02 CASM Cascading Mode, 1=daisy chain
+ * 01..00 IC[1..0] Interrupt Config, 01=push-pull output, active low
+ *
+ * 0000 0101
+ */
+ write_reg(info, IPC, 0x05);
+}
+
+static void async_mode(MGSLPC_INFO *info)
+{
+ unsigned char val;
+
+ /* disable all interrupts */
+ irq_disable(info, CHA, 0xffff);
+ irq_disable(info, CHB, 0xffff);
+ port_irq_disable(info, 0xff);
+
+ /* MODE
+ *
+ * 07 Reserved, 0
+ * 06 FRTS RTS State, 0=active
+ * 05 FCTS Flow Control on CTS
+ * 04 FLON Flow Control Enable
+ * 03 RAC Receiver Active, 0 = inactive
+ * 02 RTS 0=Auto RTS, 1=manual RTS
+ * 01 TRS Timer Resolution, 1=512
+ * 00 TLP Test Loop, 0 = no loop
+ *
+ * 0000 0110
+ */
+ val = 0x06;
+ if (info->params.loopback)
+ val |= BIT0;
+
+ /* preserve RTS state */
+ if (!(info->serial_signals & SerialSignal_RTS))
+ val |= BIT6;
+ write_reg(info, CHA + MODE, val);
+
+ /* CCR0
+ *
+ * 07 PU Power Up, 1=active, 0=power down
+ * 06 MCE Master Clock Enable, 1=enabled
+ * 05 Reserved, 0
+ * 04..02 SC[2..0] Encoding, 000=NRZ
+ * 01..00 SM[1..0] Serial Mode, 11=Async
+ *
+ * 1000 0011
+ */
+ write_reg(info, CHA + CCR0, 0x83);
+
+ /* CCR1
+ *
+ * 07..05 Reserved, 0
+ * 04 ODS Output Driver Select, 1=TxD is push-pull output
+ * 03 BCR Bit Clock Rate, 1=16x
+ * 02..00 CM[2..0] Clock Mode, 111=BRG
+ *
+ * 0001 1111
+ */
+ write_reg(info, CHA + CCR1, 0x1f);
+
+ /* CCR2 (channel A)
+ *
+ * 07..06 BGR[9..8] Baud rate bits 9..8
+ * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value
+ * 04 SSEL Clock source select, 1=submode b
+ * 03 TOE 0=TxCLK is input, 1=TxCLK is output
+ * 02 RWX Read/Write Exchange 0=disabled
+ * 01 Reserved, 0
+ * 00 DIV, data inversion 0=disabled, 1=enabled
+ *
+ * 0001 0000
+ */
+ write_reg(info, CHA + CCR2, 0x10);
+
+ /* CCR3
+ *
+ * 07..01 Reserved, 0
+ * 00 PSD DPLL Phase Shift Disable
+ *
+ * 0000 0000
+ */
+ write_reg(info, CHA + CCR3, 0);
+
+ /* CCR4
+ *
+ * 07 MCK4 Master Clock Divide by 4, 1=enabled
+ * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled
+ * 05 TST1 Test Pin, 0=normal operation
+ * 04 ICD Invert Carrier Detect, 1=enabled (active low)
+ * 03..02 Reserved, must be 0
+ * 01..00 RFT[1..0] RxFIFO Threshold 00=32 bytes
+ *
+ * 0101 0000
+ */
+ write_reg(info, CHA + CCR4, 0x50);
+ mgslpc_set_rate(info, CHA, info->params.data_rate * 16);
+
+ /* DAFO Data Format
+ *
+ * 07 Reserved, 0
+ * 06 XBRK transmit break, 0=normal operation
+ * 05 Stop bits (0=1, 1=2)
+ * 04..03 PAR[1..0] Parity (01=odd, 10=even)
+ * 02 PAREN Parity Enable
+ * 01..00 CHL[1..0] Character Length (00=8, 01=7)
+ *
+ */
+ val = 0x00;
+ if (info->params.data_bits != 8)
+ val |= BIT0; /* 7 bits */
+ if (info->params.stop_bits != 1)
+ val |= BIT5;
+ if (info->params.parity != ASYNC_PARITY_NONE)
+ {
+ val |= BIT2; /* Parity enable */
+ if (info->params.parity == ASYNC_PARITY_ODD)
+ val |= BIT3;
+ else
+ val |= BIT4;
+ }
+ write_reg(info, CHA + DAFO, val);
+
+ /* RFC Rx FIFO Control
+ *
+ * 07 Reserved, 0
+ * 06 DPS, 1=parity bit not stored in data byte
+ * 05 DXS, 0=all data stored in FIFO (including XON/XOFF)
+ * 04 RFDF Rx FIFO Data Format, 1=status byte stored in FIFO
+ * 03..02 RFTH[1..0], rx threshold, 11=16 status + 16 data byte
+ * 01 Reserved, 0
+ * 00 TCDE Terminate Char Detect Enable, 0=disabled
+ *
+ * 0101 1100
+ */
+ write_reg(info, CHA + RFC, 0x5c);
+
+ /* RLCR Receive length check register
+ *
+ * Max frame length = (RL + 1) * 32
+ */
+ write_reg(info, CHA + RLCR, 0);
+
+ /* XBCH Transmit Byte Count High
+ *
+ * 07 DMA mode, 0 = interrupt driven
+ * 06 NRM, 0=ABM (ignored)
+ * 05 CAS Carrier Auto Start
+ * 04 XC Transmit Continuously (ignored)
+ * 03..00 XBC[10..8] Transmit byte count bits 10..8
+ *
+ * 0000 0000
+ */
+ val = 0x00;
+ if (info->params.flags & HDLC_FLAG_AUTO_DCD)
+ val |= BIT5;
+ write_reg(info, CHA + XBCH, val);
+ if (info->params.flags & HDLC_FLAG_AUTO_CTS)
+ irq_enable(info, CHA, IRQ_CTS);
+
+ /* MODE:03 RAC Receiver Active, 1=active */
+ set_reg_bits(info, CHA + MODE, BIT3);
+ enable_auxclk(info);
+ if (info->params.flags & HDLC_FLAG_AUTO_CTS) {
+ irq_enable(info, CHB, IRQ_CTS);
+ /* PVR[3] 1=AUTO CTS active */
+ set_reg_bits(info, CHA + PVR, BIT3);
+ } else
+ clear_reg_bits(info, CHA + PVR, BIT3);
+ irq_enable(info, CHA,
+ IRQ_RXEOM + IRQ_RXFIFO + IRQ_BREAK_ON + IRQ_RXTIME +
+ IRQ_ALLSENT + IRQ_TXFIFO);
+ issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET);
+ wait_command_complete(info, CHA);
+ read_reg16(info, CHA + ISR); /* clear pending IRQs */
+}
+
+/* Set the HDLC idle mode for the transmitter.
+ */
+static void tx_set_idle(MGSLPC_INFO *info)
+{
+ /* Note: the ESCC2 supports only the flag and ones idle modes */
+ if (info->idle_mode == HDLC_TXIDLE_FLAGS)
+ set_reg_bits(info, CHA + CCR1, BIT3);
+ else
+ clear_reg_bits(info, CHA + CCR1, BIT3);
+}
+
+/* get state of the V24 status (input) signals.
+ */
+static void get_signals(MGSLPC_INFO *info)
+{
+ unsigned char status = 0;
+
+ /* preserve DTR and RTS */
+ info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
+
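+ /* DCD and CTS come from the channel B status registers; RI and DSR
+ * are read from the port value register (these inputs are active low)
+ */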
+ if (read_reg(info, CHB + VSTR) & BIT7)
+ info->serial_signals |= SerialSignal_DCD;
+ if (read_reg(info, CHB + STAR) & BIT1)
+ info->serial_signals |= SerialSignal_CTS;
+
+ status = read_reg(info, CHA + PVR);
+ if (!(status & PVR_RI))
+ info->serial_signals |= SerialSignal_RI;
+ if (!(status & PVR_DSR))
+ info->serial_signals |= SerialSignal_DSR;
+}
+
+/* Set the state of DTR and RTS based on contents of
+ * serial_signals member of device extension.
+ */
+static void set_signals(MGSLPC_INFO *info)
+{
+ unsigned char val;
+
+ val = read_reg(info, CHA + MODE);
+ if (info->params.mode == MGSL_MODE_ASYNC) {
+ if (info->serial_signals & SerialSignal_RTS)
+ val &= ~BIT6;
+ else
+ val |= BIT6;
+ } else {
+ if (info->serial_signals & SerialSignal_RTS)
+ val |= BIT2;
+ else
+ val &= ~BIT2;
+ }
+ write_reg(info, CHA + MODE, val);
+
+ if (info->serial_signals & SerialSignal_DTR)
+ clear_reg_bits(info, CHA + PVR, PVR_DTR);
+ else
+ set_reg_bits(info, CHA + PVR, PVR_DTR);
+}
+
+static void rx_reset_buffers(MGSLPC_INFO *info)
+{
+ RXBUF *buf;
+ int i;
+
+ info->rx_put = 0;
+ info->rx_get = 0;
+ info->rx_frame_count = 0;
+ for (i=0 ; i < info->rx_buf_count ; i++) {
+ buf = (RXBUF*)(info->rx_buf + (i * info->rx_buf_size));
+ buf->status = buf->count = 0;
+ }
+}
+
+/* Attempt to return a received HDLC frame.
+ * Only frames received without errors are returned.
+ *
+ * Returns true if frame returned, otherwise false
+ */
+static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty)
+{
+ unsigned short status;
+ RXBUF *buf;
+ unsigned int framesize = 0;
+ unsigned long flags;
+ bool return_frame = false;
+
+ if (info->rx_frame_count == 0)
+ return false;
+
+ buf = (RXBUF*)(info->rx_buf + (info->rx_get * info->rx_buf_size));
+
+ status = buf->status;
+
+ /* 07 VFR 1=valid frame
+ * 06 RDO 1=data overrun
+ * 05 CRC 1=OK, 0=error
+ * 04 RAB 1=frame aborted
+ */
+ if ((status & 0xf0) != 0xA0) {
+ if (!(status & BIT7) || (status & BIT4))
+ info->icount.rxabort++;
+ else if (status & BIT6)
+ info->icount.rxover++;
+ else if (!(status & BIT5)) {
+ info->icount.rxcrc++;
+ if (info->params.crc_type & HDLC_CRC_RETURN_EX)
+ return_frame = true;
+ }
+ framesize = 0;
+#if SYNCLINK_GENERIC_HDLC
+ {
+ info->netdev->stats.rx_errors++;
+ info->netdev->stats.rx_frame_errors++;
+ }
+#endif
+ } else
+ return_frame = true;
+
+ if (return_frame)
+ framesize = buf->count;
+
+ if (debug_level >= DEBUG_LEVEL_BH)
+ printk("%s(%d):rx_get_frame(%s) status=%04X size=%d\n",
+ __FILE__,__LINE__,info->device_name,status,framesize);
+
+ if (debug_level >= DEBUG_LEVEL_DATA)
+ trace_block(info, buf->data, framesize, 0);
+
+ if (framesize) {
+ if ((info->params.crc_type & HDLC_CRC_RETURN_EX &&
+ framesize+1 > info->max_frame_size) ||
+ framesize > info->max_frame_size)
+ info->icount.rxlong++;
+ else {
+ if (status & BIT5)
+ info->icount.rxok++;
+
+ if (info->params.crc_type & HDLC_CRC_RETURN_EX) {
+ *(buf->data + framesize) = status & BIT5 ? RX_OK:RX_CRC_ERROR;
+ ++framesize;
+ }
+
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount)
+ hdlcdev_rx(info, buf->data, framesize);
+ else
+#endif
+ ldisc_receive_buf(tty, buf->data, info->flag_buf, framesize);
+ }
+ }
+
+ spin_lock_irqsave(&info->lock,flags);
+ buf->status = buf->count = 0;
+ info->rx_frame_count--;
+ info->rx_get++;
+ if (info->rx_get >= info->rx_buf_count)
+ info->rx_get = 0;
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ return true;
+}
+
+static bool register_test(MGSLPC_INFO *info)
+{
+ static unsigned char patterns[] =
+ { 0x00, 0xff, 0xaa, 0x55, 0x69, 0x96, 0x0f };
+ static unsigned int count = ARRAY_SIZE(patterns);
+ unsigned int i;
+ bool rc = true;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+ reset_device(info);
+
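+ /* write/read walking bit patterns through two writable registers
+ * (XAD1/XAD2) to verify basic register access
+ */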
+ for (i = 0; i < count; i++) {
+ write_reg(info, XAD1, patterns[i]);
+ write_reg(info, XAD2, patterns[(i + 1) % count]);
+ if ((read_reg(info, XAD1) != patterns[i]) ||
+ (read_reg(info, XAD2) != patterns[(i + 1) % count])) {
+ rc = false;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&info->lock,flags);
+ return rc;
+}
+
+static bool irq_test(MGSLPC_INFO *info)
+{
+ unsigned long end_time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+ reset_device(info);
+
+ info->testing_irq = true;
+ hdlc_mode(info);
+
+ info->irq_occurred = false;
+
+ /* the timer IRQ used below only fires in master clock mode,
+ * which hdlc_mode() left enabled for this test
+ */
+
+ irq_enable(info, CHA, IRQ_TIMER);
+ write_reg(info, CHA + TIMR, 0); /* 512 cycles */
+ issue_command(info, CHA, CMD_START_TIMER);
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ end_time=100;
+ while(end_time-- && !info->irq_occurred) {
+ msleep_interruptible(10);
+ }
+
+ info->testing_irq = false;
+
+ spin_lock_irqsave(&info->lock,flags);
+ reset_device(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ return info->irq_occurred;
+}
+
+static int adapter_test(MGSLPC_INFO *info)
+{
+ if (!register_test(info)) {
+ info->init_error = DiagStatus_AddressFailure;
+ printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
+ __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
+ return -ENODEV;
+ }
+
+ if (!irq_test(info)) {
+ info->init_error = DiagStatus_IrqFailure;
+ printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
+ __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
+ return -ENODEV;
+ }
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):device %s passed diagnostics\n",
+ __FILE__,__LINE__,info->device_name);
+ return 0;
+}
+
+static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
+{
+ int i;
+ int linecount;
+ if (xmit)
+ printk("%s tx data:\n",info->device_name);
+ else
+ printk("%s rx data:\n",info->device_name);
+
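+ /* dump 16 bytes per line: hex values, padding, then printable ASCII */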
+ while(count) {
+ if (count > 16)
+ linecount = 16;
+ else
+ linecount = count;
+
+ for(i=0;i<linecount;i++)
+ printk("%02X ",(unsigned char)data[i]);
+ for(;i<17;i++)
+ printk(" ");
+ for(i=0;i<linecount;i++) {
+ if (data[i]>=040 && data[i]<=0176)
+ printk("%c",data[i]);
+ else
+ printk(".");
+ }
+ printk("\n");
+
+ data += linecount;
+ count -= linecount;
+ }
+}
+
+/* HDLC frame time out
+ * update stats and do tx completion processing
+ */
+static void tx_timeout(unsigned long context)
+{
+ MGSLPC_INFO *info = (MGSLPC_INFO*)context;
+ unsigned long flags;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):tx_timeout(%s)\n",
+ __FILE__,__LINE__,info->device_name);
+ if(info->tx_active &&
+ info->params.mode == MGSL_MODE_HDLC) {
+ info->icount.txtimeout++;
+ }
+ spin_lock_irqsave(&info->lock,flags);
+ info->tx_active = false;
+ info->tx_count = info->tx_put = info->tx_get = 0;
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount)
+ hdlcdev_tx_done(info);
+ else
+#endif
+ {
+ struct tty_struct *tty = tty_port_tty_get(&info->port);
+ bh_transmit(info, tty);
+ tty_kref_put(tty);
+ }
+}
+
+#if SYNCLINK_GENERIC_HDLC
+
+/**
+ * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
+ * set encoding and frame check sequence (FCS) options
+ *
+ * dev pointer to network device structure
+ * encoding serial encoding setting
+ * parity FCS setting
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
+{
+ MGSLPC_INFO *info = dev_to_port(dev);
+ struct tty_struct *tty;
+ unsigned char new_encoding;
+ unsigned short new_crctype;
+
+ /* return error if TTY interface open */
+ if (info->port.count)
+ return -EBUSY;
+
+ switch (encoding)
+ {
+ case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
+ case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
+ case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
+ case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
+ case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
+ default: return -EINVAL;
+ }
+
+ switch (parity)
+ {
+ case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
+ case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
+ case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
+ default: return -EINVAL;
+ }
+
+ info->params.encoding = new_encoding;
+ info->params.crc_type = new_crctype;
+
+ /* if network interface up, reprogram hardware */
+ if (info->netcount) {
+ tty = tty_port_tty_get(&info->port);
+ mgslpc_program_hw(info, tty);
+ tty_kref_put(tty);
+ }
+
+ return 0;
+}
+
+/**
+ * called by generic HDLC layer to send frame
+ *
+ * skb socket buffer containing HDLC frame
+ * dev pointer to network device structure
+ */
+static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ MGSLPC_INFO *info = dev_to_port(dev);
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
+
+ /* stop sending until this frame completes */
+ netif_stop_queue(dev);
+
+ /* copy data to device buffers */
+ skb_copy_from_linear_data(skb, info->tx_buf, skb->len);
+ info->tx_get = 0;
+ info->tx_put = info->tx_count = skb->len;
+
+ /* update network statistics */
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ /* done with socket buffer, so free it */
+ dev_kfree_skb(skb);
+
+ /* save start time for transmit timeout detection */
+ dev->trans_start = jiffies;
+
+ /* start hardware transmitter if necessary */
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_active) {
+ struct tty_struct *tty = tty_port_tty_get(&info->port);
+ tx_start(info, tty);
+ tty_kref_put(tty);
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * called by network layer when interface enabled
+ * claim resources and initialize hardware
+ *
+ * dev pointer to network device structure
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_open(struct net_device *dev)
+{
+ MGSLPC_INFO *info = dev_to_port(dev);
+ struct tty_struct *tty;
+ int rc;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
+
+ /* generic HDLC layer open processing */
+ if ((rc = hdlc_open(dev)))
+ return rc;
+
+ /* arbitrate between network and tty opens */
+ spin_lock_irqsave(&info->netlock, flags);
+ if (info->port.count != 0 || info->netcount != 0) {
+ printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
+ spin_unlock_irqrestore(&info->netlock, flags);
+ return -EBUSY;
+ }
+ info->netcount=1;
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ tty = tty_port_tty_get(&info->port);
+ /* claim resources and init adapter */
+ if ((rc = startup(info, tty)) != 0) {
+ tty_kref_put(tty);
+ spin_lock_irqsave(&info->netlock, flags);
+ info->netcount=0;
+ spin_unlock_irqrestore(&info->netlock, flags);
+ return rc;
+ }
+ /* assert DTR and RTS, apply hardware settings */
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ mgslpc_program_hw(info, tty);
+ tty_kref_put(tty);
+
+ /* enable network layer transmit */
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+
+ /* inform generic HDLC layer of current DCD status */
+ spin_lock_irqsave(&info->lock, flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
+ if (info->serial_signals & SerialSignal_DCD)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ return 0;
+}
+
+/**
+ * called by network layer when interface is disabled
+ * shutdown hardware and release resources
+ *
+ * dev pointer to network device structure
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_close(struct net_device *dev)
+{
+ MGSLPC_INFO *info = dev_to_port(dev);
+ struct tty_struct *tty = tty_port_tty_get(&info->port);
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
+
+ netif_stop_queue(dev);
+
+ /* shutdown adapter and release resources */
+ shutdown(info, tty);
+ tty_kref_put(tty);
+ hdlc_close(dev);
+
+ spin_lock_irqsave(&info->netlock, flags);
+ info->netcount=0;
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ return 0;
+}
+
+/**
+ * called by network layer to process IOCTL call to network device
+ *
+ * dev pointer to network device structure
+ * ifr pointer to network interface request structure
+ * cmd IOCTL command code
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ MGSLPC_INFO *info = dev_to_port(dev);
+ unsigned int flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
+
+ /* return error if TTY interface open */
+ if (info->port.count)
+ return -EBUSY;
+
+ if (cmd != SIOCWANDEV)
+ return hdlc_ioctl(dev, ifr, cmd);
+
+ memset(&new_line, 0, size);
+
+ switch(ifr->ifr_settings.type) {
+ case IF_GET_IFACE: /* return current sync_serial_settings */
+
+ ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+
+ flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
+
+ switch (flags){
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
+ case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
+ default: new_line.clock_type = CLOCK_DEFAULT;
+ }
+
+ new_line.clock_rate = info->params.clock_speed;
+ new_line.loopback = info->params.loopback ? 1:0;
+
+ if (copy_to_user(line, &new_line, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
+
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&new_line, line, size))
+ return -EFAULT;
+
+ switch (new_line.clock_type)
+ {
+ case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
+ case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
+ case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
+ case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
+ case CLOCK_DEFAULT: flags = info->params.flags &
+ (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
+ default: return -EINVAL;
+ }
+
+ if (new_line.loopback != 0 && new_line.loopback != 1)
+ return -EINVAL;
+
+ info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
+ info->params.flags |= flags;
+
+ info->params.loopback = new_line.loopback;
+
+ if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
+ info->params.clock_speed = new_line.clock_rate;
+ else
+ info->params.clock_speed = 0;
+
+ /* if network interface up, reprogram hardware */
+ if (info->netcount) {
+ struct tty_struct *tty = tty_port_tty_get(&info->port);
+ mgslpc_program_hw(info, tty);
+ tty_kref_put(tty);
+ }
+ return 0;
+
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+/**
+ * called by network layer when transmit timeout is detected
+ *
+ * dev pointer to network device structure
+ */
+static void hdlcdev_tx_timeout(struct net_device *dev)
+{
+ MGSLPC_INFO *info = dev_to_port(dev);
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("hdlcdev_tx_timeout(%s)\n",dev->name);
+
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+
+ spin_lock_irqsave(&info->lock,flags);
+ tx_stop(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ netif_wake_queue(dev);
+}
+
+/**
+ * called by device driver when transmit completes
+ * reenable network layer transmit if stopped
+ *
+ * info pointer to device instance information
+ */
+static void hdlcdev_tx_done(MGSLPC_INFO *info)
+{
+ if (netif_queue_stopped(info->netdev))
+ netif_wake_queue(info->netdev);
+}
+
+/**
+ * called by device driver when frame received
+ * pass frame to network layer
+ *
+ * info pointer to device instance information
+ * buf pointer to buffer containing frame data
+ * size count of data bytes in buf
+ */
+static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size)
+{
+ struct sk_buff *skb = dev_alloc_skb(size);
+ struct net_device *dev = info->netdev;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("hdlcdev_rx(%s)\n",dev->name);
+
+ if (skb == NULL) {
+ printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
+ dev->stats.rx_dropped++;
+ return;
+ }
+
+ memcpy(skb_put(skb, size), buf, size);
+
+ skb->protocol = hdlc_type_trans(skb, dev);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += size;
+
+ netif_rx(skb);
+}
+
+static const struct net_device_ops hdlcdev_ops = {
+ .ndo_open = hdlcdev_open,
+ .ndo_stop = hdlcdev_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = hdlcdev_ioctl,
+ .ndo_tx_timeout = hdlcdev_tx_timeout,
+};
+
+/**
+ * called by device driver when adding device instance
+ * do generic HDLC initialization
+ *
+ * info pointer to device instance information
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_init(MGSLPC_INFO *info)
+{
+ int rc;
+ struct net_device *dev;
+ hdlc_device *hdlc;
+
+ /* allocate and initialize network and HDLC layer objects */
+
+ if (!(dev = alloc_hdlcdev(info))) {
+ printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
+ return -ENOMEM;
+ }
+
+ /* for network layer reporting purposes only */
+ dev->base_addr = info->io_base;
+ dev->irq = info->irq_level;
+
+ /* network layer callbacks and settings */
+ dev->netdev_ops = &hdlcdev_ops;
+ dev->watchdog_timeo = 10 * HZ;
+ dev->tx_queue_len = 50;
+
+ /* generic HDLC layer callbacks and settings */
+ hdlc = dev_to_hdlc(dev);
+ hdlc->attach = hdlcdev_attach;
+ hdlc->xmit = hdlcdev_xmit;
+
+ /* register objects with HDLC layer */
+ if ((rc = register_hdlc_device(dev))) {
+ printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
+ free_netdev(dev);
+ return rc;
+ }
+
+ info->netdev = dev;
+ return 0;
+}
+
+/**
+ * called by device driver when removing device instance
+ * do generic HDLC cleanup
+ *
+ * info pointer to device instance information
+ */
+static void hdlcdev_exit(MGSLPC_INFO *info)
+{
+ unregister_hdlc_device(info->netdev);
+ free_netdev(info->netdev);
+ info->netdev = NULL;
+}
+
+#endif /* SYNCLINK_GENERIC_HDLC */
+
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
new file mode 100644
index 00000000..3fcf80ff
--- /dev/null
+++ b/drivers/char/ppdev.c
@@ -0,0 +1,814 @@
+/*
+ * linux/drivers/char/ppdev.c
+ *
+ * This is the code behind /dev/parport* -- it allows a user-space
+ * application to use the parport subsystem.
+ *
+ * Copyright (C) 1998-2000, 2002 Tim Waugh <tim@cyberelk.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * A /dev/parportx device node represents an arbitrary device
+ * on port 'x'. The following operations are possible:
+ *
+ * open do nothing, set up default IEEE 1284 protocol to be COMPAT
+ * close release port and unregister device (if necessary)
+ * ioctl
+ * EXCL register device exclusively (may fail)
+ * CLAIM (register device first time) parport_claim_or_block
+ * RELEASE parport_release
+ * SETMODE set the IEEE 1284 protocol to use for read/write
+ * SETPHASE set the IEEE 1284 phase of a particular mode. Not to be
+ * confused with ioctl(fd, SETPHASER, &stun). ;-)
+ * DATADIR data_forward / data_reverse
+ * WDATA write_data
+ * RDATA read_data
+ * WCONTROL write_control
+ * RCONTROL read_control
+ * FCONTROL frob_control
+ * RSTATUS read_status
+ * NEGOT parport_negotiate
+ * YIELD parport_yield_blocking
+ * WCTLONIRQ on interrupt, set control lines
+ * CLRIRQ clear (and return) interrupt count
+ * SETTIME sets device timeout (struct timeval)
+ * GETTIME gets device timeout (struct timeval)
+ * GETMODES gets hardware supported modes (unsigned int)
+ * GETMODE gets the current IEEE1284 mode
+ * GETPHASE gets the current IEEE1284 phase
+ * GETFLAGS gets current (user-visible) flags
+ * SETFLAGS sets current (user-visible) flags
+ * read/write read or write in current IEEE 1284 protocol
+ * select wait for interrupt (in readfds)
+ *
+ * Changes:
+ * Added SETTIME/GETTIME ioctl, Fred Barnes, 1999.
+ *
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 2000/08/25
+ * - On error, copy_from_user and copy_to_user do not return -EFAULT;
+ * they return the positive number of bytes *not* copied due to
+ * address space errors.
+ *
+ * Added GETMODES/GETMODE/GETPHASE ioctls, Fred Barnes <frmb2@ukc.ac.uk>, 03/01/2001.
+ * Added GETFLAGS/SETFLAGS ioctls, Fred Barnes, 04/2001
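+ *
+ * A minimal user-space sketch of the claim/write/release sequence
+ * (illustrative only; error checking omitted, /dev/parport0 assumed):
+ *
+ *	int fd = open("/dev/parport0", O_RDWR);
+ *	ioctl(fd, PPCLAIM);       - claim the port (registers device)
+ *	unsigned char d = 0xaa;
+ *	ioctl(fd, PPWDATA, &d);   - drive the data lines
+ *	ioctl(fd, PPRELEASE);     - release the port for other users
+ *	close(fd);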
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/ioctl.h>
+#include <linux/parport.h>
+#include <linux/ctype.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+#include <linux/ppdev.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+
+#define PP_VERSION "ppdev: user-space parallel port driver"
+#define CHRDEV "ppdev"
+
+struct pp_struct {
+ struct pardevice * pdev;
+ wait_queue_head_t irq_wait;
+ atomic_t irqc;
+ unsigned int flags;
+ int irqresponse;
+ unsigned char irqctl;
+ struct ieee1284_info state;
+ struct ieee1284_info saved_state;
+ long default_inactivity;
+};
+
+/* pp_struct.flags bitfields */
+#define PP_CLAIMED (1<<0)
+#define PP_EXCL (1<<1)
+
+/* Other constants */
+#define PP_INTERRUPT_TIMEOUT (10 * HZ) /* 10s */
+#define PP_BUFFER_SIZE 1024
+#define PARDEVICE_MAX 8
+
+/* ROUND_UP macro from fs/select.c */
+#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
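+/* e.g. with HZ=100 (10000 us per jiffy), ROUND_UP(15000, 10000) rounds
+ * a 15 ms timeout up to 2 jiffies
+ */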
+
+static DEFINE_MUTEX(pp_do_mutex);
+static inline void pp_enable_irq (struct pp_struct *pp)
+{
+ struct parport *port = pp->pdev->port;
+ port->ops->enable_irq (port);
+}
+
+static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
+ loff_t * ppos)
+{
+ unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ struct pp_struct *pp = file->private_data;
+ char * kbuffer;
+ ssize_t bytes_read = 0;
+ struct parport *pport;
+ int mode;
+
+ if (!(pp->flags & PP_CLAIMED)) {
+ /* Don't have the port claimed */
+ pr_debug(CHRDEV "%x: claim the port first\n", minor);
+ return -EINVAL;
+ }
+
+ /* Trivial case. */
+ if (count == 0)
+ return 0;
+
+ kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
+ if (!kbuffer) {
+ return -ENOMEM;
+ }
+ pport = pp->pdev->port;
+ mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
+
+ parport_set_timeout (pp->pdev,
+ (file->f_flags & O_NONBLOCK) ?
+ PARPORT_INACTIVITY_O_NONBLOCK :
+ pp->default_inactivity);
+
+ while (bytes_read == 0) {
+ ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE);
+
+ if (mode == IEEE1284_MODE_EPP) {
+ /* various specials for EPP mode */
+ int flags = 0;
+ size_t (*fn)(struct parport *, void *, size_t, int);
+
+ if (pp->flags & PP_W91284PIC) {
+ flags |= PARPORT_W91284PIC;
+ }
+ if (pp->flags & PP_FASTREAD) {
+ flags |= PARPORT_EPP_FAST;
+ }
+ if (pport->ieee1284.mode & IEEE1284_ADDR) {
+ fn = pport->ops->epp_read_addr;
+ } else {
+ fn = pport->ops->epp_read_data;
+ }
+ bytes_read = (*fn)(pport, kbuffer, need, flags);
+ } else {
+ bytes_read = parport_read (pport, kbuffer, need);
+ }
+
+ if (bytes_read != 0)
+ break;
+
+ if (file->f_flags & O_NONBLOCK) {
+ bytes_read = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending (current)) {
+ bytes_read = -ERESTARTSYS;
+ break;
+ }
+
+ cond_resched();
+ }
+
+ parport_set_timeout (pp->pdev, pp->default_inactivity);
+
+ if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read))
+ bytes_read = -EFAULT;
+
+ kfree (kbuffer);
+ pp_enable_irq (pp);
+ return bytes_read;
+}
+
+static ssize_t pp_write (struct file * file, const char __user * buf,
+ size_t count, loff_t * ppos)
+{
+ unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ struct pp_struct *pp = file->private_data;
+ char * kbuffer;
+ ssize_t bytes_written = 0;
+ ssize_t wrote;
+ int mode;
+ struct parport *pport;
+
+ if (!(pp->flags & PP_CLAIMED)) {
+ /* Don't have the port claimed */
+ pr_debug(CHRDEV "%x: claim the port first\n", minor);
+ return -EINVAL;
+ }
+
+ kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
+ if (!kbuffer) {
+ return -ENOMEM;
+ }
+ pport = pp->pdev->port;
+ mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
+
+ parport_set_timeout (pp->pdev,
+ (file->f_flags & O_NONBLOCK) ?
+ PARPORT_INACTIVITY_O_NONBLOCK :
+ pp->default_inactivity);
+
+ while (bytes_written < count) {
+ ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE);
+
+ if (copy_from_user (kbuffer, buf + bytes_written, n)) {
+ bytes_written = -EFAULT;
+ break;
+ }
+
+ if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) {
+ /* do a fast EPP write */
+ if (pport->ieee1284.mode & IEEE1284_ADDR) {
+ wrote = pport->ops->epp_write_addr (pport,
+ kbuffer, n, PARPORT_EPP_FAST);
+ } else {
+ wrote = pport->ops->epp_write_data (pport,
+ kbuffer, n, PARPORT_EPP_FAST);
+ }
+ } else {
+ wrote = parport_write (pp->pdev->port, kbuffer, n);
+ }
+
+ if (wrote <= 0) {
+ if (!bytes_written) {
+ bytes_written = wrote;
+ }
+ break;
+ }
+
+ bytes_written += wrote;
+
+ if (file->f_flags & O_NONBLOCK) {
+ if (!bytes_written)
+ bytes_written = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending (current)) {
+ if (!bytes_written) {
+ bytes_written = -EINTR;
+ }
+ break;
+ }
+
+ cond_resched();
+ }
+
+ parport_set_timeout (pp->pdev, pp->default_inactivity);
+
+ kfree (kbuffer);
+ pp_enable_irq (pp);
+ return bytes_written;
+}
+
+static void pp_irq (void *private)
+{
+ struct pp_struct *pp = private;
+
+ if (pp->irqresponse) {
+ parport_write_control (pp->pdev->port, pp->irqctl);
+ pp->irqresponse = 0;
+ }
+
+ atomic_inc (&pp->irqc);
+ wake_up_interruptible (&pp->irq_wait);
+}
+
+static int register_device (int minor, struct pp_struct *pp)
+{
+ struct parport *port;
+ struct pardevice * pdev = NULL;
+ char *name;
+ int fl;
+
+ name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
+ if (name == NULL)
+ return -ENOMEM;
+
+ port = parport_find_number (minor);
+ if (!port) {
+ printk (KERN_WARNING "%s: no associated port!\n", name);
+ kfree (name);
+ return -ENXIO;
+ }
+
+ fl = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
+ pdev = parport_register_device (port, name, NULL,
+ NULL, pp_irq, fl, pp);
+ parport_put_port (port);
+
+ if (!pdev) {
+ printk (KERN_WARNING "%s: failed to register device!\n", name);
+ kfree (name);
+ return -ENXIO;
+ }
+
+ pp->pdev = pdev;
+ pr_debug("%s: registered pardevice\n", name);
+ return 0;
+}
+
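+/* nibble and byte are reverse-channel (peripheral to host) modes, so
+ * they idle in the reverse phase; all other modes idle forward
+ */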
+static enum ieee1284_phase init_phase (int mode)
+{
+ switch (mode & ~(IEEE1284_DEVICEID
+ | IEEE1284_ADDR)) {
+ case IEEE1284_MODE_NIBBLE:
+ case IEEE1284_MODE_BYTE:
+ return IEEE1284_PH_REV_IDLE;
+ }
+ return IEEE1284_PH_FWD_IDLE;
+}
+
+static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ struct pp_struct *pp = file->private_data;
+ struct parport * port;
+ void __user *argp = (void __user *)arg;
+
+ /* First handle the cases that don't take arguments. */
+ switch (cmd) {
+ case PPCLAIM:
+ {
+ struct ieee1284_info *info;
+ int ret;
+
+ if (pp->flags & PP_CLAIMED) {
+ pr_debug(CHRDEV "%x: you've already got it!\n", minor);
+ return -EINVAL;
+ }
+
+ /* Deferred device registration. */
+ if (!pp->pdev) {
+ int err = register_device (minor, pp);
+ if (err) {
+ return err;
+ }
+ }
+
+ ret = parport_claim_or_block (pp->pdev);
+ if (ret < 0)
+ return ret;
+
+ pp->flags |= PP_CLAIMED;
+
+ /* For interrupt-reporting to work, we need to be
+ * informed of each interrupt. */
+ pp_enable_irq (pp);
+
+ /* We may need to fix up the state machine. */
+ info = &pp->pdev->port->ieee1284;
+ pp->saved_state.mode = info->mode;
+ pp->saved_state.phase = info->phase;
+ info->mode = pp->state.mode;
+ info->phase = pp->state.phase;
+ pp->default_inactivity = parport_set_timeout (pp->pdev, 0);
+ parport_set_timeout (pp->pdev, pp->default_inactivity);
+
+ return 0;
+ }
+ case PPEXCL:
+ if (pp->pdev) {
+ pr_debug(CHRDEV "%x: too late for PPEXCL; "
+ "already registered\n", minor);
+ if (pp->flags & PP_EXCL)
+ /* But it's not really an error. */
+ return 0;
+ /* There's no chance of making the driver happy. */
+ return -EINVAL;
+ }
+
+ /* Just remember to register the device exclusively
+ * when we finally do the registration. */
+ pp->flags |= PP_EXCL;
+ return 0;
+ case PPSETMODE:
+ {
+ int mode;
+ if (copy_from_user (&mode, argp, sizeof (mode)))
+ return -EFAULT;
+ /* FIXME: validate mode */
+ pp->state.mode = mode;
+ pp->state.phase = init_phase (mode);
+
+ if (pp->flags & PP_CLAIMED) {
+ pp->pdev->port->ieee1284.mode = mode;
+ pp->pdev->port->ieee1284.phase = pp->state.phase;
+ }
+
+ return 0;
+ }
+ case PPGETMODE:
+ {
+ int mode;
+
+ if (pp->flags & PP_CLAIMED) {
+ mode = pp->pdev->port->ieee1284.mode;
+ } else {
+ mode = pp->state.mode;
+ }
+ if (copy_to_user (argp, &mode, sizeof (mode))) {
+ return -EFAULT;
+ }
+ return 0;
+ }
+ case PPSETPHASE:
+ {
+ int phase;
+ if (copy_from_user (&phase, argp, sizeof (phase))) {
+ return -EFAULT;
+ }
+ /* FIXME: validate phase */
+ pp->state.phase = phase;
+
+ if (pp->flags & PP_CLAIMED) {
+ pp->pdev->port->ieee1284.phase = phase;
+ }
+
+ return 0;
+ }
+ case PPGETPHASE:
+ {
+ int phase;
+
+ if (pp->flags & PP_CLAIMED) {
+ phase = pp->pdev->port->ieee1284.phase;
+ } else {
+ phase = pp->state.phase;
+ }
+ if (copy_to_user (argp, &phase, sizeof (phase))) {
+ return -EFAULT;
+ }
+ return 0;
+ }
+ case PPGETMODES:
+ {
+ unsigned int modes;
+
+ port = parport_find_number (minor);
+ if (!port)
+ return -ENODEV;
+
+ modes = port->modes;
+ parport_put_port(port);
+ if (copy_to_user (argp, &modes, sizeof (modes))) {
+ return -EFAULT;
+ }
+ return 0;
+ }
+ case PPSETFLAGS:
+ {
+ int uflags;
+
+ if (copy_from_user (&uflags, argp, sizeof (uflags))) {
+ return -EFAULT;
+ }
+ pp->flags &= ~PP_FLAGMASK;
+ pp->flags |= (uflags & PP_FLAGMASK);
+ return 0;
+ }
+ case PPGETFLAGS:
+ {
+ int uflags;
+
+ uflags = pp->flags & PP_FLAGMASK;
+ if (copy_to_user (argp, &uflags, sizeof (uflags))) {
+ return -EFAULT;
+ }
+ return 0;
+ }
+ } /* end switch() */
+
+ /* Everything else requires the port to be claimed, so check
+ * that now. */
+ if ((pp->flags & PP_CLAIMED) == 0) {
+ pr_debug(CHRDEV "%x: claim the port first\n", minor);
+ return -EINVAL;
+ }
+
+ port = pp->pdev->port;
+ switch (cmd) {
+ struct ieee1284_info *info;
+ unsigned char reg;
+ unsigned char mask;
+ int mode;
+ int ret;
+ struct timeval par_timeout;
+ long to_jiffies;
+
+ case PPRSTATUS:
+ reg = parport_read_status (port);
+ if (copy_to_user (argp, &reg, sizeof (reg)))
+ return -EFAULT;
+ return 0;
+ case PPRDATA:
+ reg = parport_read_data (port);
+ if (copy_to_user (argp, &reg, sizeof (reg)))
+ return -EFAULT;
+ return 0;
+ case PPRCONTROL:
+ reg = parport_read_control (port);
+ if (copy_to_user (argp, &reg, sizeof (reg)))
+ return -EFAULT;
+ return 0;
+ case PPYIELD:
+ parport_yield_blocking (pp->pdev);
+ return 0;
+
+ case PPRELEASE:
+ /* Save the state machine's state. */
+ info = &pp->pdev->port->ieee1284;
+ pp->state.mode = info->mode;
+ pp->state.phase = info->phase;
+ info->mode = pp->saved_state.mode;
+ info->phase = pp->saved_state.phase;
+ parport_release (pp->pdev);
+ pp->flags &= ~PP_CLAIMED;
+ return 0;
+
+ case PPWCONTROL:
+ if (copy_from_user (&reg, argp, sizeof (reg)))
+ return -EFAULT;
+ parport_write_control (port, reg);
+ return 0;
+
+ case PPWDATA:
+ if (copy_from_user (&reg, argp, sizeof (reg)))
+ return -EFAULT;
+ parport_write_data (port, reg);
+ return 0;
+
+ case PPFCONTROL:
+ if (copy_from_user (&mask, argp,
+ sizeof (mask)))
+ return -EFAULT;
+ if (copy_from_user (&reg, 1 + (unsigned char __user *) arg,
+ sizeof (reg)))
+ return -EFAULT;
+ parport_frob_control (port, mask, reg);
+ return 0;
+
+ case PPDATADIR:
+ if (copy_from_user (&mode, argp, sizeof (mode)))
+ return -EFAULT;
+ if (mode)
+ port->ops->data_reverse (port);
+ else
+ port->ops->data_forward (port);
+ return 0;
+
+ case PPNEGOT:
+ if (copy_from_user (&mode, argp, sizeof (mode)))
+ return -EFAULT;
+ switch ((ret = parport_negotiate (port, mode))) {
+ case 0: break;
+ case -1: /* handshake failed, peripheral not IEEE 1284 */
+ ret = -EIO;
+ break;
+ case 1: /* handshake succeeded, peripheral rejected mode */
+ ret = -ENXIO;
+ break;
+ }
+ pp_enable_irq (pp);
+ return ret;
+
+ case PPWCTLONIRQ:
+ if (copy_from_user (&reg, argp, sizeof (reg)))
+ return -EFAULT;
+
+ /* Remember what to set the control lines to, for next
+ * time we get an interrupt. */
+ pp->irqctl = reg;
+ pp->irqresponse = 1;
+ return 0;
+
+ case PPCLRIRQ:
+ ret = atomic_read (&pp->irqc);
+ if (copy_to_user (argp, &ret, sizeof (ret)))
+ return -EFAULT;
+ atomic_sub (ret, &pp->irqc);
+ return 0;
+
+ case PPSETTIME:
+ if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) {
+ return -EFAULT;
+ }
+ /* Convert to jiffies, place in pp->pdev->timeout */
+ if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) {
+ return -EINVAL;
+ }
+ to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ);
+ to_jiffies += par_timeout.tv_sec * (long)HZ;
+ if (to_jiffies <= 0) {
+ return -EINVAL;
+ }
+ pp->pdev->timeout = to_jiffies;
+ return 0;
+
+ case PPGETTIME:
+ to_jiffies = pp->pdev->timeout;
+ memset(&par_timeout, 0, sizeof(par_timeout));
+ par_timeout.tv_sec = to_jiffies / HZ;
+ par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ);
+ if (copy_to_user (argp, &par_timeout, sizeof(struct timeval)))
+ return -EFAULT;
+ return 0;
+
+ default:
+ pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd);
+ return -EINVAL;
+ }
+
+ /* Keep the compiler happy */
+ return 0;
+}
+
+static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+ mutex_lock(&pp_do_mutex);
+ ret = pp_do_ioctl(file, cmd, arg);
+ mutex_unlock(&pp_do_mutex);
+ return ret;
+}
+
+static int pp_open (struct inode * inode, struct file * file)
+{
+ unsigned int minor = iminor(inode);
+ struct pp_struct *pp;
+
+ if (minor >= PARPORT_MAX)
+ return -ENXIO;
+
+ pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ pp->state.mode = IEEE1284_MODE_COMPAT;
+ pp->state.phase = init_phase (pp->state.mode);
+ pp->flags = 0;
+ pp->irqresponse = 0;
+ atomic_set (&pp->irqc, 0);
+ init_waitqueue_head (&pp->irq_wait);
+
+ /* Defer the actual device registration until the first claim.
+ * That way, we know whether or not the driver wants to have
+ * exclusive access to the port (PPEXCL).
+ */
+ pp->pdev = NULL;
+ file->private_data = pp;
+
+ return 0;
+}
+
+static int pp_release (struct inode * inode, struct file * file)
+{
+ unsigned int minor = iminor(inode);
+ struct pp_struct *pp = file->private_data;
+ int compat_negot;
+
+ compat_negot = 0;
+ if (!(pp->flags & PP_CLAIMED) && pp->pdev &&
+ (pp->state.mode != IEEE1284_MODE_COMPAT)) {
+ struct ieee1284_info *info;
+
+ /* parport released, but not in compatibility mode */
+ parport_claim_or_block (pp->pdev);
+ pp->flags |= PP_CLAIMED;
+ info = &pp->pdev->port->ieee1284;
+ pp->saved_state.mode = info->mode;
+ pp->saved_state.phase = info->phase;
+ info->mode = pp->state.mode;
+ info->phase = pp->state.phase;
+ compat_negot = 1;
+ } else if ((pp->flags & PP_CLAIMED) && pp->pdev &&
+ (pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT)) {
+ compat_negot = 2;
+ }
+ if (compat_negot) {
+ parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT);
+ pr_debug(CHRDEV "%x: negotiated back to compatibility "
+ "mode because user-space forgot\n", minor);
+ }
+
+ if (pp->flags & PP_CLAIMED) {
+ struct ieee1284_info *info;
+
+ info = &pp->pdev->port->ieee1284;
+ pp->state.mode = info->mode;
+ pp->state.phase = info->phase;
+ info->mode = pp->saved_state.mode;
+ info->phase = pp->saved_state.phase;
+ parport_release (pp->pdev);
+ if (compat_negot != 1) {
+ pr_debug(CHRDEV "%x: released pardevice "
+ "because user-space forgot\n", minor);
+ }
+ }
+
+ if (pp->pdev) {
+ const char *name = pp->pdev->name;
+ parport_unregister_device (pp->pdev);
+ kfree (name);
+ pp->pdev = NULL;
+ pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
+ }
+
+ kfree (pp);
+
+ return 0;
+}
+
+/* No kernel lock held - fine */
+static unsigned int pp_poll (struct file * file, poll_table * wait)
+{
+ struct pp_struct *pp = file->private_data;
+ unsigned int mask = 0;
+
+ poll_wait (file, &pp->irq_wait, wait);
+ if (atomic_read (&pp->irqc))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+static struct class *ppdev_class;
+
+static const struct file_operations pp_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = pp_read,
+ .write = pp_write,
+ .poll = pp_poll,
+ .unlocked_ioctl = pp_ioctl,
+ .open = pp_open,
+ .release = pp_release,
+};
+
+static void pp_attach(struct parport *port)
+{
+ device_create(ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number),
+ NULL, "parport%d", port->number);
+}
+
+static void pp_detach(struct parport *port)
+{
+ device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
+}
+
+static struct parport_driver pp_driver = {
+ .name = CHRDEV,
+ .attach = pp_attach,
+ .detach = pp_detach,
+};
+
+static int __init ppdev_init (void)
+{
+ int err = 0;
+
+ if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) {
+ printk (KERN_WARNING CHRDEV ": unable to get major %d\n",
+ PP_MAJOR);
+ return -EIO;
+ }
+ ppdev_class = class_create(THIS_MODULE, CHRDEV);
+ if (IS_ERR(ppdev_class)) {
+ err = PTR_ERR(ppdev_class);
+ goto out_chrdev;
+ }
+ err = parport_register_driver(&pp_driver);
+ if (err) {
+ printk (KERN_WARNING CHRDEV ": unable to register with parport\n");
+ goto out_class;
+ }
+
+ printk (KERN_INFO PP_VERSION "\n");
+ goto out;
+
+out_class:
+ class_destroy(ppdev_class);
+out_chrdev:
+ unregister_chrdev(PP_MAJOR, CHRDEV);
+out:
+ return err;
+}
+
+static void __exit ppdev_cleanup (void)
+{
+ /* Clean up all parport stuff */
+ parport_unregister_driver(&pp_driver);
+ class_destroy(ppdev_class);
+ unregister_chrdev (PP_MAJOR, CHRDEV);
+}
+
+module_init(ppdev_init);
+module_exit(ppdev_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR);
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
new file mode 100644
index 00000000..85c004a5
--- /dev/null
+++ b/drivers/char/ps3flash.c
@@ -0,0 +1,472 @@
+/*
+ * PS3 FLASH ROM Storage Driver
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/lv1call.h>
+#include <asm/ps3stor.h>
+
+
+#define DEVICE_NAME "ps3flash"
+
+#define FLASH_BLOCK_SIZE (256*1024)
+
+
+struct ps3flash_private {
+ struct mutex mutex; /* Bounce buffer mutex */
+ u64 chunk_sectors;
+ int tag; /* Start sector of buffer, -1 if invalid */
+ bool dirty;
+};
+
+static struct ps3_storage_device *ps3flash_dev;
+
+static int ps3flash_read_write_sectors(struct ps3_storage_device *dev,
+ u64 start_sector, int write)
+{
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ u64 res = ps3stor_read_write_sectors(dev, dev->bounce_lpar,
+ start_sector, priv->chunk_sectors,
+ write);
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
+ __LINE__, write ? "write" : "read", res);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int ps3flash_writeback(struct ps3_storage_device *dev)
+{
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ int res;
+
+ if (!priv->dirty || priv->tag < 0)
+ return 0;
+
+ res = ps3flash_read_write_sectors(dev, priv->tag, 1);
+ if (res)
+ return res;
+
+ priv->dirty = false;
+ return 0;
+}
+
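+/* Ensure the bounce buffer holds the chunk starting at start_sector,
+ * writing back any dirty data first (a one-entry write-back cache,
+ * with priv->tag naming the cached chunk)
+ */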
+static int ps3flash_fetch(struct ps3_storage_device *dev, u64 start_sector)
+{
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ int res;
+
+ if (start_sector == priv->tag)
+ return 0;
+
+ res = ps3flash_writeback(dev);
+ if (res)
+ return res;
+
+ priv->tag = -1;
+
+ res = ps3flash_read_write_sectors(dev, start_sector, 0);
+ if (res)
+ return res;
+
+ priv->tag = start_sector;
+ return 0;
+}
+
+static loff_t ps3flash_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct ps3_storage_device *dev = ps3flash_dev;
+ loff_t res;
+
+ mutex_lock(&file->f_mapping->host->i_mutex);
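+ /* origin follows lseek(2): 1 = SEEK_CUR, 2 = SEEK_END */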
+ switch (origin) {
+ case 1:
+ offset += file->f_pos;
+ break;
+ case 2:
+ offset += dev->regions[dev->region_idx].size*dev->blk_size;
+ break;
+ }
+ if (offset < 0) {
+ res = -EINVAL;
+ goto out;
+ }
+
+ file->f_pos = offset;
+ res = file->f_pos;
+
+out:
+ mutex_unlock(&file->f_mapping->host->i_mutex);
+ return res;
+}
+
+static ssize_t ps3flash_read(char __user *userbuf, void *kernelbuf,
+ size_t count, loff_t *pos)
+{
+ struct ps3_storage_device *dev = ps3flash_dev;
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ u64 size, sector, offset;
+ int res;
+ size_t remaining, n;
+ const void *src;
+
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: Reading %zu bytes at position %lld to U0x%p/K0x%p\n",
+ __func__, __LINE__, count, *pos, userbuf, kernelbuf);
+
+ size = dev->regions[dev->region_idx].size*dev->blk_size;
+ if (*pos >= size || !count)
+ return 0;
+
+ if (*pos + count > size) {
+ dev_dbg(&dev->sbd.core,
+ "%s:%u Truncating count from %zu to %llu\n", __func__,
+ __LINE__, count, size - *pos);
+ count = size - *pos;
+ }
+
+ sector = *pos / dev->bounce_size * priv->chunk_sectors;
+ offset = *pos % dev->bounce_size;
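+ /* for example, with the 256 KiB bounce buffer and (assuming)
+ * 512-byte sectors, chunk_sectors = 512, so pos 300000 maps to
+ * sector 512 with offset 37856 into the chunk
+ */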
+
+ remaining = count;
+ do {
+ n = min_t(u64, remaining, dev->bounce_size - offset);
+ src = dev->bounce_buf + offset;
+
+ mutex_lock(&priv->mutex);
+
+ res = ps3flash_fetch(dev, sector);
+ if (res)
+ goto fail;
+
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: copy %lu bytes from 0x%p to U0x%p/K0x%p\n",
+ __func__, __LINE__, n, src, userbuf, kernelbuf);
+ if (userbuf) {
+ if (copy_to_user(userbuf, src, n)) {
+ res = -EFAULT;
+ goto fail;
+ }
+ userbuf += n;
+ }
+ if (kernelbuf) {
+ memcpy(kernelbuf, src, n);
+ kernelbuf += n;
+ }
+
+ mutex_unlock(&priv->mutex);
+
+ *pos += n;
+ remaining -= n;
+ sector += priv->chunk_sectors;
+ offset = 0;
+ } while (remaining > 0);
+
+ return count;
+
+fail:
+ mutex_unlock(&priv->mutex);
+ return res;
+}
+
+static ssize_t ps3flash_write(const char __user *userbuf,
+ const void *kernelbuf, size_t count, loff_t *pos)
+{
+ struct ps3_storage_device *dev = ps3flash_dev;
+ struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ u64 size, sector, offset;
+ int res = 0;
+ size_t remaining, n;
+ void *dst;
+
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: Writing %zu bytes at position %lld from U0x%p/K0x%p\n",
+ __func__, __LINE__, count, *pos, userbuf, kernelbuf);
+
+ size = dev->regions[dev->region_idx].size*dev->blk_size;
+ if (*pos >= size || !count)
+ return 0;
+
+ if (*pos + count > size) {
+ dev_dbg(&dev->sbd.core,
+ "%s:%u Truncating count from %zu to %llu\n", __func__,
+ __LINE__, count, size - *pos);
+ count = size - *pos;
+ }
+
+ sector = *pos / dev->bounce_size * priv->chunk_sectors;
+ offset = *pos % dev->bounce_size;
+
+ remaining = count;
+ do {
+ n = min_t(u64, remaining, dev->bounce_size - offset);
+ dst = dev->bounce_buf + offset;
+
+ mutex_lock(&priv->mutex);
+
+ if (n != dev->bounce_size)
+ res = ps3flash_fetch(dev, sector);
+ else if (sector != priv->tag)
+ res = ps3flash_writeback(dev);
+ if (res)
+ goto fail;
+
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: copy %lu bytes from U0x%p/K0x%p to 0x%p\n",
+ __func__, __LINE__, n, userbuf, kernelbuf, dst);
+ if (userbuf) {
+ if (copy_from_user(dst, userbuf, n)) {
+ res = -EFAULT;
+ goto fail;
+ }
+ userbuf += n;
+ }
+ if (kernelbuf) {
+ memcpy(dst, kernelbuf, n);
+ kernelbuf += n;
+ }
+
+ priv->tag = sector;
+ priv->dirty = true;
+
+ mutex_unlock(&priv->mutex);
+
+ *pos += n;
+ remaining -= n;
+ sector += priv->chunk_sectors;
+ offset = 0;
+ } while (remaining > 0);
+
+ return count;
+
+fail:
+ mutex_unlock(&priv->mutex);
+ return res;
+}
+
+static ssize_t ps3flash_user_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ return ps3flash_read(buf, NULL, count, pos);
+}
+
+static ssize_t ps3flash_user_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ return ps3flash_write(buf, NULL, count, pos);
+}
+
+static ssize_t ps3flash_kernel_read(void *buf, size_t count, loff_t pos)
+{
+ return ps3flash_read(NULL, buf, count, &pos);
+}
+
+static ssize_t ps3flash_kernel_write(const void *buf, size_t count,
+ loff_t pos)
+{
+ ssize_t res;
+ int wb;
+
+ res = ps3flash_write(NULL, buf, count, &pos);
+ if (res < 0)
+ return res;
+
+ /* Make kernel writes synchronous */
+ wb = ps3flash_writeback(ps3flash_dev);
+ if (wb)
+ return wb;
+
+ return res;
+}
+
+static int ps3flash_flush(struct file *file, fl_owner_t id)
+{
+ return ps3flash_writeback(ps3flash_dev);
+}
+
+static int ps3flash_fsync(struct file *file, int datasync)
+{
+ return ps3flash_writeback(ps3flash_dev);
+}
+
+static irqreturn_t ps3flash_interrupt(int irq, void *data)
+{
+ struct ps3_storage_device *dev = data;
+ int res;
+ u64 tag, status;
+
+ res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
+
+ if (tag != dev->tag)
+ dev_err(&dev->sbd.core,
+ "%s:%u: tag mismatch, got %llx, expected %llx\n",
+ __func__, __LINE__, tag, dev->tag);
+
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n",
+ __func__, __LINE__, res, status);
+ } else {
+ dev->lv1_status = status;
+ complete(&dev->done);
+ }
+ return IRQ_HANDLED;
+}
+
+static const struct file_operations ps3flash_fops = {
+ .owner = THIS_MODULE,
+ .llseek = ps3flash_llseek,
+ .read = ps3flash_user_read,
+ .write = ps3flash_user_write,
+ .flush = ps3flash_flush,
+ .fsync = ps3flash_fsync,
+};
+
+static const struct ps3_os_area_flash_ops ps3flash_kernel_ops = {
+ .read = ps3flash_kernel_read,
+ .write = ps3flash_kernel_write,
+};
+
+static struct miscdevice ps3flash_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = DEVICE_NAME,
+ .fops = &ps3flash_fops,
+};
+
+static int __devinit ps3flash_probe(struct ps3_system_bus_device *_dev)
+{
+ struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+ struct ps3flash_private *priv;
+ int error;
+ unsigned long tmp;
+
+ tmp = dev->regions[dev->region_idx].start*dev->blk_size;
+ if (tmp % FLASH_BLOCK_SIZE) {
+ dev_err(&dev->sbd.core,
+ "%s:%u region start %lu is not aligned\n", __func__,
+ __LINE__, tmp);
+ return -EINVAL;
+ }
+ tmp = dev->regions[dev->region_idx].size*dev->blk_size;
+ if (tmp % FLASH_BLOCK_SIZE) {
+ dev_err(&dev->sbd.core,
+ "%s:%u region size %lu is not aligned\n", __func__,
+ __LINE__, tmp);
+ return -EINVAL;
+ }
+
+ /* use static buffer, kmalloc cannot allocate 256 KiB */
+ if (!ps3flash_bounce_buffer.address)
+ return -ENODEV;
+
+ if (ps3flash_dev) {
+ dev_err(&dev->sbd.core,
+ "Only one FLASH device is supported\n");
+ return -EBUSY;
+ }
+
+ ps3flash_dev = dev;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ ps3_system_bus_set_drvdata(&dev->sbd, priv);
+ mutex_init(&priv->mutex);
+ priv->tag = -1;
+
+ dev->bounce_size = ps3flash_bounce_buffer.size;
+ dev->bounce_buf = ps3flash_bounce_buffer.address;
+ priv->chunk_sectors = dev->bounce_size / dev->blk_size;
+
+ error = ps3stor_setup(dev, ps3flash_interrupt);
+ if (error)
+ goto fail_free_priv;
+
+ ps3flash_misc.parent = &dev->sbd.core;
+ error = misc_register(&ps3flash_misc);
+ if (error) {
+ dev_err(&dev->sbd.core, "%s:%u: misc_register failed %d\n",
+ __func__, __LINE__, error);
+ goto fail_teardown;
+ }
+
+ dev_info(&dev->sbd.core, "%s:%u: registered misc device %d\n",
+ __func__, __LINE__, ps3flash_misc.minor);
+
+ ps3_os_area_flash_register(&ps3flash_kernel_ops);
+ return 0;
+
+fail_teardown:
+ ps3stor_teardown(dev);
+fail_free_priv:
+ kfree(priv);
+ ps3_system_bus_set_drvdata(&dev->sbd, NULL);
+fail:
+ ps3flash_dev = NULL;
+ return error;
+}
+
+static int ps3flash_remove(struct ps3_system_bus_device *_dev)
+{
+ struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+
+ ps3_os_area_flash_register(NULL);
+ misc_deregister(&ps3flash_misc);
+ ps3stor_teardown(dev);
+ kfree(ps3_system_bus_get_drvdata(&dev->sbd));
+ ps3_system_bus_set_drvdata(&dev->sbd, NULL);
+ ps3flash_dev = NULL;
+ return 0;
+}
+
+static struct ps3_system_bus_driver ps3flash = {
+ .match_id = PS3_MATCH_ID_STOR_FLASH,
+ .core.name = DEVICE_NAME,
+ .core.owner = THIS_MODULE,
+ .probe = ps3flash_probe,
+ .remove = ps3flash_remove,
+ .shutdown = ps3flash_remove,
+};
+
+static int __init ps3flash_init(void)
+{
+ return ps3_system_bus_driver_register(&ps3flash);
+}
+
+static void __exit ps3flash_exit(void)
+{
+ ps3_system_bus_driver_unregister(&ps3flash);
+}
+
+module_init(ps3flash_init);
+module_exit(ps3flash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PS3 FLASH ROM Storage Driver");
+MODULE_AUTHOR("Sony Corporation");
+MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_FLASH);
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
new file mode 100644
index 00000000..1a9f5f6d
--- /dev/null
+++ b/drivers/char/ramoops.c
@@ -0,0 +1,195 @@
+/*
+ * RAM Oops/Panic logger
+ *
+ * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmsg_dump.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/ramoops.h>
+
+#define RAMOOPS_KERNMSG_HDR "===="
+
+#define RECORD_SIZE 4096UL
+
+static ulong mem_address;
+module_param(mem_address, ulong, 0400);
+MODULE_PARM_DESC(mem_address,
+ "start of reserved RAM used to store oops/panic logs");
+
+static ulong mem_size;
+module_param(mem_size, ulong, 0400);
+MODULE_PARM_DESC(mem_size,
+ "size of reserved RAM used to store oops/panic logs");
+
+static int dump_oops = 1;
+module_param(dump_oops, int, 0600);
+MODULE_PARM_DESC(dump_oops,
+ "set to 1 to dump oopses, 0 to only dump panics (default 1)");
+
+static struct ramoops_context {
+ struct kmsg_dumper dump;
+ void *virt_addr;
+ phys_addr_t phys_addr;
+ unsigned long size;
+ int count;
+ int max_count;
+} oops_cxt;
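+
+/*
+ * Illustrative sketch (an assumption, not part of this driver): a board
+ * file can pass the reserved region via platform data instead of the
+ * module parameters above, e.g.:
+ *
+ *	static struct ramoops_platform_data ramoops_data = {
+ *		.mem_size	= SZ_1M,
+ *		.mem_address	= 0x8f000000,
+ *	};
+ *
+ *	static struct platform_device ramoops_dev = {
+ *		.name	= "ramoops",
+ *		.dev	= { .platform_data = &ramoops_data },
+ *	};
+ *
+ * registered with platform_device_register(&ramoops_dev) during board
+ * init; the address above is a made-up example value.
+ */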
+
+static void ramoops_do_dump(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
+ const char *s2, unsigned long l2)
+{
+ struct ramoops_context *cxt = container_of(dumper,
+ struct ramoops_context, dump);
+ unsigned long s1_start, s2_start;
+ unsigned long l1_cpy, l2_cpy;
+ int res, hdr_size;
+ char *buf, *buf_orig;
+ struct timeval timestamp;
+
+ if (reason != KMSG_DUMP_OOPS &&
+ reason != KMSG_DUMP_PANIC &&
+ reason != KMSG_DUMP_KEXEC)
+ return;
+
+ /* Only dump oopses if dump_oops is set */
+ if (reason == KMSG_DUMP_OOPS && !dump_oops)
+ return;
+
+ buf = cxt->virt_addr + (cxt->count * RECORD_SIZE);
+ buf_orig = buf;
+
+ memset(buf, '\0', RECORD_SIZE);
+ res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
+ buf += res;
+ do_gettimeofday(&timestamp);
+ res = sprintf(buf, "%lu.%lu\n",
+ (unsigned long)timestamp.tv_sec, (unsigned long)timestamp.tv_usec);
+ buf += res;
+
+ hdr_size = buf - buf_orig;
+ l2_cpy = min(l2, RECORD_SIZE - hdr_size);
+ l1_cpy = min(l1, RECORD_SIZE - hdr_size - l2_cpy);
+
+ s2_start = l2 - l2_cpy;
+ s1_start = l1 - l1_cpy;
+
+ memcpy(buf, s1 + s1_start, l1_cpy);
+ memcpy(buf + l1_cpy, s2 + s2_start, l2_cpy);
+
+ cxt->count = (cxt->count + 1) % cxt->max_count;
+}
+
+static int __init ramoops_probe(struct platform_device *pdev)
+{
+ struct ramoops_platform_data *pdata = pdev->dev.platform_data;
+ struct ramoops_context *cxt = &oops_cxt;
+ int err = -EINVAL;
+
+ if (pdata) {
+ mem_size = pdata->mem_size;
+ mem_address = pdata->mem_address;
+ }
+
+ if (!mem_size) {
+ printk(KERN_ERR "ramoops: invalid size specification\n");
+ goto fail3;
+ }
+
+ mem_size = rounddown_pow_of_two(mem_size);
+
+ if (mem_size < RECORD_SIZE) {
+ printk(KERN_ERR "ramoops: size too small\n");
+ goto fail3;
+ }
+
+ cxt->max_count = mem_size / RECORD_SIZE;
+ cxt->count = 0;
+ cxt->size = mem_size;
+ cxt->phys_addr = mem_address;
+
+ if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
+ printk(KERN_ERR "ramoops: request mem region failed\n");
+ err = -EINVAL;
+ goto fail3;
+ }
+
+ cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size);
+ if (!cxt->virt_addr) {
+ printk(KERN_ERR "ramoops: ioremap failed\n");
+ goto fail2;
+ }
+
+ cxt->dump.dump = ramoops_do_dump;
+ err = kmsg_dump_register(&cxt->dump);
+ if (err) {
+ printk(KERN_ERR "ramoops: registering kmsg dumper failed\n");
+ goto fail1;
+ }
+
+ return 0;
+
+fail1:
+ iounmap(cxt->virt_addr);
+fail2:
+ release_mem_region(cxt->phys_addr, cxt->size);
+fail3:
+ return err;
+}
+
+static int __exit ramoops_remove(struct platform_device *pdev)
+{
+ struct ramoops_context *cxt = &oops_cxt;
+
+ if (kmsg_dump_unregister(&cxt->dump) < 0)
+ printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper\n");
+
+ iounmap(cxt->virt_addr);
+ release_mem_region(cxt->phys_addr, cxt->size);
+ return 0;
+}
+
+static struct platform_driver ramoops_driver = {
+ .remove = __exit_p(ramoops_remove),
+ .driver = {
+ .name = "ramoops",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ramoops_init(void)
+{
+ return platform_driver_probe(&ramoops_driver, ramoops_probe);
+}
+
+static void __exit ramoops_exit(void)
+{
+ platform_driver_unregister(&ramoops_driver);
+}
+
+module_init(ramoops_init);
+module_exit(ramoops_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
+MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");
diff --git a/drivers/char/random.c b/drivers/char/random.c
new file mode 100644
index 00000000..c35a7850
--- /dev/null
+++ b/drivers/char/random.c
@@ -0,0 +1,1349 @@
+/*
+ * random.c -- A strong random number generator
+ *
+ * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
+ *
+ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
+ * rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, and the entire permission notice in its entirety,
+ * including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions. (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+/*
+ * (now, with legal B.S. out of the way.....)
+ *
+ * This routine gathers environmental noise from device drivers, etc.,
+ * and returns good random numbers, suitable for cryptographic use.
+ * Besides the obvious cryptographic uses, these numbers are also good
+ * for seeding TCP sequence numbers, and other places where it is
+ * desirable to have numbers which are not only random, but hard to
+ * predict by an attacker.
+ *
+ * Theory of operation
+ * ===================
+ *
+ * Computers are very predictable devices. Hence it is extremely hard
+ * to produce truly random numbers on a computer --- as opposed to
+ * pseudo-random numbers, which can easily be generated by using an
+ * algorithm. Unfortunately, it is very easy for attackers to guess
+ * the sequence produced by pseudo-random number generators, and for some
+ * applications this is not acceptable. So instead, we must try to
+ * gather "environmental noise" from the computer's environment, which
+ * must be hard for outside attackers to observe, and use that to
+ * generate random numbers. In a Unix environment, this is best done
+ * from inside the kernel.
+ *
+ * Sources of randomness from the environment include inter-keyboard
+ * timings, inter-interrupt timings from some interrupts, and other
+ * events which are both (a) non-deterministic and (b) hard for an
+ * outside observer to measure. Randomness from these sources is
+ * added to an "entropy pool", which is mixed using a CRC-like function.
+ * This is not cryptographically strong, but it is adequate assuming
+ * the randomness is not chosen maliciously, and it is fast enough that
+ * the overhead of doing it on every interrupt is very reasonable.
+ * As random bytes are mixed into the entropy pool, the routines keep
+ * an *estimate* of how many bits of randomness have been stored into
+ * the random number generator's internal state.
+ *
+ * When random bytes are desired, they are obtained by taking the SHA
+ * hash of the contents of the "entropy pool". The SHA hash avoids
+ * exposing the internal state of the entropy pool. It is believed to
+ * be computationally infeasible to derive any useful information
+ * about the input of SHA from its output. Even if it is possible to
+ * analyze SHA in some clever way, as long as the amount of data
+ * returned from the generator is less than the inherent entropy in
+ * the pool, the output data is totally unpredictable. For this
+ * reason, the routine decreases its internal estimate of how many
+ * bits of "true randomness" are contained in the entropy pool as it
+ * outputs random numbers.
+ *
+ * If this estimate goes to zero, the routine can still generate
+ * random numbers; however, an attacker may (at least in theory) be
+ * able to infer the future output of the generator from prior
+ * outputs. This requires successful cryptanalysis of SHA, which is
+ * not believed to be feasible, but there is a remote possibility.
+ * Nonetheless, these numbers should be useful for the vast majority
+ * of purposes.
+ *
+ * Exported interfaces ---- output
+ * ===============================
+ *
+ * There are three exported interfaces; the first is one designed to
+ * be used from within the kernel:
+ *
+ * void get_random_bytes(void *buf, int nbytes);
+ *
+ * This interface will return the requested number of random bytes,
+ * and place it in the requested buffer.
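+ *
+ *	For example (illustrative only), generating a random key inside
+ *	the kernel:
+ *
+ *		u8 key[16];
+ *		get_random_bytes(key, sizeof(key));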
+ *
+ * The two other interfaces are two character devices /dev/random and
+ * /dev/urandom. /dev/random is suitable for use when very high
+ * quality randomness is desired (for example, for key generation or
+ * one-time pads), as it will only return a maximum of the number of
+ * bits of randomness (as estimated by the random number generator)
+ * contained in the entropy pool.
+ *
+ * The /dev/urandom device does not have this limit, and will return
+ * as many bytes as are requested. As more and more random bytes are
+ * requested without giving time for the entropy pool to recharge,
+ * this will result in random numbers that are merely cryptographically
+ * strong. For many applications, however, this is acceptable.
+ *
+ * Exported interfaces ---- input
+ * ==============================
+ *
+ * The current exported interfaces for gathering environmental noise
+ * from the devices are:
+ *
+ * void add_input_randomness(unsigned int type, unsigned int code,
+ * unsigned int value);
+ * void add_interrupt_randomness(int irq);
+ * void add_disk_randomness(struct gendisk *disk);
+ *
+ * add_input_randomness() uses the input layer interrupt timing, as well as
+ * the event type information from the hardware.
+ *
+ * add_interrupt_randomness() uses the inter-interrupt timing as random
+ * inputs to the entropy pool. Note that not all interrupts are good
+ * sources of randomness! For example, the timer interrupt is not a
+ * good choice, because the periodicity of the interrupts is too
+ * regular, and hence predictable to an attacker. Network Interface
+ * Controller interrupts are a better measure, since the timing of
+ * NIC interrupts is more unpredictable.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+ * entropy pool. Note that high-speed solid state drives with very low
+ * seek times do not make for good sources of entropy, as their seek
+ * times are usually fairly consistent.
+ *
+ * All of these routines try to estimate how many bits of randomness a
+ * particular randomness source contributes. They do this by keeping
+ * track of the first and second order deltas of the event timings.
+ *
+ * Ensuring unpredictability at system startup
+ * ============================================
+ *
+ * When any operating system starts up, it will go through a sequence
+ * of actions that are fairly predictable by an adversary, especially
+ * if the start-up does not involve interaction with a human operator.
+ * This reduces the actual number of bits of unpredictability in the
+ * entropy pool below the value in entropy_count. In order to
+ * counteract this effect, it helps to carry information in the
+ * entropy pool across shut-downs and start-ups. To do this, put the
+ * following lines an appropriate script which is run during the boot
+ * sequence:
+ *
+ * echo "Initializing random number generator..."
+ * random_seed=/var/run/random-seed
+ * # Carry a random seed from start-up to start-up
+ * # Load and then save the whole entropy pool
+ * if [ -f $random_seed ]; then
+ * cat $random_seed >/dev/urandom
+ * else
+ * touch $random_seed
+ * fi
+ * chmod 600 $random_seed
+ * dd if=/dev/urandom of=$random_seed count=1 bs=512
+ *
+ * and the following lines in an appropriate script which is run as
+ * the system is shutdown:
+ *
+ * # Carry a random seed from shut-down to start-up
+ * # Save the whole entropy pool
+ * echo "Saving random seed..."
+ * random_seed=/var/run/random-seed
+ * touch $random_seed
+ * chmod 600 $random_seed
+ * dd if=/dev/urandom of=$random_seed count=1 bs=512
+ *
+ * For example, on most modern systems using the System V init
+ * scripts, such code fragments would be found in
+ * /etc/rc.d/init.d/random. On older Linux systems, the correct script
+ * location might be in /etc/rc.d/rc.local or /etc/rc.d/rc.0.
+ *
+ * Effectively, these commands cause the contents of the entropy pool
+ * to be saved at shut-down time and reloaded into the entropy pool at
+ * start-up. (The 'dd' in the addition to the bootup script is to
+ * make sure that /etc/random-seed is different for every start-up,
+ * even if the system crashes without executing rc.0.) Even with
+ * complete knowledge of the start-up activities, predicting the state
+ * of the entropy pool requires knowledge of the previous history of
+ * the system.
+ *
+ * Configuring the /dev/random driver under Linux
+ * ==============================================
+ *
+ * The /dev/random driver under Linux uses minor numbers 8 and 9 of
+ * the /dev/mem major number (#1). So if your system does not have
+ * /dev/random and /dev/urandom created already, they can be created
+ * by using the commands:
+ *
+ * mknod /dev/random c 1 8
+ * mknod /dev/urandom c 1 9
+ *
+ * Acknowledgements:
+ * =================
+ *
+ * Ideas for constructing this random number generator were derived
+ * from Pretty Good Privacy's random number generator, and from private
+ * discussions with Phil Karn. Colin Plumb provided a faster random
+ * number generator, which sped up the mixing function of the entropy
+ * pool, taken from PGPfone. Dale Worley has also contributed many
+ * useful ideas and suggestions to improve this driver.
+ *
+ * Any flaws in the design are solely my responsibility, and should
+ * not be attributed to Phil, Colin, or any of the authors of PGP.
+ *
+ * Further background information on this topic may be obtained from
+ * RFC 1750, "Randomness Recommendations for Security", by Donald
+ * Eastlake, Steve Crocker, and Jeff Schiller.
+ */
+
+#include <linux/utsname.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/cryptohash.h>
+#include <linux/fips.h>
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+# include <linux/irq.h>
+#endif
+
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+/*
+ * Configuration information
+ */
+#define INPUT_POOL_WORDS 128
+#define OUTPUT_POOL_WORDS 32
+#define SEC_XFER_SIZE 512
+#define EXTRACT_SIZE 10
+
+/*
+ * The minimum number of bits of entropy before we wake up a read on
+ * /dev/random. Should be enough to do a significant reseed.
+ */
+static int random_read_wakeup_thresh = 64;
+
+/*
+ * If the entropy count falls under this number of bits, then we
+ * should wake up processes which are selecting or polling on write
+ * access to /dev/random.
+ */
+static int random_write_wakeup_thresh = 128;
+
+/*
+ * When the input pool goes over trickle_thresh, start dropping most
+ * samples to avoid wasting CPU time and reduce lock contention.
+ */
+
+static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
+
+static DEFINE_PER_CPU(int, trickle_count);
+
+/*
+ * A pool of size .poolwords is stirred with a primitive polynomial
+ * of degree .poolwords over GF(2). The taps for various sizes are
+ * defined below. They are chosen to be evenly spaced (minimum RMS
+ * distance from evenly spaced; the numbers in the comments are a
+ * scaled squared error sum) except for the last tap, which is 1 to
+ * get the twisting happening as fast as possible.
+ */
+static struct poolinfo {
+ int poolwords;
+ int tap1, tap2, tap3, tap4, tap5;
+} poolinfo_table[] = {
+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
+ { 128, 103, 76, 51, 25, 1 },
+ /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
+ { 32, 26, 20, 14, 7, 1 },
+#if 0
+ /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
+ { 2048, 1638, 1231, 819, 411, 1 },
+
+ /* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
+ { 1024, 817, 615, 412, 204, 1 },
+
+ /* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
+ { 1024, 819, 616, 410, 207, 2 },
+
+ /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
+ { 512, 411, 308, 208, 104, 1 },
+
+ /* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
+ { 512, 409, 307, 206, 102, 2 },
+ /* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
+ { 512, 409, 309, 205, 103, 2 },
+
+ /* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
+ { 256, 205, 155, 101, 52, 1 },
+
+ /* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
+ { 128, 103, 78, 51, 27, 2 },
+
+ /* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
+ { 64, 52, 39, 26, 14, 1 },
+#endif
+};
+
+#define POOLBITS poolwords*32
+#define POOLBYTES poolwords*4
+
+/*
+ * For the purposes of better mixing, we use the CRC-32 polynomial as
+ * well to make a twisted Generalized Feedback Shift Register
+ *
+ * (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR generators. ACM
+ * Transactions on Modeling and Computer Simulation 2(3):179-194.
+ * Also see M. Matsumoto & Y. Kurita, 1994. Twisted GFSR generators
+ * II. ACM Transactions on Modeling and Computer Simulation 4:254-266)
+ *
+ * Thanks to Colin Plumb for suggesting this.
+ *
+ * We have not analyzed the resultant polynomial to prove it primitive;
+ * in fact it almost certainly isn't. Nonetheless, the irreducible factors
+ * of a random large-degree polynomial over GF(2) are more than large enough
+ * that periodicity is not a concern.
+ *
+ * The input hash is much less sensitive than the output hash. All
+ * that we want of it is that it be a good non-cryptographic hash;
+ * i.e. it not produce collisions when fed "random" data of the sort
+ * we expect to see. As long as the pool state differs for different
+ * inputs, we have preserved the input entropy and done a good job.
+ * The fact that an intelligent attacker can construct inputs that
+ * will produce controlled alterations to the pool's state is not
+ * important because we don't consider such inputs to contribute any
+ * randomness. The only property we need with respect to them is that
+ * the attacker can't increase his/her knowledge of the pool's state.
+ * Since all additions are reversible (knowing the final state and the
+ * input, you can reconstruct the initial state), if an attacker has
+ * any uncertainty about the initial state, he/she can only shuffle
+ * that uncertainty about, but never cause any collisions (which would
+ * decrease the uncertainty).
+ *
+ * The chosen system lets the state of the pool be (essentially) the input
+ * modulo the generator polynomial. Now, for random primitive polynomials,
+ * this is a universal class of hash functions, meaning that the chance
+ * of a collision is limited by the attacker's knowledge of the generator
+ * polynomial, so if it is chosen at random, an attacker can never force
+ * a collision. Here, we use a fixed polynomial, but we *can* assume that
+ * ###--> it is unknown to the processes generating the input entropy. <-###
+ * Because of this important property, this is a good, collision-resistant
+ * hash; hash collisions will occur no more often than chance.
+ */
+
+/*
+ * Static global variables
+ */
+static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+static struct fasync_struct *fasync;
+
+#if 0
+static int debug;
+module_param(debug, bool, 0644);
+#define DEBUG_ENT(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG "random %04d %04d %04d: " \
+ fmt,\
+ input_pool.entropy_count,\
+ blocking_pool.entropy_count,\
+ nonblocking_pool.entropy_count,\
+ ## arg); } while (0)
+#else
+#define DEBUG_ENT(fmt, arg...) do {} while (0)
+#endif
+
+/**********************************************************************
+ *
+ * OS independent entropy store. Here are the functions which handle
+ * storing entropy in an entropy pool.
+ *
+ **********************************************************************/
+
+struct entropy_store;
+struct entropy_store {
+ /* read-only data: */
+ struct poolinfo *poolinfo;
+ __u32 *pool;
+ const char *name;
+ struct entropy_store *pull;
+ int limit;
+
+ /* read-write data: */
+ spinlock_t lock;
+ unsigned add_ptr;
+ int entropy_count;
+ int input_rotate;
+ __u8 last_data[EXTRACT_SIZE];
+};
+
+static __u32 input_pool_data[INPUT_POOL_WORDS];
+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
+
+static struct entropy_store input_pool = {
+ .poolinfo = &poolinfo_table[0],
+ .name = "input",
+ .limit = 1,
+ .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
+ .pool = input_pool_data
+};
+
+static struct entropy_store blocking_pool = {
+ .poolinfo = &poolinfo_table[1],
+ .name = "blocking",
+ .limit = 1,
+ .pull = &input_pool,
+ .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
+ .pool = blocking_pool_data
+};
+
+static struct entropy_store nonblocking_pool = {
+ .poolinfo = &poolinfo_table[1],
+ .name = "nonblocking",
+ .pull = &input_pool,
+ .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
+ .pool = nonblocking_pool_data
+};
+
+/*
+ * This function adds bytes into the entropy "pool". It does not
+ * update the entropy estimate. The caller should call
+ * credit_entropy_bits if this is appropriate.
+ *
+ * The pool is stirred with a primitive polynomial of the appropriate
+ * degree, and then twisted. We twist by three bits at a time because
+ * it's cheap to do so and helps slightly in the expected case where
+ * the entropy is concentrated in the low-order bits.
+ */
+static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+ int nbytes, __u8 out[64])
+{
+ static __u32 const twist_table[8] = {
+ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+ unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+ int input_rotate;
+ int wordmask = r->poolinfo->poolwords - 1;
+ const char *bytes = in;
+ __u32 w;
+ unsigned long flags;
+
+ /* Taps are constant, so we can load them without holding r->lock. */
+ tap1 = r->poolinfo->tap1;
+ tap2 = r->poolinfo->tap2;
+ tap3 = r->poolinfo->tap3;
+ tap4 = r->poolinfo->tap4;
+ tap5 = r->poolinfo->tap5;
+
+ spin_lock_irqsave(&r->lock, flags);
+ input_rotate = r->input_rotate;
+ i = r->add_ptr;
+
+ /* mix one byte at a time to simplify size handling and churn faster */
+ while (nbytes--) {
+ w = rol32(*bytes++, input_rotate & 31);
+ i = (i - 1) & wordmask;
+
+ /* XOR in the various taps */
+ w ^= r->pool[i];
+ w ^= r->pool[(i + tap1) & wordmask];
+ w ^= r->pool[(i + tap2) & wordmask];
+ w ^= r->pool[(i + tap3) & wordmask];
+ w ^= r->pool[(i + tap4) & wordmask];
+ w ^= r->pool[(i + tap5) & wordmask];
+
+ /* Mix the result back in with a twist */
+ r->pool[i] = (w >> 3) ^ twist_table[w & 7];
+
+ /*
+ * Normally, we add 7 bits of rotation to the pool.
+ * At the beginning of the pool, add an extra 7 bits
+ * rotation, so that successive passes spread the
+ * input bits across the pool evenly.
+ */
+ input_rotate += i ? 7 : 14;
+ }
+
+ r->input_rotate = input_rotate;
+ r->add_ptr = i;
+
+ if (out)
+ for (j = 0; j < 16; j++)
+ ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
+
+ spin_unlock_irqrestore(&r->lock, flags);
+}
+
+static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
+{
+ mix_pool_bytes_extract(r, in, bytes, NULL);
+}
+
+/*
+ * Credit (or debit) the entropy store with n bits of entropy
+ */
+static void credit_entropy_bits(struct entropy_store *r, int nbits)
+{
+ unsigned long flags;
+ int entropy_count;
+
+ if (!nbits)
+ return;
+
+ spin_lock_irqsave(&r->lock, flags);
+
+ DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+ entropy_count = r->entropy_count;
+ entropy_count += nbits;
+ if (entropy_count < 0) {
+ DEBUG_ENT("negative entropy/overflow\n");
+ entropy_count = 0;
+ } else if (entropy_count > r->poolinfo->POOLBITS)
+ entropy_count = r->poolinfo->POOLBITS;
+ r->entropy_count = entropy_count;
+
+ /* should we wake readers? */
+ if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
+ wake_up_interruptible(&random_read_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ }
+ spin_unlock_irqrestore(&r->lock, flags);
+}
+
+/*********************************************************************
+ *
+ * Entropy input management
+ *
+ *********************************************************************/
+
+/* There is one of these per entropy source */
+struct timer_rand_state {
+ cycles_t last_time;
+ long last_delta, last_delta2;
+ unsigned dont_count_entropy:1;
+};
+
+#ifndef CONFIG_GENERIC_HARDIRQS
+
+static struct timer_rand_state *irq_timer_state[NR_IRQS];
+
+static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+ return irq_timer_state[irq];
+}
+
+static void set_timer_rand_state(unsigned int irq,
+ struct timer_rand_state *state)
+{
+ irq_timer_state[irq] = state;
+}
+
+#else
+
+static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+
+ return desc->timer_rand_state;
+}
+
+static void set_timer_rand_state(unsigned int irq,
+ struct timer_rand_state *state)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+
+ desc->timer_rand_state = state;
+}
+#endif
+
+static struct timer_rand_state input_timer_state;
+
+/*
+ * This function adds entropy to the entropy "pool" by using timing
+ * delays. It uses the timer_rand_state structure to make an estimate
+ * of how many bits of entropy this call has added to the pool.
+ *
+ * The number "num" is also added to the pool - it should somehow describe
+ * the type of event which just happened. This is currently 0-255 for
+ * keyboard scan codes, and 256 upwards for interrupts.
+ *
+ */
+static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+{
+ struct {
+ cycles_t cycles;
+ long jiffies;
+ unsigned num;
+ } sample;
+ long delta, delta2, delta3;
+
+ preempt_disable();
+ /* if over the trickle threshold, use only 1 in 4096 samples */
+ if (input_pool.entropy_count > trickle_thresh &&
+ ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
+ goto out;
+
+ sample.jiffies = jiffies;
+ sample.cycles = get_cycles();
+ sample.num = num;
+ mix_pool_bytes(&input_pool, &sample, sizeof(sample));
+
+ /*
+ * Calculate number of bits of randomness we probably added.
+ * We take into account the first, second and third-order deltas
+ * in order to make our estimate.
+ */
+
+ if (!state->dont_count_entropy) {
+ delta = sample.jiffies - state->last_time;
+ state->last_time = sample.jiffies;
+
+ delta2 = delta - state->last_delta;
+ state->last_delta = delta;
+
+ delta3 = delta2 - state->last_delta2;
+ state->last_delta2 = delta2;
+
+ if (delta < 0)
+ delta = -delta;
+ if (delta2 < 0)
+ delta2 = -delta2;
+ if (delta3 < 0)
+ delta3 = -delta3;
+ if (delta > delta2)
+ delta = delta2;
+ if (delta > delta3)
+ delta = delta3;
+
+ /*
+ * delta is now minimum absolute delta.
+ * Round down by 1 bit on general principles,
+ * and limit the entropy estimate to 11 bits.
+ */
+ credit_entropy_bits(&input_pool,
+ min_t(int, fls(delta>>1), 11));
+ }
+out:
+ preempt_enable();
+}
+
+void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value)
+{
+ static unsigned char last_value;
+
+ /* ignore autorepeat and the like */
+ if (value == last_value)
+ return;
+
+ DEBUG_ENT("input event\n");
+ last_value = value;
+ add_timer_randomness(&input_timer_state,
+ (type << 4) ^ code ^ (code >> 4) ^ value);
+}
+EXPORT_SYMBOL_GPL(add_input_randomness);
+
+void add_interrupt_randomness(int irq)
+{
+ struct timer_rand_state *state;
+
+ state = get_timer_rand_state(irq);
+
+ if (state == NULL)
+ return;
+
+ DEBUG_ENT("irq event %d\n", irq);
+ add_timer_randomness(state, 0x100 + irq);
+}
+
+#ifdef CONFIG_BLOCK
+void add_disk_randomness(struct gendisk *disk)
+{
+ if (!disk || !disk->random)
+ return;
+ /* first major is 1, so we get >= 0x200 here */
+ DEBUG_ENT("disk event %d:%d\n",
+ MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));
+
+ add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
+}
+#endif
+
+/*********************************************************************
+ *
+ * Entropy extraction routines
+ *
+ *********************************************************************/
+
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ size_t nbytes, int min, int rsvd);
+
+/*
+ * This utility inline function is responsible for transferring entropy
+ * from the primary pool to the secondary extraction pool. We make
+ * sure we pull enough for a 'catastrophic reseed'.
+ */
+static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+{
+ __u32 tmp[OUTPUT_POOL_WORDS];
+
+ if (r->pull && r->entropy_count < nbytes * 8 &&
+ r->entropy_count < r->poolinfo->POOLBITS) {
+ /* If unlimited, leave two wakeup thresholds' worth in the input pool */
+ int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
+ int bytes = nbytes;
+
+ /* pull at least the wakeup threshold's worth of bytes */
+ bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
+ /* but never more than the buffer size */
+ bytes = min_t(int, bytes, sizeof(tmp));
+
+ DEBUG_ENT("going to reseed %s with %d bits "
+ "(%d of %d requested)\n",
+ r->name, bytes * 8, nbytes * 8, r->entropy_count);
+
+ bytes = extract_entropy(r->pull, tmp, bytes,
+ random_read_wakeup_thresh / 8, rsvd);
+ mix_pool_bytes(r, tmp, bytes);
+ credit_entropy_bits(r, bytes*8);
+ }
+}
+
+/*
+ * This function extracts randomness from the "entropy pool" and
+ * returns it in a buffer.
+ *
+ * The min parameter specifies the minimum amount we can pull before
+ * failing, to avoid races that defeat catastrophic reseeding, while the
+ * reserved parameter indicates how much entropy we must leave in the
+ * pool after each pull to avoid starving other readers.
+ *
+ * Note: extract_entropy() assumes that .poolwords is a multiple of 16 words.
+ */
+
+static size_t account(struct entropy_store *r, size_t nbytes, int min,
+ int reserved)
+{
+ unsigned long flags;
+
+ /* Hold lock while accounting */
+ spin_lock_irqsave(&r->lock, flags);
+
+ BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
+ DEBUG_ENT("trying to extract %d bits from %s\n",
+ nbytes * 8, r->name);
+
+ /* Can we pull enough? */
+ if (r->entropy_count / 8 < min + reserved) {
+ nbytes = 0;
+ } else {
+ /* If limited, never pull more than available */
+ if (r->limit && nbytes + reserved >= r->entropy_count / 8)
+ nbytes = r->entropy_count/8 - reserved;
+
+ if (r->entropy_count / 8 >= nbytes + reserved)
+ r->entropy_count -= nbytes*8;
+ else
+ r->entropy_count = reserved;
+
+ if (r->entropy_count < random_write_wakeup_thresh) {
+ wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
+ }
+
+ DEBUG_ENT("debiting %d entropy credits from %s%s\n",
+ nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
+
+ spin_unlock_irqrestore(&r->lock, flags);
+
+ return nbytes;
+}
+
+static void extract_buf(struct entropy_store *r, __u8 *out)
+{
+ int i;
+ __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+ __u8 extract[64];
+
+ /* Generate a hash across the pool, 16 words (512 bits) at a time */
+ sha_init(hash);
+ for (i = 0; i < r->poolinfo->poolwords; i += 16)
+ sha_transform(hash, (__u8 *)(r->pool + i), workspace);
+
+ /*
+ * We mix the hash back into the pool to prevent backtracking
+ * attacks (where the attacker knows the state of the pool
+ * plus the current outputs, and attempts to find previous
+ * outputs), unless the hash function can be inverted. By
+ * mixing at least a SHA1 worth of hash data back, we make
+ * brute-forcing the feedback as hard as brute-forcing the
+ * hash.
+ */
+ mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
+
+ /*
+ * To avoid duplicates, we atomically extract a portion of the
+ * pool while mixing, and hash one final time.
+ */
+ sha_transform(hash, extract, workspace);
+ memset(extract, 0, sizeof(extract));
+ memset(workspace, 0, sizeof(workspace));
+
+ /*
+ * In case the hash function has some recognizable output
+ * pattern, we fold it in half. Thus, we always feed back
+ * twice as much data as we output.
+ */
+ hash[0] ^= hash[3];
+ hash[1] ^= hash[4];
+ hash[2] ^= rol32(hash[2], 16);
+ memcpy(out, hash, EXTRACT_SIZE);
+ memset(hash, 0, sizeof(hash));
+}
+
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ size_t nbytes, int min, int reserved)
+{
+ ssize_t ret = 0, i;
+ __u8 tmp[EXTRACT_SIZE];
+ unsigned long flags;
+
+ xfer_secondary_pool(r, nbytes);
+ nbytes = account(r, nbytes, min, reserved);
+
+ while (nbytes) {
+ extract_buf(r, tmp);
+
+ if (fips_enabled) {
+ spin_lock_irqsave(&r->lock, flags);
+ if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+ panic("Hardware RNG duplicated output!\n");
+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
+ spin_unlock_irqrestore(&r->lock, flags);
+ }
+ i = min_t(int, nbytes, EXTRACT_SIZE);
+ memcpy(buf, tmp, i);
+ nbytes -= i;
+ buf += i;
+ ret += i;
+ }
+
+ /* Wipe data just returned from memory */
+ memset(tmp, 0, sizeof(tmp));
+
+ return ret;
+}
+
+static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+ size_t nbytes)
+{
+ ssize_t ret = 0, i;
+ __u8 tmp[EXTRACT_SIZE];
+
+ xfer_secondary_pool(r, nbytes);
+ nbytes = account(r, nbytes, 0, 0);
+
+ while (nbytes) {
+ if (need_resched()) {
+ if (signal_pending(current)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ break;
+ }
+ schedule();
+ }
+
+ extract_buf(r, tmp);
+ i = min_t(int, nbytes, EXTRACT_SIZE);
+ if (copy_to_user(buf, tmp, i)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ nbytes -= i;
+ buf += i;
+ ret += i;
+ }
+
+ /* Wipe data just returned from memory */
+ memset(tmp, 0, sizeof(tmp));
+
+ return ret;
+}
+
+/*
+ * This function is the exported kernel interface. It returns some
+ * number of good random numbers, suitable for seeding TCP sequence
+ * numbers, etc.
+ */
+void get_random_bytes(void *buf, int nbytes)
+{
+ extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
+}
+EXPORT_SYMBOL(get_random_bytes);
+
+/*
+ * init_std_data - initialize pool with system data
+ *
+ * @r: pool to initialize
+ *
+ * This function clears the pool's entropy count and mixes some system
+ * data into the pool to prepare it for use. The pool is not cleared
+ * as that can only decrease the entropy in the pool.
+ */
+static void init_std_data(struct entropy_store *r)
+{
+ ktime_t now;
+ unsigned long flags;
+
+ spin_lock_irqsave(&r->lock, flags);
+ r->entropy_count = 0;
+ spin_unlock_irqrestore(&r->lock, flags);
+
+ now = ktime_get_real();
+ mix_pool_bytes(r, &now, sizeof(now));
+ mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+}
+
+static int rand_initialize(void)
+{
+ init_std_data(&input_pool);
+ init_std_data(&blocking_pool);
+ init_std_data(&nonblocking_pool);
+ return 0;
+}
+module_init(rand_initialize);
+
+void rand_initialize_irq(int irq)
+{
+ struct timer_rand_state *state;
+
+ state = get_timer_rand_state(irq);
+
+ if (state)
+ return;
+
+ /*
+ * If kzalloc returns null, we just won't use that entropy
+ * source.
+ */
+ state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+ if (state)
+ set_timer_rand_state(irq, state);
+}
+
+#ifdef CONFIG_BLOCK
+void rand_initialize_disk(struct gendisk *disk)
+{
+ struct timer_rand_state *state;
+
+ /*
+ * If kzalloc returns null, we just won't use that entropy
+ * source.
+ */
+ state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+ if (state)
+ disk->random = state;
+}
+#endif
+
+static ssize_t
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+{
+ ssize_t n, retval = 0, count = 0;
+
+ if (nbytes == 0)
+ return 0;
+
+ while (nbytes > 0) {
+ n = nbytes;
+ if (n > SEC_XFER_SIZE)
+ n = SEC_XFER_SIZE;
+
+ DEBUG_ENT("reading %d bits\n", n*8);
+
+ n = extract_entropy_user(&blocking_pool, buf, n);
+
+ DEBUG_ENT("read got %d bits (%d still needed)\n",
+ n*8, (nbytes-n)*8);
+
+ if (n == 0) {
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ break;
+ }
+
+ DEBUG_ENT("sleeping?\n");
+
+ wait_event_interruptible(random_read_wait,
+ input_pool.entropy_count >=
+ random_read_wakeup_thresh);
+
+ DEBUG_ENT("awake\n");
+
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ continue;
+ }
+
+ if (n < 0) {
+ retval = n;
+ break;
+ }
+ count += n;
+ buf += n;
+ nbytes -= n;
+ break; /* This break makes the device work */
+ /* like a named pipe */
+ }
+
+ return (count ? count : retval);
+}
+
+static ssize_t
+urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+{
+ return extract_entropy_user(&nonblocking_pool, buf, nbytes);
+}
+
+static unsigned int
+random_poll(struct file *file, poll_table * wait)
+{
+ unsigned int mask;
+
+ poll_wait(file, &random_read_wait, wait);
+ poll_wait(file, &random_write_wait, wait);
+ mask = 0;
+ if (input_pool.entropy_count >= random_read_wakeup_thresh)
+ mask |= POLLIN | POLLRDNORM;
+ if (input_pool.entropy_count < random_write_wakeup_thresh)
+ mask |= POLLOUT | POLLWRNORM;
+ return mask;
+}
+
+static int
+write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+{
+ size_t bytes;
+ __u32 buf[16];
+ const char __user *p = buffer;
+
+ while (count > 0) {
+ bytes = min(count, sizeof(buf));
+ if (copy_from_user(&buf, p, bytes))
+ return -EFAULT;
+
+ count -= bytes;
+ p += bytes;
+
+ mix_pool_bytes(r, buf, bytes);
+ cond_resched();
+ }
+
+ return 0;
+}
+
+static ssize_t random_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ ssize_t ret;
+
+ ret = write_pool(&blocking_pool, buffer, count);
+ if (ret)
+ return ret;
+ ret = write_pool(&nonblocking_pool, buffer, count);
+ if (ret)
+ return ret;
+
+ return (ssize_t)count;
+}
+
+static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ int size, ent_count;
+ int __user *p = (int __user *)arg;
+ int retval;
+
+ switch (cmd) {
+ case RNDGETENTCNT:
+ /* inherently racy, no point locking */
+ if (put_user(input_pool.entropy_count, p))
+ return -EFAULT;
+ return 0;
+ case RNDADDTOENTCNT:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ent_count, p))
+ return -EFAULT;
+ credit_entropy_bits(&input_pool, ent_count);
+ return 0;
+ case RNDADDENTROPY:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ent_count, p++))
+ return -EFAULT;
+ if (ent_count < 0)
+ return -EINVAL;
+ if (get_user(size, p++))
+ return -EFAULT;
+ retval = write_pool(&input_pool, (const char __user *)p,
+ size);
+ if (retval < 0)
+ return retval;
+ credit_entropy_bits(&input_pool, ent_count);
+ return 0;
+ case RNDZAPENTCNT:
+ case RNDCLEARPOOL:
+ /* Clear the entropy pool counters. */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ rand_initialize();
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
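+
+/*
+ * Illustrative userspace sketch (assumes fd is an open /dev/random
+ * descriptor): feed 512 bytes of gathered entropy and credit 64 bits
+ * via RNDADDENTROPY, using struct rand_pool_info from <linux/random.h>:
+ *
+ *	struct rand_pool_info *info = malloc(sizeof(*info) + 512);
+ *
+ *	info->entropy_count = 64;
+ *	info->buf_size = 512;
+ *	... fill info->buf with 512 bytes of entropy ...
+ *	ioctl(fd, RNDADDENTROPY, info);
+ */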
+
+static int random_fasync(int fd, struct file *filp, int on)
+{
+ return fasync_helper(fd, filp, on, &fasync);
+}
+
+const struct file_operations random_fops = {
+ .read = random_read,
+ .write = random_write,
+ .poll = random_poll,
+ .unlocked_ioctl = random_ioctl,
+ .fasync = random_fasync,
+ .llseek = noop_llseek,
+};
+
+const struct file_operations urandom_fops = {
+ .read = urandom_read,
+ .write = random_write,
+ .unlocked_ioctl = random_ioctl,
+ .fasync = random_fasync,
+ .llseek = noop_llseek,
+};
+
+/***************************************************************
+ * Random UUID interface
+ *
+ * Used here for a Boot ID, but can be useful for other kernel
+ * drivers.
+ ***************************************************************/
+
+/*
+ * Generate random UUID
+ */
+void generate_random_uuid(unsigned char uuid_out[16])
+{
+ get_random_bytes(uuid_out, 16);
+ /* Set UUID version to 4 --- truly random generation */
+ uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40;
+ /* Set the UUID variant to DCE */
+ uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
+}
+EXPORT_SYMBOL(generate_random_uuid);
+
+/********************************************************************
+ *
+ * Sysctl interface
+ *
+ ********************************************************************/
+
+#ifdef CONFIG_SYSCTL
+
+#include <linux/sysctl.h>
+
+static int min_read_thresh = 8, min_write_thresh;
+static int max_read_thresh = INPUT_POOL_WORDS * 32;
+static int max_write_thresh = INPUT_POOL_WORDS * 32;
+static char sysctl_bootid[16];
+
+/*
+ * This function is used to return both the boot_id UUID and a random
+ * UUID. The difference is in whether table->data is NULL; if it is,
+ * then a new UUID is generated and returned to the user.
+ *
+ * If the user accesses this via the proc interface, it will be returned
+ * as an ASCII string in the standard UUID format. If accessed via the
+ * sysctl system call, it is returned as 16 bytes of binary data.
+ */
+static int proc_do_uuid(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ ctl_table fake_table;
+ unsigned char buf[64], tmp_uuid[16], *uuid;
+
+ uuid = table->data;
+ if (!uuid) {
+ uuid = tmp_uuid;
+ uuid[8] = 0;
+ }
+ if (uuid[8] == 0)
+ generate_random_uuid(uuid);
+
+ sprintf(buf, "%pU", uuid);
+
+ fake_table.data = buf;
+ fake_table.maxlen = sizeof(buf);
+
+ return proc_dostring(&fake_table, write, buffer, lenp, ppos);
+}
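+
+/*
+ * For example (illustrative), reading the two UUIDs via procfs:
+ *
+ *	$ cat /proc/sys/kernel/random/boot_id
+ *	$ cat /proc/sys/kernel/random/uuid
+ *
+ * boot_id stays constant until reboot, while uuid returns a freshly
+ * generated version-4 UUID on every read.
+ */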
+
+static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
+ctl_table random_table[] = {
+ {
+ .procname = "poolsize",
+ .data = &sysctl_poolsize,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "entropy_avail",
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ .data = &input_pool.entropy_count,
+ },
+ {
+ .procname = "read_wakeup_threshold",
+ .data = &random_read_wakeup_thresh,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_read_thresh,
+ .extra2 = &max_read_thresh,
+ },
+ {
+ .procname = "write_wakeup_threshold",
+ .data = &random_write_wakeup_thresh,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_write_thresh,
+ .extra2 = &max_write_thresh,
+ },
+ {
+ .procname = "boot_id",
+ .data = &sysctl_bootid,
+ .maxlen = 16,
+ .mode = 0444,
+ .proc_handler = proc_do_uuid,
+ },
+ {
+ .procname = "uuid",
+ .maxlen = 16,
+ .mode = 0444,
+ .proc_handler = proc_do_uuid,
+ },
+ { }
+};
+#endif /* CONFIG_SYSCTL */
+
+static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+
+static int __init random_int_secret_init(void)
+{
+ get_random_bytes(random_int_secret, sizeof(random_int_secret));
+ return 0;
+}
+late_initcall(random_int_secret_init);
+
+/*
+ * Get a random word for internal kernel use only. Similar to urandom but
+ * with the goal of minimal entropy pool depletion. As a result, the random
+ * value is not cryptographically secure but for several uses the cost of
+ * depleting entropy is too high.
+ */
+DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
+unsigned int get_random_int(void)
+{
+ __u32 *hash = get_cpu_var(get_random_int_hash);
+ unsigned int ret;
+
+ hash[0] += current->pid + jiffies + get_cycles();
+ md5_transform(hash, random_int_secret);
+ ret = hash[0];
+ put_cpu_var(get_random_int_hash);
+
+ return ret;
+}
+
+/*
+ * randomize_range() returns a start address such that
+ *
+ * [...... <range> .....]
+ * start end
+ *
+ * a <range> with size "len" starting at the return value is inside the
+ * area defined by [start, end], but is otherwise randomized.
+ */
+unsigned long
+randomize_range(unsigned long start, unsigned long end, unsigned long len)
+{
+ unsigned long range = end - len - start;
+
+ if (end <= start + len)
+ return 0;
+ return PAGE_ALIGN(get_random_int() % range + start);
+}
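+
+/*
+ * For example (illustrative), picking a page-aligned, randomized base
+ * for a 16 KiB area somewhere inside [min_addr, max_addr]:
+ *
+ *	base = randomize_range(min_addr, max_addr, 16384);
+ *	if (!base)
+ *		base = min_addr;	(range too small, fall back)
+ */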
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
new file mode 100644
index 00000000..b33e8ea3
--- /dev/null
+++ b/drivers/char/raw.c
@@ -0,0 +1,374 @@
+/*
+ * linux/drivers/char/raw.c
+ *
+ * Front-end raw character devices. These can be bound to any block
+ * devices to provide genuine Unix raw character device semantics.
+ *
+ * We reserve minor number 0 for a control interface. ioctl()s on this
+ * device are used to bind the other minor numbers to block devices.
+ */
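+
+/*
+ * Illustrative userspace sketch: binding raw minor 1 to a block device
+ * through the control node (the major/minor pair below is an assumption
+ * for the example):
+ *
+ *	struct raw_config_request rq = {
+ *		.raw_minor   = 1,
+ *		.block_major = 8,
+ *		.block_minor = 0,
+ *	};
+ *	int fd = open("/dev/raw/rawctl", O_RDWR);
+ *
+ *	ioctl(fd, RAW_SETBIND, &rq);
+ */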
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include <linux/raw.h>
+#include <linux/capability.h>
+#include <linux/uio.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/gfp.h>
+#include <linux/compat.h>
+#include <linux/vmalloc.h>
+
+#include <asm/uaccess.h>
+
+struct raw_device_data {
+ struct block_device *binding;
+ int inuse;
+};
+
+static struct class *raw_class;
+static struct raw_device_data *raw_devices;
+static DEFINE_MUTEX(raw_mutex);
+static const struct file_operations raw_ctl_fops; /* forward declaration */
+
+static int max_raw_minors = MAX_RAW_MINORS;
+
+module_param(max_raw_minors, int, 0);
+MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");
+
+/*
+ * Open/close code for raw IO.
+ *
+ * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
+ * point at the blockdev's address_space and set the file handle to use
+ * O_DIRECT.
+ *
+ * Set the device's soft blocksize to the minimum possible. This gives the
+ * finest possible alignment and has no adverse impact on performance.
+ */
+static int raw_open(struct inode *inode, struct file *filp)
+{
+ const int minor = iminor(inode);
+ struct block_device *bdev;
+ int err;
+
+ if (minor == 0) { /* It is the control device */
+ filp->f_op = &raw_ctl_fops;
+ return 0;
+ }
+
+ mutex_lock(&raw_mutex);
+
+ /*
+ * All we need to do on open is check that the device is bound.
+ */
+ bdev = raw_devices[minor].binding;
+ err = -ENODEV;
+ if (!bdev)
+ goto out;
+ igrab(bdev->bd_inode);
+ err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
+ if (err)
+ goto out;
+ err = set_blocksize(bdev, bdev_logical_block_size(bdev));
+ if (err)
+ goto out1;
+ filp->f_flags |= O_DIRECT;
+ filp->f_mapping = bdev->bd_inode->i_mapping;
+ if (++raw_devices[minor].inuse == 1)
+ filp->f_path.dentry->d_inode->i_mapping =
+ bdev->bd_inode->i_mapping;
+ filp->private_data = bdev;
+ mutex_unlock(&raw_mutex);
+ return 0;
+
+out1:
+ blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
+out:
+ mutex_unlock(&raw_mutex);
+ return err;
+}
+
+/*
+ * When the final fd which refers to this character-special node is closed, we
+ * make its ->mapping point back at its own i_data.
+ */
+static int raw_release(struct inode *inode, struct file *filp)
+{
+ const int minor = iminor(inode);
+ struct block_device *bdev;
+
+ mutex_lock(&raw_mutex);
+ bdev = raw_devices[minor].binding;
+ if (--raw_devices[minor].inuse == 0) {
+ /* Here inode->i_mapping == bdev->bd_inode->i_mapping */
+ inode->i_mapping = &inode->i_data;
+ inode->i_mapping->backing_dev_info = &default_backing_dev_info;
+ }
+ mutex_unlock(&raw_mutex);
+
+ blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
+ return 0;
+}
+
+/*
+ * Forward ioctls to the underlying block device.
+ */
+static long
+raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
+{
+ struct block_device *bdev = filp->private_data;
+ return blkdev_ioctl(bdev, 0, command, arg);
+}
+
+static int bind_set(int number, u64 major, u64 minor)
+{
+ dev_t dev = MKDEV(major, minor);
+ struct raw_device_data *rawdev;
+ int err = 0;
+
+ if (number <= 0 || number >= max_raw_minors)
+ return -EINVAL;
+
+ if (MAJOR(dev) != major || MINOR(dev) != minor)
+ return -EINVAL;
+
+ rawdev = &raw_devices[number];
+
+ /*
+ * This is like making block devices, so demand the
+ * same capability
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+ * For now, we don't need to check that the underlying
+ * block device is present or not: we can do that when
+ * the raw device is opened. Just check that the
+ * major/minor numbers make sense.
+ */
+
+ if (MAJOR(dev) == 0 && dev != 0)
+ return -EINVAL;
+
+ mutex_lock(&raw_mutex);
+ if (rawdev->inuse) {
+ mutex_unlock(&raw_mutex);
+ return -EBUSY;
+ }
+ if (rawdev->binding) {
+ bdput(rawdev->binding);
+ module_put(THIS_MODULE);
+ }
+ if (!dev) {
+ /* unbind */
+ rawdev->binding = NULL;
+ device_destroy(raw_class, MKDEV(RAW_MAJOR, number));
+ } else {
+ rawdev->binding = bdget(dev);
+ if (rawdev->binding == NULL) {
+ err = -ENOMEM;
+ } else {
+ dev_t raw = MKDEV(RAW_MAJOR, number);
+ __module_get(THIS_MODULE);
+ device_destroy(raw_class, raw);
+ device_create(raw_class, NULL, raw, NULL,
+ "raw%d", number);
+ }
+ }
+ mutex_unlock(&raw_mutex);
+ return err;
+}
+
+static int bind_get(int number, dev_t *dev)
+{
+ struct raw_device_data *rawdev;
+ struct block_device *bdev;
+
+ if (number <= 0 || number >= max_raw_minors)
+ return -EINVAL;
+
+ rawdev = &raw_devices[number];
+
+ mutex_lock(&raw_mutex);
+ bdev = rawdev->binding;
+ *dev = bdev ? bdev->bd_dev : 0;
+ mutex_unlock(&raw_mutex);
+ return 0;
+}
+
+/*
+ * Deal with ioctls against the raw-device control interface, to bind
+ * and unbind other raw devices.
+ */
+static long raw_ctl_ioctl(struct file *filp, unsigned int command,
+ unsigned long arg)
+{
+ struct raw_config_request rq;
+ dev_t dev;
+ int err;
+
+ switch (command) {
+ case RAW_SETBIND:
+ if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
+ return -EFAULT;
+
+ return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);
+
+ case RAW_GETBIND:
+ if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
+ return -EFAULT;
+
+ err = bind_get(rq.raw_minor, &dev);
+ if (err)
+ return err;
+
+ rq.block_major = MAJOR(dev);
+ rq.block_minor = MINOR(dev);
+
+ if (copy_to_user((void __user *)arg, &rq, sizeof(rq)))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_COMPAT
+struct raw32_config_request {
+ compat_int_t raw_minor;
+ compat_u64 block_major;
+ compat_u64 block_minor;
+};
+
+static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct raw32_config_request __user *user_req = compat_ptr(arg);
+ struct raw32_config_request rq;
+ dev_t dev;
+ int err = 0;
+
+ switch (cmd) {
+ case RAW_SETBIND:
+ if (copy_from_user(&rq, user_req, sizeof(rq)))
+ return -EFAULT;
+
+ return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);
+
+ case RAW_GETBIND:
+ if (copy_from_user(&rq, user_req, sizeof(rq)))
+ return -EFAULT;
+
+ err = bind_get(rq.raw_minor, &dev);
+ if (err)
+ return err;
+
+ rq.block_major = MAJOR(dev);
+ rq.block_minor = MINOR(dev);
+
+ if (copy_to_user(user_req, &rq, sizeof(rq)))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+#endif
+
+static const struct file_operations raw_fops = {
+ .read = do_sync_read,
+ .aio_read = generic_file_aio_read,
+ .write = do_sync_write,
+ .aio_write = blkdev_aio_write,
+ .fsync = blkdev_fsync,
+ .open = raw_open,
+ .release = raw_release,
+ .unlocked_ioctl = raw_ioctl,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations raw_ctl_fops = {
+ .unlocked_ioctl = raw_ctl_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = raw_ctl_compat_ioctl,
+#endif
+ .open = raw_open,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+static struct cdev raw_cdev;
+
+static char *raw_devnode(struct device *dev, mode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
+}
+
+static int __init raw_init(void)
+{
+ dev_t dev = MKDEV(RAW_MAJOR, 0);
+ int ret;
+
+ if (max_raw_minors < 1 || max_raw_minors > 65536) {
+ printk(KERN_WARNING "raw: invalid max_raw_minors (must be"
+ " between 1 and 65536), using %d\n", MAX_RAW_MINORS);
+ max_raw_minors = MAX_RAW_MINORS;
+ }
+
+ raw_devices = vzalloc(sizeof(struct raw_device_data) * max_raw_minors);
+ if (!raw_devices) {
+ printk(KERN_ERR "Not enough memory for raw device structures\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = register_chrdev_region(dev, max_raw_minors, "raw");
+ if (ret)
+ goto error;
+
+ cdev_init(&raw_cdev, &raw_fops);
+ ret = cdev_add(&raw_cdev, dev, max_raw_minors);
+ if (ret)
+ goto error_region;
+
+ raw_class = class_create(THIS_MODULE, "raw");
+ if (IS_ERR(raw_class)) {
+ printk(KERN_ERR "Error creating raw class.\n");
+ cdev_del(&raw_cdev);
+ ret = PTR_ERR(raw_class);
+ goto error_region;
+ }
+ raw_class->devnode = raw_devnode;
+ device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");
+
+ return 0;
+
+error_region:
+ unregister_chrdev_region(dev, max_raw_minors);
+error:
+ vfree(raw_devices);
+ return ret;
+}
+
+static void __exit raw_exit(void)
+{
+ device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
+ class_destroy(raw_class);
+ cdev_del(&raw_cdev);
+ unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors);
+}
+
+module_init(raw_init);
+module_exit(raw_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/regs-ocotp-v2.h b/drivers/char/regs-ocotp-v2.h
new file mode 100755
index 00000000..559c8e7f
--- /dev/null
+++ b/drivers/char/regs-ocotp-v2.h
@@ -0,0 +1,239 @@
+/*
+ * Freescale OCOTP Register Definitions
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This file is auto-generated from an XML description. Do not edit it.
+ *
+ * Xml Revision: 1.12
+ * Template revision: 1.3
+ */
+
+#ifndef __ARCH_ARM___OCOTP_H
+#define __ARCH_ARM___OCOTP_H
+
+
+#define HW_OCOTP_CTRL (0x00000000)
+#define HW_OCOTP_CTRL_SET (0x00000004)
+#define HW_OCOTP_CTRL_CLR (0x00000008)
+#define HW_OCOTP_CTRL_TOG (0x0000000c)
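+
+/*
+ * The _SET/_CLR/_TOG offsets follow the usual Freescale set/clear/
+ * toggle convention: writing a mask to them sets, clears or toggles
+ * those bits in the base register without a read-modify-write.  A
+ * rough sketch, assuming an ioremap'd controller base in "otp_base":
+ *
+ * __raw_writel(BM_OCOTP_CTRL_ERROR, otp_base + HW_OCOTP_CTRL_CLR);
+ */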
+
+#define BP_OCOTP_CTRL_WR_UNLOCK 16
+#define BM_OCOTP_CTRL_WR_UNLOCK 0xFFFF0000
+#define BF_OCOTP_CTRL_WR_UNLOCK(v) \
+ (((v) << 16) & BM_OCOTP_CTRL_WR_UNLOCK)
+#define BV_OCOTP_CTRL_WR_UNLOCK__KEY 0x3E77
+#define BP_OCOTP_CTRL_RSRVD2 14
+#define BM_OCOTP_CTRL_RSRVD2 0x0000C000
+#define BF_OCOTP_CTRL_RSRVD2(v) \
+ (((v) << 14) & BM_OCOTP_CTRL_RSRVD2)
+#define BM_OCOTP_CTRL_RELOAD_SHADOWS 0x00002000
+#define BM_OCOTP_CTRL_RD_BANK_OPEN 0x00001000
+#define BP_OCOTP_CTRL_RSRVD1 10
+#define BM_OCOTP_CTRL_RSRVD1 0x00000C00
+#define BF_OCOTP_CTRL_RSRVD1(v) \
+ (((v) << 10) & BM_OCOTP_CTRL_RSRVD1)
+#define BM_OCOTP_CTRL_ERROR 0x00000200
+#define BM_OCOTP_CTRL_BUSY 0x00000100
+#define BP_OCOTP_CTRL_RSRVD0 6
+#define BM_OCOTP_CTRL_RSRVD0 0x000000C0
+#define BF_OCOTP_CTRL_RSRVD0(v) \
+ (((v) << 6) & BM_OCOTP_CTRL_RSRVD0)
+#define BP_OCOTP_CTRL_ADDR 0
+#define BM_OCOTP_CTRL_ADDR 0x0000003F
+#define BF_OCOTP_CTRL_ADDR(v) \
+ (((v) << 0) & BM_OCOTP_CTRL_ADDR)
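+
+/*
+ * A fuse access typically unlocks the controller and selects the word
+ * index in a single store; a rough sketch (otp_base as above, "addr"
+ * being a word index -- both assumed here):
+ *
+ * __raw_writel(BF_OCOTP_CTRL_WR_UNLOCK(BV_OCOTP_CTRL_WR_UNLOCK__KEY) |
+ * BF_OCOTP_CTRL_ADDR(addr), otp_base + HW_OCOTP_CTRL);
+ */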
+
+#define HW_OCOTP_TIMING (0x00000010)
+
+#define BP_OCOTP_TIMING_RSRVD0 22
+#define BM_OCOTP_TIMING_RSRVD0 0xFFC00000
+#define BF_OCOTP_TIMING_RSRVD0(v) \
+ (((v) << 22) & BM_OCOTP_TIMING_RSRVD0)
+#define BP_OCOTP_TIMING_RD_BUSY 16
+#define BM_OCOTP_TIMING_RD_BUSY 0x003F0000
+#define BF_OCOTP_TIMING_RD_BUSY(v) \
+ (((v) << 16) & BM_OCOTP_TIMING_RD_BUSY)
+#define BP_OCOTP_TIMING_RELAX 12
+#define BM_OCOTP_TIMING_RELAX 0x0000F000
+#define BF_OCOTP_TIMING_RELAX(v) \
+ (((v) << 12) & BM_OCOTP_TIMING_RELAX)
+#define BP_OCOTP_TIMING_SCLK_COUNT 0
+#define BM_OCOTP_TIMING_SCLK_COUNT 0x00000FFF
+#define BF_OCOTP_TIMING_SCLK_COUNT(v) \
+ (((v) << 0) & BM_OCOTP_TIMING_SCLK_COUNT)
+
+#define HW_OCOTP_DATA (0x00000020)
+
+#define BP_OCOTP_DATA_DATA 0
+#define BM_OCOTP_DATA_DATA 0xFFFFFFFF
+#define BF_OCOTP_DATA_DATA(v) (v)
+
+#define HW_OCOTP_LOCK (0x00000030)
+
+#define BP_OCOTP_LOCK_UNALLOCATED 26
+#define BM_OCOTP_LOCK_UNALLOCATED 0xFC000000
+#define BF_OCOTP_LOCK_UNALLOCATED(v) \
+ (((v) << 26) & BM_OCOTP_LOCK_UNALLOCATED)
+#define BM_OCOTP_LOCK_PIN 0x02000000
+#define BM_OCOTP_LOCK_DCPKEY_ALT 0x01000000
+#define BM_OCOTP_LOCK_SCC_ALT 0x00800000
+#define BM_OCOTP_LOCK_DCPKEY 0x00400000
+#define BM_OCOTP_LOCK_SWCAP_SHADOW 0x00200000
+#define BM_OCOTP_LOCK_SWCAP 0x00100000
+#define BM_OCOTP_LOCK_HWCAP_SHADOW 0x00080000
+#define BM_OCOTP_LOCK_HWCAP 0x00040000
+#define BM_OCOTP_LOCK_MAC_SHADOW 0x00020000
+#define BM_OCOTP_LOCK_MAC 0x00010000
+#define BM_OCOTP_LOCK_SJC_SHADOW 0x00008000
+#define BM_OCOTP_LOCK_SJC 0x00004000
+#define BM_OCOTP_LOCK_SRK_SHADOW 0x00002000
+#define BM_OCOTP_LOCK_SRK 0x00001000
+#define BM_OCOTP_LOCK_SCC_SHADOW 0x00000800
+#define BM_OCOTP_LOCK_SCC 0x00000400
+#define BM_OCOTP_LOCK_GP_SHADOW 0x00000200
+#define BM_OCOTP_LOCK_GP 0x00000100
+#define BM_OCOTP_LOCK_MEM_MISC_SHADOW 0x00000080
+#define BM_OCOTP_LOCK_MEM_TRIM_SHADOW 0x00000040
+#define BM_OCOTP_LOCK_MEM_TRIM 0x00000020
+#define BM_OCOTP_LOCK_CFG_MISC_SHADOW 0x00000010
+#define BM_OCOTP_LOCK_CFG_BOOT_SHADOW 0x00000008
+#define BM_OCOTP_LOCK_CFG_BOOT 0x00000004
+#define BM_OCOTP_LOCK_CFG_TESTER_SHADOW 0x00000002
+#define BM_OCOTP_LOCK_CFG_TESTER 0x00000001
+
+/*
+ * multi-register-define name HW_OCOTP_CFGn
+ * base 0x00000040
+ * count 7
+ * offset 0x10
+ */
+#define HW_OCOTP_CFGn(n) (0x00000040 + (n) * 0x10)
+#define BP_OCOTP_CFGn_BITS 0
+#define BM_OCOTP_CFGn_BITS 0xFFFFFFFF
+#define BF_OCOTP_CFGn_BITS(v) (v)
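+
+/* e.g. HW_OCOTP_CFGn(2) == 0x00000060 -- base 0x40 plus two 0x10 strides */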
+
+/*
+ * multi-register-define name HW_OCOTP_MEMn
+ * base 0x000000B0
+ * count 6
+ * offset 0x10
+ */
+#define HW_OCOTP_MEMn(n) (0x000000b0 + (n) * 0x10)
+#define BP_OCOTP_MEMn_BITS 0
+#define BM_OCOTP_MEMn_BITS 0xFFFFFFFF
+#define BF_OCOTP_MEMn_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_GPn
+ * base 0x00000110
+ * count 2
+ * offset 0x10
+ */
+#define HW_OCOTP_GPn(n) (0x00000110 + (n) * 0x10)
+#define BP_OCOTP_GPn_BITS 0
+#define BM_OCOTP_GPn_BITS 0xFFFFFFFF
+#define BF_OCOTP_GPn_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_SCCn
+ * base 0x00000130
+ * count 8
+ * offset 0x10
+ */
+#define HW_OCOTP_SCCn(n) (0x00000130 + (n) * 0x10)
+#define BP_OCOTP_SCCn_BITS 0
+#define BM_OCOTP_SCCn_BITS 0xFFFFFFFF
+#define BF_OCOTP_SCCn_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_SRKn
+ * base 0x000001B0
+ * count 8
+ * offset 0x10
+ */
+#define HW_OCOTP_SRKn(n) (0x000001b0 + (n) * 0x10)
+#define BP_OCOTP_SRKn_BITS 0
+#define BM_OCOTP_SRKn_BITS 0xFFFFFFFF
+#define BF_OCOTP_SRKn_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_SJC_RESPn
+ * base 0x00000230
+ * count 2
+ * offset 0x10
+ */
+#define HW_OCOTP_SJC_RESPn(n) (0x00000230 + (n) * 0x10)
+#define BP_OCOTP_SJC_RESPn_BITS 0
+#define BM_OCOTP_SJC_RESPn_BITS 0xFFFFFFFF
+#define BF_OCOTP_SJC_RESPn_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_MACn
+ * base 0x00000250
+ * count 2
+ * offset 0x10
+ */
+#define HW_OCOTP_MACn(n) (0x00000250 + (n) * 0x10)
+#define BP_OCOTP_MACn_BITS 0
+#define BM_OCOTP_MACn_BITS 0xFFFFFFFF
+#define BF_OCOTP_MACn_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_HWCAPn
+ * base 0x00000270
+ * count 3
+ * offset 0x10
+ */
+#define HW_OCOTP_HWCAPn(n) (0x00000270 + (n) * 0x10)
+#define BP_OCOTP_HWCAPn_BITS 0
+#define BM_OCOTP_HWCAPn_BITS 0xFFFFFFFF
+#define BF_OCOTP_HWCAPn_BITS(v) (v)
+
+#define HW_OCOTP_SWCAP (0x000002a0)
+
+#define BP_OCOTP_SWCAP_BITS 0
+#define BM_OCOTP_SWCAP_BITS 0xFFFFFFFF
+#define BF_OCOTP_SWCAP_BITS(v) (v)
+
+#define HW_OCOTP_SCS (0x000002b0)
+#define HW_OCOTP_SCS_SET (0x000002b4)
+#define HW_OCOTP_SCS_CLR (0x000002b8)
+#define HW_OCOTP_SCS_TOG (0x000002bc)
+
+#define BM_OCOTP_SCS_LOCK 0x80000000
+#define BP_OCOTP_SCS_SPARE 1
+#define BM_OCOTP_SCS_SPARE 0x7FFFFFFE
+#define BF_OCOTP_SCS_SPARE(v) \
+ (((v) << 1) & BM_OCOTP_SCS_SPARE)
+#define BM_OCOTP_SCS_HAB_JDE 0x00000001
+
+#define HW_OCOTP_VERSION (0x000002c0)
+
+#define BP_OCOTP_VERSION_MAJOR 24
+#define BM_OCOTP_VERSION_MAJOR 0xFF000000
+#define BF_OCOTP_VERSION_MAJOR(v) \
+ (((v) << 24) & BM_OCOTP_VERSION_MAJOR)
+#define BP_OCOTP_VERSION_MINOR 16
+#define BM_OCOTP_VERSION_MINOR 0x00FF0000
+#define BF_OCOTP_VERSION_MINOR(v) \
+ (((v) << 16) & BM_OCOTP_VERSION_MINOR)
+#define BP_OCOTP_VERSION_STEP 0
+#define BM_OCOTP_VERSION_STEP 0x0000FFFF
+#define BF_OCOTP_VERSION_STEP(v) \
+ (((v) << 0) & BM_OCOTP_VERSION_STEP)
+#endif /* __ARCH_ARM___OCOTP_H */
diff --git a/drivers/char/regs-ocotp-v3.h b/drivers/char/regs-ocotp-v3.h
new file mode 100644
index 00000000..d3c8de98
--- /dev/null
+++ b/drivers/char/regs-ocotp-v3.h
@@ -0,0 +1,367 @@
+/*
+ * Freescale OCOTP Register Definitions
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This file is auto-generated from an XML description. Do not edit it.
+ *
+ * Xml Revision: 1.12
+ * Template revision: 1.3
+ */
+
+#ifndef __ARCH_ARM___OCOTP_H
+#define __ARCH_ARM___OCOTP_H
+
+
+#define HW_OCOTP_CTRL (0x00000000)
+#define HW_OCOTP_CTRL_SET (0x00000004)
+#define HW_OCOTP_CTRL_CLR (0x00000008)
+#define HW_OCOTP_CTRL_TOG (0x0000000c)
+
+#define BP_OCOTP_CTRL_WR_UNLOCK 16
+#define BM_OCOTP_CTRL_WR_UNLOCK 0xFFFF0000
+#define BF_OCOTP_CTRL_WR_UNLOCK(v) \
+ (((v) << 16) & BM_OCOTP_CTRL_WR_UNLOCK)
+#define BV_OCOTP_CTRL_WR_UNLOCK__KEY 0x3E77
+#define BP_OCOTP_CTRL_RSVD1 13
+#define BM_OCOTP_CTRL_RSVD1 0x0000E000
+#define BF_OCOTP_CTRL_RSVD1(v) \
+ (((v) << 13) & BM_OCOTP_CTRL_RSVD1)
+#define BM_OCOTP_CTRL_CRC_FAIL 0x00001000
+#define BM_OCOTP_CTRL_CRC_TEST 0x00000800
+#define BM_OCOTP_CTRL_RELOAD_SHADOWS 0x00000400
+#define BM_OCOTP_CTRL_ERROR 0x00000200
+#define BM_OCOTP_CTRL_BUSY 0x00000100
+#define BM_OCOTP_CTRL_RSVD0 0x00000080
+#define BP_OCOTP_CTRL_ADDR 0
+#define BM_OCOTP_CTRL_ADDR 0x0000007F
+#define BF_OCOTP_CTRL_ADDR(v) \
+ (((v) << 0) & BM_OCOTP_CTRL_ADDR)
+
+#define HW_OCOTP_TIMING (0x00000010)
+
+#define BP_OCOTP_TIMING_RSRVD0 28
+#define BM_OCOTP_TIMING_RSRVD0 0xF0000000
+#define BF_OCOTP_TIMING_RSRVD0(v) \
+ (((v) << 28) & BM_OCOTP_TIMING_RSRVD0)
+#define BP_OCOTP_TIMING_WAIT 22
+#define BM_OCOTP_TIMING_WAIT 0x0FC00000
+#define BF_OCOTP_TIMING_WAIT(v) \
+ (((v) << 22) & BM_OCOTP_TIMING_WAIT)
+#define BP_OCOTP_TIMING_STROBE_READ 16
+#define BM_OCOTP_TIMING_STROBE_READ 0x003F0000
+#define BF_OCOTP_TIMING_STROBE_READ(v) \
+ (((v) << 16) & BM_OCOTP_TIMING_STROBE_READ)
+#define BP_OCOTP_TIMING_RELAX 12
+#define BM_OCOTP_TIMING_RELAX 0x0000F000
+#define BF_OCOTP_TIMING_RELAX(v) \
+ (((v) << 12) & BM_OCOTP_TIMING_RELAX)
+#define BP_OCOTP_TIMING_STROBE_PROG 0
+#define BM_OCOTP_TIMING_STROBE_PROG 0x00000FFF
+#define BF_OCOTP_TIMING_STROBE_PROG(v) \
+ (((v) << 0) & BM_OCOTP_TIMING_STROBE_PROG)
+
+#define HW_OCOTP_DATA (0x00000020)
+
+#define BP_OCOTP_DATA_DATA 0
+#define BM_OCOTP_DATA_DATA 0xFFFFFFFF
+#define BF_OCOTP_DATA_DATA(v) (v)
+
+#define HW_OCOTP_READ_CTRL (0x00000030)
+
+#define BP_OCOTP_READ_CTRL_RSVD0 1
+#define BM_OCOTP_READ_CTRL_RSVD0 0xFFFFFFFE
+#define BF_OCOTP_READ_CTRL_RSVD0(v) \
+ (((v) << 1) & BM_OCOTP_READ_CTRL_RSVD0)
+#define BM_OCOTP_READ_CTRL_READ_FUSE 0x00000001
+
+#define HW_OCOTP_READ_FUSE_DATA (0x00000040)
+
+#define BP_OCOTP_READ_FUSE_DATA_DATA 0
+#define BM_OCOTP_READ_FUSE_DATA_DATA 0xFFFFFFFF
+#define BF_OCOTP_READ_FUSE_DATA_DATA(v) (v)
+
+#define HW_OCOTP_SW_STICKY (0x00000050)
+
+#define BP_OCOTP_SW_STICKY_RSVD0 5
+#define BM_OCOTP_SW_STICKY_RSVD0 0xFFFFFFE0
+#define BF_OCOTP_SW_STICKY_RSVD0(v) \
+ (((v) << 5) & BM_OCOTP_SW_STICKY_RSVD0)
+#define BM_OCOTP_SW_STICKY_JTAG_BLOCK_RELEASE 0x00000010
+#define BM_OCOTP_SW_STICKY_BLOCK_ROM_PART 0x00000008
+#define BM_OCOTP_SW_STICKY_FIELD_RETURN_LOCK 0x00000004
+#define BM_OCOTP_SW_STICKY_SRK_REVOKE_LOCK 0x00000002
+#define BM_OCOTP_SW_STICKY_BLOCK_DTCP_KEY 0x00000001
+
+#define HW_OCOTP_SCS (0x00000060)
+#define HW_OCOTP_SCS_SET (0x00000064)
+#define HW_OCOTP_SCS_CLR (0x00000068)
+#define HW_OCOTP_SCS_TOG (0x0000006c)
+
+#define BM_OCOTP_SCS_LOCK 0x80000000
+#define BP_OCOTP_SCS_SPARE 1
+#define BM_OCOTP_SCS_SPARE 0x7FFFFFFE
+#define BF_OCOTP_SCS_SPARE(v) \
+ (((v) << 1) & BM_OCOTP_SCS_SPARE)
+#define BM_OCOTP_SCS_HAB_JDE 0x00000001
+
+#define HW_OCOTP_CRC_ADDR (0x00000070)
+
+#define BP_OCOTP_CRC_ADDR_RSVD0 19
+#define BM_OCOTP_CRC_ADDR_RSVD0 0xFFF80000
+#define BF_OCOTP_CRC_ADDR_RSVD0(v) \
+ (((v) << 19) & BM_OCOTP_CRC_ADDR_RSVD0)
+#define BP_OCOTP_CRC_ADDR_CRC_ADDR 16
+#define BM_OCOTP_CRC_ADDR_CRC_ADDR 0x00070000
+#define BF_OCOTP_CRC_ADDR_CRC_ADDR(v) \
+ (((v) << 16) & BM_OCOTP_CRC_ADDR_CRC_ADDR)
+#define BP_OCOTP_CRC_ADDR_DATA_END_ADDR 8
+#define BM_OCOTP_CRC_ADDR_DATA_END_ADDR 0x0000FF00
+#define BF_OCOTP_CRC_ADDR_DATA_END_ADDR(v) \
+ (((v) << 8) & BM_OCOTP_CRC_ADDR_DATA_END_ADDR)
+#define BP_OCOTP_CRC_ADDR_DATA_START_ADDR 0
+#define BM_OCOTP_CRC_ADDR_DATA_START_ADDR 0x000000FF
+#define BF_OCOTP_CRC_ADDR_DATA_START_ADDR(v) \
+ (((v) << 0) & BM_OCOTP_CRC_ADDR_DATA_START_ADDR)
+
+#define HW_OCOTP_CRC_VALUE (0x00000080)
+
+#define BP_OCOTP_CRC_VALUE_DATA 0
+#define BM_OCOTP_CRC_VALUE_DATA 0xFFFFFFFF
+#define BF_OCOTP_CRC_VALUE_DATA(v) (v)
+
+#define HW_OCOTP_VERSION (0x00000090)
+
+#define BP_OCOTP_VERSION_MAJOR 24
+#define BM_OCOTP_VERSION_MAJOR 0xFF000000
+#define BF_OCOTP_VERSION_MAJOR(v) \
+ (((v) << 24) & BM_OCOTP_VERSION_MAJOR)
+#define BP_OCOTP_VERSION_MINOR 16
+#define BM_OCOTP_VERSION_MINOR 0x00FF0000
+#define BF_OCOTP_VERSION_MINOR(v) \
+ (((v) << 16) & BM_OCOTP_VERSION_MINOR)
+#define BP_OCOTP_VERSION_STEP 0
+#define BM_OCOTP_VERSION_STEP 0x0000FFFF
+#define BF_OCOTP_VERSION_STEP(v) \
+ (((v) << 0) & BM_OCOTP_VERSION_STEP)
+
+#define HW_OCOTP_LOCK (0x00000400)
+
+#define BP_OCOTP_LOCK_UNALLOCATED 30
+#define BM_OCOTP_LOCK_UNALLOCATED 0xC0000000
+#define BF_OCOTP_LOCK_UNALLOCATED(v) \
+ (((v) << 30) & BM_OCOTP_LOCK_UNALLOCATED)
+#define BP_OCOTP_LOCK_CRC_GP_HI_LOCK 28
+#define BM_OCOTP_LOCK_CRC_GP_HI_LOCK 0x30000000
+#define BF_OCOTP_LOCK_CRC_GP_HI_LOCK(v) \
+ (((v) << 28) & BM_OCOTP_LOCK_CRC_GP_HI_LOCK)
+#define BP_OCOTP_LOCK_CRC_GP_LO_LOCK 26
+#define BM_OCOTP_LOCK_CRC_GP_LO_LOCK 0x0C000000
+#define BF_OCOTP_LOCK_CRC_GP_LO_LOCK(v) \
+ (((v) << 26) & BM_OCOTP_LOCK_CRC_GP_LO_LOCK)
+#define BM_OCOTP_LOCK_PIN 0x02000000
+#define BM_OCOTP_LOCK_RSVD2 0x01000000
+#define BM_OCOTP_LOCK_DTCP_DEV_CERT 0x00800000
+#define BM_OCOTP_LOCK_MISC_CONF 0x00400000
+#define BM_OCOTP_LOCK_HDCP_KEYS 0x00200000
+#define BM_OCOTP_LOCK_HDCP_KSV 0x00100000
+#define BP_OCOTP_LOCK_ANALOG 18
+#define BM_OCOTP_LOCK_ANALOG 0x000C0000
+#define BF_OCOTP_LOCK_ANALOG(v) \
+ (((v) << 18) & BM_OCOTP_LOCK_ANALOG)
+#define BM_OCOTP_LOCK_OTPMK 0x00020000
+#define BM_OCOTP_LOCK_DTCP_KEY 0x00010000
+#define BM_OCOTP_LOCK_RSVD1 0x00008000
+#define BM_OCOTP_LOCK_SRK 0x00004000
+#define BP_OCOTP_LOCK_GP2 12
+#define BM_OCOTP_LOCK_GP2 0x00003000
+#define BF_OCOTP_LOCK_GP2(v) \
+ (((v) << 12) & BM_OCOTP_LOCK_GP2)
+#define BP_OCOTP_LOCK_GP1 10
+#define BM_OCOTP_LOCK_GP1 0x00000C00
+#define BF_OCOTP_LOCK_GP1(v) \
+ (((v) << 10) & BM_OCOTP_LOCK_GP1)
+#define BP_OCOTP_LOCK_MAC_ADDR 8
+#define BM_OCOTP_LOCK_MAC_ADDR 0x00000300
+#define BF_OCOTP_LOCK_MAC_ADDR(v) \
+ (((v) << 8) & BM_OCOTP_LOCK_MAC_ADDR)
+#define BM_OCOTP_LOCK_RSVD0 0x00000080
+#define BM_OCOTP_LOCK_SJC_RESP 0x00000040
+#define BP_OCOTP_LOCK_MEM_TRIM 4
+#define BM_OCOTP_LOCK_MEM_TRIM 0x00000030
+#define BF_OCOTP_LOCK_MEM_TRIM(v) \
+ (((v) << 4) & BM_OCOTP_LOCK_MEM_TRIM)
+#define BP_OCOTP_LOCK_BOOT_CFG 2
+#define BM_OCOTP_LOCK_BOOT_CFG 0x0000000C
+#define BF_OCOTP_LOCK_BOOT_CFG(v) \
+ (((v) << 2) & BM_OCOTP_LOCK_BOOT_CFG)
+#define BP_OCOTP_LOCK_TESTER 0
+#define BM_OCOTP_LOCK_TESTER 0x00000003
+#define BF_OCOTP_LOCK_TESTER(v) \
+ (((v) << 0) & BM_OCOTP_LOCK_TESTER)
+
+/*
+ * multi-register-define name HW_OCOTP_CFGn
+ * base 0x00000410
+ * count 7
+ * offset 0x10
+ */
+#define HW_OCOTP_CFGn(n) (0x00000410 + (n) * 0x10)
+#define BP_OCOTP_CFGn_BITS 0
+#define BM_OCOTP_CFGn_BITS 0xFFFFFFFF
+#define BF_OCOTP_CFGn_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_MEMn
+ * base 0x00000480
+ * count 5
+ * offset 0x10
+ */
+#define HW_OCOTP_MEMn(n) (0x00000480 + (n) * 0x10)
+#define BP_OCOTP_MEMn_BITS 0
+#define BM_OCOTP_MEMn_BITS 0xFFFFFFFF
+#define BF_OCOTP_MEMn_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_ANAn
+ * base 0x000004D0
+ * count 3
+ * offset 0x10
+ */
+#define HW_OCOTP_ANAn(n) (0x000004d0 + (n) * 0x10)
+#define BP_OCOTP_ANAn_BITS 0
+#define BM_OCOTP_ANAn_BITS 0xFFFFFFFF
+#define BF_OCOTP_ANAn_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_OTPMKn
+ * base 0x00000500
+ * count 8
+ * offset 0x10
+ */
+#define HW_OCOTP_OTPMKn(n) (0x00000500 + (n) * 0x10)
+#define BP_OCOTP_OTPMKn_BITS 0
+#define BM_OCOTP_OTPMKn_BITS 0xFFFFFFFF
+#define BF_OCOTP_OTPMKn_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_SRKn
+ * base 0x00000580
+ * count 8
+ * offset 0x10
+ */
+#define HW_OCOTP_SRKn(n) (0x00000580 + (n) * 0x10)
+#define BP_OCOTP_SRKn_BITS 0
+#define BM_OCOTP_SRKn_BITS 0xFFFFFFFF
+#define BF_OCOTP_SRKn_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_SJC_RESPn
+ * base 0x00000600
+ * count 2
+ * offset 0x10
+ */
+#define HW_OCOTP_SJC_RESPn(n) (0x00000600 + (n) * 0x10)
+#define BP_OCOTP_SJC_RESPn_BITS 0
+#define BM_OCOTP_SJC_RESPn_BITS 0xFFFFFFFF
+#define BF_OCOTP_SJC_RESPn_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_MACn
+ * base 0x00000620
+ * count 2
+ * offset 0x10
+ */
+#define HW_OCOTP_MACn(n) (0x00000620 + (n) * 0x10)
+#define BP_OCOTP_MACn_BITS 0
+#define BM_OCOTP_MACn_BITS 0xFFFFFFFF
+#define BF_OCOTP_MACn_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_HDCP_KSVn
+ * base 0x00000640
+ * count 2
+ * offset 0x10
+ */
+#define HW_OCOTP_HDCP_KSVn(n) (0x00000640 + (n) * 0x10)
+#define BP_OCOTP_HDCP_KSVn_BITS 0
+#define BM_OCOTP_HDCP_KSVn_BITS 0xFFFFFFFF
+#define BF_OCOTP_HDCP_KSVn_BITS(v) (v)
+
+#define HW_OCOTP_GP1 (0x00000660)
+
+#define BP_OCOTP_GP1_BITS 0
+#define BM_OCOTP_GP1_BITS 0xFFFFFFFF
+#define BF_OCOTP_GP1_BITS(v) (v)
+
+#define HW_OCOTP_GP2 (0x00000670)
+
+#define BP_OCOTP_GP2_BITS 0
+#define BM_OCOTP_GP2_BITS 0xFFFFFFFF
+#define BF_OCOTP_GP2_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_DTCP_KEYn
+ * base 0x00000680
+ * count 5
+ * offset 0x10
+ */
+#define HW_OCOTP_DTCP_KEYn(n) (0x00000680 + (n) * 0x10)
+#define BP_OCOTP_DTCP_KEYn_BITS 0
+#define BM_OCOTP_DTCP_KEYn_BITS 0xFFFFFFFF
+#define BF_OCOTP_DTCP_KEYn_BITS(v) (v)
+
+#define HW_OCOTP_MISC_CONF (0x000006d0)
+
+#define BP_OCOTP_MISC_CONF_BITS 0
+#define BM_OCOTP_MISC_CONF_BITS 0xFFFFFFFF
+#define BF_OCOTP_MISC_CONF_BITS(v) (v)
+
+#define HW_OCOTP_FIELD_RETURN (0x000006e0)
+
+#define BP_OCOTP_FIELD_RETURN_BITS 0
+#define BM_OCOTP_FIELD_RETURN_BITS 0xFFFFFFFF
+#define BF_OCOTP_FIELD_RETURN_BITS(v) (v)
+
+#define HW_OCOTP_SRK_REVOKE (0x000006f0)
+
+#define BP_OCOTP_SRK_REVOKE_BITS 0
+#define BM_OCOTP_SRK_REVOKE_BITS 0xFFFFFFFF
+#define BF_OCOTP_SRK_REVOKE_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_HDCP_KEYn
+ * base 0x00000800
+ * count 72
+ * offset 0x10
+ */
+#define HW_OCOTP_HDCP_KEYn(n) (0x00000800 + (n) * 0x10)
+#define BP_OCOTP_HDCP_KEYn_BITS 0
+#define BM_OCOTP_HDCP_KEYn_BITS 0xFFFFFFFF
+#define BF_OCOTP_HDCP_KEYn_BITS(v) (v)
+
+/*
+ * multi-register-define name HW_OCOTP_CRCn
+ * base 0x00000D00
+ * count 8
+ * offset 0x10
+ */
+#define HW_OCOTP_CRCn(n) (0x00000d00 + (n) * 0x10)
+#define BP_OCOTP_CRCn_BITS 0
+#define BM_OCOTP_CRCn_BITS 0xFFFFFFFF
+#define BF_OCOTP_CRCn_BITS(v) (v)
+#endif /* __ARCH_ARM___OCOTP_H */
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
new file mode 100644
index 00000000..dfa8b306
--- /dev/null
+++ b/drivers/char/rtc.c
@@ -0,0 +1,1430 @@
+/*
+ * Real Time Clock interface for Linux
+ *
+ * Copyright (C) 1996 Paul Gortmaker
+ *
+ * This driver allows use of the real time clock (built into
+ * nearly all computers) from user space. It exports the /dev/rtc
+ * interface supporting various ioctl() and also the
+ * /proc/driver/rtc pseudo-file for status information.
+ *
+ * The ioctls can be used to set the interrupt behaviour and
+ * generation rate from the RTC via IRQ 8. Then the /dev/rtc
+ * interface can be used to make use of these timer interrupts,
+ * be they interval or alarm based.
+ *
+ * The /dev/rtc interface will block on reads until an interrupt
+ * has been received. If an RTC interrupt has already happened,
+ * it will output an unsigned long and then block. The output value
+ * contains the interrupt status in the low byte and the number of
+ * interrupts since the last read in the remaining high bytes. The
+ * /dev/rtc interface can also be used with the select(2) call.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Based on other minimal char device drivers, like Alan's
+ * watchdog, Ted's random, etc. etc.
+ *
+ * 1.07 Paul Gortmaker.
+ * 1.08 Miquel van Smoorenburg: disallow certain things on the
+ * DEC Alpha as the CMOS clock is also used for other things.
+ * 1.09 Nikita Schmidt: epoch support and some Alpha cleanup.
+ * 1.09a Pete Zaitcev: Sun SPARC
+ * 1.09b Jeff Garzik: Modularize, init cleanup
+ * 1.09c Jeff Garzik: SMP cleanup
+ * 1.10 Paul Barton-Davis: add support for async I/O
+ * 1.10a Andrea Arcangeli: Alpha updates
+ * 1.10b Andrew Morton: SMP lock fix
+ * 1.10c Cesar Barros: SMP locking fixes and cleanup
+ * 1.10d Paul Gortmaker: delete paranoia check in rtc_exit
+ * 1.10e Maciej W. Rozycki: Handle DECstation's year weirdness.
+ * 1.11 Takashi Iwai: Kernel access functions
+ * rtc_register/rtc_unregister/rtc_control
+ * 1.11a Daniele Bellucci: Audit create_proc_read_entry in rtc_init
+ * 1.12 Venkatesh Pallipadi: Hooks for emulating rtc on HPET base-timer
+ * CONFIG_HPET_EMULATE_RTC
+ * 1.12a Maciej W. Rozycki: Handle memory-mapped chips properly.
+ * 1.12ac Alan Cox: Allow read access to the day of week register
+ * 1.12b David John: Remove calls to the BKL.
+ */
+
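+/*
+ * A minimal sketch of that read interface from userspace (64-bit
+ * userspace assumed, error handling omitted).  RTC_UIE_ON enables the
+ * 1Hz update-complete interrupt; each read then blocks until an IRQ:
+ *
+ * #include <linux/rtc.h>
+ * #include <sys/ioctl.h>
+ * #include <fcntl.h>
+ * #include <unistd.h>
+ *
+ * unsigned long data;
+ * int fd = open("/dev/rtc", O_RDONLY);
+ * ioctl(fd, RTC_UIE_ON, 0);
+ * read(fd, &data, sizeof(data));
+ * ioctl(fd, RTC_UIE_OFF, 0);
+ *
+ * Afterwards data & 0xff holds the interrupt status and data >> 8 the
+ * number of interrupts since the last read, as described above.
+ */
+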
+#define RTC_VERSION "1.12b"
+
+/*
+ * Note that *all* calls to CMOS_READ and CMOS_WRITE are done with
+ * interrupts disabled. Due to the index-port/data-port (0x70/0x71)
+ * design of the RTC, we don't want two different things trying to
+ * get to it at once. (e.g. the periodic 11 min sync from time.c vs.
+ * this driver.)
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/mc146818rtc.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/sysctl.h>
+#include <linux/wait.h>
+#include <linux/bcd.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include <asm/current.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_X86
+#include <asm/hpet.h>
+#endif
+
+#ifdef CONFIG_SPARC32
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <asm/io.h>
+
+static unsigned long rtc_port;
+static int rtc_irq;
+#endif
+
+#ifdef CONFIG_HPET_EMULATE_RTC
+#undef RTC_IRQ
+#endif
+
+#ifdef RTC_IRQ
+static int rtc_has_irq = 1;
+#endif
+
+#ifndef CONFIG_HPET_EMULATE_RTC
+#define is_hpet_enabled() 0
+#define hpet_set_alarm_time(hrs, min, sec) 0
+#define hpet_set_periodic_freq(arg) 0
+#define hpet_mask_rtc_irq_bit(arg) 0
+#define hpet_set_rtc_irq_bit(arg) 0
+#define hpet_rtc_timer_init() do { } while (0)
+#define hpet_rtc_dropped_irq() 0
+#define hpet_register_irq_handler(h) ({ 0; })
+#define hpet_unregister_irq_handler(h) ({ 0; })
+#ifdef RTC_IRQ
+static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
+{
+ return 0;
+}
+#endif
+#endif
+
+/*
+ * We sponge a minor off of the misc major. No need to slurp
+ * up another valuable major dev number for this. If you add
+ * an ioctl, make sure you don't conflict with SPARC's RTC
+ * ioctls.
+ */
+
+static struct fasync_struct *rtc_async_queue;
+
+static DECLARE_WAIT_QUEUE_HEAD(rtc_wait);
+
+#ifdef RTC_IRQ
+static void rtc_dropped_irq(unsigned long data);
+
+static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq, 0, 0);
+#endif
+
+static ssize_t rtc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos);
+
+static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+static void rtc_get_rtc_time(struct rtc_time *rtc_tm);
+
+#ifdef RTC_IRQ
+static unsigned int rtc_poll(struct file *file, poll_table *wait);
+#endif
+
+static void get_rtc_alm_time(struct rtc_time *alm_tm);
+#ifdef RTC_IRQ
+static void set_rtc_irq_bit_locked(unsigned char bit);
+static void mask_rtc_irq_bit_locked(unsigned char bit);
+
+static inline void set_rtc_irq_bit(unsigned char bit)
+{
+ spin_lock_irq(&rtc_lock);
+ set_rtc_irq_bit_locked(bit);
+ spin_unlock_irq(&rtc_lock);
+}
+
+static void mask_rtc_irq_bit(unsigned char bit)
+{
+ spin_lock_irq(&rtc_lock);
+ mask_rtc_irq_bit_locked(bit);
+ spin_unlock_irq(&rtc_lock);
+}
+#endif
+
+#ifdef CONFIG_PROC_FS
+static int rtc_proc_open(struct inode *inode, struct file *file);
+#endif
+
+/*
+ * Bits in rtc_status. (6 bits of room for future expansion)
+ */
+
+#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */
+#define RTC_TIMER_ON 0x02 /* missed irq timer active */
+
+/*
+ * rtc_status is never changed by rtc_interrupt, and ioctl/open/close are
+ * protected by the spin lock rtc_lock. However, an ioctl can still disable
+ * the timer in rtc_status and call del_timer after the interrupt has read
+ * rtc_status but before mod_timer is called, which would then re-enable the
+ * timer (though you would need awful timing before you'd trip on it).
+ */
+static unsigned long rtc_status; /* bitmapped status byte. */
+static unsigned long rtc_freq; /* Current periodic IRQ rate */
+static unsigned long rtc_irq_data; /* our output to the world */
+static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
+
+#ifdef RTC_IRQ
+/*
+ * rtc_task_lock nests inside rtc_lock.
+ */
+static DEFINE_SPINLOCK(rtc_task_lock);
+static rtc_task_t *rtc_callback;
+#endif
+
+/*
+ * If this driver ever becomes modularised, it will be really nice
+ * to make the epoch retain its value across module reload...
+ */
+
+static unsigned long epoch = 1900; /* year corresponding to 0x00 */
+
+static const unsigned char days_in_mo[] =
+{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+
+/*
+ * Returns true if a clock update is in progress
+ */
+static inline unsigned char rtc_is_updating(void)
+{
+ unsigned long flags;
+ unsigned char uip;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return uip;
+}
+
+#ifdef RTC_IRQ
+/*
+ * A very tiny interrupt handler. It runs with IRQF_DISABLED set,
+ * but there is a possibility of conflicting with the set_rtc_mmss()
+ * call (the rtc irq and the timer irq can easily run at the same
+ * time in two different CPUs). So we need to serialize
+ * accesses to the chip with the rtc_lock spinlock that each
+ * architecture should implement in the timer code.
+ * (See ./arch/XXXX/kernel/time.c for the set_rtc_mmss() function.)
+ */
+
+static irqreturn_t rtc_interrupt(int irq, void *dev_id)
+{
+ /*
+ * Can be an alarm interrupt, update complete interrupt,
+ * or a periodic interrupt. We store the status in the
+ * low byte and the number of interrupts received since
+ * the last read in the remainder of rtc_irq_data.
+ */
+
+ spin_lock(&rtc_lock);
+ rtc_irq_data += 0x100;
+ rtc_irq_data &= ~0xff;
+ if (is_hpet_enabled()) {
+ /*
+ * In this case it is HPET RTC interrupt handler
+ * calling us, with the interrupt information
+ * passed as arg1, instead of irq.
+ */
+ rtc_irq_data |= (unsigned long)irq & 0xF0;
+ } else {
+ rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0);
+ }
+
+ if (rtc_status & RTC_TIMER_ON)
+ mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
+
+ spin_unlock(&rtc_lock);
+
+ /* Now do the rest of the actions */
+ spin_lock(&rtc_task_lock);
+ if (rtc_callback)
+ rtc_callback->func(rtc_callback->private_data);
+ spin_unlock(&rtc_task_lock);
+ wake_up_interruptible(&rtc_wait);
+
+ kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
+
+ return IRQ_HANDLED;
+}
+#endif
+
+/*
+ * sysctl-tuning infrastructure.
+ */
+static ctl_table rtc_table[] = {
+ {
+ .procname = "max-user-freq",
+ .data = &rtc_max_user_freq,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ { }
+};
+
+static ctl_table rtc_root[] = {
+ {
+ .procname = "rtc",
+ .mode = 0555,
+ .child = rtc_table,
+ },
+ { }
+};
+
+static ctl_table dev_root[] = {
+ {
+ .procname = "dev",
+ .mode = 0555,
+ .child = rtc_root,
+ },
+ { }
+};
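+
+/*
+ * These tables surface as /proc/sys/dev/rtc/max-user-freq, so the
+ * unprivileged periodic-IRQ ceiling can be raised at run time:
+ *
+ * echo 1024 > /proc/sys/dev/rtc/max-user-freq
+ */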
+
+static struct ctl_table_header *sysctl_header;
+
+static int __init init_sysctl(void)
+{
+ sysctl_header = register_sysctl_table(dev_root);
+ return 0;
+}
+
+static void __exit cleanup_sysctl(void)
+{
+ unregister_sysctl_table(sysctl_header);
+}
+
+/*
+ * Now all the various file operations that we export.
+ */
+
+static ssize_t rtc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+#ifndef RTC_IRQ
+ return -EIO;
+#else
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long data;
+ ssize_t retval;
+
+ if (rtc_has_irq == 0)
+ return -EIO;
+
+ /*
+ * Historically this function used to assume that sizeof(unsigned long)
+ * is the same in userspace and kernelspace. This led to problems
+ * for configurations with multiple ABIs such as the MIPS o32 and 64
+ * ABIs supported on the same kernel. So now we support read of both
+ * 4 and 8 bytes and assume that's the sizeof(unsigned long) in the
+ * userspace ABI.
+ */
+ if (count != sizeof(unsigned int) && count != sizeof(unsigned long))
+ return -EINVAL;
+
+ add_wait_queue(&rtc_wait, &wait);
+
+ do {
+ /* First make it right. Then make it fast. Putting this whole
+ * block within the parentheses of a while would be too
+ * confusing. And no, xchg() is not the answer. */
+
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_lock_irq(&rtc_lock);
+ data = rtc_irq_data;
+ rtc_irq_data = 0;
+ spin_unlock_irq(&rtc_lock);
+
+ if (data != 0)
+ break;
+
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ goto out;
+ }
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ goto out;
+ }
+ schedule();
+ } while (1);
+
+ if (count == sizeof(unsigned int)) {
+ retval = put_user(data,
+ (unsigned int __user *)buf) ?: sizeof(int);
+ } else {
+ retval = put_user(data,
+ (unsigned long __user *)buf) ?: sizeof(long);
+ }
+ if (!retval)
+ retval = count;
+ out:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&rtc_wait, &wait);
+
+ return retval;
+#endif
+}
+
+static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
+{
+ struct rtc_time wtime;
+
+#ifdef RTC_IRQ
+ if (rtc_has_irq == 0) {
+ switch (cmd) {
+ case RTC_AIE_OFF:
+ case RTC_AIE_ON:
+ case RTC_PIE_OFF:
+ case RTC_PIE_ON:
+ case RTC_UIE_OFF:
+ case RTC_UIE_ON:
+ case RTC_IRQP_READ:
+ case RTC_IRQP_SET:
+ return -EINVAL;
+ }
+ }
+#endif
+
+ switch (cmd) {
+#ifdef RTC_IRQ
+ case RTC_AIE_OFF: /* Mask alarm int. enab. bit */
+ {
+ mask_rtc_irq_bit(RTC_AIE);
+ return 0;
+ }
+ case RTC_AIE_ON: /* Allow alarm interrupts. */
+ {
+ set_rtc_irq_bit(RTC_AIE);
+ return 0;
+ }
+ case RTC_PIE_OFF: /* Mask periodic int. enab. bit */
+ {
+ /* can be called from isr via rtc_control() */
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ mask_rtc_irq_bit_locked(RTC_PIE);
+ if (rtc_status & RTC_TIMER_ON) {
+ rtc_status &= ~RTC_TIMER_ON;
+ del_timer(&rtc_irq_timer);
+ }
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ return 0;
+ }
+ case RTC_PIE_ON: /* Allow periodic ints */
+ {
+ /* can be called from isr via rtc_control() */
+ unsigned long flags;
+
+ /*
+ * We don't really want Joe User enabling more
+ * than 64Hz of interrupts on a multi-user machine.
+ */
+ if (!kernel && (rtc_freq > rtc_max_user_freq) &&
+ (!capable(CAP_SYS_RESOURCE)))
+ return -EACCES;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ if (!(rtc_status & RTC_TIMER_ON)) {
+ mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq +
+ 2*HZ/100);
+ rtc_status |= RTC_TIMER_ON;
+ }
+ set_rtc_irq_bit_locked(RTC_PIE);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ return 0;
+ }
+ case RTC_UIE_OFF: /* Mask ints from RTC updates. */
+ {
+ mask_rtc_irq_bit(RTC_UIE);
+ return 0;
+ }
+ case RTC_UIE_ON: /* Allow ints for RTC updates. */
+ {
+ set_rtc_irq_bit(RTC_UIE);
+ return 0;
+ }
+#endif
+ case RTC_ALM_READ: /* Read the present alarm time */
+ {
+ /*
+ * This returns a struct rtc_time. Reading >= 0xc0
+ * means "don't care" or "match all". Only the tm_hour,
+ * tm_min, and tm_sec values are filled in.
+ */
+ memset(&wtime, 0, sizeof(struct rtc_time));
+ get_rtc_alm_time(&wtime);
+ break;
+ }
+ case RTC_ALM_SET: /* Store a time into the alarm */
+ {
+ /*
+ * This expects a struct rtc_time. Writing 0xff means
+ * "don't care" or "match all". Only the tm_hour,
+ * tm_min and tm_sec are used.
+ */
+ unsigned char hrs, min, sec;
+ struct rtc_time alm_tm;
+
+ if (copy_from_user(&alm_tm, (struct rtc_time __user *)arg,
+ sizeof(struct rtc_time)))
+ return -EFAULT;
+
+ hrs = alm_tm.tm_hour;
+ min = alm_tm.tm_min;
+ sec = alm_tm.tm_sec;
+
+ spin_lock_irq(&rtc_lock);
+ if (hpet_set_alarm_time(hrs, min, sec)) {
+ /*
+ * Fall through and set the alarm time in CMOS too,
+ * so that RTC_ALM_READ returns the proper value
+ */
+ }
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) ||
+ RTC_ALWAYS_BCD) {
+ if (sec < 60)
+ sec = bin2bcd(sec);
+ else
+ sec = 0xff;
+
+ if (min < 60)
+ min = bin2bcd(min);
+ else
+ min = 0xff;
+
+ if (hrs < 24)
+ hrs = bin2bcd(hrs);
+ else
+ hrs = 0xff;
+ }
+ CMOS_WRITE(hrs, RTC_HOURS_ALARM);
+ CMOS_WRITE(min, RTC_MINUTES_ALARM);
+ CMOS_WRITE(sec, RTC_SECONDS_ALARM);
+ spin_unlock_irq(&rtc_lock);
+
+ return 0;
+ }
+ case RTC_RD_TIME: /* Read the time/date from RTC */
+ {
+ memset(&wtime, 0, sizeof(struct rtc_time));
+ rtc_get_rtc_time(&wtime);
+ break;
+ }
+ case RTC_SET_TIME: /* Set the RTC */
+ {
+ struct rtc_time rtc_tm;
+ unsigned char mon, day, hrs, min, sec, leap_yr;
+ unsigned char save_control, save_freq_select;
+ unsigned int yrs;
+#ifdef CONFIG_MACH_DECSTATION
+ unsigned int real_yrs;
+#endif
+
+ if (!capable(CAP_SYS_TIME))
+ return -EACCES;
+
+ if (copy_from_user(&rtc_tm, (struct rtc_time __user *)arg,
+ sizeof(struct rtc_time)))
+ return -EFAULT;
+
+ yrs = rtc_tm.tm_year + 1900;
+ mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */
+ day = rtc_tm.tm_mday;
+ hrs = rtc_tm.tm_hour;
+ min = rtc_tm.tm_min;
+ sec = rtc_tm.tm_sec;
+
+ if (yrs < 1970)
+ return -EINVAL;
+
+ leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
+
+ if ((mon > 12) || (day == 0))
+ return -EINVAL;
+
+ if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
+ return -EINVAL;
+
+ if ((hrs >= 24) || (min >= 60) || (sec >= 60))
+ return -EINVAL;
+
+ yrs -= epoch;
+ if (yrs > 255) /* They are unsigned */
+ return -EINVAL;
+
+ spin_lock_irq(&rtc_lock);
+#ifdef CONFIG_MACH_DECSTATION
+ real_yrs = yrs;
+ yrs = 72;
+
+ /*
+ * We want to keep the year set to 73 until March
+ * for non-leap years, so that Feb 29th is handled
+ * correctly.
+ */
+ if (!leap_yr && mon < 3) {
+ real_yrs--;
+ yrs = 73;
+ }
+#endif
+ /* These limits and adjustments are independent of
+ * whether the chip is in binary mode or not.
+ */
+ if (yrs > 169) {
+ spin_unlock_irq(&rtc_lock);
+ return -EINVAL;
+ }
+ if (yrs >= 100)
+ yrs -= 100;
+
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
+ || RTC_ALWAYS_BCD) {
+ sec = bin2bcd(sec);
+ min = bin2bcd(min);
+ hrs = bin2bcd(hrs);
+ day = bin2bcd(day);
+ mon = bin2bcd(mon);
+ yrs = bin2bcd(yrs);
+ }
+
+ save_control = CMOS_READ(RTC_CONTROL);
+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+#ifdef CONFIG_MACH_DECSTATION
+ CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
+#endif
+ CMOS_WRITE(yrs, RTC_YEAR);
+ CMOS_WRITE(mon, RTC_MONTH);
+ CMOS_WRITE(day, RTC_DAY_OF_MONTH);
+ CMOS_WRITE(hrs, RTC_HOURS);
+ CMOS_WRITE(min, RTC_MINUTES);
+ CMOS_WRITE(sec, RTC_SECONDS);
+
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+
+ spin_unlock_irq(&rtc_lock);
+ return 0;
+ }
+#ifdef RTC_IRQ
+ case RTC_IRQP_READ: /* Read the periodic IRQ rate. */
+ {
+ return put_user(rtc_freq, (unsigned long __user *)arg);
+ }
+ case RTC_IRQP_SET: /* Set periodic IRQ rate. */
+ {
+ int tmp = 0;
+ unsigned char val;
+ /* can be called from isr via rtc_control() */
+ unsigned long flags;
+
+ /*
+ * The max we can do is 8192Hz.
+ */
+ if ((arg < 2) || (arg > 8192))
+ return -EINVAL;
+ /*
+ * We don't really want Joe User generating more
+ * than 64Hz of interrupts on a multi-user machine.
+ */
+ if (!kernel && (arg > rtc_max_user_freq) &&
+ !capable(CAP_SYS_RESOURCE))
+ return -EACCES;
+
+ while (arg > (1<<tmp))
+ tmp++;
+
+ /*
+ * Check that the input was really a power of 2.
+ */
+ if (arg != (1<<tmp))
+ return -EINVAL;
+
+ rtc_freq = arg;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ if (hpet_set_periodic_freq(arg)) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return 0;
+ }
+
+ val = CMOS_READ(RTC_FREQ_SELECT) & 0xf0;
+ val |= (16 - tmp);
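+ /*
+ * e.g. arg == 1024 gives tmp == 10, i.e. a divider nibble of 6 --
+ * the CMOS reset default programmed again in rtc_init() below.
+ */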
+ CMOS_WRITE(val, RTC_FREQ_SELECT);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return 0;
+ }
+#endif
+ case RTC_EPOCH_READ: /* Read the epoch. */
+ {
+ return put_user(epoch, (unsigned long __user *)arg);
+ }
+ case RTC_EPOCH_SET: /* Set the epoch. */
+ {
+ /*
+ * There were no RTC clocks before 1900.
+ */
+ if (arg < 1900)
+ return -EINVAL;
+
+ if (!capable(CAP_SYS_TIME))
+ return -EACCES;
+
+ epoch = arg;
+ return 0;
+ }
+ default:
+ return -ENOTTY;
+ }
+ return copy_to_user((void __user *)arg,
+ &wtime, sizeof wtime) ? -EFAULT : 0;
+}
+
+static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return rtc_do_ioctl(cmd, arg, 0);
+}
+
+/*
+ * We enforce only one user at a time here with the open/close.
+ * Also clear the previous interrupt data on an open, and clean
+ * up things on a close.
+ */
+static int rtc_open(struct inode *inode, struct file *file)
+{
+ spin_lock_irq(&rtc_lock);
+
+ if (rtc_status & RTC_IS_OPEN)
+ goto out_busy;
+
+ rtc_status |= RTC_IS_OPEN;
+
+ rtc_irq_data = 0;
+ spin_unlock_irq(&rtc_lock);
+ return 0;
+
+out_busy:
+ spin_unlock_irq(&rtc_lock);
+ return -EBUSY;
+}
+
+static int rtc_fasync(int fd, struct file *filp, int on)
+{
+ return fasync_helper(fd, filp, on, &rtc_async_queue);
+}
+
+static int rtc_release(struct inode *inode, struct file *file)
+{
+#ifdef RTC_IRQ
+ unsigned char tmp;
+
+ if (rtc_has_irq == 0)
+ goto no_irq;
+
+ /*
+ * Turn off all interrupts once the device is no longer
+ * in use, and clear the data.
+ */
+
+ spin_lock_irq(&rtc_lock);
+ if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) {
+ tmp = CMOS_READ(RTC_CONTROL);
+ tmp &= ~RTC_PIE;
+ tmp &= ~RTC_AIE;
+ tmp &= ~RTC_UIE;
+ CMOS_WRITE(tmp, RTC_CONTROL);
+ CMOS_READ(RTC_INTR_FLAGS);
+ }
+ if (rtc_status & RTC_TIMER_ON) {
+ rtc_status &= ~RTC_TIMER_ON;
+ del_timer(&rtc_irq_timer);
+ }
+ spin_unlock_irq(&rtc_lock);
+
+no_irq:
+#endif
+
+ spin_lock_irq(&rtc_lock);
+ rtc_irq_data = 0;
+ rtc_status &= ~RTC_IS_OPEN;
+ spin_unlock_irq(&rtc_lock);
+
+ return 0;
+}
+
+#ifdef RTC_IRQ
+static unsigned int rtc_poll(struct file *file, poll_table *wait)
+{
+ unsigned long l;
+
+ if (rtc_has_irq == 0)
+ return 0;
+
+ poll_wait(file, &rtc_wait, wait);
+
+ spin_lock_irq(&rtc_lock);
+ l = rtc_irq_data;
+ spin_unlock_irq(&rtc_lock);
+
+ if (l != 0)
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+#endif
+
+int rtc_register(rtc_task_t *task)
+{
+#ifndef RTC_IRQ
+ return -EIO;
+#else
+ if (task == NULL || task->func == NULL)
+ return -EINVAL;
+ spin_lock_irq(&rtc_lock);
+ if (rtc_status & RTC_IS_OPEN) {
+ spin_unlock_irq(&rtc_lock);
+ return -EBUSY;
+ }
+ spin_lock(&rtc_task_lock);
+ if (rtc_callback) {
+ spin_unlock(&rtc_task_lock);
+ spin_unlock_irq(&rtc_lock);
+ return -EBUSY;
+ }
+ rtc_status |= RTC_IS_OPEN;
+ rtc_callback = task;
+ spin_unlock(&rtc_task_lock);
+ spin_unlock_irq(&rtc_lock);
+ return 0;
+#endif
+}
+EXPORT_SYMBOL(rtc_register);
+
+int rtc_unregister(rtc_task_t *task)
+{
+#ifndef RTC_IRQ
+ return -EIO;
+#else
+ unsigned char tmp;
+
+ spin_lock_irq(&rtc_lock);
+ spin_lock(&rtc_task_lock);
+ if (rtc_callback != task) {
+ spin_unlock(&rtc_task_lock);
+ spin_unlock_irq(&rtc_lock);
+ return -ENXIO;
+ }
+ rtc_callback = NULL;
+
+ /* disable controls */
+ if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) {
+ tmp = CMOS_READ(RTC_CONTROL);
+ tmp &= ~RTC_PIE;
+ tmp &= ~RTC_AIE;
+ tmp &= ~RTC_UIE;
+ CMOS_WRITE(tmp, RTC_CONTROL);
+ CMOS_READ(RTC_INTR_FLAGS);
+ }
+ if (rtc_status & RTC_TIMER_ON) {
+ rtc_status &= ~RTC_TIMER_ON;
+ del_timer(&rtc_irq_timer);
+ }
+ rtc_status &= ~RTC_IS_OPEN;
+ spin_unlock(&rtc_task_lock);
+ spin_unlock_irq(&rtc_lock);
+ return 0;
+#endif
+}
+EXPORT_SYMBOL(rtc_unregister);
+
+int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg)
+{
+#ifndef RTC_IRQ
+ return -EIO;
+#else
+ unsigned long flags;
+ if (cmd != RTC_PIE_ON && cmd != RTC_PIE_OFF && cmd != RTC_IRQP_SET)
+ return -EINVAL;
+ spin_lock_irqsave(&rtc_task_lock, flags);
+ if (rtc_callback != task) {
+ spin_unlock_irqrestore(&rtc_task_lock, flags);
+ return -ENXIO;
+ }
+ spin_unlock_irqrestore(&rtc_task_lock, flags);
+ return rtc_do_ioctl(cmd, arg, 1);
+#endif
+}
+EXPORT_SYMBOL(rtc_control);
+
+/*
+ * The various file operations we support.
+ */
+
+static const struct file_operations rtc_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = rtc_read,
+#ifdef RTC_IRQ
+ .poll = rtc_poll,
+#endif
+ .unlocked_ioctl = rtc_ioctl,
+ .open = rtc_open,
+ .release = rtc_release,
+ .fasync = rtc_fasync,
+};
+
+static struct miscdevice rtc_dev = {
+ .minor = RTC_MINOR,
+ .name = "rtc",
+ .fops = &rtc_fops,
+};
+
+#ifdef CONFIG_PROC_FS
+static const struct file_operations rtc_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = rtc_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static resource_size_t rtc_size;
+
+static struct resource * __init rtc_request_region(resource_size_t size)
+{
+ struct resource *r;
+
+ if (RTC_IOMAPPED)
+ r = request_region(RTC_PORT(0), size, "rtc");
+ else
+ r = request_mem_region(RTC_PORT(0), size, "rtc");
+
+ if (r)
+ rtc_size = size;
+
+ return r;
+}
+
+static void rtc_release_region(void)
+{
+ if (RTC_IOMAPPED)
+ release_region(RTC_PORT(0), rtc_size);
+ else
+ release_mem_region(RTC_PORT(0), rtc_size);
+}
+
+static int __init rtc_init(void)
+{
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *ent;
+#endif
+#if defined(__alpha__) || defined(__mips__)
+ unsigned int year, ctrl;
+ char *guess = NULL;
+#endif
+#ifdef CONFIG_SPARC32
+ struct device_node *ebus_dp;
+ struct platform_device *op;
+#else
+ void *r;
+#ifdef RTC_IRQ
+ irq_handler_t rtc_int_handler_ptr;
+#endif
+#endif
+
+#ifdef CONFIG_SPARC32
+ for_each_node_by_name(ebus_dp, "ebus") {
+ struct device_node *dp;
+ for (dp = ebus_dp; dp; dp = dp->sibling) {
+ if (!strcmp(dp->name, "rtc")) {
+ op = of_find_device_by_node(dp);
+ if (op) {
+ rtc_port = op->resource[0].start;
+ rtc_irq = op->irqs[0];
+ goto found;
+ }
+ }
+ }
+ }
+ rtc_has_irq = 0;
+ printk(KERN_ERR "rtc_init: no PC rtc found\n");
+ return -EIO;
+
+found:
+ if (!rtc_irq) {
+ rtc_has_irq = 0;
+ goto no_irq;
+ }
+
+ /*
+ * XXX Interrupt pin #7 in Espresso is shared between RTC and
+ * PCI Slot 2 INTA# (and some INTx# in Slot 1).
+ */
+ if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc",
+ (void *)&rtc_port)) {
+ rtc_has_irq = 0;
+ printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq);
+ return -EIO;
+ }
+no_irq:
+#else
+ r = rtc_request_region(RTC_IO_EXTENT);
+
+ /*
+ * If we've already requested a smaller range (for example, because
+ * PNPBIOS or ACPI told us how the device is configured), the request
+ * above might fail because it's too big.
+ *
+ * If so, request just the range we actually use.
+ */
+ if (!r)
+ r = rtc_request_region(RTC_IO_EXTENT_USED);
+ if (!r) {
+#ifdef RTC_IRQ
+ rtc_has_irq = 0;
+#endif
+ printk(KERN_ERR "rtc: I/O resource %lx is not free.\n",
+ (long)(RTC_PORT(0)));
+ return -EIO;
+ }
+
+#ifdef RTC_IRQ
+ if (is_hpet_enabled()) {
+ int err;
+
+ rtc_int_handler_ptr = hpet_rtc_interrupt;
+ err = hpet_register_irq_handler(rtc_interrupt);
+ if (err != 0) {
+ printk(KERN_WARNING "hpet_register_irq_handler failed "
+ "in rtc_init().");
+ return err;
+ }
+ } else {
+ rtc_int_handler_ptr = rtc_interrupt;
+ }
+
+ if (request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED,
+ "rtc", NULL)) {
+ /* Yeah right, seeing as irq 8 doesn't even hit the bus. */
+ rtc_has_irq = 0;
+ printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
+ rtc_release_region();
+
+ return -EIO;
+ }
+ hpet_rtc_timer_init();
+
+#endif
+
+#endif /* CONFIG_SPARC32 vs. others */
+
+ if (misc_register(&rtc_dev)) {
+#ifdef RTC_IRQ
+ free_irq(RTC_IRQ, NULL);
+ hpet_unregister_irq_handler(rtc_interrupt);
+ rtc_has_irq = 0;
+#endif
+ rtc_release_region();
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_PROC_FS
+ ent = proc_create("driver/rtc", 0, NULL, &rtc_proc_fops);
+ if (!ent)
+ printk(KERN_WARNING "rtc: Failed to register with procfs.\n");
+#endif
+
+#if defined(__alpha__) || defined(__mips__)
+ rtc_freq = HZ;
+
+ /* Each operating system on an Alpha uses its own epoch.
+ Let's try to guess which one we are using now. */
+
+ if (rtc_is_updating() != 0)
+ msleep(20);
+
+ spin_lock_irq(&rtc_lock);
+ year = CMOS_READ(RTC_YEAR);
+ ctrl = CMOS_READ(RTC_CONTROL);
+ spin_unlock_irq(&rtc_lock);
+
+ if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ year = bcd2bin(year); /* This should never happen... */
+
+ if (year < 20) {
+ epoch = 2000;
+ guess = "SRM (post-2000)";
+ } else if (year >= 20 && year < 48) {
+ epoch = 1980;
+ guess = "ARC console";
+ } else if (year >= 48 && year < 72) {
+ epoch = 1952;
+ guess = "Digital UNIX";
+#if defined(__mips__)
+ } else if (year >= 72 && year < 74) {
+ epoch = 2000;
+ guess = "Digital DECstation";
+#else
+ } else if (year >= 70) {
+ epoch = 1900;
+ guess = "Standard PC (1900)";
+#endif
+ }
+ if (guess)
+ printk(KERN_INFO "rtc: %s epoch (%lu) detected\n",
+ guess, epoch);
+#endif
+#ifdef RTC_IRQ
+ if (rtc_has_irq == 0)
+ goto no_irq2;
+
+ spin_lock_irq(&rtc_lock);
+ rtc_freq = 1024;
+ if (!hpet_set_periodic_freq(rtc_freq)) {
+ /*
+ * Initialize periodic frequency to CMOS reset default,
+ * which is 1024Hz
+ */
+ CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06),
+ RTC_FREQ_SELECT);
+ }
+ spin_unlock_irq(&rtc_lock);
+no_irq2:
+#endif
+
+ (void) init_sysctl();
+
+ printk(KERN_INFO "Real Time Clock Driver v" RTC_VERSION "\n");
+
+ return 0;
+}
+
+static void __exit rtc_exit(void)
+{
+ cleanup_sysctl();
+ remove_proc_entry("driver/rtc", NULL);
+ misc_deregister(&rtc_dev);
+
+#ifdef CONFIG_SPARC32
+ if (rtc_has_irq)
+ free_irq(rtc_irq, &rtc_port);
+#else
+ rtc_release_region();
+#ifdef RTC_IRQ
+ if (rtc_has_irq) {
+ free_irq(RTC_IRQ, NULL);
+ hpet_unregister_irq_handler(hpet_rtc_interrupt);
+ }
+#endif
+#endif /* CONFIG_SPARC32 */
+}
+
+module_init(rtc_init);
+module_exit(rtc_exit);
+
+#ifdef RTC_IRQ
+/*
+ * At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
+ * (usually during an IDE disk interrupt, with IRQ unmasking off)
+ * Since the interrupt handler doesn't get called, the IRQ status
+ * byte doesn't get read, and the RTC stops generating interrupts.
+ * A timer is set, and will call this function if/when that happens.
+ * To get it out of this stalled state, we just read the status.
+ * At least a jiffy of interrupts (rtc_freq/HZ) will have been lost.
+ * (You *really* shouldn't be trying to use a non-realtime system
+ * for something that requires a steady > 1kHz signal anyway.)
+ */
+
+static void rtc_dropped_irq(unsigned long data)
+{
+ unsigned long freq;
+
+ spin_lock_irq(&rtc_lock);
+
+ if (hpet_rtc_dropped_irq()) {
+ spin_unlock_irq(&rtc_lock);
+ return;
+ }
+
+ /* Just in case someone disabled the timer from behind our back... */
+ if (rtc_status & RTC_TIMER_ON)
+ mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
+
+ rtc_irq_data += ((rtc_freq/HZ)<<8);
+ rtc_irq_data &= ~0xff;
+ rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); /* restart */
+
+ freq = rtc_freq;
+
+ spin_unlock_irq(&rtc_lock);
+
+ if (printk_ratelimit()) {
+ printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
+ freq);
+ }
+
+ /* Now we have new data */
+ wake_up_interruptible(&rtc_wait);
+
+ kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
+}
+#endif
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Info exported via "/proc/driver/rtc".
+ */
+
+static int rtc_proc_show(struct seq_file *seq, void *v)
+{
+#define YN(bit) ((ctrl & bit) ? "yes" : "no")
+#define NY(bit) ((ctrl & bit) ? "no" : "yes")
+ struct rtc_time tm;
+ unsigned char batt, ctrl;
+ unsigned long freq;
+
+ spin_lock_irq(&rtc_lock);
+ batt = CMOS_READ(RTC_VALID) & RTC_VRT;
+ ctrl = CMOS_READ(RTC_CONTROL);
+ freq = rtc_freq;
+ spin_unlock_irq(&rtc_lock);
+
+
+ rtc_get_rtc_time(&tm);
+
+ /*
+ * There is no way to tell if the luser has the RTC set for local
+ * time or for Coordinated Universal Time (UTC). Probably local though.
+ */
+ seq_printf(seq,
+ "rtc_time\t: %02d:%02d:%02d\n"
+ "rtc_date\t: %04d-%02d-%02d\n"
+ "rtc_epoch\t: %04lu\n",
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, epoch);
+
+ get_rtc_alm_time(&tm);
+
+ /*
+ * We implicitly assume 24hr mode here. Alarm values >= 0xc0 will
+ * match any value for that particular field. Values that are
+ * greater than a valid time, but less than 0xc0 shouldn't appear.
+ */
+ seq_puts(seq, "alarm\t\t: ");
+ if (tm.tm_hour <= 24)
+ seq_printf(seq, "%02d:", tm.tm_hour);
+ else
+ seq_puts(seq, "**:");
+
+ if (tm.tm_min <= 59)
+ seq_printf(seq, "%02d:", tm.tm_min);
+ else
+ seq_puts(seq, "**:");
+
+ if (tm.tm_sec <= 59)
+ seq_printf(seq, "%02d\n", tm.tm_sec);
+ else
+ seq_puts(seq, "**\n");
+
+ seq_printf(seq,
+ "DST_enable\t: %s\n"
+ "BCD\t\t: %s\n"
+ "24hr\t\t: %s\n"
+ "square_wave\t: %s\n"
+ "alarm_IRQ\t: %s\n"
+ "update_IRQ\t: %s\n"
+ "periodic_IRQ\t: %s\n"
+ "periodic_freq\t: %ld\n"
+ "batt_status\t: %s\n",
+ YN(RTC_DST_EN),
+ NY(RTC_DM_BINARY),
+ YN(RTC_24H),
+ YN(RTC_SQWE),
+ YN(RTC_AIE),
+ YN(RTC_UIE),
+ YN(RTC_PIE),
+ freq,
+ batt ? "okay" : "dead");
+
+ return 0;
+#undef YN
+#undef NY
+}
+
+static int rtc_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtc_proc_show, NULL);
+}
+#endif
+
+static void rtc_get_rtc_time(struct rtc_time *rtc_tm)
+{
+ unsigned long uip_watchdog = jiffies, flags;
+ unsigned char ctrl;
+#ifdef CONFIG_MACH_DECSTATION
+ unsigned int real_year;
+#endif
+
+ /*
+ * read the RTC once any update in progress is done. The update
+ * can take just over 2ms. We wait 20ms. There is no need to
+ * poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
+ * If you need to know *exactly* when a second has started, enable
+ * periodic update complete interrupts, (via ioctl) and then
+ * immediately read /dev/rtc which will block until you get the IRQ.
+ * Once the read clears, read the RTC time (again via ioctl). Easy.
+ */
+
+ while (rtc_is_updating() != 0 &&
+ time_before(jiffies, uip_watchdog + 2*HZ/100))
+ cpu_relax();
+
+ /*
+ * Only the values that we read from the RTC are set. We leave
+ * tm_wday, tm_yday and tm_isdst untouched. Note that while the
+ * RTC has RTC_DAY_OF_WEEK, we should usually ignore it, as it is
+ * only updated by the RTC when initially set to a non-zero value.
+ */
+ spin_lock_irqsave(&rtc_lock, flags);
+ rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS);
+ rtc_tm->tm_min = CMOS_READ(RTC_MINUTES);
+ rtc_tm->tm_hour = CMOS_READ(RTC_HOURS);
+ rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
+ rtc_tm->tm_mon = CMOS_READ(RTC_MONTH);
+ rtc_tm->tm_year = CMOS_READ(RTC_YEAR);
+ /* Only set from 2.6.16 onwards */
+ rtc_tm->tm_wday = CMOS_READ(RTC_DAY_OF_WEEK);
+
+#ifdef CONFIG_MACH_DECSTATION
+ real_year = CMOS_READ(RTC_DEC_YEAR);
+#endif
+ ctrl = CMOS_READ(RTC_CONTROL);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
+ rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
+ rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
+ rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
+ rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
+ rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
+ rtc_tm->tm_wday = bcd2bin(rtc_tm->tm_wday);
+ }
+
+#ifdef CONFIG_MACH_DECSTATION
+ rtc_tm->tm_year += real_year - 72;
+#endif
+
+ /*
+ * Account for differences between how the RTC uses the values
+ * and how they are defined in a struct rtc_time.
+ */
+ rtc_tm->tm_year += epoch - 1900;
+ if (rtc_tm->tm_year <= 69)
+ rtc_tm->tm_year += 100;
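+ /*
+ * e.g. a chip year of 15 under the default 1900 epoch becomes
+ * tm_year 115 (2015): two-digit years are windowed so 70..99
+ * map to 1970..1999 and 00..69 to 2000..2069.
+ */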
+
+ rtc_tm->tm_mon--;
+}
+
+static void get_rtc_alm_time(struct rtc_time *alm_tm)
+{
+ unsigned char ctrl;
+
+ /*
+ * Only the values that we read from the RTC are set. That
+ * means only tm_hour, tm_min, and tm_sec.
+ */
+ spin_lock_irq(&rtc_lock);
+ alm_tm->tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
+ alm_tm->tm_min = CMOS_READ(RTC_MINUTES_ALARM);
+ alm_tm->tm_hour = CMOS_READ(RTC_HOURS_ALARM);
+ ctrl = CMOS_READ(RTC_CONTROL);
+ spin_unlock_irq(&rtc_lock);
+
+ if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
+ alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
+ alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
+ }
+}
+
+#ifdef RTC_IRQ
+/*
+ * Used to disable/enable interrupts for any one of UIE, AIE, PIE.
+ * Rumour has it that if you frob the interrupt enable/disable
+ * bits in RTC_CONTROL, you should read RTC_INTR_FLAGS, to
+ * ensure you actually start getting interrupts. Probably for
+ * compatibility with older/broken chipset RTC implementations.
+ * We also clear out any old irq data after an ioctl() that
+ * meddles with the interrupt enable/disable bits.
+ */
+
+static void mask_rtc_irq_bit_locked(unsigned char bit)
+{
+ unsigned char val;
+
+ if (hpet_mask_rtc_irq_bit(bit))
+ return;
+ val = CMOS_READ(RTC_CONTROL);
+ val &= ~bit;
+ CMOS_WRITE(val, RTC_CONTROL);
+ CMOS_READ(RTC_INTR_FLAGS);
+
+ rtc_irq_data = 0;
+}
+
+static void set_rtc_irq_bit_locked(unsigned char bit)
+{
+ unsigned char val;
+
+ if (hpet_set_rtc_irq_bit(bit))
+ return;
+ val = CMOS_READ(RTC_CONTROL);
+ val |= bit;
+ CMOS_WRITE(val, RTC_CONTROL);
+ CMOS_READ(RTC_INTR_FLAGS);
+
+ rtc_irq_data = 0;
+}
+#endif
+
+MODULE_AUTHOR("Paul Gortmaker");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(RTC_MINOR);
diff --git a/drivers/char/scc.h b/drivers/char/scc.h
new file mode 100644
index 00000000..341b1142
--- /dev/null
+++ b/drivers/char/scc.h
@@ -0,0 +1,613 @@
+/*
+ * atari_SCC.h: Definitions for the Am8530 Serial Communications Controller
+ *
+ * Copyright 1994 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ */
+
+
+#ifndef _SCC_H
+#define _SCC_H
+
+#include <linux/delay.h>
+
+/* Special configuration ioctls for the Atari Am8530 Serial
+ * Communications Controller
+ */
+
+/* ioctl command codes */
+
+#define TIOCGATSCC 0x54c0 /* get SCC configuration */
+#define TIOCSATSCC 0x54c1 /* set SCC configuration */
+#define TIOCDATSCC 0x54c2 /* reset configuration to defaults */
+
+/* Clock sources */
+
+#define CLK_RTxC 0
+#define CLK_TRxC 1
+#define CLK_PCLK 2
+
+/* baud_bases for the common clocks in the Atari. These are the real
+ * frequencies divided by 16.
+ */
+
+#define SCC_BAUD_BASE_TIMC 19200 /* 0.3072 MHz from TT-MFP, Timer C */
+#define SCC_BAUD_BASE_BCLK 153600 /* 2.4576 MHz */
+#define SCC_BAUD_BASE_PCLK4 229500 /* 3.6720 MHz */
+#define SCC_BAUD_BASE_PCLK 503374 /* 8.0539763 MHz */
+#define SCC_BAUD_BASE_NONE 0 /* for not connected or unused
+ * clock sources */
+
+/* The SCC clock configuration structure */
+
+struct scc_clock_config {
+ unsigned RTxC_base; /* base_baud of RTxC */
+ unsigned TRxC_base; /* base_baud of TRxC */
+ unsigned PCLK_base; /* base_baud of PCLK, both channels! */
+ struct {
+ unsigned clksrc; /* CLK_RTxC, CLK_TRxC or CLK_PCLK */
+ unsigned divisor; /* divisor for base baud, valid values:
+ * see below */
+ } baud_table[17]; /* For 50, 75, 110, 135, 150, 200, 300,
+ * 600, 1200, 1800, 2400, 4800, 9600,
+ * 19200, 38400, 57600 and 115200 bps.
+ * The last two could be replaced by
+ * other rates > 38400 if they're not
+ * possible.
+ */
+};
+
+/* The following divisors are valid:
+ *
+ * - CLK_RTxC: 1 or even (1, 2 and 4 are the direct modes, > 4 use
+ * the BRG)
+ *
+ * - CLK_TRxC: 1, 2 or 4 (no BRG, only direct modes possible)
+ *
+ * - CLK_PCLK: >= 4 and even (no direct modes, only BRG)
+ *
+ */
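+
+/* A worked example: a clock source whose base_baud is
+ * SCC_BAUD_BASE_BCLK (2.4576 MHz / 16 == 153600) reaches 9600 bps with
+ * a divisor of 16, which is even and > 4 and so runs through the BRG.
+ */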
+
+struct scc_port {
+ struct gs_port gs;
+ volatile unsigned char *ctrlp;
+ volatile unsigned char *datap;
+ int x_char; /* xon/xoff character */
+ int c_dcd;
+ int channel;
+ struct scc_port *port_a; /* Reference to port A and B */
+ struct scc_port *port_b; /* structs for reg access */
+};
+
+#define SCC_MAGIC 0x52696368
+
+/***********************************************************************/
+/* */
+/* Register Names */
+/* */
+/***********************************************************************/
+
+/* The SCC documentation gives no explicit names to the registers,
+ * they're just called WR0..15 and RR0..15. To make the source code
+ * better readable and make the transparent write reg read access (see
+ * below) possible, I christen them here with self-invented names.
+ * Note that (real) read registers are assigned numbers 16..31. WR7'
+ * has number 33.
+ */
+
+#define COMMAND_REG 0 /* wo */
+#define INT_AND_DMA_REG 1 /* wo */
+#define INT_VECTOR_REG 2 /* rw, common to both channels */
+#define RX_CTRL_REG 3 /* rw */
+#define AUX1_CTRL_REG 4 /* rw */
+#define TX_CTRL_REG 5 /* rw */
+#define SYNC_ADR_REG 6 /* wo */
+#define SYNC_CHAR_REG 7 /* wo */
+#define SDLC_OPTION_REG 33 /* wo */
+#define TX_DATA_REG 8 /* wo */
+#define MASTER_INT_CTRL 9 /* wo, common to both channels */
+#define AUX2_CTRL_REG 10 /* rw */
+#define CLK_CTRL_REG 11 /* wo */
+#define TIMER_LOW_REG 12 /* rw */
+#define TIMER_HIGH_REG 13 /* rw */
+#define DPLL_CTRL_REG 14 /* wo */
+#define INT_CTRL_REG 15 /* rw */
+
+#define STATUS_REG 16 /* ro */
+#define SPCOND_STATUS_REG 17 /* ro */
+/* RR2 is WR2 for Channel A, Channel B gives vector + current status: */
+#define CURR_VECTOR_REG 18 /* Ch. B only, Ch. A for rw */
+#define INT_PENDING_REG 19 /* Channel A only! */
+/* RR4 is WR4, if b6(WR7') == 1 */
+/* RR5 is WR5, if b6(WR7') == 1 */
+#define FS_FIFO_LOW_REG 22 /* ro */
+#define FS_FIFO_HIGH_REG 23 /* ro */
+#define RX_DATA_REG 24 /* ro */
+/* RR9 is WR3, if b6(WR7') == 1 */
+#define DPLL_STATUS_REG 26 /* ro */
+/* RR11 is WR10, if b6(WR7') == 1 */
+/* RR12 is WR12 */
+/* RR13 is WR13 */
+/* RR14 not present */
+/* RR15 is WR15 */
+
+
+/***********************************************************************/
+/* */
+/* Register Values */
+/* */
+/***********************************************************************/
+
+
+/* WR0: COMMAND_REG "CR" */
+
+#define CR_RX_CRC_RESET 0x40
+#define CR_TX_CRC_RESET 0x80
+#define CR_TX_UNDERRUN_RESET 0xc0
+
+#define CR_EXTSTAT_RESET 0x10
+#define CR_SEND_ABORT 0x18
+#define CR_ENAB_INT_NEXT_RX 0x20
+#define CR_TX_PENDING_RESET 0x28
+#define CR_ERROR_RESET 0x30
+#define CR_HIGHEST_IUS_RESET 0x38
+
+
+/* WR1: INT_AND_DMA_REG "IDR" */
+
+#define IDR_EXTSTAT_INT_ENAB 0x01
+#define IDR_TX_INT_ENAB 0x02
+#define IDR_PARERR_AS_SPCOND 0x04
+
+#define IDR_RX_INT_DISAB 0x00
+#define IDR_RX_INT_FIRST 0x08
+#define IDR_RX_INT_ALL 0x10
+#define IDR_RX_INT_SPCOND 0x18
+#define IDR_RX_INT_MASK 0x18
+
+#define IDR_WAITREQ_RX 0x20
+#define IDR_WAITREQ_IS_REQ 0x40
+#define IDR_WAITREQ_ENAB 0x80
+
+
+/* WR3: RX_CTRL_REG "RCR" */
+
+#define RCR_RX_ENAB 0x01
+#define RCR_DISCARD_SYNC_CHARS 0x02
+#define RCR_ADDR_SEARCH 0x04
+#define RCR_CRC_ENAB 0x08
+#define RCR_SEARCH_MODE 0x10
+#define RCR_AUTO_ENAB_MODE 0x20
+
+#define RCR_CHSIZE_MASK 0xc0
+#define RCR_CHSIZE_5 0x00
+#define RCR_CHSIZE_6 0x40
+#define RCR_CHSIZE_7 0x80
+#define RCR_CHSIZE_8 0xc0
+
+
+/* WR4: AUX1_CTRL_REG "A1CR" */
+
+#define A1CR_PARITY_MASK 0x03
+#define A1CR_PARITY_NONE 0x00
+#define A1CR_PARITY_ODD 0x01
+#define A1CR_PARITY_EVEN 0x03
+
+#define A1CR_MODE_MASK 0x0c
+#define A1CR_MODE_SYNCR 0x00
+#define A1CR_MODE_ASYNC_1 0x04
+#define A1CR_MODE_ASYNC_15 0x08
+#define A1CR_MODE_ASYNC_2 0x0c
+
+#define A1CR_SYNCR_MODE_MASK 0x30
+#define A1CR_SYNCR_MONOSYNC 0x00
+#define A1CR_SYNCR_BISYNC 0x10
+#define A1CR_SYNCR_SDLC 0x20
+#define A1CR_SYNCR_EXTCSYNC 0x30
+
+#define A1CR_CLKMODE_MASK 0xc0
+#define A1CR_CLKMODE_x1 0x00
+#define A1CR_CLKMODE_x16 0x40
+#define A1CR_CLKMODE_x32 0x80
+#define A1CR_CLKMODE_x64 0xc0
+
+
+/* WR5: TX_CTRL_REG "TCR" */
+
+#define TCR_TX_CRC_ENAB 0x01
+#define TCR_RTS 0x02
+#define TCR_USE_CRC_CCITT 0x00
+#define TCR_USE_CRC_16 0x04
+#define TCR_TX_ENAB 0x08
+#define TCR_SEND_BREAK 0x10
+
+#define TCR_CHSIZE_MASK 0x60
+#define TCR_CHSIZE_5 0x00
+#define TCR_CHSIZE_6 0x20
+#define TCR_CHSIZE_7 0x40
+#define TCR_CHSIZE_8 0x60
+
+#define TCR_DTR 0x80
+
+
+/* WR7': SDLC_OPTION_REG "SOR" */
+
+#define SOR_AUTO_TX_ENAB 0x01
+#define SOR_AUTO_EOM_RESET 0x02
+#define SOR_AUTO_RTS_MODE 0x04
+#define SOR_NRZI_DISAB_HIGH 0x08
+#define SOR_ALT_DTRREQ_TIMING 0x10
+#define SOR_READ_CRC_CHARS 0x20
+#define SOR_EXTENDED_REG_ACCESS 0x40
+
+
+/* WR9: MASTER_INT_CTRL "MIC" */
+
+#define MIC_VEC_INCL_STAT 0x01
+#define MIC_NO_VECTOR 0x02
+#define MIC_DISAB_LOWER_CHAIN 0x04
+#define MIC_MASTER_INT_ENAB 0x08
+#define MIC_STATUS_HIGH 0x10
+#define MIC_IGN_INTACK 0x20
+
+#define MIC_NO_RESET 0x00
+#define MIC_CH_A_RESET 0x40
+#define MIC_CH_B_RESET 0x80
+#define MIC_HARD_RESET 0xc0
+
+
+/* WR10: AUX2_CTRL_REG "A2CR" */
+
+#define A2CR_SYNC_6 0x01
+#define A2CR_LOOP_MODE 0x02
+#define A2CR_ABORT_ON_UNDERRUN 0x04
+#define A2CR_MARK_IDLE 0x08
+#define A2CR_GO_ACTIVE_ON_POLL 0x10
+
+#define A2CR_CODING_MASK 0x60
+#define A2CR_CODING_NRZ 0x00
+#define A2CR_CODING_NRZI 0x20
+#define A2CR_CODING_FM1 0x40
+#define A2CR_CODING_FM0 0x60
+
+#define A2CR_PRESET_CRC_1 0x80
+
+
+/* WR11: CLK_CTRL_REG "CCR" */
+
+#define CCR_TRxCOUT_MASK 0x03
+#define CCR_TRxCOUT_XTAL 0x00
+#define CCR_TRxCOUT_TXCLK 0x01
+#define CCR_TRxCOUT_BRG 0x02
+#define CCR_TRxCOUT_DPLL 0x03
+
+#define CCR_TRxC_OUTPUT 0x04
+
+#define CCR_TXCLK_MASK 0x18
+#define CCR_TXCLK_RTxC 0x00
+#define CCR_TXCLK_TRxC 0x08
+#define CCR_TXCLK_BRG 0x10
+#define CCR_TXCLK_DPLL 0x18
+
+#define CCR_RXCLK_MASK 0x60
+#define CCR_RXCLK_RTxC 0x00
+#define CCR_RXCLK_TRxC 0x20
+#define CCR_RXCLK_BRG 0x40
+#define CCR_RXCLK_DPLL 0x60
+
+#define CCR_RTxC_XTAL 0x80
+
+
+/* WR14: DPLL_CTRL_REG "DCR" */
+
+#define DCR_BRG_ENAB 0x01
+#define DCR_BRG_USE_PCLK 0x02
+#define DCR_DTRREQ_IS_REQ 0x04
+#define DCR_AUTO_ECHO 0x08
+#define DCR_LOCAL_LOOPBACK 0x10
+
+#define DCR_DPLL_EDGE_SEARCH 0x20
+#define DCR_DPLL_ERR_RESET 0x40
+#define DCR_DPLL_DISAB 0x60
+#define DCR_DPLL_CLK_BRG 0x80
+#define DCR_DPLL_CLK_RTxC 0xa0
+#define DCR_DPLL_FM 0xc0
+#define DCR_DPLL_NRZI 0xe0
+
+
+/* WR15: INT_CTRL_REG "ICR" */
+
+#define ICR_OPTIONREG_SELECT 0x01
+#define ICR_ENAB_BRG_ZERO_INT 0x02
+#define ICR_USE_FS_FIFO 0x04
+#define ICR_ENAB_DCD_INT 0x08
+#define ICR_ENAB_SYNC_INT 0x10
+#define ICR_ENAB_CTS_INT 0x20
+#define ICR_ENAB_UNDERRUN_INT 0x40
+#define ICR_ENAB_BREAK_INT 0x80
+
+
+/* RR0: STATUS_REG "SR" */
+
+#define SR_CHAR_AVAIL 0x01
+#define SR_BRG_ZERO 0x02
+#define SR_TX_BUF_EMPTY 0x04
+#define SR_DCD 0x08
+#define SR_SYNC_ABORT 0x10
+#define SR_CTS 0x20
+#define SR_TX_UNDERRUN 0x40
+#define SR_BREAK 0x80
+
+
+/* RR1: SPCOND_STATUS_REG "SCSR" */
+
+#define SCSR_ALL_SENT 0x01
+#define SCSR_RESIDUAL_MASK 0x0e
+#define SCSR_PARITY_ERR 0x10
+#define SCSR_RX_OVERRUN 0x20
+#define SCSR_CRC_FRAME_ERR 0x40
+#define SCSR_END_OF_FRAME 0x80
+
+
+/* RR3: INT_PENDING_REG "IPR" */
+
+#define IPR_B_EXTSTAT 0x01
+#define IPR_B_TX 0x02
+#define IPR_B_RX 0x04
+#define IPR_A_EXTSTAT 0x08
+#define IPR_A_TX 0x10
+#define IPR_A_RX 0x20
+
+
+/* RR7: FS_FIFO_HIGH_REG "FFHR" */
+
+#define FFHR_CNT_MASK 0x3f
+#define FFHR_IS_FROM_FIFO 0x40
+#define FFHR_FIFO_OVERRUN 0x80
+
+
+/* RR10: DPLL_STATUS_REG "DSR" */
+
+#define DSR_ON_LOOP 0x02
+#define DSR_ON_LOOP_SENDING 0x10
+#define DSR_TWO_CLK_MISSING 0x40
+#define DSR_ONE_CLK_MISSING 0x80
+
+/***********************************************************************/
+/* */
+/* Register Access */
+/* */
+/***********************************************************************/
+
+
+/* The SCC needs 3.5 PCLK cycles of recovery time between two register
+ * accesses. PCLK runs at 8 MHz on an Atari, so this delay is 3.5 *
+ * 125 ns = 437.5 ns. This is too short for udelay().
+ * 10/16/95: a "tstb st_mfp.par_dt_reg" takes 600 ns (to be verified) and
+ * thus should be about right.
+ */
+
+#define scc_reg_delay() \
+ do { \
+ if (MACH_IS_MVME16x || MACH_IS_BVME6000 || MACH_IS_MVME147) \
+ __asm__ __volatile__ ( " nop; nop"); \
+ else if (MACH_IS_ATARI) \
+ __asm__ __volatile__ ( "tstb %0" : : "g" (*_scc_del) : "cc" );\
+ } while (0)
+
+static unsigned char scc_shadow[2][16];
+
+/* The following functions should ease the somewhat complicated
+ * register access of the SCC. _SCCwrite() stores all written values
+ * (except for WR0 and WR8) in shadow registers for later recall. This
+ * removes the burden of remembering written values as needed. The
+ * extra work of storing the value doesn't count, since a delay is
+ * needed after an SCC access anyway. Additionally, _SCCwrite() manages
+ * writes to WR0 and WR8 differently, because these can be accessed
+ * directly with less overhead. Other special cases are WR7 and WR7'.
+ * _SCCwrite() automatically checks which of these registers is selected
+ * and changes b0 of WR15 if needed.
+ *
+ * _SCCread() for standard read registers is straightforward, except
+ * for RR2 (split into two "virtual" registers: one for the value
+ * written to WR2 (from the shadow) and one for the vector including
+ * status from RR2, Ch. B) and RR3. The latter must be read from
+ * Channel A, because it reads as all zeros on Ch. B. RR0 and RR8 can
+ * be accessed directly as before.
+ *
+ * The two inline functions contain complicated switch statements. But
+ * I rely on regno and final_delay being constants, so gcc can reduce
+ * the whole thing to just a few assembler statements.
+ *
+ * _SCCwrite and _SCCread aren't intended to be used directly under
+ * normal circumstances. The macros SCCread[_NB] and SCCwrite[_NB] are
+ * for that purpose. They assume that a local variable 'port' is
+ * declared and pointing to the port's scc_port entry. The
+ * variants with "_NB" appended should be used if no other SCC
+ * accesses follow immediately (within 0.5 usecs). They just skip the
+ * final delay nops.
+ *
+ * Please note that accesses to SCC registers should only take place
+ * when interrupts are turned off (at least if SCC interrupts are
+ * enabled). Otherwise, an interrupt could interfere with the
+ * two-stage accessing process.
+ *
+ */
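+
+/* A minimal usage sketch of the convention just described (an
+ * illustration, not code from the original driver). It assumes the
+ * 'scc_del' pointer referenced by the macros is in scope and that the
+ * caller disables interrupts around the accesses:
+ *
+ *	static void example_enable_tx(struct scc_port *port)
+ *	{
+ *		unsigned long flags;
+ *		SCC_ACCESS_INIT(port);
+ *
+ *		local_irq_save(flags);
+ *		SCCwrite(TX_CTRL_REG, TCR_TX_ENAB | TCR_CHSIZE_8);
+ *		SCCmod(INT_AND_DMA_REG, 0xff, IDR_TX_INT_ENAB);
+ *		SCCwrite_NB(COMMAND_REG, CR_TX_PENDING_RESET);
+ *		local_irq_restore(flags);
+ *	}
+ */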
+
+
+static __inline__ void _SCCwrite(
+ struct scc_port *port,
+ unsigned char *shadow,
+ volatile unsigned char *_scc_del,
+ int regno,
+ unsigned char val, int final_delay )
+{
+ switch( regno ) {
+
+ case COMMAND_REG:
+ /* WR0 can be written directly without pointing */
+ *port->ctrlp = val;
+ break;
+
+ case SYNC_CHAR_REG:
+ /* For WR7, first set b0 of WR15 to 0, if needed */
+ if (shadow[INT_CTRL_REG] & ICR_OPTIONREG_SELECT) {
+ *port->ctrlp = 15;
+ shadow[INT_CTRL_REG] &= ~ICR_OPTIONREG_SELECT;
+ scc_reg_delay();
+ *port->ctrlp = shadow[INT_CTRL_REG];
+ scc_reg_delay();
+ }
+ goto normal_case;
+
+ case SDLC_OPTION_REG:
+ /* For WR7', first set b0 of WR15 to 1, if needed */
+ if (!(shadow[INT_CTRL_REG] & ICR_OPTIONREG_SELECT)) {
+ *port->ctrlp = 15;
+ shadow[INT_CTRL_REG] |= ICR_OPTIONREG_SELECT;
+ scc_reg_delay();
+ *port->ctrlp = shadow[INT_CTRL_REG];
+ scc_reg_delay();
+ }
+ *port->ctrlp = 7;
+ shadow[8] = val; /* WR7' shadowed at WR8 */
+ scc_reg_delay();
+ *port->ctrlp = val;
+ break;
+
+ case TX_DATA_REG: /* WR8 */
+ /* TX_DATA_REG can be accessed directly on some h/w */
+ if (MACH_IS_MVME16x || MACH_IS_BVME6000 || MACH_IS_MVME147)
+ {
+ *port->ctrlp = regno;
+ scc_reg_delay();
+ *port->ctrlp = val;
+ }
+ else
+ *port->datap = val;
+ break;
+
+ case MASTER_INT_CTRL:
+ *port->ctrlp = regno;
+ val &= 0x3f; /* bits 6..7 are the reset commands */
+ scc_shadow[0][regno] = val;
+ scc_reg_delay();
+ *port->ctrlp = val;
+ break;
+
+ case DPLL_CTRL_REG:
+ *port->ctrlp = regno;
+ val &= 0x1f; /* bits 5..7 are the DPLL commands */
+ shadow[regno] = val;
+ scc_reg_delay();
+ *port->ctrlp = val;
+ break;
+
+ case 1 ... 6:
+ case 10 ... 13:
+ case 15:
+ normal_case:
+ *port->ctrlp = regno;
+ shadow[regno] = val;
+ scc_reg_delay();
+ *port->ctrlp = val;
+ break;
+
+ default:
+ printk( "Bad SCC write access to WR%d\n", regno );
+ break;
+
+ }
+
+ if (final_delay)
+ scc_reg_delay();
+}
+
+
+static __inline__ unsigned char _SCCread(
+ struct scc_port *port,
+ unsigned char *shadow,
+ volatile unsigned char *_scc_del,
+ int regno, int final_delay )
+{
+ unsigned char rv;
+
+ switch( regno ) {
+
+ /* --- real read registers --- */
+ case STATUS_REG:
+ rv = *port->ctrlp;
+ break;
+
+ case INT_PENDING_REG:
+ /* RR3: read only from Channel A! */
+ port = port->port_a;
+ goto normal_case;
+
+ case RX_DATA_REG:
+ /* RR8 can be accessed directly on some h/w */
+ if (MACH_IS_MVME16x || MACH_IS_BVME6000 || MACH_IS_MVME147)
+ {
+ *port->ctrlp = 8;
+ scc_reg_delay();
+ rv = *port->ctrlp;
+ }
+ else
+ rv = *port->datap;
+ break;
+
+ case CURR_VECTOR_REG:
+ /* RR2 (vector including status) from Ch. B */
+ port = port->port_b;
+ goto normal_case;
+
+ /* --- reading write registers: access the shadow --- */
+ case 1 ... 7:
+ case 10 ... 15:
+ return shadow[regno]; /* no final delay! */
+
+ /* WR7' is special, because it is shadowed at the place of WR8 */
+ case SDLC_OPTION_REG:
+ return shadow[8]; /* no final delay! */
+
+ /* WR9 is special too, because it is common for both channels */
+ case MASTER_INT_CTRL:
+ return scc_shadow[0][9]; /* no final delay! */
+
+ default:
+ printk( "Bad SCC read access to %cR%d\n", (regno & 16) ? 'R' : 'W',
+ regno & ~16 );
+ break;
+
+ case SPCOND_STATUS_REG:
+ case FS_FIFO_LOW_REG:
+ case FS_FIFO_HIGH_REG:
+ case DPLL_STATUS_REG:
+ normal_case:
+ *port->ctrlp = regno & 0x0f;
+ scc_reg_delay();
+ rv = *port->ctrlp;
+ break;
+
+ }
+
+ if (final_delay)
+ scc_reg_delay();
+ return rv;
+}
+
+#define SCC_ACCESS_INIT(port) \
+ unsigned char *_scc_shadow = &scc_shadow[port->channel][0]
+
+#define SCCwrite(reg,val) _SCCwrite(port,_scc_shadow,scc_del,(reg),(val),1)
+#define SCCwrite_NB(reg,val) _SCCwrite(port,_scc_shadow,scc_del,(reg),(val),0)
+#define SCCread(reg) _SCCread(port,_scc_shadow,scc_del,(reg),1)
+#define SCCread_NB(reg) _SCCread(port,_scc_shadow,scc_del,(reg),0)
+
+#define SCCmod(reg,and,or) SCCwrite((reg),(SCCread(reg)&(and))|(or))
+
+#endif /* _SCC_H */
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
new file mode 100644
index 00000000..0bc135b9
--- /dev/null
+++ b/drivers/char/scx200_gpio.c
@@ -0,0 +1,132 @@
+/* linux/drivers/char/scx200_gpio.c
+
+ National Semiconductor SCx200 GPIO driver. Allows a user space
+ process to play with the GPIO pins.
+
+ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> */
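+
+/* A usage sketch (an illustration, not part of this driver): the minor
+ * number of a device node selects the GPIO pin, and the nsc_gpio
+ * helpers interpret single-character commands, e.g. '0'/'1' to drive
+ * the pin (an assumption here; see nsc_gpio.c for the full command
+ * set; the node name below is also an assumption):
+ *
+ *	// mknod /dev/gpio7 c <major> 7
+ *	int fd = open("/dev/gpio7", O_RDWR);
+ *	char c = '1';
+ *	write(fd, &c, 1);	// drive pin 7 high
+ *	read(fd, &c, 1);	// c becomes '0' or '1'
+ *	close(fd);
+ */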
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+
+#include <linux/scx200_gpio.h>
+#include <linux/nsc_gpio.h>
+
+#define DRVNAME "scx200_gpio"
+
+static struct platform_device *pdev;
+
+MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
+MODULE_DESCRIPTION("NatSemi/AMD SCx200 GPIO Pin Driver");
+MODULE_LICENSE("GPL");
+
+static int major = 0; /* default to dynamic major */
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+
+#define MAX_PINS 32 /* 64 later, when known ok */
+
+struct nsc_gpio_ops scx200_gpio_ops = {
+ .owner = THIS_MODULE,
+ .gpio_config = scx200_gpio_configure,
+ .gpio_dump = nsc_gpio_dump,
+ .gpio_get = scx200_gpio_get,
+ .gpio_set = scx200_gpio_set,
+ .gpio_change = scx200_gpio_change,
+ .gpio_current = scx200_gpio_current
+};
+EXPORT_SYMBOL_GPL(scx200_gpio_ops);
+
+static int scx200_gpio_open(struct inode *inode, struct file *file)
+{
+ unsigned m = iminor(inode);
+ file->private_data = &scx200_gpio_ops;
+
+ if (m >= MAX_PINS)
+ return -EINVAL;
+ return nonseekable_open(inode, file);
+}
+
+static int scx200_gpio_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations scx200_gpio_fileops = {
+ .owner = THIS_MODULE,
+ .write = nsc_gpio_write,
+ .read = nsc_gpio_read,
+ .open = scx200_gpio_open,
+ .release = scx200_gpio_release,
+ .llseek = no_llseek,
+};
+
+static struct cdev scx200_gpio_cdev; /* use 1 cdev for all pins */
+
+static int __init scx200_gpio_init(void)
+{
+ int rc;
+ dev_t devid;
+
+ if (!scx200_gpio_present()) {
+ printk(KERN_ERR DRVNAME ": no SCx200 gpio present\n");
+ return -ENODEV;
+ }
+
+ /* support dev_dbg() with pdev->dev */
+ pdev = platform_device_alloc(DRVNAME, 0);
+ if (!pdev)
+ return -ENOMEM;
+
+ rc = platform_device_add(pdev);
+ if (rc)
+ goto undo_malloc;
+
+ /* nsc_gpio uses dev_dbg(), so needs this */
+ scx200_gpio_ops.dev = &pdev->dev;
+
+ if (major) {
+ devid = MKDEV(major, 0);
+ rc = register_chrdev_region(devid, MAX_PINS, "scx200_gpio");
+ } else {
+ rc = alloc_chrdev_region(&devid, 0, MAX_PINS, "scx200_gpio");
+ major = MAJOR(devid);
+ }
+ if (rc < 0) {
+ dev_err(&pdev->dev, "SCx200 chrdev_region err: %d\n", rc);
+ goto undo_platform_device_add;
+ }
+
+ cdev_init(&scx200_gpio_cdev, &scx200_gpio_fileops);
+ cdev_add(&scx200_gpio_cdev, devid, MAX_PINS);
+
+ return 0; /* succeed */
+
+undo_platform_device_add:
+ platform_device_del(pdev);
+undo_malloc:
+ platform_device_put(pdev);
+
+ return rc;
+}
+
+static void __exit scx200_gpio_cleanup(void)
+{
+ cdev_del(&scx200_gpio_cdev);
+ /* cdev_put(&scx200_gpio_cdev); */
+
+ unregister_chrdev_region(MKDEV(major, 0), MAX_PINS);
+ platform_device_unregister(pdev);
+}
+
+module_init(scx200_gpio_init);
+module_exit(scx200_gpio_cleanup);
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
new file mode 100644
index 00000000..5816b39f
--- /dev/null
+++ b/drivers/char/snsc.c
@@ -0,0 +1,466 @@
+/*
+ * SN Platform system controller communication support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * System controller communication driver
+ *
+ * This driver allows a user process to communicate with the system
+ * controller (a.k.a. "IRouter") network in an SGI SN system.
+ */
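+
+/* A usage sketch (an illustration; the device path is an assumption):
+ * a monitoring process opens one of the per-node device nodes created
+ * below and exchanges IRouter packets with read()/write(), each call
+ * moving at most one CHUNKSIZE-byte chunk:
+ *
+ *	int fd = open("/dev/snsc/...", O_RDWR);
+ *	char buf[128];
+ *	int n = read(fd, buf, sizeof(buf));
+ */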
+
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/poll.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <asm/sn/io.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/module.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/nodepda.h>
+#include "snsc.h"
+
+#define SYSCTL_BASENAME "snsc"
+
+#define SCDRV_BUFSZ 2048
+#define SCDRV_TIMEOUT 1000
+
+static DEFINE_MUTEX(scdrv_mutex);
+static irqreturn_t
+scdrv_interrupt(int irq, void *subch_data)
+{
+ struct subch_data_s *sd = subch_data;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&sd->sd_rlock, flags);
+ spin_lock(&sd->sd_wlock);
+ status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
+
+ if (status > 0) {
+ if (status & SAL_IROUTER_INTR_RECV) {
+ wake_up(&sd->sd_rq);
+ }
+ if (status & SAL_IROUTER_INTR_XMIT) {
+ ia64_sn_irtr_intr_disable
+ (sd->sd_nasid, sd->sd_subch,
+ SAL_IROUTER_INTR_XMIT);
+ wake_up(&sd->sd_wq);
+ }
+ }
+ spin_unlock(&sd->sd_wlock);
+ spin_unlock_irqrestore(&sd->sd_rlock, flags);
+ return IRQ_HANDLED;
+}
+
+/*
+ * scdrv_open
+ *
+ * Reserve a subchannel for system controller communication.
+ */
+
+static int
+scdrv_open(struct inode *inode, struct file *file)
+{
+ struct sysctl_data_s *scd;
+ struct subch_data_s *sd;
+ int rv;
+
+ /* look up device info for this device file */
+ scd = container_of(inode->i_cdev, struct sysctl_data_s, scd_cdev);
+
+ /* allocate memory for subchannel data */
+ sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
+ if (sd == NULL) {
+ printk("%s: couldn't allocate subchannel data\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* initialize subch_data_s fields */
+ sd->sd_nasid = scd->scd_nasid;
+ sd->sd_subch = ia64_sn_irtr_open(scd->scd_nasid);
+
+ if (sd->sd_subch < 0) {
+ kfree(sd);
+ printk("%s: couldn't allocate subchannel\n", __func__);
+ return -EBUSY;
+ }
+
+ spin_lock_init(&sd->sd_rlock);
+ spin_lock_init(&sd->sd_wlock);
+ init_waitqueue_head(&sd->sd_rq);
+ init_waitqueue_head(&sd->sd_wq);
+ sema_init(&sd->sd_rbs, 1);
+ sema_init(&sd->sd_wbs, 1);
+
+ file->private_data = sd;
+
+ /* hook this subchannel up to the system controller interrupt */
+ mutex_lock(&scdrv_mutex);
+ rv = request_irq(SGI_UART_VECTOR, scdrv_interrupt,
+ IRQF_SHARED | IRQF_DISABLED,
+ SYSCTL_BASENAME, sd);
+ if (rv) {
+ ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch);
+ kfree(sd);
+ printk("%s: irq request failed (%d)\n", __func__, rv);
+ mutex_unlock(&scdrv_mutex);
+ return -EBUSY;
+ }
+ mutex_unlock(&scdrv_mutex);
+ return 0;
+}
+
+/*
+ * scdrv_release
+ *
+ * Release a previously-reserved subchannel.
+ */
+
+static int
+scdrv_release(struct inode *inode, struct file *file)
+{
+ struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
+ int rv;
+
+ /* free the interrupt */
+ free_irq(SGI_UART_VECTOR, sd);
+
+ /* ask SAL to close the subchannel */
+ rv = ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch);
+
+ kfree(sd);
+ return rv;
+}
+
+/*
+ * scdrv_read
+ *
+ * Called to read bytes from the open IRouter pipe.
+ *
+ */
+
+static inline int
+read_status_check(struct subch_data_s *sd, int *len)
+{
+ return ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch, sd->sd_rb, len);
+}
+
+static ssize_t
+scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
+{
+ int status;
+ int len;
+ unsigned long flags;
+ struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
+
+ /* try to get control of the read buffer */
+ if (down_trylock(&sd->sd_rbs)) {
+ /* somebody else has it now;
+ * if we're non-blocking, then exit...
+ */
+ if (file->f_flags & O_NONBLOCK) {
+ return -EAGAIN;
+ }
+ /* ...or if we want to block, then do so here */
+ if (down_interruptible(&sd->sd_rbs)) {
+ /* something went wrong with wait */
+ return -ERESTARTSYS;
+ }
+ }
+
+ /* anything to read? */
+ len = CHUNKSIZE;
+ spin_lock_irqsave(&sd->sd_rlock, flags);
+ status = read_status_check(sd, &len);
+
+ /* if not, and we're blocking I/O, loop */
+ while (status < 0) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (file->f_flags & O_NONBLOCK) {
+ spin_unlock_irqrestore(&sd->sd_rlock, flags);
+ up(&sd->sd_rbs);
+ return -EAGAIN;
+ }
+
+ len = CHUNKSIZE;
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&sd->sd_rq, &wait);
+ spin_unlock_irqrestore(&sd->sd_rlock, flags);
+
+ schedule_timeout(SCDRV_TIMEOUT);
+
+ remove_wait_queue(&sd->sd_rq, &wait);
+ if (signal_pending(current)) {
+ /* wait was interrupted */
+ up(&sd->sd_rbs);
+ return -ERESTARTSYS;
+ }
+
+ spin_lock_irqsave(&sd->sd_rlock, flags);
+ status = read_status_check(sd, &len);
+ }
+ spin_unlock_irqrestore(&sd->sd_rlock, flags);
+
+ if (len > 0) {
+ /* we read something in the last read_status_check(); copy
+ * it out to user space
+ */
+ if (count < len) {
+ pr_debug("%s: only accepting %d of %d bytes\n",
+ __func__, (int) count, len);
+ }
+ len = min((int) count, len);
+ if (copy_to_user(buf, sd->sd_rb, len))
+ len = -EFAULT;
+ }
+
+ /* release the read buffer and wake anyone who might be
+ * waiting for it
+ */
+ up(&sd->sd_rbs);
+
+ /* return the number of characters read in */
+ return len;
+}
+
+/*
+ * scdrv_write
+ *
+ * Writes a chunk of an IRouter packet (or other system controller data)
+ * to the system controller.
+ *
+ */
+static inline int
+write_status_check(struct subch_data_s *sd, int count)
+{
+ return ia64_sn_irtr_send(sd->sd_nasid, sd->sd_subch, sd->sd_wb, count);
+}
+
+static ssize_t
+scdrv_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ unsigned long flags;
+ int status;
+ struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
+
+ /* try to get control of the write buffer */
+ if (down_trylock(&sd->sd_wbs)) {
+ /* somebody else has it now;
+ * if we're non-blocking, then exit...
+ */
+ if (file->f_flags & O_NONBLOCK) {
+ return -EAGAIN;
+ }
+ /* ...or if we want to block, then do so here */
+ if (down_interruptible(&sd->sd_wbs)) {
+ /* something went wrong with wait */
+ return -ERESTARTSYS;
+ }
+ }
+
+ count = min((int) count, CHUNKSIZE);
+ if (copy_from_user(sd->sd_wb, buf, count)) {
+ up(&sd->sd_wbs);
+ return -EFAULT;
+ }
+
+ /* try to send the buffer */
+ spin_lock_irqsave(&sd->sd_wlock, flags);
+ status = write_status_check(sd, count);
+
+ /* if we failed, and we want to block, then loop */
+ while (status <= 0) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (file->f_flags & O_NONBLOCK) {
+ spin_unlock_irqrestore(&sd->sd_wlock, flags);
+ up(&sd->sd_wbs);
+ return -EAGAIN;
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&sd->sd_wq, &wait);
+ spin_unlock_irqrestore(&sd->sd_wlock, flags);
+
+ schedule_timeout(SCDRV_TIMEOUT);
+
+ remove_wait_queue(&sd->sd_wq, &wait);
+ if (signal_pending(current)) {
+ /* wait was interrupted */
+ up(&sd->sd_wbs);
+ return -ERESTARTSYS;
+ }
+
+ spin_lock_irqsave(&sd->sd_wlock, flags);
+ status = write_status_check(sd, count);
+ }
+ spin_unlock_irqrestore(&sd->sd_wlock, flags);
+
+ /* release the write buffer and wake anyone who's waiting for it */
+ up(&sd->sd_wbs);
+
+ /* return the number of characters accepted (should be the complete
+ * "chunk" as requested)
+ */
+ if ((status >= 0) && (status < count)) {
+ pr_debug("Didn't accept the full chunk; %d of %d\n",
+ status, (int) count);
+ }
+ return status;
+}
+
+static unsigned int
+scdrv_poll(struct file *file, struct poll_table_struct *wait)
+{
+ unsigned int mask = 0;
+ int status = 0;
+ struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
+ unsigned long flags;
+
+ poll_wait(file, &sd->sd_rq, wait);
+ poll_wait(file, &sd->sd_wq, wait);
+
+ spin_lock_irqsave(&sd->sd_rlock, flags);
+ spin_lock(&sd->sd_wlock);
+ status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
+ spin_unlock(&sd->sd_wlock);
+ spin_unlock_irqrestore(&sd->sd_rlock, flags);
+
+ if (status > 0) {
+ if (status & SAL_IROUTER_INTR_RECV) {
+ mask |= POLLIN | POLLRDNORM;
+ }
+ if (status & SAL_IROUTER_INTR_XMIT) {
+ mask |= POLLOUT | POLLWRNORM;
+ }
+ }
+
+ return mask;
+}
+
+static const struct file_operations scdrv_fops = {
+ .owner = THIS_MODULE,
+ .read = scdrv_read,
+ .write = scdrv_write,
+ .poll = scdrv_poll,
+ .open = scdrv_open,
+ .release = scdrv_release,
+ .llseek = noop_llseek,
+};
+
+static struct class *snsc_class;
+
+/*
+ * scdrv_init
+ *
+ * Called at boot time to initialize the system controller communication
+ * facility.
+ */
+int __init
+scdrv_init(void)
+{
+ geoid_t geoid;
+ cnodeid_t cnode;
+ char devname[32];
+ char *devnamep;
+ struct sysctl_data_s *scd;
+ void *salbuf;
+ dev_t first_dev, dev;
+ nasid_t event_nasid;
+
+ if (!ia64_platform_is("sn2"))
+ return -ENODEV;
+
+ event_nasid = ia64_sn_get_console_nasid();
+
+ if (alloc_chrdev_region(&first_dev, 0, num_cnodes,
+ SYSCTL_BASENAME) < 0) {
+ printk("%s: failed to register SN system controller device\n",
+ __func__);
+ return -ENODEV;
+ }
+ snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME);
+
+ for (cnode = 0; cnode < num_cnodes; cnode++) {
+ geoid = cnodeid_get_geoid(cnode);
+ devnamep = devname;
+ format_module_id(devnamep, geo_module(geoid),
+ MODULE_FORMAT_BRIEF);
+ devnamep = devname + strlen(devname);
+ sprintf(devnamep, "^%d#%d", geo_slot(geoid),
+ geo_slab(geoid));
+
+ /* allocate sysctl device data */
+ scd = kzalloc(sizeof (struct sysctl_data_s),
+ GFP_KERNEL);
+ if (!scd) {
+ printk("%s: failed to allocate device info"
+ "for %s/%s\n", __func__,
+ SYSCTL_BASENAME, devname);
+ continue;
+ }
+
+ /* initialize sysctl device data fields */
+ scd->scd_nasid = cnodeid_to_nasid(cnode);
+ if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) {
+ printk("%s: failed to allocate driver buffer"
+ "(%s%s)\n", __func__,
+ SYSCTL_BASENAME, devname);
+ kfree(scd);
+ continue;
+ }
+
+ if (ia64_sn_irtr_init(scd->scd_nasid, salbuf,
+ SCDRV_BUFSZ) < 0) {
+ printk
+ ("%s: failed to initialize SAL for"
+ " system controller communication"
+ " (%s/%s): outdated PROM?\n",
+ __func__, SYSCTL_BASENAME, devname);
+ kfree(scd);
+ kfree(salbuf);
+ continue;
+ }
+
+ dev = first_dev + cnode;
+ cdev_init(&scd->scd_cdev, &scdrv_fops);
+ if (cdev_add(&scd->scd_cdev, dev, 1)) {
+ printk("%s: failed to register system"
+ " controller device (%s%s)\n",
+ __func__, SYSCTL_BASENAME, devname);
+ kfree(scd);
+ kfree(salbuf);
+ continue;
+ }
+
+ device_create(snsc_class, NULL, dev, NULL,
+ "%s", devname);
+
+ ia64_sn_irtr_intr_enable(scd->scd_nasid,
+ 0 /*ignored */ ,
+ SAL_IROUTER_INTR_RECV);
+
+ /* on the console nasid, prepare to receive
+ * system controller environmental events
+ */
+ if(scd->scd_nasid == event_nasid) {
+ scdrv_event_init(scd);
+ }
+ }
+ return 0;
+}
+
+module_init(scdrv_init);
diff --git a/drivers/char/snsc.h b/drivers/char/snsc.h
new file mode 100644
index 00000000..e8c52c88
--- /dev/null
+++ b/drivers/char/snsc.h
@@ -0,0 +1,92 @@
+/*
+ * SN Platform system controller communication support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * This file contains macros and data types for communication with the
+ * system controllers in SGI SN systems.
+ */
+
+#ifndef _SN_SYSCTL_H_
+#define _SN_SYSCTL_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/semaphore.h>
+#include <asm/sn/types.h>
+
+#define CHUNKSIZE 127
+
+/* This structure is used to track an open subchannel. */
+struct subch_data_s {
+ nasid_t sd_nasid; /* node on which the subchannel was opened */
+ int sd_subch; /* subchannel number */
+ spinlock_t sd_rlock; /* monitor lock for rsv */
+ spinlock_t sd_wlock; /* monitor lock for wsv */
+ wait_queue_head_t sd_rq; /* wait queue for readers */
+ wait_queue_head_t sd_wq; /* wait queue for writers */
+ struct semaphore sd_rbs; /* semaphore for read buffer */
+ struct semaphore sd_wbs; /* semaphore for write buffer */
+
+ char sd_rb[CHUNKSIZE]; /* read buffer */
+ char sd_wb[CHUNKSIZE]; /* write buffer */
+};
+
+struct sysctl_data_s {
+ struct cdev scd_cdev; /* Character device info */
+ nasid_t scd_nasid; /* Node on which subchannels are opened. */
+};
+
+
+/* argument types */
+#define IR_ARG_INT 0x00 /* 4-byte integer (big-endian) */
+#define IR_ARG_ASCII 0x01 /* null-terminated ASCII string */
+#define IR_ARG_UNKNOWN 0x80 /* unknown data type. The low
+ * 7 bits will contain the data
+ * length. */
+#define IR_ARG_UNKNOWN_LENGTH_MASK 0x7f
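+/* e.g. a type byte of 0x85 is IR_ARG_UNKNOWN data of length
+ * (0x85 & IR_ARG_UNKNOWN_LENGTH_MASK) == 5 bytes */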
+
+
+/* system controller event codes */
+#define EV_CLASS_MASK 0xf000ul
+#define EV_SEVERITY_MASK 0x0f00ul
+#define EV_COMPONENT_MASK 0x00fful
+
+#define EV_CLASS_POWER 0x1000ul
+#define EV_CLASS_FAN 0x2000ul
+#define EV_CLASS_TEMP 0x3000ul
+#define EV_CLASS_ENV 0x4000ul
+#define EV_CLASS_TEST_FAULT 0x5000ul
+#define EV_CLASS_TEST_WARNING 0x6000ul
+#define EV_CLASS_PWRD_NOTIFY 0x8000ul
+
+/* ENV class codes */
+#define ENV_PWRDN_PEND 0x4101ul
+
+#define EV_SEVERITY_POWER_STABLE 0x0000ul
+#define EV_SEVERITY_POWER_LOW_WARNING 0x0100ul
+#define EV_SEVERITY_POWER_HIGH_WARNING 0x0200ul
+#define EV_SEVERITY_POWER_HIGH_FAULT 0x0300ul
+#define EV_SEVERITY_POWER_LOW_FAULT 0x0400ul
+
+#define EV_SEVERITY_FAN_STABLE 0x0000ul
+#define EV_SEVERITY_FAN_WARNING 0x0100ul
+#define EV_SEVERITY_FAN_FAULT 0x0200ul
+
+#define EV_SEVERITY_TEMP_STABLE 0x0000ul
+#define EV_SEVERITY_TEMP_ADVISORY 0x0100ul
+#define EV_SEVERITY_TEMP_CRITICAL 0x0200ul
+#define EV_SEVERITY_TEMP_FAULT 0x0300ul
+
+void scdrv_event_init(struct sysctl_data_s *);
+
+#endif /* _SN_SYSCTL_H_ */
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c
new file mode 100644
index 00000000..ee156948
--- /dev/null
+++ b/drivers/char/snsc_event.c
@@ -0,0 +1,304 @@
+/*
+ * SN Platform system controller communication support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * System controller event handler
+ *
+ * These routines deal with environmental events arriving from the
+ * system controllers.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/unaligned.h>
+#include "snsc.h"
+
+static struct subch_data_s *event_sd;
+
+void scdrv_event(unsigned long);
+DECLARE_TASKLET(sn_sysctl_event, scdrv_event, 0);
+
+/*
+ * scdrv_event_interrupt
+ *
+ * Pull incoming environmental events off the physical link to the
+ * system controller and put them in a temporary holding area in SAL.
+ * Schedule scdrv_event() to move them along to their ultimate
+ * destination.
+ */
+static irqreturn_t
+scdrv_event_interrupt(int irq, void *subch_data)
+{
+ struct subch_data_s *sd = subch_data;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&sd->sd_rlock, flags);
+ status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
+
+ if ((status > 0) && (status & SAL_IROUTER_INTR_RECV)) {
+ tasklet_schedule(&sn_sysctl_event);
+ }
+ spin_unlock_irqrestore(&sd->sd_rlock, flags);
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * scdrv_parse_event
+ *
+ * Break an event (as read from SAL) into useful pieces so we can decide
+ * what to do with it.
+ */
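+/* Layout of an event packet as implied by the parsing below (multi-byte
+ * integers are big-endian):
+ *
+ *	offset  0: 4-byte event source address
+ *	offset  4: 4-byte system controller event code
+ *	offset  8: 1-byte argument count (must be 2)
+ *	offset  9: 1-byte argument type (must be IR_ARG_INT)
+ *	offset 10: 4-byte ESP code
+ *	offset 14: 1-byte argument type (must be IR_ARG_ASCII)
+ *	offset 15: leading CR/LF, then the event description string
+ */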
+static int
+scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
+{
+ char *desc_end;
+
+ /* record event source address */
+ *src = get_unaligned_be32(event);
+ event += 4; /* move on to event code */
+
+ /* record the system controller's event code */
+ *code = get_unaligned_be32(event);
+ event += 4; /* move on to event arguments */
+
+ /* how many arguments are in the packet? */
+ if (*event++ != 2) {
+ /* if not 2, give up */
+ return -1;
+ }
+
+ /* parse out the ESP code */
+ if (*event++ != IR_ARG_INT) {
+ /* not an integer argument, so give up */
+ return -1;
+ }
+ *esp_code = get_unaligned_be32(event);
+ event += 4;
+
+ /* parse out the event description */
+ if (*event++ != IR_ARG_ASCII) {
+ /* not an ASCII string, so give up */
+ return -1;
+ }
+ event[CHUNKSIZE-1] = '\0'; /* ensure this string ends! */
+ event += 2; /* skip leading CR/LF */
+ desc_end = desc + sprintf(desc, "%s", event);
+
+ /* strip trailing CR/LF (if any) */
+ for (desc_end--;
+ (desc_end != desc) && ((*desc_end == 0xd) || (*desc_end == 0xa));
+ desc_end--) {
+ *desc_end = '\0';
+ }
+
+ return 0;
+}
+
+
+/*
+ * scdrv_event_severity
+ *
+ * Figure out how urgent a message we should write to the console/syslog
+ * via printk.
+ */
+static char *
+scdrv_event_severity(int code)
+{
+ int ev_class = (code & EV_CLASS_MASK);
+ int ev_severity = (code & EV_SEVERITY_MASK);
+ char *pk_severity = KERN_NOTICE;
+
+ switch (ev_class) {
+ case EV_CLASS_POWER:
+ switch (ev_severity) {
+ case EV_SEVERITY_POWER_LOW_WARNING:
+ case EV_SEVERITY_POWER_HIGH_WARNING:
+ pk_severity = KERN_WARNING;
+ break;
+ case EV_SEVERITY_POWER_HIGH_FAULT:
+ case EV_SEVERITY_POWER_LOW_FAULT:
+ pk_severity = KERN_ALERT;
+ break;
+ }
+ break;
+ case EV_CLASS_FAN:
+ switch (ev_severity) {
+ case EV_SEVERITY_FAN_WARNING:
+ pk_severity = KERN_WARNING;
+ break;
+ case EV_SEVERITY_FAN_FAULT:
+ pk_severity = KERN_CRIT;
+ break;
+ }
+ break;
+ case EV_CLASS_TEMP:
+ switch (ev_severity) {
+ case EV_SEVERITY_TEMP_ADVISORY:
+ pk_severity = KERN_WARNING;
+ break;
+ case EV_SEVERITY_TEMP_CRITICAL:
+ pk_severity = KERN_CRIT;
+ break;
+ case EV_SEVERITY_TEMP_FAULT:
+ pk_severity = KERN_ALERT;
+ break;
+ }
+ break;
+ case EV_CLASS_ENV:
+ pk_severity = KERN_ALERT;
+ break;
+ case EV_CLASS_TEST_FAULT:
+ pk_severity = KERN_ALERT;
+ break;
+ case EV_CLASS_TEST_WARNING:
+ pk_severity = KERN_WARNING;
+ break;
+ case EV_CLASS_PWRD_NOTIFY:
+ pk_severity = KERN_ALERT;
+ break;
+ }
+
+ return pk_severity;
+}
+
+
+/*
+ * scdrv_dispatch_event
+ *
+ * Do the right thing with an incoming event. That's often nothing
+ * more than printing it to the system log. For power-down notifications
+ * we start a graceful shutdown.
+ */
+static void
+scdrv_dispatch_event(char *event, int len)
+{
+ static int snsc_shutting_down = 0;
+ int code, esp_code, src, class;
+ char desc[CHUNKSIZE];
+ char *severity;
+
+ if (scdrv_parse_event(event, &src, &code, &esp_code, desc) < 0) {
+ /* ignore uninterpretable event */
+ return;
+ }
+
+ /* how urgent is the message? */
+ severity = scdrv_event_severity(code);
+
+ class = (code & EV_CLASS_MASK);
+
+ if (class == EV_CLASS_PWRD_NOTIFY || code == ENV_PWRDN_PEND) {
+ if (snsc_shutting_down)
+ return;
+
+ snsc_shutting_down = 1;
+
+ /* give a message for each type of event */
+ if (class == EV_CLASS_PWRD_NOTIFY)
+ printk(KERN_NOTICE "Power off indication received."
+ " Sending SIGPWR to init...\n");
+ else if (code == ENV_PWRDN_PEND)
+ printk(KERN_CRIT "WARNING: Shutting down the system"
+ " due to a critical environmental condition."
+ " Sending SIGPWR to init...\n");
+
+ /* give a SIGPWR signal to init proc */
+ kill_cad_pid(SIGPWR, 0);
+ } else {
+ /* print to system log */
+ printk("%s|$(0x%x)%s\n", severity, esp_code, desc);
+ }
+}
+
+
+/*
+ * scdrv_event
+ *
+ * Called as a tasklet when an event arrives from the L1. Read the event
+ * from where it's temporarily stored in SAL and call scdrv_dispatch_event()
+ * to send it on its way. Keep trying to read events until SAL indicates
+ * that there are no more immediately available.
+ */
+void
+scdrv_event(unsigned long dummy)
+{
+ int status;
+ int len;
+ unsigned long flags;
+ struct subch_data_s *sd = event_sd;
+
+ /* anything to read? */
+ len = CHUNKSIZE;
+ spin_lock_irqsave(&sd->sd_rlock, flags);
+ status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
+ sd->sd_rb, &len);
+
+ while (status >= 0) {
+ spin_unlock_irqrestore(&sd->sd_rlock, flags);
+ scdrv_dispatch_event(sd->sd_rb, len);
+ len = CHUNKSIZE;
+ spin_lock_irqsave(&sd->sd_rlock, flags);
+ status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
+ sd->sd_rb, &len);
+ }
+ spin_unlock_irqrestore(&sd->sd_rlock, flags);
+}
+
+
+/*
+ * scdrv_event_init
+ *
+ * Sets up a system controller subchannel to begin receiving event
+ * messages. This is sort of a specialized version of scdrv_open()
+ * in drivers/char/snsc.c.
+ */
+void
+scdrv_event_init(struct sysctl_data_s *scd)
+{
+ int rv;
+
+ event_sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
+ if (event_sd == NULL) {
+ printk(KERN_WARNING "%s: couldn't allocate subchannel info"
+ " for event monitoring\n", __func__);
+ return;
+ }
+
+ /* initialize subch_data_s fields */
+ event_sd->sd_nasid = scd->scd_nasid;
+ spin_lock_init(&event_sd->sd_rlock);
+
+ /* ask the system controllers to send events to this node */
+ event_sd->sd_subch = ia64_sn_sysctl_event_init(scd->scd_nasid);
+
+ if (event_sd->sd_subch < 0) {
+ kfree(event_sd);
+ printk(KERN_WARNING "%s: couldn't open event subchannel\n",
+ __func__);
+ return;
+ }
+
+ /* hook event subchannel up to the system controller interrupt */
+ rv = request_irq(SGI_UART_VECTOR, scdrv_event_interrupt,
+ IRQF_SHARED | IRQF_DISABLED,
+ "system controller events", event_sd);
+ if (rv) {
+ printk(KERN_WARNING "%s: irq request failed (%d)\n",
+ __func__, rv);
+ ia64_sn_irtr_close(event_sd->sd_nasid, event_sd->sd_subch);
+ kfree(event_sd);
+ return;
+ }
+}
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
new file mode 100644
index 00000000..1ee8ce7d
--- /dev/null
+++ b/drivers/char/sonypi.c
@@ -0,0 +1,1569 @@
+/*
+ * Sony Programmable I/O Control Device driver for VAIO
+ *
+ * Copyright (C) 2007 Mattia Dongili <malattia@linux.it>
+ *
+ * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net>
+ *
+ * Copyright (C) 2005 Narayanan R S <nars@kadamba.org>
+ *
+ * Copyright (C) 2001-2002 Alcôve <www.alcove.com>
+ *
+ * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au>
+ *
+ * Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp>
+ *
+ * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp>
+ *
+ * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com>
+ *
+ * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/input.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/kfifo.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <linux/sonypi.h>
+
+#define SONYPI_DRIVER_VERSION "1.26"
+
+MODULE_AUTHOR("Stelian Pop <stelian@popies.net>");
+MODULE_DESCRIPTION("Sony Programmable I/O Control Device driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SONYPI_DRIVER_VERSION);
+
+static int minor = -1;
+module_param(minor, int, 0);
+MODULE_PARM_DESC(minor,
+ "minor number of the misc device, default is -1 (automatic)");
+
+static int verbose; /* = 0 */
+module_param(verbose, int, 0644);
+MODULE_PARM_DESC(verbose, "be verbose, default is 0 (no)");
+
+static int fnkeyinit; /* = 0 */
+module_param(fnkeyinit, int, 0444);
+MODULE_PARM_DESC(fnkeyinit,
+ "set this if your Fn keys do not generate any event");
+
+static int camera; /* = 0 */
+module_param(camera, int, 0444);
+MODULE_PARM_DESC(camera,
+ "set this if you have a MotionEye camera (PictureBook series)");
+
+static int compat; /* = 0 */
+module_param(compat, int, 0444);
+MODULE_PARM_DESC(compat,
+ "set this if you want to enable backward compatibility mode");
+
+static unsigned long mask = 0xffffffff;
+module_param(mask, ulong, 0644);
+MODULE_PARM_DESC(mask,
+ "set this to the mask of event you want to enable (see doc)");
+
+static int useinput = 1;
+module_param(useinput, int, 0444);
+MODULE_PARM_DESC(useinput,
+ "set this if you would like sonypi to feed events to the input subsystem");
+
+static int check_ioport = 1;
+module_param(check_ioport, int, 0444);
+MODULE_PARM_DESC(check_ioport,
+ "set this to 0 if you think the automatic ioport check for sony-laptop is wrong");
+
+#define SONYPI_DEVICE_MODEL_TYPE1 1
+#define SONYPI_DEVICE_MODEL_TYPE2 2
+#define SONYPI_DEVICE_MODEL_TYPE3 3
+
+/* type1 models use these */
+#define SONYPI_IRQ_PORT 0x8034
+#define SONYPI_IRQ_SHIFT 22
+#define SONYPI_TYPE1_BASE 0x50
+#define SONYPI_G10A (SONYPI_TYPE1_BASE+0x14)
+#define SONYPI_TYPE1_REGION_SIZE 0x08
+#define SONYPI_TYPE1_EVTYPE_OFFSET 0x04
+
+/* type2 series specifics */
+#define SONYPI_SIRQ 0x9b
+#define SONYPI_SLOB 0x9c
+#define SONYPI_SHIB 0x9d
+#define SONYPI_TYPE2_REGION_SIZE 0x20
+#define SONYPI_TYPE2_EVTYPE_OFFSET 0x12
+
+/* type3 series specifics */
+#define SONYPI_TYPE3_BASE 0x40
+#define SONYPI_TYPE3_GID2 (SONYPI_TYPE3_BASE+0x48) /* 16 bits */
+#define SONYPI_TYPE3_MISC (SONYPI_TYPE3_BASE+0x6d) /* 8 bits */
+#define SONYPI_TYPE3_REGION_SIZE 0x20
+#define SONYPI_TYPE3_EVTYPE_OFFSET 0x12
+
+/* battery / brightness addresses */
+#define SONYPI_BAT_FLAGS 0x81
+#define SONYPI_LCD_LIGHT 0x96
+#define SONYPI_BAT1_PCTRM 0xa0
+#define SONYPI_BAT1_LEFT 0xa2
+#define SONYPI_BAT1_MAXRT 0xa4
+#define SONYPI_BAT2_PCTRM 0xa8
+#define SONYPI_BAT2_LEFT 0xaa
+#define SONYPI_BAT2_MAXRT 0xac
+#define SONYPI_BAT1_MAXTK 0xb0
+#define SONYPI_BAT1_FULL 0xb2
+#define SONYPI_BAT2_MAXTK 0xb8
+#define SONYPI_BAT2_FULL 0xba
+
+/* FAN0 information (reverse engineered from ACPI tables) */
+#define SONYPI_FAN0_STATUS 0x93
+#define SONYPI_TEMP_STATUS 0xC1
+
+/* ioports used for brightness and type2 events */
+#define SONYPI_DATA_IOPORT 0x62
+#define SONYPI_CST_IOPORT 0x66
+
+/* The set of possible ioports */
+struct sonypi_ioport_list {
+ u16 port1;
+ u16 port2;
+};
+
+static struct sonypi_ioport_list sonypi_type1_ioport_list[] = {
+ { 0x10c0, 0x10c4 }, /* looks like the default on C1Vx */
+ { 0x1080, 0x1084 },
+ { 0x1090, 0x1094 },
+ { 0x10a0, 0x10a4 },
+ { 0x10b0, 0x10b4 },
+ { 0x0, 0x0 }
+};
+
+static struct sonypi_ioport_list sonypi_type2_ioport_list[] = {
+ { 0x1080, 0x1084 },
+ { 0x10a0, 0x10a4 },
+ { 0x10c0, 0x10c4 },
+ { 0x10e0, 0x10e4 },
+ { 0x0, 0x0 }
+};
+
+/* same as in type 2 models */
+static struct sonypi_ioport_list *sonypi_type3_ioport_list =
+ sonypi_type2_ioport_list;
+
+/* The set of possible interrupts */
+struct sonypi_irq_list {
+ u16 irq;
+ u16 bits;
+};
+
+static struct sonypi_irq_list sonypi_type1_irq_list[] = {
+ { 11, 0x2 }, /* IRQ 11, GO22=0,GO23=1 in AML */
+ { 10, 0x1 }, /* IRQ 10, GO22=1,GO23=0 in AML */
+ { 5, 0x0 }, /* IRQ 5, GO22=0,GO23=0 in AML */
+ { 0, 0x3 } /* no IRQ, GO22=1,GO23=1 in AML */
+};
+
+static struct sonypi_irq_list sonypi_type2_irq_list[] = {
+ { 11, 0x80 }, /* IRQ 11, 0x80 in SIRQ in AML */
+ { 10, 0x40 }, /* IRQ 10, 0x40 in SIRQ in AML */
+ { 9, 0x20 }, /* IRQ 9, 0x20 in SIRQ in AML */
+ { 6, 0x10 }, /* IRQ 6, 0x10 in SIRQ in AML */
+ { 0, 0x00 } /* no IRQ, 0x00 in SIRQ in AML */
+};
+
+/* same as in type2 models */
+static struct sonypi_irq_list *sonypi_type3_irq_list = sonypi_type2_irq_list;
+
+#define SONYPI_CAMERA_BRIGHTNESS 0
+#define SONYPI_CAMERA_CONTRAST 1
+#define SONYPI_CAMERA_HUE 2
+#define SONYPI_CAMERA_COLOR 3
+#define SONYPI_CAMERA_SHARPNESS 4
+
+#define SONYPI_CAMERA_PICTURE 5
+#define SONYPI_CAMERA_EXPOSURE_MASK 0xC
+#define SONYPI_CAMERA_WHITE_BALANCE_MASK 0x3
+#define SONYPI_CAMERA_PICTURE_MODE_MASK 0x30
+#define SONYPI_CAMERA_MUTE_MASK 0x40
+
+/* the remaining registers need no read-retry loop (until not 0xff) */
+#define SONYPI_CAMERA_AGC 6
+#define SONYPI_CAMERA_AGC_MASK 0x30
+#define SONYPI_CAMERA_SHUTTER_MASK 0x7
+
+#define SONYPI_CAMERA_SHUTDOWN_REQUEST 7
+#define SONYPI_CAMERA_CONTROL 0x10
+
+#define SONYPI_CAMERA_STATUS 7
+#define SONYPI_CAMERA_STATUS_READY 0x2
+#define SONYPI_CAMERA_STATUS_POSITION 0x4
+
+#define SONYPI_DIRECTION_BACKWARDS 0x4
+
+#define SONYPI_CAMERA_REVISION 8
+#define SONYPI_CAMERA_ROMVERSION 9
+
+/* Event masks */
+#define SONYPI_JOGGER_MASK 0x00000001
+#define SONYPI_CAPTURE_MASK 0x00000002
+#define SONYPI_FNKEY_MASK 0x00000004
+#define SONYPI_BLUETOOTH_MASK 0x00000008
+#define SONYPI_PKEY_MASK 0x00000010
+#define SONYPI_BACK_MASK 0x00000020
+#define SONYPI_HELP_MASK 0x00000040
+#define SONYPI_LID_MASK 0x00000080
+#define SONYPI_ZOOM_MASK 0x00000100
+#define SONYPI_THUMBPHRASE_MASK 0x00000200
+#define SONYPI_MEYE_MASK 0x00000400
+#define SONYPI_MEMORYSTICK_MASK 0x00000800
+#define SONYPI_BATTERY_MASK 0x00001000
+#define SONYPI_WIRELESS_MASK 0x00002000
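+
+/* The 'mask' module parameter is the OR of the masks above; e.g.
+ * loading with mask=0x5 (SONYPI_JOGGER_MASK | SONYPI_FNKEY_MASK)
+ * reports only jog dial and Fn key events.
+ */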
+
+struct sonypi_event {
+ u8 data;
+ u8 event;
+};
+
+/* The set of possible button release events */
+static struct sonypi_event sonypi_releaseev[] = {
+ { 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0, 0 }
+};
+
+/* The set of possible jogger events */
+static struct sonypi_event sonypi_joggerev[] = {
+ { 0x1f, SONYPI_EVENT_JOGDIAL_UP },
+ { 0x01, SONYPI_EVENT_JOGDIAL_DOWN },
+ { 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED },
+ { 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED },
+ { 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP },
+ { 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN },
+ { 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED },
+ { 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED },
+ { 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP },
+ { 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN },
+ { 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED },
+ { 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED },
+ { 0x40, SONYPI_EVENT_JOGDIAL_PRESSED },
+ { 0, 0 }
+};
+
+/* The set of possible capture button events */
+static struct sonypi_event sonypi_captureev[] = {
+ { 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED },
+ { 0x07, SONYPI_EVENT_CAPTURE_PRESSED },
+ { 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED },
+ { 0, 0 }
+};
+
+/* The set of possible fnkeys events */
+static struct sonypi_event sonypi_fnkeyev[] = {
+ { 0x10, SONYPI_EVENT_FNKEY_ESC },
+ { 0x11, SONYPI_EVENT_FNKEY_F1 },
+ { 0x12, SONYPI_EVENT_FNKEY_F2 },
+ { 0x13, SONYPI_EVENT_FNKEY_F3 },
+ { 0x14, SONYPI_EVENT_FNKEY_F4 },
+ { 0x15, SONYPI_EVENT_FNKEY_F5 },
+ { 0x16, SONYPI_EVENT_FNKEY_F6 },
+ { 0x17, SONYPI_EVENT_FNKEY_F7 },
+ { 0x18, SONYPI_EVENT_FNKEY_F8 },
+ { 0x19, SONYPI_EVENT_FNKEY_F9 },
+ { 0x1a, SONYPI_EVENT_FNKEY_F10 },
+ { 0x1b, SONYPI_EVENT_FNKEY_F11 },
+ { 0x1c, SONYPI_EVENT_FNKEY_F12 },
+ { 0x1f, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x21, SONYPI_EVENT_FNKEY_1 },
+ { 0x22, SONYPI_EVENT_FNKEY_2 },
+ { 0x31, SONYPI_EVENT_FNKEY_D },
+ { 0x32, SONYPI_EVENT_FNKEY_E },
+ { 0x33, SONYPI_EVENT_FNKEY_F },
+ { 0x34, SONYPI_EVENT_FNKEY_S },
+ { 0x35, SONYPI_EVENT_FNKEY_B },
+ { 0x36, SONYPI_EVENT_FNKEY_ONLY },
+ { 0, 0 }
+};
+
+/* The set of possible program key events */
+static struct sonypi_event sonypi_pkeyev[] = {
+ { 0x01, SONYPI_EVENT_PKEY_P1 },
+ { 0x02, SONYPI_EVENT_PKEY_P2 },
+ { 0x04, SONYPI_EVENT_PKEY_P3 },
+ { 0x5c, SONYPI_EVENT_PKEY_P1 },
+ { 0, 0 }
+};
+
+/* The set of possible bluetooth events */
+static struct sonypi_event sonypi_blueev[] = {
+ { 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED },
+ { 0x59, SONYPI_EVENT_BLUETOOTH_ON },
+ { 0x5a, SONYPI_EVENT_BLUETOOTH_OFF },
+ { 0, 0 }
+};
+
+/* The set of possible wireless events */
+static struct sonypi_event sonypi_wlessev[] = {
+ { 0x59, SONYPI_EVENT_WIRELESS_ON },
+ { 0x5a, SONYPI_EVENT_WIRELESS_OFF },
+ { 0, 0 }
+};
+
+/* The set of possible back button events */
+static struct sonypi_event sonypi_backev[] = {
+ { 0x20, SONYPI_EVENT_BACK_PRESSED },
+ { 0, 0 }
+};
+
+/* The set of possible help button events */
+static struct sonypi_event sonypi_helpev[] = {
+ { 0x3b, SONYPI_EVENT_HELP_PRESSED },
+ { 0, 0 }
+};
+
+
+/* The set of possible lid events */
+static struct sonypi_event sonypi_lidev[] = {
+ { 0x51, SONYPI_EVENT_LID_CLOSED },
+ { 0x50, SONYPI_EVENT_LID_OPENED },
+ { 0, 0 }
+};
+
+/* The set of possible zoom events */
+static struct sonypi_event sonypi_zoomev[] = {
+ { 0x39, SONYPI_EVENT_ZOOM_PRESSED },
+ { 0, 0 }
+};
+
+/* The set of possible thumbphrase events */
+static struct sonypi_event sonypi_thumbphraseev[] = {
+ { 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED },
+ { 0, 0 }
+};
+
+/* The set of possible motioneye camera events */
+static struct sonypi_event sonypi_meyeev[] = {
+ { 0x00, SONYPI_EVENT_MEYE_FACE },
+ { 0x01, SONYPI_EVENT_MEYE_OPPOSITE },
+ { 0, 0 }
+};
+
+/* The set of possible memorystick events */
+static struct sonypi_event sonypi_memorystickev[] = {
+ { 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT },
+ { 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT },
+ { 0, 0 }
+};
+
+/* The set of possible battery events */
+static struct sonypi_event sonypi_batteryev[] = {
+ { 0x20, SONYPI_EVENT_BATTERY_INSERT },
+ { 0x30, SONYPI_EVENT_BATTERY_REMOVE },
+ { 0, 0 }
+};
+
+static struct sonypi_eventtypes {
+ int model;
+ u8 data;
+ unsigned long mask;
+ struct sonypi_event * events;
+} sonypi_eventtypes[] = {
+ { SONYPI_DEVICE_MODEL_TYPE1, 0, 0xffffffff, sonypi_releaseev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_LID_MASK, sonypi_lidev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev },
+
+ { SONYPI_DEVICE_MODEL_TYPE2, 0, 0xffffffff, sonypi_releaseev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x38, SONYPI_LID_MASK, sonypi_lidev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
+
+ { SONYPI_DEVICE_MODEL_TYPE3, 0, 0xffffffff, sonypi_releaseev },
+ { SONYPI_DEVICE_MODEL_TYPE3, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
+ { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { SONYPI_DEVICE_MODEL_TYPE3, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0 }
+};
+
+#define SONYPI_BUF_SIZE 128
+
+/* Correspondence table between sonypi events and input layer events */
+static struct {
+ int sonypiev;
+ int inputev;
+} sonypi_inputkeys[] = {
+ { SONYPI_EVENT_CAPTURE_PRESSED, KEY_CAMERA },
+ { SONYPI_EVENT_FNKEY_ONLY, KEY_FN },
+ { SONYPI_EVENT_FNKEY_ESC, KEY_FN_ESC },
+ { SONYPI_EVENT_FNKEY_F1, KEY_FN_F1 },
+ { SONYPI_EVENT_FNKEY_F2, KEY_FN_F2 },
+ { SONYPI_EVENT_FNKEY_F3, KEY_FN_F3 },
+ { SONYPI_EVENT_FNKEY_F4, KEY_FN_F4 },
+ { SONYPI_EVENT_FNKEY_F5, KEY_FN_F5 },
+ { SONYPI_EVENT_FNKEY_F6, KEY_FN_F6 },
+ { SONYPI_EVENT_FNKEY_F7, KEY_FN_F7 },
+ { SONYPI_EVENT_FNKEY_F8, KEY_FN_F8 },
+ { SONYPI_EVENT_FNKEY_F9, KEY_FN_F9 },
+ { SONYPI_EVENT_FNKEY_F10, KEY_FN_F10 },
+ { SONYPI_EVENT_FNKEY_F11, KEY_FN_F11 },
+ { SONYPI_EVENT_FNKEY_F12, KEY_FN_F12 },
+ { SONYPI_EVENT_FNKEY_1, KEY_FN_1 },
+ { SONYPI_EVENT_FNKEY_2, KEY_FN_2 },
+ { SONYPI_EVENT_FNKEY_D, KEY_FN_D },
+ { SONYPI_EVENT_FNKEY_E, KEY_FN_E },
+ { SONYPI_EVENT_FNKEY_F, KEY_FN_F },
+ { SONYPI_EVENT_FNKEY_S, KEY_FN_S },
+ { SONYPI_EVENT_FNKEY_B, KEY_FN_B },
+ { SONYPI_EVENT_BLUETOOTH_PRESSED, KEY_BLUE },
+ { SONYPI_EVENT_BLUETOOTH_ON, KEY_BLUE },
+ { SONYPI_EVENT_PKEY_P1, KEY_PROG1 },
+ { SONYPI_EVENT_PKEY_P2, KEY_PROG2 },
+ { SONYPI_EVENT_PKEY_P3, KEY_PROG3 },
+ { SONYPI_EVENT_BACK_PRESSED, KEY_BACK },
+ { SONYPI_EVENT_HELP_PRESSED, KEY_HELP },
+ { SONYPI_EVENT_ZOOM_PRESSED, KEY_ZOOM },
+ { SONYPI_EVENT_THUMBPHRASE_PRESSED, BTN_THUMB },
+ { 0, 0 },
+};
+
+struct sonypi_keypress {
+ struct input_dev *dev;
+ int key;
+};
+
+static struct sonypi_device {
+ struct pci_dev *dev;
+ u16 irq;
+ u16 bits;
+ u16 ioport1;
+ u16 ioport2;
+ u16 region_size;
+ u16 evtype_offset;
+ int camera_power;
+ int bluetooth_power;
+ struct mutex lock;
+ struct kfifo fifo;
+ spinlock_t fifo_lock;
+ wait_queue_head_t fifo_proc_list;
+ struct fasync_struct *fifo_async;
+ int open_count;
+ int model;
+ struct input_dev *input_jog_dev;
+ struct input_dev *input_key_dev;
+ struct work_struct input_work;
+ struct kfifo input_fifo;
+ spinlock_t input_fifo_lock;
+} sonypi_device;
+
+#define ITERATIONS_LONG 10000
+#define ITERATIONS_SHORT 10
+
+#define wait_on_command(quiet, command, iterations) { \
+ unsigned int n = iterations; \
+ while (--n && (command)) \
+ udelay(1); \
+ if (!n && (verbose || !quiet)) \
+ printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", __FILE__, __func__, __LINE__); \
+}
+
+#ifdef CONFIG_ACPI
+#define SONYPI_ACPI_ACTIVE (!acpi_disabled)
+#else
+#define SONYPI_ACPI_ACTIVE 0
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_ACPI
+static struct acpi_device *sonypi_acpi_device;
+static int acpi_driver_registered;
+#endif
+
+static int sonypi_ec_write(u8 addr, u8 value)
+{
+#ifdef CONFIG_ACPI
+ if (SONYPI_ACPI_ACTIVE)
+ return ec_write(addr, value);
+#endif
+ wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG);
+ outb_p(0x81, SONYPI_CST_IOPORT);
+ wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+ outb_p(addr, SONYPI_DATA_IOPORT);
+ wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+ outb_p(value, SONYPI_DATA_IOPORT);
+ wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+ return 0;
+}
+
+static int sonypi_ec_read(u8 addr, u8 *value)
+{
+#ifdef CONFIG_ACPI
+ if (SONYPI_ACPI_ACTIVE)
+ return ec_read(addr, value);
+#endif
+ wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG);
+ outb_p(0x80, SONYPI_CST_IOPORT);
+ wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+ outb_p(addr, SONYPI_DATA_IOPORT);
+ wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
+ *value = inb_p(SONYPI_DATA_IOPORT);
+ return 0;
+}
+
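+/* Read a 16-bit value assembled little-endian from two consecutive EC
+ * registers. */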
+static int ec_read16(u8 addr, u16 *value)
+{
+ u8 val_lb, val_hb;
+ if (sonypi_ec_read(addr, &val_lb))
+ return -1;
+ if (sonypi_ec_read(addr + 1, &val_hb))
+ return -1;
+ *value = val_lb | (val_hb << 8);
+ return 0;
+}
+
+/* Initializes the device - this comes from the AML code in the ACPI bios */
+static void sonypi_type1_srs(void)
+{
+ u32 v;
+
+ pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
+ v = (v & 0xFFFF0000) | ((u32) sonypi_device.ioport1);
+ pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
+
+ pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
+ v = (v & 0xFFF0FFFF) |
+ (((u32) sonypi_device.ioport1 ^ sonypi_device.ioport2) << 16);
+ pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
+
+ v = inl(SONYPI_IRQ_PORT);
+ v &= ~(((u32) 0x3) << SONYPI_IRQ_SHIFT);
+ v |= (((u32) sonypi_device.bits) << SONYPI_IRQ_SHIFT);
+ outl(v, SONYPI_IRQ_PORT);
+
+ pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
+ v = (v & 0xFF1FFFFF) | 0x00C00000;
+ pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
+}
+
+static void sonypi_type2_srs(void)
+{
+ if (sonypi_ec_write(SONYPI_SHIB, (sonypi_device.ioport1 & 0xFF00) >> 8))
+ printk(KERN_WARNING "ec_write failed\n");
+ if (sonypi_ec_write(SONYPI_SLOB, sonypi_device.ioport1 & 0x00FF))
+ printk(KERN_WARNING "ec_write failed\n");
+ if (sonypi_ec_write(SONYPI_SIRQ, sonypi_device.bits))
+ printk(KERN_WARNING "ec_write failed\n");
+ udelay(10);
+}
+
+static void sonypi_type3_srs(void)
+{
+ u16 v16;
+ u8 v8;
+
+ /* This model type uses the same initialization of
+ * the embedded controller as the type2 models. */
+ sonypi_type2_srs();
+
+ /* Initialization of PCI config space of the LPC interface bridge. */
+ v16 = (sonypi_device.ioport1 & 0xFFF0) | 0x01;
+ pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, v16);
+ pci_read_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, &v8);
+ v8 = (v8 & 0xCF) | 0x10;
+ pci_write_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, v8);
+}
+
+/* Disables the device - this comes from the AML code in the ACPI bios */
+static void sonypi_type1_dis(void)
+{
+ u32 v;
+
+ pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
+ v = v & 0xFF3FFFFF;
+ pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
+
+ v = inl(SONYPI_IRQ_PORT);
+ v |= (0x3 << SONYPI_IRQ_SHIFT);
+ outl(v, SONYPI_IRQ_PORT);
+}
+
+static void sonypi_type2_dis(void)
+{
+ if (sonypi_ec_write(SONYPI_SHIB, 0))
+ printk(KERN_WARNING "ec_write failed\n");
+ if (sonypi_ec_write(SONYPI_SLOB, 0))
+ printk(KERN_WARNING "ec_write failed\n");
+ if (sonypi_ec_write(SONYPI_SIRQ, 0))
+ printk(KERN_WARNING "ec_write failed\n");
+}
+
+static void sonypi_type3_dis(void)
+{
+ sonypi_type2_dis();
+ udelay(10);
+ pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, 0);
+}
+
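+/* Low-level command protocol: the command byte goes to ioport2, any
+ * argument bytes to ioport1, and the result is read back from ioport1;
+ * each write first waits for the busy bit (bit 1 of ioport2) to clear. */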
+static u8 sonypi_call1(u8 dev)
+{
+ u8 v1, v2;
+
+ wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+ outb(dev, sonypi_device.ioport2);
+ v1 = inb_p(sonypi_device.ioport2);
+ v2 = inb_p(sonypi_device.ioport1);
+ return v2;
+}
+
+static u8 sonypi_call2(u8 dev, u8 fn)
+{
+ u8 v1;
+
+ wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+ outb(dev, sonypi_device.ioport2);
+ wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+ outb(fn, sonypi_device.ioport1);
+ v1 = inb_p(sonypi_device.ioport1);
+ return v1;
+}
+
+static u8 sonypi_call3(u8 dev, u8 fn, u8 v)
+{
+ u8 v1;
+
+ wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+ outb(dev, sonypi_device.ioport2);
+ wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+ outb(fn, sonypi_device.ioport1);
+ wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
+ outb(v, sonypi_device.ioport1);
+ v1 = inb_p(sonypi_device.ioport1);
+ return v1;
+}
+
+#if 0
+/* Get brightness, hue etc. Unreliable... */
+static u8 sonypi_read(u8 fn)
+{
+ u8 v1, v2;
+ int n = 100;
+
+ while (n--) {
+ v1 = sonypi_call2(0x8f, fn);
+ v2 = sonypi_call2(0x8f, fn);
+ if (v1 == v2 && v1 != 0xff)
+ return v1;
+ }
+ return 0xff;
+}
+#endif
+
+/* Set brightness, hue etc */
+static void sonypi_set(u8 fn, u8 v)
+{
+ wait_on_command(0, sonypi_call3(0x90, fn, v), ITERATIONS_SHORT);
+}
+
+/* Tests if the camera is ready */
+static int sonypi_camera_ready(void)
+{
+ u8 v;
+
+ v = sonypi_call2(0x8f, SONYPI_CAMERA_STATUS);
+ return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY));
+}
+
+/* Turns the camera off */
+static void sonypi_camera_off(void)
+{
+ sonypi_set(SONYPI_CAMERA_PICTURE, SONYPI_CAMERA_MUTE_MASK);
+
+ if (!sonypi_device.camera_power)
+ return;
+
+ sonypi_call2(0x91, 0);
+ sonypi_device.camera_power = 0;
+}
+
+/* Turns the camera on */
+static void sonypi_camera_on(void)
+{
+ int i, j;
+
+ if (sonypi_device.camera_power)
+ return;
+
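+ /* Up to 5 power-on attempts, each waiting up to ~4s (400 x 10ms)
+ * for the camera to report ready. */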
+ for (j = 5; j > 0; j--) {
+
+ while (sonypi_call2(0x91, 0x1))
+ msleep(10);
+ sonypi_call1(0x93);
+
+ for (i = 400; i > 0; i--) {
+ if (sonypi_camera_ready())
+ break;
+ msleep(10);
+ }
+ if (i)
+ break;
+ }
+
+ if (j == 0) {
+ printk(KERN_WARNING "sonypi: failed to power on camera\n");
+ return;
+ }
+
+ sonypi_set(0x10, 0x5a);
+ sonypi_device.camera_power = 1;
+}
+
+/* sets the bluetooth subsystem power state */
+static void sonypi_setbluetoothpower(u8 state)
+{
+ state = !!state;
+
+ if (sonypi_device.bluetooth_power == state)
+ return;
+
+ sonypi_call2(0x96, state);
+ sonypi_call1(0x82);
+ sonypi_device.bluetooth_power = state;
+}
+
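+/* Deferred key release: the interrupt path reports only the key press and
+ * queues the keypress on input_fifo; this work handler emits the matching
+ * release event a few milliseconds later. */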
+static void input_keyrelease(struct work_struct *work)
+{
+ struct sonypi_keypress kp;
+
+ while (kfifo_out_locked(&sonypi_device.input_fifo, (unsigned char *)&kp,
+ sizeof(kp), &sonypi_device.input_fifo_lock)
+ == sizeof(kp)) {
+ msleep(10);
+ input_report_key(kp.dev, kp.key, 0);
+ input_sync(kp.dev);
+ }
+}
+
+static void sonypi_report_input_event(u8 event)
+{
+ struct input_dev *jog_dev = sonypi_device.input_jog_dev;
+ struct input_dev *key_dev = sonypi_device.input_key_dev;
+ struct sonypi_keypress kp = { NULL };
+ int i;
+
+ switch (event) {
+ case SONYPI_EVENT_JOGDIAL_UP:
+ case SONYPI_EVENT_JOGDIAL_UP_PRESSED:
+ input_report_rel(jog_dev, REL_WHEEL, 1);
+ input_sync(jog_dev);
+ break;
+
+ case SONYPI_EVENT_JOGDIAL_DOWN:
+ case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED:
+ input_report_rel(jog_dev, REL_WHEEL, -1);
+ input_sync(jog_dev);
+ break;
+
+ case SONYPI_EVENT_JOGDIAL_PRESSED:
+ kp.key = BTN_MIDDLE;
+ kp.dev = jog_dev;
+ break;
+
+ case SONYPI_EVENT_FNKEY_RELEASED:
+ /* Nothing, not all VAIOs generate this event */
+ break;
+
+ default:
+ for (i = 0; sonypi_inputkeys[i].sonypiev; i++)
+ if (event == sonypi_inputkeys[i].sonypiev) {
+ kp.dev = key_dev;
+ kp.key = sonypi_inputkeys[i].inputev;
+ break;
+ }
+ break;
+ }
+
+ if (kp.dev) {
+ input_report_key(kp.dev, kp.key, 1);
+ input_sync(kp.dev);
+ kfifo_in_locked(&sonypi_device.input_fifo,
+ (unsigned char *)&kp, sizeof(kp),
+ &sonypi_device.input_fifo_lock);
+ schedule_work(&sonypi_device.input_work);
+ }
+}
+
+/* Interrupt handler: some event is available */
+static irqreturn_t sonypi_irq(int irq, void *dev_id)
+{
+ u8 v1, v2, event = 0;
+ int i, j;
+
+ v1 = inb_p(sonypi_device.ioport1);
+ v2 = inb_p(sonypi_device.ioport1 + sonypi_device.evtype_offset);
+
+ for (i = 0; sonypi_eventtypes[i].model; i++) {
+ if (sonypi_device.model != sonypi_eventtypes[i].model)
+ continue;
+ if ((v2 & sonypi_eventtypes[i].data) !=
+ sonypi_eventtypes[i].data)
+ continue;
+ if (!(mask & sonypi_eventtypes[i].mask))
+ continue;
+ for (j = 0; sonypi_eventtypes[i].events[j].event; j++) {
+ if (v1 == sonypi_eventtypes[i].events[j].data) {
+ event = sonypi_eventtypes[i].events[j].event;
+ goto found;
+ }
+ }
+ }
+
+ if (verbose)
+ printk(KERN_WARNING
+ "sonypi: unknown event port1=0x%02x,port2=0x%02x\n",
+ v1, v2);
+ /* We need to return IRQ_HANDLED here because there *are*
+ * events belonging to the sonypi device we don't know about,
+ * but we still don't want those to pollute the logs... */
+ return IRQ_HANDLED;
+
+found:
+ if (verbose > 1)
+ printk(KERN_INFO
+ "sonypi: event port1=0x%02x,port2=0x%02x\n", v1, v2);
+
+ if (useinput)
+ sonypi_report_input_event(event);
+
+#ifdef CONFIG_ACPI
+ if (sonypi_acpi_device)
+ acpi_bus_generate_proc_event(sonypi_acpi_device, 1, event);
+#endif
+
+ kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event,
+ sizeof(event), &sonypi_device.fifo_lock);
+ kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN);
+ wake_up_interruptible(&sonypi_device.fifo_proc_list);
+
+ return IRQ_HANDLED;
+}
+
+static int sonypi_misc_fasync(int fd, struct file *filp, int on)
+{
+ return fasync_helper(fd, filp, on, &sonypi_device.fifo_async);
+}
+
+static int sonypi_misc_release(struct inode *inode, struct file *file)
+{
+ mutex_lock(&sonypi_device.lock);
+ sonypi_device.open_count--;
+ mutex_unlock(&sonypi_device.lock);
+ return 0;
+}
+
+static int sonypi_misc_open(struct inode *inode, struct file *file)
+{
+ mutex_lock(&sonypi_device.lock);
+ /* Flush input queue on first open */
+ if (!sonypi_device.open_count)
+ kfifo_reset(&sonypi_device.fifo);
+ sonypi_device.open_count++;
+ mutex_unlock(&sonypi_device.lock);
+
+ return 0;
+}
+
+static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ ssize_t ret;
+ unsigned char c;
+
+ if ((kfifo_len(&sonypi_device.fifo) == 0) &&
+ (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(sonypi_device.fifo_proc_list,
+ kfifo_len(&sonypi_device.fifo) != 0);
+ if (ret)
+ return ret;
+
+ while (ret < count &&
+ (kfifo_out_locked(&sonypi_device.fifo, &c, sizeof(c),
+ &sonypi_device.fifo_lock) == sizeof(c))) {
+ if (put_user(c, buf++))
+ return -EFAULT;
+ ret++;
+ }
+
+ if (ret > 0) {
+ struct inode *inode = file->f_path.dentry->d_inode;
+ inode->i_atime = current_fs_time(inode->i_sb);
+ }
+
+ return ret;
+}
+
+static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
+{
+ poll_wait(file, &sonypi_device.fifo_proc_list, wait);
+ if (kfifo_len(&sonypi_device.fifo))
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+
+static long sonypi_misc_ioctl(struct file *fp,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret = 0;
+ void __user *argp = (void __user *)arg;
+ u8 val8;
+ u16 val16;
+
+ mutex_lock(&sonypi_device.lock);
+ switch (cmd) {
+ case SONYPI_IOCGBRT:
+ if (sonypi_ec_read(SONYPI_LCD_LIGHT, &val8)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCSBRT:
+ if (copy_from_user(&val8, argp, sizeof(val8))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (sonypi_ec_write(SONYPI_LCD_LIGHT, val8))
+ ret = -EIO;
+ break;
+ case SONYPI_IOCGBAT1CAP:
+ if (ec_read16(SONYPI_BAT1_FULL, &val16)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val16, sizeof(val16)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBAT1REM:
+ if (ec_read16(SONYPI_BAT1_LEFT, &val16)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val16, sizeof(val16)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBAT2CAP:
+ if (ec_read16(SONYPI_BAT2_FULL, &val16)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val16, sizeof(val16)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBAT2REM:
+ if (ec_read16(SONYPI_BAT2_LEFT, &val16)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val16, sizeof(val16)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBATFLAGS:
+ if (sonypi_ec_read(SONYPI_BAT_FLAGS, &val8)) {
+ ret = -EIO;
+ break;
+ }
+ val8 &= 0x07;
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCGBLUE:
+ val8 = sonypi_device.bluetooth_power;
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCSBLUE:
+ if (copy_from_user(&val8, argp, sizeof(val8))) {
+ ret = -EFAULT;
+ break;
+ }
+ sonypi_setbluetoothpower(val8);
+ break;
+ /* FAN Controls */
+ case SONYPI_IOCGFAN:
+ if (sonypi_ec_read(SONYPI_FAN0_STATUS, &val8)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ case SONYPI_IOCSFAN:
+ if (copy_from_user(&val8, argp, sizeof(val8))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (sonypi_ec_write(SONYPI_FAN0_STATUS, val8))
+ ret = -EIO;
+ break;
+ /* GET Temperature (useful under APM) */
+ case SONYPI_IOCGTEMP:
+ if (sonypi_ec_read(SONYPI_TEMP_STATUS, &val8)) {
+ ret = -EIO;
+ break;
+ }
+ if (copy_to_user(argp, &val8, sizeof(val8)))
+ ret = -EFAULT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&sonypi_device.lock);
+ return ret;
+}
+
+static const struct file_operations sonypi_misc_fops = {
+ .owner = THIS_MODULE,
+ .read = sonypi_misc_read,
+ .poll = sonypi_misc_poll,
+ .open = sonypi_misc_open,
+ .release = sonypi_misc_release,
+ .fasync = sonypi_misc_fasync,
+ .unlocked_ioctl = sonypi_misc_ioctl,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice sonypi_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "sonypi",
+ .fops = &sonypi_misc_fops,
+};
+
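+/* Apply the model-specific setup and re-arm event reporting; judging by
+ * sonypi_disable() below, sonypi_call2(0x81, ...) presumably programs the
+ * event mask, and sonypi_call1(compat ? 0x92 : 0x82) picks the compat-mode
+ * variant of the init command. */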
+static void sonypi_enable(unsigned int camera_on)
+{
+ switch (sonypi_device.model) {
+ case SONYPI_DEVICE_MODEL_TYPE1:
+ sonypi_type1_srs();
+ break;
+ case SONYPI_DEVICE_MODEL_TYPE2:
+ sonypi_type2_srs();
+ break;
+ case SONYPI_DEVICE_MODEL_TYPE3:
+ sonypi_type3_srs();
+ break;
+ }
+
+ sonypi_call1(0x82);
+ sonypi_call2(0x81, 0xff);
+ sonypi_call1(compat ? 0x92 : 0x82);
+
+ /* Enable ACPI mode to get Fn key events */
+ if (!SONYPI_ACPI_ACTIVE && fnkeyinit)
+ outb(0xf0, 0xb2);
+
+ if (camera && camera_on)
+ sonypi_camera_on();
+}
+
+static int sonypi_disable(void)
+{
+ sonypi_call2(0x81, 0); /* make sure we don't get any more events */
+ if (camera)
+ sonypi_camera_off();
+
+ /* disable ACPI mode */
+ if (!SONYPI_ACPI_ACTIVE && fnkeyinit)
+ outb(0xf1, 0xb2);
+
+ switch (sonypi_device.model) {
+ case SONYPI_DEVICE_MODEL_TYPE1:
+ sonypi_type1_dis();
+ break;
+ case SONYPI_DEVICE_MODEL_TYPE2:
+ sonypi_type2_dis();
+ break;
+ case SONYPI_DEVICE_MODEL_TYPE3:
+ sonypi_type3_dis();
+ break;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static int sonypi_acpi_add(struct acpi_device *device)
+{
+ sonypi_acpi_device = device;
+ strcpy(acpi_device_name(device), "Sony laptop hotkeys");
+ strcpy(acpi_device_class(device), "sony/hotkey");
+ return 0;
+}
+
+static int sonypi_acpi_remove(struct acpi_device *device, int type)
+{
+ sonypi_acpi_device = NULL;
+ return 0;
+}
+
+static const struct acpi_device_id sonypi_device_ids[] = {
+ {"SNY6001", 0},
+ {"", 0},
+};
+
+static struct acpi_driver sonypi_acpi_driver = {
+ .name = "sonypi",
+ .class = "hkey",
+ .ids = sonypi_device_ids,
+ .ops = {
+ .add = sonypi_acpi_add,
+ .remove = sonypi_acpi_remove,
+ },
+};
+#endif
+
+static int __devinit sonypi_create_input_devices(struct platform_device *pdev)
+{
+ struct input_dev *jog_dev;
+ struct input_dev *key_dev;
+ int i;
+ int error;
+
+ sonypi_device.input_jog_dev = jog_dev = input_allocate_device();
+ if (!jog_dev)
+ return -ENOMEM;
+
+ jog_dev->name = "Sony Vaio Jogdial";
+ jog_dev->id.bustype = BUS_ISA;
+ jog_dev->id.vendor = PCI_VENDOR_ID_SONY;
+ jog_dev->dev.parent = &pdev->dev;
+
+ jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
+ jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE);
+ jog_dev->relbit[0] = BIT_MASK(REL_WHEEL);
+
+ sonypi_device.input_key_dev = key_dev = input_allocate_device();
+ if (!key_dev) {
+ error = -ENOMEM;
+ goto err_free_jogdev;
+ }
+
+ key_dev->name = "Sony Vaio Keys";
+ key_dev->id.bustype = BUS_ISA;
+ key_dev->id.vendor = PCI_VENDOR_ID_SONY;
+ key_dev->dev.parent = &pdev->dev;
+
+ /* Initialize the Input Drivers: special keys */
+ key_dev->evbit[0] = BIT_MASK(EV_KEY);
+ for (i = 0; sonypi_inputkeys[i].sonypiev; i++)
+ if (sonypi_inputkeys[i].inputev)
+ set_bit(sonypi_inputkeys[i].inputev, key_dev->keybit);
+
+ error = input_register_device(jog_dev);
+ if (error)
+ goto err_free_keydev;
+
+ error = input_register_device(key_dev);
+ if (error)
+ goto err_unregister_jogdev;
+
+ return 0;
+
+ err_unregister_jogdev:
+ input_unregister_device(jog_dev);
+ /* Set to NULL so we don't free it again below */
+ jog_dev = NULL;
+ err_free_keydev:
+ input_free_device(key_dev);
+ sonypi_device.input_key_dev = NULL;
+ err_free_jogdev:
+ input_free_device(jog_dev);
+ sonypi_device.input_jog_dev = NULL;
+
+ return error;
+}
+
+static int __devinit sonypi_setup_ioports(struct sonypi_device *dev,
+ const struct sonypi_ioport_list *ioport_list)
+{
+ /* Try to detect whether sony-laptop is in use and has thus
+ * already requested one of the known ioports. As with the
+ * deprecated check_region(), this is racy: we probe multiple
+ * ioports and one of them can be requested between this check
+ * and the subsequent request. Still, as an attempt to be
+ * somewhat more user-friendly than we currently are, this is
+ * good enough.
+ */
+ const struct sonypi_ioport_list *check = ioport_list;
+ while (check_ioport && check->port1) {
+ if (!request_region(check->port1,
+ sonypi_device.region_size,
+ "Sony Programmable I/O Device Check")) {
+ printk(KERN_ERR "sonypi: ioport 0x%.4x busy, using sony-laptop? "
+ "if not use check_ioport=0\n",
+ check->port1);
+ return -EBUSY;
+ }
+ release_region(check->port1, sonypi_device.region_size);
+ check++;
+ }
+
+ while (ioport_list->port1) {
+
+ if (request_region(ioport_list->port1,
+ sonypi_device.region_size,
+ "Sony Programmable I/O Device")) {
+ dev->ioport1 = ioport_list->port1;
+ dev->ioport2 = ioport_list->port2;
+ return 0;
+ }
+ ioport_list++;
+ }
+
+ return -EBUSY;
+}
+
+static int __devinit sonypi_setup_irq(struct sonypi_device *dev,
+ const struct sonypi_irq_list *irq_list)
+{
+ while (irq_list->irq) {
+
+ if (!request_irq(irq_list->irq, sonypi_irq,
+ IRQF_SHARED, "sonypi", sonypi_irq)) {
+ dev->irq = irq_list->irq;
+ dev->bits = irq_list->bits;
+ return 0;
+ }
+ irq_list++;
+ }
+
+ return -EBUSY;
+}
+
+static void __devinit sonypi_display_info(void)
+{
+ printk(KERN_INFO "sonypi: detected type%d model, "
+ "verbose = %d, fnkeyinit = %s, camera = %s, "
+ "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n",
+ sonypi_device.model,
+ verbose,
+ fnkeyinit ? "on" : "off",
+ camera ? "on" : "off",
+ compat ? "on" : "off",
+ mask,
+ useinput ? "on" : "off",
+ SONYPI_ACPI_ACTIVE ? "on" : "off");
+ printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n",
+ sonypi_device.irq,
+ sonypi_device.ioport1, sonypi_device.ioport2);
+
+ if (minor == -1)
+ printk(KERN_INFO "sonypi: device allocated minor is %d\n",
+ sonypi_misc_device.minor);
+}
+
+static int __devinit sonypi_probe(struct platform_device *dev)
+{
+ const struct sonypi_ioport_list *ioport_list;
+ const struct sonypi_irq_list *irq_list;
+ struct pci_dev *pcidev;
+ int error;
+
+ printk(KERN_WARNING "sonypi: please try the sony-laptop module instead "
+ "and report failures, see also "
+ "http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n");
+
+ spin_lock_init(&sonypi_device.fifo_lock);
+ error = kfifo_alloc(&sonypi_device.fifo, SONYPI_BUF_SIZE, GFP_KERNEL);
+ if (error) {
+ printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
+ return error;
+ }
+
+ init_waitqueue_head(&sonypi_device.fifo_proc_list);
+ mutex_init(&sonypi_device.lock);
+ sonypi_device.bluetooth_power = -1;
+
+ if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82371AB_3, NULL)))
+ sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE1;
+ else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH6_1, NULL)))
+ sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3;
+ else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH7_1, NULL)))
+ sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3;
+ else
+ sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE2;
+
+ if (pcidev && pci_enable_device(pcidev)) {
+ printk(KERN_ERR "sonypi: pci_enable_device failed\n");
+ error = -EIO;
+ goto err_put_pcidev;
+ }
+
+ sonypi_device.dev = pcidev;
+
+ if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) {
+ ioport_list = sonypi_type1_ioport_list;
+ sonypi_device.region_size = SONYPI_TYPE1_REGION_SIZE;
+ sonypi_device.evtype_offset = SONYPI_TYPE1_EVTYPE_OFFSET;
+ irq_list = sonypi_type1_irq_list;
+ } else if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) {
+ ioport_list = sonypi_type2_ioport_list;
+ sonypi_device.region_size = SONYPI_TYPE2_REGION_SIZE;
+ sonypi_device.evtype_offset = SONYPI_TYPE2_EVTYPE_OFFSET;
+ irq_list = sonypi_type2_irq_list;
+ } else {
+ ioport_list = sonypi_type3_ioport_list;
+ sonypi_device.region_size = SONYPI_TYPE3_REGION_SIZE;
+ sonypi_device.evtype_offset = SONYPI_TYPE3_EVTYPE_OFFSET;
+ irq_list = sonypi_type3_irq_list;
+ }
+
+ error = sonypi_setup_ioports(&sonypi_device, ioport_list);
+ if (error) {
+ printk(KERN_ERR "sonypi: failed to request ioports\n");
+ goto err_disable_pcidev;
+ }
+
+ error = sonypi_setup_irq(&sonypi_device, irq_list);
+ if (error) {
+ printk(KERN_ERR "sonypi: request_irq failed\n");
+ goto err_free_ioports;
+ }
+
+ if (minor != -1)
+ sonypi_misc_device.minor = minor;
+ error = misc_register(&sonypi_misc_device);
+ if (error) {
+ printk(KERN_ERR "sonypi: misc_register failed\n");
+ goto err_free_irq;
+ }
+
+ sonypi_display_info();
+
+ if (useinput) {
+
+ error = sonypi_create_input_devices(dev);
+ if (error) {
+ printk(KERN_ERR
+ "sonypi: failed to create input devices\n");
+ goto err_miscdev_unregister;
+ }
+
+ spin_lock_init(&sonypi_device.input_fifo_lock);
+ error = kfifo_alloc(&sonypi_device.input_fifo, SONYPI_BUF_SIZE,
+ GFP_KERNEL);
+ if (error) {
+ printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
+ goto err_inpdev_unregister;
+ }
+
+ INIT_WORK(&sonypi_device.input_work, input_keyrelease);
+ }
+
+ sonypi_enable(0);
+
+ return 0;
+
+ err_inpdev_unregister:
+ input_unregister_device(sonypi_device.input_key_dev);
+ input_unregister_device(sonypi_device.input_jog_dev);
+ err_miscdev_unregister:
+ misc_deregister(&sonypi_misc_device);
+ err_free_irq:
+ free_irq(sonypi_device.irq, sonypi_irq);
+ err_free_ioports:
+ release_region(sonypi_device.ioport1, sonypi_device.region_size);
+ err_disable_pcidev:
+ if (pcidev)
+ pci_disable_device(pcidev);
+ err_put_pcidev:
+ pci_dev_put(pcidev);
+ kfifo_free(&sonypi_device.fifo);
+
+ return error;
+}
+
+static int __devexit sonypi_remove(struct platform_device *dev)
+{
+ sonypi_disable();
+
+ synchronize_irq(sonypi_device.irq);
+ flush_work_sync(&sonypi_device.input_work);
+
+ if (useinput) {
+ input_unregister_device(sonypi_device.input_key_dev);
+ input_unregister_device(sonypi_device.input_jog_dev);
+ kfifo_free(&sonypi_device.input_fifo);
+ }
+
+ misc_deregister(&sonypi_misc_device);
+
+ free_irq(sonypi_device.irq, sonypi_irq);
+ release_region(sonypi_device.ioport1, sonypi_device.region_size);
+
+ if (sonypi_device.dev) {
+ pci_disable_device(sonypi_device.dev);
+ pci_dev_put(sonypi_device.dev);
+ }
+
+ kfifo_free(&sonypi_device.fifo);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int old_camera_power;
+
+static int sonypi_suspend(struct platform_device *dev, pm_message_t state)
+{
+ old_camera_power = sonypi_device.camera_power;
+ sonypi_disable();
+
+ return 0;
+}
+
+static int sonypi_resume(struct platform_device *dev)
+{
+ sonypi_enable(old_camera_power);
+ return 0;
+}
+#else
+#define sonypi_suspend NULL
+#define sonypi_resume NULL
+#endif
+
+static void sonypi_shutdown(struct platform_device *dev)
+{
+ sonypi_disable();
+}
+
+static struct platform_driver sonypi_driver = {
+ .driver = {
+ .name = "sonypi",
+ .owner = THIS_MODULE,
+ },
+ .probe = sonypi_probe,
+ .remove = __devexit_p(sonypi_remove),
+ .shutdown = sonypi_shutdown,
+ .suspend = sonypi_suspend,
+ .resume = sonypi_resume,
+};
+
+static struct platform_device *sonypi_platform_device;
+
+static struct dmi_system_id __initdata sonypi_dmi_table[] = {
+ {
+ .ident = "Sony Vaio",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"),
+ },
+ },
+ {
+ .ident = "Sony Vaio",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"),
+ },
+ },
+ { }
+};
+
+static int __init sonypi_init(void)
+{
+ int error;
+
+ printk(KERN_INFO
+ "sonypi: Sony Programmable I/O Controller Driver v%s.\n",
+ SONYPI_DRIVER_VERSION);
+
+ if (!dmi_check_system(sonypi_dmi_table))
+ return -ENODEV;
+
+ error = platform_driver_register(&sonypi_driver);
+ if (error)
+ return error;
+
+ sonypi_platform_device = platform_device_alloc("sonypi", -1);
+ if (!sonypi_platform_device) {
+ error = -ENOMEM;
+ goto err_driver_unregister;
+ }
+
+ error = platform_device_add(sonypi_platform_device);
+ if (error)
+ goto err_free_device;
+
+#ifdef CONFIG_ACPI
+ if (acpi_bus_register_driver(&sonypi_acpi_driver) >= 0)
+ acpi_driver_registered = 1;
+#endif
+
+ return 0;
+
+ err_free_device:
+ platform_device_put(sonypi_platform_device);
+ err_driver_unregister:
+ platform_driver_unregister(&sonypi_driver);
+ return error;
+}
+
+static void __exit sonypi_exit(void)
+{
+#ifdef CONFIG_ACPI
+ if (acpi_driver_registered)
+ acpi_bus_unregister_driver(&sonypi_acpi_driver);
+#endif
+ platform_device_unregister(sonypi_platform_device);
+ platform_driver_unregister(&sonypi_driver);
+ printk(KERN_INFO "sonypi: removed.\n");
+}
+
+module_init(sonypi_init);
+module_exit(sonypi_exit);
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
new file mode 100644
index 00000000..ad264185
--- /dev/null
+++ b/drivers/char/tb0219.c
@@ -0,0 +1,372 @@
+/*
+ * Driver for TANBAC TB0219 base board.
+ *
+ * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/reboot.h>
+#include <asm/vr41xx/giu.h>
+#include <asm/vr41xx/tb0219.h>
+
+MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
+MODULE_DESCRIPTION("TANBAC TB0219 base board driver");
+MODULE_LICENSE("GPL");
+
+static int major; /* default is dynamic major device number */
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+
+static void (*old_machine_restart)(char *command);
+static void __iomem *tb0219_base;
+static DEFINE_SPINLOCK(tb0219_lock);
+
+#define tb0219_read(offset) readw(tb0219_base + (offset))
+#define tb0219_write(offset, value) writew((value), tb0219_base + (offset))
+
+#define TB0219_START 0x0a000000UL
+#define TB0219_SIZE 0x20UL
+
+#define TB0219_LED 0x00
+#define TB0219_GPIO_INPUT 0x02
+#define TB0219_GPIO_OUTPUT 0x04
+#define TB0219_DIP_SWITCH 0x06
+#define TB0219_MISC 0x08
+#define TB0219_RESET 0x0e
+#define TB0219_PCI_SLOT1_IRQ_STATUS 0x10
+#define TB0219_PCI_SLOT2_IRQ_STATUS 0x12
+#define TB0219_PCI_SLOT3_IRQ_STATUS 0x14
+
+typedef enum {
+ TYPE_LED,
+ TYPE_GPIO_OUTPUT,
+} tb0219_type_t;
+
+/*
+ * Minor device number
+ * 0 = 7 segment LED
+ *
+ * 16 = GPIO IN 0
+ * 17 = GPIO IN 1
+ * 18 = GPIO IN 2
+ * 19 = GPIO IN 3
+ * 20 = GPIO IN 4
+ * 21 = GPIO IN 5
+ * 22 = GPIO IN 6
+ * 23 = GPIO IN 7
+ *
+ * 32 = GPIO OUT 0
+ * 33 = GPIO OUT 1
+ * 34 = GPIO OUT 2
+ * 35 = GPIO OUT 3
+ * 36 = GPIO OUT 4
+ * 37 = GPIO OUT 5
+ * 38 = GPIO OUT 6
+ * 39 = GPIO OUT 7
+ *
+ * 48 = DIP switch 1
+ * 49 = DIP switch 2
+ * 50 = DIP switch 3
+ * 51 = DIP switch 4
+ * 52 = DIP switch 5
+ * 53 = DIP switch 6
+ * 54 = DIP switch 7
+ * 55 = DIP switch 8
+ */
+
+static inline char get_led(void)
+{
+ return (char)tb0219_read(TB0219_LED);
+}
+
+static inline char get_gpio_input_pin(unsigned int pin)
+{
+ uint16_t values;
+
+ values = tb0219_read(TB0219_GPIO_INPUT);
+ if (values & (1 << pin))
+ return '1';
+
+ return '0';
+}
+
+static inline char get_gpio_output_pin(unsigned int pin)
+{
+ uint16_t values;
+
+ values = tb0219_read(TB0219_GPIO_OUTPUT);
+ if (values & (1 << pin))
+ return '1';
+
+ return '0';
+}
+
+static inline char get_dip_switch(unsigned int pin)
+{
+ uint16_t values;
+
+ values = tb0219_read(TB0219_DIP_SWITCH);
+ if (values & (1 << pin))
+ return '1';
+
+ return '0';
+}
+
+static inline int set_led(char command)
+{
+ tb0219_write(TB0219_LED, command);
+
+ return 0;
+}
+
+static inline int set_gpio_output_pin(unsigned int pin, char command)
+{
+ unsigned long flags;
+ uint16_t value;
+
+ if (command != '0' && command != '1')
+ return -EINVAL;
+
+ spin_lock_irqsave(&tb0219_lock, flags);
+ value = tb0219_read(TB0219_GPIO_OUTPUT);
+ if (command == '0')
+ value &= ~(1 << pin);
+ else
+ value |= 1 << pin;
+ tb0219_write(TB0219_GPIO_OUTPUT, value);
+ spin_unlock_irqrestore(&tb0219_lock, flags);
+
+ return 0;
+}
+
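+/* Character-device interface: reads return ASCII '0'/'1' for each GPIO or
+ * DIP switch bit (minor 0 returns the raw LED register); GPIO writes accept
+ * the same ASCII values, while the LED register takes the raw byte. */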
+static ssize_t tanbac_tb0219_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ unsigned int minor;
+ char value;
+
+ minor = iminor(file->f_path.dentry->d_inode);
+ switch (minor) {
+ case 0:
+ value = get_led();
+ break;
+ case 16 ... 23:
+ value = get_gpio_input_pin(minor - 16);
+ break;
+ case 32 ... 39:
+ value = get_gpio_output_pin(minor - 32);
+ break;
+ case 48 ... 55:
+ value = get_dip_switch(minor - 48);
+ break;
+ default:
+ return -EBADF;
+ }
+
+ if (len <= 0)
+ return -EFAULT;
+
+ if (put_user(value, buf))
+ return -EFAULT;
+
+ return 1;
+}
+
+static ssize_t tanbac_tb0219_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ unsigned int minor;
+ tb0219_type_t type;
+ size_t i;
+ int retval = 0;
+ char c;
+
+ minor = iminor(file->f_path.dentry->d_inode);
+ switch (minor) {
+ case 0:
+ type = TYPE_LED;
+ break;
+ case 32 ... 39:
+ type = TYPE_GPIO_OUTPUT;
+ break;
+ default:
+ return -EBADF;
+ }
+
+ for (i = 0; i < len; i++) {
+ if (get_user(c, data + i))
+ return -EFAULT;
+
+ switch (type) {
+ case TYPE_LED:
+ retval = set_led(c);
+ break;
+ case TYPE_GPIO_OUTPUT:
+ retval = set_gpio_output_pin(minor - 32, c);
+ break;
+ }
+
+ if (retval < 0)
+ break;
+ }
+
+ return i;
+}
+
+static int tanbac_tb0219_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor;
+
+ minor = iminor(inode);
+ switch (minor) {
+ case 0:
+ case 16 ... 23:
+ case 32 ... 39:
+ case 48 ... 55:
+ return nonseekable_open(inode, file);
+ default:
+ break;
+ }
+
+ return -EBADF;
+}
+
+static int tanbac_tb0219_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations tb0219_fops = {
+ .owner = THIS_MODULE,
+ .read = tanbac_tb0219_read,
+ .write = tanbac_tb0219_write,
+ .open = tanbac_tb0219_open,
+ .release = tanbac_tb0219_release,
+ .llseek = no_llseek,
+};
+
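+/* Machine-restart hook: a write to the reset register appears to trigger a
+ * board-level reset (the written value itself looks irrelevant here). */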
+static void tb0219_restart(char *command)
+{
+ tb0219_write(TB0219_RESET, 0);
+}
+
+static void tb0219_pci_irq_init(void)
+{
+ /* PCI Slot 1 */
+ vr41xx_set_irq_trigger(TB0219_PCI_SLOT1_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH);
+ vr41xx_set_irq_level(TB0219_PCI_SLOT1_PIN, IRQ_LEVEL_LOW);
+
+ /* PCI Slot 2 */
+ vr41xx_set_irq_trigger(TB0219_PCI_SLOT2_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH);
+ vr41xx_set_irq_level(TB0219_PCI_SLOT2_PIN, IRQ_LEVEL_LOW);
+
+ /* PCI Slot 3 */
+ vr41xx_set_irq_trigger(TB0219_PCI_SLOT3_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH);
+ vr41xx_set_irq_level(TB0219_PCI_SLOT3_PIN, IRQ_LEVEL_LOW);
+}
+
+static int __devinit tb0219_probe(struct platform_device *dev)
+{
+ int retval;
+
+ if (request_mem_region(TB0219_START, TB0219_SIZE, "TB0219") == NULL)
+ return -EBUSY;
+
+ tb0219_base = ioremap(TB0219_START, TB0219_SIZE);
+ if (tb0219_base == NULL) {
+ release_mem_region(TB0219_START, TB0219_SIZE);
+ return -ENOMEM;
+ }
+
+ retval = register_chrdev(major, "TB0219", &tb0219_fops);
+ if (retval < 0) {
+ iounmap(tb0219_base);
+ tb0219_base = NULL;
+ release_mem_region(TB0219_START, TB0219_SIZE);
+ return retval;
+ }
+
+ old_machine_restart = _machine_restart;
+ _machine_restart = tb0219_restart;
+
+ tb0219_pci_irq_init();
+
+ if (major == 0) {
+ major = retval;
+ printk(KERN_INFO "TB0219: major number %d\n", major);
+ }
+
+ return 0;
+}
+
+static int __devexit tb0219_remove(struct platform_device *dev)
+{
+ _machine_restart = old_machine_restart;
+
+ iounmap(tb0219_base);
+ tb0219_base = NULL;
+
+ release_mem_region(TB0219_START, TB0219_SIZE);
+
+ return 0;
+}
+
+static struct platform_device *tb0219_platform_device;
+
+static struct platform_driver tb0219_device_driver = {
+ .probe = tb0219_probe,
+ .remove = __devexit_p(tb0219_remove),
+ .driver = {
+ .name = "TB0219",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tanbac_tb0219_init(void)
+{
+ int retval;
+
+ tb0219_platform_device = platform_device_alloc("TB0219", -1);
+ if (!tb0219_platform_device)
+ return -ENOMEM;
+
+ retval = platform_device_add(tb0219_platform_device);
+ if (retval < 0) {
+ platform_device_put(tb0219_platform_device);
+ return retval;
+ }
+
+ retval = platform_driver_register(&tb0219_device_driver);
+ if (retval < 0)
+ platform_device_unregister(tb0219_platform_device);
+
+ return retval;
+}
+
+static void __exit tanbac_tb0219_exit(void)
+{
+ platform_driver_unregister(&tb0219_device_driver);
+ platform_device_unregister(tb0219_platform_device);
+}
+
+module_init(tanbac_tb0219_init);
+module_exit(tanbac_tb0219_exit);
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
new file mode 100644
index 00000000..0c964cdc
--- /dev/null
+++ b/drivers/char/tlclk.c
@@ -0,0 +1,935 @@
+/*
+ * Telecom Clock driver for Intel NetStructure(tm) MPCBL0010
+ *
+ * Copyright (C) 2005 Kontron Canada
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <sebastien.bouchard@ca.kontron.com> and the current
+ * Maintainer <mark.gross@intel.com>
+ *
+ * Description : This is the TELECOM CLOCK module driver for the
+ * MPCBL0010 ATCA computer.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <asm/io.h> /* inb/outb */
+#include <asm/uaccess.h>
+
+MODULE_AUTHOR("Sebastien Bouchard <sebastien.bouchard@ca.kontron.com>");
+MODULE_LICENSE("GPL");
+
+/* Hardware reset of the PLL */
+#define RESET_ON 0x00
+#define RESET_OFF 0x01
+
+/* MODE SELECT */
+#define NORMAL_MODE 0x00
+#define HOLDOVER_MODE 0x10
+#define FREERUN_MODE 0x20
+
+/* FILTER SELECT */
+#define FILTER_6HZ 0x04
+#define FILTER_12HZ 0x00
+
+/* SELECT REFERENCE FREQUENCY */
+#define REF_CLK1_8kHz 0x00
+#define REF_CLK2_19_44MHz 0x02
+
+/* Select primary or secondary redundant clock */
+#define PRIMARY_CLOCK 0x00
+#define SECONDARY_CLOCK 0x01
+
+/* CLOCK TRANSMISSION DEFINE */
+#define CLK_8kHz 0xff
+#define CLK_16_384MHz 0xfb
+
+#define CLK_1_544MHz 0x00
+#define CLK_2_048MHz 0x01
+#define CLK_4_096MHz 0x02
+#define CLK_6_312MHz 0x03
+#define CLK_8_192MHz 0x04
+#define CLK_19_440MHz 0x06
+
+#define CLK_8_592MHz 0x08
+#define CLK_11_184MHz 0x09
+#define CLK_34_368MHz 0x0b
+#define CLK_44_736MHz 0x0a
+
+/* RECEIVED REFERENCE */
+#define AMC_B1 0
+#define AMC_B2 1
+
+/* HARDWARE SWITCHING DEFINE */
+#define HW_ENABLE 0x80
+#define HW_DISABLE 0x00
+
+/* HARDWARE SWITCHING MODE DEFINE */
+#define PLL_HOLDOVER 0x40
+#define LOST_CLOCK 0x00
+
+/* ALARMS DEFINE */
+#define UNLOCK_MASK 0x10
+#define HOLDOVER_MASK 0x20
+#define SEC_LOST_MASK 0x40
+#define PRI_LOST_MASK 0x80
+
+/* INTERRUPT CAUSE DEFINE */
+
+#define PRI_LOS_01_MASK 0x01
+#define PRI_LOS_10_MASK 0x02
+
+#define SEC_LOS_01_MASK 0x04
+#define SEC_LOS_10_MASK 0x08
+
+#define HOLDOVER_01_MASK 0x10
+#define HOLDOVER_10_MASK 0x20
+
+#define UNLOCK_01_MASK 0x40
+#define UNLOCK_10_MASK 0x80
+
+struct tlclk_alarms {
+ __u32 lost_clocks;
+ __u32 lost_primary_clock;
+ __u32 lost_secondary_clock;
+ __u32 primary_clock_back;
+ __u32 secondary_clock_back;
+ __u32 switchover_primary;
+ __u32 switchover_secondary;
+ __u32 pll_holdover;
+ __u32 pll_end_holdover;
+ __u32 pll_lost_sync;
+ __u32 pll_sync;
+};
+/* Telecom clock I/O register definition */
+#define TLCLK_BASE 0xa08
+#define TLCLK_REG0 TLCLK_BASE
+#define TLCLK_REG1 (TLCLK_BASE+1)
+#define TLCLK_REG2 (TLCLK_BASE+2)
+#define TLCLK_REG3 (TLCLK_BASE+3)
+#define TLCLK_REG4 (TLCLK_BASE+4)
+#define TLCLK_REG5 (TLCLK_BASE+5)
+#define TLCLK_REG6 (TLCLK_BASE+6)
+#define TLCLK_REG7 (TLCLK_BASE+7)
+
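+/* Read-modify-write an I/O port: bits that are zero in `mask` are cleared,
+ * then `val` is ORed in. */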
+#define SET_PORT_BITS(port, mask, val) outb(((inb(port) & mask) | val), port)
+
+/* 0 = Dynamic allocation of the major device number */
+#define TLCLK_MAJOR 0
+
+/* sysfs interface definition:
+Upon loading the driver will create a sysfs directory under
+/sys/devices/platform/telco_clock.
+
+This directory exports the following interfaces. Their operation is
+documented in the MPCBL0010 TPS under the Telecom Clock API section, 11.4.
+alarms :
+current_ref :
+received_ref_clk3a :
+received_ref_clk3b :
+enable_clk3a_output :
+enable_clk3b_output :
+enable_clka0_output :
+enable_clka1_output :
+enable_clkb0_output :
+enable_clkb1_output :
+filter_select :
+hardware_switching :
+hardware_switching_mode :
+telclock_version :
+mode_select :
+refalign :
+reset :
+select_amcb1_transmit_clock :
+select_amcb2_transmit_clock :
+select_redundant_clock :
+select_ref_frequency :
+
+All sysfs interfaces are integers in hex format, i.e. echo 99 > refalign
+has the same effect as echo 0x99 > refalign.
+*/
+
+static unsigned int telclk_interrupt;
+
+static int int_events; /* Events that generated an interrupt */
+static int got_event; /* whether event processing has been done */
+
+static void switchover_timeout(unsigned long data);
+static struct timer_list switchover_timer =
+ TIMER_INITIALIZER(switchover_timeout, 0, 0);
+static unsigned long tlclk_timer_data;
+
+static struct tlclk_alarms *alarm_events;
+
+static DEFINE_SPINLOCK(event_lock);
+
+static int tlclk_major = TLCLK_MAJOR;
+
+static irqreturn_t tlclk_interrupt(int irq, void *dev_id);
+
+static DECLARE_WAIT_QUEUE_HEAD(wq);
+
+static unsigned long useflags;
+static DEFINE_MUTEX(tlclk_mutex);
+
+static int tlclk_open(struct inode *inode, struct file *filp)
+{
+ int result;
+
+ mutex_lock(&tlclk_mutex);
+ if (test_and_set_bit(0, &useflags)) {
+ result = -EBUSY;
+ /* this legacy device is always one per system and it doesn't
+ * know how to handle multiple concurrent clients.
+ */
+ goto out;
+ }
+
+ /* Make sure there is no interrupt pending while
+ * initialising interrupt handler */
+ inb(TLCLK_REG6);
+
+ /* This device is wired through the FPGA IO space of the ATCA blade
+ * we can't share this IRQ */
+ result = request_irq(telclk_interrupt, &tlclk_interrupt,
+ IRQF_DISABLED, "telco_clock", tlclk_interrupt);
+ if (result == -EBUSY)
+ printk(KERN_ERR "tlclk: Interrupt can't be reserved.\n");
+ else
+ inb(TLCLK_REG6); /* Clear interrupt events */
+
+out:
+ mutex_unlock(&tlclk_mutex);
+ return result;
+}
+
+static int tlclk_release(struct inode *inode, struct file *filp)
+{
+ free_irq(telclk_interrupt, tlclk_interrupt);
+ clear_bit(0, &useflags);
+
+ return 0;
+}
+
+static ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ if (count < sizeof(struct tlclk_alarms))
+ return -EIO;
+ if (mutex_lock_interruptible(&tlclk_mutex))
+ return -EINTR;
+
+ wait_event_interruptible(wq, got_event);
+ if (copy_to_user(buf, alarm_events, sizeof(struct tlclk_alarms))) {
+ mutex_unlock(&tlclk_mutex);
+ return -EFAULT;
+ }
+
+ memset(alarm_events, 0, sizeof(struct tlclk_alarms));
+ got_event = 0;
+
+ mutex_unlock(&tlclk_mutex);
+ return sizeof(struct tlclk_alarms);
+}
+
+static const struct file_operations tlclk_fops = {
+ .read = tlclk_read,
+ .open = tlclk_open,
+ .release = tlclk_release,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice tlclk_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "telco_clock",
+ .fops = &tlclk_fops,
+};
+
+static ssize_t show_current_ref(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long ret_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&event_lock, flags);
+ ret_val = ((inb(TLCLK_REG1) & 0x08) >> 3);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return sprintf(buf, "0x%lX\n", ret_val);
+}
+
+static DEVICE_ATTR(current_ref, S_IRUGO, show_current_ref, NULL);
+
+
+static ssize_t show_telclock_version(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long ret_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&event_lock, flags);
+ ret_val = inb(TLCLK_REG5);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return sprintf(buf, "0x%lX\n", ret_val);
+}
+
+static DEVICE_ATTR(telclock_version, S_IRUGO,
+ show_telclock_version, NULL);
+
+static ssize_t show_alarms(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long ret_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&event_lock, flags);
+ ret_val = (inb(TLCLK_REG2) & 0xf0);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return sprintf(buf, "0x%lX\n", ret_val);
+}
+
+static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+
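+/* The write-only attributes below share one pattern: parse a hex value from
+ * the buffer and read-modify-write the corresponding bit field of a clock
+ * register under event_lock. */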
+static ssize_t store_received_ref_clk3a(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG1, 0xef, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3a, (S_IWUSR|S_IWGRP), NULL,
+ store_received_ref_clk3a);
+
+
+static ssize_t store_received_ref_clk3b(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG1, 0xdf, val << 1);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3b, (S_IWUSR|S_IWGRP), NULL,
+ store_received_ref_clk3b);
+
+
+static ssize_t store_enable_clk3b_output(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG3, 0x7f, val << 7);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clk3b_output, (S_IWUSR|S_IWGRP), NULL,
+ store_enable_clk3b_output);
+
+static ssize_t store_enable_clk3a_output(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ unsigned long tmp;
+ unsigned char val;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG3, 0xbf, val << 6);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clk3a_output, (S_IWUSR|S_IWGRP), NULL,
+ store_enable_clk3a_output);
+
+static ssize_t store_enable_clkb1_output(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ unsigned long tmp;
+ unsigned char val;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG2, 0xf7, val << 3);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clkb1_output, (S_IWUSR|S_IWGRP), NULL,
+ store_enable_clkb1_output);
+
+
+static ssize_t store_enable_clka1_output(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ unsigned long tmp;
+ unsigned char val;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG2, 0xfb, val << 2);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clka1_output, (S_IWUSR|S_IWGRP), NULL,
+ store_enable_clka1_output);
+
+static ssize_t store_enable_clkb0_output(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ unsigned long tmp;
+ unsigned char val;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG2, 0xfd, val << 1);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clkb0_output, (S_IWUSR|S_IWGRP), NULL,
+ store_enable_clkb0_output);
+
+static ssize_t store_enable_clka0_output(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ unsigned long tmp;
+ unsigned char val;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG2, 0xfe, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(enable_clka0_output, (S_IWUSR|S_IWGRP), NULL,
+ store_enable_clka0_output);
+
+static ssize_t store_select_amcb2_transmit_clock(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ unsigned long tmp;
+ unsigned char val;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) {
+ SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x28);
+ SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val);
+ } else if (val >= CLK_8_592MHz) {
+ SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38);
+ switch (val) {
+ case CLK_8_592MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
+ break;
+ case CLK_11_184MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
+ break;
+ case CLK_34_368MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
+ break;
+ case CLK_44_736MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
+ break;
+ }
+ } else
+ SET_PORT_BITS(TLCLK_REG3, 0xc7, val << 3);
+
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(select_amcb2_transmit_clock, (S_IWUSR|S_IWGRP), NULL,
+ store_select_amcb2_transmit_clock);
+
+static ssize_t store_select_amcb1_transmit_clock(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) {
+ SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x5);
+ SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val);
+ } else if (val >= CLK_8_592MHz) {
+ SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x7);
+ switch (val) {
+ case CLK_8_592MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
+ break;
+ case CLK_11_184MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
+ break;
+ case CLK_34_368MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
+ break;
+ case CLK_44_736MHz:
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
+ break;
+ }
+ } else
+ SET_PORT_BITS(TLCLK_REG3, 0xf8, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(select_amcb1_transmit_clock, (S_IWUSR|S_IWGRP), NULL,
+ store_select_amcb1_transmit_clock);
+
+static ssize_t store_select_redundant_clock(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG1, 0xfe, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(select_redundant_clock, (S_IWUSR|S_IWGRP), NULL,
+ store_select_redundant_clock);
+
+static ssize_t store_select_ref_frequency(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG1, 0xfd, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(select_ref_frequency, (S_IWUSR|S_IWGRP), NULL,
+ store_select_ref_frequency);
+
+static ssize_t store_filter_select(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG0, 0xfb, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(filter_select, (S_IWUSR|S_IWGRP), NULL, store_filter_select);
+
+static ssize_t store_hardware_switching_mode(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG0, 0xbf, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(hardware_switching_mode, (S_IWUSR|S_IWGRP), NULL,
+ store_hardware_switching_mode);
+
+static ssize_t store_hardware_switching(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG0, 0x7f, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(hardware_switching, (S_IWUSR|S_IWGRP), NULL,
+ store_hardware_switching);
+
+static ssize_t store_refalign (struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
+ SET_PORT_BITS(TLCLK_REG0, 0xf7, 0x08);
+ SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(refalign, (S_IWUSR|S_IWGRP), NULL, store_refalign);
+
+static ssize_t store_mode_select (struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG0, 0xcf, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(mode_select, (S_IWUSR|S_IWGRP), NULL, store_mode_select);
+
+static ssize_t store_reset (struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, "tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG4, 0xfd, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(reset, (S_IWUSR|S_IWGRP), NULL, store_reset);
+
+static struct attribute *tlclk_sysfs_entries[] = {
+ &dev_attr_current_ref.attr,
+ &dev_attr_telclock_version.attr,
+ &dev_attr_alarms.attr,
+ &dev_attr_received_ref_clk3a.attr,
+ &dev_attr_received_ref_clk3b.attr,
+ &dev_attr_enable_clk3a_output.attr,
+ &dev_attr_enable_clk3b_output.attr,
+ &dev_attr_enable_clkb1_output.attr,
+ &dev_attr_enable_clka1_output.attr,
+ &dev_attr_enable_clkb0_output.attr,
+ &dev_attr_enable_clka0_output.attr,
+ &dev_attr_select_amcb1_transmit_clock.attr,
+ &dev_attr_select_amcb2_transmit_clock.attr,
+ &dev_attr_select_redundant_clock.attr,
+ &dev_attr_select_ref_frequency.attr,
+ &dev_attr_filter_select.attr,
+ &dev_attr_hardware_switching_mode.attr,
+ &dev_attr_hardware_switching.attr,
+ &dev_attr_refalign.attr,
+ &dev_attr_mode_select.attr,
+ &dev_attr_reset.attr,
+ NULL
+};
+
+static struct attribute_group tlclk_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = tlclk_sysfs_entries,
+};
+
+static struct platform_device *tlclk_device;
+
+static int __init tlclk_init(void)
+{
+ int ret;
+
+ ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
+ if (ret < 0) {
+ printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
+ return ret;
+ }
+ tlclk_major = ret;
+ alarm_events = kzalloc(sizeof(struct tlclk_alarms), GFP_KERNEL);
+ if (!alarm_events) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ /* Read telecom clock IRQ number (Set by BIOS) */
+ if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
+ printk(KERN_ERR "tlclk: request_region 0x%X failed.\n",
+ TLCLK_BASE);
+ ret = -EBUSY;
+ goto out2;
+ }
+ telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
+
+ if (0x0F == telclk_interrupt) { /* not MPCBL0010 ? */
+ printk(KERN_ERR "telclk_interrupt = 0x%x, non-MPCBL0010 hw.\n",
+ telclk_interrupt);
+ ret = -ENXIO;
+ goto out3;
+ }
+
+ init_timer(&switchover_timer);
+
+ ret = misc_register(&tlclk_miscdev);
+ if (ret < 0) {
+ printk(KERN_ERR "tlclk: misc_register returns %d.\n", ret);
+ goto out3;
+ }
+
+ tlclk_device = platform_device_register_simple("telco_clock",
+ -1, NULL, 0);
+ if (IS_ERR(tlclk_device)) {
+ printk(KERN_ERR "tlclk: platform_device_register failed.\n");
+ ret = PTR_ERR(tlclk_device);
+ goto out4;
+ }
+
+ ret = sysfs_create_group(&tlclk_device->dev.kobj,
+ &tlclk_attribute_group);
+ if (ret) {
+ printk(KERN_ERR "tlclk: failed to create sysfs device attributes.\n");
+ goto out5;
+ }
+
+ return 0;
+out5:
+ platform_device_unregister(tlclk_device);
+out4:
+ misc_deregister(&tlclk_miscdev);
+out3:
+ release_region(TLCLK_BASE, 8);
+out2:
+ kfree(alarm_events);
+out1:
+ unregister_chrdev(tlclk_major, "telco_clock");
+ return ret;
+}
+
+static void __exit tlclk_cleanup(void)
+{
+ sysfs_remove_group(&tlclk_device->dev.kobj, &tlclk_attribute_group);
+ platform_device_unregister(tlclk_device);
+ misc_deregister(&tlclk_miscdev);
+ unregister_chrdev(tlclk_major, "telco_clock");
+
+ release_region(TLCLK_BASE, 8);
+ del_timer_sync(&switchover_timer);
+ kfree(alarm_events);
+
+}
+
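+/* Timer callback armed ~10ms after a holdover interrupt: `data` points at
+ * the REG1 snapshot taken at interrupt time; a change in the active
+ * reference bit (0x08) since that snapshot is counted as a switchover. */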
+static void switchover_timeout(unsigned long data)
+{
+ unsigned long flags = *(unsigned long *) data;
+
+ if ((flags & 1)) {
+ if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08))
+ alarm_events->switchover_primary++;
+ } else {
+ if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08))
+ alarm_events->switchover_secondary++;
+ }
+
+ /* Alarm processing is done, wake up read task */
+ del_timer(&switchover_timer);
+ got_event = 1;
+ wake_up(&wq);
+}
+
+static irqreturn_t tlclk_interrupt(int irq, void *dev_id)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&event_lock, flags);
+ /* Read and clear interrupt events */
+ int_events = inb(TLCLK_REG6);
+
+ /* Primary_Los changed from 0 to 1 ? */
+ if (int_events & PRI_LOS_01_MASK) {
+ if (inb(TLCLK_REG2) & SEC_LOST_MASK)
+ alarm_events->lost_clocks++;
+ else
+ alarm_events->lost_primary_clock++;
+ }
+
+ /* Primary_Los changed from 1 to 0 ? */
+ if (int_events & PRI_LOS_10_MASK) {
+ alarm_events->primary_clock_back++;
+ SET_PORT_BITS(TLCLK_REG1, 0xFE, 1);
+ }
+ /* Secondary_Los changed from 0 to 1 ? */
+ if (int_events & SEC_LOS_01_MASK) {
+ if (inb(TLCLK_REG2) & PRI_LOST_MASK)
+ alarm_events->lost_clocks++;
+ else
+ alarm_events->lost_secondary_clock++;
+ }
+ /* Secondary_Los changed from 1 to 0 ? */
+ if (int_events & SEC_LOS_10_MASK) {
+ alarm_events->secondary_clock_back++;
+ SET_PORT_BITS(TLCLK_REG1, 0xFE, 0);
+ }
+ if (int_events & HOLDOVER_10_MASK)
+ alarm_events->pll_end_holdover++;
+
+ if (int_events & UNLOCK_01_MASK)
+ alarm_events->pll_lost_sync++;
+
+ if (int_events & UNLOCK_10_MASK)
+ alarm_events->pll_sync++;
+
+ /* Holdover changed from 0 to 1 ? */
+ if (int_events & HOLDOVER_01_MASK) {
+ alarm_events->pll_holdover++;
+
+ /* TIMEOUT in ~10ms */
+ switchover_timer.expires = jiffies + msecs_to_jiffies(10);
+ tlclk_timer_data = inb(TLCLK_REG1);
+ switchover_timer.data = (unsigned long) &tlclk_timer_data;
+ mod_timer(&switchover_timer, switchover_timer.expires);
+ } else {
+ got_event = 1;
+ wake_up(&wq);
+ }
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+module_init(tlclk_init);
+module_exit(tlclk_cleanup);
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
new file mode 100644
index 00000000..014c9d90
--- /dev/null
+++ b/drivers/char/toshiba.c
@@ -0,0 +1,546 @@
+/* toshiba.c -- Linux driver for accessing the SMM on Toshiba laptops
+ *
+ * Copyright (c) 1996-2001 Jonathan A. Buzzard (jonathan@buzzard.org.uk)
+ *
+ * Valuable assistance and patches from:
+ * Tom May <tom@you-bastards.com>
+ * Rob Napier <rnapier@employees.org>
+ *
+ * Fn status port numbers for machine ID's courtesy of
+ * 0xfc02: Scott Eisert <scott.e@sky-eye.com>
+ * 0xfc04: Steve VanDevender <stevev@efn.org>
+ * 0xfc08: Garth Berry <garth@itsbruce.net>
+ * 0xfc0a: Egbert Eich <eich@xfree86.org>
+ * 0xfc10: Andrew Lofthouse <Andrew.Lofthouse@robins.af.mil>
+ * 0xfc11: Spencer Olson <solson@novell.com>
+ * 0xfc13: Claudius Frankewitz <kryp@gmx.de>
+ * 0xfc15: Tom May <tom@you-bastards.com>
+ * 0xfc17: Dave Konrad <konrad@xenia.it>
+ * 0xfc1a: George Betzos <betzos@engr.colostate.edu>
+ * 0xfc1b: Munemasa Wada <munemasa@jnovel.co.jp>
+ * 0xfc1d: Arthur Liu <armie@slap.mine.nu>
+ * 0xfc5a: Jacques L'helgoualc'h <lhh@free.fr>
+ * 0xfcd1: Mr. Dave Konrad <konrad@xenia.it>
+ *
+ * WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
+ *
+ * This code is covered by the GNU GPL and you are free to make any
+ * changes you wish to it under the terms of the license. However the
+ * code has the potential to render your computer and/or someone else's
+ * unusable. Please proceed with care when modifying the code.
+ *
+ * Note: Unfortunately the laptop hardware can close the System Configuration
+ * Interface of its own accord. It is therefore necessary for *all*
+ * programs using this driver to be aware that *any* SCI call can fail at
+ * *any* time. It is up to any program to be aware of this eventuality
+ * and take appropriate steps.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The information used to write this driver has been obtained by reverse
+ * engineering the software supplied by Toshiba for their portable computers in
+ * strict accordance with the European Council Directive 92/250/EEC on the legal
+ * protection of computer programs, and its implementation into English Law by
+ * the Copyright (Computer Programs) Regulations 1992 (S.I. 1992 No.3233).
+ *
+ */
+
+#define TOSH_VERSION "1.11 26/9/2001"
+#define TOSH_DEBUG 0
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/miscdevice.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/mutex.h>
+#include <linux/toshiba.h>
+
+#define TOSH_MINOR_DEV 181
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonathan Buzzard <jonathan@buzzard.org.uk>");
+MODULE_DESCRIPTION("Toshiba laptop SMM driver");
+MODULE_SUPPORTED_DEVICE("toshiba");
+
+static DEFINE_MUTEX(tosh_mutex);
+static int tosh_fn;
+module_param_named(fn, tosh_fn, int, 0);
+MODULE_PARM_DESC(fn, "User specified Fn key detection port");
+
+static int tosh_id;
+static int tosh_bios;
+static int tosh_date;
+static int tosh_sci;
+static int tosh_fan;
+
+static long tosh_ioctl(struct file *, unsigned int,
+ unsigned long);
+
+
+static const struct file_operations tosh_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = tosh_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice tosh_device = {
+ TOSH_MINOR_DEV,
+ "toshiba",
+ &tosh_fops
+};
+
+/*
+ * Read the Fn key status
+ */
+#ifdef CONFIG_PROC_FS
+static int tosh_fn_status(void)
+{
+ unsigned char scan;
+ unsigned long flags;
+
+ if (tosh_fn!=0) {
+ scan = inb(tosh_fn);
+ } else {
+ local_irq_save(flags);
+ outb(0x8e, 0xe4);
+ scan = inb(0xe5);
+ local_irq_restore(flags);
+ }
+
+ return (int) scan;
+}
+#endif
+
+
+/*
+ * For the Portage 610CT and the Tecra 700CS/700CDT emulate the HCI fan function
+ */
+static int tosh_emulate_fan(SMMRegisters *regs)
+{
+ unsigned long eax,ecx,flags;
+ unsigned char al;
+
+ eax = regs->eax & 0xff00;
+ ecx = regs->ecx & 0xffff;
+
+ /* Portage 610CT */
+
+ if (tosh_id==0xfccb) {
+ if (eax==0xfe00) {
+ /* fan status */
+ local_irq_save(flags);
+ outb(0xbe, 0xe4);
+ al = inb(0xe5);
+ local_irq_restore(flags);
+ regs->eax = 0x00;
+ regs->ecx = (unsigned int) (al & 0x01);
+ }
+ if ((eax==0xff00) && (ecx==0x0000)) {
+ /* fan off */
+ local_irq_save(flags);
+ outb(0xbe, 0xe4);
+ al = inb(0xe5);
+ outb(0xbe, 0xe4);
+ outb (al | 0x01, 0xe5);
+ local_irq_restore(flags);
+ regs->eax = 0x00;
+ regs->ecx = 0x00;
+ }
+ if ((eax==0xff00) && (ecx==0x0001)) {
+ /* fan on */
+ local_irq_save(flags);
+ outb(0xbe, 0xe4);
+ al = inb(0xe5);
+ outb(0xbe, 0xe4);
+ outb(al & 0xfe, 0xe5);
+ local_irq_restore(flags);
+ regs->eax = 0x00;
+ regs->ecx = 0x01;
+ }
+ }
+
+ /* Tecra 700CS/CDT */
+
+ if (tosh_id==0xfccc) {
+ if (eax==0xfe00) {
+ /* fan status */
+ local_irq_save(flags);
+ outb(0xe0, 0xe4);
+ al = inb(0xe5);
+ local_irq_restore(flags);
+ regs->eax = 0x00;
+ regs->ecx = al & 0x01;
+ }
+ if ((eax==0xff00) && (ecx==0x0000)) {
+ /* fan off */
+ local_irq_save(flags);
+ outb(0xe0, 0xe4);
+ al = inb(0xe5);
+ outw(0xe0 | ((al & 0xfe) << 8), 0xe4);
+ local_irq_restore(flags);
+ regs->eax = 0x00;
+ regs->ecx = 0x00;
+ }
+ if ((eax==0xff00) && (ecx==0x0001)) {
+ /* fan on */
+ local_irq_save(flags);
+ outb(0xe0, 0xe4);
+ al = inb(0xe5);
+ outw(0xe0 | ((al | 0x01) << 8), 0xe4);
+ local_irq_restore(flags);
+ regs->eax = 0x00;
+ regs->ecx = 0x01;
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * Put the laptop into System Management Mode
+ */
+int tosh_smm(SMMRegisters *regs)
+{
+ int eax;
+
+ asm ("# load the values into the registers\n\t" \
+ "pushl %%eax\n\t" \
+ "movl 0(%%eax),%%edx\n\t" \
+ "push %%edx\n\t" \
+ "movl 4(%%eax),%%ebx\n\t" \
+ "movl 8(%%eax),%%ecx\n\t" \
+ "movl 12(%%eax),%%edx\n\t" \
+ "movl 16(%%eax),%%esi\n\t" \
+ "movl 20(%%eax),%%edi\n\t" \
+ "popl %%eax\n\t" \
+ "# call the System Management mode\n\t" \
+ "inb $0xb2,%%al\n\t"
+ "# fill out the memory with the values in the registers\n\t" \
+ "xchgl %%eax,(%%esp)\n\t"
+ "movl %%ebx,4(%%eax)\n\t" \
+ "movl %%ecx,8(%%eax)\n\t" \
+ "movl %%edx,12(%%eax)\n\t" \
+ "movl %%esi,16(%%eax)\n\t" \
+ "movl %%edi,20(%%eax)\n\t" \
+ "popl %%edx\n\t" \
+ "movl %%edx,0(%%eax)\n\t" \
+ "# setup the return value to the carry flag\n\t" \
+ "lahf\n\t" \
+ "shrl $8,%%eax\n\t" \
+ "andl $1,%%eax\n" \
+ : "=a" (eax)
+ : "a" (regs)
+ : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
+
+ return eax;
+}
+EXPORT_SYMBOL(tosh_smm);
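+
+/*
+ * A minimal sketch of an in-kernel caller, mirroring the SCI support
+ * check in tosh_probe() below (the 0xf0f0 selector and the carry-flag
+ * return convention are taken from that code):
+ *
+ * SMMRegisters regs = { .eax = 0xf0f0, .ebx = 0, .ecx = 0 };
+ *
+ * if (tosh_smm(&regs) || (regs.eax & 0xff00) == 0x8600)
+ * return -ENODEV; // SCI unsupported, treat as non-Toshiba
+ */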
+
+
+static long tosh_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ SMMRegisters regs;
+ SMMRegisters __user *argp = (SMMRegisters __user *)arg;
+ unsigned short ax,bx;
+ int err;
+
+ if (!argp)
+ return -EINVAL;
+
+ if (copy_from_user(&regs, argp, sizeof(SMMRegisters)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case TOSH_SMM:
+ ax = regs.eax & 0xff00;
+ bx = regs.ebx & 0xffff;
+ /* block HCI calls to read/write memory & PCI devices */
+ if (((ax==0xff00) || (ax==0xfe00)) && (bx>0x0069))
+ return -EINVAL;
+
+ /* do we need to emulate the fan ? */
+ mutex_lock(&tosh_mutex);
+ if (tosh_fan==1) {
+ if (((ax==0xf300) || (ax==0xf400)) && (bx==0x0004)) {
+ err = tosh_emulate_fan(&regs);
+ mutex_unlock(&tosh_mutex);
+ break;
+ }
+ }
+ err = tosh_smm(&regs);
+ mutex_unlock(&tosh_mutex);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (copy_to_user(argp, &regs, sizeof(SMMRegisters)))
+ return -EFAULT;
+
+ return (err==0) ? 0:-EINVAL;
+}
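+
+/*
+ * A hedged user-space sketch of this ioctl; TOSH_SMM and SMMRegisters
+ * come from <linux/toshiba.h>, and the eax/ebx values below are a
+ * hypothetical HCI fan status query (cf. tosh_emulate_fan above):
+ *
+ * SMMRegisters regs = { .eax = 0xfe00, .ebx = 0x0004, .ecx = 0 };
+ * int fd = open("/dev/toshiba", O_RDWR);
+ *
+ * if (fd >= 0 && ioctl(fd, TOSH_SMM, &regs) == 0)
+ * printf("fan: %d\n", regs.ecx & 1);
+ */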
+
+
+/*
+ * Print the information for /proc/toshiba
+ */
+#ifdef CONFIG_PROC_FS
+static int proc_toshiba_show(struct seq_file *m, void *v)
+{
+ int key;
+
+ key = tosh_fn_status();
+
+ /* Arguments
+ 0) Linux driver version (this will change if format changes)
+ 1) Machine ID
+ 2) SCI version
+ 3) BIOS version (major, minor)
+ 4) BIOS date (in SCI date format)
+ 5) Fn Key status
+ */
+ seq_printf(m, "1.1 0x%04x %d.%d %d.%d 0x%04x 0x%02x\n",
+ tosh_id,
+ (tosh_sci & 0xff00)>>8,
+ tosh_sci & 0xff,
+ (tosh_bios & 0xff00)>>8,
+ tosh_bios & 0xff,
+ tosh_date,
+ key);
+ return 0;
+}
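+
+/*
+ * For illustration, a line in the format above might read (values are
+ * hypothetical):
+ *
+ * 1.1 0xfc02 1.0 1.2 0x3034 0x00
+ *
+ * i.e. driver format 1.1, machine ID 0xfc02, SCI version 1.0, BIOS
+ * version 1.2, BIOS date in SCI format, Fn key not pressed.
+ */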
+
+static int proc_toshiba_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_toshiba_show, NULL);
+}
+
+static const struct file_operations proc_toshiba_fops = {
+ .owner = THIS_MODULE,
+ .open = proc_toshiba_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+
+/*
+ * Determine which port to use for the Fn key status
+ */
+static void tosh_set_fn_port(void)
+{
+ switch (tosh_id) {
+ case 0xfc02: case 0xfc04: case 0xfc09: case 0xfc0a: case 0xfc10:
+ case 0xfc11: case 0xfc13: case 0xfc15: case 0xfc1a: case 0xfc1b:
+ case 0xfc5a:
+ tosh_fn = 0x62;
+ break;
+ case 0xfc08: case 0xfc17: case 0xfc1d: case 0xfcd1: case 0xfce0:
+ case 0xfce2:
+ tosh_fn = 0x68;
+ break;
+ default:
+ tosh_fn = 0x00;
+ break;
+ }
+
+ return;
+}
+
+
+/*
+ * Get the machine identification number of the current model
+ */
+static int tosh_get_machine_id(void __iomem *bios)
+{
+ int id;
+ SMMRegisters regs;
+ unsigned short bx,cx;
+ unsigned long address;
+
+ id = (0x100*(int) readb(bios+0xfffe))+((int) readb(bios+0xfffa));
+
+ /* do we have a SCTTable machine identification number on our hands? */
+
+ if (id==0xfc2f) {
+
+ /* start by getting a pointer into the BIOS */
+
+ regs.eax = 0xc000;
+ regs.ebx = 0x0000;
+ regs.ecx = 0x0000;
+ tosh_smm(&regs);
+ bx = (unsigned short) (regs.ebx & 0xffff);
+
+ /* At this point in the Toshiba routines under MS Windows
+ the bx register holds 0xe6f5. However my code is producing
+ a different value! For the time being I will just fudge the
+ value. This has been verified on a Satellite Pro 430CDT,
+ Tecra 750CDT, Tecra 780DVD and Satellite 310CDT. */
+#if TOSH_DEBUG
+ printk("toshiba: debugging ID ebx=0x%04x\n", regs.ebx);
+#endif
+ bx = 0xe6f5;
+
+ /* now twiddle with our pointer a bit */
+
+ address = bx;
+ cx = readw(bios + address);
+ address = 9+bx+cx;
+ cx = readw(bios + address);
+ address = 0xa+cx;
+ cx = readw(bios + address);
+
+ /* now construct our machine identification number */
+
+ id = ((cx & 0xff)<<8)+((cx & 0xff00)>>8);
+ }
+
+ return id;
+}
+
+
+/*
+ * Probe for the presence of a Toshiba laptop
+ *
+ * Returns non-zero if unable to detect the presence of a Toshiba
+ * laptop; otherwise returns zero and determines the Machine ID, BIOS
+ * version and date, and SCI version.
+ */
+static int tosh_probe(void)
+{
+ int i,major,minor,day,year,month,flag;
+ unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 };
+ SMMRegisters regs;
+ void __iomem *bios = ioremap_cache(0xf0000, 0x10000);
+
+ if (!bios)
+ return -ENOMEM;
+
+ /* extra sanity check for the string "TOSHIBA" in the BIOS because
+ some machines that are not Toshiba's pass the next test */
+
+ for (i=0;i<7;i++) {
+ if (readb(bios+0xe010+i)!=signature[i]) {
+ printk("toshiba: not a supported Toshiba laptop\n");
+ iounmap(bios);
+ return -ENODEV;
+ }
+ }
+
+ /* call the Toshiba SCI support check routine */
+
+ regs.eax = 0xf0f0;
+ regs.ebx = 0x0000;
+ regs.ecx = 0x0000;
+ flag = tosh_smm(&regs);
+
+ /* if this is not a Toshiba laptop carry flag is set and ah=0x86 */
+
+ if ((flag==1) || ((regs.eax & 0xff00)==0x8600)) {
+ printk("toshiba: not a supported Toshiba laptop\n");
+ iounmap(bios);
+ return -ENODEV;
+ }
+
+ /* if we get this far then we are running on a Toshiba (probably)! */
+
+ tosh_sci = regs.edx & 0xffff;
+
+ /* next get the machine ID of the current laptop */
+
+ tosh_id = tosh_get_machine_id(bios);
+
+ /* get the BIOS version */
+
+ major = readb(bios+0xe009)-'0';
+ minor = ((readb(bios+0xe00b)-'0')*10)+(readb(bios+0xe00c)-'0');
+ tosh_bios = (major*0x100)+minor;
+
+ /* get the BIOS date */
+
+ day = ((readb(bios+0xfff5)-'0')*10)+(readb(bios+0xfff6)-'0');
+ month = ((readb(bios+0xfff8)-'0')*10)+(readb(bios+0xfff9)-'0');
+ year = ((readb(bios+0xfffb)-'0')*10)+(readb(bios+0xfffc)-'0');
+ tosh_date = (((year-90) & 0x1f)<<10) | ((month & 0xf)<<6)
+ | ((day & 0x1f)<<1);
+
+
+ /* in theory we should check the ports we are going to use for the
+ fn key detection (and the fan on the Portage 610/Tecra700), and
+ then request them to stop other drivers using them. However as
+ the keyboard driver grabs 0x60-0x6f and the pic driver grabs
+ 0xa0-0xbf we can't. We just have to live dangerously and use the
+ ports anyway, oh boy! */
+
+ /* do we need to emulate the fan? */
+
+ if ((tosh_id==0xfccb) || (tosh_id==0xfccc))
+ tosh_fan = 1;
+
+ iounmap(bios);
+
+ return 0;
+}
+
+static int __init toshiba_init(void)
+{
+ int retval;
+ /* are we running on a Toshiba laptop */
+
+ if (tosh_probe())
+ return -ENODEV;
+
+ printk(KERN_INFO "Toshiba System Management Mode driver v" TOSH_VERSION "\n");
+
+ /* set the port to use for Fn status if not specified as a parameter */
+ if (tosh_fn==0x00)
+ tosh_set_fn_port();
+
+ /* register the device file */
+ retval = misc_register(&tosh_device);
+ if (retval < 0)
+ return retval;
+
+#ifdef CONFIG_PROC_FS
+ {
+ struct proc_dir_entry *pde;
+
+ pde = proc_create("toshiba", 0, NULL, &proc_toshiba_fops);
+ if (!pde) {
+ misc_deregister(&tosh_device);
+ return -ENOMEM;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static void __exit toshiba_exit(void)
+{
+ remove_proc_entry("toshiba", NULL);
+ misc_deregister(&tosh_device);
+}
+
+module_init(toshiba_init);
+module_exit(toshiba_exit);
+
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
new file mode 100644
index 00000000..f6595aba
--- /dev/null
+++ b/drivers/char/tpm/Kconfig
@@ -0,0 +1,63 @@
+#
+# TPM device configuration
+#
+
+menuconfig TCG_TPM
+ tristate "TPM Hardware Support"
+ depends on HAS_IOMEM
+ depends on EXPERIMENTAL
+ select SECURITYFS
+ ---help---
+ If you have a TPM security chip in your system, which
+ implements the Trusted Computing Group's specification,
+ say Yes and it will be accessible from within Linux. For
+ more information see <http://www.trustedcomputinggroup.org>.
+ An implementation of the Trusted Software Stack (TSS), the
+ userspace enablement piece of the specification, can be
+ obtained at: <http://sourceforge.net/projects/trousers>. To
+ compile this driver as a module, choose M here; the module
+ will be called tpm. If unsure, say N.
+ Notes:
+ 1) For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI
+ and CONFIG_PNPACPI.
+ 2) Without ACPI enabled, the BIOS event log won't be accessible,
+ which is required to validate the PCR 0-7 values.
+
+if TCG_TPM
+
+config TCG_TIS
+ tristate "TPM Interface Specification 1.2 Interface"
+ ---help---
+ If you have a TPM security chip that is compliant with the
+ TCG TIS 1.2 TPM specification say Yes and it will be accessible
+ from within Linux. To compile this driver as a module, choose
+ M here; the module will be called tpm_tis.
+
+config TCG_NSC
+ tristate "National Semiconductor TPM Interface"
+ ---help---
+ If you have a TPM security chip from National Semiconductor
+ say Yes and it will be accessible from within Linux. To
+ compile this driver as a module, choose M here; the module
+ will be called tpm_nsc.
+
+config TCG_ATMEL
+ tristate "Atmel TPM Interface"
+ ---help---
+ If you have a TPM security chip from Atmel say Yes and it
+ will be accessible from within Linux. To compile this driver
+ as a module, choose M here; the module will be called tpm_atmel.
+
+config TCG_INFINEON
+ tristate "Infineon Technologies TPM Interface"
+ depends on PNP
+ ---help---
+ If you have a TPM security chip from Infineon Technologies
+ (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it
+ will be accessible from within Linux.
+ To compile this driver as a module, choose M here; the module
+ will be called tpm_infineon.
+ Further information on this driver and the supported hardware
+ can be found at http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
+
+endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
new file mode 100644
index 00000000..ea3a1e02
--- /dev/null
+++ b/drivers/char/tpm/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the kernel tpm device drivers.
+#
+obj-$(CONFIG_TCG_TPM) += tpm.o
+ifdef CONFIG_ACPI
+ obj-$(CONFIG_TCG_TPM) += tpm_bios.o
+endif
+obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
+obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
+obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
new file mode 100644
index 00000000..b85ee76e
--- /dev/null
+++ b/drivers/char/tpm/tpm.c
@@ -0,0 +1,1278 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * Note, the TPM chip is not interrupt driven (only polling)
+ * and can have very long timeouts (minutes!). Hence the unusual
+ * calls to msleep.
+ *
+ */
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include "tpm.h"
+
+enum tpm_const {
+ TPM_MINOR = 224, /* officially assigned */
+ TPM_BUFSIZE = 4096,
+ TPM_NUM_DEVICES = 256,
+};
+
+enum tpm_duration {
+ TPM_SHORT = 0,
+ TPM_MEDIUM = 1,
+ TPM_LONG = 2,
+ TPM_UNDEFINED,
+};
+
+#define TPM_MAX_ORDINAL 243
+#define TPM_MAX_PROTECTED_ORDINAL 12
+#define TPM_PROTECTED_ORDINAL_MASK 0xFF
+
+/*
+ * Bug workaround - some TPMs don't flush the most
+ * recently changed pcr on suspend, so force the flush
+ * with an extend to the selected _unused_ non-volatile pcr.
+ */
+static unsigned int tpm_suspend_pcr;
+module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644);
+MODULE_PARM_DESC(suspend_pcr,
+ "PCR to use for dummy writes to facilitate flush on suspend.");
+
+static LIST_HEAD(tpm_chip_list);
+static DEFINE_SPINLOCK(driver_lock);
+static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
+
+/*
+ * Array with one entry per ordinal defining the maximum amount
+ * of time the chip could take to return the result. The ordinal
+ * designation of short, medium or long is defined in a table in
+ * TCG Specification TPM Main Part 2 TPM Structures Section 17. The
+ * values of the SHORT, MEDIUM, and LONG durations are retrieved
+ * from the chip during initialization with a call to tpm_get_timeouts.
+ */
+static const u8 tpm_protected_ordinal_duration[TPM_MAX_PROTECTED_ORDINAL] = {
+ TPM_UNDEFINED, /* 0 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 5 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 10 */
+ TPM_SHORT,
+};
+
+static const u8 tpm_ordinal_duration[TPM_MAX_ORDINAL] = {
+ TPM_UNDEFINED, /* 0 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 5 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 10 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_LONG,
+ TPM_LONG,
+ TPM_MEDIUM, /* 15 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_LONG,
+ TPM_SHORT, /* 20 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_SHORT, /* 25 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM, /* 30 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 35 */
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 40 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 45 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_LONG,
+ TPM_MEDIUM, /* 50 */
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 55 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 60 */
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM, /* 65 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 70 */
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 75 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_LONG, /* 80 */
+ TPM_UNDEFINED,
+ TPM_MEDIUM,
+ TPM_LONG,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 85 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 90 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 95 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 100 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 105 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 110 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 115 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_LONG, /* 120 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_SHORT, /* 125 */
+ TPM_SHORT,
+ TPM_LONG,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 130 */
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_UNDEFINED, /* 135 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 140 */
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 145 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 150 */
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 155 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 160 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 165 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_LONG, /* 170 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 175 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 180 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM, /* 185 */
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 190 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 195 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 200 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_SHORT, /* 205 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM, /* 210 */
+ TPM_UNDEFINED,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_UNDEFINED, /* 215 */
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_SHORT, /* 220 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 225 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 230 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 235 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 240 */
+ TPM_UNDEFINED,
+ TPM_MEDIUM,
+};
+
+static void user_reader_timeout(unsigned long ptr)
+{
+ struct tpm_chip *chip = (struct tpm_chip *) ptr;
+
+ schedule_work(&chip->work);
+}
+
+static void timeout_work(struct work_struct *work)
+{
+ struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
+
+ mutex_lock(&chip->buffer_mutex);
+ atomic_set(&chip->data_pending, 0);
+ memset(chip->data_buffer, 0, TPM_BUFSIZE);
+ mutex_unlock(&chip->buffer_mutex);
+}
+
+/*
+ * Returns max number of jiffies to wait
+ */
+unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
+ u32 ordinal)
+{
+ int duration_idx = TPM_UNDEFINED;
+ int duration = 0;
+
+ if (ordinal < TPM_MAX_ORDINAL)
+ duration_idx = tpm_ordinal_duration[ordinal];
+ else if ((ordinal & TPM_PROTECTED_ORDINAL_MASK) <
+ TPM_MAX_PROTECTED_ORDINAL)
+ duration_idx =
+ tpm_protected_ordinal_duration[ordinal &
+ TPM_PROTECTED_ORDINAL_MASK];
+
+ if (duration_idx != TPM_UNDEFINED)
+ duration = chip->vendor.duration[duration_idx];
+ if (duration <= 0)
+ return 2 * 60 * HZ;
+ else
+ return duration;
+}
+EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
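+
+/*
+ * A worked example: ordinal 22 is marked TPM_MEDIUM in the table above,
+ * so a transmit loop would wait up to
+ *
+ * unsigned long stop = jiffies + tpm_calc_ordinal_duration(chip, 22);
+ *
+ * i.e. chip->vendor.duration[TPM_MEDIUM] as read from the chip, or the
+ * two minute fallback if the chip reported no usable duration.
+ */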
+
+/*
+ * Internal kernel interface to transmit TPM commands
+ */
+static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
+ size_t bufsiz)
+{
+ ssize_t rc;
+ u32 count, ordinal;
+ unsigned long stop;
+
+ if (bufsiz > TPM_BUFSIZE)
+ bufsiz = TPM_BUFSIZE;
+
+ count = be32_to_cpu(*((__be32 *) (buf + 2)));
+ ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+ if (count == 0)
+ return -ENODATA;
+ if (count > bufsiz) {
+ dev_err(chip->dev,
+ "invalid count value %x %zx\n", count, bufsiz);
+ return -E2BIG;
+ }
+
+ mutex_lock(&chip->tpm_mutex);
+
+ if ((rc = chip->vendor.send(chip, (u8 *) buf, count)) < 0) {
+ dev_err(chip->dev,
+ "tpm_transmit: tpm_send: error %zd\n", rc);
+ goto out;
+ }
+
+ if (chip->vendor.irq)
+ goto out_recv;
+
+ stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
+ do {
+ u8 status = chip->vendor.status(chip);
+ if ((status & chip->vendor.req_complete_mask) ==
+ chip->vendor.req_complete_val)
+ goto out_recv;
+
+ if (status == chip->vendor.req_canceled) {
+ dev_err(chip->dev, "Operation Canceled\n");
+ rc = -ECANCELED;
+ goto out;
+ }
+
+ msleep(TPM_TIMEOUT); /* CHECK */
+ rmb();
+ } while (time_before(jiffies, stop));
+
+ chip->vendor.cancel(chip);
+ dev_err(chip->dev, "Operation Timed out\n");
+ rc = -ETIME;
+ goto out;
+
+out_recv:
+ rc = chip->vendor.recv(chip, (u8 *) buf, bufsiz);
+ if (rc < 0)
+ dev_err(chip->dev,
+ "tpm_transmit: tpm_recv: error %zd\n", rc);
+out:
+ mutex_unlock(&chip->tpm_mutex);
+ return rc;
+}
+
+#define TPM_DIGEST_SIZE 20
+#define TPM_ERROR_SIZE 10
+#define TPM_RET_CODE_IDX 6
+
+enum tpm_capabilities {
+ TPM_CAP_FLAG = cpu_to_be32(4),
+ TPM_CAP_PROP = cpu_to_be32(5),
+ CAP_VERSION_1_1 = cpu_to_be32(0x06),
+ CAP_VERSION_1_2 = cpu_to_be32(0x1A)
+};
+
+enum tpm_sub_capabilities {
+ TPM_CAP_PROP_PCR = cpu_to_be32(0x101),
+ TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103),
+ TPM_CAP_FLAG_PERM = cpu_to_be32(0x108),
+ TPM_CAP_FLAG_VOL = cpu_to_be32(0x109),
+ TPM_CAP_PROP_OWNER = cpu_to_be32(0x111),
+ TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115),
+ TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120),
+};
+
+static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
+ int len, const char *desc)
+{
+ int err;
+
+ len = tpm_transmit(chip,(u8 *) cmd, len);
+ if (len < 0)
+ return len;
+ if (len == TPM_ERROR_SIZE) {
+ err = be32_to_cpu(cmd->header.out.return_code);
+ dev_dbg(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
+ return err;
+ }
+ return 0;
+}
+
+#define TPM_INTERNAL_RESULT_SIZE 200
+#define TPM_TAG_RQU_COMMAND cpu_to_be16(193)
+#define TPM_ORD_GET_CAP cpu_to_be32(101)
+
+static const struct tpm_input_header tpm_getcap_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(22),
+ .ordinal = TPM_ORD_GET_CAP
+};
+
+ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap,
+ const char *desc)
+{
+ struct tpm_cmd_t tpm_cmd;
+ int rc;
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ tpm_cmd.header.in = tpm_getcap_header;
+ if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) {
+ tpm_cmd.params.getcap_in.cap = subcap_id;
+ /* subcap field not necessary */
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0);
+ tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32));
+ } else {
+ if (subcap_id == TPM_CAP_FLAG_PERM ||
+ subcap_id == TPM_CAP_FLAG_VOL)
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG;
+ else
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
+ tpm_cmd.params.getcap_in.subcap = subcap_id;
+ }
+ rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc);
+ if (!rc)
+ *cap = tpm_cmd.params.getcap_out.cap;
+ return rc;
+}
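+
+/*
+ * A minimal caller sketch, modelled on the sysfs show routines below;
+ * it queries the manufacturer ID capability:
+ *
+ * cap_t cap;
+ *
+ * if (!tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
+ * "attempting to determine the manufacturer"))
+ * dev_info(dev, "TPM manufacturer: 0x%x\n",
+ * be32_to_cpu(cap.manufacturer_id));
+ */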
+
+void tpm_gen_interrupt(struct tpm_chip *chip)
+{
+ struct tpm_cmd_t tpm_cmd;
+ ssize_t rc;
+
+ tpm_cmd.header.in = tpm_getcap_header;
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
+ tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
+
+ rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+ "attempting to determine the timeouts");
+}
+EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
+
+void tpm_get_timeouts(struct tpm_chip *chip)
+{
+ struct tpm_cmd_t tpm_cmd;
+ struct timeout_t *timeout_cap;
+ struct duration_t *duration_cap;
+ ssize_t rc;
+ u32 timeout;
+
+ tpm_cmd.header.in = tpm_getcap_header;
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
+ tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
+
+ rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+ "attempting to determine the timeouts");
+ if (rc)
+ goto duration;
+
+ if (be32_to_cpu(tpm_cmd.header.out.length)
+ != 4 * sizeof(u32))
+ goto duration;
+
+ timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
+ /* Don't overwrite default if value is 0 */
+ timeout = be32_to_cpu(timeout_cap->a);
+ if (timeout)
+ chip->vendor.timeout_a = usecs_to_jiffies(timeout);
+ timeout = be32_to_cpu(timeout_cap->b);
+ if (timeout)
+ chip->vendor.timeout_b = usecs_to_jiffies(timeout);
+ timeout = be32_to_cpu(timeout_cap->c);
+ if (timeout)
+ chip->vendor.timeout_c = usecs_to_jiffies(timeout);
+ timeout = be32_to_cpu(timeout_cap->d);
+ if (timeout)
+ chip->vendor.timeout_d = usecs_to_jiffies(timeout);
+
+duration:
+ tpm_cmd.header.in = tpm_getcap_header;
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
+ tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
+
+ rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+ "attempting to determine the durations");
+ if (rc)
+ return;
+
+ if (be32_to_cpu(tpm_cmd.header.out.return_code)
+ != 3 * sizeof(u32))
+ return;
+ duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
+ chip->vendor.duration[TPM_SHORT] =
+ usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
+ /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
+ * value wrong and apparently reports msecs rather than usecs. So we
+ * fix up the resulting too-small TPM_SHORT value to make things work.
+ */
+ if (chip->vendor.duration[TPM_SHORT] < (HZ/100))
+ chip->vendor.duration[TPM_SHORT] = HZ;
+
+ chip->vendor.duration[TPM_MEDIUM] =
+ usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
+ chip->vendor.duration[TPM_LONG] =
+ usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
+}
+EXPORT_SYMBOL_GPL(tpm_get_timeouts);
+
+void tpm_continue_selftest(struct tpm_chip *chip)
+{
+ u8 data[] = {
+ 0, 193, /* TPM_TAG_RQU_COMMAND */
+ 0, 0, 0, 10, /* length */
+ 0, 0, 0, 83, /* TPM_ORD_ContinueSelfTest */
+ };
+
+ tpm_transmit(chip, data, sizeof(data));
+}
+EXPORT_SYMBOL_GPL(tpm_continue_selftest);
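+
+/*
+ * The raw bytes above decode as a struct tpm_input_header (see tpm.h):
+ * big-endian tag 0x00c1 (TPM_TAG_RQU_COMMAND), total length 10, and
+ * ordinal 83 (ContinueSelfTest); equivalently:
+ *
+ * struct tpm_input_header hdr = {
+ * .tag = cpu_to_be16(193),
+ * .length = cpu_to_be32(10),
+ * .ordinal = cpu_to_be32(83),
+ * };
+ */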
+
+ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
+ char *buf)
+{
+ cap_t cap;
+ ssize_t rc;
+
+ rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
+ "attempting to determine the permanent enabled state");
+ if (rc)
+ return 0;
+
+ rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_show_enabled);
+
+ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
+ char *buf)
+{
+ cap_t cap;
+ ssize_t rc;
+
+ rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
+ "attempting to determine the permanent active state");
+ if (rc)
+ return 0;
+
+ rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_show_active);
+
+ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
+ char *buf)
+{
+ cap_t cap;
+ ssize_t rc;
+
+ rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap,
+ "attempting to determine the owner state");
+ if (rc)
+ return 0;
+
+ rc = sprintf(buf, "%d\n", cap.owned);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_show_owned);
+
+ssize_t tpm_show_temp_deactivated(struct device * dev,
+ struct device_attribute * attr, char *buf)
+{
+ cap_t cap;
+ ssize_t rc;
+
+ rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap,
+ "attempting to determine the temporary state");
+ if (rc)
+ return 0;
+
+ rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_show_temp_deactivated);
+
+/*
+ * tpm_chip_find_get - return tpm_chip for given chip number
+ */
+static struct tpm_chip *tpm_chip_find_get(int chip_num)
+{
+ struct tpm_chip *pos, *chip = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
+ if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
+ continue;
+
+ if (try_module_get(pos->dev->driver->owner)) {
+ chip = pos;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return chip;
+}
+
+#define TPM_ORDINAL_PCRREAD cpu_to_be32(21)
+#define READ_PCR_RESULT_SIZE 30
+static struct tpm_input_header pcrread_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(14),
+ .ordinal = TPM_ORDINAL_PCRREAD
+};
+
+int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+{
+ int rc;
+ struct tpm_cmd_t cmd;
+
+ cmd.header.in = pcrread_header;
+ cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);
+ rc = transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE,
+ "attempting to read a pcr value");
+
+ if (rc == 0)
+ memcpy(res_buf, cmd.params.pcrread_out.pcr_result,
+ TPM_DIGEST_SIZE);
+ return rc;
+}
+
+/**
+ * tpm_pcr_read - read a pcr value
+ * @chip_num: tpm idx # or ANY
+ * @pcr_idx: pcr idx to retrieve
+ * @res_buf: TPM_PCR value
+ * size of res_buf is 20 bytes (or NULL if you don't care)
+ *
+ * The TPM driver should be built-in, but in case it isn't, protect
+ * against the chip disappearing by incrementing the module usage count.
+ */
+int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
+{
+ struct tpm_chip *chip;
+ int rc;
+
+ chip = tpm_chip_find_get(chip_num);
+ if (chip == NULL)
+ return -ENODEV;
+ rc = __tpm_pcr_read(chip, pcr_idx, res_buf);
+ tpm_chip_put(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_pcr_read);
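+
+/*
+ * A minimal in-kernel usage sketch (the caller is hypothetical;
+ * TPM_ANY_NUM simply picks the first registered chip):
+ *
+ * u8 digest[TPM_DIGEST_SIZE];
+ * int rc = tpm_pcr_read(TPM_ANY_NUM, 0, digest);
+ *
+ * if (rc)
+ * pr_err("reading PCR 0 failed: %d\n", rc);
+ */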
+
+/**
+ * tpm_pcr_extend - extend pcr value with hash
+ * @chip_num: tpm idx # or ANY
+ * @pcr_idx: pcr idx to extend
+ * @hash: hash value used to extend pcr value
+ *
+ * The TPM driver should be built-in, but in case it isn't, protect
+ * against the chip disappearing by incrementing the module usage count.
+ */
+#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
+#define EXTEND_PCR_RESULT_SIZE 34
+static struct tpm_input_header pcrextend_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(34),
+ .ordinal = TPM_ORD_PCR_EXTEND
+};
+
+int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
+{
+ struct tpm_cmd_t cmd;
+ int rc;
+ struct tpm_chip *chip;
+
+ chip = tpm_chip_find_get(chip_num);
+ if (chip == NULL)
+ return -ENODEV;
+
+ cmd.header.in = pcrextend_header;
+ cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
+ memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE);
+ rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
+ "attempting to extend a PCR value");
+
+ tpm_chip_put(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_pcr_extend);
+
+int tpm_send(u32 chip_num, void *cmd, size_t buflen)
+{
+ struct tpm_chip *chip;
+ int rc;
+
+ chip = tpm_chip_find_get(chip_num);
+ if (chip == NULL)
+ return -ENODEV;
+
+ rc = transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd");
+
+ tpm_chip_put(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_send);
+
+ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ cap_t cap;
+ u8 digest[TPM_DIGEST_SIZE];
+ ssize_t rc;
+ int i, j, num_pcrs;
+ char *str = buf;
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap,
+ "attempting to determine the number of PCRS");
+ if (rc)
+ return 0;
+
+ num_pcrs = be32_to_cpu(cap.num_pcrs);
+ for (i = 0; i < num_pcrs; i++) {
+ rc = __tpm_pcr_read(chip, i, digest);
+ if (rc)
+ break;
+ str += sprintf(str, "PCR-%02d: ", i);
+ for (j = 0; j < TPM_DIGEST_SIZE; j++)
+ str += sprintf(str, "%02X ", digest[j]);
+ str += sprintf(str, "\n");
+ }
+ return str - buf;
+}
+EXPORT_SYMBOL_GPL(tpm_show_pcrs);
+
+#define READ_PUBEK_RESULT_SIZE 314
+#define TPM_ORD_READPUBEK cpu_to_be32(124)
+static struct tpm_input_header tpm_readpubek_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(30),
+ .ordinal = TPM_ORD_READPUBEK
+};
+
+ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u8 *data;
+ struct tpm_cmd_t tpm_cmd;
+ ssize_t err;
+ int i, rc;
+ char *str = buf;
+
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ tpm_cmd.header.in = tpm_readpubek_header;
+ err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
+ "attempting to read the PUBEK");
+ if (err)
+ goto out;
+
+ /*
+ ignore header 10 bytes
+ algorithm 32 bits (1 == RSA )
+ encscheme 16 bits
+ sigscheme 16 bits
+ parameters (RSA, 12 bytes: keybit, #primes, expbit)
+ keylenbytes 32 bits
+ 256 byte modulus
+ ignore checksum 20 bytes
+ */
+ data = tpm_cmd.params.readpubek_out_buffer;
+ str +=
+ sprintf(str,
+ "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n"
+ "Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X"
+ " %02X %02X %02X %02X %02X %02X %02X %02X\n"
+ "Modulus length: %d\nModulus: \n",
+ data[10], data[11], data[12], data[13], data[14],
+ data[15], data[16], data[17], data[22], data[23],
+ data[24], data[25], data[26], data[27], data[28],
+ data[29], data[30], data[31], data[32], data[33],
+ be32_to_cpu(*((__be32 *) (data + 34))));
+
+ for (i = 0; i < 256; i++) {
+ str += sprintf(str, "%02X ", data[i + 38]);
+ if ((i + 1) % 16 == 0)
+ str += sprintf(str, "\n");
+ }
+out:
+ rc = str - buf;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_show_pubek);
+
+
+ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ cap_t cap;
+ ssize_t rc;
+ char *str = buf;
+
+ rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
+ "attempting to determine the manufacturer");
+ if (rc)
+ return 0;
+ str += sprintf(str, "Manufacturer: 0x%x\n",
+ be32_to_cpu(cap.manufacturer_id));
+
+ rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version");
+ if (rc)
+ return 0;
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ cap.tpm_version.Major, cap.tpm_version.Minor,
+ cap.tpm_version.revMajor, cap.tpm_version.revMinor);
+ return str - buf;
+}
+EXPORT_SYMBOL_GPL(tpm_show_caps);
+
+ssize_t tpm_show_caps_1_2(struct device * dev,
+ struct device_attribute * attr, char *buf)
+{
+ cap_t cap;
+ ssize_t rc;
+ char *str = buf;
+
+ rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
+ "attempting to determine the manufacturer");
+ if (rc)
+ return 0;
+ str += sprintf(str, "Manufacturer: 0x%x\n",
+ be32_to_cpu(cap.manufacturer_id));
+ rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap,
+ "attempting to determine the 1.2 version");
+ if (rc)
+ return 0;
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor,
+ cap.tpm_version_1_2.revMajor,
+ cap.tpm_version_1_2.revMinor);
+ return str - buf;
+}
+EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
+
+ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ if (chip == NULL)
+ return 0;
+
+ chip->vendor.cancel(chip);
+ return count;
+}
+EXPORT_SYMBOL_GPL(tpm_store_cancel);
+
+/*
+ * Device file system interface to the TPM
+ *
+ * It's assured that the chip will be opened just once,
+ * by the check of is_open variable, which is protected
+ * by driver_lock.
+ */
+int tpm_open(struct inode *inode, struct file *file)
+{
+ int minor = iminor(inode);
+ struct tpm_chip *chip = NULL, *pos;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
+ if (pos->vendor.miscdev.minor == minor) {
+ chip = pos;
+ get_device(chip->dev);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (!chip)
+ return -ENODEV;
+
+ if (test_and_set_bit(0, &chip->is_open)) {
+ dev_dbg(chip->dev, "Another process owns this TPM\n");
+ put_device(chip->dev);
+ return -EBUSY;
+ }
+
+ chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL);
+ if (chip->data_buffer == NULL) {
+ clear_bit(0, &chip->is_open);
+ put_device(chip->dev);
+ return -ENOMEM;
+ }
+
+ atomic_set(&chip->data_pending, 0);
+
+ file->private_data = chip;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_open);
+
+/*
+ * Called on file close
+ */
+int tpm_release(struct inode *inode, struct file *file)
+{
+ struct tpm_chip *chip = file->private_data;
+
+ del_singleshot_timer_sync(&chip->user_read_timer);
+ flush_work_sync(&chip->work);
+ file->private_data = NULL;
+ atomic_set(&chip->data_pending, 0);
+ kfree(chip->data_buffer);
+ clear_bit(0, &chip->is_open);
+ put_device(chip->dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_release);
+
+ssize_t tpm_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *off)
+{
+ struct tpm_chip *chip = file->private_data;
+ size_t in_size = size;
+ ssize_t out_size;
+
+ /* cannot perform a write until the read has cleared
+ either via tpm_read or a user_read_timer timeout */
+ while (atomic_read(&chip->data_pending) != 0)
+ msleep(TPM_TIMEOUT);
+
+ mutex_lock(&chip->buffer_mutex);
+
+ if (in_size > TPM_BUFSIZE)
+ in_size = TPM_BUFSIZE;
+
+ if (copy_from_user
+ (chip->data_buffer, (void __user *) buf, in_size)) {
+ mutex_unlock(&chip->buffer_mutex);
+ return -EFAULT;
+ }
+
+ /* atomic tpm command send and result receive */
+ out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
+
+ atomic_set(&chip->data_pending, out_size);
+ mutex_unlock(&chip->buffer_mutex);
+
+ /* Set a timeout by which the reader must come claim the result */
+ mod_timer(&chip->user_read_timer, jiffies + (60 * HZ));
+
+ return in_size;
+}
+EXPORT_SYMBOL_GPL(tpm_write);
+
+ssize_t tpm_read(struct file *file, char __user *buf,
+ size_t size, loff_t *off)
+{
+ struct tpm_chip *chip = file->private_data;
+ ssize_t ret_size;
+ int rc;
+
+ del_singleshot_timer_sync(&chip->user_read_timer);
+ flush_work_sync(&chip->work);
+ ret_size = atomic_read(&chip->data_pending);
+ atomic_set(&chip->data_pending, 0);
+ if (ret_size > 0) { /* relay data */
+ if (size < ret_size)
+ ret_size = size;
+
+ mutex_lock(&chip->buffer_mutex);
+ rc = copy_to_user(buf, chip->data_buffer, ret_size);
+ memset(chip->data_buffer, 0, ret_size);
+ if (rc)
+ ret_size = -EFAULT;
+
+ mutex_unlock(&chip->buffer_mutex);
+ }
+
+ return ret_size;
+}
+EXPORT_SYMBOL_GPL(tpm_read);
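+
+/*
+ * Together, tpm_write() and tpm_read() give user space a strict
+ * write-command/read-result protocol on the misc device. A hedged
+ * client sketch (the 10-byte command is the ContinueSelfTest ordinal
+ * used elsewhere in this file; /dev/tpm0 assumes the first chip):
+ *
+ * u8 cmd[] = { 0, 193, 0, 0, 0, 10, 0, 0, 0, 83 };
+ * u8 resp[4096];
+ * int fd = open("/dev/tpm0", O_RDWR);
+ *
+ * if (fd >= 0 && write(fd, cmd, sizeof(cmd)) == sizeof(cmd))
+ * read(fd, resp, sizeof(resp)); // must claim the result before
+ * // the 60 second timer clears it
+ */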
+
+void tpm_remove_hardware(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ if (chip == NULL) {
+ dev_err(dev, "No device data found\n");
+ return;
+ }
+
+ spin_lock(&driver_lock);
+ list_del_rcu(&chip->list);
+ spin_unlock(&driver_lock);
+ synchronize_rcu();
+
+ misc_deregister(&chip->vendor.miscdev);
+ sysfs_remove_group(&dev->kobj, chip->vendor.attr_group);
+ tpm_bios_log_teardown(chip->bios_dir);
+
+ /* write it this way to be explicit (chip->dev == dev) */
+ put_device(chip->dev);
+}
+EXPORT_SYMBOL_GPL(tpm_remove_hardware);
+
+#define TPM_ORD_SAVESTATE cpu_to_be32(152)
+#define SAVESTATE_RESULT_SIZE 10
+
+static struct tpm_input_header savestate_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(10),
+ .ordinal = TPM_ORD_SAVESTATE
+};
+
+/*
+ * We are about to suspend. Save the TPM state
+ * so that it can be restored.
+ */
+int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_cmd_t cmd;
+ int rc;
+
+ u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
+
+ if (chip == NULL)
+ return -ENODEV;
+
+ /* for buggy tpm, flush pcrs with extend to selected dummy */
+ if (tpm_suspend_pcr) {
+ cmd.header.in = pcrextend_header;
+ cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
+ memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
+ TPM_DIGEST_SIZE);
+ rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
+ "extending dummy pcr before suspend");
+ }
+
+ /* now do the actual savestate */
+ cmd.header.in = savestate_header;
+ rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE,
+ "sending savestate before suspend");
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_pm_suspend);
+
+/*
+ * Resume from a power-saving state. The BIOS already restored
+ * the TPM state.
+ */
+int tpm_pm_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ if (chip == NULL)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_pm_resume);
+
+/* In case vendor provided release function, call it too.*/
+
+void tpm_dev_vendor_release(struct tpm_chip *chip)
+{
+ if (chip->vendor.release)
+ chip->vendor.release(chip->dev);
+
+ clear_bit(chip->dev_num, dev_mask);
+ kfree(chip->vendor.miscdev.name);
+}
+EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);
+
+
+/*
+ * Once all references to platform device are down to 0,
+ * release all allocated structures.
+ */
+void tpm_dev_release(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ tpm_dev_vendor_release(chip);
+
+ chip->release(dev);
+ kfree(chip);
+}
+EXPORT_SYMBOL_GPL(tpm_dev_release);
+
+/*
+ * Called from tpm_<specific>.c probe function only for devices
+ * the driver has determined it should claim. Prior to calling
+ * this function the specific probe function has called pci_enable_device;
+ * upon errant exit from this function the specific probe function should
+ * call pci_disable_device.
+ */
+struct tpm_chip *tpm_register_hardware(struct device *dev,
+ const struct tpm_vendor_specific *entry)
+{
+#define DEVNAME_SIZE 7
+
+ char *devname;
+ struct tpm_chip *chip;
+
+ /* Driver specific per-device data */
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL);
+
+ if (chip == NULL || devname == NULL)
+ goto out_free;
+
+ mutex_init(&chip->buffer_mutex);
+ mutex_init(&chip->tpm_mutex);
+ INIT_LIST_HEAD(&chip->list);
+
+ INIT_WORK(&chip->work, timeout_work);
+
+ setup_timer(&chip->user_read_timer, user_reader_timeout,
+ (unsigned long)chip);
+
+ memcpy(&chip->vendor, entry, sizeof(struct tpm_vendor_specific));
+
+ chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES);
+
+ if (chip->dev_num >= TPM_NUM_DEVICES) {
+ dev_err(dev, "No available tpm device numbers\n");
+ goto out_free;
+ } else if (chip->dev_num == 0)
+ chip->vendor.miscdev.minor = TPM_MINOR;
+ else
+ chip->vendor.miscdev.minor = MISC_DYNAMIC_MINOR;
+
+ set_bit(chip->dev_num, dev_mask);
+
+ scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
+ chip->vendor.miscdev.name = devname;
+
+ chip->vendor.miscdev.parent = dev;
+ chip->dev = get_device(dev);
+ chip->release = dev->release;
+ dev->release = tpm_dev_release;
+ dev_set_drvdata(dev, chip);
+
+ if (misc_register(&chip->vendor.miscdev)) {
+ dev_err(chip->dev,
+ "unable to misc_register %s, minor %d\n",
+ chip->vendor.miscdev.name,
+ chip->vendor.miscdev.minor);
+ put_device(chip->dev);
+ return NULL;
+ }
+
+ if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
+ misc_deregister(&chip->vendor.miscdev);
+ put_device(chip->dev);
+
+ return NULL;
+ }
+
+ chip->bios_dir = tpm_bios_log_setup(devname);
+
+ /* Make chip available */
+ spin_lock(&driver_lock);
+ list_add_rcu(&chip->list, &tpm_chip_list);
+ spin_unlock(&driver_lock);
+
+ return chip;
+
+out_free:
+ kfree(chip);
+ kfree(devname);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(tpm_register_hardware);
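+
+/*
+ * A hedged sketch of a vendor probe using the call above
+ * (tpm_vendor_ops stands in for a driver's tpm_vendor_specific table):
+ *
+ * chip = tpm_register_hardware(&pci_dev->dev, &tpm_vendor_ops);
+ * if (!chip) {
+ * pci_disable_device(pci_dev);
+ * return -ENODEV;
+ * }
+ * tpm_get_timeouts(chip);
+ */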
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
new file mode 100644
index 00000000..72ddb031
--- /dev/null
+++ b/drivers/char/tpm/tpm.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/tpm.h>
+
+enum tpm_timeout {
+ TPM_TIMEOUT = 5, /* msecs */
+};
+
+/* TPM addresses */
+enum tpm_addr {
+ TPM_SUPERIO_ADDR = 0x2E,
+ TPM_ADDR = 0x4E,
+};
+
+extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
+ char *);
+extern ssize_t tpm_show_pcrs(struct device *, struct device_attribute *attr,
+ char *);
+extern ssize_t tpm_show_caps(struct device *, struct device_attribute *attr,
+ char *);
+extern ssize_t tpm_show_caps_1_2(struct device *, struct device_attribute *attr,
+ char *);
+extern ssize_t tpm_store_cancel(struct device *, struct device_attribute *attr,
+ const char *, size_t);
+extern ssize_t tpm_show_enabled(struct device *, struct device_attribute *attr,
+ char *);
+extern ssize_t tpm_show_active(struct device *, struct device_attribute *attr,
+ char *);
+extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
+ char *);
+extern ssize_t tpm_show_temp_deactivated(struct device *,
+ struct device_attribute *attr, char *);
+
+struct tpm_chip;
+
+struct tpm_vendor_specific {
+ const u8 req_complete_mask;
+ const u8 req_complete_val;
+ const u8 req_canceled;
+ void __iomem *iobase; /* ioremapped address */
+ unsigned long base; /* TPM base address */
+
+ int irq;
+
+ int region_size;
+ int have_region;
+
+ int (*recv) (struct tpm_chip *, u8 *, size_t);
+ int (*send) (struct tpm_chip *, u8 *, size_t);
+ void (*cancel) (struct tpm_chip *);
+ u8 (*status) (struct tpm_chip *);
+ void (*release) (struct device *);
+ struct miscdevice miscdev;
+ struct attribute_group *attr_group;
+ struct list_head list;
+ int locality;
+ unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */
+ unsigned long duration[3]; /* jiffies */
+
+ wait_queue_head_t read_queue;
+ wait_queue_head_t int_queue;
+};
+
+struct tpm_chip {
+ struct device *dev; /* Device stuff */
+
+ int dev_num; /* /dev/tpm# */
+ unsigned long is_open; /* only one allowed */
+ int time_expired;
+
+ /* Data passed to and from the tpm via the read/write calls */
+ u8 *data_buffer;
+ atomic_t data_pending;
+ struct mutex buffer_mutex;
+
+ struct timer_list user_read_timer; /* user needs to claim result */
+ struct work_struct work;
+ struct mutex tpm_mutex; /* tpm is processing */
+
+ struct tpm_vendor_specific vendor;
+
+ struct dentry **bios_dir;
+
+ struct list_head list;
+ void (*release) (struct device *);
+};
+
+#define to_tpm_chip(n) container_of(n, struct tpm_chip, vendor)
+
+static inline void tpm_chip_put(struct tpm_chip *chip)
+{
+ module_put(chip->dev->driver->owner);
+}
+
+static inline int tpm_read_index(int base, int index)
+{
+ outb(index, base);
+ return inb(base+1) & 0xFF;
+}
+
+static inline void tpm_write_index(int base, int index, int value)
+{
+ outb(index, base);
+ outb(value & 0xFF, base+1);
+}
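+
+/*
+ * These helpers drive a classic index/data register pair. A hedged
+ * example against the TPM_ADDR port defined above (index 0x20 is
+ * purely hypothetical):
+ *
+ * tpm_write_index(TPM_ADDR, 0x20, 0x01);
+ * value = tpm_read_index(TPM_ADDR, 0x20);
+ */
+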
+struct tpm_input_header {
+ __be16 tag;
+ __be32 length;
+ __be32 ordinal;
+}__attribute__((packed));
+
+struct tpm_output_header {
+ __be16 tag;
+ __be32 length;
+ __be32 return_code;
+}__attribute__((packed));
+
+struct stclear_flags_t {
+ __be16 tag;
+ u8 deactivated;
+ u8 disableForceClear;
+ u8 physicalPresence;
+ u8 physicalPresenceLock;
+ u8 bGlobalLock;
+}__attribute__((packed));
+
+struct tpm_version_t {
+ u8 Major;
+ u8 Minor;
+ u8 revMajor;
+ u8 revMinor;
+}__attribute__((packed));
+
+struct tpm_version_1_2_t {
+ __be16 tag;
+ u8 Major;
+ u8 Minor;
+ u8 revMajor;
+ u8 revMinor;
+}__attribute__((packed));
+
+struct timeout_t {
+ __be32 a;
+ __be32 b;
+ __be32 c;
+ __be32 d;
+}__attribute__((packed));
+
+struct duration_t {
+ __be32 tpm_short;
+ __be32 tpm_medium;
+ __be32 tpm_long;
+}__attribute__((packed));
+
+struct permanent_flags_t {
+ __be16 tag;
+ u8 disable;
+ u8 ownership;
+ u8 deactivated;
+ u8 readPubek;
+ u8 disableOwnerClear;
+ u8 allowMaintenance;
+ u8 physicalPresenceLifetimeLock;
+ u8 physicalPresenceHWEnable;
+ u8 physicalPresenceCMDEnable;
+ u8 CEKPUsed;
+ u8 TPMpost;
+ u8 TPMpostLock;
+ u8 FIPS;
+ u8 operator;
+ u8 enableRevokeEK;
+ u8 nvLocked;
+ u8 readSRKPub;
+ u8 tpmEstablished;
+ u8 maintenanceDone;
+ u8 disableFullDALogicInfo;
+}__attribute__((packed));
+
+typedef union {
+ struct permanent_flags_t perm_flags;
+ struct stclear_flags_t stclear_flags;
+ bool owned;
+ __be32 num_pcrs;
+ struct tpm_version_t tpm_version;
+ struct tpm_version_1_2_t tpm_version_1_2;
+ __be32 manufacturer_id;
+ struct timeout_t timeout;
+ struct duration_t duration;
+} cap_t;
+
+struct tpm_getcap_params_in {
+ __be32 cap;
+ __be32 subcap_size;
+ __be32 subcap;
+}__attribute__((packed));
+
+struct tpm_getcap_params_out {
+ __be32 cap_size;
+ cap_t cap;
+}__attribute__((packed));
+
+struct tpm_readpubek_params_out {
+ u8 algorithm[4];
+ u8 encscheme[2];
+ u8 sigscheme[2];
+ __be32 paramsize;
+ u8 parameters[12]; /*assuming RSA*/
+ __be32 keysize;
+ u8 modulus[256];
+ u8 checksum[20];
+}__attribute__((packed));
+
+typedef union {
+ struct tpm_input_header in;
+ struct tpm_output_header out;
+} tpm_cmd_header;
+
+#define TPM_DIGEST_SIZE 20
+struct tpm_pcrread_out {
+ u8 pcr_result[TPM_DIGEST_SIZE];
+}__attribute__((packed));
+
+struct tpm_pcrread_in {
+ __be32 pcr_idx;
+}__attribute__((packed));
+
+struct tpm_pcrextend_in {
+ __be32 pcr_idx;
+ u8 hash[TPM_DIGEST_SIZE];
+}__attribute__((packed));
+
+typedef union {
+ struct tpm_getcap_params_out getcap_out;
+ struct tpm_readpubek_params_out readpubek_out;
+ u8 readpubek_out_buffer[sizeof(struct tpm_readpubek_params_out)];
+ struct tpm_getcap_params_in getcap_in;
+ struct tpm_pcrread_in pcrread_in;
+ struct tpm_pcrread_out pcrread_out;
+ struct tpm_pcrextend_in pcrextend_in;
+} tpm_cmd_params;
+
+struct tpm_cmd_t {
+ tpm_cmd_header header;
+ tpm_cmd_params params;
+}__attribute__((packed));
+
+ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *);
+
+extern void tpm_get_timeouts(struct tpm_chip *);
+extern void tpm_gen_interrupt(struct tpm_chip *);
+extern void tpm_continue_selftest(struct tpm_chip *);
+extern unsigned long tpm_calc_ordinal_duration(struct tpm_chip *, u32);
+extern struct tpm_chip* tpm_register_hardware(struct device *,
+ const struct tpm_vendor_specific *);
+extern int tpm_open(struct inode *, struct file *);
+extern int tpm_release(struct inode *, struct file *);
+extern void tpm_dev_vendor_release(struct tpm_chip *);
+extern ssize_t tpm_write(struct file *, const char __user *, size_t,
+ loff_t *);
+extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *);
+extern void tpm_remove_hardware(struct device *);
+extern int tpm_pm_suspend(struct device *, pm_message_t);
+extern int tpm_pm_resume(struct device *);
+
+#ifdef CONFIG_ACPI
+extern struct dentry **tpm_bios_log_setup(char *);
+extern void tpm_bios_log_teardown(struct dentry **);
+#else
+static inline struct dentry **tpm_bios_log_setup(char *name)
+{
+ return NULL;
+}
+static inline void tpm_bios_log_teardown(struct dentry **dir)
+{
+}
+#endif
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
new file mode 100644
index 00000000..c64a1bc6
--- /dev/null
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include "tpm.h"
+#include "tpm_atmel.h"
+
+/* write status bits */
+enum tpm_atmel_write_status {
+ ATML_STATUS_ABORT = 0x01,
+ ATML_STATUS_LASTBYTE = 0x04
+};
+/* read status bits */
+enum tpm_atmel_read_status {
+ ATML_STATUS_BUSY = 0x01,
+ ATML_STATUS_DATA_AVAIL = 0x02,
+ ATML_STATUS_REWRITE = 0x04,
+ ATML_STATUS_READY = 0x08
+};
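+
+/*
+ * The Atmel part exposes just two registers: the data FIFO at
+ * iobase + 0 and the status/command byte at iobase + 1 (read status,
+ * write command), which is how the accessors below address it.
+ */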
+
+static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ u8 status, *hdr = buf;
+ u32 size;
+ int i;
+ __be32 *native_size;
+
+ /* start reading header */
+ if (count < 6)
+ return -EIO;
+
+ for (i = 0; i < 6; i++) {
+ status = ioread8(chip->vendor.iobase + 1);
+ if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+ dev_err(chip->dev, "error reading header\n");
+ return -EIO;
+ }
+ *buf++ = ioread8(chip->vendor.iobase);
+ }
+
+ /* size of the data received */
+ native_size = (__force __be32 *) (hdr + 2);
+ size = be32_to_cpu(*native_size);
+
+ if (count < size) {
+ dev_err(chip->dev,
+ "Recv size(%d) less than available space\n", size);
+ for (; i < size; i++) { /* clear the waiting data anyway */
+ status = ioread8(chip->vendor.iobase + 1);
+ if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+ dev_err(chip->dev, "error reading data\n");
+ return -EIO;
+ }
+ }
+ return -EIO;
+ }
+
+ /* read all the data available */
+ for (; i < size; i++) {
+ status = ioread8(chip->vendor.iobase + 1);
+ if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+ dev_err(chip->dev, "error reading data\n");
+ return -EIO;
+ }
+ *buf++ = ioread8(chip->vendor.iobase);
+ }
+
+ /* make sure data available is gone */
+ status = ioread8(chip->vendor.iobase + 1);
+
+ if (status & ATML_STATUS_DATA_AVAIL) {
+ dev_err(chip->dev, "data available is stuck\n");
+ return -EIO;
+ }
+
+ return size;
+}
+
+static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ int i;
+
+ dev_dbg(chip->dev, "tpm_atml_send:\n");
+ for (i = 0; i < count; i++) {
+ dev_dbg(chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
+ iowrite8(buf[i], chip->vendor.iobase);
+ }
+
+ return count;
+}
+
+static void tpm_atml_cancel(struct tpm_chip *chip)
+{
+ iowrite8(ATML_STATUS_ABORT, chip->vendor.iobase + 1);
+}
+
+static u8 tpm_atml_status(struct tpm_chip *chip)
+{
+ return ioread8(chip->vendor.iobase + 1);
+}
+
+static const struct file_operations atmel_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpm_open,
+ .read = tpm_read,
+ .write = tpm_write,
+ .release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+
+static struct attribute *atmel_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr,
+ NULL,
+};
+
+static struct attribute_group atmel_attr_grp = { .attrs = atmel_attrs };
+
+static const struct tpm_vendor_specific tpm_atmel = {
+ .recv = tpm_atml_recv,
+ .send = tpm_atml_send,
+ .cancel = tpm_atml_cancel,
+ .status = tpm_atml_status,
+ .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
+ .req_complete_val = ATML_STATUS_DATA_AVAIL,
+ .req_canceled = ATML_STATUS_READY,
+ .attr_group = &atmel_attr_grp,
+ .miscdev = { .fops = &atmel_ops, },
+};
+
+static struct platform_device *pdev;
+
+static void atml_plat_remove(void)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ if (chip) {
+ if (chip->vendor.have_region)
+ atmel_release_region(chip->vendor.base,
+ chip->vendor.region_size);
+ atmel_put_base_addr(chip->vendor.iobase);
+ tpm_remove_hardware(chip->dev);
+ platform_device_unregister(pdev);
+ }
+}
+
+static int tpm_atml_suspend(struct platform_device *dev, pm_message_t msg)
+{
+ return tpm_pm_suspend(&dev->dev, msg);
+}
+
+static int tpm_atml_resume(struct platform_device *dev)
+{
+ return tpm_pm_resume(&dev->dev);
+}
+
+static struct platform_driver atml_drv = {
+ .driver = {
+ .name = "tpm_atmel",
+ .owner = THIS_MODULE,
+ },
+ .suspend = tpm_atml_suspend,
+ .resume = tpm_atml_resume,
+};
+
+static int __init init_atmel(void)
+{
+ int rc = 0;
+ void __iomem *iobase = NULL;
+ int have_region, region_size;
+ unsigned long base;
+ struct tpm_chip *chip;
+
+ rc = platform_driver_register(&atml_drv);
+ if (rc)
+ return rc;
+
+ if ((iobase = atmel_get_base_addr(&base, &region_size)) == NULL) {
+ rc = -ENODEV;
+ goto err_unreg_drv;
+ }
+
+ have_region =
+ (atmel_request_region(base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
+
+ pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ rc = PTR_ERR(pdev);
+ goto err_rel_reg;
+ }
+
+ if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_atmel))) {
+ rc = -ENODEV;
+ goto err_unreg_dev;
+ }
+
+ chip->vendor.iobase = iobase;
+ chip->vendor.base = base;
+ chip->vendor.have_region = have_region;
+ chip->vendor.region_size = region_size;
+
+ return 0;
+
+err_unreg_dev:
+ platform_device_unregister(pdev);
+err_rel_reg:
+ atmel_put_base_addr(iobase);
+ if (have_region)
+ atmel_release_region(base, region_size);
+err_unreg_drv:
+ platform_driver_unregister(&atml_drv);
+ return rc;
+}
+
+static void __exit cleanup_atmel(void)
+{
+ platform_driver_unregister(&atml_drv);
+ atml_plat_remove();
+}
+
+module_init(init_atmel);
+module_exit(cleanup_atmel);
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h
new file mode 100644
index 00000000..6c831f94
--- /dev/null
+++ b/drivers/char/tpm/tpm_atmel.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Authors:
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * These differences are required on POWER because the device must be
+ * discovered through the device tree and iomap must be used to get
+ * around the need for holes in the io_page_mask. This does not happen
+ * automatically because the TPM is not a normal PCI device and lives
+ * under the root node.
+ *
+ */
+
+#ifdef CONFIG_PPC64
+
+#include <asm/prom.h>
+
+#define atmel_getb(chip, offset) readb(chip->vendor.iobase + offset)
+#define atmel_putb(val, chip, offset) writeb(val, chip->vendor.iobase + offset)
+#define atmel_request_region request_mem_region
+#define atmel_release_region release_mem_region
+
+static inline void atmel_put_base_addr(void __iomem *iobase)
+{
+ iounmap(iobase);
+}
+
+static void __iomem *atmel_get_base_addr(unsigned long *base, int *region_size)
+{
+ struct device_node *dn;
+ unsigned long address, size;
+ const unsigned int *reg;
+ int reglen;
+ int naddrc;
+ int nsizec;
+
+ dn = of_find_node_by_name(NULL, "tpm");
+
+ if (!dn)
+ return NULL;
+
+ if (!of_device_is_compatible(dn, "AT97SC3201")) {
+ of_node_put(dn);
+ return NULL;
+ }
+
+ reg = of_get_property(dn, "reg", &reglen);
+ naddrc = of_n_addr_cells(dn);
+ nsizec = of_n_size_cells(dn);
+
+ of_node_put(dn);
+
+ if (!reg)
+ return NULL;
+
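+ /*
+  * Decode the "reg" property: when #address-cells is 2, the 64-bit
+  * address is split across two 32-bit cells (and likewise for the
+  * size cells below); otherwise a single cell holds the value.
+  */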
+ if (naddrc == 2)
+ address = ((unsigned long) reg[0] << 32) | reg[1];
+ else
+ address = reg[0];
+
+ if (nsizec == 2)
+ size = ((unsigned long) reg[naddrc] << 32) | reg[naddrc + 1];
+ else
+ size = reg[naddrc];
+
+ *base = address;
+ *region_size = size;
+ return ioremap(*base, *region_size);
+}
+#else
+#define atmel_getb(chip, offset) inb(chip->vendor->base + offset)
+#define atmel_putb(val, chip, offset) outb(val, chip->vendor->base + offset)
+#define atmel_request_region request_region
+#define atmel_release_region release_region
+/* Atmel definitions */
+enum tpm_atmel_addr {
+ TPM_ATMEL_BASE_ADDR_LO = 0x08,
+ TPM_ATMEL_BASE_ADDR_HI = 0x09
+};
+
+/* Verify this is a 1.1 Atmel TPM */
+static int atmel_verify_tpm11(void)
+{
+ /* verify that it is an Atmel part */
+ if (tpm_read_index(TPM_ADDR, 4) != 'A' ||
+ tpm_read_index(TPM_ADDR, 5) != 'T' ||
+ tpm_read_index(TPM_ADDR, 6) != 'M' ||
+ tpm_read_index(TPM_ADDR, 7) != 'L')
+ return 1;
+
+ /* query chip for its version number */
+ if (tpm_read_index(TPM_ADDR, 0x00) != 1 ||
+ tpm_read_index(TPM_ADDR, 0x01) != 1)
+ return 1;
+
+ /* This is an atmel supported part */
+ return 0;
+}
+
+static inline void atmel_put_base_addr(void __iomem *iobase)
+{
+}
+
+/* Determine where to talk to device */
+static void __iomem *atmel_get_base_addr(unsigned long *base, int *region_size)
+{
+ int lo, hi;
+
+ if (atmel_verify_tpm11() != 0)
+ return NULL;
+
+ lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
+ hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
+
+ *base = (hi << 8) | lo;
+ *region_size = 2;
+
+ return ioport_map(*base, *region_size);
+}
+#endif
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
new file mode 100644
index 00000000..0636520f
--- /dev/null
+++ b/drivers/char/tpm/tpm_bios.c
@@ -0,0 +1,556 @@
+/*
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Authors:
+ * Seiji Munetoh <munetoh@jp.ibm.com>
+ * Stefan Berger <stefanb@us.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Access to the eventlog extended by the TCG BIOS of PC platform
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <acpi/acpi.h>
+#include "tpm.h"
+
+#define TCG_EVENT_NAME_LEN_MAX 255
+#define MAX_TEXT_EVENT 1000 /* Max event string length */
+#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 = 'TCPA' */
+
+enum bios_platform_class {
+ BIOS_CLIENT = 0x00,
+ BIOS_SERVER = 0x01,
+};
+
+struct tpm_bios_log {
+ void *bios_event_log;
+ void *bios_event_log_end;
+};
+
+struct acpi_tcpa {
+ struct acpi_table_header hdr;
+ u16 platform_class;
+ union {
+ struct client_hdr {
+ u32 log_max_len __attribute__ ((packed));
+ u64 log_start_addr __attribute__ ((packed));
+ } client;
+ struct server_hdr {
+ u16 reserved;
+ u64 log_max_len __attribute__ ((packed));
+ u64 log_start_addr __attribute__ ((packed));
+ } server;
+ };
+};
+
+struct tcpa_event {
+ u32 pcr_index;
+ u32 event_type;
+ u8 pcr_value[20]; /* SHA1 */
+ u32 event_size;
+ u8 event_data[0];
+};
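+
+/*
+ * The event log is a packed sequence of variable-length records: each
+ * tcpa_event header is immediately followed by event_size bytes of
+ * event data, so the next record starts at
+ *
+ *   addr + sizeof(struct tcpa_event) + event->event_size
+ *
+ * which is exactly how the seq_file iterators below walk the log.
+ */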
+
+enum tcpa_event_types {
+ PREBOOT = 0,
+ POST_CODE,
+ UNUSED,
+ NO_ACTION,
+ SEPARATOR,
+ ACTION,
+ EVENT_TAG,
+ SCRTM_CONTENTS,
+ SCRTM_VERSION,
+ CPU_MICROCODE,
+ PLATFORM_CONFIG_FLAGS,
+ TABLE_OF_DEVICES,
+ COMPACT_HASH,
+ IPL,
+ IPL_PARTITION_DATA,
+ NONHOST_CODE,
+ NONHOST_CONFIG,
+ NONHOST_INFO,
+};
+
+static const char *tcpa_event_type_strings[] = {
+ "PREBOOT",
+ "POST CODE",
+ "",
+ "NO ACTION",
+ "SEPARATOR",
+ "ACTION",
+ "EVENT TAG",
+ "S-CRTM Contents",
+ "S-CRTM Version",
+ "CPU Microcode",
+ "Platform Config Flags",
+ "Table of Devices",
+ "Compact Hash",
+ "IPL",
+ "IPL Partition Data",
+ "Non-Host Code",
+ "Non-Host Config",
+ "Non-Host Info"
+};
+
+struct tcpa_pc_event {
+ u32 event_id;
+ u32 event_size;
+ u8 event_data[0];
+};
+
+enum tcpa_pc_event_ids {
+ SMBIOS = 1,
+ BIS_CERT,
+ POST_BIOS_ROM,
+ ESCD,
+ CMOS,
+ NVRAM,
+ OPTION_ROM_EXEC,
+ OPTION_ROM_CONFIG,
+ OPTION_ROM_MICROCODE = 10,
+ S_CRTM_VERSION,
+ S_CRTM_CONTENTS,
+ POST_CONTENTS,
+ HOST_TABLE_OF_DEVICES,
+};
+
+static const char *tcpa_pc_event_id_strings[] = {
+ "",
+ "SMBIOS",
+ "BIS Certificate",
+ "POST BIOS ",
+ "ESCD ",
+ "CMOS",
+ "NVRAM",
+ "Option ROM",
+ "Option ROM config",
+ "",
+ "Option ROM microcode ",
+ "S-CRTM Version",
+ "S-CRTM Contents ",
+ "POST Contents ",
+ "Table of Devices",
+};
+
+/* returns a pointer to the entry at *pos in the TCG event log */
+static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t i;
+ struct tpm_bios_log *log = m->private;
+ void *addr = log->bios_event_log;
+ void *limit = log->bios_event_log_end;
+ struct tcpa_event *event;
+
+ /* read over *pos measurements */
+ for (i = 0; i < *pos; i++) {
+ event = addr;
+
+ if ((addr + sizeof(struct tcpa_event)) < limit) {
+ if (event->event_type == 0 && event->event_size == 0)
+ return NULL;
+ addr += sizeof(struct tcpa_event) + event->event_size;
+ }
+ }
+
+ /* now check if current entry is valid */
+ if ((addr + sizeof(struct tcpa_event)) >= limit)
+ return NULL;
+
+ event = addr;
+
+ if ((event->event_type == 0 && event->event_size == 0) ||
+ ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
+ return NULL;
+
+ return addr;
+}
+
+static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
+ loff_t *pos)
+{
+ struct tcpa_event *event = v;
+ struct tpm_bios_log *log = m->private;
+ void *limit = log->bios_event_log_end;
+
+ v += sizeof(struct tcpa_event) + event->event_size;
+
+ /* now check if current entry is valid */
+ if ((v + sizeof(struct tcpa_event)) >= limit)
+ return NULL;
+
+ event = v;
+
+ if ((event->event_type == 0 && event->event_size == 0) ||
+ ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
+ return NULL;
+
+ (*pos)++;
+ return v;
+}
+
+static void tpm_bios_measurements_stop(struct seq_file *m, void *v)
+{
+}
+
+static int get_event_name(char *dest, struct tcpa_event *event,
+ unsigned char *event_entry)
+{
+ const char *name = "";
+ /* 41 so there is room for 40 data and 1 nul */
+ char data[41] = "";
+ int i, n_len = 0, d_len = 0;
+ struct tcpa_pc_event *pc_event;
+
+ switch(event->event_type) {
+ case PREBOOT:
+ case POST_CODE:
+ case UNUSED:
+ case NO_ACTION:
+ case SCRTM_CONTENTS:
+ case SCRTM_VERSION:
+ case CPU_MICROCODE:
+ case PLATFORM_CONFIG_FLAGS:
+ case TABLE_OF_DEVICES:
+ case COMPACT_HASH:
+ case IPL:
+ case IPL_PARTITION_DATA:
+ case NONHOST_CODE:
+ case NONHOST_CONFIG:
+ case NONHOST_INFO:
+ name = tcpa_event_type_strings[event->event_type];
+ n_len = strlen(name);
+ break;
+ case SEPARATOR:
+ case ACTION:
+ if (MAX_TEXT_EVENT > event->event_size) {
+ name = event_entry;
+ n_len = event->event_size;
+ }
+ break;
+ case EVENT_TAG:
+ pc_event = (struct tcpa_pc_event *)event_entry;
+
+ /* TODO: raw data -> base64 */
+
+ switch (pc_event->event_id) {
+ case SMBIOS:
+ case BIS_CERT:
+ case CMOS:
+ case NVRAM:
+ case OPTION_ROM_EXEC:
+ case OPTION_ROM_CONFIG:
+ case S_CRTM_VERSION:
+ name = tcpa_pc_event_id_strings[pc_event->event_id];
+ n_len = strlen(name);
+ break;
+ /* hash data */
+ case POST_BIOS_ROM:
+ case ESCD:
+ case OPTION_ROM_MICROCODE:
+ case S_CRTM_CONTENTS:
+ case POST_CONTENTS:
+ name = tcpa_pc_event_id_strings[pc_event->event_id];
+ n_len = strlen(name);
+ for (i = 0; i < 20; i++)
+ d_len += sprintf(&data[2*i], "%02x",
+ pc_event->event_data[i]);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return snprintf(dest, MAX_TEXT_EVENT, "[%.*s%.*s]",
+ n_len, name, d_len, data);
+
+}
+
+static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
+{
+ struct tcpa_event *event = v;
+ char *data = v;
+ int i;
+
+ for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
+ seq_putc(m, data[i]);
+
+ return 0;
+}
+
+static int tpm_bios_measurements_release(struct inode *inode,
+ struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct tpm_bios_log *log = seq->private;
+
+ if (log) {
+ kfree(log->bios_event_log);
+ kfree(log);
+ }
+
+ return seq_release(inode, file);
+}
+
+static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
+{
+ int len = 0;
+ int i;
+ char *eventname;
+ struct tcpa_event *event = v;
+ unsigned char *event_entry =
+ (unsigned char *) (v + sizeof(struct tcpa_event));
+
+ eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
+ if (!eventname) {
+ printk(KERN_ERR "%s: ERROR - no memory for event name\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ seq_printf(m, "%2d ", event->pcr_index);
+
+ /* 2nd: SHA1 */
+ for (i = 0; i < 20; i++)
+ seq_printf(m, "%02x", event->pcr_value[i]);
+
+ /* 3rd: event type identifier */
+ seq_printf(m, " %02x", event->event_type);
+
+ len += get_event_name(eventname, event, event_entry);
+
+ /* 4th: eventname <= max + '\0' delimiter */
+ seq_printf(m, " %s\n", eventname);
+
+ kfree(eventname);
+ return 0;
+}
+
+static const struct seq_operations tpm_ascii_b_measurements_seqops = {
+ .start = tpm_bios_measurements_start,
+ .next = tpm_bios_measurements_next,
+ .stop = tpm_bios_measurements_stop,
+ .show = tpm_ascii_bios_measurements_show,
+};
+
+static const struct seq_operations tpm_binary_b_measurements_seqops = {
+ .start = tpm_bios_measurements_start,
+ .next = tpm_bios_measurements_next,
+ .stop = tpm_bios_measurements_stop,
+ .show = tpm_binary_bios_measurements_show,
+};
+
+/* read binary bios log */
+static int read_log(struct tpm_bios_log *log)
+{
+ struct acpi_tcpa *buff;
+ acpi_status status;
+ struct acpi_table_header *virt;
+ u64 len, start;
+
+ if (log->bios_event_log != NULL) {
+ printk(KERN_ERR
+ "%s: ERROR - Eventlog already initialized\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
+ status = acpi_get_table(ACPI_SIG_TCPA, 1,
+ (struct acpi_table_header **)&buff);
+
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
+ __func__);
+ return -EIO;
+ }
+
+ switch(buff->platform_class) {
+ case BIOS_SERVER:
+ len = buff->server.log_max_len;
+ start = buff->server.log_start_addr;
+ break;
+ case BIOS_CLIENT:
+ default:
+ len = buff->client.log_max_len;
+ start = buff->client.log_start_addr;
+ break;
+ }
+ if (!len) {
+ printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
+ return -EIO;
+ }
+
+ /* malloc EventLog space */
+ log->bios_event_log = kmalloc(len, GFP_KERNEL);
+ if (!log->bios_event_log) {
+ printk("%s: ERROR - Not enough Memory for BIOS measurements\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ log->bios_event_log_end = log->bios_event_log + len;
+
+ virt = acpi_os_map_memory(start, len);
+ if (!virt) {
+ kfree(log->bios_event_log);
+ printk(KERN_ERR "%s: ERROR - unable to map memory\n", __func__);
+ return -EIO;
+ }
+
+ memcpy(log->bios_event_log, virt, len);
+
+ acpi_os_unmap_memory(virt, len);
+ return 0;
+}
+
+static int tpm_ascii_bios_measurements_open(struct inode *inode,
+ struct file *file)
+{
+ int err;
+ struct tpm_bios_log *log;
+ struct seq_file *seq;
+
+ log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
+ if (!log)
+ return -ENOMEM;
+
+ if ((err = read_log(log)))
+ goto out_free;
+
+ /* now register seq file */
+ err = seq_open(file, &tpm_ascii_b_measurments_seqops);
+ if (!err) {
+ seq = file->private_data;
+ seq->private = log;
+ } else {
+ goto out_free;
+ }
+
+out:
+ return err;
+out_free:
+ kfree(log->bios_event_log);
+ kfree(log);
+ goto out;
+}
+
+static const struct file_operations tpm_ascii_bios_measurements_ops = {
+ .open = tpm_ascii_bios_measurements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = tpm_bios_measurements_release,
+};
+
+static int tpm_binary_bios_measurements_open(struct inode *inode,
+ struct file *file)
+{
+ int err;
+ struct tpm_bios_log *log;
+ struct seq_file *seq;
+
+ log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
+ if (!log)
+ return -ENOMEM;
+
+ if ((err = read_log(log)))
+ goto out_free;
+
+ /* now register seq file */
+ err = seq_open(file, &tpm_binary_b_measurments_seqops);
+ if (!err) {
+ seq = file->private_data;
+ seq->private = log;
+ } else {
+ goto out_free;
+ }
+
+out:
+ return err;
+out_free:
+ kfree(log->bios_event_log);
+ kfree(log);
+ goto out;
+}
+
+static const struct file_operations tpm_binary_bios_measurements_ops = {
+ .open = tpm_binary_bios_measurements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = tpm_bios_measurements_release,
+};
+
+static int is_bad(void *p)
+{
+ if (!p)
+ return 1;
+ if (IS_ERR(p) && (PTR_ERR(p) != -ENODEV))
+ return 1;
+ return 0;
+}
+
+struct dentry **tpm_bios_log_setup(char *name)
+{
+ struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file;
+
+ tpm_dir = securityfs_create_dir(name, NULL);
+ if (is_bad(tpm_dir))
+ goto out;
+
+ bin_file =
+ securityfs_create_file("binary_bios_measurements",
+ S_IRUSR | S_IRGRP, tpm_dir, NULL,
+ &tpm_binary_bios_measurements_ops);
+ if (is_bad(bin_file))
+ goto out_tpm;
+
+ ascii_file =
+ securityfs_create_file("ascii_bios_measurements",
+ S_IRUSR | S_IRGRP, tpm_dir, NULL,
+ &tpm_ascii_bios_measurements_ops);
+ if (is_bad(ascii_file))
+ goto out_bin;
+
+ ret = kmalloc(3 * sizeof(struct dentry *), GFP_KERNEL);
+ if (!ret)
+ goto out_ascii;
+
+ ret[0] = ascii_file;
+ ret[1] = bin_file;
+ ret[2] = tpm_dir;
+
+ return ret;
+
+out_ascii:
+ securityfs_remove(ascii_file);
+out_bin:
+ securityfs_remove(bin_file);
+out_tpm:
+ securityfs_remove(tpm_dir);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(tpm_bios_log_setup);
+
+void tpm_bios_log_teardown(struct dentry **lst)
+{
+ int i;
+
+ for (i = 0; i < 3; i++)
+ securityfs_remove(lst[i]);
+}
+EXPORT_SYMBOL_GPL(tpm_bios_log_teardown);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
new file mode 100644
index 00000000..76da32e1
--- /dev/null
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -0,0 +1,677 @@
+/*
+ * Description:
+ * Device Driver for the Infineon Technologies
+ * SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * Copyright (C) 2005, Marcel Selhorst <m.selhorst@sirrix.com>
+ * Sirrix AG - security technologies, http://www.sirrix.com and
+ * Applied Data Security Group, Ruhr-University Bochum, Germany
+ * Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/init.h>
+#include <linux/pnp.h>
+#include "tpm.h"
+
+/* Infineon-specific definitions */
+/* maximum number of WTX packets */
+#define TPM_MAX_WTX_PACKAGES 50
+/* msleep time between WTX packets */
+#define TPM_WTX_MSLEEP_TIME 20
+/* msleep time, i.e. the interval at which the status register is polled */
+#define TPM_MSLEEP_TIME 3
+/* maximum number of msleep() calls before a timeout is raised */
+#define TPM_MAX_TRIES 5000
+#define TPM_INFINEON_DEV_VEN_VALUE 0x15D1
+
+#define TPM_INF_IO_PORT 0x0
+#define TPM_INF_IO_MEM 0x1
+
+#define TPM_INF_ADDR 0x0
+#define TPM_INF_DATA 0x1
+
+struct tpm_inf_dev {
+ int iotype;
+
+ void __iomem *mem_base; /* MMIO ioremap'd addr */
+ unsigned long map_base; /* phys MMIO base */
+ unsigned long map_size; /* MMIO region size */
+ unsigned int index_off; /* index register offset */
+
+ unsigned int data_regs; /* Data registers */
+ unsigned int data_size;
+
+ unsigned int config_port; /* IO Port config index reg */
+ unsigned int config_size;
+};
+
+static struct tpm_inf_dev tpm_dev;
+
+static inline void tpm_data_out(unsigned char data, unsigned char offset)
+{
+ if (tpm_dev.iotype == TPM_INF_IO_PORT)
+ outb(data, tpm_dev.data_regs + offset);
+ else
+ writeb(data, tpm_dev.mem_base + tpm_dev.data_regs + offset);
+}
+
+static inline unsigned char tpm_data_in(unsigned char offset)
+{
+ if (tpm_dev.iotype == TPM_INF_IO_PORT)
+ return inb(tpm_dev.data_regs + offset);
+ else
+ return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset);
+}
+
+static inline void tpm_config_out(unsigned char data, unsigned char offset)
+{
+ if (tpm_dev.iotype == TPM_INF_IO_PORT)
+ outb(data, tpm_dev.config_port + offset);
+ else
+ writeb(data, tpm_dev.mem_base + tpm_dev.index_off + offset);
+}
+
+static inline unsigned char tpm_config_in(unsigned char offset)
+{
+ if (tpm_dev.iotype == TPM_INF_IO_PORT)
+ return inb(tpm_dev.config_port + offset);
+ else
+ return readb(tpm_dev.mem_base + tpm_dev.index_off + offset);
+}
+
+/* TPM header definitions */
+enum infineon_tpm_header {
+ TPM_VL_VER = 0x01,
+ TPM_VL_CHANNEL_CONTROL = 0x07,
+ TPM_VL_CHANNEL_PERSONALISATION = 0x0A,
+ TPM_VL_CHANNEL_TPM = 0x0B,
+ TPM_VL_CONTROL = 0x00,
+ TPM_INF_NAK = 0x15,
+ TPM_CTRL_WTX = 0x10,
+ TPM_CTRL_WTX_ABORT = 0x18,
+ TPM_CTRL_WTX_ABORT_ACK = 0x18,
+ TPM_CTRL_ERROR = 0x20,
+ TPM_CTRL_CHAININGACK = 0x40,
+ TPM_CTRL_CHAINING = 0x80,
+ TPM_CTRL_DATA = 0x04,
+ TPM_CTRL_DATA_CHA = 0x84,
+ TPM_CTRL_DATA_CHA_ACK = 0xC4
+};
+
+enum infineon_tpm_register {
+ WRFIFO = 0x00,
+ RDFIFO = 0x01,
+ STAT = 0x02,
+ CMD = 0x03
+};
+
+enum infineon_tpm_command_bits {
+ CMD_DIS = 0x00,
+ CMD_LP = 0x01,
+ CMD_RES = 0x02,
+ CMD_IRQC = 0x06
+};
+
+enum infineon_tpm_status_bits {
+ STAT_XFE = 0x00,
+ STAT_LPA = 0x01,
+ STAT_FOK = 0x02,
+ STAT_TOK = 0x03,
+ STAT_IRQA = 0x06,
+ STAT_RDA = 0x07
+};
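+
+/*
+ * Note: unlike the Atmel and NSC definitions elsewhere in this
+ * directory, these status values are bit positions, not masks; the
+ * code below tests them as (1 << STAT_xxx).
+ */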
+
+/* some outgoing values */
+enum infineon_tpm_values {
+ CHIP_ID1 = 0x20,
+ CHIP_ID2 = 0x21,
+ TPM_DAR = 0x30,
+ RESET_LP_IRQC_DISABLE = 0x41,
+ ENABLE_REGISTER_PAIR = 0x55,
+ IOLIMH = 0x60,
+ IOLIML = 0x61,
+ DISABLE_REGISTER_PAIR = 0xAA,
+ IDVENL = 0xF1,
+ IDVENH = 0xF2,
+ IDPDL = 0xF3,
+ IDPDH = 0xF4
+};
+
+static int number_of_wtx;
+
+static int empty_fifo(struct tpm_chip *chip, int clear_wrfifo)
+{
+ int status;
+ int check = 0;
+ int i;
+
+ if (clear_wrfifo) {
+ for (i = 0; i < 4096; i++) {
+ status = tpm_data_in(WRFIFO);
+ if (status == 0xff) {
+ if (check == 5)
+ break;
+ else
+ check++;
+ }
+ }
+ }
+ /* Note: Any values still in the TPM's FIFO are thrown away, since
+    there is no use for them. The TPM either answers immediately or
+    the command gets aborted anyway, so the leftover data is garbage.
+    It must still be drained, because stale bytes left in the read
+    FIFO would corrupt the next exchange with the TPM.
+  */
+ i = 0;
+ do {
+ status = tpm_data_in(RDFIFO);
+ status = tpm_data_in(STAT);
+ i++;
+ if (i == TPM_MAX_TRIES)
+ return -EIO;
+ } while ((status & (1 << STAT_RDA)) != 0);
+ return 0;
+}
+
+static int wait(struct tpm_chip *chip, int wait_for_bit)
+{
+ int status;
+ int i;
+ for (i = 0; i < TPM_MAX_TRIES; i++) {
+ status = tpm_data_in(STAT);
+ /* check the status-register if wait_for_bit is set */
+ if (status & 1 << wait_for_bit)
+ break;
+ msleep(TPM_MSLEEP_TIME);
+ }
+ if (i == TPM_MAX_TRIES) { /* timeout occurs */
+ if (wait_for_bit == STAT_XFE)
+ dev_err(chip->dev, "Timeout in wait(STAT_XFE)\n");
+ if (wait_for_bit == STAT_RDA)
+ dev_err(chip->dev, "Timeout in wait(STAT_RDA)\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
+{
+ wait(chip, STAT_XFE);
+ tpm_data_out(sendbyte, WRFIFO);
+}
+
+ /* Note: WTX means Waiting-Time-Extension. Whenever the TPM needs more
+    calculation time, it sends a WTX packet, which must be acknowledged
+    or aborted. This typically happens when the TPM is hammered with
+    key-creation requests. The maximum number of WTX packets is set in
+    the definitions above; once that number is reached, the extension
+    is denied and the TPM command has to be resent.
+  */
+
+static void tpm_wtx(struct tpm_chip *chip)
+{
+ number_of_wtx++;
+ dev_info(chip->dev, "Granting WTX (%02d / %02d)\n",
+ number_of_wtx, TPM_MAX_WTX_PACKAGES);
+ wait_and_send(chip, TPM_VL_VER);
+ wait_and_send(chip, TPM_CTRL_WTX);
+ wait_and_send(chip, 0x00);
+ wait_and_send(chip, 0x00);
+ msleep(TPM_WTX_MSLEEP_TIME);
+}
+
+static void tpm_wtx_abort(struct tpm_chip *chip)
+{
+ dev_info(chip->dev, "Aborting WTX\n");
+ wait_and_send(chip, TPM_VL_VER);
+ wait_and_send(chip, TPM_CTRL_WTX_ABORT);
+ wait_and_send(chip, 0x00);
+ wait_and_send(chip, 0x00);
+ number_of_wtx = 0;
+ msleep(TPM_WTX_MSLEEP_TIME);
+}
+
+static int tpm_inf_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ int i;
+ int ret;
+ u32 size = 0;
+ number_of_wtx = 0;
+
+recv_begin:
+ /* start receiving header */
+ for (i = 0; i < 4; i++) {
+ ret = wait(chip, STAT_RDA);
+ if (ret)
+ return -EIO;
+ buf[i] = tpm_data_in(RDFIFO);
+ }
+
+ if (buf[0] != TPM_VL_VER) {
+ dev_err(chip->dev,
+ "Wrong transport protocol implementation!\n");
+ return -EIO;
+ }
+
+ if (buf[1] == TPM_CTRL_DATA) {
+ /* size of the data received */
+ size = ((buf[2] << 8) | buf[3]);
+
+ for (i = 0; i < size; i++) {
+ wait(chip, STAT_RDA);
+ buf[i] = tpm_data_in(RDFIFO);
+ }
+
+ if ((size == 0x6D00) && (buf[1] == 0x80)) {
+ dev_err(chip->dev, "Error handling on vendor layer!\n");
+ return -EIO;
+ }
+
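+ /* strip the 6-byte data header (version, channel number and
+    32-bit length) that precedes the actual TPM response */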
+ for (i = 0; i < size; i++)
+ buf[i] = buf[i + 6];
+
+ size = size - 6;
+ return size;
+ }
+
+ if (buf[1] == TPM_CTRL_WTX) {
+ dev_info(chip->dev, "WTX-package received\n");
+ if (number_of_wtx < TPM_MAX_WTX_PACKAGES) {
+ tpm_wtx(chip);
+ goto recv_begin;
+ } else {
+ tpm_wtx_abort(chip);
+ goto recv_begin;
+ }
+ }
+
+ if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) {
+ dev_info(chip->dev, "WTX-abort acknowledged\n");
+ return size;
+ }
+
+ if (buf[1] == TPM_CTRL_ERROR) {
+ dev_err(chip->dev, "ERROR-package received:\n");
+ if (buf[4] == TPM_INF_NAK)
+ dev_err(chip->dev,
+ "-> Negative acknowledgement"
+ " - retransmit command!\n");
+ return -EIO;
+ }
+ return -EIO;
+}
+
+static int tpm_inf_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ int i;
+ int ret;
+ u8 count_high, count_low, count_4, count_3, count_2, count_1;
+
+ /* Disabling Reset, LP and IRQC */
+ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+
+ ret = empty_fifo(chip, 1);
+ if (ret) {
+ dev_err(chip->dev, "Timeout while clearing FIFO\n");
+ return -EIO;
+ }
+
+ ret = wait(chip, STAT_XFE);
+ if (ret)
+ return -EIO;
+
+ count_4 = (count & 0xff000000) >> 24;
+ count_3 = (count & 0x00ff0000) >> 16;
+ count_2 = (count & 0x0000ff00) >> 8;
+ count_1 = (count & 0x000000ff);
+ count_high = ((count + 6) & 0xffffff00) >> 8;
+ count_low = ((count + 6) & 0x000000ff);
+
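+ /*
+  * The vendor-layer frame assembled below looks like this:
+  *
+  *   [TPM_VL_VER][TPM_CTRL_DATA][count+6 hi][count+6 lo]  transport header
+  *   [TPM_VL_VER][TPM_VL_CHANNEL_TPM][count_4..count_1]   data header
+  *   [count bytes of TPM command]                         payload
+  *
+  * i.e. the transport length covers the 6-byte data header plus the
+  * payload itself.
+  */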
+ /* Sending Header */
+ wait_and_send(chip, TPM_VL_VER);
+ wait_and_send(chip, TPM_CTRL_DATA);
+ wait_and_send(chip, count_high);
+ wait_and_send(chip, count_low);
+
+ /* Sending Data Header */
+ wait_and_send(chip, TPM_VL_VER);
+ wait_and_send(chip, TPM_VL_CHANNEL_TPM);
+ wait_and_send(chip, count_4);
+ wait_and_send(chip, count_3);
+ wait_and_send(chip, count_2);
+ wait_and_send(chip, count_1);
+
+ /* Sending Data */
+ for (i = 0; i < count; i++) {
+ wait_and_send(chip, buf[i]);
+ }
+ return count;
+}
+
+static void tpm_inf_cancel(struct tpm_chip *chip)
+{
+ /*
+    Since we are using the legacy mode to communicate with the TPM,
+    we have no cancel function; the WTX-abort mechanism above serves
+    as a workaround for interrupting the TPM.
+  */
+}
+
+static u8 tpm_inf_status(struct tpm_chip *chip)
+{
+ return tpm_data_in(STAT);
+}
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+
+static struct attribute *inf_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr,
+ NULL,
+};
+
+static struct attribute_group inf_attr_grp = { .attrs = inf_attrs };
+
+static const struct file_operations inf_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpm_open,
+ .read = tpm_read,
+ .write = tpm_write,
+ .release = tpm_release,
+};
+
+static const struct tpm_vendor_specific tpm_inf = {
+ .recv = tpm_inf_recv,
+ .send = tpm_inf_send,
+ .cancel = tpm_inf_cancel,
+ .status = tpm_inf_status,
+ .req_complete_mask = 0,
+ .req_complete_val = 0,
+ .attr_group = &inf_attr_grp,
+ .miscdev = {.fops = &inf_ops,},
+};
+
+static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
+ /* Infineon TPMs */
+ {"IFX0101", 0},
+ {"IFX0102", 0},
+ {"", 0}
+};
+
+MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
+
+static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *dev_id)
+{
+ int rc = 0;
+ u8 iol, ioh;
+ int vendorid[2];
+ int version[2];
+ int productid[2];
+ char chipname[20];
+ struct tpm_chip *chip;
+
+ /* read IO-ports through PnP */
+ if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
+ !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
+
+ tpm_dev.iotype = TPM_INF_IO_PORT;
+
+ tpm_dev.config_port = pnp_port_start(dev, 0);
+ tpm_dev.config_size = pnp_port_len(dev, 0);
+ tpm_dev.data_regs = pnp_port_start(dev, 1);
+ tpm_dev.data_size = pnp_port_len(dev, 1);
+ if ((tpm_dev.data_size < 4) || (tpm_dev.config_size < 2)) {
+ rc = -EINVAL;
+ goto err_last;
+ }
+ dev_info(&dev->dev, "Found %s with ID %s\n",
+ dev->name, dev_id->id);
+ if (!((tpm_dev.data_regs >> 8) & 0xff)) {
+ rc = -EINVAL;
+ goto err_last;
+ }
+ /* publish my base address and request region */
+ if (request_region(tpm_dev.data_regs, tpm_dev.data_size,
+ "tpm_infineon0") == NULL) {
+ rc = -EINVAL;
+ goto err_last;
+ }
+ if (request_region(tpm_dev.config_port, tpm_dev.config_size,
+ "tpm_infineon0") == NULL) {
+ release_region(tpm_dev.data_regs, tpm_dev.data_size);
+ rc = -EINVAL;
+ goto err_last;
+ }
+ } else if (pnp_mem_valid(dev, 0) &&
+ !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
+
+ tpm_dev.iotype = TPM_INF_IO_MEM;
+
+ tpm_dev.map_base = pnp_mem_start(dev, 0);
+ tpm_dev.map_size = pnp_mem_len(dev, 0);
+
+ dev_info(&dev->dev, "Found %s with ID %s\n",
+ dev->name, dev_id->id);
+
+ /* publish my base address and request region */
+ if (request_mem_region(tpm_dev.map_base, tpm_dev.map_size,
+ "tpm_infineon0") == NULL) {
+ rc = -EINVAL;
+ goto err_last;
+ }
+
+ tpm_dev.mem_base = ioremap(tpm_dev.map_base, tpm_dev.map_size);
+ if (tpm_dev.mem_base == NULL) {
+ release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
+ rc = -EINVAL;
+ goto err_last;
+ }
+
+ /*
+ * The only known MMIO based Infineon TPM system provides
+ * a single large mem region with the device config
+ * registers at the default TPM_ADDR. The data registers
+ * seem like they could be placed anywhere within the MMIO
+ * region, but let's just put them at zero offset.
+ */
+ tpm_dev.index_off = TPM_ADDR;
+ tpm_dev.data_regs = 0x0;
+ } else {
+ rc = -EINVAL;
+ goto err_last;
+ }
+
+ /* query the chip for its vendor ID, version number, and so on */
+ tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+ tpm_config_out(IDVENL, TPM_INF_ADDR);
+ vendorid[1] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(IDVENH, TPM_INF_ADDR);
+ vendorid[0] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(IDPDL, TPM_INF_ADDR);
+ productid[1] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(IDPDH, TPM_INF_ADDR);
+ productid[0] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(CHIP_ID1, TPM_INF_ADDR);
+ version[1] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(CHIP_ID2, TPM_INF_ADDR);
+ version[0] = tpm_config_in(TPM_INF_DATA);
+
+ switch ((productid[0] << 8) | productid[1]) {
+ case 6:
+ snprintf(chipname, sizeof(chipname), " (SLD 9630 TT 1.1)");
+ break;
+ case 11:
+ snprintf(chipname, sizeof(chipname), " (SLB 9635 TT 1.2)");
+ break;
+ default:
+ snprintf(chipname, sizeof(chipname), " (unknown chip)");
+ break;
+ }
+
+ if ((vendorid[0] << 8 | vendorid[1]) == (TPM_INFINEON_DEV_VEN_VALUE)) {
+
+ /* configure TPM with IO-ports */
+ tpm_config_out(IOLIMH, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+ tpm_config_out(IOLIML, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+
+ /* verify that the IO ports were set correctly */
+ tpm_config_out(IOLIMH, TPM_INF_ADDR);
+ ioh = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(IOLIML, TPM_INF_ADDR);
+ iol = tpm_config_in(TPM_INF_DATA);
+
+ if ((ioh << 8 | iol) != tpm_dev.data_regs) {
+ dev_err(&dev->dev,
+ "Could not set IO-data registers to 0x%x\n",
+ tpm_dev.data_regs);
+ rc = -EIO;
+ goto err_release_region;
+ }
+
+ /* activate register */
+ tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+ tpm_config_out(0x01, TPM_INF_DATA);
+ tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+
+ /* disable RESET, LP and IRQC */
+ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+
+ /* finally, we're done; print some info */
+ dev_info(&dev->dev, "TPM found: "
+ "config base 0x%lx, "
+ "data base 0x%lx, "
+ "chip version 0x%02x%02x, "
+ "vendor id 0x%x%x (Infineon), "
+ "product id 0x%02x%02x"
+ "%s\n",
+ tpm_dev.iotype == TPM_INF_IO_PORT ?
+ tpm_dev.config_port :
+ tpm_dev.map_base + tpm_dev.index_off,
+ tpm_dev.iotype == TPM_INF_IO_PORT ?
+ tpm_dev.data_regs :
+ tpm_dev.map_base + tpm_dev.data_regs,
+ version[0], version[1],
+ vendorid[0], vendorid[1],
+ productid[0], productid[1], chipname);
+
+ if (!(chip = tpm_register_hardware(&dev->dev, &tpm_inf)))
+ goto err_release_region;
+
+ return 0;
+ } else {
+ rc = -ENODEV;
+ goto err_release_region;
+ }
+
+err_release_region:
+ if (tpm_dev.iotype == TPM_INF_IO_PORT) {
+ release_region(tpm_dev.data_regs, tpm_dev.data_size);
+ release_region(tpm_dev.config_port, tpm_dev.config_size);
+ } else {
+ iounmap(tpm_dev.mem_base);
+ release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
+ }
+
+err_last:
+ return rc;
+}
+
+static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
+{
+ struct tpm_chip *chip = pnp_get_drvdata(dev);
+
+ if (chip) {
+ if (tpm_dev.iotype == TPM_INF_IO_PORT) {
+ release_region(tpm_dev.data_regs, tpm_dev.data_size);
+ release_region(tpm_dev.config_port,
+ tpm_dev.config_size);
+ } else {
+ iounmap(tpm_dev.mem_base);
+ release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
+ }
+ tpm_dev_vendor_release(chip);
+ tpm_remove_hardware(chip->dev);
+ }
+}
+
+static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
+{
+ struct tpm_chip *chip = pnp_get_drvdata(dev);
+ int rc;
+ if (chip) {
+ u8 savestate[] = {
+ 0, 193, /* TPM_TAG_RQU_COMMAND */
+ 0, 0, 0, 10, /* blob length (in bytes) */
+ 0, 0, 0, 152 /* TPM_ORD_SaveState */
+ };
+ dev_info(&dev->dev, "saving TPM state\n");
+ rc = tpm_inf_send(chip, savestate, sizeof(savestate));
+ if (rc < 0) {
+ dev_err(&dev->dev, "error while saving TPM state\n");
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int tpm_inf_pnp_resume(struct pnp_dev *dev)
+{
+ /* Re-configure TPM after suspending */
+ tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+ tpm_config_out(IOLIMH, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+ tpm_config_out(IOLIML, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+ /* activate register */
+ tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+ tpm_config_out(0x01, TPM_INF_DATA);
+ tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+ /* disable RESET, LP and IRQC */
+ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+ return tpm_pm_resume(&dev->dev);
+}
+
+static struct pnp_driver tpm_inf_pnp_driver = {
+ .name = "tpm_inf_pnp",
+ .id_table = tpm_inf_pnp_tbl,
+ .probe = tpm_inf_pnp_probe,
+ .suspend = tpm_inf_pnp_suspend,
+ .resume = tpm_inf_pnp_resume,
+ .remove = __devexit_p(tpm_inf_pnp_remove)
+};
+
+static int __init init_inf(void)
+{
+ return pnp_register_driver(&tpm_inf_pnp_driver);
+}
+
+static void __exit cleanup_inf(void)
+{
+ pnp_unregister_driver(&tpm_inf_pnp_driver);
+}
+
+module_init(init_inf);
+module_exit(cleanup_inf);
+
+MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
+MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
+MODULE_VERSION("1.9.2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
new file mode 100644
index 00000000..a605cb7d
--- /dev/null
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "tpm.h"
+
+/* National definitions */
+enum tpm_nsc_addr{
+ TPM_NSC_IRQ = 0x07,
+ TPM_NSC_BASE0_HI = 0x60,
+ TPM_NSC_BASE0_LO = 0x61,
+ TPM_NSC_BASE1_HI = 0x62,
+ TPM_NSC_BASE1_LO = 0x63
+};
+
+enum tpm_nsc_index {
+ NSC_LDN_INDEX = 0x07,
+ NSC_SID_INDEX = 0x20,
+ NSC_LDC_INDEX = 0x30,
+ NSC_DIO_INDEX = 0x60,
+ NSC_CIO_INDEX = 0x62,
+ NSC_IRQ_INDEX = 0x70,
+ NSC_ITS_INDEX = 0x71
+};
+
+enum tpm_nsc_status_loc {
+ NSC_STATUS = 0x01,
+ NSC_COMMAND = 0x01,
+ NSC_DATA = 0x00
+};
+
+/* status bits */
+enum tpm_nsc_status {
+ NSC_STATUS_OBF = 0x01, /* output buffer full */
+ NSC_STATUS_IBF = 0x02, /* input buffer full */
+ NSC_STATUS_F0 = 0x04, /* F0 */
+ NSC_STATUS_A2 = 0x08, /* A2 */
+ NSC_STATUS_RDY = 0x10, /* ready to receive command */
+ NSC_STATUS_IBR = 0x20 /* ready to receive data */
+};
+
+/* command bits */
+enum tpm_nsc_cmd_mode {
+ NSC_COMMAND_NORMAL = 0x01, /* normal mode */
+ NSC_COMMAND_EOC = 0x03,
+ NSC_COMMAND_CANCEL = 0x22
+};
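+
+/*
+ * A transaction is framed by writes to the command register: the host
+ * sends NSC_COMMAND_NORMAL, streams the command bytes through the data
+ * register, then terminates with NSC_COMMAND_EOC. On receive, the F0
+ * status bit marks the NORMAL/EOC framing bytes surrounding the
+ * response data (see tpm_nsc_send/tpm_nsc_recv below).
+ */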
+/*
+ * Wait for a certain status to appear
+ */
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 *data)
+{
+ unsigned long stop;
+
+ /* status immediately available check */
+ *data = inb(chip->vendor.base + NSC_STATUS);
+ if ((*data & mask) == val)
+ return 0;
+
+ /* wait for status */
+ stop = jiffies + 10 * HZ;
+ do {
+ msleep(TPM_TIMEOUT);
+ *data = inb(chip->vendor.base + 1);
+ if ((*data & mask) == val)
+ return 0;
+ }
+ while (time_before(jiffies, stop));
+
+ return -EBUSY;
+}
+
+static int nsc_wait_for_ready(struct tpm_chip *chip)
+{
+ int status;
+ unsigned long stop;
+
+ /* status immediately available check */
+ status = inb(chip->vendor.base + NSC_STATUS);
+ if (status & NSC_STATUS_OBF)
+ status = inb(chip->vendor.base + NSC_DATA);
+ if (status & NSC_STATUS_RDY)
+ return 0;
+
+ /* wait for status */
+ stop = jiffies + 100;
+ do {
+ msleep(TPM_TIMEOUT);
+ status = inb(chip->vendor.base + NSC_STATUS);
+ if (status & NSC_STATUS_OBF)
+ status = inb(chip->vendor.base + NSC_DATA);
+ if (status & NSC_STATUS_RDY)
+ return 0;
+ }
+ while (time_before(jiffies, stop));
+
+ dev_info(chip->dev, "wait for ready failed\n");
+ return -EBUSY;
+}
+
+
+static int tpm_nsc_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ u8 *buffer = buf;
+ u8 data, *p;
+ u32 size;
+ __be32 *native_size;
+
+ if (count < 6)
+ return -EIO;
+
+ if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
+ dev_err(chip->dev, "F0 timeout\n");
+ return -EIO;
+ }
+ if ((data =
+ inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
+ dev_err(chip->dev, "not in normal mode (0x%x)\n",
+ data);
+ return -EIO;
+ }
+
+ /* read the whole packet */
+ for (p = buffer; p < &buffer[count]; p++) {
+ if (wait_for_stat
+ (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
+ dev_err(chip->dev,
+ "OBF timeout (while reading data)\n");
+ return -EIO;
+ }
+ if (data & NSC_STATUS_F0)
+ break;
+ *p = inb(chip->vendor.base + NSC_DATA);
+ }
+
+ if ((data & NSC_STATUS_F0) == 0 &&
+ (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
+ dev_err(chip->dev, "F0 not set\n");
+ return -EIO;
+ }
+ if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) {
+ dev_err(chip->dev,
+ "expected end of command(0x%x)\n", data);
+ return -EIO;
+ }
+
+ native_size = (__force __be32 *) (buf + 2);
+ size = be32_to_cpu(*native_size);
+
+ if (count < size)
+ return -EIO;
+
+ return size;
+}
+
+static int tpm_nsc_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ u8 data;
+ int i;
+
+ /*
+ * If we hit the chip with back to back commands it locks up
+ * and never set IBF. Hitting it with this "hammer" seems to
+ * fix it. Not sure why this is needed, we followed the flow
+ * chart in the manual to the letter.
+ */
+ outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND);
+
+ if (nsc_wait_for_ready(chip) != 0)
+ return -EIO;
+
+ if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+ dev_err(chip->dev, "IBF timeout\n");
+ return -EIO;
+ }
+
+ outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND);
+ if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
+ dev_err(chip->dev, "IBR timeout\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+ dev_err(chip->dev,
+ "IBF timeout (while writing data)\n");
+ return -EIO;
+ }
+ outb(buf[i], chip->vendor.base + NSC_DATA);
+ }
+
+ if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+ dev_err(chip->dev, "IBF timeout\n");
+ return -EIO;
+ }
+ outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND);
+
+ return count;
+}
+
+static void tpm_nsc_cancel(struct tpm_chip *chip)
+{
+ outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND);
+}
+
+static u8 tpm_nsc_status(struct tpm_chip *chip)
+{
+ return inb(chip->vendor.base + NSC_STATUS);
+}
+
+static const struct file_operations nsc_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpm_open,
+ .read = tpm_read,
+ .write = tpm_write,
+ .release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR|S_IWGRP, NULL, tpm_store_cancel);
+
+static struct attribute *nsc_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr,
+ NULL,
+};
+
+static struct attribute_group nsc_attr_grp = { .attrs = nsc_attrs };
+
+static const struct tpm_vendor_specific tpm_nsc = {
+ .recv = tpm_nsc_recv,
+ .send = tpm_nsc_send,
+ .cancel = tpm_nsc_cancel,
+ .status = tpm_nsc_status,
+ .req_complete_mask = NSC_STATUS_OBF,
+ .req_complete_val = NSC_STATUS_OBF,
+ .req_canceled = NSC_STATUS_RDY,
+ .attr_group = &nsc_attr_grp,
+ .miscdev = { .fops = &nsc_ops, },
+};
+
+static struct platform_device *pdev = NULL;
+
+static void tpm_nsc_remove(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ if (chip) {
+ release_region(chip->vendor.base, 2);
+ tpm_remove_hardware(chip->dev);
+ }
+}
+
+static int tpm_nsc_suspend(struct platform_device *dev, pm_message_t msg)
+{
+ return tpm_pm_suspend(&dev->dev, msg);
+}
+
+static int tpm_nsc_resume(struct platform_device *dev)
+{
+ return tpm_pm_resume(&dev->dev);
+}
+
+static struct platform_driver nsc_drv = {
+ .suspend = tpm_nsc_suspend,
+ .resume = tpm_nsc_resume,
+ .driver = {
+ .name = "tpm_nsc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init init_nsc(void)
+{
+ int rc = 0;
+ int lo, hi, err;
+ int nscAddrBase = TPM_ADDR;
+ struct tpm_chip *chip;
+ unsigned long base;
+
+ /* verify that it is a National part (SID) */
+ if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) {
+ nscAddrBase = (tpm_read_index(TPM_SUPERIO_ADDR, 0x2C)<<8)|
+ (tpm_read_index(TPM_SUPERIO_ADDR, 0x2B)&0xFE);
+ if (tpm_read_index(nscAddrBase, NSC_SID_INDEX) != 0xF6)
+ return -ENODEV;
+ }
+
+ err = platform_driver_register(&nsc_drv);
+ if (err)
+ return err;
+
+ hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI);
+ lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO);
+ base = (hi<<8) | lo;
+
+ /* enable the DPM module */
+ tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);
+
+ pdev = platform_device_alloc("tpm_nscl0", -1);
+ if (!pdev) {
+ rc = -ENOMEM;
+ goto err_unreg_drv;
+ }
+
+ pdev->num_resources = 0;
+ pdev->dev.driver = &nsc_drv.driver;
+ pdev->dev.release = tpm_nsc_remove;
+
+ if ((rc = platform_device_register(pdev)) < 0)
+ goto err_free_dev;
+
+ if (request_region(base, 2, "tpm_nsc0") == NULL ) {
+ rc = -EBUSY;
+ goto err_unreg_dev;
+ }
+
+ if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_nsc))) {
+ rc = -ENODEV;
+ goto err_rel_reg;
+ }
+
+ dev_dbg(&pdev->dev, "NSC TPM detected\n");
+ dev_dbg(&pdev->dev,
+ "NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n",
+ tpm_read_index(nscAddrBase,0x07), tpm_read_index(nscAddrBase,0x20),
+ tpm_read_index(nscAddrBase,0x27));
+ dev_dbg(&pdev->dev,
+ "NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 0x%x SIOCF8 0x%x\n",
+ tpm_read_index(nscAddrBase,0x21), tpm_read_index(nscAddrBase,0x25),
+ tpm_read_index(nscAddrBase,0x26), tpm_read_index(nscAddrBase,0x28));
+ dev_dbg(&pdev->dev, "NSC IO Base0 0x%x\n",
+ (tpm_read_index(nscAddrBase,0x60) << 8) | tpm_read_index(nscAddrBase,0x61));
+ dev_dbg(&pdev->dev, "NSC IO Base1 0x%x\n",
+ (tpm_read_index(nscAddrBase,0x62) << 8) | tpm_read_index(nscAddrBase,0x63));
+ dev_dbg(&pdev->dev, "NSC Interrupt number and wakeup 0x%x\n",
+ tpm_read_index(nscAddrBase,0x70));
+ dev_dbg(&pdev->dev, "NSC IRQ type select 0x%x\n",
+ tpm_read_index(nscAddrBase,0x71));
+ dev_dbg(&pdev->dev,
+ "NSC DMA channel select0 0x%x, select1 0x%x\n",
+ tpm_read_index(nscAddrBase,0x74), tpm_read_index(nscAddrBase,0x75));
+ dev_dbg(&pdev->dev,
+ "NSC Config "
+ "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ tpm_read_index(nscAddrBase,0xF0), tpm_read_index(nscAddrBase,0xF1),
+ tpm_read_index(nscAddrBase,0xF2), tpm_read_index(nscAddrBase,0xF3),
+ tpm_read_index(nscAddrBase,0xF4), tpm_read_index(nscAddrBase,0xF5),
+ tpm_read_index(nscAddrBase,0xF6), tpm_read_index(nscAddrBase,0xF7),
+ tpm_read_index(nscAddrBase,0xF8), tpm_read_index(nscAddrBase,0xF9));
+
+ dev_info(&pdev->dev,
+ "NSC TPM revision %d\n",
+ tpm_read_index(nscAddrBase, 0x27) & 0x1F);
+
+ chip->vendor.base = base;
+
+ return 0;
+
+err_rel_reg:
+ release_region(base, 2);
+err_unreg_dev:
+ platform_device_unregister(pdev);
+err_free_dev:
+ kfree(pdev);
+err_unreg_drv:
+ platform_driver_unregister(&nsc_drv);
+ return rc;
+}
+
+static void __exit cleanup_nsc(void)
+{
+ if (pdev) {
+ tpm_nsc_remove(&pdev->dev);
+ platform_device_unregister(pdev);
+ kfree(pdev);
+ pdev = NULL;
+ }
+
+ platform_driver_unregister(&nsc_drv);
+}
+
+module_init(init_nsc);
+module_exit(cleanup_nsc);
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
new file mode 100644
index 00000000..dd21df55
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis.c
@@ -0,0 +1,777 @@
+/*
+ * Copyright (C) 2005, 2006 IBM Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pnp.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/acpi.h>
+#include "tpm.h"
+
+#define TPM_HEADER_SIZE 10
+
+enum tis_access {
+ TPM_ACCESS_VALID = 0x80,
+ TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+ TPM_ACCESS_REQUEST_PENDING = 0x04,
+ TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum tis_status {
+ TPM_STS_VALID = 0x80,
+ TPM_STS_COMMAND_READY = 0x40,
+ TPM_STS_GO = 0x20,
+ TPM_STS_DATA_AVAIL = 0x10,
+ TPM_STS_DATA_EXPECT = 0x08,
+};
+
+enum tis_int_flags {
+ TPM_GLOBAL_INT_ENABLE = 0x80000000,
+ TPM_INTF_BURST_COUNT_STATIC = 0x100,
+ TPM_INTF_CMD_READY_INT = 0x080,
+ TPM_INTF_INT_EDGE_FALLING = 0x040,
+ TPM_INTF_INT_EDGE_RISING = 0x020,
+ TPM_INTF_INT_LEVEL_LOW = 0x010,
+ TPM_INTF_INT_LEVEL_HIGH = 0x008,
+ TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
+ TPM_INTF_STS_VALID_INT = 0x002,
+ TPM_INTF_DATA_AVAIL_INT = 0x001,
+};
+
+enum tis_defaults {
+ TIS_MEM_BASE = 0xFED40000,
+ TIS_MEM_LEN = 0x5000,
+ TIS_SHORT_TIMEOUT = 750, /* ms */
+ TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+};
+
+#define TPM_ACCESS(l) (0x0000 | ((l) << 12))
+#define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12))
+#define TPM_INT_VECTOR(l) (0x000C | ((l) << 12))
+#define TPM_INT_STATUS(l) (0x0010 | ((l) << 12))
+#define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12))
+#define TPM_STS(l) (0x0018 | ((l) << 12))
+#define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12))
+
+#define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
+#define TPM_RID(l) (0x0F04 | ((l) << 12))
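+
+/*
+ * Per the TIS spec, each locality owns its own 4 KiB page of register
+ * space, selected by bits 12-15 of the offset -- hence the ((l) << 12)
+ * in the macros above. For example, with the default TIS_MEM_BASE the
+ * status register of locality 2 sits at 0xFED40000 + TPM_STS(2) =
+ * 0xFED42018.
+ */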
+
+static LIST_HEAD(tis_chips);
+static DEFINE_SPINLOCK(tis_lock);
+
+#ifdef CONFIG_ACPI
+static int is_itpm(struct pnp_dev *dev)
+{
+ struct acpi_device *acpi = pnp_acpi_device(dev);
+ struct acpi_hardware_id *id;
+
+ list_for_each_entry(id, &acpi->pnp.ids, list) {
+ if (!strcmp("INTC0102", id->id))
+ return 1;
+ }
+
+ return 0;
+}
+#else
+static int is_itpm(struct pnp_dev *dev)
+{
+ return 0;
+}
+#endif
+
+static int check_locality(struct tpm_chip *chip, int l)
+{
+ if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
+ return chip->vendor.locality = l;
+
+ return -1;
+}
+
+static void release_locality(struct tpm_chip *chip, int l, int force)
+{
+ if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
+ (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
+ iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
+ chip->vendor.iobase + TPM_ACCESS(l));
+}
+
+static int request_locality(struct tpm_chip *chip, int l)
+{
+ unsigned long stop;
+ long rc;
+
+ if (check_locality(chip, l) >= 0)
+ return l;
+
+ iowrite8(TPM_ACCESS_REQUEST_USE,
+ chip->vendor.iobase + TPM_ACCESS(l));
+
+ if (chip->vendor.irq) {
+ rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
+ (check_locality
+ (chip, l) >= 0),
+ chip->vendor.timeout_a);
+ if (rc > 0)
+ return l;
+
+ } else {
+ /* poll until the locality is granted */
+ stop = jiffies + chip->vendor.timeout_a;
+ do {
+ if (check_locality(chip, l) >= 0)
+ return l;
+ msleep(TPM_TIMEOUT);
+ }
+ while (time_before(jiffies, stop));
+ }
+ return -1;
+}
+
+static u8 tpm_tis_status(struct tpm_chip *chip)
+{
+ return ioread8(chip->vendor.iobase +
+ TPM_STS(chip->vendor.locality));
+}
+
+static void tpm_tis_ready(struct tpm_chip *chip)
+{
+ /* this causes the current command to be aborted */
+ iowrite8(TPM_STS_COMMAND_READY,
+ chip->vendor.iobase + TPM_STS(chip->vendor.locality));
+}
+
+static int get_burstcount(struct tpm_chip *chip)
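+/*
+ * The burst count (TPM_STS bits 8..23, read below as two separate
+ * bytes) says how many bytes can be transferred to or from the FIFO
+ * without further flow-control handshaking; zero means the TPM is not
+ * ready yet, so poll until it goes non-zero or timeout_d expires.
+ */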
+{
+ unsigned long stop;
+ int burstcnt;
+
+ /* wait for burstcount */
+ /* the spec gives two possible timeouts here (c and d); use timeout_d */
+ stop = jiffies + chip->vendor.timeout_d;
+ do {
+ burstcnt = ioread8(chip->vendor.iobase +
+ TPM_STS(chip->vendor.locality) + 1);
+ burstcnt += ioread8(chip->vendor.iobase +
+ TPM_STS(chip->vendor.locality) +
+ 2) << 8;
+ if (burstcnt)
+ return burstcnt;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ return -EBUSY;
+}
+
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+ wait_queue_head_t *queue)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+
+ /* check current status */
+ status = tpm_tis_status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ if (chip->vendor.irq) {
+ rc = wait_event_interruptible_timeout(*queue,
+ ((tpm_tis_status
+ (chip) & mask) ==
+ mask), timeout);
+ if (rc > 0)
+ return 0;
+ } else {
+ stop = jiffies + timeout;
+ do {
+ msleep(TPM_TIMEOUT);
+ status = tpm_tis_status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+}
+
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ int size = 0, burstcnt;
+ while (size < count &&
+ wait_for_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ chip->vendor.timeout_c,
+ &chip->vendor.read_queue)
+ == 0) {
+ burstcnt = get_burstcount(chip);
+ for (; burstcnt > 0 && size < count; burstcnt--)
+ buf[size++] = ioread8(chip->vendor.iobase +
+ TPM_DATA_FIFO(chip->vendor.
+ locality));
+ }
+ return size;
+}
+
+static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ int size = 0;
+ int expected, status;
+
+ if (count < TPM_HEADER_SIZE) {
+ size = -EIO;
+ goto out;
+ }
+
+	/* read first 10 bytes, including tag, paramsize, and result */
+	size = recv_data(chip, buf, TPM_HEADER_SIZE);
+	if (size < TPM_HEADER_SIZE) {
+		dev_err(chip->dev, "Unable to read header\n");
+		goto out;
+	}
+
+ expected = be32_to_cpu(*(__be32 *) (buf + 2));
+ if (expected > count) {
+ size = -EIO;
+ goto out;
+ }
+
+	size += recv_data(chip, &buf[TPM_HEADER_SIZE],
+			  expected - TPM_HEADER_SIZE);
+	if (size < expected) {
+		dev_err(chip->dev, "Unable to read remainder of result\n");
+		size = -ETIME;
+		goto out;
+	}
+
+ wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
+ &chip->vendor.int_queue);
+ status = tpm_tis_status(chip);
+ if (status & TPM_STS_DATA_AVAIL) { /* retry? */
+		dev_err(chip->dev, "Error: left-over data after response\n");
+ size = -EIO;
+ goto out;
+ }
+
+out:
+ tpm_tis_ready(chip);
+ release_locality(chip, chip->vendor.locality, 0);
+ return size;
+}
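+
+/*
+ * The 10-byte header parsed above follows the TPM 1.2 response layout:
+ * a 2-byte tag, a 4-byte big-endian total length, then a 4-byte return
+ * code.  That is why "expected" is read from buf + 2; e.g. a response
+ * whose bytes 2..5 are 00 00 00 0a is exactly the header with no payload.
+ */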
+
+static int itpm;
+module_param(itpm, bool, 0444);
+MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
+
+/*
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ int rc, status, burstcnt;
+ size_t count = 0;
+ u32 ordinal;
+
+ if (request_locality(chip, 0) < 0)
+ return -EBUSY;
+
+ status = tpm_tis_status(chip);
+ if ((status & TPM_STS_COMMAND_READY) == 0) {
+ tpm_tis_ready(chip);
+ if (wait_for_stat
+ (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
+ &chip->vendor.int_queue) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ }
+
+ while (count < len - 1) {
+ burstcnt = get_burstcount(chip);
+ for (; burstcnt > 0 && count < len - 1; burstcnt--) {
+ iowrite8(buf[count], chip->vendor.iobase +
+ TPM_DATA_FIFO(chip->vendor.locality));
+ count++;
+ }
+
+ wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
+ &chip->vendor.int_queue);
+ status = tpm_tis_status(chip);
+ if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
+ rc = -EIO;
+ goto out_err;
+ }
+ }
+
+ /* write last byte */
+ iowrite8(buf[count],
+ chip->vendor.iobase +
+ TPM_DATA_FIFO(chip->vendor.locality));
+ wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
+ &chip->vendor.int_queue);
+ status = tpm_tis_status(chip);
+ if ((status & TPM_STS_DATA_EXPECT) != 0) {
+ rc = -EIO;
+ goto out_err;
+ }
+
+ /* go and do it */
+ iowrite8(TPM_STS_GO,
+ chip->vendor.iobase + TPM_STS(chip->vendor.locality));
+
+ if (chip->vendor.irq) {
+ ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+ if (wait_for_stat
+ (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ tpm_calc_ordinal_duration(chip, ordinal),
+ &chip->vendor.read_queue) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ }
+ return len;
+out_err:
+ tpm_tis_ready(chip);
+ release_locality(chip, chip->vendor.locality, 0);
+ return rc;
+}
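+
+/*
+ * Commands use the same header layout as responses (2-byte tag, 4-byte
+ * length, 4-byte ordinal), which is why the ordinal used above to pick a
+ * per-command duration is taken from byte offset 6 of the buffer.
+ */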
+
+static const struct file_operations tis_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpm_open,
+ .read = tpm_read,
+ .write = tpm_write,
+ .release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
+ NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+
+static struct attribute *tis_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_active.attr,
+ &dev_attr_owned.attr,
+ &dev_attr_temp_deactivated.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr, NULL,
+};
+
+static struct attribute_group tis_attr_grp = {
+ .attrs = tis_attrs
+};
+
+static struct tpm_vendor_specific tpm_tis = {
+ .status = tpm_tis_status,
+ .recv = tpm_tis_recv,
+ .send = tpm_tis_send,
+ .cancel = tpm_tis_ready,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = TPM_STS_COMMAND_READY,
+ .attr_group = &tis_attr_grp,
+ .miscdev = {
+ .fops = &tis_ops,},
+};
+
+static irqreturn_t tis_int_probe(int irq, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ u32 interrupt;
+
+ interrupt = ioread32(chip->vendor.iobase +
+ TPM_INT_STATUS(chip->vendor.locality));
+
+ if (interrupt == 0)
+ return IRQ_NONE;
+
+ chip->vendor.irq = irq;
+
+ /* Clear interrupts handled with TPM_EOI */
+ iowrite32(interrupt,
+ chip->vendor.iobase +
+ TPM_INT_STATUS(chip->vendor.locality));
+ return IRQ_HANDLED;
+}
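+
+/*
+ * tis_int_probe() is only installed temporarily: tpm_tis_init() below
+ * walks candidate IRQ lines 3..15, provokes a test interrupt via
+ * tpm_gen_interrupt(), and whichever line fires latches itself into
+ * chip->vendor.irq here.
+ */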
+
+static irqreturn_t tis_int_handler(int dummy, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ u32 interrupt;
+ int i;
+
+ interrupt = ioread32(chip->vendor.iobase +
+ TPM_INT_STATUS(chip->vendor.locality));
+
+ if (interrupt == 0)
+ return IRQ_NONE;
+
+ if (interrupt & TPM_INTF_DATA_AVAIL_INT)
+ wake_up_interruptible(&chip->vendor.read_queue);
+ if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
+ for (i = 0; i < 5; i++)
+ if (check_locality(chip, i) >= 0)
+ break;
+ if (interrupt &
+ (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
+ TPM_INTF_CMD_READY_INT))
+ wake_up_interruptible(&chip->vendor.int_queue);
+
+ /* Clear interrupts handled with TPM_EOI */
+ iowrite32(interrupt,
+ chip->vendor.iobase +
+ TPM_INT_STATUS(chip->vendor.locality));
+ ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
+ return IRQ_HANDLED;
+}
+
+static int interrupts = 1;
+module_param(interrupts, bool, 0444);
+MODULE_PARM_DESC(interrupts, "Enable interrupts");
+
+static int tpm_tis_init(struct device *dev, resource_size_t start,
+ resource_size_t len, unsigned int irq)
+{
+ u32 vendor, intfcaps, intmask;
+ int rc, i;
+ struct tpm_chip *chip;
+
+	chip = tpm_register_hardware(dev, &tpm_tis);
+	if (!chip)
+		return -ENODEV;
+
+ chip->vendor.iobase = ioremap(start, len);
+ if (!chip->vendor.iobase) {
+ rc = -EIO;
+ goto out_err;
+ }
+
+ /* Default timeouts */
+ chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+ if (request_locality(chip, 0) != 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
+
+ dev_info(dev,
+ "1.2 TPM (device-id 0x%X, rev-id %d)\n",
+ vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
+
+ if (itpm)
+ dev_info(dev, "Intel iTPM workaround enabled\n");
+
+ /* Figure out the capabilities */
+ intfcaps =
+ ioread32(chip->vendor.iobase +
+ TPM_INTF_CAPS(chip->vendor.locality));
+ dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
+ intfcaps);
+ if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
+ dev_dbg(dev, "\tBurst Count Static\n");
+ if (intfcaps & TPM_INTF_CMD_READY_INT)
+ dev_dbg(dev, "\tCommand Ready Int Support\n");
+ if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
+ dev_dbg(dev, "\tInterrupt Edge Falling\n");
+ if (intfcaps & TPM_INTF_INT_EDGE_RISING)
+ dev_dbg(dev, "\tInterrupt Edge Rising\n");
+ if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
+ dev_dbg(dev, "\tInterrupt Level Low\n");
+ if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
+ dev_dbg(dev, "\tInterrupt Level High\n");
+ if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
+ dev_dbg(dev, "\tLocality Change Int Support\n");
+ if (intfcaps & TPM_INTF_STS_VALID_INT)
+ dev_dbg(dev, "\tSts Valid Int Support\n");
+ if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
+ dev_dbg(dev, "\tData Avail Int Support\n");
+
+ /* INTERRUPT Setup */
+ init_waitqueue_head(&chip->vendor.read_queue);
+ init_waitqueue_head(&chip->vendor.int_queue);
+
+ intmask =
+ ioread32(chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.locality));
+
+ intmask |= TPM_INTF_CMD_READY_INT
+ | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
+ | TPM_INTF_STS_VALID_INT;
+
+ iowrite32(intmask,
+ chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.locality));
+ if (interrupts)
+ chip->vendor.irq = irq;
+ if (interrupts && !chip->vendor.irq) {
+ chip->vendor.irq =
+ ioread8(chip->vendor.iobase +
+ TPM_INT_VECTOR(chip->vendor.locality));
+
+ for (i = 3; i < 16 && chip->vendor.irq == 0; i++) {
+ iowrite8(i, chip->vendor.iobase +
+ TPM_INT_VECTOR(chip->vendor.locality));
+ if (request_irq
+ (i, tis_int_probe, IRQF_SHARED,
+ chip->vendor.miscdev.name, chip) != 0) {
+ dev_info(chip->dev,
+ "Unable to request irq: %d for probe\n",
+ i);
+ continue;
+ }
+
+ /* Clear all existing */
+ iowrite32(ioread32
+ (chip->vendor.iobase +
+ TPM_INT_STATUS(chip->vendor.locality)),
+ chip->vendor.iobase +
+ TPM_INT_STATUS(chip->vendor.locality));
+
+ /* Turn on */
+ iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
+ chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.locality));
+
+ /* Generate Interrupts */
+ tpm_gen_interrupt(chip);
+
+ /* Turn off */
+ iowrite32(intmask,
+ chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.locality));
+ free_irq(i, chip);
+ }
+ }
+ if (chip->vendor.irq) {
+ iowrite8(chip->vendor.irq,
+ chip->vendor.iobase +
+ TPM_INT_VECTOR(chip->vendor.locality));
+ if (request_irq
+ (chip->vendor.irq, tis_int_handler, IRQF_SHARED,
+ chip->vendor.miscdev.name, chip) != 0) {
+ dev_info(chip->dev,
+ "Unable to request irq: %d for use\n",
+ chip->vendor.irq);
+ chip->vendor.irq = 0;
+ } else {
+ /* Clear all existing */
+ iowrite32(ioread32
+ (chip->vendor.iobase +
+ TPM_INT_STATUS(chip->vendor.locality)),
+ chip->vendor.iobase +
+ TPM_INT_STATUS(chip->vendor.locality));
+
+ /* Turn on */
+ iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
+ chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.locality));
+ }
+ }
+
+ INIT_LIST_HEAD(&chip->vendor.list);
+ spin_lock(&tis_lock);
+ list_add(&chip->vendor.list, &tis_chips);
+ spin_unlock(&tis_lock);
+
+ tpm_get_timeouts(chip);
+ tpm_continue_selftest(chip);
+
+ return 0;
+out_err:
+ if (chip->vendor.iobase)
+ iounmap(chip->vendor.iobase);
+ tpm_remove_hardware(chip->dev);
+ return rc;
+}
+#ifdef CONFIG_PNP
+static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
+ const struct pnp_device_id *pnp_id)
+{
+ resource_size_t start, len;
+ unsigned int irq = 0;
+
+ start = pnp_mem_start(pnp_dev, 0);
+ len = pnp_mem_len(pnp_dev, 0);
+
+ if (pnp_irq_valid(pnp_dev, 0))
+ irq = pnp_irq(pnp_dev, 0);
+ else
+ interrupts = 0;
+
+ if (is_itpm(pnp_dev))
+ itpm = 1;
+
+ return tpm_tis_init(&pnp_dev->dev, start, len, irq);
+}
+
+static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
+{
+ return tpm_pm_suspend(&dev->dev, msg);
+}
+
+static int tpm_tis_pnp_resume(struct pnp_dev *dev)
+{
+ struct tpm_chip *chip = pnp_get_drvdata(dev);
+ int ret;
+
+ ret = tpm_pm_resume(&dev->dev);
+ if (!ret)
+ tpm_continue_selftest(chip);
+
+ return ret;
+}
+
+static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
+ {"PNP0C31", 0}, /* TPM */
+ {"ATM1200", 0}, /* Atmel */
+ {"IFX0102", 0}, /* Infineon */
+ {"BCM0101", 0}, /* Broadcom */
+ {"BCM0102", 0}, /* Broadcom */
+ {"NSC1200", 0}, /* National */
+ {"ICO0102", 0}, /* Intel */
+ /* Add new here */
+ {"", 0}, /* User Specified */
+ {"", 0} /* Terminator */
+};
+MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
+
+static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
+{
+ struct tpm_chip *chip = pnp_get_drvdata(dev);
+
+ tpm_dev_vendor_release(chip);
+
+ kfree(chip);
+}
+
+static struct pnp_driver tis_pnp_driver = {
+ .name = "tpm_tis",
+ .id_table = tpm_pnp_tbl,
+ .probe = tpm_tis_pnp_init,
+ .suspend = tpm_tis_pnp_suspend,
+ .resume = tpm_tis_pnp_resume,
+ .remove = tpm_tis_pnp_remove,
+};
+
+#define TIS_HID_USR_IDX (sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) - 2)
+module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
+ sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
+MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
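+/*
+ * This fills the "User Specified" slot of tpm_pnp_tbl, so an otherwise
+ * unknown TPM can still be bound at load time, e.g. (with a hypothetical
+ * ACPI HID) "modprobe tpm_tis hid=XYZ0001".
+ */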
+#endif
+static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
+{
+ return tpm_pm_suspend(&dev->dev, msg);
+}
+
+static int tpm_tis_resume(struct platform_device *dev)
+{
+ return tpm_pm_resume(&dev->dev);
+}
+
+static struct platform_driver tis_drv = {
+ .driver = {
+ .name = "tpm_tis",
+ .owner = THIS_MODULE,
+ },
+ .suspend = tpm_tis_suspend,
+ .resume = tpm_tis_resume,
+};
+
+static struct platform_device *pdev;
+
+static int force;
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
+static int __init init_tis(void)
+{
+ int rc;
+#ifdef CONFIG_PNP
+ if (!force)
+ return pnp_register_driver(&tis_pnp_driver);
+#endif
+
+ rc = platform_driver_register(&tis_drv);
+ if (rc < 0)
+ return rc;
+	pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
+	if (IS_ERR(pdev)) {
+		platform_driver_unregister(&tis_drv);
+		return PTR_ERR(pdev);
+	}
+	rc = tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0);
+	if (rc != 0) {
+		platform_device_unregister(pdev);
+		platform_driver_unregister(&tis_drv);
+	}
+ return rc;
+}
+
+static void __exit cleanup_tis(void)
+{
+ struct tpm_vendor_specific *i, *j;
+ struct tpm_chip *chip;
+ spin_lock(&tis_lock);
+ list_for_each_entry_safe(i, j, &tis_chips, list) {
+ chip = to_tpm_chip(i);
+ tpm_remove_hardware(chip->dev);
+ iowrite32(~TPM_GLOBAL_INT_ENABLE &
+ ioread32(chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.
+ locality)),
+ chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.locality));
+ release_locality(chip, chip->vendor.locality, 1);
+ if (chip->vendor.irq)
+ free_irq(chip->vendor.irq, chip);
+ iounmap(i->iobase);
+ list_del(&i->list);
+ }
+ spin_unlock(&tis_lock);
+#ifdef CONFIG_PNP
+ if (!force) {
+ pnp_unregister_driver(&tis_pnp_driver);
+ return;
+ }
+#endif
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&tis_drv);
+}
+
+module_init(init_tis);
+module_exit(cleanup_tis);
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
new file mode 100644
index 00000000..a1f68af4
--- /dev/null
+++ b/drivers/char/ttyprintk.c
@@ -0,0 +1,225 @@
+/*
+ * linux/drivers/char/ttyprintk.c
+ *
+ * Copyright (C) 2010 Samo Pogacnik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+/*
+ * This pseudo device allows a user to emit printk messages. It makes it
+ * possible to store "console" messages inline with kernel messages for
+ * better analysis of the boot process, for example.
+ */
+
+#include <linux/device.h>
+#include <linux/serial.h>
+#include <linux/tty.h>
+
+struct ttyprintk_port {
+ struct tty_port port;
+ struct mutex port_write_mutex;
+};
+
+static struct ttyprintk_port tpk_port;
+
+/*
+ * Our simple preformatting supports transparent output of (time-stamped)
+ * printk messages (also suitable for a logging service):
+ * - any CR is replaced by NL
+ * - a ttyprintk source tag is added in front of each line
+ * - an overlong message is fragmented, with a '\' + NL between fragments
+ * - TPK_STR_SIZE isn't really the write_room limiting factor, because
+ *   it is emptied on the fly during preformatting.
+ */
+#define TPK_STR_SIZE 508 /* should be bigger than the max expected line length */
+#define TPK_MAX_ROOM 4096 /* we could assume 4K for instance */
+static const char *tpk_tag = "[U] "; /* U for User */
+static int tpk_curr;
+
+static int tpk_printk(const unsigned char *buf, int count)
+{
+ static char tmp[TPK_STR_SIZE + 4];
+ int i = tpk_curr;
+
+ if (buf == NULL) {
+ /* flush tmp[] */
+ if (tpk_curr > 0) {
+ /* non nl or cr terminated message - add nl */
+ tmp[tpk_curr + 0] = '\n';
+ tmp[tpk_curr + 1] = '\0';
+ printk(KERN_INFO "%s%s", tpk_tag, tmp);
+ tpk_curr = 0;
+ }
+ return i;
+ }
+
+ for (i = 0; i < count; i++) {
+ tmp[tpk_curr] = buf[i];
+ if (tpk_curr < TPK_STR_SIZE) {
+ switch (buf[i]) {
+ case '\r':
+ /* replace cr with nl */
+ tmp[tpk_curr + 0] = '\n';
+ tmp[tpk_curr + 1] = '\0';
+ printk(KERN_INFO "%s%s", tpk_tag, tmp);
+ tpk_curr = 0;
+ if (buf[i + 1] == '\n')
+ i++;
+ break;
+ case '\n':
+ tmp[tpk_curr + 1] = '\0';
+ printk(KERN_INFO "%s%s", tpk_tag, tmp);
+ tpk_curr = 0;
+ break;
+ default:
+ tpk_curr++;
+ }
+ } else {
+ /* end of tmp buffer reached: cut the message in two */
+ tmp[tpk_curr + 1] = '\\';
+ tmp[tpk_curr + 2] = '\n';
+ tmp[tpk_curr + 3] = '\0';
+ printk(KERN_INFO "%s%s", tpk_tag, tmp);
+ tpk_curr = 0;
+ }
+ }
+
+ return count;
+}
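+
+/*
+ * For example, writing "rootfs mounted\r" through the tty ends up in the
+ * kernel log as:
+ *
+ *	[U] rootfs mounted
+ *
+ * and a line longer than TPK_STR_SIZE is emitted as several fragments,
+ * each but the last ending in '\' before the newline.
+ */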
+
+/*
+ * TTY operations open function.
+ */
+static int tpk_open(struct tty_struct *tty, struct file *filp)
+{
+ tty->driver_data = &tpk_port;
+
+ return tty_port_open(&tpk_port.port, tty, filp);
+}
+
+/*
+ * TTY operations close function.
+ */
+static void tpk_close(struct tty_struct *tty, struct file *filp)
+{
+ struct ttyprintk_port *tpkp = tty->driver_data;
+
+ mutex_lock(&tpkp->port_write_mutex);
+ /* flush tpk_printk buffer */
+ tpk_printk(NULL, 0);
+ mutex_unlock(&tpkp->port_write_mutex);
+
+ tty_port_close(&tpkp->port, tty, filp);
+}
+
+/*
+ * TTY operations write function.
+ */
+static int tpk_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ struct ttyprintk_port *tpkp = tty->driver_data;
+ int ret;
+
+ /* exclusive use of tpk_printk within this tty */
+ mutex_lock(&tpkp->port_write_mutex);
+ ret = tpk_printk(buf, count);
+ mutex_unlock(&tpkp->port_write_mutex);
+
+ return ret;
+}
+
+/*
+ * TTY operations write_room function.
+ */
+static int tpk_write_room(struct tty_struct *tty)
+{
+ return TPK_MAX_ROOM;
+}
+
+/*
+ * TTY operations ioctl function.
+ */
+static int tpk_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ttyprintk_port *tpkp = tty->driver_data;
+
+ if (!tpkp)
+ return -EINVAL;
+
+ switch (cmd) {
+ /* Stop TIOCCONS */
+ case TIOCCONS:
+ return -EOPNOTSUPP;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static const struct tty_operations ttyprintk_ops = {
+ .open = tpk_open,
+ .close = tpk_close,
+ .write = tpk_write,
+ .write_room = tpk_write_room,
+ .ioctl = tpk_ioctl,
+};
+
+static const struct tty_port_operations null_ops = { };
+
+static struct tty_driver *ttyprintk_driver;
+
+static int __init ttyprintk_init(void)
+{
+ int ret = -ENOMEM;
+	struct device *rp;
+
+ ttyprintk_driver = alloc_tty_driver(1);
+ if (!ttyprintk_driver)
+ return ret;
+
+ ttyprintk_driver->owner = THIS_MODULE;
+ ttyprintk_driver->driver_name = "ttyprintk";
+ ttyprintk_driver->name = "ttyprintk";
+ ttyprintk_driver->major = TTYAUX_MAJOR;
+ ttyprintk_driver->minor_start = 3;
+ ttyprintk_driver->num = 1;
+ ttyprintk_driver->type = TTY_DRIVER_TYPE_CONSOLE;
+ ttyprintk_driver->init_termios = tty_std_termios;
+ ttyprintk_driver->init_termios.c_oflag = OPOST | OCRNL | ONOCR | ONLRET;
+ ttyprintk_driver->flags = TTY_DRIVER_RESET_TERMIOS |
+ TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(ttyprintk_driver, &ttyprintk_ops);
+
+ ret = tty_register_driver(ttyprintk_driver);
+ if (ret < 0) {
+ printk(KERN_ERR "Couldn't register ttyprintk driver\n");
+ goto error;
+ }
+
+ /* create our unnumbered device */
+ rp = device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 3), NULL,
+ ttyprintk_driver->name);
+ if (IS_ERR(rp)) {
+ printk(KERN_ERR "Couldn't create ttyprintk device\n");
+ ret = PTR_ERR(rp);
+ goto error;
+ }
+
+ tty_port_init(&tpk_port.port);
+ tpk_port.port.ops = &null_ops;
+ mutex_init(&tpk_port.port_write_mutex);
+
+ return 0;
+
+error:
+ put_tty_driver(ttyprintk_driver);
+ ttyprintk_driver = NULL;
+ return ret;
+}
+module_init(ttyprintk_init);
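+
+/*
+ * Typical usage from userspace, assuming the usual device node exists:
+ *
+ *	echo "boot: mounting data partition" > /dev/ttyprintk
+ *
+ * which interleaves the message with the kernel log (visible in dmesg),
+ * prefixed with the "[U] " tag.
+ */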
diff --git a/drivers/char/uv_mmtimer.c b/drivers/char/uv_mmtimer.c
new file mode 100644
index 00000000..956ebe20
--- /dev/null
+++ b/drivers/char/uv_mmtimer.c
@@ -0,0 +1,220 @@
+/*
+ * Timer device implementation for SGI UV platform.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2009 Silicon Graphics, Inc. All rights reserved.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/mmtimer.h>
+#include <linux/miscdevice.h>
+#include <linux/posix-timers.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+
+#include <asm/genapic.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
+
+MODULE_AUTHOR("Dimitri Sivanich <sivanich@sgi.com>");
+MODULE_DESCRIPTION("SGI UV Memory Mapped RTC Timer");
+MODULE_LICENSE("GPL");
+
+/* name of the device, usually in /dev */
+#define UV_MMTIMER_NAME "mmtimer"
+#define UV_MMTIMER_DESC "SGI UV Memory Mapped RTC Timer"
+#define UV_MMTIMER_VERSION "1.0"
+
+static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
+
+/*
+ * Period in femtoseconds (10^-15 s)
+ */
+static unsigned long uv_mmtimer_femtoperiod;
+
+static const struct file_operations uv_mmtimer_fops = {
+ .owner = THIS_MODULE,
+ .mmap = uv_mmtimer_mmap,
+ .unlocked_ioctl = uv_mmtimer_ioctl,
+ .llseek = noop_llseek,
+};
+
+/**
+ * uv_mmtimer_ioctl - ioctl interface for /dev/uv_mmtimer
+ * @file: file structure for the device
+ * @cmd: command to execute
+ * @arg: optional argument to command
+ *
+ * Executes the command specified by @cmd. Returns 0 for success, < 0 for
+ * failure.
+ *
+ * Valid commands:
+ *
+ * %MMTIMER_GETOFFSET - Should return the offset (relative to the start
+ * of the page where the registers are mapped) for the counter in question.
+ *
+ * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15)
+ * seconds
+ *
+ * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address
+ * specified by @arg
+ *
+ * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter
+ *
+ * %MMTIMER_MMAPAVAIL - Returns 1 if registers can be mmap'd into userspace
+ *
+ * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it
+ * in the address specified by @arg.
+ */
+static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case MMTIMER_GETOFFSET: /* offset of the counter */
+ /*
+ * Starting with HUB rev 2.0, the UV RTC register is
+		 * replicated across all cachelines of its own page.
+ * This allows faster simultaneous reads from a given socket.
+ *
+ * The offset returned is in 64 bit units.
+ */
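+		/*
+		 * Worked example with typical values (64-byte cache lines,
+		 * 4 KiB pages): blade processor id 3 yields
+		 * ((3 * 64) % 4096) / 8 = 24, i.e. the 24th 64-bit word
+		 * of the replicated RTC page.
+		 */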
+ if (uv_get_min_hub_revision_id() == 1)
+ ret = 0;
+ else
+ ret = ((uv_blade_processor_id() * L1_CACHE_BYTES) %
+ PAGE_SIZE) / 8;
+ break;
+
+ case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */
+ if (copy_to_user((unsigned long __user *)arg,
+ &uv_mmtimer_femtoperiod, sizeof(unsigned long)))
+ ret = -EFAULT;
+ break;
+
+ case MMTIMER_GETFREQ: /* frequency in Hz */
+ if (copy_to_user((unsigned long __user *)arg,
+ &sn_rtc_cycles_per_second,
+ sizeof(unsigned long)))
+ ret = -EFAULT;
+ break;
+
+ case MMTIMER_GETBITS: /* number of bits in the clock */
+ ret = hweight64(UVH_RTC_REAL_TIME_CLOCK_MASK);
+ break;
+
+ case MMTIMER_MMAPAVAIL:
+ ret = 1;
+ break;
+
+ case MMTIMER_GETCOUNTER:
+ if (copy_to_user((unsigned long __user *)arg,
+ (unsigned long *)uv_local_mmr_address(UVH_RTC),
+ sizeof(unsigned long)))
+ ret = -EFAULT;
+ break;
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+ return ret;
+}
+
+/**
+ * uv_mmtimer_mmap - maps the clock's registers into userspace
+ * @file: file structure for the device
+ * @vma: VMA to map the registers into
+ *
+ * Calls remap_pfn_range() to map the clock's registers into
+ * the calling process' address space.
+ */
+static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long uv_mmtimer_addr;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ if (PAGE_SIZE > (1 << 16))
+ return -ENOSYS;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ uv_mmtimer_addr = UV_LOCAL_MMR_BASE | UVH_RTC;
+ uv_mmtimer_addr &= ~(PAGE_SIZE - 1);
+ uv_mmtimer_addr &= 0xfffffffffffffffUL;
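+	/*
+	 * The two masks above round the RTC MMR address down to its page
+	 * boundary and keep only the low 60 address bits, leaving the
+	 * physical frame that remap_pfn_range() maps read-only below.
+	 */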
+
+ if (remap_pfn_range(vma, vma->vm_start, uv_mmtimer_addr >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot)) {
+ printk(KERN_ERR "remap_pfn_range failed in uv_mmtimer_mmap\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static struct miscdevice uv_mmtimer_miscdev = {
+ MISC_DYNAMIC_MINOR,
+ UV_MMTIMER_NAME,
+ &uv_mmtimer_fops
+};
+
+
+ * uv_mmtimer_init - device initialization routine
+ *
+ * Does initial setup for the uv_mmtimer device.
+ */
+static int __init uv_mmtimer_init(void)
+{
+ if (!is_uv_system()) {
+ printk(KERN_ERR "%s: Hardware unsupported\n", UV_MMTIMER_NAME);
+ return -1;
+ }
+
+ /*
+ * Sanity check the cycles/sec variable
+ */
+ if (sn_rtc_cycles_per_second < 100000) {
+ printk(KERN_ERR "%s: unable to determine clock frequency\n",
+ UV_MMTIMER_NAME);
+ return -1;
+ }
+
+ uv_mmtimer_femtoperiod = ((unsigned long)1E15 +
+ sn_rtc_cycles_per_second / 2) /
+ sn_rtc_cycles_per_second;
+
+ if (misc_register(&uv_mmtimer_miscdev)) {
+ printk(KERN_ERR "%s: failed to register device\n",
+ UV_MMTIMER_NAME);
+ return -1;
+ }
+
+ printk(KERN_INFO "%s: v%s, %ld MHz\n", UV_MMTIMER_DESC,
+ UV_MMTIMER_VERSION,
+ sn_rtc_cycles_per_second/(unsigned long)1E6);
+
+ return 0;
+}
+
+module_init(uv_mmtimer_init);
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
new file mode 100644
index 00000000..ad6e64a2
--- /dev/null
+++ b/drivers/char/viotape.c
@@ -0,0 +1,1041 @@
+/* -*- linux-c -*-
+ * drivers/char/viotape.c
+ *
+ * iSeries Virtual Tape
+ *
+ * Authors: Dave Boutcher <boutcher@us.ibm.com>
+ * Ryan Arnold <ryanarn@us.ibm.com>
+ * Colin Devilbiss <devilbis@us.ibm.com>
+ * Stephen Rothwell
+ *
+ * (C) Copyright 2000-2004 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver provides access to tape drives owned and managed by an OS/400
+ * partition running on the same box as this Linux partition.
+ *
+ * All tape operations are performed by sending messages back and forth to
+ * the OS/400 partition. The format of the messages is defined in
+ * iseries/vio.h
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/mtio.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/major.h>
+#include <linux/completion.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+#include <asm/ioctls.h>
+#include <asm/firmware.h>
+#include <asm/vio.h>
+#include <asm/iseries/vio.h>
+#include <asm/iseries/hv_lp_event.h>
+#include <asm/iseries/hv_call_event.h>
+#include <asm/iseries/hv_lp_config.h>
+
+#define VIOTAPE_VERSION "1.2"
+#define VIOTAPE_MAXREQ 1
+
+#define VIOTAPE_KERN_WARN KERN_WARNING "viotape: "
+#define VIOTAPE_KERN_INFO KERN_INFO "viotape: "
+
+static DEFINE_MUTEX(proc_viotape_mutex);
+static int viotape_numdev;
+
+/*
+ * The minor number follows the conventions of the SCSI tape drives. The
+ * rewind and mode are encoded in the minor #. We use this struct to break
+ * them out
+ */
+struct viot_devinfo_struct {
+ int devno;
+ int mode;
+ int rewind;
+};
+
+#define VIOTAPOP_RESET 0
+#define VIOTAPOP_FSF 1
+#define VIOTAPOP_BSF 2
+#define VIOTAPOP_FSR 3
+#define VIOTAPOP_BSR 4
+#define VIOTAPOP_WEOF 5
+#define VIOTAPOP_REW 6
+#define VIOTAPOP_NOP 7
+#define VIOTAPOP_EOM 8
+#define VIOTAPOP_ERASE 9
+#define VIOTAPOP_SETBLK 10
+#define VIOTAPOP_SETDENSITY 11
+#define VIOTAPOP_SETPOS 12
+#define VIOTAPOP_GETPOS 13
+#define VIOTAPOP_SETPART 14
+#define VIOTAPOP_UNLOAD 15
+
+enum viotaperc {
+ viotape_InvalidRange = 0x0601,
+ viotape_InvalidToken = 0x0602,
+ viotape_DMAError = 0x0603,
+ viotape_UseError = 0x0604,
+ viotape_ReleaseError = 0x0605,
+ viotape_InvalidTape = 0x0606,
+ viotape_InvalidOp = 0x0607,
+ viotape_TapeErr = 0x0608,
+
+ viotape_AllocTimedOut = 0x0640,
+ viotape_BOTEnc = 0x0641,
+ viotape_BlankTape = 0x0642,
+ viotape_BufferEmpty = 0x0643,
+ viotape_CleanCartFound = 0x0644,
+ viotape_CmdNotAllowed = 0x0645,
+ viotape_CmdNotSupported = 0x0646,
+ viotape_DataCheck = 0x0647,
+ viotape_DecompressErr = 0x0648,
+ viotape_DeviceTimeout = 0x0649,
+ viotape_DeviceUnavail = 0x064a,
+ viotape_DeviceBusy = 0x064b,
+ viotape_EndOfMedia = 0x064c,
+ viotape_EndOfTape = 0x064d,
+ viotape_EquipCheck = 0x064e,
+ viotape_InsufficientRs = 0x064f,
+ viotape_InvalidLogBlk = 0x0650,
+ viotape_LengthError = 0x0651,
+ viotape_LibDoorOpen = 0x0652,
+ viotape_LoadFailure = 0x0653,
+ viotape_NotCapable = 0x0654,
+ viotape_NotOperational = 0x0655,
+ viotape_NotReady = 0x0656,
+ viotape_OpCancelled = 0x0657,
+ viotape_PhyLinkErr = 0x0658,
+ viotape_RdyNotBOT = 0x0659,
+ viotape_TapeMark = 0x065a,
+ viotape_WriteProt = 0x065b
+};
+
+static const struct vio_error_entry viotape_err_table[] = {
+ { viotape_InvalidRange, EIO, "Internal error" },
+ { viotape_InvalidToken, EIO, "Internal error" },
+ { viotape_DMAError, EIO, "DMA error" },
+ { viotape_UseError, EIO, "Internal error" },
+ { viotape_ReleaseError, EIO, "Internal error" },
+ { viotape_InvalidTape, EIO, "Invalid tape device" },
+ { viotape_InvalidOp, EIO, "Invalid operation" },
+ { viotape_TapeErr, EIO, "Tape error" },
+ { viotape_AllocTimedOut, EBUSY, "Allocate timed out" },
+ { viotape_BOTEnc, EIO, "Beginning of tape encountered" },
+ { viotape_BlankTape, EIO, "Blank tape" },
+ { viotape_BufferEmpty, EIO, "Buffer empty" },
+ { viotape_CleanCartFound, ENOMEDIUM, "Cleaning cartridge found" },
+ { viotape_CmdNotAllowed, EIO, "Command not allowed" },
+ { viotape_CmdNotSupported, EIO, "Command not supported" },
+ { viotape_DataCheck, EIO, "Data check" },
+ { viotape_DecompressErr, EIO, "Decompression error" },
+ { viotape_DeviceTimeout, EBUSY, "Device timeout" },
+ { viotape_DeviceUnavail, EIO, "Device unavailable" },
+ { viotape_DeviceBusy, EBUSY, "Device busy" },
+ { viotape_EndOfMedia, ENOSPC, "End of media" },
+ { viotape_EndOfTape, ENOSPC, "End of tape" },
+ { viotape_EquipCheck, EIO, "Equipment check" },
+ { viotape_InsufficientRs, EOVERFLOW, "Insufficient tape resources" },
+ { viotape_InvalidLogBlk, EIO, "Invalid logical block location" },
+ { viotape_LengthError, EOVERFLOW, "Length error" },
+ { viotape_LibDoorOpen, EBUSY, "Door open" },
+ { viotape_LoadFailure, ENOMEDIUM, "Load failure" },
+ { viotape_NotCapable, EIO, "Not capable" },
+ { viotape_NotOperational, EIO, "Not operational" },
+ { viotape_NotReady, EIO, "Not ready" },
+ { viotape_OpCancelled, EIO, "Operation cancelled" },
+ { viotape_PhyLinkErr, EIO, "Physical link error" },
+ { viotape_RdyNotBOT, EIO, "Ready but not beginning of tape" },
+ { viotape_TapeMark, EIO, "Tape mark" },
+ { viotape_WriteProt, EROFS, "Write protection error" },
+ { 0, 0, NULL },
+};
+
+/* Maximum number of tapes we support */
+#define VIOTAPE_MAX_TAPE HVMAXARCHITECTEDVIRTUALTAPES
+#define MAX_PARTITIONS 4
+
+/* defines for current tape state */
+#define VIOT_IDLE 0
+#define VIOT_READING 1
+#define VIOT_WRITING 2
+
+/* Our info on the tapes */
+static struct {
+ const char *rsrcname;
+ const char *type;
+ const char *model;
+} viotape_unitinfo[VIOTAPE_MAX_TAPE];
+
+static struct mtget viomtget[VIOTAPE_MAX_TAPE];
+
+static struct class *tape_class;
+
+static struct device *tape_device[VIOTAPE_MAX_TAPE];
+
+/*
+ * maintain the current state of each tape (and partition)
+ * so that we know when to write EOF marks.
+ */
+static struct {
+ unsigned char cur_part;
+ unsigned char part_stat_rwi[MAX_PARTITIONS];
+} state[VIOTAPE_MAX_TAPE];
+
+/* We single-thread */
+static struct semaphore reqSem;
+
+/*
+ * When we send a request, we use this struct to get the response back
+ * from the interrupt handler
+ */
+struct op_struct {
+ void *buffer;
+ dma_addr_t dmaaddr;
+ size_t count;
+ int rc;
+ int non_blocking;
+ struct completion com;
+ struct device *dev;
+ struct op_struct *next;
+};
+
+static spinlock_t op_struct_list_lock;
+static struct op_struct *op_struct_list;
+
+/* forward declaration to resolve interdependence */
+static int chg_state(int index, unsigned char new_state, struct file *file);
+
+/* procfs support */
+static int proc_viotape_show(struct seq_file *m, void *v)
+{
+ int i;
+
+ seq_printf(m, "viotape driver version " VIOTAPE_VERSION "\n");
+ for (i = 0; i < viotape_numdev; i++) {
+		seq_printf(m, "viotape device %d is iSeries resource %10.10s "
+				"type %4.4s, model %3.3s\n",
+ i, viotape_unitinfo[i].rsrcname,
+ viotape_unitinfo[i].type,
+ viotape_unitinfo[i].model);
+ }
+ return 0;
+}
+
+static int proc_viotape_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_viotape_show, NULL);
+}
+
+static const struct file_operations proc_viotape_operations = {
+ .owner = THIS_MODULE,
+ .open = proc_viotape_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Decode the device minor number into its parts */
+void get_dev_info(struct inode *ino, struct viot_devinfo_struct *devi)
+{
+ devi->devno = iminor(ino) & 0x1F;
+ devi->mode = (iminor(ino) & 0x60) >> 5;
+ /* if bit is set in the minor, do _not_ rewind automatically */
+ devi->rewind = (iminor(ino) & 0x80) == 0;
+}
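+
+/*
+ * Example decode: minor 0xa3 (binary 1010 0011) gives devno 3 (bits 0-4)
+ * and mode 1 (bits 5-6); bit 7 is set, so auto-rewind is disabled (the
+ * "nvt" no-rewind nodes created in viotape_probe() use minor | 0x80).
+ */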
+
+/* This is called only from the exit and init paths, so no need for locking */
+static void clear_op_struct_pool(void)
+{
+ while (op_struct_list) {
+ struct op_struct *toFree = op_struct_list;
+ op_struct_list = op_struct_list->next;
+ kfree(toFree);
+ }
+}
+
+/* Likewise, this is only called from the init path */
+static int add_op_structs(int structs)
+{
+ int i;
+
+ for (i = 0; i < structs; ++i) {
+ struct op_struct *new_struct =
+ kmalloc(sizeof(*new_struct), GFP_KERNEL);
+ if (!new_struct) {
+ clear_op_struct_pool();
+ return -ENOMEM;
+ }
+ new_struct->next = op_struct_list;
+ op_struct_list = new_struct;
+ }
+ return 0;
+}
+
+/* Allocate an op structure from our pool */
+static struct op_struct *get_op_struct(void)
+{
+ struct op_struct *retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&op_struct_list_lock, flags);
+ retval = op_struct_list;
+ if (retval)
+ op_struct_list = retval->next;
+ spin_unlock_irqrestore(&op_struct_list_lock, flags);
+ if (retval) {
+ memset(retval, 0, sizeof(*retval));
+ init_completion(&retval->com);
+ }
+
+ return retval;
+}
+
+/* Return an op structure to our pool */
+static void free_op_struct(struct op_struct *op_struct)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&op_struct_list_lock, flags);
+ op_struct->next = op_struct_list;
+ op_struct_list = op_struct;
+ spin_unlock_irqrestore(&op_struct_list_lock, flags);
+}
+
+/* Map our tape return codes to errno values */
+int tape_rc_to_errno(int tape_rc, char *operation, int tapeno)
+{
+ const struct vio_error_entry *err;
+
+ if (tape_rc == 0)
+ return 0;
+
+ err = vio_lookup_rc(viotape_err_table, tape_rc);
+ printk(VIOTAPE_KERN_WARN "error(%s) 0x%04x on Device %d (%-10s): %s\n",
+ operation, tape_rc, tapeno,
+ viotape_unitinfo[tapeno].rsrcname, err->msg);
+ return -err->errno;
+}
+
+/* Write */
+static ssize_t viotap_write(struct file *file, const char *buf,
+ size_t count, loff_t * ppos)
+{
+ HvLpEvent_Rc hvrc;
+ unsigned short flags = file->f_flags;
+ int noblock = ((flags & O_NONBLOCK) != 0);
+ ssize_t ret;
+ struct viot_devinfo_struct devi;
+ struct op_struct *op = get_op_struct();
+
+ if (op == NULL)
+ return -ENOMEM;
+
+ get_dev_info(file->f_path.dentry->d_inode, &devi);
+
+ /*
+ * We need to make sure we can send a request. We use
+ * a semaphore to keep track of # requests in use. If
+ * we are non-blocking, make sure we don't block on the
+ * semaphore
+ */
+ if (noblock) {
+ if (down_trylock(&reqSem)) {
+ ret = -EWOULDBLOCK;
+ goto free_op;
+ }
+ } else
+ down(&reqSem);
+
+ /* Allocate a DMA buffer */
+ op->dev = tape_device[devi.devno];
+ op->buffer = dma_alloc_coherent(op->dev, count, &op->dmaaddr,
+ GFP_ATOMIC);
+
+ if (op->buffer == NULL) {
+ printk(VIOTAPE_KERN_WARN
+		       "error allocating dma buffer for len %zu\n",
+ count);
+ ret = -EFAULT;
+ goto up_sem;
+ }
+
+ /* Copy the data into the buffer */
+ if (copy_from_user(op->buffer, buf, count)) {
+ printk(VIOTAPE_KERN_WARN "tape: error on copy from user\n");
+ ret = -EFAULT;
+ goto free_dma;
+ }
+
+ op->non_blocking = noblock;
+ init_completion(&op->com);
+ op->count = count;
+
+ hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
+ HvLpEvent_Type_VirtualIo,
+ viomajorsubtype_tape | viotapewrite,
+ HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
+ viopath_sourceinst(viopath_hostLp),
+ viopath_targetinst(viopath_hostLp),
+ (u64)(unsigned long)op, VIOVERSION << 16,
+ ((u64)devi.devno << 48) | op->dmaaddr, count, 0, 0);
+ if (hvrc != HvLpEvent_Rc_Good) {
+		printk(VIOTAPE_KERN_WARN "hv error %d on op\n",
+ (int)hvrc);
+ ret = -EIO;
+ goto free_dma;
+ }
+
+ if (noblock)
+ return count;
+
+ wait_for_completion(&op->com);
+
+ if (op->rc)
+ ret = tape_rc_to_errno(op->rc, "write", devi.devno);
+ else {
+ chg_state(devi.devno, VIOT_WRITING, file);
+ ret = op->count;
+ }
+
+free_dma:
+ dma_free_coherent(op->dev, count, op->buffer, op->dmaaddr);
+up_sem:
+ up(&reqSem);
+free_op:
+ free_op_struct(op);
+ return ret;
+}
+
+/* read */
+static ssize_t viotap_read(struct file *file, char *buf, size_t count,
+ loff_t *ptr)
+{
+ HvLpEvent_Rc hvrc;
+ unsigned short flags = file->f_flags;
+ struct op_struct *op = get_op_struct();
+ int noblock = ((flags & O_NONBLOCK) != 0);
+ ssize_t ret;
+ struct viot_devinfo_struct devi;
+
+ if (op == NULL)
+ return -ENOMEM;
+
+ get_dev_info(file->f_path.dentry->d_inode, &devi);
+
+ /*
+ * We need to make sure we can send a request. We use
+ * a semaphore to keep track of # requests in use. If
+ * we are non-blocking, make sure we don't block on the
+ * semaphore
+ */
+ if (noblock) {
+ if (down_trylock(&reqSem)) {
+ ret = -EWOULDBLOCK;
+ goto free_op;
+ }
+ } else
+ down(&reqSem);
+
+ chg_state(devi.devno, VIOT_READING, file);
+
+ /* Allocate a DMA buffer */
+ op->dev = tape_device[devi.devno];
+ op->buffer = dma_alloc_coherent(op->dev, count, &op->dmaaddr,
+ GFP_ATOMIC);
+ if (op->buffer == NULL) {
+ ret = -EFAULT;
+ goto up_sem;
+ }
+
+ op->count = count;
+ init_completion(&op->com);
+
+ hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
+ HvLpEvent_Type_VirtualIo,
+ viomajorsubtype_tape | viotaperead,
+ HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
+ viopath_sourceinst(viopath_hostLp),
+ viopath_targetinst(viopath_hostLp),
+ (u64)(unsigned long)op, VIOVERSION << 16,
+ ((u64)devi.devno << 48) | op->dmaaddr, count, 0, 0);
+ if (hvrc != HvLpEvent_Rc_Good) {
+		printk(VIOTAPE_KERN_WARN "tape hv error %d on op\n",
+ (int)hvrc);
+ ret = -EIO;
+ goto free_dma;
+ }
+
+ wait_for_completion(&op->com);
+
+ if (op->rc)
+ ret = tape_rc_to_errno(op->rc, "read", devi.devno);
+ else {
+ ret = op->count;
+ if (ret && copy_to_user(buf, op->buffer, ret)) {
+ printk(VIOTAPE_KERN_WARN "error on copy_to_user\n");
+ ret = -EFAULT;
+ }
+ }
+
+free_dma:
+ dma_free_coherent(op->dev, count, op->buffer, op->dmaaddr);
+up_sem:
+ up(&reqSem);
+free_op:
+ free_op_struct(op);
+ return ret;
+}
+
+/* ioctl */
+static int viotap_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ HvLpEvent_Rc hvrc;
+ int ret;
+ struct viot_devinfo_struct devi;
+ struct mtop mtc;
+ u32 myOp;
+ struct op_struct *op = get_op_struct();
+
+ if (op == NULL)
+ return -ENOMEM;
+
+ get_dev_info(file->f_path.dentry->d_inode, &devi);
+
+ down(&reqSem);
+
+ ret = -EINVAL;
+
+ switch (cmd) {
+ case MTIOCTOP:
+ ret = -EFAULT;
+ /*
+ * inode is null if and only if we (the kernel)
+ * made the request
+ */
+ if (inode == NULL)
+ memcpy(&mtc, (void *) arg, sizeof(struct mtop));
+ else if (copy_from_user((char *)&mtc, (char *)arg,
+ sizeof(struct mtop)))
+ goto free_op;
+
+ ret = -EIO;
+ switch (mtc.mt_op) {
+ case MTRESET:
+ myOp = VIOTAPOP_RESET;
+ break;
+ case MTFSF:
+ myOp = VIOTAPOP_FSF;
+ break;
+ case MTBSF:
+ myOp = VIOTAPOP_BSF;
+ break;
+ case MTFSR:
+ myOp = VIOTAPOP_FSR;
+ break;
+ case MTBSR:
+ myOp = VIOTAPOP_BSR;
+ break;
+ case MTWEOF:
+ myOp = VIOTAPOP_WEOF;
+ break;
+ case MTREW:
+ myOp = VIOTAPOP_REW;
+ break;
+ case MTNOP:
+ myOp = VIOTAPOP_NOP;
+ break;
+ case MTEOM:
+ myOp = VIOTAPOP_EOM;
+ break;
+ case MTERASE:
+ myOp = VIOTAPOP_ERASE;
+ break;
+ case MTSETBLK:
+ myOp = VIOTAPOP_SETBLK;
+ break;
+ case MTSETDENSITY:
+ myOp = VIOTAPOP_SETDENSITY;
+ break;
+ case MTTELL:
+ myOp = VIOTAPOP_GETPOS;
+ break;
+ case MTSEEK:
+ myOp = VIOTAPOP_SETPOS;
+ break;
+ case MTSETPART:
+ myOp = VIOTAPOP_SETPART;
+ break;
+ case MTOFFL:
+ myOp = VIOTAPOP_UNLOAD;
+ break;
+ default:
+ printk(VIOTAPE_KERN_WARN "MTIOCTOP called "
+ "with invalid op 0x%x\n", mtc.mt_op);
+ goto free_op;
+ }
+
+ /*
+ * if we moved the head, we are no longer
+ * reading or writing
+ */
+ switch (mtc.mt_op) {
+ case MTFSF:
+ case MTBSF:
+ case MTFSR:
+ case MTBSR:
+ case MTTELL:
+ case MTSEEK:
+ case MTREW:
+ chg_state(devi.devno, VIOT_IDLE, file);
+ }
+
+ init_completion(&op->com);
+ hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
+ HvLpEvent_Type_VirtualIo,
+ viomajorsubtype_tape | viotapeop,
+ HvLpEvent_AckInd_DoAck,
+ HvLpEvent_AckType_ImmediateAck,
+ viopath_sourceinst(viopath_hostLp),
+ viopath_targetinst(viopath_hostLp),
+ (u64)(unsigned long)op,
+ VIOVERSION << 16,
+ ((u64)devi.devno << 48), 0,
+ (((u64)myOp) << 32) | mtc.mt_count, 0);
+ if (hvrc != HvLpEvent_Rc_Good) {
+			printk(VIOTAPE_KERN_WARN "hv error %d on op\n",
+ (int)hvrc);
+ goto free_op;
+ }
+ wait_for_completion(&op->com);
+ ret = tape_rc_to_errno(op->rc, "tape operation", devi.devno);
+ goto free_op;
+
+ case MTIOCGET:
+ ret = -EIO;
+ init_completion(&op->com);
+ hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
+ HvLpEvent_Type_VirtualIo,
+ viomajorsubtype_tape | viotapegetstatus,
+ HvLpEvent_AckInd_DoAck,
+ HvLpEvent_AckType_ImmediateAck,
+ viopath_sourceinst(viopath_hostLp),
+ viopath_targetinst(viopath_hostLp),
+ (u64)(unsigned long)op, VIOVERSION << 16,
+ ((u64)devi.devno << 48), 0, 0, 0);
+ if (hvrc != HvLpEvent_Rc_Good) {
+			printk(VIOTAPE_KERN_WARN "hv error %d on op\n",
+ (int)hvrc);
+ goto free_op;
+ }
+ wait_for_completion(&op->com);
+
+ /* Operation is complete - grab the error code */
+ ret = tape_rc_to_errno(op->rc, "get status", devi.devno);
+ free_op_struct(op);
+ up(&reqSem);
+
+ if ((ret == 0) && copy_to_user((void *)arg,
+ &viomtget[devi.devno],
+ sizeof(viomtget[0])))
+ ret = -EFAULT;
+ return ret;
+ case MTIOCPOS:
+ printk(VIOTAPE_KERN_WARN "Got an (unsupported) MTIOCPOS\n");
+ break;
+ default:
+ printk(VIOTAPE_KERN_WARN "got an unsupported ioctl 0x%0x\n",
+ cmd);
+ break;
+ }
+
+free_op:
+ free_op_struct(op);
+ up(&reqSem);
+ return ret;
+}
+
+static long viotap_unlocked_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ long rc;
+
+ mutex_lock(&proc_viotape_mutex);
+ rc = viotap_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
+ mutex_unlock(&proc_viotape_mutex);
+ return rc;
+}
+
+static int viotap_open(struct inode *inode, struct file *file)
+{
+ HvLpEvent_Rc hvrc;
+ struct viot_devinfo_struct devi;
+ int ret;
+ struct op_struct *op = get_op_struct();
+
+ if (op == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&proc_viotape_mutex);
+ get_dev_info(file->f_path.dentry->d_inode, &devi);
+
+ /* Note: We currently only support one mode! */
+ if ((devi.devno >= viotape_numdev) || (devi.mode)) {
+ ret = -ENODEV;
+ goto free_op;
+ }
+
+ init_completion(&op->com);
+
+ hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
+ HvLpEvent_Type_VirtualIo,
+ viomajorsubtype_tape | viotapeopen,
+ HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
+ viopath_sourceinst(viopath_hostLp),
+ viopath_targetinst(viopath_hostLp),
+ (u64)(unsigned long)op, VIOVERSION << 16,
+ ((u64)devi.devno << 48), 0, 0, 0);
+ if (hvrc != 0) {
+ printk(VIOTAPE_KERN_WARN "bad rc on signalLpEvent %d\n",
+ (int) hvrc);
+ ret = -EIO;
+ goto free_op;
+ }
+
+ wait_for_completion(&op->com);
+ ret = tape_rc_to_errno(op->rc, "open", devi.devno);
+
+free_op:
+ free_op_struct(op);
+ mutex_unlock(&proc_viotape_mutex);
+ return ret;
+}
+
+static int viotap_release(struct inode *inode, struct file *file)
+{
+ HvLpEvent_Rc hvrc;
+ struct viot_devinfo_struct devi;
+ int ret = 0;
+ struct op_struct *op = get_op_struct();
+
+ if (op == NULL)
+ return -ENOMEM;
+ init_completion(&op->com);
+
+ get_dev_info(file->f_path.dentry->d_inode, &devi);
+
+ if (devi.devno >= viotape_numdev) {
+ ret = -ENODEV;
+ goto free_op;
+ }
+
+ chg_state(devi.devno, VIOT_IDLE, file);
+
+ if (devi.rewind) {
+ hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
+ HvLpEvent_Type_VirtualIo,
+ viomajorsubtype_tape | viotapeop,
+ HvLpEvent_AckInd_DoAck,
+ HvLpEvent_AckType_ImmediateAck,
+ viopath_sourceinst(viopath_hostLp),
+ viopath_targetinst(viopath_hostLp),
+ (u64)(unsigned long)op, VIOVERSION << 16,
+ ((u64)devi.devno << 48), 0,
+ ((u64)VIOTAPOP_REW) << 32, 0);
+ wait_for_completion(&op->com);
+
+ tape_rc_to_errno(op->rc, "rewind", devi.devno);
+ }
+
+ hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
+ HvLpEvent_Type_VirtualIo,
+ viomajorsubtype_tape | viotapeclose,
+ HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
+ viopath_sourceinst(viopath_hostLp),
+ viopath_targetinst(viopath_hostLp),
+ (u64)(unsigned long)op, VIOVERSION << 16,
+ ((u64)devi.devno << 48), 0, 0, 0);
+ if (hvrc != 0) {
+ printk(VIOTAPE_KERN_WARN "bad rc on signalLpEvent %d\n",
+ (int) hvrc);
+ ret = -EIO;
+ goto free_op;
+ }
+
+ wait_for_completion(&op->com);
+
+ if (op->rc)
+ printk(VIOTAPE_KERN_WARN "close failed\n");
+
+free_op:
+ free_op_struct(op);
+ return ret;
+}
+
+const struct file_operations viotap_fops = {
+ .owner = THIS_MODULE,
+ .read = viotap_read,
+ .write = viotap_write,
+ .unlocked_ioctl = viotap_unlocked_ioctl,
+ .open = viotap_open,
+ .release = viotap_release,
+ .llseek = noop_llseek,
+};
+
+/* Handle interrupt events for tape */
+static void vioHandleTapeEvent(struct HvLpEvent *event)
+{
+ int tapeminor;
+ struct op_struct *op;
+ struct viotapelpevent *tevent = (struct viotapelpevent *)event;
+
+ if (event == NULL) {
+ /* Notification that a partition went away! */
+ if (!viopath_isactive(viopath_hostLp)) {
+ /* TODO! Clean up */
+ }
+ return;
+ }
+
+ tapeminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK;
+ op = (struct op_struct *)event->xCorrelationToken;
+ switch (tapeminor) {
+ case viotapeopen:
+ case viotapeclose:
+ op->rc = tevent->sub_type_result;
+ complete(&op->com);
+ break;
+ case viotaperead:
+ op->rc = tevent->sub_type_result;
+ op->count = tevent->len;
+ complete(&op->com);
+ break;
+ case viotapewrite:
+ if (op->non_blocking) {
+ dma_free_coherent(op->dev, op->count,
+ op->buffer, op->dmaaddr);
+ free_op_struct(op);
+ up(&reqSem);
+ } else {
+ op->rc = tevent->sub_type_result;
+ op->count = tevent->len;
+ complete(&op->com);
+ }
+ break;
+ case viotapeop:
+ case viotapegetpos:
+ case viotapesetpos:
+ case viotapegetstatus:
+ if (op) {
+ op->count = tevent->u.op.count;
+ op->rc = tevent->sub_type_result;
+ if (!op->non_blocking)
+ complete(&op->com);
+ }
+ break;
+ default:
+		printk(VIOTAPE_KERN_WARN "unexpected ack for subtype %d\n",
+		       tapeminor);
+ }
+}
+
+static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+{
+ int i = vdev->unit_address;
+ int j;
+ struct device_node *node = vdev->dev.of_node;
+
+ if (i >= VIOTAPE_MAX_TAPE)
+ return -ENODEV;
+ if (!node)
+ return -ENODEV;
+
+ if (i >= viotape_numdev)
+ viotape_numdev = i + 1;
+
+ tape_device[i] = &vdev->dev;
+ viotape_unitinfo[i].rsrcname = of_get_property(node,
+ "linux,vio_rsrcname", NULL);
+ viotape_unitinfo[i].type = of_get_property(node, "linux,vio_type",
+ NULL);
+ viotape_unitinfo[i].model = of_get_property(node, "linux,vio_model",
+ NULL);
+
+ state[i].cur_part = 0;
+ for (j = 0; j < MAX_PARTITIONS; ++j)
+ state[i].part_stat_rwi[j] = VIOT_IDLE;
+ device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i), NULL,
+ "iseries!vt%d", i);
+ device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i | 0x80), NULL,
+ "iseries!nvt%d", i);
+ printk(VIOTAPE_KERN_INFO "tape iseries/vt%d is iSeries "
+ "resource %10.10s type %4.4s, model %3.3s\n",
+ i, viotape_unitinfo[i].rsrcname,
+ viotape_unitinfo[i].type, viotape_unitinfo[i].model);
+ return 0;
+}
+
+static int viotape_remove(struct vio_dev *vdev)
+{
+ int i = vdev->unit_address;
+
+ device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i | 0x80));
+ device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i));
+ return 0;
+}
+
+/**
+ * viotape_device_table: Used by vio.c to match devices that we
+ * support.
+ */
+static struct vio_device_id viotape_device_table[] __devinitdata = {
+ { "byte", "IBM,iSeries-viotape" },
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, viotape_device_table);
+
+static struct vio_driver viotape_driver = {
+ .id_table = viotape_device_table,
+ .probe = viotape_probe,
+ .remove = viotape_remove,
+ .driver = {
+ .name = "viotape",
+ .owner = THIS_MODULE,
+ }
+};
+
+int __init viotap_init(void)
+{
+ int ret;
+
+ if (!firmware_has_feature(FW_FEATURE_ISERIES))
+ return -ENODEV;
+
+ op_struct_list = NULL;
+	ret = add_op_structs(VIOTAPE_MAXREQ);
+	if (ret < 0) {
+ printk(VIOTAPE_KERN_WARN "couldn't allocate op structs\n");
+ return ret;
+ }
+ spin_lock_init(&op_struct_list_lock);
+
+ sema_init(&reqSem, VIOTAPE_MAXREQ);
+
+ if (viopath_hostLp == HvLpIndexInvalid) {
+ vio_set_hostlp();
+ if (viopath_hostLp == HvLpIndexInvalid) {
+ ret = -ENODEV;
+ goto clear_op;
+ }
+ }
+
+ ret = viopath_open(viopath_hostLp, viomajorsubtype_tape,
+ VIOTAPE_MAXREQ + 2);
+ if (ret) {
+ printk(VIOTAPE_KERN_WARN
+		       "error %d on viopath_open to hostlp\n", ret);
+ ret = -EIO;
+ goto clear_op;
+ }
+
+ printk(VIOTAPE_KERN_INFO "vers " VIOTAPE_VERSION
+ ", hosting partition %d\n", viopath_hostLp);
+
+ vio_setHandler(viomajorsubtype_tape, vioHandleTapeEvent);
+
+ ret = register_chrdev(VIOTAPE_MAJOR, "viotape", &viotap_fops);
+ if (ret < 0) {
+ printk(VIOTAPE_KERN_WARN "Error registering viotape device\n");
+ goto clear_handler;
+ }
+
+ tape_class = class_create(THIS_MODULE, "tape");
+ if (IS_ERR(tape_class)) {
+		printk(VIOTAPE_KERN_WARN "Unable to allocate class\n");
+ ret = PTR_ERR(tape_class);
+ goto unreg_chrdev;
+ }
+
+ ret = vio_register_driver(&viotape_driver);
+ if (ret)
+ goto unreg_class;
+
+ proc_create("iSeries/viotape", S_IFREG|S_IRUGO, NULL,
+ &proc_viotape_operations);
+
+ return 0;
+
+unreg_class:
+ class_destroy(tape_class);
+unreg_chrdev:
+ unregister_chrdev(VIOTAPE_MAJOR, "viotape");
+clear_handler:
+ vio_clearHandler(viomajorsubtype_tape);
+ viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2);
+clear_op:
+ clear_op_struct_pool();
+ return ret;
+}
+
+/* Give a new state to the tape object */
+static int chg_state(int index, unsigned char new_state, struct file *file)
+{
+ unsigned char *cur_state =
+ &state[index].part_stat_rwi[state[index].cur_part];
+ int rc = 0;
+
+ /* if the same state, don't bother */
+ if (*cur_state == new_state)
+ return 0;
+
+ /* write an EOF if changing from writing to some other state */
+ if (*cur_state == VIOT_WRITING) {
+ struct mtop write_eof = { MTWEOF, 1 };
+
+ rc = viotap_ioctl(NULL, file, MTIOCTOP,
+ (unsigned long)&write_eof);
+ }
+ *cur_state = new_state;
+ return rc;
+}
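+
+/*
+ * For example, a process that has just written data (state VIOT_WRITING)
+ * and then issues MTREW first gets an EOF mark written here before the
+ * partition state flips to VIOT_IDLE.
+ */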
+
+/* Cleanup */
+static void __exit viotap_exit(void)
+{
+ remove_proc_entry("iSeries/viotape", NULL);
+ vio_unregister_driver(&viotape_driver);
+ class_destroy(tape_class);
+ unregister_chrdev(VIOTAPE_MAJOR, "viotape");
+ viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2);
+ vio_clearHandler(viomajorsubtype_tape);
+ clear_op_struct_pool();
+}
+
+MODULE_LICENSE("GPL");
+module_init(viotap_init);
+module_exit(viotap_exit);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
new file mode 100644
index 00000000..fb68b129
--- /dev/null
+++ b/drivers/char/virtio_console.c
@@ -0,0 +1,1842 @@
+/*
+ * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
+ * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
+ * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/virtio.h>
+#include <linux/virtio_console.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include "../tty/hvc/hvc_console.h"
+
+/*
+ * This is a global struct for storing common data for all the devices
+ * this driver handles.
+ *
+ * Mainly, it has a linked list for all the consoles in one place so
+ * that callbacks from hvc for get_chars(), put_chars() work properly
+ * across multiple devices and multiple ports per device.
+ */
+struct ports_driver_data {
+ /* Used for registering chardevs */
+ struct class *class;
+
+ /* Used for exporting per-port information to debugfs */
+ struct dentry *debugfs_dir;
+
+ /* List of all the devices we're handling */
+ struct list_head portdevs;
+
+ /* Number of devices this driver is handling */
+ unsigned int index;
+
+ /*
+ * This is used to keep track of the number of hvc consoles
+ * spawned by this driver. This number is given as the first
+ * argument to hvc_alloc(). To correctly map an initial
+ * console spawned via hvc_instantiate to the console being
+ * hooked up via hvc_alloc, we need to pass the same vtermno.
+ *
+ * We also just assume the first console being initialised was
+ * the first one that got used as the initial console.
+ */
+ unsigned int next_vtermno;
+
+ /* All the console devices handled by this driver */
+ struct list_head consoles;
+};
+static struct ports_driver_data pdrvdata;
+
+DEFINE_SPINLOCK(pdrvdata_lock);
+
+/* This struct holds information that's relevant only for console ports */
+struct console {
+ /* We'll place all consoles in a list in the pdrvdata struct */
+ struct list_head list;
+
+ /* The hvc device associated with this console port */
+ struct hvc_struct *hvc;
+
+ /* The size of the console */
+ struct winsize ws;
+
+ /*
+ * This number identifies the number that we used to register
+ * with hvc in hvc_instantiate() and hvc_alloc(); this is the
+ * number passed on by the hvc callbacks to us to
+ * differentiate between the other console ports handled by
+ * this driver
+ */
+ u32 vtermno;
+};
+
+struct port_buffer {
+ char *buf;
+
+ /* size of the buffer in *buf above */
+ size_t size;
+
+ /* used length of the buffer */
+ size_t len;
+ /* offset in the buf from which to consume data */
+ size_t offset;
+};
+
+/*
+ * This is a per-device struct that stores data common to all the
+ * ports for that device (vdev->priv).
+ */
+struct ports_device {
+ /* Next portdev in the list, head is in the pdrvdata struct */
+ struct list_head list;
+
+ /*
+ * Workqueue handlers where we process deferred work after
+ * notification
+ */
+ struct work_struct control_work;
+
+ struct list_head ports;
+
+ /* To protect the list of ports */
+ spinlock_t ports_lock;
+
+ /* To protect the vq operations for the control channel */
+ spinlock_t cvq_lock;
+
+ /* The current config space is stored here */
+ struct virtio_console_config config;
+
+ /* The virtio device we're associated with */
+ struct virtio_device *vdev;
+
+ /*
+ * A couple of virtqueues for the control channel: one for
+ * guest->host transfers, one for host->guest transfers
+ */
+ struct virtqueue *c_ivq, *c_ovq;
+
+ /* Array of per-port IO virtqueues */
+ struct virtqueue **in_vqs, **out_vqs;
+
+ /* Used for numbering devices for sysfs and debugfs */
+ unsigned int drv_index;
+
+ /* Major number for this device. Ports will be created as minors. */
+ int chr_major;
+};
+
+/* This struct holds the per-port data */
+struct port {
+ /* Next port in the list, head is in the ports_device */
+ struct list_head list;
+
+ /* Pointer to the parent virtio_console device */
+ struct ports_device *portdev;
+
+ /* The current buffer from which data has to be fed to readers */
+ struct port_buffer *inbuf;
+
+ /*
+ * To protect the operations on the in_vq associated with this
+ * port. Has to be a spinlock because it can be called from
+ * interrupt context (get_char()).
+ */
+ spinlock_t inbuf_lock;
+
+ /* Protect the operations on the out_vq. */
+ spinlock_t outvq_lock;
+
+ /* The IO vqs for this port */
+ struct virtqueue *in_vq, *out_vq;
+
+ /* File in the debugfs directory that exposes this port's information */
+ struct dentry *debugfs_file;
+
+ /*
+ * The entries in this struct will be valid if this port is
+ * hooked up to an hvc console
+ */
+ struct console cons;
+
+ /* Each port associates with a separate char device */
+ struct cdev *cdev;
+ struct device *dev;
+
+ /* Reference-counting to handle port hot-unplugs and file operations */
+ struct kref kref;
+
+ /* A waitqueue for poll() or blocking read operations */
+ wait_queue_head_t waitqueue;
+
+ /* The 'name' of the port that we expose via sysfs properties */
+ char *name;
+
+ /* We can notify apps of host connect / disconnect events via SIGIO */
+ struct fasync_struct *async_queue;
+
+ /* The 'id' to identify the port with the Host */
+ u32 id;
+
+ bool outvq_full;
+
+ /* Is the host device open */
+ bool host_connected;
+
+ /* We should allow only one process to open a port */
+ bool guest_connected;
+};
+
+/* This is the very early arch-specific put chars function. */
+static int (*early_put_chars)(u32, const char *, int);
+
+static struct port *find_port_by_vtermno(u32 vtermno)
+{
+ struct port *port;
+ struct console *cons;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdrvdata_lock, flags);
+ list_for_each_entry(cons, &pdrvdata.consoles, list) {
+ if (cons->vtermno == vtermno) {
+ port = container_of(cons, struct port, cons);
+ goto out;
+ }
+ }
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&pdrvdata_lock, flags);
+ return port;
+}
+
+static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
+ dev_t dev)
+{
+ struct port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&portdev->ports_lock, flags);
+ list_for_each_entry(port, &portdev->ports, list)
+ if (port->cdev->dev == dev)
+ goto out;
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&portdev->ports_lock, flags);
+
+ return port;
+}
+
+static struct port *find_port_by_devt(dev_t dev)
+{
+ struct ports_device *portdev;
+ struct port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdrvdata_lock, flags);
+ list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
+ port = find_port_by_devt_in_portdev(portdev, dev);
+ if (port)
+ goto out;
+ }
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&pdrvdata_lock, flags);
+ return port;
+}
+
+static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
+{
+ struct port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&portdev->ports_lock, flags);
+ list_for_each_entry(port, &portdev->ports, list)
+ if (port->id == id)
+ goto out;
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&portdev->ports_lock, flags);
+
+ return port;
+}
+
+static struct port *find_port_by_vq(struct ports_device *portdev,
+ struct virtqueue *vq)
+{
+ struct port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&portdev->ports_lock, flags);
+ list_for_each_entry(port, &portdev->ports, list)
+ if (port->in_vq == vq || port->out_vq == vq)
+ goto out;
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&portdev->ports_lock, flags);
+ return port;
+}
+
+static bool is_console_port(struct port *port)
+{
+ if (port->cons.hvc)
+ return true;
+ return false;
+}
+
+static inline bool use_multiport(struct ports_device *portdev)
+{
+ /*
+ * This condition can be true when put_chars is called from
+ * early_init
+ */
+ if (!portdev->vdev)
+ return false;
+ return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
+}
+
+static void free_buf(struct port_buffer *buf)
+{
+ kfree(buf->buf);
+ kfree(buf);
+}
+
+static struct port_buffer *alloc_buf(size_t buf_size)
+{
+ struct port_buffer *buf;
+
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ goto fail;
+ buf->buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf->buf)
+ goto free_buf;
+ buf->len = 0;
+ buf->offset = 0;
+ buf->size = buf_size;
+ return buf;
+
+free_buf:
+ kfree(buf);
+fail:
+ return NULL;
+}
+
+/* Callers should take appropriate locks */
+static struct port_buffer *get_inbuf(struct port *port)
+{
+ struct port_buffer *buf;
+ struct virtqueue *vq;
+ unsigned int len;
+
+ vq = port->in_vq;
+ buf = virtqueue_get_buf(vq, &len);
+ if (buf) {
+ buf->len = len;
+ buf->offset = 0;
+ }
+ return buf;
+}
+
+/*
+ * Create a scatter-gather list representing our input buffer and put
+ * it in the queue.
+ *
+ * Callers should take appropriate locks.
+ */
+static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
+{
+ struct scatterlist sg[1];
+ int ret;
+
+ sg_init_one(sg, buf->buf, buf->size);
+
+ ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
+ virtqueue_kick(vq);
+ return ret;
+}
+
+/*
+ * Discard any unread data this port has. Callers should take
+ * appropriate locks.
+ */
+static void discard_port_data(struct port *port)
+{
+ struct port_buffer *buf;
+ struct virtqueue *vq;
+ unsigned int len;
+ int ret;
+
+ if (!port->portdev) {
+ /* Device has been unplugged. vqs are already gone. */
+ return;
+ }
+ vq = port->in_vq;
+ if (port->inbuf)
+ buf = port->inbuf;
+ else
+ buf = virtqueue_get_buf(vq, &len);
+
+ ret = 0;
+ while (buf) {
+ if (add_inbuf(vq, buf) < 0) {
+ ret++;
+ free_buf(buf);
+ }
+ buf = virtqueue_get_buf(vq, &len);
+ }
+ port->inbuf = NULL;
+ if (ret)
+ dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
+ ret);
+}
+
+static bool port_has_data(struct port *port)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&port->inbuf_lock, flags);
+ if (port->inbuf) {
+ ret = true;
+ goto out;
+ }
+ port->inbuf = get_inbuf(port);
+ if (port->inbuf) {
+ ret = true;
+ goto out;
+ }
+ ret = false;
+out:
+ spin_unlock_irqrestore(&port->inbuf_lock, flags);
+ return ret;
+}
+
+static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
+ unsigned int event, unsigned int value)
+{
+ struct scatterlist sg[1];
+ struct virtio_console_control cpkt;
+ struct virtqueue *vq;
+ unsigned int len;
+
+ if (!use_multiport(portdev))
+ return 0;
+
+ cpkt.id = port_id;
+ cpkt.event = event;
+ cpkt.value = value;
+
+ vq = portdev->c_ovq;
+
+ sg_init_one(sg, &cpkt, sizeof(cpkt));
+ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
+ virtqueue_kick(vq);
+ while (!virtqueue_get_buf(vq, &len))
+ cpu_relax();
+ }
+ return 0;
+}
+
+static ssize_t send_control_msg(struct port *port, unsigned int event,
+ unsigned int value)
+{
+ /* Did the port get unplugged before userspace closed it? */
+ if (port->portdev)
+ return __send_control_msg(port->portdev, port->id, event, value);
+ return 0;
+}
+
+/* Callers must take the port->outvq_lock */
+static void reclaim_consumed_buffers(struct port *port)
+{
+ void *buf;
+ unsigned int len;
+
+ if (!port->portdev) {
+ /* Device has been unplugged. vqs are already gone. */
+ return;
+ }
+ while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
+ kfree(buf);
+ port->outvq_full = false;
+ }
+}
+
+static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
+ bool nonblock)
+{
+ struct scatterlist sg[1];
+ struct virtqueue *out_vq;
+ ssize_t ret;
+ unsigned long flags;
+ unsigned int len;
+
+ out_vq = port->out_vq;
+
+ spin_lock_irqsave(&port->outvq_lock, flags);
+
+ reclaim_consumed_buffers(port);
+
+ sg_init_one(sg, in_buf, in_count);
+ ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
+
+ /* Tell Host to go! */
+ virtqueue_kick(out_vq);
+
+ if (ret < 0) {
+ in_count = 0;
+ goto done;
+ }
+
+ if (ret == 0)
+ port->outvq_full = true;
+
+ if (nonblock)
+ goto done;
+
+ /*
+ * Wait till the host acknowledges it pushed out the data we
+ * sent. This is done for data from the hvc_console; the tty
+ * operations are performed with spinlocks held so we can't
+ * sleep here. An alternative would be to copy the data to a
+ * buffer and relax the spinning requirement. The downside is
+ * we need to kmalloc a GFP_ATOMIC buffer each time the
+ * console driver writes something out.
+ */
+ while (!virtqueue_get_buf(out_vq, &len))
+ cpu_relax();
+done:
+ spin_unlock_irqrestore(&port->outvq_lock, flags);
+ /*
+ * We're expected to return the amount of data we wrote -- all
+ * of it
+ */
+ return in_count;
+}
+
+/*
+ * Give out the data that's requested from the buffer that we have
+ * queued up.
+ */
+static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
+ bool to_user)
+{
+ struct port_buffer *buf;
+ unsigned long flags;
+
+ if (!out_count || !port_has_data(port))
+ return 0;
+
+ buf = port->inbuf;
+ out_count = min(out_count, buf->len - buf->offset);
+
+ if (to_user) {
+ ssize_t ret;
+
+ ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
+ if (ret)
+ return -EFAULT;
+ } else {
+ memcpy(out_buf, buf->buf + buf->offset, out_count);
+ }
+
+ buf->offset += out_count;
+
+ if (buf->offset == buf->len) {
+ /*
+ * We're done using all the data in this buffer.
+ * Re-queue so that the Host can send us more data.
+ */
+ spin_lock_irqsave(&port->inbuf_lock, flags);
+ port->inbuf = NULL;
+
+ if (add_inbuf(port->in_vq, buf) < 0)
+ dev_warn(port->dev, "failed add_buf\n");
+
+ spin_unlock_irqrestore(&port->inbuf_lock, flags);
+ }
+ /* Return the number of bytes actually copied */
+ return out_count;
+}
+
+/* The condition that must be true for polling to end */
+static bool will_read_block(struct port *port)
+{
+ if (!port->guest_connected) {
+ /* Port got hot-unplugged. Let's exit. */
+ return false;
+ }
+ return !port_has_data(port) && port->host_connected;
+}
+
+static bool will_write_block(struct port *port)
+{
+ bool ret;
+
+ if (!port->guest_connected) {
+ /* Port got hot-unplugged. Let's exit. */
+ return false;
+ }
+ if (!port->host_connected)
+ return true;
+
+ spin_lock_irq(&port->outvq_lock);
+ /*
+ * Check if the Host has consumed any buffers since we last
+ * sent data (this is only applicable for nonblocking ports).
+ */
+ reclaim_consumed_buffers(port);
+ ret = port->outvq_full;
+ spin_unlock_irq(&port->outvq_lock);
+
+ return ret;
+}
+
+static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct port *port;
+ ssize_t ret;
+
+ port = filp->private_data;
+
+ if (!port_has_data(port)) {
+ /*
+ * If nothing's connected on the host just return 0;
+ * this tells the userspace app that there's no
+ * connection
+ */
+ if (!port->host_connected)
+ return 0;
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(port->waitqueue,
+ !will_read_block(port));
+ if (ret < 0)
+ return ret;
+ }
+ /* Port got hot-unplugged. */
+ if (!port->guest_connected)
+ return -ENODEV;
+ /*
+ * We could've received a disconnection message while we were
+ * waiting for more data.
+ *
+ * This check is not combined with the if() statement above
+ * because we might receive some data and the host could also
+ * have disconnected after we were woken from our wait. So we
+ * really want to hand over whatever data we have and only
+ * then check for host_connected.
+ */
+ if (!port_has_data(port) && !port->host_connected)
+ return 0;
+
+ return fill_readbuf(port, ubuf, count, true);
+}
+
+static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct port *port;
+ char *buf;
+ ssize_t ret;
+ bool nonblock;
+
+ /* Userspace could be out to fool us */
+ if (!count)
+ return 0;
+
+ port = filp->private_data;
+
+ nonblock = filp->f_flags & O_NONBLOCK;
+
+ if (will_write_block(port)) {
+ if (nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(port->waitqueue,
+ !will_write_block(port));
+ if (ret < 0)
+ return ret;
+ }
+ /* Port got hot-unplugged. */
+ if (!port->guest_connected)
+ return -ENODEV;
+
+ count = min((size_t)(32 * 1024), count);
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = copy_from_user(buf, ubuf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ /*
+ * We now ask send_buf() to not spin for generic ports -- we
+ * can re-use the same code path that non-blocking file
+ * descriptors take for blocking file descriptors since the
+ * wait is already done and we're certain the write will go
+ * through to the host.
+ */
+ nonblock = true;
+ ret = send_buf(port, buf, count, nonblock);
+
+ if (nonblock && ret > 0)
+ goto out;
+
+free_buf:
+ kfree(buf);
+out:
+ return ret;
+}
+
+static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
+{
+ struct port *port;
+ unsigned int ret;
+
+ port = filp->private_data;
+ poll_wait(filp, &port->waitqueue, wait);
+
+ if (!port->guest_connected) {
+ /* Port got unplugged */
+ return POLLHUP;
+ }
+ ret = 0;
+ if (!will_read_block(port))
+ ret |= POLLIN | POLLRDNORM;
+ if (!will_write_block(port))
+ ret |= POLLOUT;
+ if (!port->host_connected)
+ ret |= POLLHUP;
+
+ return ret;
+}
+
+static void remove_port(struct kref *kref);
+
+static int port_fops_release(struct inode *inode, struct file *filp)
+{
+ struct port *port;
+
+ port = filp->private_data;
+
+ /* Notify host of port being closed */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
+
+ spin_lock_irq(&port->inbuf_lock);
+ port->guest_connected = false;
+
+ discard_port_data(port);
+
+ spin_unlock_irq(&port->inbuf_lock);
+
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
+ /*
+ * Locks aren't necessary here as a port can't be opened after
+ * unplug, and if a port isn't unplugged, a kref would already
+ * exist for the port. Plus, taking ports_lock here would
+ * create a dependency on other locks taken by functions
+ * inside remove_port if we're the last holder of the port,
+ * creating many problems.
+ */
+ kref_put(&port->kref, remove_port);
+
+ return 0;
+}
+
+static int port_fops_open(struct inode *inode, struct file *filp)
+{
+ struct cdev *cdev = inode->i_cdev;
+ struct port *port;
+ int ret;
+
+ port = find_port_by_devt(cdev->dev);
+ if (!port)
+ return -ENXIO;
+ filp->private_data = port;
+
+ /* Guard against the port getting hot-unplugged at the same time */
+ spin_lock_irq(&port->portdev->ports_lock);
+ kref_get(&port->kref);
+ spin_unlock_irq(&port->portdev->ports_lock);
+
+ /*
+ * Don't allow opening of console port devices -- that's done
+ * via /dev/hvc
+ */
+ if (is_console_port(port)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /* Allow only one process to open a particular port at a time */
+ spin_lock_irq(&port->inbuf_lock);
+ if (port->guest_connected) {
+ spin_unlock_irq(&port->inbuf_lock);
+ ret = -EMFILE;
+ goto out;
+ }
+
+ port->guest_connected = true;
+ spin_unlock_irq(&port->inbuf_lock);
+
+ spin_lock_irq(&port->outvq_lock);
+ /*
+ * We might have missed reclaiming a few buffers in the window
+ * between the port previously being closed and it being
+ * opened now.
+ */
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
+ nonseekable_open(inode, filp);
+
+ /* Notify host of port being opened */
+ send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
+
+ return 0;
+out:
+ kref_put(&port->kref, remove_port);
+ return ret;
+}
+
+static int port_fops_fasync(int fd, struct file *filp, int mode)
+{
+ struct port *port;
+
+ port = filp->private_data;
+ return fasync_helper(fd, filp, mode, &port->async_queue);
+}
+
+/*
+ * The file operations that we support: programs in the guest can open
+ * a console device, read from it, write to it, poll for data and
+ * close it. The devices are at
+ * /dev/vport<device number>p<port number>
+ */
+static const struct file_operations port_fops = {
+ .owner = THIS_MODULE,
+ .open = port_fops_open,
+ .read = port_fops_read,
+ .write = port_fops_write,
+ .poll = port_fops_poll,
+ .release = port_fops_release,
+ .fasync = port_fops_fasync,
+ .llseek = no_llseek,
+};
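+
+/*
+ * A minimal usage sketch (not part of the driver) of how a guest
+ * userspace program might drive one of these ports; the device name
+ * vport0p1 below is only an assumed example:
+ *
+ *	int fd = open("/dev/vport0p1", O_RDWR);
+ *	char reply[256];
+ *	ssize_t n;
+ *
+ *	write(fd, "hello host\n", 11);
+ *	n = read(fd, reply, sizeof(reply));
+ *	close(fd);
+ *
+ * A read() returning 0 means the host side isn't connected, and with
+ * O_NONBLOCK the calls above return -EAGAIN instead of blocking, as
+ * implemented in port_fops_read() and port_fops_write().
+ */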
+
+/*
+ * The put_chars() callback is pretty straightforward.
+ *
+ * We turn the characters into a scatter-gather list, add it to the
+ * output queue and then kick the Host. Then we sit here waiting for
+ * it to finish: inefficient in theory, but in practice
+ * implementations will do it immediately (lguest's Launcher does).
+ */
+static int put_chars(u32 vtermno, const char *buf, int count)
+{
+ struct port *port;
+
+ if (unlikely(early_put_chars))
+ return early_put_chars(vtermno, buf, count);
+
+ port = find_port_by_vtermno(vtermno);
+ if (!port)
+ return -EPIPE;
+
+ return send_buf(port, (void *)buf, count, false);
+}
+
+/*
+ * get_chars() is the callback from the hvc_console infrastructure
+ * when an interrupt is received.
+ *
+ * We call out to fill_readbuf that gets us the required data from the
+ * buffers that are queued up.
+ */
+static int get_chars(u32 vtermno, char *buf, int count)
+{
+ struct port *port;
+
+ /* If we've not set up the port yet, we have no input to give. */
+ if (unlikely(early_put_chars))
+ return 0;
+
+ port = find_port_by_vtermno(vtermno);
+ if (!port)
+ return -EPIPE;
+
+ /* If we don't have an input queue yet, we can't get input. */
+ BUG_ON(!port->in_vq);
+
+ return fill_readbuf(port, buf, count, false);
+}
+
+static void resize_console(struct port *port)
+{
+ struct virtio_device *vdev;
+
+ /* The port could have been hot-unplugged */
+ if (!port || !is_console_port(port))
+ return;
+
+ vdev = port->portdev->vdev;
+ if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
+ hvc_resize(port->cons.hvc, port->cons.ws);
+}
+
+/* We set the configuration at this point, since we now have a tty */
+static int notifier_add_vio(struct hvc_struct *hp, int data)
+{
+ struct port *port;
+
+ port = find_port_by_vtermno(hp->vtermno);
+ if (!port)
+ return -EINVAL;
+
+ hp->irq_requested = 1;
+ resize_console(port);
+
+ return 0;
+}
+
+static void notifier_del_vio(struct hvc_struct *hp, int data)
+{
+ hp->irq_requested = 0;
+}
+
+/* The operations for console ports. */
+static const struct hv_ops hv_ops = {
+ .get_chars = get_chars,
+ .put_chars = put_chars,
+ .notifier_add = notifier_add_vio,
+ .notifier_del = notifier_del_vio,
+ .notifier_hangup = notifier_del_vio,
+};
+
+/*
+ * Console drivers are initialized very early so boot messages can go
+ * out, so we do things slightly differently from the generic virtio
+ * initialization of the net and block drivers.
+ *
+ * At this stage, the console is output-only. It's too early to set
+ * up a virtqueue, so we let the drivers do some boutique early-output
+ * thing.
+ */
+int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
+{
+ early_put_chars = put_chars;
+ return hvc_instantiate(0, 0, &hv_ops);
+}
+
+static int init_port_console(struct port *port)
+{
+ int ret;
+
+ /*
+ * The Host's telling us this port is a console port. Hook it
+ * up with an hvc console.
+ *
+ * To set up and manage our virtual console, we call
+ * hvc_alloc().
+ *
+ * The first argument of hvc_alloc() is the virtual console
+ * number. The second argument is the parameter for the
+ * notification mechanism (like irq number). We currently
+ * leave this as zero, virtqueues have implicit notifications.
+ *
+ * The third argument is a "struct hv_ops" containing the
+ * put_chars(), get_chars(), notifier_add() and notifier_del()
+ * pointers. The final argument is the output buffer size: we
+ * can do any size, so we put PAGE_SIZE here.
+ */
+ port->cons.vtermno = pdrvdata.next_vtermno;
+
+ port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
+ if (IS_ERR(port->cons.hvc)) {
+ ret = PTR_ERR(port->cons.hvc);
+ dev_err(port->dev,
+ "error %d allocating hvc for port\n", ret);
+ port->cons.hvc = NULL;
+ return ret;
+ }
+ spin_lock_irq(&pdrvdata_lock);
+ pdrvdata.next_vtermno++;
+ list_add_tail(&port->cons.list, &pdrvdata.consoles);
+ spin_unlock_irq(&pdrvdata_lock);
+ port->guest_connected = true;
+
+ /*
+ * Start using the new console output if this is the first
+ * console to come up.
+ */
+ if (early_put_chars)
+ early_put_chars = NULL;
+
+ /* Notify host of port being opened */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
+
+ return 0;
+}
+
+static ssize_t show_port_name(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct port *port;
+
+ port = dev_get_drvdata(dev);
+
+ return sprintf(buffer, "%s\n", port->name);
+}
+
+static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);
+
+static struct attribute *port_sysfs_entries[] = {
+ &dev_attr_name.attr,
+ NULL
+};
+
+static struct attribute_group port_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = port_sysfs_entries,
+};
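+
+/*
+ * Once this group is registered (see handle_control_message()), the
+ * name appears in sysfs at a path like
+ * /sys/class/virtio-ports/vport0p1/name -- "vport0p1" being an
+ * assumed example -- which udev rules can key off to create stable
+ * symlinks.
+ */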
+
+static int debugfs_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct port *port;
+ char *buf;
+ ssize_t ret, out_offset, out_count;
+
+ out_count = 1024;
+ buf = kmalloc(out_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ port = filp->private_data;
+ out_offset = 0;
+ out_offset += snprintf(buf + out_offset, out_count,
+ "name: %s\n", port->name ? port->name : "");
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "guest_connected: %d\n", port->guest_connected);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "host_connected: %d\n", port->host_connected);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "outvq_full: %d\n", port->outvq_full);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "is_console: %s\n",
+ is_console_port(port) ? "yes" : "no");
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "console_vtermno: %u\n", port->cons.vtermno);
+
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations port_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_open,
+ .read = debugfs_read,
+};
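+
+/*
+ * For illustration, assuming debugfs is mounted at the usual
+ * location, the state dumped by debugfs_read() for an example port
+ * vport0p1 is readable at /sys/kernel/debug/virtio-ports/vport0p1.
+ */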
+
+static void set_console_size(struct port *port, u16 rows, u16 cols)
+{
+ if (!port || !is_console_port(port))
+ return;
+
+ port->cons.ws.ws_row = rows;
+ port->cons.ws.ws_col = cols;
+}
+
+static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+{
+ struct port_buffer *buf;
+ unsigned int nr_added_bufs;
+ int ret;
+
+ nr_added_bufs = 0;
+ do {
+ buf = alloc_buf(PAGE_SIZE);
+ if (!buf)
+ break;
+
+ spin_lock_irq(lock);
+ ret = add_inbuf(vq, buf);
+ if (ret < 0) {
+ spin_unlock_irq(lock);
+ free_buf(buf);
+ break;
+ }
+ nr_added_bufs++;
+ spin_unlock_irq(lock);
+ } while (ret > 0);
+
+ return nr_added_bufs;
+}
+
+static void send_sigio_to_port(struct port *port)
+{
+ if (port->async_queue && port->guest_connected)
+ kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
+}
+
+static int add_port(struct ports_device *portdev, u32 id)
+{
+ char debugfs_name[16];
+ struct port *port;
+ struct port_buffer *buf;
+ dev_t devt;
+ unsigned int nr_added_bufs;
+ int err;
+
+ port = kmalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ err = -ENOMEM;
+ goto fail;
+ }
+ kref_init(&port->kref);
+
+ port->portdev = portdev;
+ port->id = id;
+
+ port->name = NULL;
+ port->inbuf = NULL;
+ port->cons.hvc = NULL;
+ port->async_queue = NULL;
+
+ port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
+
+ port->host_connected = port->guest_connected = false;
+
+ port->outvq_full = false;
+
+ port->in_vq = portdev->in_vqs[port->id];
+ port->out_vq = portdev->out_vqs[port->id];
+
+ port->cdev = cdev_alloc();
+ if (!port->cdev) {
+ dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
+ err = -ENOMEM;
+ goto free_port;
+ }
+ port->cdev->ops = &port_fops;
+
+ devt = MKDEV(portdev->chr_major, id);
+ err = cdev_add(port->cdev, devt, 1);
+ if (err < 0) {
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d adding cdev for port %u\n", err, id);
+ goto free_cdev;
+ }
+ port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
+ devt, port, "vport%up%u",
+ port->portdev->drv_index, id);
+ if (IS_ERR(port->dev)) {
+ err = PTR_ERR(port->dev);
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d creating device for port %u\n",
+ err, id);
+ goto free_cdev;
+ }
+
+ spin_lock_init(&port->inbuf_lock);
+ spin_lock_init(&port->outvq_lock);
+ init_waitqueue_head(&port->waitqueue);
+
+ /* Fill the in_vq with buffers so the host can send us data. */
+ nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
+ if (!nr_added_bufs) {
+ dev_err(port->dev, "Error allocating inbufs\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+
+ /*
+ * If we're not using multiport support, this has to be a console port
+ */
+ if (!use_multiport(port->portdev)) {
+ err = init_port_console(port);
+ if (err)
+ goto free_inbufs;
+ }
+
+ spin_lock_irq(&portdev->ports_lock);
+ list_add_tail(&port->list, &port->portdev->ports);
+ spin_unlock_irq(&portdev->ports_lock);
+
+ /*
+ * Tell the Host we're set so that it can send us various
+ * configuration parameters for this port (eg, port name,
+ * caching, whether this is a console port, etc.)
+ */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+ if (pdrvdata.debugfs_dir) {
+ /*
+ * Finally, create the debugfs file that we can use to
+ * inspect a port's state at any time
+ */
+ sprintf(debugfs_name, "vport%up%u",
+ port->portdev->drv_index, id);
+ port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+ pdrvdata.debugfs_dir,
+ port,
+ &port_debugfs_ops);
+ }
+ return 0;
+
+free_inbufs:
+ while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+ free_buf(buf);
+free_device:
+ device_destroy(pdrvdata.class, port->dev->devt);
+free_cdev:
+ cdev_del(port->cdev);
+free_port:
+ kfree(port);
+fail:
+ /* The host might want to notify management sw about port add failure */
+ __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
+ return err;
+}
+
+/* No users remain, remove all port-specific data. */
+static void remove_port(struct kref *kref)
+{
+ struct port *port;
+
+ port = container_of(kref, struct port, kref);
+
+ sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+ device_destroy(pdrvdata.class, port->dev->devt);
+ cdev_del(port->cdev);
+
+ kfree(port->name);
+
+ debugfs_remove(port->debugfs_file);
+
+ kfree(port);
+}
+
+/*
+ * Port got unplugged. Remove port from portdev's list and drop the
+ * kref reference. If no userspace has this port opened, it will
+ * result in immediate removal of the port.
+ */
+static void unplug_port(struct port *port)
+{
+ struct port_buffer *buf;
+
+ spin_lock_irq(&port->portdev->ports_lock);
+ list_del(&port->list);
+ spin_unlock_irq(&port->portdev->ports_lock);
+
+ if (port->guest_connected) {
+ port->guest_connected = false;
+ port->host_connected = false;
+ wake_up_interruptible(&port->waitqueue);
+
+ /* Let the app know the port is going down. */
+ send_sigio_to_port(port);
+ }
+
+ if (is_console_port(port)) {
+ spin_lock_irq(&pdrvdata_lock);
+ list_del(&port->cons.list);
+ spin_unlock_irq(&pdrvdata_lock);
+ hvc_remove(port->cons.hvc);
+ }
+
+ /* Remove unused data this port might have received. */
+ discard_port_data(port);
+
+ reclaim_consumed_buffers(port);
+
+ /* Remove buffers we queued up for the Host to send us data in. */
+ while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+ free_buf(buf);
+
+ /*
+ * We should just assume the device itself has gone off --
+ * else a close on an open port later will try to send out a
+ * control message.
+ */
+ port->portdev = NULL;
+
+ /*
+ * Locks around here are not necessary - a port can't be
+ * opened after we removed the port struct from ports_list
+ * above.
+ */
+ kref_put(&port->kref, remove_port);
+}
+
+/* Any private messages that the Host and Guest want to share */
+static void handle_control_message(struct ports_device *portdev,
+ struct port_buffer *buf)
+{
+ struct virtio_console_control *cpkt;
+ struct port *port;
+ size_t name_size;
+ int err;
+
+ cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
+
+ port = find_port_by_id(portdev, cpkt->id);
+ if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
+ /* Invalid port id in the control packet; drop it. */
+ dev_dbg(&portdev->vdev->dev,
+ "Invalid index %u in control packet\n", cpkt->id);
+ return;
+ }
+
+ switch (cpkt->event) {
+ case VIRTIO_CONSOLE_PORT_ADD:
+ if (port) {
+ dev_dbg(&portdev->vdev->dev,
+ "Port %u already added\n", port->id);
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+ break;
+ }
+ if (cpkt->id >= portdev->config.max_nr_ports) {
+ dev_warn(&portdev->vdev->dev,
+ "Request for adding port with out-of-bound id %u, max. supported id: %u\n",
+ cpkt->id, portdev->config.max_nr_ports - 1);
+ break;
+ }
+ add_port(portdev, cpkt->id);
+ break;
+ case VIRTIO_CONSOLE_PORT_REMOVE:
+ unplug_port(port);
+ break;
+ case VIRTIO_CONSOLE_CONSOLE_PORT:
+ if (!cpkt->value)
+ break;
+ if (is_console_port(port))
+ break;
+
+ init_port_console(port);
+ /*
+ * Could remove the port here in case init fails - but
+ * have to notify the host first.
+ */
+ break;
+ case VIRTIO_CONSOLE_RESIZE: {
+ struct {
+ __u16 rows;
+ __u16 cols;
+ } size;
+
+ if (!is_console_port(port))
+ break;
+
+ memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
+ sizeof(size));
+ set_console_size(port, size.rows, size.cols);
+
+ port->cons.hvc->irq_requested = 1;
+ resize_console(port);
+ break;
+ }
+ case VIRTIO_CONSOLE_PORT_OPEN:
+ port->host_connected = cpkt->value;
+ wake_up_interruptible(&port->waitqueue);
+ /*
+ * If the host port got closed and the host had any
+ * unconsumed buffers, we'll be able to reclaim them
+ * now.
+ */
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
+ /*
+ * If the guest is connected, it'll be interested in
+ * knowing the host connection state changed.
+ */
+ send_sigio_to_port(port);
+ break;
+ case VIRTIO_CONSOLE_PORT_NAME:
+ /*
+ * Skip the size of the header and the cpkt to get the size
+ * of the name that was sent
+ */
+ name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;
+
+ port->name = kmalloc(name_size, GFP_KERNEL);
+ if (!port->name) {
+ dev_err(port->dev,
+ "Not enough space to store port name\n");
+ break;
+ }
+ strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
+ name_size - 1);
+ port->name[name_size - 1] = 0;
+
+ /*
+ * Since we only have one sysfs attribute, 'name',
+ * create it only if we have a name for the port.
+ */
+ err = sysfs_create_group(&port->dev->kobj,
+ &port_attribute_group);
+ if (err) {
+ dev_err(port->dev,
+ "Error %d creating sysfs device attributes\n",
+ err);
+ } else {
+ /*
+ * Generate a udev event so that appropriate
+ * symlinks can be created based on udev
+ * rules.
+ */
+ kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
+ }
+ break;
+ }
+}
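+
+/*
+ * For reference, each control buffer handled above starts with a
+ * struct virtio_console_control header (u32 id, u16 event, u16
+ * value, from linux/virtio_console.h), optionally followed by a
+ * payload: the port name string for VIRTIO_CONSOLE_PORT_NAME, or a
+ * rows/cols pair for VIRTIO_CONSOLE_RESIZE.
+ */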
+
+static void control_work_handler(struct work_struct *work)
+{
+ struct ports_device *portdev;
+ struct virtqueue *vq;
+ struct port_buffer *buf;
+ unsigned int len;
+
+ portdev = container_of(work, struct ports_device, control_work);
+ vq = portdev->c_ivq;
+
+ spin_lock(&portdev->cvq_lock);
+ while ((buf = virtqueue_get_buf(vq, &len))) {
+ spin_unlock(&portdev->cvq_lock);
+
+ buf->len = len;
+ buf->offset = 0;
+
+ handle_control_message(portdev, buf);
+
+ spin_lock(&portdev->cvq_lock);
+ if (add_inbuf(portdev->c_ivq, buf) < 0) {
+ dev_warn(&portdev->vdev->dev,
+ "Error adding buffer to queue\n");
+ free_buf(buf);
+ }
+ }
+ spin_unlock(&portdev->cvq_lock);
+}
+
+static void out_intr(struct virtqueue *vq)
+{
+ struct port *port;
+
+ port = find_port_by_vq(vq->vdev->priv, vq);
+ if (!port)
+ return;
+
+ wake_up_interruptible(&port->waitqueue);
+}
+
+static void in_intr(struct virtqueue *vq)
+{
+ struct port *port;
+ unsigned long flags;
+
+ port = find_port_by_vq(vq->vdev->priv, vq);
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&port->inbuf_lock, flags);
+ if (!port->inbuf)
+ port->inbuf = get_inbuf(port);
+
+ /*
+ * Don't queue up data when port is closed. This condition
+ * can be reached when a console port is not yet connected (no
+ * tty is spawned) and the host sends out data to console
+ * ports. For generic serial ports, the host won't
+ * (shouldn't) send data till the guest is connected.
+ */
+ if (!port->guest_connected)
+ discard_port_data(port);
+
+ spin_unlock_irqrestore(&port->inbuf_lock, flags);
+
+ wake_up_interruptible(&port->waitqueue);
+
+ /* Send a SIGIO indicating new data in case the process asked for it */
+ send_sigio_to_port(port);
+
+ if (is_console_port(port) && hvc_poll(port->cons.hvc))
+ hvc_kick();
+}
+
+static void control_intr(struct virtqueue *vq)
+{
+ struct ports_device *portdev;
+
+ portdev = vq->vdev->priv;
+ schedule_work(&portdev->control_work);
+}
+
+static void config_intr(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+
+ portdev = vdev->priv;
+
+ if (!use_multiport(portdev)) {
+ struct port *port;
+ u16 rows, cols;
+
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, cols),
+ &cols, sizeof(u16));
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, rows),
+ &rows, sizeof(u16));
+
+ port = find_port_by_id(portdev, 0);
+ set_console_size(port, rows, cols);
+
+ /*
+ * We'll use this way of resizing only for legacy
+ * support. For newer userspace
+ * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages
+ * to indicate console size changes so that it can be
+ * done per-port.
+ */
+ resize_console(port);
+ }
+}
+
+static int init_vqs(struct ports_device *portdev)
+{
+ vq_callback_t **io_callbacks;
+ char **io_names;
+ struct virtqueue **vqs;
+ u32 i, j, nr_ports, nr_queues;
+ int err;
+
+ nr_ports = portdev->config.max_nr_ports;
+ nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
+
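+ /*
+ * Resulting queue layout when multiport is in use
+ * (max_nr_ports == N):
+ *
+ *	vqs[0], vqs[1]	port 0 in/out
+ *	vqs[2], vqs[3]	control in/out
+ *	vqs[4] ...	ports 1..N-1 in/out pairs
+ *
+ * i.e. (N + 1) * 2 queues; without multiport only the two
+ * port 0 queues exist.
+ */
+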
+ vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
+ io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL);
+ io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL);
+ portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
+ GFP_KERNEL);
+ portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
+ GFP_KERNEL);
+ if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
+ !portdev->out_vqs) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ /*
+ * For backward compat (newer host but older guest), the host
+ * spawns a console port first and also inits the vqs for port
+ * 0 before others.
+ */
+ j = 0;
+ io_callbacks[j] = in_intr;
+ io_callbacks[j + 1] = out_intr;
+ io_names[j] = "input";
+ io_names[j + 1] = "output";
+ j += 2;
+
+ if (use_multiport(portdev)) {
+ io_callbacks[j] = control_intr;
+ io_callbacks[j + 1] = NULL;
+ io_names[j] = "control-i";
+ io_names[j + 1] = "control-o";
+
+ for (i = 1; i < nr_ports; i++) {
+ j += 2;
+ io_callbacks[j] = in_intr;
+ io_callbacks[j + 1] = out_intr;
+ io_names[j] = "input";
+ io_names[j + 1] = "output";
+ }
+ }
+ /* Find the queues. */
+ err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs,
+ io_callbacks,
+ (const char **)io_names);
+ if (err)
+ goto free;
+
+ j = 0;
+ portdev->in_vqs[0] = vqs[0];
+ portdev->out_vqs[0] = vqs[1];
+ j += 2;
+ if (use_multiport(portdev)) {
+ portdev->c_ivq = vqs[j];
+ portdev->c_ovq = vqs[j + 1];
+
+ for (i = 1; i < nr_ports; i++) {
+ j += 2;
+ portdev->in_vqs[i] = vqs[j];
+ portdev->out_vqs[i] = vqs[j + 1];
+ }
+ }
+ kfree(io_names);
+ kfree(io_callbacks);
+ kfree(vqs);
+
+ return 0;
+
+free:
+ kfree(portdev->out_vqs);
+ kfree(portdev->in_vqs);
+ kfree(io_names);
+ kfree(io_callbacks);
+ kfree(vqs);
+
+ return err;
+}
+
+static const struct file_operations portdev_fops = {
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Once we're further in boot, we get probed like any other virtio
+ * device.
+ *
+ * If the host also supports multiple console ports, we check the
+ * config space to see how many ports the host has spawned. We
+ * initialize each port found.
+ */
+static int __devinit virtcons_probe(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+ int err;
+ bool multiport;
+
+ portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
+ if (!portdev) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ /* Attach this portdev to this virtio_device, and vice-versa. */
+ portdev->vdev = vdev;
+ vdev->priv = portdev;
+
+ spin_lock_irq(&pdrvdata_lock);
+ portdev->drv_index = pdrvdata.index++;
+ spin_unlock_irq(&pdrvdata_lock);
+
+ portdev->chr_major = register_chrdev(0, "virtio-portsdev",
+ &portdev_fops);
+ if (portdev->chr_major < 0) {
+ dev_err(&vdev->dev,
+ "Error %d registering chrdev for device %u\n",
+ portdev->chr_major, portdev->drv_index);
+ err = portdev->chr_major;
+ goto free;
+ }
+
+ multiport = false;
+ portdev->config.max_nr_ports = 1;
+ if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
+ multiport = true;
+ vdev->config->get(vdev, offsetof(struct virtio_console_config,
+ max_nr_ports),
+ &portdev->config.max_nr_ports,
+ sizeof(portdev->config.max_nr_ports));
+ }
+
+ err = init_vqs(portdev);
+ if (err < 0) {
+ dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
+ goto free_chrdev;
+ }
+
+ spin_lock_init(&portdev->ports_lock);
+ INIT_LIST_HEAD(&portdev->ports);
+
+ if (multiport) {
+ unsigned int nr_added_bufs;
+
+ spin_lock_init(&portdev->cvq_lock);
+ INIT_WORK(&portdev->control_work, &control_work_handler);
+
+ nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
+ if (!nr_added_bufs) {
+ dev_err(&vdev->dev,
+ "Error allocating buffers for control queue\n");
+ err = -ENOMEM;
+ goto free_vqs;
+ }
+ } else {
+ /*
+ * For backward compatibility: Create a console port
+ * if we're running on an older host.
+ */
+ add_port(portdev, 0);
+ }
+
+ spin_lock_irq(&pdrvdata_lock);
+ list_add_tail(&portdev->list, &pdrvdata.portdevs);
+ spin_unlock_irq(&pdrvdata_lock);
+
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 1);
+ return 0;
+
+free_vqs:
+ /* The host might want to notify mgmt sw about device add failure */
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 0);
+ vdev->config->del_vqs(vdev);
+ kfree(portdev->in_vqs);
+ kfree(portdev->out_vqs);
+free_chrdev:
+ unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+free:
+ kfree(portdev);
+fail:
+ return err;
+}
+
+static void virtcons_remove(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+ struct port *port, *port2;
+
+ portdev = vdev->priv;
+
+ spin_lock_irq(&pdrvdata_lock);
+ list_del(&portdev->list);
+ spin_unlock_irq(&pdrvdata_lock);
+
+ /* Disable interrupts for vqs */
+ vdev->config->reset(vdev);
+ /* Finish up work that's lined up */
+ cancel_work_sync(&portdev->control_work);
+
+ list_for_each_entry_safe(port, port2, &portdev->ports, list)
+ unplug_port(port);
+
+ unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+
+ /*
+ * When yanking out a device, we immediately lose the
+ * (device-side) queues. So there's no point in keeping the
+ * guest side around till we drop our final reference. This
+ * also means that any ports which are in an open state will
+ * have to just stop using the port, as the vqs are going
+ * away.
+ */
+ if (use_multiport(portdev)) {
+ struct port_buffer *buf;
+ unsigned int len;
+
+ while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
+ free_buf(buf);
+
+ while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
+ free_buf(buf);
+ }
+
+ vdev->config->del_vqs(vdev);
+ kfree(portdev->in_vqs);
+ kfree(portdev->out_vqs);
+
+ kfree(portdev);
+}
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static unsigned int features[] = {
+ VIRTIO_CONSOLE_F_SIZE,
+ VIRTIO_CONSOLE_F_MULTIPORT,
+};
+
+static struct virtio_driver virtio_console = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtcons_probe,
+ .remove = virtcons_remove,
+ .config_changed = config_intr,
+};
+
+static int __init init(void)
+{
+ int err;
+
+ pdrvdata.class = class_create(THIS_MODULE, "virtio-ports");
+ if (IS_ERR(pdrvdata.class)) {
+ err = PTR_ERR(pdrvdata.class);
+ pr_err("Error %d creating virtio-ports class\n", err);
+ return err;
+ }
+
+ pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
+ if (!pdrvdata.debugfs_dir) {
+ /*
+ * debugfs_create_dir() signals failure with a NULL
+ * return, so there is no error code to print here.
+ */
+ pr_warning("Error creating debugfs dir for virtio-ports\n");
+ }
+ INIT_LIST_HEAD(&pdrvdata.consoles);
+ INIT_LIST_HEAD(&pdrvdata.portdevs);
+
+ return register_virtio_driver(&virtio_console);
+}
+
+static void __exit fini(void)
+{
+ unregister_virtio_driver(&virtio_console);
+
+ class_destroy(pdrvdata.class);
+ if (pdrvdata.debugfs_dir)
+ debugfs_remove_recursive(pdrvdata.debugfs_dir);
+}
+module_init(init);
+module_exit(fini);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio console driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/xilinx_hwicap/Makefile b/drivers/char/xilinx_hwicap/Makefile
new file mode 100644
index 00000000..5491cbc4
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Xilinx OPB hwicap driver
+#
+
+obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap_m.o
+
+xilinx_hwicap_m-y := xilinx_hwicap.o fifo_icap.o buffer_icap.o
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c
new file mode 100644
index 00000000..05d89776
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/buffer_icap.c
@@ -0,0 +1,365 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * (c) Copyright 2003-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#include "buffer_icap.h"
+
+/* Indicates how many bytes will fit in a buffer. (1 BRAM) */
+#define XHI_MAX_BUFFER_BYTES 2048
+#define XHI_MAX_BUFFER_INTS (XHI_MAX_BUFFER_BYTES >> 2)
+
+/* File access and error constants */
+#define XHI_DEVICE_READ_ERROR -1
+#define XHI_DEVICE_WRITE_ERROR -2
+#define XHI_BUFFER_OVERFLOW_ERROR -3
+
+#define XHI_DEVICE_READ 0x1
+#define XHI_DEVICE_WRITE 0x0
+
+/* Constants for checking transfer status */
+#define XHI_CYCLE_DONE 0
+#define XHI_CYCLE_EXECUTING 1
+
+/* buffer_icap register offsets */
+
+/* Size of transfer, read & write */
+#define XHI_SIZE_REG_OFFSET 0x800L
+/* offset into bram, read & write */
+#define XHI_BRAM_OFFSET_REG_OFFSET 0x804L
+/* Read not Configure, direction of transfer. Write only */
+#define XHI_RNC_REG_OFFSET 0x808L
+/* Indicates transfer complete. Read only */
+#define XHI_STATUS_REG_OFFSET 0x80CL
+
+/* Constants for setting the RNC register */
+#define XHI_CONFIGURE 0x0UL
+#define XHI_READBACK 0x1UL
+
+/* Constants for the Done register */
+#define XHI_NOT_FINISHED 0x0UL
+#define XHI_FINISHED 0x1UL
+
+#define XHI_BUFFER_START 0
+
+/**
+ * buffer_icap_get_status - Get the contents of the status register.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * The status register contains the ICAP status and the done bit.
+ *
+ * D8 - cfgerr
+ * D7 - dalign
+ * D6 - rip
+ * D5 - in_abort_l
+ * D4 - Always 1
+ * D3 - Always 1
+ * D2 - Always 1
+ * D1 - Always 1
+ * D0 - Done bit
+ **/
+u32 buffer_icap_get_status(struct hwicap_drvdata *drvdata)
+{
+ return in_be32(drvdata->base_address + XHI_STATUS_REG_OFFSET);
+}
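+
+/*
+ * Illustration only (not used by the driver): with the bit layout
+ * documented above, a caller could decode the returned word as
+ *
+ *	u32 status = buffer_icap_get_status(drvdata);
+ *	int done = status & 0x001;	(D0)
+ *	int cfgerr = status & 0x100;	(D8)
+ *
+ * though the driver itself only ever inspects the done bit, via
+ * buffer_icap_busy() below.
+ */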
+
+/**
+ * buffer_icap_get_bram - Reads data from the storage buffer bram.
+ * @base_address: contains the base address of the component.
+ * @offset: The word offset from which the data should be read.
+ *
+ * A bram is used as a configuration memory cache. One frame of data can
+ * be stored in this "storage buffer".
+ **/
+static inline u32 buffer_icap_get_bram(void __iomem *base_address,
+ u32 offset)
+{
+ return in_be32(base_address + (offset << 2));
+}
+
+/**
+ * buffer_icap_busy - Return true if the icap device is busy
+ * @base_address: is the base address of the device
+ *
+ * This queries the low order bit of the status register, which
+ * indicates whether the current configuration or readback operation
+ * has completed.
+ **/
+static inline bool buffer_icap_busy(void __iomem *base_address)
+{
+ u32 status = in_be32(base_address + XHI_STATUS_REG_OFFSET);
+ return (status & 1) == XHI_NOT_FINISHED;
+}
+
+/**
+ * buffer_icap_set_size - Set the size register.
+ * @base_address: is the base address of the device
+ * @data: The size in bytes.
+ *
+ * The size register holds the number of 8 bit bytes to transfer between
+ * bram and the icap (or icap to bram).
+ **/
+static inline void buffer_icap_set_size(void __iomem *base_address,
+ u32 data)
+{
+ out_be32(base_address + XHI_SIZE_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_offset - Set the bram offset register.
+ * @base_address: contains the base address of the device.
+ * @data: is the value to be written to the data register.
+ *
+ * The bram offset register holds the starting bram address to transfer
+ * data from during configuration or write data to during readback.
+ **/
+static inline void buffer_icap_set_offset(void __iomem *base_address,
+ u32 data)
+{
+ out_be32(base_address + XHI_BRAM_OFFSET_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_rnc - Set the RNC (Readback not Configure) register.
+ * @base_address: contains the base address of the device.
+ * @data: is the value to be written to the data register.
+ *
+ * The RNC register determines the direction of the data transfer. It
+ * controls whether a configuration or readback takes place. Writing to
+ * this register initiates the transfer. A value of 1 initiates a
+ * readback while writing a value of 0 initiates a configuration.
+ **/
+static inline void buffer_icap_set_rnc(void __iomem *base_address,
+ u32 data)
+{
+ out_be32(base_address + XHI_RNC_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_bram - Write data to the storage buffer bram.
+ * @base_address: contains the base address of the component.
+ * @offset: The word offset at which the data should be written.
+ * @data: The value to be written to the bram offset.
+ *
+ * A bram is used as a configuration memory cache. One frame of data can
+ * be stored in this "storage buffer".
+ **/
+static inline void buffer_icap_set_bram(void __iomem *base_address,
+ u32 offset, u32 data)
+{
+ out_be32(base_address + (offset << 2), data);
+}
+
+/**
+ * buffer_icap_device_read - Transfer bytes from ICAP to the storage buffer.
+ * @drvdata: a pointer to the drvdata.
+ * @offset: The storage buffer start address.
+ * @count: The number of words (32 bit) to read from the
+ * device (ICAP).
+ **/
+static int buffer_icap_device_read(struct hwicap_drvdata *drvdata,
+ u32 offset, u32 count)
+{
+
+ s32 retries = 0;
+ void __iomem *base_address = drvdata->base_address;
+
+ if (buffer_icap_busy(base_address))
+ return -EBUSY;
+
+ if ((offset + count) > XHI_MAX_BUFFER_INTS)
+ return -EINVAL;
+
+ /* Set the size register to count * 4 to get bytes. */
+ buffer_icap_set_size(base_address, (count << 2));
+ buffer_icap_set_offset(base_address, offset);
+ buffer_icap_set_rnc(base_address, XHI_READBACK);
+
+ while (buffer_icap_busy(base_address)) {
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
+ * buffer_icap_device_write - Transfer bytes from the storage buffer to ICAP.
+ * @drvdata: a pointer to the drvdata.
+ * @offset: The storage buffer start address.
+ * @count: The number of words (32 bit) to write to the
+ * device (ICAP).
+ **/
+static int buffer_icap_device_write(struct hwicap_drvdata *drvdata,
+ u32 offset, u32 count)
+{
+
+ s32 retries = 0;
+ void __iomem *base_address = drvdata->base_address;
+
+ if (buffer_icap_busy(base_address))
+ return -EBUSY;
+
+ if ((offset + count) > XHI_MAX_BUFFER_INTS)
+ return -EINVAL;
+
+ /* Set the size register to count * 4 to get bytes. */
+ buffer_icap_set_size(base_address, count << 2);
+ buffer_icap_set_offset(base_address, offset);
+ buffer_icap_set_rnc(base_address, XHI_CONFIGURE);
+
+ while (buffer_icap_busy(base_address)) {
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
+ * buffer_icap_reset - Reset the logic of the icap device.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * Writing to the status register resets the ICAP logic in an internal
+ * version of the core. For the version of the core published in EDK,
+ * this is a noop.
+ **/
+void buffer_icap_reset(struct hwicap_drvdata *drvdata)
+{
+ out_be32(drvdata->base_address + XHI_STATUS_REG_OFFSET, 0xFEFE);
+}
+
+/**
+ * buffer_icap_set_configuration - Load a partial bitstream from system memory.
+ * @drvdata: a pointer to the drvdata.
+ * @data: Kernel address of the partial bitstream.
+ * @size: the size of the partial bitstream in 32 bit words.
+ **/
+int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size)
+{
+ int status;
+ s32 buffer_count = 0;
+ s32 num_writes = 0;
+ bool dirty = false;
+ u32 i;
+ void __iomem *base_address = drvdata->base_address;
+
+ /* Loop through all the data */
+ for (i = 0, buffer_count = 0; i < size; i++) {
+
+ /* Copy data to bram */
+ buffer_icap_set_bram(base_address, buffer_count, data[i]);
+ dirty = true;
+
+ if (buffer_count < XHI_MAX_BUFFER_INTS - 1) {
+ buffer_count++;
+ continue;
+ }
+
+ /* Write data to ICAP */
+ status = buffer_icap_device_write(
+ drvdata,
+ XHI_BUFFER_START,
+ XHI_MAX_BUFFER_INTS);
+ if (status != 0) {
+ /* abort. */
+ buffer_icap_reset(drvdata);
+ return status;
+ }
+
+ buffer_count = 0;
+ num_writes++;
+ dirty = false;
+ }
+
+ /* Write unwritten data to ICAP */
+ if (dirty) {
+ /* Write data to ICAP */
+ status = buffer_icap_device_write(drvdata, XHI_BUFFER_START,
+ buffer_count);
+ if (status != 0) {
+ /* abort. */
+ buffer_icap_reset(drvdata);
+ }
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * buffer_icap_get_configuration - Read configuration data from the device.
+ * @drvdata: a pointer to the drvdata.
+ * @data: Address of the data representing the partial bitstream
+ * @size: the size of the partial bitstream in 32 bit words.
+ **/
+int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size)
+{
+ int status;
+ s32 buffer_count = 0;
+ s32 read_count = 0;
+ u32 i;
+ void __iomem *base_address = drvdata->base_address;
+
+ /* Loop through all the data */
+ for (i = 0, buffer_count = XHI_MAX_BUFFER_INTS; i < size; i++) {
+ if (buffer_count == XHI_MAX_BUFFER_INTS) {
+ u32 words_remaining = size - i;
+ u32 words_to_read =
+ words_remaining <
+ XHI_MAX_BUFFER_INTS ? words_remaining :
+ XHI_MAX_BUFFER_INTS;
+
+ /* Read data from ICAP */
+ status = buffer_icap_device_read(
+ drvdata,
+ XHI_BUFFER_START,
+ words_to_read);
+ if (status != 0) {
+ /* abort. */
+ buffer_icap_reset(drvdata);
+ return status;
+ }
+
+ buffer_count = 0;
+ read_count++;
+ }
+
+ /* Copy data from bram */
+ data[i] = buffer_icap_get_bram(base_address, buffer_count);
+ buffer_count++;
+ }
+
+ return 0;
+}
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.h b/drivers/char/xilinx_hwicap/buffer_icap.h
new file mode 100644
index 00000000..d4f419ee
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/buffer_icap.h
@@ -0,0 +1,54 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * (c) Copyright 2003-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_BUFFER_ICAP_H_ /* prevent circular inclusions */
+#define XILINX_BUFFER_ICAP_H_ /* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include "xilinx_hwicap.h"
+
+/* Loads a partial bitstream from system memory into the device. */
+int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size);
+
+/* Reads a partial bitstream back from the device into system memory. */
+int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size);
+
+u32 buffer_icap_get_status(struct hwicap_drvdata *drvdata);
+void buffer_icap_reset(struct hwicap_drvdata *drvdata);
+
+#endif
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c
new file mode 100644
index 00000000..02225eb1
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/fifo_icap.c
@@ -0,0 +1,393 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * (c) Copyright 2007-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#include "fifo_icap.h"
+
+/* Register offsets for the XHwIcap device. */
+#define XHI_GIER_OFFSET 0x1C /* Device Global Interrupt Enable Reg */
+#define XHI_IPISR_OFFSET 0x20 /* Interrupt Status Register */
+#define XHI_IPIER_OFFSET 0x28 /* Interrupt Enable Register */
+#define XHI_WF_OFFSET 0x100 /* Write FIFO */
+#define XHI_RF_OFFSET 0x104 /* Read FIFO */
+#define XHI_SZ_OFFSET 0x108 /* Size Register */
+#define XHI_CR_OFFSET 0x10C /* Control Register */
+#define XHI_SR_OFFSET 0x110 /* Status Register */
+#define XHI_WFV_OFFSET 0x114 /* Write FIFO Vacancy Register */
+#define XHI_RFO_OFFSET 0x118 /* Read FIFO Occupancy Register */
+
+/* Device Global Interrupt Enable Register (GIER) bit definitions */
+
+#define XHI_GIER_GIE_MASK 0x80000000 /* Global Interrupt enable Mask */
+
+/**
+ * HwIcap Device Interrupt Status/Enable Registers
+ *
+ * Interrupt Status Register (IPISR) : This register holds the
+ * interrupt status flags for the device. These bits toggle on
+ * write.
+ *
+ * Interrupt Enable Register (IPIER) : This register is used to enable
+ * interrupt sources for the device.
+ * Writing a '1' to a bit enables the corresponding interrupt.
+ * Writing a '0' to a bit disables the corresponding interrupt.
+ *
+ * IPISR/IPIER registers have the same bit definitions and are only defined
+ * once.
+ */
+#define XHI_IPIXR_RFULL_MASK 0x00000008 /* Read FIFO Full */
+#define XHI_IPIXR_WEMPTY_MASK 0x00000004 /* Write FIFO Empty */
+#define XHI_IPIXR_RDP_MASK 0x00000002 /* Read FIFO half full */
+#define XHI_IPIXR_WRP_MASK 0x00000001 /* Write FIFO half full */
+#define XHI_IPIXR_ALL_MASK 0x0000000F /* Mask of all interrupts */
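+
+/*
+ * Illustrative sketch only (this polled driver never enables
+ * interrupts): acknowledging pending interrupts and enabling the
+ * write-FIFO-empty source. The helper name is hypothetical.
+ */
+static inline void fifo_icap_ack_and_enable_irqs(
+ struct hwicap_drvdata *drvdata)
+{
+ /* IPISR bits toggle on write, so writing back the set bits clears them. */
+ u32 pending = in_be32(drvdata->base_address + XHI_IPISR_OFFSET);
+ out_be32(drvdata->base_address + XHI_IPISR_OFFSET, pending);
+
+ /* Enable the write-FIFO-empty source, then the global enable. */
+ out_be32(drvdata->base_address + XHI_IPIER_OFFSET, XHI_IPIXR_WEMPTY_MASK);
+ out_be32(drvdata->base_address + XHI_GIER_OFFSET, XHI_GIER_GIE_MASK);
+}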
+
+/* Control Register (CR) */
+#define XHI_CR_SW_RESET_MASK 0x00000008 /* SW Reset Mask */
+#define XHI_CR_FIFO_CLR_MASK 0x00000004 /* FIFO Clear Mask */
+#define XHI_CR_READ_MASK 0x00000002 /* Read from ICAP to FIFO */
+#define XHI_CR_WRITE_MASK 0x00000001 /* Write from FIFO to ICAP */
+
+
+#define XHI_WFO_MAX_VACANCY 1024 /* Max Write FIFO Vacancy, in words */
+#define XHI_RFO_MAX_OCCUPANCY 256 /* Max Read FIFO Occupancy, in words */
+/* The maximum amount we can request from fifo_icap_get_configuration
+ at once, in words. */
+#define XHI_MAX_READ_TRANSACTION_WORDS 0xFFF
+
+
+/**
+ * fifo_icap_fifo_write - Write data to the write FIFO.
+ * @drvdata: a pointer to the drvdata.
+ * @data: the 32-bit value to be written to the FIFO.
+ *
+ * This function will silently fail if the fifo is full.
+ **/
+static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata,
+ u32 data)
+{
+ dev_dbg(drvdata->dev, "fifo_write: %x\n", data);
+ out_be32(drvdata->base_address + XHI_WF_OFFSET, data);
+}
+
+/**
+ * fifo_icap_fifo_read - Read data from the Read FIFO.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * This function will silently fail if the fifo is empty.
+ **/
+static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata)
+{
+ u32 data = in_be32(drvdata->base_address + XHI_RF_OFFSET);
+ dev_dbg(drvdata->dev, "fifo_read: %x\n", data);
+ return data;
+}
+
+/**
+ * fifo_icap_set_read_size - Set the size register.
+ * @drvdata: a pointer to the drvdata.
+ * @data: the size of the following read transaction, in words.
+ **/
+static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata,
+ u32 data)
+{
+ out_be32(drvdata->base_address + XHI_SZ_OFFSET, data);
+}
+
+/**
+ * fifo_icap_start_config - Initiate a configuration (write) to the device.
+ * @drvdata: a pointer to the drvdata.
+ **/
+static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata)
+{
+ out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_WRITE_MASK);
+ dev_dbg(drvdata->dev, "configuration started\n");
+}
+
+/**
+ * fifo_icap_start_readback - Initiate a readback from the device.
+ * @drvdata: a pointer to the drvdata.
+ **/
+static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata)
+{
+ out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_READ_MASK);
+ dev_dbg(drvdata->dev, "readback started\n");
+}
+
+/**
+ * fifo_icap_get_status - Get the contents of the status register.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * The status register contains the ICAP status and the done bit.
+ *
+ * D8 - cfgerr
+ * D7 - dalign
+ * D6 - rip
+ * D5 - in_abort_l
+ * D4 - Always 1
+ * D3 - Always 1
+ * D2 - Always 1
+ * D1 - Always 1
+ * D0 - Done bit
+ **/
+u32 fifo_icap_get_status(struct hwicap_drvdata *drvdata)
+{
+ u32 status = in_be32(drvdata->base_address + XHI_SR_OFFSET);
+ dev_dbg(drvdata->dev, "Getting status = %x\n", status);
+ return status;
+}
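+
+/*
+ * Illustrative sketch: decoding the status word above with the mask
+ * definitions from xilinx_hwicap.h. The helper name is hypothetical
+ * and the function is not used elsewhere in this driver.
+ */
+static inline void fifo_icap_decode_status(struct hwicap_drvdata *drvdata,
+ u32 status)
+{
+ dev_dbg(drvdata->dev, "cfgerr=%d dalign=%d rip=%d abort=%d done=%d\n",
+ !(status & XHI_SR_CFGERR_N_MASK), /* active low */
+ !!(status & XHI_SR_DALIGN_MASK),
+ !!(status & XHI_SR_RIP_MASK),
+ !(status & XHI_SR_IN_ABORT_N_MASK), /* active low */
+ !!(status & XHI_SR_DONE_MASK));
+}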
+
+/**
+ * fifo_icap_busy - Return true if the ICAP is still processing a transaction.
+ * @drvdata: a pointer to the drvdata.
+ **/
+static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata)
+{
+ u32 status = in_be32(drvdata->base_address + XHI_SR_OFFSET);
+ return (status & XHI_SR_DONE_MASK) ? 0 : 1;
+}
+
+/**
+ * fifo_icap_write_fifo_vacancy - Query the write fifo available space.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * Return the number of words that can be safely pushed into the write fifo.
+ **/
+static inline u32 fifo_icap_write_fifo_vacancy(
+ struct hwicap_drvdata *drvdata)
+{
+ return in_be32(drvdata->base_address + XHI_WFV_OFFSET);
+}
+
+/**
+ * fifo_icap_read_fifo_occupancy - Query the read fifo available data.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * Return the number of words that can be safely read from the read fifo.
+ **/
+static inline u32 fifo_icap_read_fifo_occupancy(
+ struct hwicap_drvdata *drvdata)
+{
+ return in_be32(drvdata->base_address + XHI_RFO_OFFSET);
+}
+
+/**
+ * fifo_icap_set_configuration - Send configuration data to the ICAP.
+ * @drvdata: a pointer to the drvdata.
+ * @frame_buffer: a pointer to the data to be written to the
+ * ICAP device.
+ * @num_words: the number of words (32 bit) to write to the ICAP
+ * device.
+ *
+ * This function writes the given user data to the Write FIFO in
+ * polled mode and starts the transfer of the data to
+ * the ICAP device.
+ **/
+int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata,
+ u32 *frame_buffer, u32 num_words)
+{
+
+ u32 write_fifo_vacancy = 0;
+ u32 retries = 0;
+ u32 remaining_words;
+
+ dev_dbg(drvdata->dev, "fifo_set_configuration\n");
+
+ /*
+ * Check if the ICAP device is Busy with the last Read/Write
+ */
+ if (fifo_icap_busy(drvdata))
+ return -EBUSY;
+
+ /*
+ * Set up the buffer pointer and the words to be transferred.
+ */
+ remaining_words = num_words;
+
+ while (remaining_words > 0) {
+ /*
+ * Wait until there is free space in the write FIFO.
+ */
+ while (write_fifo_vacancy == 0) {
+ write_fifo_vacancy =
+ fifo_icap_write_fifo_vacancy(drvdata);
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EIO;
+ }
+
+ /*
+ * Write data into the Write FIFO.
+ */
+ while ((write_fifo_vacancy != 0) &&
+ (remaining_words > 0)) {
+ fifo_icap_fifo_write(drvdata, *frame_buffer);
+
+ remaining_words--;
+ write_fifo_vacancy--;
+ frame_buffer++;
+ }
+ /* Start pushing whatever is in the FIFO into the ICAP. */
+ fifo_icap_start_config(drvdata);
+ }
+
+ /* Wait until the write has finished. */
+ while (fifo_icap_busy(drvdata)) {
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ break;
+ }
+
+ dev_dbg(drvdata->dev, "done fifo_set_configuration\n");
+
+ /*
+ * If the requested number of words has not been written to
+ * the device, then indicate failure.
+ */
+ if (remaining_words != 0)
+ return -EIO;
+
+ return 0;
+}
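+
+/*
+ * Usage sketch (illustrative only, assuming the Virtex-4 register
+ * layout): pushing a four-word command sequence through the write
+ * path. Callers in xilinx_hwicap.c build such buffers with
+ * hwicap_type_1_write().
+ *
+ * u32 buf[4] = { 0x30008001, XHI_CMD_DESYNCH,
+ * XHI_NOOP_PACKET, XHI_NOOP_PACKET };
+ * int err = fifo_icap_set_configuration(drvdata, buf, 4);
+ */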
+
+/**
+ * fifo_icap_get_configuration - Read configuration data from the device.
+ * @drvdata: a pointer to the drvdata.
+ * @frame_buffer: address of the buffer that receives the partial bitstream.
+ * @num_words: the size of the partial bitstream in 32-bit words.
+ *
+ * This function reads the specified number of words from the ICAP device in
+ * the polled mode.
+ */
+int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata,
+ u32 *frame_buffer, u32 num_words)
+{
+
+ u32 read_fifo_occupancy = 0;
+ u32 retries = 0;
+ u32 *data = frame_buffer;
+ u32 remaining_words;
+ u32 words_to_read;
+
+ dev_dbg(drvdata->dev, "fifo_get_configuration\n");
+
+ /*
+ * Check if the ICAP device is Busy with the last Write/Read
+ */
+ if (fifo_icap_busy(drvdata))
+ return -EBUSY;
+
+ remaining_words = num_words;
+
+ while (remaining_words > 0) {
+ words_to_read = remaining_words;
+ /* The hardware has a limit on the number of words
+ that can be read at one time. */
+ if (words_to_read > XHI_MAX_READ_TRANSACTION_WORDS)
+ words_to_read = XHI_MAX_READ_TRANSACTION_WORDS;
+
+ remaining_words -= words_to_read;
+
+ fifo_icap_set_read_size(drvdata, words_to_read);
+ fifo_icap_start_readback(drvdata);
+
+ while (words_to_read > 0) {
+ /* Wait until we have some data in the fifo. */
+ while (read_fifo_occupancy == 0) {
+ read_fifo_occupancy =
+ fifo_icap_read_fifo_occupancy(drvdata);
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EIO;
+ }
+
+ if (read_fifo_occupancy > words_to_read)
+ read_fifo_occupancy = words_to_read;
+
+ words_to_read -= read_fifo_occupancy;
+
+ /* Read the data from the Read FIFO. */
+ while (read_fifo_occupancy != 0) {
+ *data++ = fifo_icap_fifo_read(drvdata);
+ read_fifo_occupancy--;
+ }
+ }
+ }
+
+ dev_dbg(drvdata->dev, "done fifo_get_configuration\n");
+
+ return 0;
+}
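+
+/*
+ * Usage sketch (illustrative only): a readback request packet must
+ * first be written via fifo_icap_set_configuration(); the returned
+ * words are then drained with, e.g.,
+ *
+ * u32 idcode;
+ * int err = fifo_icap_get_configuration(drvdata, &idcode, 1);
+ *
+ * hwicap_get_configuration_register() in xilinx_hwicap.c follows this
+ * pattern.
+ */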
+
+/**
+ * fifo_icap_reset - Reset the logic of the ICAP device.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * This function forces the software reset of the complete HWICAP device.
+ * All the registers will return to the default value and the FIFO is also
+ * flushed as a part of this software reset.
+ */
+void fifo_icap_reset(struct hwicap_drvdata *drvdata)
+{
+ u32 reg_data;
+ /*
+ * Reset the device by setting/clearing the RESET bit in the
+ * Control Register.
+ */
+ reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data | XHI_CR_SW_RESET_MASK);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data & (~XHI_CR_SW_RESET_MASK));
+
+}
+
+/**
+ * fifo_icap_flush_fifo - This function flushes the FIFOs in the device.
+ * @drvdata: a pointer to the drvdata.
+ */
+void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata)
+{
+ u32 reg_data;
+ /*
+ * Flush the FIFO by setting/clearing the FIFO Clear bit in the
+ * Control Register.
+ */
+ reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data | XHI_CR_FIFO_CLR_MASK);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data & (~XHI_CR_FIFO_CLR_MASK));
+}
+
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.h b/drivers/char/xilinx_hwicap/fifo_icap.h
new file mode 100644
index 00000000..4c9dd9a3
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/fifo_icap.h
@@ -0,0 +1,59 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * (c) Copyright 2007-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_FIFO_ICAP_H_ /* prevent circular inclusions */
+#define XILINX_FIFO_ICAP_H_ /* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include "xilinx_hwicap.h"
+
+/* Reads integers from the device into the storage buffer. */
+int fifo_icap_get_configuration(
+ struct hwicap_drvdata *drvdata,
+ u32 *frame_buffer,
+ u32 num_words);
+
+/* Writes integers to the device from the storage buffer. */
+int fifo_icap_set_configuration(
+ struct hwicap_drvdata *drvdata,
+ u32 *frame_buffer,
+ u32 num_words);
+
+u32 fifo_icap_get_status(struct hwicap_drvdata *drvdata);
+void fifo_icap_reset(struct hwicap_drvdata *drvdata);
+void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata);
+
+#endif
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
new file mode 100644
index 00000000..39ccdead
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -0,0 +1,866 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * (c) Copyright 2002 Xilinx Inc., Systems Engineering Group
+ * (c) Copyright 2004 Xilinx Inc., Systems Engineering Group
+ * (c) Copyright 2007-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+/*
+ * This is the code behind /dev/icap* -- it allows a user-space
+ * application to use the Xilinx ICAP subsystem.
+ *
+ * The following operations are possible:
+ *
+ * open open the port and initialize for access.
+ * release release port
+ * write Write a bitstream to the configuration processor.
+ * read Read a data stream from the configuration processor.
+ *
+ * After being opened, the port is initialized and accessed to avoid a
+ * corrupted first read which may occur with some hardware. The port
+ * is left in a desynched state, requiring that a synch sequence be
+ * transmitted before any valid configuration data. A user will have
+ * exclusive access to the device while it remains open, and the state
+ * of the ICAP cannot be guaranteed after the device is closed. Note
+ * that a complete reset of the core and the state of the ICAP cannot
+ * be performed on many versions of the cores, hence users of this
+ * device should avoid making inconsistent accesses to the device. In
+ * particular, accessing the read interface without first generating
+ * a write containing a readback packet can leave the ICAP in an
+ * inaccessible state.
+ *
+ * Note that in order to use the read interface, it is first necessary
+ * to write a request packet to the write interface; i.e., it is not
+ * possible to simply read back the bitstream (or any configuration
+ * bits) from a device without specifically requesting them first.
+ * The code to craft such packets is intended to be part of the
+ * user-space application code that uses this device. The simplest
+ * way to use this interface is simply:
+ *
+ * cp foo.bit /dev/icap0
+ *
+ * Note that unless foo.bit is an appropriately constructed partial
+ * bitstream, this has a high likelihood of overwriting the design
+ * currently programmed in the FPGA.
+ */
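+
+/*
+ * A minimal user-space sketch of the above (illustrative; error
+ * handling omitted and the device node name assumed):
+ *
+ * int fd = open("/dev/icap0", O_RDWR);
+ * write(fd, bitstream, bitstream_len); // must contain a SYNC sequence
+ * // To read back, first write a readback request packet, then:
+ * read(fd, buf, nbytes); // reads are rounded up to whole words
+ * close(fd);
+ */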
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/sysctl.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_OF
+/* For open firmware. */
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#endif
+
+#include "xilinx_hwicap.h"
+#include "buffer_icap.h"
+#include "fifo_icap.h"
+
+#define DRIVER_NAME "icap"
+
+#define HWICAP_REGS (0x10000)
+
+#define XHWICAP_MAJOR 259
+#define XHWICAP_MINOR 0
+#define HWICAP_DEVICES 1
+
+static DEFINE_MUTEX(hwicap_mutex);
+/* An array whose entries are set to true when the corresponding
+ device is registered. */
+static bool probed_devices[HWICAP_DEVICES];
+static struct mutex icap_sem;
+
+static struct class *icap_class;
+
+#define UNIMPLEMENTED 0xFFFF
+
+static const struct config_registers v2_config_registers = {
+ .CRC = 0,
+ .FAR = 1,
+ .FDRI = 2,
+ .FDRO = 3,
+ .CMD = 4,
+ .CTL = 5,
+ .MASK = 6,
+ .STAT = 7,
+ .LOUT = 8,
+ .COR = 9,
+ .MFWR = 10,
+ .FLR = 11,
+ .KEY = 12,
+ .CBC = 13,
+ .IDCODE = 14,
+ .AXSS = UNIMPLEMENTED,
+ .C0R_1 = UNIMPLEMENTED,
+ .CSOB = UNIMPLEMENTED,
+ .WBSTAR = UNIMPLEMENTED,
+ .TIMER = UNIMPLEMENTED,
+ .BOOTSTS = UNIMPLEMENTED,
+ .CTL_1 = UNIMPLEMENTED,
+};
+
+static const struct config_registers v4_config_registers = {
+ .CRC = 0,
+ .FAR = 1,
+ .FDRI = 2,
+ .FDRO = 3,
+ .CMD = 4,
+ .CTL = 5,
+ .MASK = 6,
+ .STAT = 7,
+ .LOUT = 8,
+ .COR = 9,
+ .MFWR = 10,
+ .FLR = UNIMPLEMENTED,
+ .KEY = UNIMPLEMENTED,
+ .CBC = 11,
+ .IDCODE = 12,
+ .AXSS = 13,
+ .C0R_1 = UNIMPLEMENTED,
+ .CSOB = UNIMPLEMENTED,
+ .WBSTAR = UNIMPLEMENTED,
+ .TIMER = UNIMPLEMENTED,
+ .BOOTSTS = UNIMPLEMENTED,
+ .CTL_1 = UNIMPLEMENTED,
+};
+static const struct config_registers v5_config_registers = {
+ .CRC = 0,
+ .FAR = 1,
+ .FDRI = 2,
+ .FDRO = 3,
+ .CMD = 4,
+ .CTL = 5,
+ .MASK = 6,
+ .STAT = 7,
+ .LOUT = 8,
+ .COR = 9,
+ .MFWR = 10,
+ .FLR = UNIMPLEMENTED,
+ .KEY = UNIMPLEMENTED,
+ .CBC = 11,
+ .IDCODE = 12,
+ .AXSS = 13,
+ .C0R_1 = 14,
+ .CSOB = 15,
+ .WBSTAR = 16,
+ .TIMER = 17,
+ .BOOTSTS = 18,
+ .CTL_1 = 19,
+};
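+
+/*
+ * Note that the same logical register can sit at a different index in
+ * each family: IDCODE, for instance, is 14 on Virtex-2P but 12 on
+ * Virtex-4/5. All packet builders below therefore go through
+ * drvdata->config_regs instead of hard-coding indices.
+ */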
+
+/**
+ * hwicap_command_desync - Send a DESYNC command to the ICAP port.
+ * @drvdata: a pointer to the drvdata.
+ *
+ * This command desynchronizes the ICAP. After this command, a
+ * bitstream containing a NULL packet followed by a SYNCH packet is
+ * required before the ICAP will recognize commands.
+ */
+static int hwicap_command_desync(struct hwicap_drvdata *drvdata)
+{
+ u32 buffer[4];
+ u32 index = 0;
+
+ /*
+ * Create the data to be written to the ICAP.
+ */
+ buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1;
+ buffer[index++] = XHI_CMD_DESYNCH;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+
+ /*
+ * Write the data to the FIFO and initiate the transfer of data present
+ * in the FIFO to the ICAP device.
+ */
+ return drvdata->config->set_configuration(drvdata,
+ &buffer[0], index);
+}
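+
+/*
+ * Worked example, derived from the masks in xilinx_hwicap.h: with the
+ * Virtex-4 layout (CMD = 4) the sequence above is, word for word,
+ *
+ * 0x30008001 type-1 write, register CMD, word count 1
+ * 0x0000000D XHI_CMD_DESYNCH
+ * 0x20000000 XHI_NOOP_PACKET
+ * 0x20000000 XHI_NOOP_PACKET
+ */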
+
+/**
+ * hwicap_get_configuration_register - Query a configuration register.
+ * @drvdata: a pointer to the drvdata.
+ * @reg: a constant which represents the configuration
+ * register value to be returned.
+ * Examples: XHI_IDCODE, XHI_FLR.
+ * @reg_data: returns the value of the register.
+ *
+ * Sends a query packet to the ICAP and then receives the response.
+ * The ICAP is left in a synched state.
+ */
+static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
+ u32 reg, u32 *reg_data)
+{
+ int status;
+ u32 buffer[6];
+ u32 index = 0;
+
+ /*
+ * Create the data to be written to the ICAP.
+ */
+ buffer[index++] = XHI_DUMMY_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = XHI_SYNC_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+
+ /*
+ * Write the data to the FIFO and initiate the transfer of data present
+ * in the FIFO to the ICAP device.
+ */
+ status = drvdata->config->set_configuration(drvdata,
+ &buffer[0], index);
+ if (status)
+ return status;
+
+ /* If the syncword was not found, then we need to start over. */
+ status = drvdata->config->get_status(drvdata);
+ if ((status & XHI_SR_DALIGN_MASK) != XHI_SR_DALIGN_MASK)
+ return -EIO;
+
+ index = 0;
+ buffer[index++] = hwicap_type_1_read(reg) | 1;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+
+ /*
+ * Write the data to the FIFO and initiate the transfer of data present
+ * in the FIFO to the ICAP device.
+ */
+ status = drvdata->config->set_configuration(drvdata,
+ &buffer[0], index);
+ if (status)
+ return status;
+
+ /*
+ * Read the configuration register
+ */
+ status = drvdata->config->get_configuration(drvdata, reg_data, 1);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
+{
+ int status;
+ u32 idcode;
+
+ dev_dbg(drvdata->dev, "initializing\n");
+
+ /* Abort any current transaction, to make sure we have the
+ * ICAP in a good state. */
+ dev_dbg(drvdata->dev, "Reset...\n");
+ drvdata->config->reset(drvdata);
+
+ dev_dbg(drvdata->dev, "Desync...\n");
+ status = hwicap_command_desync(drvdata);
+ if (status)
+ return status;
+
+ /* Attempt to read the IDCODE from ICAP. This
+ * may not be returned correctly, due to the design of the
+ * hardware.
+ */
+ dev_dbg(drvdata->dev, "Reading IDCODE...\n");
+ status = hwicap_get_configuration_register(
+ drvdata, drvdata->config_regs->IDCODE, &idcode);
+ dev_dbg(drvdata->dev, "IDCODE = %x\n", idcode);
+ if (status)
+ return status;
+
+ dev_dbg(drvdata->dev, "Desync...\n");
+ status = hwicap_command_desync(drvdata);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static ssize_t
+hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ struct hwicap_drvdata *drvdata = file->private_data;
+ ssize_t bytes_to_read = 0;
+ u32 *kbuf;
+ u32 words;
+ u32 bytes_remaining;
+ int status;
+
+ status = mutex_lock_interruptible(&drvdata->sem);
+ if (status)
+ return status;
+
+ if (drvdata->read_buffer_in_use) {
+ /* If there are leftover bytes in the buffer, just */
+ /* return them and don't try to read more from the */
+ /* ICAP device. */
+ bytes_to_read =
+ (count < drvdata->read_buffer_in_use) ? count :
+ drvdata->read_buffer_in_use;
+
+ /* Return the data currently in the read buffer. */
+ if (copy_to_user(buf, drvdata->read_buffer, bytes_to_read)) {
+ status = -EFAULT;
+ goto error;
+ }
+ drvdata->read_buffer_in_use -= bytes_to_read;
+ memmove(drvdata->read_buffer,
+ drvdata->read_buffer + bytes_to_read,
+ 4 - bytes_to_read);
+ } else {
+ /* Get new data from the ICAP, and return what was requested. */
+ kbuf = (u32 *) get_zeroed_page(GFP_KERNEL);
+ if (!kbuf) {
+ status = -ENOMEM;
+ goto error;
+ }
+
+ /* The ICAP device is only able to read complete */
+ /* words. If a number of bytes that do not correspond */
+ /* to complete words is requested, then we read enough */
+ /* words to get the required number of bytes, and then */
+ /* save the remaining bytes for the next read. */
+
+ /* Determine the number of words to read, rounding up */
+ /* if necessary. */
+ words = ((count + 3) >> 2);
+ bytes_to_read = words << 2;
+
+ if (bytes_to_read > PAGE_SIZE)
+ bytes_to_read = PAGE_SIZE;
+
+ /* Ensure we only read a complete number of words. */
+ bytes_remaining = bytes_to_read & 3;
+ bytes_to_read &= ~3;
+ words = bytes_to_read >> 2;
+
+ status = drvdata->config->get_configuration(drvdata,
+ kbuf, words);
+
+ /* If we didn't read correctly, then bail out. */
+ if (status) {
+ free_page((unsigned long)kbuf);
+ goto error;
+ }
+
+ /* If we fail to return the data to the user, then bail out. */
+ if (copy_to_user(buf, kbuf, bytes_to_read)) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ memcpy(drvdata->read_buffer,
+ kbuf,
+ bytes_remaining);
+ drvdata->read_buffer_in_use = bytes_remaining;
+ free_page((unsigned long)kbuf);
+ }
+ status = bytes_to_read;
+ error:
+ mutex_unlock(&drvdata->sem);
+ return status;
+}
+
+static ssize_t
+hwicap_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hwicap_drvdata *drvdata = file->private_data;
+ ssize_t written = 0;
+ ssize_t left = count;
+ u32 *kbuf;
+ ssize_t len;
+ ssize_t status;
+
+ status = mutex_lock_interruptible(&drvdata->sem);
+ if (status)
+ return status;
+
+ left += drvdata->write_buffer_in_use;
+
+ /* Only write multiples of 4 bytes. */
+ if (left < 4) {
+ status = 0;
+ goto error;
+ }
+
+ kbuf = (u32 *) __get_free_page(GFP_KERNEL);
+ if (!kbuf) {
+ status = -ENOMEM;
+ goto error;
+ }
+
+ while (left > 3) {
+ /* only write multiples of 4 bytes, so there might */
+ /* be as many as 3 bytes left (at the end). */
+ len = left;
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ len &= ~3;
+
+ if (drvdata->write_buffer_in_use) {
+ memcpy(kbuf, drvdata->write_buffer,
+ drvdata->write_buffer_in_use);
+ if (copy_from_user(
+ (((char *)kbuf) + drvdata->write_buffer_in_use),
+ buf + written,
+ len - (drvdata->write_buffer_in_use))) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ } else {
+ if (copy_from_user(kbuf, buf + written, len)) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ }
+
+ status = drvdata->config->set_configuration(drvdata,
+ kbuf, len >> 2);
+
+ if (status) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ if (drvdata->write_buffer_in_use) {
+ len -= drvdata->write_buffer_in_use;
+ left -= drvdata->write_buffer_in_use;
+ drvdata->write_buffer_in_use = 0;
+ }
+ written += len;
+ left -= len;
+ }
+ if ((left > 0) && (left < 4)) {
+ if (!copy_from_user(drvdata->write_buffer,
+ buf + written, left)) {
+ drvdata->write_buffer_in_use = left;
+ written += left;
+ left = 0;
+ }
+ }
+
+ free_page((unsigned long)kbuf);
+ status = written;
+ error:
+ mutex_unlock(&drvdata->sem);
+ return status;
+}
+
+static int hwicap_open(struct inode *inode, struct file *file)
+{
+ struct hwicap_drvdata *drvdata;
+ int status;
+
+ mutex_lock(&hwicap_mutex);
+ drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev);
+
+ status = mutex_lock_interruptible(&drvdata->sem);
+ if (status)
+ goto out;
+
+ if (drvdata->is_open) {
+ status = -EBUSY;
+ goto error;
+ }
+
+ status = hwicap_initialize_hwicap(drvdata);
+ if (status) {
+ dev_err(drvdata->dev, "Failed to open file");
+ goto error;
+ }
+
+ file->private_data = drvdata;
+ drvdata->write_buffer_in_use = 0;
+ drvdata->read_buffer_in_use = 0;
+ drvdata->is_open = 1;
+
+ error:
+ mutex_unlock(&drvdata->sem);
+ out:
+ mutex_unlock(&hwicap_mutex);
+ return status;
+}
+
+static int hwicap_release(struct inode *inode, struct file *file)
+{
+ struct hwicap_drvdata *drvdata = file->private_data;
+ int i;
+ int status = 0;
+
+ mutex_lock(&drvdata->sem);
+
+ if (drvdata->write_buffer_in_use) {
+ /* Flush write buffer. */
+ for (i = drvdata->write_buffer_in_use; i < 4; i++)
+ drvdata->write_buffer[i] = 0;
+
+ status = drvdata->config->set_configuration(drvdata,
+ (u32 *) drvdata->write_buffer, 1);
+ if (status)
+ goto error;
+ }
+
+ status = hwicap_command_desync(drvdata);
+ if (status)
+ goto error;
+
+ error:
+ drvdata->is_open = 0;
+ mutex_unlock(&drvdata->sem);
+ return status;
+}
+
+static const struct file_operations hwicap_fops = {
+ .owner = THIS_MODULE,
+ .write = hwicap_write,
+ .read = hwicap_read,
+ .open = hwicap_open,
+ .release = hwicap_release,
+ .llseek = noop_llseek,
+};
+
+static int __devinit hwicap_setup(struct device *dev, int id,
+ const struct resource *regs_res,
+ const struct hwicap_driver_config *config,
+ const struct config_registers *config_regs)
+{
+ dev_t devt;
+ struct hwicap_drvdata *drvdata = NULL;
+ int retval = 0;
+
+ dev_info(dev, "Xilinx icap port driver\n");
+
+ mutex_lock(&icap_sem);
+
+ if (id < 0) {
+ for (id = 0; id < HWICAP_DEVICES; id++)
+ if (!probed_devices[id])
+ break;
+ }
+ if (id < 0 || id >= HWICAP_DEVICES) {
+ mutex_unlock(&icap_sem);
+ dev_err(dev, "%s%i too large\n", DRIVER_NAME, id);
+ return -EINVAL;
+ }
+ if (probed_devices[id]) {
+ mutex_unlock(&icap_sem);
+ dev_err(dev, "cannot assign to %s%i; it is already in use\n",
+ DRIVER_NAME, id);
+ return -EBUSY;
+ }
+
+ probed_devices[id] = 1;
+ mutex_unlock(&icap_sem);
+
+ devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR + id);
+
+ drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ dev_err(dev, "Couldn't allocate device private record\n");
+ retval = -ENOMEM;
+ goto failed0;
+ }
+ dev_set_drvdata(dev, (void *)drvdata);
+
+ if (!regs_res) {
+ dev_err(dev, "Couldn't get registers resource\n");
+ retval = -EFAULT;
+ goto failed1;
+ }
+
+ drvdata->mem_start = regs_res->start;
+ drvdata->mem_end = regs_res->end;
+ drvdata->mem_size = regs_res->end - regs_res->start + 1;
+
+ if (!request_mem_region(drvdata->mem_start,
+ drvdata->mem_size, DRIVER_NAME)) {
+ dev_err(dev, "Couldn't lock memory region at %Lx\n",
+ (unsigned long long) regs_res->start);
+ retval = -EBUSY;
+ goto failed1;
+ }
+
+ drvdata->devt = devt;
+ drvdata->dev = dev;
+ drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size);
+ if (!drvdata->base_address) {
+ dev_err(dev, "ioremap() failed\n");
+ retval = -ENOMEM;
+ goto failed2;
+ }
+
+ drvdata->config = config;
+ drvdata->config_regs = config_regs;
+
+ mutex_init(&drvdata->sem);
+ drvdata->is_open = 0;
+
+ dev_info(dev, "ioremap %llx to %p with size %llx\n",
+ (unsigned long long) drvdata->mem_start,
+ drvdata->base_address,
+ (unsigned long long) drvdata->mem_size);
+
+ cdev_init(&drvdata->cdev, &hwicap_fops);
+ drvdata->cdev.owner = THIS_MODULE;
+ retval = cdev_add(&drvdata->cdev, devt, 1);
+ if (retval) {
+ dev_err(dev, "cdev_add() failed\n");
+ goto failed3;
+ }
+
+ device_create(icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id);
+ return 0; /* success */
+
+ failed3:
+ iounmap(drvdata->base_address);
+
+ failed2:
+ release_mem_region(regs_res->start, drvdata->mem_size);
+
+ failed1:
+ kfree(drvdata);
+
+ failed0:
+ mutex_lock(&icap_sem);
+ probed_devices[id] = 0;
+ mutex_unlock(&icap_sem);
+
+ return retval;
+}
+
+static struct hwicap_driver_config buffer_icap_config = {
+ .get_configuration = buffer_icap_get_configuration,
+ .set_configuration = buffer_icap_set_configuration,
+ .get_status = buffer_icap_get_status,
+ .reset = buffer_icap_reset,
+};
+
+static struct hwicap_driver_config fifo_icap_config = {
+ .get_configuration = fifo_icap_get_configuration,
+ .set_configuration = fifo_icap_set_configuration,
+ .get_status = fifo_icap_get_status,
+ .reset = fifo_icap_reset,
+};
+
+static int __devexit hwicap_remove(struct device *dev)
+{
+ struct hwicap_drvdata *drvdata;
+
+ drvdata = (struct hwicap_drvdata *)dev_get_drvdata(dev);
+
+ if (!drvdata)
+ return 0;
+
+ device_destroy(icap_class, drvdata->devt);
+ cdev_del(&drvdata->cdev);
+ iounmap(drvdata->base_address);
+ release_mem_region(drvdata->mem_start, drvdata->mem_size);
+ kfree(drvdata);
+ dev_set_drvdata(dev, NULL);
+
+ mutex_lock(&icap_sem);
+ probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0;
+ mutex_unlock(&icap_sem);
+ return 0; /* success */
+}
+
+#ifdef CONFIG_OF
+static int __devinit hwicap_of_probe(struct platform_device *op,
+ const struct hwicap_driver_config *config)
+{
+ struct resource res;
+ const unsigned int *id;
+ const char *family;
+ int rc;
+ const struct config_registers *regs;
+
+ rc = of_address_to_resource(op->dev.of_node, 0, &res);
+ if (rc) {
+ dev_err(&op->dev, "invalid address\n");
+ return rc;
+ }
+
+ id = of_get_property(op->dev.of_node, "port-number", NULL);
+
+ /* Default to the Virtex-4 register layout when the family is
+ not specified. */
+ regs = &v4_config_registers;
+ family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
+
+ if (family) {
+ if (!strcmp(family, "virtex2p")) {
+ regs = &v2_config_registers;
+ } else if (!strcmp(family, "virtex4")) {
+ regs = &v4_config_registers;
+ } else if (!strcmp(family, "virtex5")) {
+ regs = &v5_config_registers;
+ }
+ }
+ return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
+ regs);
+}
+#else
+static inline int hwicap_of_probe(struct platform_device *op,
+ const struct hwicap_driver_config *config)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+static const struct of_device_id __devinitconst hwicap_of_match[];
+static int __devinit hwicap_drv_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct resource *res;
+ const struct config_registers *regs;
+ const char *family;
+
+ match = of_match_device(hwicap_of_match, &pdev->dev);
+ if (match)
+ return hwicap_of_probe(pdev, match->data);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ /* Default to the Virtex-4 register layout when the family is
+ not specified. */
+ regs = &v4_config_registers;
+ family = pdev->dev.platform_data;
+
+ if (family) {
+ if (!strcmp(family, "virtex2p")) {
+ regs = &v2_config_registers;
+ } else if (!strcmp(family, "virtex4")) {
+ regs = &v4_config_registers;
+ } else if (!strcmp(family, "virtex5")) {
+ regs = &v5_config_registers;
+ }
+ }
+
+ return hwicap_setup(&pdev->dev, pdev->id, res,
+ &buffer_icap_config, regs);
+}
+
+static int __devexit hwicap_drv_remove(struct platform_device *pdev)
+{
+ return hwicap_remove(&pdev->dev);
+}
+
+#ifdef CONFIG_OF
+/* Match table for device tree binding */
+static const struct of_device_id __devinitconst hwicap_of_match[] = {
+ { .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config},
+ { .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config},
+ {},
+};
+MODULE_DEVICE_TABLE(of, hwicap_of_match);
+#else
+#define hwicap_of_match NULL
+#endif
+
+static struct platform_driver hwicap_platform_driver = {
+ .probe = hwicap_drv_probe,
+ .remove = hwicap_drv_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = hwicap_of_match,
+ },
+};
+
+static int __init hwicap_module_init(void)
+{
+ dev_t devt;
+ int retval;
+
+ icap_class = class_create(THIS_MODULE, "xilinx_config");
+ mutex_init(&icap_sem);
+
+ devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR);
+ retval = register_chrdev_region(devt,
+ HWICAP_DEVICES,
+ DRIVER_NAME);
+ if (retval < 0)
+ return retval;
+
+ retval = platform_driver_register(&hwicap_platform_driver);
+ if (retval)
+ goto failed;
+
+ return retval;
+
+ failed:
+ unregister_chrdev_region(devt, HWICAP_DEVICES);
+
+ return retval;
+}
+
+static void __exit hwicap_module_cleanup(void)
+{
+ dev_t devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR);
+
+ class_destroy(icap_class);
+
+ platform_driver_unregister(&hwicap_platform_driver);
+
+ unregister_chrdev_region(devt, HWICAP_DEVICES);
+}
+
+module_init(hwicap_module_init);
+module_exit(hwicap_module_cleanup);
+
+MODULE_AUTHOR("Xilinx, Inc; Xilinx Research Labs Group");
+MODULE_DESCRIPTION("Xilinx ICAP Port Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
new file mode 100644
index 00000000..8cca1198
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
@@ -0,0 +1,213 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * (c) Copyright 2003-2007 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_HWICAP_H_ /* prevent circular inclusions */
+#define XILINX_HWICAP_H_ /* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+
+struct hwicap_drvdata {
+ u32 write_buffer_in_use; /* Always in [0,3] */
+ u8 write_buffer[4];
+ u32 read_buffer_in_use; /* Always in [0,3] */
+ u8 read_buffer[4];
+ resource_size_t mem_start;/* phys. address of the control registers */
+ resource_size_t mem_end; /* last phys. address of the control registers */
+ resource_size_t mem_size;
+ void __iomem *base_address;/* virt. address of the control registers */
+
+ struct device *dev;
+ struct cdev cdev; /* Char device structure */
+ dev_t devt;
+
+ const struct hwicap_driver_config *config;
+ const struct config_registers *config_regs;
+ void *private_data;
+ bool is_open;
+ struct mutex sem;
+};
+
+struct hwicap_driver_config {
+ /* Read configuration data given by size into the data buffer.
+ Return 0 if successful. */
+ int (*get_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size);
+ /* Write configuration data given by size from the data buffer.
+ Return 0 if successful. */
+ int (*set_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size);
+ /* Get the status register, bit pattern given by:
+ * D8 - 0 = configuration error
+ * D7 - 1 = alignment found
+ * D6 - 1 = readback in progress
+ * D5 - 0 = abort in progress
+ * D4 - Always 1
+ * D3 - Always 1
+ * D2 - Always 1
+ * D1 - Always 1
+ * D0 - 1 = operation completed
+ */
+ u32 (*get_status)(struct hwicap_drvdata *drvdata);
+ /* Reset the hw */
+ void (*reset)(struct hwicap_drvdata *drvdata);
+};
+
+/* Number of times to poll the done register */
+#define XHI_MAX_RETRIES 10
+
+/************ Constant Definitions *************/
+
+#define XHI_PAD_FRAMES 0x1
+
+/* Mask for calculating configuration packet headers */
+#define XHI_WORD_COUNT_MASK_TYPE_1 0x7FFUL
+#define XHI_WORD_COUNT_MASK_TYPE_2 0x1FFFFFUL
+#define XHI_TYPE_MASK 0x7
+#define XHI_REGISTER_MASK 0xF
+#define XHI_OP_MASK 0x3
+
+#define XHI_TYPE_SHIFT 29
+#define XHI_REGISTER_SHIFT 13
+#define XHI_OP_SHIFT 27
+
+#define XHI_TYPE_1 1
+#define XHI_TYPE_2 2
+#define XHI_OP_WRITE 2
+#define XHI_OP_READ 1
+
+/* Address Block Types */
+#define XHI_FAR_CLB_BLOCK 0
+#define XHI_FAR_BRAM_BLOCK 1
+#define XHI_FAR_BRAM_INT_BLOCK 2
+
+struct config_registers {
+ u32 CRC;
+ u32 FAR;
+ u32 FDRI;
+ u32 FDRO;
+ u32 CMD;
+ u32 CTL;
+ u32 MASK;
+ u32 STAT;
+ u32 LOUT;
+ u32 COR;
+ u32 MFWR;
+ u32 FLR;
+ u32 KEY;
+ u32 CBC;
+ u32 IDCODE;
+ u32 AXSS;
+ u32 C0R_1;
+ u32 CSOB;
+ u32 WBSTAR;
+ u32 TIMER;
+ u32 BOOTSTS;
+ u32 CTL_1;
+};
+
+/* Configuration Commands */
+#define XHI_CMD_NULL 0
+#define XHI_CMD_WCFG 1
+#define XHI_CMD_MFW 2
+#define XHI_CMD_DGHIGH 3
+#define XHI_CMD_RCFG 4
+#define XHI_CMD_START 5
+#define XHI_CMD_RCAP 6
+#define XHI_CMD_RCRC 7
+#define XHI_CMD_AGHIGH 8
+#define XHI_CMD_SWITCH 9
+#define XHI_CMD_GRESTORE 10
+#define XHI_CMD_SHUTDOWN 11
+#define XHI_CMD_GCAPTURE 12
+#define XHI_CMD_DESYNCH 13
+#define XHI_CMD_IPROG 15 /* Only in Virtex5 */
+#define XHI_CMD_CRCC 16 /* Only in Virtex5 */
+#define XHI_CMD_LTIMER 17 /* Only in Virtex5 */
+
+/* Packet constants */
+#define XHI_SYNC_PACKET 0xAA995566UL
+#define XHI_DUMMY_PACKET 0xFFFFFFFFUL
+#define XHI_NOOP_PACKET (XHI_TYPE_1 << XHI_TYPE_SHIFT)
+#define XHI_TYPE_2_READ ((XHI_TYPE_2 << XHI_TYPE_SHIFT) | \
+ (XHI_OP_READ << XHI_OP_SHIFT))
+
+#define XHI_TYPE_2_WRITE ((XHI_TYPE_2 << XHI_TYPE_SHIFT) | \
+ (XHI_OP_WRITE << XHI_OP_SHIFT))
+
+#define XHI_TYPE2_CNT_MASK 0x07FFFFFF
+
+#define XHI_TYPE_1_PACKET_MAX_WORDS 2047UL
+#define XHI_TYPE_1_HEADER_BYTES 4
+#define XHI_TYPE_2_HEADER_BYTES 8
+
+/* Constant to use for CRC check when CRC has been disabled */
+#define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL
+
+/* Meanings of the bits returned by get_status */
+#define XHI_SR_CFGERR_N_MASK 0x00000100 /* Config Error Mask */
+#define XHI_SR_DALIGN_MASK 0x00000080 /* Data Alignment Mask */
+#define XHI_SR_RIP_MASK 0x00000040 /* Read back Mask */
+#define XHI_SR_IN_ABORT_N_MASK 0x00000020 /* Select Map Abort Mask */
+#define XHI_SR_DONE_MASK 0x00000001 /* Done bit Mask */
+
+/**
+ * hwicap_type_1_read - Generates a Type 1 read packet header.
+ * @reg: is the address of the register to be read back.
+ *
+ * Generates a Type 1 read packet header, which is used to indirectly
+ * read registers in the configuration logic. This packet must then
+ * be sent through the icap device, and a return packet received with
+ * the information.
+ **/
+static inline u32 hwicap_type_1_read(u32 reg)
+{
+ return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
+ (reg << XHI_REGISTER_SHIFT) |
+ (XHI_OP_READ << XHI_OP_SHIFT);
+}
+
+/**
+ * hwicap_type_1_write - Generates a Type 1 write packet header
+ * @reg: is the address of the register to be written.
+ **/
+static inline u32 hwicap_type_1_write(u32 reg)
+{
+ return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
+ (reg << XHI_REGISTER_SHIFT) |
+ (XHI_OP_WRITE << XHI_OP_SHIFT);
+}
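+
+/*
+ * Worked example (the values follow from the shifts above): with the
+ * Virtex-4 layout, IDCODE is register 12, so
+ *
+ * hwicap_type_1_read(12) == 0x28018000
+ * hwicap_type_1_write(4) == 0x30008000
+ *
+ * and callers OR in a word count, e.g. hwicap_type_1_read(12) | 1.
+ */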
+
+#endif