author    root <root@artemis.panaceas.org>  2015-12-25 04:40:36 +0000
committer root <root@artemis.panaceas.org>  2015-12-25 04:40:36 +0000
commit    849369d6c66d3054688672f97d31fceb8e8230fb (patch)
tree      6135abc790ca67dedbe07c39806591e70eda81ce /drivers/tty
download  linux-3.0.35-kobo-849369d6c66d3054688672f97d31fceb8e8230fb.tar.gz
          linux-3.0.35-kobo-849369d6c66d3054688672f97d31fceb8e8230fb.tar.bz2
          linux-3.0.35-kobo-849369d6c66d3054688672f97d31fceb8e8230fb.zip
initial_commit
Diffstat (limited to 'drivers/tty')
-rw-r--r-- drivers/tty/Kconfig | 352
-rw-r--r-- drivers/tty/Makefile | 30
-rw-r--r-- drivers/tty/amiserial.c | 2178
-rw-r--r-- drivers/tty/bfin_jtag_comm.c | 368
-rw-r--r-- drivers/tty/cyclades.c | 4196
-rw-r--r-- drivers/tty/hvc/Kconfig | 105
-rw-r--r-- drivers/tty/hvc/Makefile | 13
-rw-r--r-- drivers/tty/hvc/hvc_beat.c | 134
-rw-r--r-- drivers/tty/hvc/hvc_bfin_jtag.c | 105
-rw-r--r-- drivers/tty/hvc/hvc_console.c | 916
-rw-r--r-- drivers/tty/hvc/hvc_console.h | 119
-rw-r--r-- drivers/tty/hvc/hvc_dcc.c | 106
-rw-r--r-- drivers/tty/hvc/hvc_irq.c | 49
-rw-r--r-- drivers/tty/hvc/hvc_iseries.c | 598
-rw-r--r-- drivers/tty/hvc/hvc_iucv.c | 1337
-rw-r--r-- drivers/tty/hvc/hvc_rtas.c | 133
-rw-r--r-- drivers/tty/hvc/hvc_tile.c | 68
-rw-r--r-- drivers/tty/hvc/hvc_udbg.c | 96
-rw-r--r-- drivers/tty/hvc/hvc_vio.c | 173
-rw-r--r-- drivers/tty/hvc/hvc_xen.c | 273
-rw-r--r-- drivers/tty/hvc/hvcs.c | 1616
-rw-r--r-- drivers/tty/hvc/hvsi.c | 1314
-rw-r--r-- drivers/tty/ipwireless/Makefile | 8
-rw-r--r-- drivers/tty/ipwireless/hardware.c | 1764
-rw-r--r-- drivers/tty/ipwireless/hardware.h | 62
-rw-r--r-- drivers/tty/ipwireless/main.c | 347
-rw-r--r-- drivers/tty/ipwireless/main.h | 68
-rw-r--r-- drivers/tty/ipwireless/network.c | 508
-rw-r--r-- drivers/tty/ipwireless/network.h | 53
-rw-r--r-- drivers/tty/ipwireless/setup_protocol.h | 108
-rw-r--r-- drivers/tty/ipwireless/tty.c | 679
-rw-r--r-- drivers/tty/ipwireless/tty.h | 45
-rw-r--r-- drivers/tty/isicom.c | 1736
-rw-r--r-- drivers/tty/moxa.c | 2084
-rw-r--r-- drivers/tty/moxa.h | 304
-rw-r--r-- drivers/tty/mxser.c | 2757
-rw-r--r-- drivers/tty/mxser.h | 150
-rw-r--r-- drivers/tty/n_gsm.c | 2806
-rw-r--r-- drivers/tty/n_hdlc.c | 1006
-rw-r--r-- drivers/tty/n_r3964.c | 1263
-rw-r--r-- drivers/tty/n_tracerouter.c | 243
-rw-r--r-- drivers/tty/n_tracesink.c | 238
-rw-r--r-- drivers/tty/n_tracesink.h | 36
-rw-r--r-- drivers/tty/n_tty.c | 2128
-rw-r--r-- drivers/tty/nozomi.c | 1971
-rw-r--r-- drivers/tty/pty.c | 796
-rw-r--r-- drivers/tty/rocket.c | 3152
-rw-r--r-- drivers/tty/rocket.h | 111
-rw-r--r-- drivers/tty/rocket_int.h | 1214
-rw-r--r-- drivers/tty/serial/21285.c | 511
-rw-r--r-- drivers/tty/serial/68328serial.c | 1448
-rw-r--r-- drivers/tty/serial/68328serial.h | 187
-rw-r--r-- drivers/tty/serial/68360serial.c | 2979
-rw-r--r-- drivers/tty/serial/8250.c | 3426
-rw-r--r-- drivers/tty/serial/8250.h | 79
-rw-r--r-- drivers/tty/serial/8250_accent.c | 45
-rw-r--r-- drivers/tty/serial/8250_acorn.c | 141
-rw-r--r-- drivers/tty/serial/8250_boca.c | 59
-rw-r--r-- drivers/tty/serial/8250_early.c | 287
-rw-r--r-- drivers/tty/serial/8250_exar_st16c554.c | 50
-rw-r--r-- drivers/tty/serial/8250_fourport.c | 51
-rw-r--r-- drivers/tty/serial/8250_gsc.c | 122
-rw-r--r-- drivers/tty/serial/8250_hp300.c | 327
-rw-r--r-- drivers/tty/serial/8250_hub6.c | 56
-rw-r--r-- drivers/tty/serial/8250_mca.c | 61
-rw-r--r-- drivers/tty/serial/8250_pci.c | 3970
-rw-r--r-- drivers/tty/serial/8250_pnp.c | 524
-rw-r--r-- drivers/tty/serial/Kconfig | 1638
-rw-r--r-- drivers/tty/serial/Makefile | 99
-rw-r--r-- drivers/tty/serial/altera_jtaguart.c | 516
-rw-r--r-- drivers/tty/serial/altera_uart.c | 661
-rw-r--r-- drivers/tty/serial/amba-pl010.c | 823
-rw-r--r-- drivers/tty/serial/amba-pl011.c | 2048
-rw-r--r-- drivers/tty/serial/apbuart.c | 696
-rw-r--r-- drivers/tty/serial/apbuart.h | 64
-rw-r--r-- drivers/tty/serial/atmel_serial.c | 1829
-rw-r--r-- drivers/tty/serial/bcm63xx_uart.c | 901
-rw-r--r-- drivers/tty/serial/bfin_5xx.c | 1597
-rw-r--r-- drivers/tty/serial/bfin_sport_uart.c | 934
-rw-r--r-- drivers/tty/serial/bfin_sport_uart.h | 86
-rw-r--r-- drivers/tty/serial/clps711x.c | 579
-rw-r--r-- drivers/tty/serial/cpm_uart/Makefile | 11
-rw-r--r-- drivers/tty/serial/cpm_uart/cpm_uart.h | 145
-rw-r--r-- drivers/tty/serial/cpm_uart/cpm_uart_core.c | 1440
-rw-r--r-- drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c | 136
-rw-r--r-- drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h | 32
-rw-r--r-- drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c | 172
-rw-r--r-- drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h | 32
-rw-r--r-- drivers/tty/serial/crisv10.c | 4572
-rw-r--r-- drivers/tty/serial/crisv10.h | 147
-rw-r--r-- drivers/tty/serial/dz.c | 955
-rw-r--r-- drivers/tty/serial/dz.h | 129
-rw-r--r-- drivers/tty/serial/icom.c | 1658
-rw-r--r-- drivers/tty/serial/icom.h | 287
-rw-r--r-- drivers/tty/serial/ifx6x60.c | 1415
-rw-r--r-- drivers/tty/serial/ifx6x60.h | 129
-rw-r--r-- drivers/tty/serial/imx.c | 1909
-rw-r--r-- drivers/tty/serial/ioc3_serial.c | 2199
-rw-r--r-- drivers/tty/serial/ioc4_serial.c | 2953
-rw-r--r-- drivers/tty/serial/ip22zilog.c | 1221
-rw-r--r-- drivers/tty/serial/ip22zilog.h | 281
-rw-r--r-- drivers/tty/serial/jsm/Makefile | 8
-rw-r--r-- drivers/tty/serial/jsm/jsm.h | 381
-rw-r--r-- drivers/tty/serial/jsm/jsm_driver.c | 297
-rw-r--r-- drivers/tty/serial/jsm/jsm_neo.c | 1413
-rw-r--r-- drivers/tty/serial/jsm/jsm_tty.c | 842
-rw-r--r-- drivers/tty/serial/kgdboc.c | 328
-rw-r--r-- drivers/tty/serial/lantiq.c | 756
-rw-r--r-- drivers/tty/serial/m32r_sio.c | 1193
-rw-r--r-- drivers/tty/serial/m32r_sio.h | 48
-rw-r--r-- drivers/tty/serial/m32r_sio_reg.h | 152
-rw-r--r-- drivers/tty/serial/max3100.c | 926
-rw-r--r-- drivers/tty/serial/max3107-aava.c | 344
-rw-r--r-- drivers/tty/serial/max3107.c | 1213
-rw-r--r-- drivers/tty/serial/max3107.h | 441
-rw-r--r-- drivers/tty/serial/mcf.c | 662
-rw-r--r-- drivers/tty/serial/mfd.c | 1470
-rw-r--r-- drivers/tty/serial/mpc52xx_uart.c | 1524
-rw-r--r-- drivers/tty/serial/mpsc.c | 2159
-rw-r--r-- drivers/tty/serial/mrst_max3110.c | 920
-rw-r--r-- drivers/tty/serial/mrst_max3110.h | 60
-rw-r--r-- drivers/tty/serial/msm_serial.c | 966
-rw-r--r-- drivers/tty/serial/msm_serial.h | 187
-rw-r--r-- drivers/tty/serial/msm_serial_hs.c | 1880
-rw-r--r-- drivers/tty/serial/msm_smd_tty.c | 235
-rw-r--r-- drivers/tty/serial/mux.c | 633
-rw-r--r-- drivers/tty/serial/mxc_uart_early.c | 191
-rw-r--r-- drivers/tty/serial/mxs-auart.c | 800
-rw-r--r-- drivers/tty/serial/netx-serial.c | 748
-rw-r--r-- drivers/tty/serial/nwpserial.c | 477
-rw-r--r-- drivers/tty/serial/of_serial.c | 206
-rw-r--r-- drivers/tty/serial/omap-serial.c | 1361
-rw-r--r-- drivers/tty/serial/pch_uart.c | 1624
-rw-r--r-- drivers/tty/serial/pmac_zilog.c | 2206
-rw-r--r-- drivers/tty/serial/pmac_zilog.h | 396
-rw-r--r-- drivers/tty/serial/pnx8xxx_uart.c | 854
-rw-r--r-- drivers/tty/serial/pxa.c | 897
-rw-r--r-- drivers/tty/serial/s3c2400.c | 105
-rw-r--r-- drivers/tty/serial/s3c2410.c | 117
-rw-r--r-- drivers/tty/serial/s3c2412.c | 151
-rw-r--r-- drivers/tty/serial/s3c2440.c | 180
-rw-r--r-- drivers/tty/serial/s3c24a0.c | 117
-rw-r--r-- drivers/tty/serial/s3c6400.c | 151
-rw-r--r-- drivers/tty/serial/s5pv210.c | 161
-rw-r--r-- drivers/tty/serial/sa1100.c | 916
-rw-r--r-- drivers/tty/serial/samsung.c | 1486
-rw-r--r-- drivers/tty/serial/samsung.h | 119
-rw-r--r-- drivers/tty/serial/sb1250-duart.c | 974
-rw-r--r-- drivers/tty/serial/sc26xx.c | 757
-rw-r--r-- drivers/tty/serial/serial_core.c | 2510
-rw-r--r-- drivers/tty/serial/serial_cs.c | 870
-rw-r--r-- drivers/tty/serial/serial_ks8695.c | 703
-rw-r--r-- drivers/tty/serial/serial_txx9.c | 1342
-rw-r--r-- drivers/tty/serial/sh-sci.c | 2088
-rw-r--r-- drivers/tty/serial/sh-sci.h | 468
-rw-r--r-- drivers/tty/serial/sn_console.c | 1085
-rw-r--r-- drivers/tty/serial/suncore.c | 247
-rw-r--r-- drivers/tty/serial/suncore.h | 33
-rw-r--r-- drivers/tty/serial/sunhv.c | 661
-rw-r--r-- drivers/tty/serial/sunsab.c | 1152
-rw-r--r-- drivers/tty/serial/sunsab.h | 322
-rw-r--r-- drivers/tty/serial/sunsu.c | 1608
-rw-r--r-- drivers/tty/serial/sunzilog.c | 1655
-rw-r--r-- drivers/tty/serial/sunzilog.h | 289
-rw-r--r-- drivers/tty/serial/timbuart.c | 531
-rw-r--r-- drivers/tty/serial/timbuart.h | 58
-rw-r--r-- drivers/tty/serial/uartlite.c | 654
-rw-r--r-- drivers/tty/serial/ucc_uart.c | 1539
-rw-r--r-- drivers/tty/serial/vr41xx_siu.c | 978
-rw-r--r-- drivers/tty/serial/vt8500_serial.c | 646
-rw-r--r-- drivers/tty/serial/xilinx_uartps.c | 1113
-rw-r--r-- drivers/tty/serial/zs.c | 1304
-rw-r--r-- drivers/tty/serial/zs.h | 284
-rw-r--r-- drivers/tty/synclink.c | 8117
-rw-r--r-- drivers/tty/synclink_gt.c | 5161
-rw-r--r-- drivers/tty/synclinkmp.c | 5600
-rw-r--r-- drivers/tty/sysrq.c | 905
-rw-r--r-- drivers/tty/tty_audit.c | 360
-rw-r--r-- drivers/tty/tty_buffer.c | 522
-rw-r--r-- drivers/tty/tty_io.c | 3357
-rw-r--r-- drivers/tty/tty_ioctl.c | 1181
-rw-r--r-- drivers/tty/tty_ldisc.c | 994
-rw-r--r-- drivers/tty/tty_mutex.c | 44
-rw-r--r-- drivers/tty/tty_port.c | 448
-rw-r--r-- drivers/tty/vt/.gitignore | 2
-rw-r--r-- drivers/tty/vt/Makefile | 34
-rw-r--r-- drivers/tty/vt/consolemap.c | 780
-rw-r--r-- drivers/tty/vt/cp437.uni | 291
-rw-r--r-- drivers/tty/vt/defkeymap.c_shipped | 262
-rw-r--r-- drivers/tty/vt/defkeymap.map | 357
-rw-r--r-- drivers/tty/vt/keyboard.c | 1453
-rw-r--r-- drivers/tty/vt/selection.c | 345
-rw-r--r-- drivers/tty/vt/vc_screen.c | 665
-rw-r--r-- drivers/tty/vt/vt.c | 4226
-rw-r--r-- drivers/tty/vt/vt_ioctl.c | 1799
195 files changed, 179608 insertions, 0 deletions
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
new file mode 100644
index 00000000..bd7cc052
--- /dev/null
+++ b/drivers/tty/Kconfig
@@ -0,0 +1,352 @@
+config VT
+ bool "Virtual terminal" if EXPERT
+ depends on !S390
+ select INPUT
+ default y
+ ---help---
+ If you say Y here, you will get support for terminal devices with
+ display and keyboard devices. These are called "virtual" because you
+ can run several virtual terminals (also called virtual consoles) on
+ one physical terminal. This is rather useful, for example one
+ virtual terminal can collect system messages and warnings, another
+ one can be used for a text-mode user session, and a third could run
+ an X session, all in parallel. Switching between virtual terminals
+ is done with certain key combinations, usually Alt-<function key>.
+
+ The setterm command ("man setterm") can be used to change the
+ properties (such as colors or beeping) of a virtual terminal. The
+ man page console_codes(4) ("man console_codes") contains the special
+ character sequences that can be used to change those properties
+ directly. The fonts used on virtual terminals can be changed with
+ the setfont ("man setfont") command and the key bindings are defined
+ with the loadkeys ("man loadkeys") command.
+
+ You need at least one virtual terminal device in order to make use
+ of your keyboard and monitor. Therefore, only people configuring an
+ embedded system would want to say N here in order to save some
+ memory; the only way to log into such a system is then via a serial
+ or network connection.
+
+ If unsure, say Y, or else you won't be able to do much with your new
+ shiny Linux system :-)
+
+config CONSOLE_TRANSLATIONS
+ depends on VT
+ default y
+ bool "Enable character translations in console" if EXPERT
+ ---help---
+ This enables support for font mapping and Unicode translation
+ on virtual consoles.
+
+config VT_CONSOLE
+ bool "Support for console on virtual terminal" if EXPERT
+ depends on VT
+ default y
+ ---help---
+ The system console is the device which receives all kernel messages
+ and warnings and which allows logins in single user mode. If you
+ answer Y here, a virtual terminal (the device used to interact with
+ a physical terminal) can be used as system console. This is the most
+ common mode of operation, so you should say Y here unless you want
+ the kernel messages to be output only to a serial port (in which case
+ you should say Y to "Console on serial port", below).
+
+ If you do say Y here, by default the currently visible virtual
+ terminal (/dev/tty0) will be used as system console. You can change
+ that with a kernel command line option such as "console=tty3" which
+ would use the third virtual terminal as system console. (Try "man
+ bootparam" or see the documentation of your boot loader (lilo or
+ loadlin) about how to pass options to the kernel at boot time.)
+
+ If unsure, say Y.
+
+config HW_CONSOLE
+ bool
+ depends on VT && !S390 && !UML
+ default y
+
+config VT_HW_CONSOLE_BINDING
+ bool "Support for binding and unbinding console drivers"
+ depends on HW_CONSOLE
+ default n
+ ---help---
+ The virtual terminal is the device that interacts with the physical
+ terminal through console drivers. On these systems, at least one
+ console driver is loaded. In other configurations, additional console
+ drivers may be enabled, such as the framebuffer console. If more than
+ 1 console driver is enabled, setting this to 'y' will allow you to
+ select the console driver that will serve as the backend for the
+ virtual terminals.
+
+ See <file:Documentation/console/console.txt> for more
+ information. For framebuffer console users, please refer to
+ <file:Documentation/fb/fbcon.txt>.
+
+config UNIX98_PTYS
+ bool "Unix98 PTY support" if EXPERT
+ default y
+ ---help---
+ A pseudo terminal (PTY) is a software device consisting of two
+ halves: a master and a slave. The slave device behaves identically to
+ a physical terminal; the master device is used by a process to
+ read data from and write data to the slave, thereby emulating a
+ terminal. Typical programs for the master side are telnet servers
+ and xterms.
+
+ Linux has traditionally used the BSD-like names /dev/ptyxx for
+ masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
+ has a number of problems. The GNU C library glibc 2.1 and later,
+ however, supports the Unix98 naming standard: in order to acquire a
+ pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
+ terminal is then made available to the process and the pseudo
+ terminal slave can be accessed as /dev/pts/<number>. What was
+ traditionally /dev/ttyp2 will then be /dev/pts/2, for example.
+
+ All modern Linux systems use the Unix98 ptys. Say Y unless
+ you're on an embedded system and want to conserve memory.
+
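As a concrete illustration of the /dev/ptmx flow described in the UNIX98_PTYS help text above, a minimal userspace sketch (not part of this commit; the flow is standard POSIX, error handling is trimmed) looks like this:

#define _XOPEN_SOURCE 600
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>

int main(void)
{
    /* open the pty master multiplexer; the kernel allocates a free slave */
    int master = posix_openpt(O_RDWR | O_NOCTTY);

    if (master < 0 || grantpt(master) < 0 || unlockpt(master) < 0)
        return 1;
    /* the matching slave appears as /dev/pts/<number> */
    printf("slave side: %s\n", ptsname(master));
    return 0;
}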
+config DEVPTS_MULTIPLE_INSTANCES
+ bool "Support multiple instances of devpts"
+ depends on UNIX98_PTYS
+ default n
+ ---help---
+ Enable support for multiple instances of devpts filesystem.
+ If you want to have isolated PTY namespaces (eg: in containers),
+ say Y here. Otherwise, say N. If enabled, each mount of devpts
+ filesystem with the '-o newinstance' option will create an
+ independent PTY namespace.
+
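For the DEVPTS_MULTIPLE_INSTANCES case above, a hedged sketch of creating an independent devpts instance from C (equivalent to "mount -t devpts -o newinstance devpts /dev/pts"; the target path and ptmx mode are illustrative):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
    /* each mount with "newinstance" gets its own, isolated PTY namespace */
    if (mount("devpts", "/dev/pts", "devpts", 0,
              "newinstance,ptmxmode=0666") < 0) {
        perror("mount devpts");
        return 1;
    }
    return 0;
}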
+config LEGACY_PTYS
+ bool "Legacy (BSD) PTY support"
+ default y
+ ---help---
+ A pseudo terminal (PTY) is a software device consisting of two
+ halves: a master and a slave. The slave device behaves identically to
+ a physical terminal; the master device is used by a process to
+ read data from and write data to the slave, thereby emulating a
+ terminal. Typical programs for the master side are telnet servers
+ and xterms.
+
+ Linux has traditionally used the BSD-like names /dev/ptyxx
+ for masters and /dev/ttyxx for slaves of pseudo
+ terminals. This scheme has a number of problems, including
+ security. This option enables these legacy devices; on most
+ systems, it is safe to say N.
+
+
+config LEGACY_PTY_COUNT
+ int "Maximum number of legacy PTY in use"
+ depends on LEGACY_PTYS
+ range 0 256
+ default "256"
+ ---help---
+ The maximum number of legacy PTYs that can be used at any one time.
+ The default is 256, and should be more than enough. Embedded
+ systems may want to reduce this to save memory.
+
+ When not in use, each legacy PTY occupies 12 bytes on 32-bit
+ architectures and 24 bytes on 64-bit architectures.
+
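Put into numbers, at the default of 256 the idle legacy PTY table works out to roughly 256 x 12 = 3072 bytes on 32-bit and 256 x 24 = 6144 bytes on 64-bit systems, which is why embedded configurations may want a lower value.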
+config BFIN_JTAG_COMM
+ tristate "Blackfin JTAG Communication"
+ depends on BLACKFIN
+ help
+ Add support for emulating a TTY device over the Blackfin JTAG.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bfin_jtag_comm.
+
+config BFIN_JTAG_COMM_CONSOLE
+ bool "Console on Blackfin JTAG"
+ depends on BFIN_JTAG_COMM=y
+
+config SERIAL_NONSTANDARD
+ bool "Non-standard serial port support"
+ depends on HAS_IOMEM
+ ---help---
+ Say Y here if you have any non-standard serial boards -- boards
+ which aren't supported using the standard "dumb" serial driver.
+ This includes intelligent serial boards such as Cyclades,
+ Digiboards, etc. These are usually used for systems that need many
+ serial ports because they serve many terminals or dial-in
+ connections.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about non-standard serial boards.
+
+ Most people can say N here.
+
+config ROCKETPORT
+ tristate "Comtrol RocketPort support"
+ depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
+ help
+ This driver supports Comtrol RocketPort and RocketModem PCI boards.
+ These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
+ modems. For information about the RocketPort/RocketModem boards
+ and this driver read <file:Documentation/serial/rocket.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rocket.
+
+ If you want to compile this driver into the kernel, say Y here. If
+ you don't have a Comtrol RocketPort/RocketModem card installed, say N.
+
+config CYCLADES
+ tristate "Cyclades async mux support"
+ depends on SERIAL_NONSTANDARD && (PCI || ISA)
+ select FW_LOADER
+ ---help---
+ This driver supports Cyclades Z and Y multiserial boards.
+ You would need something like this to connect more than two modems to
+ your Linux box, for instance in order to become a dial-in server.
+
+ For information about the Cyclades-Z card, read
+ <file:Documentation/serial/README.cycladesZ>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cyclades.
+
+ If you haven't heard about it, it's safe to say N.
+
+config CYZ_INTR
+ bool "Cyclades-Z interrupt mode operation (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && CYCLADES
+ help
+ The Cyclades-Z family of multiport cards allows 2 (two) driver op
+ modes: polling and interrupt. In polling mode, the driver will check
+ the status of the Cyclades-Z ports every certain amount of time
+ (which is called polling cycle and is configurable). In interrupt
+ mode, it will use an interrupt line (IRQ) in order to check the
+ status of the Cyclades-Z ports. The default op mode is polling. If
+ unsure, say N.
+
+config MOXA_INTELLIO
+ tristate "Moxa Intellio support"
+ depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
+ select FW_LOADER
+ help
+ Say Y here if you have a Moxa Intellio multiport serial card.
+
+ To compile this driver as a module, choose M here: the
+ module will be called moxa.
+
+config MOXA_SMARTIO
+ tristate "Moxa SmartIO support v. 2.0"
+ depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA)
+ help
+ Say Y here if you have a Moxa SmartIO multiport serial card and/or
+ want to help develop a new version of this driver.
+
+ This is an upgraded (1.9.1) driver from the original Moxa drivers with
+ changes finally resulting in PCI probing.
+
+ This driver can also be built as a module. The module will be called
+ mxser. If you want to do that, say M here.
+
+config SYNCLINK
+ tristate "Microgate SyncLink card support"
+ depends on SERIAL_NONSTANDARD && PCI && ISA_DMA_API
+ help
+ Provides support for the SyncLink ISA and PCI multiprotocol serial
+ adapters. These adapters support asynchronous and HDLC bit
+ synchronous communication up to 10Mbps (PCI adapter).
+
+ This driver can only be built as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called synclink. If you want to do that, say M
+ here.
+
+config SYNCLINKMP
+ tristate "SyncLink Multiport support"
+ depends on SERIAL_NONSTANDARD && PCI
+ help
+ Enable support for the SyncLink Multiport (2 or 4 ports)
+ serial adapter, running asynchronous and HDLC communications up
+ to 2.048Mbps. Each port is independently selectable for
+ RS-232, V.35, RS-449, RS-530, and X.21.
+
+ This driver may be built as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called synclinkmp. If you want to do that, say M
+ here.
+
+config SYNCLINK_GT
+ tristate "SyncLink GT/AC support"
+ depends on SERIAL_NONSTANDARD && PCI
+ help
+ Support for SyncLink GT and SyncLink AC families of
+ synchronous and asynchronous serial adapters
+ manufactured by Microgate Systems, Ltd. (www.microgate.com)
+
+config NOZOMI
+ tristate "HSDPA Broadband Wireless Data Card - Globe Trotter"
+ depends on PCI && EXPERIMENTAL
+ help
+ If you have an HSDPA Broadband Wireless Data Card -
+ Globe Trotter PCMCIA card, say Y here.
+
+ To compile this driver as a module, choose M here, the module
+ will be called nozomi.
+
+config ISI
+ tristate "Multi-Tech multiport card support (EXPERIMENTAL)"
+ depends on SERIAL_NONSTANDARD && PCI
+ select FW_LOADER
+ help
+ This is a driver for the Multi-Tech cards which provide several
+ serial ports. The driver is experimental and can currently only be
+ built as a module. The module will be called isicom.
+ If you want to do that, choose M here.
+
+config N_HDLC
+ tristate "HDLC line discipline support"
+ depends on SERIAL_NONSTANDARD
+ help
+ Allows synchronous HDLC communications with tty device drivers that
+ support synchronous HDLC such as the Microgate SyncLink adapter.
+
+ This driver can be built as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called n_hdlc. If you want to do that, say M
+ here.
+
+config N_GSM
+ tristate "GSM MUX line discipline support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on NET
+ help
+ This line discipline provides support for the GSM MUX protocol and
+ presents the mux as a set of 61 individual tty devices.
+
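To make the line-discipline mechanism used by N_GSM (and by the N_HDLC and TRACE_* entries in this file) concrete, here is a hedged userspace sketch that attaches the GSM 07.10 mux to an already configured serial port with the TIOCSETD ioctl; the device path is illustrative and error paths leak the fd:

#include <fcntl.h>
#include <sys/ioctl.h>

#ifndef N_GSM0710
#define N_GSM0710 21        /* line discipline number from include/linux/tty.h */
#endif

int attach_gsm_mux(const char *port)    /* e.g. "/dev/ttyS0" */
{
    int fd = open(port, O_RDWR | O_NOCTTY);
    int ldisc = N_GSM0710;

    if (fd < 0)
        return -1;
    /* switch the tty from the default N_TTY to the GSM MUX line discipline */
    if (ioctl(fd, TIOCSETD, &ldisc) < 0)
        return -1;
    return fd;              /* the mux then exposes its virtual tty devices */
}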
+config TRACE_ROUTER
+ tristate "Trace data router for MIPI P1149.7 cJTAG standard"
+ depends on TRACE_SINK
+ default n
+ help
+ The trace router uses the Linux tty line discipline framework to
+ route trace data coming from a tty port (say UART for example) to
+ the trace sink line discipline driver and to another tty port (say
+ USB). This is part of a solution for the MIPI P1149.7, compact JTAG,
+ standard, which is for debugging mobile devices. The PTI driver in
+ drivers/misc/pti.c defines the majority of this MIPI solution.
+
+ You should select this driver if the target kernel is meant for
+ a mobile device containing a modem. Then you will need to select
+ "Trace data sink for MIPI P1149.7 cJTAG standard" line discipline
+ driver.
+
+config TRACE_SINK
+ tristate "Trace data sink for MIPI P1149.7 cJTAG standard"
+ default n
+ help
+ The trace sink uses the Linux line discipline framework to receive
+ trace data coming from the trace router line discipline driver
+ to a user-defined tty port target, like USB.
+ This is to provide a way to extract modem trace data on
+ devices that do not have a PTI HW module, or just need modem
+ trace data to come out of a different HW output port.
+ This is part of a solution for the P1149.7, compact JTAG, standard.
+
+ If you select this option, you need to select
+ "Trace data router for MIPI P1149.7 cJTAG standard".
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
new file mode 100644
index 00000000..ea89b0bd
--- /dev/null
+++ b/drivers/tty/Makefile
@@ -0,0 +1,30 @@
+obj-y += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
+ tty_buffer.o tty_port.o tty_mutex.o
+obj-$(CONFIG_LEGACY_PTYS) += pty.o
+obj-$(CONFIG_UNIX98_PTYS) += pty.o
+obj-$(CONFIG_AUDIT) += tty_audit.o
+obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
+obj-$(CONFIG_N_HDLC) += n_hdlc.o
+obj-$(CONFIG_N_GSM) += n_gsm.o
+obj-$(CONFIG_TRACE_ROUTER) += n_tracerouter.o
+obj-$(CONFIG_TRACE_SINK) += n_tracesink.o
+obj-$(CONFIG_R3964) += n_r3964.o
+
+obj-y += vt/
+obj-$(CONFIG_HVC_DRIVER) += hvc/
+obj-y += serial/
+
+# tty drivers
+obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
+obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o
+obj-$(CONFIG_CYCLADES) += cyclades.o
+obj-$(CONFIG_ISI) += isicom.o
+obj-$(CONFIG_MOXA_INTELLIO) += moxa.o
+obj-$(CONFIG_MOXA_SMARTIO) += mxser.o
+obj-$(CONFIG_NOZOMI) += nozomi.o
+obj-$(CONFIG_ROCKETPORT) += rocket.o
+obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
+obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
+obj-$(CONFIG_SYNCLINK) += synclink.o
+
+obj-y += ipwireless/
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
new file mode 100644
index 00000000..34111486
--- /dev/null
+++ b/drivers/tty/amiserial.c
@@ -0,0 +1,2178 @@
+/*
+ * Serial driver for the amiga builtin port.
+ *
+ * This code was created by taking serial.c version 4.30 from kernel
+ * release 2.3.22, replacing all hardware related stuff with the
+ * corresponding amiga hardware actions, and removing all irrelevant
+ * code. As a consequence, it uses many of the constants and names
+ * associated with the registers and bits of 16550 compatible UARTS -
+ * but only to keep track of status, etc in the state variables. It
+ * was done this way to make it easier to keep the code in line with
+ * (non hardware specific) changes to serial.c.
+ *
+ * The port is registered with the tty driver as minor device 64, and
+ * therefore other ports should only use 65 upwards.
+ *
+ * Richard Lucock 28/12/99
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997,
+ * 1998, 1999 Theodore Ts'o
+ *
+ */
+
+/*
+ * Serial driver configuration section. Here are the various options:
+ *
+ * SERIAL_PARANOIA_CHECK
+ * Check the magic number for the async_structure where
+ * ever possible.
+ */
+
+#include <linux/delay.h>
+
+#undef SERIAL_PARANOIA_CHECK
+#define SERIAL_DO_RESTART
+
+/* Set of debugging defines */
+
+#undef SERIAL_DEBUG_INTR
+#undef SERIAL_DEBUG_OPEN
+#undef SERIAL_DEBUG_FLOW
+#undef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+
+/* Sanity checks */
+
+#if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT)
+#define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \
+ tty->name, (info->flags), serial_driver->refcount,info->count,tty->count,s)
+#else
+#define DBG_CNT(s)
+#endif
+
+/*
+ * End of serial driver configuration section.
+ */
+
+#include <linux/module.h>
+
+#include <linux/types.h>
+#include <linux/serial.h>
+#include <linux/serialP.h>
+#include <linux/serial_reg.h>
+static char *serial_version = "4.30";
+
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/console.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+
+#include <asm/setup.h>
+
+#include <asm/system.h>
+
+#include <asm/irq.h>
+
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+
+#define custom amiga_custom
+static char *serial_name = "Amiga-builtin serial driver";
+
+static struct tty_driver *serial_driver;
+
+/* number of characters left in xmit buffer before we ask for more */
+#define WAKEUP_CHARS 256
+
+static struct async_struct *IRQ_ports;
+
+static unsigned char current_ctl_bits;
+
+static void change_speed(struct async_struct *info, struct ktermios *old);
+static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
+
+
+static struct serial_state rs_table[1];
+
+#define NR_PORTS ARRAY_SIZE(rs_table)
+
+#include <asm/uaccess.h>
+
+#define serial_isroot() (capable(CAP_SYS_ADMIN))
+
+
+static inline int serial_paranoia_check(struct async_struct *info,
+ char *name, const char *routine)
+{
+#ifdef SERIAL_PARANOIA_CHECK
+ static const char *badmagic =
+ "Warning: bad magic number for serial struct (%s) in %s\n";
+ static const char *badinfo =
+ "Warning: null async_struct for (%s) in %s\n";
+
+ if (!info) {
+ printk(badinfo, name, routine);
+ return 1;
+ }
+ if (info->magic != SERIAL_MAGIC) {
+ printk(badmagic, name, routine);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/* some serial hardware definitions */
+#define SDR_OVRUN (1<<15)
+#define SDR_RBF (1<<14)
+#define SDR_TBE (1<<13)
+#define SDR_TSRE (1<<12)
+
+#define SERPER_PARENB (1<<15)
+
+#define AC_SETCLR (1<<15)
+#define AC_UARTBRK (1<<11)
+
+#define SER_DTR (1<<7)
+#define SER_RTS (1<<6)
+#define SER_DCD (1<<5)
+#define SER_CTS (1<<4)
+#define SER_DSR (1<<3)
+
+static __inline__ void rtsdtr_ctrl(int bits)
+{
+ ciab.pra = ((bits & (SER_RTS | SER_DTR)) ^ (SER_RTS | SER_DTR)) | (ciab.pra & ~(SER_RTS | SER_DTR));
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_stop() and rs_start()
+ *
+ * These routines are called before setting or resetting tty->stopped.
+ * They enable or disable transmitter interrupts, as necessary.
+ * ------------------------------------------------------------
+ */
+static void rs_stop(struct tty_struct *tty)
+{
+ struct async_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_stop"))
+ return;
+
+ local_irq_save(flags);
+ if (info->IER & UART_IER_THRI) {
+ info->IER &= ~UART_IER_THRI;
+ /* disable Tx interrupt and remove any pending interrupts */
+ custom.intena = IF_TBE;
+ mb();
+ custom.intreq = IF_TBE;
+ mb();
+ }
+ local_irq_restore(flags);
+}
+
+static void rs_start(struct tty_struct *tty)
+{
+ struct async_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_start"))
+ return;
+
+ local_irq_save(flags);
+ if (info->xmit.head != info->xmit.tail
+ && info->xmit.buf
+ && !(info->IER & UART_IER_THRI)) {
+ info->IER |= UART_IER_THRI;
+ custom.intena = IF_SETCLR | IF_TBE;
+ mb();
+ /* set a pending Tx Interrupt, transmitter should restart now */
+ custom.intreq = IF_SETCLR | IF_TBE;
+ mb();
+ }
+ local_irq_restore(flags);
+}
+
+/*
+ * ----------------------------------------------------------------------
+ *
+ * Here starts the interrupt handling routines. All of the following
+ * subroutines are declared as inline and are folded into
+ * rs_interrupt(). They were separated out for readability's sake.
+ *
+ * Note: rs_interrupt() is a "fast" interrupt, which means that it
+ * runs with interrupts turned off. People who may want to modify
+ * rs_interrupt() should try to keep the interrupt handler as fast as
+ * possible. After you are done making modifications, it is not a bad
+ * idea to do:
+ *
+ * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c
+ *
+ * and look at the resulting assembly code in serial.s.
+ *
+ * - Ted Ts'o (tytso@mit.edu), 7-Mar-93
+ * -----------------------------------------------------------------------
+ */
+
+/*
+ * This routine is used by the interrupt handler to schedule
+ * processing in the software interrupt portion of the driver.
+ */
+static void rs_sched_event(struct async_struct *info,
+ int event)
+{
+ info->event |= 1 << event;
+ tasklet_schedule(&info->tlet);
+}
+
+static void receive_chars(struct async_struct *info)
+{
+ int status;
+ int serdatr;
+ struct tty_struct *tty = info->tty;
+ unsigned char ch, flag;
+ struct async_icount *icount;
+ int oe = 0;
+
+ icount = &info->state->icount;
+
+ status = UART_LSR_DR; /* We obviously have a character! */
+ serdatr = custom.serdatr;
+ mb();
+ custom.intreq = IF_RBF;
+ mb();
+
+ if((serdatr & 0x1ff) == 0)
+ status |= UART_LSR_BI;
+ if(serdatr & SDR_OVRUN)
+ status |= UART_LSR_OE;
+
+ ch = serdatr & 0xff;
+ icount->rx++;
+
+#ifdef SERIAL_DEBUG_INTR
+ printk("DR%02x:%02x...", ch, status);
+#endif
+ flag = TTY_NORMAL;
+
+ /*
+ * We don't handle parity or frame errors - but I have left
+ * the code in, since I'm not sure that the errors can't be
+ * detected.
+ */
+
+ if (status & (UART_LSR_BI | UART_LSR_PE |
+ UART_LSR_FE | UART_LSR_OE)) {
+ /*
+ * For statistics only
+ */
+ if (status & UART_LSR_BI) {
+ status &= ~(UART_LSR_FE | UART_LSR_PE);
+ icount->brk++;
+ } else if (status & UART_LSR_PE)
+ icount->parity++;
+ else if (status & UART_LSR_FE)
+ icount->frame++;
+ if (status & UART_LSR_OE)
+ icount->overrun++;
+
+ /*
+ * Now check to see if character should be
+ * ignored, and mask off conditions which
+ * should be ignored.
+ */
+ if (status & info->ignore_status_mask)
+ goto out;
+
+ status &= info->read_status_mask;
+
+ if (status & (UART_LSR_BI)) {
+#ifdef SERIAL_DEBUG_INTR
+ printk("handling break....");
+#endif
+ flag = TTY_BREAK;
+ if (info->flags & ASYNC_SAK)
+ do_SAK(tty);
+ } else if (status & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (status & UART_LSR_FE)
+ flag = TTY_FRAME;
+ if (status & UART_LSR_OE) {
+ /*
+ * Overrun is special, since it's
+ * reported immediately, and doesn't
+ * affect the current character
+ */
+ oe = 1;
+ }
+ }
+ tty_insert_flip_char(tty, ch, flag);
+ if (oe == 1)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_flip_buffer_push(tty);
+out:
+ return;
+}
+
+static void transmit_chars(struct async_struct *info)
+{
+ custom.intreq = IF_TBE;
+ mb();
+ if (info->x_char) {
+ custom.serdat = info->x_char | 0x100;
+ mb();
+ info->state->icount.tx++;
+ info->x_char = 0;
+ return;
+ }
+ if (info->xmit.head == info->xmit.tail
+ || info->tty->stopped
+ || info->tty->hw_stopped) {
+ info->IER &= ~UART_IER_THRI;
+ custom.intena = IF_TBE;
+ mb();
+ return;
+ }
+
+ custom.serdat = info->xmit.buf[info->xmit.tail++] | 0x100;
+ mb();
+ info->xmit.tail = info->xmit.tail & (SERIAL_XMIT_SIZE-1);
+ info->state->icount.tx++;
+
+ if (CIRC_CNT(info->xmit.head,
+ info->xmit.tail,
+ SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
+ rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
+
+#ifdef SERIAL_DEBUG_INTR
+ printk("THRE...");
+#endif
+ if (info->xmit.head == info->xmit.tail) {
+ custom.intena = IF_TBE;
+ mb();
+ info->IER &= ~UART_IER_THRI;
+ }
+}
+
+static void check_modem_status(struct async_struct *info)
+{
+ unsigned char status = ciab.pra & (SER_DCD | SER_CTS | SER_DSR);
+ unsigned char dstatus;
+ struct async_icount *icount;
+
+ /* Determine bits that have changed */
+ dstatus = status ^ current_ctl_bits;
+ current_ctl_bits = status;
+
+ if (dstatus) {
+ icount = &info->state->icount;
+ /* update input line counters */
+ if (dstatus & SER_DSR)
+ icount->dsr++;
+ if (dstatus & SER_DCD) {
+ icount->dcd++;
+#ifdef CONFIG_HARD_PPS
+ if ((info->flags & ASYNC_HARDPPS_CD) &&
+ !(status & SER_DCD))
+ hardpps();
+#endif
+ }
+ if (dstatus & SER_CTS)
+ icount->cts++;
+ wake_up_interruptible(&info->delta_msr_wait);
+ }
+
+ if ((info->flags & ASYNC_CHECK_CD) && (dstatus & SER_DCD)) {
+#if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR))
+ printk("ttyS%d CD now %s...", info->line,
+ (!(status & SER_DCD)) ? "on" : "off");
+#endif
+ if (!(status & SER_DCD))
+ wake_up_interruptible(&info->open_wait);
+ else {
+#ifdef SERIAL_DEBUG_OPEN
+ printk("doing serial hangup...");
+#endif
+ if (info->tty)
+ tty_hangup(info->tty);
+ }
+ }
+ if (info->flags & ASYNC_CTS_FLOW) {
+ if (info->tty->hw_stopped) {
+ if (!(status & SER_CTS)) {
+#if (defined(SERIAL_DEBUG_INTR) || defined(SERIAL_DEBUG_FLOW))
+ printk("CTS tx start...");
+#endif
+ info->tty->hw_stopped = 0;
+ info->IER |= UART_IER_THRI;
+ custom.intena = IF_SETCLR | IF_TBE;
+ mb();
+ /* set a pending Tx Interrupt, transmitter should restart now */
+ custom.intreq = IF_SETCLR | IF_TBE;
+ mb();
+ rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
+ return;
+ }
+ } else {
+ if ((status & SER_CTS)) {
+#if (defined(SERIAL_DEBUG_INTR) || defined(SERIAL_DEBUG_FLOW))
+ printk("CTS tx stop...");
+#endif
+ info->tty->hw_stopped = 1;
+ info->IER &= ~UART_IER_THRI;
+ /* disable Tx interrupt and remove any pending interrupts */
+ custom.intena = IF_TBE;
+ mb();
+ custom.intreq = IF_TBE;
+ mb();
+ }
+ }
+ }
+}
+
+static irqreturn_t ser_vbl_int( int irq, void *data)
+{
+ /* vbl is just a periodic interrupt we tie into to update modem status */
+ struct async_struct * info = IRQ_ports;
+ /*
+ * TBD - is it better to unregister from this interrupt or to
+ * ignore it if MSI is clear ?
+ */
+ if(info->IER & UART_IER_MSI)
+ check_modem_status(info);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ser_rx_int(int irq, void *dev_id)
+{
+ struct async_struct * info;
+
+#ifdef SERIAL_DEBUG_INTR
+ printk("ser_rx_int...");
+#endif
+
+ info = IRQ_ports;
+ if (!info || !info->tty)
+ return IRQ_NONE;
+
+ receive_chars(info);
+ info->last_active = jiffies;
+#ifdef SERIAL_DEBUG_INTR
+ printk("end.\n");
+#endif
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ser_tx_int(int irq, void *dev_id)
+{
+ struct async_struct * info;
+
+ if (custom.serdatr & SDR_TBE) {
+#ifdef SERIAL_DEBUG_INTR
+ printk("ser_tx_int...");
+#endif
+
+ info = IRQ_ports;
+ if (!info || !info->tty)
+ return IRQ_NONE;
+
+ transmit_chars(info);
+ info->last_active = jiffies;
+#ifdef SERIAL_DEBUG_INTR
+ printk("end.\n");
+#endif
+ }
+ return IRQ_HANDLED;
+}
+
+/*
+ * -------------------------------------------------------------------
+ * Here ends the serial interrupt routines.
+ * -------------------------------------------------------------------
+ */
+
+/*
+ * This routine is used to handle the "bottom half" processing for the
+ * serial driver, known also as the "software interrupt" processing.
+ * This processing is done at the kernel interrupt level, after the
+ * rs_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON. This
+ * is where time-consuming activities which can not be done in the
+ * interrupt driver proper are done; the interrupt driver schedules
+ * them using rs_sched_event(), and they get done here.
+ */
+
+static void do_softint(unsigned long private_)
+{
+ struct async_struct *info = (struct async_struct *) private_;
+ struct tty_struct *tty;
+
+ tty = info->tty;
+ if (!tty)
+ return;
+
+ if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event))
+ tty_wakeup(tty);
+}
+
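The bottom-half comment above describes the split between the hard interrupt handlers and do_softint(); a generic sketch of that tasklet pattern (the names below are placeholders, not symbols from this driver) is:

#include <linux/interrupt.h>

static void my_bottom_half(unsigned long data)
{
    /* slow work (e.g. waking up writers) runs here, with interrupts enabled */
}

static DECLARE_TASKLET(my_tasklet, my_bottom_half, 0);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
    /* acknowledge the hardware quickly, then defer the rest ... */
    tasklet_schedule(&my_tasklet);    /* analogous to rs_sched_event() above */
    return IRQ_HANDLED;
}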
+/*
+ * ---------------------------------------------------------------
+ * Low level utility subroutines for the serial driver: routines to
+ * figure out the appropriate timeout for an interrupt chain, routines
+ * to initialize and startup a serial port, and routines to shutdown a
+ * serial port. Useful stuff like that.
+ * ---------------------------------------------------------------
+ */
+
+static int startup(struct async_struct * info)
+{
+ unsigned long flags;
+ int retval=0;
+ unsigned long page;
+
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ local_irq_save(flags);
+
+ if (info->flags & ASYNC_INITIALIZED) {
+ free_page(page);
+ goto errout;
+ }
+
+ if (info->xmit.buf)
+ free_page(page);
+ else
+ info->xmit.buf = (unsigned char *) page;
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("starting up ttys%d ...", info->line);
+#endif
+
+ /* Clear anything in the input buffer */
+
+ custom.intreq = IF_RBF;
+ mb();
+
+ retval = request_irq(IRQ_AMIGA_VERTB, ser_vbl_int, 0, "serial status", info);
+ if (retval) {
+ if (serial_isroot()) {
+ if (info->tty)
+ set_bit(TTY_IO_ERROR,
+ &info->tty->flags);
+ retval = 0;
+ }
+ goto errout;
+ }
+
+ /* enable both Rx and Tx interrupts */
+ custom.intena = IF_SETCLR | IF_RBF | IF_TBE;
+ mb();
+ info->IER = UART_IER_MSI;
+
+ /* remember current state of the DCD and CTS bits */
+ current_ctl_bits = ciab.pra & (SER_DCD | SER_CTS | SER_DSR);
+
+ IRQ_ports = info;
+
+ info->MCR = 0;
+ if (info->tty->termios->c_cflag & CBAUD)
+ info->MCR = SER_DTR | SER_RTS;
+ rtsdtr_ctrl(info->MCR);
+
+ if (info->tty)
+ clear_bit(TTY_IO_ERROR, &info->tty->flags);
+ info->xmit.head = info->xmit.tail = 0;
+
+ /*
+ * Set up the tty->alt_speed kludge
+ */
+ if (info->tty) {
+ if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
+ info->tty->alt_speed = 57600;
+ if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
+ info->tty->alt_speed = 115200;
+ if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
+ info->tty->alt_speed = 230400;
+ if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
+ info->tty->alt_speed = 460800;
+ }
+
+ /*
+ * and set the speed of the serial port
+ */
+ change_speed(info, NULL);
+
+ info->flags |= ASYNC_INITIALIZED;
+ local_irq_restore(flags);
+ return 0;
+
+errout:
+ local_irq_restore(flags);
+ return retval;
+}
+
+/*
+ * This routine will shutdown a serial port; interrupts are disabled, and
+ * DTR is dropped if the hangup on close termio flag is on.
+ */
+static void shutdown(struct async_struct * info)
+{
+ unsigned long flags;
+ struct serial_state *state;
+
+ if (!(info->flags & ASYNC_INITIALIZED))
+ return;
+
+ state = info->state;
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("Shutting down serial port %d ....\n", info->line);
+#endif
+
+ local_irq_save(flags); /* Disable interrupts */
+
+ /*
+ * clear delta_msr_wait queue to avoid mem leaks: we may free the irq
+ * here so the queue might never be woken up
+ */
+ wake_up_interruptible(&info->delta_msr_wait);
+
+ IRQ_ports = NULL;
+
+ /*
+ * Free the IRQ, if necessary
+ */
+ free_irq(IRQ_AMIGA_VERTB, info);
+
+ if (info->xmit.buf) {
+ free_page((unsigned long) info->xmit.buf);
+ info->xmit.buf = NULL;
+ }
+
+ info->IER = 0;
+ custom.intena = IF_RBF | IF_TBE;
+ mb();
+
+ /* disable break condition */
+ custom.adkcon = AC_UARTBRK;
+ mb();
+
+ if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
+ info->MCR &= ~(SER_DTR|SER_RTS);
+ rtsdtr_ctrl(info->MCR);
+
+ if (info->tty)
+ set_bit(TTY_IO_ERROR, &info->tty->flags);
+
+ info->flags &= ~ASYNC_INITIALIZED;
+ local_irq_restore(flags);
+}
+
+
+/*
+ * This routine is called to set the UART divisor registers to match
+ * the specified baud rate for a serial port.
+ */
+static void change_speed(struct async_struct *info,
+ struct ktermios *old_termios)
+{
+ int quot = 0, baud_base, baud;
+ unsigned cflag, cval = 0;
+ int bits;
+ unsigned long flags;
+
+ if (!info->tty || !info->tty->termios)
+ return;
+ cflag = info->tty->termios->c_cflag;
+
+ /* Byte size is always 8 bits plus parity bit if requested */
+
+ cval = 3; bits = 10;
+ if (cflag & CSTOPB) {
+ cval |= 0x04;
+ bits++;
+ }
+ if (cflag & PARENB) {
+ cval |= UART_LCR_PARITY;
+ bits++;
+ }
+ if (!(cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+#ifdef CMSPAR
+ if (cflag & CMSPAR)
+ cval |= UART_LCR_SPAR;
+#endif
+
+ /* Determine divisor based on baud rate */
+ baud = tty_get_baud_rate(info->tty);
+ if (!baud)
+ baud = 9600; /* B0 transition handled in rs_set_termios */
+ baud_base = info->state->baud_base;
+ if (baud == 38400 &&
+ ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST))
+ quot = info->state->custom_divisor;
+ else {
+ if (baud == 134)
+ /* Special case since 134 is really 134.5 */
+ quot = (2*baud_base / 269);
+ else if (baud)
+ quot = baud_base / baud;
+ }
+ /* If the quotient is zero refuse the change */
+ if (!quot && old_termios) {
+ /* FIXME: Will need updating for new tty in the end */
+ info->tty->termios->c_cflag &= ~CBAUD;
+ info->tty->termios->c_cflag |= (old_termios->c_cflag & CBAUD);
+ baud = tty_get_baud_rate(info->tty);
+ if (!baud)
+ baud = 9600;
+ if (baud == 38400 &&
+ ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST))
+ quot = info->state->custom_divisor;
+ else {
+ if (baud == 134)
+ /* Special case since 134 is really 134.5 */
+ quot = (2*baud_base / 269);
+ else if (baud)
+ quot = baud_base / baud;
+ }
+ }
+ /* As a last resort, if the quotient is zero, default to 9600 bps */
+ if (!quot)
+ quot = baud_base / 9600;
+ info->quot = quot;
+ info->timeout = ((info->xmit_fifo_size*HZ*bits*quot) / baud_base);
+ info->timeout += HZ/50; /* Add .02 seconds of slop */
+
+ /* CTS flow control flag and modem status interrupts */
+ info->IER &= ~UART_IER_MSI;
+ if (info->flags & ASYNC_HARDPPS_CD)
+ info->IER |= UART_IER_MSI;
+ if (cflag & CRTSCTS) {
+ info->flags |= ASYNC_CTS_FLOW;
+ info->IER |= UART_IER_MSI;
+ } else
+ info->flags &= ~ASYNC_CTS_FLOW;
+ if (cflag & CLOCAL)
+ info->flags &= ~ASYNC_CHECK_CD;
+ else {
+ info->flags |= ASYNC_CHECK_CD;
+ info->IER |= UART_IER_MSI;
+ }
+ /* TBD:
+ * Does clearing IER_MSI imply that we should disable the VBL interrupt ?
+ */
+
+ /*
+ * Set up parity check flag
+ */
+
+ info->read_status_mask = UART_LSR_OE | UART_LSR_DR;
+ if (I_INPCK(info->tty))
+ info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
+ info->read_status_mask |= UART_LSR_BI;
+
+ /*
+ * Characters to ignore
+ */
+ info->ignore_status_mask = 0;
+ if (I_IGNPAR(info->tty))
+ info->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (I_IGNBRK(info->tty)) {
+ info->ignore_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators, ignore
+ * overruns too. (For real raw support).
+ */
+ if (I_IGNPAR(info->tty))
+ info->ignore_status_mask |= UART_LSR_OE;
+ }
+ /*
+ * !!! ignore all characters if CREAD is not set
+ */
+ if ((cflag & CREAD) == 0)
+ info->ignore_status_mask |= UART_LSR_DR;
+ local_irq_save(flags);
+
+ {
+ short serper;
+
+ /* Set up the baud rate */
+ serper = quot - 1;
+
+ /* Enable or disable parity bit */
+
+ if(cval & UART_LCR_PARITY)
+ serper |= (SERPER_PARENB);
+
+ custom.serper = serper;
+ mb();
+ }
+
+ info->LCR = cval; /* Save LCR */
+ local_irq_restore(flags);
+}
+
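As a worked example of the divisor math in change_speed() above, using an assumed baud_base of 115200 (a placeholder value, not this hardware's actual clock): a request for 9600 baud gives quot = 115200 / 9600 = 12 and serper = quot - 1 = 11, while the 134.5 baud special case gives quot = 2 * 115200 / 269 = 856; with 10 bits per character the timeout then becomes (xmit_fifo_size * HZ * 10 * quot) / 115200, plus the HZ/50 slop.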
+static int rs_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ struct async_struct *info;
+ unsigned long flags;
+
+ info = tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "rs_put_char"))
+ return 0;
+
+ if (!info->xmit.buf)
+ return 0;
+
+ local_irq_save(flags);
+ if (CIRC_SPACE(info->xmit.head,
+ info->xmit.tail,
+ SERIAL_XMIT_SIZE) == 0) {
+ local_irq_restore(flags);
+ return 0;
+ }
+
+ info->xmit.buf[info->xmit.head++] = ch;
+ info->xmit.head &= SERIAL_XMIT_SIZE-1;
+ local_irq_restore(flags);
+ return 1;
+}
+
+static void rs_flush_chars(struct tty_struct *tty)
+{
+ struct async_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_flush_chars"))
+ return;
+
+ if (info->xmit.head == info->xmit.tail
+ || tty->stopped
+ || tty->hw_stopped
+ || !info->xmit.buf)
+ return;
+
+ local_irq_save(flags);
+ info->IER |= UART_IER_THRI;
+ custom.intena = IF_SETCLR | IF_TBE;
+ mb();
+ /* set a pending Tx Interrupt, transmitter should restart now */
+ custom.intreq = IF_SETCLR | IF_TBE;
+ mb();
+ local_irq_restore(flags);
+}
+
+static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count)
+{
+ int c, ret = 0;
+ struct async_struct *info;
+ unsigned long flags;
+
+ info = tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "rs_write"))
+ return 0;
+
+ if (!info->xmit.buf)
+ return 0;
+
+ local_irq_save(flags);
+ while (1) {
+ c = CIRC_SPACE_TO_END(info->xmit.head,
+ info->xmit.tail,
+ SERIAL_XMIT_SIZE);
+ if (count < c)
+ c = count;
+ if (c <= 0) {
+ break;
+ }
+ memcpy(info->xmit.buf + info->xmit.head, buf, c);
+ info->xmit.head = ((info->xmit.head + c) &
+ (SERIAL_XMIT_SIZE-1));
+ buf += c;
+ count -= c;
+ ret += c;
+ }
+ local_irq_restore(flags);
+
+ if (info->xmit.head != info->xmit.tail
+ && !tty->stopped
+ && !tty->hw_stopped
+ && !(info->IER & UART_IER_THRI)) {
+ info->IER |= UART_IER_THRI;
+ local_irq_disable();
+ custom.intena = IF_SETCLR | IF_TBE;
+ mb();
+ /* set a pending Tx Interrupt, transmitter should restart now */
+ custom.intreq = IF_SETCLR | IF_TBE;
+ mb();
+ local_irq_restore(flags);
+ }
+ return ret;
+}
+
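The xmit.head/xmit.tail handling above relies on SERIAL_XMIT_SIZE being a power of two; a small illustration of that ring-buffer arithmetic (RING_SIZE is a stand-in value, and the macros simply mirror the kernel's CIRC_CNT/CIRC_SPACE):

#define RING_SIZE 4096    /* stand-in for SERIAL_XMIT_SIZE; must be a power of two */

/* bytes queued between tail (consumer) and head (producer) */
#define RING_CNT(head, tail)    (((head) - (tail)) & (RING_SIZE - 1))
/* free space left; one slot stays empty so "full" and "empty" differ */
#define RING_SPACE(head, tail)  RING_CNT((tail), ((head) + 1))

/* producer step, as in rs_put_char()/rs_write():
 *     buf[head] = ch;  head = (head + 1) & (RING_SIZE - 1);
 * consumer step, as in transmit_chars():
 *     ch = buf[tail];  tail = (tail + 1) & (RING_SIZE - 1);
 */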
+static int rs_write_room(struct tty_struct *tty)
+{
+ struct async_struct *info = tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "rs_write_room"))
+ return 0;
+ return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+}
+
+static int rs_chars_in_buffer(struct tty_struct *tty)
+{
+ struct async_struct *info = tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
+ return 0;
+ return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+}
+
+static void rs_flush_buffer(struct tty_struct *tty)
+{
+ struct async_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
+ return;
+ local_irq_save(flags);
+ info->xmit.head = info->xmit.tail = 0;
+ local_irq_restore(flags);
+ tty_wakeup(tty);
+}
+
+/*
+ * This function is used to send a high-priority XON/XOFF character to
+ * the device
+ */
+static void rs_send_xchar(struct tty_struct *tty, char ch)
+{
+ struct async_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_send_char"))
+ return;
+
+ info->x_char = ch;
+ if (ch) {
+ /* Make sure transmit interrupts are on */
+
+ /* Check this ! */
+ local_irq_save(flags);
+ if(!(custom.intenar & IF_TBE)) {
+ custom.intena = IF_SETCLR | IF_TBE;
+ mb();
+ /* set a pending Tx Interrupt, transmitter should restart now */
+ custom.intreq = IF_SETCLR | IF_TBE;
+ mb();
+ }
+ local_irq_restore(flags);
+
+ info->IER |= UART_IER_THRI;
+ }
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_throttle()
+ *
+ * This routine is called by the upper-layer tty layer to signal that
+ * incoming characters should be throttled.
+ * ------------------------------------------------------------
+ */
+static void rs_throttle(struct tty_struct * tty)
+{
+ struct async_struct *info = tty->driver_data;
+ unsigned long flags;
+#ifdef SERIAL_DEBUG_THROTTLE
+ char buf[64];
+
+ printk("throttle %s: %d....\n", tty_name(tty, buf),
+ tty->ldisc.chars_in_buffer(tty));
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "rs_throttle"))
+ return;
+
+ if (I_IXOFF(tty))
+ rs_send_xchar(tty, STOP_CHAR(tty));
+
+ if (tty->termios->c_cflag & CRTSCTS)
+ info->MCR &= ~SER_RTS;
+
+ local_irq_save(flags);
+ rtsdtr_ctrl(info->MCR);
+ local_irq_restore(flags);
+}
+
+static void rs_unthrottle(struct tty_struct * tty)
+{
+ struct async_struct *info = tty->driver_data;
+ unsigned long flags;
+#ifdef SERIAL_DEBUG_THROTTLE
+ char buf[64];
+
+ printk("unthrottle %s: %d....\n", tty_name(tty, buf),
+ tty->ldisc.chars_in_buffer(tty));
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
+ return;
+
+ if (I_IXOFF(tty)) {
+ if (info->x_char)
+ info->x_char = 0;
+ else
+ rs_send_xchar(tty, START_CHAR(tty));
+ }
+ if (tty->termios->c_cflag & CRTSCTS)
+ info->MCR |= SER_RTS;
+ local_irq_save(flags);
+ rtsdtr_ctrl(info->MCR);
+ local_irq_restore(flags);
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_ioctl() and friends
+ * ------------------------------------------------------------
+ */
+
+static int get_serial_info(struct async_struct * info,
+ struct serial_struct __user * retinfo)
+{
+ struct serial_struct tmp;
+ struct serial_state *state = info->state;
+
+ if (!retinfo)
+ return -EFAULT;
+ memset(&tmp, 0, sizeof(tmp));
+ tty_lock();
+ tmp.type = state->type;
+ tmp.line = state->line;
+ tmp.port = state->port;
+ tmp.irq = state->irq;
+ tmp.flags = state->flags;
+ tmp.xmit_fifo_size = state->xmit_fifo_size;
+ tmp.baud_base = state->baud_base;
+ tmp.close_delay = state->close_delay;
+ tmp.closing_wait = state->closing_wait;
+ tmp.custom_divisor = state->custom_divisor;
+ tty_unlock();
+ if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
+ return -EFAULT;
+ return 0;
+}
+
+static int set_serial_info(struct async_struct * info,
+ struct serial_struct __user * new_info)
+{
+ struct serial_struct new_serial;
+ struct serial_state old_state, *state;
+ unsigned int change_irq,change_port;
+ int retval = 0;
+
+ if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
+ return -EFAULT;
+
+ tty_lock();
+ state = info->state;
+ old_state = *state;
+
+ change_irq = new_serial.irq != state->irq;
+ change_port = (new_serial.port != state->port);
+ if(change_irq || change_port || (new_serial.xmit_fifo_size != state->xmit_fifo_size)) {
+ tty_unlock();
+ return -EINVAL;
+ }
+
+ if (!serial_isroot()) {
+ if ((new_serial.baud_base != state->baud_base) ||
+ (new_serial.close_delay != state->close_delay) ||
+ (new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
+ ((new_serial.flags & ~ASYNC_USR_MASK) !=
+ (state->flags & ~ASYNC_USR_MASK))) {
+ tty_unlock();
+ return -EPERM;
+ }
+ state->flags = ((state->flags & ~ASYNC_USR_MASK) |
+ (new_serial.flags & ASYNC_USR_MASK));
+ info->flags = ((info->flags & ~ASYNC_USR_MASK) |
+ (new_serial.flags & ASYNC_USR_MASK));
+ state->custom_divisor = new_serial.custom_divisor;
+ goto check_and_exit;
+ }
+
+ if (new_serial.baud_base < 9600) {
+ tty_unlock();
+ return -EINVAL;
+ }
+
+ /*
+ * OK, past this point, all the error checking has been done.
+ * At this point, we start making changes.....
+ */
+
+ state->baud_base = new_serial.baud_base;
+ state->flags = ((state->flags & ~ASYNC_FLAGS) |
+ (new_serial.flags & ASYNC_FLAGS));
+ info->flags = ((state->flags & ~ASYNC_INTERNAL_FLAGS) |
+ (info->flags & ASYNC_INTERNAL_FLAGS));
+ state->custom_divisor = new_serial.custom_divisor;
+ state->close_delay = new_serial.close_delay * HZ/100;
+ state->closing_wait = new_serial.closing_wait * HZ/100;
+ info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
+check_and_exit:
+ if (info->flags & ASYNC_INITIALIZED) {
+ if (((old_state.flags & ASYNC_SPD_MASK) !=
+ (state->flags & ASYNC_SPD_MASK)) ||
+ (old_state.custom_divisor != state->custom_divisor)) {
+ if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
+ info->tty->alt_speed = 57600;
+ if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
+ info->tty->alt_speed = 115200;
+ if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
+ info->tty->alt_speed = 230400;
+ if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
+ info->tty->alt_speed = 460800;
+ change_speed(info, NULL);
+ }
+ } else
+ retval = startup(info);
+ tty_unlock();
+ return retval;
+}
+
+
+/*
+ * get_lsr_info - get line status register info
+ *
+ * Purpose: Let user call ioctl() to get info when the UART physically
+ * is emptied. On bus types like RS485, the transmitter must
+ * release the bus after transmitting. This must be done when
+ * the transmit shift register is empty, not when the
+ * transmit holding register is empty. This functionality
+ * allows an RS485 driver to be written in user space.
+ */
+static int get_lsr_info(struct async_struct * info, unsigned int __user *value)
+{
+ unsigned char status;
+ unsigned int result;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ status = custom.serdatr;
+ mb();
+ local_irq_restore(flags);
+ result = ((status & SDR_TSRE) ? TIOCSER_TEMT : 0);
+ if (copy_to_user(value, &result, sizeof(int)))
+ return -EFAULT;
+ return 0;
+}
+
+
+static int rs_tiocmget(struct tty_struct *tty)
+{
+ struct async_struct * info = tty->driver_data;
+ unsigned char control, status;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
+ return -ENODEV;
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+
+ control = info->MCR;
+ local_irq_save(flags);
+ status = ciab.pra;
+ local_irq_restore(flags);
+ return ((control & SER_RTS) ? TIOCM_RTS : 0)
+ | ((control & SER_DTR) ? TIOCM_DTR : 0)
+ | (!(status & SER_DCD) ? TIOCM_CAR : 0)
+ | (!(status & SER_DSR) ? TIOCM_DSR : 0)
+ | (!(status & SER_CTS) ? TIOCM_CTS : 0);
+}
+
+static int rs_tiocmset(struct tty_struct *tty, unsigned int set,
+ unsigned int clear)
+{
+ struct async_struct * info = tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
+ return -ENODEV;
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+
+ local_irq_save(flags);
+ if (set & TIOCM_RTS)
+ info->MCR |= SER_RTS;
+ if (set & TIOCM_DTR)
+ info->MCR |= SER_DTR;
+ if (clear & TIOCM_RTS)
+ info->MCR &= ~SER_RTS;
+ if (clear & TIOCM_DTR)
+ info->MCR &= ~SER_DTR;
+ rtsdtr_ctrl(info->MCR);
+ local_irq_restore(flags);
+ return 0;
+}
+
+/*
+ * rs_break() --- routine which turns the break handling on or off
+ */
+static int rs_break(struct tty_struct *tty, int break_state)
+{
+ struct async_struct * info = tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_break"))
+ return -EINVAL;
+
+ local_irq_save(flags);
+ if (break_state == -1)
+ custom.adkcon = AC_SETCLR | AC_UARTBRK;
+ else
+ custom.adkcon = AC_UARTBRK;
+ mb();
+ local_irq_restore(flags);
+ return 0;
+}
+
+/*
+ * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
+ * Return: write counters to the user passed counter struct
+ * NB: both 1->0 and 0->1 transitions are counted except for
+ * RI where only 0->1 is counted.
+ */
+static int rs_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+{
+ struct async_struct *info = tty->driver_data;
+ struct async_icount cnow;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ cnow = info->state->icount;
+ local_irq_restore(flags);
+ icount->cts = cnow.cts;
+ icount->dsr = cnow.dsr;
+ icount->rng = cnow.rng;
+ icount->dcd = cnow.dcd;
+ icount->rx = cnow.rx;
+ icount->tx = cnow.tx;
+ icount->frame = cnow.frame;
+ icount->overrun = cnow.overrun;
+ icount->parity = cnow.parity;
+ icount->brk = cnow.brk;
+ icount->buf_overrun = cnow.buf_overrun;
+
+ return 0;
+}
+
+static int rs_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct async_struct * info = tty->driver_data;
+ struct async_icount cprev, cnow; /* kernel counter temps */
+ void __user *argp = (void __user *)arg;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
+ return -ENODEV;
+
+ if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+ (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
+ (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case TIOCGSERIAL:
+ return get_serial_info(info, argp);
+ case TIOCSSERIAL:
+ return set_serial_info(info, argp);
+ case TIOCSERCONFIG:
+ return 0;
+
+ case TIOCSERGETLSR: /* Get line status register */
+ return get_lsr_info(info, argp);
+
+ case TIOCSERGSTRUCT:
+ if (copy_to_user(argp,
+ info, sizeof(struct async_struct)))
+ return -EFAULT;
+ return 0;
+
+ /*
+ * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
+ * - mask passed in arg for lines of interest
+ * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
+ * Caller should use TIOCGICOUNT to see which one it was
+ */
+ case TIOCMIWAIT:
+ local_irq_save(flags);
+ /* note the counters on entry */
+ cprev = info->state->icount;
+ local_irq_restore(flags);
+ while (1) {
+ interruptible_sleep_on(&info->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ local_irq_save(flags);
+ cnow = info->state->icount; /* atomic copy */
+ local_irq_restore(flags);
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
+ return -EIO; /* no change => error */
+ if ( ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
+ ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
+ ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts)) ) {
+ return 0;
+ }
+ cprev = cnow;
+ }
+ /* NOTREACHED */
+
+ case TIOCSERGWILD:
+ case TIOCSERSWILD:
+ /* "setserial -W" is called in Debian boot */
+ printk ("TIOCSER?WILD ioctl obsolete, ignored.\n");
+ return 0;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
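+
+/*
+ * Illustrative sketch, not part of this driver: the TIOCMIWAIT and
+ * TIOCGICOUNT cases handled above are normally used together from
+ * user space, e.g. to watch for carrier changes.  The descriptor fd
+ * is assumed to be an already opened serial port.  The first ioctl
+ * blocks until DCD or CTS changes; the second reads the counters.
+ *
+ *	#include <stdio.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/serial.h>
+ *
+ *	struct serial_icounter_struct ic;
+ *
+ *	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS) == 0 &&
+ *	    ioctl(fd, TIOCGICOUNT, &ic) == 0)
+ *		printf("dcd=%d cts=%d\n", ic.dcd, ic.cts);
+ */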
+
+static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ struct async_struct *info = tty->driver_data;
+ unsigned long flags;
+ unsigned int cflag = tty->termios->c_cflag;
+
+ change_speed(info, old_termios);
+
+ /* Handle transition to B0 status */
+ if ((old_termios->c_cflag & CBAUD) &&
+ !(cflag & CBAUD)) {
+ info->MCR &= ~(SER_DTR|SER_RTS);
+ local_irq_save(flags);
+ rtsdtr_ctrl(info->MCR);
+ local_irq_restore(flags);
+ }
+
+ /* Handle transition away from B0 status */
+ if (!(old_termios->c_cflag & CBAUD) &&
+ (cflag & CBAUD)) {
+ info->MCR |= SER_DTR;
+ if (!(tty->termios->c_cflag & CRTSCTS) ||
+ !test_bit(TTY_THROTTLED, &tty->flags)) {
+ info->MCR |= SER_RTS;
+ }
+ local_irq_save(flags);
+ rtsdtr_ctrl(info->MCR);
+ local_irq_restore(flags);
+ }
+
+ /* Handle turning off CRTSCTS */
+ if ((old_termios->c_cflag & CRTSCTS) &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ rs_start(tty);
+ }
+
+#if 0
+ /*
+ * No need to wake up processes in open wait, since they
+ * sample the CLOCAL flag once, and don't recheck it.
+ * XXX It's not clear whether the current behavior is correct
+ * or not. Hence, this may change.....
+ */
+ if (!(old_termios->c_cflag & CLOCAL) &&
+ (tty->termios->c_cflag & CLOCAL))
+ wake_up_interruptible(&info->open_wait);
+#endif
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_close()
+ *
+ * This routine is called when the serial port gets closed. First, we
+ * wait for the last remaining data to be sent. Then, we unlink its
+ * async structure from the interrupt chain if necessary, and we free
+ * that IRQ if nothing is left in the chain.
+ * ------------------------------------------------------------
+ */
+static void rs_close(struct tty_struct *tty, struct file * filp)
+{
+ struct async_struct * info = tty->driver_data;
+ struct serial_state *state;
+ unsigned long flags;
+
+ if (!info || serial_paranoia_check(info, tty->name, "rs_close"))
+ return;
+
+ state = info->state;
+
+ local_irq_save(flags);
+
+ if (tty_hung_up_p(filp)) {
+ DBG_CNT("before DEC-hung");
+ local_irq_restore(flags);
+ return;
+ }
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("rs_close ttys%d, count = %d\n", info->line, state->count);
+#endif
+ if ((tty->count == 1) && (state->count != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. state->count should always
+ * be one in these conditions. If it's greater than
+ * one, we've got real problems, since it means the
+ * serial port won't be shutdown.
+ */
+ printk("rs_close: bad serial port count; tty->count is 1, "
+ "state->count is %d\n", state->count);
+ state->count = 1;
+ }
+ if (--state->count < 0) {
+ printk("rs_close: bad serial port count for ttys%d: %d\n",
+ info->line, state->count);
+ state->count = 0;
+ }
+ if (state->count) {
+ DBG_CNT("before DEC-2");
+ local_irq_restore(flags);
+ return;
+ }
+ info->flags |= ASYNC_CLOSING;
+ /*
+ * Now we wait for the transmit buffer to clear; and we notify
+ * the line discipline to only process XON/XOFF characters.
+ */
+ tty->closing = 1;
+ if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE)
+ tty_wait_until_sent(tty, info->closing_wait);
+ /*
+ * At this point we stop accepting input. To do this, we
+ * disable the receive line status interrupts, and tell the
+ * interrupt driver to stop checking the data ready bit in the
+ * line status register.
+ */
+ info->read_status_mask &= ~UART_LSR_DR;
+ if (info->flags & ASYNC_INITIALIZED) {
+ /* disable receive interrupts */
+ custom.intena = IF_RBF;
+ mb();
+ /* clear any pending receive interrupt */
+ custom.intreq = IF_RBF;
+ mb();
+
+ /*
+ * Before we drop DTR, make sure the UART transmitter
+ * has completely drained; this is especially
+ * important if there is a transmit FIFO!
+ */
+ rs_wait_until_sent(tty, info->timeout);
+ }
+ shutdown(info);
+ rs_flush_buffer(tty);
+
+ tty_ldisc_flush(tty);
+ tty->closing = 0;
+ info->event = 0;
+ info->tty = NULL;
+ if (info->blocked_open) {
+ if (info->close_delay) {
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
+ }
+ wake_up_interruptible(&info->open_wait);
+ }
+ info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
+ wake_up_interruptible(&info->close_wait);
+ local_irq_restore(flags);
+}
+
+/*
+ * rs_wait_until_sent() --- wait until the transmitter is empty
+ */
+static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct async_struct * info = tty->driver_data;
+ unsigned long orig_jiffies, char_time;
+ int tty_was_locked = tty_locked();
+ int lsr;
+
+ if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent"))
+ return;
+
+ if (info->xmit_fifo_size == 0)
+ return; /* Just in case.... */
+
+ orig_jiffies = jiffies;
+
+ /*
+ * tty_wait_until_sent is called from lots of places,
+ * with or without the BTM.
+ */
+ if (!tty_was_locked)
+ tty_lock();
+ /*
+ * Set the check interval to be 1/5 of the estimated time to
+ * send a single character, and make it at least 1. The check
+ * interval should also be less than the timeout.
+ *
+ * Note: we have to use pretty tight timings here to satisfy
+ * the NIST-PCTS.
+ */
+ char_time = (info->timeout - HZ/50) / info->xmit_fifo_size;
+ char_time = char_time / 5;
+ if (char_time == 0)
+ char_time = 1;
+ if (timeout)
+ char_time = min_t(unsigned long, char_time, timeout);
+ /*
+ * If the transmitter hasn't cleared in twice the approximate
+ * amount of time to send the entire FIFO, it probably won't
+ * ever clear. This assumes the UART isn't doing flow
+ * control, which is currently the case. Hence, if it ever
+ * takes longer than info->timeout, this is probably due to a
+ * UART bug of some kind. So, we clamp the timeout parameter at
+ * 2*info->timeout.
+ */
+ if (!timeout || timeout > 2*info->timeout)
+ timeout = 2*info->timeout;
+#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+ printk("In rs_wait_until_sent(%d) check=%lu...", timeout, char_time);
+ printk("jiff=%lu...", jiffies);
+#endif
+ while(!((lsr = custom.serdatr) & SDR_TSRE)) {
+#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+ printk("serdatr = %d (jiff=%lu)...", lsr, jiffies);
+#endif
+ msleep_interruptible(jiffies_to_msecs(char_time));
+ if (signal_pending(current))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ break;
+ }
+ __set_current_state(TASK_RUNNING);
+ if (!tty_was_locked)
+ tty_unlock();
+#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+ printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
+#endif
+}
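+
+/*
+ * Worked example for the check interval computed in rs_wait_until_sent()
+ * above (assumed, illustrative numbers only): with HZ=100, an
+ * info->timeout of 50 jiffies and the builtin port's FIFO size of 1,
+ * char_time = (50 - 100/50) / 1 / 5 = 9 jiffies between polls of the
+ * transmit shift register, clamped below by 1 and above by the
+ * caller's timeout.
+ */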
+
+/*
+ * rs_hangup() --- called by tty_hangup() when a hangup is signaled.
+ */
+static void rs_hangup(struct tty_struct *tty)
+{
+ struct async_struct * info = tty->driver_data;
+ struct serial_state *state = info->state;
+
+ if (serial_paranoia_check(info, tty->name, "rs_hangup"))
+ return;
+
+ rs_flush_buffer(tty);
+ shutdown(info);
+ info->event = 0;
+ state->count = 0;
+ info->flags &= ~ASYNC_NORMAL_ACTIVE;
+ info->tty = NULL;
+ wake_up_interruptible(&info->open_wait);
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_open() and friends
+ * ------------------------------------------------------------
+ */
+static int block_til_ready(struct tty_struct *tty, struct file * filp,
+ struct async_struct *info)
+{
+#ifdef DECLARE_WAITQUEUE
+ DECLARE_WAITQUEUE(wait, current);
+#else
+ struct wait_queue wait = { current, NULL };
+#endif
+ struct serial_state *state = info->state;
+ int retval;
+ int do_clocal = 0, extra_count = 0;
+ unsigned long flags;
+
+ /*
+ * If the device is in the middle of being closed, then block
+ * until it's done, and then try again.
+ */
+ if (tty_hung_up_p(filp) ||
+ (info->flags & ASYNC_CLOSING)) {
+ if (info->flags & ASYNC_CLOSING)
+ interruptible_sleep_on(&info->close_wait);
+#ifdef SERIAL_DO_RESTART
+ return ((info->flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS);
+#else
+ return -EAGAIN;
+#endif
+ }
+
+ /*
+ * If non-blocking mode is set, or the port is not enabled,
+ * then make the check up front and then exit.
+ */
+ if ((filp->f_flags & O_NONBLOCK) ||
+ (tty->flags & (1 << TTY_IO_ERROR))) {
+ info->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+ }
+
+ if (tty->termios->c_cflag & CLOCAL)
+ do_clocal = 1;
+
+ /*
+ * Block waiting for the carrier detect and the line to become
+ * free (i.e., not in use by the callout). While we are in
+ * this loop, state->count is dropped by one, so that
+ * rs_close() knows when to free things. We restore it upon
+ * exit, either normal or abnormal.
+ */
+ retval = 0;
+ add_wait_queue(&info->open_wait, &wait);
+#ifdef SERIAL_DEBUG_OPEN
+ printk("block_til_ready before block: ttys%d, count = %d\n",
+ state->line, state->count);
+#endif
+ local_irq_save(flags);
+ if (!tty_hung_up_p(filp)) {
+ extra_count = 1;
+ state->count--;
+ }
+ local_irq_restore(flags);
+ info->blocked_open++;
+ while (1) {
+ local_irq_save(flags);
+ if (tty->termios->c_cflag & CBAUD)
+ rtsdtr_ctrl(SER_DTR|SER_RTS);
+ local_irq_restore(flags);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (tty_hung_up_p(filp) ||
+ !(info->flags & ASYNC_INITIALIZED)) {
+#ifdef SERIAL_DO_RESTART
+ if (info->flags & ASYNC_HUP_NOTIFY)
+ retval = -EAGAIN;
+ else
+ retval = -ERESTARTSYS;
+#else
+ retval = -EAGAIN;
+#endif
+ break;
+ }
+ if (!(info->flags & ASYNC_CLOSING) &&
+ (do_clocal || (!(ciab.pra & SER_DCD)) ))
+ break;
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+#ifdef SERIAL_DEBUG_OPEN
+ printk("block_til_ready blocking: ttys%d, count = %d\n",
+ info->line, state->count);
+#endif
+ tty_unlock();
+ schedule();
+ tty_lock();
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&info->open_wait, &wait);
+ if (extra_count)
+ state->count++;
+ info->blocked_open--;
+#ifdef SERIAL_DEBUG_OPEN
+ printk("block_til_ready after blocking: ttys%d, count = %d\n",
+ info->line, state->count);
+#endif
+ if (retval)
+ return retval;
+ info->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+}
+
+static int get_async_struct(int line, struct async_struct **ret_info)
+{
+ struct async_struct *info;
+ struct serial_state *sstate;
+
+ sstate = rs_table + line;
+ sstate->count++;
+ if (sstate->info) {
+ *ret_info = sstate->info;
+ return 0;
+ }
+ info = kzalloc(sizeof(struct async_struct), GFP_KERNEL);
+ if (!info) {
+ sstate->count--;
+ return -ENOMEM;
+ }
+#ifdef DECLARE_WAITQUEUE
+ init_waitqueue_head(&info->open_wait);
+ init_waitqueue_head(&info->close_wait);
+ init_waitqueue_head(&info->delta_msr_wait);
+#endif
+ info->magic = SERIAL_MAGIC;
+ info->port = sstate->port;
+ info->flags = sstate->flags;
+ info->xmit_fifo_size = sstate->xmit_fifo_size;
+ info->line = line;
+ tasklet_init(&info->tlet, do_softint, (unsigned long)info);
+ info->state = sstate;
+ if (sstate->info) {
+ kfree(info);
+ *ret_info = sstate->info;
+ return 0;
+ }
+ *ret_info = sstate->info = info;
+ return 0;
+}
+
+/*
+ * This routine is called whenever a serial port is opened. It
+ * enables interrupts for a serial port, linking its async structure into
+ * the IRQ chain.  It also performs the serial-specific
+ * initialization for the tty structure.
+ */
+static int rs_open(struct tty_struct *tty, struct file * filp)
+{
+ struct async_struct *info;
+ int retval, line;
+
+ line = tty->index;
+ if ((line < 0) || (line >= NR_PORTS)) {
+ return -ENODEV;
+ }
+ retval = get_async_struct(line, &info);
+ if (retval) {
+ return retval;
+ }
+ tty->driver_data = info;
+ info->tty = tty;
+ if (serial_paranoia_check(info, tty->name, "rs_open"))
+ return -ENODEV;
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("rs_open %s, count = %d\n", tty->name, info->state->count);
+#endif
+ info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
+ /*
+ * If the port is in the middle of closing, bail out now
+ */
+ if (tty_hung_up_p(filp) ||
+ (info->flags & ASYNC_CLOSING)) {
+ if (info->flags & ASYNC_CLOSING)
+ interruptible_sleep_on(&info->close_wait);
+#ifdef SERIAL_DO_RESTART
+ return ((info->flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS);
+#else
+ return -EAGAIN;
+#endif
+ }
+
+ /*
+ * Start up serial port
+ */
+ retval = startup(info);
+ if (retval) {
+ return retval;
+ }
+
+ retval = block_til_ready(tty, filp, info);
+ if (retval) {
+#ifdef SERIAL_DEBUG_OPEN
+ printk("rs_open returning after block_til_ready with %d\n",
+ retval);
+#endif
+ return retval;
+ }
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("rs_open %s successful...", tty->name);
+#endif
+ return 0;
+}
+
+/*
+ * /proc fs routines....
+ */
+
+static inline void line_info(struct seq_file *m, struct serial_state *state)
+{
+ struct async_struct *info = state->info, scr_info;
+ char stat_buf[30], control, status;
+ unsigned long flags;
+
+ seq_printf(m, "%d: uart:amiga_builtin",state->line);
+
+ /*
+ * Figure out the current RS-232 lines
+ */
+ if (!info) {
+ info = &scr_info; /* This is just for serial_{in,out} */
+
+ info->magic = SERIAL_MAGIC;
+ info->flags = state->flags;
+ info->quot = 0;
+ info->tty = NULL;
+ }
+ local_irq_save(flags);
+ status = ciab.pra;
+ control = info ? info->MCR : status;
+ local_irq_restore(flags);
+
+ stat_buf[0] = 0;
+ stat_buf[1] = 0;
+ if(!(control & SER_RTS))
+ strcat(stat_buf, "|RTS");
+ if(!(status & SER_CTS))
+ strcat(stat_buf, "|CTS");
+ if(!(control & SER_DTR))
+ strcat(stat_buf, "|DTR");
+ if(!(status & SER_DSR))
+ strcat(stat_buf, "|DSR");
+ if(!(status & SER_DCD))
+ strcat(stat_buf, "|CD");
+
+ if (info->quot) {
+ seq_printf(m, " baud:%d", state->baud_base / info->quot);
+ }
+
+ seq_printf(m, " tx:%d rx:%d", state->icount.tx, state->icount.rx);
+
+ if (state->icount.frame)
+ seq_printf(m, " fe:%d", state->icount.frame);
+
+ if (state->icount.parity)
+ seq_printf(m, " pe:%d", state->icount.parity);
+
+ if (state->icount.brk)
+ seq_printf(m, " brk:%d", state->icount.brk);
+
+ if (state->icount.overrun)
+ seq_printf(m, " oe:%d", state->icount.overrun);
+
+ /*
+ * Last thing is the RS-232 status lines
+ */
+ seq_printf(m, " %s\n", stat_buf+1);
+}
+
+static int rs_proc_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "serinfo:1.0 driver:%s\n", serial_version);
+ line_info(m, &rs_table[0]);
+ return 0;
+}
+
+static int rs_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rs_proc_show, NULL);
+}
+
+static const struct file_operations rs_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = rs_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
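+
+/*
+ * Illustrative /proc output, with made-up counter values; the actual
+ * version string and counts will differ.  Given the seq_printf() calls
+ * above, rs_proc_show() produces something like:
+ *
+ *	serinfo:1.0 driver:<serial_version>
+ *	0: uart:amiga_builtin baud:9600 tx:1042 rx:988 RTS|CTS|DTR|DSR|CD
+ *
+ * The trailing field is the RS-232 line status as decoded by
+ * line_info() above; the optional fe/pe/brk/oe fields only appear
+ * once the corresponding error counters are non-zero.
+ */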
+
+/*
+ * ---------------------------------------------------------------------
+ * rs_init() and friends
+ *
+ * rs_init() is called at boot-time to initialize the serial driver.
+ * ---------------------------------------------------------------------
+ */
+
+/*
+ * This routine prints out the appropriate serial driver version
+ * number, and identifies which options were configured into this
+ * driver.
+ */
+static void show_serial_version(void)
+{
+ printk(KERN_INFO "%s version %s\n", serial_name, serial_version);
+}
+
+
+static const struct tty_operations serial_ops = {
+ .open = rs_open,
+ .close = rs_close,
+ .write = rs_write,
+ .put_char = rs_put_char,
+ .flush_chars = rs_flush_chars,
+ .write_room = rs_write_room,
+ .chars_in_buffer = rs_chars_in_buffer,
+ .flush_buffer = rs_flush_buffer,
+ .ioctl = rs_ioctl,
+ .throttle = rs_throttle,
+ .unthrottle = rs_unthrottle,
+ .set_termios = rs_set_termios,
+ .stop = rs_stop,
+ .start = rs_start,
+ .hangup = rs_hangup,
+ .break_ctl = rs_break,
+ .send_xchar = rs_send_xchar,
+ .wait_until_sent = rs_wait_until_sent,
+ .tiocmget = rs_tiocmget,
+ .tiocmset = rs_tiocmset,
+ .get_icount = rs_get_icount,
+ .proc_fops = &rs_proc_fops,
+};
+
+/*
+ * The serial driver boot-time initialization code!
+ */
+static int __init amiga_serial_probe(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct serial_state * state;
+ int error;
+
+ serial_driver = alloc_tty_driver(1);
+ if (!serial_driver)
+ return -ENOMEM;
+
+ IRQ_ports = NULL;
+
+ show_serial_version();
+
+ /* Initialize the tty_driver structure */
+
+ serial_driver->owner = THIS_MODULE;
+ serial_driver->driver_name = "amiserial";
+ serial_driver->name = "ttyS";
+ serial_driver->major = TTY_MAJOR;
+ serial_driver->minor_start = 64;
+ serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ serial_driver->init_termios = tty_std_termios;
+ serial_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ serial_driver->flags = TTY_DRIVER_REAL_RAW;
+ tty_set_operations(serial_driver, &serial_ops);
+
+ error = tty_register_driver(serial_driver);
+ if (error)
+ goto fail_put_tty_driver;
+
+ state = rs_table;
+ state->magic = SSTATE_MAGIC;
+ state->port = (int)&custom.serdatr; /* Just to give it a value */
+ state->line = 0;
+ state->custom_divisor = 0;
+ state->close_delay = 5*HZ/10;
+ state->closing_wait = 30*HZ;
+ state->icount.cts = state->icount.dsr =
+ state->icount.rng = state->icount.dcd = 0;
+ state->icount.rx = state->icount.tx = 0;
+ state->icount.frame = state->icount.parity = 0;
+ state->icount.overrun = state->icount.brk = 0;
+
+ printk(KERN_INFO "ttyS%d is the amiga builtin serial port\n",
+ state->line);
+
+ /* Hardware set up */
+
+ state->baud_base = amiga_colorclock;
+ state->xmit_fifo_size = 1;
+
+ /* set ISRs, and then disable the rx interrupts */
+ error = request_irq(IRQ_AMIGA_TBE, ser_tx_int, 0, "serial TX", state);
+ if (error)
+ goto fail_unregister;
+
+ error = request_irq(IRQ_AMIGA_RBF, ser_rx_int, IRQF_DISABLED,
+ "serial RX", state);
+ if (error)
+ goto fail_free_irq;
+
+ local_irq_save(flags);
+
+ /* turn off Rx and Tx interrupts */
+ custom.intena = IF_RBF | IF_TBE;
+ mb();
+
+ /* clear any pending interrupt */
+ custom.intreq = IF_RBF | IF_TBE;
+ mb();
+
+ local_irq_restore(flags);
+
+ /*
+ * set the appropriate directions for the modem control flags,
+ * and clear RTS and DTR
+ */
+ ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */
+ ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */
+
+ platform_set_drvdata(pdev, state);
+
+ return 0;
+
+fail_free_irq:
+ free_irq(IRQ_AMIGA_TBE, state);
+fail_unregister:
+ tty_unregister_driver(serial_driver);
+fail_put_tty_driver:
+ put_tty_driver(serial_driver);
+ return error;
+}
+
+static int __exit amiga_serial_remove(struct platform_device *pdev)
+{
+ int error;
+ struct serial_state *state = platform_get_drvdata(pdev);
+ struct async_struct *info = state->info;
+
+ /* printk("Unloading %s: version %s\n", serial_name, serial_version); */
+ tasklet_kill(&info->tlet);
+ if ((error = tty_unregister_driver(serial_driver)))
+ printk("SERIAL: failed to unregister serial driver (%d)\n",
+ error);
+ put_tty_driver(serial_driver);
+
+ rs_table[0].info = NULL;
+ kfree(info);
+
+ free_irq(IRQ_AMIGA_TBE, rs_table);
+ free_irq(IRQ_AMIGA_RBF, rs_table);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return error;
+}
+
+static struct platform_driver amiga_serial_driver = {
+ .remove = __exit_p(amiga_serial_remove),
+ .driver = {
+ .name = "amiga-serial",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amiga_serial_init(void)
+{
+ return platform_driver_probe(&amiga_serial_driver, amiga_serial_probe);
+}
+
+module_init(amiga_serial_init);
+
+static void __exit amiga_serial_exit(void)
+{
+ platform_driver_unregister(&amiga_serial_driver);
+}
+
+module_exit(amiga_serial_exit);
+
+
+#if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE)
+
+/*
+ * ------------------------------------------------------------
+ * Serial console driver
+ * ------------------------------------------------------------
+ */
+
+static void amiga_serial_putc(char c)
+{
+ custom.serdat = (unsigned char)c | 0x100;
+ while (!(custom.serdatr & 0x2000))
+ barrier();
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console must be locked when we get here.
+ */
+static void serial_console_write(struct console *co, const char *s,
+ unsigned count)
+{
+ unsigned short intena = custom.intenar;
+
+ custom.intena = IF_TBE;
+
+ while (count--) {
+ if (*s == '\n')
+ amiga_serial_putc('\r');
+ amiga_serial_putc(*s++);
+ }
+
+ custom.intena = IF_SETCLR | (intena & IF_TBE);
+}
+
+static struct tty_driver *serial_console_device(struct console *c, int *index)
+{
+ *index = 0;
+ return serial_driver;
+}
+
+static struct console sercons = {
+ .name = "ttyS",
+ .write = serial_console_write,
+ .device = serial_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+/*
+ * Register console.
+ */
+static int __init amiserial_console_init(void)
+{
+ register_console(&sercons);
+ return 0;
+}
+console_initcall(amiserial_console_init);
+
+#endif /* CONFIG_SERIAL_CONSOLE && !MODULE */
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-serial");
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
new file mode 100644
index 00000000..03c285bb
--- /dev/null
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -0,0 +1,368 @@
+/*
+ * TTY over Blackfin JTAG Communication
+ *
+ * Copyright 2008-2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#define DRV_NAME "bfin-jtag-comm"
+#define DEV_NAME "ttyBFJC"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include <linux/circ_buf.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <asm/atomic.h>
+
+#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); })
+
+/* See the Debug/Emulation chapter in the HRM */
+#define EMUDOF 0x00000001 /* EMUDAT_OUT full & valid */
+#define EMUDIF 0x00000002 /* EMUDAT_IN full & valid */
+#define EMUDOOVF 0x00000004 /* EMUDAT_OUT overflow */
+#define EMUDIOVF 0x00000008 /* EMUDAT_IN overflow */
+
+static inline uint32_t bfin_write_emudat(uint32_t emudat)
+{
+ __asm__ __volatile__("emudat = %0;" : : "d"(emudat));
+ return emudat;
+}
+
+static inline uint32_t bfin_read_emudat(void)
+{
+ uint32_t emudat;
+ __asm__ __volatile__("%0 = emudat;" : "=d"(emudat));
+ return emudat;
+}
+
+static inline uint32_t bfin_write_emudat_chars(char a, char b, char c, char d)
+{
+ return bfin_write_emudat((a << 0) | (b << 8) | (c << 16) | (d << 24));
+}
+
+#define CIRC_SIZE 2048 /* see comment in tty_io.c:do_tty_write() */
+#define CIRC_MASK (CIRC_SIZE - 1)
+#define circ_empty(circ) ((circ)->head == (circ)->tail)
+#define circ_free(circ) CIRC_SPACE((circ)->head, (circ)->tail, CIRC_SIZE)
+#define circ_cnt(circ) CIRC_CNT((circ)->head, (circ)->tail, CIRC_SIZE)
+#define circ_byte(circ, idx) ((circ)->buf[(idx) & CIRC_MASK])
+
+static struct tty_driver *bfin_jc_driver;
+static struct task_struct *bfin_jc_kthread;
+static struct tty_struct * volatile bfin_jc_tty;
+static unsigned long bfin_jc_count;
+static DEFINE_MUTEX(bfin_jc_tty_mutex);
+static volatile struct circ_buf bfin_jc_write_buf;
+
+static int
+bfin_jc_emudat_manager(void *arg)
+{
+ uint32_t inbound_len = 0, outbound_len = 0;
+
+ while (!kthread_should_stop()) {
+ /* no one left to give data to, so sleep */
+ if (bfin_jc_tty == NULL && circ_empty(&bfin_jc_write_buf)) {
+ pr_debug("waiting for readers\n");
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ }
+
+ /* no data available, so just chill */
+ if (!(bfin_read_DBGSTAT() & EMUDIF) && circ_empty(&bfin_jc_write_buf)) {
+ pr_debug("waiting for data (in_len = %i) (circ: %i %i)\n",
+ inbound_len, bfin_jc_write_buf.tail, bfin_jc_write_buf.head);
+ if (inbound_len)
+ schedule();
+ else
+ schedule_timeout_interruptible(HZ);
+ continue;
+ }
+
+ /* if incoming data is ready, eat it */
+ if (bfin_read_DBGSTAT() & EMUDIF) {
+ struct tty_struct *tty;
+ mutex_lock(&bfin_jc_tty_mutex);
+ tty = (struct tty_struct *)bfin_jc_tty;
+ if (tty != NULL) {
+ uint32_t emudat = bfin_read_emudat();
+ if (inbound_len == 0) {
+ pr_debug("incoming length: 0x%08x\n", emudat);
+ inbound_len = emudat;
+ } else {
+ size_t num_chars = (4 <= inbound_len ? 4 : inbound_len);
+ pr_debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
+ inbound_len -= num_chars;
+ tty_insert_flip_string(tty, (unsigned char *)&emudat, num_chars);
+ tty_flip_buffer_push(tty);
+ }
+ }
+ mutex_unlock(&bfin_jc_tty_mutex);
+ }
+
+ /* if outgoing data is ready, post it */
+ if (!(bfin_read_DBGSTAT() & EMUDOF) && !circ_empty(&bfin_jc_write_buf)) {
+ if (outbound_len == 0) {
+ outbound_len = circ_cnt(&bfin_jc_write_buf);
+ bfin_write_emudat(outbound_len);
+ pr_debug("outgoing length: 0x%08x\n", outbound_len);
+ } else {
+ struct tty_struct *tty;
+ int tail = bfin_jc_write_buf.tail;
+ size_t ate = (4 <= outbound_len ? 4 : outbound_len);
+ uint32_t emudat =
+ bfin_write_emudat_chars(
+ circ_byte(&bfin_jc_write_buf, tail + 0),
+ circ_byte(&bfin_jc_write_buf, tail + 1),
+ circ_byte(&bfin_jc_write_buf, tail + 2),
+ circ_byte(&bfin_jc_write_buf, tail + 3)
+ );
+ bfin_jc_write_buf.tail += ate;
+ outbound_len -= ate;
+ mutex_lock(&bfin_jc_tty_mutex);
+ tty = (struct tty_struct *)bfin_jc_tty;
+ if (tty)
+ tty_wakeup(tty);
+ mutex_unlock(&bfin_jc_tty_mutex);
+ pr_debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate);
+ }
+ }
+ }
+
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
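+
+/*
+ * Illustrative framing example (assumed byte values): queueing the
+ * 3-byte string "hi\n" for output makes the manager thread above emit
+ * two EMUDAT words: first the length word 0x00000003, then one data
+ * word packed low byte first by bfin_write_emudat_chars(), so its low
+ * three bytes are 'h' (0x68), 'i' (0x69) and '\n' (0x0a).  The fourth
+ * packed byte is whatever stale data follows in the circular buffer;
+ * only the announced three bytes are meaningful on the host side.
+ */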
+
+static int
+bfin_jc_open(struct tty_struct *tty, struct file *filp)
+{
+ mutex_lock(&bfin_jc_tty_mutex);
+ pr_debug("open %lu\n", bfin_jc_count);
+ ++bfin_jc_count;
+ bfin_jc_tty = tty;
+ wake_up_process(bfin_jc_kthread);
+ mutex_unlock(&bfin_jc_tty_mutex);
+ return 0;
+}
+
+static void
+bfin_jc_close(struct tty_struct *tty, struct file *filp)
+{
+ mutex_lock(&bfin_jc_tty_mutex);
+ pr_debug("close %lu\n", bfin_jc_count);
+ if (--bfin_jc_count == 0)
+ bfin_jc_tty = NULL;
+ wake_up_process(bfin_jc_kthread);
+ mutex_unlock(&bfin_jc_tty_mutex);
+}
+
+/* XXX: we don't handle the put_char() case where we must handle count = 1 */
+static int
+bfin_jc_circ_write(const unsigned char *buf, int count)
+{
+ int i;
+ count = min(count, circ_free(&bfin_jc_write_buf));
+ pr_debug("going to write chunk of %i bytes\n", count);
+ for (i = 0; i < count; ++i)
+ circ_byte(&bfin_jc_write_buf, bfin_jc_write_buf.head + i) = buf[i];
+ bfin_jc_write_buf.head += i;
+ return i;
+}
+
+#ifndef CONFIG_BFIN_JTAG_COMM_CONSOLE
+# define console_lock()
+# define console_unlock()
+#endif
+static int
+bfin_jc_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ int i;
+ console_lock();
+ i = bfin_jc_circ_write(buf, count);
+ console_unlock();
+ wake_up_process(bfin_jc_kthread);
+ return i;
+}
+
+static void
+bfin_jc_flush_chars(struct tty_struct *tty)
+{
+ wake_up_process(bfin_jc_kthread);
+}
+
+static int
+bfin_jc_write_room(struct tty_struct *tty)
+{
+ return circ_free(&bfin_jc_write_buf);
+}
+
+static int
+bfin_jc_chars_in_buffer(struct tty_struct *tty)
+{
+ return circ_cnt(&bfin_jc_write_buf);
+}
+
+static void
+bfin_jc_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ unsigned long expire = jiffies + timeout;
+ while (!circ_empty(&bfin_jc_write_buf)) {
+ if (signal_pending(current))
+ break;
+ if (time_after(jiffies, expire))
+ break;
+ }
+}
+
+static const struct tty_operations bfin_jc_ops = {
+ .open = bfin_jc_open,
+ .close = bfin_jc_close,
+ .write = bfin_jc_write,
+ /*.put_char = bfin_jc_put_char,*/
+ .flush_chars = bfin_jc_flush_chars,
+ .write_room = bfin_jc_write_room,
+ .chars_in_buffer = bfin_jc_chars_in_buffer,
+ .wait_until_sent = bfin_jc_wait_until_sent,
+};
+
+static int __init bfin_jc_init(void)
+{
+ int ret;
+
+ bfin_jc_kthread = kthread_create(bfin_jc_emudat_manager, NULL, DRV_NAME);
+ if (IS_ERR(bfin_jc_kthread))
+ return PTR_ERR(bfin_jc_kthread);
+
+ ret = -ENOMEM;
+
+ bfin_jc_write_buf.head = bfin_jc_write_buf.tail = 0;
+ bfin_jc_write_buf.buf = kmalloc(CIRC_SIZE, GFP_KERNEL);
+ if (!bfin_jc_write_buf.buf)
+ goto err_buf;
+
+ bfin_jc_driver = alloc_tty_driver(1);
+ if (!bfin_jc_driver)
+ goto err_driver;
+
+ bfin_jc_driver->owner = THIS_MODULE;
+ bfin_jc_driver->driver_name = DRV_NAME;
+ bfin_jc_driver->name = DEV_NAME;
+ bfin_jc_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ bfin_jc_driver->subtype = SERIAL_TYPE_NORMAL;
+ bfin_jc_driver->init_termios = tty_std_termios;
+ tty_set_operations(bfin_jc_driver, &bfin_jc_ops);
+
+ ret = tty_register_driver(bfin_jc_driver);
+ if (ret)
+ goto err;
+
+ pr_init(KERN_INFO DRV_NAME ": initialized\n");
+
+ return 0;
+
+ err:
+ put_tty_driver(bfin_jc_driver);
+ err_driver:
+ kfree(bfin_jc_write_buf.buf);
+ err_buf:
+ kthread_stop(bfin_jc_kthread);
+ return ret;
+}
+module_init(bfin_jc_init);
+
+static void __exit bfin_jc_exit(void)
+{
+ kthread_stop(bfin_jc_kthread);
+ kfree(bfin_jc_write_buf.buf);
+ tty_unregister_driver(bfin_jc_driver);
+ put_tty_driver(bfin_jc_driver);
+}
+module_exit(bfin_jc_exit);
+
+#if defined(CONFIG_BFIN_JTAG_COMM_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
+static void
+bfin_jc_straight_buffer_write(const char *buf, unsigned count)
+{
+ unsigned ate = 0;
+ while (bfin_read_DBGSTAT() & EMUDOF)
+ continue;
+ bfin_write_emudat(count);
+ while (ate < count) {
+ while (bfin_read_DBGSTAT() & EMUDOF)
+ continue;
+ bfin_write_emudat_chars(buf[ate], buf[ate+1], buf[ate+2], buf[ate+3]);
+ ate += 4;
+ }
+}
+#endif
+
+#ifdef CONFIG_BFIN_JTAG_COMM_CONSOLE
+static void
+bfin_jc_console_write(struct console *co, const char *buf, unsigned count)
+{
+ if (bfin_jc_kthread == NULL)
+ bfin_jc_straight_buffer_write(buf, count);
+ else
+ bfin_jc_circ_write(buf, count);
+}
+
+static struct tty_driver *
+bfin_jc_console_device(struct console *co, int *index)
+{
+ *index = co->index;
+ return bfin_jc_driver;
+}
+
+static struct console bfin_jc_console = {
+ .name = DEV_NAME,
+ .write = bfin_jc_console_write,
+ .device = bfin_jc_console_device,
+ .flags = CON_ANYTIME | CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static int __init bfin_jc_console_init(void)
+{
+ register_console(&bfin_jc_console);
+ return 0;
+}
+console_initcall(bfin_jc_console_init);
+#endif
+
+#ifdef CONFIG_EARLY_PRINTK
+static void __init
+bfin_jc_early_write(struct console *co, const char *buf, unsigned int count)
+{
+ bfin_jc_straight_buffer_write(buf, count);
+}
+
+static struct __initdata console bfin_jc_early_console = {
+ .name = "early_BFJC",
+ .write = bfin_jc_early_write,
+ .flags = CON_ANYTIME | CON_PRINTBUFFER,
+ .index = -1,
+};
+
+struct console * __init
+bfin_jc_early_init(unsigned int port, unsigned int cflag)
+{
+ return &bfin_jc_early_console;
+}
+#endif
+
+MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
+MODULE_DESCRIPTION("TTY over Blackfin JTAG Communication");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
new file mode 100644
index 00000000..c0e8f2ee
--- /dev/null
+++ b/drivers/tty/cyclades.c
@@ -0,0 +1,4196 @@
+#undef BLOCKMOVE
+#define Z_WAKE
+#undef Z_EXT_CHARS_IN_BUFFER
+
+/*
+ * This file contains the driver for the Cyclades async multiport
+ * serial boards.
+ *
+ * Initially written by Randolph Bentson <bentson@grieg.seaslug.org>.
+ * Modified and maintained by Marcio Saito <marcio@cyclades.com>.
+ *
+ * Copyright (C) 2007-2009 Jiri Slaby <jirislaby@gmail.com>
+ *
+ * Much of the design and some of the code came from serial.c
+ * which was copyright (C) 1991, 1992 Linus Torvalds. It was
+ * extensively rewritten by Theodore Ts'o, 8/16/92 -- 9/14/92,
+ * and then fixed as suggested by Michael K. Johnson 12/12/92.
+ * Converted to pci probing and cleaned up by Jiri Slaby.
+ *
+ */
+
+#define CY_VERSION "2.6"
+
+/* If you need to install more boards than NR_CARDS, change the constant
+ in the definition below. No other change is necessary to support up to
+ eight boards. Beyond that you'll have to extend cy_isa_addresses. */
+
+#define NR_CARDS 4
+
+/*
+ If the total number of ports is larger than NR_PORTS, change this
+ constant in the definition below. No other change is necessary to
+ support more boards/ports. */
+
+#define NR_PORTS 256
+
+#define ZO_V1 0
+#define ZO_V2 1
+#define ZE_V1 2
+
+#define SERIAL_PARANOIA_CHECK
+#undef CY_DEBUG_OPEN
+#undef CY_DEBUG_THROTTLE
+#undef CY_DEBUG_OTHER
+#undef CY_DEBUG_IO
+#undef CY_DEBUG_COUNT
+#undef CY_DEBUG_DTR
+#undef CY_DEBUG_WAIT_UNTIL_SENT
+#undef CY_DEBUG_INTERRUPTS
+#undef CY_16Y_HACK
+#undef CY_ENABLE_MONITORING
+#undef CY_PCI_DEBUG
+
+/*
+ * Include section
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/cyclades.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+#include <linux/stat.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+static void cy_send_xchar(struct tty_struct *tty, char ch);
+
+#ifndef SERIAL_XMIT_SIZE
+#define SERIAL_XMIT_SIZE (min(PAGE_SIZE, 4096))
+#endif
+
+#define STD_COM_FLAGS (0)
+
+/* firmware stuff */
+#define ZL_MAX_BLOCKS 16
+#define DRIVER_VERSION 0x02010203
+#define RAM_SIZE 0x80000
+
+enum zblock_type {
+ ZBLOCK_PRG = 0,
+ ZBLOCK_FPGA = 1
+};
+
+struct zfile_header {
+ char name[64];
+ char date[32];
+ char aux[32];
+ u32 n_config;
+ u32 config_offset;
+ u32 n_blocks;
+ u32 block_offset;
+ u32 reserved[9];
+} __attribute__ ((packed));
+
+struct zfile_config {
+ char name[64];
+ u32 mailbox;
+ u32 function;
+ u32 n_blocks;
+ u32 block_list[ZL_MAX_BLOCKS];
+} __attribute__ ((packed));
+
+struct zfile_block {
+ u32 type;
+ u32 file_offset;
+ u32 ram_offset;
+ u32 size;
+} __attribute__ ((packed));
+
+static struct tty_driver *cy_serial_driver;
+
+#ifdef CONFIG_ISA
+/* This is the address lookup table. The driver will probe for
+ Cyclom-Y/ISA boards at all addresses in here. If you want the
+ driver to probe addresses at a different address, add it to
+ this table. If the driver is probing some other board and
+ causing problems, remove the offending address from this table.
+*/
+
+static unsigned int cy_isa_addresses[] = {
+ 0xD0000,
+ 0xD2000,
+ 0xD4000,
+ 0xD6000,
+ 0xD8000,
+ 0xDA000,
+ 0xDC000,
+ 0xDE000,
+ 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+#define NR_ISA_ADDRS ARRAY_SIZE(cy_isa_addresses)
+
+static long maddr[NR_CARDS];
+static int irq[NR_CARDS];
+
+module_param_array(maddr, long, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+
+#endif /* CONFIG_ISA */
+
+/* This is the per-card data structure containing address, irq, number of
+ channels, etc. This driver supports a maximum of NR_CARDS cards.
+*/
+static struct cyclades_card cy_card[NR_CARDS];
+
+static int cy_next_channel; /* next minor available */
+
+/*
+ * This is used to look up the divisor speeds and the timeouts
+ * We're normally limited to 15 distinct baud rates. The extra
+ * are accessed via settings in info->port.flags.
+ * 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ * 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ * HI VHI
+ * 20
+ */
+static const int baud_table[] = {
+ 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200,
+ 1800, 2400, 4800, 9600, 19200, 38400, 57600, 76800, 115200, 150000,
+ 230400, 0
+};
+
+static const char baud_co_25[] = { /* 25 MHz clock option table */
+ /* value => 00 01 02 03 04 */
+ /* divide by 8 32 128 512 2048 */
+ 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x02,
+ 0x02, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static const char baud_bpr_25[] = { /* 25 MHz baud rate period table */
+ 0x00, 0xf5, 0xa3, 0x6f, 0x5c, 0x51, 0xf5, 0xa3, 0x51, 0xa3,
+ 0x6d, 0x51, 0xa3, 0x51, 0xa3, 0x51, 0x36, 0x29, 0x1b, 0x15
+};
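+
+/*
+ * Illustrative lookup (an approximate reading of the tables above): a
+ * request for 9600 baud is index 13 in baud_table[], so a 25 MHz board
+ * selects clock option 0x01 (divide by 32) and period 0x51 (81).
+ * Assuming the bit rate is clock / (divisor * period), that gives
+ * 25,000,000 / (32 * 81) ~= 9645 baud, within normal async tolerance
+ * of the requested 9600.
+ */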
+
+static const char baud_co_60[] = { /* 60 MHz clock option table (CD1400 J) */
+ /* value => 00 01 02 03 04 */
+ /* divide by 8 32 128 512 2048 */
+ 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03,
+ 0x03, 0x02, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00
+};
+
+static const char baud_bpr_60[] = { /* 60 MHz baud rate period table (CD1400 J) */
+ 0x00, 0x82, 0x21, 0xff, 0xdb, 0xc3, 0x92, 0x62, 0xc3, 0x62,
+ 0x41, 0xc3, 0x62, 0xc3, 0x62, 0xc3, 0x82, 0x62, 0x41, 0x32,
+ 0x21
+};
+
+static const char baud_cor3[] = { /* receive threshold */
+ 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
+ 0x0a, 0x0a, 0x0a, 0x09, 0x09, 0x08, 0x08, 0x08, 0x08, 0x07,
+ 0x07
+};
+
+/*
+ * The Cyclades driver implements HW flow control like any other serial driver.
+ * The cyclades_port structure member rflow and the vector rflow_thr
+ * allows us to take advantage of a special feature in the CD1400 to avoid
+ * data loss even when the system interrupt latency is too high. These flags
+ * are to be used only with very special applications. Setting these flags
+ * requires the use of a special cable (DTR and RTS reversed). In the new
+ * CD1400-based boards (rev. 6.00 or later), there is no need for special
+ * cables.
+ */
+
+static const char rflow_thr[] = { /* rflow threshold */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
+ 0x0a
+};
+
+/* The Cyclom-Ye has placed the sequential chips in non-sequential
+ * address order. This look-up table overcomes that problem.
+ */
+static const unsigned int cy_chip_offset[] = { 0x0000,
+ 0x0400,
+ 0x0800,
+ 0x0C00,
+ 0x0200,
+ 0x0600,
+ 0x0A00,
+ 0x0E00
+};
+
+/* PCI related definitions */
+
+#ifdef CONFIG_PCI
+static const struct pci_device_id cy_pci_dev_id[] = {
+ /* PCI < 1Mb */
+ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Lo) },
+ /* PCI > 1Mb */
+ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Hi) },
+ /* 4Y PCI < 1Mb */
+ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Lo) },
+ /* 4Y PCI > 1Mb */
+ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Hi) },
+ /* 8Y PCI < 1Mb */
+ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Lo) },
+ /* 8Y PCI > 1Mb */
+ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Hi) },
+ /* Z PCI < 1Mb */
+ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Lo) },
+ /* Z PCI > 1Mb */
+ { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Hi) },
+ { } /* end of table */
+};
+MODULE_DEVICE_TABLE(pci, cy_pci_dev_id);
+#endif
+
+static void cy_start(struct tty_struct *);
+static void cy_set_line_char(struct cyclades_port *, struct tty_struct *);
+static int cyz_issue_cmd(struct cyclades_card *, __u32, __u8, __u32);
+#ifdef CONFIG_ISA
+static unsigned detect_isa_irq(void __iomem *);
+#endif /* CONFIG_ISA */
+
+#ifndef CONFIG_CYZ_INTR
+static void cyz_poll(unsigned long);
+
+/* The Cyclades-Z polling cycle is defined by this variable */
+static long cyz_polling_cycle = CZ_DEF_POLL;
+
+static DEFINE_TIMER(cyz_timerlist, cyz_poll, 0, 0);
+
+#else /* CONFIG_CYZ_INTR */
+static void cyz_rx_restart(unsigned long);
+static struct timer_list cyz_rx_full_timer[NR_PORTS];
+#endif /* CONFIG_CYZ_INTR */
+
+static inline void cyy_writeb(struct cyclades_port *port, u32 reg, u8 val)
+{
+ struct cyclades_card *card = port->card;
+
+ cy_writeb(port->u.cyy.base_addr + (reg << card->bus_index), val);
+}
+
+static inline u8 cyy_readb(struct cyclades_port *port, u32 reg)
+{
+ struct cyclades_card *card = port->card;
+
+ return readb(port->u.cyy.base_addr + (reg << card->bus_index));
+}
+
+static inline bool cy_is_Z(struct cyclades_card *card)
+{
+ return card->num_chips == (unsigned int)-1;
+}
+
+static inline bool __cyz_fpga_loaded(struct RUNTIME_9060 __iomem *ctl_addr)
+{
+ return readl(&ctl_addr->init_ctrl) & (1 << 17);
+}
+
+static inline bool cyz_fpga_loaded(struct cyclades_card *card)
+{
+ return __cyz_fpga_loaded(card->ctl_addr.p9060);
+}
+
+static inline bool cyz_is_loaded(struct cyclades_card *card)
+{
+ struct FIRM_ID __iomem *fw_id = card->base_addr + ID_ADDRESS;
+
+ return (card->hw_ver == ZO_V1 || cyz_fpga_loaded(card)) &&
+ readl(&fw_id->signature) == ZFIRM_ID;
+}
+
+static inline int serial_paranoia_check(struct cyclades_port *info,
+ const char *name, const char *routine)
+{
+#ifdef SERIAL_PARANOIA_CHECK
+ if (!info) {
+ printk(KERN_WARNING "cyc Warning: null cyclades_port for (%s) "
+ "in %s\n", name, routine);
+ return 1;
+ }
+
+ if (info->magic != CYCLADES_MAGIC) {
+ printk(KERN_WARNING "cyc Warning: bad magic number for serial "
+ "struct (%s) in %s\n", name, routine);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/***********************************************************/
+/********* Start of block of Cyclom-Y specific code ********/
+
+/* This routine waits up to 1000 micro-seconds for the previous
+ command to the Cirrus chip to complete and then issues the
+ new command. An error is returned if the previous command
+ didn't finish within the time limit.
+
+ This function is only called from inside spinlock-protected code.
+ */
+static int __cyy_issue_cmd(void __iomem *base_addr, u8 cmd, int index)
+{
+ void __iomem *ccr = base_addr + (CyCCR << index);
+ unsigned int i;
+
+ /* Check to see that the previous command has completed */
+ for (i = 0; i < 100; i++) {
+ if (readb(ccr) == 0)
+ break;
+ udelay(10L);
+ }
+ /* if the CCR never cleared, the previous command
+ didn't finish within the "reasonable time" */
+ if (i == 100)
+ return -1;
+
+ /* Issue the new command */
+ cy_writeb(ccr, cmd);
+
+ return 0;
+}
+
+static inline int cyy_issue_cmd(struct cyclades_port *port, u8 cmd)
+{
+ return __cyy_issue_cmd(port->u.cyy.base_addr, cmd,
+ port->card->bus_index);
+}
+
+#ifdef CONFIG_ISA
+/* ISA interrupt detection code */
+static unsigned detect_isa_irq(void __iomem *address)
+{
+ int irq;
+ unsigned long irqs, flags;
+ int save_xir, save_car;
+ int index = 0; /* IRQ probing is only for ISA */
+
+ /* forget possible initially masked and pending IRQ */
+ irq = probe_irq_off(probe_irq_on());
+
+ /* Clear interrupts on the board first */
+ cy_writeb(address + (Cy_ClrIntr << index), 0);
+ /* Cy_ClrIntr is 0x1800 */
+
+ irqs = probe_irq_on();
+ /* Wait ... */
+ msleep(5);
+
+ /* Enable the Tx interrupts on the CD1400 */
+ local_irq_save(flags);
+ cy_writeb(address + (CyCAR << index), 0);
+ __cyy_issue_cmd(address, CyCHAN_CTL | CyENB_XMTR, index);
+
+ cy_writeb(address + (CyCAR << index), 0);
+ cy_writeb(address + (CySRER << index),
+ readb(address + (CySRER << index)) | CyTxRdy);
+ local_irq_restore(flags);
+
+ /* Wait ... */
+ msleep(5);
+
+ /* Check which interrupt is in use */
+ irq = probe_irq_off(irqs);
+
+ /* Clean up */
+ save_xir = (u_char) readb(address + (CyTIR << index));
+ save_car = readb(address + (CyCAR << index));
+ cy_writeb(address + (CyCAR << index), (save_xir & 0x3));
+ cy_writeb(address + (CySRER << index),
+ readb(address + (CySRER << index)) & ~CyTxRdy);
+ cy_writeb(address + (CyTIR << index), (save_xir & 0x3f));
+ cy_writeb(address + (CyCAR << index), (save_car));
+ cy_writeb(address + (Cy_ClrIntr << index), 0);
+ /* Cy_ClrIntr is 0x1800 */
+
+ return (irq > 0) ? irq : 0;
+}
+#endif /* CONFIG_ISA */
+
+static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
+ void __iomem *base_addr)
+{
+ struct cyclades_port *info;
+ struct tty_struct *tty;
+ int len, index = cinfo->bus_index;
+ u8 ivr, save_xir, channel, save_car, data, char_count;
+
+#ifdef CY_DEBUG_INTERRUPTS
+ printk(KERN_DEBUG "cyy_interrupt: rcvd intr, chip %d\n", chip);
+#endif
+ /* determine the channel & change to that context */
+ save_xir = readb(base_addr + (CyRIR << index));
+ channel = save_xir & CyIRChannel;
+ info = &cinfo->ports[channel + chip * 4];
+ save_car = cyy_readb(info, CyCAR);
+ cyy_writeb(info, CyCAR, save_xir);
+ ivr = cyy_readb(info, CyRIVR) & CyIVRMask;
+
+ tty = tty_port_tty_get(&info->port);
+ /* if there is nowhere to put the data, discard it */
+ if (tty == NULL) {
+ if (ivr == CyIVRRxEx) { /* exception */
+ data = cyy_readb(info, CyRDSR);
+ } else { /* normal character reception */
+ char_count = cyy_readb(info, CyRDCR);
+ while (char_count--)
+ data = cyy_readb(info, CyRDSR);
+ }
+ goto end;
+ }
+ /* there is an open port for this data */
+ if (ivr == CyIVRRxEx) { /* exception */
+ data = cyy_readb(info, CyRDSR);
+
+ /* For statistics only */
+ if (data & CyBREAK)
+ info->icount.brk++;
+ else if (data & CyFRAME)
+ info->icount.frame++;
+ else if (data & CyPARITY)
+ info->icount.parity++;
+ else if (data & CyOVERRUN)
+ info->icount.overrun++;
+
+ if (data & info->ignore_status_mask) {
+ info->icount.rx++;
+ tty_kref_put(tty);
+ return;
+ }
+ if (tty_buffer_request_room(tty, 1)) {
+ if (data & info->read_status_mask) {
+ if (data & CyBREAK) {
+ tty_insert_flip_char(tty,
+ cyy_readb(info, CyRDSR),
+ TTY_BREAK);
+ info->icount.rx++;
+ if (info->port.flags & ASYNC_SAK)
+ do_SAK(tty);
+ } else if (data & CyFRAME) {
+ tty_insert_flip_char(tty,
+ cyy_readb(info, CyRDSR),
+ TTY_FRAME);
+ info->icount.rx++;
+ info->idle_stats.frame_errs++;
+ } else if (data & CyPARITY) {
+ /* Pieces of seven... */
+ tty_insert_flip_char(tty,
+ cyy_readb(info, CyRDSR),
+ TTY_PARITY);
+ info->icount.rx++;
+ info->idle_stats.parity_errs++;
+ } else if (data & CyOVERRUN) {
+ tty_insert_flip_char(tty, 0,
+ TTY_OVERRUN);
+ info->icount.rx++;
+ /* If the flip buffer itself is
+ overflowing, we still lose
+ the next incoming character.
+ */
+ tty_insert_flip_char(tty,
+ cyy_readb(info, CyRDSR),
+ TTY_FRAME);
+ info->icount.rx++;
+ info->idle_stats.overruns++;
+ /* These two conditions may imply */
+ /* a normal read should be done. */
+ /* } else if(data & CyTIMEOUT) { */
+ /* } else if(data & CySPECHAR) { */
+ } else {
+ tty_insert_flip_char(tty, 0,
+ TTY_NORMAL);
+ info->icount.rx++;
+ }
+ } else {
+ tty_insert_flip_char(tty, 0, TTY_NORMAL);
+ info->icount.rx++;
+ }
+ } else {
+ /* there was a software buffer overrun and nothing
+ * could be done about it!!! */
+ info->icount.buf_overrun++;
+ info->idle_stats.overruns++;
+ }
+ } else { /* normal character reception */
+ /* load # chars available from the chip */
+ char_count = cyy_readb(info, CyRDCR);
+
+#ifdef CY_ENABLE_MONITORING
+ ++info->mon.int_count;
+ info->mon.char_count += char_count;
+ if (char_count > info->mon.char_max)
+ info->mon.char_max = char_count;
+ info->mon.char_last = char_count;
+#endif
+ len = tty_buffer_request_room(tty, char_count);
+ while (len--) {
+ data = cyy_readb(info, CyRDSR);
+ tty_insert_flip_char(tty, data, TTY_NORMAL);
+ info->idle_stats.recv_bytes++;
+ info->icount.rx++;
+#ifdef CY_16Y_HACK
+ udelay(10L);
+#endif
+ }
+ info->idle_stats.recv_idle = jiffies;
+ }
+ tty_schedule_flip(tty);
+ tty_kref_put(tty);
+end:
+ /* end of service */
+ cyy_writeb(info, CyRIR, save_xir & 0x3f);
+ cyy_writeb(info, CyCAR, save_car);
+}
+
+static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip,
+ void __iomem *base_addr)
+{
+ struct cyclades_port *info;
+ struct tty_struct *tty;
+ int char_count, index = cinfo->bus_index;
+ u8 save_xir, channel, save_car, outch;
+
+ /* Since we only get here when the transmit buffer
+ is empty, we know we can always stuff a dozen
+ characters. */
+#ifdef CY_DEBUG_INTERRUPTS
+ printk(KERN_DEBUG "cyy_interrupt: xmit intr, chip %d\n", chip);
+#endif
+
+ /* determine the channel & change to that context */
+ save_xir = readb(base_addr + (CyTIR << index));
+ channel = save_xir & CyIRChannel;
+ save_car = readb(base_addr + (CyCAR << index));
+ cy_writeb(base_addr + (CyCAR << index), save_xir);
+
+ info = &cinfo->ports[channel + chip * 4];
+ tty = tty_port_tty_get(&info->port);
+ if (tty == NULL) {
+ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy);
+ goto end;
+ }
+
+ /* load the on-chip space for outbound data */
+ char_count = info->xmit_fifo_size;
+
+ if (info->x_char) { /* send special char */
+ outch = info->x_char;
+ cyy_writeb(info, CyTDR, outch);
+ char_count--;
+ info->icount.tx++;
+ info->x_char = 0;
+ }
+
+ if (info->breakon || info->breakoff) {
+ if (info->breakon) {
+ cyy_writeb(info, CyTDR, 0);
+ cyy_writeb(info, CyTDR, 0x81);
+ info->breakon = 0;
+ char_count -= 2;
+ }
+ if (info->breakoff) {
+ cyy_writeb(info, CyTDR, 0);
+ cyy_writeb(info, CyTDR, 0x83);
+ info->breakoff = 0;
+ char_count -= 2;
+ }
+ }
+
+ while (char_count-- > 0) {
+ if (!info->xmit_cnt) {
+ if (cyy_readb(info, CySRER) & CyTxMpty) {
+ cyy_writeb(info, CySRER,
+ cyy_readb(info, CySRER) & ~CyTxMpty);
+ } else {
+ cyy_writeb(info, CySRER, CyTxMpty |
+ (cyy_readb(info, CySRER) & ~CyTxRdy));
+ }
+ goto done;
+ }
+ if (info->port.xmit_buf == NULL) {
+ cyy_writeb(info, CySRER,
+ cyy_readb(info, CySRER) & ~CyTxRdy);
+ goto done;
+ }
+ if (tty->stopped || tty->hw_stopped) {
+ cyy_writeb(info, CySRER,
+ cyy_readb(info, CySRER) & ~CyTxRdy);
+ goto done;
+ }
+ /* Because the Embedded Transmit Commands have been enabled,
+ * we must check to see if the escape character, NULL, is being
+ * sent. If it is, we must ensure that there is room for it to
+ * be doubled in the output stream. Therefore we no longer
+ * advance the pointer when the character is fetched, but
+ * rather wait until after the check for a NULL output
+ * character. This is necessary because there may not be room
+ * for the two chars needed to send a NULL.
+ */
+ outch = info->port.xmit_buf[info->xmit_tail];
+ if (outch) {
+ info->xmit_cnt--;
+ info->xmit_tail = (info->xmit_tail + 1) &
+ (SERIAL_XMIT_SIZE - 1);
+ cyy_writeb(info, CyTDR, outch);
+ info->icount.tx++;
+ } else {
+ if (char_count > 1) {
+ info->xmit_cnt--;
+ info->xmit_tail = (info->xmit_tail + 1) &
+ (SERIAL_XMIT_SIZE - 1);
+ cyy_writeb(info, CyTDR, outch);
+ cyy_writeb(info, CyTDR, 0);
+ info->icount.tx++;
+ char_count--;
+ }
+ }
+ }
+
+done:
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+end:
+ /* end of service */
+ cyy_writeb(info, CyTIR, save_xir & 0x3f);
+ cyy_writeb(info, CyCAR, save_car);
+}
+
+static void cyy_chip_modem(struct cyclades_card *cinfo, int chip,
+ void __iomem *base_addr)
+{
+ struct cyclades_port *info;
+ struct tty_struct *tty;
+ int index = cinfo->bus_index;
+ u8 save_xir, channel, save_car, mdm_change, mdm_status;
+
+ /* determine the channel & change to that context */
+ save_xir = readb(base_addr + (CyMIR << index));
+ channel = save_xir & CyIRChannel;
+ info = &cinfo->ports[channel + chip * 4];
+ save_car = cyy_readb(info, CyCAR);
+ cyy_writeb(info, CyCAR, save_xir);
+
+ mdm_change = cyy_readb(info, CyMISR);
+ mdm_status = cyy_readb(info, CyMSVR1);
+
+ tty = tty_port_tty_get(&info->port);
+ if (!tty)
+ goto end;
+
+ if (mdm_change & CyANY_DELTA) {
+ /* For statistics only */
+ if (mdm_change & CyDCD)
+ info->icount.dcd++;
+ if (mdm_change & CyCTS)
+ info->icount.cts++;
+ if (mdm_change & CyDSR)
+ info->icount.dsr++;
+ if (mdm_change & CyRI)
+ info->icount.rng++;
+
+ wake_up_interruptible(&info->port.delta_msr_wait);
+ }
+
+ if ((mdm_change & CyDCD) && (info->port.flags & ASYNC_CHECK_CD)) {
+ if (mdm_status & CyDCD)
+ wake_up_interruptible(&info->port.open_wait);
+ else
+ tty_hangup(tty);
+ }
+ if ((mdm_change & CyCTS) && (info->port.flags & ASYNC_CTS_FLOW)) {
+ if (tty->hw_stopped) {
+ if (mdm_status & CyCTS) {
+ /* cy_start isn't used
+ because... !!! */
+ tty->hw_stopped = 0;
+ cyy_writeb(info, CySRER,
+ cyy_readb(info, CySRER) | CyTxRdy);
+ tty_wakeup(tty);
+ }
+ } else {
+ if (!(mdm_status & CyCTS)) {
+ /* cy_stop isn't used
+ because ... !!! */
+ tty->hw_stopped = 1;
+ cyy_writeb(info, CySRER,
+ cyy_readb(info, CySRER) & ~CyTxRdy);
+ }
+ }
+ }
+/* if (mdm_change & CyDSR) {
+ }
+ if (mdm_change & CyRI) {
+ }*/
+ tty_kref_put(tty);
+end:
+ /* end of service */
+ cyy_writeb(info, CyMIR, save_xir & 0x3f);
+ cyy_writeb(info, CyCAR, save_car);
+}
+
+/* The real interrupt service routine is called
+ whenever the card wants its hand held--chars
+ received, out buffer empty, modem change, etc.
+ */
+static irqreturn_t cyy_interrupt(int irq, void *dev_id)
+{
+ int status;
+ struct cyclades_card *cinfo = dev_id;
+ void __iomem *base_addr, *card_base_addr;
+ unsigned int chip, too_many, had_work;
+ int index;
+
+ if (unlikely(cinfo == NULL)) {
+#ifdef CY_DEBUG_INTERRUPTS
+ printk(KERN_DEBUG "cyy_interrupt: spurious interrupt %d\n",
+ irq);
+#endif
+ return IRQ_NONE; /* spurious interrupt */
+ }
+
+ card_base_addr = cinfo->base_addr;
+ index = cinfo->bus_index;
+
+ /* card was not initialized yet (e.g. DEBUG_SHIRQ) */
+ if (unlikely(card_base_addr == NULL))
+ return IRQ_HANDLED;
+
+ /* This loop checks all chips in the card. Make a note whenever
+ _any_ chip had some work to do, as this is considered an
+ indication that there will be more to do. Only when no chip
+ has any work does this outermost loop exit.
+ */
+ do {
+ had_work = 0;
+ for (chip = 0; chip < cinfo->num_chips; chip++) {
+ base_addr = cinfo->base_addr +
+ (cy_chip_offset[chip] << index);
+ too_many = 0;
+ while ((status = readb(base_addr +
+ (CySVRR << index))) != 0x00) {
+ had_work++;
+ /* The purpose of the following test is to ensure that
+ no chip can monopolize the driver. This forces the
+ chips to be checked in a round-robin fashion (after
+ draining each of a bunch (1000) of characters).
+ */
+ if (1000 < too_many++)
+ break;
+ spin_lock(&cinfo->card_lock);
+ if (status & CySRReceive) /* rx intr */
+ cyy_chip_rx(cinfo, chip, base_addr);
+ if (status & CySRTransmit) /* tx intr */
+ cyy_chip_tx(cinfo, chip, base_addr);
+ if (status & CySRModem) /* modem intr */
+ cyy_chip_modem(cinfo, chip, base_addr);
+ spin_unlock(&cinfo->card_lock);
+ }
+ }
+ } while (had_work);
+
+ /* clear interrupts */
+ spin_lock(&cinfo->card_lock);
+ cy_writeb(card_base_addr + (Cy_ClrIntr << index), 0);
+ /* Cy_ClrIntr is 0x1800 */
+ spin_unlock(&cinfo->card_lock);
+ return IRQ_HANDLED;
+} /* cyy_interrupt */
+
+static void cyy_change_rts_dtr(struct cyclades_port *info, unsigned int set,
+ unsigned int clear)
+{
+ struct cyclades_card *card = info->card;
+ int channel = info->line - card->first_line;
+ u32 rts, dtr, msvrr, msvrd;
+
+ channel &= 0x03;
+
+ if (info->rtsdtr_inv) {
+ msvrr = CyMSVR2;
+ msvrd = CyMSVR1;
+ rts = CyDTR;
+ dtr = CyRTS;
+ } else {
+ msvrr = CyMSVR1;
+ msvrd = CyMSVR2;
+ rts = CyRTS;
+ dtr = CyDTR;
+ }
+ if (set & TIOCM_RTS) {
+ cyy_writeb(info, CyCAR, channel);
+ cyy_writeb(info, msvrr, rts);
+ }
+ if (clear & TIOCM_RTS) {
+ cyy_writeb(info, CyCAR, channel);
+ cyy_writeb(info, msvrr, ~rts);
+ }
+ if (set & TIOCM_DTR) {
+ cyy_writeb(info, CyCAR, channel);
+ cyy_writeb(info, msvrd, dtr);
+#ifdef CY_DEBUG_DTR
+ printk(KERN_DEBUG "cyc:set_modem_info raising DTR\n");
+ printk(KERN_DEBUG " status: 0x%x, 0x%x\n",
+ cyy_readb(info, CyMSVR1),
+ cyy_readb(info, CyMSVR2));
+#endif
+ }
+ if (clear & TIOCM_DTR) {
+ cyy_writeb(info, CyCAR, channel);
+ cyy_writeb(info, msvrd, ~dtr);
+#ifdef CY_DEBUG_DTR
+ printk(KERN_DEBUG "cyc:set_modem_info dropping DTR\n");
+ printk(KERN_DEBUG " status: 0x%x, 0x%x\n",
+ cyy_readb(info, CyMSVR1),
+ cyy_readb(info, CyMSVR2));
+#endif
+ }
+}
+
+/***********************************************************/
+/********* End of block of Cyclom-Y specific code **********/
+/******** Start of block of Cyclades-Z specific code *******/
+/***********************************************************/
+
+static int
+cyz_fetch_msg(struct cyclades_card *cinfo,
+ __u32 *channel, __u8 *cmd, __u32 *param)
+{
+ struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl;
+ unsigned long loc_doorbell;
+
+ loc_doorbell = readl(&cinfo->ctl_addr.p9060->loc_doorbell);
+ if (loc_doorbell) {
+ *cmd = (char)(0xff & loc_doorbell);
+ *channel = readl(&board_ctrl->fwcmd_channel);
+ *param = (__u32) readl(&board_ctrl->fwcmd_param);
+ cy_writel(&cinfo->ctl_addr.p9060->loc_doorbell, 0xffffffff);
+ return 1;
+ }
+ return 0;
+} /* cyz_fetch_msg */
+
+static int
+cyz_issue_cmd(struct cyclades_card *cinfo,
+ __u32 channel, __u8 cmd, __u32 param)
+{
+ struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl;
+ __u32 __iomem *pci_doorbell;
+ unsigned int index;
+
+ if (!cyz_is_loaded(cinfo))
+ return -1;
+
+ index = 0;
+ pci_doorbell = &cinfo->ctl_addr.p9060->pci_doorbell;
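+ /* Wait for the firmware to consume any previous command before posting
+ a new one; give up after 1000 * 50us and return the pending command byte. */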
+ while ((readl(pci_doorbell) & 0xff) != 0) {
+ if (index++ == 1000)
+ return (int)(readl(pci_doorbell) & 0xff);
+ udelay(50L);
+ }
+ cy_writel(&board_ctrl->hcmd_channel, channel);
+ cy_writel(&board_ctrl->hcmd_param, param);
+ cy_writel(pci_doorbell, (long)cmd);
+
+ return 0;
+} /* cyz_issue_cmd */
+
+static void cyz_handle_rx(struct cyclades_port *info, struct tty_struct *tty)
+{
+ struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
+ struct cyclades_card *cinfo = info->card;
+ unsigned int char_count;
+ int len;
+#ifdef BLOCKMOVE
+ unsigned char *buf;
+#else
+ char data;
+#endif
+ __u32 rx_put, rx_get, new_rx_get, rx_bufsize, rx_bufaddr;
+
+ rx_get = new_rx_get = readl(&buf_ctrl->rx_get);
+ rx_put = readl(&buf_ctrl->rx_put);
+ rx_bufsize = readl(&buf_ctrl->rx_bufsize);
+ rx_bufaddr = readl(&buf_ctrl->rx_bufaddr);
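+ /* Characters pending in the board's circular RX buffer: the firmware
+ advances rx_put, the driver advances rx_get. */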
+ if (rx_put >= rx_get)
+ char_count = rx_put - rx_get;
+ else
+ char_count = rx_put - rx_get + rx_bufsize;
+
+ if (char_count) {
+#ifdef CY_ENABLE_MONITORING
+ info->mon.int_count++;
+ info->mon.char_count += char_count;
+ if (char_count > info->mon.char_max)
+ info->mon.char_max = char_count;
+ info->mon.char_last = char_count;
+#endif
+ if (tty == NULL) {
+ /* flush received characters */
+ new_rx_get = (new_rx_get + char_count) &
+ (rx_bufsize - 1);
+ info->rflush_count++;
+ } else {
+#ifdef BLOCKMOVE
+ /* we'd like to use memcpy(t, f, n) and memset(s, c, count)
+ for performance, but because of buffer boundaries, there
+ may be several steps to the operation */
+ while (1) {
+ len = tty_prepare_flip_string(tty, &buf,
+ char_count);
+ if (!len)
+ break;
+
+ len = min_t(unsigned int, min(len, char_count),
+ rx_bufsize - new_rx_get);
+
+ memcpy_fromio(buf, cinfo->base_addr +
+ rx_bufaddr + new_rx_get, len);
+
+ new_rx_get = (new_rx_get + len) &
+ (rx_bufsize - 1);
+ char_count -= len;
+ info->icount.rx += len;
+ info->idle_stats.recv_bytes += len;
+ }
+#else
+ len = tty_buffer_request_room(tty, char_count);
+ while (len--) {
+ data = readb(cinfo->base_addr + rx_bufaddr +
+ new_rx_get);
+ new_rx_get = (new_rx_get + 1) &
+ (rx_bufsize - 1);
+ tty_insert_flip_char(tty, data, TTY_NORMAL);
+ info->idle_stats.recv_bytes++;
+ info->icount.rx++;
+ }
+#endif
+#ifdef CONFIG_CYZ_INTR
+ /* Recalculate the number of chars in the RX buffer and issue
+ a cmd in case it's higher than the RX high water mark */
+ rx_put = readl(&buf_ctrl->rx_put);
+ if (rx_put >= rx_get)
+ char_count = rx_put - rx_get;
+ else
+ char_count = rx_put - rx_get + rx_bufsize;
+ if (char_count >= readl(&buf_ctrl->rx_threshold) &&
+ !timer_pending(&cyz_rx_full_timer[
+ info->line]))
+ mod_timer(&cyz_rx_full_timer[info->line],
+ jiffies + 1);
+#endif
+ info->idle_stats.recv_idle = jiffies;
+ tty_schedule_flip(tty);
+ }
+ /* Update rx_get */
+ cy_writel(&buf_ctrl->rx_get, new_rx_get);
+ }
+}
+
+static void cyz_handle_tx(struct cyclades_port *info, struct tty_struct *tty)
+{
+ struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
+ struct cyclades_card *cinfo = info->card;
+ u8 data;
+ unsigned int char_count;
+#ifdef BLOCKMOVE
+ int small_count;
+#endif
+ __u32 tx_put, tx_get, tx_bufsize, tx_bufaddr;
+
+ if (info->xmit_cnt <= 0) /* Nothing to transmit */
+ return;
+
+ tx_get = readl(&buf_ctrl->tx_get);
+ tx_put = readl(&buf_ctrl->tx_put);
+ tx_bufsize = readl(&buf_ctrl->tx_bufsize);
+ tx_bufaddr = readl(&buf_ctrl->tx_bufaddr);
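+ /* Free space in the board's circular TX buffer; one slot is kept empty
+ so that a full buffer can be distinguished from an empty one. */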
+ if (tx_put >= tx_get)
+ char_count = tx_get - tx_put - 1 + tx_bufsize;
+ else
+ char_count = tx_get - tx_put - 1;
+
+ if (char_count) {
+
+ if (tty == NULL)
+ goto ztxdone;
+
+ if (info->x_char) { /* send special char */
+ data = info->x_char;
+
+ cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data);
+ tx_put = (tx_put + 1) & (tx_bufsize - 1);
+ info->x_char = 0;
+ char_count--;
+ info->icount.tx++;
+ }
+#ifdef BLOCKMOVE
+ while (0 < (small_count = min_t(unsigned int,
+ tx_bufsize - tx_put, min_t(unsigned int,
+ (SERIAL_XMIT_SIZE - info->xmit_tail),
+ min_t(unsigned int, info->xmit_cnt,
+ char_count))))) {
+
+ memcpy_toio((char *)(cinfo->base_addr + tx_bufaddr +
+ tx_put),
+ &info->port.xmit_buf[info->xmit_tail],
+ small_count);
+
+ tx_put = (tx_put + small_count) & (tx_bufsize - 1);
+ char_count -= small_count;
+ info->icount.tx += small_count;
+ info->xmit_cnt -= small_count;
+ info->xmit_tail = (info->xmit_tail + small_count) &
+ (SERIAL_XMIT_SIZE - 1);
+ }
+#else
+ while (info->xmit_cnt && char_count) {
+ data = info->port.xmit_buf[info->xmit_tail];
+ info->xmit_cnt--;
+ info->xmit_tail = (info->xmit_tail + 1) &
+ (SERIAL_XMIT_SIZE - 1);
+
+ cy_writeb(cinfo->base_addr + tx_bufaddr + tx_put, data);
+ tx_put = (tx_put + 1) & (tx_bufsize - 1);
+ char_count--;
+ info->icount.tx++;
+ }
+#endif
+ tty_wakeup(tty);
+ztxdone:
+ /* Update tx_put */
+ cy_writel(&buf_ctrl->tx_put, tx_put);
+ }
+}
+
+static void cyz_handle_cmd(struct cyclades_card *cinfo)
+{
+ struct BOARD_CTRL __iomem *board_ctrl = cinfo->board_ctrl;
+ struct tty_struct *tty;
+ struct cyclades_port *info;
+ __u32 channel, param, fw_ver;
+ __u8 cmd;
+ int special_count;
+ int delta_count;
+
+ fw_ver = readl(&board_ctrl->fw_version);
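+ /* Firmware newer than version 241 reports the DCD state in the command
+ parameter; older firmware requires reading rs_status (see C_CM_MDCD). */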
+
+ while (cyz_fetch_msg(cinfo, &channel, &cmd, &param) == 1) {
+ special_count = 0;
+ delta_count = 0;
+ info = &cinfo->ports[channel];
+ tty = tty_port_tty_get(&info->port);
+ if (tty == NULL)
+ continue;
+
+ switch (cmd) {
+ case C_CM_PR_ERROR:
+ tty_insert_flip_char(tty, 0, TTY_PARITY);
+ info->icount.rx++;
+ special_count++;
+ break;
+ case C_CM_FR_ERROR:
+ tty_insert_flip_char(tty, 0, TTY_FRAME);
+ info->icount.rx++;
+ special_count++;
+ break;
+ case C_CM_RXBRK:
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
+ info->icount.rx++;
+ special_count++;
+ break;
+ case C_CM_MDCD:
+ info->icount.dcd++;
+ delta_count++;
+ if (info->port.flags & ASYNC_CHECK_CD) {
+ u32 dcd = fw_ver > 241 ? param :
+ readl(&info->u.cyz.ch_ctrl->rs_status);
+ if (dcd & C_RS_DCD)
+ wake_up_interruptible(&info->port.open_wait);
+ else
+ tty_hangup(tty);
+ }
+ break;
+ case C_CM_MCTS:
+ info->icount.cts++;
+ delta_count++;
+ break;
+ case C_CM_MRI:
+ info->icount.rng++;
+ delta_count++;
+ break;
+ case C_CM_MDSR:
+ info->icount.dsr++;
+ delta_count++;
+ break;
+#ifdef Z_WAKE
+ case C_CM_IOCTLW:
+ complete(&info->shutdown_wait);
+ break;
+#endif
+#ifdef CONFIG_CYZ_INTR
+ case C_CM_RXHIWM:
+ case C_CM_RXNNDT:
+ case C_CM_INTBACK2:
+ /* Reception Interrupt */
+#ifdef CY_DEBUG_INTERRUPTS
+ printk(KERN_DEBUG "cyz_interrupt: rcvd intr, card %d, "
+ "port %ld\n", info->card, channel);
+#endif
+ cyz_handle_rx(info, tty);
+ break;
+ case C_CM_TXBEMPTY:
+ case C_CM_TXLOWWM:
+ case C_CM_INTBACK:
+ /* Transmission Interrupt */
+#ifdef CY_DEBUG_INTERRUPTS
+ printk(KERN_DEBUG "cyz_interrupt: xmit intr, card %d, "
+ "port %ld\n", info->card, channel);
+#endif
+ cyz_handle_tx(info, tty);
+ break;
+#endif /* CONFIG_CYZ_INTR */
+ case C_CM_FATAL:
+ /* should do something with this !!! */
+ break;
+ default:
+ break;
+ }
+ if (delta_count)
+ wake_up_interruptible(&info->port.delta_msr_wait);
+ if (special_count)
+ tty_schedule_flip(tty);
+ tty_kref_put(tty);
+ }
+}
+
+#ifdef CONFIG_CYZ_INTR
+static irqreturn_t cyz_interrupt(int irq, void *dev_id)
+{
+ struct cyclades_card *cinfo = dev_id;
+
+ if (unlikely(!cyz_is_loaded(cinfo))) {
+#ifdef CY_DEBUG_INTERRUPTS
+ printk(KERN_DEBUG "cyz_interrupt: board not yet loaded "
+ "(IRQ%d).\n", irq);
+#endif
+ return IRQ_NONE;
+ }
+
+ /* Handle the interrupts */
+ cyz_handle_cmd(cinfo);
+
+ return IRQ_HANDLED;
+} /* cyz_interrupt */
+
+static void cyz_rx_restart(unsigned long arg)
+{
+ struct cyclades_port *info = (struct cyclades_port *)arg;
+ struct cyclades_card *card = info->card;
+ int retval;
+ __u32 channel = info->line - card->first_line;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+ retval = cyz_issue_cmd(card, channel, C_CM_INTBACK2, 0L);
+ if (retval != 0) {
+ printk(KERN_ERR "cyc:cyz_rx_restart retval on ttyC%d was %x\n",
+ info->line, retval);
+ }
+ spin_unlock_irqrestore(&card->card_lock, flags);
+}
+
+#else /* CONFIG_CYZ_INTR */
+
+static void cyz_poll(unsigned long arg)
+{
+ struct cyclades_card *cinfo;
+ struct cyclades_port *info;
+ unsigned long expires = jiffies + HZ;
+ unsigned int port, card;
+
+ for (card = 0; card < NR_CARDS; card++) {
+ cinfo = &cy_card[card];
+
+ if (!cy_is_Z(cinfo))
+ continue;
+ if (!cyz_is_loaded(cinfo))
+ continue;
+
+ /* Skip the first polling cycle to avoid race conditions with the FW */
+ if (!cinfo->intr_enabled) {
+ cinfo->intr_enabled = 1;
+ continue;
+ }
+
+ cyz_handle_cmd(cinfo);
+
+ for (port = 0; port < cinfo->nports; port++) {
+ struct tty_struct *tty;
+
+ info = &cinfo->ports[port];
+ tty = tty_port_tty_get(&info->port);
+ /* OK to pass NULL to the handle functions below.
+ They need to drop the data in that case. */
+
+ if (!info->throttle)
+ cyz_handle_rx(info, tty);
+ cyz_handle_tx(info, tty);
+ tty_kref_put(tty);
+ }
+ /* poll every 'cyz_polling_cycle' period */
+ expires = jiffies + cyz_polling_cycle;
+ }
+ mod_timer(&cyz_timerlist, expires);
+} /* cyz_poll */
+
+#endif /* CONFIG_CYZ_INTR */
+
+/********** End of block of Cyclades-Z specific code *********/
+/***********************************************************/
+
+/* This is called whenever a port becomes active;
+ interrupts are enabled and DTR & RTS are turned on.
+ */
+static int cy_startup(struct cyclades_port *info, struct tty_struct *tty)
+{
+ struct cyclades_card *card;
+ unsigned long flags;
+ int retval = 0;
+ int channel;
+ unsigned long page;
+
+ card = info->card;
+ channel = info->line - card->first_line;
+
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+
+ if (info->port.flags & ASYNC_INITIALIZED)
+ goto errout;
+
+ if (!info->type) {
+ set_bit(TTY_IO_ERROR, &tty->flags);
+ goto errout;
+ }
+
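+ /* Keep an already attached transmit buffer; otherwise adopt the page
+ allocated above. */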
+ if (info->port.xmit_buf)
+ free_page(page);
+ else
+ info->port.xmit_buf = (unsigned char *)page;
+
+ spin_unlock_irqrestore(&card->card_lock, flags);
+
+ cy_set_line_char(info, tty);
+
+ if (!cy_is_Z(card)) {
+ channel &= 0x03;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+
+ cyy_writeb(info, CyCAR, channel);
+
+ cyy_writeb(info, CyRTPR,
+ (info->default_timeout ? info->default_timeout : 0x02));
+ /* 10ms rx timeout */
+
+ cyy_issue_cmd(info, CyCHAN_CTL | CyENB_RCVR | CyENB_XMTR);
+
+ cyy_change_rts_dtr(info, TIOCM_RTS | TIOCM_DTR, 0);
+
+ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyRxData);
+ } else {
+ struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
+
+ if (!cyz_is_loaded(card))
+ return -ENODEV;
+
+#ifdef CY_DEBUG_OPEN
+ printk(KERN_DEBUG "cyc startup Z card %d, channel %d, "
+ "base_addr %p\n", card, channel, card->base_addr);
+#endif
+ spin_lock_irqsave(&card->card_lock, flags);
+
+ cy_writel(&ch_ctrl->op_mode, C_CH_ENABLE);
+#ifdef Z_WAKE
+#ifdef CONFIG_CYZ_INTR
+ cy_writel(&ch_ctrl->intr_enable,
+ C_IN_TXBEMPTY | C_IN_TXLOWWM | C_IN_RXHIWM |
+ C_IN_RXNNDT | C_IN_IOCTLW | C_IN_MDCD);
+#else
+ cy_writel(&ch_ctrl->intr_enable,
+ C_IN_IOCTLW | C_IN_MDCD);
+#endif /* CONFIG_CYZ_INTR */
+#else
+#ifdef CONFIG_CYZ_INTR
+ cy_writel(&ch_ctrl->intr_enable,
+ C_IN_TXBEMPTY | C_IN_TXLOWWM | C_IN_RXHIWM |
+ C_IN_RXNNDT | C_IN_MDCD);
+#else
+ cy_writel(&ch_ctrl->intr_enable, C_IN_MDCD);
+#endif /* CONFIG_CYZ_INTR */
+#endif /* Z_WAKE */
+
+ retval = cyz_issue_cmd(card, channel, C_CM_IOCTL, 0L);
+ if (retval != 0) {
+ printk(KERN_ERR "cyc:startup(1) retval on ttyC%d was "
+ "%x\n", info->line, retval);
+ }
+
+ /* Flush RX buffers before raising DTR and RTS */
+ retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_RX, 0L);
+ if (retval != 0) {
+ printk(KERN_ERR "cyc:startup(2) retval on ttyC%d was "
+ "%x\n", info->line, retval);
+ }
+
+ /* set timeout !!! */
+ /* set RTS and DTR !!! */
+ tty_port_raise_dtr_rts(&info->port);
+
+ /* enable send, recv, modem !!! */
+ }
+
+ info->port.flags |= ASYNC_INITIALIZED;
+
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+ info->breakon = info->breakoff = 0;
+ memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats));
+ info->idle_stats.in_use =
+ info->idle_stats.recv_idle =
+ info->idle_stats.xmit_idle = jiffies;
+
+ spin_unlock_irqrestore(&card->card_lock, flags);
+
+#ifdef CY_DEBUG_OPEN
+ printk(KERN_DEBUG "cyc startup done\n");
+#endif
+ return 0;
+
+errout:
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ free_page(page);
+ return retval;
+} /* startup */
+
+static void start_xmit(struct cyclades_port *info)
+{
+ struct cyclades_card *card = info->card;
+ unsigned long flags;
+ int channel = info->line - card->first_line;
+
+ if (!cy_is_Z(card)) {
+ spin_lock_irqsave(&card->card_lock, flags);
+ cyy_writeb(info, CyCAR, channel & 0x03);
+ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy);
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ } else {
+#ifdef CONFIG_CYZ_INTR
+ int retval;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+ retval = cyz_issue_cmd(card, channel, C_CM_INTBACK, 0L);
+ if (retval != 0) {
+ printk(KERN_ERR "cyc:start_xmit retval on ttyC%d was "
+ "%x\n", info->line, retval);
+ }
+ spin_unlock_irqrestore(&card->card_lock, flags);
+#else /* CONFIG_CYZ_INTR */
+ /* Don't have to do anything at this time */
+#endif /* CONFIG_CYZ_INTR */
+ }
+} /* start_xmit */
+
+/*
+ * This routine shuts down a serial port; interrupts are disabled,
+ * and DTR is dropped if the hangup on close termio flag is on.
+ */
+static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty)
+{
+ struct cyclades_card *card;
+ unsigned long flags;
+
+ if (!(info->port.flags & ASYNC_INITIALIZED))
+ return;
+
+ card = info->card;
+ if (!cy_is_Z(card)) {
+ spin_lock_irqsave(&card->card_lock, flags);
+
+ /* Clear delta_msr_wait queue to avoid mem leaks. */
+ wake_up_interruptible(&info->port.delta_msr_wait);
+
+ if (info->port.xmit_buf) {
+ unsigned char *temp;
+ temp = info->port.xmit_buf;
+ info->port.xmit_buf = NULL;
+ free_page((unsigned long)temp);
+ }
+ if (tty->termios->c_cflag & HUPCL)
+ cyy_change_rts_dtr(info, 0, TIOCM_RTS | TIOCM_DTR);
+
+ cyy_issue_cmd(info, CyCHAN_CTL | CyDIS_RCVR);
+ /* it may be appropriate to clear _XMIT at
+ some later date (after testing)!!! */
+
+ set_bit(TTY_IO_ERROR, &tty->flags);
+ info->port.flags &= ~ASYNC_INITIALIZED;
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ } else {
+#ifdef CY_DEBUG_OPEN
+ int channel = info->line - card->first_line;
+ printk(KERN_DEBUG "cyc shutdown Z card %d, channel %d, "
+ "base_addr %p\n", card, channel, card->base_addr);
+#endif
+
+ if (!cyz_is_loaded(card))
+ return;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+
+ if (info->port.xmit_buf) {
+ unsigned char *temp;
+ temp = info->port.xmit_buf;
+ info->port.xmit_buf = NULL;
+ free_page((unsigned long)temp);
+ }
+
+ if (tty->termios->c_cflag & HUPCL)
+ tty_port_lower_dtr_rts(&info->port);
+
+ set_bit(TTY_IO_ERROR, &tty->flags);
+ info->port.flags &= ~ASYNC_INITIALIZED;
+
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ }
+
+#ifdef CY_DEBUG_OPEN
+ printk(KERN_DEBUG "cyc shutdown done\n");
+#endif
+} /* shutdown */
+
+/*
+ * ------------------------------------------------------------
+ * cy_open() and friends
+ * ------------------------------------------------------------
+ */
+
+/*
+ * This routine is called whenever a serial port is opened. It
+ * performs the serial-specific initialization for the tty structure.
+ */
+static int cy_open(struct tty_struct *tty, struct file *filp)
+{
+ struct cyclades_port *info;
+ unsigned int i, line;
+ int retval;
+
+ line = tty->index;
+ if (tty->index < 0 || NR_PORTS <= line)
+ return -ENODEV;
+
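+ /* Map the tty line number to the card that owns it. */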
+ for (i = 0; i < NR_CARDS; i++)
+ if (line < cy_card[i].first_line + cy_card[i].nports &&
+ line >= cy_card[i].first_line)
+ break;
+ if (i >= NR_CARDS)
+ return -ENODEV;
+ info = &cy_card[i].ports[line - cy_card[i].first_line];
+ if (info->line < 0)
+ return -ENODEV;
+
+ /* If the card's firmware hasn't been loaded,
+ treat it as absent from the system. This
+ will make the user pay attention.
+ */
+ if (cy_is_Z(info->card)) {
+ struct cyclades_card *cinfo = info->card;
+ struct FIRM_ID __iomem *firm_id = cinfo->base_addr + ID_ADDRESS;
+
+ if (!cyz_is_loaded(cinfo)) {
+ if (cinfo->hw_ver == ZE_V1 && cyz_fpga_loaded(cinfo) &&
+ readl(&firm_id->signature) ==
+ ZFIRM_HLT) {
+ printk(KERN_ERR "cyc:Cyclades-Z Error: you "
+ "need an external power supply for "
+ "this number of ports.\nFirmware "
+ "halted.\n");
+ } else {
+ printk(KERN_ERR "cyc:Cyclades-Z firmware not "
+ "yet loaded\n");
+ }
+ return -ENODEV;
+ }
+#ifdef CONFIG_CYZ_INTR
+ else {
+ /* In case this Z board is operating in interrupt mode, its
+ interrupts should be enabled as soon as the first open
+ happens to one of its ports. */
+ if (!cinfo->intr_enabled) {
+ u16 intr;
+
+ /* Enable interrupts on the PLX chip */
+ intr = readw(&cinfo->ctl_addr.p9060->
+ intr_ctrl_stat) | 0x0900;
+ cy_writew(&cinfo->ctl_addr.p9060->
+ intr_ctrl_stat, intr);
+ /* Enable interrupts on the FW */
+ retval = cyz_issue_cmd(cinfo, 0,
+ C_CM_IRQ_ENBL, 0L);
+ if (retval != 0) {
+ printk(KERN_ERR "cyc:IRQ enable retval "
+ "was %x\n", retval);
+ }
+ cinfo->intr_enabled = 1;
+ }
+ }
+#endif /* CONFIG_CYZ_INTR */
+ /* Make sure this Z port really exists in hardware */
+ if (info->line > (cinfo->first_line + cinfo->nports - 1))
+ return -ENODEV;
+ }
+#ifdef CY_DEBUG_OTHER
+ printk(KERN_DEBUG "cyc:cy_open ttyC%d\n", info->line);
+#endif
+ tty->driver_data = info;
+ if (serial_paranoia_check(info, tty->name, "cy_open"))
+ return -ENODEV;
+
+#ifdef CY_DEBUG_OPEN
+ printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
+ info->port.count);
+#endif
+ info->port.count++;
+#ifdef CY_DEBUG_COUNT
+ printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
+ current->pid, info->port.count);
+#endif
+
+ /*
+ * If the port is the middle of closing, bail out now
+ */
+ if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) {
+ wait_event_interruptible_tty(info->port.close_wait,
+ !(info->port.flags & ASYNC_CLOSING));
+ return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS;
+ }
+
+ /*
+ * Start up serial port
+ */
+ retval = cy_startup(info, tty);
+ if (retval)
+ return retval;
+
+ retval = tty_port_block_til_ready(&info->port, tty, filp);
+ if (retval) {
+#ifdef CY_DEBUG_OPEN
+ printk(KERN_DEBUG "cyc:cy_open returning after block_til_ready "
+ "with %d\n", retval);
+#endif
+ return retval;
+ }
+
+ info->throttle = 0;
+ tty_port_tty_set(&info->port, tty);
+
+#ifdef CY_DEBUG_OPEN
+ printk(KERN_DEBUG "cyc:cy_open done\n");
+#endif
+ return 0;
+} /* cy_open */
+
+/*
+ * cy_wait_until_sent() --- wait until the transmitter is empty
+ */
+static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct cyclades_card *card;
+ struct cyclades_port *info = tty->driver_data;
+ unsigned long orig_jiffies;
+ int char_time;
+
+ if (serial_paranoia_check(info, tty->name, "cy_wait_until_sent"))
+ return;
+
+ if (info->xmit_fifo_size == 0)
+ return; /* Just in case.... */
+
+ orig_jiffies = jiffies;
+ /*
+ * Set the check interval to be 1/5 of the estimated time to
+ * send a single character, and make it at least 1. The check
+ * interval should also be less than the timeout.
+ *
+ * Note: we have to use pretty tight timings here to satisfy
+ * the NIST-PCTS.
+ */
+ char_time = (info->timeout - HZ / 50) / info->xmit_fifo_size;
+ char_time = char_time / 5;
+ if (char_time <= 0)
+ char_time = 1;
+ if (timeout < 0)
+ timeout = 0;
+ if (timeout)
+ char_time = min(char_time, timeout);
+ /*
+ * If the transmitter hasn't cleared in twice the approximate
+ * amount of time to send the entire FIFO, it probably won't
+ * ever clear. This assumes the UART isn't doing flow
+ * control, which is currently the case. Hence, if it ever
+ * takes longer than info->timeout, this is probably due to a
+ * UART bug of some kind. So, we clamp the timeout parameter at
+ * 2*info->timeout.
+ */
+ if (!timeout || timeout > 2 * info->timeout)
+ timeout = 2 * info->timeout;
+#ifdef CY_DEBUG_WAIT_UNTIL_SENT
+ printk(KERN_DEBUG "In cy_wait_until_sent(%d) check=%d, jiff=%lu...",
+ timeout, char_time, jiffies);
+#endif
+ card = info->card;
+ if (!cy_is_Z(card)) {
+ while (cyy_readb(info, CySRER) & CyTxRdy) {
+#ifdef CY_DEBUG_WAIT_UNTIL_SENT
+ printk(KERN_DEBUG "Not clean (jiff=%lu)...", jiffies);
+#endif
+ if (msleep_interruptible(jiffies_to_msecs(char_time)))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies +
+ timeout))
+ break;
+ }
+ }
+ /* Run one more char cycle */
+ msleep_interruptible(jiffies_to_msecs(char_time * 5));
+#ifdef CY_DEBUG_WAIT_UNTIL_SENT
+ printk(KERN_DEBUG "Clean (jiff=%lu)...done\n", jiffies);
+#endif
+}
+
+static void cy_flush_buffer(struct tty_struct *tty)
+{
+ struct cyclades_port *info = tty->driver_data;
+ struct cyclades_card *card;
+ int channel, retval;
+ unsigned long flags;
+
+#ifdef CY_DEBUG_IO
+ printk(KERN_DEBUG "cyc:cy_flush_buffer ttyC%d\n", info->line);
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "cy_flush_buffer"))
+ return;
+
+ card = info->card;
+ channel = info->line - card->first_line;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+ spin_unlock_irqrestore(&card->card_lock, flags);
+
+ if (cy_is_Z(card)) { /* If it is a Z card, flush the on-board
+ buffers as well */
+ spin_lock_irqsave(&card->card_lock, flags);
+ retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L);
+ if (retval != 0) {
+ printk(KERN_ERR "cyc: flush_buffer retval on ttyC%d "
+ "was %x\n", info->line, retval);
+ }
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ }
+ tty_wakeup(tty);
+} /* cy_flush_buffer */
+
+
+static void cy_do_close(struct tty_port *port)
+{
+ struct cyclades_port *info = container_of(port, struct cyclades_port,
+ port);
+ struct cyclades_card *card;
+ unsigned long flags;
+ int channel;
+
+ card = info->card;
+ channel = info->line - card->first_line;
+ spin_lock_irqsave(&card->card_lock, flags);
+
+ if (!cy_is_Z(card)) {
+ /* Stop accepting input */
+ cyy_writeb(info, CyCAR, channel & 0x03);
+ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyRxData);
+ if (info->port.flags & ASYNC_INITIALIZED) {
+ /* Waiting for on-board buffers to be empty before
+ closing the port */
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ cy_wait_until_sent(port->tty, info->timeout);
+ spin_lock_irqsave(&card->card_lock, flags);
+ }
+ } else {
+#ifdef Z_WAKE
+ /* Waiting for on-board buffers to be empty before closing
+ the port */
+ struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
+ int retval;
+
+ if (readl(&ch_ctrl->flow_status) != C_FS_TXIDLE) {
+ retval = cyz_issue_cmd(card, channel, C_CM_IOCTLW, 0L);
+ if (retval != 0) {
+ printk(KERN_DEBUG "cyc:cy_close retval on "
+ "ttyC%d was %x\n", info->line, retval);
+ }
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ wait_for_completion_interruptible(&info->shutdown_wait);
+ spin_lock_irqsave(&card->card_lock, flags);
+ }
+#endif
+ }
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ cy_shutdown(info, port->tty);
+}
+
+/*
+ * This routine is called when a particular tty device is closed.
+ */
+static void cy_close(struct tty_struct *tty, struct file *filp)
+{
+ struct cyclades_port *info = tty->driver_data;
+ if (!info || serial_paranoia_check(info, tty->name, "cy_close"))
+ return;
+ tty_port_close(&info->port, tty, filp);
+} /* cy_close */
+
+/* This routine gets called when tty_write has put something into
+ * the write_queue. The characters may come from user space or
+ * kernel space.
+ *
+ * This routine will return the number of characters actually
+ * accepted for writing.
+ *
+ * If the port is not already transmitting stuff, start it off by
+ * enabling interrupts. The interrupt service routine will then
+ * ensure that the characters are sent.
+ * If the port is already active, there is no need to kick it.
+ *
+ */
+static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ struct cyclades_port *info = tty->driver_data;
+ unsigned long flags;
+ int c, ret = 0;
+
+#ifdef CY_DEBUG_IO
+ printk(KERN_DEBUG "cyc:cy_write ttyC%d\n", info->line);
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "cy_write"))
+ return 0;
+
+ if (!info->port.xmit_buf)
+ return 0;
+
+ spin_lock_irqsave(&info->card->card_lock, flags);
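+ /* Copy into the circular transmit buffer, limited by both the free
+ space and the distance to the end of the buffer (wrap-around). */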
+ while (1) {
+ c = min(count, (int)(SERIAL_XMIT_SIZE - info->xmit_cnt - 1));
+ c = min(c, (int)(SERIAL_XMIT_SIZE - info->xmit_head));
+
+ if (c <= 0)
+ break;
+
+ memcpy(info->port.xmit_buf + info->xmit_head, buf, c);
+ info->xmit_head = (info->xmit_head + c) &
+ (SERIAL_XMIT_SIZE - 1);
+ info->xmit_cnt += c;
+ buf += c;
+ count -= c;
+ ret += c;
+ }
+ spin_unlock_irqrestore(&info->card->card_lock, flags);
+
+ info->idle_stats.xmit_bytes += ret;
+ info->idle_stats.xmit_idle = jiffies;
+
+ if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped)
+ start_xmit(info);
+
+ return ret;
+} /* cy_write */
+
+/*
+ * This routine is called by the kernel to write a single
+ * character to the tty device. If the kernel uses this routine,
+ * it must call the flush_chars() routine (if defined) when it is
+ * done stuffing characters into the driver. If there is no room
+ * in the queue, the character is ignored.
+ */
+static int cy_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ struct cyclades_port *info = tty->driver_data;
+ unsigned long flags;
+
+#ifdef CY_DEBUG_IO
+ printk(KERN_DEBUG "cyc:cy_put_char ttyC%d\n", info->line);
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "cy_put_char"))
+ return 0;
+
+ if (!info->port.xmit_buf)
+ return 0;
+
+ spin_lock_irqsave(&info->card->card_lock, flags);
+ if (info->xmit_cnt >= (int)(SERIAL_XMIT_SIZE - 1)) {
+ spin_unlock_irqrestore(&info->card->card_lock, flags);
+ return 0;
+ }
+
+ info->port.xmit_buf[info->xmit_head++] = ch;
+ info->xmit_head &= SERIAL_XMIT_SIZE - 1;
+ info->xmit_cnt++;
+ info->idle_stats.xmit_bytes++;
+ info->idle_stats.xmit_idle = jiffies;
+ spin_unlock_irqrestore(&info->card->card_lock, flags);
+ return 1;
+} /* cy_put_char */
+
+/*
+ * This routine is called by the kernel after it has written a
+ * series of characters to the tty device using put_char().
+ */
+static void cy_flush_chars(struct tty_struct *tty)
+{
+ struct cyclades_port *info = tty->driver_data;
+
+#ifdef CY_DEBUG_IO
+ printk(KERN_DEBUG "cyc:cy_flush_chars ttyC%d\n", info->line);
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "cy_flush_chars"))
+ return;
+
+ if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
+ !info->port.xmit_buf)
+ return;
+
+ start_xmit(info);
+} /* cy_flush_chars */
+
+/*
+ * This routine returns the numbers of characters the tty driver
+ * will accept for queuing to be written. This number is subject
+ * to change as output buffers get emptied, or if the output flow
+ * control is activated.
+ */
+static int cy_write_room(struct tty_struct *tty)
+{
+ struct cyclades_port *info = tty->driver_data;
+ int ret;
+
+#ifdef CY_DEBUG_IO
+ printk(KERN_DEBUG "cyc:cy_write_room ttyC%d\n", info->line);
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "cy_write_room"))
+ return 0;
+ ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
+ if (ret < 0)
+ ret = 0;
+ return ret;
+} /* cy_write_room */
+
+static int cy_chars_in_buffer(struct tty_struct *tty)
+{
+ struct cyclades_port *info = tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "cy_chars_in_buffer"))
+ return 0;
+
+#ifdef Z_EXT_CHARS_IN_BUFFER
+ if (!cy_is_Z(info->card)) {
+#endif /* Z_EXT_CHARS_IN_BUFFER */
+#ifdef CY_DEBUG_IO
+ printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n",
+ info->line, info->xmit_cnt);
+#endif
+ return info->xmit_cnt;
+#ifdef Z_EXT_CHARS_IN_BUFFER
+ } else {
+ struct BUF_CTRL __iomem *buf_ctrl = info->u.cyz.buf_ctrl;
+ int char_count;
+ __u32 tx_put, tx_get, tx_bufsize;
+
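+ /* Add the characters still queued in the board's own TX buffer to
+ the driver's count. */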
+ tx_get = readl(&buf_ctrl->tx_get);
+ tx_put = readl(&buf_ctrl->tx_put);
+ tx_bufsize = readl(&buf_ctrl->tx_bufsize);
+ if (tx_put >= tx_get)
+ char_count = tx_put - tx_get;
+ else
+ char_count = tx_put - tx_get + tx_bufsize;
+#ifdef CY_DEBUG_IO
+ printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n",
+ info->line, info->xmit_cnt + char_count);
+#endif
+ return info->xmit_cnt + char_count;
+ }
+#endif /* Z_EXT_CHARS_IN_BUFFER */
+} /* cy_chars_in_buffer */
+
+/*
+ * ------------------------------------------------------------
+ * cy_ioctl() and friends
+ * ------------------------------------------------------------
+ */
+
+static void cyy_baud_calc(struct cyclades_port *info, __u32 baud)
+{
+ int co, co_val, bpr;
+ __u32 cy_clock = ((info->chip_rev >= CD1400_REV_J) ? 60000000 :
+ 25000000);
+
+ if (baud == 0) {
+ info->tbpr = info->tco = info->rbpr = info->rco = 0;
+ return;
+ }
+
+ /* determine which prescaler to use */
+ for (co = 4, co_val = 2048; co; co--, co_val >>= 2) {
+ if (cy_clock / co_val / baud > 63)
+ break;
+ }
+
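+ /* Round the bit rate period to the nearest value and clamp it to the
+ 8-bit BPR register. */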
+ bpr = (cy_clock / co_val * 2 / baud + 1) / 2;
+ if (bpr > 255)
+ bpr = 255;
+
+ info->tbpr = info->rbpr = bpr;
+ info->tco = info->rco = co;
+}
+
+/*
+ * This routine finds or computes the various line characteristics.
+ * It used to be called config_setup
+ */
+static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty)
+{
+ struct cyclades_card *card;
+ unsigned long flags;
+ int channel;
+ unsigned cflag, iflag;
+ int baud, baud_rate = 0;
+ int i;
+
+ if (!tty->termios) /* XXX can this happen at all? */
+ return;
+
+ if (info->line == -1)
+ return;
+
+ cflag = tty->termios->c_cflag;
+ iflag = tty->termios->c_iflag;
+
+ /*
+ * Set up the tty->alt_speed kludge
+ */
+ if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
+ tty->alt_speed = 57600;
+ if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
+ tty->alt_speed = 115200;
+ if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
+ tty->alt_speed = 230400;
+ if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
+ tty->alt_speed = 460800;
+
+ card = info->card;
+ channel = info->line - card->first_line;
+
+ if (!cy_is_Z(card)) {
+ u32 cflags;
+
+ /* baud rate */
+ baud = tty_get_baud_rate(tty);
+ if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
+ ASYNC_SPD_CUST) {
+ if (info->custom_divisor)
+ baud_rate = info->baud / info->custom_divisor;
+ else
+ baud_rate = info->baud;
+ } else if (baud > CD1400_MAX_SPEED) {
+ baud = CD1400_MAX_SPEED;
+ }
+ /* find the baud index */
+ for (i = 0; i < 20; i++) {
+ if (baud == baud_table[i])
+ break;
+ }
+ if (i == 20)
+ i = 19; /* CD1400_MAX_SPEED */
+
+ if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
+ ASYNC_SPD_CUST) {
+ cyy_baud_calc(info, baud_rate);
+ } else {
+ if (info->chip_rev >= CD1400_REV_J) {
+ /* It is a CD1400 rev. J or later */
+ info->tbpr = baud_bpr_60[i]; /* Tx BPR */
+ info->tco = baud_co_60[i]; /* Tx CO */
+ info->rbpr = baud_bpr_60[i]; /* Rx BPR */
+ info->rco = baud_co_60[i]; /* Rx CO */
+ } else {
+ info->tbpr = baud_bpr_25[i]; /* Tx BPR */
+ info->tco = baud_co_25[i]; /* Tx CO */
+ info->rbpr = baud_bpr_25[i]; /* Rx BPR */
+ info->rco = baud_co_25[i]; /* Rx CO */
+ }
+ }
+ if (baud_table[i] == 134) {
+ /* get it right for 134.5 baud */
+ info->timeout = (info->xmit_fifo_size * HZ * 30 / 269) +
+ 2;
+ } else if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
+ ASYNC_SPD_CUST) {
+ info->timeout = (info->xmit_fifo_size * HZ * 15 /
+ baud_rate) + 2;
+ } else if (baud_table[i]) {
+ info->timeout = (info->xmit_fifo_size * HZ * 15 /
+ baud_table[i]) + 2;
+ /* this needs to be propagated into the card info */
+ } else {
+ info->timeout = 0;
+ }
+ /* By convention (B0 in termios), a baud rate of zero means the
+ line should be shut down; that case is handled further down in
+ this routine, where DTR is dropped. */
+
+ /* byte size and parity */
+ info->cor5 = 0;
+ info->cor4 = 0;
+ /* receive threshold */
+ info->cor3 = (info->default_threshold ?
+ info->default_threshold : baud_cor3[i]);
+ info->cor2 = CyETC;
+ switch (cflag & CSIZE) {
+ case CS5:
+ info->cor1 = Cy_5_BITS;
+ break;
+ case CS6:
+ info->cor1 = Cy_6_BITS;
+ break;
+ case CS7:
+ info->cor1 = Cy_7_BITS;
+ break;
+ case CS8:
+ info->cor1 = Cy_8_BITS;
+ break;
+ }
+ if (cflag & CSTOPB)
+ info->cor1 |= Cy_2_STOP;
+
+ if (cflag & PARENB) {
+ if (cflag & PARODD)
+ info->cor1 |= CyPARITY_O;
+ else
+ info->cor1 |= CyPARITY_E;
+ } else
+ info->cor1 |= CyPARITY_NONE;
+
+ /* CTS flow control flag */
+ if (cflag & CRTSCTS) {
+ info->port.flags |= ASYNC_CTS_FLOW;
+ info->cor2 |= CyCtsAE;
+ } else {
+ info->port.flags &= ~ASYNC_CTS_FLOW;
+ info->cor2 &= ~CyCtsAE;
+ }
+ if (cflag & CLOCAL)
+ info->port.flags &= ~ASYNC_CHECK_CD;
+ else
+ info->port.flags |= ASYNC_CHECK_CD;
+
+ /***********************************************
+ The hardware option, CyRtsAO, presents RTS when
+ the chip has characters to send. Since most modems
+ use RTS as reverse (inbound) flow control, this
+ option is not used. If inbound flow control is
+ necessary, DTR can be programmed to provide the
+ appropriate signals for use with a non-standard
+ cable. Contact Marcio Saito for details.
+ ***********************************************/
+
+ channel &= 0x03;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+ cyy_writeb(info, CyCAR, channel);
+
+ /* tx and rx baud rate */
+
+ cyy_writeb(info, CyTCOR, info->tco);
+ cyy_writeb(info, CyTBPR, info->tbpr);
+ cyy_writeb(info, CyRCOR, info->rco);
+ cyy_writeb(info, CyRBPR, info->rbpr);
+
+ /* set line characteristics according to the configuration */
+
+ cyy_writeb(info, CySCHR1, START_CHAR(tty));
+ cyy_writeb(info, CySCHR2, STOP_CHAR(tty));
+ cyy_writeb(info, CyCOR1, info->cor1);
+ cyy_writeb(info, CyCOR2, info->cor2);
+ cyy_writeb(info, CyCOR3, info->cor3);
+ cyy_writeb(info, CyCOR4, info->cor4);
+ cyy_writeb(info, CyCOR5, info->cor5);
+
+ cyy_issue_cmd(info, CyCOR_CHANGE | CyCOR1ch | CyCOR2ch |
+ CyCOR3ch);
+
+ /* !!! Is this needed? */
+ cyy_writeb(info, CyCAR, channel);
+ cyy_writeb(info, CyRTPR,
+ (info->default_timeout ? info->default_timeout : 0x02));
+ /* 10ms rx timeout */
+
+ cflags = CyCTS;
+ if (!C_CLOCAL(tty))
+ cflags |= CyDSR | CyRI | CyDCD;
+ /* without modem intr */
+ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyMdmCh);
+ /* act on 1->0 modem transitions */
+ if ((cflag & CRTSCTS) && info->rflow)
+ cyy_writeb(info, CyMCOR1, cflags | rflow_thr[i]);
+ else
+ cyy_writeb(info, CyMCOR1, cflags);
+ /* act on 0->1 modem transitions */
+ cyy_writeb(info, CyMCOR2, cflags);
+
+ if (i == 0) /* baud rate is zero, turn off line */
+ cyy_change_rts_dtr(info, 0, TIOCM_DTR);
+ else
+ cyy_change_rts_dtr(info, TIOCM_DTR, 0);
+
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+ spin_unlock_irqrestore(&card->card_lock, flags);
+
+ } else {
+ struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
+ __u32 sw_flow;
+ int retval;
+
+ if (!cyz_is_loaded(card))
+ return;
+
+ /* baud rate */
+ baud = tty_get_baud_rate(tty);
+ if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
+ ASYNC_SPD_CUST) {
+ if (info->custom_divisor)
+ baud_rate = info->baud / info->custom_divisor;
+ else
+ baud_rate = info->baud;
+ } else if (baud > CYZ_MAX_SPEED) {
+ baud = CYZ_MAX_SPEED;
+ }
+ cy_writel(&ch_ctrl->comm_baud, baud);
+
+ if (baud == 134) {
+ /* get it right for 134.5 baud */
+ info->timeout = (info->xmit_fifo_size * HZ * 30 / 269) +
+ 2;
+ } else if (baud == 38400 && (info->port.flags & ASYNC_SPD_MASK) ==
+ ASYNC_SPD_CUST) {
+ info->timeout = (info->xmit_fifo_size * HZ * 15 /
+ baud_rate) + 2;
+ } else if (baud) {
+ info->timeout = (info->xmit_fifo_size * HZ * 15 /
+ baud) + 2;
+ /* this needs to be propagated into the card info */
+ } else {
+ info->timeout = 0;
+ }
+
+ /* byte size and parity */
+ switch (cflag & CSIZE) {
+ case CS5:
+ cy_writel(&ch_ctrl->comm_data_l, C_DL_CS5);
+ break;
+ case CS6:
+ cy_writel(&ch_ctrl->comm_data_l, C_DL_CS6);
+ break;
+ case CS7:
+ cy_writel(&ch_ctrl->comm_data_l, C_DL_CS7);
+ break;
+ case CS8:
+ cy_writel(&ch_ctrl->comm_data_l, C_DL_CS8);
+ break;
+ }
+ if (cflag & CSTOPB) {
+ cy_writel(&ch_ctrl->comm_data_l,
+ readl(&ch_ctrl->comm_data_l) | C_DL_2STOP);
+ } else {
+ cy_writel(&ch_ctrl->comm_data_l,
+ readl(&ch_ctrl->comm_data_l) | C_DL_1STOP);
+ }
+ if (cflag & PARENB) {
+ if (cflag & PARODD)
+ cy_writel(&ch_ctrl->comm_parity, C_PR_ODD);
+ else
+ cy_writel(&ch_ctrl->comm_parity, C_PR_EVEN);
+ } else
+ cy_writel(&ch_ctrl->comm_parity, C_PR_NONE);
+
+ /* CTS flow control flag */
+ if (cflag & CRTSCTS) {
+ cy_writel(&ch_ctrl->hw_flow,
+ readl(&ch_ctrl->hw_flow) | C_RS_CTS | C_RS_RTS);
+ } else {
+ cy_writel(&ch_ctrl->hw_flow, readl(&ch_ctrl->hw_flow) &
+ ~(C_RS_CTS | C_RS_RTS));
+ }
+ /* As the HW flow control is done in firmware, the driver
+ doesn't need to care about it */
+ info->port.flags &= ~ASYNC_CTS_FLOW;
+
+ /* XON/XOFF/XANY flow control flags */
+ sw_flow = 0;
+ if (iflag & IXON) {
+ sw_flow |= C_FL_OXX;
+ if (iflag & IXANY)
+ sw_flow |= C_FL_OIXANY;
+ }
+ cy_writel(&ch_ctrl->sw_flow, sw_flow);
+
+ retval = cyz_issue_cmd(card, channel, C_CM_IOCTL, 0L);
+ if (retval != 0) {
+ printk(KERN_ERR "cyc:set_line_char retval on ttyC%d "
+ "was %x\n", info->line, retval);
+ }
+
+ /* CD sensitivity */
+ if (cflag & CLOCAL)
+ info->port.flags &= ~ASYNC_CHECK_CD;
+ else
+ info->port.flags |= ASYNC_CHECK_CD;
+
+ if (baud == 0) { /* baud rate is zero, turn off line */
+ cy_writel(&ch_ctrl->rs_control,
+ readl(&ch_ctrl->rs_control) & ~C_RS_DTR);
+#ifdef CY_DEBUG_DTR
+ printk(KERN_DEBUG "cyc:set_line_char dropping Z DTR\n");
+#endif
+ } else {
+ cy_writel(&ch_ctrl->rs_control,
+ readl(&ch_ctrl->rs_control) | C_RS_DTR);
+#ifdef CY_DEBUG_DTR
+ printk(KERN_DEBUG "cyc:set_line_char raising Z DTR\n");
+#endif
+ }
+
+ retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L);
+ if (retval != 0) {
+ printk(KERN_ERR "cyc:set_line_char(2) retval on ttyC%d "
+ "was %x\n", info->line, retval);
+ }
+
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+ }
+} /* set_line_char */
+
+static int cy_get_serial_info(struct cyclades_port *info,
+ struct serial_struct __user *retinfo)
+{
+ struct cyclades_card *cinfo = info->card;
+ struct serial_struct tmp = {
+ .type = info->type,
+ .line = info->line,
+ .port = (info->card - cy_card) * 0x100 + info->line -
+ cinfo->first_line,
+ .irq = cinfo->irq,
+ .flags = info->port.flags,
+ .close_delay = info->port.close_delay,
+ .closing_wait = info->port.closing_wait,
+ .baud_base = info->baud,
+ .custom_divisor = info->custom_divisor,
+ .hub6 = 0, /*!!! */
+ };
+ return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
+}
+
+static int
+cy_set_serial_info(struct cyclades_port *info, struct tty_struct *tty,
+ struct serial_struct __user *new_info)
+{
+ struct serial_struct new_serial;
+ int ret;
+
+ if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
+ return -EFAULT;
+
+ mutex_lock(&info->port.mutex);
+ if (!capable(CAP_SYS_ADMIN)) {
+ if (new_serial.close_delay != info->port.close_delay ||
+ new_serial.baud_base != info->baud ||
+ (new_serial.flags & ASYNC_FLAGS &
+ ~ASYNC_USR_MASK) !=
+ (info->port.flags & ASYNC_FLAGS & ~ASYNC_USR_MASK))
+ {
+ mutex_unlock(&info->port.mutex);
+ return -EPERM;
+ }
+ info->port.flags = (info->port.flags & ~ASYNC_USR_MASK) |
+ (new_serial.flags & ASYNC_USR_MASK);
+ info->baud = new_serial.baud_base;
+ info->custom_divisor = new_serial.custom_divisor;
+ goto check_and_exit;
+ }
+
+ /*
+ * OK, past this point, all the error checking has been done.
+ * At this point, we start making changes.....
+ */
+
+ info->baud = new_serial.baud_base;
+ info->custom_divisor = new_serial.custom_divisor;
+ info->port.flags = (info->port.flags & ~ASYNC_FLAGS) |
+ (new_serial.flags & ASYNC_FLAGS);
+ info->port.close_delay = new_serial.close_delay * HZ / 100;
+ info->port.closing_wait = new_serial.closing_wait * HZ / 100;
+
+check_and_exit:
+ if (info->port.flags & ASYNC_INITIALIZED) {
+ cy_set_line_char(info, tty);
+ ret = 0;
+ } else {
+ ret = cy_startup(info, tty);
+ }
+ mutex_unlock(&info->port.mutex);
+ return ret;
+} /* set_serial_info */
+
+/*
+ * get_lsr_info - get line status register info
+ *
+ * Purpose: Let user call ioctl() to get info when the UART physically
+ * is emptied. On bus types like RS485, the transmitter must
+ * release the bus after transmitting. This must be done when
+ * the transmit shift register is empty, not when the
+ * transmit holding register is empty. This functionality
+ * allows an RS485 driver to be written in user space.
+ */
+static int get_lsr_info(struct cyclades_port *info, unsigned int __user *value)
+{
+ struct cyclades_card *card = info->card;
+ unsigned int result;
+ unsigned long flags;
+ u8 status;
+
+ if (!cy_is_Z(card)) {
+ spin_lock_irqsave(&card->card_lock, flags);
+ status = cyy_readb(info, CySRER) & (CyTxRdy | CyTxMpty);
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ result = (status ? 0 : TIOCSER_TEMT);
+ } else {
+ /* Not supported yet */
+ return -EINVAL;
+ }
+ return put_user(result, value);
+}
+
+static int cy_tiocmget(struct tty_struct *tty)
+{
+ struct cyclades_port *info = tty->driver_data;
+ struct cyclades_card *card;
+ int result;
+
+ if (serial_paranoia_check(info, tty->name, __func__))
+ return -ENODEV;
+
+ card = info->card;
+
+ if (!cy_is_Z(card)) {
+ unsigned long flags;
+ int channel = info->line - card->first_line;
+ u8 status;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+ cyy_writeb(info, CyCAR, channel & 0x03);
+ status = cyy_readb(info, CyMSVR1);
+ status |= cyy_readb(info, CyMSVR2);
+ spin_unlock_irqrestore(&card->card_lock, flags);
+
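+ /* On boards with inverted RTS/DTR wiring, swap the bits back so the
+ caller always sees logical TIOCM_RTS/TIOCM_DTR. */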
+ if (info->rtsdtr_inv) {
+ result = ((status & CyRTS) ? TIOCM_DTR : 0) |
+ ((status & CyDTR) ? TIOCM_RTS : 0);
+ } else {
+ result = ((status & CyRTS) ? TIOCM_RTS : 0) |
+ ((status & CyDTR) ? TIOCM_DTR : 0);
+ }
+ result |= ((status & CyDCD) ? TIOCM_CAR : 0) |
+ ((status & CyRI) ? TIOCM_RNG : 0) |
+ ((status & CyDSR) ? TIOCM_DSR : 0) |
+ ((status & CyCTS) ? TIOCM_CTS : 0);
+ } else {
+ u32 lstatus;
+
+ if (!cyz_is_loaded(card)) {
+ result = -ENODEV;
+ goto end;
+ }
+
+ lstatus = readl(&info->u.cyz.ch_ctrl->rs_status);
+ result = ((lstatus & C_RS_RTS) ? TIOCM_RTS : 0) |
+ ((lstatus & C_RS_DTR) ? TIOCM_DTR : 0) |
+ ((lstatus & C_RS_DCD) ? TIOCM_CAR : 0) |
+ ((lstatus & C_RS_RI) ? TIOCM_RNG : 0) |
+ ((lstatus & C_RS_DSR) ? TIOCM_DSR : 0) |
+ ((lstatus & C_RS_CTS) ? TIOCM_CTS : 0);
+ }
+end:
+ return result;
+} /* cy_tiocmget */
+
+static int
+cy_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct cyclades_port *info = tty->driver_data;
+ struct cyclades_card *card;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, __func__))
+ return -ENODEV;
+
+ card = info->card;
+ if (!cy_is_Z(card)) {
+ spin_lock_irqsave(&card->card_lock, flags);
+ cyy_change_rts_dtr(info, set, clear);
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ } else {
+ struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
+ int retval, channel = info->line - card->first_line;
+ u32 rs;
+
+ if (!cyz_is_loaded(card))
+ return -ENODEV;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+ rs = readl(&ch_ctrl->rs_control);
+ if (set & TIOCM_RTS)
+ rs |= C_RS_RTS;
+ if (clear & TIOCM_RTS)
+ rs &= ~C_RS_RTS;
+ if (set & TIOCM_DTR) {
+ rs |= C_RS_DTR;
+#ifdef CY_DEBUG_DTR
+ printk(KERN_DEBUG "cyc:set_modem_info raising Z DTR\n");
+#endif
+ }
+ if (clear & TIOCM_DTR) {
+ rs &= ~C_RS_DTR;
+#ifdef CY_DEBUG_DTR
+ printk(KERN_DEBUG "cyc:set_modem_info clearing "
+ "Z DTR\n");
+#endif
+ }
+ cy_writel(&ch_ctrl->rs_control, rs);
+ retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L);
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ if (retval != 0) {
+ printk(KERN_ERR "cyc:set_modem_info retval on ttyC%d "
+ "was %x\n", info->line, retval);
+ }
+ }
+ return 0;
+}
+
+/*
+ * cy_break() --- routine which turns the break handling on or off
+ */
+static int cy_break(struct tty_struct *tty, int break_state)
+{
+ struct cyclades_port *info = tty->driver_data;
+ struct cyclades_card *card;
+ unsigned long flags;
+ int retval = 0;
+
+ if (serial_paranoia_check(info, tty->name, "cy_break"))
+ return -EINVAL;
+
+ card = info->card;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+ if (!cy_is_Z(card)) {
+ /* Let the transmit ISR take care of this (since it
+ requires stuffing characters into the output stream).
+ */
+ if (break_state == -1) {
+ if (!info->breakon) {
+ info->breakon = 1;
+ if (!info->xmit_cnt) {
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ start_xmit(info);
+ spin_lock_irqsave(&card->card_lock, flags);
+ }
+ }
+ } else {
+ if (!info->breakoff) {
+ info->breakoff = 1;
+ if (!info->xmit_cnt) {
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ start_xmit(info);
+ spin_lock_irqsave(&card->card_lock, flags);
+ }
+ }
+ }
+ } else {
+ if (break_state == -1) {
+ retval = cyz_issue_cmd(card,
+ info->line - card->first_line,
+ C_CM_SET_BREAK, 0L);
+ if (retval != 0) {
+ printk(KERN_ERR "cyc:cy_break (set) retval on "
+ "ttyC%d was %x\n", info->line, retval);
+ }
+ } else {
+ retval = cyz_issue_cmd(card,
+ info->line - card->first_line,
+ C_CM_CLR_BREAK, 0L);
+ if (retval != 0) {
+ printk(KERN_DEBUG "cyc:cy_break (clr) retval "
+ "on ttyC%d was %x\n", info->line,
+ retval);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ return retval;
+} /* cy_break */
+
+static int set_threshold(struct cyclades_port *info, unsigned long value)
+{
+ struct cyclades_card *card = info->card;
+ unsigned long flags;
+
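+ /* Only Cyclom-Y (CD1400) ports keep the receive threshold in the
+ CyREC_FIFO bits of COR3; for Z cards this is a no-op. */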
+ if (!cy_is_Z(card)) {
+ info->cor3 &= ~CyREC_FIFO;
+ info->cor3 |= value & CyREC_FIFO;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+ cyy_writeb(info, CyCOR3, info->cor3);
+ cyy_issue_cmd(info, CyCOR_CHANGE | CyCOR3ch);
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ }
+ return 0;
+} /* set_threshold */
+
+static int get_threshold(struct cyclades_port *info,
+ unsigned long __user *value)
+{
+ struct cyclades_card *card = info->card;
+
+ if (!cy_is_Z(card)) {
+ u8 tmp = cyy_readb(info, CyCOR3) & CyREC_FIFO;
+ return put_user(tmp, value);
+ }
+ return 0;
+} /* get_threshold */
+
+static int set_timeout(struct cyclades_port *info, unsigned long value)
+{
+ struct cyclades_card *card = info->card;
+ unsigned long flags;
+
+ if (!cy_is_Z(card)) {
+ spin_lock_irqsave(&card->card_lock, flags);
+ cyy_writeb(info, CyRTPR, value & 0xff);
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ }
+ return 0;
+} /* set_timeout */
+
+static int get_timeout(struct cyclades_port *info,
+ unsigned long __user *value)
+{
+ struct cyclades_card *card = info->card;
+
+ if (!cy_is_Z(card)) {
+ u8 tmp = cyy_readb(info, CyRTPR);
+ return put_user(tmp, value);
+ }
+ return 0;
+} /* get_timeout */
+
+static int cy_cflags_changed(struct cyclades_port *info, unsigned long arg,
+ struct cyclades_icount *cprev)
+{
+ struct cyclades_icount cnow;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&info->card->card_lock, flags);
+ cnow = info->icount; /* atomic copy */
+ spin_unlock_irqrestore(&info->card->card_lock, flags);
+
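+ /* Report whether any modem line selected in arg changed since the
+ snapshot in *cprev, then refresh the snapshot. */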
+ ret = ((arg & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
+ ((arg & TIOCM_CD) && (cnow.dcd != cprev->dcd)) ||
+ ((arg & TIOCM_CTS) && (cnow.cts != cprev->cts));
+
+ *cprev = cnow;
+
+ return ret;
+}
+
+/*
+ * This routine allows the tty driver to implement device-
+ * specific ioctl's. If the ioctl number passed in cmd is
+ * not recognized by the driver, it should return ENOIOCTLCMD.
+ */
+static int
+cy_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cyclades_port *info = tty->driver_data;
+ struct cyclades_icount cnow; /* kernel counter temps */
+ int ret_val = 0;
+ unsigned long flags;
+ void __user *argp = (void __user *)arg;
+
+ if (serial_paranoia_check(info, tty->name, "cy_ioctl"))
+ return -ENODEV;
+
+#ifdef CY_DEBUG_OTHER
+ printk(KERN_DEBUG "cyc:cy_ioctl ttyC%d, cmd = %x arg = %lx\n",
+ info->line, cmd, arg);
+#endif
+
+ switch (cmd) {
+ case CYGETMON:
+ if (copy_to_user(argp, &info->mon, sizeof(info->mon))) {
+ ret_val = -EFAULT;
+ break;
+ }
+ memset(&info->mon, 0, sizeof(info->mon));
+ break;
+ case CYGETTHRESH:
+ ret_val = get_threshold(info, argp);
+ break;
+ case CYSETTHRESH:
+ ret_val = set_threshold(info, arg);
+ break;
+ case CYGETDEFTHRESH:
+ ret_val = put_user(info->default_threshold,
+ (unsigned long __user *)argp);
+ break;
+ case CYSETDEFTHRESH:
+ info->default_threshold = arg & 0x0f;
+ break;
+ case CYGETTIMEOUT:
+ ret_val = get_timeout(info, argp);
+ break;
+ case CYSETTIMEOUT:
+ ret_val = set_timeout(info, arg);
+ break;
+ case CYGETDEFTIMEOUT:
+ ret_val = put_user(info->default_timeout,
+ (unsigned long __user *)argp);
+ break;
+ case CYSETDEFTIMEOUT:
+ info->default_timeout = arg & 0xff;
+ break;
+ case CYSETRFLOW:
+ info->rflow = (int)arg;
+ break;
+ case CYGETRFLOW:
+ ret_val = info->rflow;
+ break;
+ case CYSETRTSDTR_INV:
+ info->rtsdtr_inv = (int)arg;
+ break;
+ case CYGETRTSDTR_INV:
+ ret_val = info->rtsdtr_inv;
+ break;
+ case CYGETCD1400VER:
+ ret_val = info->chip_rev;
+ break;
+#ifndef CONFIG_CYZ_INTR
+ case CYZSETPOLLCYCLE:
+ cyz_polling_cycle = (arg * HZ) / 1000;
+ break;
+ case CYZGETPOLLCYCLE:
+ ret_val = (cyz_polling_cycle * 1000) / HZ;
+ break;
+#endif /* CONFIG_CYZ_INTR */
+ case CYSETWAIT:
+ info->port.closing_wait = (unsigned short)arg * HZ / 100;
+ break;
+ case CYGETWAIT:
+ ret_val = info->port.closing_wait / (HZ / 100);
+ break;
+ case TIOCGSERIAL:
+ ret_val = cy_get_serial_info(info, argp);
+ break;
+ case TIOCSSERIAL:
+ ret_val = cy_set_serial_info(info, tty, argp);
+ break;
+ case TIOCSERGETLSR: /* Get line status register */
+ ret_val = get_lsr_info(info, argp);
+ break;
+ /*
+ * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
+ * - mask passed in arg for lines of interest
+ * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
+ * Caller should use TIOCGICOUNT to see which one it was
+ */
+ case TIOCMIWAIT:
+ spin_lock_irqsave(&info->card->card_lock, flags);
+ /* note the counters on entry */
+ cnow = info->icount;
+ spin_unlock_irqrestore(&info->card->card_lock, flags);
+ ret_val = wait_event_interruptible(info->port.delta_msr_wait,
+ cy_cflags_changed(info, arg, &cnow));
+ break;
+
+ /*
+ * The counters of input serial line interrupts (DCD, RI, DSR, CTS)
+ * are reported through cy_get_icount(), which the tty core calls
+ * for TIOCGICOUNT. Both 1->0 and 0->1 transitions are counted,
+ * except for RI, where only 0->1 is counted.
+ */
+ default:
+ ret_val = -ENOIOCTLCMD;
+ }
+
+#ifdef CY_DEBUG_OTHER
+ printk(KERN_DEBUG "cyc:cy_ioctl done\n");
+#endif
+ return ret_val;
+} /* cy_ioctl */
+
+static int cy_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *sic)
+{
+ struct cyclades_port *info = tty->driver_data;
+ struct cyclades_icount cnow; /* Used to snapshot */
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->card->card_lock, flags);
+ cnow = info->icount;
+ spin_unlock_irqrestore(&info->card->card_lock, flags);
+
+ sic->cts = cnow.cts;
+ sic->dsr = cnow.dsr;
+ sic->rng = cnow.rng;
+ sic->dcd = cnow.dcd;
+ sic->rx = cnow.rx;
+ sic->tx = cnow.tx;
+ sic->frame = cnow.frame;
+ sic->overrun = cnow.overrun;
+ sic->parity = cnow.parity;
+ sic->brk = cnow.brk;
+ sic->buf_overrun = cnow.buf_overrun;
+ return 0;
+}
+
+/*
+ * This routine allows the tty driver to be notified when
+ * device's termios settings have changed. Note that a
+ * well-designed tty driver should be prepared to accept the case
+ * where old == NULL, and try to do something rational.
+ */
+static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ struct cyclades_port *info = tty->driver_data;
+
+#ifdef CY_DEBUG_OTHER
+ printk(KERN_DEBUG "cyc:cy_set_termios ttyC%d\n", info->line);
+#endif
+
+ cy_set_line_char(info, tty);
+
+ if ((old_termios->c_cflag & CRTSCTS) &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ cy_start(tty);
+ }
+#if 0
+ /*
+ * No need to wake up processes in open wait, since they
+ * sample the CLOCAL flag once, and don't recheck it.
+ * XXX It's not clear whether the current behavior is correct
+ * or not. Hence, this may change.....
+ */
+ if (!(old_termios->c_cflag & CLOCAL) &&
+ (tty->termios->c_cflag & CLOCAL))
+ wake_up_interruptible(&info->port.open_wait);
+#endif
+} /* cy_set_termios */
+
+/* This function is used to send a high-priority XON/XOFF character to
+ the device.
+*/
+static void cy_send_xchar(struct tty_struct *tty, char ch)
+{
+ struct cyclades_port *info = tty->driver_data;
+ struct cyclades_card *card;
+ int channel;
+
+ if (serial_paranoia_check(info, tty->name, "cy_send_xchar"))
+ return;
+
+ info->x_char = ch;
+
+ if (ch)
+ cy_start(tty);
+
+ card = info->card;
+ channel = info->line - card->first_line;
+
+ if (cy_is_Z(card)) {
+ if (ch == STOP_CHAR(tty))
+ cyz_issue_cmd(card, channel, C_CM_SENDXOFF, 0L);
+ else if (ch == START_CHAR(tty))
+ cyz_issue_cmd(card, channel, C_CM_SENDXON, 0L);
+ }
+}
+
+/* This routine is called by the upper tty layer to signal that
+ incoming characters should be throttled because the input
+ buffers are close to full.
+ */
+static void cy_throttle(struct tty_struct *tty)
+{
+ struct cyclades_port *info = tty->driver_data;
+ struct cyclades_card *card;
+ unsigned long flags;
+
+#ifdef CY_DEBUG_THROTTLE
+ char buf[64];
+
+ printk(KERN_DEBUG "cyc:throttle %s: %ld...ttyC%d\n", tty_name(tty, buf),
+ tty->ldisc.chars_in_buffer(tty), info->line);
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "cy_throttle"))
+ return;
+
+ card = info->card;
+
+ if (I_IXOFF(tty)) {
+ if (!cy_is_Z(card))
+ cy_send_xchar(tty, STOP_CHAR(tty));
+ else
+ info->throttle = 1;
+ }
+
+ if (tty->termios->c_cflag & CRTSCTS) {
+ if (!cy_is_Z(card)) {
+ spin_lock_irqsave(&card->card_lock, flags);
+ cyy_change_rts_dtr(info, 0, TIOCM_RTS);
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ } else {
+ info->throttle = 1;
+ }
+ }
+} /* cy_throttle */
+
+/*
+ * This routine notifies the tty driver that it should signal
+ * that characters can now be sent to the tty without fear of
+ * overrunning the input buffers of the line disciplines.
+ */
+static void cy_unthrottle(struct tty_struct *tty)
+{
+ struct cyclades_port *info = tty->driver_data;
+ struct cyclades_card *card;
+ unsigned long flags;
+
+#ifdef CY_DEBUG_THROTTLE
+ char buf[64];
+
+ printk(KERN_DEBUG "cyc:unthrottle %s: %ld...ttyC%d\n",
+ tty_name(tty, buf), tty_chars_in_buffer(tty), info->line);
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "cy_unthrottle"))
+ return;
+
+ if (I_IXOFF(tty)) {
+ if (info->x_char)
+ info->x_char = 0;
+ else
+ cy_send_xchar(tty, START_CHAR(tty));
+ }
+
+ if (tty->termios->c_cflag & CRTSCTS) {
+ card = info->card;
+ if (!cy_is_Z(card)) {
+ spin_lock_irqsave(&card->card_lock, flags);
+ cyy_change_rts_dtr(info, TIOCM_RTS, 0);
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ } else {
+ info->throttle = 0;
+ }
+ }
+} /* cy_unthrottle */
+
+/* cy_start and cy_stop provide software output flow control as a
+ function of XON/XOFF, software CTS, and other such stuff.
+*/
+static void cy_stop(struct tty_struct *tty)
+{
+ struct cyclades_card *cinfo;
+ struct cyclades_port *info = tty->driver_data;
+ int channel;
+ unsigned long flags;
+
+#ifdef CY_DEBUG_OTHER
+ printk(KERN_DEBUG "cyc:cy_stop ttyC%d\n", info->line);
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "cy_stop"))
+ return;
+
+ cinfo = info->card;
+ channel = info->line - cinfo->first_line;
+ if (!cy_is_Z(cinfo)) {
+ spin_lock_irqsave(&cinfo->card_lock, flags);
+ cyy_writeb(info, CyCAR, channel & 0x03);
+ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyTxRdy);
+ spin_unlock_irqrestore(&cinfo->card_lock, flags);
+ }
+} /* cy_stop */
+
+static void cy_start(struct tty_struct *tty)
+{
+ struct cyclades_card *cinfo;
+ struct cyclades_port *info = tty->driver_data;
+ int channel;
+ unsigned long flags;
+
+#ifdef CY_DEBUG_OTHER
+ printk(KERN_DEBUG "cyc:cy_start ttyC%d\n", info->line);
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "cy_start"))
+ return;
+
+ cinfo = info->card;
+ channel = info->line - cinfo->first_line;
+ if (!cy_is_Z(cinfo)) {
+ spin_lock_irqsave(&cinfo->card_lock, flags);
+ cyy_writeb(info, CyCAR, channel & 0x03);
+ cyy_writeb(info, CySRER, cyy_readb(info, CySRER) | CyTxRdy);
+ spin_unlock_irqrestore(&cinfo->card_lock, flags);
+ }
+} /* cy_start */
+
+/*
+ * cy_hangup() --- called by tty_hangup() when a hangup is signaled.
+ */
+static void cy_hangup(struct tty_struct *tty)
+{
+ struct cyclades_port *info = tty->driver_data;
+
+#ifdef CY_DEBUG_OTHER
+ printk(KERN_DEBUG "cyc:cy_hangup ttyC%d\n", info->line);
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "cy_hangup"))
+ return;
+
+ cy_flush_buffer(tty);
+ cy_shutdown(info, tty);
+ tty_port_hangup(&info->port);
+} /* cy_hangup */
+
+static int cyy_carrier_raised(struct tty_port *port)
+{
+ struct cyclades_port *info = container_of(port, struct cyclades_port,
+ port);
+ struct cyclades_card *cinfo = info->card;
+ unsigned long flags;
+ int channel = info->line - cinfo->first_line;
+ u32 cd;
+
+ spin_lock_irqsave(&cinfo->card_lock, flags);
+ cyy_writeb(info, CyCAR, channel & 0x03);
+ cd = cyy_readb(info, CyMSVR1) & CyDCD;
+ spin_unlock_irqrestore(&cinfo->card_lock, flags);
+
+ return cd;
+}
+
+static void cyy_dtr_rts(struct tty_port *port, int raise)
+{
+ struct cyclades_port *info = container_of(port, struct cyclades_port,
+ port);
+ struct cyclades_card *cinfo = info->card;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cinfo->card_lock, flags);
+ cyy_change_rts_dtr(info, raise ? TIOCM_RTS | TIOCM_DTR : 0,
+ raise ? 0 : TIOCM_RTS | TIOCM_DTR);
+ spin_unlock_irqrestore(&cinfo->card_lock, flags);
+}
+
+static int cyz_carrier_raised(struct tty_port *port)
+{
+ struct cyclades_port *info = container_of(port, struct cyclades_port,
+ port);
+
+ return readl(&info->u.cyz.ch_ctrl->rs_status) & C_RS_DCD;
+}
+
+static void cyz_dtr_rts(struct tty_port *port, int raise)
+{
+ struct cyclades_port *info = container_of(port, struct cyclades_port,
+ port);
+ struct cyclades_card *cinfo = info->card;
+ struct CH_CTRL __iomem *ch_ctrl = info->u.cyz.ch_ctrl;
+ int ret, channel = info->line - cinfo->first_line;
+ u32 rs;
+
+ rs = readl(&ch_ctrl->rs_control);
+ if (raise)
+ rs |= C_RS_RTS | C_RS_DTR;
+ else
+ rs &= ~(C_RS_RTS | C_RS_DTR);
+ cy_writel(&ch_ctrl->rs_control, rs);
+ ret = cyz_issue_cmd(cinfo, channel, C_CM_IOCTLM, 0L);
+ if (ret != 0)
+ printk(KERN_ERR "%s: retval on ttyC%d was %x\n",
+ __func__, info->line, ret);
+#ifdef CY_DEBUG_DTR
+ printk(KERN_DEBUG "%s: raising Z DTR\n", __func__);
+#endif
+}
+
+static const struct tty_port_operations cyy_port_ops = {
+ .carrier_raised = cyy_carrier_raised,
+ .dtr_rts = cyy_dtr_rts,
+ .shutdown = cy_do_close,
+};
+
+static const struct tty_port_operations cyz_port_ops = {
+ .carrier_raised = cyz_carrier_raised,
+ .dtr_rts = cyz_dtr_rts,
+ .shutdown = cy_do_close,
+};
+
+/*
+ * ---------------------------------------------------------------------
+ * cy_init() and friends
+ *
+ * cy_init() is called at boot-time to initialize the serial driver.
+ * ---------------------------------------------------------------------
+ */
+
+static int __devinit cy_init_card(struct cyclades_card *cinfo)
+{
+ struct cyclades_port *info;
+ unsigned int channel, port;
+
+ spin_lock_init(&cinfo->card_lock);
+ cinfo->intr_enabled = 0;
+
+ cinfo->ports = kcalloc(cinfo->nports, sizeof(*cinfo->ports),
+ GFP_KERNEL);
+ if (cinfo->ports == NULL) {
+ printk(KERN_ERR "Cyclades: cannot allocate ports\n");
+ return -ENOMEM;
+ }
+
+ for (channel = 0, port = cinfo->first_line; channel < cinfo->nports;
+ channel++, port++) {
+ info = &cinfo->ports[channel];
+ tty_port_init(&info->port);
+ info->magic = CYCLADES_MAGIC;
+ info->card = cinfo;
+ info->line = port;
+
+ info->port.closing_wait = CLOSING_WAIT_DELAY;
+ info->port.close_delay = 5 * HZ / 10;
+ info->port.flags = STD_COM_FLAGS;
+ init_completion(&info->shutdown_wait);
+
+ if (cy_is_Z(cinfo)) {
+ struct FIRM_ID *firm_id = cinfo->base_addr + ID_ADDRESS;
+ struct ZFW_CTRL *zfw_ctrl;
+
+ info->port.ops = &cyz_port_ops;
+ info->type = PORT_STARTECH;
+
+ zfw_ctrl = cinfo->base_addr +
+ (readl(&firm_id->zfwctrl_addr) & 0xfffff);
+ info->u.cyz.ch_ctrl = &zfw_ctrl->ch_ctrl[channel];
+ info->u.cyz.buf_ctrl = &zfw_ctrl->buf_ctrl[channel];
+
+ if (cinfo->hw_ver == ZO_V1)
+ info->xmit_fifo_size = CYZ_FIFO_SIZE;
+ else
+ info->xmit_fifo_size = 4 * CYZ_FIFO_SIZE;
+#ifdef CONFIG_CYZ_INTR
+ setup_timer(&cyz_rx_full_timer[port],
+ cyz_rx_restart, (unsigned long)info);
+#endif
+ } else {
+ unsigned short chip_number;
+ int index = cinfo->bus_index;
+
+ info->port.ops = &cyy_port_ops;
+ info->type = PORT_CIRRUS;
+ info->xmit_fifo_size = CyMAX_CHAR_FIFO;
+ info->cor1 = CyPARITY_NONE | Cy_1_STOP | Cy_8_BITS;
+ info->cor2 = CyETC;
+ info->cor3 = 0x08; /* _very_ small rcv threshold */
+
+ chip_number = channel / CyPORTS_PER_CHIP;
+ info->u.cyy.base_addr = cinfo->base_addr +
+ (cy_chip_offset[chip_number] << index);
+ info->chip_rev = cyy_readb(info, CyGFRCR);
+
+ if (info->chip_rev >= CD1400_REV_J) {
+ /* It is a CD1400 rev. J or later */
+ info->tbpr = baud_bpr_60[13]; /* Tx BPR */
+ info->tco = baud_co_60[13]; /* Tx CO */
+ info->rbpr = baud_bpr_60[13]; /* Rx BPR */
+ info->rco = baud_co_60[13]; /* Rx CO */
+ info->rtsdtr_inv = 1;
+ } else {
+ info->tbpr = baud_bpr_25[13]; /* Tx BPR */
+ info->tco = baud_co_25[13]; /* Tx CO */
+ info->rbpr = baud_bpr_25[13]; /* Rx BPR */
+ info->rco = baud_co_25[13]; /* Rx CO */
+ info->rtsdtr_inv = 0;
+ }
+ info->read_status_mask = CyTIMEOUT | CySPECHAR |
+ CyBREAK | CyPARITY | CyFRAME | CyOVERRUN;
+ }
+
+ }
+
+#ifndef CONFIG_CYZ_INTR
+ if (cy_is_Z(cinfo) && !timer_pending(&cyz_timerlist)) {
+ mod_timer(&cyz_timerlist, jiffies + 1);
+#ifdef CY_PCI_DEBUG
+ printk(KERN_DEBUG "Cyclades-Z polling initialized\n");
+#endif
+ }
+#endif
+ return 0;
+}
+
+/* initialize chips on Cyclom-Y card -- return number of valid
+ chips (which is number of ports/4) */
+static unsigned short __devinit cyy_init_card(void __iomem *true_base_addr,
+ int index)
+{
+ unsigned int chip_number;
+ void __iomem *base_addr;
+
+ cy_writeb(true_base_addr + (Cy_HwReset << index), 0);
+ /* Cy_HwReset is 0x1400 */
+ cy_writeb(true_base_addr + (Cy_ClrIntr << index), 0);
+ /* Cy_ClrIntr is 0x1800 */
+ udelay(500L);
+
+ for (chip_number = 0; chip_number < CyMAX_CHIPS_PER_CARD;
+ chip_number++) {
+ base_addr =
+ true_base_addr + (cy_chip_offset[chip_number] << index);
+ mdelay(1);
+ if (readb(base_addr + (CyCCR << index)) != 0x00) {
+ /*************
+ printk(" chip #%d at %#6lx is never idle (CCR != 0)\n",
+ chip_number, (unsigned long)base_addr);
+ *************/
+ return chip_number;
+ }
+
+ cy_writeb(base_addr + (CyGFRCR << index), 0);
+ udelay(10L);
+
+ /* The Cyclom-16Y does not decode address bit 9 and therefore
+ cannot distinguish between references to chip 0 and a non-
+ existent chip 4. If the preceding clearing of the supposed
+ chip 4 GFRCR register appears at chip 0, there is no chip 4
+ and this must be a Cyclom-16Y, not a Cyclom-32Ye.
+ */
+ if (chip_number == 4 && readb(true_base_addr +
+ (cy_chip_offset[0] << index) +
+ (CyGFRCR << index)) == 0) {
+ return chip_number;
+ }
+
+ cy_writeb(base_addr + (CyCCR << index), CyCHIP_RESET);
+ mdelay(1);
+
+ if (readb(base_addr + (CyGFRCR << index)) == 0x00) {
+ /*
+ printk(" chip #%d at %#6lx is not responding ",
+ chip_number, (unsigned long)base_addr);
+ printk("(GFRCR stayed 0)\n",
+ */
+ return chip_number;
+ }
+ if ((0xf0 & (readb(base_addr + (CyGFRCR << index)))) !=
+ 0x40) {
+ /*
+ printk(" chip #%d at %#6lx is not valid (GFRCR == "
+ "%#2x)\n",
+ chip_number, (unsigned long)base_addr,
+ base_addr[CyGFRCR<<index]);
+ */
+ return chip_number;
+ }
+ cy_writeb(base_addr + (CyGCR << index), CyCH0_SERIAL);
+ if (readb(base_addr + (CyGFRCR << index)) >= CD1400_REV_J) {
+ /* It is a CD1400 rev. J or later */
+ /* Impossible to reach 5ms with this chip.
+ Changed to 2ms instead (f = 500 Hz). */
+ cy_writeb(base_addr + (CyPPR << index), CyCLOCK_60_2MS);
+ } else {
+ /* f = 200 Hz */
+ cy_writeb(base_addr + (CyPPR << index), CyCLOCK_25_5MS);
+ }
+
+ /*
+ printk(" chip #%d at %#6lx is rev 0x%2x\n",
+ chip_number, (unsigned long)base_addr,
+ readb(base_addr+(CyGFRCR<<index)));
+ */
+ }
+ return chip_number;
+} /* cyy_init_card */
+
+/*
+ * ---------------------------------------------------------------------
+ * cy_detect_isa() - Probe for Cyclom-Y/ISA boards.
+ * sets global variables and return the number of ISA boards found.
+ * ---------------------------------------------------------------------
+ */
+static int __init cy_detect_isa(void)
+{
+#ifdef CONFIG_ISA
+ unsigned short cy_isa_irq, nboard;
+ void __iomem *cy_isa_address;
+ unsigned short i, j, cy_isa_nchan;
+ int isparam = 0;
+
+ nboard = 0;
+
+ /* Check for module parameters */
+ for (i = 0; i < NR_CARDS; i++) {
+ if (maddr[i] || i) {
+ isparam = 1;
+ cy_isa_addresses[i] = maddr[i];
+ }
+ if (!maddr[i])
+ break;
+ }
+
+ /* scan the address table probing for Cyclom-Y/ISA boards */
+ for (i = 0; i < NR_ISA_ADDRS; i++) {
+ unsigned int isa_address = cy_isa_addresses[i];
+ if (isa_address == 0x0000)
+ return nboard;
+
+ /* probe for CD1400... */
+ cy_isa_address = ioremap_nocache(isa_address, CyISA_Ywin);
+ if (cy_isa_address == NULL) {
+ printk(KERN_ERR "Cyclom-Y/ISA: can't remap base "
+ "address\n");
+ continue;
+ }
+ cy_isa_nchan = CyPORTS_PER_CHIP *
+ cyy_init_card(cy_isa_address, 0);
+ if (cy_isa_nchan == 0) {
+ iounmap(cy_isa_address);
+ continue;
+ }
+
+ if (isparam && i < NR_CARDS && irq[i])
+ cy_isa_irq = irq[i];
+ else
+ /* find out the board's irq by probing */
+ cy_isa_irq = detect_isa_irq(cy_isa_address);
+ if (cy_isa_irq == 0) {
+ printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but the "
+ "IRQ could not be detected.\n",
+ (unsigned long)cy_isa_address);
+ iounmap(cy_isa_address);
+ continue;
+ }
+
+ if ((cy_next_channel + cy_isa_nchan) > NR_PORTS) {
+ printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but no "
+ "more channels are available. Change NR_PORTS "
+ "in cyclades.c and recompile kernel.\n",
+ (unsigned long)cy_isa_address);
+ iounmap(cy_isa_address);
+ return nboard;
+ }
+ /* fill the next cy_card structure available */
+ for (j = 0; j < NR_CARDS; j++) {
+ if (cy_card[j].base_addr == NULL)
+ break;
+ }
+ if (j == NR_CARDS) { /* no more cy_cards available */
+ printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but no "
+ "more cards can be used. Change NR_CARDS in "
+ "cyclades.c and recompile kernel.\n",
+ (unsigned long)cy_isa_address);
+ iounmap(cy_isa_address);
+ return nboard;
+ }
+
+ /* allocate IRQ */
+ if (request_irq(cy_isa_irq, cyy_interrupt,
+ IRQF_DISABLED, "Cyclom-Y", &cy_card[j])) {
+ printk(KERN_ERR "Cyclom-Y/ISA found at 0x%lx, but "
+ "could not allocate IRQ#%d.\n",
+ (unsigned long)cy_isa_address, cy_isa_irq);
+ iounmap(cy_isa_address);
+ return nboard;
+ }
+
+ /* set cy_card */
+ cy_card[j].base_addr = cy_isa_address;
+ cy_card[j].ctl_addr.p9050 = NULL;
+ cy_card[j].irq = (int)cy_isa_irq;
+ cy_card[j].bus_index = 0;
+ cy_card[j].first_line = cy_next_channel;
+ cy_card[j].num_chips = cy_isa_nchan / CyPORTS_PER_CHIP;
+ cy_card[j].nports = cy_isa_nchan;
+ if (cy_init_card(&cy_card[j])) {
+ cy_card[j].base_addr = NULL;
+ free_irq(cy_isa_irq, &cy_card[j]);
+ iounmap(cy_isa_address);
+ continue;
+ }
+ nboard++;
+
+ printk(KERN_INFO "Cyclom-Y/ISA #%d: 0x%lx-0x%lx, IRQ%d found: "
+ "%d channels starting from port %d\n",
+ j + 1, (unsigned long)cy_isa_address,
+ (unsigned long)(cy_isa_address + (CyISA_Ywin - 1)),
+ cy_isa_irq, cy_isa_nchan, cy_next_channel);
+
+ for (j = cy_next_channel;
+ j < cy_next_channel + cy_isa_nchan; j++)
+ tty_register_device(cy_serial_driver, j, NULL);
+ cy_next_channel += cy_isa_nchan;
+ }
+ return nboard;
+#else
+ return 0;
+#endif /* CONFIG_ISA */
+} /* cy_detect_isa */
+
+#ifdef CONFIG_PCI
+static inline int __devinit cyc_isfwstr(const char *str, unsigned int size)
+{
+ unsigned int a;
+
+ for (a = 0; a < size && *str; a++, str++)
+ if (*str & 0x80)
+ return -EINVAL;
+
+ for (; a < size; a++, str++)
+ if (*str)
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline void __devinit cyz_fpga_copy(void __iomem *fpga, const u8 *data,
+ unsigned int size)
+{
+ for (; size > 0; size--) {
+ cy_writel(fpga, *data++);
+ udelay(10);
+ }
+}
+
+static void __devinit plx_init(struct pci_dev *pdev, int irq,
+ struct RUNTIME_9060 __iomem *addr)
+{
+ /* Reset PLX */
+ cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x40000000);
+ udelay(100L);
+ cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x40000000);
+
+ /* Reload Config. Registers from EEPROM */
+ cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x20000000);
+ udelay(100L);
+ cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x20000000);
+
+ /* For some yet unknown reason, once the PLX9060 reloads the EEPROM,
+ * the IRQ is lost and, thus, we have to re-write it to the PCI config.
+ * registers. This will remain here until we find a permanent fix.
+ */
+ pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
+}
+
+static int __devinit __cyz_load_fw(const struct firmware *fw,
+ const char *name, const u32 mailbox, void __iomem *base,
+ void __iomem *fpga)
+{
+ const void *ptr = fw->data;
+ const struct zfile_header *h = ptr;
+ const struct zfile_config *c, *cs;
+ const struct zfile_block *b, *bs;
+ unsigned int a, tmp, len = fw->size;
+#define BAD_FW KERN_ERR "Bad firmware: "
+ if (len < sizeof(*h)) {
+ printk(BAD_FW "too short: %u<%zu\n", len, sizeof(*h));
+ return -EINVAL;
+ }
+
+ cs = ptr + h->config_offset;
+ bs = ptr + h->block_offset;
+
+ if ((void *)(cs + h->n_config) > ptr + len ||
+ (void *)(bs + h->n_blocks) > ptr + len) {
+ printk(BAD_FW "too short");
+ return -EINVAL;
+ }
+
+ if (cyc_isfwstr(h->name, sizeof(h->name)) ||
+ cyc_isfwstr(h->date, sizeof(h->date))) {
+ printk(BAD_FW "bad formatted header string\n");
+ return -EINVAL;
+ }
+
+ if (strncmp(name, h->name, sizeof(h->name))) {
+ printk(BAD_FW "bad name '%s' (expected '%s')\n", h->name, name);
+ return -EINVAL;
+ }
+
+ tmp = 0;
+ for (c = cs; c < cs + h->n_config; c++) {
+ for (a = 0; a < c->n_blocks; a++)
+ if (c->block_list[a] > h->n_blocks) {
+ printk(BAD_FW "bad block ref number in cfgs\n");
+ return -EINVAL;
+ }
+ if (c->mailbox == mailbox && c->function == 0) /* 0 is normal */
+ tmp++;
+ }
+ if (!tmp) {
+ printk(BAD_FW "nothing appropriate\n");
+ return -EINVAL;
+ }
+
+ for (b = bs; b < bs + h->n_blocks; b++)
+ if (b->file_offset + b->size > len) {
+ printk(BAD_FW "bad block data offset\n");
+ return -EINVAL;
+ }
+
+ /* everything is OK, let's seek'n'load it */
+ for (c = cs; c < cs + h->n_config; c++)
+ if (c->mailbox == mailbox && c->function == 0)
+ break;
+
+ for (a = 0; a < c->n_blocks; a++) {
+ b = &bs[c->block_list[a]];
+ if (b->type == ZBLOCK_FPGA) {
+ if (fpga != NULL)
+ cyz_fpga_copy(fpga, ptr + b->file_offset,
+ b->size);
+ } else {
+ if (base != NULL)
+ memcpy_toio(base + b->ram_offset,
+ ptr + b->file_offset, b->size);
+ }
+ }
+#undef BAD_FW
+ return 0;
+}
+
+static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
+ struct RUNTIME_9060 __iomem *ctl_addr, int irq)
+{
+ const struct firmware *fw;
+ struct FIRM_ID __iomem *fid = base_addr + ID_ADDRESS;
+ struct CUSTOM_REG __iomem *cust = base_addr;
+ struct ZFW_CTRL __iomem *pt_zfwctrl;
+ void __iomem *tmp;
+ u32 mailbox, status, nchan;
+ unsigned int i;
+ int retval;
+
+ retval = request_firmware(&fw, "cyzfirm.bin", &pdev->dev);
+ if (retval) {
+ dev_err(&pdev->dev, "can't get firmware\n");
+ goto err;
+ }
+
+ /* Check whether the firmware is already loaded and running. If
+ positive, skip this board */
+ if (__cyz_fpga_loaded(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) {
+ u32 cntval = readl(base_addr + 0x190);
+
+ udelay(100);
+ if (cntval != readl(base_addr + 0x190)) {
+ /* FW counter is working, FW is running */
+ dev_dbg(&pdev->dev, "Cyclades-Z FW already loaded. "
+ "Skipping board.\n");
+ retval = 0;
+ goto err_rel;
+ }
+ }
+
+ /* start boot */
+ cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) &
+ ~0x00030800UL);
+
+ mailbox = readl(&ctl_addr->mail_box_0);
+
+ if (mailbox == 0 || __cyz_fpga_loaded(ctl_addr)) {
+ /* stops CPU and set window to beginning of RAM */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
+ cy_writel(&cust->cpu_stop, 0);
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+ udelay(100);
+ }
+
+ plx_init(pdev, irq, ctl_addr);
+
+ if (mailbox != 0) {
+ /* load FPGA */
+ retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, NULL,
+ base_addr);
+ if (retval)
+ goto err_rel;
+ if (!__cyz_fpga_loaded(ctl_addr)) {
+ dev_err(&pdev->dev, "fw upload successful, but fw is "
+ "not loaded\n");
+ goto err_rel;
+ }
+ }
+
+ /* stops CPU and set window to beginning of RAM */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
+ cy_writel(&cust->cpu_stop, 0);
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+ udelay(100);
+
+ /* clear memory */
+ for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
+ cy_writeb(tmp, 255);
+ if (mailbox != 0) {
+ /* set window to last 512K of RAM */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE);
+ for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
+ cy_writeb(tmp, 255);
+ /* set window to beginning of RAM */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+ }
+
+ retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL);
+ release_firmware(fw);
+ if (retval)
+ goto err;
+
+ /* finish boot and start boards */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
+ cy_writel(&cust->cpu_start, 0);
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+ i = 0;
+ while ((status = readl(&fid->signature)) != ZFIRM_ID && i++ < 40)
+ msleep(100);
+ if (status != ZFIRM_ID) {
+ if (status == ZFIRM_HLT) {
+ dev_err(&pdev->dev, "you need an external power supply "
+ "for this number of ports. Firmware halted and "
+ "board reset.\n");
+ retval = -EIO;
+ goto err;
+ }
+ dev_warn(&pdev->dev, "fid->signature = 0x%x... Waiting "
+ "some more time\n", status);
+ while ((status = readl(&fid->signature)) != ZFIRM_ID &&
+ i++ < 200)
+ msleep(100);
+ if (status != ZFIRM_ID) {
+ dev_err(&pdev->dev, "Board not started in 20 seconds! "
+ "Giving up. (fid->signature = 0x%x)\n",
+ status);
+ dev_info(&pdev->dev, "*** Warning ***: if you are "
+ "upgrading the FW, please power cycle the "
+ "system before loading the new FW to the "
+ "Cyclades-Z.\n");
+
+ if (__cyz_fpga_loaded(ctl_addr))
+ plx_init(pdev, irq, ctl_addr);
+
+ retval = -EIO;
+ goto err;
+ }
+ dev_dbg(&pdev->dev, "Firmware started after %d seconds.\n",
+ i / 10);
+ }
+ pt_zfwctrl = base_addr + readl(&fid->zfwctrl_addr);
+
+ dev_dbg(&pdev->dev, "fid=> %p, zfwctrl_addr=> %x, npt_zfwctrl=> %p\n",
+ base_addr + ID_ADDRESS, readl(&fid->zfwctrl_addr),
+ base_addr + readl(&fid->zfwctrl_addr));
+
+ nchan = readl(&pt_zfwctrl->board_ctrl.n_channel);
+ dev_info(&pdev->dev, "Cyclades-Z FW loaded: version = %x, ports = %u\n",
+ readl(&pt_zfwctrl->board_ctrl.fw_version), nchan);
+
+ if (nchan == 0) {
+ dev_warn(&pdev->dev, "no Cyclades-Z ports were found. Please "
+ "check the connection between the Z host card and the "
+ "serial expanders.\n");
+
+ if (__cyz_fpga_loaded(ctl_addr))
+ plx_init(pdev, irq, ctl_addr);
+
+ dev_info(&pdev->dev, "Null number of ports detected. Board "
+ "reset.\n");
+ retval = 0;
+ goto err;
+ }
+
+ cy_writel(&pt_zfwctrl->board_ctrl.op_system, C_OS_LINUX);
+ cy_writel(&pt_zfwctrl->board_ctrl.dr_version, DRIVER_VERSION);
+
+ /*
+ Early firmware failed to start looking for commands.
+ This enables firmware interrupts for those commands.
+ */
+ cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
+ (1 << 17));
+ cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
+ 0x00030800UL);
+
+ return nchan;
+err_rel:
+ release_firmware(fw);
+err:
+ return retval;
+}
+
+static int __devinit cy_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ void __iomem *addr0 = NULL, *addr2 = NULL;
+ char *card_name = NULL;
+ u32 uninitialized_var(mailbox);
+ unsigned int device_id, nchan = 0, card_no, i;
+ unsigned char plx_ver;
+ int retval, irq;
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "cannot enable device\n");
+ goto err;
+ }
+
+ /* read PCI configuration area */
+ irq = pdev->irq;
+ device_id = pdev->device & ~PCI_DEVICE_ID_MASK;
+
+#if defined(__alpha__)
+ if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo) { /* below 1M? */
+ dev_err(&pdev->dev, "Cyclom-Y/PCI not supported for low "
+ "addresses on Alpha systems.\n");
+ retval = -EIO;
+ goto err_dis;
+ }
+#endif
+ if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Lo) {
+ dev_err(&pdev->dev, "Cyclades-Z/PCI not supported for low "
+ "addresses\n");
+ retval = -EIO;
+ goto err_dis;
+ }
+
+ if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) {
+ dev_warn(&pdev->dev, "PCI I/O bit incorrectly set. Ignoring "
+ "it...\n");
+ pdev->resource[2].flags &= ~IORESOURCE_IO;
+ }
+
+ retval = pci_request_regions(pdev, "cyclades");
+ if (retval) {
+ dev_err(&pdev->dev, "failed to reserve resources\n");
+ goto err_dis;
+ }
+
+ retval = -EIO;
+ if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo ||
+ device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) {
+ card_name = "Cyclom-Y";
+
+ addr0 = ioremap_nocache(pci_resource_start(pdev, 0),
+ CyPCI_Yctl);
+ if (addr0 == NULL) {
+ dev_err(&pdev->dev, "can't remap ctl region\n");
+ goto err_reg;
+ }
+ addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
+ CyPCI_Ywin);
+ if (addr2 == NULL) {
+ dev_err(&pdev->dev, "can't remap base region\n");
+ goto err_unmap;
+ }
+
+ nchan = CyPORTS_PER_CHIP * cyy_init_card(addr2, 1);
+ if (nchan == 0) {
+ dev_err(&pdev->dev, "Cyclom-Y PCI host card with no "
+ "Serial-Modules\n");
+ goto err_unmap;
+ }
+ } else if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Hi) {
+ struct RUNTIME_9060 __iomem *ctl_addr;
+
+ ctl_addr = addr0 = ioremap_nocache(pci_resource_start(pdev, 0),
+ CyPCI_Zctl);
+ if (addr0 == NULL) {
+ dev_err(&pdev->dev, "can't remap ctl region\n");
+ goto err_reg;
+ }
+
+ /* Disable interrupts on the PLX before resetting it */
+ cy_writew(&ctl_addr->intr_ctrl_stat,
+ readw(&ctl_addr->intr_ctrl_stat) & ~0x0900);
+
+ plx_init(pdev, irq, addr0);
+
+ mailbox = readl(&ctl_addr->mail_box_0);
+
+ addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
+ mailbox == ZE_V1 ? CyPCI_Ze_win : CyPCI_Zwin);
+ if (addr2 == NULL) {
+ dev_err(&pdev->dev, "can't remap base region\n");
+ goto err_unmap;
+ }
+
+ if (mailbox == ZE_V1) {
+ card_name = "Cyclades-Ze";
+ } else {
+ card_name = "Cyclades-8Zo";
+#ifdef CY_PCI_DEBUG
+ if (mailbox == ZO_V1) {
+ cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
+ dev_info(&pdev->dev, "Cyclades-8Zo/PCI: FPGA "
+ "id %lx, ver %lx\n", (ulong)(0xff &
+ readl(&((struct CUSTOM_REG *)addr2)->
+ fpga_id)), (ulong)(0xff &
+ readl(&((struct CUSTOM_REG *)addr2)->
+ fpga_version)));
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+ } else {
+ dev_info(&pdev->dev, "Cyclades-Z/PCI: New "
+ "Cyclades-Z board. FPGA not loaded\n");
+ }
+#endif
+ /* The following clears the firmware id word. This
+ ensures that the driver will not attempt to talk to
+ the board until it has been properly initialized.
+ */
+ if ((mailbox == ZO_V1) || (mailbox == ZO_V2))
+ cy_writel(addr2 + ID_ADDRESS, 0L);
+ }
+
+ retval = cyz_load_fw(pdev, addr2, addr0, irq);
+ if (retval <= 0)
+ goto err_unmap;
+ nchan = retval;
+ }
+
+ if ((cy_next_channel + nchan) > NR_PORTS) {
+ dev_err(&pdev->dev, "Cyclades-8Zo/PCI found, but no "
+ "channels are available. Change NR_PORTS in "
+ "cyclades.c and recompile kernel.\n");
+ goto err_unmap;
+ }
+ /* fill the next cy_card structure available */
+ for (card_no = 0; card_no < NR_CARDS; card_no++) {
+ if (cy_card[card_no].base_addr == NULL)
+ break;
+ }
+ if (card_no == NR_CARDS) { /* no more cy_cards available */
+ dev_err(&pdev->dev, "Cyclades-8Zo/PCI found, but no "
+ "more cards can be used. Change NR_CARDS in "
+ "cyclades.c and recompile kernel.\n");
+ goto err_unmap;
+ }
+
+ if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo ||
+ device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) {
+ /* allocate IRQ */
+ retval = request_irq(irq, cyy_interrupt,
+ IRQF_SHARED, "Cyclom-Y", &cy_card[card_no]);
+ if (retval) {
+ dev_err(&pdev->dev, "could not allocate IRQ\n");
+ goto err_unmap;
+ }
+ cy_card[card_no].num_chips = nchan / CyPORTS_PER_CHIP;
+ } else {
+ struct FIRM_ID __iomem *firm_id = addr2 + ID_ADDRESS;
+ struct ZFW_CTRL __iomem *zfw_ctrl;
+
+ zfw_ctrl = addr2 + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
+
+ cy_card[card_no].hw_ver = mailbox;
+ cy_card[card_no].num_chips = (unsigned int)-1;
+ cy_card[card_no].board_ctrl = &zfw_ctrl->board_ctrl;
+#ifdef CONFIG_CYZ_INTR
+ /* allocate IRQ only if board has an IRQ */
+ if (irq != 0 && irq != 255) {
+ retval = request_irq(irq, cyz_interrupt,
+ IRQF_SHARED, "Cyclades-Z",
+ &cy_card[card_no]);
+ if (retval) {
+ dev_err(&pdev->dev, "could not allocate IRQ\n");
+ goto err_unmap;
+ }
+ }
+#endif /* CONFIG_CYZ_INTR */
+ }
+
+ /* set cy_card */
+ cy_card[card_no].base_addr = addr2;
+ cy_card[card_no].ctl_addr.p9050 = addr0;
+ cy_card[card_no].irq = irq;
+ cy_card[card_no].bus_index = 1;
+ cy_card[card_no].first_line = cy_next_channel;
+ cy_card[card_no].nports = nchan;
+ retval = cy_init_card(&cy_card[card_no]);
+ if (retval)
+ goto err_null;
+
+ pci_set_drvdata(pdev, &cy_card[card_no]);
+
+ if (device_id == PCI_DEVICE_ID_CYCLOM_Y_Lo ||
+ device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) {
+ /* enable interrupts in the PCI interface */
+ plx_ver = readb(addr2 + CyPLX_VER) & 0x0f;
+ switch (plx_ver) {
+ case PLX_9050:
+ cy_writeb(addr0 + 0x4c, 0x43);
+ break;
+
+ case PLX_9060:
+ case PLX_9080:
+ default: /* Old boards, use PLX_9060 */
+ {
+ struct RUNTIME_9060 __iomem *ctl_addr = addr0;
+ plx_init(pdev, irq, ctl_addr);
+ cy_writew(&ctl_addr->intr_ctrl_stat,
+ readw(&ctl_addr->intr_ctrl_stat) | 0x0900);
+ break;
+ }
+ }
+ }
+
+ dev_info(&pdev->dev, "%s/PCI #%d found: %d channels starting from "
+ "port %d.\n", card_name, card_no + 1, nchan, cy_next_channel);
+ for (i = cy_next_channel; i < cy_next_channel + nchan; i++)
+ tty_register_device(cy_serial_driver, i, &pdev->dev);
+ cy_next_channel += nchan;
+
+ return 0;
+err_null:
+ cy_card[card_no].base_addr = NULL;
+ free_irq(irq, &cy_card[card_no]);
+err_unmap:
+ iounmap(addr0);
+ if (addr2)
+ iounmap(addr2);
+err_reg:
+ pci_release_regions(pdev);
+err_dis:
+ pci_disable_device(pdev);
+err:
+ return retval;
+}
+
+static void __devexit cy_pci_remove(struct pci_dev *pdev)
+{
+ struct cyclades_card *cinfo = pci_get_drvdata(pdev);
+ unsigned int i;
+
+ /* non-Z with old PLX */
+ if (!cy_is_Z(cinfo) && (readb(cinfo->base_addr + CyPLX_VER) & 0x0f) ==
+ PLX_9050)
+ cy_writeb(cinfo->ctl_addr.p9050 + 0x4c, 0);
+ else
+#ifndef CONFIG_CYZ_INTR
+ if (!cy_is_Z(cinfo))
+#endif
+ cy_writew(&cinfo->ctl_addr.p9060->intr_ctrl_stat,
+ readw(&cinfo->ctl_addr.p9060->intr_ctrl_stat) &
+ ~0x0900);
+
+ iounmap(cinfo->base_addr);
+ if (cinfo->ctl_addr.p9050)
+ iounmap(cinfo->ctl_addr.p9050);
+ if (cinfo->irq
+#ifndef CONFIG_CYZ_INTR
+ && !cy_is_Z(cinfo)
+#endif /* CONFIG_CYZ_INTR */
+ )
+ free_irq(cinfo->irq, cinfo);
+ pci_release_regions(pdev);
+
+ cinfo->base_addr = NULL;
+ for (i = cinfo->first_line; i < cinfo->first_line +
+ cinfo->nports; i++)
+ tty_unregister_device(cy_serial_driver, i);
+ cinfo->nports = 0;
+ kfree(cinfo->ports);
+}
+
+static struct pci_driver cy_pci_driver = {
+ .name = "cyclades",
+ .id_table = cy_pci_dev_id,
+ .probe = cy_pci_probe,
+ .remove = __devexit_p(cy_pci_remove)
+};
+#endif
+
+static int cyclades_proc_show(struct seq_file *m, void *v)
+{
+ struct cyclades_port *info;
+ unsigned int i, j;
+ __u32 cur_jifs = jiffies;
+
+ seq_puts(m, "Dev TimeOpen BytesOut IdleOut BytesIn "
+ "IdleIn Overruns Ldisc\n");
+
+ /* Output one line for each known port */
+ for (i = 0; i < NR_CARDS; i++)
+ for (j = 0; j < cy_card[i].nports; j++) {
+ info = &cy_card[i].ports[j];
+
+ if (info->port.count) {
+ /* XXX is the ldisc num worth this? */
+ struct tty_struct *tty;
+ struct tty_ldisc *ld;
+ int num = 0;
+ tty = tty_port_tty_get(&info->port);
+ if (tty) {
+ ld = tty_ldisc_ref(tty);
+ if (ld) {
+ num = ld->ops->num;
+ tty_ldisc_deref(ld);
+ }
+ tty_kref_put(tty);
+ }
+ seq_printf(m, "%3d %8lu %10lu %8lu "
+ "%10lu %8lu %9lu %6d\n", info->line,
+ (cur_jifs - info->idle_stats.in_use) /
+ HZ, info->idle_stats.xmit_bytes,
+ (cur_jifs - info->idle_stats.xmit_idle)/
+ HZ, info->idle_stats.recv_bytes,
+ (cur_jifs - info->idle_stats.recv_idle)/
+ HZ, info->idle_stats.overruns,
+ num);
+ } else
+ seq_printf(m, "%3d %8lu %10lu %8lu "
+ "%10lu %8lu %9lu %6ld\n",
+ info->line, 0L, 0L, 0L, 0L, 0L, 0L, 0L);
+ }
+ return 0;
+}
+
+static int cyclades_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cyclades_proc_show, NULL);
+}
+
+static const struct file_operations cyclades_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = cyclades_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* The serial driver boot-time initialization code!
+ Hardware I/O ports are mapped to character special devices in a
+ first found, first allocated manner. That is, this code searches
+ for Cyclom cards in the system. As each is found, it is probed
+ to discover how many chips (and thus how many ports) are present.
+ These ports are mapped to the tty ports 32 and upward in monotonic
+ fashion. If an 8-port card is replaced with a 16-port card, the
+ port mapping on a following card will shift.
+
+ This approach is different from what is used in the other serial
+ device driver because the Cyclom is more properly a multiplexer,
+ not just an aggregation of serial ports on one card.
+
+ If there are more cards with more ports than have been
+ statically allocated above, a warning is printed and the
+ extra ports are ignored.
+ */
+
+static const struct tty_operations cy_ops = {
+ .open = cy_open,
+ .close = cy_close,
+ .write = cy_write,
+ .put_char = cy_put_char,
+ .flush_chars = cy_flush_chars,
+ .write_room = cy_write_room,
+ .chars_in_buffer = cy_chars_in_buffer,
+ .flush_buffer = cy_flush_buffer,
+ .ioctl = cy_ioctl,
+ .throttle = cy_throttle,
+ .unthrottle = cy_unthrottle,
+ .set_termios = cy_set_termios,
+ .stop = cy_stop,
+ .start = cy_start,
+ .hangup = cy_hangup,
+ .break_ctl = cy_break,
+ .wait_until_sent = cy_wait_until_sent,
+ .tiocmget = cy_tiocmget,
+ .tiocmset = cy_tiocmset,
+ .get_icount = cy_get_icount,
+ .proc_fops = &cyclades_proc_fops,
+};
+
+static int __init cy_init(void)
+{
+ unsigned int nboards;
+ int retval = -ENOMEM;
+
+ cy_serial_driver = alloc_tty_driver(NR_PORTS);
+ if (!cy_serial_driver)
+ goto err;
+
+ printk(KERN_INFO "Cyclades driver " CY_VERSION "\n");
+
+ /* Initialize the tty_driver structure */
+
+ cy_serial_driver->owner = THIS_MODULE;
+ cy_serial_driver->driver_name = "cyclades";
+ cy_serial_driver->name = "ttyC";
+ cy_serial_driver->major = CYCLADES_MAJOR;
+ cy_serial_driver->minor_start = 0;
+ cy_serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ cy_serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ cy_serial_driver->init_termios = tty_std_termios;
+ cy_serial_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ cy_serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(cy_serial_driver, &cy_ops);
+
+ retval = tty_register_driver(cy_serial_driver);
+ if (retval) {
+ printk(KERN_ERR "Couldn't register Cyclades serial driver\n");
+ goto err_frtty;
+ }
+
+ /* the code below is responsible to find the boards. Each different
+ type of board has its own detection routine. If a board is found,
+ the next cy_card structure available is set by the detection
+ routine. These functions are responsible for checking the
+ availability of cy_card and cy_port data structures and updating
+ the cy_next_channel. */
+
+ /* look for isa boards */
+ nboards = cy_detect_isa();
+
+#ifdef CONFIG_PCI
+ /* look for pci boards */
+ retval = pci_register_driver(&cy_pci_driver);
+ if (retval && !nboards) {
+ tty_unregister_driver(cy_serial_driver);
+ goto err_frtty;
+ }
+#endif
+
+ return 0;
+err_frtty:
+ put_tty_driver(cy_serial_driver);
+err:
+ return retval;
+} /* cy_init */
+
+static void __exit cy_cleanup_module(void)
+{
+ struct cyclades_card *card;
+ unsigned int i, e1;
+
+#ifndef CONFIG_CYZ_INTR
+ del_timer_sync(&cyz_timerlist);
+#endif /* CONFIG_CYZ_INTR */
+
+ e1 = tty_unregister_driver(cy_serial_driver);
+ if (e1)
+ printk(KERN_ERR "failed to unregister Cyclades serial "
+ "driver(%d)\n", e1);
+
+#ifdef CONFIG_PCI
+ pci_unregister_driver(&cy_pci_driver);
+#endif
+
+ for (i = 0; i < NR_CARDS; i++) {
+ card = &cy_card[i];
+ if (card->base_addr) {
+ /* clear interrupt */
+ cy_writeb(card->base_addr + Cy_ClrIntr, 0);
+ iounmap(card->base_addr);
+ if (card->ctl_addr.p9050)
+ iounmap(card->ctl_addr.p9050);
+ if (card->irq
+#ifndef CONFIG_CYZ_INTR
+ && !cy_is_Z(card)
+#endif /* CONFIG_CYZ_INTR */
+ )
+ free_irq(card->irq, card);
+ for (e1 = card->first_line; e1 < card->first_line +
+ card->nports; e1++)
+ tty_unregister_device(cy_serial_driver, e1);
+ kfree(card->ports);
+ }
+ }
+
+ put_tty_driver(cy_serial_driver);
+} /* cy_cleanup_module */
+
+module_init(cy_init);
+module_exit(cy_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(CY_VERSION);
+MODULE_ALIAS_CHARDEV_MAJOR(CYCLADES_MAJOR);
+MODULE_FIRMWARE("cyzfirm.bin");
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
new file mode 100644
index 00000000..6f2c9809
--- /dev/null
+++ b/drivers/tty/hvc/Kconfig
@@ -0,0 +1,105 @@
+config HVC_DRIVER
+ bool
+ help
+ Generic "hypervisor virtual console" infrastructure for various
+ hypervisors (pSeries, iSeries, Xen, lguest).
+ It will automatically be selected if one of the back-end console drivers
+ is selected.
+
+config HVC_IRQ
+ bool
+
+config HVC_CONSOLE
+ bool "pSeries Hypervisor Virtual Console support"
+ depends on PPC_PSERIES
+ select HVC_DRIVER
+ select HVC_IRQ
+ help
+ pSeries machines when partitioned support a hypervisor virtual
+ console. This driver allows each pSeries partition to have a console
+ which is accessed via the HMC.
+
+config HVC_ISERIES
+ bool "iSeries Hypervisor Virtual Console support"
+ depends on PPC_ISERIES
+ default y
+ select HVC_DRIVER
+ select HVC_IRQ
+ select VIOPATH
+ help
+ iSeries machines support a hypervisor virtual console.
+
+config HVC_RTAS
+ bool "IBM RTAS Console support"
+ depends on PPC_RTAS
+ select HVC_DRIVER
+ help
+ IBM Console device driver which makes use of RTAS
+
+config HVC_BEAT
+ bool "Toshiba's Beat Hypervisor Console support"
+ depends on PPC_CELLEB
+ select HVC_DRIVER
+ help
+ Toshiba's Cell Reference Set Beat Console device driver
+
+config HVC_IUCV
+ bool "z/VM IUCV Hypervisor console support (VM only)"
+ depends on S390
+ select HVC_DRIVER
+ select IUCV
+ default y
+ help
+ This driver provides a Hypervisor console (HVC) back-end to access
+ a Linux (console) terminal via a z/VM IUCV communication path.
+
+config HVC_XEN
+ bool "Xen Hypervisor Console support"
+ depends on XEN
+ select HVC_DRIVER
+ select HVC_IRQ
+ default y
+ help
+ Xen virtual console device driver
+
+config HVC_UDBG
+ bool "udbg based fake hypervisor console"
+ depends on PPC && EXPERIMENTAL
+ select HVC_DRIVER
+ default n
+
+config HVC_DCC
+ bool "ARM JTAG DCC console"
+ depends on ARM
+ select HVC_DRIVER
+ help
+ This console uses the JTAG DCC on ARM to create a console under the HVC
+ driver. It can only be used through JTAG on ARM. If you don't have
+ a JTAG debugger, you probably don't want this option.
+
+config HVC_BFIN_JTAG
+ bool "Blackfin JTAG console"
+ depends on BLACKFIN
+ select HVC_DRIVER
+ help
+ This console uses the Blackfin JTAG to create a console under the
+ HVC driver. If you don't have JTAG, then you probably don't
+ want this option.
+
+config HVCS
+ tristate "IBM Hypervisor Virtual Console Server support"
+ depends on PPC_PSERIES && HVC_CONSOLE
+ help
+ Partitionable IBM Power5 ppc64 machines allow hosting of
+ firmware virtual consoles from one Linux partition by
+ another Linux partition. This driver allows console data
+ from Linux partitions to be accessed through TTY device
+ interfaces in the device tree of a Linux partition running
+ this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hvcs. Additionally, this module
+ will depend on arch specific APIs exported from hvcserver.ko
+ which will also be compiled when this driver is built as a
+ module.
+
diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile
new file mode 100644
index 00000000..40a25d93
--- /dev/null
+++ b/drivers/tty/hvc/Makefile
@@ -0,0 +1,13 @@
+obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
+obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
+obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
+obj-$(CONFIG_HVC_TILE) += hvc_tile.o
+obj-$(CONFIG_HVC_DCC) += hvc_dcc.o
+obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
+obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
+obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
+obj-$(CONFIG_HVC_XEN) += hvc_xen.o
+obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o
+obj-$(CONFIG_HVC_UDBG) += hvc_udbg.o
+obj-$(CONFIG_HVC_BFIN_JTAG) += hvc_bfin_jtag.o
+obj-$(CONFIG_HVCS) += hvcs.o
diff --git a/drivers/tty/hvc/hvc_beat.c b/drivers/tty/hvc/hvc_beat.c
new file mode 100644
index 00000000..5fe4631e
--- /dev/null
+++ b/drivers/tty/hvc/hvc_beat.c
@@ -0,0 +1,134 @@
+/*
+ * Beat hypervisor console driver
+ *
+ * (C) Copyright 2006 TOSHIBA CORPORATION
+ *
+ * This code is based on drivers/char/hvc_rtas.c:
+ * (C) Copyright IBM Corporation 2001-2005
+ * (C) Copyright Red Hat, Inc. 2005
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/console.h>
+#include <asm/prom.h>
+#include <asm/hvconsole.h>
+#include <asm/firmware.h>
+
+#include "hvc_console.h"
+
+extern int64_t beat_get_term_char(uint64_t, uint64_t *, uint64_t *, uint64_t *);
+extern int64_t beat_put_term_char(uint64_t, uint64_t, uint64_t, uint64_t);
+
+struct hvc_struct *hvc_beat_dev = NULL;
+
+/* bug: only one queue is available regardless of vtermno */
+static int hvc_beat_get_chars(uint32_t vtermno, char *buf, int cnt)
+{
+ static unsigned char q[sizeof(unsigned long) * 2]
+ __attribute__((aligned(sizeof(unsigned long))));
+ static int qlen = 0;
+ u64 got;
+
+again:
+ if (qlen) {
+ if (qlen > cnt) {
+ memcpy(buf, q, cnt);
+ qlen -= cnt;
+ memmove(q + cnt, q, qlen);
+ return cnt;
+ } else { /* qlen <= cnt */
+ int r;
+
+ memcpy(buf, q, qlen);
+ r = qlen;
+ qlen = 0;
+ return r;
+ }
+ }
+ if (beat_get_term_char(vtermno, &got,
+ ((u64 *)q), ((u64 *)q) + 1) == 0) {
+ qlen = got;
+ goto again;
+ }
+ return 0;
+}
+
+static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
+{
+ unsigned long kb[2];
+ int rest, nlen;
+
+ for (rest = cnt; rest > 0; rest -= nlen) {
+ nlen = (rest > 16) ? 16 : rest;
+ memcpy(kb, buf, nlen);
+ beat_put_term_char(vtermno, nlen, kb[0], kb[1]);
+ buf += nlen;
+ }
+ return cnt;
+}
+
+static const struct hv_ops hvc_beat_get_put_ops = {
+ .get_chars = hvc_beat_get_chars,
+ .put_chars = hvc_beat_put_chars,
+};
+
+static int hvc_beat_useit = 1;
+
+static int hvc_beat_config(char *p)
+{
+ hvc_beat_useit = simple_strtoul(p, NULL, 0);
+ return 0;
+}
+
+static int __init hvc_beat_console_init(void)
+{
+ if (hvc_beat_useit && of_machine_is_compatible("Beat")) {
+ hvc_instantiate(0, 0, &hvc_beat_get_put_ops);
+ }
+ return 0;
+}
+
+/* temp */
+static int __init hvc_beat_init(void)
+{
+ struct hvc_struct *hp;
+
+ if (!firmware_has_feature(FW_FEATURE_BEAT))
+ return -ENODEV;
+
+ hp = hvc_alloc(0, NO_IRQ, &hvc_beat_get_put_ops, 16);
+ if (IS_ERR(hp))
+ return PTR_ERR(hp);
+ hvc_beat_dev = hp;
+ return 0;
+}
+
+static void __exit hvc_beat_exit(void)
+{
+ if (hvc_beat_dev)
+ hvc_remove(hvc_beat_dev);
+}
+
+module_init(hvc_beat_init);
+module_exit(hvc_beat_exit);
+
+__setup("hvc_beat=", hvc_beat_config);
+
+console_initcall(hvc_beat_console_init);
diff --git a/drivers/tty/hvc/hvc_bfin_jtag.c b/drivers/tty/hvc/hvc_bfin_jtag.c
new file mode 100644
index 00000000..31d6cc6a
--- /dev/null
+++ b/drivers/tty/hvc/hvc_bfin_jtag.c
@@ -0,0 +1,105 @@
+/*
+ * Console via Blackfin JTAG Communication
+ *
+ * Copyright 2008-2011 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#include "hvc_console.h"
+
+/* See the Debug/Emulation chapter in the HRM */
+#define EMUDOF 0x00000001 /* EMUDAT_OUT full & valid */
+#define EMUDIF 0x00000002 /* EMUDAT_IN full & valid */
+#define EMUDOOVF 0x00000004 /* EMUDAT_OUT overflow */
+#define EMUDIOVF 0x00000008 /* EMUDAT_IN overflow */
+
+/* Helper functions to glue the register API to simple C operations */
+static inline uint32_t bfin_write_emudat(uint32_t emudat)
+{
+ __asm__ __volatile__("emudat = %0;" : : "d"(emudat));
+ return emudat;
+}
+
+static inline uint32_t bfin_read_emudat(void)
+{
+ uint32_t emudat;
+ __asm__ __volatile__("%0 = emudat;" : "=d"(emudat));
+ return emudat;
+}
+
+/* Send data to the host */
+static int hvc_bfin_put_chars(uint32_t vt, const char *buf, int count)
+{
+ static uint32_t outbound_len;
+ uint32_t emudat;
+ int ret;
+
+ if (bfin_read_DBGSTAT() & EMUDOF)
+ return 0;
+
+ if (!outbound_len) {
+ outbound_len = count;
+ bfin_write_emudat(outbound_len);
+ return 0;
+ }
+
+ ret = min(outbound_len, (uint32_t)4);
+ memcpy(&emudat, buf, ret);
+ bfin_write_emudat(emudat);
+ outbound_len -= ret;
+
+ return ret;
+}
+
+/* Receive data from the host */
+static int hvc_bfin_get_chars(uint32_t vt, char *buf, int count)
+{
+ static uint32_t inbound_len;
+ uint32_t emudat;
+ int ret;
+
+ if (!(bfin_read_DBGSTAT() & EMUDIF))
+ return 0;
+ emudat = bfin_read_emudat();
+
+ if (!inbound_len) {
+ inbound_len = emudat;
+ return 0;
+ }
+
+ ret = min(inbound_len, (uint32_t)4);
+ memcpy(buf, &emudat, ret);
+ inbound_len -= ret;
+
+ return ret;
+}
+
+/* Glue the HVC layers to the Blackfin layers */
+static const struct hv_ops hvc_bfin_get_put_ops = {
+ .get_chars = hvc_bfin_get_chars,
+ .put_chars = hvc_bfin_put_chars,
+};
+
+static int __init hvc_bfin_console_init(void)
+{
+ hvc_instantiate(0, 0, &hvc_bfin_get_put_ops);
+ return 0;
+}
+console_initcall(hvc_bfin_console_init);
+
+static int __init hvc_bfin_init(void)
+{
+ hvc_alloc(0, 0, &hvc_bfin_get_put_ops, 128);
+ return 0;
+}
+device_initcall(hvc_bfin_init);
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
new file mode 100644
index 00000000..aa845550
--- /dev/null
+++ b/drivers/tty/hvc/hvc_console.c
@@ -0,0 +1,916 @@
+/*
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2004 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * Additional Author(s):
+ * Ryan S. Arnold <rsa@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/console.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/kbd_kern.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/major.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+
+#include "hvc_console.h"
+
+#define HVC_MAJOR 229
+#define HVC_MINOR 0
+
+/*
+ * Wait this long per iteration while trying to push buffered data to the
+ * hypervisor before allowing the tty to complete a close operation.
+ */
+#define HVC_CLOSE_WAIT (HZ/100) /* 1/100 of a second */
+
+/*
+ * These sizes are most efficient for vio, because they are the
+ * native transfer size. We could make them selectable in the
+ * future to better deal with backends that want other buffer sizes.
+ */
+#define N_OUTBUF 16
+#define N_INBUF 16
+
+#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
+
+static struct tty_driver *hvc_driver;
+static struct task_struct *hvc_task;
+
+/* Picks up late kicks after list walk but before schedule() */
+static int hvc_kicked;
+
+static int hvc_init(void);
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static int sysrq_pressed;
+#endif
+
+/* dynamic list of hvc_struct instances */
+static LIST_HEAD(hvc_structs);
+
+/*
+ * Protect the list of hvc_struct instances from inserts and removals during
+ * list traversal.
+ */
+static DEFINE_SPINLOCK(hvc_structs_lock);
+
+/*
+ * This value is used to assign a tty->index value to a hvc_struct based
+ * upon order of exposure via hvc_probe(), when we can not match it to
+ * a console candidate registered with hvc_instantiate().
+ */
+static int last_hvc = -1;
+
+/*
+ * Do not call this function with either the hvc_structs_lock or the hvc_struct
+ * lock held. If successful, this function increments the kref reference
+ * count against the target hvc_struct so it should be released when finished.
+ */
+static struct hvc_struct *hvc_get_by_index(int index)
+{
+ struct hvc_struct *hp;
+ unsigned long flags;
+
+ spin_lock(&hvc_structs_lock);
+
+ list_for_each_entry(hp, &hvc_structs, next) {
+ spin_lock_irqsave(&hp->lock, flags);
+ if (hp->index == index) {
+ kref_get(&hp->kref);
+ spin_unlock_irqrestore(&hp->lock, flags);
+ spin_unlock(&hvc_structs_lock);
+ return hp;
+ }
+ spin_unlock_irqrestore(&hp->lock, flags);
+ }
+ hp = NULL;
+
+ spin_unlock(&hvc_structs_lock);
+ return hp;
+}
+
+
+/*
+ * Initial console vtermnos for console API usage prior to full console
+ * initialization. Any vty adapter outside this range will not have usable
+ * console interfaces but can still be used as a tty device. This has to be
+ * static because kmalloc will not work during early console init.
+ */
+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
+static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
+ {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
+
+/*
+ * Console APIs, NOT TTY. These APIs are available immediately when
+ * hvc_console_setup() finds adapters.
+ */
+
+static void hvc_console_print(struct console *co, const char *b,
+ unsigned count)
+{
+ char c[N_OUTBUF] __ALIGNED__;
+ unsigned i = 0, n = 0;
+ int r, donecr = 0, index = co->index;
+
+ /* Console access attempt outside of acceptable console range. */
+ if (index >= MAX_NR_HVC_CONSOLES)
+ return;
+
+ /* This console adapter was removed so it is not usable. */
+ if (vtermnos[index] == -1)
+ return;
+
+ while (count > 0 || i > 0) {
+ if (count > 0 && i < sizeof(c)) {
+ if (b[n] == '\n' && !donecr) {
+ c[i++] = '\r';
+ donecr = 1;
+ } else {
+ c[i++] = b[n++];
+ donecr = 0;
+ --count;
+ }
+ } else {
+ r = cons_ops[index]->put_chars(vtermnos[index], c, i);
+ if (r <= 0) {
+ /* throw away characters on error
+ * but spin in case of -EAGAIN */
+ if (r != -EAGAIN)
+ i = 0;
+ } else if (r > 0) {
+ i -= r;
+ if (i > 0)
+ memmove(c, c+r, i);
+ }
+ }
+ }
+}
+
+static struct tty_driver *hvc_console_device(struct console *c, int *index)
+{
+ if (vtermnos[c->index] == -1)
+ return NULL;
+
+ *index = c->index;
+ return hvc_driver;
+}
+
+static int __init hvc_console_setup(struct console *co, char *options)
+{
+ if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
+ return -ENODEV;
+
+ if (vtermnos[co->index] == -1)
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct console hvc_console = {
+ .name = "hvc",
+ .write = hvc_console_print,
+ .device = hvc_console_device,
+ .setup = hvc_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+/*
+ * Early console initialization. Precedes driver initialization.
+ *
+ * (1) we are first, and the user specified another driver
+ * -- index will remain -1
+ * (2) we are first and the user specified no driver
+ * -- index will be set to 0, then we will fail setup.
+ * (3) we are first and the user specified our driver
+ * -- index will be set to user specified driver, and we will fail
+ * (4) we are after driver, and this initcall will register us
+ * -- if the user didn't specify a driver then the console will match
+ *
+ * Note that for cases 2 and 3, we will match later when the io driver
+ * calls hvc_instantiate() and call register again.
+ */
+static int __init hvc_console_init(void)
+{
+ register_console(&hvc_console);
+ return 0;
+}
+console_initcall(hvc_console_init);
+
+/* callback when the kobject ref count reaches zero. */
+static void destroy_hvc_struct(struct kref *kref)
+{
+ struct hvc_struct *hp = container_of(kref, struct hvc_struct, kref);
+ unsigned long flags;
+
+ spin_lock(&hvc_structs_lock);
+
+ spin_lock_irqsave(&hp->lock, flags);
+ list_del(&(hp->next));
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ spin_unlock(&hvc_structs_lock);
+
+ kfree(hp);
+}
+
+/*
+ * hvc_instantiate() is an early console discovery method which locates
+ * consoles prior to the vio subsystem discovering them. Hotplugged
+ * vty adapters do NOT get an hvc_instantiate() callback since they
+ * appear after early console init.
+ */
+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
+{
+ struct hvc_struct *hp;
+
+ if (index < 0 || index >= MAX_NR_HVC_CONSOLES)
+ return -1;
+
+ if (vtermnos[index] != -1)
+ return -1;
+
+ /* make sure no tty has been registered at this index */
+ hp = hvc_get_by_index(index);
+ if (hp) {
+ kref_put(&hp->kref, destroy_hvc_struct);
+ return -1;
+ }
+
+ vtermnos[index] = vtermno;
+ cons_ops[index] = ops;
+
+ /* reserve all indices up to and including this index */
+ if (last_hvc < index)
+ last_hvc = index;
+
+ /* if this index is what the user requested, then register
+ * now (setup won't fail at this point). It's ok to just
+ * call register again if previously .setup failed.
+ */
+ if (index == hvc_console.index)
+ register_console(&hvc_console);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hvc_instantiate);
+
+/* Wake the sleeping khvcd */
+void hvc_kick(void)
+{
+ hvc_kicked = 1;
+ wake_up_process(hvc_task);
+}
+EXPORT_SYMBOL_GPL(hvc_kick);
+
+static void hvc_unthrottle(struct tty_struct *tty)
+{
+ hvc_kick();
+}
+
+/*
+ * The TTY interface won't be used until after the vio layer has exposed the vty
+ * adapter to the kernel.
+ */
+static int hvc_open(struct tty_struct *tty, struct file * filp)
+{
+ struct hvc_struct *hp;
+ unsigned long flags;
+ int rc = 0;
+
+ /* Auto increments kref reference if found. */
+ if (!(hp = hvc_get_by_index(tty->index)))
+ return -ENODEV;
+
+ spin_lock_irqsave(&hp->lock, flags);
+ /* Check and then increment for fast path open. */
+ if (hp->count++ > 0) {
+ tty_kref_get(tty);
+ spin_unlock_irqrestore(&hp->lock, flags);
+ hvc_kick();
+ return 0;
+ } /* else count == 0 */
+
+ tty->driver_data = hp;
+
+ hp->tty = tty_kref_get(tty);
+
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ if (hp->ops->notifier_add)
+ rc = hp->ops->notifier_add(hp, hp->data);
+
+ /*
+ * If the notifier fails we return an error. The tty layer
+ * will call hvc_close() after a failed open but we don't want to clean
+ * up there so we'll clean up here and clear out the previously set
+ * tty fields and return the kref reference.
+ */
+ if (rc) {
+ spin_lock_irqsave(&hp->lock, flags);
+ hp->tty = NULL;
+ spin_unlock_irqrestore(&hp->lock, flags);
+ tty_kref_put(tty);
+ tty->driver_data = NULL;
+ kref_put(&hp->kref, destroy_hvc_struct);
+ printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
+ }
+ /* Force wakeup of the polling thread */
+ hvc_kick();
+
+ return rc;
+}
+
+static void hvc_close(struct tty_struct *tty, struct file * filp)
+{
+ struct hvc_struct *hp;
+ unsigned long flags;
+
+ if (tty_hung_up_p(filp))
+ return;
+
+ /*
+ * No driver_data means that this close was issued after a failed
+ * hvc_open by the tty layer's release_dev() function and we can just
+ * exit cleanly because the kref reference wasn't made.
+ */
+ if (!tty->driver_data)
+ return;
+
+ hp = tty->driver_data;
+
+ spin_lock_irqsave(&hp->lock, flags);
+
+ if (--hp->count == 0) {
+ /* We are done with the tty pointer now. */
+ hp->tty = NULL;
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ if (hp->ops->notifier_del)
+ hp->ops->notifier_del(hp, hp->data);
+
+ /* cancel pending tty resize work */
+ cancel_work_sync(&hp->tty_resize);
+
+ /*
+ * Chain calls chars_in_buffer() and returns immediately if
+ * there is no buffered data otherwise sleeps on a wait queue
+ * waking periodically to check chars_in_buffer().
+ */
+ tty_wait_until_sent(tty, HVC_CLOSE_WAIT);
+ } else {
+ if (hp->count < 0)
+ printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
+ hp->vtermno, hp->count);
+ spin_unlock_irqrestore(&hp->lock, flags);
+ }
+
+ tty_kref_put(tty);
+ kref_put(&hp->kref, destroy_hvc_struct);
+}
+
+static void hvc_hangup(struct tty_struct *tty)
+{
+ struct hvc_struct *hp = tty->driver_data;
+ unsigned long flags;
+ int temp_open_count;
+
+ if (!hp)
+ return;
+
+ /* cancel pending tty resize work */
+ cancel_work_sync(&hp->tty_resize);
+
+ spin_lock_irqsave(&hp->lock, flags);
+
+ /*
+ * The N_TTY line discipline has problems such that in a close vs
+ * open->hangup case this can be called after the final close so prevent
+ * that from happening for now.
+ */
+ if (hp->count <= 0) {
+ spin_unlock_irqrestore(&hp->lock, flags);
+ return;
+ }
+
+ temp_open_count = hp->count;
+ hp->count = 0;
+ hp->n_outbuf = 0;
+ hp->tty = NULL;
+
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ if (hp->ops->notifier_hangup)
+ hp->ops->notifier_hangup(hp, hp->data);
+
+ while(temp_open_count) {
+ --temp_open_count;
+ tty_kref_put(tty);
+ kref_put(&hp->kref, destroy_hvc_struct);
+ }
+}
+
+/*
+ * Push buffered characters whether they were just recently buffered or waiting
+ * on a blocked hypervisor. Call this function with hp->lock held.
+ */
+static int hvc_push(struct hvc_struct *hp)
+{
+ int n;
+
+ n = hp->ops->put_chars(hp->vtermno, hp->outbuf, hp->n_outbuf);
+ if (n <= 0) {
+ if (n == 0 || n == -EAGAIN) {
+ hp->do_wakeup = 1;
+ return 0;
+ }
+ /* throw away output on error; this happens when
+ there is no session connected to the vterm. */
+ hp->n_outbuf = 0;
+ } else
+ hp->n_outbuf -= n;
+ if (hp->n_outbuf > 0)
+ memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf);
+ else
+ hp->do_wakeup = 1;
+
+ return n;
+}
+
+static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ struct hvc_struct *hp = tty->driver_data;
+ unsigned long flags;
+ int rsize, written = 0;
+
+ /* This write was probably executed during a tty close. */
+ if (!hp)
+ return -EPIPE;
+
+ if (hp->count <= 0)
+ return -EIO;
+
+ spin_lock_irqsave(&hp->lock, flags);
+
+ /* Push pending writes */
+ if (hp->n_outbuf > 0)
+ hvc_push(hp);
+
+ while (count > 0 && (rsize = hp->outbuf_size - hp->n_outbuf) > 0) {
+ if (rsize > count)
+ rsize = count;
+ memcpy(hp->outbuf + hp->n_outbuf, buf, rsize);
+ count -= rsize;
+ buf += rsize;
+ hp->n_outbuf += rsize;
+ written += rsize;
+ hvc_push(hp);
+ }
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ /*
+ * Racy, but harmless, kick thread if there is still pending data.
+ */
+ if (hp->n_outbuf)
+ hvc_kick();
+
+ return written;
+}
+
+/**
+ * hvc_set_winsz() - Resize the hvc tty terminal window.
+ * @work: work structure.
+ *
+ * The routine shall not be called within an atomic context because it
+ * might sleep.
+ *
+ * Locking: hp->lock
+ */
+static void hvc_set_winsz(struct work_struct *work)
+{
+ struct hvc_struct *hp;
+ unsigned long hvc_flags;
+ struct tty_struct *tty;
+ struct winsize ws;
+
+ hp = container_of(work, struct hvc_struct, tty_resize);
+
+ spin_lock_irqsave(&hp->lock, hvc_flags);
+ if (!hp->tty) {
+ spin_unlock_irqrestore(&hp->lock, hvc_flags);
+ return;
+ }
+ ws = hp->ws;
+ tty = tty_kref_get(hp->tty);
+ spin_unlock_irqrestore(&hp->lock, hvc_flags);
+
+ tty_do_resize(tty, &ws);
+ tty_kref_put(tty);
+}
+
+/*
+ * This is actually a contract between the driver and the tty layer outlining
+ * how much write room the driver can guarantee will be sent OR BUFFERED. This
+ * driver MUST honor the return value.
+ */
+static int hvc_write_room(struct tty_struct *tty)
+{
+ struct hvc_struct *hp = tty->driver_data;
+
+ if (!hp)
+ return -1;
+
+ return hp->outbuf_size - hp->n_outbuf;
+}
+
+static int hvc_chars_in_buffer(struct tty_struct *tty)
+{
+ struct hvc_struct *hp = tty->driver_data;
+
+ if (!hp)
+ return 0;
+ return hp->n_outbuf;
+}
+
+/*
+ * The timeout will vary between the MIN and MAX values defined here. By default,
+ * and during console activity, we use MIN_TIMEOUT of 10 ms. When
+ * the console is idle, we increase the timeout value on each pass through
+ * msleep until we reach the max. This may be noticeable as a brief (average
+ * one second) delay on the console before the console responds to input when
+ * there has been no input for some time.
+ */
+#define MIN_TIMEOUT (10)
+#define MAX_TIMEOUT (2000)
+static u32 timeout = MIN_TIMEOUT;
+
+#define HVC_POLL_READ 0x00000001
+#define HVC_POLL_WRITE 0x00000002
+
+int hvc_poll(struct hvc_struct *hp)
+{
+ struct tty_struct *tty;
+ int i, n, poll_mask = 0;
+ char buf[N_INBUF] __ALIGNED__;
+ unsigned long flags;
+ int read_total = 0;
+ int written_total = 0;
+
+ spin_lock_irqsave(&hp->lock, flags);
+
+ /* Push pending writes */
+ if (hp->n_outbuf > 0)
+ written_total = hvc_push(hp);
+
+ /* Reschedule us if still some write pending */
+ if (hp->n_outbuf > 0) {
+ poll_mask |= HVC_POLL_WRITE;
+ /* If hvc_push() was not able to write, sleep a few msecs */
+ timeout = (written_total) ? 0 : MIN_TIMEOUT;
+ }
+
+ /* No tty attached, just skip */
+ tty = tty_kref_get(hp->tty);
+ if (tty == NULL)
+ goto bail;
+
+	/* Now check if we can get data (are we throttled?) */
+ if (test_bit(TTY_THROTTLED, &tty->flags))
+ goto throttled;
+
+ /* If we aren't notifier driven and aren't throttled, we always
+ * request a reschedule
+ */
+ if (!hp->irq_requested)
+ poll_mask |= HVC_POLL_READ;
+
+ /* Read data if any */
+ for (;;) {
+ int count = tty_buffer_request_room(tty, N_INBUF);
+
+ /* If flip is full, just reschedule a later read */
+ if (count == 0) {
+ poll_mask |= HVC_POLL_READ;
+ break;
+ }
+
+ n = hp->ops->get_chars(hp->vtermno, buf, count);
+ if (n <= 0) {
+ /* Hangup the tty when disconnected from host */
+ if (n == -EPIPE) {
+ spin_unlock_irqrestore(&hp->lock, flags);
+ tty_hangup(tty);
+ spin_lock_irqsave(&hp->lock, flags);
+			} else if (n == -EAGAIN) {
+ /*
+				 * Some back-ends can only ensure a certain minimum
+				 * number of bytes read, which may be > 'count'.
+				 * Let the tty clear the flip buffer to make room.
+ */
+ poll_mask |= HVC_POLL_READ;
+ }
+ break;
+ }
+ for (i = 0; i < n; ++i) {
+#ifdef CONFIG_MAGIC_SYSRQ
+ if (hp->index == hvc_console.index) {
+ /* Handle the SysRq Hack */
+ /* XXX should support a sequence */
+ if (buf[i] == '\x0f') { /* ^O */
+ /* if ^O is pressed again, reset
+ * sysrq_pressed and flip ^O char */
+ sysrq_pressed = !sysrq_pressed;
+ if (sysrq_pressed)
+ continue;
+ } else if (sysrq_pressed) {
+ handle_sysrq(buf[i]);
+ sysrq_pressed = 0;
+ continue;
+ }
+ }
+#endif /* CONFIG_MAGIC_SYSRQ */
+ tty_insert_flip_char(tty, buf[i], 0);
+ }
+
+ read_total += n;
+ }
+ throttled:
+ /* Wakeup write queue if necessary */
+ if (hp->do_wakeup) {
+ hp->do_wakeup = 0;
+ tty_wakeup(tty);
+ }
+ bail:
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ if (read_total) {
+ /* Activity is occurring, so reset the polling backoff value to
+ a minimum for performance. */
+ timeout = MIN_TIMEOUT;
+
+ tty_flip_buffer_push(tty);
+ }
+ if (tty)
+ tty_kref_put(tty);
+
+ return poll_mask;
+}
+EXPORT_SYMBOL_GPL(hvc_poll);
+
+/**
+ * __hvc_resize() - Update terminal window size information.
+ * @hp: HVC console pointer
+ * @ws: Terminal window size structure
+ *
+ * Stores the specified window size information in the hvc structure of @hp.
+ * The function schedules the tty resize update.
+ *
+ * Locking: the function takes no locks itself; it MUST be called holding hp->lock
+ */
+void __hvc_resize(struct hvc_struct *hp, struct winsize ws)
+{
+ hp->ws = ws;
+ schedule_work(&hp->tty_resize);
+}
+EXPORT_SYMBOL_GPL(__hvc_resize);
+
+/*
+ * This kthread is either polling or interrupt driven. This is determined by
+ * calling hvc_poll(), which determines whether a console adapter supports
+ * interrupts.
+ */
+static int khvcd(void *unused)
+{
+ int poll_mask;
+ struct hvc_struct *hp;
+
+ set_freezable();
+ do {
+ poll_mask = 0;
+ hvc_kicked = 0;
+ try_to_freeze();
+ wmb();
+ if (!cpus_are_in_xmon()) {
+ spin_lock(&hvc_structs_lock);
+ list_for_each_entry(hp, &hvc_structs, next) {
+ poll_mask |= hvc_poll(hp);
+ }
+ spin_unlock(&hvc_structs_lock);
+ } else
+ poll_mask |= HVC_POLL_READ;
+ if (hvc_kicked)
+ continue;
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!hvc_kicked) {
+ if (poll_mask == 0)
+ schedule();
+ else {
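+				/* console idle: back off, growing the poll
+				 * interval by ~1/64 plus 1 ms per pass */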
+ if (timeout < MAX_TIMEOUT)
+ timeout += (timeout >> 6) + 1;
+
+ msleep_interruptible(timeout);
+ }
+ }
+ __set_current_state(TASK_RUNNING);
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static const struct tty_operations hvc_ops = {
+ .open = hvc_open,
+ .close = hvc_close,
+ .write = hvc_write,
+ .hangup = hvc_hangup,
+ .unthrottle = hvc_unthrottle,
+ .write_room = hvc_write_room,
+ .chars_in_buffer = hvc_chars_in_buffer,
+};
+
+struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
+ const struct hv_ops *ops,
+ int outbuf_size)
+{
+ struct hvc_struct *hp;
+ int i;
+
+ /* We wait until a driver actually comes along */
+ if (!hvc_driver) {
+ int err = hvc_init();
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ hp = kzalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size,
+ GFP_KERNEL);
+ if (!hp)
+ return ERR_PTR(-ENOMEM);
+
+ hp->vtermno = vtermno;
+ hp->data = data;
+ hp->ops = ops;
+ hp->outbuf_size = outbuf_size;
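+	/* outbuf is carved from the tail of the same allocation, just past the
+	 * aligned hvc_struct */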
+ hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))];
+
+ kref_init(&hp->kref);
+
+ INIT_WORK(&hp->tty_resize, hvc_set_winsz);
+ spin_lock_init(&hp->lock);
+ spin_lock(&hvc_structs_lock);
+
+ /*
+ * find index to use:
+ * see if this vterm id matches one registered for console.
+ */
+	for (i = 0; i < MAX_NR_HVC_CONSOLES; i++)
+ if (vtermnos[i] == hp->vtermno &&
+ cons_ops[i] == hp->ops)
+ break;
+
+ /* no matching slot, just use a counter */
+ if (i >= MAX_NR_HVC_CONSOLES)
+ i = ++last_hvc;
+
+ hp->index = i;
+
+ list_add_tail(&(hp->next), &hvc_structs);
+ spin_unlock(&hvc_structs_lock);
+
+ return hp;
+}
+EXPORT_SYMBOL_GPL(hvc_alloc);
+
+int hvc_remove(struct hvc_struct *hp)
+{
+ unsigned long flags;
+ struct tty_struct *tty;
+
+ spin_lock_irqsave(&hp->lock, flags);
+ tty = tty_kref_get(hp->tty);
+
+ if (hp->index < MAX_NR_HVC_CONSOLES)
+ vtermnos[hp->index] = -1;
+
+ /* Don't whack hp->irq because tty_hangup() will need to free the irq. */
+
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ /*
+ * We 'put' the instance that was grabbed when the kref instance
+ * was initialized using kref_init(). Let the last holder of this
+ * kref cause it to be removed, which will probably be the tty_vhangup
+ * below.
+ */
+ kref_put(&hp->kref, destroy_hvc_struct);
+
+ /*
+	 * This call will in turn invoke hvc_hangup().
+ */
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hvc_remove);
+
+/* Driver initialization: called as soon as someone uses hvc_alloc(). */
+static int hvc_init(void)
+{
+ struct tty_driver *drv;
+ int err;
+
+ /* We need more than hvc_count adapters due to hotplug additions. */
+ drv = alloc_tty_driver(HVC_ALLOC_TTY_ADAPTERS);
+ if (!drv) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ drv->owner = THIS_MODULE;
+ drv->driver_name = "hvc";
+ drv->name = "hvc";
+ drv->major = HVC_MAJOR;
+ drv->minor_start = HVC_MINOR;
+ drv->type = TTY_DRIVER_TYPE_SYSTEM;
+ drv->init_termios = tty_std_termios;
+ drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
+ tty_set_operations(drv, &hvc_ops);
+
+ /* Always start the kthread because there can be hotplug vty adapters
+ * added later. */
+ hvc_task = kthread_run(khvcd, NULL, "khvcd");
+ if (IS_ERR(hvc_task)) {
+ printk(KERN_ERR "Couldn't create kthread for console.\n");
+ err = PTR_ERR(hvc_task);
+ goto put_tty;
+ }
+
+ err = tty_register_driver(drv);
+ if (err) {
+ printk(KERN_ERR "Couldn't register hvc console driver\n");
+ goto stop_thread;
+ }
+
+ /*
+ * Make sure tty is fully registered before allowing it to be
+ * found by hvc_console_device.
+ */
+ smp_mb();
+ hvc_driver = drv;
+ return 0;
+
+stop_thread:
+ kthread_stop(hvc_task);
+ hvc_task = NULL;
+put_tty:
+ put_tty_driver(drv);
+out:
+ return err;
+}
+
+/* This isn't particularly necessary due to this being a console driver
+ * but it is nice to be thorough.
+ */
+static void __exit hvc_exit(void)
+{
+ if (hvc_driver) {
+ kthread_stop(hvc_task);
+
+ tty_unregister_driver(hvc_driver);
+		/* release the tty_driver allocated in hvc_init(). */
+ put_tty_driver(hvc_driver);
+ unregister_console(&hvc_console);
+ }
+}
+module_exit(hvc_exit);
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
new file mode 100644
index 00000000..54381eba
--- /dev/null
+++ b/drivers/tty/hvc/hvc_console.h
@@ -0,0 +1,119 @@
+/*
+ * hvc_console.h
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Author(s):
+ * Ryan S. Arnold <rsa@us.ibm.com>
+ *
+ * hvc_console header information:
+ * moved here from arch/powerpc/include/asm/hvconsole.h
+ * and drivers/char/hvc_console.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef HVC_CONSOLE_H
+#define HVC_CONSOLE_H
+#include <linux/kref.h>
+#include <linux/tty.h>
+#include <linux/spinlock.h>
+
+/*
+ * This is the max number of console adapters that can/will be found as
+ * console devices on first stage console init. Any number beyond this range
+ * can't be used as a console device but is still a valid tty device.
+ */
+#define MAX_NR_HVC_CONSOLES 16
+
+/*
+ * The Linux TTY code does not support dynamic addition of tty derived devices
+ * so we need to know how many tty devices we might need when space is allocated
+ * for the tty device. Since this driver supports hotplug of vty adapters we
+ * need to make sure we have enough allocated.
+ */
+#define HVC_ALLOC_TTY_ADAPTERS 8
+
+struct hvc_struct {
+ spinlock_t lock;
+ int index;
+ struct tty_struct *tty;
+ int count;
+ int do_wakeup;
+ char *outbuf;
+ int outbuf_size;
+ int n_outbuf;
+ uint32_t vtermno;
+ const struct hv_ops *ops;
+ int irq_requested;
+ int data;
+ struct winsize ws;
+ struct work_struct tty_resize;
+ struct list_head next;
+ struct kref kref; /* ref count & hvc_struct lifetime */
+};
+
+/* implemented by a low level driver */
+struct hv_ops {
+ int (*get_chars)(uint32_t vtermno, char *buf, int count);
+ int (*put_chars)(uint32_t vtermno, const char *buf, int count);
+
+ /* Callbacks for notification. Called in open, close and hangup */
+ int (*notifier_add)(struct hvc_struct *hp, int irq);
+ void (*notifier_del)(struct hvc_struct *hp, int irq);
+ void (*notifier_hangup)(struct hvc_struct *hp, int irq);
+};
+
+/* Register a vterm and a slot index for use as a console (console_init) */
+extern int hvc_instantiate(uint32_t vtermno, int index,
+ const struct hv_ops *ops);
+
+/* register a vterm for hvc tty operation (module_init or hotplug add) */
+extern struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
+ const struct hv_ops *ops, int outbuf_size);
+/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
+extern int hvc_remove(struct hvc_struct *hp);
+
+/* data available */
+int hvc_poll(struct hvc_struct *hp);
+void hvc_kick(void);
+
+/* Resize hvc tty terminal window */
+extern void __hvc_resize(struct hvc_struct *hp, struct winsize ws);
+
+static inline void hvc_resize(struct hvc_struct *hp, struct winsize ws)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hp->lock, flags);
+ __hvc_resize(hp, ws);
+ spin_unlock_irqrestore(&hp->lock, flags);
+}
+
+/* default notifier for irq based notification */
+extern int notifier_add_irq(struct hvc_struct *hp, int data);
+extern void notifier_del_irq(struct hvc_struct *hp, int data);
+extern void notifier_hangup_irq(struct hvc_struct *hp, int data);
+
+
+#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
+#include <asm/xmon.h>
+#else
+static inline int cpus_are_in_xmon(void)
+{
+ return 0;
+}
+#endif
+
+#endif // HVC_CONSOLE_H
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
new file mode 100644
index 00000000..44fbebab
--- /dev/null
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -0,0 +1,106 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#include <asm/processor.h>
+
+#include "hvc_console.h"
+
+/* DCC Status Bits */
+#define DCC_STATUS_RX (1 << 30)
+#define DCC_STATUS_TX (1 << 29)
+
+static inline u32 __dcc_getstatus(void)
+{
+ u32 __ret;
+ asm volatile("mrc p14, 0, %0, c0, c1, 0 @ read comms ctrl reg"
+ : "=r" (__ret) : : "cc");
+
+ return __ret;
+}
+
+
+static inline char __dcc_getchar(void)
+{
+ char __c;
+
+ asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
+ : "=r" (__c));
+ isb();
+
+ return __c;
+}
+
+static inline void __dcc_putchar(char c)
+{
+ asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char"
+ : /* no output register */
+ : "r" (c));
+ isb();
+}
+
+static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
+{
+ int i;
+
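+	/* spin until the debugger drains the TX register before writing each
+	 * character */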
+ for (i = 0; i < count; i++) {
+ while (__dcc_getstatus() & DCC_STATUS_TX)
+ cpu_relax();
+
+ __dcc_putchar(buf[i]);
+ }
+
+ return count;
+}
+
+static int hvc_dcc_get_chars(uint32_t vt, char *buf, int count)
+{
+ int i;
+
+ for (i = 0; i < count; ++i)
+ if (__dcc_getstatus() & DCC_STATUS_RX)
+ buf[i] = __dcc_getchar();
+ else
+ break;
+
+ return i;
+}
+
+static const struct hv_ops hvc_dcc_get_put_ops = {
+ .get_chars = hvc_dcc_get_chars,
+ .put_chars = hvc_dcc_put_chars,
+};
+
+static int __init hvc_dcc_console_init(void)
+{
+ hvc_instantiate(0, 0, &hvc_dcc_get_put_ops);
+ return 0;
+}
+console_initcall(hvc_dcc_console_init);
+
+static int __init hvc_dcc_init(void)
+{
+ hvc_alloc(0, 0, &hvc_dcc_get_put_ops, 128);
+ return 0;
+}
+device_initcall(hvc_dcc_init);
diff --git a/drivers/tty/hvc/hvc_irq.c b/drivers/tty/hvc/hvc_irq.c
new file mode 100644
index 00000000..2623e177
--- /dev/null
+++ b/drivers/tty/hvc/hvc_irq.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright IBM Corp. 2001,2008
+ *
+ * This file contains the IRQ specific code for hvc_console
+ *
+ */
+
+#include <linux/interrupt.h>
+
+#include "hvc_console.h"
+
+static irqreturn_t hvc_handle_interrupt(int irq, void *dev_instance)
+{
+	/* if hvc_poll requests a repoll, then kick the khvcd thread */
+ if (hvc_poll(dev_instance))
+ hvc_kick();
+ return IRQ_HANDLED;
+}
+
+/*
+ * For IRQ based systems these callbacks can be used
+ */
+int notifier_add_irq(struct hvc_struct *hp, int irq)
+{
+ int rc;
+
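+	/* irq == 0 means there is no interrupt source; stay in polling mode */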
+ if (!irq) {
+ hp->irq_requested = 0;
+ return 0;
+ }
+ rc = request_irq(irq, hvc_handle_interrupt, IRQF_DISABLED,
+ "hvc_console", hp);
+ if (!rc)
+ hp->irq_requested = 1;
+ return rc;
+}
+
+void notifier_del_irq(struct hvc_struct *hp, int irq)
+{
+ if (!hp->irq_requested)
+ return;
+ free_irq(irq, hp);
+ hp->irq_requested = 0;
+}
+
+void notifier_hangup_irq(struct hvc_struct *hp, int irq)
+{
+ notifier_del_irq(hp, irq);
+}
diff --git a/drivers/tty/hvc/hvc_iseries.c b/drivers/tty/hvc/hvc_iseries.c
new file mode 100644
index 00000000..21c54955
--- /dev/null
+++ b/drivers/tty/hvc/hvc_iseries.c
@@ -0,0 +1,598 @@
+/*
+ * iSeries vio driver interface to hvc_console.c
+ *
+ * This code is based heavily on hvc_vio.c and viocons.c
+ *
+ * Copyright (C) 2006 Stephen Rothwell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/console.h>
+
+#include <asm/hvconsole.h>
+#include <asm/vio.h>
+#include <asm/prom.h>
+#include <asm/firmware.h>
+#include <asm/iseries/vio.h>
+#include <asm/iseries/hv_call.h>
+#include <asm/iseries/hv_lp_config.h>
+#include <asm/iseries/hv_lp_event.h>
+
+#include "hvc_console.h"
+
+#define VTTY_PORTS 10
+
+static DEFINE_SPINLOCK(consolelock);
+static DEFINE_SPINLOCK(consoleloglock);
+
+static const char hvc_driver_name[] = "hvc_console";
+
+#define IN_BUF_SIZE 200
+
+/*
+ * Our port information.
+ */
+static struct port_info {
+ HvLpIndex lp;
+ u64 seq; /* sequence number of last HV send */
+ u64 ack; /* last ack from HV */
+ struct hvc_struct *hp;
+ int in_start;
+ int in_end;
+ unsigned char in_buf[IN_BUF_SIZE];
+} port_info[VTTY_PORTS] = {
+ [ 0 ... VTTY_PORTS - 1 ] = {
+ .lp = HvLpIndexInvalid
+ }
+};
+
+#define viochar_is_console(pi) ((pi) == &port_info[0])
+
+static struct vio_device_id hvc_driver_table[] __devinitdata = {
+ {"serial", "IBM,iSeries-vty"},
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, hvc_driver_table);
+
+static void hvlog(char *fmt, ...)
+{
+ int i;
+ unsigned long flags;
+ va_list args;
+ static char buf[256];
+
+ spin_lock_irqsave(&consoleloglock, flags);
+ va_start(args, fmt);
+ i = vscnprintf(buf, sizeof(buf) - 1, fmt, args);
+ va_end(args);
+ buf[i++] = '\r';
+ HvCall_writeLogBuffer(buf, i);
+ spin_unlock_irqrestore(&consoleloglock, flags);
+}
+
+/*
+ * Initialize the common fields in a charLpEvent
+ */
+static void init_data_event(struct viocharlpevent *viochar, HvLpIndex lp)
+{
+ struct HvLpEvent *hev = &viochar->event;
+
+ memset(viochar, 0, sizeof(struct viocharlpevent));
+
+ hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DEFERRED_ACK |
+ HV_LP_EVENT_INT;
+ hev->xType = HvLpEvent_Type_VirtualIo;
+ hev->xSubtype = viomajorsubtype_chario | viochardata;
+ hev->xSourceLp = HvLpConfig_getLpIndex();
+ hev->xTargetLp = lp;
+ hev->xSizeMinus1 = sizeof(struct viocharlpevent);
+ hev->xSourceInstanceId = viopath_sourceinst(lp);
+ hev->xTargetInstanceId = viopath_targetinst(lp);
+}
+
+static int get_chars(uint32_t vtermno, char *buf, int count)
+{
+ struct port_info *pi;
+ int n = 0;
+ unsigned long flags;
+
+ if (vtermno >= VTTY_PORTS)
+ return -EINVAL;
+ if (count == 0)
+ return 0;
+
+ pi = &port_info[vtermno];
+ spin_lock_irqsave(&consolelock, flags);
+
+ if (pi->in_end == 0)
+ goto done;
+
+ n = pi->in_end - pi->in_start;
+ if (n > count)
+ n = count;
+ memcpy(buf, &pi->in_buf[pi->in_start], n);
+ pi->in_start += n;
+ if (pi->in_start == pi->in_end) {
+ pi->in_start = 0;
+ pi->in_end = 0;
+ }
+done:
+ spin_unlock_irqrestore(&consolelock, flags);
+ return n;
+}
+
+static int put_chars(uint32_t vtermno, const char *buf, int count)
+{
+ struct viocharlpevent *viochar;
+ struct port_info *pi;
+ HvLpEvent_Rc hvrc;
+ unsigned long flags;
+ int sent = 0;
+
+ if (vtermno >= VTTY_PORTS)
+ return -EINVAL;
+
+ pi = &port_info[vtermno];
+
+ spin_lock_irqsave(&consolelock, flags);
+
+ if (viochar_is_console(pi) && !viopath_isactive(pi->lp)) {
+ HvCall_writeLogBuffer(buf, count);
+ sent = count;
+ goto done;
+ }
+
+ viochar = vio_get_event_buffer(viomajorsubtype_chario);
+ if (viochar == NULL) {
+ hvlog("\n\rviocons: Can't get viochar buffer.");
+ goto done;
+ }
+
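+	/* send in VIOCHAR_MAX_DATA-sized chunks while the number of unacked
+	 * events stays within the window */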
+ while ((count > 0) && ((pi->seq - pi->ack) < VIOCHAR_WINDOW)) {
+ int len;
+
+ len = (count > VIOCHAR_MAX_DATA) ? VIOCHAR_MAX_DATA : count;
+
+ if (viochar_is_console(pi))
+ HvCall_writeLogBuffer(buf, len);
+
+ init_data_event(viochar, pi->lp);
+
+ viochar->len = len;
+ viochar->event.xCorrelationToken = pi->seq++;
+ viochar->event.xSizeMinus1 =
+ offsetof(struct viocharlpevent, data) + len;
+
+ memcpy(viochar->data, buf, len);
+
+ hvrc = HvCallEvent_signalLpEvent(&viochar->event);
+ if (hvrc)
+ hvlog("\n\rerror sending event! return code %d\n\r",
+ (int)hvrc);
+ sent += len;
+ count -= len;
+ buf += len;
+ }
+
+ vio_free_event_buffer(viomajorsubtype_chario, viochar);
+done:
+ spin_unlock_irqrestore(&consolelock, flags);
+ return sent;
+}
+
+static const struct hv_ops hvc_get_put_ops = {
+ .get_chars = get_chars,
+ .put_chars = put_chars,
+ .notifier_add = notifier_add_irq,
+ .notifier_del = notifier_del_irq,
+ .notifier_hangup = notifier_hangup_irq,
+};
+
+static int __devinit hvc_vio_probe(struct vio_dev *vdev,
+ const struct vio_device_id *id)
+{
+ struct hvc_struct *hp;
+ struct port_info *pi;
+
+ /* probed with invalid parameters. */
+ if (!vdev || !id)
+ return -EPERM;
+
+ if (vdev->unit_address >= VTTY_PORTS)
+ return -ENODEV;
+
+ pi = &port_info[vdev->unit_address];
+
+ hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops,
+ VIOCHAR_MAX_DATA);
+ if (IS_ERR(hp))
+ return PTR_ERR(hp);
+ pi->hp = hp;
+ dev_set_drvdata(&vdev->dev, pi);
+
+ return 0;
+}
+
+static int __devexit hvc_vio_remove(struct vio_dev *vdev)
+{
+ struct port_info *pi = dev_get_drvdata(&vdev->dev);
+ struct hvc_struct *hp = pi->hp;
+
+ return hvc_remove(hp);
+}
+
+static struct vio_driver hvc_vio_driver = {
+ .id_table = hvc_driver_table,
+ .probe = hvc_vio_probe,
+ .remove = __devexit_p(hvc_vio_remove),
+ .driver = {
+ .name = hvc_driver_name,
+ .owner = THIS_MODULE,
+ }
+};
+
+static void hvc_open_event(struct HvLpEvent *event)
+{
+ unsigned long flags;
+ struct viocharlpevent *cevent = (struct viocharlpevent *)event;
+ u8 port = cevent->virtual_device;
+ struct port_info *pi;
+ int reject = 0;
+
+ if (hvlpevent_is_ack(event)) {
+ if (port >= VTTY_PORTS)
+ return;
+
+ spin_lock_irqsave(&consolelock, flags);
+
+ pi = &port_info[port];
+ if (event->xRc == HvLpEvent_Rc_Good) {
+ pi->seq = pi->ack = 0;
+ /*
+ * This line allows connections from the primary
+ * partition but once one is connected from the
+ * primary partition nothing short of a reboot
+ * of linux will allow access from the hosting
+ * partition again without a required iSeries fix.
+ */
+ pi->lp = event->xTargetLp;
+ }
+
+ spin_unlock_irqrestore(&consolelock, flags);
+ if (event->xRc != HvLpEvent_Rc_Good)
+ printk(KERN_WARNING
+ "hvc: handle_open_event: event->xRc == (%d).\n",
+ event->xRc);
+
+ if (event->xCorrelationToken != 0) {
+			atomic_t *aptr = (atomic_t *)event->xCorrelationToken;
+ atomic_set(aptr, 1);
+ } else
+ printk(KERN_WARNING
+ "hvc: weird...got open ack without atomic\n");
+ return;
+ }
+
+ /* This had better require an ack, otherwise complain */
+ if (!hvlpevent_need_ack(event)) {
+ printk(KERN_WARNING "hvc: viocharopen without ack bit!\n");
+ return;
+ }
+
+ spin_lock_irqsave(&consolelock, flags);
+
+ /* Make sure this is a good virtual tty */
+ if (port >= VTTY_PORTS) {
+ event->xRc = HvLpEvent_Rc_SubtypeError;
+ cevent->subtype_result_code = viorc_openRejected;
+ /*
+ * Flag state here since we can't printk while holding
+ * the consolelock spinlock.
+ */
+ reject = 1;
+ } else {
+ pi = &port_info[port];
+ if ((pi->lp != HvLpIndexInvalid) &&
+ (pi->lp != event->xSourceLp)) {
+ /*
+			 * If this tty is already connected to a different
+ * partition, fail.
+ */
+ event->xRc = HvLpEvent_Rc_SubtypeError;
+ cevent->subtype_result_code = viorc_openRejected;
+ reject = 2;
+ } else {
+ pi->lp = event->xSourceLp;
+ event->xRc = HvLpEvent_Rc_Good;
+ cevent->subtype_result_code = viorc_good;
+ pi->seq = pi->ack = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&consolelock, flags);
+
+ if (reject == 1)
+ printk(KERN_WARNING "hvc: open rejected: bad virtual tty.\n");
+ else if (reject == 2)
+ printk(KERN_WARNING "hvc: open rejected: console in exclusive "
+ "use by another partition.\n");
+
+ /* Return the acknowledgement */
+ HvCallEvent_ackLpEvent(event);
+}
+
+/*
+ * Handle a close charLpEvent. This should ONLY be an Interrupt because the
+ * virtual console should never actually issue a close event to the hypervisor
+ * because the virtual console never goes away. A close event coming from the
+ * hypervisor simply means that there are no client consoles connected to the
+ * virtual console.
+ */
+static void hvc_close_event(struct HvLpEvent *event)
+{
+ unsigned long flags;
+ struct viocharlpevent *cevent = (struct viocharlpevent *)event;
+ u8 port = cevent->virtual_device;
+
+ if (!hvlpevent_is_int(event)) {
+ printk(KERN_WARNING
+ "hvc: got unexpected close acknowledgement\n");
+ return;
+ }
+
+ if (port >= VTTY_PORTS) {
+ printk(KERN_WARNING
+ "hvc: close message from invalid virtual device.\n");
+ return;
+ }
+
+ /* For closes, just mark the console partition invalid */
+ spin_lock_irqsave(&consolelock, flags);
+
+ if (port_info[port].lp == event->xSourceLp)
+ port_info[port].lp = HvLpIndexInvalid;
+
+ spin_unlock_irqrestore(&consolelock, flags);
+}
+
+static void hvc_data_event(struct HvLpEvent *event)
+{
+ unsigned long flags;
+ struct viocharlpevent *cevent = (struct viocharlpevent *)event;
+ struct port_info *pi;
+ int n;
+ u8 port = cevent->virtual_device;
+
+ if (port >= VTTY_PORTS) {
+ printk(KERN_WARNING "hvc: data on invalid virtual device %d\n",
+ port);
+ return;
+ }
+ if (cevent->len == 0)
+ return;
+
+ /*
+ * Change 05/01/2003 - Ryan Arnold: If a partition other than
+ * the current exclusive partition tries to send us data
+ * events then just drop them on the floor because we don't
+ * want his stinking data. He isn't authorized to receive
+ * data because he wasn't the first one to get the console,
+ * therefore he shouldn't be allowed to send data either.
+ * This will work without an iSeries fix.
+ */
+ pi = &port_info[port];
+ if (pi->lp != event->xSourceLp)
+ return;
+
+ spin_lock_irqsave(&consolelock, flags);
+
+ n = IN_BUF_SIZE - pi->in_end;
+ if (n > cevent->len)
+ n = cevent->len;
+ if (n > 0) {
+ memcpy(&pi->in_buf[pi->in_end], cevent->data, n);
+ pi->in_end += n;
+ }
+ spin_unlock_irqrestore(&consolelock, flags);
+ if (n == 0)
+ printk(KERN_WARNING "hvc: input buffer overflow\n");
+}
+
+static void hvc_ack_event(struct HvLpEvent *event)
+{
+ struct viocharlpevent *cevent = (struct viocharlpevent *)event;
+ unsigned long flags;
+ u8 port = cevent->virtual_device;
+
+ if (port >= VTTY_PORTS) {
+ printk(KERN_WARNING "hvc: data on invalid virtual device\n");
+ return;
+ }
+
+ spin_lock_irqsave(&consolelock, flags);
+ port_info[port].ack = event->xCorrelationToken;
+ spin_unlock_irqrestore(&consolelock, flags);
+}
+
+static void hvc_config_event(struct HvLpEvent *event)
+{
+ struct viocharlpevent *cevent = (struct viocharlpevent *)event;
+
+ if (cevent->data[0] == 0x01)
+ printk(KERN_INFO "hvc: window resized to %d: %d: %d: %d\n",
+ cevent->data[1], cevent->data[2],
+ cevent->data[3], cevent->data[4]);
+ else
+ printk(KERN_WARNING "hvc: unknown config event\n");
+}
+
+static void hvc_handle_event(struct HvLpEvent *event)
+{
+ int charminor;
+
+ if (event == NULL)
+ return;
+
+ charminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK;
+ switch (charminor) {
+ case viocharopen:
+ hvc_open_event(event);
+ break;
+ case viocharclose:
+ hvc_close_event(event);
+ break;
+ case viochardata:
+ hvc_data_event(event);
+ break;
+ case viocharack:
+ hvc_ack_event(event);
+ break;
+ case viocharconfig:
+ hvc_config_event(event);
+ break;
+ default:
+ if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
+ event->xRc = HvLpEvent_Rc_InvalidSubtype;
+ HvCallEvent_ackLpEvent(event);
+ }
+ }
+}
+
+static int __init send_open(HvLpIndex remoteLp, void *sem)
+{
+ return HvCallEvent_signalLpEventFast(remoteLp,
+ HvLpEvent_Type_VirtualIo,
+ viomajorsubtype_chario | viocharopen,
+ HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
+ viopath_sourceinst(remoteLp),
+ viopath_targetinst(remoteLp),
+ (u64)(unsigned long)sem, VIOVERSION << 16,
+ 0, 0, 0, 0);
+}
+
+static int __init hvc_vio_init(void)
+{
+ atomic_t wait_flag;
+ int rc;
+
+ if (!firmware_has_feature(FW_FEATURE_ISERIES))
+ return -EIO;
+
+ /* +2 for fudge */
+ rc = viopath_open(HvLpConfig_getPrimaryLpIndex(),
+ viomajorsubtype_chario, VIOCHAR_WINDOW + 2);
+ if (rc)
+ printk(KERN_WARNING "hvc: error opening to primary %d\n", rc);
+
+ if (viopath_hostLp == HvLpIndexInvalid)
+ vio_set_hostlp();
+
+ /*
+ * And if the primary is not the same as the hosting LP, open to the
+ * hosting lp
+ */
+ if ((viopath_hostLp != HvLpIndexInvalid) &&
+ (viopath_hostLp != HvLpConfig_getPrimaryLpIndex())) {
+ printk(KERN_INFO "hvc: open path to hosting (%d)\n",
+ viopath_hostLp);
+ rc = viopath_open(viopath_hostLp, viomajorsubtype_chario,
+ VIOCHAR_WINDOW + 2); /* +2 for fudge */
+ if (rc)
+ printk(KERN_WARNING
+ "error opening to partition %d: %d\n",
+ viopath_hostLp, rc);
+ }
+
+ if (vio_setHandler(viomajorsubtype_chario, hvc_handle_event) < 0)
+ printk(KERN_WARNING
+ "hvc: error seting handler for console events!\n");
+
+ /*
+ * First, try to open the console to the hosting lp.
+ * Wait on a semaphore for the response.
+ */
+ atomic_set(&wait_flag, 0);
+ if ((viopath_isactive(viopath_hostLp)) &&
+ (send_open(viopath_hostLp, &wait_flag) == 0)) {
+ printk(KERN_INFO "hvc: hosting partition %d\n", viopath_hostLp);
+ while (atomic_read(&wait_flag) == 0)
+ mb();
+ atomic_set(&wait_flag, 0);
+ }
+
+ /*
+ * If we don't have an active console, try the primary
+ */
+ if ((!viopath_isactive(port_info[0].lp)) &&
+ (viopath_isactive(HvLpConfig_getPrimaryLpIndex())) &&
+ (send_open(HvLpConfig_getPrimaryLpIndex(), &wait_flag) == 0)) {
+ printk(KERN_INFO "hvc: opening console to primary partition\n");
+ while (atomic_read(&wait_flag) == 0)
+ mb();
+ }
+
+ /* Register as a vio device to receive callbacks */
+ rc = vio_register_driver(&hvc_vio_driver);
+
+ return rc;
+}
+module_init(hvc_vio_init); /* after drivers/char/hvc_console.c */
+
+static void __exit hvc_vio_exit(void)
+{
+ vio_unregister_driver(&hvc_vio_driver);
+}
+module_exit(hvc_vio_exit);
+
+/* the device tree order defines our numbering */
+static int __init hvc_find_vtys(void)
+{
+ struct device_node *vty;
+ int num_found = 0;
+
+ for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL;
+ vty = of_find_node_by_name(vty, "vty")) {
+ const uint32_t *vtermno;
+
+ /* We have statically defined space for only a certain number
+ * of console adapters.
+ */
+ if ((num_found >= MAX_NR_HVC_CONSOLES) ||
+ (num_found >= VTTY_PORTS)) {
+ of_node_put(vty);
+ break;
+ }
+
+ vtermno = of_get_property(vty, "reg", NULL);
+ if (!vtermno)
+ continue;
+
+ if (!of_device_is_compatible(vty, "IBM,iSeries-vty"))
+ continue;
+
+ if (num_found == 0)
+ add_preferred_console("hvc", 0, NULL);
+ hvc_instantiate(*vtermno, num_found, &hvc_get_put_ops);
+ ++num_found;
+ }
+
+ return num_found;
+}
+console_initcall(hvc_find_vtys);
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
new file mode 100644
index 00000000..b6f7d52f
--- /dev/null
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -0,0 +1,1337 @@
+/*
+ * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
+ *
+ * This HVC device driver provides terminal access using
+ * z/VM IUCV communication paths.
+ *
+ * Copyright IBM Corp. 2008, 2009
+ *
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+#define KMSG_COMPONENT "hvc_iucv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/ebcdic.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/mempool.h>
+#include <linux/moduleparam.h>
+#include <linux/tty.h>
+#include <linux/wait.h>
+#include <net/iucv/iucv.h>
+
+#include "hvc_console.h"
+
+
+/* General device driver settings */
+#define HVC_IUCV_MAGIC 0xc9e4c3e5
+#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
+#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
+
+/* IUCV TTY message */
+#define MSG_VERSION 0x02 /* Message version */
+#define MSG_TYPE_ERROR 0x01 /* Error message */
+#define MSG_TYPE_TERMENV 0x02 /* Terminal environment variable */
+#define MSG_TYPE_TERMIOS 0x04 /* Terminal IO struct update */
+#define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */
+#define MSG_TYPE_DATA 0x10 /* Terminal data */
+
+struct iucv_tty_msg {
+ u8 version; /* Message version */
+ u8 type; /* Message type */
+#define MSG_MAX_DATALEN ((u16)(~0))
+ u16 datalen; /* Payload length */
+ u8 data[]; /* Payload buffer */
+} __attribute__((packed));
+#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
+
+enum iucv_state_t {
+ IUCV_DISCONN = 0,
+ IUCV_CONNECTED = 1,
+ IUCV_SEVERED = 2,
+};
+
+enum tty_state_t {
+ TTY_CLOSED = 0,
+ TTY_OPENED = 1,
+};
+
+struct hvc_iucv_private {
+ struct hvc_struct *hvc; /* HVC struct reference */
+ u8 srv_name[8]; /* IUCV service name (ebcdic) */
+ unsigned char is_console; /* Linux console usage flag */
+ enum iucv_state_t iucv_state; /* IUCV connection status */
+ enum tty_state_t tty_state; /* TTY status */
+ struct iucv_path *path; /* IUCV path pointer */
+ spinlock_t lock; /* hvc_iucv_private lock */
+#define SNDBUF_SIZE (PAGE_SIZE) /* must be < MSG_MAX_DATALEN */
+ void *sndbuf; /* send buffer */
+ size_t sndbuf_len; /* length of send buffer */
+#define QUEUE_SNDBUF_DELAY (HZ / 25)
+ struct delayed_work sndbuf_work; /* work: send iucv msg(s) */
+ wait_queue_head_t sndbuf_waitq; /* wait for send completion */
+ struct list_head tty_outqueue; /* outgoing IUCV messages */
+ struct list_head tty_inqueue; /* incoming IUCV messages */
+ struct device *dev; /* device structure */
+};
+
+struct iucv_tty_buffer {
+ struct list_head list; /* list pointer */
+ struct iucv_message msg; /* store an IUCV message */
+ size_t offset; /* data buffer offset */
+ struct iucv_tty_msg *mbuf; /* buffer to store input/output data */
+};
+
+/* IUCV callback handler */
+static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
+static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
+static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
+static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
+
+
+/* Kernel module parameter: use one terminal device as default */
+static unsigned long hvc_iucv_devices = 1;
+
+/* Array of allocated hvc iucv tty lines... */
+static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
+#define IUCV_HVC_CON_IDX (0)
+/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
+#define MAX_VMID_FILTER (500)
+static size_t hvc_iucv_filter_size;
+static void *hvc_iucv_filter;
+static const char *hvc_iucv_filter_string;
+static DEFINE_RWLOCK(hvc_iucv_filter_lock);
+
+/* Kmem cache and mempool for iucv_tty_buffer elements */
+static struct kmem_cache *hvc_iucv_buffer_cache;
+static mempool_t *hvc_iucv_mempool;
+
+/* IUCV handler callback functions */
+static struct iucv_handler hvc_iucv_handler = {
+ .path_pending = hvc_iucv_path_pending,
+ .path_severed = hvc_iucv_path_severed,
+ .message_complete = hvc_iucv_msg_complete,
+ .message_pending = hvc_iucv_msg_pending,
+};
+
+
+/**
+ * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
+ * @num: The HVC virtual terminal number (vtermno)
+ *
+ * This function returns the struct hvc_iucv_private instance that corresponds
+ * to the HVC virtual terminal number specified as parameter @num.
+ */
+struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
+{
+ if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
+ return NULL;
+ return hvc_iucv_table[num - HVC_IUCV_MAGIC];
+}
+
+/**
+ * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
+ * @size: Size of the internal buffer used to store data.
+ * @flags: Memory allocation flags passed to mempool.
+ *
+ * This function allocates a new struct iucv_tty_buffer element and, optionally,
+ * allocates an internal data buffer with the specified size @size.
+ * The internal data buffer is always allocated with GFP_DMA which is
+ * required for receiving and sending data with IUCV.
+ * Note: The total message size arises from the internal buffer size and the
+ * members of the iucv_tty_msg structure.
+ * The function returns NULL if memory allocation has failed.
+ */
+static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
+{
+ struct iucv_tty_buffer *bufp;
+
+ bufp = mempool_alloc(hvc_iucv_mempool, flags);
+ if (!bufp)
+ return NULL;
+ memset(bufp, 0, sizeof(*bufp));
+
+ if (size > 0) {
+ bufp->msg.length = MSG_SIZE(size);
+ bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
+ if (!bufp->mbuf) {
+ mempool_free(bufp, hvc_iucv_mempool);
+ return NULL;
+ }
+ bufp->mbuf->version = MSG_VERSION;
+ bufp->mbuf->type = MSG_TYPE_DATA;
+ bufp->mbuf->datalen = (u16) size;
+ }
+ return bufp;
+}
+
+/**
+ * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
+ * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
+ */
+static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
+{
+ kfree(bufp->mbuf);
+ mempool_free(bufp, hvc_iucv_mempool);
+}
+
+/**
+ * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
+ * @list: List containing struct iucv_tty_buffer elements.
+ */
+static void destroy_tty_buffer_list(struct list_head *list)
+{
+ struct iucv_tty_buffer *ent, *next;
+
+ list_for_each_entry_safe(ent, next, list, list) {
+ list_del(&ent->list);
+ destroy_tty_buffer(ent);
+ }
+}
+
+/**
+ * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
+ * @priv: Pointer to struct hvc_iucv_private
+ * @buf: HVC buffer for writing received terminal data.
+ * @count: HVC buffer size.
+ * @has_more_data: Pointer to an int variable.
+ *
+ * The function picks up pending messages from the input queue and receives
+ * the message data that is then written to the specified buffer @buf.
+ * If the buffer size @count is less than the data message size, the
+ * message is kept on the input queue and @has_more_data is set to 1.
+ * If all message data has been written, the message is removed from
+ * the input queue.
+ *
+ * The function returns the number of bytes written to the terminal, zero if
+ * there are no pending data messages available or if there is no established
+ * IUCV path.
+ * If the IUCV path has been severed, then -EPIPE is returned to cause a
+ * hang up (that is issued by the HVC layer).
+ */
+static int hvc_iucv_write(struct hvc_iucv_private *priv,
+ char *buf, int count, int *has_more_data)
+{
+ struct iucv_tty_buffer *rb;
+ int written;
+ int rc;
+
+ /* immediately return if there is no IUCV connection */
+ if (priv->iucv_state == IUCV_DISCONN)
+ return 0;
+
+ /* if the IUCV path has been severed, return -EPIPE to inform the
+ * HVC layer to hang up the tty device. */
+ if (priv->iucv_state == IUCV_SEVERED)
+ return -EPIPE;
+
+ /* check if there are pending messages */
+ if (list_empty(&priv->tty_inqueue))
+ return 0;
+
+ /* receive an iucv message and flip data to the tty (ldisc) */
+ rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
+
+ written = 0;
+ if (!rb->mbuf) { /* message not yet received ... */
+ /* allocate mem to store msg data; if no memory is available
+ * then leave the buffer on the list and re-try later */
+ rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
+ if (!rb->mbuf)
+ return -ENOMEM;
+
+ rc = __iucv_message_receive(priv->path, &rb->msg, 0,
+ rb->mbuf, rb->msg.length, NULL);
+ switch (rc) {
+ case 0: /* Successful */
+ break;
+ case 2: /* No message found */
+ case 9: /* Message purged */
+ break;
+ default:
+ written = -EIO;
+ }
+ /* remove buffer if an error has occurred or received data
+ * is not correct */
+ if (rc || (rb->mbuf->version != MSG_VERSION) ||
+ (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
+ goto out_remove_buffer;
+ }
+
+ switch (rb->mbuf->type) {
+ case MSG_TYPE_DATA:
+ written = min_t(int, rb->mbuf->datalen - rb->offset, count);
+ memcpy(buf, rb->mbuf->data + rb->offset, written);
+ if (written < (rb->mbuf->datalen - rb->offset)) {
+ rb->offset += written;
+ *has_more_data = 1;
+ goto out_written;
+ }
+ break;
+
+ case MSG_TYPE_WINSIZE:
+ if (rb->mbuf->datalen != sizeof(struct winsize))
+ break;
+ /* The caller must ensure that the hvc is locked, which
+ * is the case when called from hvc_iucv_get_chars() */
+ __hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
+ break;
+
+ case MSG_TYPE_ERROR: /* ignored ... */
+ case MSG_TYPE_TERMENV: /* ignored ... */
+ case MSG_TYPE_TERMIOS: /* ignored ... */
+ break;
+ }
+
+out_remove_buffer:
+ list_del(&rb->list);
+ destroy_tty_buffer(rb);
+ *has_more_data = !list_empty(&priv->tty_inqueue);
+
+out_written:
+ return written;
+}
+
+/**
+ * hvc_iucv_get_chars() - HVC get_chars operation.
+ * @vtermno: HVC virtual terminal number.
+ * @buf: Pointer to a buffer to store data
+ * @count: Size of buffer available for writing
+ *
+ * The HVC thread calls this method to read characters from the back-end.
+ * If an IUCV communication path has been established, pending IUCV messages
+ * are received and data is copied into buffer @buf up to @count bytes.
+ *
+ * Locking: The routine gets called under an irqsave() spinlock; and
+ * the routine locks the struct hvc_iucv_private->lock to call
+ * helper functions.
+ */
+static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
+{
+ struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
+ int written;
+ int has_more_data;
+
+ if (count <= 0)
+ return 0;
+
+ if (!priv)
+ return -ENODEV;
+
+ spin_lock(&priv->lock);
+ has_more_data = 0;
+ written = hvc_iucv_write(priv, buf, count, &has_more_data);
+ spin_unlock(&priv->lock);
+
+ /* if there are still messages on the queue... schedule another run */
+ if (has_more_data)
+ hvc_kick();
+
+ return written;
+}
+
+/**
+ * hvc_iucv_queue() - Buffer terminal data for sending.
+ * @priv: Pointer to struct hvc_iucv_private instance.
+ * @buf: Buffer containing data to send.
+ * @count: Size of buffer and amount of data to send.
+ *
+ * The function queues data for sending. To actually send the buffered data,
+ * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
+ * The function returns the number of data bytes that have been buffered.
+ *
+ * If the device is not connected, data is ignored and the function returns
+ * @count.
+ * If the buffer is full, the function returns 0.
+ * If an existing IUCV communication path has been severed, -EPIPE is returned
+ * (that can be passed to HVC layer to cause a tty hangup).
+ */
+static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
+ int count)
+{
+ size_t len;
+
+ if (priv->iucv_state == IUCV_DISCONN)
+ return count; /* ignore data */
+
+ if (priv->iucv_state == IUCV_SEVERED)
+ return -EPIPE;
+
+ len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
+ if (!len)
+ return 0;
+
+ memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
+ priv->sndbuf_len += len;
+
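+	/* the delayed work lets subsequent writes accumulate in sndbuf so they
+	 * go out as a single IUCV message */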
+ if (priv->iucv_state == IUCV_CONNECTED)
+ schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
+
+ return len;
+}
+
+/**
+ * hvc_iucv_send() - Send an IUCV message containing terminal data.
+ * @priv: Pointer to struct hvc_iucv_private instance.
+ *
+ * If an IUCV communication path has been established, the buffered output data
+ * is sent via an IUCV message and the number of bytes sent is returned.
+ * Returns 0 if there is no established IUCV communication path or
+ * -EPIPE if an existing IUCV communication path has been severed.
+ */
+static int hvc_iucv_send(struct hvc_iucv_private *priv)
+{
+ struct iucv_tty_buffer *sb;
+ int rc, len;
+
+ if (priv->iucv_state == IUCV_SEVERED)
+ return -EPIPE;
+
+ if (priv->iucv_state == IUCV_DISCONN)
+ return -EIO;
+
+ if (!priv->sndbuf_len)
+ return 0;
+
+ /* allocate internal buffer to store msg data and also compute total
+ * message length */
+ sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
+ if (!sb)
+ return -ENOMEM;
+
+ memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
+ sb->mbuf->datalen = (u16) priv->sndbuf_len;
+ sb->msg.length = MSG_SIZE(sb->mbuf->datalen);
+
+ list_add_tail(&sb->list, &priv->tty_outqueue);
+
+ rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
+ (void *) sb->mbuf, sb->msg.length);
+ if (rc) {
+ /* drop the message here; however we might want to handle
+ * 0x03 (msg limit reached) by trying again... */
+ list_del(&sb->list);
+ destroy_tty_buffer(sb);
+ }
+ len = priv->sndbuf_len;
+ priv->sndbuf_len = 0;
+
+ return len;
+}
+
+/**
+ * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
+ * @work: Work structure.
+ *
+ * This work queue function sends buffered output data over IUCV and,
+ * if not all buffered data could be sent, reschedules itself.
+ */
+static void hvc_iucv_sndbuf_work(struct work_struct *work)
+{
+ struct hvc_iucv_private *priv;
+
+ priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
+ if (!priv)
+ return;
+
+ spin_lock_bh(&priv->lock);
+ hvc_iucv_send(priv);
+ spin_unlock_bh(&priv->lock);
+}
+
+/**
+ * hvc_iucv_put_chars() - HVC put_chars operation.
+ * @vtermno: HVC virtual terminal number.
+ * @buf:	Pointer to a buffer to read data from
+ * @count: Size of buffer available for reading
+ *
+ * The HVC thread calls this method to write characters to the back-end.
+ * The function calls hvc_iucv_queue() to queue terminal data for sending.
+ *
+ * Locking: The method gets called under an irqsave() spinlock; and
+ * locks struct hvc_iucv_private->lock.
+ */
+static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
+{
+ struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
+ int queued;
+
+ if (count <= 0)
+ return 0;
+
+ if (!priv)
+ return -ENODEV;
+
+ spin_lock(&priv->lock);
+ queued = hvc_iucv_queue(priv, buf, count);
+ spin_unlock(&priv->lock);
+
+ return queued;
+}
+
+/**
+ * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
+ * @hp: Pointer to the HVC device (struct hvc_struct)
+ * @id:	Additional data (originally passed to hvc_alloc): the index of a struct
+ * hvc_iucv_private instance.
+ *
+ * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
+ * instance that is derived from @id. Always returns 0.
+ *
+ * Locking: struct hvc_iucv_private->lock, spin_lock_bh
+ */
+static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
+{
+ struct hvc_iucv_private *priv;
+
+ priv = hvc_iucv_get_private(id);
+ if (!priv)
+ return 0;
+
+ spin_lock_bh(&priv->lock);
+ priv->tty_state = TTY_OPENED;
+ spin_unlock_bh(&priv->lock);
+
+ return 0;
+}
+
+/**
+ * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
+ * @priv: Pointer to the struct hvc_iucv_private instance.
+ */
+static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
+{
+ destroy_tty_buffer_list(&priv->tty_outqueue);
+ destroy_tty_buffer_list(&priv->tty_inqueue);
+
+ priv->tty_state = TTY_CLOSED;
+ priv->iucv_state = IUCV_DISCONN;
+
+ priv->sndbuf_len = 0;
+}
+
+/**
+ * tty_outqueue_empty() - Test if the tty outq is empty
+ * @priv: Pointer to struct hvc_iucv_private instance.
+ */
+static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
+{
+ int rc;
+
+ spin_lock_bh(&priv->lock);
+ rc = list_empty(&priv->tty_outqueue);
+ spin_unlock_bh(&priv->lock);
+
+ return rc;
+}
+
+/**
+ * flush_sndbuf_sync() - Flush send buffer and wait for completion
+ * @priv: Pointer to struct hvc_iucv_private instance.
+ *
+ * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
+ * to flush any buffered terminal output data and waits for completion.
+ */
+static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
+{
+ int sync_wait;
+
+ cancel_delayed_work_sync(&priv->sndbuf_work);
+
+ spin_lock_bh(&priv->lock);
+ hvc_iucv_send(priv); /* force sending buffered data */
+ sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
+ spin_unlock_bh(&priv->lock);
+
+ if (sync_wait)
+ wait_event_timeout(priv->sndbuf_waitq,
+ tty_outqueue_empty(priv), HZ/10);
+}
+
+/**
+ * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
+ * @priv: Pointer to hvc_iucv_private structure
+ *
+ * This routine severs an existing IUCV communication path and hangs
+ * up the underlying HVC terminal device.
+ * The hang-up occurs only if an IUCV communication path is established;
+ * otherwise there is no need to hang up the terminal device.
+ *
+ * The IUCV HVC hang-up is separated into two steps:
+ * 1. After the IUCV path has been severed, the iucv_state is set to
+ * IUCV_SEVERED.
+ * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
+ * IUCV_SEVERED state causes the tty hang-up in the HVC layer.
+ *
+ * If the tty has not yet been opened, clean up the hvc_iucv_private
+ * structure to allow re-connects.
+ * If the tty has been opened, let get_chars() return -EPIPE to signal
+ * the HVC layer to hang up the tty and, if so, wake up the HVC thread
+ * to call get_chars()...
+ *
+ * Special notes on hanging up a HVC terminal instantiated as console:
+ * Hang-up: 1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
+ * 2. do_tty_hangup() calls tty->ops->close() for console_filp
+ * => no hangup notifier is called by HVC (default)
+ *	    3. hvc_close() returns because of tty_hung_up_p(filp)
+ * => no delete notifier is called!
+ * Finally, the back-end is not being notified, thus, the tty session is
+ * kept active (TTY_OPEN) to be ready for re-connects.
+ *
+ * Locking: spin_lock(&priv->lock) w/o disabling bh
+ */
+static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
+{
+ struct iucv_path *path;
+
+ path = NULL;
+ spin_lock(&priv->lock);
+ if (priv->iucv_state == IUCV_CONNECTED) {
+ path = priv->path;
+ priv->path = NULL;
+ priv->iucv_state = IUCV_SEVERED;
+ if (priv->tty_state == TTY_CLOSED)
+ hvc_iucv_cleanup(priv);
+ else
+ /* console is special (see above) */
+ if (priv->is_console) {
+ hvc_iucv_cleanup(priv);
+ priv->tty_state = TTY_OPENED;
+ } else
+ hvc_kick();
+ }
+ spin_unlock(&priv->lock);
+
+ /* finally sever path (outside of priv->lock due to lock ordering) */
+ if (path) {
+ iucv_path_sever(path, NULL);
+ iucv_path_free(path);
+ }
+}
+
+/**
+ * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
+ * @hp: Pointer to the HVC device (struct hvc_struct)
+ * @id: Additional data (originally passed to hvc_alloc):
+ *	 the index of a struct hvc_iucv_private instance.
+ *
+ * This routine notifies the HVC back-end that a tty hangup (carrier loss,
+ * virtual or otherwise) has occurred.
+ * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
+ * to keep an existing IUCV communication path established.
+ * (Background: vhangup() is called from user space (by getty or login) to
+ * disable writing to the tty by other applications).
+ * If the tty has been opened and an established IUCV path has been severed
+ * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
+{
+ struct hvc_iucv_private *priv;
+
+ priv = hvc_iucv_get_private(id);
+ if (!priv)
+ return;
+
+ flush_sndbuf_sync(priv);
+
+ spin_lock_bh(&priv->lock);
+ /* NOTE: If the hangup was scheduled by ourself (from the iucv
+	 * path_severed callback [IUCV_SEVERED]), we have to clean up
+ * our structure and to set state to TTY_CLOSED.
+ * If the tty was hung up otherwise (e.g. vhangup()), then we
+ * ignore this hangup and keep an established IUCV path open...
+ * (...the reason is that we are not able to connect back to the
+ * client if we disconnect on hang up) */
+ priv->tty_state = TTY_CLOSED;
+
+ if (priv->iucv_state == IUCV_SEVERED)
+ hvc_iucv_cleanup(priv);
+ spin_unlock_bh(&priv->lock);
+}
+
+/**
+ * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
+ * @hp: Pointer to the HVC device (struct hvc_struct)
+ * @id: Additional data (originally passed to hvc_alloc):
+ *	 the index of a struct hvc_iucv_private instance.
+ *
+ * This routine notifies the HVC back-end that the last tty device fd has been
+ * closed. The function calls hvc_iucv_cleanup() to clean up the struct
+ * hvc_iucv_private instance.
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
+{
+ struct hvc_iucv_private *priv;
+ struct iucv_path *path;
+
+ priv = hvc_iucv_get_private(id);
+ if (!priv)
+ return;
+
+ flush_sndbuf_sync(priv);
+
+ spin_lock_bh(&priv->lock);
+ path = priv->path; /* save reference to IUCV path */
+ priv->path = NULL;
+ hvc_iucv_cleanup(priv);
+ spin_unlock_bh(&priv->lock);
+
+ /* sever IUCV path outside of priv->lock due to lock ordering of:
+ * priv->lock <--> iucv_table_lock */
+ if (path) {
+ iucv_path_sever(path, NULL);
+ iucv_path_free(path);
+ }
+}
+
+/**
+ * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
+ * @ipvmid: Originating z/VM user ID (right padded with blanks)
+ *
+ * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
+ * non-zero.
+ */
+static int hvc_iucv_filter_connreq(u8 ipvmid[8])
+{
+ size_t i;
+
+ /* Note: default policy is ACCEPT if no filter is set */
+ if (!hvc_iucv_filter_size)
+ return 0;
+
+ for (i = 0; i < hvc_iucv_filter_size; i++)
+ if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
+ return 0;
+ return 1;
+}
+
+/**
+ * hvc_iucv_path_pending() - IUCV handler to process a connection request.
+ * @path: Pending path (struct iucv_path)
+ * @ipvmid: z/VM system identifier of originator
+ * @ipuser: User specified data for this path
+ * (AF_IUCV: port/service name and originator port)
+ *
+ * The function uses the @ipuser data to determine if the pending path belongs
+ * to a terminal managed by this device driver.
+ * If the path belongs to this driver, ensure that the terminal is not accessed
+ * multiple times (only one connection to a terminal is allowed).
+ * If the terminal is not yet connected, the pending path is accepted and is
+ * associated to the appropriate struct hvc_iucv_private instance.
+ *
+ * Returns 0 if @path belongs to a terminal managed by this device driver;
+ * otherwise returns -ENODEV in order to dispatch this path to other handlers.
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static int hvc_iucv_path_pending(struct iucv_path *path,
+ u8 ipvmid[8], u8 ipuser[16])
+{
+ struct hvc_iucv_private *priv;
+ u8 nuser_data[16];
+ u8 vm_user_id[9];
+ int i, rc;
+
+ priv = NULL;
+ for (i = 0; i < hvc_iucv_devices; i++)
+ if (hvc_iucv_table[i] &&
+ (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
+ priv = hvc_iucv_table[i];
+ break;
+ }
+ if (!priv)
+ return -ENODEV;
+
+ /* Enforce that ipvmid is allowed to connect to us */
+ read_lock(&hvc_iucv_filter_lock);
+ rc = hvc_iucv_filter_connreq(ipvmid);
+ read_unlock(&hvc_iucv_filter_lock);
+ if (rc) {
+ iucv_path_sever(path, ipuser);
+ iucv_path_free(path);
+ memcpy(vm_user_id, ipvmid, 8);
+ vm_user_id[8] = 0;
+ pr_info("A connection request from z/VM user ID %s "
+ "was refused\n", vm_user_id);
+ return 0;
+ }
+
+ spin_lock(&priv->lock);
+
+ /* If the terminal is already connected or being severed, then sever
+ * this path to enforce that there is only ONE established communication
+ * path per terminal. */
+ if (priv->iucv_state != IUCV_DISCONN) {
+ iucv_path_sever(path, ipuser);
+ iucv_path_free(path);
+ goto out_path_handled;
+ }
+
+ /* accept path */
+ memcpy(nuser_data, ipuser + 8, 8); /* remote service (for af_iucv) */
+ memcpy(nuser_data + 8, ipuser, 8); /* local service (for af_iucv) */
+ path->msglim = 0xffff; /* IUCV MSGLIMIT */
+ path->flags &= ~IUCV_IPRMDATA; /* TODO: use IUCV_IPRMDATA */
+ rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
+ if (rc) {
+ iucv_path_sever(path, ipuser);
+ iucv_path_free(path);
+ goto out_path_handled;
+ }
+ priv->path = path;
+ priv->iucv_state = IUCV_CONNECTED;
+
+ /* flush buffered output data... */
+ schedule_delayed_work(&priv->sndbuf_work, 5);
+
+out_path_handled:
+ spin_unlock(&priv->lock);
+ return 0;
+}
+
+/**
+ * hvc_iucv_path_severed() - IUCV handler to process a path sever.
+ * @path: Pending path (struct iucv_path)
+ * @ipuser: User specified data for this path
+ * (AF_IUCV: port/service name and originator port)
+ *
+ * This function calls the hvc_iucv_hangup() function for the
+ * respective IUCV HVC terminal.
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
+{
+ struct hvc_iucv_private *priv = path->private;
+
+ hvc_iucv_hangup(priv);
+}
+
+/**
+ * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
+ * @path: Pending path (struct iucv_path)
+ * @msg: Pointer to the IUCV message
+ *
+ * The function puts an incoming message on the input queue for later
+ * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
+ * If the tty has not yet been opened, the message is rejected.
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_msg_pending(struct iucv_path *path,
+ struct iucv_message *msg)
+{
+ struct hvc_iucv_private *priv = path->private;
+ struct iucv_tty_buffer *rb;
+
+ /* reject messages that exceed max size of iucv_tty_msg->datalen */
+ if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
+ iucv_message_reject(path, msg);
+ return;
+ }
+
+ spin_lock(&priv->lock);
+
+ /* reject messages if tty has not yet been opened */
+ if (priv->tty_state == TTY_CLOSED) {
+ iucv_message_reject(path, msg);
+ goto unlock_return;
+ }
+
+ /* allocate tty buffer to save iucv msg only */
+ rb = alloc_tty_buffer(0, GFP_ATOMIC);
+ if (!rb) {
+ iucv_message_reject(path, msg);
+ goto unlock_return; /* -ENOMEM */
+ }
+ rb->msg = *msg;
+
+ list_add_tail(&rb->list, &priv->tty_inqueue);
+
+ hvc_kick(); /* wake up hvc thread */
+
+unlock_return:
+ spin_unlock(&priv->lock);
+}
+
+/**
+ * hvc_iucv_msg_complete() - IUCV handler to process message completion
+ * @path: Pending path (struct iucv_path)
+ * @msg: Pointer to the IUCV message
+ *
+ * The function is called upon completion of message delivery to remove the
+ * message from the outqueue. Additional delivery information can be found in
+ * msg->audit: rejected messages (0x040000 (IPADRJCT)), and
+ * purged messages (0x010000 (IPADPGNR)).
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_msg_complete(struct iucv_path *path,
+ struct iucv_message *msg)
+{
+ struct hvc_iucv_private *priv = path->private;
+ struct iucv_tty_buffer *ent, *next;
+ LIST_HEAD(list_remove);
+
+ spin_lock(&priv->lock);
+ list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
+ if (ent->msg.id == msg->id) {
+ list_move(&ent->list, &list_remove);
+ break;
+ }
+ wake_up(&priv->sndbuf_waitq);
+ spin_unlock(&priv->lock);
+ destroy_tty_buffer_list(&list_remove);
+}
+
+/**
+ * hvc_iucv_pm_freeze() - Freeze PM callback
+ * @dev: IUCV HVC terminal device
+ *
+ * Sever an established IUCV communication path and
+ * trigger a hang-up of the underlying HVC terminal.
+ */
+static int hvc_iucv_pm_freeze(struct device *dev)
+{
+ struct hvc_iucv_private *priv = dev_get_drvdata(dev);
+
+ local_bh_disable();
+ hvc_iucv_hangup(priv);
+ local_bh_enable();
+
+ return 0;
+}
+
+/**
+ * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
+ * @dev: IUCV HVC terminal device
+ *
+ * Wake up the HVC thread to trigger a hang-up and the respective
+ * HVC back-end notifier invocations.
+ */
+static int hvc_iucv_pm_restore_thaw(struct device *dev)
+{
+ hvc_kick();
+ return 0;
+}
+
+
+/* HVC operations */
+static const struct hv_ops hvc_iucv_ops = {
+ .get_chars = hvc_iucv_get_chars,
+ .put_chars = hvc_iucv_put_chars,
+ .notifier_add = hvc_iucv_notifier_add,
+ .notifier_del = hvc_iucv_notifier_del,
+ .notifier_hangup = hvc_iucv_notifier_hangup,
+};
+
+/* Suspend / resume device operations */
+static const struct dev_pm_ops hvc_iucv_pm_ops = {
+ .freeze = hvc_iucv_pm_freeze,
+ .thaw = hvc_iucv_pm_restore_thaw,
+ .restore = hvc_iucv_pm_restore_thaw,
+};
+
+/* IUCV HVC device driver */
+static struct device_driver hvc_iucv_driver = {
+ .name = KMSG_COMPONENT,
+ .bus = &iucv_bus,
+ .pm = &hvc_iucv_pm_ops,
+};
+
+/**
+ * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
+ * @id: hvc_iucv_table index
+ * @is_console: Flag indicating whether the instance is used as the Linux console
+ *
+ * This function allocates a new hvc_iucv_private structure and stores
+ * the instance in hvc_iucv_table at index @id.
+ * Returns 0 on success; otherwise non-zero.
+ */
+static int __init hvc_iucv_alloc(int id, unsigned int is_console)
+{
+ struct hvc_iucv_private *priv;
+ char name[9];
+ int rc;
+
+ priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ INIT_LIST_HEAD(&priv->tty_outqueue);
+ INIT_LIST_HEAD(&priv->tty_inqueue);
+ INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
+ init_waitqueue_head(&priv->sndbuf_waitq);
+
+ priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!priv->sndbuf) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+
+ /* set console flag */
+ priv->is_console = is_console;
+
+ /* allocate hvc device */
+ priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */
+ HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
+ if (IS_ERR(priv->hvc)) {
+ rc = PTR_ERR(priv->hvc);
+ goto out_error_hvc;
+ }
+
+ /* notify HVC thread instead of using polling */
+ priv->hvc->irq_requested = 1;
+
+ /* setup iucv related information */
+ snprintf(name, 9, "lnxhvc%-2d", id);
+ memcpy(priv->srv_name, name, 8);
+ ASCEBC(priv->srv_name, 8);
+
+ /* create and setup device */
+ priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
+ if (!priv->dev) {
+ rc = -ENOMEM;
+ goto out_error_dev;
+ }
+ dev_set_name(priv->dev, "hvc_iucv%d", id);
+ dev_set_drvdata(priv->dev, priv);
+ priv->dev->bus = &iucv_bus;
+ priv->dev->parent = iucv_root;
+ priv->dev->driver = &hvc_iucv_driver;
+ priv->dev->release = (void (*)(struct device *)) kfree;
+ rc = device_register(priv->dev);
+ if (rc) {
+ put_device(priv->dev);
+ goto out_error_dev;
+ }
+
+ hvc_iucv_table[id] = priv;
+ return 0;
+
+out_error_dev:
+ hvc_remove(priv->hvc);
+out_error_hvc:
+ free_page((unsigned long) priv->sndbuf);
+ kfree(priv);
+
+ return rc;
+}
+
+/**
+ * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
+ */
+static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
+{
+ hvc_remove(priv->hvc);
+ device_unregister(priv->dev);
+ free_page((unsigned long) priv->sndbuf);
+ kfree(priv);
+}
+
+/**
+ * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
+ * @filter: String containing a comma-separated list of z/VM user IDs
+ * @dest:   Buffer (at least eight bytes) receiving the parsed user ID,
+ *          upper-cased and padded with blanks
+ *
+ * Returns a pointer to the remainder of the list (behind the next comma, or
+ * the end of @filter), or ERR_PTR(-EINVAL) if an entry is empty or longer
+ * than eight characters.
+ */
+static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
+{
+ const char *nextdelim, *residual;
+ size_t len;
+
+ nextdelim = strchr(filter, ',');
+ if (nextdelim) {
+ len = nextdelim - filter;
+ residual = nextdelim + 1;
+ } else {
+ len = strlen(filter);
+ residual = filter + len;
+ }
+
+ if (len == 0)
+ return ERR_PTR(-EINVAL);
+
+ /* check for '\n' (if called from sysfs) */
+ if (filter[len - 1] == '\n')
+ len--;
+
+ if (len > 8)
+ return ERR_PTR(-EINVAL);
+
+ /* pad with blanks and save upper case version of user ID */
+ memset(dest, ' ', 8);
+ while (len--)
+ dest[len] = toupper(filter[len]);
+ return residual;
+}
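+
+/*
+ * Illustrative example (hypothetical input): parsing "vmuser1,VMUSER22\n"
+ * first stores the upper-cased, blank-padded entry "VMUSER1 " in @dest and
+ * returns a pointer behind the comma; a second call on the remainder stores
+ * "VMUSER22" (the trailing '\n' is stripped) and returns a pointer to the
+ * terminating '\0', which ends the parse loop in hvc_iucv_setup_filter().
+ */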
+
+/**
+ * hvc_iucv_setup_filter() - Set up z/VM user ID filter
+ * @filter: String consisting of a comma-separated list of z/VM user IDs
+ *
+ * The function parses the @filter string and creates an array containing
+ * the list of z/VM user ID filter entries.
+ * Return code 0 means success, -EINVAL if the filter is syntactically
+ * incorrect, -ENOMEM if there was not enough memory to allocate the
+ * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
+ */
+static int hvc_iucv_setup_filter(const char *val)
+{
+ const char *residual;
+ int err;
+ size_t size, count;
+ void *array, *old_filter;
+
+ count = strlen(val);
+ if (count == 0 || (count == 1 && val[0] == '\n')) {
+ size = 0;
+ array = NULL;
+ goto out_replace_filter; /* clear filter */
+ }
+
+ /* count user IDs in order to allocate sufficient memory */
+ size = 1;
+ residual = val;
+ while ((residual = strchr(residual, ',')) != NULL) {
+ residual++;
+ size++;
+ }
+
+ /* check if the specified list exceeds the filter limit */
+ if (size > MAX_VMID_FILTER)
+ return -ENOSPC;
+
+ array = kzalloc(size * 8, GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ count = size;
+ residual = val;
+ while (*residual && count) {
+ residual = hvc_iucv_parse_filter(residual,
+ array + ((size - count) * 8));
+ if (IS_ERR(residual)) {
+ err = PTR_ERR(residual);
+ kfree(array);
+ goto out_err;
+ }
+ count--;
+ }
+
+out_replace_filter:
+ write_lock_bh(&hvc_iucv_filter_lock);
+ old_filter = hvc_iucv_filter;
+ hvc_iucv_filter_size = size;
+ hvc_iucv_filter = array;
+ write_unlock_bh(&hvc_iucv_filter_lock);
+ kfree(old_filter);
+
+ err = 0;
+out_err:
+ return err;
+}
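+
+/*
+ * Note on the filter layout: the list is kept as one flat allocation of
+ * 8-byte entries, so entry i lives at hvc_iucv_filter + (8 * i), which is
+ * exactly how hvc_iucv_filter_connreq() indexes it. The new array is
+ * swapped in under the write lock; the old array is freed only afterwards,
+ * when no reader holding hvc_iucv_filter_lock can still reference it.
+ */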
+
+/**
+ * param_set_vmidfilter() - Set z/VM user ID filter parameter
+ * @val: String consisting of a comma-separated list of z/VM user IDs
+ * @kp: Kernel parameter pointing to hvc_iucv_filter array
+ *
+ * The function sets up the z/VM user ID filter specified as a comma-separated
+ * list of user IDs in @val.
+ * Note: If it is called early in the boot process, @val is stored and
+ * parsed later in hvc_iucv_init().
+ */
+static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
+{
+ int rc;
+
+ if (!MACHINE_IS_VM || !hvc_iucv_devices)
+ return -ENODEV;
+
+ if (!val)
+ return -EINVAL;
+
+ rc = 0;
+ if (slab_is_available())
+ rc = hvc_iucv_setup_filter(val);
+ else
+ hvc_iucv_filter_string = val; /* defer... */
+ return rc;
+}
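+
+/*
+ * Example (hypothetical values): booting with
+ *   hvc_iucv=2 hvc_iucv_allow=vmuser1,vmuser2
+ * reaches this function before the slab allocator is available, so the
+ * string is only remembered in hvc_iucv_filter_string and parsed later by
+ * hvc_iucv_init(); later writes through the parameter interface take the
+ * slab_is_available() branch and call hvc_iucv_setup_filter() directly.
+ */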
+
+/**
+ * param_get_vmidfilter() - Get z/VM user ID filter
+ * @buffer: Buffer to store the z/VM user ID filter
+ *          (the buffer is assumed to be PAGE_SIZE bytes in size)
+ * @kp: Kernel parameter pointing to the hvc_iucv_filter array
+ *
+ * The function stores the filter as a comma-separated list of z/VM user IDs
+ * in @buffer. Typically, sysfs routines call this function for attr show.
+ */
+static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
+{
+ int rc;
+ size_t index, len;
+ void *start, *end;
+
+ if (!MACHINE_IS_VM || !hvc_iucv_devices)
+ return -ENODEV;
+
+ rc = 0;
+ read_lock_bh(&hvc_iucv_filter_lock);
+ for (index = 0; index < hvc_iucv_filter_size; index++) {
+ start = hvc_iucv_filter + (8 * index);
+ end = memchr(start, ' ', 8);
+ len = (end) ? end - start : 8;
+ memcpy(buffer + rc, start, len);
+ rc += len;
+ buffer[rc++] = ',';
+ }
+ read_unlock_bh(&hvc_iucv_filter_lock);
+ if (rc)
+ buffer[--rc] = '\0'; /* replace last comma and update rc */
+ return rc;
+}
+
+#define param_check_vmidfilter(name, p) __param_check(name, p, void)
+
+static struct kernel_param_ops param_ops_vmidfilter = {
+ .set = param_set_vmidfilter,
+ .get = param_get_vmidfilter,
+};
+
+/**
+ * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
+ */
+static int __init hvc_iucv_init(void)
+{
+ int rc;
+ unsigned int i;
+
+ if (!hvc_iucv_devices)
+ return -ENODEV;
+
+ if (!MACHINE_IS_VM) {
+ pr_notice("The z/VM IUCV HVC device driver cannot "
+ "be used without z/VM\n");
+ rc = -ENODEV;
+ goto out_error;
+ }
+
+ if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
+ pr_err("%lu is not a valid value for the hvc_iucv= "
+ "kernel parameter\n", hvc_iucv_devices);
+ rc = -EINVAL;
+ goto out_error;
+ }
+
+ /* register IUCV HVC device driver */
+ rc = driver_register(&hvc_iucv_driver);
+ if (rc)
+ goto out_error;
+
+ /* parse hvc_iucv_allow string and create z/VM user ID filter list */
+ if (hvc_iucv_filter_string) {
+ rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
+ switch (rc) {
+ case 0:
+ break;
+ case -ENOMEM:
+ pr_err("Allocating memory failed with "
+ "reason code=%d\n", 3);
+ goto out_error;
+ case -EINVAL:
+ pr_err("hvc_iucv_allow= does not specify a valid "
+ "z/VM user ID list\n");
+ goto out_error;
+ case -ENOSPC:
+ pr_err("hvc_iucv_allow= specifies too many "
+ "z/VM user IDs\n");
+ goto out_error;
+ default:
+ goto out_error;
+ }
+ }
+
+ hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
+ sizeof(struct iucv_tty_buffer),
+ 0, 0, NULL);
+ if (!hvc_iucv_buffer_cache) {
+ pr_err("Allocating memory failed with reason code=%d\n", 1);
+ rc = -ENOMEM;
+ goto out_error;
+ }
+
+ hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
+ hvc_iucv_buffer_cache);
+ if (!hvc_iucv_mempool) {
+ pr_err("Allocating memory failed with reason code=%d\n", 2);
+ kmem_cache_destroy(hvc_iucv_buffer_cache);
+ rc = -ENOMEM;
+ goto out_error;
+ }
+
+ /* register the first terminal device as console
+ * (must be done before allocating hvc terminal devices) */
+ rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
+ if (rc) {
+ pr_err("Registering HVC terminal device as "
+ "Linux console failed\n");
+ goto out_error_memory;
+ }
+
+ /* allocate hvc_iucv_private structs */
+ for (i = 0; i < hvc_iucv_devices; i++) {
+ rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
+ if (rc) {
+ pr_err("Creating a new HVC terminal device "
+ "failed with error code=%d\n", rc);
+ goto out_error_hvc;
+ }
+ }
+
+ /* register IUCV callback handler */
+ rc = iucv_register(&hvc_iucv_handler, 0);
+ if (rc) {
+ pr_err("Registering IUCV handlers failed with error code=%d\n",
+ rc);
+ goto out_error_hvc;
+ }
+
+ return 0;
+
+out_error_hvc:
+ for (i = 0; i < hvc_iucv_devices; i++)
+ if (hvc_iucv_table[i])
+ hvc_iucv_destroy(hvc_iucv_table[i]);
+out_error_memory:
+ mempool_destroy(hvc_iucv_mempool);
+ kmem_cache_destroy(hvc_iucv_buffer_cache);
+out_error:
+ if (hvc_iucv_filter)
+ kfree(hvc_iucv_filter);
+ hvc_iucv_devices = 0; /* ensure that we do not provide any device */
+ return rc;
+}
+
+/**
+ * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
+ * @val: Parameter value (numeric)
+ */
+static int __init hvc_iucv_config(char *val)
+{
+ return strict_strtoul(val, 10, &hvc_iucv_devices);
+}
+
+
+device_initcall(hvc_iucv_init);
+__setup("hvc_iucv=", hvc_iucv_config);
+core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
new file mode 100644
index 00000000..61c4a615
--- /dev/null
+++ b/drivers/tty/hvc/hvc_rtas.c
@@ -0,0 +1,133 @@
+/*
+ * IBM RTAS driver interface to hvc_console.c
+ *
+ * (C) Copyright IBM Corporation 2001-2005
+ * (C) Copyright Red Hat, Inc. 2005
+ *
+ * Author(s): Maximino Augilar <IBM STI Design Center>
+ * : Ryan S. Arnold <rsa@us.ibm.com>
+ * : Utz Bacher <utz.bacher@de.ibm.com>
+ * : David Woodhouse <dwmw2@infradead.org>
+ *
+ * inspired by drivers/char/hvc_console.c
+ * written by Anton Blanchard and Paul Mackerras
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#include <asm/irq.h>
+#include <asm/rtas.h>
+#include "hvc_console.h"
+
+#define hvc_rtas_cookie 0x67781e15
+struct hvc_struct *hvc_rtas_dev;
+
+static int rtascons_put_char_token = RTAS_UNKNOWN_SERVICE;
+static int rtascons_get_char_token = RTAS_UNKNOWN_SERVICE;
+
+static inline int hvc_rtas_write_console(uint32_t vtermno, const char *buf,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (rtas_call(rtascons_put_char_token, 1, 1, NULL, buf[i]))
+ break;
+ }
+
+ return i;
+}
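+
+/*
+ * Note: characters are handed to firmware one at a time; the loop stops at
+ * the first failing rtas_call() and returns how many characters were
+ * actually accepted, so the hvc core can treat a short return as a partial
+ * write and resubmit the remainder later.
+ */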
+
+static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
+{
+ int i, c;
+
+ for (i = 0; i < count; i++) {
+ if (rtas_call(rtascons_get_char_token, 0, 2, &c))
+ break;
+
+ buf[i] = c;
+ }
+
+ return i;
+}
+
+static const struct hv_ops hvc_rtas_get_put_ops = {
+ .get_chars = hvc_rtas_read_console,
+ .put_chars = hvc_rtas_write_console,
+};
+
+static int __init hvc_rtas_init(void)
+{
+ struct hvc_struct *hp;
+
+ if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
+ rtascons_put_char_token = rtas_token("put-term-char");
+ if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
+ return -EIO;
+
+ if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
+ rtascons_get_char_token = rtas_token("get-term-char");
+ if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
+ return -EIO;
+
+ BUG_ON(hvc_rtas_dev);
+
+ /* Allocate an hvc_struct for the console device we instantiated
+ * earlier. Save off hp so that we can return it on exit */
+ hp = hvc_alloc(hvc_rtas_cookie, NO_IRQ, &hvc_rtas_get_put_ops, 16);
+ if (IS_ERR(hp))
+ return PTR_ERR(hp);
+
+ hvc_rtas_dev = hp;
+
+ return 0;
+}
+module_init(hvc_rtas_init);
+
+/* This will tear down the tty portion of the driver */
+static void __exit hvc_rtas_exit(void)
+{
+ /* Really the fun isn't over until the worker thread breaks down and
+ * the tty cleans up */
+ if (hvc_rtas_dev)
+ hvc_remove(hvc_rtas_dev);
+}
+module_exit(hvc_rtas_exit);
+
+/* This will happen prior to module init. There is no tty at this time? */
+static int __init hvc_rtas_console_init(void)
+{
+ rtascons_put_char_token = rtas_token("put-term-char");
+ if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
+ return -EIO;
+
+ rtascons_get_char_token = rtas_token("get-term-char");
+ if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
+ return -EIO;
+
+ hvc_instantiate(hvc_rtas_cookie, 0, &hvc_rtas_get_put_ops);
+ add_preferred_console("hvc", 0, NULL);
+
+ return 0;
+}
+console_initcall(hvc_rtas_console_init);
diff --git a/drivers/tty/hvc/hvc_tile.c b/drivers/tty/hvc/hvc_tile.c
new file mode 100644
index 00000000..7a84a059
--- /dev/null
+++ b/drivers/tty/hvc/hvc_tile.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * Tilera TILE Processor hypervisor console
+ */
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#include <hv/hypervisor.h>
+
+#include "hvc_console.h"
+
+static int hvc_tile_put_chars(uint32_t vt, const char *buf, int count)
+{
+ return hv_console_write((HV_VirtAddr)buf, count);
+}
+
+static int hvc_tile_get_chars(uint32_t vt, char *buf, int count)
+{
+ int i, c;
+
+ for (i = 0; i < count; ++i) {
+ c = hv_console_read_if_ready();
+ if (c < 0)
+ break;
+ buf[i] = c;
+ }
+
+ return i;
+}
+
+static const struct hv_ops hvc_tile_get_put_ops = {
+ .get_chars = hvc_tile_get_chars,
+ .put_chars = hvc_tile_put_chars,
+};
+
+static int __init hvc_tile_console_init(void)
+{
+ extern void disable_early_printk(void);
+ hvc_instantiate(0, 0, &hvc_tile_get_put_ops);
+ add_preferred_console("hvc", 0, NULL);
+ disable_early_printk();
+ return 0;
+}
+console_initcall(hvc_tile_console_init);
+
+static int __init hvc_tile_init(void)
+{
+ struct hvc_struct *s;
+ s = hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128);
+ return IS_ERR(s) ? PTR_ERR(s) : 0;
+}
+device_initcall(hvc_tile_init);
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
new file mode 100644
index 00000000..b0957e61
--- /dev/null
+++ b/drivers/tty/hvc/hvc_udbg.c
@@ -0,0 +1,96 @@
+/*
+ * udbg interface to hvc_console.c
+ *
+ * (C) Copyright David Gibson, IBM Corporation 2008.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+
+#include <asm/udbg.h>
+
+#include "hvc_console.h"
+
+struct hvc_struct *hvc_udbg_dev;
+
+static int hvc_udbg_put(uint32_t vtermno, const char *buf, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ udbg_putc(buf[i]);
+
+ return i;
+}
+
+static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
+{
+ int i, c;
+
+ if (!udbg_getc_poll)
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ if ((c = udbg_getc_poll()) == -1)
+ break;
+ buf[i] = c;
+ }
+
+ return i;
+}
+
+static const struct hv_ops hvc_udbg_ops = {
+ .get_chars = hvc_udbg_get,
+ .put_chars = hvc_udbg_put,
+};
+
+static int __init hvc_udbg_init(void)
+{
+ struct hvc_struct *hp;
+
+ BUG_ON(hvc_udbg_dev);
+
+ hp = hvc_alloc(0, NO_IRQ, &hvc_udbg_ops, 16);
+ if (IS_ERR(hp))
+ return PTR_ERR(hp);
+
+ hvc_udbg_dev = hp;
+
+ return 0;
+}
+module_init(hvc_udbg_init);
+
+static void __exit hvc_udbg_exit(void)
+{
+ if (hvc_udbg_dev)
+ hvc_remove(hvc_udbg_dev);
+}
+module_exit(hvc_udbg_exit);
+
+static int __init hvc_udbg_console_init(void)
+{
+ hvc_instantiate(0, 0, &hvc_udbg_ops);
+ add_preferred_console("hvc", 0, NULL);
+
+ return 0;
+}
+console_initcall(hvc_udbg_console_init);
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
new file mode 100644
index 00000000..e6eea148
--- /dev/null
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -0,0 +1,173 @@
+/*
+ * vio driver interface to hvc_console.c
+ *
+ * This code was moved here to allow the remaining code to be reused as a
+ * generic, polling-mode, semi-reliable transport driver core for the
+ * console and tty subsystems.
+ *
+ *
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2004 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * Additional Author(s):
+ * Ryan S. Arnold <rsa@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/hvconsole.h>
+#include <asm/vio.h>
+#include <asm/prom.h>
+#include <asm/firmware.h>
+
+#include "hvc_console.h"
+
+static const char hvc_driver_name[] = "hvc_console";
+
+static struct vio_device_id hvc_driver_table[] __devinitdata = {
+ {"serial", "hvterm1"},
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, hvc_driver_table);
+
+static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
+{
+ unsigned long got;
+ int i;
+
+ /*
+ * Vio firmware will read up to SIZE_VIO_GET_CHARS at its own discretion,
+ * so we play it safe and avoid the situation where got > count, which
+ * could overload the flip buffer.
+ */
+ if (count < SIZE_VIO_GET_CHARS)
+ return -EAGAIN;
+
+ got = hvc_get_chars(vtermno, buf, count);
+
+ /*
+ * Work around a HV bug where it gives us a null
+ * after every \r. -- paulus
+ */
+ for (i = 1; i < got; ++i) {
+ if (buf[i] == 0 && buf[i-1] == '\r') {
+ --got;
+ if (i < got)
+ memmove(&buf[i], &buf[i+1],
+ got - i);
+ }
+ }
+ return got;
+}
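+
+/*
+ * Worked example of the workaround above: if the hypervisor returns the
+ * four bytes { 'a', '\r', '\0', 'b' }, the loop drops the NUL that follows
+ * the '\r', shifts the remaining byte left, and the function returns 3
+ * with the buffer holding { 'a', '\r', 'b' }.
+ */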
+
+static const struct hv_ops hvc_get_put_ops = {
+ .get_chars = filtered_get_chars,
+ .put_chars = hvc_put_chars,
+ .notifier_add = notifier_add_irq,
+ .notifier_del = notifier_del_irq,
+ .notifier_hangup = notifier_hangup_irq,
+};
+
+static int __devinit hvc_vio_probe(struct vio_dev *vdev,
+ const struct vio_device_id *id)
+{
+ struct hvc_struct *hp;
+
+ /* probed with invalid parameters. */
+ if (!vdev || !id)
+ return -EPERM;
+
+ hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops,
+ MAX_VIO_PUT_CHARS);
+ if (IS_ERR(hp))
+ return PTR_ERR(hp);
+ dev_set_drvdata(&vdev->dev, hp);
+
+ return 0;
+}
+
+static int __devexit hvc_vio_remove(struct vio_dev *vdev)
+{
+ struct hvc_struct *hp = dev_get_drvdata(&vdev->dev);
+
+ return hvc_remove(hp);
+}
+
+static struct vio_driver hvc_vio_driver = {
+ .id_table = hvc_driver_table,
+ .probe = hvc_vio_probe,
+ .remove = __devexit_p(hvc_vio_remove),
+ .driver = {
+ .name = hvc_driver_name,
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init hvc_vio_init(void)
+{
+ int rc;
+
+ if (firmware_has_feature(FW_FEATURE_ISERIES))
+ return -EIO;
+
+ /* Register as a vio device to receive callbacks */
+ rc = vio_register_driver(&hvc_vio_driver);
+
+ return rc;
+}
+module_init(hvc_vio_init); /* after drivers/char/hvc_console.c */
+
+static void __exit hvc_vio_exit(void)
+{
+ vio_unregister_driver(&hvc_vio_driver);
+}
+module_exit(hvc_vio_exit);
+
+/* the device tree order defines our numbering */
+static int hvc_find_vtys(void)
+{
+ struct device_node *vty;
+ int num_found = 0;
+
+ for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL;
+ vty = of_find_node_by_name(vty, "vty")) {
+ const uint32_t *vtermno;
+
+ /* We have statically defined space for only a certain number
+ * of console adapters.
+ */
+ if (num_found >= MAX_NR_HVC_CONSOLES) {
+ of_node_put(vty);
+ break;
+ }
+
+ vtermno = of_get_property(vty, "reg", NULL);
+ if (!vtermno)
+ continue;
+
+ if (of_device_is_compatible(vty, "hvterm1")) {
+ hvc_instantiate(*vtermno, num_found, &hvc_get_put_ops);
+ ++num_found;
+ }
+ }
+
+ return num_found;
+}
+console_initcall(hvc_find_vtys);
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
new file mode 100644
index 00000000..52fdf60b
--- /dev/null
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -0,0 +1,273 @@
+/*
+ * xen console driver interface to hvc_console.c
+ *
+ * (c) 2007 Gerd Hoffmann <kraxel@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
+#include <xen/page.h>
+#include <xen/events.h>
+#include <xen/interface/io/console.h>
+#include <xen/hvc-console.h>
+
+#include "hvc_console.h"
+
+#define HVC_COOKIE 0x58656e /* "Xen" in hex */
+
+static struct hvc_struct *hvc;
+static int xencons_irq;
+
+/* ------------------------------------------------------------------ */
+
+static unsigned long console_pfn = ~0ul;
+
+static inline struct xencons_interface *xencons_interface(void)
+{
+ if (console_pfn == ~0ul)
+ return mfn_to_virt(xen_start_info->console.domU.mfn);
+ else
+ return __va(console_pfn << PAGE_SHIFT);
+}
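+
+/*
+ * Note: console_pfn stays ~0ul until xen_hvc_init() has run, so very early
+ * output maps the interface page through the machine frame number from
+ * xen_start_info; afterwards the pseudo-physical frame saved in
+ * xen_hvc_init() is used instead (presumably so the mapping stays valid
+ * across save/restore, when machine frame numbers can change).
+ */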
+
+static inline void notify_daemon(void)
+{
+ /* Use evtchn: this is called early, before irq is set up. */
+ notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
+}
+
+static int __write_console(const char *data, int len)
+{
+ struct xencons_interface *intf = xencons_interface();
+ XENCONS_RING_IDX cons, prod;
+ int sent = 0;
+
+ cons = intf->out_cons;
+ prod = intf->out_prod;
+ mb(); /* update queue values before going on */
+ BUG_ON((prod - cons) > sizeof(intf->out));
+
+ while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
+ intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
+
+ wmb(); /* write ring before updating pointer */
+ intf->out_prod = prod;
+
+ if (sent)
+ notify_daemon();
+ return sent;
+}
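+
+/*
+ * Note: out_prod and out_cons are free-running indices, so (prod - cons)
+ * is the number of bytes currently queued and MASK_XENCONS_IDX() wraps an
+ * index into the fixed-size out[] array. With an (illustrative) 2048-byte
+ * ring, an index of 2050 maps to slot 2; the real size is sizeof(intf->out).
+ */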
+
+static int domU_write_console(uint32_t vtermno, const char *data, int len)
+{
+ int ret = len;
+
+ /*
+ * Make sure the whole buffer is emitted, polling if
+ * necessary. We don't ever want to rely on the hvc daemon
+ * because the most interesting console output is when the
+ * kernel is crippled.
+ */
+ while (len) {
+ int sent = __write_console(data, len);
+
+ data += sent;
+ len -= sent;
+
+ if (unlikely(len))
+ HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
+ }
+
+ return ret;
+}
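+
+/*
+ * Note: if the ring is full, __write_console() returns a short count and
+ * the loop above busy-polls, yielding the CPU to the hypervisor via
+ * SCHEDOP_yield rather than sleeping, so console output still makes
+ * progress even when interrupts and the scheduler are unusable.
+ */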
+
+static int domU_read_console(uint32_t vtermno, char *buf, int len)
+{
+ struct xencons_interface *intf = xencons_interface();
+ XENCONS_RING_IDX cons, prod;
+ int recv = 0;
+
+ cons = intf->in_cons;
+ prod = intf->in_prod;
+ mb(); /* get pointers before reading ring */
+ BUG_ON((prod - cons) > sizeof(intf->in));
+
+ while (cons != prod && recv < len)
+ buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
+
+ mb(); /* read ring before consuming */
+ intf->in_cons = cons;
+
+ notify_daemon();
+ return recv;
+}
+
+static struct hv_ops domU_hvc_ops = {
+ .get_chars = domU_read_console,
+ .put_chars = domU_write_console,
+ .notifier_add = notifier_add_irq,
+ .notifier_del = notifier_del_irq,
+ .notifier_hangup = notifier_hangup_irq,
+};
+
+static int dom0_read_console(uint32_t vtermno, char *buf, int len)
+{
+ return HYPERVISOR_console_io(CONSOLEIO_read, len, buf);
+}
+
+/*
+ * Either for a dom0 to write to the system console, or a domU with a
+ * debug version of Xen
+ */
+static int dom0_write_console(uint32_t vtermno, const char *str, int len)
+{
+ int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)str);
+ if (rc < 0)
+ return 0;
+
+ return len;
+}
+
+static struct hv_ops dom0_hvc_ops = {
+ .get_chars = dom0_read_console,
+ .put_chars = dom0_write_console,
+ .notifier_add = notifier_add_irq,
+ .notifier_del = notifier_del_irq,
+ .notifier_hangup = notifier_hangup_irq,
+};
+
+static int __init xen_hvc_init(void)
+{
+ struct hvc_struct *hp;
+ struct hv_ops *ops;
+
+ if (!xen_pv_domain())
+ return -ENODEV;
+
+ if (xen_initial_domain()) {
+ ops = &dom0_hvc_ops;
+ xencons_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
+ } else {
+ if (!xen_start_info->console.domU.evtchn)
+ return -ENODEV;
+
+ ops = &domU_hvc_ops;
+ xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
+ }
+ if (xencons_irq < 0)
+ xencons_irq = 0; /* NO_IRQ */
+ else
+ irq_set_noprobe(xencons_irq);
+
+ hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256);
+ if (IS_ERR(hp))
+ return PTR_ERR(hp);
+
+ hvc = hp;
+
+ console_pfn = mfn_to_pfn(xen_start_info->console.domU.mfn);
+
+ return 0;
+}
+
+void xen_console_resume(void)
+{
+ if (xencons_irq)
+ rebind_evtchn_irq(xen_start_info->console.domU.evtchn, xencons_irq);
+}
+
+static void __exit xen_hvc_fini(void)
+{
+ if (hvc)
+ hvc_remove(hvc);
+}
+
+static int xen_cons_init(void)
+{
+ struct hv_ops *ops;
+
+ if (!xen_pv_domain())
+ return 0;
+
+ if (xen_initial_domain())
+ ops = &dom0_hvc_ops;
+ else
+ ops = &domU_hvc_ops;
+
+ hvc_instantiate(HVC_COOKIE, 0, ops);
+ return 0;
+}
+
+module_init(xen_hvc_init);
+module_exit(xen_hvc_fini);
+console_initcall(xen_cons_init);
+
+#ifdef CONFIG_EARLY_PRINTK
+static void xenboot_write_console(struct console *console, const char *string,
+ unsigned len)
+{
+ unsigned int linelen, off = 0;
+ const char *pos;
+
+ dom0_write_console(0, string, len);
+
+ if (xen_initial_domain())
+ return;
+
+ domU_write_console(0, "(early) ", 8);
+ while (off < len && NULL != (pos = strchr(string+off, '\n'))) {
+ linelen = pos - (string + off);
+ if (off + linelen > len)
+ break;
+ domU_write_console(0, string+off, linelen);
+ domU_write_console(0, "\r\n", 2);
+ off += linelen + 1;
+ }
+ if (off < len)
+ domU_write_console(0, string+off, len-off);
+}
+
+struct console xenboot_console = {
+ .name = "xenboot",
+ .write = xenboot_write_console,
+ .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
+};
+#endif /* CONFIG_EARLY_PRINTK */
+
+void xen_raw_console_write(const char *str)
+{
+ dom0_write_console(0, str, strlen(str));
+}
+
+void xen_raw_printk(const char *fmt, ...)
+{
+ static char buf[512];
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ xen_raw_console_write(buf);
+}
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
new file mode 100644
index 00000000..4c8b6654
--- /dev/null
+++ b/drivers/tty/hvc/hvcs.c
@@ -0,0 +1,1616 @@
+/*
+ * IBM eServer Hypervisor Virtual Console Server Device Driver
+ * Copyright (C) 2003, 2004 IBM Corp.
+ * Ryan S. Arnold (rsa@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author(s) : Ryan S. Arnold <rsa@us.ibm.com>
+ *
+ * This is the device driver for the IBM Hypervisor Virtual Console Server,
+ * "hvcs". The IBM hvcs provides a tty driver interface to allow Linux
+ * user space applications access to the system consoles of logically
+ * partitioned operating systems, e.g. Linux, running on the same partitioned
+ * Power5 ppc64 system. Physical hardware consoles per partition are not
+ * practical on this hardware so system consoles are accessed by this driver
+ * using inter-partition firmware interfaces to virtual terminal devices.
+ *
+ * A vty is known to the HMC as a "virtual serial server adapter". It is a
+ * virtual terminal device that is created by firmware upon partition creation
+ * to act as a partitioned OS's console device.
+ *
+ * Firmware dynamically (via hotplug) exposes vty-servers to a running ppc64
+ * Linux system upon their creation by the HMC or their exposure during boot.
+ * The non-user interactive backend of this driver is implemented as a vio
+ * device driver so that it can receive notification of vty-server lifetimes
+ * after it registers with the vio bus to handle vty-server probe and remove
+ * callbacks.
+ *
+ * Many vty-servers can be configured to connect to one vty, but a vty can
+ * only be actively connected to by a single vty-server, in any manner, at one
+ * time. If the HMC is currently hosting the console for a target Linux
+ * partition, attempts to open the tty device to the partition's console using
+ * the hvcs on any partition will return -EBUSY with every open attempt until
+ * the HMC frees the connection between its vty-server and the desired
+ * partition's vty device. Conversely, a vty-server may only be connected to
+ * a single vty at one time even though it may have several configured vty
+ * partner possibilities.
+ *
+ * Firmware does not provide notification of vty partner changes to this
+ * driver. This means that an HMC Super Admin may add or remove partner vtys
+ * from a vty-server's partner list but the changes will not be signaled to
+ * the vty-server. Firmware only notifies the driver when a vty-server is
+ * added or removed from the system. To compensate for this deficiency, this
+ * driver implements a sysfs update attribute which provides a method for
+ * rescanning partner information upon a user's request.
+ *
+ * Each vty-server, prior to being exposed to this driver is reference counted
+ * using the 2.6 Linux kernel kref construct.
+ *
+ * For direction on installation and usage of this driver please reference
+ * Documentation/powerpc/hvcs.txt.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <asm/hvconsole.h>
+#include <asm/hvcserver.h>
+#include <asm/uaccess.h>
+#include <asm/vio.h>
+
+/*
+ * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
+ * Removed braces around single statements following conditionals. Removed '=
+ * 0' after static int declarations since these default to zero. Removed
+ * list_for_each_safe() and replaced with list_for_each_entry() in
+ * hvcs_get_by_index(). The 'safe' version is un-needed now that the driver is
+ * using spinlocks. Changed spin_lock_irqsave() to spin_lock() when locking
+ * hvcs_structs_lock and hvcs_pi_lock since these are not touched in an int
+ * handler. Initialized hvcs_structs_lock and hvcs_pi_lock to
+ * SPIN_LOCK_UNLOCKED at declaration time rather than in hvcs_module_init().
+ * Added spin_lock around list_del() in destroy_hvcs_struct() to protect the
+ * list traversals from a deletion. Removed '= NULL' from pointer declaration
+ * statements since they are initialized NULL by default. Removed wmb()
+ * instances from hvcs_try_write(). They probably aren't needed with locking in
+ * place. Added check and cleanup for hvcs_pi_buff = kmalloc() in
+ * hvcs_module_init(). Exposed hvcs_struct.index via a sysfs attribute so that
+ * the coupling between /dev/hvcs* and a vty-server can be automatically
+ * determined. Moved kobject_put() in hvcs_open outside of the
+ * spin_unlock_irqrestore().
+ *
+ * 1.3.1 -> 1.3.2 Changed method for determining hvcs_struct->index and had it
+ * align with how the tty layer always assigns the lowest index available. This
+ * change resulted in a list of ints that denotes which indexes are available.
+ * Device additions and removals use the new hvcs_get_index() and
+ * hvcs_return_index() helper functions. The list is created with
+ * hvcs_alloc_index_list() and it is destroyed with hvcs_free_index_list().
+ * Without these fixes hotplug vty-server adapter support goes crazy with this
+ * driver if the user removes a vty-server adapter. Moved free_irq() outside of
+ * the hvcs_final_close() function in order to get it out of the spinlock.
+ * Rearranged hvcs_close(). Cleaned up some printks and did some housekeeping
+ * on the changelog. Removed local CLC_LENGTH and used HVCS_CLC_LENGTH from
+ * arch/powerpc/include/asm/hvcserver.h
+ *
+ * 1.3.2 -> 1.3.3 Replaced yield() in hvcs_close() with tty_wait_until_sent() to
+ * prevent possible lockup with realtime scheduling as similarly pointed out by
+ * akpm in hvc_console. This change resulted in the removal of hvcs_final_close()
+ * to reorder cleanup operations and prevent discarding of pending data during
+ * an hvcs_close(). Removed spinlock protection of hvcs_struct data members in
+ * hvcs_write_room() and hvcs_chars_in_buffer() because they aren't needed.
+ */
+
+#define HVCS_DRIVER_VERSION "1.3.3"
+
+MODULE_AUTHOR("Ryan S. Arnold <rsa@us.ibm.com>");
+MODULE_DESCRIPTION("IBM hvcs (Hypervisor Virtual Console Server) Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(HVCS_DRIVER_VERSION);
+
+/*
+ * Wait this long per iteration while trying to push buffered data to the
+ * hypervisor before allowing the tty to complete a close operation.
+ */
+#define HVCS_CLOSE_WAIT (HZ/100) /* 1/100 of a second */
+
+/*
+ * Since the Linux TTY code does not currently (2-04-2004) support dynamic
+ * addition of tty derived devices and we shouldn't allocate thousands of
+ * tty_device pointers when the number of vty-server & vty partner connections
+ * will most often be much lower than this, we'll arbitrarily allocate
+ * HVCS_DEFAULT_SERVER_ADAPTERS tty_structs and cdev's by default when we
+ * register the tty_driver. This can be overridden using an insmod parameter.
+ */
+#define HVCS_DEFAULT_SERVER_ADAPTERS 64
+
+/*
+ * The user can't insmod with more than HVCS_MAX_SERVER_ADAPTERS hvcs device
+ * nodes as a sanity check. Theoretically there can be over 1 Billion
+ * vty-server & vty partner connections.
+ */
+#define HVCS_MAX_SERVER_ADAPTERS 1024
+
+/*
+ * We let Linux assign us a major number and we start the minors at zero. There
+ * is no intuitive mapping between minor number and the target vty-server
+ * adapter except that each new vty-server adapter is always assigned to the
+ * smallest minor number available.
+ */
+#define HVCS_MINOR_START 0
+
+/*
+ * The hcall interface involves putting 8 chars into each of two registers.
+ * We load up those 2 registers (in arch/powerpc/platforms/pseries/hvconsole.c)
+ * by casting char[16] to long[2]. It would work without __ALIGNED__, but a
+ * little (tiny) bit slower because an unaligned load is slower than an aligned
+ * load.
+ */
+#define __ALIGNED__ __attribute__((__aligned__(8)))
+
+/*
+ * How much data can firmware send with each hvc_put_chars()? Maybe this
+ * should be moved into an architecture specific area.
+ */
+#define HVCS_BUFF_LEN 16
+
+/*
+ * This is the maximum amount of data we'll let the user send us (hvcs_write) at
+ * once in a chunk as a sanity check.
+ */
+#define HVCS_MAX_FROM_USER 4096
+
+/*
+ * Be careful when adding flags to this line discipline. Don't add anything
+ * that will cause echoing or we'll go into recursive loop echoing chars back
+ * and forth with the console drivers.
+ */
+static struct ktermios hvcs_tty_termios = {
+ .c_iflag = IGNBRK | IGNPAR,
+ .c_oflag = OPOST,
+ .c_cflag = B38400 | CS8 | CREAD | HUPCL,
+ .c_cc = INIT_C_CC,
+ .c_ispeed = 38400,
+ .c_ospeed = 38400
+};
+
+/*
+ * This value is used to take the place of a command line parameter when the
+ * module is inserted. It starts as -1 and stays as such if the user doesn't
+ * specify a module insmod parameter. If they DO specify one then it is set to
+ * the value of the integer passed in.
+ */
+static int hvcs_parm_num_devs = -1;
+module_param(hvcs_parm_num_devs, int, 0);
+
+static const char hvcs_driver_name[] = "hvcs";
+static const char hvcs_device_node[] = "hvcs";
+static const char hvcs_driver_string[]
+ = "IBM hvcs (Hypervisor Virtual Console Server) Driver";
+
+/* Status of partner info rescan triggered via sysfs. */
+static int hvcs_rescan_status;
+
+static struct tty_driver *hvcs_tty_driver;
+
+/*
+ * In order to be somewhat sane this driver always associates the hvcs_struct
+ * index element with the numerically equal tty->index. This means that a
+ * hotplugged vty-server adapter will always map to the lowest index valued
+ * device node. If vty-servers were hotplug removed from the system and then
+ * new ones added the new vty-server may have the largest slot number of all
+ * the vty-server adapters in the partition but it may have the lowest dev node
+ * index of all the adapters due to the hole left by the hotplug removed
+ * adapter. There are a set of functions provided to get the lowest index for
+ * a new device as well as return the index to the list. This list is allocated
+ * with a number of elements equal to the number of device nodes requested when
+ * the module was inserted.
+ */
+static int *hvcs_index_list;
+
+/*
+ * How large is the list? This is kept for traversal since the list is
+ * dynamically created.
+ */
+static int hvcs_index_count;
+
+/*
+ * Used by the khvcsd to pick up I/O operations when the kernel_thread is
+ * already awake but potentially shifted to TASK_INTERRUPTIBLE state.
+ */
+static int hvcs_kicked;
+
+/*
+ * Used by the kthread construct for task operations like waking the sleeping
+ * thread and stopping the kthread.
+ */
+static struct task_struct *hvcs_task;
+
+/*
+ * We allocate this for the use of all of the hvcs_structs when they fetch
+ * partner info.
+ */
+static unsigned long *hvcs_pi_buff;
+
+/* Only allow one hvcs_struct to use the hvcs_pi_buff at a time. */
+static DEFINE_SPINLOCK(hvcs_pi_lock);
+
+/* One vty-server per hvcs_struct */
+struct hvcs_struct {
+ spinlock_t lock;
+
+ /*
+ * This index identifies this hvcs device as the complement to a
+ * specific tty index.
+ */
+ unsigned int index;
+
+ struct tty_struct *tty;
+ int open_count;
+
+ /*
+ * Used to tell the driver kernel_thread what operations need to take
+ * place upon this hvcs_struct instance.
+ */
+ int todo_mask;
+
+ /*
+ * This buffer is required so that when hvcs_write_room() reports room
+ * for HVCS_BUFF_LEN characters, the driver really will buffer the full
+ * HVCS_BUFF_LEN characters if need be. This is essential for opost
+ * writes since they do not do high level buffering and expect to be
+ * able to send everything the driver has committed to accepting,
+ * including expansions such as tab to space conversions in n_tty.c opost().
+ */
+ char buffer[HVCS_BUFF_LEN];
+ int chars_in_buffer;
+
+ /*
+ * Any variable below the kref is valid before a tty is connected and
+ * stays valid after the tty is disconnected. These shouldn't be
+ * whacked until the kobject refcount reaches zero though some entries
+ * may be changed via sysfs initiatives.
+ */
+ struct kref kref; /* ref count & hvcs_struct lifetime */
+ int connected; /* is the vty-server currently connected to a vty? */
+ uint32_t p_unit_address; /* partner unit address */
+ uint32_t p_partition_ID; /* partner partition ID */
+ char p_location_code[HVCS_CLC_LENGTH + 1]; /* CLC + Null Term */
+ struct list_head next; /* list management */
+ struct vio_dev *vdev;
+};
+
+/* Required to back map a kref to its containing object */
+#define from_kref(k) container_of(k, struct hvcs_struct, kref)
+
+static LIST_HEAD(hvcs_structs);
+static DEFINE_SPINLOCK(hvcs_structs_lock);
+static DEFINE_MUTEX(hvcs_init_mutex);
+
+static void hvcs_unthrottle(struct tty_struct *tty);
+static void hvcs_throttle(struct tty_struct *tty);
+static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance);
+
+static int hvcs_write(struct tty_struct *tty,
+ const unsigned char *buf, int count);
+static int hvcs_write_room(struct tty_struct *tty);
+static int hvcs_chars_in_buffer(struct tty_struct *tty);
+
+static int hvcs_has_pi(struct hvcs_struct *hvcsd);
+static void hvcs_set_pi(struct hvcs_partner_info *pi,
+ struct hvcs_struct *hvcsd);
+static int hvcs_get_pi(struct hvcs_struct *hvcsd);
+static int hvcs_rescan_devices_list(void);
+
+static int hvcs_partner_connect(struct hvcs_struct *hvcsd);
+static void hvcs_partner_free(struct hvcs_struct *hvcsd);
+
+static int hvcs_enable_device(struct hvcs_struct *hvcsd,
+ uint32_t unit_address, unsigned int irq, struct vio_dev *dev);
+
+static int hvcs_open(struct tty_struct *tty, struct file *filp);
+static void hvcs_close(struct tty_struct *tty, struct file *filp);
+static void hvcs_hangup(struct tty_struct * tty);
+
+static int __devinit hvcs_probe(struct vio_dev *dev,
+ const struct vio_device_id *id);
+static int __devexit hvcs_remove(struct vio_dev *dev);
+static int __init hvcs_module_init(void);
+static void __exit hvcs_module_exit(void);
+static int __devinit hvcs_initialize(void);
+
+#define HVCS_SCHED_READ 0x00000001
+#define HVCS_QUICK_READ 0x00000002
+#define HVCS_TRY_WRITE 0x00000004
+#define HVCS_READ_MASK (HVCS_SCHED_READ | HVCS_QUICK_READ)
+
+static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
+{
+ return dev_get_drvdata(&viod->dev);
+}
+/* The sysfs interface for the driver and devices */
+
+static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct vio_dev *viod = to_vio_dev(dev);
+ struct hvcs_struct *hvcsd = from_vio_dev(viod);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return retval;
+}
+static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);
+
+static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct vio_dev *viod = to_vio_dev(dev);
+ struct hvcs_struct *hvcsd = from_vio_dev(viod);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return retval;
+}
+static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);
+
+static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf,
+ size_t count)
+{
+ /*
+ * Don't need this feature at the present time because firmware doesn't
+ * yet support multiple partners.
+ */
+ printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n");
+ return -EPERM;
+}
+
+static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct vio_dev *viod = to_vio_dev(dev);
+ struct hvcs_struct *hvcsd = from_vio_dev(viod);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return retval;
+}
+
+static DEVICE_ATTR(current_vty,
+ S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store);
+
+static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct vio_dev *viod = to_vio_dev(dev);
+ struct hvcs_struct *hvcsd = from_vio_dev(viod);
+ unsigned long flags;
+
+ /* writing a '0' to this sysfs entry will result in the disconnect. */
+ if (simple_strtol(buf, NULL, 0) != 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+
+ if (hvcsd->open_count > 0) {
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ printk(KERN_INFO "HVCS: vterm state unchanged. "
+ "The hvcs device node is still in use.\n");
+ return -EPERM;
+ }
+
+ if (hvcsd->connected == 0) {
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ printk(KERN_INFO "HVCS: vterm state unchanged. The"
+ " vty-server is not connected to a vty.\n");
+ return -EPERM;
+ }
+
+ hvcs_partner_free(hvcsd);
+ printk(KERN_INFO "HVCS: Closed vty-server@%X and"
+ " partner vty@%X:%d connection.\n",
+ hvcsd->vdev->unit_address,
+ hvcsd->p_unit_address,
+ (uint32_t)hvcsd->p_partition_ID);
+
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return count;
+}
+
+static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct vio_dev *viod = to_vio_dev(dev);
+ struct hvcs_struct *hvcsd = from_vio_dev(viod);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ retval = sprintf(buf, "%d\n", hvcsd->connected);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return retval;
+}
+static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR,
+ hvcs_vterm_state_show, hvcs_vterm_state_store);
+
+static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct vio_dev *viod = to_vio_dev(dev);
+ struct hvcs_struct *hvcsd = from_vio_dev(viod);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ retval = sprintf(buf, "%d\n", hvcsd->index);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return retval;
+}
+
+static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL);
+
+static struct attribute *hvcs_attrs[] = {
+ &dev_attr_partner_vtys.attr,
+ &dev_attr_partner_clcs.attr,
+ &dev_attr_current_vty.attr,
+ &dev_attr_vterm_state.attr,
+ &dev_attr_index.attr,
+ NULL,
+};
+
+static struct attribute_group hvcs_attr_group = {
+ .attrs = hvcs_attrs,
+};
+
+static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf)
+{
+ /* A 1 means it is updating, a 0 means it is done updating */
+ return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
+}
+
+static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
+ size_t count)
+{
+ if ((simple_strtol(buf, NULL, 0) != 1)
+ && (hvcs_rescan_status != 0))
+ return -EINVAL;
+
+ hvcs_rescan_status = 1;
+ printk(KERN_INFO "HVCS: rescanning partner info for all"
+ " vty-servers.\n");
+ hvcs_rescan_devices_list();
+ hvcs_rescan_status = 0;
+ return count;
+}
+
+static DRIVER_ATTR(rescan,
+ S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store);
+
+static void hvcs_kick(void)
+{
+ hvcs_kicked = 1;
+ wmb();
+ wake_up_process(hvcs_task);
+}
+
+static void hvcs_unthrottle(struct tty_struct *tty)
+{
+ struct hvcs_struct *hvcsd = tty->driver_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ hvcsd->todo_mask |= HVCS_SCHED_READ;
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ hvcs_kick();
+}
+
+static void hvcs_throttle(struct tty_struct *tty)
+{
+ struct hvcs_struct *hvcsd = tty->driver_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ vio_disable_interrupts(hvcsd->vdev);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+}
+
+/*
+ * If the device is being removed we don't have to worry about this interrupt
+ * handler taking any further interrupts because they are disabled which means
+ * the hvcs_struct will always be valid in this handler.
+ */
+static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance)
+{
+ struct hvcs_struct *hvcsd = dev_instance;
+
+ spin_lock(&hvcsd->lock);
+ vio_disable_interrupts(hvcsd->vdev);
+ hvcsd->todo_mask |= HVCS_SCHED_READ;
+ spin_unlock(&hvcsd->lock);
+ hvcs_kick();
+
+ return IRQ_HANDLED;
+}
+
+/* This function must be called with the hvcsd->lock held */
+static void hvcs_try_write(struct hvcs_struct *hvcsd)
+{
+ uint32_t unit_address = hvcsd->vdev->unit_address;
+ struct tty_struct *tty = hvcsd->tty;
+ int sent;
+
+ if (hvcsd->todo_mask & HVCS_TRY_WRITE) {
+ /* won't send partial writes */
+ sent = hvc_put_chars(unit_address,
+ &hvcsd->buffer[0],
+ hvcsd->chars_in_buffer );
+ if (sent > 0) {
+ hvcsd->chars_in_buffer = 0;
+ /* wmb(); */
+ hvcsd->todo_mask &= ~(HVCS_TRY_WRITE);
+ /* wmb(); */
+
+ /*
+ * We are still obligated to deliver the data to the
+ * hypervisor even if the tty has been closed because
+ * we committed to delivering it. But don't try to wake
+ * a non-existent tty.
+ */
+ if (tty) {
+ tty_wakeup(tty);
+ }
+ }
+ }
+}
+
+static int hvcs_io(struct hvcs_struct *hvcsd)
+{
+ uint32_t unit_address;
+ struct tty_struct *tty;
+ char buf[HVCS_BUFF_LEN] __ALIGNED__;
+ unsigned long flags;
+ int got = 0;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+
+ unit_address = hvcsd->vdev->unit_address;
+ tty = hvcsd->tty;
+
+ hvcs_try_write(hvcsd);
+
+ if (!tty || test_bit(TTY_THROTTLED, &tty->flags)) {
+ hvcsd->todo_mask &= ~(HVCS_READ_MASK);
+ goto bail;
+ } else if (!(hvcsd->todo_mask & (HVCS_READ_MASK)))
+ goto bail;
+
+ /* remove the read masks */
+ hvcsd->todo_mask &= ~(HVCS_READ_MASK);
+
+ if (tty_buffer_request_room(tty, HVCS_BUFF_LEN) >= HVCS_BUFF_LEN) {
+ got = hvc_get_chars(unit_address,
+ &buf[0],
+ HVCS_BUFF_LEN);
+ tty_insert_flip_string(tty, buf, got);
+ }
+
+ /* Give the TTY time to process the data we just sent. */
+ if (got)
+ hvcsd->todo_mask |= HVCS_QUICK_READ;
+
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ /* This push is synchronous because tty->low_latency == 1 */
+ if (got)
+ tty_flip_buffer_push(tty);
+
+ if (!got) {
+ /* Do this _after_ the flip_buffer_push */
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ vio_enable_interrupts(hvcsd->vdev);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ }
+
+ return hvcsd->todo_mask;
+
+ bail:
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return hvcsd->todo_mask;
+}
+
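+/*
+ * The driver's single kernel thread. Each iteration walks every hvcs_struct
+ * and runs hvcs_io() on it; if any device still has a pending write or quick
+ * read the thread yields and loops again, otherwise it sleeps until
+ * hvcs_kick() wakes it.
+ */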
+static int khvcsd(void *unused)
+{
+ struct hvcs_struct *hvcsd;
+ int hvcs_todo_mask;
+
+ __set_current_state(TASK_RUNNING);
+
+ do {
+ hvcs_todo_mask = 0;
+ hvcs_kicked = 0;
+ wmb();
+
+ spin_lock(&hvcs_structs_lock);
+ list_for_each_entry(hvcsd, &hvcs_structs, next) {
+ hvcs_todo_mask |= hvcs_io(hvcsd);
+ }
+ spin_unlock(&hvcs_structs_lock);
+
+ /*
+ * If any of the hvcs adapters want to try a write or quick read,
+ * don't schedule(); yield a smidgen, then run the hvcs_io pass
+ * again for those that want the write.
+ */
+ if (hvcs_todo_mask & (HVCS_TRY_WRITE | HVCS_QUICK_READ)) {
+ yield();
+ continue;
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!hvcs_kicked)
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static struct vio_device_id hvcs_driver_table[] __devinitdata= {
+ {"serial-server", "hvterm2"},
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, hvcs_driver_table);
+
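+/*
+ * hvcs_index_list maps tty indexes to vty-server adapters: -1 marks a free
+ * slot, any other value marks an allocated one. hvcs_get_index() hands out
+ * the numerically lowest free slot and hvcs_return_index() releases it.
+ */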
+static void hvcs_return_index(int index)
+{
+ /* Paranoia check */
+ if (!hvcs_index_list)
+ return;
+ if (index < 0 || index >= hvcs_index_count)
+ return;
+ if (hvcs_index_list[index] == -1)
+ return;
+ else
+ hvcs_index_list[index] = -1;
+}
+
+/* callback when the kref ref count reaches zero */
+static void destroy_hvcs_struct(struct kref *kref)
+{
+ struct hvcs_struct *hvcsd = from_kref(kref);
+ struct vio_dev *vdev;
+ unsigned long flags;
+
+ spin_lock(&hvcs_structs_lock);
+ spin_lock_irqsave(&hvcsd->lock, flags);
+
+ /* the list_del poisons the pointers */
+ list_del(&(hvcsd->next));
+
+ if (hvcsd->connected == 1) {
+ hvcs_partner_free(hvcsd);
+ printk(KERN_INFO "HVCS: Closed vty-server@%X and"
+ " partner vty@%X:%d connection.\n",
+ hvcsd->vdev->unit_address,
+ hvcsd->p_unit_address,
+ (uint32_t)hvcsd->p_partition_ID);
+ }
+ printk(KERN_INFO "HVCS: Destroyed hvcs_struct for vty-server@%X.\n",
+ hvcsd->vdev->unit_address);
+
+ vdev = hvcsd->vdev;
+ hvcsd->vdev = NULL;
+
+ hvcsd->p_unit_address = 0;
+ hvcsd->p_partition_ID = 0;
+ hvcs_return_index(hvcsd->index);
+ memset(&hvcsd->p_location_code[0], 0x00, HVCS_CLC_LENGTH + 1);
+
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ spin_unlock(&hvcs_structs_lock);
+
+ sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group);
+
+ kfree(hvcsd);
+}
+
+static int hvcs_get_index(void)
+{
+ int i;
+ /* Paranoia check */
+ if (!hvcs_index_list) {
+ printk(KERN_ERR "HVCS: hvcs_index_list NOT valid!.\n");
+ return -EFAULT;
+ }
+ /* Find the numerically lowest first free index. */
+ for (i = 0; i < hvcs_index_count; i++) {
+ if (hvcs_index_list[i] == -1) {
+ hvcs_index_list[i] = 0;
+ return i;
+ }
+ }
+ return -1;
+}
+
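+/*
+ * vio bus probe: make sure the core driver state is initialized, claim a tty
+ * index, allocate and register the per-device hvcs_struct and create its
+ * sysfs attribute group. Interrupts are deliberately left disabled until a
+ * user actually opens the corresponding tty.
+ */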
+static int __devinit hvcs_probe(
+ struct vio_dev *dev,
+ const struct vio_device_id *id)
+{
+ struct hvcs_struct *hvcsd;
+ int index, rc;
+ int retval;
+
+ if (!dev || !id) {
+ printk(KERN_ERR "HVCS: probed with invalid parameter.\n");
+ return -EPERM;
+ }
+
+ /* Make sure we are properly initialized */
+ rc = hvcs_initialize();
+ if (rc) {
+ pr_err("HVCS: Failed to initialize core driver.\n");
+ return rc;
+ }
+
+ /* early to avoid cleanup on failure */
+ index = hvcs_get_index();
+ if (index < 0) {
+ return -EFAULT;
+ }
+
+ hvcsd = kzalloc(sizeof(*hvcsd), GFP_KERNEL);
+ if (!hvcsd)
+ return -ENODEV;
+
+
+ spin_lock_init(&hvcsd->lock);
+ /* Automatically incs the refcount the first time */
+ kref_init(&hvcsd->kref);
+
+ hvcsd->vdev = dev;
+ dev_set_drvdata(&dev->dev, hvcsd);
+
+ hvcsd->index = index;
+
+ /* hvcsd->index = ++hvcs_struct_count; */
+ hvcsd->chars_in_buffer = 0;
+ hvcsd->todo_mask = 0;
+ hvcsd->connected = 0;
+
+ /*
+ * This will populate the hvcs_struct's partner info fields for the
+ * first time.
+ */
+ if (hvcs_get_pi(hvcsd)) {
+ printk(KERN_ERR "HVCS: Failed to fetch partner"
+ " info for vty-server@%X on device probe.\n",
+ hvcsd->vdev->unit_address);
+ }
+
+ /*
+ * If a user app opens a tty that corresponds to this vty-server before
+ * the hvcs_struct has been added to the devices list then the user app
+ * will get -ENODEV.
+ */
+ spin_lock(&hvcs_structs_lock);
+ list_add_tail(&(hvcsd->next), &hvcs_structs);
+ spin_unlock(&hvcs_structs_lock);
+
+ retval = sysfs_create_group(&dev->dev.kobj, &hvcs_attr_group);
+ if (retval) {
+ printk(KERN_ERR "HVCS: Can't create sysfs attrs for vty-server@%X\n",
+ hvcsd->vdev->unit_address);
+ return retval;
+ }
+
+ printk(KERN_INFO "HVCS: vty-server@%X added to the vio bus.\n", dev->unit_address);
+
+ /*
+ * DON'T enable interrupts here because there is no user to receive the
+ * data.
+ */
+ return 0;
+}
+
+static int __devexit hvcs_remove(struct vio_dev *dev)
+{
+ struct hvcs_struct *hvcsd = dev_get_drvdata(&dev->dev);
+ unsigned long flags;
+ struct tty_struct *tty;
+
+ if (!hvcsd)
+ return -ENODEV;
+
+ /* By this time the vty-server won't be getting any more interrupts */
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+
+ tty = hvcsd->tty;
+
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+
+ /*
+ * Let the last holder of this object cause it to be removed, which
+ * would probably be tty_hangup below.
+ */
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
+
+ /*
+ * The hangup is a scheduled function which will automatically chain-call
+ * hvcs_hangup. The tty should always be valid at this time unless a
+ * simultaneous tty close already cleaned up the hvcs_struct.
+ */
+ if (tty)
+ tty_hangup(tty);
+
+ printk(KERN_INFO "HVCS: vty-server@%X removed from the"
+ " vio bus.\n", dev->unit_address);
+ return 0;
+}
+
+static struct vio_driver hvcs_vio_driver = {
+ .id_table = hvcs_driver_table,
+ .probe = hvcs_probe,
+ .remove = __devexit_p(hvcs_remove),
+ .driver = {
+ .name = hvcs_driver_name,
+ .owner = THIS_MODULE,
+ }
+};
+
+/* Only called from hvcs_get_pi please */
+static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd)
+{
+ int clclength;
+
+ hvcsd->p_unit_address = pi->unit_address;
+ hvcsd->p_partition_ID = pi->partition_ID;
+ clclength = strlen(&pi->location_code[0]);
+ if (clclength > HVCS_CLC_LENGTH)
+ clclength = HVCS_CLC_LENGTH;
+
+ /* copy the null-term char too */
+ strncpy(&hvcsd->p_location_code[0],
+ &pi->location_code[0], clclength + 1);
+}
+
+/*
+ * Traverse the list and add the partner info that is found to the hvcs_struct
+ * struct entry. NOTE: At this time I know that partner info will return a
+ * single entry but in the future there may be multiple partner info entries per
+ * vty-server and you'll want to zero out that list and reset it. If for some
+ * reason you have an old version of this driver but there IS more than one
+ * partner info then hvcsd->p_* will hold the last partner info data from the
+ * firmware query. A good way to update this code would be to replace the three
+ * partner info fields in hvcs_struct with a list of hvcs_partner_info
+ * instances.
+ *
+ * This function must be called with the hvcsd->lock held.
+ */
+static int hvcs_get_pi(struct hvcs_struct *hvcsd)
+{
+ struct hvcs_partner_info *pi;
+ uint32_t unit_address = hvcsd->vdev->unit_address;
+ struct list_head head;
+ int retval;
+
+ spin_lock(&hvcs_pi_lock);
+ if (!hvcs_pi_buff) {
+ spin_unlock(&hvcs_pi_lock);
+ return -EFAULT;
+ }
+ retval = hvcs_get_partner_info(unit_address, &head, hvcs_pi_buff);
+ spin_unlock(&hvcs_pi_lock);
+ if (retval) {
+ printk(KERN_ERR "HVCS: Failed to fetch partner"
+ " info for vty-server@%x.\n", unit_address);
+ return retval;
+ }
+
+ /* nixes the values if the partner vty went away */
+ hvcsd->p_unit_address = 0;
+ hvcsd->p_partition_ID = 0;
+
+ list_for_each_entry(pi, &head, node)
+ hvcs_set_pi(pi, hvcsd);
+
+ hvcs_free_partner_info(&head);
+ return 0;
+}
+
+/*
+ * This function is executed by the driver "rescan" sysfs entry. It shouldn't
+ * be executed elsewhere, in order to prevent deadlock issues.
+ */
+static int hvcs_rescan_devices_list(void)
+{
+ struct hvcs_struct *hvcsd;
+ unsigned long flags;
+
+ spin_lock(&hvcs_structs_lock);
+
+ list_for_each_entry(hvcsd, &hvcs_structs, next) {
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ hvcs_get_pi(hvcsd);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ }
+
+ spin_unlock(&hvcs_structs_lock);
+
+ return 0;
+}
+
+/*
+ * Farm this off into its own function because it could be more complex once
+ * multiple-partner support is added. This function should be called with
+ * the hvcsd->lock held.
+ */
+static int hvcs_has_pi(struct hvcs_struct *hvcsd)
+{
+ if ((!hvcsd->p_unit_address) || (!hvcsd->p_partition_ID))
+ return 0;
+ return 1;
+}
+
+/*
+ * NOTE: It is possible that the super admin removed a partner vty and then
+ * added a different vty as the new partner.
+ *
+ * This function must be called with the hvcsd->lock held.
+ */
+static int hvcs_partner_connect(struct hvcs_struct *hvcsd)
+{
+ int retval;
+ unsigned int unit_address = hvcsd->vdev->unit_address;
+
+ /*
+ * If there wasn't any pi when the device was added it doesn't mean
+ * there isn't any now. This driver isn't notified when a new partner
+ * vty is added to a vty-server so we discover changes on our own.
+ * Please see comments in hvcs_register_connection() for justification
+ * of this bizarre code.
+ */
+ retval = hvcs_register_connection(unit_address,
+ hvcsd->p_partition_ID,
+ hvcsd->p_unit_address);
+ if (!retval) {
+ hvcsd->connected = 1;
+ return 0;
+ } else if (retval != -EINVAL)
+ return retval;
+
+ /*
+ * As per the spec re-get the pi and try again if -EINVAL after the
+ * first connection attempt.
+ */
+ if (hvcs_get_pi(hvcsd))
+ return -ENOMEM;
+
+ if (!hvcs_has_pi(hvcsd))
+ return -ENODEV;
+
+ retval = hvcs_register_connection(unit_address,
+ hvcsd->p_partition_ID,
+ hvcsd->p_unit_address);
+ if (retval != -EINVAL) {
+ hvcsd->connected = 1;
+ return retval;
+ }
+
+ /*
+ * EBUSY is the most likely scenario, though the vty could have been
+ * removed or there really could be an hcall error due to the parameter
+ * data; thanks to ambiguous firmware return codes we can't really
+ * tell.
+ */
+ printk(KERN_INFO "HVCS: vty-server or partner"
+ " vty is busy. Try again later.\n");
+ return -EBUSY;
+}
+
+/* This function must be called with the hvcsd->lock held */
+static void hvcs_partner_free(struct hvcs_struct *hvcsd)
+{
+ int retval;
+ do {
+ retval = hvcs_free_connection(hvcsd->vdev->unit_address);
+ } while (retval == -EBUSY);
+ hvcsd->connected = 0;
+}
+
+/* This helper function must be called WITHOUT the hvcsd->lock held */
+static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
+ unsigned int irq, struct vio_dev *vdev)
+{
+ unsigned long flags;
+ int rc;
+
+ /*
+ * It is possible that the vty-server was removed between the time that
+ * the conn was registered and now.
+ */
+ if (!(rc = request_irq(irq, &hvcs_handle_interrupt,
+ IRQF_DISABLED, "ibmhvcs", hvcsd))) {
+ /*
+ * It is possible the vty-server was removed after the irq was
+ * requested but before we have time to enable interrupts.
+ */
+ if (vio_enable_interrupts(vdev) == H_SUCCESS)
+ return 0;
+ else {
+ printk(KERN_ERR "HVCS: int enable failed for"
+ " vty-server@%X.\n", unit_address);
+ free_irq(irq, hvcsd);
+ }
+ } else
+ printk(KERN_ERR "HVCS: irq req failed for"
+ " vty-server@%X.\n", unit_address);
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ hvcs_partner_free(hvcsd);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+
+ return rc;
+
+}
+
+/*
+ * This always increments the kref ref count if the call is successful.
+ * Please remember to dec when you are done with the instance.
+ *
+ * NOTICE: Do NOT hold either the hvcs_struct.lock or hvcs_structs_lock when
+ * calling this function or you will get deadlock.
+ */
+static struct hvcs_struct *hvcs_get_by_index(int index)
+{
+ struct hvcs_struct *hvcsd = NULL;
+ unsigned long flags;
+
+ spin_lock(&hvcs_structs_lock);
+ /* We can immediately discard OOB requests */
+ if (index >= 0 && index < HVCS_MAX_SERVER_ADAPTERS) {
+ list_for_each_entry(hvcsd, &hvcs_structs, next) {
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ if (hvcsd->index == index) {
+ kref_get(&hvcsd->kref);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ spin_unlock(&hvcs_structs_lock);
+ return hvcsd;
+ }
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ }
+ hvcsd = NULL;
+ }
+
+ spin_unlock(&hvcs_structs_lock);
+ return hvcsd;
+}
+
+/*
+ * This is invoked via the tty_open interface when a user app connects to the
+ * /dev node.
+ */
+static int hvcs_open(struct tty_struct *tty, struct file *filp)
+{
+ struct hvcs_struct *hvcsd;
+ int rc, retval = 0;
+ unsigned long flags;
+ unsigned int irq;
+ struct vio_dev *vdev;
+ unsigned long unit_address;
+
+ if (tty->driver_data)
+ goto fast_open;
+
+ /*
+ * Is there a vty-server that shares the same index?
+ * This function increments the kref reference count.
+ */
+ if (!(hvcsd = hvcs_get_by_index(tty->index))) {
+ printk(KERN_WARNING "HVCS: open failed, no device associated"
+ " with tty->index %d.\n", tty->index);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+
+ if (hvcsd->connected == 0)
+ if ((retval = hvcs_partner_connect(hvcsd)))
+ goto error_release;
+
+ hvcsd->open_count = 1;
+ hvcsd->tty = tty;
+ tty->driver_data = hvcsd;
+
+ memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN);
+
+ /*
+ * Save these while holding the spinlock, for the enable operations
+ * that need them outside of the spinlock.
+ */
+ irq = hvcsd->vdev->irq;
+ vdev = hvcsd->vdev;
+ unit_address = hvcsd->vdev->unit_address;
+
+ hvcsd->todo_mask |= HVCS_SCHED_READ;
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+
+ /*
+ * This must be done outside of the spinlock because it requests irqs
+ * and will grab the spinlock and free the connection if it fails.
+ */
+ if (((rc = hvcs_enable_device(hvcsd, unit_address, irq, vdev)))) {
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
+ printk(KERN_WARNING "HVCS: enable device failed.\n");
+ return rc;
+ }
+
+ goto open_success;
+
+fast_open:
+ hvcsd = tty->driver_data;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ kref_get(&hvcsd->kref);
+ hvcsd->open_count++;
+ hvcsd->todo_mask |= HVCS_SCHED_READ;
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+
+open_success:
+ hvcs_kick();
+
+ printk(KERN_INFO "HVCS: vty-server@%X connection opened.\n",
+ hvcsd->vdev->unit_address);
+
+ return 0;
+
+error_release:
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
+
+ printk(KERN_WARNING "HVCS: partner connect failed.\n");
+ return retval;
+}
+
+static void hvcs_close(struct tty_struct *tty, struct file *filp)
+{
+ struct hvcs_struct *hvcsd;
+ unsigned long flags;
+ int irq = NO_IRQ;
+
+ /*
+ * Is someone trying to close the file associated with this device after
+ * we have hung up? If so tty->driver_data wouldn't be valid.
+ */
+ if (tty_hung_up_p(filp))
+ return;
+
+ /*
+ * No driver_data means that this close was probably issued after a
+ * failed hvcs_open by the tty layer's release_dev() api and we can just
+ * exit cleanly.
+ */
+ if (!tty->driver_data)
+ return;
+
+ hvcsd = tty->driver_data;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ if (--hvcsd->open_count == 0) {
+
+ vio_disable_interrupts(hvcsd->vdev);
+
+ /*
+ * NULL this early so that the kernel_thread doesn't try to
+ * execute any operations on the TTY even though it is obligated
+ * to deliver any pending I/O to the hypervisor.
+ */
+ hvcsd->tty = NULL;
+
+ irq = hvcsd->vdev->irq;
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+
+ tty_wait_until_sent(tty, HVCS_CLOSE_WAIT);
+
+ /*
+ * This line is important because it tells hvcs_open that this
+ * device needs to be re-configured the next time hvcs_open is
+ * called.
+ */
+ tty->driver_data = NULL;
+
+ free_irq(irq, hvcsd);
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
+ return;
+ } else if (hvcsd->open_count < 0) {
+ printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
+ " is missmanaged.\n",
+ hvcsd->vdev->unit_address, hvcsd->open_count);
+ }
+
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
+}
+
+static void hvcs_hangup(struct tty_struct * tty)
+{
+ struct hvcs_struct *hvcsd = tty->driver_data;
+ unsigned long flags;
+ int temp_open_count;
+ int irq = NO_IRQ;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ /* Preserve this so that we know how many kref refs to put */
+ temp_open_count = hvcsd->open_count;
+
+ /*
+ * Don't kref put inside the spinlock because the destruction
+ * callback may use the spinlock and it may get called before the
+ * spinlock has been released.
+ */
+ vio_disable_interrupts(hvcsd->vdev);
+
+ hvcsd->todo_mask = 0;
+
+ /* I don't think the tty needs the hvcs_struct pointer after a hangup */
+ hvcsd->tty->driver_data = NULL;
+ hvcsd->tty = NULL;
+
+ hvcsd->open_count = 0;
+
+ /* This will drop any buffered data on the floor which is OK in a hangup
+ * scenario. */
+ memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN);
+ hvcsd->chars_in_buffer = 0;
+
+ irq = hvcsd->vdev->irq;
+
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+
+ free_irq(irq, hvcsd);
+
+ /*
+ * We need to kref_put() for every open_count we have since the
+ * tty_hangup() function doesn't invoke a close per open connection on a
+ * non-console device.
+ */
+ while (temp_open_count) {
+ --temp_open_count;
+ /*
+ * The final put will trigger destruction of the hvcs_struct.
+ * NOTE: If this hangup was signaled from user space then the
+ * final put will never happen.
+ */
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
+ }
+}
+
+/*
+ * NOTE: This is almost always from_user since user level apps interact with the
+ * /dev nodes. I'm trusting that if hvcs_write gets called and interrupted by
+ * hvcs_remove (which removes the target device and executes tty_hangup()) that
+ * tty_hangup will allow hvcs_write time to complete execution before it
+ * terminates our device.
+ */
+static int hvcs_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ struct hvcs_struct *hvcsd = tty->driver_data;
+ unsigned int unit_address;
+ const unsigned char *charbuf;
+ unsigned long flags;
+ int total_sent = 0;
+ int tosend = 0;
+ int result = 0;
+
+ /*
+ * If they don't check the return code from their open they may
+ * attempt this even if there is no connected device.
+ */
+ if (!hvcsd)
+ return -ENODEV;
+
+ /* Reasonable size to prevent user level flooding */
+ if (count > HVCS_MAX_FROM_USER) {
+ printk(KERN_WARNING "HVCS write: count being truncated to"
+ " HVCS_MAX_FROM_USER.\n");
+ count = HVCS_MAX_FROM_USER;
+ }
+
+ charbuf = buf;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+
+ /*
+ * Somehow an open succeeded but the device was removed or the
+ * connection terminated between the vty-server and partner vty during
+ * the middle of a write operation? This is a crummy place to do this
+ * but we want to keep it all in the spinlock.
+ */
+ if (hvcsd->open_count <= 0) {
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return -ENODEV;
+ }
+
+ unit_address = hvcsd->vdev->unit_address;
+
+ while (count > 0) {
+ tosend = min(count, (HVCS_BUFF_LEN - hvcsd->chars_in_buffer));
+ /*
+ * No more space, this probably means that the last call to
+ * hvcs_write() didn't succeed and the buffer was filled up.
+ */
+ if (!tosend)
+ break;
+
+ memcpy(&hvcsd->buffer[hvcsd->chars_in_buffer],
+ &charbuf[total_sent],
+ tosend);
+
+ hvcsd->chars_in_buffer += tosend;
+
+ result = 0;
+
+ /*
+ * If this is true then we don't want to try writing to the
+ * hypervisor because that is the kernel thread's job now. We'll
+ * just add to the buffer.
+ */
+ if (!(hvcsd->todo_mask & HVCS_TRY_WRITE))
+ /* won't send partial writes */
+ result = hvc_put_chars(unit_address,
+ &hvcsd->buffer[0],
+ hvcsd->chars_in_buffer);
+
+ /*
+ * Since we know we have enough room in hvcsd->buffer for
+ * tosend we record that it was sent regardless of whether the
+ * hypervisor actually took it because we have it buffered.
+ */
+ total_sent += tosend;
+ count -= tosend;
+ if (result == 0) {
+ hvcsd->todo_mask |= HVCS_TRY_WRITE;
+ hvcs_kick();
+ break;
+ }
+
+ hvcsd->chars_in_buffer = 0;
+ /*
+ * Test after the chars_in_buffer reset otherwise this could
+ * deadlock our writes if hvc_put_chars fails.
+ */
+ if (result < 0)
+ break;
+ }
+
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+
+ if (result == -1)
+ return -EIO;
+ else
+ return total_sent;
+}
+
+/*
+ * This is really asking how much can we guarantee that we can send or that we
+ * absolutely WILL BUFFER if we can't send it. This driver MUST honor the
+ * return value, hence the reason for hvcs_struct buffering.
+ */
+static int hvcs_write_room(struct tty_struct *tty)
+{
+ struct hvcs_struct *hvcsd = tty->driver_data;
+
+ if (!hvcsd || hvcsd->open_count <= 0)
+ return 0;
+
+ return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
+}
+
+static int hvcs_chars_in_buffer(struct tty_struct *tty)
+{
+ struct hvcs_struct *hvcsd = tty->driver_data;
+
+ return hvcsd->chars_in_buffer;
+}
+
+static const struct tty_operations hvcs_ops = {
+ .open = hvcs_open,
+ .close = hvcs_close,
+ .hangup = hvcs_hangup,
+ .write = hvcs_write,
+ .write_room = hvcs_write_room,
+ .chars_in_buffer = hvcs_chars_in_buffer,
+ .unthrottle = hvcs_unthrottle,
+ .throttle = hvcs_throttle,
+};
+
+static int hvcs_alloc_index_list(int n)
+{
+ int i;
+
+ hvcs_index_list = kmalloc(n * sizeof(*hvcs_index_list), GFP_KERNEL);
+ if (!hvcs_index_list)
+ return -ENOMEM;
+ hvcs_index_count = n;
+ for (i = 0; i < hvcs_index_count; i++)
+ hvcs_index_list[i] = -1;
+ return 0;
+}
+
+static void hvcs_free_index_list(void)
+{
+ /* Paranoia check to be thorough. */
+ kfree(hvcs_index_list);
+ hvcs_index_list = NULL;
+ hvcs_index_count = 0;
+}
+
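+/*
+ * One-time driver setup, called from hvcs_probe() under hvcs_init_mutex so
+ * that only the first probed device pays the cost: allocate the tty driver
+ * and index list, register with the tty layer, allocate the partner-info
+ * buffer and start the khvcsd kernel thread.
+ */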
+static int __devinit hvcs_initialize(void)
+{
+ int rc, num_ttys_to_alloc;
+
+ mutex_lock(&hvcs_init_mutex);
+ if (hvcs_task) {
+ mutex_unlock(&hvcs_init_mutex);
+ return 0;
+ }
+
+ /* Has the user specified an override with an insmod param? */
+ if (hvcs_parm_num_devs <= 0 ||
+ (hvcs_parm_num_devs > HVCS_MAX_SERVER_ADAPTERS)) {
+ num_ttys_to_alloc = HVCS_DEFAULT_SERVER_ADAPTERS;
+ } else
+ num_ttys_to_alloc = hvcs_parm_num_devs;
+
+ hvcs_tty_driver = alloc_tty_driver(num_ttys_to_alloc);
+ if (!hvcs_tty_driver)
+ return -ENOMEM;
+
+ if (hvcs_alloc_index_list(num_ttys_to_alloc)) {
+ rc = -ENOMEM;
+ goto index_fail;
+ }
+
+ hvcs_tty_driver->owner = THIS_MODULE;
+
+ hvcs_tty_driver->driver_name = hvcs_driver_name;
+ hvcs_tty_driver->name = hvcs_device_node;
+
+ /*
+ * We'll let the system assign us a major number, indicated by leaving
+ * it blank.
+ */
+
+ hvcs_tty_driver->minor_start = HVCS_MINOR_START;
+ hvcs_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
+
+ /*
+ * We roll our own so that we DON'T ECHO. We can't echo because the
+ * device we are connecting to already echoes by default and this would
+ * throw us into a horrible recursive echo-echo-echo loop.
+ */
+ hvcs_tty_driver->init_termios = hvcs_tty_termios;
+ hvcs_tty_driver->flags = TTY_DRIVER_REAL_RAW;
+
+ tty_set_operations(hvcs_tty_driver, &hvcs_ops);
+
+ /*
+ * The following call will result in sysfs entries that denote the
+ * dynamically assigned major and minor numbers for our devices.
+ */
+ if (tty_register_driver(hvcs_tty_driver)) {
+ printk(KERN_ERR "HVCS: registration as a tty driver failed.\n");
+ rc = -EIO;
+ goto register_fail;
+ }
+
+ hvcs_pi_buff = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!hvcs_pi_buff) {
+ rc = -ENOMEM;
+ goto buff_alloc_fail;
+ }
+
+ hvcs_task = kthread_run(khvcsd, NULL, "khvcsd");
+ if (IS_ERR(hvcs_task)) {
+ printk(KERN_ERR "HVCS: khvcsd creation failed.\n");
+ rc = -EIO;
+ goto kthread_fail;
+ }
+ mutex_unlock(&hvcs_init_mutex);
+ return 0;
+
+kthread_fail:
+ kfree(hvcs_pi_buff);
+buff_alloc_fail:
+ tty_unregister_driver(hvcs_tty_driver);
+register_fail:
+ hvcs_free_index_list();
+index_fail:
+ put_tty_driver(hvcs_tty_driver);
+ hvcs_tty_driver = NULL;
+ mutex_unlock(&hvcs_init_mutex);
+ return rc;
+}
+
+static int __init hvcs_module_init(void)
+{
+ int rc = vio_register_driver(&hvcs_vio_driver);
+ if (rc) {
+ printk(KERN_ERR "HVCS: can't register vio driver\n");
+ return rc;
+ }
+
+ pr_info("HVCS: Driver registered.\n");
+
+ /* This needs to be done AFTER the vio_register_driver() call or else
+ * the kobjects won't be initialized properly.
+ */
+ rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan);
+ if (rc)
+ pr_warning("HVCS: Failed to create rescan file (err %d)\n", rc);
+
+ return 0;
+}
+
+static void __exit hvcs_module_exit(void)
+{
+ /*
+ * This driver receives hvcs_remove callbacks for each device upon
+ * module removal.
+ */
+ vio_unregister_driver(&hvcs_vio_driver);
+ if (!hvcs_task)
+ return;
+
+ /*
+ * This synchronous operation will wake the khvcsd kthread if it is
+ * asleep and will return when khvcsd has terminated.
+ */
+ kthread_stop(hvcs_task);
+
+ spin_lock(&hvcs_pi_lock);
+ kfree(hvcs_pi_buff);
+ hvcs_pi_buff = NULL;
+ spin_unlock(&hvcs_pi_lock);
+
+ driver_remove_file(&hvcs_vio_driver.driver, &driver_attr_rescan);
+
+ tty_unregister_driver(hvcs_tty_driver);
+
+ hvcs_free_index_list();
+
+ put_tty_driver(hvcs_tty_driver);
+
+ printk(KERN_INFO "HVCS: driver module removed.\n");
+}
+
+module_init(hvcs_module_init);
+module_exit(hvcs_module_exit);
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
new file mode 100644
index 00000000..8a8d6373
--- /dev/null
+++ b/drivers/tty/hvc/hvsi.c
@@ -0,0 +1,1314 @@
+/*
+ * Copyright (C) 2004 Hollis Blanchard <hollisb@us.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* Host Virtual Serial Interface (HVSI) is a protocol between the hosted OS
+ * and the service processor on IBM pSeries servers. On these servers, there
+ * are no serial ports under the OS's control, and sometimes there is no other
+ * console available either. However, the service processor has two standard
+ * serial ports, so this over-complicated protocol allows the OS to control
+ * those ports by proxy.
+ *
+ * Besides data, the protocol supports the reading/writing of the serial
+ * port's DTR line, and the reading of the CD line. This is to allow the OS to
+ * control a modem attached to the service processor's serial port. Note that
+ * the OS cannot change the speed of the port through this protocol.
+ */
+
+#undef DEBUG
+
+#include <linux/console.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/major.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <asm/hvcall.h>
+#include <asm/hvconsole.h>
+#include <asm/prom.h>
+#include <asm/uaccess.h>
+#include <asm/vio.h>
+#include <asm/param.h>
+
+#define HVSI_MAJOR 229
+#define HVSI_MINOR 128
+#define MAX_NR_HVSI_CONSOLES 4
+
+#define HVSI_TIMEOUT (5*HZ)
+#define HVSI_VERSION 1
+#define HVSI_MAX_PACKET 256
+#define HVSI_MAX_READ 16
+#define HVSI_MAX_OUTGOING_DATA 12
+#define N_OUTBUF 12
+
+/*
+ * we pass data via two 8-byte registers, so we would like our char arrays
+ * properly aligned for those loads.
+ */
+#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
+
+struct hvsi_struct {
+ struct delayed_work writer;
+ struct work_struct handshaker;
+ wait_queue_head_t emptyq; /* woken when outbuf is emptied */
+ wait_queue_head_t stateq; /* woken when HVSI state changes */
+ spinlock_t lock;
+ int index;
+ struct tty_struct *tty;
+ int count;
+ uint8_t throttle_buf[128];
+ uint8_t outbuf[N_OUTBUF]; /* to implement write_room and chars_in_buffer */
+ /* inbuf is for packet reassembly. leave a little room for leftovers. */
+ uint8_t inbuf[HVSI_MAX_PACKET + HVSI_MAX_READ];
+ uint8_t *inbuf_end;
+ int n_throttle;
+ int n_outbuf;
+ uint32_t vtermno;
+ uint32_t virq;
+ atomic_t seqno; /* HVSI packet sequence number */
+ uint16_t mctrl;
+ uint8_t state; /* HVSI protocol state */
+ uint8_t flags;
+#ifdef CONFIG_MAGIC_SYSRQ
+ uint8_t sysrq;
+#endif /* CONFIG_MAGIC_SYSRQ */
+};
+static struct hvsi_struct hvsi_ports[MAX_NR_HVSI_CONSOLES];
+
+static struct tty_driver *hvsi_driver;
+static int hvsi_count;
+static int (*hvsi_wait)(struct hvsi_struct *hp, int state);
+
+enum HVSI_PROTOCOL_STATE {
+ HVSI_CLOSED,
+ HVSI_WAIT_FOR_VER_RESPONSE,
+ HVSI_WAIT_FOR_VER_QUERY,
+ HVSI_OPEN,
+ HVSI_WAIT_FOR_MCTRL_RESPONSE,
+ HVSI_FSP_DIED,
+};
+#define HVSI_CONSOLE 0x1
+
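+/*
+ * Packet type bytes occupy 0xfc..0xff, so is_header() below can recognize a
+ * header with a single >= VS_QUERY_RESPONSE_PACKET_HEADER comparison.
+ */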
+#define VS_DATA_PACKET_HEADER 0xff
+#define VS_CONTROL_PACKET_HEADER 0xfe
+#define VS_QUERY_PACKET_HEADER 0xfd
+#define VS_QUERY_RESPONSE_PACKET_HEADER 0xfc
+
+/* control verbs */
+#define VSV_SET_MODEM_CTL 1 /* to service processor only */
+#define VSV_MODEM_CTL_UPDATE 2 /* from service processor only */
+#define VSV_CLOSE_PROTOCOL 3
+
+/* query verbs */
+#define VSV_SEND_VERSION_NUMBER 1
+#define VSV_SEND_MODEM_CTL_STATUS 2
+
+/* yes, these masks are not consecutive. */
+#define HVSI_TSDTR 0x01
+#define HVSI_TSCD 0x20
+
+struct hvsi_header {
+ uint8_t type;
+ uint8_t len;
+ uint16_t seqno;
+} __attribute__((packed));
+
+struct hvsi_data {
+ uint8_t type;
+ uint8_t len;
+ uint16_t seqno;
+ uint8_t data[HVSI_MAX_OUTGOING_DATA];
+} __attribute__((packed));
+
+struct hvsi_control {
+ uint8_t type;
+ uint8_t len;
+ uint16_t seqno;
+ uint16_t verb;
+ /* optional depending on verb: */
+ uint32_t word;
+ uint32_t mask;
+} __attribute__((packed));
+
+struct hvsi_query {
+ uint8_t type;
+ uint8_t len;
+ uint16_t seqno;
+ uint16_t verb;
+} __attribute__((packed));
+
+struct hvsi_query_response {
+ uint8_t type;
+ uint8_t len;
+ uint16_t seqno;
+ uint16_t verb;
+ uint16_t query_seqno;
+ union {
+ uint8_t version;
+ uint32_t mctrl_word;
+ } u;
+} __attribute__((packed));
+
+
+
+static inline int is_console(struct hvsi_struct *hp)
+{
+ return hp->flags & HVSI_CONSOLE;
+}
+
+static inline int is_open(struct hvsi_struct *hp)
+{
+ /* if we're waiting for an mctrl then we're already open */
+ return (hp->state == HVSI_OPEN)
+ || (hp->state == HVSI_WAIT_FOR_MCTRL_RESPONSE);
+}
+
+static inline void print_state(struct hvsi_struct *hp)
+{
+#ifdef DEBUG
+ static const char *state_names[] = {
+ "HVSI_CLOSED",
+ "HVSI_WAIT_FOR_VER_RESPONSE",
+ "HVSI_WAIT_FOR_VER_QUERY",
+ "HVSI_OPEN",
+ "HVSI_WAIT_FOR_MCTRL_RESPONSE",
+ "HVSI_FSP_DIED",
+ };
+ const char *name = (hp->state < ARRAY_SIZE(state_names))
+ ? state_names[hp->state] : "UNKNOWN";
+
+ pr_debug("hvsi%i: state = %s\n", hp->index, name);
+#endif /* DEBUG */
+}
+
+static inline void __set_state(struct hvsi_struct *hp, int state)
+{
+ hp->state = state;
+ print_state(hp);
+ wake_up_all(&hp->stateq);
+}
+
+static inline void set_state(struct hvsi_struct *hp, int state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hp->lock, flags);
+ __set_state(hp, state);
+ spin_unlock_irqrestore(&hp->lock, flags);
+}
+
+static inline int len_packet(const uint8_t *packet)
+{
+ return (int)((struct hvsi_header *)packet)->len;
+}
+
+static inline int is_header(const uint8_t *packet)
+{
+ struct hvsi_header *header = (struct hvsi_header *)packet;
+ return header->type >= VS_QUERY_RESPONSE_PACKET_HEADER;
+}
+
+static inline int got_packet(const struct hvsi_struct *hp, uint8_t *packet)
+{
+ if (hp->inbuf_end < packet + sizeof(struct hvsi_header))
+ return 0; /* don't even have the packet header */
+
+ if (hp->inbuf_end < (packet + len_packet(packet)))
+ return 0; /* don't have the rest of the packet */
+
+ return 1;
+}
+
+/* shift remaining bytes in packetbuf down */
+static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to)
+{
+ int remaining = (int)(hp->inbuf_end - read_to);
+
+ pr_debug("%s: %i chars remain\n", __func__, remaining);
+
+ if (read_to != hp->inbuf)
+ memmove(hp->inbuf, read_to, remaining);
+
+ hp->inbuf_end = hp->inbuf + remaining;
+}
+
+#ifdef DEBUG
+#define dbg_dump_packet(packet) dump_packet(packet)
+#define dbg_dump_hex(data, len) dump_hex(data, len)
+#else
+#define dbg_dump_packet(packet) do { } while (0)
+#define dbg_dump_hex(data, len) do { } while (0)
+#endif
+
+static void dump_hex(const uint8_t *data, int len)
+{
+ int i;
+
+ printk(" ");
+ for (i=0; i < len; i++)
+ printk("%.2x", data[i]);
+
+ printk("\n ");
+ for (i=0; i < len; i++) {
+ if (isprint(data[i]))
+ printk("%c", data[i]);
+ else
+ printk(".");
+ }
+ printk("\n");
+}
+
+static void dump_packet(uint8_t *packet)
+{
+ struct hvsi_header *header = (struct hvsi_header *)packet;
+
+ printk("type 0x%x, len %i, seqno %i:\n", header->type, header->len,
+ header->seqno);
+
+ dump_hex(packet, header->len);
+}
+
+static int hvsi_read(struct hvsi_struct *hp, char *buf, int count)
+{
+ unsigned long got;
+
+ got = hvc_get_chars(hp->vtermno, buf, count);
+
+ return got;
+}
+
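+/*
+ * Control packets can demand a tty hangup (CD dropped) or a protocol
+ * re-handshake (service processor reset). Both are handed back to the caller
+ * through to_hangup/to_handshake so they can be acted on once the port lock
+ * has been released.
+ */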
+static void hvsi_recv_control(struct hvsi_struct *hp, uint8_t *packet,
+ struct tty_struct **to_hangup, struct hvsi_struct **to_handshake)
+{
+ struct hvsi_control *header = (struct hvsi_control *)packet;
+
+ switch (header->verb) {
+ case VSV_MODEM_CTL_UPDATE:
+ if ((header->word & HVSI_TSCD) == 0) {
+ /* CD went away; no more connection */
+ pr_debug("hvsi%i: CD dropped\n", hp->index);
+ hp->mctrl &= ~TIOCM_CD;
+ /* If userland hasn't done an open(2) yet, hp->tty is NULL. */
+ if (hp->tty && !(hp->tty->flags & CLOCAL))
+ *to_hangup = hp->tty;
+ }
+ break;
+ case VSV_CLOSE_PROTOCOL:
+ pr_debug("hvsi%i: service processor came back\n", hp->index);
+ if (hp->state != HVSI_CLOSED) {
+ *to_handshake = hp;
+ }
+ break;
+ default:
+ printk(KERN_WARNING "hvsi%i: unknown HVSI control packet: ",
+ hp->index);
+ dump_packet(packet);
+ break;
+ }
+}
+
+static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet)
+{
+ struct hvsi_query_response *resp = (struct hvsi_query_response *)packet;
+
+ switch (hp->state) {
+ case HVSI_WAIT_FOR_VER_RESPONSE:
+ __set_state(hp, HVSI_WAIT_FOR_VER_QUERY);
+ break;
+ case HVSI_WAIT_FOR_MCTRL_RESPONSE:
+ hp->mctrl = 0;
+ if (resp->u.mctrl_word & HVSI_TSDTR)
+ hp->mctrl |= TIOCM_DTR;
+ if (resp->u.mctrl_word & HVSI_TSCD)
+ hp->mctrl |= TIOCM_CD;
+ __set_state(hp, HVSI_OPEN);
+ break;
+ default:
+ printk(KERN_ERR "hvsi%i: unexpected query response: ", hp->index);
+ dump_packet(packet);
+ break;
+ }
+}
+
+/* respond to service processor's version query */
+static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
+{
+ struct hvsi_query_response packet __ALIGNED__;
+ int wrote;
+
+ packet.type = VS_QUERY_RESPONSE_PACKET_HEADER;
+ packet.len = sizeof(struct hvsi_query_response);
+ packet.seqno = atomic_inc_return(&hp->seqno);
+ packet.verb = VSV_SEND_VERSION_NUMBER;
+ packet.u.version = HVSI_VERSION;
+ packet.query_seqno = query_seqno+1;
+
+ pr_debug("%s: sending %i bytes\n", __func__, packet.len);
+ dbg_dump_hex((uint8_t*)&packet, packet.len);
+
+ wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
+ if (wrote != packet.len) {
+ printk(KERN_ERR "hvsi%i: couldn't send query response!\n",
+ hp->index);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet)
+{
+ struct hvsi_query *query = (struct hvsi_query *)packet;
+
+ switch (hp->state) {
+ case HVSI_WAIT_FOR_VER_QUERY:
+ hvsi_version_respond(hp, query->seqno);
+ __set_state(hp, HVSI_OPEN);
+ break;
+ default:
+ printk(KERN_ERR "hvsi%i: unexpected query: ", hp->index);
+ dump_packet(packet);
+ break;
+ }
+}
+
+static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
+{
+ int i;
+
+ for (i=0; i < len; i++) {
+ char c = buf[i];
+#ifdef CONFIG_MAGIC_SYSRQ
+ if (c == '\0') {
+ hp->sysrq = 1;
+ continue;
+ } else if (hp->sysrq) {
+ handle_sysrq(c);
+ hp->sysrq = 0;
+ continue;
+ }
+#endif /* CONFIG_MAGIC_SYSRQ */
+ tty_insert_flip_char(hp->tty, c, 0);
+ }
+}
+
+/*
+ * We could get 252 bytes of data at once here. But the tty layer only
+ * throttles us at TTY_THRESHOLD_THROTTLE (128) bytes, so we could overflow
+ * it. Accordingly we won't send more than 128 bytes at a time to the flip
+ * buffer, which will give the tty buffer a chance to throttle us. Should the
+ * value of TTY_THRESHOLD_THROTTLE change in n_tty.c, this code should be
+ * revisited.
+ */
+#define TTY_THRESHOLD_THROTTLE 128
+static struct tty_struct *hvsi_recv_data(struct hvsi_struct *hp,
+ const uint8_t *packet)
+{
+ const struct hvsi_header *header = (const struct hvsi_header *)packet;
+ const uint8_t *data = packet + sizeof(struct hvsi_header);
+ int datalen = header->len - sizeof(struct hvsi_header);
+ int overflow = datalen - TTY_THRESHOLD_THROTTLE;
+
+ pr_debug("queueing %i chars '%.*s'\n", datalen, datalen, data);
+
+ if (datalen == 0)
+ return NULL;
+
+ if (overflow > 0) {
+ pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __func__);
+ datalen = TTY_THRESHOLD_THROTTLE;
+ }
+
+ hvsi_insert_chars(hp, data, datalen);
+
+ if (overflow > 0) {
+ /*
+ * we still have more data to deliver, so we need to save off the
+ * overflow and send it later
+ */
+ pr_debug("%s: deferring overflow\n", __func__);
+ memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow);
+ hp->n_throttle = overflow;
+ }
+
+ return hp->tty;
+}
+
+/*
+ * Returns true/false indicating data successfully read from hypervisor.
+ * Used both to get packets for tty connections and to advance the state
+ * machine during console handshaking (in which case tty = NULL and we ignore
+ * incoming data).
+ */
+static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip,
+ struct tty_struct **hangup, struct hvsi_struct **handshake)
+{
+ uint8_t *packet = hp->inbuf;
+ int chunklen;
+
+ *flip = NULL;
+ *hangup = NULL;
+ *handshake = NULL;
+
+ chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ);
+ if (chunklen == 0) {
+ pr_debug("%s: 0-length read\n", __func__);
+ return 0;
+ }
+
+ pr_debug("%s: got %i bytes\n", __func__, chunklen);
+ dbg_dump_hex(hp->inbuf_end, chunklen);
+
+ hp->inbuf_end += chunklen;
+
+ /* handle all completed packets */
+ while ((packet < hp->inbuf_end) && got_packet(hp, packet)) {
+ struct hvsi_header *header = (struct hvsi_header *)packet;
+
+ if (!is_header(packet)) {
+ printk(KERN_ERR "hvsi%i: got malformed packet\n", hp->index);
+ /* skip bytes until we find a header or run out of data */
+ while ((packet < hp->inbuf_end) && (!is_header(packet)))
+ packet++;
+ continue;
+ }
+
+ pr_debug("%s: handling %i-byte packet\n", __func__,
+ len_packet(packet));
+ dbg_dump_packet(packet);
+
+ switch (header->type) {
+ case VS_DATA_PACKET_HEADER:
+ if (!is_open(hp))
+ break;
+ if (hp->tty == NULL)
+ break; /* no tty buffer to put data in */
+ *flip = hvsi_recv_data(hp, packet);
+ break;
+ case VS_CONTROL_PACKET_HEADER:
+ hvsi_recv_control(hp, packet, hangup, handshake);
+ break;
+ case VS_QUERY_RESPONSE_PACKET_HEADER:
+ hvsi_recv_response(hp, packet);
+ break;
+ case VS_QUERY_PACKET_HEADER:
+ hvsi_recv_query(hp, packet);
+ break;
+ default:
+ printk(KERN_ERR "hvsi%i: unknown HVSI packet type 0x%x\n",
+ hp->index, header->type);
+ dump_packet(packet);
+ break;
+ }
+
+ packet += len_packet(packet);
+
+ if (*hangup || *handshake) {
+ pr_debug("%s: hangup or handshake\n", __func__);
+ /*
+ * we need to send the hangup now before receiving any more data.
+ * If we get "data, hangup, data", we can't deliver the second
+ * data before the hangup.
+ */
+ break;
+ }
+ }
+
+ compact_inbuf(hp, packet);
+
+ return 1;
+}
+
+static void hvsi_send_overflow(struct hvsi_struct *hp)
+{
+ pr_debug("%s: delivering %i bytes overflow\n", __func__,
+ hp->n_throttle);
+
+ hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle);
+ hp->n_throttle = 0;
+}
+
+/*
+ * must get all pending data because we only get an irq on empty->non-empty
+ * transition
+ */
+static irqreturn_t hvsi_interrupt(int irq, void *arg)
+{
+ struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+ struct tty_struct *flip;
+ struct tty_struct *hangup;
+ struct hvsi_struct *handshake;
+ unsigned long flags;
+ int again = 1;
+
+ pr_debug("%s\n", __func__);
+
+ while (again) {
+ spin_lock_irqsave(&hp->lock, flags);
+ again = hvsi_load_chunk(hp, &flip, &hangup, &handshake);
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ /*
+ * we have to call tty_flip_buffer_push() and tty_hangup() outside our
+ * spinlock. But we also have to keep going until we've read all the
+ * available data.
+ */
+
+ if (flip) {
+ /* there was data put in the tty flip buffer */
+ tty_flip_buffer_push(flip);
+ flip = NULL;
+ }
+
+ if (hangup) {
+ tty_hangup(hangup);
+ }
+
+ if (handshake) {
+ pr_debug("hvsi%i: attempting re-handshake\n", handshake->index);
+ schedule_work(&handshake->handshaker);
+ }
+ }
+
+ spin_lock_irqsave(&hp->lock, flags);
+ if (hp->tty && hp->n_throttle
+ && (!test_bit(TTY_THROTTLED, &hp->tty->flags))) {
+ /* we weren't hung up and we weren't throttled, so we can deliver the
+ * rest now */
+ flip = hp->tty;
+ hvsi_send_overflow(hp);
+ }
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ if (flip) {
+ tty_flip_buffer_push(flip);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* for boot console, before the irq handler is running */
+static int __init poll_for_state(struct hvsi_struct *hp, int state)
+{
+ unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;
+
+ for (;;) {
+ hvsi_interrupt(hp->virq, (void *)hp); /* get pending data */
+
+ if (hp->state == state)
+ return 0;
+
+ mdelay(5);
+ if (time_after(jiffies, end_jiffies))
+ return -EIO;
+ }
+}
+
+/* wait for irq handler to change our state */
+static int wait_for_state(struct hvsi_struct *hp, int state)
+{
+ int ret = 0;
+
+ if (!wait_event_timeout(hp->stateq, (hp->state == state), HVSI_TIMEOUT))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
+{
+ struct hvsi_query packet __ALIGNED__;
+ int wrote;
+
+ packet.type = VS_QUERY_PACKET_HEADER;
+ packet.len = sizeof(struct hvsi_query);
+ packet.seqno = atomic_inc_return(&hp->seqno);
+ packet.verb = verb;
+
+ pr_debug("%s: sending %i bytes\n", __func__, packet.len);
+ dbg_dump_hex((uint8_t*)&packet, packet.len);
+
+ wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
+ if (wrote != packet.len) {
+ printk(KERN_ERR "hvsi%i: couldn't send query (%i)!\n", hp->index,
+ wrote);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hvsi_get_mctrl(struct hvsi_struct *hp)
+{
+ int ret;
+
+ set_state(hp, HVSI_WAIT_FOR_MCTRL_RESPONSE);
+ hvsi_query(hp, VSV_SEND_MODEM_CTL_STATUS);
+
+ ret = hvsi_wait(hp, HVSI_OPEN);
+ if (ret < 0) {
+ printk(KERN_ERR "hvsi%i: didn't get modem flags\n", hp->index);
+ set_state(hp, HVSI_OPEN);
+ return ret;
+ }
+
+ pr_debug("%s: mctrl 0x%x\n", __func__, hp->mctrl);
+
+ return 0;
+}
+
+/* note that we can only set DTR */
+static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
+{
+ struct hvsi_control packet __ALIGNED__;
+ int wrote;
+
+ packet.type = VS_CONTROL_PACKET_HEADER;
+ packet.seqno = atomic_inc_return(&hp->seqno);
+ packet.len = sizeof(struct hvsi_control);
+ packet.verb = VSV_SET_MODEM_CTL;
+ packet.mask = HVSI_TSDTR;
+
+ if (mctrl & TIOCM_DTR)
+ packet.word = HVSI_TSDTR;
+
+ pr_debug("%s: sending %i bytes\n", __func__, packet.len);
+ dbg_dump_hex((uint8_t*)&packet, packet.len);
+
+ wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
+ if (wrote != packet.len) {
+ printk(KERN_ERR "hvsi%i: couldn't set DTR!\n", hp->index);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void hvsi_drain_input(struct hvsi_struct *hp)
+{
+ uint8_t buf[HVSI_MAX_READ] __ALIGNED__;
+ unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;
+
+ while (time_before(jiffies, end_jiffies))
+ if (0 == hvsi_read(hp, buf, HVSI_MAX_READ))
+ break;
+}
+
+static int hvsi_handshake(struct hvsi_struct *hp)
+{
+ int ret;
+
+ /*
+ * We could have a CLOSE or other data waiting for us before we even try
+ * to open; try to throw it all away so we don't get confused. (CLOSE
+ * is the first message sent up the pipe when the FSP comes online. We
+ * need to distinguish between "it came up a while ago and we're the first
+ * user" and "it was just reset before it saw our handshake packet".)
+ */
+ hvsi_drain_input(hp);
+
+ set_state(hp, HVSI_WAIT_FOR_VER_RESPONSE);
+ ret = hvsi_query(hp, VSV_SEND_VERSION_NUMBER);
+ if (ret < 0) {
+ printk(KERN_ERR "hvsi%i: couldn't send version query\n", hp->index);
+ return ret;
+ }
+
+ ret = hvsi_wait(hp, HVSI_OPEN);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void hvsi_handshaker(struct work_struct *work)
+{
+ struct hvsi_struct *hp =
+ container_of(work, struct hvsi_struct, handshaker);
+
+ if (hvsi_handshake(hp) >= 0)
+ return;
+
+ printk(KERN_ERR "hvsi%i: re-handshaking failed\n", hp->index);
+ if (is_console(hp)) {
+ /*
+ * ttys will re-attempt the handshake via hvsi_open, but
+ * the console will not.
+ */
+ printk(KERN_ERR "hvsi%i: lost console!\n", hp->index);
+ }
+}
+
+static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
+{
+ struct hvsi_data packet __ALIGNED__;
+ int ret;
+
+ BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
+
+ packet.type = VS_DATA_PACKET_HEADER;
+ packet.seqno = atomic_inc_return(&hp->seqno);
+ packet.len = count + sizeof(struct hvsi_header);
+ memcpy(&packet.data, buf, count);
+
+ ret = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
+ if (ret == packet.len) {
+ /* return the number of chars written, not the packet length */
+ return count;
+ }
+ return ret; /* return any errors */
+}
+
+static void hvsi_close_protocol(struct hvsi_struct *hp)
+{
+ struct hvsi_control packet __ALIGNED__;
+
+ packet.type = VS_CONTROL_PACKET_HEADER;
+ packet.seqno = atomic_inc_return(&hp->seqno);
+ packet.len = 6;
+ packet.verb = VSV_CLOSE_PROTOCOL;
+
+ pr_debug("%s: sending %i bytes\n", __func__, packet.len);
+ dbg_dump_hex((uint8_t*)&packet, packet.len);
+
+ hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
+}
+
+static int hvsi_open(struct tty_struct *tty, struct file *filp)
+{
+ struct hvsi_struct *hp;
+ unsigned long flags;
+ int line = tty->index;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+
+ if (line < 0 || line >= hvsi_count)
+ return -ENODEV;
+ hp = &hvsi_ports[line];
+
+ tty->driver_data = hp;
+
+ mb();
+ if (hp->state == HVSI_FSP_DIED)
+ return -EIO;
+
+ spin_lock_irqsave(&hp->lock, flags);
+ hp->tty = tty;
+ hp->count++;
+ atomic_set(&hp->seqno, 0);
+ h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ if (is_console(hp))
+ return 0; /* this has already been handshaked as the console */
+
+ ret = hvsi_handshake(hp);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: HVSI handshaking failed\n", tty->name);
+ return ret;
+ }
+
+ ret = hvsi_get_mctrl(hp);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: couldn't get initial modem flags\n", tty->name);
+ return ret;
+ }
+
+ ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: couldn't set DTR\n", tty->name);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* wait for hvsi_write_worker to empty hp->outbuf */
+static void hvsi_flush_output(struct hvsi_struct *hp)
+{
+ wait_event_timeout(hp->emptyq, (hp->n_outbuf <= 0), HVSI_TIMEOUT);
+
+ /* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */
+ cancel_delayed_work_sync(&hp->writer);
+ flush_work_sync(&hp->handshaker);
+
+ /*
+ * it's also possible that our timeout expired and hvsi_write_worker
+ * didn't manage to push outbuf. poof.
+ */
+ hp->n_outbuf = 0;
+}
+
+static void hvsi_close(struct tty_struct *tty, struct file *filp)
+{
+ struct hvsi_struct *hp = tty->driver_data;
+ unsigned long flags;
+
+ pr_debug("%s\n", __func__);
+
+ if (tty_hung_up_p(filp))
+ return;
+
+ spin_lock_irqsave(&hp->lock, flags);
+
+ if (--hp->count == 0) {
+ hp->tty = NULL;
+ hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
+
+ /* only close down connection if it is not the console */
+ if (!is_console(hp)) {
+ h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); /* no more irqs */
+ __set_state(hp, HVSI_CLOSED);
+ /*
+ * any data delivered to the tty layer after this will be
+ * discarded (except for XON/XOFF)
+ */
+ tty->closing = 1;
+
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ /* let any existing irq handlers finish. no more will start. */
+ synchronize_irq(hp->virq);
+
+ /* hvsi_write_worker will re-schedule until outbuf is empty. */
+ hvsi_flush_output(hp);
+
+ /* tell FSP to stop sending data */
+ hvsi_close_protocol(hp);
+
+ /*
+ * drain anything FSP is still in the middle of sending, and let
+ * hvsi_handshake drain the rest on the next open.
+ */
+ hvsi_drain_input(hp);
+
+ spin_lock_irqsave(&hp->lock, flags);
+ }
+ } else if (hp->count < 0)
+ printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
+ hp - hvsi_ports, hp->count);
+
+ spin_unlock_irqrestore(&hp->lock, flags);
+}
+
+static void hvsi_hangup(struct tty_struct *tty)
+{
+ struct hvsi_struct *hp = tty->driver_data;
+ unsigned long flags;
+
+ pr_debug("%s\n", __func__);
+
+ spin_lock_irqsave(&hp->lock, flags);
+
+ hp->count = 0;
+ hp->n_outbuf = 0;
+ hp->tty = NULL;
+
+ spin_unlock_irqrestore(&hp->lock, flags);
+}
+
+/* called with hp->lock held */
+static void hvsi_push(struct hvsi_struct *hp)
+{
+ int n;
+
+ if (hp->n_outbuf <= 0)
+ return;
+
+ n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf);
+ if (n > 0) {
+ /* success */
+ pr_debug("%s: wrote %i chars\n", __func__, n);
+ hp->n_outbuf = 0;
+ } else if (n == -EIO) {
+ __set_state(hp, HVSI_FSP_DIED);
+ printk(KERN_ERR "hvsi%i: service processor died\n", hp->index);
+ }
+}
+
+/* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
+static void hvsi_write_worker(struct work_struct *work)
+{
+ struct hvsi_struct *hp =
+ container_of(work, struct hvsi_struct, writer.work);
+ unsigned long flags;
+#ifdef DEBUG
+ static long start_j = 0;
+
+ if (start_j == 0)
+ start_j = jiffies;
+#endif /* DEBUG */
+
+ spin_lock_irqsave(&hp->lock, flags);
+
+ pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);
+
+ if (!is_open(hp)) {
+ /*
+ * We could have a non-open connection if the service processor died
+ * while we were busily scheduling ourselves. In that case, it could
+ * be minutes before the service processor comes back, so only try
+ * again once a second.
+ */
+ schedule_delayed_work(&hp->writer, HZ);
+ goto out;
+ }
+
+ hvsi_push(hp);
+ if (hp->n_outbuf > 0)
+ schedule_delayed_work(&hp->writer, 10);
+ else {
+#ifdef DEBUG
+ pr_debug("%s: outbuf emptied after %li jiffies\n", __func__,
+ jiffies - start_j);
+ start_j = 0;
+#endif /* DEBUG */
+ wake_up_all(&hp->emptyq);
+ tty_wakeup(hp->tty);
+ }
+
+out:
+ spin_unlock_irqrestore(&hp->lock, flags);
+}
+
+static int hvsi_write_room(struct tty_struct *tty)
+{
+ struct hvsi_struct *hp = tty->driver_data;
+
+ return N_OUTBUF - hp->n_outbuf;
+}
+
+static int hvsi_chars_in_buffer(struct tty_struct *tty)
+{
+ struct hvsi_struct *hp = tty->driver_data;
+
+ return hp->n_outbuf;
+}
+
+static int hvsi_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ struct hvsi_struct *hp = tty->driver_data;
+ const char *source = buf;
+ unsigned long flags;
+ int total = 0;
+ int origcount = count;
+
+ spin_lock_irqsave(&hp->lock, flags);
+
+ pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);
+
+ if (!is_open(hp)) {
+ /* we're either closing or not yet open; don't accept data */
+ pr_debug("%s: not open\n", __func__);
+ goto out;
+ }
+
+ /*
+ * when the hypervisor buffer (16K) fills, data will stay in hp->outbuf
+ * and hvsi_write_worker will be scheduled. subsequent hvsi_write() calls
+ * will see there is no room in outbuf and return.
+ */
+ while ((count > 0) && (hvsi_write_room(hp->tty) > 0)) {
+ int chunksize = min(count, hvsi_write_room(hp->tty));
+
+ BUG_ON(hp->n_outbuf < 0);
+ memcpy(hp->outbuf + hp->n_outbuf, source, chunksize);
+ hp->n_outbuf += chunksize;
+
+ total += chunksize;
+ source += chunksize;
+ count -= chunksize;
+ hvsi_push(hp);
+ }
+
+ if (hp->n_outbuf > 0) {
+ /*
+ * we weren't able to write it all to the hypervisor.
+ * schedule another push attempt.
+ */
+ schedule_delayed_work(&hp->writer, 10);
+ }
+
+out:
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ if (total != origcount)
+ pr_debug("%s: wanted %i, only wrote %i\n", __func__, origcount,
+ total);
+
+ return total;
+}
+
+/*
+ * I have never seen throttle or unthrottle called, so this little throttle
+ * buffering scheme may or may not work.
+ */
+static void hvsi_throttle(struct tty_struct *tty)
+{
+ struct hvsi_struct *hp = tty->driver_data;
+
+ pr_debug("%s\n", __func__);
+
+ h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE);
+}
+
+static void hvsi_unthrottle(struct tty_struct *tty)
+{
+ struct hvsi_struct *hp = tty->driver_data;
+ unsigned long flags;
+ int shouldflip = 0;
+
+ pr_debug("%s\n", __func__);
+
+ spin_lock_irqsave(&hp->lock, flags);
+ if (hp->n_throttle) {
+ hvsi_send_overflow(hp);
+ shouldflip = 1;
+ }
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ if (shouldflip)
+ tty_flip_buffer_push(hp->tty);
+
+ h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
+}
+
+static int hvsi_tiocmget(struct tty_struct *tty)
+{
+ struct hvsi_struct *hp = tty->driver_data;
+
+ hvsi_get_mctrl(hp);
+ return hp->mctrl;
+}
+
+static int hvsi_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct hvsi_struct *hp = tty->driver_data;
+ unsigned long flags;
+ uint16_t new_mctrl;
+
+ /* we can only alter DTR */
+ clear &= TIOCM_DTR;
+ set &= TIOCM_DTR;
+
+ spin_lock_irqsave(&hp->lock, flags);
+
+ new_mctrl = (hp->mctrl & ~clear) | set;
+
+ if (hp->mctrl != new_mctrl) {
+ hvsi_set_mctrl(hp, new_mctrl);
+ hp->mctrl = new_mctrl;
+ }
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ return 0;
+}
+
+
+static const struct tty_operations hvsi_ops = {
+ .open = hvsi_open,
+ .close = hvsi_close,
+ .write = hvsi_write,
+ .hangup = hvsi_hangup,
+ .write_room = hvsi_write_room,
+ .chars_in_buffer = hvsi_chars_in_buffer,
+ .throttle = hvsi_throttle,
+ .unthrottle = hvsi_unthrottle,
+ .tiocmget = hvsi_tiocmget,
+ .tiocmset = hvsi_tiocmset,
+};
+
+static int __init hvsi_init(void)
+{
+ int i;
+
+ hvsi_driver = alloc_tty_driver(hvsi_count);
+ if (!hvsi_driver)
+ return -ENOMEM;
+
+ hvsi_driver->owner = THIS_MODULE;
+ hvsi_driver->driver_name = "hvsi";
+ hvsi_driver->name = "hvsi";
+ hvsi_driver->major = HVSI_MAJOR;
+ hvsi_driver->minor_start = HVSI_MINOR;
+ hvsi_driver->type = TTY_DRIVER_TYPE_SYSTEM;
+ hvsi_driver->init_termios = tty_std_termios;
+ hvsi_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
+ hvsi_driver->init_termios.c_ispeed = 9600;
+ hvsi_driver->init_termios.c_ospeed = 9600;
+ hvsi_driver->flags = TTY_DRIVER_REAL_RAW;
+ tty_set_operations(hvsi_driver, &hvsi_ops);
+
+ for (i=0; i < hvsi_count; i++) {
+ struct hvsi_struct *hp = &hvsi_ports[i];
+ int ret = 1;
+
+ ret = request_irq(hp->virq, hvsi_interrupt, IRQF_DISABLED, "hvsi", hp);
+ if (ret)
+ printk(KERN_ERR "HVSI: couldn't reserve irq 0x%x (error %i)\n",
+ hp->virq, ret);
+ }
+ hvsi_wait = wait_for_state; /* irqs active now */
+
+ if (tty_register_driver(hvsi_driver))
+ panic("Couldn't register hvsi console driver\n");
+
+ printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count);
+
+ return 0;
+}
+device_initcall(hvsi_init);
+
+/***** console (not tty) code: *****/
+
+static void hvsi_console_print(struct console *console, const char *buf,
+ unsigned int count)
+{
+ struct hvsi_struct *hp = &hvsi_ports[console->index];
+ char c[HVSI_MAX_OUTGOING_DATA] __ALIGNED__;
+ unsigned int i = 0, n = 0;
+ int ret, donecr = 0;
+
+ mb();
+ if (!is_open(hp))
+ return;
+
+ /*
+ * ugh, we have to translate LF -> CRLF ourselves, in place.
+ * copied from hvc_console.c:
+ */
+ while (count > 0 || i > 0) {
+ if (count > 0 && i < sizeof(c)) {
+ if (buf[n] == '\n' && !donecr) {
+ c[i++] = '\r';
+ donecr = 1;
+ } else {
+ c[i++] = buf[n++];
+ donecr = 0;
+ --count;
+ }
+ } else {
+ ret = hvsi_put_chars(hp, c, i);
+ if (ret < 0)
+ i = 0; /* throw away buffered chars on error */
+ else
+ i -= ret;
+ }
+ }
+}
+
+static struct tty_driver *hvsi_console_device(struct console *console,
+ int *index)
+{
+ *index = console->index;
+ return hvsi_driver;
+}
+
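+/*
+ * Console setup: close and re-handshake the HVSI protocol so the FSP can
+ * renegotiate, raise DTR, and mark this port as the console.
+ */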
+static int __init hvsi_console_setup(struct console *console, char *options)
+{
+ struct hvsi_struct *hp;
+ int ret;
+
+ if (console->index < 0 || console->index >= hvsi_count)
+ return -1;
+ hp = &hvsi_ports[console->index];
+
+ /* give the FSP a chance to change the baud rate when we re-open */
+ hvsi_close_protocol(hp);
+
+ ret = hvsi_handshake(hp);
+ if (ret < 0)
+ return ret;
+
+ ret = hvsi_get_mctrl(hp);
+ if (ret < 0)
+ return ret;
+
+ ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
+ if (ret < 0)
+ return ret;
+
+ hp->flags |= HVSI_CONSOLE;
+
+ return 0;
+}
+
+static struct console hvsi_console = {
+ .name = "hvsi",
+ .write = hvsi_console_print,
+ .device = hvsi_console_device,
+ .setup = hvsi_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
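+/*
+ * Early console init: walk the device tree for "hvterm-protocol" vty nodes,
+ * set up one hvsi_struct per node (polling until interrupts are available),
+ * and register the console if any were found.
+ */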
+static int __init hvsi_console_init(void)
+{
+ struct device_node *vty;
+
+ hvsi_wait = poll_for_state; /* no irqs yet; must poll */
+
+ /* search device tree for vty nodes */
+ for (vty = of_find_compatible_node(NULL, "serial", "hvterm-protocol");
+ vty != NULL;
+ vty = of_find_compatible_node(vty, "serial", "hvterm-protocol")) {
+ struct hvsi_struct *hp;
+ const uint32_t *vtermno, *irq;
+
+ vtermno = of_get_property(vty, "reg", NULL);
+ irq = of_get_property(vty, "interrupts", NULL);
+ if (!vtermno || !irq)
+ continue;
+
+ if (hvsi_count >= MAX_NR_HVSI_CONSOLES) {
+ of_node_put(vty);
+ break;
+ }
+
+ hp = &hvsi_ports[hvsi_count];
+ INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
+ INIT_WORK(&hp->handshaker, hvsi_handshaker);
+ init_waitqueue_head(&hp->emptyq);
+ init_waitqueue_head(&hp->stateq);
+ spin_lock_init(&hp->lock);
+ hp->index = hvsi_count;
+ hp->inbuf_end = hp->inbuf;
+ hp->state = HVSI_CLOSED;
+ hp->vtermno = *vtermno;
+ hp->virq = irq_create_mapping(NULL, irq[0]);
+ if (hp->virq == NO_IRQ) {
+ printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
+ __func__, irq[0]);
+ continue;
+ }
+
+ hvsi_count++;
+ }
+
+ if (hvsi_count)
+ register_console(&hvsi_console);
+ return 0;
+}
+console_initcall(hvsi_console_init);
diff --git a/drivers/tty/ipwireless/Makefile b/drivers/tty/ipwireless/Makefile
new file mode 100644
index 00000000..fe2e1730
--- /dev/null
+++ b/drivers/tty/ipwireless/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the IPWireless driver
+#
+
+obj-$(CONFIG_IPWIRELESS) += ipwireless.o
+
+ipwireless-y := hardware.o main.o network.o tty.o
+
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
new file mode 100644
index 00000000..0aeb5a38
--- /dev/null
+++ b/drivers/tty/ipwireless/hardware.c
@@ -0,0 +1,1764 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "hardware.h"
+#include "setup_protocol.h"
+#include "network.h"
+#include "main.h"
+
+static void ipw_send_setup_packet(struct ipw_hardware *hw);
+static void handle_received_SETUP_packet(struct ipw_hardware *ipw,
+ unsigned int address,
+ const unsigned char *data, int len,
+ int is_last);
+static void ipwireless_setup_timer(unsigned long data);
+static void handle_received_CTRL_packet(struct ipw_hardware *hw,
+ unsigned int channel_idx, const unsigned char *data, int len);
+
+/*#define TIMING_DIAGNOSTICS*/
+
+#ifdef TIMING_DIAGNOSTICS
+
+static struct timing_stats {
+ unsigned long last_report_time;
+ unsigned long read_time;
+ unsigned long write_time;
+ unsigned long read_bytes;
+ unsigned long write_bytes;
+ unsigned long start_time;
+} timing_stats;
+
+static void report_timing(void);
+
+static void start_timing(void)
+{
+ timing_stats.start_time = jiffies;
+}
+
+static void end_read_timing(unsigned length)
+{
+ timing_stats.read_time += (jiffies - timing_stats.start_time);
+ timing_stats.read_bytes += length + 2;
+ report_timing();
+}
+
+static void end_write_timing(unsigned length)
+{
+ timing_stats.write_time += (jiffies - timing_stats.start_time);
+ timing_stats.write_bytes += length + 2;
+ report_timing();
+}
+
+static void report_timing(void)
+{
+ unsigned long since = jiffies - timing_stats.last_report_time;
+
+ /* If it's been more than one second... */
+ if (since >= HZ) {
+ int first = (timing_stats.last_report_time == 0);
+
+ timing_stats.last_report_time = jiffies;
+ if (!first)
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": %u us elapsed - read %lu bytes in %u us, wrote %lu bytes in %u us\n",
+ jiffies_to_usecs(since),
+ timing_stats.read_bytes,
+ jiffies_to_usecs(timing_stats.read_time),
+ timing_stats.write_bytes,
+ jiffies_to_usecs(timing_stats.write_time));
+
+ timing_stats.read_time = 0;
+ timing_stats.write_time = 0;
+ timing_stats.read_bytes = 0;
+ timing_stats.write_bytes = 0;
+ }
+}
+#else
+static void start_timing(void) { }
+static void end_read_timing(unsigned length) { }
+static void end_write_timing(unsigned length) { }
+#endif
+
+/* Imported IPW definitions */
+
+#define LL_MTU_V1 318
+#define LL_MTU_V2 250
+#define LL_MTU_MAX (LL_MTU_V1 > LL_MTU_V2 ? LL_MTU_V1 : LL_MTU_V2)
+
+#define PRIO_DATA 2
+#define PRIO_CTRL 1
+#define PRIO_SETUP 0
+
+/* Addresses */
+#define ADDR_SETUP_PROT 0
+
+/* Protocol ids */
+enum {
+ /* Identifier for the Com Data protocol */
+ TL_PROTOCOLID_COM_DATA = 0,
+
+ /* Identifier for the Com Control protocol */
+ TL_PROTOCOLID_COM_CTRL = 1,
+
+ /* Identifier for the Setup protocol */
+ TL_PROTOCOLID_SETUP = 2
+};
+
+/* Number of bytes in the first NL packet header (cannot do
+ * sizeof(nl_first_packet_header) since it's a bitfield) */
+#define NL_FIRST_PACKET_HEADER_SIZE 3
+
+/* Number of bytes in NL packet header (cannot do
+ * sizeof(nl_packet_header) since it's a bitfield) */
+#define NL_FOLLOWING_PACKET_HEADER_SIZE 1
+
+struct nl_first_packet_header {
+ unsigned char protocol:3;
+ unsigned char address:3;
+ unsigned char packet_rank:2;
+ unsigned char length_lsb;
+ unsigned char length_msb;
+};
+
+struct nl_packet_header {
+ unsigned char protocol:3;
+ unsigned char address:3;
+ unsigned char packet_rank:2;
+};
+
+/* Value of 'packet_rank' above */
+#define NL_INTERMEDIATE_PACKET 0x0
+#define NL_LAST_PACKET 0x1
+#define NL_FIRST_PACKET 0x2
+
+union nl_packet {
+ /* Network packet header of the first packet (a special case) */
+ struct nl_first_packet_header hdr_first;
+ /* Network packet header of the following packets (if any) */
+ struct nl_packet_header hdr;
+ /* Complete network packet (header + data) */
+ unsigned char rawpkt[LL_MTU_MAX];
+} __attribute__ ((__packed__));
+
+#define HW_VERSION_UNKNOWN -1
+#define HW_VERSION_1 1
+#define HW_VERSION_2 2
+
+/* IPW I/O ports */
+#define IOIER 0x00 /* Interrupt Enable Register */
+#define IOIR 0x02 /* Interrupt Source/ACK register */
+#define IODCR 0x04 /* Data Control Register */
+#define IODRR 0x06 /* Data Read Register */
+#define IODWR 0x08 /* Data Write Register */
+#define IOESR 0x0A /* Embedded Driver Status Register */
+#define IORXR 0x0C /* Rx Fifo Register (Host to Embedded) */
+#define IOTXR 0x0E /* Tx Fifo Register (Embedded to Host) */
+
+/* I/O ports and bit definitions for version 1 of the hardware */
+
+/* IER bits*/
+#define IER_RXENABLED 0x1
+#define IER_TXENABLED 0x2
+
+/* ISR bits */
+#define IR_RXINTR 0x1
+#define IR_TXINTR 0x2
+
+/* DCR bits */
+#define DCR_RXDONE 0x1
+#define DCR_TXDONE 0x2
+#define DCR_RXRESET 0x4
+#define DCR_TXRESET 0x8
+
+/* I/O ports and bit definitions for version 2 of the hardware */
+
+struct MEMCCR {
+ unsigned short reg_config_option; /* PCCOR: Configuration Option Register */
+ unsigned short reg_config_and_status; /* PCCSR: Configuration and Status Register */
+ unsigned short reg_pin_replacement; /* PCPRR: Pin Replacement Register */
+ unsigned short reg_socket_and_copy; /* PCSCR: Socket and Copy Register */
+ unsigned short reg_ext_status; /* PCESR: Extended Status Register */
+ unsigned short reg_io_base; /* PCIOB: I/O Base Register */
+};
+
+struct MEMINFREG {
+ unsigned short memreg_tx_old; /* TX Register (R/W) */
+ unsigned short pad1;
+ unsigned short memreg_rx_done; /* RXDone Register (R/W) */
+ unsigned short pad2;
+ unsigned short memreg_rx; /* RX Register (R/W) */
+ unsigned short pad3;
+ unsigned short memreg_pc_interrupt_ack; /* PC intr Ack Register (W) */
+ unsigned short pad4;
+ unsigned long memreg_card_present;/* Mask for Host to check (R) for
+ * CARD_PRESENT_VALUE */
+ unsigned short memreg_tx_new; /* TX2 (new) Register (R/W) */
+};
+
+#define CARD_PRESENT_VALUE (0xBEEFCAFEUL)
+
+#define MEMTX_TX 0x0001
+#define MEMRX_RX 0x0001
+#define MEMRX_RX_DONE 0x0001
+#define MEMRX_PCINTACKK 0x0001
+
+#define NL_NUM_OF_PRIORITIES 3
+#define NL_NUM_OF_PROTOCOLS 3
+#define NL_NUM_OF_ADDRESSES NO_OF_IPW_CHANNELS
+
+struct ipw_hardware {
+ unsigned int base_port;
+ short hw_version;
+ unsigned short ll_mtu;
+ spinlock_t lock;
+
+ int initializing;
+ int init_loops;
+ struct timer_list setup_timer;
+
+ /* Flag if hw is ready to send next packet */
+ int tx_ready;
+ /* Count of pending packets to be sent */
+ int tx_queued;
+ struct list_head tx_queue[NL_NUM_OF_PRIORITIES];
+
+ int rx_bytes_queued;
+ struct list_head rx_queue;
+ /* Pool of rx_packet structures that are not currently used. */
+ struct list_head rx_pool;
+ int rx_pool_size;
+ /* True if reception of data is blocked while userspace processes it. */
+ int blocking_rx;
+ /* True if there is RX data ready on the hardware. */
+ int rx_ready;
+ unsigned short last_memtx_serial;
+ /*
+ * Newer versions of the V2 card firmware send serial numbers in the
+ * MemTX register. 'serial_number_detected' is set true when we detect
+ * a non-zero serial number (indicating the new firmware). Thereafter,
+ * the driver can safely ignore the Timer Recovery re-sends to avoid
+ * out-of-sync problems.
+ */
+ int serial_number_detected;
+ struct work_struct work_rx;
+
+ /* True if we are to send the set-up data to the hardware. */
+ int to_setup;
+
+ /* Card has been removed */
+ int removed;
+ /* Saved irq value when we disable the interrupt. */
+ int irq;
+ /* True if this driver is shutting down. */
+ int shutting_down;
+ /* Modem control lines */
+ unsigned int control_lines[NL_NUM_OF_ADDRESSES];
+ struct ipw_rx_packet *packet_assembler[NL_NUM_OF_ADDRESSES];
+
+ struct tasklet_struct tasklet;
+
+ /* The handle for the network layer, for the sending of events to it. */
+ struct ipw_network *network;
+ struct MEMINFREG __iomem *memory_info_regs;
+ struct MEMCCR __iomem *memregs_CCR;
+ void (*reboot_callback) (void *data);
+ void *reboot_callback_data;
+
+ unsigned short __iomem *memreg_tx;
+};
+
+/*
+ * Packet info structure for tx packets.
+ * Note: not all the fields defined here are required for all protocols
+ */
+struct ipw_tx_packet {
+ struct list_head queue;
+ /* channel idx + 1 */
+ unsigned char dest_addr;
+ /* SETUP, CTRL or DATA */
+ unsigned char protocol;
+ /* Length of data block, which starts at the end of this structure */
+ unsigned short length;
+ /* Sending state */
+ /* Offset of where we've sent up to so far */
+ unsigned long offset;
+ /* Count of packet fragments, starting at 0 */
+ int fragment_count;
+
+ /* Called after packet is sent and before is freed */
+ void (*packet_callback) (void *cb_data, unsigned int packet_length);
+ void *callback_data;
+};
+
+/* Signals from DTE */
+#define COMCTRL_RTS 0
+#define COMCTRL_DTR 1
+
+/* Signals from DCE */
+#define COMCTRL_CTS 2
+#define COMCTRL_DCD 3
+#define COMCTRL_DSR 4
+#define COMCTRL_RI 5
+
+struct ipw_control_packet_body {
+ /* DTE signal or DCE signal */
+ unsigned char sig_no;
+ /* 0: set signal, 1: clear signal */
+ unsigned char value;
+} __attribute__ ((__packed__));
+
+struct ipw_control_packet {
+ struct ipw_tx_packet header;
+ struct ipw_control_packet_body body;
+};
+
+struct ipw_rx_packet {
+ struct list_head queue;
+ unsigned int capacity;
+ unsigned int length;
+ unsigned int protocol;
+ unsigned int channel_idx;
+};
+
+static char *data_type(const unsigned char *buf, unsigned length)
+{
+ struct nl_packet_header *hdr = (struct nl_packet_header *) buf;
+
+ if (length == 0)
+ return " ";
+
+ if (hdr->packet_rank & NL_FIRST_PACKET) {
+ switch (hdr->protocol) {
+ case TL_PROTOCOLID_COM_DATA: return "DATA ";
+ case TL_PROTOCOLID_COM_CTRL: return "CTRL ";
+ case TL_PROTOCOLID_SETUP: return "SETUP";
+ default: return "???? ";
+ }
+ } else
+ return " ";
+}
+
+#define DUMP_MAX_BYTES 64
+
+static void dump_data_bytes(const char *type, const unsigned char *data,
+ unsigned length)
+{
+ char prefix[56];
+
+ sprintf(prefix, IPWIRELESS_PCCARD_NAME ": %s %s ",
+ type, data_type(data, length));
+ print_hex_dump_bytes(prefix, 0, (void *)data,
+ length < DUMP_MAX_BYTES ? length : DUMP_MAX_BYTES);
+}
+
+static void swap_packet_bitfield_to_le(unsigned char *data)
+{
+#ifdef __BIG_ENDIAN_BITFIELD
+ unsigned char tmp = *data, ret = 0;
+
+ /*
+ * transform bits from aa.bbb.ccc to ccc.bbb.aa
+ */
+ ret |= (tmp & 0xc0) >> 6;
+ ret |= (tmp & 0x38) >> 1;
+ ret |= (tmp & 0x07) << 5;
+ *data = ret & 0xff;
+#endif
+}
+
+static void swap_packet_bitfield_from_le(unsigned char *data)
+{
+#ifdef __BIG_ENDIAN_BITFIELD
+ unsigned char tmp = *data, ret = 0;
+
+ /*
+ * transform bits from ccc.bbb.aa to aa.bbb.ccc
+ */
+ ret |= (tmp & 0xe0) >> 5;
+ ret |= (tmp & 0x1c) << 1;
+ ret |= (tmp & 0x03) << 6;
+ *data = ret & 0xff;
+#endif
+}
+
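+/*
+ * Push a single link-layer fragment to the card: write the length and the
+ * payload as 16-bit little-endian words, then signal the card (DCR_TXDONE on
+ * V1 hardware, the memreg_rx register on V2).
+ */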
+static void do_send_fragment(struct ipw_hardware *hw, unsigned char *data,
+ unsigned length)
+{
+ unsigned i;
+ unsigned long flags;
+
+ start_timing();
+ BUG_ON(length > hw->ll_mtu);
+
+ if (ipwireless_debug)
+ dump_data_bytes("send", data, length);
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ hw->tx_ready = 0;
+ swap_packet_bitfield_to_le(data);
+
+ if (hw->hw_version == HW_VERSION_1) {
+ outw((unsigned short) length, hw->base_port + IODWR);
+
+ for (i = 0; i < length; i += 2) {
+ unsigned short d = data[i];
+ __le16 raw_data;
+
+ if (i + 1 < length)
+ d |= data[i + 1] << 8;
+ raw_data = cpu_to_le16(d);
+ outw(raw_data, hw->base_port + IODWR);
+ }
+
+ outw(DCR_TXDONE, hw->base_port + IODCR);
+ } else if (hw->hw_version == HW_VERSION_2) {
+ outw((unsigned short) length, hw->base_port);
+
+ for (i = 0; i < length; i += 2) {
+ unsigned short d = data[i];
+ __le16 raw_data;
+
+ if (i + 1 < length)
+ d |= data[i + 1] << 8;
+ raw_data = cpu_to_le16(d);
+ outw(raw_data, hw->base_port);
+ }
+ while ((i & 3) != 2) {
+ outw((unsigned short) 0xDEAD, hw->base_port);
+ i += 2;
+ }
+ writew(MEMRX_RX, &hw->memory_info_regs->memreg_rx);
+ }
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ end_write_timing(length);
+}
+
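+/*
+ * Emit the next fragment of 'packet': a 3-byte NL header on the first
+ * fragment, 1 byte on the rest, with up to (ll_mtu - header) bytes of
+ * payload. For example, with the V2 MTU of 250 bytes, a 600-byte DATA packet
+ * goes out as fragments carrying 247, 249 and 104 payload bytes. Any unsent
+ * remainder is re-queued at the head of the highest-priority queue.
+ */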
+static void do_send_packet(struct ipw_hardware *hw, struct ipw_tx_packet *packet)
+{
+ unsigned short fragment_data_len;
+ unsigned short data_left = packet->length - packet->offset;
+ unsigned short header_size;
+ union nl_packet pkt;
+
+ header_size =
+ (packet->fragment_count == 0)
+ ? NL_FIRST_PACKET_HEADER_SIZE
+ : NL_FOLLOWING_PACKET_HEADER_SIZE;
+ fragment_data_len = hw->ll_mtu - header_size;
+ if (data_left < fragment_data_len)
+ fragment_data_len = data_left;
+
+ /*
+ * hdr_first is now in machine bitfield order, which will be swapped
+ * to le just before it goes to hw
+ */
+ pkt.hdr_first.protocol = packet->protocol;
+ pkt.hdr_first.address = packet->dest_addr;
+ pkt.hdr_first.packet_rank = 0;
+
+ /* First packet? */
+ if (packet->fragment_count == 0) {
+ pkt.hdr_first.packet_rank |= NL_FIRST_PACKET;
+ pkt.hdr_first.length_lsb = (unsigned char) packet->length;
+ pkt.hdr_first.length_msb =
+ (unsigned char) (packet->length >> 8);
+ }
+
+ memcpy(pkt.rawpkt + header_size,
+ ((unsigned char *) packet) + sizeof(struct ipw_tx_packet) +
+ packet->offset, fragment_data_len);
+ packet->offset += fragment_data_len;
+ packet->fragment_count++;
+
+ /* Last packet? (May also be first packet.) */
+ if (packet->offset == packet->length)
+ pkt.hdr_first.packet_rank |= NL_LAST_PACKET;
+ do_send_fragment(hw, pkt.rawpkt, header_size + fragment_data_len);
+
+ /* If this packet has unsent data, then re-queue it. */
+ if (packet->offset < packet->length) {
+ /*
+ * Re-queue it at the head of the highest priority queue so
+ * it goes before all other packets
+ */
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ list_add(&packet->queue, &hw->tx_queue[0]);
+ hw->tx_queued++;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ } else {
+ if (packet->packet_callback)
+ packet->packet_callback(packet->callback_data,
+ packet->length);
+ kfree(packet);
+ }
+}
+
+static void ipw_setup_hardware(struct ipw_hardware *hw)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (hw->hw_version == HW_VERSION_1) {
+ /* Reset RX FIFO */
+ outw(DCR_RXRESET, hw->base_port + IODCR);
+ /* SB: Reset TX FIFO */
+ outw(DCR_TXRESET, hw->base_port + IODCR);
+
+ /* Enable TX and RX interrupts. */
+ outw(IER_TXENABLED | IER_RXENABLED, hw->base_port + IOIER);
+ } else {
+ /*
+ * Set INTRACK bit (bit 0), which means we must explicitly
+ * acknowledge interrupts by clearing bit 2 of reg_config_and_status.
+ */
+ unsigned short csr = readw(&hw->memregs_CCR->reg_config_and_status);
+
+ csr |= 1;
+ writew(csr, &hw->memregs_CCR->reg_config_and_status);
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+/*
+ * If 'packet' is NULL, then this function allocates a new packet, setting its
+ * length to 0 and ensuring it has the specified minimum amount of free space.
+ *
+ * If 'packet' is not NULL, then this function enlarges it if it doesn't
+ * have the specified minimum amount of free space.
+ *
+ */
+static struct ipw_rx_packet *pool_allocate(struct ipw_hardware *hw,
+ struct ipw_rx_packet *packet,
+ int minimum_free_space)
+{
+
+ if (!packet) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (!list_empty(&hw->rx_pool)) {
+ packet = list_first_entry(&hw->rx_pool,
+ struct ipw_rx_packet, queue);
+ hw->rx_pool_size--;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ list_del(&packet->queue);
+ } else {
+ const int min_capacity =
+ ipwireless_ppp_mru(hw->network) + 2;
+ int new_capacity;
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+ new_capacity =
+ (minimum_free_space > min_capacity
+ ? minimum_free_space
+ : min_capacity);
+ packet = kmalloc(sizeof(struct ipw_rx_packet)
+ + new_capacity, GFP_ATOMIC);
+ if (!packet)
+ return NULL;
+ packet->capacity = new_capacity;
+ }
+ packet->length = 0;
+ }
+
+ if (packet->length + minimum_free_space > packet->capacity) {
+ struct ipw_rx_packet *old_packet = packet;
+
+ packet = kmalloc(sizeof(struct ipw_rx_packet) +
+ old_packet->length + minimum_free_space,
+ GFP_ATOMIC);
+ if (!packet) {
+ kfree(old_packet);
+ return NULL;
+ }
+ memcpy(packet, old_packet,
+ sizeof(struct ipw_rx_packet)
+ + old_packet->length);
+ packet->capacity = old_packet->length + minimum_free_space;
+ kfree(old_packet);
+ }
+
+ return packet;
+}
+
+static void pool_free(struct ipw_hardware *hw, struct ipw_rx_packet *packet)
+{
+ if (hw->rx_pool_size > 6)
+ kfree(packet);
+ else {
+ hw->rx_pool_size++;
+ list_add(&packet->queue, &hw->rx_pool);
+ }
+}
+
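+/*
+ * Queue one received fragment. DATA fragments are accumulated per channel in
+ * packet_assembler[] until the final fragment arrives; CTRL packets are
+ * queued as-is. Complete packets go onto rx_queue for the rx work item.
+ */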
+static void queue_received_packet(struct ipw_hardware *hw,
+ unsigned int protocol,
+ unsigned int address,
+ const unsigned char *data, int length,
+ int is_last)
+{
+ unsigned int channel_idx = address - 1;
+ struct ipw_rx_packet *packet = NULL;
+ unsigned long flags;
+
+ /* Discard packet if channel index is out of range. */
+ if (channel_idx >= NL_NUM_OF_ADDRESSES) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": data packet has bad address %u\n", address);
+ return;
+ }
+
+ /*
+ * ->packet_assembler is safe to touch unlocked, this is the only place
+ */
+ if (protocol == TL_PROTOCOLID_COM_DATA) {
+ struct ipw_rx_packet **assem =
+ &hw->packet_assembler[channel_idx];
+
+ /*
+ * Create a new packet, or, if the assembler already contains one,
+ * enlarge it by 'length' bytes.
+ */
+ (*assem) = pool_allocate(hw, *assem, length);
+ if (!(*assem)) {
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": no memory for incomming data packet, dropped!\n");
+ return;
+ }
+ (*assem)->protocol = protocol;
+ (*assem)->channel_idx = channel_idx;
+
+ /* Append this packet data onto existing data. */
+ memcpy((unsigned char *)(*assem) +
+ sizeof(struct ipw_rx_packet)
+ + (*assem)->length, data, length);
+ (*assem)->length += length;
+ if (is_last) {
+ packet = *assem;
+ *assem = NULL;
+ /* Count queued DATA bytes only */
+ spin_lock_irqsave(&hw->lock, flags);
+ hw->rx_bytes_queued += packet->length;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ }
+ } else {
+ /* If it's a CTRL packet, don't assemble, just queue it. */
+ packet = pool_allocate(hw, NULL, length);
+ if (!packet) {
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": no memory for incomming ctrl packet, dropped!\n");
+ return;
+ }
+ packet->protocol = protocol;
+ packet->channel_idx = channel_idx;
+ memcpy((unsigned char *)packet + sizeof(struct ipw_rx_packet),
+ data, length);
+ packet->length = length;
+ }
+
+ /*
+ * If this is the last packet, then send the assembled packet on to the
+ * network layer.
+ */
+ if (packet) {
+ spin_lock_irqsave(&hw->lock, flags);
+ list_add_tail(&packet->queue, &hw->rx_queue);
+ /* Block reception of incoming packets if queue is full. */
+ hw->blocking_rx =
+ (hw->rx_bytes_queued >= IPWIRELESS_RX_QUEUE_SIZE);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->work_rx);
+ }
+}
+
+/*
+ * Workqueue callback
+ */
+static void ipw_receive_data_work(struct work_struct *work_rx)
+{
+ struct ipw_hardware *hw =
+ container_of(work_rx, struct ipw_hardware, work_rx);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ while (!list_empty(&hw->rx_queue)) {
+ struct ipw_rx_packet *packet =
+ list_first_entry(&hw->rx_queue,
+ struct ipw_rx_packet, queue);
+
+ if (hw->shutting_down)
+ break;
+ list_del(&packet->queue);
+
+ /*
+ * Note: ipwireless_network_packet_received must be called in a
+ * process context (i.e. via schedule_work) because the tty
+ * output code can sleep in the tty_flip_buffer_push call.
+ */
+ if (packet->protocol == TL_PROTOCOLID_COM_DATA) {
+ if (hw->network != NULL) {
+ /* If the network hasn't been disconnected. */
+ spin_unlock_irqrestore(&hw->lock, flags);
+ /*
+ * This must run unlocked due to tty processing
+ * and mutex locking
+ */
+ ipwireless_network_packet_received(
+ hw->network,
+ packet->channel_idx,
+ (unsigned char *)packet
+ + sizeof(struct ipw_rx_packet),
+ packet->length);
+ spin_lock_irqsave(&hw->lock, flags);
+ }
+ /* Count queued DATA bytes only */
+ hw->rx_bytes_queued -= packet->length;
+ } else {
+ /*
+ * This is safe to be called locked, callchain does
+ * not block
+ */
+ handle_received_CTRL_packet(hw, packet->channel_idx,
+ (unsigned char *)packet
+ + sizeof(struct ipw_rx_packet),
+ packet->length);
+ }
+ pool_free(hw, packet);
+ /*
+ * Unblock reception of incoming packets if queue is no longer
+ * full.
+ */
+ hw->blocking_rx =
+ hw->rx_bytes_queued >= IPWIRELESS_RX_QUEUE_SIZE;
+ if (hw->shutting_down)
+ break;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
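+/*
+ * A CTRL packet carries a single (signal, value) pair; map it onto the
+ * corresponding modem control line and notify the network layer of the
+ * change.
+ */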
+static void handle_received_CTRL_packet(struct ipw_hardware *hw,
+ unsigned int channel_idx,
+ const unsigned char *data, int len)
+{
+ const struct ipw_control_packet_body *body =
+ (const struct ipw_control_packet_body *) data;
+ unsigned int changed_mask;
+
+ if (len != sizeof(struct ipw_control_packet_body)) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": control packet was %d bytes - wrong size!\n",
+ len);
+ return;
+ }
+
+ switch (body->sig_no) {
+ case COMCTRL_CTS:
+ changed_mask = IPW_CONTROL_LINE_CTS;
+ break;
+ case COMCTRL_DCD:
+ changed_mask = IPW_CONTROL_LINE_DCD;
+ break;
+ case COMCTRL_DSR:
+ changed_mask = IPW_CONTROL_LINE_DSR;
+ break;
+ case COMCTRL_RI:
+ changed_mask = IPW_CONTROL_LINE_RI;
+ break;
+ default:
+ changed_mask = 0;
+ }
+
+ if (changed_mask != 0) {
+ if (body->value)
+ hw->control_lines[channel_idx] |= changed_mask;
+ else
+ hw->control_lines[channel_idx] &= ~changed_mask;
+ if (hw->network)
+ ipwireless_network_notify_control_line_change(
+ hw->network,
+ channel_idx,
+ hw->control_lines[channel_idx],
+ changed_mask);
+ }
+}
+
+static void handle_received_packet(struct ipw_hardware *hw,
+ const union nl_packet *packet,
+ unsigned short len)
+{
+ unsigned int protocol = packet->hdr.protocol;
+ unsigned int address = packet->hdr.address;
+ unsigned int header_length;
+ const unsigned char *data;
+ unsigned int data_len;
+ int is_last = packet->hdr.packet_rank & NL_LAST_PACKET;
+
+ if (packet->hdr.packet_rank & NL_FIRST_PACKET)
+ header_length = NL_FIRST_PACKET_HEADER_SIZE;
+ else
+ header_length = NL_FOLLOWING_PACKET_HEADER_SIZE;
+
+ data = packet->rawpkt + header_length;
+ data_len = len - header_length;
+ switch (protocol) {
+ case TL_PROTOCOLID_COM_DATA:
+ case TL_PROTOCOLID_COM_CTRL:
+ queue_received_packet(hw, protocol, address, data, data_len,
+ is_last);
+ break;
+ case TL_PROTOCOLID_SETUP:
+ handle_received_SETUP_packet(hw, address, data, data_len,
+ is_last);
+ break;
+ }
+}
+
+static void acknowledge_data_read(struct ipw_hardware *hw)
+{
+ if (hw->hw_version == HW_VERSION_1)
+ outw(DCR_RXDONE, hw->base_port + IODCR);
+ else
+ writew(MEMRX_PCINTACKK,
+ &hw->memory_info_regs->memreg_pc_interrupt_ack);
+}
+
+/*
+ * Retrieve a packet from the IPW hardware.
+ */
+static void do_receive_packet(struct ipw_hardware *hw)
+{
+ unsigned len;
+ unsigned i;
+ unsigned char pkt[LL_MTU_MAX];
+
+ start_timing();
+
+ if (hw->hw_version == HW_VERSION_1) {
+ len = inw(hw->base_port + IODRR);
+ if (len > hw->ll_mtu) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": received a packet of %u bytes - longer than the MTU!\n", len);
+ outw(DCR_RXDONE | DCR_RXRESET, hw->base_port + IODCR);
+ return;
+ }
+
+ for (i = 0; i < len; i += 2) {
+ __le16 raw_data = inw(hw->base_port + IODRR);
+ unsigned short data = le16_to_cpu(raw_data);
+
+ pkt[i] = (unsigned char) data;
+ pkt[i + 1] = (unsigned char) (data >> 8);
+ }
+ } else {
+ len = inw(hw->base_port);
+ if (len > hw->ll_mtu) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": received a packet of %u bytes - longer than the MTU!\n", len);
+ writew(MEMRX_PCINTACKK,
+ &hw->memory_info_regs->memreg_pc_interrupt_ack);
+ return;
+ }
+
+ for (i = 0; i < len; i += 2) {
+ __le16 raw_data = inw(hw->base_port);
+ unsigned short data = le16_to_cpu(raw_data);
+
+ pkt[i] = (unsigned char) data;
+ pkt[i + 1] = (unsigned char) (data >> 8);
+ }
+
+ while ((i & 3) != 2) {
+ inw(hw->base_port);
+ i += 2;
+ }
+ }
+
+ acknowledge_data_read(hw);
+
+ swap_packet_bitfield_from_le(pkt);
+
+ if (ipwireless_debug)
+ dump_data_bytes("recv", pkt, len);
+
+ handle_received_packet(hw, (union nl_packet *) pkt, len);
+
+ end_read_timing(len);
+}
+
+static int get_current_packet_priority(struct ipw_hardware *hw)
+{
+ /*
+ * If we're initializing, don't send anything of higher priority than
+ * PRIO_SETUP. The network layer therefore need not care about
+ * hardware initialization - any of its stuff will simply be queued
+ * until setup is complete.
+ */
+ return (hw->to_setup || hw->initializing
+ ? PRIO_SETUP + 1 : NL_NUM_OF_PRIORITIES);
+}
+
+/*
+ * return 1 if something has been received from hw
+ */
+static int get_packets_from_hw(struct ipw_hardware *hw)
+{
+ int received = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ while (hw->rx_ready && !hw->blocking_rx) {
+ received = 1;
+ hw->rx_ready--;
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ do_receive_packet(hw);
+
+ spin_lock_irqsave(&hw->lock, flags);
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ return received;
+}
+
+/*
+ * Send pending packet up to given priority, prioritize SETUP data until
+ * hardware is fully setup.
+ *
+ * return 1 if more packets can be sent
+ */
+static int send_pending_packet(struct ipw_hardware *hw, int priority_limit)
+{
+ int more_to_send = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (hw->tx_queued && hw->tx_ready) {
+ int priority;
+ struct ipw_tx_packet *packet = NULL;
+
+ /* Pick a packet */
+ for (priority = 0; priority < priority_limit; priority++) {
+ if (!list_empty(&hw->tx_queue[priority])) {
+ packet = list_first_entry(
+ &hw->tx_queue[priority],
+ struct ipw_tx_packet,
+ queue);
+
+ hw->tx_queued--;
+ list_del(&packet->queue);
+
+ break;
+ }
+ }
+ if (!packet) {
+ hw->tx_queued = 0;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ return 0;
+ }
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ /* Send */
+ do_send_packet(hw, packet);
+
+ /* Check if more to send */
+ spin_lock_irqsave(&hw->lock, flags);
+ for (priority = 0; priority < priority_limit; priority++)
+ if (!list_empty(&hw->tx_queue[priority])) {
+ more_to_send = 1;
+ break;
+ }
+
+ if (!more_to_send)
+ hw->tx_queued = 0;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ return more_to_send;
+}
+
+/*
+ * Send and receive all queued packets.
+ */
+static void ipwireless_do_tasklet(unsigned long hw_)
+{
+ struct ipw_hardware *hw = (struct ipw_hardware *) hw_;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (hw->shutting_down) {
+ spin_unlock_irqrestore(&hw->lock, flags);
+ return;
+ }
+
+ if (hw->to_setup == 1) {
+ /*
+ * Initial setup data sent to hardware
+ */
+ hw->to_setup = 2;
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ ipw_setup_hardware(hw);
+ ipw_send_setup_packet(hw);
+
+ send_pending_packet(hw, PRIO_SETUP + 1);
+ get_packets_from_hw(hw);
+ } else {
+ int priority_limit = get_current_packet_priority(hw);
+ int again;
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ do {
+ again = send_pending_packet(hw, priority_limit);
+ again |= get_packets_from_hw(hw);
+ } while (again);
+ }
+}
+
+/*
+ * return true if the card is physically present.
+ */
+static int is_card_present(struct ipw_hardware *hw)
+{
+ if (hw->hw_version == HW_VERSION_1)
+ return inw(hw->base_port + IOIR) != 0xFFFF;
+ else
+ return readl(&hw->memory_info_regs->memreg_card_present) ==
+ CARD_PRESENT_VALUE;
+}
+
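+/*
+ * V1 interrupt handler: read and acknowledge the TX/RX bits in the interrupt
+ * register and defer the actual packet work to the tasklet.
+ */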
+static irqreturn_t ipwireless_handle_v1_interrupt(int irq,
+ struct ipw_hardware *hw)
+{
+ unsigned short irqn;
+
+ irqn = inw(hw->base_port + IOIR);
+
+ /* Check if card is present */
+ if (irqn == 0xFFFF)
+ return IRQ_NONE;
+ else if (irqn != 0) {
+ unsigned short ack = 0;
+ unsigned long flags;
+
+ /* Transmit complete. */
+ if (irqn & IR_TXINTR) {
+ ack |= IR_TXINTR;
+ spin_lock_irqsave(&hw->lock, flags);
+ hw->tx_ready = 1;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ }
+ /* Received data */
+ if (irqn & IR_RXINTR) {
+ ack |= IR_RXINTR;
+ spin_lock_irqsave(&hw->lock, flags);
+ hw->rx_ready++;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ }
+ if (ack != 0) {
+ outw(ack, hw->base_port + IOIR);
+ tasklet_schedule(&hw->tasklet);
+ }
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static void acknowledge_pcmcia_interrupt(struct ipw_hardware *hw)
+{
+ unsigned short csr = readw(&hw->memregs_CCR->reg_config_and_status);
+
+ csr &= 0xfffd;
+ writew(csr, &hw->memregs_CCR->reg_config_and_status);
+}
+
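+/*
+ * V2/V3 interrupt handler: interrupts are signalled through the shared memory
+ * window. Handles the serial-numbered memreg_tx scheme of newer firmware and
+ * falls back to memreg_tx_old when no valid value is seen.
+ */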
+static irqreturn_t ipwireless_handle_v2_v3_interrupt(int irq,
+ struct ipw_hardware *hw)
+{
+ int tx = 0;
+ int rx = 0;
+ int rx_repeat = 0;
+ int try_mem_tx_old;
+ unsigned long flags;
+
+ do {
+
+ unsigned short memtx = readw(hw->memreg_tx);
+ unsigned short memtx_serial;
+ unsigned short memrxdone =
+ readw(&hw->memory_info_regs->memreg_rx_done);
+
+ try_mem_tx_old = 0;
+
+ /* check whether the interrupt was generated by the ipwireless card */
+ if (!(memtx & MEMTX_TX) && !(memrxdone & MEMRX_RX_DONE)) {
+
+ /* check if the card uses memreg_tx_old register */
+ if (hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) {
+ memtx = readw(&hw->memory_info_regs->memreg_tx_old);
+ if (memtx & MEMTX_TX) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": Using memreg_tx_old\n");
+ hw->memreg_tx =
+ &hw->memory_info_regs->memreg_tx_old;
+ } else {
+ return IRQ_NONE;
+ }
+ } else
+ return IRQ_NONE;
+ }
+
+ /*
+ * See if the card is physically present. Note that while it is
+ * powering up, it appears not to be present.
+ */
+ if (!is_card_present(hw)) {
+ acknowledge_pcmcia_interrupt(hw);
+ return IRQ_HANDLED;
+ }
+
+ memtx_serial = memtx & (unsigned short) 0xff00;
+ if (memtx & MEMTX_TX) {
+ writew(memtx_serial, hw->memreg_tx);
+
+ if (hw->serial_number_detected) {
+ if (memtx_serial != hw->last_memtx_serial) {
+ hw->last_memtx_serial = memtx_serial;
+ spin_lock_irqsave(&hw->lock, flags);
+ hw->rx_ready++;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ rx = 1;
+ } else
+ /* Ignore 'Timer Recovery' duplicates. */
+ rx_repeat = 1;
+ } else {
+ /*
+ * If a non-zero serial number is seen, then enable
+ * serial number checking.
+ */
+ if (memtx_serial != 0) {
+ hw->serial_number_detected = 1;
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
+ ": memreg_tx serial num detected\n");
+
+ spin_lock_irqsave(&hw->lock, flags);
+ hw->rx_ready++;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ }
+ rx = 1;
+ }
+ }
+ if (memrxdone & MEMRX_RX_DONE) {
+ writew(0, &hw->memory_info_regs->memreg_rx_done);
+ spin_lock_irqsave(&hw->lock, flags);
+ hw->tx_ready = 1;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ tx = 1;
+ }
+ if (tx)
+ writew(MEMRX_PCINTACKK,
+ &hw->memory_info_regs->memreg_pc_interrupt_ack);
+
+ acknowledge_pcmcia_interrupt(hw);
+
+ if (tx || rx)
+ tasklet_schedule(&hw->tasklet);
+ else if (!rx_repeat) {
+ if (hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) {
+ if (hw->serial_number_detected)
+ printk(KERN_WARNING IPWIRELESS_PCCARD_NAME
+ ": spurious interrupt - new_tx mode\n");
+ else {
+ printk(KERN_WARNING IPWIRELESS_PCCARD_NAME
+ ": no valid memreg_tx value - switching to the old memreg_tx\n");
+ hw->memreg_tx =
+ &hw->memory_info_regs->memreg_tx_old;
+ try_mem_tx_old = 1;
+ }
+ } else
+ printk(KERN_WARNING IPWIRELESS_PCCARD_NAME
+ ": spurious interrupt - old_tx mode\n");
+ }
+
+ } while (try_mem_tx_old == 1);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t ipwireless_interrupt(int irq, void *dev_id)
+{
+ struct ipw_dev *ipw = dev_id;
+
+ if (ipw->hardware->hw_version == HW_VERSION_1)
+ return ipwireless_handle_v1_interrupt(irq, ipw->hardware);
+ else
+ return ipwireless_handle_v2_v3_interrupt(irq, ipw->hardware);
+}
+
+static void flush_packets_to_hw(struct ipw_hardware *hw)
+{
+ int priority_limit;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ priority_limit = get_current_packet_priority(hw);
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ while (send_pending_packet(hw, priority_limit));
+}
+
+static void send_packet(struct ipw_hardware *hw, int priority,
+ struct ipw_tx_packet *packet)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ list_add_tail(&packet->queue, &hw->tx_queue[priority]);
+ hw->tx_queued++;
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ flush_packets_to_hw(hw);
+}
+
+/* Create a data packet (atomic allocation) */
+static void *alloc_data_packet(int data_size,
+ unsigned char dest_addr,
+ unsigned char protocol)
+{
+ struct ipw_tx_packet *packet = kzalloc(
+ sizeof(struct ipw_tx_packet) + data_size,
+ GFP_ATOMIC);
+
+ if (!packet)
+ return NULL;
+
+ INIT_LIST_HEAD(&packet->queue);
+ packet->dest_addr = dest_addr;
+ packet->protocol = protocol;
+ packet->length = data_size;
+
+ return packet;
+}
+
+static void *alloc_ctrl_packet(int header_size,
+ unsigned char dest_addr,
+ unsigned char protocol,
+ unsigned char sig_no)
+{
+ /*
+ * sig_no is located right after the ipw_tx_packet struct in every
+ * CTRL or SETUP packet, so we can use ipw_control_packet as a
+ * common struct
+ */
+ struct ipw_control_packet *packet = kzalloc(header_size, GFP_ATOMIC);
+
+ if (!packet)
+ return NULL;
+
+ INIT_LIST_HEAD(&packet->header.queue);
+ packet->header.dest_addr = dest_addr;
+ packet->header.protocol = protocol;
+ packet->header.length = header_size - sizeof(struct ipw_tx_packet);
+ packet->body.sig_no = sig_no;
+
+ return packet;
+}
+
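+/*
+ * Queue 'length' bytes of user data for transmission on the given channel.
+ * 'callback' is invoked once the final fragment has been handed to the
+ * hardware.
+ */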
+int ipwireless_send_packet(struct ipw_hardware *hw, unsigned int channel_idx,
+ const unsigned char *data, unsigned int length,
+ void (*callback) (void *cb, unsigned int length),
+ void *callback_data)
+{
+ struct ipw_tx_packet *packet;
+
+ packet = alloc_data_packet(length, (channel_idx + 1),
+ TL_PROTOCOLID_COM_DATA);
+ if (!packet)
+ return -ENOMEM;
+ packet->packet_callback = callback;
+ packet->callback_data = callback_data;
+ memcpy((unsigned char *) packet + sizeof(struct ipw_tx_packet), data,
+ length);
+
+ send_packet(hw, PRIO_DATA, packet);
+ return 0;
+}
+
+static int set_control_line(struct ipw_hardware *hw, int prio,
+ unsigned int channel_idx, int line, int state)
+{
+ struct ipw_control_packet *packet;
+ int protocolid = TL_PROTOCOLID_COM_CTRL;
+
+ if (prio == PRIO_SETUP)
+ protocolid = TL_PROTOCOLID_SETUP;
+
+ packet = alloc_ctrl_packet(sizeof(struct ipw_control_packet),
+ (channel_idx + 1), protocolid, line);
+ if (!packet)
+ return -ENOMEM;
+ packet->header.length = sizeof(struct ipw_control_packet_body);
+ packet->body.value = (state == 0 ? 0 : 1);
+ send_packet(hw, prio, &packet->header);
+ return 0;
+}
+
+
+static int set_DTR(struct ipw_hardware *hw, int priority,
+ unsigned int channel_idx, int state)
+{
+ if (state != 0)
+ hw->control_lines[channel_idx] |= IPW_CONTROL_LINE_DTR;
+ else
+ hw->control_lines[channel_idx] &= ~IPW_CONTROL_LINE_DTR;
+
+ return set_control_line(hw, priority, channel_idx, COMCTRL_DTR, state);
+}
+
+static int set_RTS(struct ipw_hardware *hw, int priority,
+ unsigned int channel_idx, int state)
+{
+ if (state != 0)
+ hw->control_lines[channel_idx] |= IPW_CONTROL_LINE_RTS;
+ else
+ hw->control_lines[channel_idx] &= ~IPW_CONTROL_LINE_RTS;
+
+ return set_control_line(hw, priority, channel_idx, COMCTRL_RTS, state);
+}
+
+int ipwireless_set_DTR(struct ipw_hardware *hw, unsigned int channel_idx,
+ int state)
+{
+ return set_DTR(hw, PRIO_CTRL, channel_idx, state);
+}
+
+int ipwireless_set_RTS(struct ipw_hardware *hw, unsigned int channel_idx,
+ int state)
+{
+ return set_RTS(hw, PRIO_CTRL, channel_idx, state);
+}
+
+struct ipw_setup_get_version_query_packet {
+ struct ipw_tx_packet header;
+ struct tl_setup_get_version_qry body;
+};
+
+struct ipw_setup_config_packet {
+ struct ipw_tx_packet header;
+ struct tl_setup_config_msg body;
+};
+
+struct ipw_setup_config_done_packet {
+ struct ipw_tx_packet header;
+ struct tl_setup_config_done_msg body;
+};
+
+struct ipw_setup_open_packet {
+ struct ipw_tx_packet header;
+ struct tl_setup_open_msg body;
+};
+
+struct ipw_setup_info_packet {
+ struct ipw_tx_packet header;
+ struct tl_setup_info_msg body;
+};
+
+struct ipw_setup_reboot_msg_ack {
+ struct ipw_tx_packet header;
+ struct TlSetupRebootMsgAck body;
+};
+
+/* This handles the actual initialization of the card */
+static void __handle_setup_get_version_rsp(struct ipw_hardware *hw)
+{
+ struct ipw_setup_config_packet *config_packet;
+ struct ipw_setup_config_done_packet *config_done_packet;
+ struct ipw_setup_open_packet *open_packet;
+ struct ipw_setup_info_packet *info_packet;
+ int port;
+ unsigned int channel_idx;
+
+ /* generate config packet */
+ for (port = 1; port <= NL_NUM_OF_ADDRESSES; port++) {
+ config_packet = alloc_ctrl_packet(
+ sizeof(struct ipw_setup_config_packet),
+ ADDR_SETUP_PROT,
+ TL_PROTOCOLID_SETUP,
+ TL_SETUP_SIGNO_CONFIG_MSG);
+ if (!config_packet)
+ goto exit_nomem;
+ config_packet->header.length = sizeof(struct tl_setup_config_msg);
+ config_packet->body.port_no = port;
+ config_packet->body.prio_data = PRIO_DATA;
+ config_packet->body.prio_ctrl = PRIO_CTRL;
+ send_packet(hw, PRIO_SETUP, &config_packet->header);
+ }
+ config_done_packet = alloc_ctrl_packet(
+ sizeof(struct ipw_setup_config_done_packet),
+ ADDR_SETUP_PROT,
+ TL_PROTOCOLID_SETUP,
+ TL_SETUP_SIGNO_CONFIG_DONE_MSG);
+ if (!config_done_packet)
+ goto exit_nomem;
+ config_done_packet->header.length = sizeof(struct tl_setup_config_done_msg);
+ send_packet(hw, PRIO_SETUP, &config_done_packet->header);
+
+ /* generate open packet */
+ for (port = 1; port <= NL_NUM_OF_ADDRESSES; port++) {
+ open_packet = alloc_ctrl_packet(
+ sizeof(struct ipw_setup_open_packet),
+ ADDR_SETUP_PROT,
+ TL_PROTOCOLID_SETUP,
+ TL_SETUP_SIGNO_OPEN_MSG);
+ if (!open_packet)
+ goto exit_nomem;
+ open_packet->header.length = sizeof(struct tl_setup_open_msg);
+ open_packet->body.port_no = port;
+ send_packet(hw, PRIO_SETUP, &open_packet->header);
+ }
+ for (channel_idx = 0;
+ channel_idx < NL_NUM_OF_ADDRESSES; channel_idx++) {
+ int ret;
+
+ ret = set_DTR(hw, PRIO_SETUP, channel_idx,
+ (hw->control_lines[channel_idx] &
+ IPW_CONTROL_LINE_DTR) != 0);
+ if (ret) {
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": error setting DTR (%d)\n", ret);
+ return;
+ }
+
+ ret = set_RTS(hw, PRIO_SETUP, channel_idx,
+ (hw->control_lines[channel_idx] &
+ IPW_CONTROL_LINE_RTS) != 0);
+ if (ret) {
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": error setting RTS (%d)\n", ret);
+ return;
+ }
+ }
+ /*
+ * For NDIS we assume that we are using sync PPP frames, for COM async.
+ * This driver uses NDIS mode too. We don't bother with translation
+ * from async -> sync PPP.
+ */
+ info_packet = alloc_ctrl_packet(sizeof(struct ipw_setup_info_packet),
+ ADDR_SETUP_PROT,
+ TL_PROTOCOLID_SETUP,
+ TL_SETUP_SIGNO_INFO_MSG);
+ if (!info_packet)
+ goto exit_nomem;
+ info_packet->header.length = sizeof(struct tl_setup_info_msg);
+ info_packet->body.driver_type = NDISWAN_DRIVER;
+ info_packet->body.major_version = NDISWAN_DRIVER_MAJOR_VERSION;
+ info_packet->body.minor_version = NDISWAN_DRIVER_MINOR_VERSION;
+ send_packet(hw, PRIO_SETUP, &info_packet->header);
+
+ /* Initialization is now complete, so we clear the 'to_setup' flag */
+ hw->to_setup = 0;
+
+ return;
+
+exit_nomem:
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": not enough memory to alloc control packet\n");
+ hw->to_setup = -1;
+}
+
+static void handle_setup_get_version_rsp(struct ipw_hardware *hw,
+ unsigned char vers_no)
+{
+ del_timer(&hw->setup_timer);
+ hw->initializing = 0;
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": card is ready.\n");
+
+ if (vers_no == TL_SETUP_VERSION)
+ __handle_setup_get_version_rsp(hw);
+ else
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": invalid hardware version no %u\n",
+ (unsigned int) vers_no);
+}
+
+static void ipw_send_setup_packet(struct ipw_hardware *hw)
+{
+ struct ipw_setup_get_version_query_packet *ver_packet;
+
+ ver_packet = alloc_ctrl_packet(
+ sizeof(struct ipw_setup_get_version_query_packet),
+ ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP,
+ TL_SETUP_SIGNO_GET_VERSION_QRY);
+ if (!ver_packet)
+ return;
+ ver_packet->header.length = sizeof(struct tl_setup_get_version_qry);
+
+ /*
+ * Response is handled in handle_received_SETUP_packet
+ */
+ send_packet(hw, PRIO_SETUP, &ver_packet->header);
+}
+
+static void handle_received_SETUP_packet(struct ipw_hardware *hw,
+ unsigned int address,
+ const unsigned char *data, int len,
+ int is_last)
+{
+ const union ipw_setup_rx_msg *rx_msg = (const union ipw_setup_rx_msg *) data;
+
+ if (address != ADDR_SETUP_PROT) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": setup packet has bad address %d\n", address);
+ return;
+ }
+
+ switch (rx_msg->sig_no) {
+ case TL_SETUP_SIGNO_GET_VERSION_RSP:
+ if (hw->to_setup)
+ handle_setup_get_version_rsp(hw,
+ rx_msg->version_rsp_msg.version);
+ break;
+
+ case TL_SETUP_SIGNO_OPEN_MSG:
+ if (ipwireless_debug) {
+ unsigned int channel_idx = rx_msg->open_msg.port_no - 1;
+
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": OPEN_MSG [channel %u] reply received\n",
+ channel_idx);
+ }
+ break;
+
+ case TL_SETUP_SIGNO_INFO_MSG_ACK:
+ if (ipwireless_debug)
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
+ ": card successfully configured as NDISWAN\n");
+ break;
+
+ case TL_SETUP_SIGNO_REBOOT_MSG:
+ if (hw->to_setup)
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
+ ": Setup not completed - ignoring reboot msg\n");
+ else {
+ struct ipw_setup_reboot_msg_ack *packet;
+
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
+ ": Acknowledging REBOOT message\n");
+ packet = alloc_ctrl_packet(
+ sizeof(struct ipw_setup_reboot_msg_ack),
+ ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP,
+ TL_SETUP_SIGNO_REBOOT_MSG_ACK);
+ if (!packet)
+ break;
+ packet->header.length =
+ sizeof(struct TlSetupRebootMsgAck);
+ send_packet(hw, PRIO_SETUP, &packet->header);
+ if (hw->reboot_callback)
+ hw->reboot_callback(hw->reboot_callback_data);
+ }
+ break;
+
+ default:
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": unknown setup message %u received\n",
+ (unsigned int) rx_msg->sig_no);
+ }
+}
+
+static void do_close_hardware(struct ipw_hardware *hw)
+{
+ unsigned int irqn;
+
+ if (hw->hw_version == HW_VERSION_1) {
+ /* Disable TX and RX interrupts. */
+ outw(0, hw->base_port + IOIER);
+
+ /* Acknowledge any outstanding interrupt requests */
+ irqn = inw(hw->base_port + IOIR);
+ if (irqn & IR_TXINTR)
+ outw(IR_TXINTR, hw->base_port + IOIR);
+ if (irqn & IR_RXINTR)
+ outw(IR_RXINTR, hw->base_port + IOIR);
+
+ synchronize_irq(hw->irq);
+ }
+}
+
+struct ipw_hardware *ipwireless_hardware_create(void)
+{
+ int i;
+ struct ipw_hardware *hw =
+ kzalloc(sizeof(struct ipw_hardware), GFP_KERNEL);
+
+ if (!hw)
+ return NULL;
+
+ hw->irq = -1;
+ hw->initializing = 1;
+ hw->tx_ready = 1;
+ hw->rx_bytes_queued = 0;
+ hw->rx_pool_size = 0;
+ hw->last_memtx_serial = (unsigned short) 0xffff;
+ for (i = 0; i < NL_NUM_OF_PRIORITIES; i++)
+ INIT_LIST_HEAD(&hw->tx_queue[i]);
+
+ INIT_LIST_HEAD(&hw->rx_queue);
+ INIT_LIST_HEAD(&hw->rx_pool);
+ spin_lock_init(&hw->lock);
+ tasklet_init(&hw->tasklet, ipwireless_do_tasklet, (unsigned long) hw);
+ INIT_WORK(&hw->work_rx, ipw_receive_data_work);
+ setup_timer(&hw->setup_timer, ipwireless_setup_timer,
+ (unsigned long) hw);
+
+ return hw;
+}
+
+void ipwireless_init_hardware_v1(struct ipw_hardware *hw,
+ unsigned int base_port,
+ void __iomem *attr_memory,
+ void __iomem *common_memory,
+ int is_v2_card,
+ void (*reboot_callback) (void *data),
+ void *reboot_callback_data)
+{
+ if (hw->removed) {
+ hw->removed = 0;
+ enable_irq(hw->irq);
+ }
+ hw->base_port = base_port;
+ hw->hw_version = (is_v2_card ? HW_VERSION_2 : HW_VERSION_1);
+ hw->ll_mtu = (hw->hw_version == HW_VERSION_1 ? LL_MTU_V1 : LL_MTU_V2);
+ hw->memregs_CCR = (struct MEMCCR __iomem *)
+ ((unsigned short __iomem *) attr_memory + 0x200);
+ hw->memory_info_regs = (struct MEMINFREG __iomem *) common_memory;
+ hw->memreg_tx = &hw->memory_info_regs->memreg_tx_new;
+ hw->reboot_callback = reboot_callback;
+ hw->reboot_callback_data = reboot_callback_data;
+}
+
+void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw)
+{
+ hw->initializing = 1;
+ hw->init_loops = 0;
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": waiting for card to start up...\n");
+ ipwireless_setup_timer((unsigned long) hw);
+}
+
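+/*
+ * Setup retry timer: keep prodding the card to answer the version query.
+ * After TL_SETUP_MAX_VERSION_QRY attempts on the new TX register, fall back
+ * to the old one and retry; give up after the same number of attempts again.
+ */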
+static void ipwireless_setup_timer(unsigned long data)
+{
+ struct ipw_hardware *hw = (struct ipw_hardware *) data;
+
+ hw->init_loops++;
+
+ if (hw->init_loops == TL_SETUP_MAX_VERSION_QRY &&
+ hw->hw_version == HW_VERSION_2 &&
+ hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": failed to startup using TX2, trying TX\n");
+
+ hw->memreg_tx = &hw->memory_info_regs->memreg_tx_old;
+ hw->init_loops = 0;
+ }
+ /* Give up after a certain number of retries */
+ if (hw->init_loops == TL_SETUP_MAX_VERSION_QRY) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": card failed to start up!\n");
+ hw->initializing = 0;
+ } else {
+ /* Do not attempt to write to the board if it is not present. */
+ if (is_card_present(hw)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ hw->to_setup = 1;
+ hw->tx_ready = 1;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ tasklet_schedule(&hw->tasklet);
+ }
+
+ mod_timer(&hw->setup_timer,
+ jiffies + msecs_to_jiffies(TL_SETUP_VERSION_QRY_TMO));
+ }
+}
+
+/*
+ * Stop any interrupts from executing so that, once this function returns,
+ * other layers of the driver can be sure they won't get any more callbacks.
+ * It must therefore be called from process context.
+ */
+void ipwireless_stop_interrupts(struct ipw_hardware *hw)
+{
+ if (!hw->shutting_down) {
+ /* Tell everyone we are going down. */
+ hw->shutting_down = 1;
+ del_timer(&hw->setup_timer);
+
+ /* Prevent the hardware from sending any more interrupts */
+ do_close_hardware(hw);
+ }
+}
+
+void ipwireless_hardware_free(struct ipw_hardware *hw)
+{
+ int i;
+ struct ipw_rx_packet *rp, *rq;
+ struct ipw_tx_packet *tp, *tq;
+
+ ipwireless_stop_interrupts(hw);
+
+ flush_work_sync(&hw->work_rx);
+
+ for (i = 0; i < NL_NUM_OF_ADDRESSES; i++)
+ if (hw->packet_assembler[i] != NULL)
+ kfree(hw->packet_assembler[i]);
+
+ for (i = 0; i < NL_NUM_OF_PRIORITIES; i++)
+ list_for_each_entry_safe(tp, tq, &hw->tx_queue[i], queue) {
+ list_del(&tp->queue);
+ kfree(tp);
+ }
+
+ list_for_each_entry_safe(rp, rq, &hw->rx_queue, queue) {
+ list_del(&rp->queue);
+ kfree(rp);
+ }
+
+ list_for_each_entry_safe(rp, rq, &hw->rx_pool, queue) {
+ list_del(&rp->queue);
+ kfree(rp);
+ }
+ kfree(hw);
+}
+
+/*
+ * Associate the specified network with this hardware, so it will receive events
+ * from it.
+ */
+void ipwireless_associate_network(struct ipw_hardware *hw,
+ struct ipw_network *network)
+{
+ hw->network = network;
+}
diff --git a/drivers/tty/ipwireless/hardware.h b/drivers/tty/ipwireless/hardware.h
new file mode 100644
index 00000000..90a8590e
--- /dev/null
+++ b/drivers/tty/ipwireless/hardware.h
@@ -0,0 +1,62 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#ifndef _IPWIRELESS_CS_HARDWARE_H_
+#define _IPWIRELESS_CS_HARDWARE_H_
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#define IPW_CONTROL_LINE_CTS 0x0001
+#define IPW_CONTROL_LINE_DCD 0x0002
+#define IPW_CONTROL_LINE_DSR 0x0004
+#define IPW_CONTROL_LINE_RI 0x0008
+#define IPW_CONTROL_LINE_DTR 0x0010
+#define IPW_CONTROL_LINE_RTS 0x0020
+
+struct ipw_hardware;
+struct ipw_network;
+
+struct ipw_hardware *ipwireless_hardware_create(void);
+void ipwireless_hardware_free(struct ipw_hardware *hw);
+irqreturn_t ipwireless_interrupt(int irq, void *dev_id);
+int ipwireless_set_DTR(struct ipw_hardware *hw, unsigned int channel_idx,
+ int state);
+int ipwireless_set_RTS(struct ipw_hardware *hw, unsigned int channel_idx,
+ int state);
+int ipwireless_send_packet(struct ipw_hardware *hw,
+ unsigned int channel_idx,
+ const unsigned char *data,
+ unsigned int length,
+ void (*packet_sent_callback) (void *cb,
+ unsigned int length),
+ void *sent_cb_data);
+void ipwireless_associate_network(struct ipw_hardware *hw,
+ struct ipw_network *net);
+void ipwireless_stop_interrupts(struct ipw_hardware *hw);
+void ipwireless_init_hardware_v1(struct ipw_hardware *hw,
+ unsigned int base_port,
+ void __iomem *attr_memory,
+ void __iomem *common_memory,
+ int is_v2_card,
+ void (*reboot_cb) (void *data),
+ void *reboot_cb_data);
+void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw);
+void ipwireless_sleep(unsigned int tenths);
+
+#endif
diff --git a/drivers/tty/ipwireless/main.c b/drivers/tty/ipwireless/main.c
new file mode 100644
index 00000000..655c7948
--- /dev/null
+++ b/drivers/tty/ipwireless/main.c
@@ -0,0 +1,347 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#include "hardware.h"
+#include "network.h"
+#include "main.h"
+#include "tty.h"
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <pcmcia/cisreg.h>
+#include <pcmcia/device_id.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/ds.h>
+
+static const struct pcmcia_device_id ipw_ids[] = {
+ PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0100),
+ PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0200),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, ipw_ids);
+
+static void ipwireless_detach(struct pcmcia_device *link);
+
+/*
+ * Module params
+ */
+/* Debug mode: more verbose, print sent/recv bytes */
+int ipwireless_debug;
+int ipwireless_loopback;
+int ipwireless_out_queue = 10;
+
+module_param_named(debug, ipwireless_debug, int, 0);
+module_param_named(loopback, ipwireless_loopback, int, 0);
+module_param_named(out_queue, ipwireless_out_queue, int, 0);
+MODULE_PARM_DESC(debug, "switch on debug messages [0]");
+MODULE_PARM_DESC(loopback,
+ "debug: enable ras_raw channel [0]");
+MODULE_PARM_DESC(out_queue, "debug: set size of outgoing PPP queue [10]");
+
+/* Executes in process context. */
+static void signalled_reboot_work(struct work_struct *work_reboot)
+{
+ struct ipw_dev *ipw = container_of(work_reboot, struct ipw_dev,
+ work_reboot);
+ struct pcmcia_device *link = ipw->link;
+ pcmcia_reset_card(link->socket);
+}
+
+static void signalled_reboot_callback(void *callback_data)
+{
+ struct ipw_dev *ipw = (struct ipw_dev *) callback_data;
+
+ /* Delegate to process context. */
+ schedule_work(&ipw->work_reboot);
+}
+
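+/*
+ * pcmcia_loop_config() callback: claim the I/O window and the common and
+ * attribute memory windows, and ioremap the latter two.
+ */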
+static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
+{
+ struct ipw_dev *ipw = priv_data;
+ int ret;
+
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+
+ /* 0x40 causes it to generate level mode interrupts. */
+ /* 0x04 enables IREQ pin. */
+ p_dev->config_index |= 0x44;
+ p_dev->io_lines = 16;
+ ret = pcmcia_request_io(p_dev);
+ if (ret)
+ return ret;
+
+ if (!request_region(p_dev->resource[0]->start,
+ resource_size(p_dev->resource[0]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ p_dev->resource[2]->flags |=
+ WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
+
+ ret = pcmcia_request_window(p_dev, p_dev->resource[2], 0);
+ if (ret != 0)
+ goto exit1;
+
+ ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
+ if (ret != 0)
+ goto exit1;
+
+ ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;
+
+ ipw->common_memory = ioremap(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]));
+ if (!request_mem_region(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit2;
+ }
+
+ p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
+ WIN_ENABLE;
+ p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
+ ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
+ if (ret != 0)
+ goto exit3;
+
+ ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
+ if (ret != 0)
+ goto exit3;
+
+ ipw->attr_memory = ioremap(p_dev->resource[3]->start,
+ resource_size(p_dev->resource[3]));
+ if (!request_mem_region(p_dev->resource[3]->start,
+ resource_size(p_dev->resource[3]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit4;
+ }
+
+ return 0;
+
+exit4:
+ iounmap(ipw->attr_memory);
+exit3:
+ release_mem_region(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]));
+exit2:
+ iounmap(ipw->common_memory);
+exit1:
+ release_region(p_dev->resource[0]->start,
+ resource_size(p_dev->resource[0]));
+exit:
+ pcmcia_disable_device(p_dev);
+ return ret;
+}
+
+static int config_ipwireless(struct ipw_dev *ipw)
+{
+ struct pcmcia_device *link = ipw->link;
+ int ret = 0;
+
+ ipw->is_v2_card = 0;
+ link->config_flags |= CONF_AUTO_SET_IO | CONF_AUTO_SET_IOMEM |
+ CONF_ENABLE_IRQ;
+
+ ret = pcmcia_loop_config(link, ipwireless_probe, ipw);
+ if (ret != 0)
+ return ret;
+
+ INIT_WORK(&ipw->work_reboot, signalled_reboot_work);
+
+ ipwireless_init_hardware_v1(ipw->hardware, link->resource[0]->start,
+ ipw->attr_memory, ipw->common_memory,
+ ipw->is_v2_card, signalled_reboot_callback,
+ ipw);
+
+ ret = pcmcia_request_irq(link, ipwireless_interrupt);
+ if (ret != 0)
+ goto exit;
+
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Card type %s\n",
+ ipw->is_v2_card ? "V2/V3" : "V1");
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": I/O ports %pR, irq %d\n", link->resource[0],
+ (unsigned int) link->irq);
+ if (ipw->attr_memory && ipw->common_memory)
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": attr memory %pR, common memory %pR\n",
+ link->resource[3],
+ link->resource[2]);
+
+ ipw->network = ipwireless_network_create(ipw->hardware);
+ if (!ipw->network)
+ goto exit;
+
+ ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network);
+ if (!ipw->tty)
+ goto exit;
+
+ ipwireless_init_hardware_v2_v3(ipw->hardware);
+
+ /*
+ * Do the RequestConfiguration last, because it enables interrupts.
+ * Then we don't get any interrupts before we're ready for them.
+ */
+ ret = pcmcia_enable_device(link);
+ if (ret != 0)
+ goto exit;
+
+ return 0;
+
+exit:
+ if (ipw->common_memory) {
+ release_mem_region(link->resource[2]->start,
+ resource_size(link->resource[2]));
+ iounmap(ipw->common_memory);
+ }
+ if (ipw->attr_memory) {
+ release_mem_region(link->resource[3]->start,
+ resource_size(link->resource[3]));
+ iounmap(ipw->attr_memory);
+ }
+ pcmcia_disable_device(link);
+ return -1;
+}
+
+static void release_ipwireless(struct ipw_dev *ipw)
+{
+ release_region(ipw->link->resource[0]->start,
+ resource_size(ipw->link->resource[0]));
+ if (ipw->common_memory) {
+ release_mem_region(ipw->link->resource[2]->start,
+ resource_size(ipw->link->resource[2]));
+ iounmap(ipw->common_memory);
+ }
+ if (ipw->attr_memory) {
+ release_mem_region(ipw->link->resource[3]->start,
+ resource_size(ipw->link->resource[3]));
+ iounmap(ipw->attr_memory);
+ }
+ pcmcia_disable_device(ipw->link);
+}
+
+/*
+ * ipwireless_attach() creates an "instance" of the driver, allocating
+ * local data structures for one device (one interface). The device
+ * is registered with Card Services.
+ *
+ * The pcmcia_device structure is initialized, but we don't actually
+ * configure the card at this point -- we wait until we receive a
+ * card insertion event.
+ */
+static int ipwireless_attach(struct pcmcia_device *link)
+{
+ struct ipw_dev *ipw;
+ int ret;
+
+ ipw = kzalloc(sizeof(struct ipw_dev), GFP_KERNEL);
+ if (!ipw)
+ return -ENOMEM;
+
+ ipw->link = link;
+ link->priv = ipw;
+
+ ipw->hardware = ipwireless_hardware_create();
+ if (!ipw->hardware) {
+ kfree(ipw);
+ return -ENOMEM;
+ }
+ /* RegisterClient will call config_ipwireless */
+
+ ret = config_ipwireless(ipw);
+
+ if (ret != 0) {
+ ipwireless_detach(link);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This deletes a driver "instance". The device is de-registered with
+ * Card Services. If it has been released, all local data structures
+ * are freed. Otherwise, the structures will be freed when the device
+ * is released.
+ */
+static void ipwireless_detach(struct pcmcia_device *link)
+{
+ struct ipw_dev *ipw = link->priv;
+
+ release_ipwireless(ipw);
+
+ if (ipw->tty != NULL)
+ ipwireless_tty_free(ipw->tty);
+ if (ipw->network != NULL)
+ ipwireless_network_free(ipw->network);
+ if (ipw->hardware != NULL)
+ ipwireless_hardware_free(ipw->hardware);
+ kfree(ipw);
+}
+
+static struct pcmcia_driver me = {
+ .owner = THIS_MODULE,
+ .probe = ipwireless_attach,
+ .remove = ipwireless_detach,
+ .name = IPWIRELESS_PCCARD_NAME,
+ .id_table = ipw_ids
+};
+
+/*
+ * Module insertion: initialisation of the module.
+ * Register the card with cardmgr...
+ */
+static int __init init_ipwireless(void)
+{
+ int ret;
+
+ ret = ipwireless_tty_init();
+ if (ret != 0)
+ return ret;
+
+ ret = pcmcia_register_driver(&me);
+ if (ret != 0)
+ ipwireless_tty_release();
+
+ return ret;
+}
+
+/*
+ * Module removal
+ */
+static void __exit exit_ipwireless(void)
+{
+ pcmcia_unregister_driver(&me);
+ ipwireless_tty_release();
+}
+
+module_init(init_ipwireless);
+module_exit(exit_ipwireless);
+
+MODULE_AUTHOR(IPWIRELESS_PCMCIA_AUTHOR);
+MODULE_DESCRIPTION(IPWIRELESS_PCCARD_NAME " " IPWIRELESS_PCMCIA_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/ipwireless/main.h b/drivers/tty/ipwireless/main.h
new file mode 100644
index 00000000..f2cbb116
--- /dev/null
+++ b/drivers/tty/ipwireless/main.h
@@ -0,0 +1,68 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#ifndef _IPWIRELESS_CS_H_
+#define _IPWIRELESS_CS_H_
+
+#include <linux/sched.h>
+#include <linux/types.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include "hardware.h"
+
+#define IPWIRELESS_PCCARD_NAME "ipwireless"
+#define IPWIRELESS_PCMCIA_VERSION "1.1"
+#define IPWIRELESS_PCMCIA_AUTHOR \
+ "Stephen Blackheath, Ben Martel, Jiri Kosina and David Sterba"
+
+#define IPWIRELESS_TX_QUEUE_SIZE 262144
+#define IPWIRELESS_RX_QUEUE_SIZE 262144
+
+#define IPWIRELESS_STATE_DEBUG
+
+struct ipw_hardware;
+struct ipw_network;
+struct ipw_tty;
+
+struct ipw_dev {
+ struct pcmcia_device *link;
+ int is_v2_card;
+
+ void __iomem *attr_memory;
+
+ void __iomem *common_memory;
+
+ /* Reference to attribute memory, containing CIS data */
+ void *attribute_memory;
+
+ /* Hardware context */
+ struct ipw_hardware *hardware;
+ /* Network layer context */
+ struct ipw_network *network;
+ /* TTY device context */
+ struct ipw_tty *tty;
+ struct work_struct work_reboot;
+};
+
+/* Module parameters */
+extern int ipwireless_debug;
+extern int ipwireless_loopback;
+extern int ipwireless_out_queue;
+
+#endif
diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c
new file mode 100644
index 00000000..f7daeea5
--- /dev/null
+++ b/drivers/tty/ipwireless/network.c
@@ -0,0 +1,508 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include <linux/slab.h>
+#include <linux/if_ppp.h>
+#include <linux/skbuff.h>
+
+#include "network.h"
+#include "hardware.h"
+#include "main.h"
+#include "tty.h"
+
+#define MAX_ASSOCIATED_TTYS 2
+
+#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
+
+struct ipw_network {
+ /* Hardware context, used for calls to hardware layer. */
+ struct ipw_hardware *hardware;
+ /* Context for kernel 'generic_ppp' functionality */
+ struct ppp_channel *ppp_channel;
+ /* tty context connected with IPW console */
+ struct ipw_tty *associated_ttys[NO_OF_IPW_CHANNELS][MAX_ASSOCIATED_TTYS];
+ /* True if ppp needs waking up once we're ready to xmit */
+ int ppp_blocked;
+ /* Number of packets queued up in hardware module. */
+ int outgoing_packets_queued;
+ /* Spinlock to avoid interrupts during shutdown */
+ spinlock_t lock;
+ struct mutex close_lock;
+
+	/* PPP ioctl data, not actually used anywhere */
+ unsigned int flags;
+ unsigned int rbits;
+ u32 xaccm[8];
+ u32 raccm;
+ int mru;
+
+ int shutting_down;
+ unsigned int ras_control_lines;
+
+ struct work_struct work_go_online;
+ struct work_struct work_go_offline;
+};
+
+static void notify_packet_sent(void *callback_data, unsigned int packet_length)
+{
+ struct ipw_network *network = callback_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&network->lock, flags);
+ network->outgoing_packets_queued--;
+ if (network->ppp_channel != NULL) {
+ if (network->ppp_blocked) {
+ network->ppp_blocked = 0;
+ spin_unlock_irqrestore(&network->lock, flags);
+ ppp_output_wakeup(network->ppp_channel);
+ if (ipwireless_debug)
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
+ ": ppp unblocked\n");
+ } else
+ spin_unlock_irqrestore(&network->lock, flags);
+ } else
+ spin_unlock_irqrestore(&network->lock, flags);
+}
+
+/*
+ * Called by the ppp system when it has a packet to send to the hardware.
+ */
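+/*
+ * Returns 1 once the packet has been handed to the hardware (the skb is
+ * consumed), or 0 when it cannot be taken yet, in which case the ppp core
+ * keeps the skb and retries later.
+ */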
+static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
+ struct sk_buff *skb)
+{
+ struct ipw_network *network = ppp_channel->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&network->lock, flags);
+ if (network->outgoing_packets_queued < ipwireless_out_queue) {
+ unsigned char *buf;
+ static unsigned char header[] = {
+ PPP_ALLSTATIONS, /* 0xff */
+ PPP_UI, /* 0x03 */
+ };
+ int ret;
+
+ network->outgoing_packets_queued++;
+ spin_unlock_irqrestore(&network->lock, flags);
+
+ /*
+ * If we have the requested amount of headroom in the skb we
+ * were handed, then we can add the header efficiently.
+ */
+ if (skb_headroom(skb) >= 2) {
+ memcpy(skb_push(skb, 2), header, 2);
+ ret = ipwireless_send_packet(network->hardware,
+ IPW_CHANNEL_RAS, skb->data,
+ skb->len,
+ notify_packet_sent,
+ network);
+ if (ret == -1) {
+ skb_pull(skb, 2);
+ return 0;
+ }
+ } else {
+ /* Otherwise (rarely) we do it inefficiently. */
+ buf = kmalloc(skb->len + 2, GFP_ATOMIC);
+ if (!buf)
+ return 0;
+ memcpy(buf + 2, skb->data, skb->len);
+ memcpy(buf, header, 2);
+ ret = ipwireless_send_packet(network->hardware,
+ IPW_CHANNEL_RAS, buf,
+ skb->len + 2,
+ notify_packet_sent,
+ network);
+ kfree(buf);
+ if (ret == -1)
+ return 0;
+ }
+ kfree_skb(skb);
+ return 1;
+ } else {
+ /*
+ * Otherwise reject the packet, and flag that the ppp system
+ * needs to be unblocked once we are ready to send.
+ */
+ network->ppp_blocked = 1;
+ spin_unlock_irqrestore(&network->lock, flags);
+ if (ipwireless_debug)
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": ppp blocked\n");
+ return 0;
+ }
+}
+
+/* Handle an ioctl call that has come in via ppp (a copy of ppp_async_ioctl()). */
+static int ipwireless_ppp_ioctl(struct ppp_channel *ppp_channel,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ipw_network *network = ppp_channel->private;
+ int err, val;
+ u32 accm[8];
+ int __user *user_arg = (int __user *) arg;
+
+ err = -EFAULT;
+ switch (cmd) {
+ case PPPIOCGFLAGS:
+ val = network->flags | network->rbits;
+ if (put_user(val, user_arg))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSFLAGS:
+ if (get_user(val, user_arg))
+ break;
+ network->flags = val & ~SC_RCV_BITS;
+ network->rbits = val & SC_RCV_BITS;
+ err = 0;
+ break;
+
+ case PPPIOCGASYNCMAP:
+ if (put_user(network->xaccm[0], user_arg))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSASYNCMAP:
+ if (get_user(network->xaccm[0], user_arg))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCGRASYNCMAP:
+ if (put_user(network->raccm, user_arg))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSRASYNCMAP:
+ if (get_user(network->raccm, user_arg))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCGXASYNCMAP:
+ if (copy_to_user((void __user *) arg, network->xaccm,
+ sizeof(network->xaccm)))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSXASYNCMAP:
+ if (copy_from_user(accm, (void __user *) arg, sizeof(accm)))
+ break;
+ accm[2] &= ~0x40000000U; /* can't escape 0x5e */
+ accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
+ memcpy(network->xaccm, accm, sizeof(network->xaccm));
+ err = 0;
+ break;
+
+ case PPPIOCGMRU:
+ if (put_user(network->mru, user_arg))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSMRU:
+ if (get_user(val, user_arg))
+ break;
+ if (val < PPP_MRU)
+ val = PPP_MRU;
+ network->mru = val;
+ err = 0;
+ break;
+
+ default:
+ err = -ENOTTY;
+ }
+
+ return err;
+}
+
+static const struct ppp_channel_ops ipwireless_ppp_channel_ops = {
+ .start_xmit = ipwireless_ppp_start_xmit,
+ .ioctl = ipwireless_ppp_ioctl
+};
+
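+/*
+ * Work item scheduled by ipwireless_ppp_open(): allocate and register a PPP
+ * channel with ppp_generic the first time the modem goes online; if a
+ * channel already exists this is a no-op.
+ */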
+static void do_go_online(struct work_struct *work_go_online)
+{
+ struct ipw_network *network =
+ container_of(work_go_online, struct ipw_network,
+ work_go_online);
+ unsigned long flags;
+
+ spin_lock_irqsave(&network->lock, flags);
+ if (!network->ppp_channel) {
+ struct ppp_channel *channel;
+
+ spin_unlock_irqrestore(&network->lock, flags);
+ channel = kzalloc(sizeof(struct ppp_channel), GFP_KERNEL);
+ if (!channel) {
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": unable to allocate PPP channel\n");
+ return;
+ }
+ channel->private = network;
+ channel->mtu = 16384; /* Wild guess */
+ channel->hdrlen = 2;
+ channel->ops = &ipwireless_ppp_channel_ops;
+
+ network->flags = 0;
+ network->rbits = 0;
+ network->mru = PPP_MRU;
+ memset(network->xaccm, 0, sizeof(network->xaccm));
+ network->xaccm[0] = ~0U;
+ network->xaccm[3] = 0x60000000U;
+ network->raccm = ~0U;
+ ppp_register_channel(channel);
+ spin_lock_irqsave(&network->lock, flags);
+ network->ppp_channel = channel;
+ }
+ spin_unlock_irqrestore(&network->lock, flags);
+}
+
+static void do_go_offline(struct work_struct *work_go_offline)
+{
+ struct ipw_network *network =
+ container_of(work_go_offline, struct ipw_network,
+ work_go_offline);
+ unsigned long flags;
+
+ mutex_lock(&network->close_lock);
+ spin_lock_irqsave(&network->lock, flags);
+ if (network->ppp_channel != NULL) {
+ struct ppp_channel *channel = network->ppp_channel;
+
+ network->ppp_channel = NULL;
+ spin_unlock_irqrestore(&network->lock, flags);
+ mutex_unlock(&network->close_lock);
+ ppp_unregister_channel(channel);
+ } else {
+ spin_unlock_irqrestore(&network->lock, flags);
+ mutex_unlock(&network->close_lock);
+ }
+}
+
+void ipwireless_network_notify_control_line_change(struct ipw_network *network,
+ unsigned int channel_idx,
+ unsigned int control_lines,
+ unsigned int changed_mask)
+{
+ int i;
+
+ if (channel_idx == IPW_CHANNEL_RAS)
+ network->ras_control_lines = control_lines;
+
+ for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) {
+ struct ipw_tty *tty =
+ network->associated_ttys[channel_idx][i];
+
+		/*
+		 * If the channel is associated with a tty, pass the control
+		 * line change on to that tty.
+		 */
+ if (tty)
+ ipwireless_tty_notify_control_line_change(tty,
+ channel_idx,
+ control_lines,
+ changed_mask);
+ }
+}
+
+/*
+ * Some versions of firmware stuff packets with 0xff 0x03 (PPP: ALLSTATIONS, UI)
+ * bytes, which are required on sent packets but are not always present on
+ * received packets.
+ */
+static struct sk_buff *ipw_packet_received_skb(unsigned char *data,
+ unsigned int length)
+{
+ struct sk_buff *skb;
+
+ if (length > 2 && data[0] == PPP_ALLSTATIONS && data[1] == PPP_UI) {
+ length -= 2;
+ data += 2;
+ }
+
+	skb = dev_alloc_skb(length + 4);
+	if (skb == NULL)
+		return NULL;
+	skb_reserve(skb, 2);
+ memcpy(skb_put(skb, length), data, length);
+
+ return skb;
+}
+
+void ipwireless_network_packet_received(struct ipw_network *network,
+ unsigned int channel_idx,
+ unsigned char *data,
+ unsigned int length)
+{
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) {
+ struct ipw_tty *tty = network->associated_ttys[channel_idx][i];
+
+ if (!tty)
+ continue;
+
+		/*
+		 * Data received on the RAS channel while we are online (DCD
+		 * asserted) and destined for the modem tty goes to the PPP
+		 * layer; everything else is sent straight out to the tty.
+		 */
+ if (channel_idx == IPW_CHANNEL_RAS
+ && (network->ras_control_lines &
+ IPW_CONTROL_LINE_DCD) != 0
+ && ipwireless_tty_is_modem(tty)) {
+ /*
+ * If data came in on the RAS channel and this tty is
+ * the modem tty, and we are online, then we send it to
+ * the PPP layer.
+ */
+ mutex_lock(&network->close_lock);
+ spin_lock_irqsave(&network->lock, flags);
+ if (network->ppp_channel != NULL) {
+ struct sk_buff *skb;
+
+ spin_unlock_irqrestore(&network->lock,
+ flags);
+
+ /* Send the data to the ppp_generic module. */
+				skb = ipw_packet_received_skb(data, length);
+				if (skb)
+					ppp_input(network->ppp_channel, skb);
+ } else
+ spin_unlock_irqrestore(&network->lock,
+ flags);
+ mutex_unlock(&network->close_lock);
+ }
+ /* Otherwise we send it out the tty. */
+ else
+ ipwireless_tty_received(tty, data, length);
+ }
+}
+
+struct ipw_network *ipwireless_network_create(struct ipw_hardware *hw)
+{
+ struct ipw_network *network =
+ kzalloc(sizeof(struct ipw_network), GFP_ATOMIC);
+
+ if (!network)
+ return NULL;
+
+ spin_lock_init(&network->lock);
+ mutex_init(&network->close_lock);
+
+ network->hardware = hw;
+
+ INIT_WORK(&network->work_go_online, do_go_online);
+ INIT_WORK(&network->work_go_offline, do_go_offline);
+
+ ipwireless_associate_network(hw, network);
+
+ return network;
+}
+
+void ipwireless_network_free(struct ipw_network *network)
+{
+ network->shutting_down = 1;
+
+ ipwireless_ppp_close(network);
+ flush_work_sync(&network->work_go_online);
+ flush_work_sync(&network->work_go_offline);
+
+ ipwireless_stop_interrupts(network->hardware);
+ ipwireless_associate_network(network->hardware, NULL);
+
+ kfree(network);
+}
+
+void ipwireless_associate_network_tty(struct ipw_network *network,
+ unsigned int channel_idx,
+ struct ipw_tty *tty)
+{
+ int i;
+
+ for (i = 0; i < MAX_ASSOCIATED_TTYS; i++)
+ if (network->associated_ttys[channel_idx][i] == NULL) {
+ network->associated_ttys[channel_idx][i] = tty;
+ break;
+ }
+}
+
+void ipwireless_disassociate_network_ttys(struct ipw_network *network,
+ unsigned int channel_idx)
+{
+ int i;
+
+ for (i = 0; i < MAX_ASSOCIATED_TTYS; i++)
+ network->associated_ttys[channel_idx][i] = NULL;
+}
+
+void ipwireless_ppp_open(struct ipw_network *network)
+{
+ if (ipwireless_debug)
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": online\n");
+ schedule_work(&network->work_go_online);
+}
+
+void ipwireless_ppp_close(struct ipw_network *network)
+{
+ /* Disconnect from the wireless network. */
+ if (ipwireless_debug)
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": offline\n");
+ schedule_work(&network->work_go_offline);
+}
+
+int ipwireless_ppp_channel_index(struct ipw_network *network)
+{
+ int ret = -1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&network->lock, flags);
+ if (network->ppp_channel != NULL)
+ ret = ppp_channel_index(network->ppp_channel);
+ spin_unlock_irqrestore(&network->lock, flags);
+
+ return ret;
+}
+
+int ipwireless_ppp_unit_number(struct ipw_network *network)
+{
+ int ret = -1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&network->lock, flags);
+ if (network->ppp_channel != NULL)
+ ret = ppp_unit_number(network->ppp_channel);
+ spin_unlock_irqrestore(&network->lock, flags);
+
+ return ret;
+}
+
+int ipwireless_ppp_mru(const struct ipw_network *network)
+{
+ return network->mru;
+}
diff --git a/drivers/tty/ipwireless/network.h b/drivers/tty/ipwireless/network.h
new file mode 100644
index 00000000..561f765b
--- /dev/null
+++ b/drivers/tty/ipwireless/network.h
@@ -0,0 +1,53 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#ifndef _IPWIRELESS_CS_NETWORK_H_
+#define _IPWIRELESS_CS_NETWORK_H_
+
+#include <linux/types.h>
+
+struct ipw_network;
+struct ipw_tty;
+struct ipw_hardware;
+
+/* Definitions of the different channels on the PCMCIA UE */
+#define IPW_CHANNEL_RAS 0
+#define IPW_CHANNEL_DIALLER 1
+#define IPW_CHANNEL_CONSOLE 2
+#define NO_OF_IPW_CHANNELS 5
+
+void ipwireless_network_notify_control_line_change(struct ipw_network *net,
+ unsigned int channel_idx, unsigned int control_lines,
+ unsigned int control_mask);
+void ipwireless_network_packet_received(struct ipw_network *net,
+ unsigned int channel_idx, unsigned char *data,
+ unsigned int length);
+struct ipw_network *ipwireless_network_create(struct ipw_hardware *hw);
+void ipwireless_network_free(struct ipw_network *net);
+void ipwireless_associate_network_tty(struct ipw_network *net,
+ unsigned int channel_idx, struct ipw_tty *tty);
+void ipwireless_disassociate_network_ttys(struct ipw_network *net,
+ unsigned int channel_idx);
+
+void ipwireless_ppp_open(struct ipw_network *net);
+
+void ipwireless_ppp_close(struct ipw_network *net);
+int ipwireless_ppp_channel_index(struct ipw_network *net);
+int ipwireless_ppp_unit_number(struct ipw_network *net);
+int ipwireless_ppp_mru(const struct ipw_network *net);
+
+#endif
diff --git a/drivers/tty/ipwireless/setup_protocol.h b/drivers/tty/ipwireless/setup_protocol.h
new file mode 100644
index 00000000..9d6bcc77
--- /dev/null
+++ b/drivers/tty/ipwireless/setup_protocol.h
@@ -0,0 +1,108 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#ifndef _IPWIRELESS_CS_SETUP_PROTOCOL_H_
+#define _IPWIRELESS_CS_SETUP_PROTOCOL_H_
+
+/* Version of the setup protocol and transport protocols */
+#define TL_SETUP_VERSION 1
+
+#define TL_SETUP_VERSION_QRY_TMO 1000
+#define TL_SETUP_MAX_VERSION_QRY 30
+
+/* Message numbers 0-9 are obsoleted and must not be reused! */
+#define TL_SETUP_SIGNO_GET_VERSION_QRY 10
+#define TL_SETUP_SIGNO_GET_VERSION_RSP 11
+#define TL_SETUP_SIGNO_CONFIG_MSG 12
+#define TL_SETUP_SIGNO_CONFIG_DONE_MSG 13
+#define TL_SETUP_SIGNO_OPEN_MSG 14
+#define TL_SETUP_SIGNO_CLOSE_MSG 15
+
+#define TL_SETUP_SIGNO_INFO_MSG 20
+#define TL_SETUP_SIGNO_INFO_MSG_ACK 21
+
+#define TL_SETUP_SIGNO_REBOOT_MSG 22
+#define TL_SETUP_SIGNO_REBOOT_MSG_ACK 23
+
+/* Synchronous start-messages */
+struct tl_setup_get_version_qry {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_GET_VERSION_QRY */
+} __attribute__ ((__packed__));
+
+struct tl_setup_get_version_rsp {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_GET_VERSION_RSP */
+ unsigned char version; /* TL_SETUP_VERSION */
+} __attribute__ ((__packed__));
+
+struct tl_setup_config_msg {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_CONFIG_MSG */
+ unsigned char port_no;
+ unsigned char prio_data;
+ unsigned char prio_ctrl;
+} __attribute__ ((__packed__));
+
+struct tl_setup_config_done_msg {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_CONFIG_DONE_MSG */
+} __attribute__ ((__packed__));
+
+/* Asynchronous messages */
+struct tl_setup_open_msg {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_OPEN_MSG */
+ unsigned char port_no;
+} __attribute__ ((__packed__));
+
+struct tl_setup_close_msg {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_CLOSE_MSG */
+ unsigned char port_no;
+} __attribute__ ((__packed__));
+
+/* Driver type - for use in tl_setup_info_msg.driver_type */
+#define COMM_DRIVER 0
+#define NDISWAN_DRIVER 1
+#define NDISWAN_DRIVER_MAJOR_VERSION 2
+#define NDISWAN_DRIVER_MINOR_VERSION 0
+
+/*
+ * It should not matter when this message comes over as we just store the
+ * results and send the ACK.
+ */
+struct tl_setup_info_msg {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_INFO_MSG */
+ unsigned char driver_type;
+ unsigned char major_version;
+ unsigned char minor_version;
+} __attribute__ ((__packed__));
+
+struct tl_setup_info_msgAck {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_INFO_MSG_ACK */
+} __attribute__ ((__packed__));
+
+struct TlSetupRebootMsgAck {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_REBOOT_MSG_ACK */
+} __attribute__ ((__packed__));
+
+/* Define a union of all the msgs that the driver can receive from the card. */
+union ipw_setup_rx_msg {
+ unsigned char sig_no;
+ struct tl_setup_get_version_rsp version_rsp_msg;
+ struct tl_setup_open_msg open_msg;
+ struct tl_setup_close_msg close_msg;
+ struct tl_setup_info_msg InfoMsg;
+ struct tl_setup_info_msgAck info_msg_ack;
+} __attribute__ ((__packed__));
+
+#endif /* _IPWIRELESS_CS_SETUP_PROTOCOL_H_ */
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
new file mode 100644
index 00000000..ef928695
--- /dev/null
+++ b/drivers/tty/ipwireless/tty.c
@@ -0,0 +1,679 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ppp_defs.h>
+#include <linux/if.h>
+#include <linux/if_ppp.h>
+#include <linux/sched.h>
+#include <linux/serial.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/uaccess.h>
+
+#include "tty.h"
+#include "network.h"
+#include "hardware.h"
+#include "main.h"
+
+#define IPWIRELESS_PCMCIA_START (0)
+#define IPWIRELESS_PCMCIA_MINORS (24)
+#define IPWIRELESS_PCMCIA_MINOR_RANGE (8)
+
+#define TTYTYPE_MODEM (0)
+#define TTYTYPE_MONITOR (1)
+#define TTYTYPE_RAS_RAW (2)
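+
+/*
+ * The minor space is three banks of IPWIRELESS_PCMCIA_MINOR_RANGE minors,
+ * one bank per tty type above: modem ttys first, then monitor ttys, then
+ * RAS-raw ttys (the latter are only reachable when loopback mode is on).
+ */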
+
+struct ipw_tty {
+ int index;
+ struct ipw_hardware *hardware;
+ unsigned int channel_idx;
+ unsigned int secondary_channel_idx;
+ int tty_type;
+ struct ipw_network *network;
+ struct tty_struct *linux_tty;
+ int open_count;
+ unsigned int control_lines;
+ struct mutex ipw_tty_mutex;
+ int tx_bytes_queued;
+ int closing;
+};
+
+static struct ipw_tty *ttys[IPWIRELESS_PCMCIA_MINORS];
+
+static struct tty_driver *ipw_tty_driver;
+
+static char *tty_type_name(int tty_type)
+{
+ static char *channel_names[] = {
+ "modem",
+ "monitor",
+ "RAS-raw"
+ };
+
+ return channel_names[tty_type];
+}
+
+static void report_registering(struct ipw_tty *tty)
+{
+ char *iftype = tty_type_name(tty->tty_type);
+
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": registering %s device ttyIPWp%d\n", iftype, tty->index);
+}
+
+static void report_deregistering(struct ipw_tty *tty)
+{
+ char *iftype = tty_type_name(tty->tty_type);
+
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": deregistering %s device ttyIPWp%d\n", iftype,
+ tty->index);
+}
+
+static struct ipw_tty *get_tty(int minor)
+{
+ if (minor < ipw_tty_driver->minor_start
+ || minor >= ipw_tty_driver->minor_start +
+ IPWIRELESS_PCMCIA_MINORS)
+ return NULL;
+ else {
+ int minor_offset = minor - ipw_tty_driver->minor_start;
+
+		/*
+		 * The 'ras_raw' channel is only available when 'loopback' mode
+		 * is enabled. Its minor numbers start at 16
+		 * (IPWIRELESS_PCMCIA_MINOR_RANGE * TTYTYPE_RAS_RAW).
+		 */
+ if (!ipwireless_loopback &&
+ minor_offset >=
+ IPWIRELESS_PCMCIA_MINOR_RANGE * TTYTYPE_RAS_RAW)
+ return NULL;
+
+ return ttys[minor_offset];
+ }
+}
+
+static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
+{
+ int minor = linux_tty->index;
+ struct ipw_tty *tty = get_tty(minor);
+
+ if (!tty)
+ return -ENODEV;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+
+ if (tty->closing) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return -ENODEV;
+ }
+ if (tty->open_count == 0)
+ tty->tx_bytes_queued = 0;
+
+ tty->open_count++;
+
+ tty->linux_tty = linux_tty;
+ linux_tty->driver_data = tty;
+ linux_tty->low_latency = 1;
+
+ if (tty->tty_type == TTYTYPE_MODEM)
+ ipwireless_ppp_open(tty->network);
+
+ mutex_unlock(&tty->ipw_tty_mutex);
+
+ return 0;
+}
+
+static void do_ipw_close(struct ipw_tty *tty)
+{
+ tty->open_count--;
+
+ if (tty->open_count == 0) {
+ struct tty_struct *linux_tty = tty->linux_tty;
+
+ if (linux_tty != NULL) {
+ tty->linux_tty = NULL;
+ linux_tty->driver_data = NULL;
+
+ if (tty->tty_type == TTYTYPE_MODEM)
+ ipwireless_ppp_close(tty->network);
+ }
+ }
+}
+
+static void ipw_hangup(struct tty_struct *linux_tty)
+{
+ struct ipw_tty *tty = linux_tty->driver_data;
+
+ if (!tty)
+ return;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+ if (tty->open_count == 0) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return;
+ }
+
+ do_ipw_close(tty);
+
+ mutex_unlock(&tty->ipw_tty_mutex);
+}
+
+static void ipw_close(struct tty_struct *linux_tty, struct file *filp)
+{
+ ipw_hangup(linux_tty);
+}
+
+/* Take data received from hardware, and send it out the tty */
+void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
+ unsigned int length)
+{
+ struct tty_struct *linux_tty;
+ int work = 0;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+ linux_tty = tty->linux_tty;
+ if (linux_tty == NULL) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return;
+ }
+
+ if (!tty->open_count) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return;
+ }
+ mutex_unlock(&tty->ipw_tty_mutex);
+
+ work = tty_insert_flip_string(linux_tty, data, length);
+
+ if (work != length)
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
+ ": %d chars not inserted to flip buffer!\n",
+ length - work);
+
+ /*
+ * This may sleep if ->low_latency is set
+ */
+ if (work)
+ tty_flip_buffer_push(linux_tty);
+}
+
+static void ipw_write_packet_sent_callback(void *callback_data,
+ unsigned int packet_length)
+{
+ struct ipw_tty *tty = callback_data;
+
+ /*
+ * Packet has been sent, so we subtract the number of bytes from our
+ * tally of outstanding TX bytes.
+ */
+ tty->tx_bytes_queued -= packet_length;
+}
+
+static int ipw_write(struct tty_struct *linux_tty,
+ const unsigned char *buf, int count)
+{
+ struct ipw_tty *tty = linux_tty->driver_data;
+ int room, ret;
+
+ if (!tty)
+ return -ENODEV;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+ if (!tty->open_count) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return -EINVAL;
+ }
+
+ room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
+ if (room < 0)
+ room = 0;
+ /* Don't allow caller to write any more than we have room for */
+ if (count > room)
+ count = room;
+
+ if (count == 0) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return 0;
+ }
+
+ ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS,
+ buf, count,
+ ipw_write_packet_sent_callback, tty);
+ if (ret == -1) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return 0;
+ }
+
+ tty->tx_bytes_queued += count;
+ mutex_unlock(&tty->ipw_tty_mutex);
+
+ return count;
+}
+
+static int ipw_write_room(struct tty_struct *linux_tty)
+{
+ struct ipw_tty *tty = linux_tty->driver_data;
+ int room;
+
+ /* FIXME: Exactly how is the tty object locked here .. */
+ if (!tty)
+ return -ENODEV;
+
+ if (!tty->open_count)
+ return -EINVAL;
+
+ room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
+ if (room < 0)
+ room = 0;
+
+ return room;
+}
+
+static int ipwireless_get_serial_info(struct ipw_tty *tty,
+ struct serial_struct __user *retinfo)
+{
+ struct serial_struct tmp;
+
+ if (!retinfo)
+ return (-EFAULT);
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.type = PORT_UNKNOWN;
+ tmp.line = tty->index;
+ tmp.port = 0;
+ tmp.irq = 0;
+ tmp.flags = 0;
+ tmp.baud_base = 115200;
+ tmp.close_delay = 0;
+ tmp.closing_wait = 0;
+ tmp.custom_divisor = 0;
+ tmp.hub6 = 0;
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
+{
+ struct ipw_tty *tty = linux_tty->driver_data;
+
+ if (!tty)
+ return 0;
+
+ if (!tty->open_count)
+ return 0;
+
+ return tty->tx_bytes_queued;
+}
+
+static int get_control_lines(struct ipw_tty *tty)
+{
+ unsigned int my = tty->control_lines;
+ unsigned int out = 0;
+
+ if (my & IPW_CONTROL_LINE_RTS)
+ out |= TIOCM_RTS;
+ if (my & IPW_CONTROL_LINE_DTR)
+ out |= TIOCM_DTR;
+ if (my & IPW_CONTROL_LINE_CTS)
+ out |= TIOCM_CTS;
+ if (my & IPW_CONTROL_LINE_DSR)
+ out |= TIOCM_DSR;
+ if (my & IPW_CONTROL_LINE_DCD)
+ out |= TIOCM_CD;
+
+ return out;
+}
+
+static int set_control_lines(struct ipw_tty *tty, unsigned int set,
+ unsigned int clear)
+{
+ int ret;
+
+ if (set & TIOCM_RTS) {
+ ret = ipwireless_set_RTS(tty->hardware, tty->channel_idx, 1);
+ if (ret)
+ return ret;
+ if (tty->secondary_channel_idx != -1) {
+ ret = ipwireless_set_RTS(tty->hardware,
+ tty->secondary_channel_idx, 1);
+ if (ret)
+ return ret;
+ }
+ }
+ if (set & TIOCM_DTR) {
+ ret = ipwireless_set_DTR(tty->hardware, tty->channel_idx, 1);
+ if (ret)
+ return ret;
+ if (tty->secondary_channel_idx != -1) {
+ ret = ipwireless_set_DTR(tty->hardware,
+ tty->secondary_channel_idx, 1);
+ if (ret)
+ return ret;
+ }
+ }
+ if (clear & TIOCM_RTS) {
+		ret = ipwireless_set_RTS(tty->hardware, tty->channel_idx, 0);
+		if (ret)
+			return ret;
+ if (tty->secondary_channel_idx != -1) {
+ ret = ipwireless_set_RTS(tty->hardware,
+ tty->secondary_channel_idx, 0);
+ if (ret)
+ return ret;
+ }
+ }
+ if (clear & TIOCM_DTR) {
+		ret = ipwireless_set_DTR(tty->hardware, tty->channel_idx, 0);
+		if (ret)
+			return ret;
+ if (tty->secondary_channel_idx != -1) {
+ ret = ipwireless_set_DTR(tty->hardware,
+ tty->secondary_channel_idx, 0);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int ipw_tiocmget(struct tty_struct *linux_tty)
+{
+ struct ipw_tty *tty = linux_tty->driver_data;
+ /* FIXME: Exactly how is the tty object locked here .. */
+
+ if (!tty)
+ return -ENODEV;
+
+ if (!tty->open_count)
+ return -EINVAL;
+
+ return get_control_lines(tty);
+}
+
+static int
+ipw_tiocmset(struct tty_struct *linux_tty,
+ unsigned int set, unsigned int clear)
+{
+ struct ipw_tty *tty = linux_tty->driver_data;
+ /* FIXME: Exactly how is the tty object locked here .. */
+
+ if (!tty)
+ return -ENODEV;
+
+ if (!tty->open_count)
+ return -EINVAL;
+
+ return set_control_lines(tty, set, clear);
+}
+
+static int ipw_ioctl(struct tty_struct *linux_tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ipw_tty *tty = linux_tty->driver_data;
+
+ if (!tty)
+ return -ENODEV;
+
+ if (!tty->open_count)
+ return -EINVAL;
+
+ /* FIXME: Exactly how is the tty object locked here .. */
+
+ switch (cmd) {
+ case TIOCGSERIAL:
+ return ipwireless_get_serial_info(tty, (void __user *) arg);
+
+ case TIOCSSERIAL:
+ return 0; /* Keeps the PCMCIA scripts happy. */
+ }
+
+ if (tty->tty_type == TTYTYPE_MODEM) {
+ switch (cmd) {
+ case PPPIOCGCHAN:
+ {
+ int chan = ipwireless_ppp_channel_index(
+ tty->network);
+
+ if (chan < 0)
+ return -ENODEV;
+ if (put_user(chan, (int __user *) arg))
+ return -EFAULT;
+ }
+ return 0;
+
+ case PPPIOCGUNIT:
+ {
+ int unit = ipwireless_ppp_unit_number(
+ tty->network);
+
+ if (unit < 0)
+ return -ENODEV;
+ if (put_user(unit, (int __user *) arg))
+ return -EFAULT;
+ }
+ return 0;
+
+ case FIONREAD:
+ {
+ int val = 0;
+
+ if (put_user(val, (int __user *) arg))
+ return -EFAULT;
+ }
+ return 0;
+ case TCFLSH:
+ return tty_perform_flush(linux_tty, arg);
+ }
+ }
+ return -ENOIOCTLCMD;
+}
+
+static int add_tty(int j,
+ struct ipw_hardware *hardware,
+ struct ipw_network *network, int channel_idx,
+ int secondary_channel_idx, int tty_type)
+{
+ ttys[j] = kzalloc(sizeof(struct ipw_tty), GFP_KERNEL);
+ if (!ttys[j])
+ return -ENOMEM;
+ ttys[j]->index = j;
+ ttys[j]->hardware = hardware;
+ ttys[j]->channel_idx = channel_idx;
+ ttys[j]->secondary_channel_idx = secondary_channel_idx;
+ ttys[j]->network = network;
+ ttys[j]->tty_type = tty_type;
+ mutex_init(&ttys[j]->ipw_tty_mutex);
+
+ tty_register_device(ipw_tty_driver, j, NULL);
+ ipwireless_associate_network_tty(network, channel_idx, ttys[j]);
+
+ if (secondary_channel_idx != -1)
+ ipwireless_associate_network_tty(network,
+ secondary_channel_idx,
+ ttys[j]);
+ if (get_tty(j + ipw_tty_driver->minor_start) == ttys[j])
+ report_registering(ttys[j]);
+ return 0;
+}
+
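+/*
+ * Find the first minor index (0..IPWIRELESS_PCMCIA_MINOR_RANGE-1) that is
+ * free in all three banks and create the three ttys that share it: the
+ * modem tty at that minor, the monitor tty 8 above it and the RAS-raw tty
+ * 16 above it. Returns the modem tty.
+ */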
+struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware,
+ struct ipw_network *network)
+{
+ int i, j;
+
+ for (i = 0; i < IPWIRELESS_PCMCIA_MINOR_RANGE; i++) {
+ int allfree = 1;
+
+ for (j = i; j < IPWIRELESS_PCMCIA_MINORS;
+ j += IPWIRELESS_PCMCIA_MINOR_RANGE)
+ if (ttys[j] != NULL) {
+ allfree = 0;
+ break;
+ }
+
+ if (allfree) {
+ j = i;
+
+ if (add_tty(j, hardware, network,
+ IPW_CHANNEL_DIALLER, IPW_CHANNEL_RAS,
+ TTYTYPE_MODEM))
+ return NULL;
+
+ j += IPWIRELESS_PCMCIA_MINOR_RANGE;
+ if (add_tty(j, hardware, network,
+ IPW_CHANNEL_DIALLER, -1,
+ TTYTYPE_MONITOR))
+ return NULL;
+
+ j += IPWIRELESS_PCMCIA_MINOR_RANGE;
+ if (add_tty(j, hardware, network,
+ IPW_CHANNEL_RAS, -1,
+ TTYTYPE_RAS_RAW))
+ return NULL;
+
+ return ttys[i];
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Must be called before ipwireless_network_free().
+ */
+void ipwireless_tty_free(struct ipw_tty *tty)
+{
+ int j;
+ struct ipw_network *network = ttys[tty->index]->network;
+
+ for (j = tty->index; j < IPWIRELESS_PCMCIA_MINORS;
+ j += IPWIRELESS_PCMCIA_MINOR_RANGE) {
+ struct ipw_tty *ttyj = ttys[j];
+
+ if (ttyj) {
+ mutex_lock(&ttyj->ipw_tty_mutex);
+ if (get_tty(j + ipw_tty_driver->minor_start) == ttyj)
+ report_deregistering(ttyj);
+ ttyj->closing = 1;
+ if (ttyj->linux_tty != NULL) {
+ mutex_unlock(&ttyj->ipw_tty_mutex);
+ tty_hangup(ttyj->linux_tty);
+ /* Wait till the tty_hangup has completed */
+ flush_work_sync(&ttyj->linux_tty->hangup_work);
+ /* FIXME: Exactly how is the tty object locked here
+ against a parallel ioctl etc */
+ mutex_lock(&ttyj->ipw_tty_mutex);
+ }
+ while (ttyj->open_count)
+ do_ipw_close(ttyj);
+ ipwireless_disassociate_network_ttys(network,
+ ttyj->channel_idx);
+ tty_unregister_device(ipw_tty_driver, j);
+ ttys[j] = NULL;
+ mutex_unlock(&ttyj->ipw_tty_mutex);
+ kfree(ttyj);
+ }
+ }
+}
+
+static const struct tty_operations tty_ops = {
+ .open = ipw_open,
+ .close = ipw_close,
+ .hangup = ipw_hangup,
+ .write = ipw_write,
+ .write_room = ipw_write_room,
+ .ioctl = ipw_ioctl,
+ .chars_in_buffer = ipw_chars_in_buffer,
+ .tiocmget = ipw_tiocmget,
+ .tiocmset = ipw_tiocmset,
+};
+
+int ipwireless_tty_init(void)
+{
+ int result;
+
+ ipw_tty_driver = alloc_tty_driver(IPWIRELESS_PCMCIA_MINORS);
+ if (!ipw_tty_driver)
+ return -ENOMEM;
+
+ ipw_tty_driver->owner = THIS_MODULE;
+ ipw_tty_driver->driver_name = IPWIRELESS_PCCARD_NAME;
+ ipw_tty_driver->name = "ttyIPWp";
+ ipw_tty_driver->major = 0;
+ ipw_tty_driver->minor_start = IPWIRELESS_PCMCIA_START;
+ ipw_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ ipw_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ ipw_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ ipw_tty_driver->init_termios = tty_std_termios;
+ ipw_tty_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ ipw_tty_driver->init_termios.c_ispeed = 9600;
+ ipw_tty_driver->init_termios.c_ospeed = 9600;
+ tty_set_operations(ipw_tty_driver, &tty_ops);
+ result = tty_register_driver(ipw_tty_driver);
+ if (result) {
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": failed to register tty driver\n");
+ put_tty_driver(ipw_tty_driver);
+ return result;
+ }
+
+ return 0;
+}
+
+void ipwireless_tty_release(void)
+{
+ int ret;
+
+ ret = tty_unregister_driver(ipw_tty_driver);
+ put_tty_driver(ipw_tty_driver);
+ if (ret != 0)
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": tty_unregister_driver failed with code %d\n", ret);
+}
+
+int ipwireless_tty_is_modem(struct ipw_tty *tty)
+{
+ return tty->tty_type == TTYTYPE_MODEM;
+}
+
+void
+ipwireless_tty_notify_control_line_change(struct ipw_tty *tty,
+ unsigned int channel_idx,
+ unsigned int control_lines,
+ unsigned int changed_mask)
+{
+ unsigned int old_control_lines = tty->control_lines;
+
+ tty->control_lines = (tty->control_lines & ~changed_mask)
+ | (control_lines & changed_mask);
+
+ /*
+ * If DCD is de-asserted, we close the tty so pppd can tell that we
+ * have gone offline.
+ */
+ if ((old_control_lines & IPW_CONTROL_LINE_DCD)
+ && !(tty->control_lines & IPW_CONTROL_LINE_DCD)
+ && tty->linux_tty) {
+ tty_hangup(tty->linux_tty);
+ }
+}
+
diff --git a/drivers/tty/ipwireless/tty.h b/drivers/tty/ipwireless/tty.h
new file mode 100644
index 00000000..747b2d63
--- /dev/null
+++ b/drivers/tty/ipwireless/tty.h
@@ -0,0 +1,45 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#ifndef _IPWIRELESS_CS_TTY_H_
+#define _IPWIRELESS_CS_TTY_H_
+
+#include <linux/types.h>
+#include <linux/sched.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+struct ipw_tty;
+struct ipw_network;
+struct ipw_hardware;
+
+int ipwireless_tty_init(void);
+void ipwireless_tty_release(void);
+
+struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hw,
+ struct ipw_network *net);
+void ipwireless_tty_free(struct ipw_tty *tty);
+void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
+ unsigned int length);
+int ipwireless_tty_is_modem(struct ipw_tty *tty);
+void ipwireless_tty_notify_control_line_change(struct ipw_tty *tty,
+ unsigned int channel_idx,
+ unsigned int control_lines,
+ unsigned int changed_mask);
+
+#endif
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
new file mode 100644
index 00000000..db1cf9c3
--- /dev/null
+++ b/drivers/tty/isicom.c
@@ -0,0 +1,1736 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Original driver code supplied by Multi-Tech
+ *
+ * Changes
+ * 1/9/98 alan@lxorguk.ukuu.org.uk
+ * Merge to 2.0.x kernel tree
+ * Obtain and use official major/minors
+ * Loader switched to a misc device
+ * (fixed range check bug as a side effect)
+ * Printk clean up
+ * 9/12/98 alan@lxorguk.ukuu.org.uk
+ * Rough port to 2.1.x
+ *
+ * 10/6/99 sameer Merged the ISA and PCI drivers to
+ * a new unified driver.
+ *
+ * 3/9/99 sameer Added support for ISI4616 cards.
+ *
+ * 16/9/99 sameer We do not force RTS low anymore.
+ * This is to prevent the firmware
+ * from getting confused.
+ *
+ * 26/10/99 sameer Cosmetic changes:The driver now
+ * dumps the Port Count information
+ * along with I/O address and IRQ.
+ *
+ * 13/12/99 sameer Fixed the problem with IRQ sharing.
+ *
+ * 10/5/00 sameer Fixed isicom_shutdown_board()
+ * to not lower DTR on all the ports
+ * when the last port on the card is
+ * closed.
+ *
+ * 10/5/00 sameer Signal mask setup command added
+ * to isicom_setup_port and
+ * isicom_shutdown_port.
+ *
+ * 24/5/00 sameer The driver is now SMP aware.
+ *
+ *
+ * 27/11/00 Vinayak P Risbud Fixed the Driver Crash Problem
+ *
+ *
+ * 03/01/01 anil .s Added support for resetting the
+ * internal modems on ISI cards.
+ *
+ * 08/02/01 anil .s Upgraded the driver for kernel
+ * 2.4.x
+ *
+ * 11/04/01 Kevin Fixed firmware load problem with
+ * ISIHP-4X card
+ *
+ * 30/04/01 anil .s Fixed the remote login through
+ * ISI port problem. Now the link
+ * does not go down before password
+ * prompt.
+ *
+ * 03/05/01 anil .s Fixed the problem with IRQ sharing
+ * among ISI-PCI cards.
+ *
+ * 03/05/01 anil .s Added support to display the version
+ * info during insmod as well as module
+ * listing by lsmod.
+ *
+ * 10/05/01 anil .s Done the modifications to the source
+ * file and Install script so that the
+ * same installation can be used for
+ * 2.2.x and 2.4.x kernel.
+ *
+ * 06/06/01 anil .s Now we drop both dtr and rts during
+ * shutdown_port as well as raise them
+ * during isicom_config_port.
+ *
+ * 09/06/01 acme@conectiva.com.br use capable, not suser, do
+ * restore_flags on failure in
+ * isicom_send_break, verify put_user
+ * result
+ *
+ * 11/02/03 ranjeeth Added support for 230 Kbps and 460 Kbps
+ * Baud index extended to 21
+ *
+ * 20/03/03 ranjeeth Made to work for Linux Advanced server.
+ * Taken care of license warning.
+ *
+ * 10/12/03 Ravindra Made to work for Fedora Core 1 of
+ * Red Hat Distribution
+ *
+ * 06/01/05 Alan Cox Merged the ISI and base kernel strands
+ * into a single 2.6 driver
+ *
+ * ***********************************************************
+ *
+ * To use this driver you also need the support package. You
+ * can find this in RPM format on
+ * ftp://ftp.linux.org.uk/pub/linux/alan
+ *
+ * You can find the original tools for this direct from Multitech
+ * ftp://ftp.multitech.com/ISI-Cards/
+ *
+ *	Having installed the cards, add the module options to /etc/modprobe.conf:
+ *
+ * options isicom io=card1,card2,card3,card4 irq=card1,card2,card3,card4
+ *
+ * Omit those entries for boards you don't have installed.
+ *
+ * TODO
+ * Merge testing
+ * 64-bit verification
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/termios.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/serial.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <asm/system.h>
+
+#include <linux/pci.h>
+
+#include <linux/isicom.h>
+
+#define InterruptTheCard(base) outw(0, (base) + 0xc)
+#define ClearInterrupt(base) inw((base) + 0x0a)
+
+#ifdef DEBUG
+#define isicom_paranoia_check(a, b, c) __isicom_paranoia_check((a), (b), (c))
+#else
+#define isicom_paranoia_check(a, b, c) 0
+#endif
+
+static int isicom_probe(struct pci_dev *, const struct pci_device_id *);
+static void __devexit isicom_remove(struct pci_dev *);
+
+static struct pci_device_id isicom_pci_tbl[] = {
+ { PCI_DEVICE(VENDOR_ID, 0x2028) },
+ { PCI_DEVICE(VENDOR_ID, 0x2051) },
+ { PCI_DEVICE(VENDOR_ID, 0x2052) },
+ { PCI_DEVICE(VENDOR_ID, 0x2053) },
+ { PCI_DEVICE(VENDOR_ID, 0x2054) },
+ { PCI_DEVICE(VENDOR_ID, 0x2055) },
+ { PCI_DEVICE(VENDOR_ID, 0x2056) },
+ { PCI_DEVICE(VENDOR_ID, 0x2057) },
+ { PCI_DEVICE(VENDOR_ID, 0x2058) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, isicom_pci_tbl);
+
+static struct pci_driver isicom_driver = {
+ .name = "isicom",
+ .id_table = isicom_pci_tbl,
+ .probe = isicom_probe,
+ .remove = __devexit_p(isicom_remove)
+};
+
+static int prev_card = 3; /* start servicing isi_card[0] */
+static struct tty_driver *isicom_normal;
+
+static void isicom_tx(unsigned long _data);
+static void isicom_start(struct tty_struct *tty);
+
+static DEFINE_TIMER(tx, isicom_tx, 0, 0);
+
+/* baud index mappings from the Linux baud definitions to the ISI values */
+
+static signed char linuxb_to_isib[] = {
+ -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 13, 15, 16, 17, 18, 19, 20, 21
+};
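+
+/*
+ * An entry of -1 (e.g. index 0, which corresponds to B0) marks a rate the
+ * card does not support; isicom_config_port() reacts by dropping DTR.
+ */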
+
+struct isi_board {
+ unsigned long base;
+ int irq;
+ unsigned char port_count;
+ unsigned short status;
+ unsigned short port_status; /* each bit for each port */
+ unsigned short shift_count;
+ struct isi_port *ports;
+ signed char count;
+ spinlock_t card_lock; /* Card wide lock 11/5/00 -sameer */
+ unsigned long flags;
+ unsigned int index;
+};
+
+struct isi_port {
+ unsigned short magic;
+ struct tty_port port;
+ u16 channel;
+ u16 status;
+ struct isi_board *card;
+ unsigned char *xmit_buf;
+ int xmit_head;
+ int xmit_tail;
+ int xmit_cnt;
+};
+
+static struct isi_board isi_card[BOARD_COUNT];
+static struct isi_port isi_ports[PORT_COUNT];
+
+/*
+ *	Locking functions for card level locking. We need both to hold the
+ *	card's lock and to have the card in a state where it is ready to
+ *	talk to us.
+ */
+
+static inline int WaitTillCardIsFree(unsigned long base)
+{
+ unsigned int count = 0;
+ unsigned int a = in_atomic(); /* do we run under spinlock? */
+
+ while (!(inw(base + 0xe) & 0x1) && count++ < 100)
+ if (a)
+ mdelay(1);
+ else
+ msleep(1);
+
+ return !(inw(base + 0xe) & 0x1);
+}
+
+static int lock_card(struct isi_board *card)
+{
+ unsigned long base = card->base;
+ unsigned int retries, a;
+
+ for (retries = 0; retries < 10; retries++) {
+ spin_lock_irqsave(&card->card_lock, card->flags);
+ for (a = 0; a < 10; a++) {
+ if (inw(base + 0xe) & 0x1)
+ return 1;
+ udelay(10);
+ }
+ spin_unlock_irqrestore(&card->card_lock, card->flags);
+ msleep(10);
+ }
+ pr_warning("Failed to lock Card (0x%lx)\n", card->base);
+
+ return 0; /* Failed to acquire the card! */
+}
+
+static void unlock_card(struct isi_board *card)
+{
+ spin_unlock_irqrestore(&card->card_lock, card->flags);
+}
+
+/*
+ * ISI Card specific ops ...
+ */
+
+/* card->lock HAS to be held */
+static void raise_dtr(struct isi_port *port)
+{
+ struct isi_board *card = port->card;
+ unsigned long base = card->base;
+ u16 channel = port->channel;
+
+ if (WaitTillCardIsFree(base))
+ return;
+
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
+ outw(0x0504, base);
+ InterruptTheCard(base);
+ port->status |= ISI_DTR;
+}
+
+/* card->lock HAS to be held */
+static inline void drop_dtr(struct isi_port *port)
+{
+ struct isi_board *card = port->card;
+ unsigned long base = card->base;
+ u16 channel = port->channel;
+
+ if (WaitTillCardIsFree(base))
+ return;
+
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
+ outw(0x0404, base);
+ InterruptTheCard(base);
+ port->status &= ~ISI_DTR;
+}
+
+/* card->lock HAS to be held */
+static inline void raise_rts(struct isi_port *port)
+{
+ struct isi_board *card = port->card;
+ unsigned long base = card->base;
+ u16 channel = port->channel;
+
+ if (WaitTillCardIsFree(base))
+ return;
+
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
+ outw(0x0a04, base);
+ InterruptTheCard(base);
+ port->status |= ISI_RTS;
+}
+
+/* card->lock HAS to be held */
+static inline void drop_rts(struct isi_port *port)
+{
+ struct isi_board *card = port->card;
+ unsigned long base = card->base;
+ u16 channel = port->channel;
+
+ if (WaitTillCardIsFree(base))
+ return;
+
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
+ outw(0x0804, base);
+ InterruptTheCard(base);
+ port->status &= ~ISI_RTS;
+}
+
+/* card->lock MUST NOT be held */
+
+static void isicom_dtr_rts(struct tty_port *port, int on)
+{
+ struct isi_port *ip = container_of(port, struct isi_port, port);
+ struct isi_board *card = ip->card;
+ unsigned long base = card->base;
+ u16 channel = ip->channel;
+
+ if (!lock_card(card))
+ return;
+
+ if (on) {
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
+ outw(0x0f04, base);
+ InterruptTheCard(base);
+ ip->status |= (ISI_DTR | ISI_RTS);
+ } else {
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
+ outw(0x0C04, base);
+ InterruptTheCard(base);
+ ip->status &= ~(ISI_DTR | ISI_RTS);
+ }
+ unlock_card(card);
+}
+
+/* card->lock HAS to be held */
+static void drop_dtr_rts(struct isi_port *port)
+{
+ struct isi_board *card = port->card;
+ unsigned long base = card->base;
+ u16 channel = port->channel;
+
+ if (WaitTillCardIsFree(base))
+ return;
+
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
+ outw(0x0c04, base);
+ InterruptTheCard(base);
+ port->status &= ~(ISI_RTS | ISI_DTR);
+}
+
+/*
+ * ISICOM Driver specific routines ...
+ *
+ */
+
+static inline int __isicom_paranoia_check(struct isi_port const *port,
+ char *name, const char *routine)
+{
+	if (!port) {
+		pr_warning("Warning: NULL isicom port for dev %s in %s.\n",
+			name, routine);
+		return 1;
+	}
+	if (port->magic != ISICOM_MAGIC) {
+		pr_warning("Warning: bad isicom magic for dev %s in %s.\n",
+			name, routine);
+		return 1;
+	}
+
+ return 0;
+}
+
+/*
+ * Transmitter.
+ *
+ * We shovel data into the card buffers on a regular basis. The card
+ * will do the rest of the work for us.
+ */
+
+static void isicom_tx(unsigned long _data)
+{
+ unsigned long flags, base;
+ unsigned int retries;
+ short count = (BOARD_COUNT-1), card;
+ short txcount, wrd, residue, word_count, cnt;
+ struct isi_port *port;
+ struct tty_struct *tty;
+
+ /* find next active board */
+ card = (prev_card + 1) & 0x0003;
+ while (count-- > 0) {
+ if (isi_card[card].status & BOARD_ACTIVE)
+ break;
+ card = (card + 1) & 0x0003;
+ }
+ if (!(isi_card[card].status & BOARD_ACTIVE))
+ goto sched_again;
+
+ prev_card = card;
+
+ count = isi_card[card].port_count;
+ port = isi_card[card].ports;
+ base = isi_card[card].base;
+
+ spin_lock_irqsave(&isi_card[card].card_lock, flags);
+ for (retries = 0; retries < 100; retries++) {
+ if (inw(base + 0xe) & 0x1)
+ break;
+ udelay(2);
+ }
+ if (retries >= 100)
+ goto unlock;
+
+ tty = tty_port_tty_get(&port->port);
+ if (tty == NULL)
+ goto put_unlock;
+
+ for (; count > 0; count--, port++) {
+ /* port not active or tx disabled to force flow control */
+ if (!(port->port.flags & ASYNC_INITIALIZED) ||
+ !(port->status & ISI_TXOK))
+ continue;
+
+ txcount = min_t(short, TX_SIZE, port->xmit_cnt);
+ if (txcount <= 0 || tty->stopped || tty->hw_stopped)
+ continue;
+
+ if (!(inw(base + 0x02) & (1 << port->channel)))
+ continue;
+
+ pr_debug("txing %d bytes, port%d.\n",
+ txcount, port->channel + 1);
+ outw((port->channel << isi_card[card].shift_count) | txcount,
+ base);
+ residue = NO;
+ wrd = 0;
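+		/*
+		 * The card takes 16-bit words. Copy the queued bytes out in
+		 * word-sized chunks; when a chunk ends on an odd byte, hold
+		 * that byte in 'wrd' as a residue and pair it with the first
+		 * byte of the next chunk, or write it out on its own if no
+		 * more data follows.
+		 */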
+ while (1) {
+ cnt = min_t(int, txcount, (SERIAL_XMIT_SIZE
+ - port->xmit_tail));
+ if (residue == YES) {
+ residue = NO;
+ if (cnt > 0) {
+ wrd |= (port->port.xmit_buf[port->xmit_tail]
+ << 8);
+ port->xmit_tail = (port->xmit_tail + 1)
+ & (SERIAL_XMIT_SIZE - 1);
+ port->xmit_cnt--;
+ txcount--;
+ cnt--;
+ outw(wrd, base);
+ } else {
+ outw(wrd, base);
+ break;
+ }
+ }
+ if (cnt <= 0)
+ break;
+ word_count = cnt >> 1;
+ outsw(base, port->port.xmit_buf+port->xmit_tail, word_count);
+ port->xmit_tail = (port->xmit_tail
+ + (word_count << 1)) & (SERIAL_XMIT_SIZE - 1);
+ txcount -= (word_count << 1);
+ port->xmit_cnt -= (word_count << 1);
+ if (cnt & 0x0001) {
+ residue = YES;
+ wrd = port->port.xmit_buf[port->xmit_tail];
+ port->xmit_tail = (port->xmit_tail + 1)
+ & (SERIAL_XMIT_SIZE - 1);
+ port->xmit_cnt--;
+ txcount--;
+ }
+ }
+
+ InterruptTheCard(base);
+ if (port->xmit_cnt <= 0)
+ port->status &= ~ISI_TXOK;
+ if (port->xmit_cnt <= WAKEUP_CHARS)
+ tty_wakeup(tty);
+ }
+
+put_unlock:
+ tty_kref_put(tty);
+unlock:
+ spin_unlock_irqrestore(&isi_card[card].card_lock, flags);
+	/* schedule another tx pass, hopefully in about 10ms */
+sched_again:
+ mod_timer(&tx, jiffies + msecs_to_jiffies(10));
+}
+
+/*
+ * Main interrupt handler routine
+ */
+
+static irqreturn_t isicom_interrupt(int irq, void *dev_id)
+{
+ struct isi_board *card = dev_id;
+ struct isi_port *port;
+ struct tty_struct *tty;
+ unsigned long base;
+ u16 header, word_count, count, channel;
+ short byte_count;
+ unsigned char *rp;
+
+ if (!card || !(card->status & FIRMWARE_LOADED))
+ return IRQ_NONE;
+
+ base = card->base;
+
+ /* did the card interrupt us? */
+ if (!(inw(base + 0x0e) & 0x02))
+ return IRQ_NONE;
+
+ spin_lock(&card->card_lock);
+
+ /*
+ * disable any interrupts from the PCI card and lower the
+ * interrupt line
+ */
+ outw(0x8000, base+0x04);
+ ClearInterrupt(base);
+
+ inw(base); /* get the dummy word out */
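+	/*
+	 * Header word layout, decoded below: bit 15 set marks a status
+	 * packet, the channel number lives in the 0x7800 bits (extracted
+	 * using the card's shift_count) and the low byte is the payload
+	 * byte count.
+	 */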
+ header = inw(base);
+ channel = (header & 0x7800) >> card->shift_count;
+ byte_count = header & 0xff;
+
+ if (channel + 1 > card->port_count) {
+ pr_warning("%s(0x%lx): %d(channel) > port_count.\n",
+ __func__, base, channel+1);
+ outw(0x0000, base+0x04); /* enable interrupts */
+ spin_unlock(&card->card_lock);
+ return IRQ_HANDLED;
+ }
+ port = card->ports + channel;
+ if (!(port->port.flags & ASYNC_INITIALIZED)) {
+ outw(0x0000, base+0x04); /* enable interrupts */
+ spin_unlock(&card->card_lock);
+ return IRQ_HANDLED;
+ }
+
+ tty = tty_port_tty_get(&port->port);
+ if (tty == NULL) {
+ word_count = byte_count >> 1;
+ while (byte_count > 1) {
+ inw(base);
+ byte_count -= 2;
+ }
+ if (byte_count & 0x01)
+ inw(base);
+ outw(0x0000, base+0x04); /* enable interrupts */
+ spin_unlock(&card->card_lock);
+ return IRQ_HANDLED;
+ }
+
+ if (header & 0x8000) { /* Status Packet */
+ header = inw(base);
+ switch (header & 0xff) {
+ case 0: /* Change in EIA signals */
+ if (port->port.flags & ASYNC_CHECK_CD) {
+ if (port->status & ISI_DCD) {
+ if (!(header & ISI_DCD)) {
+ /* Carrier has been lost */
+ pr_debug("%s: DCD->low.\n",
+ __func__);
+ port->status &= ~ISI_DCD;
+ tty_hangup(tty);
+ }
+ } else if (header & ISI_DCD) {
+ /* Carrier has been detected */
+ pr_debug("%s: DCD->high.\n",
+ __func__);
+ port->status |= ISI_DCD;
+ wake_up_interruptible(&port->port.open_wait);
+ }
+ } else {
+ if (header & ISI_DCD)
+ port->status |= ISI_DCD;
+ else
+ port->status &= ~ISI_DCD;
+ }
+
+ if (port->port.flags & ASYNC_CTS_FLOW) {
+ if (tty->hw_stopped) {
+ if (header & ISI_CTS) {
+ port->port.tty->hw_stopped = 0;
+ /* start tx ing */
+ port->status |= (ISI_TXOK
+ | ISI_CTS);
+ tty_wakeup(tty);
+ }
+ } else if (!(header & ISI_CTS)) {
+ tty->hw_stopped = 1;
+ /* stop tx ing */
+ port->status &= ~(ISI_TXOK | ISI_CTS);
+ }
+ } else {
+ if (header & ISI_CTS)
+ port->status |= ISI_CTS;
+ else
+ port->status &= ~ISI_CTS;
+ }
+
+ if (header & ISI_DSR)
+ port->status |= ISI_DSR;
+ else
+ port->status &= ~ISI_DSR;
+
+ if (header & ISI_RI)
+ port->status |= ISI_RI;
+ else
+ port->status &= ~ISI_RI;
+
+ break;
+
+ case 1: /* Received Break !!! */
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
+ if (port->port.flags & ASYNC_SAK)
+ do_SAK(tty);
+ tty_flip_buffer_push(tty);
+ break;
+
+ case 2: /* Statistics */
+ pr_debug("%s: stats!!!\n", __func__);
+ break;
+
+ default:
+ pr_debug("%s: Unknown code in status packet.\n",
+ __func__);
+ break;
+ }
+ } else { /* Data Packet */
+
+ count = tty_prepare_flip_string(tty, &rp, byte_count & ~1);
+ pr_debug("%s: Can rx %d of %d bytes.\n",
+ __func__, count, byte_count);
+ word_count = count >> 1;
+ insw(base, rp, word_count);
+ byte_count -= (word_count << 1);
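+		/*
+		 * An odd byte count leaves one final byte in the low half of
+		 * one more word.
+		 */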
+ if (count & 0x0001) {
+ tty_insert_flip_char(tty, inw(base) & 0xff,
+ TTY_NORMAL);
+ byte_count -= 2;
+ }
+ if (byte_count > 0) {
+ pr_debug("%s(0x%lx:%d): Flip buffer overflow! dropping bytes...\n",
+ __func__, base, channel + 1);
+ /* drain out unread xtra data */
+ while (byte_count > 0) {
+ inw(base);
+ byte_count -= 2;
+ }
+ }
+ tty_flip_buffer_push(tty);
+ }
+ outw(0x0000, base+0x04); /* enable interrupts */
+ spin_unlock(&card->card_lock);
+ tty_kref_put(tty);
+
+ return IRQ_HANDLED;
+}
+
+static void isicom_config_port(struct tty_struct *tty)
+{
+ struct isi_port *port = tty->driver_data;
+ struct isi_board *card = port->card;
+ unsigned long baud;
+ unsigned long base = card->base;
+ u16 channel_setup, channel = port->channel,
+ shift_count = card->shift_count;
+ unsigned char flow_ctrl;
+
+ /* FIXME: Switch to new tty baud API */
+ baud = C_BAUD(tty);
+ if (baud & CBAUDEX) {
+ baud &= ~CBAUDEX;
+
+ /* if CBAUDEX bit is on and the baud is set to either 50 or 75
+ * then the card is programmed for 57.6Kbps or 115Kbps
+ * respectively.
+ */
+
+ /* 1,2,3,4 => 57.6, 115.2, 230, 460 kbps resp. */
+ if (baud < 1 || baud > 4)
+ tty->termios->c_cflag &= ~CBAUDEX;
+ else
+ baud += 15;
+ }
+ if (baud == 15) {
+
+ /* the ASYNC_SPD_HI and ASYNC_SPD_VHI options are set
+ * by the set_serial_info ioctl ... this is done by
+ * the 'setserial' utility.
+ */
+
+ if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
+ baud++; /* 57.6 Kbps */
+ if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
+ baud += 2; /* 115 Kbps */
+ if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
+ baud += 3; /* 230 kbps*/
+ if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
+ baud += 4; /* 460 kbps*/
+ }
+ if (linuxb_to_isib[baud] == -1) {
+ /* hang up */
+ drop_dtr(port);
+ return;
+ } else
+ raise_dtr(port);
+
+ if (WaitTillCardIsFree(base) == 0) {
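+		/* program the baud rate and character framing for this channel */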
+ outw(0x8000 | (channel << shift_count) | 0x03, base);
+ outw(linuxb_to_isib[baud] << 8 | 0x03, base);
+ channel_setup = 0;
+ switch (C_CSIZE(tty)) {
+ case CS5:
+ channel_setup |= ISICOM_CS5;
+ break;
+ case CS6:
+ channel_setup |= ISICOM_CS6;
+ break;
+ case CS7:
+ channel_setup |= ISICOM_CS7;
+ break;
+ case CS8:
+ channel_setup |= ISICOM_CS8;
+ break;
+ }
+
+ if (C_CSTOPB(tty))
+ channel_setup |= ISICOM_2SB;
+ if (C_PARENB(tty)) {
+ channel_setup |= ISICOM_EVPAR;
+ if (C_PARODD(tty))
+ channel_setup |= ISICOM_ODPAR;
+ }
+ outw(channel_setup, base);
+ InterruptTheCard(base);
+ }
+ if (C_CLOCAL(tty))
+ port->port.flags &= ~ASYNC_CHECK_CD;
+ else
+ port->port.flags |= ASYNC_CHECK_CD;
+
+ /* flow control settings ...*/
+ flow_ctrl = 0;
+ port->port.flags &= ~ASYNC_CTS_FLOW;
+ if (C_CRTSCTS(tty)) {
+ port->port.flags |= ASYNC_CTS_FLOW;
+ flow_ctrl |= ISICOM_CTSRTS;
+ }
+ if (I_IXON(tty))
+ flow_ctrl |= ISICOM_RESPOND_XONXOFF;
+ if (I_IXOFF(tty))
+ flow_ctrl |= ISICOM_INITIATE_XONXOFF;
+
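+	/* hand the flow control mode and XON/XOFF characters to the card */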
+ if (WaitTillCardIsFree(base) == 0) {
+ outw(0x8000 | (channel << shift_count) | 0x04, base);
+ outw(flow_ctrl << 8 | 0x05, base);
+ outw((STOP_CHAR(tty)) << 8 | (START_CHAR(tty)), base);
+ InterruptTheCard(base);
+ }
+
+ /* rx enabled -> enable port for rx on the card */
+ if (C_CREAD(tty)) {
+ card->port_status |= (1 << channel);
+ outw(card->port_status, base + 0x02);
+ }
+}
+
+/* open et al. */
+
+static inline void isicom_setup_board(struct isi_board *bp)
+{
+ int channel;
+ struct isi_port *port;
+
+ bp->count++;
+ if (!(bp->status & BOARD_INIT)) {
+ port = bp->ports;
+ for (channel = 0; channel < bp->port_count; channel++, port++)
+ drop_dtr_rts(port);
+ }
+ bp->status |= BOARD_ACTIVE | BOARD_INIT;
+}
+
+/* Activate and thus setup board are protected from races against shutdown
+ by the tty_port mutex */
+
+static int isicom_activate(struct tty_port *tport, struct tty_struct *tty)
+{
+ struct isi_port *port = container_of(tport, struct isi_port, port);
+ struct isi_board *card = port->card;
+ unsigned long flags;
+
+ if (tty_port_alloc_xmit_buf(tport) < 0)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+ isicom_setup_board(card);
+
+ port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
+
+ /* discard any residual data */
+ if (WaitTillCardIsFree(card->base) == 0) {
+ outw(0x8000 | (port->channel << card->shift_count) | 0x02,
+ card->base);
+ outw(((ISICOM_KILLTX | ISICOM_KILLRX) << 8) | 0x06, card->base);
+ InterruptTheCard(card->base);
+ }
+ isicom_config_port(tty);
+ spin_unlock_irqrestore(&card->card_lock, flags);
+
+ return 0;
+}
+
+static int isicom_carrier_raised(struct tty_port *port)
+{
+ struct isi_port *ip = container_of(port, struct isi_port, port);
+	return (ip->status & ISI_DCD) ? 1 : 0;
+}
+
+static struct tty_port *isicom_find_port(struct tty_struct *tty)
+{
+ struct isi_port *port;
+ struct isi_board *card;
+ unsigned int board;
+ int line = tty->index;
+
+ if (line < 0 || line > PORT_COUNT-1)
+ return NULL;
+ board = BOARD(line);
+ card = &isi_card[board];
+
+ if (!(card->status & FIRMWARE_LOADED))
+ return NULL;
+
+ /* open on a port greater than the port count for the card !!! */
+ if (line > ((board * 16) + card->port_count - 1))
+ return NULL;
+
+ port = &isi_ports[line];
+ if (isicom_paranoia_check(port, tty->name, "isicom_open"))
+ return NULL;
+
+ return &port->port;
+}
+
+static int isicom_open(struct tty_struct *tty, struct file *filp)
+{
+ struct isi_port *port;
+ struct tty_port *tport;
+
+ tport = isicom_find_port(tty);
+ if (tport == NULL)
+ return -ENODEV;
+ port = container_of(tport, struct isi_port, port);
+
+ tty->driver_data = port;
+ return tty_port_open(tport, tty, filp);
+}
+
+/* close et al. */
+
+/* card->lock HAS to be held */
+static void isicom_shutdown_port(struct isi_port *port)
+{
+ struct isi_board *card = port->card;
+
+ if (--card->count < 0) {
+ pr_debug("%s: bad board(0x%lx) count %d.\n",
+ __func__, card->base, card->count);
+ card->count = 0;
+ }
+	/* last port was closed, shut the board down too */
+	if (!card->count)
+		card->status &= ~BOARD_ACTIVE;
+}
+
+static void isicom_flush_buffer(struct tty_struct *tty)
+{
+ struct isi_port *port = tty->driver_data;
+ struct isi_board *card = port->card;
+ unsigned long flags;
+
+ if (isicom_paranoia_check(port, tty->name, "isicom_flush_buffer"))
+ return;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+ port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
+ spin_unlock_irqrestore(&card->card_lock, flags);
+
+ tty_wakeup(tty);
+}
+
+static void isicom_shutdown(struct tty_port *port)
+{
+ struct isi_port *ip = container_of(port, struct isi_port, port);
+ struct isi_board *card = ip->card;
+ unsigned long flags;
+
+ /* indicate to the card that no more data can be received
+ on this port */
+ spin_lock_irqsave(&card->card_lock, flags);
+ card->port_status &= ~(1 << ip->channel);
+ outw(card->port_status, card->base + 0x02);
+ isicom_shutdown_port(ip);
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ tty_port_free_xmit_buf(port);
+}
+
+static void isicom_close(struct tty_struct *tty, struct file *filp)
+{
+ struct isi_port *ip = tty->driver_data;
+ struct tty_port *port;
+
+ if (ip == NULL)
+ return;
+
+ port = &ip->port;
+ if (isicom_paranoia_check(ip, tty->name, "isicom_close"))
+ return;
+ tty_port_close(port, tty, filp);
+}
+
+/* write et al. */
+static int isicom_write(struct tty_struct *tty, const unsigned char *buf,
+ int count)
+{
+ struct isi_port *port = tty->driver_data;
+ struct isi_board *card = port->card;
+ unsigned long flags;
+ int cnt, total = 0;
+
+ if (isicom_paranoia_check(port, tty->name, "isicom_write"))
+ return 0;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+
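+	/*
+	 * Copy into the circular transmit buffer in chunks, bounded both by
+	 * the free space (one byte always stays unused) and by the distance
+	 * to the end of the buffer.
+	 */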
+ while (1) {
+ cnt = min_t(int, count, min(SERIAL_XMIT_SIZE - port->xmit_cnt
+ - 1, SERIAL_XMIT_SIZE - port->xmit_head));
+ if (cnt <= 0)
+ break;
+
+ memcpy(port->port.xmit_buf + port->xmit_head, buf, cnt);
+ port->xmit_head = (port->xmit_head + cnt) & (SERIAL_XMIT_SIZE
+ - 1);
+ port->xmit_cnt += cnt;
+ buf += cnt;
+ count -= cnt;
+ total += cnt;
+ }
+ if (port->xmit_cnt && !tty->stopped && !tty->hw_stopped)
+ port->status |= ISI_TXOK;
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ return total;
+}
+
+/* put_char et al. */
+static int isicom_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ struct isi_port *port = tty->driver_data;
+ struct isi_board *card = port->card;
+ unsigned long flags;
+
+ if (isicom_paranoia_check(port, tty->name, "isicom_put_char"))
+ return 0;
+
+ spin_lock_irqsave(&card->card_lock, flags);
+ if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) {
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ return 0;
+ }
+
+ port->port.xmit_buf[port->xmit_head++] = ch;
+ port->xmit_head &= (SERIAL_XMIT_SIZE - 1);
+ port->xmit_cnt++;
+ spin_unlock_irqrestore(&card->card_lock, flags);
+ return 1;
+}
+
+/* flush_chars et al. */
+static void isicom_flush_chars(struct tty_struct *tty)
+{
+ struct isi_port *port = tty->driver_data;
+
+ if (isicom_paranoia_check(port, tty->name, "isicom_flush_chars"))
+ return;
+
+ if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
+ !port->port.xmit_buf)
+ return;
+
+ /* this tells the transmitter to consider this port for
+ data output to the card ... that's the best we can do. */
+ port->status |= ISI_TXOK;
+}
+
+/* write_room et al. */
+static int isicom_write_room(struct tty_struct *tty)
+{
+ struct isi_port *port = tty->driver_data;
+ int free;
+
+ if (isicom_paranoia_check(port, tty->name, "isicom_write_room"))
+ return 0;
+
+ free = SERIAL_XMIT_SIZE - port->xmit_cnt - 1;
+ if (free < 0)
+ free = 0;
+ return free;
+}
+
+/* chars_in_buffer et al. */
+static int isicom_chars_in_buffer(struct tty_struct *tty)
+{
+ struct isi_port *port = tty->driver_data;
+ if (isicom_paranoia_check(port, tty->name, "isicom_chars_in_buffer"))
+ return 0;
+ return port->xmit_cnt;
+}
+
+/* ioctl et al. */
+static int isicom_send_break(struct tty_struct *tty, int length)
+{
+ struct isi_port *port = tty->driver_data;
+ struct isi_board *card = port->card;
+ unsigned long base = card->base;
+
+ if (length == -1)
+ return -EOPNOTSUPP;
+
+ if (!lock_card(card))
+ return -EINVAL;
+
+ outw(0x8000 | ((port->channel) << (card->shift_count)) | 0x3, base);
+ outw((length & 0xff) << 8 | 0x00, base);
+ outw((length & 0xff00), base);
+ InterruptTheCard(base);
+
+ unlock_card(card);
+ return 0;
+}
+
+static int isicom_tiocmget(struct tty_struct *tty)
+{
+ struct isi_port *port = tty->driver_data;
+ /* just send the port status */
+ u16 status = port->status;
+
+ if (isicom_paranoia_check(port, tty->name, "isicom_ioctl"))
+ return -ENODEV;
+
+ return ((status & ISI_RTS) ? TIOCM_RTS : 0) |
+ ((status & ISI_DTR) ? TIOCM_DTR : 0) |
+ ((status & ISI_DCD) ? TIOCM_CAR : 0) |
+ ((status & ISI_DSR) ? TIOCM_DSR : 0) |
+ ((status & ISI_CTS) ? TIOCM_CTS : 0) |
+		((status & ISI_RI) ? TIOCM_RI : 0);
+}
+
+static int isicom_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct isi_port *port = tty->driver_data;
+ unsigned long flags;
+
+ if (isicom_paranoia_check(port, tty->name, "isicom_ioctl"))
+ return -ENODEV;
+
+ spin_lock_irqsave(&port->card->card_lock, flags);
+ if (set & TIOCM_RTS)
+ raise_rts(port);
+ if (set & TIOCM_DTR)
+ raise_dtr(port);
+
+ if (clear & TIOCM_RTS)
+ drop_rts(port);
+ if (clear & TIOCM_DTR)
+ drop_dtr(port);
+ spin_unlock_irqrestore(&port->card->card_lock, flags);
+
+ return 0;
+}
+
+static int isicom_set_serial_info(struct tty_struct *tty,
+ struct serial_struct __user *info)
+{
+ struct isi_port *port = tty->driver_data;
+ struct serial_struct newinfo;
+ int reconfig_port;
+
+ if (copy_from_user(&newinfo, info, sizeof(newinfo)))
+ return -EFAULT;
+
+ mutex_lock(&port->port.mutex);
+ reconfig_port = ((port->port.flags & ASYNC_SPD_MASK) !=
+ (newinfo.flags & ASYNC_SPD_MASK));
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ if ((newinfo.close_delay != port->port.close_delay) ||
+ (newinfo.closing_wait != port->port.closing_wait) ||
+ ((newinfo.flags & ~ASYNC_USR_MASK) !=
+ (port->port.flags & ~ASYNC_USR_MASK))) {
+ mutex_unlock(&port->port.mutex);
+ return -EPERM;
+ }
+ port->port.flags = ((port->port.flags & ~ASYNC_USR_MASK) |
+ (newinfo.flags & ASYNC_USR_MASK));
+ } else {
+ port->port.close_delay = newinfo.close_delay;
+ port->port.closing_wait = newinfo.closing_wait;
+ port->port.flags = ((port->port.flags & ~ASYNC_FLAGS) |
+ (newinfo.flags & ASYNC_FLAGS));
+ }
+ if (reconfig_port) {
+ unsigned long flags;
+ spin_lock_irqsave(&port->card->card_lock, flags);
+ isicom_config_port(tty);
+ spin_unlock_irqrestore(&port->card->card_lock, flags);
+ }
+ mutex_unlock(&port->port.mutex);
+ return 0;
+}
+
+static int isicom_get_serial_info(struct isi_port *port,
+ struct serial_struct __user *info)
+{
+ struct serial_struct out_info;
+
+ mutex_lock(&port->port.mutex);
+ memset(&out_info, 0, sizeof(out_info));
+/* out_info.type = ? */
+ out_info.line = port - isi_ports;
+ out_info.port = port->card->base;
+ out_info.irq = port->card->irq;
+ out_info.flags = port->port.flags;
+/* out_info.baud_base = ? */
+ out_info.close_delay = port->port.close_delay;
+ out_info.closing_wait = port->port.closing_wait;
+ mutex_unlock(&port->port.mutex);
+ if (copy_to_user(info, &out_info, sizeof(out_info)))
+ return -EFAULT;
+ return 0;
+}
+
+static int isicom_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct isi_port *port = tty->driver_data;
+ void __user *argp = (void __user *)arg;
+
+ if (isicom_paranoia_check(port, tty->name, "isicom_ioctl"))
+ return -ENODEV;
+
+ switch (cmd) {
+ case TIOCGSERIAL:
+ return isicom_get_serial_info(port, argp);
+
+ case TIOCSSERIAL:
+ return isicom_set_serial_info(tty, argp);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+/* set_termios et al. */
+static void isicom_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios)
+{
+ struct isi_port *port = tty->driver_data;
+ unsigned long flags;
+
+ if (isicom_paranoia_check(port, tty->name, "isicom_set_termios"))
+ return;
+
+ if (tty->termios->c_cflag == old_termios->c_cflag &&
+ tty->termios->c_iflag == old_termios->c_iflag)
+ return;
+
+ spin_lock_irqsave(&port->card->card_lock, flags);
+ isicom_config_port(tty);
+ spin_unlock_irqrestore(&port->card->card_lock, flags);
+
+ if ((old_termios->c_cflag & CRTSCTS) &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ isicom_start(tty);
+ }
+}
+
+/* throttle et al. */
+static void isicom_throttle(struct tty_struct *tty)
+{
+ struct isi_port *port = tty->driver_data;
+ struct isi_board *card = port->card;
+
+ if (isicom_paranoia_check(port, tty->name, "isicom_throttle"))
+ return;
+
+ /* tell the card that this port cannot handle any more data for now */
+ card->port_status &= ~(1 << port->channel);
+ outw(card->port_status, card->base + 0x02);
+}
+
+/* unthrottle et al. */
+static void isicom_unthrottle(struct tty_struct *tty)
+{
+ struct isi_port *port = tty->driver_data;
+ struct isi_board *card = port->card;
+
+ if (isicom_paranoia_check(port, tty->name, "isicom_unthrottle"))
+ return;
+
+ /* tell the card that this port is ready to accept more data */
+ card->port_status |= (1 << port->channel);
+ outw(card->port_status, card->base + 0x02);
+}
+
+/* stop et al. */
+static void isicom_stop(struct tty_struct *tty)
+{
+ struct isi_port *port = tty->driver_data;
+
+ if (isicom_paranoia_check(port, tty->name, "isicom_stop"))
+ return;
+
+ /* this tells the transmitter not to consider this port for
+ data output to the card. */
+ port->status &= ~ISI_TXOK;
+}
+
+/* start et al. */
+static void isicom_start(struct tty_struct *tty)
+{
+ struct isi_port *port = tty->driver_data;
+
+ if (isicom_paranoia_check(port, tty->name, "isicom_start"))
+ return;
+
+ /* this tells the transmitter to consider this port for
+ data output to the card. */
+ port->status |= ISI_TXOK;
+}
+
+static void isicom_hangup(struct tty_struct *tty)
+{
+ struct isi_port *port = tty->driver_data;
+
+ if (isicom_paranoia_check(port, tty->name, "isicom_hangup"))
+ return;
+ tty_port_hangup(&port->port);
+}
+
+
+/*
+ * Driver init and deinit functions
+ */
+
+static const struct tty_operations isicom_ops = {
+ .open = isicom_open,
+ .close = isicom_close,
+ .write = isicom_write,
+ .put_char = isicom_put_char,
+ .flush_chars = isicom_flush_chars,
+ .write_room = isicom_write_room,
+ .chars_in_buffer = isicom_chars_in_buffer,
+ .ioctl = isicom_ioctl,
+ .set_termios = isicom_set_termios,
+ .throttle = isicom_throttle,
+ .unthrottle = isicom_unthrottle,
+ .stop = isicom_stop,
+ .start = isicom_start,
+ .hangup = isicom_hangup,
+ .flush_buffer = isicom_flush_buffer,
+ .tiocmget = isicom_tiocmget,
+ .tiocmset = isicom_tiocmset,
+ .break_ctl = isicom_send_break,
+};
+
+static const struct tty_port_operations isicom_port_ops = {
+ .carrier_raised = isicom_carrier_raised,
+ .dtr_rts = isicom_dtr_rts,
+ .activate = isicom_activate,
+ .shutdown = isicom_shutdown,
+};
+
+static int __devinit reset_card(struct pci_dev *pdev,
+ const unsigned int card, unsigned int *signature)
+{
+ struct isi_board *board = pci_get_drvdata(pdev);
+ unsigned long base = board->base;
+ unsigned int sig, portcount = 0;
+ int retval = 0;
+
+ dev_dbg(&pdev->dev, "ISILoad:Resetting Card%d at 0x%lx\n", card + 1,
+ base);
+
+ inw(base + 0x8);
+
+ msleep(10);
+
+ outw(0, base + 0x8); /* Reset */
+
+ msleep(1000);
+
+ sig = inw(base + 0x4) & 0xff;
+
+ if (sig != 0xa5 && sig != 0xbb && sig != 0xcc && sig != 0xdd &&
+ sig != 0xee) {
+ dev_warn(&pdev->dev, "ISILoad:Card%u reset failure (Possible "
+ "bad I/O Port Address 0x%lx).\n", card + 1, base);
+ dev_dbg(&pdev->dev, "Sig=0x%x\n", sig);
+ retval = -EIO;
+ goto end;
+ }
+
+ msleep(10);
+
+ portcount = inw(base + 0x2);
+ if (!(inw(base + 0xe) & 0x1) || (portcount != 0 && portcount != 4 &&
+ portcount != 8 && portcount != 16)) {
+ dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n",
+ card + 1);
+ retval = -EIO;
+ goto end;
+ }
+
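+	/*
+	 * The signature byte identifies the card model; it determines the
+	 * port count and the shift used for the channel field in commands.
+	 */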
+ switch (sig) {
+ case 0xa5:
+ case 0xbb:
+ case 0xdd:
+ board->port_count = (portcount == 4) ? 4 : 8;
+ board->shift_count = 12;
+ break;
+ case 0xcc:
+ case 0xee:
+ board->port_count = 16;
+ board->shift_count = 11;
+ break;
+ }
+ dev_info(&pdev->dev, "-Done\n");
+ *signature = sig;
+
+end:
+ return retval;
+}
+
+static int __devinit load_firmware(struct pci_dev *pdev,
+ const unsigned int index, const unsigned int signature)
+{
+ struct isi_board *board = pci_get_drvdata(pdev);
+ const struct firmware *fw;
+ unsigned long base = board->base;
+ unsigned int a;
+ u16 word_count, status;
+ int retval = -EIO;
+ char *name;
+ u8 *data;
+
+ struct stframe {
+ u16 addr;
+ u16 count;
+ u8 data[0];
+ } *frame;
+
+ switch (signature) {
+ case 0xa5:
+ name = "isi608.bin";
+ break;
+ case 0xbb:
+ name = "isi608em.bin";
+ break;
+ case 0xcc:
+ name = "isi616em.bin";
+ break;
+ case 0xdd:
+ name = "isi4608.bin";
+ break;
+ case 0xee:
+ name = "isi4616.bin";
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown signature.\n");
+ goto end;
+ }
+
+ retval = request_firmware(&fw, name, &pdev->dev);
+ if (retval)
+ goto end;
+
+ retval = -EIO;
+
+ for (frame = (struct stframe *)fw->data;
+ frame < (struct stframe *)(fw->data + fw->size);
+ frame = (struct stframe *)((u8 *)(frame + 1) +
+ frame->count)) {
+ if (WaitTillCardIsFree(base))
+ goto errrelfw;
+
+ outw(0xf0, base); /* start upload sequence */
+ outw(0x00, base);
+ outw(frame->addr, base); /* lsb of address */
+
+ word_count = frame->count / 2 + frame->count % 2;
+ outw(word_count, base);
+ InterruptTheCard(base);
+
+ udelay(100); /* 0x2f */
+
+ if (WaitTillCardIsFree(base))
+ goto errrelfw;
+
+ status = inw(base + 0x4);
+ if (status != 0) {
+ dev_warn(&pdev->dev, "Card%d rejected load header:\n"
+ "Address:0x%x\n"
+ "Count:0x%x\n"
+ "Status:0x%x\n",
+ index + 1, frame->addr, frame->count, status);
+ goto errrelfw;
+ }
+ outsw(base, frame->data, word_count);
+
+ InterruptTheCard(base);
+
+ udelay(50); /* 0x0f */
+
+ if (WaitTillCardIsFree(base))
+ goto errrelfw;
+
+ status = inw(base + 0x4);
+ if (status != 0) {
+			dev_err(&pdev->dev, "Card%d got out of sync. Card "
+ "Status:0x%x\n", index + 1, status);
+ goto errrelfw;
+ }
+ }
+
+/* XXX: is it still worth verifying the upload by reading it back and
+ * comparing with the original, as the load firmware package did? */
+ for (frame = (struct stframe *)fw->data;
+ frame < (struct stframe *)(fw->data + fw->size);
+ frame = (struct stframe *)((u8 *)(frame + 1) +
+ frame->count)) {
+ if (WaitTillCardIsFree(base))
+ goto errrelfw;
+
+ outw(0xf1, base); /* start download sequence */
+ outw(0x00, base);
+ outw(frame->addr, base); /* lsb of address */
+
+ word_count = (frame->count >> 1) + frame->count % 2;
+ outw(word_count + 1, base);
+ InterruptTheCard(base);
+
+ udelay(50); /* 0xf */
+
+ if (WaitTillCardIsFree(base))
+ goto errrelfw;
+
+ status = inw(base + 0x4);
+ if (status != 0) {
+ dev_warn(&pdev->dev, "Card%d rejected verify header:\n"
+ "Address:0x%x\n"
+ "Count:0x%x\n"
+ "Status: 0x%x\n",
+ index + 1, frame->addr, frame->count, status);
+ goto errrelfw;
+ }
+
+ data = kmalloc(word_count * 2, GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&pdev->dev, "Card%d, firmware upload "
+ "failed, not enough memory\n", index + 1);
+ goto errrelfw;
+ }
+ inw(base);
+ insw(base, data, word_count);
+ InterruptTheCard(base);
+
+ for (a = 0; a < frame->count; a++)
+ if (data[a] != frame->data[a]) {
+ kfree(data);
+ dev_err(&pdev->dev, "Card%d, firmware upload "
+ "failed\n", index + 1);
+ goto errrelfw;
+ }
+ kfree(data);
+
+ udelay(50); /* 0xf */
+
+ if (WaitTillCardIsFree(base))
+ goto errrelfw;
+
+ status = inw(base + 0x4);
+ if (status != 0) {
+ dev_err(&pdev->dev, "Card%d verify got out of sync. "
+ "Card Status:0x%x\n", index + 1, status);
+ goto errrelfw;
+ }
+ }
+
+ /* xfer ctrl */
+ if (WaitTillCardIsFree(base))
+ goto errrelfw;
+
+ outw(0xf2, base);
+ outw(0x800, base);
+ outw(0x0, base);
+ outw(0x0, base);
+ InterruptTheCard(base);
+ outw(0x0, base + 0x4); /* for ISI4608 cards */
+
+ board->status |= FIRMWARE_LOADED;
+ retval = 0;
+
+errrelfw:
+ release_firmware(fw);
+end:
+ return retval;
+}
+
+/*
+ * Insmod can set static symbols so keep these static
+ */
+static unsigned int card_count;
+
+static int __devinit isicom_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ unsigned int uninitialized_var(signature), index;
+ int retval = -EPERM;
+ struct isi_board *board = NULL;
+
+ if (card_count >= BOARD_COUNT)
+ goto err;
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "failed to enable\n");
+ goto err;
+ }
+
+ dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device);
+
+ /* allot the first empty slot in the array */
+ for (index = 0; index < BOARD_COUNT; index++) {
+ if (isi_card[index].base == 0) {
+ board = &isi_card[index];
+ break;
+ }
+ }
+ if (index == BOARD_COUNT) {
+ retval = -ENODEV;
+ goto err_disable;
+ }
+
+ board->index = index;
+ board->base = pci_resource_start(pdev, 3);
+ board->irq = pdev->irq;
+ card_count++;
+
+ pci_set_drvdata(pdev, board);
+
+ retval = pci_request_region(pdev, 3, ISICOM_NAME);
+ if (retval) {
+ dev_err(&pdev->dev, "I/O Region 0x%lx-0x%lx is busy. Card%d "
+ "will be disabled.\n", board->base, board->base + 15,
+ index + 1);
+ retval = -EBUSY;
+ goto errdec;
+ }
+
+ retval = request_irq(board->irq, isicom_interrupt,
+ IRQF_SHARED | IRQF_DISABLED, ISICOM_NAME, board);
+ if (retval < 0) {
+ dev_err(&pdev->dev, "Could not install handler at Irq %d. "
+ "Card%d will be disabled.\n", board->irq, index + 1);
+ goto errunrr;
+ }
+
+ retval = reset_card(pdev, index, &signature);
+ if (retval < 0)
+ goto errunri;
+
+ retval = load_firmware(pdev, index, signature);
+ if (retval < 0)
+ goto errunri;
+
+ for (index = 0; index < board->port_count; index++)
+ tty_register_device(isicom_normal, board->index * 16 + index,
+ &pdev->dev);
+
+ return 0;
+
+errunri:
+ free_irq(board->irq, board);
+errunrr:
+ pci_release_region(pdev, 3);
+errdec:
+ board->base = 0;
+ card_count--;
+err_disable:
+ pci_disable_device(pdev);
+err:
+ return retval;
+}
+
+static void __devexit isicom_remove(struct pci_dev *pdev)
+{
+ struct isi_board *board = pci_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < board->port_count; i++)
+ tty_unregister_device(isicom_normal, board->index * 16 + i);
+
+ free_irq(board->irq, board);
+ pci_release_region(pdev, 3);
+ board->base = 0;
+ card_count--;
+ pci_disable_device(pdev);
+}
+
+static int __init isicom_init(void)
+{
+ int retval, idx, channel;
+ struct isi_port *port;
+
+ for (idx = 0; idx < BOARD_COUNT; idx++) {
+ port = &isi_ports[idx * 16];
+ isi_card[idx].ports = port;
+ spin_lock_init(&isi_card[idx].card_lock);
+ for (channel = 0; channel < 16; channel++, port++) {
+ tty_port_init(&port->port);
+ port->port.ops = &isicom_port_ops;
+ port->magic = ISICOM_MAGIC;
+ port->card = &isi_card[idx];
+ port->channel = channel;
+ port->port.close_delay = 50 * HZ/100;
+ port->port.closing_wait = 3000 * HZ/100;
+ port->status = 0;
+ /* . . . */
+ }
+ isi_card[idx].base = 0;
+ isi_card[idx].irq = 0;
+ }
+
+ /* tty driver structure initialization */
+ isicom_normal = alloc_tty_driver(PORT_COUNT);
+ if (!isicom_normal) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ isicom_normal->owner = THIS_MODULE;
+ isicom_normal->name = "ttyM";
+ isicom_normal->major = ISICOM_NMAJOR;
+ isicom_normal->minor_start = 0;
+ isicom_normal->type = TTY_DRIVER_TYPE_SERIAL;
+ isicom_normal->subtype = SERIAL_TYPE_NORMAL;
+ isicom_normal->init_termios = tty_std_termios;
+ isicom_normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL |
+ CLOCAL;
+ isicom_normal->flags = TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK;
+ tty_set_operations(isicom_normal, &isicom_ops);
+
+ retval = tty_register_driver(isicom_normal);
+ if (retval) {
+ pr_debug("Couldn't register the dialin driver\n");
+ goto err_puttty;
+ }
+
+ retval = pci_register_driver(&isicom_driver);
+ if (retval < 0) {
+ pr_err("Unable to register pci driver.\n");
+ goto err_unrtty;
+ }
+
+ mod_timer(&tx, jiffies + 1);
+
+ return 0;
+err_unrtty:
+ tty_unregister_driver(isicom_normal);
+err_puttty:
+ put_tty_driver(isicom_normal);
+error:
+ return retval;
+}
+
+static void __exit isicom_exit(void)
+{
+ del_timer_sync(&tx);
+
+ pci_unregister_driver(&isicom_driver);
+ tty_unregister_driver(isicom_normal);
+ put_tty_driver(isicom_normal);
+}
+
+module_init(isicom_init);
+module_exit(isicom_exit);
+
+MODULE_AUTHOR("MultiTech");
+MODULE_DESCRIPTION("Driver for the ISI series of cards by MultiTech");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("isi608.bin");
+MODULE_FIRMWARE("isi608em.bin");
+MODULE_FIRMWARE("isi616em.bin");
+MODULE_FIRMWARE("isi4608.bin");
+MODULE_FIRMWARE("isi4616.bin");
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
new file mode 100644
index 00000000..8f82f7ab
--- /dev/null
+++ b/drivers/tty/moxa.c
@@ -0,0 +1,2084 @@
+/*****************************************************************************/
+/*
+ * moxa.c -- MOXA Intellio family multiport serial driver.
+ *
+ * Copyright (C) 1999-2000 Moxa Technologies (support@moxa.com).
+ * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
+ *
+ * This code is loosely based on the Linux serial driver, written by
+ * Linus Torvalds, Theodore T'so and others.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/*
+ * MOXA Intellio Series Driver
+ * for : LINUX
+ * date : 1999/1/7
+ * version : 5.1
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/serial.h>
+#include <linux/tty_driver.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include "moxa.h"
+
+#define MOXA_VERSION "6.0k"
+
+#define MOXA_FW_HDRLEN 32
+
+#define MOXAMAJOR 172
+
+#define MAX_BOARDS 4 /* Don't change this value */
+#define MAX_PORTS_PER_BOARD 32 /* Don't change this value */
+#define MAX_PORTS (MAX_BOARDS * MAX_PORTS_PER_BOARD)
+
+#define MOXA_IS_320(brd) ((brd)->boardType == MOXA_BOARD_C320_ISA || \
+ (brd)->boardType == MOXA_BOARD_C320_PCI)
+
+/*
+ * Define the Moxa PCI vendor and device IDs.
+ */
+#define MOXA_BUS_TYPE_ISA 0
+#define MOXA_BUS_TYPE_PCI 1
+
+enum {
+ MOXA_BOARD_C218_PCI = 1,
+ MOXA_BOARD_C218_ISA,
+ MOXA_BOARD_C320_PCI,
+ MOXA_BOARD_C320_ISA,
+ MOXA_BOARD_CP204J,
+};
+
+static char *moxa_brdname[] =
+{
+ "C218 Turbo PCI series",
+ "C218 Turbo ISA series",
+ "C320 Turbo PCI series",
+ "C320 Turbo ISA series",
+ "CP-204J series",
+};
+
+#ifdef CONFIG_PCI
+static struct pci_device_id moxa_pcibrds[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_C218),
+ .driver_data = MOXA_BOARD_C218_PCI },
+ { PCI_DEVICE(PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_C320),
+ .driver_data = MOXA_BOARD_C320_PCI },
+ { PCI_DEVICE(PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP204J),
+ .driver_data = MOXA_BOARD_CP204J },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, moxa_pcibrds);
+#endif /* CONFIG_PCI */
+
+struct moxa_port;
+
+static struct moxa_board_conf {
+ int boardType;
+ int numPorts;
+ int busType;
+
+ unsigned int ready;
+
+ struct moxa_port *ports;
+
+ void __iomem *basemem;
+ void __iomem *intNdx;
+ void __iomem *intPend;
+ void __iomem *intTable;
+} moxa_boards[MAX_BOARDS];
+
+struct mxser_mstatus {
+ tcflag_t cflag;
+ int cts;
+ int dsr;
+ int ri;
+ int dcd;
+};
+
+struct moxaq_str {
+ int inq;
+ int outq;
+};
+
+struct moxa_port {
+ struct tty_port port;
+ struct moxa_board_conf *board;
+ void __iomem *tableAddr;
+
+ int type;
+ int cflag;
+ unsigned long statusflags;
+
+ u8 DCDState; /* Protected by the port lock */
+ u8 lineCtrl;
+ u8 lowChkFlag;
+};
+
+struct mon_str {
+ int tick;
+ int rxcnt[MAX_PORTS];
+ int txcnt[MAX_PORTS];
+};
+
+/* statusflags */
+#define TXSTOPPED 1
+#define LOWWAIT 2
+#define EMPTYWAIT 3
+
+#define SERIAL_DO_RESTART
+
+#define WAKEUP_CHARS 256
+
+static int ttymajor = MOXAMAJOR;
+static struct mon_str moxaLog;
+static unsigned int moxaFuncTout = HZ / 2;
+static unsigned int moxaLowWaterChk;
+static DEFINE_MUTEX(moxa_openlock);
+static DEFINE_SPINLOCK(moxa_lock);
+
+static unsigned long baseaddr[MAX_BOARDS];
+static unsigned int type[MAX_BOARDS];
+static unsigned int numports[MAX_BOARDS];
+
+MODULE_AUTHOR("William Chen");
+MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("c218tunx.cod");
+MODULE_FIRMWARE("cp204unx.cod");
+MODULE_FIRMWARE("c320tunx.cod");
+
+module_param_array(type, uint, NULL, 0);
+MODULE_PARM_DESC(type, "card type: C218=2, C320=4");
+module_param_array(baseaddr, ulong, NULL, 0);
+MODULE_PARM_DESC(baseaddr, "base address");
+module_param_array(numports, uint, NULL, 0);
+MODULE_PARM_DESC(numports, "numports (ignored for C218)");
+
+module_param(ttymajor, int, 0);
+
+/*
+ * static functions:
+ */
+static int moxa_open(struct tty_struct *, struct file *);
+static void moxa_close(struct tty_struct *, struct file *);
+static int moxa_write(struct tty_struct *, const unsigned char *, int);
+static int moxa_write_room(struct tty_struct *);
+static void moxa_flush_buffer(struct tty_struct *);
+static int moxa_chars_in_buffer(struct tty_struct *);
+static void moxa_set_termios(struct tty_struct *, struct ktermios *);
+static void moxa_stop(struct tty_struct *);
+static void moxa_start(struct tty_struct *);
+static void moxa_hangup(struct tty_struct *);
+static int moxa_tiocmget(struct tty_struct *tty);
+static int moxa_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear);
+static void moxa_poll(unsigned long);
+static void moxa_set_tty_param(struct tty_struct *, struct ktermios *);
+static void moxa_shutdown(struct tty_port *);
+static int moxa_carrier_raised(struct tty_port *);
+static void moxa_dtr_rts(struct tty_port *, int);
+/*
+ * moxa board interface functions:
+ */
+static void MoxaPortEnable(struct moxa_port *);
+static void MoxaPortDisable(struct moxa_port *);
+static int MoxaPortSetTermio(struct moxa_port *, struct ktermios *, speed_t);
+static int MoxaPortGetLineOut(struct moxa_port *, int *, int *);
+static void MoxaPortLineCtrl(struct moxa_port *, int, int);
+static void MoxaPortFlowCtrl(struct moxa_port *, int, int, int, int, int);
+static int MoxaPortLineStatus(struct moxa_port *);
+static void MoxaPortFlushData(struct moxa_port *, int);
+static int MoxaPortWriteData(struct tty_struct *, const unsigned char *, int);
+static int MoxaPortReadData(struct moxa_port *);
+static int MoxaPortTxQueue(struct moxa_port *);
+static int MoxaPortRxQueue(struct moxa_port *);
+static int MoxaPortTxFree(struct moxa_port *);
+static void MoxaPortTxDisable(struct moxa_port *);
+static void MoxaPortTxEnable(struct moxa_port *);
+static int moxa_get_serial_info(struct moxa_port *, struct serial_struct __user *);
+static int moxa_set_serial_info(struct moxa_port *, struct serial_struct __user *);
+static void MoxaSetFifo(struct moxa_port *port, int enable);
+
+/*
+ * I/O functions
+ */
+
+static DEFINE_SPINLOCK(moxafunc_lock);
+
+static void moxa_wait_finish(void __iomem *ofsAddr)
+{
+ unsigned long end = jiffies + moxaFuncTout;
+
+ while (readw(ofsAddr + FuncCode) != 0)
+ if (time_after(jiffies, end))
+ return;
+ if (readw(ofsAddr + FuncCode) != 0 && printk_ratelimit())
+ printk(KERN_WARNING "moxa function expired\n");
+}
+
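+/*
+ * Post a command/argument pair into the board's mailbox registers and spin
+ * until the firmware clears the function code (or the timeout expires).
+ */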
+static void moxafunc(void __iomem *ofsAddr, u16 cmd, u16 arg)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&moxafunc_lock, flags);
+ writew(arg, ofsAddr + FuncArg);
+ writew(cmd, ofsAddr + FuncCode);
+ moxa_wait_finish(ofsAddr);
+ spin_unlock_irqrestore(&moxafunc_lock, flags);
+}
+
+static int moxafuncret(void __iomem *ofsAddr, u16 cmd, u16 arg)
+{
+ unsigned long flags;
+ u16 ret;
+ spin_lock_irqsave(&moxafunc_lock, flags);
+ writew(arg, ofsAddr + FuncArg);
+ writew(cmd, ofsAddr + FuncCode);
+ moxa_wait_finish(ofsAddr);
+ ret = readw(ofsAddr + FuncArg);
+ spin_unlock_irqrestore(&moxafunc_lock, flags);
+ return ret;
+}
+
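+/*
+ * If the port is in XOFF state and its receive queue has drained below the
+ * low-water mark, ask the firmware to send XON again.
+ */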
+static void moxa_low_water_check(void __iomem *ofsAddr)
+{
+ u16 rptr, wptr, mask, len;
+
+ if (readb(ofsAddr + FlagStat) & Xoff_state) {
+ rptr = readw(ofsAddr + RXrptr);
+ wptr = readw(ofsAddr + RXwptr);
+ mask = readw(ofsAddr + RX_mask);
+ len = (wptr - rptr) & mask;
+ if (len <= Low_water)
+ moxafunc(ofsAddr, FC_SendXon, 0);
+ }
+}
+
+/*
+ * TTY operations
+ */
+
+static int moxa_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct moxa_port *ch = tty->driver_data;
+ void __user *argp = (void __user *)arg;
+ int status, ret = 0;
+
+ if (tty->index == MAX_PORTS) {
+ if (cmd != MOXA_GETDATACOUNT && cmd != MOXA_GET_IOQUEUE &&
+ cmd != MOXA_GETMSTATUS)
+ return -EINVAL;
+ } else if (!ch)
+ return -ENODEV;
+
+ switch (cmd) {
+ case MOXA_GETDATACOUNT:
+ moxaLog.tick = jiffies;
+ if (copy_to_user(argp, &moxaLog, sizeof(moxaLog)))
+ ret = -EFAULT;
+ break;
+ case MOXA_FLUSH_QUEUE:
+ MoxaPortFlushData(ch, arg);
+ break;
+ case MOXA_GET_IOQUEUE: {
+ struct moxaq_str __user *argm = argp;
+ struct moxaq_str tmp;
+ struct moxa_port *p;
+ unsigned int i, j;
+
+ for (i = 0; i < MAX_BOARDS; i++) {
+ p = moxa_boards[i].ports;
+ for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
+ memset(&tmp, 0, sizeof(tmp));
+ spin_lock_bh(&moxa_lock);
+ if (moxa_boards[i].ready) {
+ tmp.inq = MoxaPortRxQueue(p);
+ tmp.outq = MoxaPortTxQueue(p);
+ }
+ spin_unlock_bh(&moxa_lock);
+ if (copy_to_user(argm, &tmp, sizeof(tmp)))
+ return -EFAULT;
+ }
+ }
+ break;
+ } case MOXA_GET_OQUEUE:
+ status = MoxaPortTxQueue(ch);
+ ret = put_user(status, (unsigned long __user *)argp);
+ break;
+ case MOXA_GET_IQUEUE:
+ status = MoxaPortRxQueue(ch);
+ ret = put_user(status, (unsigned long __user *)argp);
+ break;
+ case MOXA_GETMSTATUS: {
+ struct mxser_mstatus __user *argm = argp;
+ struct mxser_mstatus tmp;
+ struct moxa_port *p;
+ unsigned int i, j;
+
+ for (i = 0; i < MAX_BOARDS; i++) {
+ p = moxa_boards[i].ports;
+ for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
+ struct tty_struct *ttyp;
+ memset(&tmp, 0, sizeof(tmp));
+ spin_lock_bh(&moxa_lock);
+ if (!moxa_boards[i].ready) {
+ spin_unlock_bh(&moxa_lock);
+ goto copy;
+ }
+
+ status = MoxaPortLineStatus(p);
+ spin_unlock_bh(&moxa_lock);
+
+ if (status & 1)
+ tmp.cts = 1;
+ if (status & 2)
+ tmp.dsr = 1;
+ if (status & 4)
+ tmp.dcd = 1;
+
+ ttyp = tty_port_tty_get(&p->port);
+ if (!ttyp || !ttyp->termios)
+ tmp.cflag = p->cflag;
+ else
+ tmp.cflag = ttyp->termios->c_cflag;
+ tty_kref_put(ttyp);
+copy:
+ if (copy_to_user(argm, &tmp, sizeof(tmp)))
+ return -EFAULT;
+ }
+ }
+ break;
+ }
+ case TIOCGSERIAL:
+ mutex_lock(&ch->port.mutex);
+ ret = moxa_get_serial_info(ch, argp);
+ mutex_unlock(&ch->port.mutex);
+ break;
+ case TIOCSSERIAL:
+ mutex_lock(&ch->port.mutex);
+ ret = moxa_set_serial_info(ch, argp);
+ mutex_unlock(&ch->port.mutex);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+ return ret;
+}
+
+static int moxa_break_ctl(struct tty_struct *tty, int state)
+{
+ struct moxa_port *port = tty->driver_data;
+
+ moxafunc(port->tableAddr, state ? FC_SendBreak : FC_StopBreak,
+ Magic_code);
+ return 0;
+}
+
+static const struct tty_operations moxa_ops = {
+ .open = moxa_open,
+ .close = moxa_close,
+ .write = moxa_write,
+ .write_room = moxa_write_room,
+ .flush_buffer = moxa_flush_buffer,
+ .chars_in_buffer = moxa_chars_in_buffer,
+ .ioctl = moxa_ioctl,
+ .set_termios = moxa_set_termios,
+ .stop = moxa_stop,
+ .start = moxa_start,
+ .hangup = moxa_hangup,
+ .break_ctl = moxa_break_ctl,
+ .tiocmget = moxa_tiocmget,
+ .tiocmset = moxa_tiocmset,
+};
+
+static const struct tty_port_operations moxa_port_ops = {
+ .carrier_raised = moxa_carrier_raised,
+ .dtr_rts = moxa_dtr_rts,
+ .shutdown = moxa_shutdown,
+};
+
+static struct tty_driver *moxaDriver;
+static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0);
+
+/*
+ * HW init
+ */
+
+static int moxa_check_fw_model(struct moxa_board_conf *brd, u8 model)
+{
+ switch (brd->boardType) {
+ case MOXA_BOARD_C218_ISA:
+ case MOXA_BOARD_C218_PCI:
+ if (model != 1)
+ goto err;
+ break;
+ case MOXA_BOARD_CP204J:
+ if (model != 3)
+ goto err;
+ break;
+ default:
+ if (model != 2)
+ goto err;
+ break;
+ }
+ return 0;
+err:
+ return -EINVAL;
+}
+
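+/* each firmware section is expected to start with the marker word 0x7980 */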
+static int moxa_check_fw(const void *ptr)
+{
+ const __le16 *lptr = ptr;
+
+ if (*lptr != cpu_to_le16(0x7980))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int moxa_load_bios(struct moxa_board_conf *brd, const u8 *buf,
+ size_t len)
+{
+ void __iomem *baseAddr = brd->basemem;
+ u16 tmp;
+
+ writeb(HW_reset, baseAddr + Control_reg); /* reset */
+ msleep(10);
+ memset_io(baseAddr, 0, 4096);
+ memcpy_toio(baseAddr, buf, len); /* download BIOS */
+ writeb(0, baseAddr + Control_reg); /* restart */
+
+ msleep(2000);
+
+ switch (brd->boardType) {
+ case MOXA_BOARD_C218_ISA:
+ case MOXA_BOARD_C218_PCI:
+ tmp = readw(baseAddr + C218_key);
+ if (tmp != C218_KeyCode)
+ goto err;
+ break;
+ case MOXA_BOARD_CP204J:
+ tmp = readw(baseAddr + C218_key);
+ if (tmp != CP204J_KeyCode)
+ goto err;
+ break;
+ default:
+ tmp = readw(baseAddr + C320_key);
+ if (tmp != C320_KeyCode)
+ goto err;
+ tmp = readw(baseAddr + C320_status);
+ if (tmp != STS_init) {
+ printk(KERN_ERR "MOXA: bios upload failed -- CPU/Basic "
+ "module not found\n");
+ return -EIO;
+ }
+ break;
+ }
+
+ return 0;
+err:
+ printk(KERN_ERR "MOXA: bios upload failed -- board not found\n");
+ return -EIO;
+}
+
+static int moxa_load_320b(struct moxa_board_conf *brd, const u8 *ptr,
+ size_t len)
+{
+ void __iomem *baseAddr = brd->basemem;
+
+ if (len < 7168) {
+ printk(KERN_ERR "MOXA: invalid 320 bios -- too short\n");
+ return -EINVAL;
+ }
+
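+	/*
+	 * The image spans two dynamic pages: record the length of the
+	 * trailing part, then copy the first 7168 bytes through page 1 and
+	 * the remainder through page 2.
+	 */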
+ writew(len - 7168 - 2, baseAddr + C320bapi_len);
+ writeb(1, baseAddr + Control_reg); /* Select Page 1 */
+ memcpy_toio(baseAddr + DynPage_addr, ptr, 7168);
+ writeb(2, baseAddr + Control_reg); /* Select Page 2 */
+ memcpy_toio(baseAddr + DynPage_addr, ptr + 7168, len - 7168);
+
+ return 0;
+}
+
+static int moxa_real_load_code(struct moxa_board_conf *brd, const void *ptr,
+ size_t len)
+{
+ void __iomem *baseAddr = brd->basemem;
+ const __le16 *uptr = ptr;
+ size_t wlen, len2, j;
+ unsigned long key, loadbuf, loadlen, checksum, checksum_ok;
+ unsigned int i, retry;
+ u16 usum, keycode;
+
+ keycode = (brd->boardType == MOXA_BOARD_CP204J) ? CP204J_KeyCode :
+ C218_KeyCode;
+
+ switch (brd->boardType) {
+ case MOXA_BOARD_CP204J:
+ case MOXA_BOARD_C218_ISA:
+ case MOXA_BOARD_C218_PCI:
+ key = C218_key;
+ loadbuf = C218_LoadBuf;
+ loadlen = C218DLoad_len;
+ checksum = C218check_sum;
+ checksum_ok = C218chksum_ok;
+ break;
+ default:
+ key = C320_key;
+ keycode = C320_KeyCode;
+ loadbuf = C320_LoadBuf;
+ loadlen = C320DLoad_len;
+ checksum = C320check_sum;
+ checksum_ok = C320chksum_ok;
+ break;
+ }
+
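+	/*
+	 * Compute the 16-bit word sum of the image, upload it in chunks of
+	 * at most 2048 words and let the firmware verify the checksum;
+	 * retry the whole transfer up to three times.
+	 */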
+ usum = 0;
+ wlen = len >> 1;
+ for (i = 0; i < wlen; i++)
+ usum += le16_to_cpu(uptr[i]);
+ retry = 0;
+ do {
+ wlen = len >> 1;
+ j = 0;
+ while (wlen) {
+ len2 = (wlen > 2048) ? 2048 : wlen;
+ wlen -= len2;
+ memcpy_toio(baseAddr + loadbuf, ptr + j, len2 << 1);
+ j += len2 << 1;
+
+ writew(len2, baseAddr + loadlen);
+ writew(0, baseAddr + key);
+ for (i = 0; i < 100; i++) {
+ if (readw(baseAddr + key) == keycode)
+ break;
+ msleep(10);
+ }
+ if (readw(baseAddr + key) != keycode)
+ return -EIO;
+ }
+ writew(0, baseAddr + loadlen);
+ writew(usum, baseAddr + checksum);
+ writew(0, baseAddr + key);
+ for (i = 0; i < 100; i++) {
+ if (readw(baseAddr + key) == keycode)
+ break;
+ msleep(10);
+ }
+ retry++;
+ } while ((readb(baseAddr + checksum_ok) != 1) && (retry < 3));
+ if (readb(baseAddr + checksum_ok) != 1)
+ return -EIO;
+
+ writew(0, baseAddr + key);
+ for (i = 0; i < 600; i++) {
+ if (readw(baseAddr + Magic_no) == Magic_code)
+ break;
+ msleep(10);
+ }
+ if (readw(baseAddr + Magic_no) != Magic_code)
+ return -EIO;
+
+ if (MOXA_IS_320(brd)) {
+ if (brd->busType == MOXA_BUS_TYPE_PCI) { /* ASIC board */
+ writew(0x3800, baseAddr + TMS320_PORT1);
+ writew(0x3900, baseAddr + TMS320_PORT2);
+ writew(28499, baseAddr + TMS320_CLOCK);
+ } else {
+ writew(0x3200, baseAddr + TMS320_PORT1);
+ writew(0x3400, baseAddr + TMS320_PORT2);
+ writew(19999, baseAddr + TMS320_CLOCK);
+ }
+ }
+ writew(1, baseAddr + Disable_IRQ);
+ writew(0, baseAddr + Magic_no);
+ for (i = 0; i < 500; i++) {
+ if (readw(baseAddr + Magic_no) == Magic_code)
+ break;
+ msleep(10);
+ }
+ if (readw(baseAddr + Magic_no) != Magic_code)
+ return -EIO;
+
+ if (MOXA_IS_320(brd)) {
+ j = readw(baseAddr + Module_cnt);
+ if (j <= 0)
+ return -EIO;
+ brd->numPorts = j * 8;
+ writew(j, baseAddr + Module_no);
+ writew(0, baseAddr + Magic_no);
+ for (i = 0; i < 600; i++) {
+ if (readw(baseAddr + Magic_no) == Magic_code)
+ break;
+ msleep(10);
+ }
+ if (readw(baseAddr + Magic_no) != Magic_code)
+ return -EIO;
+ }
+ brd->intNdx = baseAddr + IRQindex;
+ brd->intPend = baseAddr + IRQpending;
+ brd->intTable = baseAddr + IRQtable;
+
+ return 0;
+}
+
+static int moxa_load_code(struct moxa_board_conf *brd, const void *ptr,
+ size_t len)
+{
+ void __iomem *ofsAddr, *baseAddr = brd->basemem;
+ struct moxa_port *port;
+ int retval, i;
+
+ if (len % 2) {
+ printk(KERN_ERR "MOXA: bios length is not even\n");
+ return -EINVAL;
+ }
+
+ retval = moxa_real_load_code(brd, ptr, len); /* may change numPorts */
+ if (retval)
+ return retval;
+
+ switch (brd->boardType) {
+ case MOXA_BOARD_C218_ISA:
+ case MOXA_BOARD_C218_PCI:
+ case MOXA_BOARD_CP204J:
+ port = brd->ports;
+ for (i = 0; i < brd->numPorts; i++, port++) {
+ port->board = brd;
+ port->DCDState = 0;
+ port->tableAddr = baseAddr + Extern_table +
+ Extern_size * i;
+ ofsAddr = port->tableAddr;
+ writew(C218rx_mask, ofsAddr + RX_mask);
+ writew(C218tx_mask, ofsAddr + TX_mask);
+ writew(C218rx_spage + i * C218buf_pageno, ofsAddr + Page_rxb);
+ writew(readw(ofsAddr + Page_rxb) + C218rx_pageno, ofsAddr + EndPage_rxb);
+
+ writew(C218tx_spage + i * C218buf_pageno, ofsAddr + Page_txb);
+ writew(readw(ofsAddr + Page_txb) + C218tx_pageno, ofsAddr + EndPage_txb);
+
+ }
+ break;
+ default:
+ port = brd->ports;
+ for (i = 0; i < brd->numPorts; i++, port++) {
+ port->board = brd;
+ port->DCDState = 0;
+ port->tableAddr = baseAddr + Extern_table +
+ Extern_size * i;
+ ofsAddr = port->tableAddr;
+ switch (brd->numPorts) {
+ case 8:
+ writew(C320p8rx_mask, ofsAddr + RX_mask);
+ writew(C320p8tx_mask, ofsAddr + TX_mask);
+ writew(C320p8rx_spage + i * C320p8buf_pgno, ofsAddr + Page_rxb);
+ writew(readw(ofsAddr + Page_rxb) + C320p8rx_pgno, ofsAddr + EndPage_rxb);
+ writew(C320p8tx_spage + i * C320p8buf_pgno, ofsAddr + Page_txb);
+ writew(readw(ofsAddr + Page_txb) + C320p8tx_pgno, ofsAddr + EndPage_txb);
+
+ break;
+ case 16:
+ writew(C320p16rx_mask, ofsAddr + RX_mask);
+ writew(C320p16tx_mask, ofsAddr + TX_mask);
+ writew(C320p16rx_spage + i * C320p16buf_pgno, ofsAddr + Page_rxb);
+ writew(readw(ofsAddr + Page_rxb) + C320p16rx_pgno, ofsAddr + EndPage_rxb);
+ writew(C320p16tx_spage + i * C320p16buf_pgno, ofsAddr + Page_txb);
+ writew(readw(ofsAddr + Page_txb) + C320p16tx_pgno, ofsAddr + EndPage_txb);
+ break;
+
+ case 24:
+ writew(C320p24rx_mask, ofsAddr + RX_mask);
+ writew(C320p24tx_mask, ofsAddr + TX_mask);
+ writew(C320p24rx_spage + i * C320p24buf_pgno, ofsAddr + Page_rxb);
+ writew(readw(ofsAddr + Page_rxb) + C320p24rx_pgno, ofsAddr + EndPage_rxb);
+ writew(C320p24tx_spage + i * C320p24buf_pgno, ofsAddr + Page_txb);
+ writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb);
+ break;
+ case 32:
+ writew(C320p32rx_mask, ofsAddr + RX_mask);
+ writew(C320p32tx_mask, ofsAddr + TX_mask);
+ writew(C320p32tx_ofs, ofsAddr + Ofs_txb);
+ writew(C320p32rx_spage + i * C320p32buf_pgno, ofsAddr + Page_rxb);
+ writew(readb(ofsAddr + Page_rxb), ofsAddr + EndPage_rxb);
+ writew(C320p32tx_spage + i * C320p32buf_pgno, ofsAddr + Page_txb);
+ writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb);
+ break;
+ }
+ }
+ break;
+ }
+ return 0;
+}
+
+static int moxa_load_fw(struct moxa_board_conf *brd, const struct firmware *fw)
+{
+ const void *ptr = fw->data;
+ char rsn[64];
+ u16 lens[5];
+ size_t len;
+ unsigned int a, lenp, lencnt;
+ int ret = -EINVAL;
+ struct {
+ __le32 magic; /* 0x34303430 */
+ u8 reserved1[2];
+ u8 type; /* UNIX = 3 */
+ u8 model; /* C218T=1, C320T=2, CP204=3 */
+ u8 reserved2[8];
+ __le16 len[5];
+ } const *hdr = ptr;
+
+ BUILD_BUG_ON(ARRAY_SIZE(hdr->len) != ARRAY_SIZE(lens));
+
+ if (fw->size < MOXA_FW_HDRLEN) {
+ strcpy(rsn, "too short (even header won't fit)");
+ goto err;
+ }
+ if (hdr->magic != cpu_to_le32(0x30343034)) {
+ sprintf(rsn, "bad magic: %.8x", le32_to_cpu(hdr->magic));
+ goto err;
+ }
+ if (hdr->type != 3) {
+ sprintf(rsn, "not for linux, type is %u", hdr->type);
+ goto err;
+ }
+ if (moxa_check_fw_model(brd, hdr->model)) {
+ sprintf(rsn, "not for this card, model is %u", hdr->model);
+ goto err;
+ }
+
+ len = MOXA_FW_HDRLEN;
+ lencnt = hdr->model == 2 ? 5 : 3;
+ for (a = 0; a < ARRAY_SIZE(lens); a++) {
+ lens[a] = le16_to_cpu(hdr->len[a]);
+ if (lens[a] && len + lens[a] <= fw->size &&
+ moxa_check_fw(&fw->data[len]))
+ printk(KERN_WARNING "MOXA firmware: unexpected input "
+ "at offset %u, but going on\n", (u32)len);
+ if (!lens[a] && a < lencnt) {
+ sprintf(rsn, "too few entries in fw file");
+ goto err;
+ }
+ len += lens[a];
+ }
+
+ if (len != fw->size) {
+ sprintf(rsn, "bad length: %u (should be %u)", (u32)fw->size,
+ (u32)len);
+ goto err;
+ }
+
+ ptr += MOXA_FW_HDRLEN;
+ lenp = 0; /* bios */
+
+ strcpy(rsn, "read above");
+
+ ret = moxa_load_bios(brd, ptr, lens[lenp]);
+ if (ret)
+ goto err;
+
+ /* we skip the tty section (lens[1]), since we don't need it */
+ ptr += lens[lenp] + lens[lenp + 1];
+ lenp += 2; /* comm */
+
+ if (hdr->model == 2) {
+ ret = moxa_load_320b(brd, ptr, lens[lenp]);
+ if (ret)
+ goto err;
+ /* skip another tty */
+ ptr += lens[lenp] + lens[lenp + 1];
+ lenp += 2;
+ }
+
+ ret = moxa_load_code(brd, ptr, lens[lenp]);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ printk(KERN_ERR "firmware failed to load, reason: %s\n", rsn);
+ return ret;
+}
+
+static int moxa_init_board(struct moxa_board_conf *brd, struct device *dev)
+{
+ const struct firmware *fw;
+ const char *file;
+ struct moxa_port *p;
+ unsigned int i;
+ int ret;
+
+ brd->ports = kcalloc(MAX_PORTS_PER_BOARD, sizeof(*brd->ports),
+ GFP_KERNEL);
+ if (brd->ports == NULL) {
+ printk(KERN_ERR "cannot allocate memory for ports\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0, p = brd->ports; i < MAX_PORTS_PER_BOARD; i++, p++) {
+ tty_port_init(&p->port);
+ p->port.ops = &moxa_port_ops;
+ p->type = PORT_16550A;
+ p->cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL;
+ }
+
+ switch (brd->boardType) {
+ case MOXA_BOARD_C218_ISA:
+ case MOXA_BOARD_C218_PCI:
+ file = "c218tunx.cod";
+ break;
+ case MOXA_BOARD_CP204J:
+ file = "cp204unx.cod";
+ break;
+ default:
+ file = "c320tunx.cod";
+ break;
+ }
+
+ ret = request_firmware(&fw, file, dev);
+ if (ret) {
+ printk(KERN_ERR "MOXA: request_firmware failed. Make sure "
+ "you've placed '%s' file into your firmware "
+ "loader directory (e.g. /lib/firmware)\n",
+ file);
+ goto err_free;
+ }
+
+ ret = moxa_load_fw(brd, fw);
+
+ release_firmware(fw);
+
+ if (ret)
+ goto err_free;
+
+ spin_lock_bh(&moxa_lock);
+ brd->ready = 1;
+ if (!timer_pending(&moxaTimer))
+ mod_timer(&moxaTimer, jiffies + HZ / 50);
+ spin_unlock_bh(&moxa_lock);
+
+ return 0;
+err_free:
+ kfree(brd->ports);
+err:
+ return ret;
+}
+
+static void moxa_board_deinit(struct moxa_board_conf *brd)
+{
+ unsigned int a, opened;
+
+ mutex_lock(&moxa_openlock);
+ spin_lock_bh(&moxa_lock);
+ brd->ready = 0;
+ spin_unlock_bh(&moxa_lock);
+
+ /* pci hot-un-plug support */
+ for (a = 0; a < brd->numPorts; a++)
+ if (brd->ports[a].port.flags & ASYNC_INITIALIZED) {
+ struct tty_struct *tty = tty_port_tty_get(
+ &brd->ports[a].port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+ }
+ while (1) {
+ opened = 0;
+ for (a = 0; a < brd->numPorts; a++)
+ if (brd->ports[a].port.flags & ASYNC_INITIALIZED)
+ opened++;
+ mutex_unlock(&moxa_openlock);
+ if (!opened)
+ break;
+ msleep(50);
+ mutex_lock(&moxa_openlock);
+ }
+
+ iounmap(brd->basemem);
+ brd->basemem = NULL;
+ kfree(brd->ports);
+}
+
+#ifdef CONFIG_PCI
+static int __devinit moxa_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct moxa_board_conf *board;
+ unsigned int i;
+ int board_type = ent->driver_data;
+ int retval;
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "can't enable pci device\n");
+ goto err;
+ }
+
+ for (i = 0; i < MAX_BOARDS; i++)
+ if (moxa_boards[i].basemem == NULL)
+ break;
+
+ retval = -ENODEV;
+ if (i >= MAX_BOARDS) {
+ dev_warn(&pdev->dev, "more than %u MOXA Intellio family boards "
+ "found. Board is ignored.\n", MAX_BOARDS);
+ goto err;
+ }
+
+ board = &moxa_boards[i];
+
+ retval = pci_request_region(pdev, 2, "moxa-base");
+ if (retval) {
+ dev_err(&pdev->dev, "can't request pci region 2\n");
+ goto err;
+ }
+
+ board->basemem = ioremap_nocache(pci_resource_start(pdev, 2), 0x4000);
+ if (board->basemem == NULL) {
+ dev_err(&pdev->dev, "can't remap io space 2\n");
+ goto err_reg;
+ }
+
+ board->boardType = board_type;
+ switch (board_type) {
+ case MOXA_BOARD_C218_ISA:
+ case MOXA_BOARD_C218_PCI:
+ board->numPorts = 8;
+ break;
+
+ case MOXA_BOARD_CP204J:
+ board->numPorts = 4;
+ break;
+ default:
+ board->numPorts = 0;
+ break;
+ }
+ board->busType = MOXA_BUS_TYPE_PCI;
+
+ retval = moxa_init_board(board, &pdev->dev);
+ if (retval)
+ goto err_base;
+
+ pci_set_drvdata(pdev, board);
+
+ dev_info(&pdev->dev, "board '%s' ready (%u ports, firmware loaded)\n",
+ moxa_brdname[board_type - 1], board->numPorts);
+
+ return 0;
+err_base:
+ iounmap(board->basemem);
+ board->basemem = NULL;
+err_reg:
+ pci_release_region(pdev, 2);
+err:
+ return retval;
+}
+
+static void __devexit moxa_pci_remove(struct pci_dev *pdev)
+{
+ struct moxa_board_conf *brd = pci_get_drvdata(pdev);
+
+ moxa_board_deinit(brd);
+
+ pci_release_region(pdev, 2);
+}
+
+static struct pci_driver moxa_pci_driver = {
+ .name = "moxa",
+ .id_table = moxa_pcibrds,
+ .probe = moxa_pci_probe,
+ .remove = __devexit_p(moxa_pci_remove)
+};
+#endif /* CONFIG_PCI */
+
+static int __init moxa_init(void)
+{
+ unsigned int isabrds = 0;
+ int retval = 0;
+ struct moxa_board_conf *brd = moxa_boards;
+ unsigned int i;
+
+ printk(KERN_INFO "MOXA Intellio family driver version %s\n",
+ MOXA_VERSION);
+ moxaDriver = alloc_tty_driver(MAX_PORTS + 1);
+ if (!moxaDriver)
+ return -ENOMEM;
+
+ moxaDriver->owner = THIS_MODULE;
+ moxaDriver->name = "ttyMX";
+ moxaDriver->major = ttymajor;
+ moxaDriver->minor_start = 0;
+ moxaDriver->type = TTY_DRIVER_TYPE_SERIAL;
+ moxaDriver->subtype = SERIAL_TYPE_NORMAL;
+ moxaDriver->init_termios = tty_std_termios;
+ moxaDriver->init_termios.c_cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL;
+ moxaDriver->init_termios.c_ispeed = 9600;
+ moxaDriver->init_termios.c_ospeed = 9600;
+ moxaDriver->flags = TTY_DRIVER_REAL_RAW;
+ tty_set_operations(moxaDriver, &moxa_ops);
+
+ if (tty_register_driver(moxaDriver)) {
+ printk(KERN_ERR "can't register MOXA Smartio tty driver!\n");
+ put_tty_driver(moxaDriver);
+ return -1;
+ }
+
+ /* Find the boards defined from module args. */
+
+ for (i = 0; i < MAX_BOARDS; i++) {
+ if (!baseaddr[i])
+ break;
+ if (type[i] == MOXA_BOARD_C218_ISA ||
+ type[i] == MOXA_BOARD_C320_ISA) {
+ pr_debug("Moxa board %2d: %s board(baseAddr=%lx)\n",
+ isabrds + 1, moxa_brdname[type[i] - 1],
+ baseaddr[i]);
+ brd->boardType = type[i];
+ brd->numPorts = type[i] == MOXA_BOARD_C218_ISA ? 8 :
+ numports[i];
+ brd->busType = MOXA_BUS_TYPE_ISA;
+ brd->basemem = ioremap_nocache(baseaddr[i], 0x4000);
+ if (!brd->basemem) {
+ printk(KERN_ERR "MOXA: can't remap %lx\n",
+ baseaddr[i]);
+ continue;
+ }
+ if (moxa_init_board(brd, NULL)) {
+ iounmap(brd->basemem);
+ brd->basemem = NULL;
+ continue;
+ }
+
+			printk(KERN_INFO "MOXA ISA board found at 0x%.8lx and "
+ "ready (%u ports, firmware loaded)\n",
+ baseaddr[i], brd->numPorts);
+
+ brd++;
+ isabrds++;
+ }
+ }
+
+#ifdef CONFIG_PCI
+ retval = pci_register_driver(&moxa_pci_driver);
+ if (retval) {
+ printk(KERN_ERR "Can't register MOXA pci driver!\n");
+ if (isabrds)
+ retval = 0;
+ }
+#endif
+
+ return retval;
+}
+
+static void __exit moxa_exit(void)
+{
+ unsigned int i;
+
+#ifdef CONFIG_PCI
+ pci_unregister_driver(&moxa_pci_driver);
+#endif
+
+ for (i = 0; i < MAX_BOARDS; i++) /* ISA boards */
+ if (moxa_boards[i].ready)
+ moxa_board_deinit(&moxa_boards[i]);
+
+ del_timer_sync(&moxaTimer);
+
+ if (tty_unregister_driver(moxaDriver))
+ printk(KERN_ERR "Couldn't unregister MOXA Intellio family "
+ "serial driver\n");
+ put_tty_driver(moxaDriver);
+}
+
+module_init(moxa_init);
+module_exit(moxa_exit);
+
+static void moxa_shutdown(struct tty_port *port)
+{
+ struct moxa_port *ch = container_of(port, struct moxa_port, port);
+ MoxaPortDisable(ch);
+ MoxaPortFlushData(ch, 2);
+}
+
+static int moxa_carrier_raised(struct tty_port *port)
+{
+ struct moxa_port *ch = container_of(port, struct moxa_port, port);
+ int dcd;
+
+ spin_lock_irq(&port->lock);
+ dcd = ch->DCDState;
+ spin_unlock_irq(&port->lock);
+ return dcd;
+}
+
+static void moxa_dtr_rts(struct tty_port *port, int onoff)
+{
+ struct moxa_port *ch = container_of(port, struct moxa_port, port);
+ MoxaPortLineCtrl(ch, onoff, onoff);
+}
+
+
+static int moxa_open(struct tty_struct *tty, struct file *filp)
+{
+ struct moxa_board_conf *brd;
+ struct moxa_port *ch;
+ int port;
+
+ port = tty->index;
+ if (port == MAX_PORTS) {
+ return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
+ }
+ if (mutex_lock_interruptible(&moxa_openlock))
+ return -ERESTARTSYS;
+ brd = &moxa_boards[port / MAX_PORTS_PER_BOARD];
+ if (!brd->ready) {
+ mutex_unlock(&moxa_openlock);
+ return -ENODEV;
+ }
+
+ if (port % MAX_PORTS_PER_BOARD >= brd->numPorts) {
+ mutex_unlock(&moxa_openlock);
+ return -ENODEV;
+ }
+
+ ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
+ ch->port.count++;
+ tty->driver_data = ch;
+ tty_port_tty_set(&ch->port, tty);
+ mutex_lock(&ch->port.mutex);
+ if (!(ch->port.flags & ASYNC_INITIALIZED)) {
+ ch->statusflags = 0;
+ moxa_set_tty_param(tty, tty->termios);
+ MoxaPortLineCtrl(ch, 1, 1);
+ MoxaPortEnable(ch);
+ MoxaSetFifo(ch, ch->type == PORT_16550A);
+ ch->port.flags |= ASYNC_INITIALIZED;
+ }
+ mutex_unlock(&ch->port.mutex);
+ mutex_unlock(&moxa_openlock);
+
+ return tty_port_block_til_ready(&ch->port, tty, filp);
+}
+
+static void moxa_close(struct tty_struct *tty, struct file *filp)
+{
+ struct moxa_port *ch = tty->driver_data;
+ ch->cflag = tty->termios->c_cflag;
+ tty_port_close(&ch->port, tty, filp);
+}
+
+static int moxa_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ struct moxa_port *ch = tty->driver_data;
+ unsigned long flags;
+ int len;
+
+ if (ch == NULL)
+ return 0;
+
+ spin_lock_irqsave(&moxa_lock, flags);
+ len = MoxaPortWriteData(tty, buf, count);
+ spin_unlock_irqrestore(&moxa_lock, flags);
+
+ set_bit(LOWWAIT, &ch->statusflags);
+ return len;
+}
+
+static int moxa_write_room(struct tty_struct *tty)
+{
+ struct moxa_port *ch;
+
+ if (tty->stopped)
+ return 0;
+ ch = tty->driver_data;
+ if (ch == NULL)
+ return 0;
+ return MoxaPortTxFree(ch);
+}
+
+static void moxa_flush_buffer(struct tty_struct *tty)
+{
+ struct moxa_port *ch = tty->driver_data;
+
+ if (ch == NULL)
+ return;
+ MoxaPortFlushData(ch, 1);
+ tty_wakeup(tty);
+}
+
+static int moxa_chars_in_buffer(struct tty_struct *tty)
+{
+ struct moxa_port *ch = tty->driver_data;
+ int chars;
+
+ chars = MoxaPortTxQueue(ch);
+ if (chars)
+ /*
+ * Make it possible to wakeup anything waiting for output
+ * in tty_ioctl.c, etc.
+ */
+ set_bit(EMPTYWAIT, &ch->statusflags);
+ return chars;
+}
+
+static int moxa_tiocmget(struct tty_struct *tty)
+{
+ struct moxa_port *ch = tty->driver_data;
+ int flag = 0, dtr, rts;
+
+ MoxaPortGetLineOut(ch, &dtr, &rts);
+ if (dtr)
+ flag |= TIOCM_DTR;
+ if (rts)
+ flag |= TIOCM_RTS;
+ dtr = MoxaPortLineStatus(ch);
+ if (dtr & 1)
+ flag |= TIOCM_CTS;
+ if (dtr & 2)
+ flag |= TIOCM_DSR;
+ if (dtr & 4)
+ flag |= TIOCM_CD;
+ return flag;
+}
+
+static int moxa_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct moxa_port *ch;
+ int dtr, rts;
+
+ mutex_lock(&moxa_openlock);
+ ch = tty->driver_data;
+ if (!ch) {
+ mutex_unlock(&moxa_openlock);
+ return -EINVAL;
+ }
+
+ MoxaPortGetLineOut(ch, &dtr, &rts);
+ if (set & TIOCM_RTS)
+ rts = 1;
+ if (set & TIOCM_DTR)
+ dtr = 1;
+ if (clear & TIOCM_RTS)
+ rts = 0;
+ if (clear & TIOCM_DTR)
+ dtr = 0;
+ MoxaPortLineCtrl(ch, dtr, rts);
+ mutex_unlock(&moxa_openlock);
+ return 0;
+}
+
+static void moxa_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios)
+{
+ struct moxa_port *ch = tty->driver_data;
+
+ if (ch == NULL)
+ return;
+ moxa_set_tty_param(tty, old_termios);
+ if (!(old_termios->c_cflag & CLOCAL) && C_CLOCAL(tty))
+ wake_up_interruptible(&ch->port.open_wait);
+}
+
+static void moxa_stop(struct tty_struct *tty)
+{
+ struct moxa_port *ch = tty->driver_data;
+
+ if (ch == NULL)
+ return;
+ MoxaPortTxDisable(ch);
+ set_bit(TXSTOPPED, &ch->statusflags);
+}
+
+
+static void moxa_start(struct tty_struct *tty)
+{
+ struct moxa_port *ch = tty->driver_data;
+
+ if (ch == NULL)
+ return;
+
+ if (!test_bit(TXSTOPPED, &ch->statusflags))
+ return;
+
+ MoxaPortTxEnable(ch);
+ clear_bit(TXSTOPPED, &ch->statusflags);
+}
+
+static void moxa_hangup(struct tty_struct *tty)
+{
+ struct moxa_port *ch = tty->driver_data;
+ tty_port_hangup(&ch->port);
+}
+
+static void moxa_new_dcdstate(struct moxa_port *p, u8 dcd)
+{
+ struct tty_struct *tty;
+ unsigned long flags;
+ dcd = !!dcd;
+
+ spin_lock_irqsave(&p->port.lock, flags);
+ if (dcd != p->DCDState) {
+ p->DCDState = dcd;
+ spin_unlock_irqrestore(&p->port.lock, flags);
+ tty = tty_port_tty_get(&p->port);
+ if (tty && !C_CLOCAL(tty) && !dcd)
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+ else
+ spin_unlock_irqrestore(&p->port.lock, flags);
+}
+
+static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
+ u16 __iomem *ip)
+{
+ struct tty_struct *tty = tty_port_tty_get(&p->port);
+ void __iomem *ofsAddr;
+ unsigned int inited = p->port.flags & ASYNC_INITIALIZED;
+ u16 intr;
+
+ if (tty) {
+ if (test_bit(EMPTYWAIT, &p->statusflags) &&
+ MoxaPortTxQueue(p) == 0) {
+ clear_bit(EMPTYWAIT, &p->statusflags);
+ tty_wakeup(tty);
+ }
+ if (test_bit(LOWWAIT, &p->statusflags) && !tty->stopped &&
+ MoxaPortTxQueue(p) <= WAKEUP_CHARS) {
+ clear_bit(LOWWAIT, &p->statusflags);
+ tty_wakeup(tty);
+ }
+
+ if (inited && !test_bit(TTY_THROTTLED, &tty->flags) &&
+ MoxaPortRxQueue(p) > 0) { /* RX */
+ MoxaPortReadData(p);
+ tty_schedule_flip(tty);
+ }
+ } else {
+ clear_bit(EMPTYWAIT, &p->statusflags);
+ MoxaPortFlushData(p, 0); /* flush RX */
+ }
+
+ if (!handle) /* nothing else to do */
+ goto put;
+
+ intr = readw(ip); /* port irq status */
+ if (intr == 0)
+ goto put;
+
+ writew(0, ip); /* ACK port */
+ ofsAddr = p->tableAddr;
+ if (intr & IntrTx) /* disable tx intr */
+ writew(readw(ofsAddr + HostStat) & ~WakeupTx,
+ ofsAddr + HostStat);
+
+ if (!inited)
+ goto put;
+
+ if (tty && (intr & IntrBreak) && !I_IGNBRK(tty)) { /* BREAK */
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
+ tty_schedule_flip(tty);
+ }
+
+ if (intr & IntrLine)
+ moxa_new_dcdstate(p, readb(ofsAddr + FlagStat) & DCD_state);
+put:
+ tty_kref_put(tty);
+
+ return 0;
+}
+
+static void moxa_poll(unsigned long ignored)
+{
+ struct moxa_board_conf *brd;
+ u16 __iomem *ip;
+ unsigned int card, port, served = 0;
+
+ spin_lock(&moxa_lock);
+ for (card = 0; card < MAX_BOARDS; card++) {
+ brd = &moxa_boards[card];
+ if (!brd->ready)
+ continue;
+
+ served++;
+
+ ip = NULL;
+ if (readb(brd->intPend) == 0xff)
+ ip = brd->intTable + readb(brd->intNdx);
+
+ for (port = 0; port < brd->numPorts; port++)
+ moxa_poll_port(&brd->ports[port], !!ip, ip + port);
+
+ if (ip)
+ writeb(0, brd->intPend); /* ACK */
+
+ if (moxaLowWaterChk) {
+ struct moxa_port *p = brd->ports;
+ for (port = 0; port < brd->numPorts; port++, p++)
+ if (p->lowChkFlag) {
+ p->lowChkFlag = 0;
+ moxa_low_water_check(p->tableAddr);
+ }
+ }
+ }
+ moxaLowWaterChk = 0;
+
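+ /* re-arm the poll timer (HZ / 50, i.e. 20 ms) only while a board is up */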
+ if (served)
+ mod_timer(&moxaTimer, jiffies + HZ / 50);
+ spin_unlock(&moxa_lock);
+}
+
+/******************************************************************************/
+
+static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ register struct ktermios *ts = tty->termios;
+ struct moxa_port *ch = tty->driver_data;
+ int rts, cts, txflow, rxflow, xany, baud;
+
+ rts = cts = txflow = rxflow = xany = 0;
+ if (ts->c_cflag & CRTSCTS)
+ rts = cts = 1;
+ if (ts->c_iflag & IXON)
+ txflow = 1;
+ if (ts->c_iflag & IXOFF)
+ rxflow = 1;
+ if (ts->c_iflag & IXANY)
+ xany = 1;
+
+ /* Clear the features we don't support */
+ ts->c_cflag &= ~CMSPAR;
+ MoxaPortFlowCtrl(ch, rts, cts, txflow, rxflow, xany);
+ baud = MoxaPortSetTermio(ch, ts, tty_get_baud_rate(tty));
+ if (baud == -1)
+ baud = tty_termios_baud_rate(old_termios);
+ /* Now put the baud rate into the termios data */
+ tty_encode_baud_rate(tty, baud, baud);
+}
+
+/*****************************************************************************
+ * Driver level functions: *
+ *****************************************************************************/
+
+static void MoxaPortFlushData(struct moxa_port *port, int mode)
+{
+ void __iomem *ofsAddr;
+ if (mode < 0 || mode > 2)
+ return;
+ ofsAddr = port->tableAddr;
+ moxafunc(ofsAddr, FC_FlushQueue, mode);
+ if (mode != 1) {
+ port->lowChkFlag = 0;
+ moxa_low_water_check(ofsAddr);
+ }
+}
+
+/*
+ * Moxa Port Number Description:
+ *
+ * The MOXA serial driver supports up to 4 MOXA C218/C320 boards. The
+ * port numbers used in the MOXA driver functions are 0 to 31 for the
+ * first MOXA board, 32 to 63 for the second, 64 to 95 for the third
+ * and 96 to 127 for the fourth. For example, if you set up three MOXA
+ * boards where the first is a C218, the second a C320-16 and the third
+ * a C320-32, the port numbers of the first board (C218 - 8 ports) are
+ * 0 to 7, those of the second board (C320 - 16 ports) are 32 to 47,
+ * and those of the third board (C320 - 32 ports) are 64 to 95. Port
+ * numbers from 8 to 31, 48 to 63 and 96 to 127 are invalid.
+ *
+ *
+ * Moxa Functions Description:
+ *
+ * Function 1: Driver initialization routine; this routine must be
+ * called when the driver is initialized.
+ * Syntax:
+ * void MoxaDriverInit();
+ *
+ *
+ * Function 2: Moxa driver private IOCTL command processing.
+ * Syntax:
+ * int MoxaDriverIoctl(unsigned int cmd, unsigned long arg, int port);
+ *
+ * unsigned int cmd : IOCTL command
+ * unsigned long arg : IOCTL argument
+ * int port : port number (0 - 127)
+ *
+ * return: 0 (OK)
+ * -EINVAL
+ * -ENOIOCTLCMD
+ *
+ *
+ * Function 6: Enable this port to start Tx/Rx data.
+ * Syntax:
+ * void MoxaPortEnable(int port);
+ * int port : port number (0 - 127)
+ *
+ *
+ * Function 7: Disable this port
+ * Syntax:
+ * void MoxaPortDisable(int port);
+ * int port : port number (0 - 127)
+ *
+ *
+ * Function 10: Setting the baud rate of this port.
+ * Syntax:
+ * speed_t MoxaPortSetBaud(int port, speed_t baud);
+ * int port : port number (0 - 127)
+ * speed_t baud : baud rate (50 - 115200)
+ *
+ * return: 0 : this port is invalid or baud < 50
+ * 50 - 115200 : the real baud rate set on the port; if
+ * the baud argument is larger than the maximum
+ * available baud rate, the rate actually set
+ * will be the maximum baud rate.
+ *
+ *
+ * Function 12: Configure the port.
+ * Syntax:
+ * int MoxaPortSetTermio(int port, struct ktermios *termio, speed_t baud);
+ * int port : port number (0 - 127)
+ * struct ktermios * termio : termio structure pointer
+ * speed_t baud : baud rate
+ *
+ * return: -1 : this port is invalid or termio == NULL
+ * 0 : setting O.K.
+ *
+ *
+ * Function 13: Get the DTR/RTS state of this port.
+ * Syntax:
+ * int MoxaPortGetLineOut(int port, int *dtrState, int *rtsState);
+ * int port : port number (0 - 127)
+ * int * dtrState : pointer to INT to receive the current DTR
+ * state. (if NULL, this function will not
+ * write to this address)
+ * int * rtsState : pointer to INT to receive the current RTS
+ * state. (if NULL, this function will not
+ * write to this address)
+ *
+ * return: -1 : this port is invalid
+ * 0 : O.K.
+ *
+ *
+ * Function 14: Setting the DTR/RTS output state of this port.
+ * Syntax:
+ * void MoxaPortLineCtrl(int port, int dtrState, int rtsState);
+ * int port : port number (0 - 127)
+ * int dtrState : DTR output state (0: off, 1: on)
+ * int rtsState : RTS output state (0: off, 1: on)
+ *
+ *
+ * Function 15: Setting the flow control of this port.
+ * Syntax:
+ * void MoxaPortFlowCtrl(int port, int rtsFlow, int ctsFlow, int rxFlow,
+ * int txFlow,int xany);
+ * int port : port number (0 - 127)
+ * int rtsFlow : H/W RTS flow control (0: no, 1: yes)
+ * int ctsFlow : H/W CTS flow control (0: no, 1: yes)
+ * int rxFlow : S/W Rx XON/XOFF flow control (0: no, 1: yes)
+ * int txFlow : S/W Tx XON/XOFF flow control (0: no, 1: yes)
+ * int xany : S/W XANY flow control (0: no, 1: yes)
+ *
+ *
+ * Function 16: Get the line status of this port
+ * Syntax:
+ * int MoxaPortLineStatus(int port);
+ * int port : port number (0 - 127)
+ *
+ * return: Bit 0 - CTS state (0: off, 1: on)
+ * Bit 1 - DSR state (0: off, 1: on)
+ * Bit 2 - DCD state (0: off, 1: on)
+ *
+ *
+ * Function 19: Flush the Rx/Tx buffer data of this port.
+ * Syntax:
+ * void MoxaPortFlushData(int port, int mode);
+ * int port : port number (0 - 127)
+ * int mode
+ * 0 : flush the Rx buffer
+ * 1 : flush the Tx buffer
+ * 2 : flush the Rx and Tx buffer
+ *
+ *
+ * Function 20: Write data.
+ * Syntax:
+ * int MoxaPortWriteData(int port, unsigned char * buffer, int length);
+ * int port : port number (0 - 127)
+ * unsigned char * buffer : pointer to write data buffer.
+ * int length : write data length
+ *
+ * return: 0 - length : real write data length
+ *
+ *
+ * Function 21: Read data.
+ * Syntax:
+ * int MoxaPortReadData(int port, struct tty_struct *tty);
+ * int port : port number (0 - 127)
+ * struct tty_struct *tty : tty for data
+ *
+ * return: 0 - length : real read data length
+ *
+ *
+ * Function 24: Get the Tx buffer current queued data bytes
+ * Syntax:
+ * int MoxaPortTxQueue(int port);
+ * int port : port number (0 - 127)
+ *
+ * return: .. : Tx buffer current queued data bytes
+ *
+ *
+ * Function 25: Get the Tx buffer current free space
+ * Syntax:
+ * int MoxaPortTxFree(int port);
+ * int port : port number (0 - 127)
+ *
+ * return: .. : Tx buffer current free space
+ *
+ *
+ * Function 26: Get the Rx buffer current queued data bytes
+ * Syntax:
+ * int MoxaPortRxQueue(int port);
+ * int port : port number (0 - 127)
+ *
+ * return: .. : Rx buffer current queued data bytes
+ *
+ *
+ * Function 28: Disable port data transmission.
+ * Syntax:
+ * void MoxaPortTxDisable(int port);
+ * int port : port number (0 - 127)
+ *
+ *
+ * Function 29: Enable port data transmission.
+ * Syntax:
+ * void MoxaPortTxEnable(int port);
+ * int port : port number (0 - 127)
+ *
+ *
+ * Function 31: Get the received BREAK signal count and reset it.
+ * Syntax:
+ * int MoxaPortResetBrkCnt(int port);
+ * int port : port number (0 - 127)
+ *
+ * return: 0 - .. : BREAK signal count
+ *
+ *
+ */
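+
+/*
+ * Illustrative flow (a sketch, not driver code): the tty core first calls
+ * moxa_write_room() -> MoxaPortTxFree() to size a write, then moxa_write()
+ * -> MoxaPortWriteData() to queue the bytes into dual-ported RAM, and
+ * moxa_poll() later clears LOWWAIT/EMPTYWAIT and wakes the writer once the
+ * firmware has drained the queue.
+ */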
+
+static void MoxaPortEnable(struct moxa_port *port)
+{
+ void __iomem *ofsAddr;
+ u16 lowwater = 512;
+
+ ofsAddr = port->tableAddr;
+ writew(lowwater, ofsAddr + Low_water);
+ if (MOXA_IS_320(port->board))
+ moxafunc(ofsAddr, FC_SetBreakIrq, 0);
+ else
+ writew(readw(ofsAddr + HostStat) | WakeupBreak,
+ ofsAddr + HostStat);
+
+ moxafunc(ofsAddr, FC_SetLineIrq, Magic_code);
+ moxafunc(ofsAddr, FC_FlushQueue, 2);
+
+ moxafunc(ofsAddr, FC_EnableCH, Magic_code);
+ MoxaPortLineStatus(port);
+}
+
+static void MoxaPortDisable(struct moxa_port *port)
+{
+ void __iomem *ofsAddr = port->tableAddr;
+
+ moxafunc(ofsAddr, FC_SetFlowCtl, 0); /* disable flow control */
+ moxafunc(ofsAddr, FC_ClrLineIrq, Magic_code);
+ writew(0, ofsAddr + HostStat);
+ moxafunc(ofsAddr, FC_DisableCH, Magic_code);
+}
+
+static speed_t MoxaPortSetBaud(struct moxa_port *port, speed_t baud)
+{
+ void __iomem *ofsAddr = port->tableAddr;
+ unsigned int clock, val;
+ speed_t max;
+
+ max = MOXA_IS_320(port->board) ? 460800 : 921600;
+ if (baud < 50)
+ return 0;
+ if (baud > max)
+ baud = max;
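+ /*
+ * The firmware takes an integer divisor of the 921600 baud base clock;
+ * the rate actually achieved is clock / (clock / baud) and is returned
+ * to the caller.
+ */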
+ clock = 921600;
+ val = clock / baud;
+ moxafunc(ofsAddr, FC_SetBaud, val);
+ baud = clock / val;
+ return baud;
+}
+
+static int MoxaPortSetTermio(struct moxa_port *port, struct ktermios *termio,
+ speed_t baud)
+{
+ void __iomem *ofsAddr;
+ tcflag_t mode = 0;
+
+ ofsAddr = port->tableAddr;
+
+ mode = termio->c_cflag & CSIZE;
+ if (mode == CS5)
+ mode = MX_CS5;
+ else if (mode == CS6)
+ mode = MX_CS6;
+ else if (mode == CS7)
+ mode = MX_CS7;
+ else if (mode == CS8)
+ mode = MX_CS8;
+
+ if (termio->c_cflag & CSTOPB) {
+ if (mode == MX_CS5)
+ mode |= MX_STOP15;
+ else
+ mode |= MX_STOP2;
+ } else
+ mode |= MX_STOP1;
+
+ if (termio->c_cflag & PARENB) {
+ if (termio->c_cflag & PARODD)
+ mode |= MX_PARODD;
+ else
+ mode |= MX_PAREVEN;
+ } else
+ mode |= MX_PARNONE;
+
+ moxafunc(ofsAddr, FC_SetDataMode, (u16)mode);
+
+ if (MOXA_IS_320(port->board) && baud >= 921600)
+ return -1;
+
+ baud = MoxaPortSetBaud(port, baud);
+
+ if (termio->c_iflag & (IXON | IXOFF | IXANY)) {
+ spin_lock_irq(&moxafunc_lock);
+ writeb(termio->c_cc[VSTART], ofsAddr + FuncArg);
+ writeb(termio->c_cc[VSTOP], ofsAddr + FuncArg1);
+ writeb(FC_SetXonXoff, ofsAddr + FuncCode);
+ moxa_wait_finish(ofsAddr);
+ spin_unlock_irq(&moxafunc_lock);
+
+ }
+ return baud;
+}
+
+static int MoxaPortGetLineOut(struct moxa_port *port, int *dtrState,
+ int *rtsState)
+{
+ if (dtrState)
+ *dtrState = !!(port->lineCtrl & DTR_ON);
+ if (rtsState)
+ *rtsState = !!(port->lineCtrl & RTS_ON);
+
+ return 0;
+}
+
+static void MoxaPortLineCtrl(struct moxa_port *port, int dtr, int rts)
+{
+ u8 mode = 0;
+
+ if (dtr)
+ mode |= DTR_ON;
+ if (rts)
+ mode |= RTS_ON;
+ port->lineCtrl = mode;
+ moxafunc(port->tableAddr, FC_LineControl, mode);
+}
+
+static void MoxaPortFlowCtrl(struct moxa_port *port, int rts, int cts,
+ int txflow, int rxflow, int txany)
+{
+ int mode = 0;
+
+ if (rts)
+ mode |= RTS_FlowCtl;
+ if (cts)
+ mode |= CTS_FlowCtl;
+ if (txflow)
+ mode |= Tx_FlowCtl;
+ if (rxflow)
+ mode |= Rx_FlowCtl;
+ if (txany)
+ mode |= IXM_IXANY;
+ moxafunc(port->tableAddr, FC_SetFlowCtl, mode);
+}
+
+static int MoxaPortLineStatus(struct moxa_port *port)
+{
+ void __iomem *ofsAddr;
+ int val;
+
+ ofsAddr = port->tableAddr;
+ if (MOXA_IS_320(port->board))
+ val = moxafuncret(ofsAddr, FC_LineStatus, 0);
+ else
+ val = readw(ofsAddr + FlagStat) >> 4;
+ val &= 0x0B;
+ if (val & 8)
+ val |= 4;
+ moxa_new_dcdstate(port, val & 8);
+ val &= 7;
+ return val;
+}
+
+static int MoxaPortWriteData(struct tty_struct *tty,
+ const unsigned char *buffer, int len)
+{
+ struct moxa_port *port = tty->driver_data;
+ void __iomem *baseAddr, *ofsAddr, *ofs;
+ unsigned int c, total;
+ u16 head, tail, tx_mask, spage, epage;
+ u16 pageno, pageofs, bufhead;
+
+ ofsAddr = port->tableAddr;
+ baseAddr = port->board->basemem;
+ tx_mask = readw(ofsAddr + TX_mask);
+ spage = readw(ofsAddr + Page_txb);
+ epage = readw(ofsAddr + EndPage_txb);
+ tail = readw(ofsAddr + TXwptr);
+ head = readw(ofsAddr + TXrptr);
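+ /*
+ * TXrptr/TXwptr index a power-of-two ring in dual-ported RAM; one slot
+ * is kept unused so that head == tail means empty rather than full.
+ */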
+ c = (head > tail) ? (head - tail - 1) : (head - tail + tx_mask);
+ if (c > len)
+ c = len;
+ moxaLog.txcnt[port->port.tty->index] += c;
+ total = c;
+ if (spage == epage) {
+ bufhead = readw(ofsAddr + Ofs_txb);
+ writew(spage, baseAddr + Control_reg);
+ while (c > 0) {
+ if (head > tail)
+ len = head - tail - 1;
+ else
+ len = tx_mask + 1 - tail;
+ len = (c > len) ? len : c;
+ ofs = baseAddr + DynPage_addr + bufhead + tail;
+ memcpy_toio(ofs, buffer, len);
+ buffer += len;
+ tail = (tail + len) & tx_mask;
+ c -= len;
+ }
+ } else {
+ pageno = spage + (tail >> 13);
+ pageofs = tail & Page_mask;
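+ /*
+ * Page_size is 0x2000 (1 << 13): tail >> 13 picks the page relative
+ * to spage, tail & Page_mask the offset within that page.
+ */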
+ while (c > 0) {
+ len = Page_size - pageofs;
+ if (len > c)
+ len = c;
+ writeb(pageno, baseAddr + Control_reg);
+ ofs = baseAddr + DynPage_addr + pageofs;
+ memcpy_toio(ofs, buffer, len);
+ buffer += len;
+ if (++pageno == epage)
+ pageno = spage;
+ pageofs = 0;
+ c -= len;
+ }
+ tail = (tail + total) & tx_mask;
+ }
+ writew(tail, ofsAddr + TXwptr);
+ writeb(1, ofsAddr + CD180TXirq); /* start to send */
+ return total;
+}
+
+static int MoxaPortReadData(struct moxa_port *port)
+{
+ struct tty_struct *tty = port->port.tty;
+ unsigned char *dst;
+ void __iomem *baseAddr, *ofsAddr, *ofs;
+ unsigned int count, len, total;
+ u16 tail, rx_mask, spage, epage;
+ u16 pageno, pageofs, bufhead, head;
+
+ ofsAddr = port->tableAddr;
+ baseAddr = port->board->basemem;
+ head = readw(ofsAddr + RXrptr);
+ tail = readw(ofsAddr + RXwptr);
+ rx_mask = readw(ofsAddr + RX_mask);
+ spage = readw(ofsAddr + Page_rxb);
+ epage = readw(ofsAddr + EndPage_rxb);
+ count = (tail >= head) ? (tail - head) : (tail - head + rx_mask + 1);
+ if (count == 0)
+ return 0;
+
+ total = count;
+ moxaLog.rxcnt[tty->index] += total;
+ if (spage == epage) {
+ bufhead = readw(ofsAddr + Ofs_rxb);
+ writew(spage, baseAddr + Control_reg);
+ while (count > 0) {
+ ofs = baseAddr + DynPage_addr + bufhead + head;
+ len = (tail >= head) ? (tail - head) :
+ (rx_mask + 1 - head);
+ len = tty_prepare_flip_string(tty, &dst,
+ min(len, count));
+ memcpy_fromio(dst, ofs, len);
+ head = (head + len) & rx_mask;
+ count -= len;
+ }
+ } else {
+ pageno = spage + (head >> 13);
+ pageofs = head & Page_mask;
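+ /* same page/offset split as the Tx path: head >> 13 and head & Page_mask */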
+ while (count > 0) {
+ writew(pageno, baseAddr + Control_reg);
+ ofs = baseAddr + DynPage_addr + pageofs;
+ len = tty_prepare_flip_string(tty, &dst,
+ min(Page_size - pageofs, count));
+ memcpy_fromio(dst, ofs, len);
+
+ count -= len;
+ pageofs = (pageofs + len) & Page_mask;
+ if (pageofs == 0 && ++pageno == epage)
+ pageno = spage;
+ }
+ head = (head + total) & rx_mask;
+ }
+ writew(head, ofsAddr + RXrptr);
+ if (readb(ofsAddr + FlagStat) & Xoff_state) {
+ moxaLowWaterChk = 1;
+ port->lowChkFlag = 1;
+ }
+ return total;
+}
+
+
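+/*
+ * The Tx/Rx ring sizes are powers of two (see moxa.h), so the queued byte
+ * count is just (write pointer - read pointer) masked to the ring size;
+ * MoxaPortTxFree() keeps one slot unused, matching the write path above.
+ */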
+static int MoxaPortTxQueue(struct moxa_port *port)
+{
+ void __iomem *ofsAddr = port->tableAddr;
+ u16 rptr, wptr, mask;
+
+ rptr = readw(ofsAddr + TXrptr);
+ wptr = readw(ofsAddr + TXwptr);
+ mask = readw(ofsAddr + TX_mask);
+ return (wptr - rptr) & mask;
+}
+
+static int MoxaPortTxFree(struct moxa_port *port)
+{
+ void __iomem *ofsAddr = port->tableAddr;
+ u16 rptr, wptr, mask;
+
+ rptr = readw(ofsAddr + TXrptr);
+ wptr = readw(ofsAddr + TXwptr);
+ mask = readw(ofsAddr + TX_mask);
+ return mask - ((wptr - rptr) & mask);
+}
+
+static int MoxaPortRxQueue(struct moxa_port *port)
+{
+ void __iomem *ofsAddr = port->tableAddr;
+ u16 rptr, wptr, mask;
+
+ rptr = readw(ofsAddr + RXrptr);
+ wptr = readw(ofsAddr + RXwptr);
+ mask = readw(ofsAddr + RX_mask);
+ return (wptr - rptr) & mask;
+}
+
+static void MoxaPortTxDisable(struct moxa_port *port)
+{
+ moxafunc(port->tableAddr, FC_SetXoffState, Magic_code);
+}
+
+static void MoxaPortTxEnable(struct moxa_port *port)
+{
+ moxafunc(port->tableAddr, FC_SetXonState, Magic_code);
+}
+
+static int moxa_get_serial_info(struct moxa_port *info,
+ struct serial_struct __user *retinfo)
+{
+ struct serial_struct tmp = {
+ .type = info->type,
+ .line = info->port.tty->index,
+ .flags = info->port.flags,
+ .baud_base = 921600,
+ .close_delay = info->port.close_delay
+ };
+ return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
+}
+
+
+static int moxa_set_serial_info(struct moxa_port *info,
+ struct serial_struct __user *new_info)
+{
+ struct serial_struct new_serial;
+
+ if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
+ return -EFAULT;
+
+ if (new_serial.irq != 0 || new_serial.port != 0 ||
+ new_serial.custom_divisor != 0 ||
+ new_serial.baud_base != 921600)
+ return -EPERM;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ if (((new_serial.flags & ~ASYNC_USR_MASK) !=
+ (info->port.flags & ~ASYNC_USR_MASK)))
+ return -EPERM;
+ } else
+ info->port.close_delay = new_serial.close_delay * HZ / 100;
+
+ new_serial.flags = (new_serial.flags & ~ASYNC_FLAGS);
+ new_serial.flags |= (info->port.flags & ASYNC_FLAGS);
+
+ MoxaSetFifo(info, new_serial.type == PORT_16550A);
+
+ info->type = new_serial.type;
+ return 0;
+}
+
+
+
+/*****************************************************************************
+ * Static local functions: *
+ *****************************************************************************/
+
+static void MoxaSetFifo(struct moxa_port *port, int enable)
+{
+ void __iomem *ofsAddr = port->tableAddr;
+
+ if (!enable) {
+ moxafunc(ofsAddr, FC_SetRxFIFOTrig, 0);
+ moxafunc(ofsAddr, FC_SetTxFIFOCnt, 1);
+ } else {
+ moxafunc(ofsAddr, FC_SetRxFIFOTrig, 3);
+ moxafunc(ofsAddr, FC_SetTxFIFOCnt, 16);
+ }
+}
diff --git a/drivers/tty/moxa.h b/drivers/tty/moxa.h
new file mode 100644
index 00000000..87d16ce5
--- /dev/null
+++ b/drivers/tty/moxa.h
@@ -0,0 +1,304 @@
+#ifndef MOXA_H_FILE
+#define MOXA_H_FILE
+
+#define MOXA 0x400
+#define MOXA_GET_IQUEUE (MOXA + 1) /* get input buffered count */
+#define MOXA_GET_OQUEUE (MOXA + 2) /* get output buffered count */
+#define MOXA_GETDATACOUNT (MOXA + 23)
+#define MOXA_GET_IOQUEUE (MOXA + 27)
+#define MOXA_FLUSH_QUEUE (MOXA + 28)
+#define MOXA_GETMSTATUS (MOXA + 65)
+
+/*
+ * System Configuration
+ */
+
+#define Magic_code 0x404
+
+/*
+ * for C218 BIOS initialization
+ */
+#define C218_ConfBase 0x800
+#define C218_status (C218_ConfBase + 0) /* BIOS running status */
+#define C218_diag (C218_ConfBase + 2) /* diagnostic status */
+#define C218_key (C218_ConfBase + 4) /* WORD (0x218 for C218) */
+#define C218DLoad_len (C218_ConfBase + 6) /* WORD */
+#define C218check_sum (C218_ConfBase + 8) /* BYTE */
+#define C218chksum_ok (C218_ConfBase + 0x0a) /* BYTE (1:ok) */
+#define C218_TestRx (C218_ConfBase + 0x10) /* 8 bytes for 8 ports */
+#define C218_TestTx (C218_ConfBase + 0x18) /* 8 bytes for 8 ports */
+#define C218_RXerr (C218_ConfBase + 0x20) /* 8 bytes for 8 ports */
+#define C218_ErrFlag (C218_ConfBase + 0x28) /* 8 bytes for 8 ports */
+
+#define C218_LoadBuf 0x0F00
+#define C218_KeyCode 0x218
+#define CP204J_KeyCode 0x204
+
+/*
+ * for C320 BIOS initialization
+ */
+#define C320_ConfBase 0x800
+#define C320_LoadBuf 0x0f00
+#define STS_init 0x05 /* for C320_status */
+
+#define C320_status (C320_ConfBase + 0) /* BIOS running status */
+#define C320_diag (C320_ConfBase + 2) /* diagnostic status */
+#define C320_key (C320_ConfBase + 4) /* WORD (0320H for C320) */
+#define C320DLoad_len (C320_ConfBase + 6) /* WORD */
+#define C320check_sum (C320_ConfBase + 8) /* WORD */
+#define C320chksum_ok (C320_ConfBase + 0x0a) /* WORD (1:ok) */
+#define C320bapi_len (C320_ConfBase + 0x0c) /* WORD */
+#define C320UART_no (C320_ConfBase + 0x0e) /* WORD */
+
+#define C320_KeyCode 0x320
+
+#define FixPage_addr 0x0000 /* starting addr of static page */
+#define DynPage_addr 0x2000 /* starting addr of dynamic page */
+#define C218_start 0x3000 /* starting addr of C218 BIOS prg */
+#define Control_reg 0x1ff0 /* select page and reset control */
+#define HW_reset 0x80
+
+/*
+ * Function Codes
+ */
+#define FC_CardReset 0x80
+#define FC_ChannelReset 1 /* C320 firmware not supported */
+#define FC_EnableCH 2
+#define FC_DisableCH 3
+#define FC_SetParam 4
+#define FC_SetMode 5
+#define FC_SetRate 6
+#define FC_LineControl 7
+#define FC_LineStatus 8
+#define FC_XmitControl 9
+#define FC_FlushQueue 10
+#define FC_SendBreak 11
+#define FC_StopBreak 12
+#define FC_LoopbackON 13
+#define FC_LoopbackOFF 14
+#define FC_ClrIrqTable 15
+#define FC_SendXon 16
+#define FC_SetTermIrq 17 /* C320 firmware not supported */
+#define FC_SetCntIrq 18 /* C320 firmware not supported */
+#define FC_SetBreakIrq 19
+#define FC_SetLineIrq 20
+#define FC_SetFlowCtl 21
+#define FC_GenIrq 22
+#define FC_InCD180 23
+#define FC_OutCD180 24
+#define FC_InUARTreg 23
+#define FC_OutUARTreg 24
+#define FC_SetXonXoff 25
+#define FC_OutCD180CCR 26
+#define FC_ExtIQueue 27
+#define FC_ExtOQueue 28
+#define FC_ClrLineIrq 29
+#define FC_HWFlowCtl 30
+#define FC_GetClockRate 35
+#define FC_SetBaud 36
+#define FC_SetDataMode 41
+#define FC_GetCCSR 43
+#define FC_GetDataError 45
+#define FC_RxControl 50
+#define FC_ImmSend 51
+#define FC_SetXonState 52
+#define FC_SetXoffState 53
+#define FC_SetRxFIFOTrig 54
+#define FC_SetTxFIFOCnt 55
+#define FC_UnixRate 56
+#define FC_UnixResetTimer 57
+
+#define RxFIFOTrig1 0
+#define RxFIFOTrig4 1
+#define RxFIFOTrig8 2
+#define RxFIFOTrig14 3
+
+/*
+ * Dual-Ported RAM
+ */
+#define DRAM_global 0
+#define INT_data (DRAM_global + 0)
+#define Config_base (DRAM_global + 0x108)
+
+#define IRQindex (INT_data + 0)
+#define IRQpending (INT_data + 4)
+#define IRQtable (INT_data + 8)
+
+/*
+ * Interrupt Status
+ */
+#define IntrRx 0x01 /* receiver data O.K. */
+#define IntrTx 0x02 /* transmit buffer empty */
+#define IntrFunc 0x04 /* function complete */
+#define IntrBreak 0x08 /* received break */
+#define IntrLine 0x10 /* line status change
+ for transmitter */
+#define IntrIntr 0x20 /* received INTR code */
+#define IntrQuit 0x40 /* received QUIT code */
+#define IntrEOF 0x80 /* received EOF code */
+
+#define IntrRxTrigger 0x100 /* rx data count reaches trigger value */
+#define IntrTxTrigger 0x200 /* tx data count below trigger value */
+
+#define Magic_no (Config_base + 0)
+#define Card_model_no (Config_base + 2)
+#define Total_ports (Config_base + 4)
+#define Module_cnt (Config_base + 8)
+#define Module_no (Config_base + 10)
+#define Timer_10ms (Config_base + 14)
+#define Disable_IRQ (Config_base + 20)
+#define TMS320_PORT1 (Config_base + 22)
+#define TMS320_PORT2 (Config_base + 24)
+#define TMS320_CLOCK (Config_base + 26)
+
+/*
+ * DATA BUFFER in DRAM
+ */
+#define Extern_table 0x400 /* Base address of the external table
+ (24 words * 64) total 3K bytes
+ (24 words * 128) total 6K bytes */
+#define Extern_size 0x60 /* 96 bytes */
+#define RXrptr 0x00 /* read pointer for RX buffer */
+#define RXwptr 0x02 /* write pointer for RX buffer */
+#define TXrptr 0x04 /* read pointer for TX buffer */
+#define TXwptr 0x06 /* write pointer for TX buffer */
+#define HostStat 0x08 /* IRQ flag and general flag */
+#define FlagStat 0x0A
+#define FlowControl 0x0C /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ /* x x x x | | | | */
+ /* | | | + CTS flow */
+ /* | | +--- RTS flow */
+ /* | +------ TX Xon/Xoff */
+ /* +--------- RX Xon/Xoff */
+#define Break_cnt 0x0E /* received break count */
+#define CD180TXirq 0x10 /* if non-0: enable TX irq */
+#define RX_mask 0x12
+#define TX_mask 0x14
+#define Ofs_rxb 0x16
+#define Ofs_txb 0x18
+#define Page_rxb 0x1A
+#define Page_txb 0x1C
+#define EndPage_rxb 0x1E
+#define EndPage_txb 0x20
+#define Data_error 0x22
+#define RxTrigger 0x28
+#define TxTrigger 0x2a
+
+#define rRXwptr 0x34
+#define Low_water 0x36
+
+#define FuncCode 0x40
+#define FuncArg 0x42
+#define FuncArg1 0x44
+
+#define C218rx_size 0x2000 /* 8K bytes */
+#define C218tx_size 0x8000 /* 32K bytes */
+
+#define C218rx_mask (C218rx_size - 1)
+#define C218tx_mask (C218tx_size - 1)
+
+#define C320p8rx_size 0x2000
+#define C320p8tx_size 0x8000
+#define C320p8rx_mask (C320p8rx_size - 1)
+#define C320p8tx_mask (C320p8tx_size - 1)
+
+#define C320p16rx_size 0x2000
+#define C320p16tx_size 0x4000
+#define C320p16rx_mask (C320p16rx_size - 1)
+#define C320p16tx_mask (C320p16tx_size - 1)
+
+#define C320p24rx_size 0x2000
+#define C320p24tx_size 0x2000
+#define C320p24rx_mask (C320p24rx_size - 1)
+#define C320p24tx_mask (C320p24tx_size - 1)
+
+#define C320p32rx_size 0x1000
+#define C320p32tx_size 0x1000
+#define C320p32rx_mask (C320p32rx_size - 1)
+#define C320p32tx_mask (C320p32tx_size - 1)
+
+#define Page_size 0x2000U
+#define Page_mask (Page_size - 1)
+#define C218rx_spage 3
+#define C218tx_spage 4
+#define C218rx_pageno 1
+#define C218tx_pageno 4
+#define C218buf_pageno 5
+
+#define C320p8rx_spage 3
+#define C320p8tx_spage 4
+#define C320p8rx_pgno 1
+#define C320p8tx_pgno 4
+#define C320p8buf_pgno 5
+
+#define C320p16rx_spage 3
+#define C320p16tx_spage 4
+#define C320p16rx_pgno 1
+#define C320p16tx_pgno 2
+#define C320p16buf_pgno 3
+
+#define C320p24rx_spage 3
+#define C320p24tx_spage 4
+#define C320p24rx_pgno 1
+#define C320p24tx_pgno 1
+#define C320p24buf_pgno 2
+
+#define C320p32rx_spage 3
+#define C320p32tx_ofs C320p32rx_size
+#define C320p32tx_spage 3
+#define C320p32buf_pgno 1
+
+/*
+ * Host Status
+ */
+#define WakeupRx 0x01
+#define WakeupTx 0x02
+#define WakeupBreak 0x08
+#define WakeupLine 0x10
+#define WakeupIntr 0x20
+#define WakeupQuit 0x40
+#define WakeupEOF 0x80 /* used in VTIME control */
+#define WakeupRxTrigger 0x100
+#define WakeupTxTrigger 0x200
+/*
+ * Flag status
+ */
+#define Rx_over 0x01
+#define Xoff_state 0x02
+#define Tx_flowOff 0x04
+#define Tx_enable 0x08
+#define CTS_state 0x10
+#define DSR_state 0x20
+#define DCD_state 0x80
+/*
+ * FlowControl
+ */
+#define CTS_FlowCtl 1
+#define RTS_FlowCtl 2
+#define Tx_FlowCtl 4
+#define Rx_FlowCtl 8
+#define IXM_IXANY 0x10
+
+#define LowWater 128
+
+#define DTR_ON 1
+#define RTS_ON 2
+#define CTS_ON 1
+#define DSR_ON 2
+#define DCD_ON 8
+
+/* mode definition */
+#define MX_CS8 0x03
+#define MX_CS7 0x02
+#define MX_CS6 0x01
+#define MX_CS5 0x00
+
+#define MX_STOP1 0x00
+#define MX_STOP15 0x04
+#define MX_STOP2 0x08
+
+#define MX_PARNONE 0x00
+#define MX_PAREVEN 0x40
+#define MX_PARODD 0xC0
+
+#endif
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
new file mode 100644
index 00000000..d188f378
--- /dev/null
+++ b/drivers/tty/mxser.c
@@ -0,0 +1,2757 @@
+/*
+ * mxser.c -- MOXA Smartio/Industio family multiport serial driver.
+ *
+ * Copyright (C) 1999-2006 Moxa Technologies (support@moxa.com).
+ * Copyright (C) 2006-2008 Jiri Slaby <jirislaby@gmail.com>
+ *
+ * This code is loosely based on the 1.8 moxa driver which is based on
+ * Linux serial driver, written by Linus Torvalds, Theodore T'so and
+ * others.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Fed through a cleanup, indent and remove of non 2.6 code by Alan Cox
+ * <alan@lxorguk.ukuu.org.uk>. The original 1.8 code is available on
+ * www.moxa.com.
+ * - Fixed x86_64 cleanness
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_reg.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "mxser.h"
+
+#define MXSER_VERSION "2.0.5" /* 1.14 */
+#define MXSERMAJOR 174
+
+#define MXSER_BOARDS 4 /* Max. boards */
+#define MXSER_PORTS_PER_BOARD 8 /* Max. ports per board */
+#define MXSER_PORTS (MXSER_BOARDS * MXSER_PORTS_PER_BOARD)
+#define MXSER_ISR_PASS_LIMIT 100
+
+/* CheckIsMoxaMust return value */
+#define MOXA_OTHER_UART 0x00
+#define MOXA_MUST_MU150_HWID 0x01
+#define MOXA_MUST_MU860_HWID 0x02
+
+#define WAKEUP_CHARS 256
+
+#define UART_MCR_AFE 0x20
+#define UART_LSR_SPECIAL 0x1E
+
+#define PCI_DEVICE_ID_POS104UL 0x1044
+#define PCI_DEVICE_ID_CB108 0x1080
+#define PCI_DEVICE_ID_CP102UF 0x1023
+#define PCI_DEVICE_ID_CP112UL 0x1120
+#define PCI_DEVICE_ID_CB114 0x1142
+#define PCI_DEVICE_ID_CP114UL 0x1143
+#define PCI_DEVICE_ID_CB134I 0x1341
+#define PCI_DEVICE_ID_CP138U 0x1380
+
+
+#define C168_ASIC_ID 1
+#define C104_ASIC_ID 2
+#define C102_ASIC_ID 0xB
+#define CI132_ASIC_ID 4
+#define CI134_ASIC_ID 3
+#define CI104J_ASIC_ID 5
+
+#define MXSER_HIGHBAUD 1
+#define MXSER_HAS2 2
+
+/* This is only for PCI */
+static const struct {
+ int type;
+ int tx_fifo;
+ int rx_fifo;
+ int xmit_fifo_size;
+ int rx_high_water;
+ int rx_trigger;
+ int rx_low_water;
+ long max_baud;
+} Gpci_uart_info[] = {
+ {MOXA_OTHER_UART, 16, 16, 16, 14, 14, 1, 921600L},
+ {MOXA_MUST_MU150_HWID, 64, 64, 64, 48, 48, 16, 230400L},
+ {MOXA_MUST_MU860_HWID, 128, 128, 128, 96, 96, 32, 921600L}
+};
+#define UART_INFO_NUM ARRAY_SIZE(Gpci_uart_info)
+
+struct mxser_cardinfo {
+ char *name;
+ unsigned int nports;
+ unsigned int flags;
+};
+
+static const struct mxser_cardinfo mxser_cards[] = {
+/* 0*/ { "C168 series", 8, },
+ { "C104 series", 4, },
+ { "CI-104J series", 4, },
+ { "C168H/PCI series", 8, },
+ { "C104H/PCI series", 4, },
+/* 5*/ { "C102 series", 4, MXSER_HAS2 }, /* C102-ISA */
+ { "CI-132 series", 4, MXSER_HAS2 },
+ { "CI-134 series", 4, },
+ { "CP-132 series", 2, },
+ { "CP-114 series", 4, },
+/*10*/ { "CT-114 series", 4, },
+ { "CP-102 series", 2, MXSER_HIGHBAUD },
+ { "CP-104U series", 4, },
+ { "CP-168U series", 8, },
+ { "CP-132U series", 2, },
+/*15*/ { "CP-134U series", 4, },
+ { "CP-104JU series", 4, },
+ { "Moxa UC7000 Serial", 8, }, /* RC7000 */
+ { "CP-118U series", 8, },
+ { "CP-102UL series", 2, },
+/*20*/ { "CP-102U series", 2, },
+ { "CP-118EL series", 8, },
+ { "CP-168EL series", 8, },
+ { "CP-104EL series", 4, },
+ { "CB-108 series", 8, },
+/*25*/ { "CB-114 series", 4, },
+ { "CB-134I series", 4, },
+ { "CP-138U series", 8, },
+ { "POS-104UL series", 4, },
+ { "CP-114UL series", 4, },
+/*30*/ { "CP-102UF series", 2, },
+ { "CP-112UL series", 2, },
+};
+
+/* driver_data corresponds to the lines in the structure above;
+   see also the ISA probe function before you change something */
+static struct pci_device_id mxser_pcibrds[] = {
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C168), .driver_data = 3 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C104), .driver_data = 4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132), .driver_data = 8 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114), .driver_data = 9 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CT114), .driver_data = 10 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102), .driver_data = 11 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104U), .driver_data = 12 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168U), .driver_data = 13 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132U), .driver_data = 14 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134U), .driver_data = 15 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104JU),.driver_data = 16 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_RC7000), .driver_data = 17 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118U), .driver_data = 18 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102UL),.driver_data = 19 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102U), .driver_data = 20 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118EL),.driver_data = 21 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168EL),.driver_data = 22 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104EL),.driver_data = 23 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB108), .driver_data = 24 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB114), .driver_data = 25 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB134I), .driver_data = 26 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP138U), .driver_data = 27 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_POS104UL), .driver_data = 28 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP114UL), .driver_data = 29 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP102UF), .driver_data = 30 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP112UL), .driver_data = 31 },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, mxser_pcibrds);
+
+static unsigned long ioaddr[MXSER_BOARDS];
+static int ttymajor = MXSERMAJOR;
+
+/* Variables for insmod */
+
+MODULE_AUTHOR("Casper Yang");
+MODULE_DESCRIPTION("MOXA Smartio/Industio Family Multiport Board Device Driver");
+module_param_array(ioaddr, ulong, NULL, 0);
+MODULE_PARM_DESC(ioaddr, "ISA io addresses to look for a moxa board");
+module_param(ttymajor, int, 0);
+MODULE_LICENSE("GPL");
+
+struct mxser_log {
+ int tick;
+ unsigned long rxcnt[MXSER_PORTS];
+ unsigned long txcnt[MXSER_PORTS];
+};
+
+struct mxser_mon {
+ unsigned long rxcnt;
+ unsigned long txcnt;
+ unsigned long up_rxcnt;
+ unsigned long up_txcnt;
+ int modem_status;
+ unsigned char hold_reason;
+};
+
+struct mxser_mon_ext {
+ unsigned long rx_cnt[32];
+ unsigned long tx_cnt[32];
+ unsigned long up_rxcnt[32];
+ unsigned long up_txcnt[32];
+ int modem_status[32];
+
+ long baudrate[32];
+ int databits[32];
+ int stopbits[32];
+ int parity[32];
+ int flowctrl[32];
+ int fifo[32];
+ int iftype[32];
+};
+
+struct mxser_board;
+
+struct mxser_port {
+ struct tty_port port;
+ struct mxser_board *board;
+
+ unsigned long ioaddr;
+ unsigned long opmode_ioaddr;
+ int max_baud;
+
+ int rx_high_water;
+ int rx_trigger; /* Rx fifo trigger level */
+ int rx_low_water;
+ int baud_base; /* max. speed */
+ int type; /* UART type */
+
+ int x_char; /* xon/xoff character */
+ int IER; /* Interrupt Enable Register */
+ int MCR; /* Modem control register */
+
+ unsigned char stop_rx;
+ unsigned char ldisc_stop_rx;
+
+ int custom_divisor;
+ unsigned char err_shadow;
+
+ struct async_icount icount; /* kernel counters for 4 input interrupts */
+ int timeout;
+
+ int read_status_mask;
+ int ignore_status_mask;
+ int xmit_fifo_size;
+ int xmit_head;
+ int xmit_tail;
+ int xmit_cnt;
+
+ struct ktermios normal_termios;
+
+ struct mxser_mon mon_data;
+
+ spinlock_t slock;
+};
+
+struct mxser_board {
+ unsigned int idx;
+ int irq;
+ const struct mxser_cardinfo *info;
+ unsigned long vector;
+ unsigned long vector_mask;
+
+ int chip_flag;
+ int uart_type;
+
+ struct mxser_port ports[MXSER_PORTS_PER_BOARD];
+};
+
+struct mxser_mstatus {
+ tcflag_t cflag;
+ int cts;
+ int dsr;
+ int ri;
+ int dcd;
+};
+
+static struct mxser_board mxser_boards[MXSER_BOARDS];
+static struct tty_driver *mxvar_sdriver;
+static struct mxser_log mxvar_log;
+static int mxser_set_baud_method[MXSER_PORTS + 1];
+
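+/*
+ * The MOXA MUST UARTs expose their extra registers behind the EFR: writing
+ * the magic MOXA_MUST_ENTER_ENCHANCE value to LCR switches in the enhanced
+ * register bank, and the saved LCR is restored on the way out. All of the
+ * helpers below follow this save/switch/program/restore pattern.
+ */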
+static void mxser_enable_must_enchance_mode(unsigned long baseio)
+{
+ u8 oldlcr;
+ u8 efr;
+
+ oldlcr = inb(baseio + UART_LCR);
+ outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+ efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+ efr |= MOXA_MUST_EFR_EFRB_ENABLE;
+
+ outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+ outb(oldlcr, baseio + UART_LCR);
+}
+
+#ifdef CONFIG_PCI
+static void mxser_disable_must_enchance_mode(unsigned long baseio)
+{
+ u8 oldlcr;
+ u8 efr;
+
+ oldlcr = inb(baseio + UART_LCR);
+ outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+ efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+ efr &= ~MOXA_MUST_EFR_EFRB_ENABLE;
+
+ outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+ outb(oldlcr, baseio + UART_LCR);
+}
+#endif
+
+static void mxser_set_must_xon1_value(unsigned long baseio, u8 value)
+{
+ u8 oldlcr;
+ u8 efr;
+
+ oldlcr = inb(baseio + UART_LCR);
+ outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+ efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+ efr &= ~MOXA_MUST_EFR_BANK_MASK;
+ efr |= MOXA_MUST_EFR_BANK0;
+
+ outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+ outb(value, baseio + MOXA_MUST_XON1_REGISTER);
+ outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_set_must_xoff1_value(unsigned long baseio, u8 value)
+{
+ u8 oldlcr;
+ u8 efr;
+
+ oldlcr = inb(baseio + UART_LCR);
+ outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+ efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+ efr &= ~MOXA_MUST_EFR_BANK_MASK;
+ efr |= MOXA_MUST_EFR_BANK0;
+
+ outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+ outb(value, baseio + MOXA_MUST_XOFF1_REGISTER);
+ outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_set_must_fifo_value(struct mxser_port *info)
+{
+ u8 oldlcr;
+ u8 efr;
+
+ oldlcr = inb(info->ioaddr + UART_LCR);
+ outb(MOXA_MUST_ENTER_ENCHANCE, info->ioaddr + UART_LCR);
+
+ efr = inb(info->ioaddr + MOXA_MUST_EFR_REGISTER);
+ efr &= ~MOXA_MUST_EFR_BANK_MASK;
+ efr |= MOXA_MUST_EFR_BANK1;
+
+ outb(efr, info->ioaddr + MOXA_MUST_EFR_REGISTER);
+ outb((u8)info->rx_high_water, info->ioaddr + MOXA_MUST_RBRTH_REGISTER);
+ outb((u8)info->rx_trigger, info->ioaddr + MOXA_MUST_RBRTI_REGISTER);
+ outb((u8)info->rx_low_water, info->ioaddr + MOXA_MUST_RBRTL_REGISTER);
+ outb(oldlcr, info->ioaddr + UART_LCR);
+}
+
+static void mxser_set_must_enum_value(unsigned long baseio, u8 value)
+{
+ u8 oldlcr;
+ u8 efr;
+
+ oldlcr = inb(baseio + UART_LCR);
+ outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+ efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+ efr &= ~MOXA_MUST_EFR_BANK_MASK;
+ efr |= MOXA_MUST_EFR_BANK2;
+
+ outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+ outb(value, baseio + MOXA_MUST_ENUM_REGISTER);
+ outb(oldlcr, baseio + UART_LCR);
+}
+
+#ifdef CONFIG_PCI
+static void mxser_get_must_hardware_id(unsigned long baseio, u8 *pId)
+{
+ u8 oldlcr;
+ u8 efr;
+
+ oldlcr = inb(baseio + UART_LCR);
+ outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+ efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+ efr &= ~MOXA_MUST_EFR_BANK_MASK;
+ efr |= MOXA_MUST_EFR_BANK2;
+
+ outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+ *pId = inb(baseio + MOXA_MUST_HWID_REGISTER);
+ outb(oldlcr, baseio + UART_LCR);
+}
+#endif
+
+static void SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(unsigned long baseio)
+{
+ u8 oldlcr;
+ u8 efr;
+
+ oldlcr = inb(baseio + UART_LCR);
+ outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+ efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+ efr &= ~MOXA_MUST_EFR_SF_MASK;
+
+ outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+ outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_enable_must_tx_software_flow_control(unsigned long baseio)
+{
+ u8 oldlcr;
+ u8 efr;
+
+ oldlcr = inb(baseio + UART_LCR);
+ outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+ efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+ efr &= ~MOXA_MUST_EFR_SF_TX_MASK;
+ efr |= MOXA_MUST_EFR_SF_TX1;
+
+ outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+ outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_disable_must_tx_software_flow_control(unsigned long baseio)
+{
+ u8 oldlcr;
+ u8 efr;
+
+ oldlcr = inb(baseio + UART_LCR);
+ outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+ efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+ efr &= ~MOXA_MUST_EFR_SF_TX_MASK;
+
+ outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+ outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_enable_must_rx_software_flow_control(unsigned long baseio)
+{
+ u8 oldlcr;
+ u8 efr;
+
+ oldlcr = inb(baseio + UART_LCR);
+ outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+ efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+ efr &= ~MOXA_MUST_EFR_SF_RX_MASK;
+ efr |= MOXA_MUST_EFR_SF_RX1;
+
+ outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+ outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_disable_must_rx_software_flow_control(unsigned long baseio)
+{
+ u8 oldlcr;
+ u8 efr;
+
+ oldlcr = inb(baseio + UART_LCR);
+ outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+ efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+ efr &= ~MOXA_MUST_EFR_SF_RX_MASK;
+
+ outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+ outb(oldlcr, baseio + UART_LCR);
+}
+
+#ifdef CONFIG_PCI
+static int __devinit CheckIsMoxaMust(unsigned long io)
+{
+ u8 oldmcr, hwid;
+ int i;
+
+ outb(0, io + UART_LCR);
+ mxser_disable_must_enchance_mode(io);
+ oldmcr = inb(io + UART_MCR);
+ outb(0, io + UART_MCR);
+ mxser_set_must_xon1_value(io, 0x11);
+ if ((hwid = inb(io + UART_MCR)) != 0) {
+ outb(oldmcr, io + UART_MCR);
+ return MOXA_OTHER_UART;
+ }
+
+ mxser_get_must_hardware_id(io, &hwid);
+ for (i = 1; i < UART_INFO_NUM; i++) { /* 0 = OTHER_UART */
+ if (hwid == Gpci_uart_info[i].type)
+ return (int)hwid;
+ }
+ return MOXA_OTHER_UART;
+}
+#endif
+
+static void process_txrx_fifo(struct mxser_port *info)
+{
+ int i;
+
+ if ((info->type == PORT_16450) || (info->type == PORT_8250)) {
+ info->rx_trigger = 1;
+ info->rx_high_water = 1;
+ info->rx_low_water = 1;
+ info->xmit_fifo_size = 1;
+ } else
+ for (i = 0; i < UART_INFO_NUM; i++)
+ if (info->board->chip_flag == Gpci_uart_info[i].type) {
+ info->rx_trigger = Gpci_uart_info[i].rx_trigger;
+ info->rx_low_water = Gpci_uart_info[i].rx_low_water;
+ info->rx_high_water = Gpci_uart_info[i].rx_high_water;
+ info->xmit_fifo_size = Gpci_uart_info[i].xmit_fifo_size;
+ break;
+ }
+}
+
+static unsigned char mxser_get_msr(int baseaddr, int mode, int port)
+{
+ static unsigned char mxser_msr[MXSER_PORTS + 1];
+ unsigned char status = 0;
+
+ status = inb(baseaddr + UART_MSR);
+
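+ /*
+ * Keep the previously latched delta bits (low nibble of the cache) and
+ * merge in the fresh MSR; mode != 0 clears the latch afterwards.
+ */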
+ mxser_msr[port] &= 0x0F;
+ mxser_msr[port] |= status;
+ status = mxser_msr[port];
+ if (mode)
+ mxser_msr[port] = 0;
+
+ return status;
+}
+
+static int mxser_carrier_raised(struct tty_port *port)
+{
+ struct mxser_port *mp = container_of(port, struct mxser_port, port);
+ return (inb(mp->ioaddr + UART_MSR) & UART_MSR_DCD)?1:0;
+}
+
+static void mxser_dtr_rts(struct tty_port *port, int on)
+{
+ struct mxser_port *mp = container_of(port, struct mxser_port, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mp->slock, flags);
+ if (on)
+ outb(inb(mp->ioaddr + UART_MCR) |
+ UART_MCR_DTR | UART_MCR_RTS, mp->ioaddr + UART_MCR);
+ else
+ outb(inb(mp->ioaddr + UART_MCR)&~(UART_MCR_DTR | UART_MCR_RTS),
+ mp->ioaddr + UART_MCR);
+ spin_unlock_irqrestore(&mp->slock, flags);
+}
+
+static int mxser_set_baud(struct tty_struct *tty, long newspd)
+{
+ struct mxser_port *info = tty->driver_data;
+ int quot = 0, baud;
+ unsigned char cval;
+
+ if (!info->ioaddr)
+ return -1;
+
+ if (newspd > info->max_baud)
+ return -1;
+
+ if (newspd == 134) {
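+ /* 134 really means 134.5 baud: 2 * baud_base / 269 is the rounded divisor */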
+ quot = 2 * info->baud_base / 269;
+ tty_encode_baud_rate(tty, 134, 134);
+ } else if (newspd) {
+ quot = info->baud_base / newspd;
+ if (quot == 0)
+ quot = 1;
+ baud = info->baud_base/quot;
+ tty_encode_baud_rate(tty, baud, baud);
+ } else {
+ quot = 0;
+ }
+
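+ /*
+ * Worst-case FIFO drain time in jiffies: xmit_fifo_size characters of
+ * roughly 10 bits each at the programmed rate (baud_base / quot).
+ */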
+ info->timeout = ((info->xmit_fifo_size * HZ * 10 * quot) / info->baud_base);
+ info->timeout += HZ / 50; /* Add .02 seconds of slop */
+
+ if (quot) {
+ info->MCR |= UART_MCR_DTR;
+ outb(info->MCR, info->ioaddr + UART_MCR);
+ } else {
+ info->MCR &= ~UART_MCR_DTR;
+ outb(info->MCR, info->ioaddr + UART_MCR);
+ return 0;
+ }
+
+ cval = inb(info->ioaddr + UART_LCR);
+
+ outb(cval | UART_LCR_DLAB, info->ioaddr + UART_LCR); /* set DLAB */
+
+ outb(quot & 0xff, info->ioaddr + UART_DLL); /* LS of divisor */
+ outb(quot >> 8, info->ioaddr + UART_DLM); /* MS of divisor */
+ outb(cval, info->ioaddr + UART_LCR); /* reset DLAB */
+
+#ifdef BOTHER
+ if (C_BAUD(tty) == BOTHER) {
+ quot = info->baud_base % newspd;
+ quot *= 8;
+ if (quot % newspd > newspd / 2) {
+ quot /= newspd;
+ quot++;
+ } else
+ quot /= newspd;
+
+ mxser_set_must_enum_value(info->ioaddr, quot);
+ } else
+#endif
+ mxser_set_must_enum_value(info->ioaddr, 0);
+
+ return 0;
+}
+
+/*
+ * This routine is called to set the UART divisor registers to match
+ * the specified baud rate for a serial port.
+ */
+static int mxser_change_speed(struct tty_struct *tty,
+ struct ktermios *old_termios)
+{
+ struct mxser_port *info = tty->driver_data;
+ unsigned cflag, cval, fcr;
+ int ret = 0;
+ unsigned char status;
+
+ cflag = tty->termios->c_cflag;
+ if (!info->ioaddr)
+ return ret;
+
+ if (mxser_set_baud_method[tty->index] == 0)
+ mxser_set_baud(tty, tty_get_baud_rate(tty));
+
+ /* byte size and parity */
+ switch (cflag & CSIZE) {
+ case CS5:
+ cval = 0x00;
+ break;
+ case CS6:
+ cval = 0x01;
+ break;
+ case CS7:
+ cval = 0x02;
+ break;
+ case CS8:
+ cval = 0x03;
+ break;
+ default:
+ cval = 0x00;
+ break; /* to keep GCC quiet... */
+ }
+ if (cflag & CSTOPB)
+ cval |= 0x04;
+ if (cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+ if (cflag & CMSPAR)
+ cval |= UART_LCR_SPAR;
+
+ if ((info->type == PORT_8250) || (info->type == PORT_16450)) {
+ if (info->board->chip_flag) {
+ fcr = UART_FCR_ENABLE_FIFO;
+ fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
+ mxser_set_must_fifo_value(info);
+ } else
+ fcr = 0;
+ } else {
+ fcr = UART_FCR_ENABLE_FIFO;
+ if (info->board->chip_flag) {
+ fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
+ mxser_set_must_fifo_value(info);
+ } else {
+ switch (info->rx_trigger) {
+ case 1:
+ fcr |= UART_FCR_TRIGGER_1;
+ break;
+ case 4:
+ fcr |= UART_FCR_TRIGGER_4;
+ break;
+ case 8:
+ fcr |= UART_FCR_TRIGGER_8;
+ break;
+ default:
+ fcr |= UART_FCR_TRIGGER_14;
+ break;
+ }
+ }
+ }
+
+ /* CTS flow control flag and modem status interrupts */
+ info->IER &= ~UART_IER_MSI;
+ info->MCR &= ~UART_MCR_AFE;
+ if (cflag & CRTSCTS) {
+ info->port.flags |= ASYNC_CTS_FLOW;
+ info->IER |= UART_IER_MSI;
+ if ((info->type == PORT_16550A) || (info->board->chip_flag)) {
+ info->MCR |= UART_MCR_AFE;
+ } else {
+ status = inb(info->ioaddr + UART_MSR);
+ if (tty->hw_stopped) {
+ if (status & UART_MSR_CTS) {
+ tty->hw_stopped = 0;
+ if (info->type != PORT_16550A &&
+ !info->board->chip_flag) {
+ outb(info->IER & ~UART_IER_THRI,
+ info->ioaddr +
+ UART_IER);
+ info->IER |= UART_IER_THRI;
+ outb(info->IER, info->ioaddr +
+ UART_IER);
+ }
+ tty_wakeup(tty);
+ }
+ } else {
+ if (!(status & UART_MSR_CTS)) {
+ tty->hw_stopped = 1;
+ if ((info->type != PORT_16550A) &&
+ (!info->board->chip_flag)) {
+ info->IER &= ~UART_IER_THRI;
+ outb(info->IER, info->ioaddr +
+ UART_IER);
+ }
+ }
+ }
+ }
+ } else {
+ info->port.flags &= ~ASYNC_CTS_FLOW;
+ }
+ outb(info->MCR, info->ioaddr + UART_MCR);
+ if (cflag & CLOCAL) {
+ info->port.flags &= ~ASYNC_CHECK_CD;
+ } else {
+ info->port.flags |= ASYNC_CHECK_CD;
+ info->IER |= UART_IER_MSI;
+ }
+ outb(info->IER, info->ioaddr + UART_IER);
+
+ /*
+ * Set up parity check flag
+ */
+ info->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (I_INPCK(tty))
+ info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (I_BRKINT(tty) || I_PARMRK(tty))
+ info->read_status_mask |= UART_LSR_BI;
+
+ info->ignore_status_mask = 0;
+
+ if (I_IGNBRK(tty)) {
+ info->ignore_status_mask |= UART_LSR_BI;
+ info->read_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators, ignore
+ * overruns too (for real raw support).
+ */
+ if (I_IGNPAR(tty)) {
+ info->ignore_status_mask |=
+ UART_LSR_OE |
+ UART_LSR_PE |
+ UART_LSR_FE;
+ info->read_status_mask |=
+ UART_LSR_OE |
+ UART_LSR_PE |
+ UART_LSR_FE;
+ }
+ }
+ if (info->board->chip_flag) {
+ mxser_set_must_xon1_value(info->ioaddr, START_CHAR(tty));
+ mxser_set_must_xoff1_value(info->ioaddr, STOP_CHAR(tty));
+ if (I_IXON(tty)) {
+ mxser_enable_must_rx_software_flow_control(
+ info->ioaddr);
+ } else {
+ mxser_disable_must_rx_software_flow_control(
+ info->ioaddr);
+ }
+ if (I_IXOFF(tty)) {
+ mxser_enable_must_tx_software_flow_control(
+ info->ioaddr);
+ } else {
+ mxser_disable_must_tx_software_flow_control(
+ info->ioaddr);
+ }
+ }
+
+
+ outb(fcr, info->ioaddr + UART_FCR); /* set fcr */
+ outb(cval, info->ioaddr + UART_LCR);
+
+ return ret;
+}
+
+static void mxser_check_modem_status(struct tty_struct *tty,
+ struct mxser_port *port, int status)
+{
+ /* update input line counters */
+ if (status & UART_MSR_TERI)
+ port->icount.rng++;
+ if (status & UART_MSR_DDSR)
+ port->icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ port->icount.dcd++;
+ if (status & UART_MSR_DCTS)
+ port->icount.cts++;
+ port->mon_data.modem_status = status;
+ wake_up_interruptible(&port->port.delta_msr_wait);
+
+ if ((port->port.flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) {
+ if (status & UART_MSR_DCD)
+ wake_up_interruptible(&port->port.open_wait);
+ }
+
+ if (port->port.flags & ASYNC_CTS_FLOW) {
+ if (tty->hw_stopped) {
+ if (status & UART_MSR_CTS) {
+ tty->hw_stopped = 0;
+
+ if ((port->type != PORT_16550A) &&
+ (!port->board->chip_flag)) {
+ outb(port->IER & ~UART_IER_THRI,
+ port->ioaddr + UART_IER);
+ port->IER |= UART_IER_THRI;
+ outb(port->IER, port->ioaddr +
+ UART_IER);
+ }
+ tty_wakeup(tty);
+ }
+ } else {
+ if (!(status & UART_MSR_CTS)) {
+ tty->hw_stopped = 1;
+ if (port->type != PORT_16550A &&
+ !port->board->chip_flag) {
+ port->IER &= ~UART_IER_THRI;
+ outb(port->IER, port->ioaddr +
+ UART_IER);
+ }
+ }
+ }
+ }
+}
+
+static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct mxser_port *info = container_of(port, struct mxser_port, port);
+ unsigned long page;
+ unsigned long flags;
+
+ page = __get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&info->slock, flags);
+
+ if (!info->ioaddr || !info->type) {
+ set_bit(TTY_IO_ERROR, &tty->flags);
+ free_page(page);
+ spin_unlock_irqrestore(&info->slock, flags);
+ return 0;
+ }
+ info->port.xmit_buf = (unsigned char *) page;
+
+ /*
+ * Clear the FIFO buffers and disable them
+ * (they will be reenabled in mxser_change_speed())
+ */
+ if (info->board->chip_flag)
+ outb((UART_FCR_CLEAR_RCVR |
+ UART_FCR_CLEAR_XMIT |
+ MOXA_MUST_FCR_GDA_MODE_ENABLE), info->ioaddr + UART_FCR);
+ else
+ outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
+ info->ioaddr + UART_FCR);
+
+ /*
+ * At this point there's no way the LSR could still be 0xFF;
+ * if it is, then bail out, because there's likely no UART
+ * here.
+ */
+ if (inb(info->ioaddr + UART_LSR) == 0xff) {
+ spin_unlock_irqrestore(&info->slock, flags);
+ if (capable(CAP_SYS_ADMIN)) {
+ set_bit(TTY_IO_ERROR, &tty->flags);
+ return 0;
+ } else
+ return -ENODEV;
+ }
+
+ /*
+ * Clear the interrupt registers.
+ */
+ (void) inb(info->ioaddr + UART_LSR);
+ (void) inb(info->ioaddr + UART_RX);
+ (void) inb(info->ioaddr + UART_IIR);
+ (void) inb(info->ioaddr + UART_MSR);
+
+ /*
+ * Now, initialize the UART
+ */
+ outb(UART_LCR_WLEN8, info->ioaddr + UART_LCR); /* reset DLAB */
+ info->MCR = UART_MCR_DTR | UART_MCR_RTS;
+ outb(info->MCR, info->ioaddr + UART_MCR);
+
+ /*
+ * Finally, enable interrupts
+ */
+ info->IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI;
+
+ if (info->board->chip_flag)
+ info->IER |= MOXA_MUST_IER_EGDAI;
+ outb(info->IER, info->ioaddr + UART_IER); /* enable interrupts */
+
+ /*
+ * And clear the interrupt registers again for luck.
+ */
+ (void) inb(info->ioaddr + UART_LSR);
+ (void) inb(info->ioaddr + UART_RX);
+ (void) inb(info->ioaddr + UART_IIR);
+ (void) inb(info->ioaddr + UART_MSR);
+
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+
+ /*
+ * and set the speed of the serial port
+ */
+ mxser_change_speed(tty, NULL);
+ spin_unlock_irqrestore(&info->slock, flags);
+
+ return 0;
+}
+
+/*
+ * This routine will shutdown a serial port
+ */
+static void mxser_shutdown_port(struct tty_port *port)
+{
+ struct mxser_port *info = container_of(port, struct mxser_port, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->slock, flags);
+
+ /*
+ * clear delta_msr_wait queue to avoid mem leaks: we may free the irq
+ * here so the queue might never be woken up
+ */
+ wake_up_interruptible(&info->port.delta_msr_wait);
+
+ /*
+ * Free the xmit buffer, if necessary
+ */
+ if (info->port.xmit_buf) {
+ free_page((unsigned long) info->port.xmit_buf);
+ info->port.xmit_buf = NULL;
+ }
+
+ info->IER = 0;
+ outb(0x00, info->ioaddr + UART_IER);
+
+ /* clear Rx/Tx FIFO's */
+ if (info->board->chip_flag)
+ outb(UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT |
+ MOXA_MUST_FCR_GDA_MODE_ENABLE,
+ info->ioaddr + UART_FCR);
+ else
+ outb(UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
+ info->ioaddr + UART_FCR);
+
+ /* read data port to reset things */
+ (void) inb(info->ioaddr + UART_RX);
+
+
+ if (info->board->chip_flag)
+ SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+
+ spin_unlock_irqrestore(&info->slock, flags);
+}
+
+/*
+ * This routine is called whenever a serial port is opened. It
+ * enables interrupts for a serial port, linking its async structure
+ * into the IRQ chain. It also performs the serial-specific
+ * initialization for the tty structure.
+ */
+static int mxser_open(struct tty_struct *tty, struct file *filp)
+{
+ struct mxser_port *info;
+ int line;
+
+ line = tty->index;
+ if (line == MXSER_PORTS)
+ return 0;
+ if (line < 0 || line > MXSER_PORTS)
+ return -ENODEV;
+ info = &mxser_boards[line / MXSER_PORTS_PER_BOARD].ports[line % MXSER_PORTS_PER_BOARD];
+ if (!info->ioaddr)
+ return -ENODEV;
+
+ tty->driver_data = info;
+ return tty_port_open(&info->port, tty, filp);
+}
+
+static void mxser_flush_buffer(struct tty_struct *tty)
+{
+ struct mxser_port *info = tty->driver_data;
+ char fcr;
+ unsigned long flags;
+
+
+ spin_lock_irqsave(&info->slock, flags);
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+
+ fcr = inb(info->ioaddr + UART_FCR);
+ outb((fcr | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
+ info->ioaddr + UART_FCR);
+ outb(fcr, info->ioaddr + UART_FCR);
+
+ spin_unlock_irqrestore(&info->slock, flags);
+
+ tty_wakeup(tty);
+}
+
+
+static void mxser_close_port(struct tty_port *port)
+{
+ struct mxser_port *info = container_of(port, struct mxser_port, port);
+ unsigned long timeout;
+ /*
+ * At this point we stop accepting input. To do this, we
+ * disable the receive line status interrupts, and tell the
+ * interrupt driver to stop checking the data ready bit in the
+ * line status register.
+ */
+ info->IER &= ~UART_IER_RLSI;
+ if (info->board->chip_flag)
+ info->IER &= ~MOXA_MUST_RECV_ISR;
+
+ outb(info->IER, info->ioaddr + UART_IER);
+ /*
+ * Before we drop DTR, make sure the UART transmitter
+ * has completely drained; this is especially
+ * important if there is a transmit FIFO!
+ */
+ timeout = jiffies + HZ;
+ while (!(inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT)) {
+ schedule_timeout_interruptible(5);
+ if (time_after(jiffies, timeout))
+ break;
+ }
+}
+
+/*
+ * This routine is called when the serial port gets closed. First, we
+ * wait for the last remaining data to be sent. Then, we unlink its
+ * async structure from the interrupt chain if necessary, and we free
+ * that IRQ if nothing is left in the chain.
+ */
+static void mxser_close(struct tty_struct *tty, struct file *filp)
+{
+ struct mxser_port *info = tty->driver_data;
+ struct tty_port *port = &info->port;
+
+ if (tty->index == MXSER_PORTS || info == NULL)
+ return;
+ if (tty_port_close_start(port, tty, filp) == 0)
+ return;
+ mutex_lock(&port->mutex);
+ mxser_close_port(port);
+ mxser_flush_buffer(tty);
+ mxser_shutdown_port(port);
+ clear_bit(ASYNCB_INITIALIZED, &port->flags);
+ mutex_unlock(&port->mutex);
+ /* Right now the tty_port set is done outside of the close_end helper
+ as we don't yet have everyone using refcounts */
+ tty_port_close_end(port, tty);
+ tty_port_tty_set(port, NULL);
+}
+
+static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ int c, total = 0;
+ struct mxser_port *info = tty->driver_data;
+ unsigned long flags;
+
+ if (!info->port.xmit_buf)
+ return 0;
+
+ while (1) {
+ c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - info->xmit_head));
+ if (c <= 0)
+ break;
+
+ memcpy(info->port.xmit_buf + info->xmit_head, buf, c);
+ spin_lock_irqsave(&info->slock, flags);
+ info->xmit_head = (info->xmit_head + c) &
+ (SERIAL_XMIT_SIZE - 1);
+ info->xmit_cnt += c;
+ spin_unlock_irqrestore(&info->slock, flags);
+
+ buf += c;
+ count -= c;
+ total += c;
+ }
+
+ if (info->xmit_cnt && !tty->stopped) {
+ if (!tty->hw_stopped ||
+ (info->type == PORT_16550A) ||
+ (info->board->chip_flag)) {
+ spin_lock_irqsave(&info->slock, flags);
+ outb(info->IER & ~UART_IER_THRI, info->ioaddr +
+ UART_IER);
+ info->IER |= UART_IER_THRI;
+ outb(info->IER, info->ioaddr + UART_IER);
+ spin_unlock_irqrestore(&info->slock, flags);
+ }
+ }
+ return total;
+}
+
+static int mxser_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ struct mxser_port *info = tty->driver_data;
+ unsigned long flags;
+
+ if (!info->port.xmit_buf)
+ return 0;
+
+ if (info->xmit_cnt >= SERIAL_XMIT_SIZE - 1)
+ return 0;
+
+ spin_lock_irqsave(&info->slock, flags);
+ info->port.xmit_buf[info->xmit_head++] = ch;
+ info->xmit_head &= SERIAL_XMIT_SIZE - 1;
+ info->xmit_cnt++;
+ spin_unlock_irqrestore(&info->slock, flags);
+ if (!tty->stopped) {
+ if (!tty->hw_stopped ||
+ (info->type == PORT_16550A) ||
+ info->board->chip_flag) {
+ spin_lock_irqsave(&info->slock, flags);
+ outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
+ info->IER |= UART_IER_THRI;
+ outb(info->IER, info->ioaddr + UART_IER);
+ spin_unlock_irqrestore(&info->slock, flags);
+ }
+ }
+ return 1;
+}
+
+
+static void mxser_flush_chars(struct tty_struct *tty)
+{
+ struct mxser_port *info = tty->driver_data;
+ unsigned long flags;
+
+ if (info->xmit_cnt <= 0 || tty->stopped || !info->port.xmit_buf ||
+ (tty->hw_stopped && info->type != PORT_16550A &&
+ !info->board->chip_flag))
+ return;
+
+ spin_lock_irqsave(&info->slock, flags);
+
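+	/*
+	 * Write IER with THRI cleared and then set again; this nudges the
+	 * UART into re-raising the transmit-empty interrupt so the ISR
+	 * resumes draining the xmit buffer.
+	 */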
+ outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
+ info->IER |= UART_IER_THRI;
+ outb(info->IER, info->ioaddr + UART_IER);
+
+ spin_unlock_irqrestore(&info->slock, flags);
+}
+
+static int mxser_write_room(struct tty_struct *tty)
+{
+ struct mxser_port *info = tty->driver_data;
+ int ret;
+
+ ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
+ return ret < 0 ? 0 : ret;
+}
+
+static int mxser_chars_in_buffer(struct tty_struct *tty)
+{
+ struct mxser_port *info = tty->driver_data;
+ return info->xmit_cnt;
+}
+
+/*
+ * ------------------------------------------------------------
+ * friends of mxser_ioctl()
+ * ------------------------------------------------------------
+ */
+static int mxser_get_serial_info(struct tty_struct *tty,
+ struct serial_struct __user *retinfo)
+{
+ struct mxser_port *info = tty->driver_data;
+ struct serial_struct tmp = {
+ .type = info->type,
+ .line = tty->index,
+ .port = info->ioaddr,
+ .irq = info->board->irq,
+ .flags = info->port.flags,
+ .baud_base = info->baud_base,
+ .close_delay = info->port.close_delay,
+ .closing_wait = info->port.closing_wait,
+ .custom_divisor = info->custom_divisor,
+ .hub6 = 0
+ };
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+ return 0;
+}
+
+static int mxser_set_serial_info(struct tty_struct *tty,
+ struct serial_struct __user *new_info)
+{
+ struct mxser_port *info = tty->driver_data;
+ struct tty_port *port = &info->port;
+ struct serial_struct new_serial;
+ speed_t baud;
+ unsigned long sl_flags;
+ unsigned int flags;
+ int retval = 0;
+
+ if (!new_info || !info->ioaddr)
+ return -ENODEV;
+ if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
+ return -EFAULT;
+
+ if (new_serial.irq != info->board->irq ||
+ new_serial.port != info->ioaddr)
+ return -EINVAL;
+
+ flags = port->flags & ASYNC_SPD_MASK;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ if ((new_serial.baud_base != info->baud_base) ||
+ (new_serial.close_delay != info->port.close_delay) ||
+ ((new_serial.flags & ~ASYNC_USR_MASK) != (info->port.flags & ~ASYNC_USR_MASK)))
+ return -EPERM;
+ info->port.flags = ((info->port.flags & ~ASYNC_USR_MASK) |
+ (new_serial.flags & ASYNC_USR_MASK));
+ } else {
+ /*
+ * OK, past this point, all the error checking has been done.
+ * At this point, we start making changes.....
+ */
+ port->flags = ((port->flags & ~ASYNC_FLAGS) |
+ (new_serial.flags & ASYNC_FLAGS));
+ port->close_delay = new_serial.close_delay * HZ / 100;
+ port->closing_wait = new_serial.closing_wait * HZ / 100;
+ tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
+ (new_serial.baud_base != info->baud_base ||
+ new_serial.custom_divisor !=
+ info->custom_divisor)) {
+ if (new_serial.custom_divisor == 0)
+ return -EINVAL;
+ baud = new_serial.baud_base / new_serial.custom_divisor;
+ tty_encode_baud_rate(tty, baud, baud);
+ }
+ }
+
+ info->type = new_serial.type;
+
+ process_txrx_fifo(info);
+
+ if (test_bit(ASYNCB_INITIALIZED, &port->flags)) {
+ if (flags != (port->flags & ASYNC_SPD_MASK)) {
+ spin_lock_irqsave(&info->slock, sl_flags);
+ mxser_change_speed(tty, NULL);
+ spin_unlock_irqrestore(&info->slock, sl_flags);
+ }
+ } else {
+ retval = mxser_activate(port, tty);
+ if (retval == 0)
+ set_bit(ASYNCB_INITIALIZED, &port->flags);
+ }
+ return retval;
+}
+
+/*
+ * mxser_get_lsr_info - get line status register info
+ *
+ * Purpose: Let user call ioctl() to get info when the UART physically
+ * is emptied. On bus types like RS485, the transmitter must
+ * release the bus after transmitting. This must be done when the
+ * transmit shift register is empty, not when the transmit holding
+ * register is empty. This functionality allows an RS485 driver to be
+ * written in user space.
+ */
+static int mxser_get_lsr_info(struct mxser_port *info,
+ unsigned int __user *value)
+{
+ unsigned char status;
+ unsigned int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->slock, flags);
+ status = inb(info->ioaddr + UART_LSR);
+ spin_unlock_irqrestore(&info->slock, flags);
+ result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
+ return put_user(result, value);
+}
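+
+/*
+ * Illustrative sketch, not part of the driver: a user-space RS485
+ * handler built on TIOCSERGETLSR would typically poll until the
+ * transmit shift register is really empty before releasing the bus,
+ * with fd being an already-open ttyMI device:
+ *
+ *	unsigned int lsr = 0;
+ *	do {
+ *		if (ioctl(fd, TIOCSERGETLSR, &lsr) < 0)
+ *			break;
+ *	} while (!(lsr & TIOCSER_TEMT));
+ *
+ * after which it is safe to de-assert the transmit-enable line.
+ */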
+
+static int mxser_tiocmget(struct tty_struct *tty)
+{
+ struct mxser_port *info = tty->driver_data;
+ unsigned char control, status;
+ unsigned long flags;
+
+
+ if (tty->index == MXSER_PORTS)
+ return -ENOIOCTLCMD;
+ if (test_bit(TTY_IO_ERROR, &tty->flags))
+ return -EIO;
+
+ control = info->MCR;
+
+ spin_lock_irqsave(&info->slock, flags);
+ status = inb(info->ioaddr + UART_MSR);
+ if (status & UART_MSR_ANY_DELTA)
+ mxser_check_modem_status(tty, info, status);
+ spin_unlock_irqrestore(&info->slock, flags);
+ return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) |
+ ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) |
+ ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) |
+ ((status & UART_MSR_RI) ? TIOCM_RNG : 0) |
+ ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) |
+ ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
+}
+
+static int mxser_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct mxser_port *info = tty->driver_data;
+ unsigned long flags;
+
+
+ if (tty->index == MXSER_PORTS)
+ return -ENOIOCTLCMD;
+ if (test_bit(TTY_IO_ERROR, &tty->flags))
+ return -EIO;
+
+ spin_lock_irqsave(&info->slock, flags);
+
+ if (set & TIOCM_RTS)
+ info->MCR |= UART_MCR_RTS;
+ if (set & TIOCM_DTR)
+ info->MCR |= UART_MCR_DTR;
+
+ if (clear & TIOCM_RTS)
+ info->MCR &= ~UART_MCR_RTS;
+ if (clear & TIOCM_DTR)
+ info->MCR &= ~UART_MCR_DTR;
+
+ outb(info->MCR, info->ioaddr + UART_MCR);
+ spin_unlock_irqrestore(&info->slock, flags);
+ return 0;
+}
+
+static int __init mxser_program_mode(int port)
+{
+ int id, i, j, n;
+
+ outb(0, port);
+ outb(0, port);
+ outb(0, port);
+ (void)inb(port);
+ (void)inb(port);
+ outb(0, port);
+ (void)inb(port);
+
+ id = inb(port + 1) & 0x1F;
+ if ((id != C168_ASIC_ID) &&
+ (id != C104_ASIC_ID) &&
+ (id != C102_ASIC_ID) &&
+ (id != CI132_ASIC_ID) &&
+ (id != CI134_ASIC_ID) &&
+ (id != CI104J_ASIC_ID))
+ return -1;
+ for (i = 0, j = 0; i < 4; i++) {
+ n = inb(port + 2);
+ if (n == 'M') {
+ j = 1;
+ } else if ((j == 1) && (n == 1)) {
+ j = 2;
+ break;
+ } else
+ j = 0;
+ }
+ if (j != 2)
+ id = -2;
+ return id;
+}
+
+static void __init mxser_normal_mode(int port)
+{
+ int i, n;
+
+ outb(0xA5, port + 1);
+ outb(0x80, port + 3);
+ outb(12, port + 0); /* 9600 bps */
+ outb(0, port + 1);
+ outb(0x03, port + 3); /* 8 data bits */
+ outb(0x13, port + 4); /* loop back mode */
+ for (i = 0; i < 16; i++) {
+ n = inb(port + 5);
+ if ((n & 0x61) == 0x60)
+ break;
+ if ((n & 1) == 1)
+ (void)inb(port);
+ }
+ outb(0x00, port + 4);
+}
+
+#define CHIP_SK 0x01 /* Serial Data Clock in Eprom */
+#define CHIP_DO 0x02 /* Serial Data Output in Eprom */
+#define CHIP_CS 0x04 /* Serial Chip Select in Eprom */
+#define CHIP_DI 0x08 /* Serial Data Input in Eprom */
+#define EN_CCMD 0x000 /* Chip's command register */
+#define EN0_RSARLO 0x008 /* Remote start address reg 0 */
+#define EN0_RSARHI 0x009 /* Remote start address reg 1 */
+#define EN0_RCNTLO 0x00A /* Remote byte count reg WR */
+#define EN0_RCNTHI 0x00B /* Remote byte count reg WR */
+#define EN0_DCFG 0x00E /* Data configuration reg WR */
+#define EN0_PORT 0x010 /* Rcv missed frame error counter RD */
+#define ENC_PAGE0 0x000 /* Select page 0 of chip registers */
+#define ENC_PAGE3 0x0C0 /* Select page 3 of chip registers */
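+/*
+ * The CHIP_* bits above are bit-banged by mxser_read_register() below
+ * against what appears to be a 93C46-style configuration EEPROM: CS
+ * selects the part, SK clocks it, DO drives data towards it and DI is
+ * read back from it, one 16-bit word per address.
+ */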
+static int __init mxser_read_register(int port, unsigned short *regs)
+{
+ int i, k, value, id;
+ unsigned int j;
+
+ id = mxser_program_mode(port);
+ if (id < 0)
+ return id;
+ for (i = 0; i < 14; i++) {
+ k = (i & 0x3F) | 0x180;
+ for (j = 0x100; j > 0; j >>= 1) {
+ outb(CHIP_CS, port);
+ if (k & j) {
+ outb(CHIP_CS | CHIP_DO, port);
+ outb(CHIP_CS | CHIP_DO | CHIP_SK, port); /* A? bit of read */
+ } else {
+ outb(CHIP_CS, port);
+ outb(CHIP_CS | CHIP_SK, port); /* A? bit of read */
+ }
+ }
+ (void)inb(port);
+ value = 0;
+ for (k = 0, j = 0x8000; k < 16; k++, j >>= 1) {
+ outb(CHIP_CS, port);
+ outb(CHIP_CS | CHIP_SK, port);
+ if (inb(port) & CHIP_DI)
+ value |= j;
+ }
+ regs[i] = value;
+ outb(0, port);
+ }
+ mxser_normal_mode(port);
+ return id;
+}
+
+static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
+{
+ struct mxser_port *ip;
+ struct tty_port *port;
+ struct tty_struct *tty;
+ int result, status;
+ unsigned int i, j;
+ int ret = 0;
+
+ switch (cmd) {
+ case MOXA_GET_MAJOR:
+ if (printk_ratelimit())
+ printk(KERN_WARNING "mxser: '%s' uses deprecated ioctl "
+ "%x (GET_MAJOR), fix your userspace\n",
+ current->comm, cmd);
+ return put_user(ttymajor, (int __user *)argp);
+
+ case MOXA_CHKPORTENABLE:
+ result = 0;
+ for (i = 0; i < MXSER_BOARDS; i++)
+ for (j = 0; j < MXSER_PORTS_PER_BOARD; j++)
+ if (mxser_boards[i].ports[j].ioaddr)
+ result |= (1 << i);
+ return put_user(result, (unsigned long __user *)argp);
+ case MOXA_GETDATACOUNT:
+ /* The receive side is locked by port->slock but it isn't
+ clear that an exact snapshot is worth copying here */
+ if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
+ ret = -EFAULT;
+ return ret;
+ case MOXA_GETMSTATUS: {
+ struct mxser_mstatus ms, __user *msu = argp;
+ for (i = 0; i < MXSER_BOARDS; i++)
+ for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
+ ip = &mxser_boards[i].ports[j];
+ port = &ip->port;
+ memset(&ms, 0, sizeof(ms));
+
+ mutex_lock(&port->mutex);
+ if (!ip->ioaddr)
+ goto copy;
+
+ tty = tty_port_tty_get(port);
+
+ if (!tty || !tty->termios)
+ ms.cflag = ip->normal_termios.c_cflag;
+ else
+ ms.cflag = tty->termios->c_cflag;
+ tty_kref_put(tty);
+ spin_lock_irq(&ip->slock);
+ status = inb(ip->ioaddr + UART_MSR);
+ spin_unlock_irq(&ip->slock);
+ if (status & UART_MSR_DCD)
+ ms.dcd = 1;
+ if (status & UART_MSR_DSR)
+ ms.dsr = 1;
+ if (status & UART_MSR_CTS)
+ ms.cts = 1;
+ copy:
+ mutex_unlock(&port->mutex);
+ if (copy_to_user(msu, &ms, sizeof(ms)))
+ return -EFAULT;
+ msu++;
+ }
+ return 0;
+ }
+ case MOXA_ASPP_MON_EXT: {
+ struct mxser_mon_ext *me; /* it's 2k, stack unfriendly */
+ unsigned int cflag, iflag, p;
+ u8 opmode;
+
+ me = kzalloc(sizeof(*me), GFP_KERNEL);
+ if (!me)
+ return -ENOMEM;
+
+ for (i = 0, p = 0; i < MXSER_BOARDS; i++) {
+ for (j = 0; j < MXSER_PORTS_PER_BOARD; j++, p++) {
+ if (p >= ARRAY_SIZE(me->rx_cnt)) {
+ i = MXSER_BOARDS;
+ break;
+ }
+ ip = &mxser_boards[i].ports[j];
+ port = &ip->port;
+
+ mutex_lock(&port->mutex);
+ if (!ip->ioaddr) {
+ mutex_unlock(&port->mutex);
+ continue;
+ }
+
+ spin_lock_irq(&ip->slock);
+ status = mxser_get_msr(ip->ioaddr, 0, p);
+
+ if (status & UART_MSR_TERI)
+ ip->icount.rng++;
+ if (status & UART_MSR_DDSR)
+ ip->icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ ip->icount.dcd++;
+ if (status & UART_MSR_DCTS)
+ ip->icount.cts++;
+
+ ip->mon_data.modem_status = status;
+ me->rx_cnt[p] = ip->mon_data.rxcnt;
+ me->tx_cnt[p] = ip->mon_data.txcnt;
+ me->up_rxcnt[p] = ip->mon_data.up_rxcnt;
+ me->up_txcnt[p] = ip->mon_data.up_txcnt;
+ me->modem_status[p] =
+ ip->mon_data.modem_status;
+ spin_unlock_irq(&ip->slock);
+
+ tty = tty_port_tty_get(&ip->port);
+
+ if (!tty || !tty->termios) {
+ cflag = ip->normal_termios.c_cflag;
+ iflag = ip->normal_termios.c_iflag;
+ me->baudrate[p] = tty_termios_baud_rate(&ip->normal_termios);
+ } else {
+ cflag = tty->termios->c_cflag;
+ iflag = tty->termios->c_iflag;
+ me->baudrate[p] = tty_get_baud_rate(tty);
+ }
+ tty_kref_put(tty);
+
+ me->databits[p] = cflag & CSIZE;
+ me->stopbits[p] = cflag & CSTOPB;
+ me->parity[p] = cflag & (PARENB | PARODD |
+ CMSPAR);
+
+ if (cflag & CRTSCTS)
+ me->flowctrl[p] |= 0x03;
+
+ if (iflag & (IXON | IXOFF))
+ me->flowctrl[p] |= 0x0C;
+
+ if (ip->type == PORT_16550A)
+ me->fifo[p] = 1;
+
+ opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2);
+ opmode &= OP_MODE_MASK;
+ me->iftype[p] = opmode;
+ mutex_unlock(&port->mutex);
+ }
+ }
+ if (copy_to_user(argp, me, sizeof(*me)))
+ ret = -EFAULT;
+ kfree(me);
+ return ret;
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static int mxser_cflags_changed(struct mxser_port *info, unsigned long arg,
+ struct async_icount *cprev)
+{
+ struct async_icount cnow;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&info->slock, flags);
+ cnow = info->icount; /* atomic copy */
+ spin_unlock_irqrestore(&info->slock, flags);
+
+ ret = ((arg & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
+ ((arg & TIOCM_CD) && (cnow.dcd != cprev->dcd)) ||
+ ((arg & TIOCM_CTS) && (cnow.cts != cprev->cts));
+
+ *cprev = cnow;
+
+ return ret;
+}
+
+static int mxser_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct mxser_port *info = tty->driver_data;
+ struct tty_port *port = &info->port;
+ struct async_icount cnow;
+ unsigned long flags;
+ void __user *argp = (void __user *)arg;
+ int retval;
+
+ if (tty->index == MXSER_PORTS)
+ return mxser_ioctl_special(cmd, argp);
+
+ if (cmd == MOXA_SET_OP_MODE || cmd == MOXA_GET_OP_MODE) {
+ int p;
+ unsigned long opmode;
+ static unsigned char ModeMask[] = { 0xfc, 0xf3, 0xcf, 0x3f };
+ int shiftbit;
+ unsigned char val, mask;
+
+ p = tty->index % 4;
+ if (cmd == MOXA_SET_OP_MODE) {
+ if (get_user(opmode, (int __user *) argp))
+ return -EFAULT;
+ if (opmode != RS232_MODE &&
+ opmode != RS485_2WIRE_MODE &&
+ opmode != RS422_MODE &&
+ opmode != RS485_4WIRE_MODE)
+ return -EFAULT;
+ mask = ModeMask[p];
+ shiftbit = p * 2;
+ spin_lock_irq(&info->slock);
+ val = inb(info->opmode_ioaddr);
+ val &= mask;
+ val |= (opmode << shiftbit);
+ outb(val, info->opmode_ioaddr);
+ spin_unlock_irq(&info->slock);
+ } else {
+ shiftbit = p * 2;
+ spin_lock_irq(&info->slock);
+ opmode = inb(info->opmode_ioaddr) >> shiftbit;
+ spin_unlock_irq(&info->slock);
+ opmode &= OP_MODE_MASK;
+ if (put_user(opmode, (int __user *)argp))
+ return -EFAULT;
+ }
+ return 0;
+ }
+
+ if (cmd != TIOCGSERIAL && cmd != TIOCMIWAIT &&
+ test_bit(TTY_IO_ERROR, &tty->flags))
+ return -EIO;
+
+ switch (cmd) {
+ case TIOCGSERIAL:
+ mutex_lock(&port->mutex);
+ retval = mxser_get_serial_info(tty, argp);
+ mutex_unlock(&port->mutex);
+ return retval;
+ case TIOCSSERIAL:
+ mutex_lock(&port->mutex);
+ retval = mxser_set_serial_info(tty, argp);
+ mutex_unlock(&port->mutex);
+ return retval;
+ case TIOCSERGETLSR: /* Get line status register */
+ return mxser_get_lsr_info(info, argp);
+ /*
+ * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
+ * - mask passed in arg for lines of interest
+ * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
+ * Caller should use TIOCGICOUNT to see which one it was
+ */
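+	/*
+	 * A user-space caller would pair the two roughly like this
+	 * (illustrative only, not driver code; icnt is a
+	 * struct serial_icounter_struct):
+	 *
+	 *	ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS);
+	 *	ioctl(fd, TIOCGICOUNT, &icnt);
+	 */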
+ case TIOCMIWAIT:
+ spin_lock_irqsave(&info->slock, flags);
+ cnow = info->icount; /* note the counters on entry */
+ spin_unlock_irqrestore(&info->slock, flags);
+
+ return wait_event_interruptible(info->port.delta_msr_wait,
+ mxser_cflags_changed(info, arg, &cnow));
+ case MOXA_HighSpeedOn:
+ return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
+ case MOXA_SDS_RSTICOUNTER:
+ spin_lock_irq(&info->slock);
+ info->mon_data.rxcnt = 0;
+ info->mon_data.txcnt = 0;
+ spin_unlock_irq(&info->slock);
+ return 0;
+
+ case MOXA_ASPP_OQUEUE:{
+ int len, lsr;
+
+ len = mxser_chars_in_buffer(tty);
+ spin_lock_irq(&info->slock);
+ lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_THRE;
+ spin_unlock_irq(&info->slock);
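+		/* count one extra character if the transmit holding
+		   register has not emptied yet */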
+ len += (lsr ? 0 : 1);
+
+ return put_user(len, (int __user *)argp);
+ }
+ case MOXA_ASPP_MON: {
+ int mcr, status;
+
+ spin_lock_irq(&info->slock);
+ status = mxser_get_msr(info->ioaddr, 1, tty->index);
+ mxser_check_modem_status(tty, info, status);
+
+ mcr = inb(info->ioaddr + UART_MCR);
+ spin_unlock_irq(&info->slock);
+
+ if (mcr & MOXA_MUST_MCR_XON_FLAG)
+ info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFHOLD;
+ else
+ info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFHOLD;
+
+ if (mcr & MOXA_MUST_MCR_TX_XON)
+ info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFXENT;
+ else
+ info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFXENT;
+
+ if (tty->hw_stopped)
+ info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD;
+ else
+ info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD;
+
+ if (copy_to_user(argp, &info->mon_data,
+ sizeof(struct mxser_mon)))
+ return -EFAULT;
+
+ return 0;
+ }
+ case MOXA_ASPP_LSTATUS: {
+ if (put_user(info->err_shadow, (unsigned char __user *)argp))
+ return -EFAULT;
+
+ info->err_shadow = 0;
+ return 0;
+ }
+ case MOXA_SET_BAUD_METHOD: {
+ int method;
+
+ if (get_user(method, (int __user *)argp))
+ return -EFAULT;
+ mxser_set_baud_method[tty->index] = method;
+ return put_user(method, (int __user *)argp);
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+/*
+ * Get counters of the input serial line interrupts (DCD, RI, DSR, CTS).
+ * Return: write the counters to the user-passed counter struct.
+ * NB: both 1->0 and 0->1 transitions are counted, except for
+ * RI where only 0->1 is counted.
+ */
+
+static int mxser_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+{
+ struct mxser_port *info = tty->driver_data;
+ struct async_icount cnow;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->slock, flags);
+ cnow = info->icount;
+ spin_unlock_irqrestore(&info->slock, flags);
+
+ icount->frame = cnow.frame;
+ icount->brk = cnow.brk;
+ icount->overrun = cnow.overrun;
+ icount->buf_overrun = cnow.buf_overrun;
+ icount->parity = cnow.parity;
+ icount->rx = cnow.rx;
+ icount->tx = cnow.tx;
+ icount->cts = cnow.cts;
+ icount->dsr = cnow.dsr;
+ icount->rng = cnow.rng;
+ icount->dcd = cnow.dcd;
+ return 0;
+}
+
+static void mxser_stoprx(struct tty_struct *tty)
+{
+ struct mxser_port *info = tty->driver_data;
+
+ info->ldisc_stop_rx = 1;
+ if (I_IXOFF(tty)) {
+ if (info->board->chip_flag) {
+ info->IER &= ~MOXA_MUST_RECV_ISR;
+ outb(info->IER, info->ioaddr + UART_IER);
+ } else {
+ info->x_char = STOP_CHAR(tty);
+ outb(0, info->ioaddr + UART_IER);
+ info->IER |= UART_IER_THRI;
+ outb(info->IER, info->ioaddr + UART_IER);
+ }
+ }
+
+ if (tty->termios->c_cflag & CRTSCTS) {
+ info->MCR &= ~UART_MCR_RTS;
+ outb(info->MCR, info->ioaddr + UART_MCR);
+ }
+}
+
+/*
+ * This routine is called by the upper-layer tty layer to signal that
+ * incoming characters should be throttled.
+ */
+static void mxser_throttle(struct tty_struct *tty)
+{
+ mxser_stoprx(tty);
+}
+
+static void mxser_unthrottle(struct tty_struct *tty)
+{
+ struct mxser_port *info = tty->driver_data;
+
+ /* startrx */
+ info->ldisc_stop_rx = 0;
+ if (I_IXOFF(tty)) {
+ if (info->x_char)
+ info->x_char = 0;
+ else {
+ if (info->board->chip_flag) {
+ info->IER |= MOXA_MUST_RECV_ISR;
+ outb(info->IER, info->ioaddr + UART_IER);
+ } else {
+ info->x_char = START_CHAR(tty);
+ outb(0, info->ioaddr + UART_IER);
+ info->IER |= UART_IER_THRI;
+ outb(info->IER, info->ioaddr + UART_IER);
+ }
+ }
+ }
+
+ if (tty->termios->c_cflag & CRTSCTS) {
+ info->MCR |= UART_MCR_RTS;
+ outb(info->MCR, info->ioaddr + UART_MCR);
+ }
+}
+
+/*
+ * mxser_stop() and mxser_start()
+ *
+ * These routines are called before setting or resetting tty->stopped.
+ * They enable or disable transmitter interrupts, as necessary.
+ */
+static void mxser_stop(struct tty_struct *tty)
+{
+ struct mxser_port *info = tty->driver_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->slock, flags);
+ if (info->IER & UART_IER_THRI) {
+ info->IER &= ~UART_IER_THRI;
+ outb(info->IER, info->ioaddr + UART_IER);
+ }
+ spin_unlock_irqrestore(&info->slock, flags);
+}
+
+static void mxser_start(struct tty_struct *tty)
+{
+ struct mxser_port *info = tty->driver_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->slock, flags);
+ if (info->xmit_cnt && info->port.xmit_buf) {
+ outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
+ info->IER |= UART_IER_THRI;
+ outb(info->IER, info->ioaddr + UART_IER);
+ }
+ spin_unlock_irqrestore(&info->slock, flags);
+}
+
+static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ struct mxser_port *info = tty->driver_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->slock, flags);
+ mxser_change_speed(tty, old_termios);
+ spin_unlock_irqrestore(&info->slock, flags);
+
+ if ((old_termios->c_cflag & CRTSCTS) &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ mxser_start(tty);
+ }
+
+ /* Handle sw stopped */
+ if ((old_termios->c_iflag & IXON) &&
+ !(tty->termios->c_iflag & IXON)) {
+ tty->stopped = 0;
+
+ if (info->board->chip_flag) {
+ spin_lock_irqsave(&info->slock, flags);
+ mxser_disable_must_rx_software_flow_control(
+ info->ioaddr);
+ spin_unlock_irqrestore(&info->slock, flags);
+ }
+
+ mxser_start(tty);
+ }
+}
+
+/*
+ * mxser_wait_until_sent() --- wait until the transmitter is empty
+ */
+static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct mxser_port *info = tty->driver_data;
+ unsigned long orig_jiffies, char_time;
+ unsigned long flags;
+ int lsr;
+
+ if (info->type == PORT_UNKNOWN)
+ return;
+
+ if (info->xmit_fifo_size == 0)
+ return; /* Just in case.... */
+
+ orig_jiffies = jiffies;
+ /*
+ * Set the check interval to be 1/5 of the estimated time to
+ * send a single character, and make it at least 1. The check
+ * interval should also be less than the timeout.
+ *
+ * Note: we have to use pretty tight timings here to satisfy
+ * the NIST-PCTS.
+ */
+ char_time = (info->timeout - HZ / 50) / info->xmit_fifo_size;
+ char_time = char_time / 5;
+ if (char_time == 0)
+ char_time = 1;
+ if (timeout && timeout < char_time)
+ char_time = timeout;
+ /*
+ * If the transmitter hasn't cleared in twice the approximate
+ * amount of time to send the entire FIFO, it probably won't
+ * ever clear. This assumes the UART isn't doing flow
+ * control, which is currently the case. Hence, if it ever
+ * takes longer than info->timeout, this is probably due to a
+ * UART bug of some kind. So, we clamp the timeout parameter at
+ * 2*info->timeout.
+ */
+ if (!timeout || timeout > 2 * info->timeout)
+ timeout = 2 * info->timeout;
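+	/*
+	 * Worked example with made-up numbers: a 16-byte FIFO and an
+	 * info->timeout of 30 jiffies at HZ=100 give char_time =
+	 * ((30 - 2) / 16) / 5 = 0, clamped up to 1 jiffy, while the
+	 * overall wait is capped at 2 * 30 = 60 jiffies.
+	 */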
+#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+ printk(KERN_DEBUG "In rs_wait_until_sent(%d) check=%lu...",
+ timeout, char_time);
+ printk("jiff=%lu...", jiffies);
+#endif
+ spin_lock_irqsave(&info->slock, flags);
+ while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) {
+#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+ printk("lsr = %d (jiff=%lu)...", lsr, jiffies);
+#endif
+ spin_unlock_irqrestore(&info->slock, flags);
+ schedule_timeout_interruptible(char_time);
+ spin_lock_irqsave(&info->slock, flags);
+ if (signal_pending(current))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ break;
+ }
+ spin_unlock_irqrestore(&info->slock, flags);
+ set_current_state(TASK_RUNNING);
+
+#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+ printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
+#endif
+}
+
+/*
+ * This routine is called by tty_hangup() when a hangup is signaled.
+ */
+static void mxser_hangup(struct tty_struct *tty)
+{
+ struct mxser_port *info = tty->driver_data;
+
+ mxser_flush_buffer(tty);
+ tty_port_hangup(&info->port);
+}
+
+/*
+ * mxser_rs_break() --- routine which turns the break handling on or off
+ */
+static int mxser_rs_break(struct tty_struct *tty, int break_state)
+{
+ struct mxser_port *info = tty->driver_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->slock, flags);
+ if (break_state == -1)
+ outb(inb(info->ioaddr + UART_LCR) | UART_LCR_SBC,
+ info->ioaddr + UART_LCR);
+ else
+ outb(inb(info->ioaddr + UART_LCR) & ~UART_LCR_SBC,
+ info->ioaddr + UART_LCR);
+ spin_unlock_irqrestore(&info->slock, flags);
+ return 0;
+}
+
+static void mxser_receive_chars(struct tty_struct *tty,
+ struct mxser_port *port, int *status)
+{
+ unsigned char ch, gdl;
+ int ignored = 0;
+ int cnt = 0;
+ int recv_room;
+ int max = 256;
+
+ recv_room = tty->receive_room;
+ if (recv_room == 0 && !port->ldisc_stop_rx)
+ mxser_stoprx(tty);
+ if (port->board->chip_flag != MOXA_OTHER_UART) {
+
+ if (*status & UART_LSR_SPECIAL)
+ goto intr_old;
+ if (port->board->chip_flag == MOXA_MUST_MU860_HWID &&
+ (*status & MOXA_MUST_LSR_RERR))
+ goto intr_old;
+ if (*status & MOXA_MUST_LSR_RERR)
+ goto intr_old;
+
+ gdl = inb(port->ioaddr + MOXA_MUST_GDL_REGISTER);
+
+ if (port->board->chip_flag == MOXA_MUST_MU150_HWID)
+ gdl &= MOXA_MUST_GDL_MASK;
+ if (gdl >= recv_room) {
+ if (!port->ldisc_stop_rx)
+ mxser_stoprx(tty);
+ }
+ while (gdl--) {
+ ch = inb(port->ioaddr + UART_RX);
+ tty_insert_flip_char(tty, ch, 0);
+ cnt++;
+ }
+ goto end_intr;
+ }
+intr_old:
+
+ do {
+ if (max-- < 0)
+ break;
+
+ ch = inb(port->ioaddr + UART_RX);
+ if (port->board->chip_flag && (*status & UART_LSR_OE))
+ outb(0x23, port->ioaddr + UART_FCR);
+ *status &= port->read_status_mask;
+ if (*status & port->ignore_status_mask) {
+ if (++ignored > 100)
+ break;
+ } else {
+ char flag = 0;
+ if (*status & UART_LSR_SPECIAL) {
+ if (*status & UART_LSR_BI) {
+ flag = TTY_BREAK;
+ port->icount.brk++;
+
+ if (port->port.flags & ASYNC_SAK)
+ do_SAK(tty);
+ } else if (*status & UART_LSR_PE) {
+ flag = TTY_PARITY;
+ port->icount.parity++;
+ } else if (*status & UART_LSR_FE) {
+ flag = TTY_FRAME;
+ port->icount.frame++;
+ } else if (*status & UART_LSR_OE) {
+ flag = TTY_OVERRUN;
+ port->icount.overrun++;
+ } else
+ flag = TTY_BREAK;
+ }
+ tty_insert_flip_char(tty, ch, flag);
+ cnt++;
+ if (cnt >= recv_room) {
+ if (!port->ldisc_stop_rx)
+ mxser_stoprx(tty);
+ break;
+ }
+
+ }
+
+ if (port->board->chip_flag)
+ break;
+
+ *status = inb(port->ioaddr + UART_LSR);
+ } while (*status & UART_LSR_DR);
+
+end_intr:
+ mxvar_log.rxcnt[tty->index] += cnt;
+ port->mon_data.rxcnt += cnt;
+ port->mon_data.up_rxcnt += cnt;
+
+ /*
+ * We are called from an interrupt context with &port->slock
+ * being held. Drop it temporarily in order to prevent
+ * recursive locking.
+ */
+ spin_unlock(&port->slock);
+ tty_flip_buffer_push(tty);
+ spin_lock(&port->slock);
+}
+
+static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port)
+{
+ int count, cnt;
+
+ if (port->x_char) {
+ outb(port->x_char, port->ioaddr + UART_TX);
+ port->x_char = 0;
+ mxvar_log.txcnt[tty->index]++;
+ port->mon_data.txcnt++;
+ port->mon_data.up_txcnt++;
+ port->icount.tx++;
+ return;
+ }
+
+ if (port->port.xmit_buf == NULL)
+ return;
+
+ if (port->xmit_cnt <= 0 || tty->stopped ||
+ (tty->hw_stopped &&
+ (port->type != PORT_16550A) &&
+ (!port->board->chip_flag))) {
+ port->IER &= ~UART_IER_THRI;
+ outb(port->IER, port->ioaddr + UART_IER);
+ return;
+ }
+
+ cnt = port->xmit_cnt;
+ count = port->xmit_fifo_size;
+ do {
+ outb(port->port.xmit_buf[port->xmit_tail++],
+ port->ioaddr + UART_TX);
+ port->xmit_tail = port->xmit_tail & (SERIAL_XMIT_SIZE - 1);
+ if (--port->xmit_cnt <= 0)
+ break;
+ } while (--count > 0);
+ mxvar_log.txcnt[tty->index] += (cnt - port->xmit_cnt);
+
+ port->mon_data.txcnt += (cnt - port->xmit_cnt);
+ port->mon_data.up_txcnt += (cnt - port->xmit_cnt);
+ port->icount.tx += (cnt - port->xmit_cnt);
+
+ if (port->xmit_cnt < WAKEUP_CHARS)
+ tty_wakeup(tty);
+
+ if (port->xmit_cnt <= 0) {
+ port->IER &= ~UART_IER_THRI;
+ outb(port->IER, port->ioaddr + UART_IER);
+ }
+}
+
+/*
+ * This is the serial driver's generic interrupt routine
+ */
+static irqreturn_t mxser_interrupt(int irq, void *dev_id)
+{
+ int status, iir, i;
+ struct mxser_board *brd = NULL;
+ struct mxser_port *port;
+ int max, irqbits, bits, msr;
+ unsigned int int_cnt, pass_counter = 0;
+ int handled = IRQ_NONE;
+ struct tty_struct *tty;
+
+ for (i = 0; i < MXSER_BOARDS; i++)
+ if (dev_id == &mxser_boards[i]) {
+ brd = dev_id;
+ break;
+ }
+
+ if (i == MXSER_BOARDS)
+ goto irq_stop;
+ if (brd == NULL)
+ goto irq_stop;
+ max = brd->info->nports;
+ while (pass_counter++ < MXSER_ISR_PASS_LIMIT) {
+ irqbits = inb(brd->vector) & brd->vector_mask;
+ if (irqbits == brd->vector_mask)
+ break;
+
+ handled = IRQ_HANDLED;
+ for (i = 0, bits = 1; i < max; i++, irqbits |= bits, bits <<= 1) {
+ if (irqbits == brd->vector_mask)
+ break;
+ if (bits & irqbits)
+ continue;
+ port = &brd->ports[i];
+
+ int_cnt = 0;
+ spin_lock(&port->slock);
+ do {
+ iir = inb(port->ioaddr + UART_IIR);
+ if (iir & UART_IIR_NO_INT)
+ break;
+ iir &= MOXA_MUST_IIR_MASK;
+ tty = tty_port_tty_get(&port->port);
+ if (!tty ||
+ (port->port.flags & ASYNC_CLOSING) ||
+ !(port->port.flags &
+ ASYNC_INITIALIZED)) {
+ status = inb(port->ioaddr + UART_LSR);
+ outb(0x27, port->ioaddr + UART_FCR);
+ inb(port->ioaddr + UART_MSR);
+ tty_kref_put(tty);
+ break;
+ }
+
+ status = inb(port->ioaddr + UART_LSR);
+
+ if (status & UART_LSR_PE)
+ port->err_shadow |= NPPI_NOTIFY_PARITY;
+ if (status & UART_LSR_FE)
+ port->err_shadow |= NPPI_NOTIFY_FRAMING;
+ if (status & UART_LSR_OE)
+ port->err_shadow |=
+ NPPI_NOTIFY_HW_OVERRUN;
+ if (status & UART_LSR_BI)
+ port->err_shadow |= NPPI_NOTIFY_BREAK;
+
+ if (port->board->chip_flag) {
+ if (iir == MOXA_MUST_IIR_GDA ||
+ iir == MOXA_MUST_IIR_RDA ||
+ iir == MOXA_MUST_IIR_RTO ||
+ iir == MOXA_MUST_IIR_LSR)
+ mxser_receive_chars(tty, port,
+ &status);
+
+ } else {
+ status &= port->read_status_mask;
+ if (status & UART_LSR_DR)
+ mxser_receive_chars(tty, port,
+ &status);
+ }
+ msr = inb(port->ioaddr + UART_MSR);
+ if (msr & UART_MSR_ANY_DELTA)
+ mxser_check_modem_status(tty, port, msr);
+
+ if (port->board->chip_flag) {
+ if (iir == 0x02 && (status &
+ UART_LSR_THRE))
+ mxser_transmit_chars(tty, port);
+ } else {
+ if (status & UART_LSR_THRE)
+ mxser_transmit_chars(tty, port);
+ }
+ tty_kref_put(tty);
+ } while (int_cnt++ < MXSER_ISR_PASS_LIMIT);
+ spin_unlock(&port->slock);
+ }
+ }
+
+irq_stop:
+ return handled;
+}
+
+static const struct tty_operations mxser_ops = {
+ .open = mxser_open,
+ .close = mxser_close,
+ .write = mxser_write,
+ .put_char = mxser_put_char,
+ .flush_chars = mxser_flush_chars,
+ .write_room = mxser_write_room,
+ .chars_in_buffer = mxser_chars_in_buffer,
+ .flush_buffer = mxser_flush_buffer,
+ .ioctl = mxser_ioctl,
+ .throttle = mxser_throttle,
+ .unthrottle = mxser_unthrottle,
+ .set_termios = mxser_set_termios,
+ .stop = mxser_stop,
+ .start = mxser_start,
+ .hangup = mxser_hangup,
+ .break_ctl = mxser_rs_break,
+ .wait_until_sent = mxser_wait_until_sent,
+ .tiocmget = mxser_tiocmget,
+ .tiocmset = mxser_tiocmset,
+ .get_icount = mxser_get_icount,
+};
+
+struct tty_port_operations mxser_port_ops = {
+ .carrier_raised = mxser_carrier_raised,
+ .dtr_rts = mxser_dtr_rts,
+ .activate = mxser_activate,
+ .shutdown = mxser_shutdown_port,
+};
+
+/*
+ * The MOXA Smartio/Industio serial driver boot-time initialization code!
+ */
+
+static void mxser_release_ISA_res(struct mxser_board *brd)
+{
+ free_irq(brd->irq, brd);
+ release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
+ release_region(brd->vector, 1);
+}
+
+static int __devinit mxser_initbrd(struct mxser_board *brd,
+ struct pci_dev *pdev)
+{
+ struct mxser_port *info;
+ unsigned int i;
+ int retval;
+
+ printk(KERN_INFO "mxser: max. baud rate = %d bps\n",
+ brd->ports[0].max_baud);
+
+ for (i = 0; i < brd->info->nports; i++) {
+ info = &brd->ports[i];
+ tty_port_init(&info->port);
+ info->port.ops = &mxser_port_ops;
+ info->board = brd;
+ info->stop_rx = 0;
+ info->ldisc_stop_rx = 0;
+
+		/* Enhanced mode is enabled here */
+ if (brd->chip_flag != MOXA_OTHER_UART)
+ mxser_enable_must_enchance_mode(info->ioaddr);
+
+ info->port.flags = ASYNC_SHARE_IRQ;
+ info->type = brd->uart_type;
+
+ process_txrx_fifo(info);
+
+ info->custom_divisor = info->baud_base * 16;
+ info->port.close_delay = 5 * HZ / 10;
+ info->port.closing_wait = 30 * HZ;
+ info->normal_termios = mxvar_sdriver->init_termios;
+ memset(&info->mon_data, 0, sizeof(struct mxser_mon));
+ info->err_shadow = 0;
+ spin_lock_init(&info->slock);
+
+ /* before set INT ISR, disable all int */
+ outb(inb(info->ioaddr + UART_IER) & 0xf0,
+ info->ioaddr + UART_IER);
+ }
+
+ retval = request_irq(brd->irq, mxser_interrupt, IRQF_SHARED, "mxser",
+ brd);
+ if (retval)
+ printk(KERN_ERR "Board %s: Request irq failed, IRQ (%d) may "
+ "conflict with another device.\n",
+ brd->info->name, brd->irq);
+
+ return retval;
+}
+
+static int __init mxser_get_ISA_conf(int cap, struct mxser_board *brd)
+{
+ int id, i, bits;
+ unsigned short regs[16], irq;
+ unsigned char scratch, scratch2;
+
+ brd->chip_flag = MOXA_OTHER_UART;
+
+ id = mxser_read_register(cap, regs);
+ switch (id) {
+ case C168_ASIC_ID:
+ brd->info = &mxser_cards[0];
+ break;
+ case C104_ASIC_ID:
+ brd->info = &mxser_cards[1];
+ break;
+ case CI104J_ASIC_ID:
+ brd->info = &mxser_cards[2];
+ break;
+ case C102_ASIC_ID:
+ brd->info = &mxser_cards[5];
+ break;
+ case CI132_ASIC_ID:
+ brd->info = &mxser_cards[6];
+ break;
+ case CI134_ASIC_ID:
+ brd->info = &mxser_cards[7];
+ break;
+ default:
+ return 0;
+ }
+
+ irq = 0;
+	/* Some ISA cards have 2 ports but are exposed as 4-port boards (why?);
+	   the MXSER_HAS2 flag marks them so the configuration is read in the
+	   2-port layout here. */
+ if (brd->info->nports == 2 || (brd->info->flags & MXSER_HAS2)) {
+ irq = regs[9] & 0xF000;
+ irq = irq | (irq >> 4);
+ if (irq != (regs[9] & 0xFF00))
+ goto err_irqconflict;
+ } else if (brd->info->nports == 4) {
+ irq = regs[9] & 0xF000;
+ irq = irq | (irq >> 4);
+ irq = irq | (irq >> 8);
+ if (irq != regs[9])
+ goto err_irqconflict;
+ } else if (brd->info->nports == 8) {
+ irq = regs[9] & 0xF000;
+ irq = irq | (irq >> 4);
+ irq = irq | (irq >> 8);
+ if ((irq != regs[9]) || (irq != regs[10]))
+ goto err_irqconflict;
+ }
+
+ if (!irq) {
+ printk(KERN_ERR "mxser: interrupt number unset\n");
+ return -EIO;
+ }
+ brd->irq = ((int)(irq & 0xF000) >> 12);
+ for (i = 0; i < 8; i++)
+ brd->ports[i].ioaddr = (int) regs[i + 1] & 0xFFF8;
+ if ((regs[12] & 0x80) == 0) {
+ printk(KERN_ERR "mxser: invalid interrupt vector\n");
+ return -EIO;
+ }
+ brd->vector = (int)regs[11]; /* interrupt vector */
+ if (id == 1)
+ brd->vector_mask = 0x00FF;
+ else
+ brd->vector_mask = 0x000F;
+ for (i = 7, bits = 0x0100; i >= 0; i--, bits <<= 1) {
+ if (regs[12] & bits) {
+ brd->ports[i].baud_base = 921600;
+ brd->ports[i].max_baud = 921600;
+ } else {
+ brd->ports[i].baud_base = 115200;
+ brd->ports[i].max_baud = 115200;
+ }
+ }
+ scratch2 = inb(cap + UART_LCR) & (~UART_LCR_DLAB);
+ outb(scratch2 | UART_LCR_DLAB, cap + UART_LCR);
+ outb(0, cap + UART_EFR); /* EFR is the same as FCR */
+ outb(scratch2, cap + UART_LCR);
+ outb(UART_FCR_ENABLE_FIFO, cap + UART_FCR);
+ scratch = inb(cap + UART_IIR);
+
+ if (scratch & 0xC0)
+ brd->uart_type = PORT_16550A;
+ else
+ brd->uart_type = PORT_16450;
+ if (!request_region(brd->ports[0].ioaddr, 8 * brd->info->nports,
+ "mxser(IO)")) {
+ printk(KERN_ERR "mxser: can't request ports I/O region: "
+ "0x%.8lx-0x%.8lx\n",
+ brd->ports[0].ioaddr, brd->ports[0].ioaddr +
+ 8 * brd->info->nports - 1);
+ return -EIO;
+ }
+ if (!request_region(brd->vector, 1, "mxser(vector)")) {
+ release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
+ printk(KERN_ERR "mxser: can't request interrupt vector region: "
+ "0x%.8lx-0x%.8lx\n",
+				brd->vector, brd->vector);
+ return -EIO;
+ }
+ return brd->info->nports;
+
+err_irqconflict:
+ printk(KERN_ERR "mxser: invalid interrupt number\n");
+ return -EIO;
+}
+
+static int __devinit mxser_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+#ifdef CONFIG_PCI
+ struct mxser_board *brd;
+ unsigned int i, j;
+ unsigned long ioaddress;
+ int retval = -EINVAL;
+
+ for (i = 0; i < MXSER_BOARDS; i++)
+ if (mxser_boards[i].info == NULL)
+ break;
+
+ if (i >= MXSER_BOARDS) {
+ dev_err(&pdev->dev, "too many boards found (maximum %d), board "
+ "not configured\n", MXSER_BOARDS);
+ goto err;
+ }
+
+ brd = &mxser_boards[i];
+ brd->idx = i * MXSER_PORTS_PER_BOARD;
+ dev_info(&pdev->dev, "found MOXA %s board (BusNo=%d, DevNo=%d)\n",
+ mxser_cards[ent->driver_data].name,
+ pdev->bus->number, PCI_SLOT(pdev->devfn));
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "PCI enable failed\n");
+ goto err;
+ }
+
+ /* io address */
+ ioaddress = pci_resource_start(pdev, 2);
+ retval = pci_request_region(pdev, 2, "mxser(IO)");
+ if (retval)
+ goto err_dis;
+
+ brd->info = &mxser_cards[ent->driver_data];
+ for (i = 0; i < brd->info->nports; i++)
+ brd->ports[i].ioaddr = ioaddress + 8 * i;
+
+ /* vector */
+ ioaddress = pci_resource_start(pdev, 3);
+ retval = pci_request_region(pdev, 3, "mxser(vector)");
+ if (retval)
+ goto err_zero;
+ brd->vector = ioaddress;
+
+ /* irq */
+ brd->irq = pdev->irq;
+
+ brd->chip_flag = CheckIsMoxaMust(brd->ports[0].ioaddr);
+ brd->uart_type = PORT_16550A;
+ brd->vector_mask = 0;
+
+ for (i = 0; i < brd->info->nports; i++) {
+ for (j = 0; j < UART_INFO_NUM; j++) {
+ if (Gpci_uart_info[j].type == brd->chip_flag) {
+ brd->ports[i].max_baud =
+ Gpci_uart_info[j].max_baud;
+
+ /* exception....CP-102 */
+ if (brd->info->flags & MXSER_HIGHBAUD)
+ brd->ports[i].max_baud = 921600;
+ break;
+ }
+ }
+ }
+
+ if (brd->chip_flag == MOXA_MUST_MU860_HWID) {
+ for (i = 0; i < brd->info->nports; i++) {
+ if (i < 4)
+ brd->ports[i].opmode_ioaddr = ioaddress + 4;
+ else
+ brd->ports[i].opmode_ioaddr = ioaddress + 0x0c;
+ }
+ outb(0, ioaddress + 4); /* default set to RS232 mode */
+ outb(0, ioaddress + 0x0c); /* default set to RS232 mode */
+ }
+
+ for (i = 0; i < brd->info->nports; i++) {
+ brd->vector_mask |= (1 << i);
+ brd->ports[i].baud_base = 921600;
+ }
+
+ /* mxser_initbrd will hook ISR. */
+ retval = mxser_initbrd(brd, pdev);
+ if (retval)
+ goto err_rel3;
+
+ for (i = 0; i < brd->info->nports; i++)
+ tty_register_device(mxvar_sdriver, brd->idx + i, &pdev->dev);
+
+ pci_set_drvdata(pdev, brd);
+
+ return 0;
+err_rel3:
+ pci_release_region(pdev, 3);
+err_zero:
+ brd->info = NULL;
+ pci_release_region(pdev, 2);
+err_dis:
+ pci_disable_device(pdev);
+err:
+ return retval;
+#else
+ return -ENODEV;
+#endif
+}
+
+static void __devexit mxser_remove(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PCI
+ struct mxser_board *brd = pci_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < brd->info->nports; i++)
+ tty_unregister_device(mxvar_sdriver, brd->idx + i);
+
+ free_irq(pdev->irq, brd);
+ pci_release_region(pdev, 2);
+ pci_release_region(pdev, 3);
+ pci_disable_device(pdev);
+ brd->info = NULL;
+#endif
+}
+
+static struct pci_driver mxser_driver = {
+ .name = "mxser",
+ .id_table = mxser_pcibrds,
+ .probe = mxser_probe,
+ .remove = __devexit_p(mxser_remove)
+};
+
+static int __init mxser_module_init(void)
+{
+ struct mxser_board *brd;
+ unsigned int b, i, m;
+ int retval;
+
+ mxvar_sdriver = alloc_tty_driver(MXSER_PORTS + 1);
+ if (!mxvar_sdriver)
+ return -ENOMEM;
+
+ printk(KERN_INFO "MOXA Smartio/Industio family driver version %s\n",
+ MXSER_VERSION);
+
+ /* Initialize the tty_driver structure */
+ mxvar_sdriver->owner = THIS_MODULE;
+ mxvar_sdriver->magic = TTY_DRIVER_MAGIC;
+ mxvar_sdriver->name = "ttyMI";
+ mxvar_sdriver->major = ttymajor;
+ mxvar_sdriver->minor_start = 0;
+ mxvar_sdriver->num = MXSER_PORTS + 1;
+ mxvar_sdriver->type = TTY_DRIVER_TYPE_SERIAL;
+ mxvar_sdriver->subtype = SERIAL_TYPE_NORMAL;
+ mxvar_sdriver->init_termios = tty_std_termios;
+ mxvar_sdriver->init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL;
+ mxvar_sdriver->flags = TTY_DRIVER_REAL_RAW|TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(mxvar_sdriver, &mxser_ops);
+
+ retval = tty_register_driver(mxvar_sdriver);
+ if (retval) {
+ printk(KERN_ERR "Couldn't install MOXA Smartio/Industio family "
+ "tty driver !\n");
+ goto err_put;
+ }
+
+ /* Start finding ISA boards here */
+ for (m = 0, b = 0; b < MXSER_BOARDS; b++) {
+ if (!ioaddr[b])
+ continue;
+
+ brd = &mxser_boards[m];
+ retval = mxser_get_ISA_conf(ioaddr[b], brd);
+ if (retval <= 0) {
+ brd->info = NULL;
+ continue;
+ }
+
+ printk(KERN_INFO "mxser: found MOXA %s board (CAP=0x%lx)\n",
+ brd->info->name, ioaddr[b]);
+
+ /* mxser_initbrd will hook ISR. */
+ if (mxser_initbrd(brd, NULL) < 0) {
+ brd->info = NULL;
+ continue;
+ }
+
+ brd->idx = m * MXSER_PORTS_PER_BOARD;
+ for (i = 0; i < brd->info->nports; i++)
+ tty_register_device(mxvar_sdriver, brd->idx + i, NULL);
+
+ m++;
+ }
+
+ retval = pci_register_driver(&mxser_driver);
+ if (retval) {
+ printk(KERN_ERR "mxser: can't register pci driver\n");
+ if (!m) {
+ retval = -ENODEV;
+ goto err_unr;
+ } /* else: we have some ISA cards under control */
+ }
+
+ return 0;
+err_unr:
+ tty_unregister_driver(mxvar_sdriver);
+err_put:
+ put_tty_driver(mxvar_sdriver);
+ return retval;
+}
+
+static void __exit mxser_module_exit(void)
+{
+ unsigned int i, j;
+
+ pci_unregister_driver(&mxser_driver);
+
+ for (i = 0; i < MXSER_BOARDS; i++) /* ISA remains */
+ if (mxser_boards[i].info != NULL)
+ for (j = 0; j < mxser_boards[i].info->nports; j++)
+ tty_unregister_device(mxvar_sdriver,
+ mxser_boards[i].idx + j);
+ tty_unregister_driver(mxvar_sdriver);
+ put_tty_driver(mxvar_sdriver);
+
+ for (i = 0; i < MXSER_BOARDS; i++)
+ if (mxser_boards[i].info != NULL)
+ mxser_release_ISA_res(&mxser_boards[i]);
+}
+
+module_init(mxser_module_init);
+module_exit(mxser_module_exit);
diff --git a/drivers/tty/mxser.h b/drivers/tty/mxser.h
new file mode 100644
index 00000000..0bf79431
--- /dev/null
+++ b/drivers/tty/mxser.h
@@ -0,0 +1,150 @@
+#ifndef _MXSER_H
+#define _MXSER_H
+
+/*
+ * Semi-public control interfaces
+ */
+
+/*
+ * MOXA ioctls
+ */
+
+#define MOXA 0x400
+#define MOXA_GETDATACOUNT (MOXA + 23)
+#define MOXA_DIAGNOSE (MOXA + 50)
+#define MOXA_CHKPORTENABLE (MOXA + 60)
+#define MOXA_HighSpeedOn (MOXA + 61)
+#define MOXA_GET_MAJOR (MOXA + 63)
+#define MOXA_GETMSTATUS (MOXA + 65)
+#define MOXA_SET_OP_MODE (MOXA + 66)
+#define MOXA_GET_OP_MODE (MOXA + 67)
+
+#define RS232_MODE 0
+#define RS485_2WIRE_MODE 1
+#define RS422_MODE 2
+#define RS485_4WIRE_MODE 3
+#define OP_MODE_MASK 3
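+
+/*
+ * Illustrative only: user space selects the interface mode of a port
+ * through the two ioctls above, passing a pointer to an int, e.g.
+ *
+ *	int mode = RS485_2WIRE_MODE;
+ *	ioctl(fd, MOXA_SET_OP_MODE, &mode);
+ *	ioctl(fd, MOXA_GET_OP_MODE, &mode);
+ *
+ * where fd is an open ttyMI device on a board that supports mode
+ * switching.
+ */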
+
+#define MOXA_SDS_RSTICOUNTER (MOXA + 69)
+#define MOXA_ASPP_OQUEUE (MOXA + 70)
+#define MOXA_ASPP_MON (MOXA + 73)
+#define MOXA_ASPP_LSTATUS (MOXA + 74)
+#define MOXA_ASPP_MON_EXT (MOXA + 75)
+#define MOXA_SET_BAUD_METHOD (MOXA + 76)
+
+/* --------------------------------------------------- */
+
+#define NPPI_NOTIFY_PARITY 0x01
+#define NPPI_NOTIFY_FRAMING 0x02
+#define NPPI_NOTIFY_HW_OVERRUN 0x04
+#define NPPI_NOTIFY_SW_OVERRUN 0x08
+#define NPPI_NOTIFY_BREAK 0x10
+
+#define NPPI_NOTIFY_CTSHOLD 0x01 /* Tx hold by CTS low */
+#define NPPI_NOTIFY_DSRHOLD 0x02 /* Tx hold by DSR low */
+#define NPPI_NOTIFY_XOFFHOLD 0x08 /* Tx hold by Xoff received */
+#define NPPI_NOTIFY_XOFFXENT 0x10 /* Xoff Sent */
+
+/*
+ * The following definitions apply to the Moxa MUST chips only.
+ *
+ * When the LCR register (offset 0x03) is written with the value below,
+ * the MUST chip enters enhanced mode; bits 6,7 of the EFR register
+ * (offset 0x02) then select the register bank.
+ */
+#define MOXA_MUST_ENTER_ENCHANCE 0xBF
+
+/* when enhanced mode is enabled, these access the general bank registers */
+#define MOXA_MUST_GDL_REGISTER 0x07
+#define MOXA_MUST_GDL_MASK 0x7F
+#define MOXA_MUST_GDL_HAS_BAD_DATA 0x80
+
+#define MOXA_MUST_LSR_RERR 0x80 /* error in receive FIFO */
+/* enhanced register bank select and enhanced mode setting register; */
+/* accessed when the LCR register equals 0xBF */
+#define MOXA_MUST_EFR_REGISTER 0x02
+/* enhanced mode enable */
+#define MOXA_MUST_EFR_EFRB_ENABLE 0x10
+/* enhanced register bank select: banks 0, 1, 2, 3 */
+#define MOXA_MUST_EFR_BANK0 0x00
+#define MOXA_MUST_EFR_BANK1 0x40
+#define MOXA_MUST_EFR_BANK2 0x80
+#define MOXA_MUST_EFR_BANK3 0xC0
+#define MOXA_MUST_EFR_BANK_MASK 0xC0
+
+/* set XON1 value register, when LCR=0xBF and change to bank0 */
+#define MOXA_MUST_XON1_REGISTER 0x04
+
+/* set XON2 value register, when LCR=0xBF and change to bank0 */
+#define MOXA_MUST_XON2_REGISTER 0x05
+
+/* set XOFF1 value register, when LCR=0xBF and change to bank0 */
+#define MOXA_MUST_XOFF1_REGISTER 0x06
+
+/* set XOFF2 value register, when LCR=0xBF and change to bank0 */
+#define MOXA_MUST_XOFF2_REGISTER 0x07
+
+#define MOXA_MUST_RBRTL_REGISTER 0x04
+#define MOXA_MUST_RBRTH_REGISTER 0x05
+#define MOXA_MUST_RBRTI_REGISTER 0x06
+#define MOXA_MUST_THRTL_REGISTER 0x07
+#define MOXA_MUST_ENUM_REGISTER 0x04
+#define MOXA_MUST_HWID_REGISTER 0x05
+#define MOXA_MUST_ECR_REGISTER 0x06
+#define MOXA_MUST_CSR_REGISTER 0x07
+
+/* good data mode enable */
+#define MOXA_MUST_FCR_GDA_MODE_ENABLE 0x20
+/* only good data put into RxFIFO */
+#define MOXA_MUST_FCR_GDA_ONLY_ENABLE 0x10
+
+/* enable CTS interrupt */
+#define MOXA_MUST_IER_ECTSI 0x80
+/* enable RTS interrupt */
+#define MOXA_MUST_IER_ERTSI 0x40
+/* enable Xon/Xoff interrupt */
+#define MOXA_MUST_IER_XINT 0x20
+/* enable GDA interrupt */
+#define MOXA_MUST_IER_EGDAI 0x10
+
+#define MOXA_MUST_RECV_ISR (UART_IER_RDI | MOXA_MUST_IER_EGDAI)
+
+/* GDA interrupt pending */
+#define MOXA_MUST_IIR_GDA 0x1C
+#define MOXA_MUST_IIR_RDA 0x04
+#define MOXA_MUST_IIR_RTO 0x0C
+#define MOXA_MUST_IIR_LSR 0x06
+
+/* received Xon/Xoff or special interrupt pending */
+#define MOXA_MUST_IIR_XSC 0x10
+
+/* RTS/CTS change state interrupt pending */
+#define MOXA_MUST_IIR_RTSCTS 0x20
+#define MOXA_MUST_IIR_MASK 0x3E
+
+#define MOXA_MUST_MCR_XON_FLAG 0x40
+#define MOXA_MUST_MCR_XON_ANY 0x80
+#define MOXA_MUST_MCR_TX_XON 0x08
+
+/* on-chip software flow control mask value */
+#define MOXA_MUST_EFR_SF_MASK 0x0F
+/* send Xon1/Xoff1 */
+#define MOXA_MUST_EFR_SF_TX1 0x08
+/* send Xon2/Xoff2 */
+#define MOXA_MUST_EFR_SF_TX2 0x04
+/* send Xon1,Xon2/Xoff1,Xoff2 */
+#define MOXA_MUST_EFR_SF_TX12 0x0C
+/* don't send Xon/Xoff */
+#define MOXA_MUST_EFR_SF_TX_NO 0x00
+/* Tx software flow control mask */
+#define MOXA_MUST_EFR_SF_TX_MASK 0x0C
+/* don't receive Xon/Xoff */
+#define MOXA_MUST_EFR_SF_RX_NO 0x00
+/* receive Xon1/Xoff1 */
+#define MOXA_MUST_EFR_SF_RX1 0x02
+/* receive Xon2/Xoff2 */
+#define MOXA_MUST_EFR_SF_RX2 0x01
+/* receive Xon1,Xon2/Xoff1,Xoff2 */
+#define MOXA_MUST_EFR_SF_RX12 0x03
+/* Rx software flow control mask */
+#define MOXA_MUST_EFR_SF_RX_MASK 0x03
+
+#endif
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
new file mode 100644
index 00000000..c0d34add
--- /dev/null
+++ b/drivers/tty/n_gsm.c
@@ -0,0 +1,2806 @@
+/*
+ * n_gsm.c GSM 0710 tty multiplexor
+ * Copyright (c) 2009/10 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
+ *
+ * TO DO:
+ * Mostly done: ioctls for setting modes/timing
+ * Partly done: hooks so you can pull off frames to non tty devs
+ * Restart DLCI 0 when it closes ?
+ * Test basic encoding
+ * Improve the tx engine
+ * Resolve tx side locking by adding a queue_head and routing
+ * all control traffic via it
+ * General tidy/document
+ * Review the locking/move to refcounts more (mux now moved to an
+ * alloc/free model ready)
+ * Use newest tty open/close port helpers and install hooks
+ * What to do about power functions ?
+ * Termios setting and negotiation
+ * Do we need a 'which mux are you' ioctl to correlate mux and tty sets
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/bitops.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/tty_flip.h>
+#include <linux/tty_driver.h>
+#include <linux/serial.h>
+#include <linux/kfifo.h>
+#include <linux/skbuff.h>
+#include <linux/gsmmux.h>
+
+static int debug;
+module_param(debug, int, 0600);
+
+#define T1 (HZ/10)
+#define T2 (HZ/3)
+#define N2 3
+
+/* Use long timers for testing at low speed with debug on */
+#ifdef DEBUG_TIMING
+#define T1 HZ
+#define T2 (2 * HZ)
+#endif
+
+/*
+ * Semi-arbitrary buffer size limits. 0710 is normally run with 32-64 byte
+ * limits so this is plenty
+ */
+#define MAX_MRU 512
+#define MAX_MTU 512
+
+/*
+ * Each block of data we have queued to go out is in the form of
+ * a gsm_msg which holds everything we need in a link layer independent
+ * format
+ */
+
+struct gsm_msg {
+ struct gsm_msg *next;
+ u8 addr; /* DLCI address + flags */
+ u8 ctrl; /* Control byte + flags */
+ unsigned int len; /* Length of data block (can be zero) */
+ unsigned char *data; /* Points into buffer but not at the start */
+ unsigned char buffer[0];
+};
+
+/*
+ * Each active data link has a gsm_dlci structure associated which ties
+ * the link layer to an optional tty (if the tty side is open). To avoid
+ * complexity right now these are only ever freed up when the mux is
+ * shut down.
+ *
+ * At the moment we don't free DLCI objects until the mux is torn down;
+ * this avoids object lifetime issues but might be worth reviewing later.
+ */
+
+struct gsm_dlci {
+ struct gsm_mux *gsm;
+ int addr;
+ int state;
+#define DLCI_CLOSED 0
+#define DLCI_OPENING 1 /* Sending SABM not seen UA */
+#define DLCI_OPEN 2 /* SABM/UA complete */
+#define DLCI_CLOSING 3 /* Sending DISC not seen UA/DM */
+
+ /* Link layer */
+ spinlock_t lock; /* Protects the internal state */
+ struct timer_list t1; /* Retransmit timer for SABM and UA */
+ int retries;
+ /* Uplink tty if active */
+ struct tty_port port; /* The tty bound to this DLCI if there is one */
+ struct kfifo *fifo; /* Queue fifo for the DLCI */
+ struct kfifo _fifo; /* For new fifo API porting only */
+ int adaption; /* Adaption layer in use */
+ u32 modem_rx; /* Our incoming virtual modem lines */
+ u32 modem_tx; /* Our outgoing modem lines */
+ int dead; /* Refuse re-open */
+ /* Flow control */
+ int throttled; /* Private copy of throttle state */
+ int constipated; /* Throttle status for outgoing */
+ /* Packetised I/O */
+ struct sk_buff *skb; /* Frame being sent */
+ struct sk_buff_head skb_list; /* Queued frames */
+ /* Data handling callback */
+ void (*data)(struct gsm_dlci *dlci, u8 *data, int len);
+};
+
+/* DLCI 0, 62/63 are special or reserved; see gsmtty_open */
+
+#define NUM_DLCI 64
+
+/*
+ * DLCI 0 is used to pass control blocks out of band of the data
+ * flow (and with a higher link priority). One command can be outstanding
+ * at a time and we use this structure to manage them. They are created
+ * and destroyed by the user context, and updated by the receive paths
+ * and timers
+ */
+
+struct gsm_control {
+ u8 cmd; /* Command we are issuing */
+ u8 *data; /* Data for the command in case we retransmit */
+ int len; /* Length of block for retransmission */
+ int done; /* Done flag */
+ int error; /* Error if any */
+};
+
+/*
+ * Each GSM mux we have is represented by this structure. If we are
+ * operating as an ldisc then we use this structure as our ldisc
+ * state. We need to sort out lifetimes and locking with respect
+ * to the gsm mux array. For now we don't free DLCI objects that
+ * have been instantiated until the mux itself is terminated.
+ *
+ * To consider further: tty open versus mux shutdown.
+ */
+
+struct gsm_mux {
+ struct tty_struct *tty; /* The tty our ldisc is bound to */
+ spinlock_t lock;
+
+ /* Events on the GSM channel */
+ wait_queue_head_t event;
+
+ /* Bits for GSM mode decoding */
+
+ /* Framing Layer */
+ unsigned char *buf;
+ int state;
+#define GSM_SEARCH 0
+#define GSM_START 1
+#define GSM_ADDRESS 2
+#define GSM_CONTROL 3
+#define GSM_LEN 4
+#define GSM_DATA 5
+#define GSM_FCS 6
+#define GSM_OVERRUN 7
+#define GSM_LEN0 8
+#define GSM_LEN1 9
+#define GSM_SSOF 10
+ unsigned int len;
+ unsigned int address;
+ unsigned int count;
+ int escape;
+ int encoding;
+ u8 control;
+ u8 fcs;
+ u8 received_fcs;
+ u8 *txframe; /* TX framing buffer */
+
+ /* Methods for the receiver side */
+ void (*receive)(struct gsm_mux *gsm, u8 ch);
+ void (*error)(struct gsm_mux *gsm, u8 ch, u8 flag);
+ /* And transmit side */
+ int (*output)(struct gsm_mux *mux, u8 *data, int len);
+
+ /* Link Layer */
+ unsigned int mru;
+ unsigned int mtu;
+ int initiator; /* Did we initiate connection */
+ int dead; /* Has the mux been shut down */
+ struct gsm_dlci *dlci[NUM_DLCI];
+ int constipated; /* Asked by remote to shut up */
+
+ spinlock_t tx_lock;
+ unsigned int tx_bytes; /* TX data outstanding */
+#define TX_THRESH_HI 8192
+#define TX_THRESH_LO 2048
+ struct gsm_msg *tx_head; /* Pending data packets */
+ struct gsm_msg *tx_tail;
+
+ /* Control messages */
+ struct timer_list t2_timer; /* Retransmit timer for commands */
+ int cretries; /* Command retry counter */
+ struct gsm_control *pending_cmd;/* Our current pending command */
+ spinlock_t control_lock; /* Protects the pending command */
+
+ /* Configuration */
+ int adaption; /* 1 or 2 supported */
+ u8 ftype; /* UI or UIH */
+ int t1, t2; /* Timers in 1/100th of a sec */
+ int n2; /* Retry count */
+
+ /* Statistics (not currently exposed) */
+ unsigned long bad_fcs;
+ unsigned long malformed;
+ unsigned long io_error;
+ unsigned long bad_size;
+ unsigned long unsupported;
+};
+
+
+/*
+ * Mux objects - needed so that we can translate a tty index into the
+ * relevant mux and DLCI.
+ */
+
+#define MAX_MUX 4 /* 256 minors */
+static struct gsm_mux *gsm_mux[MAX_MUX]; /* GSM muxes */
+static spinlock_t gsm_mux_lock;
+
+/*
+ * This section of the driver logic implements the GSM encodings
+ * both the basic and the 'advanced'. Reliable transport is not
+ * supported.
+ */
+
+#define CR 0x02
+#define EA 0x01
+#define PF 0x10
+
+/* I is special; the remaining control field types are: */
+#define RR 0x01
+#define UI 0x03
+#define RNR 0x05
+#define REJ 0x09
+#define DM 0x0F
+#define SABM 0x2F
+#define DISC 0x43
+#define UA 0x63
+#define UIH 0xEF
+
+/* Channel commands */
+#define CMD_NSC 0x09
+#define CMD_TEST 0x11
+#define CMD_PSC 0x21
+#define CMD_RLS 0x29
+#define CMD_FCOFF 0x31
+#define CMD_PN 0x41
+#define CMD_RPN 0x49
+#define CMD_FCON 0x51
+#define CMD_CLD 0x61
+#define CMD_SNC 0x69
+#define CMD_MSC 0x71
+
+/* Virtual modem bits */
+#define MDM_FC 0x01
+#define MDM_RTC 0x02
+#define MDM_RTR 0x04
+#define MDM_IC 0x20
+#define MDM_DV 0x40
+
+#define GSM0_SOF 0xF9
+#define GSM1_SOF 0x7E
+#define GSM1_ESCAPE 0x7D
+#define GSM1_ESCAPE_BITS 0x20
+#define XON 0x11
+#define XOFF 0x13
+
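+/*
+ * For reference, the two framings handled below look like this on the
+ * wire. This is a summary of what gsm_send(), gsm0_receive() and
+ * gsm1_receive() implement rather than text from the specification:
+ *
+ * Basic option (encoding 0):
+ *   0xF9 | address | control | length (1-2 octets, EA coded) | data | FCS | 0xF9
+ *
+ * Advanced option (encoding 1):
+ *   0x7E | address | control | data | FCS | 0x7E
+ *   where any 0x7E, 0x7D, XON or XOFF inside the frame is sent as
+ *   0x7D followed by the original byte XOR 0x20.
+ */
+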
+static const struct tty_port_operations gsm_port_ops;
+
+/*
+ * CRC table for GSM 0710
+ */
+
+static const u8 gsm_fcs8[256] = {
+ 0x00, 0x91, 0xE3, 0x72, 0x07, 0x96, 0xE4, 0x75,
+ 0x0E, 0x9F, 0xED, 0x7C, 0x09, 0x98, 0xEA, 0x7B,
+ 0x1C, 0x8D, 0xFF, 0x6E, 0x1B, 0x8A, 0xF8, 0x69,
+ 0x12, 0x83, 0xF1, 0x60, 0x15, 0x84, 0xF6, 0x67,
+ 0x38, 0xA9, 0xDB, 0x4A, 0x3F, 0xAE, 0xDC, 0x4D,
+ 0x36, 0xA7, 0xD5, 0x44, 0x31, 0xA0, 0xD2, 0x43,
+ 0x24, 0xB5, 0xC7, 0x56, 0x23, 0xB2, 0xC0, 0x51,
+ 0x2A, 0xBB, 0xC9, 0x58, 0x2D, 0xBC, 0xCE, 0x5F,
+ 0x70, 0xE1, 0x93, 0x02, 0x77, 0xE6, 0x94, 0x05,
+ 0x7E, 0xEF, 0x9D, 0x0C, 0x79, 0xE8, 0x9A, 0x0B,
+ 0x6C, 0xFD, 0x8F, 0x1E, 0x6B, 0xFA, 0x88, 0x19,
+ 0x62, 0xF3, 0x81, 0x10, 0x65, 0xF4, 0x86, 0x17,
+ 0x48, 0xD9, 0xAB, 0x3A, 0x4F, 0xDE, 0xAC, 0x3D,
+ 0x46, 0xD7, 0xA5, 0x34, 0x41, 0xD0, 0xA2, 0x33,
+ 0x54, 0xC5, 0xB7, 0x26, 0x53, 0xC2, 0xB0, 0x21,
+ 0x5A, 0xCB, 0xB9, 0x28, 0x5D, 0xCC, 0xBE, 0x2F,
+ 0xE0, 0x71, 0x03, 0x92, 0xE7, 0x76, 0x04, 0x95,
+ 0xEE, 0x7F, 0x0D, 0x9C, 0xE9, 0x78, 0x0A, 0x9B,
+ 0xFC, 0x6D, 0x1F, 0x8E, 0xFB, 0x6A, 0x18, 0x89,
+ 0xF2, 0x63, 0x11, 0x80, 0xF5, 0x64, 0x16, 0x87,
+ 0xD8, 0x49, 0x3B, 0xAA, 0xDF, 0x4E, 0x3C, 0xAD,
+ 0xD6, 0x47, 0x35, 0xA4, 0xD1, 0x40, 0x32, 0xA3,
+ 0xC4, 0x55, 0x27, 0xB6, 0xC3, 0x52, 0x20, 0xB1,
+ 0xCA, 0x5B, 0x29, 0xB8, 0xCD, 0x5C, 0x2E, 0xBF,
+ 0x90, 0x01, 0x73, 0xE2, 0x97, 0x06, 0x74, 0xE5,
+ 0x9E, 0x0F, 0x7D, 0xEC, 0x99, 0x08, 0x7A, 0xEB,
+ 0x8C, 0x1D, 0x6F, 0xFE, 0x8B, 0x1A, 0x68, 0xF9,
+ 0x82, 0x13, 0x61, 0xF0, 0x85, 0x14, 0x66, 0xF7,
+ 0xA8, 0x39, 0x4B, 0xDA, 0xAF, 0x3E, 0x4C, 0xDD,
+ 0xA6, 0x37, 0x45, 0xD4, 0xA1, 0x30, 0x42, 0xD3,
+ 0xB4, 0x25, 0x57, 0xC6, 0xB3, 0x22, 0x50, 0xC1,
+ 0xBA, 0x2B, 0x59, 0xC8, 0xBD, 0x2C, 0x5E, 0xCF
+};
+
+#define INIT_FCS 0xFF
+#define GOOD_FCS 0xCF
+
+/**
+ * gsm_fcs_add - update FCS
+ * @fcs: Current FCS
+ * @c: Next data
+ *
+ * Update the FCS to include c. Uses the algorithm in the specification
+ * notes.
+ */
+
+static inline u8 gsm_fcs_add(u8 fcs, u8 c)
+{
+ return gsm_fcs8[fcs ^ c];
+}
+
+/**
+ * gsm_fcs_add_block - update FCS for a block
+ * @fcs: Current FCS
+ * @c: buffer of data
+ * @len: length of buffer
+ *
+ * Update the FCS to include c. Uses the algorithm in the specification
+ * notes.
+ */
+
+static inline u8 gsm_fcs_add_block(u8 fcs, u8 *c, int len)
+{
+ while (len--)
+ fcs = gsm_fcs8[fcs ^ *c++];
+ return fcs;
+}
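+
+/*
+ * How the constants above fit together: the transmitter runs the table
+ * over the protected octets starting from INIT_FCS and sends the ones
+ * complement (0xFF - fcs). The receiver runs the same table over the
+ * protected octets plus the received FCS octet and, for an undamaged
+ * frame, always ends up at the residue GOOD_FCS (0xCF), which is what
+ * gsm_queue() checks for.
+ */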
+
+/**
+ * gsm_read_ea - read a byte into an EA
+ * @val: variable holding value
+ * @c: byte going into the EA
+ *
+ * Processes one byte of an EA. Updates the passed variable
+ * and returns 1 if the EA is now completely read
+ */
+
+static int gsm_read_ea(unsigned int *val, u8 c)
+{
+ /* Add the next 7 bits into the value */
+ *val <<= 7;
+ *val |= c >> 1;
+ /* Was this the last byte of the EA 1 = yes*/
+ return c & EA;
+}
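+
+/*
+ * A worked example of the EA coding handled here: the value 414 arrives
+ * as the two octets 0x06 0x3D. The first octet has the EA bit clear so
+ * we keep reading (val = 0x06 >> 1 = 3); the second has it set so we
+ * finish with val = (3 << 7) | (0x3D >> 1) = 414. Values below 128 fit
+ * in a single octet with the EA bit set, e.g. 3 is carried as 0x07.
+ */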
+
+/**
+ * gsm_encode_modem - encode modem data bits
+ * @dlci: DLCI to encode from
+ *
+ * Returns the correct GSM encoded modem status bits (6 bit field) for
+ * the current status of the DLCI and attached tty object
+ */
+
+static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
+{
+ u8 modembits = 0;
+ /* FC is true flow control not modem bits */
+ if (dlci->throttled)
+ modembits |= MDM_FC;
+ if (dlci->modem_tx & TIOCM_DTR)
+ modembits |= MDM_RTC;
+ if (dlci->modem_tx & TIOCM_RTS)
+ modembits |= MDM_RTR;
+ if (dlci->modem_tx & TIOCM_RI)
+ modembits |= MDM_IC;
+ if (dlci->modem_tx & TIOCM_CD)
+ modembits |= MDM_DV;
+ return modembits;
+}
+
+/**
+ * gsm_print_packet - display a frame for debug
+ * @hdr: header to print before decode
+ * @addr: address EA from the frame
+ * @cr: C/R bit from the frame
+ * @control: control including PF bit
+ * @data: following data bytes
+ * @dlen: length of data
+ *
+ * Displays a packet in human readable format for debugging purposes. The
+ * style is based on amateur radio LAP-B dump display.
+ */
+
+static void gsm_print_packet(const char *hdr, int addr, int cr,
+ u8 control, const u8 *data, int dlen)
+{
+ if (!(debug & 1))
+ return;
+
+ pr_info("%s %d) %c: ", hdr, addr, "RC"[cr]);
+
+ switch (control & ~PF) {
+ case SABM:
+ pr_cont("SABM");
+ break;
+ case UA:
+ pr_cont("UA");
+ break;
+ case DISC:
+ pr_cont("DISC");
+ break;
+ case DM:
+ pr_cont("DM");
+ break;
+ case UI:
+ pr_cont("UI");
+ break;
+ case UIH:
+ pr_cont("UIH");
+ break;
+ default:
+ if (!(control & 0x01)) {
+ pr_cont("I N(S)%d N(R)%d",
+ (control & 0x0E) >> 1, (control & 0xE0) >> 5);
+ } else switch (control & 0x0F) {
+ case RR:
+ pr_cont("RR(%d)", (control & 0xE0) >> 5);
+ break;
+ case RNR:
+ pr_cont("RNR(%d)", (control & 0xE0) >> 5);
+ break;
+ case REJ:
+ pr_cont("REJ(%d)", (control & 0xE0) >> 5);
+ break;
+ default:
+ pr_cont("[%02X]", control);
+ }
+ }
+
+ if (control & PF)
+ pr_cont("(P)");
+ else
+ pr_cont("(F)");
+
+ if (dlen) {
+ int ct = 0;
+ while (dlen--) {
+ if (ct % 8 == 0) {
+ pr_cont("\n");
+ pr_debug(" ");
+ }
+ pr_cont("%02X ", *data++);
+ ct++;
+ }
+ }
+ pr_cont("\n");
+}
+
+
+/*
+ * Link level transmission side
+ */
+
+/**
+ * gsm_stuff_frame - bytestuff a frame
+ * @input: input buffer
+ * @output: output buffer
+ * @len: length of input
+ *
+ * Expand a buffer by bytestuffing it. The worst case size change
+ * is doubling and the caller is responsible for providing a
+ * suitably sized output buffer.
+ */
+
+static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
+{
+ int olen = 0;
+ while (len--) {
+ if (*input == GSM1_SOF || *input == GSM1_ESCAPE
+ || *input == XON || *input == XOFF) {
+ *output++ = GSM1_ESCAPE;
+ *output++ = *input++ ^ GSM1_ESCAPE_BITS;
+ olen++;
+ } else
+ *output++ = *input++;
+ olen++;
+ }
+ return olen;
+}
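+
+/*
+ * For example the input bytes 0x41 0x7E 0x11 leave the stuffer above as
+ * 0x41 0x7D 0x5E 0x7D 0x31: only the reserved values (SOF, escape, XON,
+ * XOFF) are escaped, each becoming GSM1_ESCAPE followed by the original
+ * byte XORed with GSM1_ESCAPE_BITS.
+ */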
+
+/**
+ * gsm_send - send a control frame
+ * @gsm: our GSM mux
+ * @addr: address for control frame
+ * @cr: command/response bit
+ * @control: control byte including PF bit
+ *
+ * Format up and transmit a control frame. These do not go via the
+ * queueing logic as they should be transmitted ahead of data when
+ * they are needed.
+ *
+ * FIXME: Lock versus data TX path
+ */
+
+static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
+{
+ int len;
+ u8 cbuf[10];
+ u8 ibuf[3];
+
+ switch (gsm->encoding) {
+ case 0:
+ cbuf[0] = GSM0_SOF;
+ cbuf[1] = (addr << 2) | (cr << 1) | EA;
+ cbuf[2] = control;
+ cbuf[3] = EA; /* Length of data = 0 */
+ cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3);
+ cbuf[5] = GSM0_SOF;
+ len = 6;
+ break;
+ case 1:
+ case 2:
+ /* Control frame + packing (but not frame stuffing) in mode 1 */
+ ibuf[0] = (addr << 2) | (cr << 1) | EA;
+ ibuf[1] = control;
+ ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2);
+ /* Stuffing may double the size worst case */
+ len = gsm_stuff_frame(ibuf, cbuf + 1, 3);
+ /* Now add the SOF markers */
+ cbuf[0] = GSM1_SOF;
+ cbuf[len + 1] = GSM1_SOF;
+ /* FIXME: we can omit the lead one in many cases */
+ len += 2;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ gsm->output(gsm, cbuf, len);
+ gsm_print_packet("-->", addr, cr, control, NULL, 0);
+}
+
+/**
+ * gsm_response - send a control response
+ * @gsm: our GSM mux
+ * @addr: address for control frame
+ * @control: control byte including PF bit
+ *
+ * Format up and transmit a link level response frame.
+ */
+
+static inline void gsm_response(struct gsm_mux *gsm, int addr, int control)
+{
+ gsm_send(gsm, addr, 0, control);
+}
+
+/**
+ * gsm_command - send a control command
+ * @gsm: our GSM mux
+ * @addr: address for control frame
+ * @control: control byte including PF bit
+ *
+ * Format up and transmit a link level command frame.
+ */
+
+static inline void gsm_command(struct gsm_mux *gsm, int addr, int control)
+{
+ gsm_send(gsm, addr, 1, control);
+}
+
+/* Data transmission */
+
+#define HDR_LEN 6 /* ADDR CTRL [LEN.2] DATA FCS */
+
+/**
+ * gsm_data_alloc - allocate data frame
+ * @gsm: GSM mux
+ * @addr: DLCI address
+ * @len: length excluding header and FCS
+ * @ctrl: control byte
+ *
+ * Allocate a new data buffer for sending frames with data. Space is left
+ * at the front for header bytes, but that is treated as an implementation
+ * detail which the higher level code should not rely on.
+ */
+
+static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+ u8 ctrl)
+{
+ struct gsm_msg *m = kmalloc(sizeof(struct gsm_msg) + len + HDR_LEN,
+ GFP_ATOMIC);
+ if (m == NULL)
+ return NULL;
+ m->data = m->buffer + HDR_LEN - 1; /* Allow for FCS */
+ m->len = len;
+ m->addr = addr;
+ m->ctrl = ctrl;
+ m->next = NULL;
+ return m;
+}
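+
+/*
+ * A sketch of the buffer layout that gsm_data_alloc() and
+ * __gsm_data_queue() between them produce:
+ *
+ *   buffer[0..HDR_LEN-2]  headroom; __gsm_data_queue() fills it in
+ *                         backwards from data (address, control and,
+ *                         for encoding 0, the length octets)
+ *   data[0..len-1]        payload supplied by the caller
+ *   data[len]             FCS, appended by __gsm_data_queue()
+ *
+ * which is why data is pointed at buffer + HDR_LEN - 1 above.
+ */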
+
+/**
+ * gsm_data_kick - poke the queue
+ * @gsm: GSM Mux
+ *
+ * The tty device has called us to indicate that room has appeared in
+ * the transmit queue. Ram more data into the pipe if we have any
+ *
+ * FIXME: lock against link layer control transmissions
+ */
+
+static void gsm_data_kick(struct gsm_mux *gsm)
+{
+ struct gsm_msg *msg = gsm->tx_head;
+ int len;
+ int skip_sof = 0;
+
+ /* FIXME: We need to apply this solely to data messages */
+ if (gsm->constipated)
+ return;
+
+ while (gsm->tx_head != NULL) {
+ msg = gsm->tx_head;
+ if (gsm->encoding != 0) {
+ gsm->txframe[0] = GSM1_SOF;
+ len = gsm_stuff_frame(msg->data,
+ gsm->txframe + 1, msg->len);
+ gsm->txframe[len + 1] = GSM1_SOF;
+ len += 2;
+ } else {
+ gsm->txframe[0] = GSM0_SOF;
+ memcpy(gsm->txframe + 1 , msg->data, msg->len);
+ gsm->txframe[msg->len + 1] = GSM0_SOF;
+ len = msg->len + 2;
+ }
+
+ if (debug & 4)
+ print_hex_dump_bytes("gsm_data_kick: ",
+ DUMP_PREFIX_OFFSET,
+ gsm->txframe, len);
+
+ if (gsm->output(gsm, gsm->txframe + skip_sof,
+ len - skip_sof) < 0)
+ break;
+ /* FIXME: Can eliminate one SOF in many more cases */
+ gsm->tx_head = msg->next;
+ if (gsm->tx_head == NULL)
+ gsm->tx_tail = NULL;
+ gsm->tx_bytes -= msg->len;
+ kfree(msg);
+ /* For a burst of frames skip the extra SOF within the
+ burst */
+ skip_sof = 1;
+ }
+}
+
+/**
+ * __gsm_data_queue - queue a UI or UIH frame
+ * @dlci: DLCI sending the data
+ * @msg: message queued
+ *
+ * Add data to the transmit queue and try and get stuff moving
+ * out of the mux tty if not already doing so. The caller must hold
+ * the gsm tx lock.
+ */
+
+static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+{
+ struct gsm_mux *gsm = dlci->gsm;
+ u8 *dp = msg->data;
+ u8 *fcs = dp + msg->len;
+
+ /* Fill in the header */
+ if (gsm->encoding == 0) {
+ if (msg->len < 128)
+ *--dp = (msg->len << 1) | EA;
+ else {
+ *--dp = (msg->len >> 7); /* bits 7 - 15 */
+ *--dp = (msg->len & 127) << 1; /* bits 0 - 6 */
+ }
+ }
+
+ *--dp = msg->ctrl;
+ if (gsm->initiator)
+ *--dp = (msg->addr << 2) | 2 | EA;
+ else
+ *--dp = (msg->addr << 2) | EA;
+ *fcs = gsm_fcs_add_block(INIT_FCS, dp , msg->data - dp);
+ /* Ugly protocol layering violation */
+ if (msg->ctrl == UI || msg->ctrl == (UI|PF))
+ *fcs = gsm_fcs_add_block(*fcs, msg->data, msg->len);
+ *fcs = 0xFF - *fcs;
+
+ gsm_print_packet("Q> ", msg->addr, gsm->initiator, msg->ctrl,
+ msg->data, msg->len);
+
+ /* Move the header back and adjust the length, also allow for the FCS
+ now tacked on the end */
+ msg->len += (msg->data - dp) + 1;
+ msg->data = dp;
+
+ /* Add to the actual output queue */
+ if (gsm->tx_tail)
+ gsm->tx_tail->next = msg;
+ else
+ gsm->tx_head = msg;
+ gsm->tx_tail = msg;
+ gsm->tx_bytes += msg->len;
+ gsm_data_kick(gsm);
+}
+
+/**
+ * gsm_data_queue - queue a UI or UIH frame
+ * @dlci: DLCI sending the data
+ * @msg: message queued
+ *
+ * Add data to the transmit queue and try and get stuff moving
+ * out of the mux tty if not already doing so. Takes the
+ * gsm tx lock itself.
+ */
+
+static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ __gsm_data_queue(dlci, msg);
+ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+}
+
+/**
+ * gsm_dlci_data_output - try and push data out of a DLCI
+ * @gsm: mux
+ * @dlci: the DLCI to pull data from
+ *
+ * Pull data from a DLCI and send it into the transmit queue if there
+ * is data. Keep to the MTU of the mux. This path handles the usual tty
+ * interface which is a byte stream with optional modem data.
+ *
+ * Caller must hold the tx_lock of the mux.
+ */
+
+static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+{
+ struct gsm_msg *msg;
+ u8 *dp;
+ int len, size;
+ int h = dlci->adaption - 1;
+
+ len = kfifo_len(dlci->fifo);
+ if (len == 0)
+ return 0;
+
+ /* MTU/MRU count only the data bits */
+ if (len > gsm->mtu)
+ len = gsm->mtu;
+
+ size = len + h;
+
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+ /* FIXME: need a timer or something to kick this so it can't
+ get stuck with no work outstanding and no buffer free */
+ if (msg == NULL)
+ return -ENOMEM;
+ dp = msg->data;
+ switch (dlci->adaption) {
+ case 1: /* Unstructured */
+ break;
+ case 2: /* Unstructured with modem bits. Always one byte as we never
+ send inline break data */
+ *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
+ break;
+ }
+ WARN_ON(kfifo_out_locked(dlci->fifo, dp , len, &dlci->lock) != len);
+ __gsm_data_queue(dlci, msg);
+ /* Bytes of data we used up */
+ return size;
+}
+
+/**
+ * gsm_dlci_data_output_framed - try and push data out of a DLCI
+ * @gsm: mux
+ * @dlci: the DLCI to pull data from
+ *
+ * Pull data from a DLCI and send it into the transmit queue if there
+ * is data. Keep to the MTU of the mux. This path handles framed data
+ * queued as skbuffs to the DLCI.
+ *
+ * Caller must hold the tx_lock of the mux.
+ */
+
+static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+ struct gsm_dlci *dlci)
+{
+ struct gsm_msg *msg;
+ u8 *dp;
+ int len, size;
+ int last = 0, first = 0;
+ int overhead = 0;
+
+ /* One byte per frame is used for B/F flags */
+ if (dlci->adaption == 4)
+ overhead = 1;
+
+ /* dlci->skb is locked by tx_lock */
+ if (dlci->skb == NULL) {
+ dlci->skb = skb_dequeue(&dlci->skb_list);
+ if (dlci->skb == NULL)
+ return 0;
+ first = 1;
+ }
+ len = dlci->skb->len + overhead;
+
+ /* MTU/MRU count only the data bits */
+ if (len > gsm->mtu) {
+ if (dlci->adaption == 3) {
+ /* Over long frame, bin it */
+ kfree_skb(dlci->skb);
+ dlci->skb = NULL;
+ return 0;
+ }
+ len = gsm->mtu;
+ } else
+ last = 1;
+
+ size = len + overhead;
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+
+ /* FIXME: need a timer or something to kick this so it can't
+ get stuck with no work outstanding and no buffer free */
+ if (msg == NULL)
+ return -ENOMEM;
+ dp = msg->data;
+
+ if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */
+ /* Flag byte to carry the start/end info */
+ *dp++ = last << 7 | first << 6 | 1; /* EA */
+ len--;
+ }
+ memcpy(dp, dlci->skb->data, len);
+ skb_pull(dlci->skb, len);
+ __gsm_data_queue(dlci, msg);
+ if (last)
+ dlci->skb = NULL;
+ return size;
+}
+
+/**
+ * gsm_dlci_data_sweep - look for data to send
+ * @gsm: the GSM mux
+ *
+ * Sweep the GSM mux channels in priority order looking for ones with
+ * data to send. We could do with optimising this scan a bit. We aim
+ * to fill the queue totally or up to TX_THRESH_HI bytes. Once we hit
+ * TX_THRESH_LO we get called again
+ *
+ * FIXME: We should round robin between groups and in theory you can
+ * renegotiate DLCI priorities with optional stuff. Needs optimising.
+ */
+
+static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
+{
+ int len;
+ /* Priority ordering: We should do priority with RR of the groups */
+ int i = 1;
+
+ while (i < NUM_DLCI) {
+ struct gsm_dlci *dlci;
+
+ if (gsm->tx_bytes > TX_THRESH_HI)
+ break;
+ dlci = gsm->dlci[i];
+ if (dlci == NULL || dlci->constipated) {
+ i++;
+ continue;
+ }
+ if (dlci->adaption < 3)
+ len = gsm_dlci_data_output(gsm, dlci);
+ else
+ len = gsm_dlci_data_output_framed(gsm, dlci);
+ if (len < 0)
+ break;
+ /* DLCI empty - try the next */
+ if (len == 0)
+ i++;
+ }
+}
+
+/**
+ * gsm_dlci_data_kick - transmit if possible
+ * @dlci: DLCI to kick
+ *
+ * Transmit data from this DLCI if the queue is empty. We can't rely on
+ * a tty wakeup except when we filled the pipe so we need to fire off
+ * new data ourselves in other cases.
+ */
+
+static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ /* If we have nothing running then we need to fire up */
+ if (dlci->gsm->tx_bytes == 0)
+ gsm_dlci_data_output(dlci->gsm, dlci);
+ else if (dlci->gsm->tx_bytes < TX_THRESH_LO)
+ gsm_dlci_data_sweep(dlci->gsm);
+ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+}
+
+/*
+ * Control message processing
+ */
+
+
+/**
+ * gsm_control_reply - send a response frame to a control
+ * @gsm: gsm channel
+ * @cmd: the command to use
+ * @data: data to follow encoded info
+ * @dlen: length of data
+ *
+ * Encode up and queue a UI/UIH frame containing our response.
+ */
+
+static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data,
+ int dlen)
+{
+ struct gsm_msg *msg;
+ msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype);
+ if (msg == NULL)
+ return;
+ msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */
+ msg->data[1] = (dlen << 1) | EA;
+ memcpy(msg->data + 2, data, dlen);
+ gsm_data_queue(gsm->dlci[0], msg);
+}
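+
+/*
+ * The payload built here follows the same DLCI 0 control format that
+ * gsm_dlci_command() below parses on receive: a type octet (EA coded
+ * command number with the C/R flag in bit 1), an EA coded length octet
+ * and then the value octets, all carried in a UI/UIH frame on DLCI 0.
+ */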
+
+/**
+ * gsm_process_modem - process received modem status
+ * @tty: virtual tty bound to the DLCI
+ * @dlci: DLCI to affect
+ * @modem: modem bits (full EA)
+ * @clen: length of the control message
+ *
+ * Used when a modem control message or a line state inline in adaption
+ * layer 2 is processed. Sorts out the local modem state and throttling.
+ */
+
+static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
+ u32 modem, int clen)
+{
+ int mlines = 0;
+ u8 brk = 0;
+
+ /* The modem status command can either contain one octet (v.24 signals)
+ or two octets (v.24 signals + break signals). The length field will
+ either be 2 or 3 respectively. This is specified in section
+ 5.4.6.3.7 of the 27.010 mux spec. */
+
+ if (clen == 2)
+ modem = modem & 0x7f;
+ else {
+ brk = modem & 0x7f;
+ modem = (modem >> 7) & 0x7f;
+ }
+
+ /* Flow control/ready to communicate */
+ if (modem & MDM_FC) {
+ /* Need to throttle our output on this device */
+ dlci->constipated = 1;
+ }
+ if (modem & MDM_RTC) {
+ mlines |= TIOCM_DSR | TIOCM_DTR;
+ dlci->constipated = 0;
+ gsm_dlci_data_kick(dlci);
+ }
+ /* Map modem bits */
+ if (modem & MDM_RTR)
+ mlines |= TIOCM_RTS | TIOCM_CTS;
+ if (modem & MDM_IC)
+ mlines |= TIOCM_RI;
+ if (modem & MDM_DV)
+ mlines |= TIOCM_CD;
+
+ /* Carrier drop -> hangup */
+ if (tty) {
+ if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD))
+ if (!(tty->termios->c_cflag & CLOCAL))
+ tty_hangup(tty);
+ if (brk & 0x01)
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
+ }
+ dlci->modem_rx = mlines;
+}
+
+/**
+ * gsm_control_modem - modem status received
+ * @gsm: GSM channel
+ * @data: data following command
+ * @clen: command length
+ *
+ * We have received a modem status control message. This is used by
+ * the GSM mux protocol to pass virtual modem line status and optionally
+ * to indicate break signals. Unpack it, convert to Linux representation
+ * and if need be stuff a break message down the tty.
+ */
+
+static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
+{
+ unsigned int addr = 0;
+ unsigned int modem = 0;
+ struct gsm_dlci *dlci;
+ int len = clen;
+ u8 *dp = data;
+ struct tty_struct *tty;
+
+ while (gsm_read_ea(&addr, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ /* Must be at least one byte following the EA */
+ len--;
+ if (len <= 0)
+ return;
+
+ addr >>= 1;
+ /* Closed port, or invalid ? */
+ if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
+ return;
+ dlci = gsm->dlci[addr];
+
+ while (gsm_read_ea(&modem, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ tty = tty_port_tty_get(&dlci->port);
+ gsm_process_modem(tty, dlci, modem, clen);
+ if (tty) {
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+ }
+ gsm_control_reply(gsm, CMD_MSC, data, clen);
+}
+
+/**
+ * gsm_control_rls - remote line status
+ * @gsm: GSM channel
+ * @data: data bytes
+ * @clen: data length
+ *
+ * The modem sends us a two byte message on the control channel whenever
+ * it wishes to send us an error state from the virtual link. Stuff
+ * this into the uplink tty if present
+ */
+
+static void gsm_control_rls(struct gsm_mux *gsm, u8 *data, int clen)
+{
+ struct tty_struct *tty;
+ unsigned int addr = 0 ;
+ u8 bits;
+ int len = clen;
+ u8 *dp = data;
+
+ while (gsm_read_ea(&addr, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ /* Must be at least one byte following ea */
+ len--;
+ if (len <= 0)
+ return;
+ addr >>= 1;
+ /* Closed port, or invalid ? */
+ if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
+ return;
+ /* No error ? */
+ bits = *dp;
+ if ((bits & 1) == 0)
+ return;
+ /* See if we have an uplink tty */
+ tty = tty_port_tty_get(&gsm->dlci[addr]->port);
+
+ if (tty) {
+ if (bits & 2)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ if (bits & 4)
+ tty_insert_flip_char(tty, 0, TTY_PARITY);
+ if (bits & 8)
+ tty_insert_flip_char(tty, 0, TTY_FRAME);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+ }
+ gsm_control_reply(gsm, CMD_RLS, data, clen);
+}
+
+static void gsm_dlci_begin_close(struct gsm_dlci *dlci);
+
+/**
+ * gsm_control_message - DLCI 0 control processing
+ * @gsm: our GSM mux
+ * @command: the command EA
+ * @data: data beyond the command/length EAs
+ * @clen: length
+ *
+ * Input processor for control messages from the other end of the link.
+ * Processes the incoming request and queues a response frame or an
+ * NSC response if not supported
+ */
+
+static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
+ u8 *data, int clen)
+{
+ u8 buf[1];
+ switch (command) {
+ case CMD_CLD: {
+ struct gsm_dlci *dlci = gsm->dlci[0];
+ /* Modem wishes to close down */
+ if (dlci) {
+ dlci->dead = 1;
+ gsm->dead = 1;
+ gsm_dlci_begin_close(dlci);
+ }
+ }
+ break;
+ case CMD_TEST:
+ /* Modem wishes to test, reply with the data */
+ gsm_control_reply(gsm, CMD_TEST, data, clen);
+ break;
+ case CMD_FCON:
+ /* Modem wants us to STFU */
+ gsm->constipated = 1;
+ gsm_control_reply(gsm, CMD_FCON, NULL, 0);
+ break;
+ case CMD_FCOFF:
+ /* Modem can accept data again */
+ gsm->constipated = 0;
+ gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
+ /* Kick the link in case it is idling */
+ gsm_data_kick(gsm);
+ break;
+ case CMD_MSC:
+ /* Out of band modem line change indicator for a DLCI */
+ gsm_control_modem(gsm, data, clen);
+ break;
+ case CMD_RLS:
+ /* Out of band error reception for a DLCI */
+ gsm_control_rls(gsm, data, clen);
+ break;
+ case CMD_PSC:
+ /* Modem wishes to enter power saving state */
+ gsm_control_reply(gsm, CMD_PSC, NULL, 0);
+ break;
+ /* Optional unsupported commands */
+ case CMD_PN: /* Parameter negotiation */
+ case CMD_RPN: /* Remote port negotiation */
+ case CMD_SNC: /* Service negotiation command */
+ default:
+ /* Reply to bad commands with an NSC */
+ buf[0] = command;
+ gsm_control_reply(gsm, CMD_NSC, buf, 1);
+ break;
+ }
+}
+
+/**
+ * gsm_control_response - process a response to our control
+ * @gsm: our GSM mux
+ * @command: the command (response) EA
+ * @data: data beyond the command/length EA
+ * @clen: length
+ *
+ * Process a response to an outstanding command. We only allow a single
+ * control message in flight so this is fairly easy. All the clean up
+ * is done by the caller; we just update the fields, flag it as done
+ * and return
+ */
+
+static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
+ u8 *data, int clen)
+{
+ struct gsm_control *ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsm->control_lock, flags);
+
+ ctrl = gsm->pending_cmd;
+ /* Does the reply match our command */
+ command |= 1;
+ if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) {
+ /* Our command was replied to, kill the retry timer */
+ del_timer(&gsm->t2_timer);
+ gsm->pending_cmd = NULL;
+ /* Rejected by the other end */
+ if (command == CMD_NSC)
+ ctrl->error = -EOPNOTSUPP;
+ ctrl->done = 1;
+ wake_up(&gsm->event);
+ }
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+}
+
+/**
+ * gsm_control_transmit - send control packet
+ * @gsm: gsm mux
+ * @ctrl: frame to send
+ *
+ * Send out a pending control command (called under control lock)
+ */
+
+static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
+{
+ struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1, gsm->ftype);
+ if (msg == NULL)
+ return;
+ msg->data[0] = (ctrl->cmd << 1) | 2 | EA; /* command */
+ memcpy(msg->data + 1, ctrl->data, ctrl->len);
+ gsm_data_queue(gsm->dlci[0], msg);
+}
+
+/**
+ * gsm_control_retransmit - retransmit a control frame
+ * @data: pointer to our gsm object
+ *
+ * Called off the T2 timer expiry in order to retransmit control frames
+ * that have been lost in the system somewhere. The control_lock protects
+ * us from colliding with another sender or a receive completion event.
+ * In that situation the timer may still occur in a small window but
+ * gsm->pending_cmd will be NULL and we just let the timer expire.
+ */
+
+static void gsm_control_retransmit(unsigned long data)
+{
+ struct gsm_mux *gsm = (struct gsm_mux *)data;
+ struct gsm_control *ctrl;
+ unsigned long flags;
+ spin_lock_irqsave(&gsm->control_lock, flags);
+ ctrl = gsm->pending_cmd;
+ if (ctrl) {
+ gsm->cretries--;
+ if (gsm->cretries == 0) {
+ gsm->pending_cmd = NULL;
+ ctrl->error = -ETIMEDOUT;
+ ctrl->done = 1;
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+ wake_up(&gsm->event);
+ return;
+ }
+ gsm_control_transmit(gsm, ctrl);
+ mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
+ }
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+}
+
+/**
+ * gsm_control_send - send a control frame on DLCI 0
+ * @gsm: the GSM channel
+ * @command: command to send including CR bit
+ * @data: bytes of data (must be kmalloced)
+ * @clen: length of the block to send
+ *
+ * Queue and dispatch a control command. Only one command can be
+ * active at a time. In theory more can be outstanding but the matching
+ * gets really complicated so for now stick to one outstanding.
+ */
+
+static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
+ unsigned int command, u8 *data, int clen)
+{
+ struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
+ GFP_KERNEL);
+ unsigned long flags;
+ if (ctrl == NULL)
+ return NULL;
+retry:
+ wait_event(gsm->event, gsm->pending_cmd == NULL);
+ spin_lock_irqsave(&gsm->control_lock, flags);
+ if (gsm->pending_cmd != NULL) {
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+ goto retry;
+ }
+ ctrl->cmd = command;
+ ctrl->data = data;
+ ctrl->len = clen;
+ gsm->pending_cmd = ctrl;
+ gsm->cretries = gsm->n2;
+ mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
+ gsm_control_transmit(gsm, ctrl);
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+ return ctrl;
+}
+
+/**
+ * gsm_control_wait - wait for a control to finish
+ * @gsm: GSM mux
+ * @control: control we are waiting on
+ *
+ * Waits for the control to complete or time out. Frees any used
+ * resources and returns 0 for success, or an error if the remote
+ * rejected or ignored the request.
+ */
+
+static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
+{
+ int err;
+ wait_event(gsm->event, control->done == 1);
+ err = control->error;
+ kfree(control);
+ return err;
+}
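+
+/*
+ * The expected calling pattern for the two helpers above can be seen in
+ * gsmtty_modem_update() later in this file: build the value octets,
+ * gsm_control_send() them and then block in gsm_control_wait() until
+ * the response arrives or the T2/N2 retry logic gives up.
+ */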
+
+
+/*
+ * DLCI level handling: Needs krefs
+ */
+
+/*
+ * State transitions and timers
+ */
+
+/**
+ * gsm_dlci_close - a DLCI has closed
+ * @dlci: DLCI that closed
+ *
+ * Perform processing when moving a DLCI into closed state. If there
+ * is an attached tty this is hung up
+ */
+
+static void gsm_dlci_close(struct gsm_dlci *dlci)
+{
+ del_timer(&dlci->t1);
+ if (debug & 8)
+ pr_debug("DLCI %d goes closed.\n", dlci->addr);
+ dlci->state = DLCI_CLOSED;
+ if (dlci->addr != 0) {
+ struct tty_struct *tty = tty_port_tty_get(&dlci->port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+ kfifo_reset(dlci->fifo);
+ } else
+ dlci->gsm->dead = 1;
+ wake_up(&dlci->gsm->event);
+ /* A DLCI 0 close is a MUX termination so we need to kick that
+ back to userspace somehow */
+}
+
+/**
+ * gsm_dlci_open - a DLCI has opened
+ * @dlci: DLCI that opened
+ *
+ * Perform processing when moving a DLCI into open state.
+ */
+
+static void gsm_dlci_open(struct gsm_dlci *dlci)
+{
+ /* Note that SABM UA .. SABM UA first UA lost can mean that we go
+ open -> open */
+ del_timer(&dlci->t1);
+ /* This will let a tty open continue */
+ dlci->state = DLCI_OPEN;
+ if (debug & 8)
+ pr_debug("DLCI %d goes open.\n", dlci->addr);
+ wake_up(&dlci->gsm->event);
+}
+
+/**
+ * gsm_dlci_t1 - T1 timer expiry
+ * @data: the DLCI whose T1 timer fired (passed as unsigned long)
+ *
+ * The T1 timer handles retransmits of control frames (essentially of
+ * SABM and DISC). We resend the command until the retry count runs out
+ * in which case an opening port goes back to closed and a closing port
+ * is simply put into closed state (any further frames from the other
+ * end will get a DM response)
+ */
+
+static void gsm_dlci_t1(unsigned long data)
+{
+ struct gsm_dlci *dlci = (struct gsm_dlci *)data;
+ struct gsm_mux *gsm = dlci->gsm;
+
+ switch (dlci->state) {
+ case DLCI_OPENING:
+ dlci->retries--;
+ if (dlci->retries) {
+ gsm_command(dlci->gsm, dlci->addr, SABM|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+ } else
+ gsm_dlci_close(dlci);
+ break;
+ case DLCI_CLOSING:
+ dlci->retries--;
+ if (dlci->retries) {
+ gsm_command(dlci->gsm, dlci->addr, DISC|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+ } else
+ gsm_dlci_close(dlci);
+ break;
+ }
+}
+
+/**
+ * gsm_dlci_begin_open - start channel open procedure
+ * @dlci: DLCI to open
+ *
+ * Commence opening a DLCI from the Linux side. We issue SABM messages
+ * to the modem which should then reply with a UA, at which point we
+ * will move into open state. Opening is done asynchronously with retry
+ * running off timers and the responses.
+ */
+
+static void gsm_dlci_begin_open(struct gsm_dlci *dlci)
+{
+ struct gsm_mux *gsm = dlci->gsm;
+ if (dlci->state == DLCI_OPEN || dlci->state == DLCI_OPENING)
+ return;
+ dlci->retries = gsm->n2;
+ dlci->state = DLCI_OPENING;
+ gsm_command(dlci->gsm, dlci->addr, SABM|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+}
+
+/**
+ * gsm_dlci_begin_close - start channel close procedure
+ * @dlci: DLCI to close
+ *
+ * Commence closing a DLCI from the Linux side. We issue DISC messages
+ * to the modem which should then reply with a UA, at which point we
+ * will move into closed state. Closing is done asynchronously with retry
+ * off timers. We may also receive a DM reply from the other end which
+ * indicates the channel was already closed.
+ */
+
+static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
+{
+ struct gsm_mux *gsm = dlci->gsm;
+ if (dlci->state == DLCI_CLOSED || dlci->state == DLCI_CLOSING)
+ return;
+ dlci->retries = gsm->n2;
+ dlci->state = DLCI_CLOSING;
+ gsm_command(dlci->gsm, dlci->addr, DISC|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+}
+
+/**
+ * gsm_dlci_data - data arrived
+ * @dlci: channel
+ * @data: block of bytes received
+ * @len: length of received block
+ *
+ * A UI or UIH frame has arrived which contains data for a channel
+ * other than the control channel. If the relevant virtual tty is
+ * open we shovel the bits down it, if not we drop them.
+ */
+
+static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int clen)
+{
+ /* krefs .. */
+ struct tty_port *port = &dlci->port;
+ struct tty_struct *tty = tty_port_tty_get(port);
+ unsigned int modem = 0;
+ int len = clen;
+
+ if (debug & 16)
+ pr_debug("%d bytes for tty %p\n", len, tty);
+ if (tty) {
+ switch (dlci->adaption) {
+ /* Unsupported types */
+ /* Packetised interruptible data */
+ case 4:
+ break;
+ /* Packetised uninterruptible voice/data */
+ case 3:
+ break;
+ /* Asynchronous serial with line state in each frame */
+ case 2:
+ while (gsm_read_ea(&modem, *data++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ gsm_process_modem(tty, dlci, modem, clen);
+ /* Line state will go via DLCI 0 controls only; fall through */
+ case 1:
+ default:
+ tty_insert_flip_string(tty, data, len);
+ tty_flip_buffer_push(tty);
+ }
+ tty_kref_put(tty);
+ }
+}
+
+/**
+ * gsm_dlci_command - data arrived on control channel
+ * @dlci: channel
+ * @data: block of bytes received
+ * @len: length of received block
+ *
+ * A UI or UIH frame has arrived which contains data for DLCI 0 the
+ * control channel. This should contain a command EA followed by
+ * control data bytes. The command EA contains a command/response bit
+ * and we divide up the work accordingly.
+ */
+
+static void gsm_dlci_command(struct gsm_dlci *dlci, u8 *data, int len)
+{
+ /* See what command is involved */
+ unsigned int command = 0;
+ while (len-- > 0) {
+ if (gsm_read_ea(&command, *data++) == 1) {
+ int clen = *data++;
+ len--;
+ /* FIXME: this is properly an EA */
+ clen >>= 1;
+ /* Malformed command ? */
+ if (clen > len)
+ return;
+ if (command & 1)
+ gsm_control_message(dlci->gsm, command,
+ data, clen);
+ else
+ gsm_control_response(dlci->gsm, command,
+ data, clen);
+ return;
+ }
+ }
+}
+
+/*
+ * Allocate/Free DLCI channels
+ */
+
+/**
+ * gsm_dlci_alloc - allocate a DLCI
+ * @gsm: GSM mux
+ * @addr: address of the DLCI
+ *
+ * Allocate and install a new DLCI object into the GSM mux.
+ *
+ * FIXME: review locking races
+ */
+
+static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
+{
+ struct gsm_dlci *dlci = kzalloc(sizeof(struct gsm_dlci), GFP_ATOMIC);
+ if (dlci == NULL)
+ return NULL;
+ spin_lock_init(&dlci->lock);
+ dlci->fifo = &dlci->_fifo;
+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
+ kfree(dlci);
+ return NULL;
+ }
+
+ skb_queue_head_init(&dlci->skb_list);
+ init_timer(&dlci->t1);
+ dlci->t1.function = gsm_dlci_t1;
+ dlci->t1.data = (unsigned long)dlci;
+ tty_port_init(&dlci->port);
+ dlci->port.ops = &gsm_port_ops;
+ dlci->gsm = gsm;
+ dlci->addr = addr;
+ dlci->adaption = gsm->adaption;
+ dlci->state = DLCI_CLOSED;
+ if (addr)
+ dlci->data = gsm_dlci_data;
+ else
+ dlci->data = gsm_dlci_command;
+ gsm->dlci[addr] = dlci;
+ return dlci;
+}
+
+/**
+ * gsm_dlci_free - release DLCI
+ * @dlci: DLCI to destroy
+ *
+ * Free up a DLCI. Currently to keep the lifetime rules sane we only
+ * clean up DLCI objects when the MUX closes rather than as the port
+ * is closed down on both the tty and mux levels.
+ *
+ * Can sleep.
+ */
+static void gsm_dlci_free(struct gsm_dlci *dlci)
+{
+ struct tty_struct *tty = tty_port_tty_get(&dlci->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+ del_timer_sync(&dlci->t1);
+ dlci->gsm->dlci[dlci->addr] = NULL;
+ kfifo_free(dlci->fifo);
+ kfree(dlci);
+}
+
+/*
+ * LAPBish link layer logic
+ */
+
+/**
+ * gsm_queue - a GSM frame is ready to process
+ * @gsm: pointer to our gsm mux
+ *
+ * At this point in time a frame has arrived and been demangled from
+ * the line encoding. All the differences between the encodings have
+ * been handled below us and the frame is unpacked into the structures.
+ * The fcs holds the header FCS but any data FCS must be added here.
+ */
+
+static void gsm_queue(struct gsm_mux *gsm)
+{
+ struct gsm_dlci *dlci;
+ u8 cr;
+ int address;
+ /* We have to sneak a look at the packet body to do the FCS.
+ Something of a layering violation in the spec */
+
+ if ((gsm->control & ~PF) == UI)
+ gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
+ if (gsm->encoding == 0) {
+ /* WARNING: gsm->received_fcs is used for gsm->encoding = 0 only.
+ In this case it contains the last piece of data
+ required to generate the final CRC */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
+ }
+ if (gsm->fcs != GOOD_FCS) {
+ gsm->bad_fcs++;
+ if (debug & 4)
+ pr_debug("BAD FCS %02x\n", gsm->fcs);
+ return;
+ }
+ address = gsm->address >> 1;
+ if (address >= NUM_DLCI)
+ goto invalid;
+
+ cr = gsm->address & 1; /* C/R bit */
+
+ gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len);
+
+ cr ^= 1 - gsm->initiator; /* Flip so 1 always means command */
+ dlci = gsm->dlci[address];
+
+ switch (gsm->control) {
+ case SABM|PF:
+ if (cr == 0)
+ goto invalid;
+ if (dlci == NULL)
+ dlci = gsm_dlci_alloc(gsm, address);
+ if (dlci == NULL)
+ return;
+ if (dlci->dead)
+ gsm_response(gsm, address, DM);
+ else {
+ gsm_response(gsm, address, UA);
+ gsm_dlci_open(dlci);
+ }
+ break;
+ case DISC|PF:
+ if (cr == 0)
+ goto invalid;
+ if (dlci == NULL || dlci->state == DLCI_CLOSED) {
+ gsm_response(gsm, address, DM);
+ return;
+ }
+ /* Real close complete */
+ gsm_response(gsm, address, UA);
+ gsm_dlci_close(dlci);
+ break;
+ case UA:
+ case UA|PF:
+ if (cr == 0 || dlci == NULL)
+ break;
+ switch (dlci->state) {
+ case DLCI_CLOSING:
+ gsm_dlci_close(dlci);
+ break;
+ case DLCI_OPENING:
+ gsm_dlci_open(dlci);
+ break;
+ }
+ break;
+ case DM: /* DM can be valid unsolicited */
+ case DM|PF:
+ if (cr)
+ goto invalid;
+ if (dlci == NULL)
+ return;
+ gsm_dlci_close(dlci);
+ break;
+ case UI:
+ case UI|PF:
+ case UIH:
+ case UIH|PF:
+#if 0
+ if (cr)
+ goto invalid;
+#endif
+ if (dlci == NULL || dlci->state != DLCI_OPEN) {
+ gsm_command(gsm, address, DM|PF);
+ return;
+ }
+ dlci->data(dlci, gsm->buf, gsm->len);
+ break;
+ default:
+ goto invalid;
+ }
+ return;
+invalid:
+ gsm->malformed++;
+ return;
+}
+
+
+/**
+ * gsm0_receive - perform processing for non-transparency
+ * @gsm: gsm data for this ldisc instance
+ * @c: character
+ *
+ * Receive bytes in gsm mode 0
+ */
+
+static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+{
+ unsigned int len;
+
+ switch (gsm->state) {
+ case GSM_SEARCH: /* SOF marker */
+ if (c == GSM0_SOF) {
+ gsm->state = GSM_ADDRESS;
+ gsm->address = 0;
+ gsm->len = 0;
+ gsm->fcs = INIT_FCS;
+ }
+ break;
+ case GSM_ADDRESS: /* Address EA */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->address, c))
+ gsm->state = GSM_CONTROL;
+ break;
+ case GSM_CONTROL: /* Control Byte */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ gsm->control = c;
+ gsm->state = GSM_LEN0;
+ break;
+ case GSM_LEN0: /* Length EA */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->len, c)) {
+ if (gsm->len > gsm->mru) {
+ gsm->bad_size++;
+ gsm->state = GSM_SEARCH;
+ break;
+ }
+ gsm->count = 0;
+ if (!gsm->len)
+ gsm->state = GSM_FCS;
+ else
+ gsm->state = GSM_DATA;
+ break;
+ }
+ gsm->state = GSM_LEN1;
+ break;
+ case GSM_LEN1:
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ len = c;
+ gsm->len |= len << 7;
+ if (gsm->len > gsm->mru) {
+ gsm->bad_size++;
+ gsm->state = GSM_SEARCH;
+ break;
+ }
+ gsm->count = 0;
+ if (!gsm->len)
+ gsm->state = GSM_FCS;
+ else
+ gsm->state = GSM_DATA;
+ break;
+ case GSM_DATA: /* Data */
+ gsm->buf[gsm->count++] = c;
+ if (gsm->count == gsm->len)
+ gsm->state = GSM_FCS;
+ break;
+ case GSM_FCS: /* FCS follows the packet */
+ gsm->received_fcs = c;
+ gsm_queue(gsm);
+ gsm->state = GSM_SSOF;
+ break;
+ case GSM_SSOF:
+ if (c == GSM0_SOF) {
+ gsm->state = GSM_SEARCH;
+ break;
+ }
+ break;
+ }
+}
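+
+/*
+ * As a concrete example of what the state machine above consumes, the
+ * SABM command gsm_send() emits for DLCI 0 in basic mode is the six
+ * octets
+ *
+ *   0xF9 0x03 0x3F 0x01 <fcs> 0xF9
+ *
+ * i.e. SOF, address (DLCI 0, C/R set, EA), control (SABM|PF), a zero
+ * length field, the complemented FCS over the address, control and
+ * length octets, and the closing SOF.
+ */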
+
+/**
+ * gsm1_receive - perform processing for the advanced option
+ * @gsm: gsm data for this ldisc instance
+ * @c: character
+ *
+ * Receive bytes in mode 1 (Advanced option)
+ */
+
+static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+{
+ if (c == GSM1_SOF) {
+ /* A SOF only ends a frame if we have got to the data state
+ and received at least one byte (the FCS) */
+ if (gsm->state == GSM_DATA && gsm->count) {
+ /* Extract the FCS */
+ gsm->count--;
+ gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
+ gsm->len = gsm->count;
+ gsm_queue(gsm);
+ gsm->state = GSM_START;
+ return;
+ }
+ /* Any partial frame was a runt so go back to start */
+ if (gsm->state != GSM_START) {
+ gsm->malformed++;
+ gsm->state = GSM_START;
+ }
+ /* A SOF in GSM_START means we are still reading idling or
+ framing bytes */
+ return;
+ }
+
+ if (c == GSM1_ESCAPE) {
+ gsm->escape = 1;
+ return;
+ }
+
+ /* Only an unescaped SOF gets us out of GSM search */
+ if (gsm->state == GSM_SEARCH)
+ return;
+
+ if (gsm->escape) {
+ c ^= GSM1_ESCAPE_BITS;
+ gsm->escape = 0;
+ }
+ switch (gsm->state) {
+ case GSM_START: /* First byte after SOF */
+ gsm->address = 0;
+ gsm->state = GSM_ADDRESS;
+ gsm->fcs = INIT_FCS;
+ /* Drop through */
+ case GSM_ADDRESS: /* Address continuation */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->address, c))
+ gsm->state = GSM_CONTROL;
+ break;
+ case GSM_CONTROL: /* Control Byte */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ gsm->control = c;
+ gsm->count = 0;
+ gsm->state = GSM_DATA;
+ break;
+ case GSM_DATA: /* Data */
+ if (gsm->count > gsm->mru) { /* Allow one for the FCS */
+ gsm->state = GSM_OVERRUN;
+ gsm->bad_size++;
+ } else
+ gsm->buf[gsm->count++] = c;
+ break;
+ case GSM_OVERRUN: /* Over-long - eg a dropped SOF */
+ break;
+ }
+}
+
+/**
+ * gsm_error - handle tty error
+ * @gsm: ldisc data
+ * @data: byte received (may be invalid)
+ * @flag: error received
+ *
+ * Handle an error in the receipt of data for a frame. Currently we just
+ * go back to hunting for a SOF.
+ *
+ * FIXME: better diagnostics ?
+ */
+
+static void gsm_error(struct gsm_mux *gsm,
+ unsigned char data, unsigned char flag)
+{
+ gsm->state = GSM_SEARCH;
+ gsm->io_error++;
+}
+
+/**
+ * gsm_cleanup_mux - generic GSM protocol cleanup
+ * @gsm: our mux
+ *
+ * Clean up the bits of the mux which are the same for all framing
+ * protocols. Remove the mux from the mux table, stop all the timers
+ * and then shut down each device hanging up the channels as we go.
+ */
+
+void gsm_cleanup_mux(struct gsm_mux *gsm)
+{
+ int i;
+ struct gsm_dlci *dlci = gsm->dlci[0];
+ struct gsm_msg *txq;
+
+ gsm->dead = 1;
+
+ spin_lock(&gsm_mux_lock);
+ for (i = 0; i < MAX_MUX; i++) {
+ if (gsm_mux[i] == gsm) {
+ gsm_mux[i] = NULL;
+ break;
+ }
+ }
+ spin_unlock(&gsm_mux_lock);
+ WARN_ON(i == MAX_MUX);
+
+ del_timer_sync(&gsm->t2_timer);
+ /* Now we are sure T2 has stopped */
+ if (dlci) {
+ dlci->dead = 1;
+ gsm_dlci_begin_close(dlci);
+ wait_event_interruptible(gsm->event,
+ dlci->state == DLCI_CLOSED);
+ }
+ /* Free up any link layer users */
+ for (i = 0; i < NUM_DLCI; i++)
+ if (gsm->dlci[i])
+ gsm_dlci_free(gsm->dlci[i]);
+ /* Now wipe the queues */
+ for (txq = gsm->tx_head; txq != NULL; txq = gsm->tx_head) {
+ gsm->tx_head = txq->next;
+ kfree(txq);
+ }
+ gsm->tx_tail = NULL;
+}
+EXPORT_SYMBOL_GPL(gsm_cleanup_mux);
+
+/**
+ * gsm_activate_mux - generic GSM setup
+ * @gsm: our mux
+ *
+ * Set up the bits of the mux which are the same for all framing
+ * protocols. Add the mux to the mux table so it can be opened and
+ * finally kick off connecting to DLCI 0 on the modem.
+ */
+
+int gsm_activate_mux(struct gsm_mux *gsm)
+{
+ struct gsm_dlci *dlci;
+ int i = 0;
+
+ init_timer(&gsm->t2_timer);
+ gsm->t2_timer.function = gsm_control_retransmit;
+ gsm->t2_timer.data = (unsigned long)gsm;
+ init_waitqueue_head(&gsm->event);
+ spin_lock_init(&gsm->control_lock);
+ spin_lock_init(&gsm->tx_lock);
+
+ if (gsm->encoding == 0)
+ gsm->receive = gsm0_receive;
+ else
+ gsm->receive = gsm1_receive;
+ gsm->error = gsm_error;
+
+ spin_lock(&gsm_mux_lock);
+ for (i = 0; i < MAX_MUX; i++) {
+ if (gsm_mux[i] == NULL) {
+ gsm_mux[i] = gsm;
+ break;
+ }
+ }
+ spin_unlock(&gsm_mux_lock);
+ if (i == MAX_MUX)
+ return -EBUSY;
+
+ dlci = gsm_dlci_alloc(gsm, 0);
+ if (dlci == NULL)
+ return -ENOMEM;
+ gsm->dead = 0; /* Tty opens are now permissible */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gsm_activate_mux);
+
+/**
+ * gsm_free_mux - free up a mux
+ * @mux: mux to free
+ *
+ * Dispose of allocated resources for a dead mux. No refcounting
+ * at present so the mux must be truly dead.
+ */
+void gsm_free_mux(struct gsm_mux *gsm)
+{
+ kfree(gsm->txframe);
+ kfree(gsm->buf);
+ kfree(gsm);
+}
+EXPORT_SYMBOL_GPL(gsm_free_mux);
+
+/**
+ * gsm_alloc_mux - allocate a mux
+ *
+ * Creates a new mux ready for activation.
+ */
+
+struct gsm_mux *gsm_alloc_mux(void)
+{
+ struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL);
+ if (gsm == NULL)
+ return NULL;
+ gsm->buf = kmalloc(MAX_MRU + 1, GFP_KERNEL);
+ if (gsm->buf == NULL) {
+ kfree(gsm);
+ return NULL;
+ }
+ gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL);
+ if (gsm->txframe == NULL) {
+ kfree(gsm->buf);
+ kfree(gsm);
+ return NULL;
+ }
+ spin_lock_init(&gsm->lock);
+
+ gsm->t1 = T1;
+ gsm->t2 = T2;
+ gsm->n2 = N2;
+ gsm->ftype = UIH;
+ gsm->initiator = 0;
+ gsm->adaption = 1;
+ gsm->encoding = 1;
+ gsm->mru = 64; /* Default to encoding 1 so these should be 64 */
+ gsm->mtu = 64;
+ gsm->dead = 1; /* Avoid early tty opens */
+
+ return gsm;
+}
+EXPORT_SYMBOL_GPL(gsm_alloc_mux);
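+
+/*
+ * Taken together the exported helpers give the expected lifecycle of a
+ * mux: gsm_alloc_mux() to create it, gsm_activate_mux() once a tty and
+ * output method have been attached, gsm_cleanup_mux() to shut it down
+ * and gsm_free_mux() to release it. This is exactly the sequence the
+ * line discipline open and close paths below follow.
+ */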
+
+/**
+ * gsmld_output - write to link
+ * @gsm: our mux
+ * @data: bytes to output
+ * @len: size
+ *
+ * Write a block of data from the GSM mux to the data channel. This
+ * will eventually be serialized from above but at the moment isn't.
+ */
+
+static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
+{
+ if (tty_write_room(gsm->tty) < len) {
+ set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
+ return -ENOSPC;
+ }
+ if (debug & 4)
+ print_hex_dump_bytes("gsmld_output: ", DUMP_PREFIX_OFFSET,
+ data, len);
+ gsm->tty->ops->write(gsm->tty, data, len);
+ return len;
+}
+
+/**
+ * gsmld_attach_gsm - mode set up
+ * @tty: our tty structure
+ * @gsm: our mux
+ *
+ * Set up the MUX for basic mode and commence connecting to the
+ * modem. Currently called from the line discipline set up but
+ * will need moving to an ioctl path.
+ */
+
+static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+{
+ int ret;
+
+ gsm->tty = tty_kref_get(tty);
+ gsm->output = gsmld_output;
+ ret = gsm_activate_mux(gsm);
+ if (ret != 0)
+ tty_kref_put(gsm->tty);
+ return ret;
+}
+
+
+/**
+ * gsmld_detach_gsm - stop doing 0710 mux
+ * @tty: tty attached to the mux
+ * @gsm: mux
+ *
+ * Shutdown and then clean up the resources used by the line discipline
+ */
+
+static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+{
+ WARN_ON(tty != gsm->tty);
+ gsm_cleanup_mux(gsm);
+ tty_kref_put(gsm->tty);
+ gsm->tty = NULL;
+}
+
+static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ struct gsm_mux *gsm = tty->disc_data;
+ const unsigned char *dp;
+ char *f;
+ int i;
+ char buf[64];
+ char flags;
+
+ if (debug & 4)
+ print_hex_dump_bytes("gsmld_receive: ", DUMP_PREFIX_OFFSET,
+ cp, count);
+
+ for (i = count, dp = cp, f = fp; i; i--, dp++) {
+ flags = *f++;
+ switch (flags) {
+ case TTY_NORMAL:
+ gsm->receive(gsm, *dp);
+ break;
+ case TTY_OVERRUN:
+ case TTY_BREAK:
+ case TTY_PARITY:
+ case TTY_FRAME:
+ gsm->error(gsm, *dp, flags);
+ break;
+ default:
+ WARN_ONCE(1, "%s: unknown flag %d\n",
+ tty_name(tty, buf), flags);
+ break;
+ }
+ }
+ /* FASYNC if needed ? */
+ /* If clogged call tty_throttle(tty); */
+}
+
+/**
+ * gsmld_chars_in_buffer - report available bytes
+ * @tty: tty device
+ *
+ * Report the number of characters buffered to be delivered to user
+ * at this instant in time.
+ *
+ * Locking: gsm lock
+ */
+
+static ssize_t gsmld_chars_in_buffer(struct tty_struct *tty)
+{
+ return 0;
+}
+
+/**
+ * gsmld_flush_buffer - clean input queue
+ * @tty: terminal device
+ *
+ * Flush the input buffer. Called when the line discipline is
+ * being closed, or when the tty layer wants the buffer flushed (eg
+ * at hangup).
+ */
+
+static void gsmld_flush_buffer(struct tty_struct *tty)
+{
+}
+
+/**
+ * gsmld_close - close the ldisc for this tty
+ * @tty: device
+ *
+ * Called from the terminal layer when this line discipline is
+ * being shut down, either because of a close or because of a
+ * discipline change. The function will not be called while other
+ * ldisc methods are in progress.
+ */
+
+static void gsmld_close(struct tty_struct *tty)
+{
+ struct gsm_mux *gsm = tty->disc_data;
+
+ gsmld_detach_gsm(tty, gsm);
+
+ gsmld_flush_buffer(tty);
+ /* Do other clean up here */
+ gsm_free_mux(gsm);
+}
+
+/**
+ * gsmld_open - open an ldisc
+ * @tty: terminal to open
+ *
+ * Called when this line discipline is being attached to the
+ * terminal device. Can sleep. Called serialized so that no
+ * other events will occur in parallel. No further open will occur
+ * until a close.
+ */
+
+static int gsmld_open(struct tty_struct *tty)
+{
+ struct gsm_mux *gsm;
+
+ if (tty->ops->write == NULL)
+ return -EINVAL;
+
+ /* Attach our ldisc data */
+ gsm = gsm_alloc_mux();
+ if (gsm == NULL)
+ return -ENOMEM;
+
+ tty->disc_data = gsm;
+ tty->receive_room = 65536;
+
+ /* Attach the initial passive connection */
+ gsm->encoding = 1;
+ return gsmld_attach_gsm(tty, gsm);
+}
+
+/**
+ * gsmld_write_wakeup - asynchronous I/O notifier
+ * @tty: tty device
+ *
+ * Required for the ptys, serial driver etc. since processes
+ * that attach themselves to the master and rely on ASYNC
+ * IO must be woken up
+ */
+
+static void gsmld_write_wakeup(struct tty_struct *tty)
+{
+ struct gsm_mux *gsm = tty->disc_data;
+ unsigned long flags;
+
+ /* Queue poll */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ gsm_data_kick(gsm);
+ if (gsm->tx_bytes < TX_THRESH_LO) {
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ gsm_dlci_data_sweep(gsm);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ }
+}
+
+/**
+ * gsmld_read - read function for tty
+ * @tty: tty device
+ * @file: file object
+ * @buf: userspace buffer pointer
+ * @nr: size of I/O
+ *
+ * Perform reads for the line discipline. We are guaranteed that the
+ * line discipline will not be closed under us but we may get multiple
+ * parallel readers and must handle this ourselves. We may also get
+ * a hangup. Always called in user context, may sleep.
+ *
+ * This code must be sure never to sleep through a hangup.
+ */
+
+static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user *buf, size_t nr)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * gsmld_write - write function for tty
+ * @tty: tty device
+ * @file: file object
+ * @buf: userspace buffer pointer
+ * @nr: size of I/O
+ *
+ * Called when the owner of the device wants to send a frame
+ * itself (or some other control data). The data is transferred
+ * as-is and must be properly framed and checksummed as appropriate
+ * by userspace. Frames are either sent whole or not at all as this
+ * avoids pain on the user side.
+ */
+
+static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr)
+{
+ int space = tty_write_room(tty);
+ if (space >= nr)
+ return tty->ops->write(tty, buf, nr);
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ return -ENOBUFS;
+}
+
+/**
+ * gsmld_poll - poll method for N_GSM0710
+ * @tty: terminal device
+ * @file: file accessing it
+ * @wait: poll table
+ *
+ * Called when the line discipline is asked to poll() for data or
+ * for special events. This code is not serialized with respect to
+ * other events save open/close.
+ *
+ * This code must be sure never to sleep through a hangup.
+ * Called without the kernel lock held - fine
+ */
+
+static unsigned int gsmld_poll(struct tty_struct *tty, struct file *file,
+ poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct gsm_mux *gsm = tty->disc_data;
+
+ poll_wait(file, &tty->read_wait, wait);
+ poll_wait(file, &tty->write_wait, wait);
+ if (tty_hung_up_p(file))
+ mask |= POLLHUP;
+ if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
+ mask |= POLLOUT | POLLWRNORM;
+ if (gsm->dead)
+ mask |= POLLHUP;
+ return mask;
+}
+
+static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
+ struct gsm_config *c)
+{
+ int need_close = 0;
+ int need_restart = 0;
+
+ /* Stuff we don't support yet - UI or I frame transport, windowing */
+ if ((c->adaption != 1 && c->adaption != 2) || c->k)
+ return -EOPNOTSUPP;
+ /* Check the MRU/MTU range looks sane */
+ if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
+ return -EINVAL;
+ if (c->n2 < 3)
+ return -EINVAL;
+ if (c->encapsulation > 1) /* Basic, advanced, no I */
+ return -EINVAL;
+ if (c->initiator > 1)
+ return -EINVAL;
+ if (c->i == 0 || c->i > 2) /* UIH and UI only */
+ return -EINVAL;
+ /*
+ * See what is needed for reconfiguration
+ */
+
+ /* Timing fields */
+ if (c->t1 != 0 && c->t1 != gsm->t1)
+ need_restart = 1;
+ if (c->t2 != 0 && c->t2 != gsm->t2)
+ need_restart = 1;
+ if (c->encapsulation != gsm->encoding)
+ need_restart = 1;
+ if (c->adaption != gsm->adaption)
+ need_restart = 1;
+ /* Requires care */
+ if (c->initiator != gsm->initiator)
+ need_close = 1;
+ if (c->mru != gsm->mru)
+ need_restart = 1;
+ if (c->mtu != gsm->mtu)
+ need_restart = 1;
+
+ /*
+ * Close down what is needed, restart and initiate the new
+ * configuration
+ */
+
+ if (need_close || need_restart) {
+ gsm_dlci_begin_close(gsm->dlci[0]);
+ /* This will timeout if the link is down due to N2 expiring */
+ wait_event_interruptible(gsm->event,
+ gsm->dlci[0]->state == DLCI_CLOSED);
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ if (need_restart)
+ gsm_cleanup_mux(gsm);
+
+ gsm->initiator = c->initiator;
+ gsm->mru = c->mru;
+ gsm->mtu = c->mtu;
+ gsm->encoding = c->encapsulation;
+ gsm->adaption = c->adaption;
+ gsm->n2 = c->n2;
+
+ if (c->i == 1)
+ gsm->ftype = UIH;
+ else if (c->i == 2)
+ gsm->ftype = UI;
+
+ if (c->t1)
+ gsm->t1 = c->t1;
+ if (c->t2)
+ gsm->t2 = c->t2;
+
+ /* FIXME: We need to separate activation/deactivation from adding
+ and removing from the mux array */
+ if (need_restart)
+ gsm_activate_mux(gsm);
+ if (gsm->initiator && need_close)
+ gsm_dlci_begin_open(gsm->dlci[0]);
+ return 0;
+}
+
+static int gsmld_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct gsm_config c;
+ struct gsm_mux *gsm = tty->disc_data;
+
+ switch (cmd) {
+ case GSMIOC_GETCONF:
+ memset(&c, 0, sizeof(c));
+ c.adaption = gsm->adaption;
+ c.encapsulation = gsm->encoding;
+ c.initiator = gsm->initiator;
+ c.t1 = gsm->t1;
+ c.t2 = gsm->t2;
+ c.t3 = 0; /* Not supported */
+ c.n2 = gsm->n2;
+ if (gsm->ftype == UIH)
+ c.i = 1;
+ else
+ c.i = 2;
+ pr_debug("Ftype %d i %d\n", gsm->ftype, c.i);
+ c.mru = gsm->mru;
+ c.mtu = gsm->mtu;
+ c.k = 0;
+ if (copy_to_user((void *)arg, &c, sizeof(c)))
+ return -EFAULT;
+ return 0;
+ case GSMIOC_SETCONF:
+ if (copy_from_user(&c, (void *)arg, sizeof(c)))
+ return -EFAULT;
+ return gsmld_config(tty, gsm, &c);
+ default:
+ return n_tty_ioctl_helper(tty, file, cmd, arg);
+ }
+}
+
+
+/* Line discipline for real tty */
+struct tty_ldisc_ops tty_ldisc_packet = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_gsm",
+ .open = gsmld_open,
+ .close = gsmld_close,
+ .flush_buffer = gsmld_flush_buffer,
+ .chars_in_buffer = gsmld_chars_in_buffer,
+ .read = gsmld_read,
+ .write = gsmld_write,
+ .ioctl = gsmld_ioctl,
+ .poll = gsmld_poll,
+ .receive_buf = gsmld_receive_buf,
+ .write_wakeup = gsmld_write_wakeup
+};
+
+/*
+ * Virtual tty side
+ */
+
+#define TX_SIZE 512
+
+static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
+{
+ u8 modembits[5];
+ struct gsm_control *ctrl;
+ int len = 2;
+
+ if (brk)
+ len++;
+
+ modembits[0] = len << 1 | EA; /* Data bytes */
+ modembits[1] = dlci->addr << 2 | 3; /* DLCI, EA, 1 */
+ modembits[2] = gsm_encode_modem(dlci) << 1 | EA;
+ if (brk)
+ modembits[3] = brk << 4 | 2 | EA; /* Valid, EA */
+ ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len + 1);
+ if (ctrl == NULL)
+ return -ENOMEM;
+ return gsm_control_wait(dlci->gsm, ctrl);
+}
+
+static int gsm_carrier_raised(struct tty_port *port)
+{
+ struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+ /* Not yet open so no carrier info */
+ if (dlci->state != DLCI_OPEN)
+ return 0;
+ if (debug & 2)
+ return 1;
+ return dlci->modem_rx & TIOCM_CD;
+}
+
+static void gsm_dtr_rts(struct tty_port *port, int onoff)
+{
+ struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+ unsigned int modem_tx = dlci->modem_tx;
+ if (onoff)
+ modem_tx |= TIOCM_DTR | TIOCM_RTS;
+ else
+ modem_tx &= ~(TIOCM_DTR | TIOCM_RTS);
+ if (modem_tx != dlci->modem_tx) {
+ dlci->modem_tx = modem_tx;
+ gsmtty_modem_update(dlci, 0);
+ }
+}
+
+static const struct tty_port_operations gsm_port_ops = {
+ .carrier_raised = gsm_carrier_raised,
+ .dtr_rts = gsm_dtr_rts,
+};
+
+
+static int gsmtty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct gsm_mux *gsm;
+ struct gsm_dlci *dlci;
+ struct tty_port *port;
+ unsigned int line = tty->index;
+ unsigned int mux = line >> 6;
+
+ line = line & 0x3F;
+
+ if (mux >= MAX_MUX)
+ return -ENXIO;
+ /* FIXME: we need to lock gsm_mux for lifetimes of ttys eventually */
+ if (gsm_mux[mux] == NULL)
+ return -EUNATCH;
+ if (line == 0 || line > 61) /* 62/63 reserved */
+ return -ECHRNG;
+ gsm = gsm_mux[mux];
+ if (gsm->dead)
+ return -EL2HLT;
+ dlci = gsm->dlci[line];
+ if (dlci == NULL)
+ dlci = gsm_dlci_alloc(gsm, line);
+ if (dlci == NULL)
+ return -ENOMEM;
+ port = &dlci->port;
+ port->count++;
+ tty->driver_data = dlci;
+ tty_port_tty_set(port, tty);
+
+ dlci->modem_rx = 0;
+ /* We could in theory open and close before we wait - eg if we get
+ a DM straight back. This is ok as that will have caused a hangup */
+ set_bit(ASYNCB_INITIALIZED, &port->flags);
+ /* Start sending off SABM messages */
+ gsm_dlci_begin_open(dlci);
+ /* And wait for virtual carrier */
+ return tty_port_block_til_ready(port, tty, filp);
+}
+
+static void gsmtty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci == NULL)
+ return;
+ if (tty_port_close_start(&dlci->port, tty, filp) == 0)
+ return;
+ gsm_dlci_begin_close(dlci);
+ tty_port_close_end(&dlci->port, tty);
+ tty_port_tty_set(&dlci->port, NULL);
+}
+
+static void gsmtty_hangup(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ tty_port_hangup(&dlci->port);
+ gsm_dlci_begin_close(dlci);
+}
+
+static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
+ int len)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ /* Stuff the bytes into the fifo queue */
+ int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
+ /* Need to kick the channel */
+ gsm_dlci_data_kick(dlci);
+ return sent;
+}
+
+static int gsmtty_write_room(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ return TX_SIZE - kfifo_len(dlci->fifo);
+}
+
+static int gsmtty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ return kfifo_len(dlci->fifo);
+}
+
+static void gsmtty_flush_buffer(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ /* Caution needed: If we implement reliable transport classes
+ then the data being transmitted can't simply be junked once
+ it has first hit the stack. Until then we can just blow it
+ away */
+ kfifo_reset(dlci->fifo);
+ /* Need to unhook this DLCI from the transmit queue logic */
+}
+
+static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ /* The FIFO handles the queue so the kernel will do the right
+ thing waiting on chars_in_buffer before calling us. No work
+ to do here */
+}
+
+static int gsmtty_tiocmget(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ return dlci->modem_rx;
+}
+
+static int gsmtty_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ unsigned int modem_tx = dlci->modem_tx;
+
+ modem_tx &= ~clear;
+ modem_tx |= set;
+
+ if (modem_tx != dlci->modem_tx) {
+ dlci->modem_tx = modem_tx;
+ return gsmtty_modem_update(dlci, 0);
+ }
+ return 0;
+}
+
+
+static int gsmtty_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ return -ENOIOCTLCMD;
+}
+
+static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
+{
+ /* For the moment it's fixed. In actual fact the speed information
+ for the virtual channel can be propagated in both directions by
+ the RPN control message. This however rapidly gets nasty as we
+ then have to remap modem signals each way according to whether
+ our virtual cable is null modem etc. */
+ tty_termios_copy_hw(tty->termios, old);
+}
+
+static void gsmtty_throttle(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ if (tty->termios->c_cflag & CRTSCTS)
+ dlci->modem_tx &= ~TIOCM_DTR;
+ dlci->throttled = 1;
+ /* Send an MSC with DTR cleared */
+ gsmtty_modem_update(dlci, 0);
+}
+
+static void gsmtty_unthrottle(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ if (tty->termios->c_cflag & CRTSCTS)
+ dlci->modem_tx |= TIOCM_DTR;
+ dlci->throttled = 0;
+ /* Send an MSC with DTR set */
+ gsmtty_modem_update(dlci, 0);
+}
+
+static int gsmtty_break_ctl(struct tty_struct *tty, int state)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ int encode = 0; /* Off */
+
+ if (state == -1) /* "On indefinitely" - we can't encode this
+ properly */
+ encode = 0x0F;
+ else if (state > 0) {
+ encode = state / 200; /* mS to encoding */
+ if (encode > 0x0F)
+ encode = 0x0F; /* Best effort */
+ }
+ return gsmtty_modem_update(dlci, encode);
+}
+
+static struct tty_driver *gsm_tty_driver;
+
+/* Virtual ttys for the demux */
+static const struct tty_operations gsmtty_ops = {
+ .open = gsmtty_open,
+ .close = gsmtty_close,
+ .write = gsmtty_write,
+ .write_room = gsmtty_write_room,
+ .chars_in_buffer = gsmtty_chars_in_buffer,
+ .flush_buffer = gsmtty_flush_buffer,
+ .ioctl = gsmtty_ioctl,
+ .throttle = gsmtty_throttle,
+ .unthrottle = gsmtty_unthrottle,
+ .set_termios = gsmtty_set_termios,
+ .hangup = gsmtty_hangup,
+ .wait_until_sent = gsmtty_wait_until_sent,
+ .tiocmget = gsmtty_tiocmget,
+ .tiocmset = gsmtty_tiocmset,
+ .break_ctl = gsmtty_break_ctl,
+};
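+
+/*
+ * Usage note (illustrative, not part of this driver): once a mux is running,
+ * DLCI n of mux m is reached through the virtual tty with minor m * 64 + n;
+ * with the usual udev naming that is a node such as /dev/gsmtty1
+ * (hypothetical path), which can then be opened like any other serial port.
+ */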
+
+
+
+static int __init gsm_init(void)
+{
+ /* Fill in our line protocol discipline, and register it */
+ int status = tty_register_ldisc(N_GSM0710, &tty_ldisc_packet);
+ if (status != 0) {
+ pr_err("n_gsm: can't register line discipline (err = %d)\n",
+ status);
+ return status;
+ }
+
+ gsm_tty_driver = alloc_tty_driver(256);
+ if (!gsm_tty_driver) {
+ tty_unregister_ldisc(N_GSM0710);
+ pr_err("gsm_init: tty allocation failed.\n");
+ return -EINVAL;
+ }
+ gsm_tty_driver->owner = THIS_MODULE;
+ gsm_tty_driver->driver_name = "gsmtty";
+ gsm_tty_driver->name = "gsmtty";
+ gsm_tty_driver->major = 0; /* Dynamic */
+ gsm_tty_driver->minor_start = 0;
+ gsm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ gsm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ gsm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
+ | TTY_DRIVER_HARDWARE_BREAK;
+ gsm_tty_driver->init_termios = tty_std_termios;
+ /* Fixme */
+ gsm_tty_driver->init_termios.c_lflag &= ~ECHO;
+ tty_set_operations(gsm_tty_driver, &gsmtty_ops);
+
+ spin_lock_init(&gsm_mux_lock);
+
+ if (tty_register_driver(gsm_tty_driver)) {
+ put_tty_driver(gsm_tty_driver);
+ tty_unregister_ldisc(N_GSM0710);
+ pr_err("gsm_init: tty registration failed.\n");
+ return -EBUSY;
+ }
+ pr_debug("gsm_init: loaded as %d,%d.\n",
+ gsm_tty_driver->major, gsm_tty_driver->minor_start);
+ return 0;
+}
+
+static void __exit gsm_exit(void)
+{
+ int status = tty_unregister_ldisc(N_GSM0710);
+ if (status != 0)
+ pr_err("n_gsm: can't unregister line discipline (err = %d)\n",
+ status);
+ tty_unregister_driver(gsm_tty_driver);
+ put_tty_driver(gsm_tty_driver);
+}
+
+module_init(gsm_init);
+module_exit(gsm_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_GSM0710);
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
new file mode 100644
index 00000000..cea56033
--- /dev/null
+++ b/drivers/tty/n_hdlc.c
@@ -0,0 +1,1006 @@
+/* generic HDLC line discipline for Linux
+ *
+ * Written by Paul Fulghum paulkf@microgate.com
+ * for Microgate Corporation
+ *
+ * Microgate and SyncLink are registered trademarks of Microgate Corporation
+ *
+ * Adapted from ppp.c, written by Michael Callahan <callahan@maths.ox.ac.uk>,
+ * Al Longyear <longyear@netcom.com>,
+ * Paul Mackerras <Paul.Mackerras@cs.anu.edu.au>
+ *
+ * Original release 01/11/99
+ *
+ * This code is released under the GNU General Public License (GPL)
+ *
+ * This module implements the tty line discipline N_HDLC for use with
+ * tty device drivers that support bit-synchronous HDLC communications.
+ *
+ * All HDLC data is frame oriented which means:
+ *
+ * 1. tty write calls represent one complete transmit frame of data
+ * The device driver should accept the complete frame or none of
+ * the frame (busy) in the write method. Each write call should have
+ * a byte count in the range of 2-65535 bytes (2 is min HDLC frame
+ * with 1 addr byte and 1 ctrl byte). The max byte count of 65535
+ * should include any crc bytes required. For example, when using
+ * CCITT CRC32, 4 crc bytes are required, so the maximum size frame
+ * the application may transmit is limited to 65531 bytes. For CCITT
+ * CRC16, the maximum application frame size would be 65533.
+ *
+ *
+ * 2. receive callbacks from the device driver represent
+ * one received frame. The device driver should bypass
+ * the tty flip buffer and call the line discipline receive
+ * callback directly to avoid fragmenting or concatenating
+ * multiple frames into a single receive callback.
+ *
+ * The HDLC line discipline queues the receive frames in separate
+ * buffers so complete receive frames can be returned by the
+ * tty read calls.
+ *
+ * 3. tty read calls return an entire frame of data or nothing.
+ *
+ * 4. all send and receive data is considered raw. No processing
+ * or translation is performed by the line discipline, regardless
+ * of the tty flags
+ *
+ * 5. When the line discipline is queried for the amount of receive
+ * data available (FIONREAD), 0 is returned if no data is available;
+ * otherwise the count of the next available frame is returned
+ * (instead of the sum of all received frame counts).
+ *
+ * These conventions allow the standard tty programming interface
+ * to be used for synchronous HDLC applications when used with
+ * this line discipline (or another line discipline that is frame
+ * oriented such as N_PPP).
+ *
+ * The SyncLink driver (synclink.c) implements both asynchronous
+ * (using standard line discipline N_TTY) and synchronous HDLC
+ * (using N_HDLC) communications, with the latter using the above
+ * conventions.
+ *
+ * This implementation is very basic and does not maintain
+ * any statistics. The main point is to enforce the raw data
+ * and frame orientation of HDLC communications.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
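+
+/*
+ * Illustrative user-space sketch (not part of this driver): attaching N_HDLC
+ * to an already-opened serial tty and exchanging one frame, following the
+ * conventions above. The device path and frame contents are hypothetical and
+ * error handling is omitted; N_HDLC and TIOCSETD come from the usual uapi
+ * headers.
+ *
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <unistd.h>
+ *
+ *	int fd = open("/dev/ttyS0", O_RDWR);	// hypothetical port
+ *	int ldisc = N_HDLC;			// ldisc number from <linux/tty.h>
+ *	ioctl(fd, TIOCSETD, &ldisc);		// switch the line discipline
+ *
+ *	unsigned char frame[] = { 0xff, 0x03, 'h', 'i' };
+ *	write(fd, frame, sizeof(frame));	// one write == one transmit frame
+ *
+ *	unsigned char rx[4096];
+ *	int n = read(fd, rx, sizeof(rx));	// returns a whole frame or nothing
+ */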
+
+#define HDLC_MAGIC 0x239e
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+
+#undef VERSION
+#define VERSION(major,minor,patch) (((((major)<<8)+(minor))<<8)+(patch))
+
+#include <linux/poll.h>
+#include <linux/in.h>
+#include <linux/ioctl.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/string.h> /* used in new tty drivers */
+#include <linux/signal.h> /* used in new tty drivers */
+#include <linux/if.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/termios.h>
+#include <asm/uaccess.h>
+
+/*
+ * Buffers for individual HDLC frames
+ */
+#define MAX_HDLC_FRAME_SIZE 65535
+#define DEFAULT_RX_BUF_COUNT 10
+#define MAX_RX_BUF_COUNT 60
+#define DEFAULT_TX_BUF_COUNT 3
+
+struct n_hdlc_buf {
+ struct n_hdlc_buf *link;
+ int count;
+ char buf[1];
+};
+
+#define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe)
+
+struct n_hdlc_buf_list {
+ struct n_hdlc_buf *head;
+ struct n_hdlc_buf *tail;
+ int count;
+ spinlock_t spinlock;
+};
+
+/**
+ * struct n_hdlc - per device instance data structure
+ * @magic - magic value for structure
+ * @flags - miscellaneous control flags
+ * @tty - ptr to TTY structure
+ * @backup_tty - TTY to use if tty gets closed
+ * @tbusy - reentrancy flag for tx wakeup code
+ * @woke_up - tx wakeup arrived while tbusy was set; forces a re-check
+ * @tbuf - currently transmitting tx buffer
+ * @tx_buf_list - list of pending transmit frame buffers
+ * @rx_buf_list - list of received frame buffers
+ * @tx_free_buf_list - list of unused transmit frame buffers
+ * @rx_free_buf_list - list of unused received frame buffers
+ */
+struct n_hdlc {
+ int magic;
+ __u32 flags;
+ struct tty_struct *tty;
+ struct tty_struct *backup_tty;
+ int tbusy;
+ int woke_up;
+ struct n_hdlc_buf *tbuf;
+ struct n_hdlc_buf_list tx_buf_list;
+ struct n_hdlc_buf_list rx_buf_list;
+ struct n_hdlc_buf_list tx_free_buf_list;
+ struct n_hdlc_buf_list rx_free_buf_list;
+};
+
+/*
+ * HDLC buffer list manipulation functions
+ */
+static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list);
+static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
+ struct n_hdlc_buf *buf);
+static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
+
+/* Local functions */
+
+static struct n_hdlc *n_hdlc_alloc (void);
+
+/* debug level can be set by insmod for debugging purposes */
+#define DEBUG_LEVEL_INFO 1
+static int debuglevel;
+
+/* max frame size for memory allocations */
+static int maxframe = 4096;
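+
+/*
+ * Both can be set at module load time, e.g. (hypothetical invocation)
+ * "modprobe n_hdlc maxframe=65535 debuglevel=1"; n_hdlc_init() clamps
+ * maxframe to the 4096..65535 range.
+ */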
+
+/* TTY callbacks */
+
+static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
+ __u8 __user *buf, size_t nr);
+static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr);
+static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg);
+static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+ poll_table *wait);
+static int n_hdlc_tty_open(struct tty_struct *tty);
+static void n_hdlc_tty_close(struct tty_struct *tty);
+static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *cp,
+ char *fp, int count);
+static void n_hdlc_tty_wakeup(struct tty_struct *tty);
+
+#define bset(p,b) ((p)[(b) >> 5] |= (1 << ((b) & 0x1f)))
+
+#define tty2n_hdlc(tty) ((struct n_hdlc *) ((tty)->disc_data))
+#define n_hdlc2tty(n_hdlc) ((n_hdlc)->tty)
+
+static void flush_rx_queue(struct tty_struct *tty)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ struct n_hdlc_buf *buf;
+
+ while ((buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list)))
+ n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, buf);
+}
+
+static void flush_tx_queue(struct tty_struct *tty)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ struct n_hdlc_buf *buf;
+ unsigned long flags;
+
+ while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
+ n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
+ spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
+ if (n_hdlc->tbuf) {
+ n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
+ n_hdlc->tbuf = NULL;
+ }
+ spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+}
+
+static struct tty_ldisc_ops n_hdlc_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "hdlc",
+ .open = n_hdlc_tty_open,
+ .close = n_hdlc_tty_close,
+ .read = n_hdlc_tty_read,
+ .write = n_hdlc_tty_write,
+ .ioctl = n_hdlc_tty_ioctl,
+ .poll = n_hdlc_tty_poll,
+ .receive_buf = n_hdlc_tty_receive,
+ .write_wakeup = n_hdlc_tty_wakeup,
+ .flush_buffer = flush_rx_queue,
+};
+
+/**
+ * n_hdlc_release - release an n_hdlc per device line discipline info structure
+ * @n_hdlc - per device line discipline info structure
+ */
+static void n_hdlc_release(struct n_hdlc *n_hdlc)
+{
+ struct tty_struct *tty = n_hdlc2tty (n_hdlc);
+ struct n_hdlc_buf *buf;
+
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_release() called\n",__FILE__,__LINE__);
+
+ /* Ensure that the n_hdlcd process is not hanging on select()/poll() */
+ wake_up_interruptible (&tty->read_wait);
+ wake_up_interruptible (&tty->write_wait);
+
+ if (tty->disc_data == n_hdlc)
+ tty->disc_data = NULL; /* Break the tty->n_hdlc link */
+
+ /* Release transmit and receive buffers */
+ while ((buf = n_hdlc_buf_get(&n_hdlc->rx_free_buf_list)))
+ kfree(buf);
+ while ((buf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list)))
+ kfree(buf);
+ while ((buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list)))
+ kfree(buf);
+ while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
+ kfree(buf);
+ kfree(n_hdlc->tbuf);
+ kfree(n_hdlc);
+
+} /* end of n_hdlc_release() */
+
+/**
+ * n_hdlc_tty_close - line discipline close
+ * @tty - pointer to tty info structure
+ *
+ * Called when the line discipline is changed to something
+ * else, the tty is closed, or the tty detects a hangup.
+ */
+static void n_hdlc_tty_close(struct tty_struct *tty)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
+
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_tty_close() called\n",__FILE__,__LINE__);
+
+ if (n_hdlc != NULL) {
+ if (n_hdlc->magic != HDLC_MAGIC) {
+ printk (KERN_WARNING"n_hdlc: trying to close unopened tty!\n");
+ return;
+ }
+#if defined(TTY_NO_WRITE_SPLIT)
+ clear_bit(TTY_NO_WRITE_SPLIT,&tty->flags);
+#endif
+ tty->disc_data = NULL;
+ if (tty == n_hdlc->backup_tty)
+ n_hdlc->backup_tty = NULL;
+ if (tty != n_hdlc->tty)
+ return;
+ if (n_hdlc->backup_tty) {
+ n_hdlc->tty = n_hdlc->backup_tty;
+ } else {
+ n_hdlc_release (n_hdlc);
+ }
+ }
+
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_tty_close() success\n",__FILE__,__LINE__);
+
+} /* end of n_hdlc_tty_close() */
+
+/**
+ * n_hdlc_tty_open - called when line discipline changed to n_hdlc
+ * @tty - pointer to tty info structure
+ *
+ * Returns 0 if success, otherwise error code
+ */
+static int n_hdlc_tty_open (struct tty_struct *tty)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
+
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_tty_open() called (device=%s)\n",
+ __FILE__,__LINE__,
+ tty->name);
+
+ /* There should not be an existing table for this slot. */
+ if (n_hdlc) {
+ printk (KERN_ERR"n_hdlc_tty_open:tty already associated!\n" );
+ return -EEXIST;
+ }
+
+ n_hdlc = n_hdlc_alloc();
+ if (!n_hdlc) {
+ printk (KERN_ERR "n_hdlc_alloc failed\n");
+ return -ENFILE;
+ }
+
+ tty->disc_data = n_hdlc;
+ n_hdlc->tty = tty;
+ tty->receive_room = 65536;
+
+#if defined(TTY_NO_WRITE_SPLIT)
+ /* change tty_io write() to not split large writes into 8K chunks */
+ set_bit(TTY_NO_WRITE_SPLIT,&tty->flags);
+#endif
+
+ /* flush receive data from driver */
+ tty_driver_flush_buffer(tty);
+
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_tty_open() success\n",__FILE__,__LINE__);
+
+ return 0;
+
+} /* end of n_hdlc_tty_open() */
+
+/**
+ * n_hdlc_send_frames - send frames on pending send buffer list
+ * @n_hdlc - pointer to ldisc instance data
+ * @tty - pointer to tty instance data
+ *
+ * Send frames on the pending send buffer list until the driver does not
+ * accept a frame (busy). This function is called after adding a frame to the
+ * send buffer list and by the tty wakeup callback.
+ */
+static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
+{
+ register int actual;
+ unsigned long flags;
+ struct n_hdlc_buf *tbuf;
+
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_send_frames() called\n",__FILE__,__LINE__);
+ check_again:
+
+ spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
+ if (n_hdlc->tbusy) {
+ n_hdlc->woke_up = 1;
+ spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+ return;
+ }
+ n_hdlc->tbusy = 1;
+ n_hdlc->woke_up = 0;
+ spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+
+ /* get current transmit buffer or get new transmit */
+ /* buffer from list of pending transmit buffers */
+
+ tbuf = n_hdlc->tbuf;
+ if (!tbuf)
+ tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
+
+ while (tbuf) {
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)sending frame %p, count=%d\n",
+ __FILE__,__LINE__,tbuf,tbuf->count);
+
+ /* Send the next block of data to device */
+ tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
+ actual = tty->ops->write(tty, tbuf->buf, tbuf->count);
+
+ /* rollback was possible and has been done */
+ if (actual == -ERESTARTSYS) {
+ n_hdlc->tbuf = tbuf;
+ break;
+ }
+ /* if transmit error, throw frame away by */
+ /* pretending it was accepted by driver */
+ if (actual < 0)
+ actual = tbuf->count;
+
+ if (actual == tbuf->count) {
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)frame %p completed\n",
+ __FILE__,__LINE__,tbuf);
+
+ /* free current transmit buffer */
+ n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);
+
+ /* this tx buffer is done */
+ n_hdlc->tbuf = NULL;
+
+ /* wake up sleeping writers */
+ wake_up_interruptible(&tty->write_wait);
+
+ /* get next pending transmit buffer */
+ tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
+ } else {
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)frame %p pending\n",
+ __FILE__,__LINE__,tbuf);
+
+ /* buffer not accepted by driver */
+ /* set this buffer as pending buffer */
+ n_hdlc->tbuf = tbuf;
+ break;
+ }
+ }
+
+ if (!tbuf)
+ tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+
+ /* Clear the re-entry flag */
+ spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
+ n_hdlc->tbusy = 0;
+ spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+
+ if (n_hdlc->woke_up)
+ goto check_again;
+
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_send_frames() exit\n",__FILE__,__LINE__);
+
+} /* end of n_hdlc_send_frames() */
+
+/**
+ * n_hdlc_tty_wakeup - Callback for transmit wakeup
+ * @tty - pointer to associated tty instance data
+ *
+ * Called when low level device driver can accept more send data.
+ */
+static void n_hdlc_tty_wakeup(struct tty_struct *tty)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_tty_wakeup() called\n",__FILE__,__LINE__);
+
+ if (!n_hdlc)
+ return;
+
+ if (tty != n_hdlc->tty) {
+ tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ return;
+ }
+
+ n_hdlc_send_frames (n_hdlc, tty);
+
+} /* end of n_hdlc_tty_wakeup() */
+
+/**
+ * n_hdlc_tty_receive - Called by tty driver when receive data is available
+ * @tty - pointer to tty instance data
+ * @data - pointer to received data
+ * @flags - pointer to flags for data
+ * @count - count of received data in bytes
+ *
+ * Called by tty low level driver when receive data is available. Data is
+ * interpreted as one HDLC frame.
+ */
+static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
+ char *flags, int count)
+{
+ register struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
+ register struct n_hdlc_buf *buf;
+
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_tty_receive() called count=%d\n",
+ __FILE__,__LINE__, count);
+
+ /* This can happen if stuff comes in on the backup tty */
+ if (!n_hdlc || tty != n_hdlc->tty)
+ return;
+
+ /* verify line is using HDLC discipline */
+ if (n_hdlc->magic != HDLC_MAGIC) {
+ printk("%s(%d) line not using HDLC discipline\n",
+ __FILE__,__LINE__);
+ return;
+ }
+
+ if (count > maxframe) {
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d) rx count>maxframesize, data discarded\n",
+ __FILE__,__LINE__);
+ return;
+ }
+
+ /* get a free HDLC buffer */
+ buf = n_hdlc_buf_get(&n_hdlc->rx_free_buf_list);
+ if (!buf) {
+ /* no buffers in free list, attempt to allocate another rx buffer */
+ /* unless the maximum count has been reached */
+ if (n_hdlc->rx_buf_list.count < MAX_RX_BUF_COUNT)
+ buf = kmalloc(N_HDLC_BUF_SIZE, GFP_ATOMIC);
+ }
+
+ if (!buf) {
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d) no more rx buffers, data discarded\n",
+ __FILE__,__LINE__);
+ return;
+ }
+
+ /* copy received data to HDLC buffer */
+ memcpy(buf->buf,data,count);
+ buf->count=count;
+
+ /* add HDLC buffer to list of received frames */
+ n_hdlc_buf_put(&n_hdlc->rx_buf_list, buf);
+
+ /* wake up any blocked reads and perform async signalling */
+ wake_up_interruptible (&tty->read_wait);
+ if (n_hdlc->tty->fasync != NULL)
+ kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN);
+
+} /* end of n_hdlc_tty_receive() */
+
+/**
+ * n_hdlc_tty_read - Called to retrieve one frame of data (if available)
+ * @tty - pointer to tty instance data
+ * @file - pointer to open file object
+ * @buf - pointer to returned data buffer
+ * @nr - size of returned data buffer
+ *
+ * Returns the number of bytes returned or error code.
+ */
+static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
+ __u8 __user *buf, size_t nr)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ int ret = 0;
+ struct n_hdlc_buf *rbuf;
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_tty_read() called\n",__FILE__,__LINE__);
+
+ /* Validate the pointers */
+ if (!n_hdlc)
+ return -EIO;
+
+ /* verify user access to buffer */
+ if (!access_ok(VERIFY_WRITE, buf, nr)) {
+ printk(KERN_WARNING "%s(%d) n_hdlc_tty_read() can't verify user "
+ "buffer\n", __FILE__, __LINE__);
+ return -EFAULT;
+ }
+
+ add_wait_queue(&tty->read_wait, &wait);
+
+ for (;;) {
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
+ ret = -EIO;
+ break;
+ }
+ if (tty_hung_up_p(file))
+ break;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
+ if (rbuf) {
+ if (rbuf->count > nr) {
+ /* too large for caller's buffer */
+ ret = -EOVERFLOW;
+ } else {
+ if (copy_to_user(buf, rbuf->buf, rbuf->count))
+ ret = -EFAULT;
+ else
+ ret = rbuf->count;
+ }
+
+ if (n_hdlc->rx_free_buf_list.count >
+ DEFAULT_RX_BUF_COUNT)
+ kfree(rbuf);
+ else
+ n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
+ break;
+ }
+
+ /* no data */
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ schedule();
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+ }
+
+ remove_wait_queue(&tty->read_wait, &wait);
+ __set_current_state(TASK_RUNNING);
+
+ return ret;
+
+} /* end of n_hdlc_tty_read() */
+
+/**
+ * n_hdlc_tty_write - write a single frame of data to device
+ * @tty - pointer to associated tty device instance data
+ * @file - pointer to file object data
+ * @data - pointer to transmit data (one frame)
+ * @count - size of transmit frame in bytes
+ *
+ * Returns the number of bytes written (or error code).
+ */
+static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *data, size_t count)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
+ int error = 0;
+ DECLARE_WAITQUEUE(wait, current);
+ struct n_hdlc_buf *tbuf;
+
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_tty_write() called count=%Zd\n",
+ __FILE__,__LINE__,count);
+
+ /* Verify pointers */
+ if (!n_hdlc)
+ return -EIO;
+
+ if (n_hdlc->magic != HDLC_MAGIC)
+ return -EIO;
+
+ /* verify frame size */
+ if (count > maxframe) {
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk (KERN_WARNING
+ "n_hdlc_tty_write: truncating user packet "
+ "from %lu to %d\n", (unsigned long) count,
+ maxframe );
+ count = maxframe;
+ }
+
+ add_wait_queue(&tty->write_wait, &wait);
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
+ if (tbuf)
+ break;
+
+ if (file->f_flags & O_NONBLOCK) {
+ error = -EAGAIN;
+ break;
+ }
+ schedule();
+
+ n_hdlc = tty2n_hdlc (tty);
+ if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC ||
+ tty != n_hdlc->tty) {
+ printk("n_hdlc_tty_write: %p invalid after wait!\n", n_hdlc);
+ error = -EIO;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ error = -EINTR;
+ break;
+ }
+ }
+
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&tty->write_wait, &wait);
+
+ if (!error) {
+ /* Retrieve the user's buffer */
+ memcpy(tbuf->buf, data, count);
+
+ /* Send the data */
+ tbuf->count = error = count;
+ n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
+ n_hdlc_send_frames(n_hdlc,tty);
+ }
+
+ return error;
+
+} /* end of n_hdlc_tty_write() */
+
+/**
+ * n_hdlc_tty_ioctl - process IOCTL system call for the tty device.
+ * @tty - pointer to tty instance data
+ * @file - pointer to open file object for device
+ * @cmd - IOCTL command code
+ * @arg - argument for IOCTL call (cmd dependent)
+ *
+ * Returns command dependent result.
+ */
+static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
+ int error = 0;
+ int count;
+ unsigned long flags;
+
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_tty_ioctl() called %d\n",
+ __FILE__,__LINE__,cmd);
+
+ /* Verify the status of the device */
+ if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC)
+ return -EBADF;
+
+ switch (cmd) {
+ case FIONREAD:
+ /* report count of read data available */
+ /* in next available frame (if any) */
+ spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags);
+ if (n_hdlc->rx_buf_list.head)
+ count = n_hdlc->rx_buf_list.head->count;
+ else
+ count = 0;
+ spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags);
+ error = put_user(count, (int __user *)arg);
+ break;
+
+ case TIOCOUTQ:
+ /* get the pending tx byte count in the driver */
+ count = tty_chars_in_buffer(tty);
+ /* add size of next output frame in queue */
+ spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
+ if (n_hdlc->tx_buf_list.head)
+ count += n_hdlc->tx_buf_list.head->count;
+ spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags);
+ error = put_user(count, (int __user *)arg);
+ break;
+
+ case TCFLSH:
+ switch (arg) {
+ case TCIOFLUSH:
+ case TCOFLUSH:
+ flush_tx_queue(tty);
+ }
+ /* fall through to default */
+
+ default:
+ error = n_tty_ioctl_helper(tty, file, cmd, arg);
+ break;
+ }
+ return error;
+
+} /* end of n_hdlc_tty_ioctl() */
+
+/**
+ * n_hdlc_tty_poll - TTY callback for poll system call
+ * @tty - pointer to tty instance data
+ * @filp - pointer to open file object for device
+ * @poll_table - wait queue for operations
+ *
+ * Determine which operations (read/write) will not block and return info
+ * to caller.
+ * Returns a bit mask containing info on which ops will not block.
+ */
+static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+ poll_table *wait)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
+ unsigned int mask = 0;
+
+ if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_tty_poll() called\n",__FILE__,__LINE__);
+
+ if (n_hdlc && n_hdlc->magic == HDLC_MAGIC && tty == n_hdlc->tty) {
+ /* queue current process into any wait queue that */
+ /* may awaken in the future (read and write) */
+
+ poll_wait(filp, &tty->read_wait, wait);
+ poll_wait(filp, &tty->write_wait, wait);
+
+ /* set bits for operations that won't block */
+ if (n_hdlc->rx_buf_list.head)
+ mask |= POLLIN | POLLRDNORM; /* readable */
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ mask |= POLLHUP;
+ if (tty_hung_up_p(filp))
+ mask |= POLLHUP;
+ if (!tty_is_writelocked(tty) &&
+ n_hdlc->tx_free_buf_list.head)
+ mask |= POLLOUT | POLLWRNORM; /* writable */
+ }
+ return mask;
+} /* end of n_hdlc_tty_poll() */
+
+/**
+ * n_hdlc_alloc - allocate an n_hdlc instance data structure
+ *
+ * Returns a pointer to newly created structure if success, otherwise %NULL
+ */
+static struct n_hdlc *n_hdlc_alloc(void)
+{
+ struct n_hdlc_buf *buf;
+ int i;
+ struct n_hdlc *n_hdlc = kzalloc(sizeof(*n_hdlc), GFP_KERNEL);
+
+ if (!n_hdlc)
+ return NULL;
+
+ n_hdlc_buf_list_init(&n_hdlc->rx_free_buf_list);
+ n_hdlc_buf_list_init(&n_hdlc->tx_free_buf_list);
+ n_hdlc_buf_list_init(&n_hdlc->rx_buf_list);
+ n_hdlc_buf_list_init(&n_hdlc->tx_buf_list);
+
+ /* allocate free rx buffer list */
+ for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
+ buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
+ if (buf)
+ n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,buf);
+ else if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_alloc(), kalloc() failed for rx buffer %d\n",__FILE__,__LINE__, i);
+ }
+
+ /* allocate free tx buffer list */
+ for(i=0;i<DEFAULT_TX_BUF_COUNT;i++) {
+ buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
+ if (buf)
+ n_hdlc_buf_put(&n_hdlc->tx_free_buf_list,buf);
+ else if (debuglevel >= DEBUG_LEVEL_INFO)
+ printk("%s(%d)n_hdlc_alloc(), kalloc() failed for tx buffer %d\n",__FILE__,__LINE__, i);
+ }
+
+ /* Initialize the control block */
+ n_hdlc->magic = HDLC_MAGIC;
+ n_hdlc->flags = 0;
+
+ return n_hdlc;
+
+} /* end of n_hdlc_alloc() */
+
+/**
+ * n_hdlc_buf_list_init - initialize specified HDLC buffer list
+ * @list - pointer to buffer list
+ */
+static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list)
+{
+ memset(list, 0, sizeof(*list));
+ spin_lock_init(&list->spinlock);
+} /* end of n_hdlc_buf_list_init() */
+
+/**
+ * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
+ * @list - pointer to buffer list
+ * @buf - pointer to buffer
+ */
+static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
+ struct n_hdlc_buf *buf)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&list->spinlock,flags);
+
+ buf->link=NULL;
+ if (list->tail)
+ list->tail->link = buf;
+ else
+ list->head = buf;
+ list->tail = buf;
+ (list->count)++;
+
+ spin_unlock_irqrestore(&list->spinlock,flags);
+
+} /* end of n_hdlc_buf_put() */
+
+/**
+ * n_hdlc_buf_get - remove and return an HDLC buffer from list
+ * @list - pointer to HDLC buffer list
+ *
+ * Remove and return an HDLC buffer from the head of the specified HDLC buffer
+ * list.
+ * Returns a pointer to HDLC buffer if available, otherwise %NULL.
+ */
+static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
+{
+ unsigned long flags;
+ struct n_hdlc_buf *buf;
+ spin_lock_irqsave(&list->spinlock,flags);
+
+ buf = list->head;
+ if (buf) {
+ list->head = buf->link;
+ (list->count)--;
+ }
+ if (!list->head)
+ list->tail = NULL;
+
+ spin_unlock_irqrestore(&list->spinlock,flags);
+ return buf;
+
+} /* end of n_hdlc_buf_get() */
+
+static char hdlc_banner[] __initdata =
+ KERN_INFO "HDLC line discipline maxframe=%u\n";
+static char hdlc_register_ok[] __initdata =
+ KERN_INFO "N_HDLC line discipline registered.\n";
+static char hdlc_register_fail[] __initdata =
+ KERN_ERR "error registering line discipline: %d\n";
+static char hdlc_init_fail[] __initdata =
+ KERN_INFO "N_HDLC: init failure %d\n";
+
+static int __init n_hdlc_init(void)
+{
+ int status;
+
+ /* range check maxframe arg */
+ if (maxframe < 4096)
+ maxframe = 4096;
+ else if (maxframe > 65535)
+ maxframe = 65535;
+
+ printk(hdlc_banner, maxframe);
+
+ status = tty_register_ldisc(N_HDLC, &n_hdlc_ldisc);
+ if (!status)
+ printk(hdlc_register_ok);
+ else
+ printk(hdlc_register_fail, status);
+
+ if (status)
+ printk(hdlc_init_fail, status);
+ return status;
+
+} /* end of n_hdlc_init() */
+
+static char hdlc_unregister_ok[] __exitdata =
+ KERN_INFO "N_HDLC: line discipline unregistered\n";
+static char hdlc_unregister_fail[] __exitdata =
+ KERN_ERR "N_HDLC: can't unregister line discipline (err = %d)\n";
+
+static void __exit n_hdlc_exit(void)
+{
+ /* Release tty registration of line discipline */
+ int status = tty_unregister_ldisc(N_HDLC);
+
+ if (status)
+ printk(hdlc_unregister_fail, status);
+ else
+ printk(hdlc_unregister_ok);
+}
+
+module_init(n_hdlc_init);
+module_exit(n_hdlc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul Fulghum paulkf@microgate.com");
+module_param(debuglevel, int, 0);
+module_param(maxframe, int, 0);
+MODULE_ALIAS_LDISC(N_HDLC);
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
new file mode 100644
index 00000000..5c6c3145
--- /dev/null
+++ b/drivers/tty/n_r3964.c
@@ -0,0 +1,1263 @@
+/* r3964 linediscipline for linux
+ *
+ * -----------------------------------------------------------
+ * Copyright by
+ * Philips Automation Projects
+ * Kassel (Germany)
+ * -----------------------------------------------------------
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License, incorporated herein by reference.
+ *
+ * Author:
+ * L. Haag
+ *
+ * $Log: n_r3964.c,v $
+ * Revision 1.10 2001/03/18 13:02:24 dwmw2
+ * Fix timer usage, use spinlocks properly.
+ *
+ * Revision 1.9 2001/03/18 12:52:14 dwmw2
+ * Merge changes in 2.4.2
+ *
+ * Revision 1.8 2000/03/23 14:14:54 dwmw2
+ * Fix race in sleeping in r3964_read()
+ *
+ * Revision 1.7 1999/28/08 11:41:50 dwmw2
+ * Port to 2.3 kernel
+ *
+ * Revision 1.6 1998/09/30 00:40:40 dwmw2
+ * Fixed compilation on 2.0.x kernels
+ * Updated to newly registered tty-ldisc number 9
+ *
+ * Revision 1.5 1998/09/04 21:57:36 dwmw2
+ * Signal handling bug fixes, port to 2.1.x.
+ *
+ * Revision 1.4 1998/04/02 20:26:59 lhaag
+ * select, blocking, ...
+ *
+ * Revision 1.3 1998/02/12 18:58:43 root
+ * fixed some memory leaks
+ * calculation of checksum characters
+ *
+ * Revision 1.2 1998/02/07 13:03:34 root
+ * ioctl read_telegram
+ *
+ * Revision 1.1 1998/02/06 19:21:03 root
+ * Initial revision
+ *
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/string.h> /* used in new tty drivers */
+#include <linux/signal.h> /* used in new tty drivers */
+#include <linux/ioctl.h>
+#include <linux/n_r3964.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+
+/*#define DEBUG_QUEUE*/
+
+/* Log successful handshake and protocol operations */
+/*#define DEBUG_PROTO_S*/
+
+/* Log handshake and protocol errors: */
+/*#define DEBUG_PROTO_E*/
+
+/* Log Linediscipline operations (open, close, read, write...): */
+/*#define DEBUG_LDISC*/
+
+/* Log module and memory operations (init, cleanup; kmalloc, kfree): */
+/*#define DEBUG_MODUL*/
+
+/* Macro helpers for debug output: */
+#define TRACE(format, args...) printk("r3964: " format "\n" , ## args)
+
+#ifdef DEBUG_MODUL
+#define TRACE_M(format, args...) printk("r3964: " format "\n" , ## args)
+#else
+#define TRACE_M(fmt, arg...) do {} while (0)
+#endif
+#ifdef DEBUG_PROTO_S
+#define TRACE_PS(format, args...) printk("r3964: " format "\n" , ## args)
+#else
+#define TRACE_PS(fmt, arg...) do {} while (0)
+#endif
+#ifdef DEBUG_PROTO_E
+#define TRACE_PE(format, args...) printk("r3964: " format "\n" , ## args)
+#else
+#define TRACE_PE(fmt, arg...) do {} while (0)
+#endif
+#ifdef DEBUG_LDISC
+#define TRACE_L(format, args...) printk("r3964: " format "\n" , ## args)
+#else
+#define TRACE_L(fmt, arg...) do {} while (0)
+#endif
+#ifdef DEBUG_QUEUE
+#define TRACE_Q(format, args...) printk("r3964: " format "\n" , ## args)
+#else
+#define TRACE_Q(fmt, arg...) do {} while (0)
+#endif
+static void add_tx_queue(struct r3964_info *, struct r3964_block_header *);
+static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code);
+static void put_char(struct r3964_info *pInfo, unsigned char ch);
+static void trigger_transmit(struct r3964_info *pInfo);
+static void retry_transmit(struct r3964_info *pInfo);
+static void transmit_block(struct r3964_info *pInfo);
+static void receive_char(struct r3964_info *pInfo, const unsigned char c);
+static void receive_error(struct r3964_info *pInfo, const char flag);
+static void on_timeout(unsigned long priv);
+static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg);
+static int read_telegram(struct r3964_info *pInfo, struct pid *pid,
+ unsigned char __user * buf);
+static void add_msg(struct r3964_client_info *pClient, int msg_id, int arg,
+ int error_code, struct r3964_block_header *pBlock);
+static struct r3964_message *remove_msg(struct r3964_info *pInfo,
+ struct r3964_client_info *pClient);
+static void remove_client_block(struct r3964_info *pInfo,
+ struct r3964_client_info *pClient);
+
+static int r3964_open(struct tty_struct *tty);
+static void r3964_close(struct tty_struct *tty);
+static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user * buf, size_t nr);
+static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr);
+static int r3964_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg);
+static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
+static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
+ struct poll_table_struct *wait);
+static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count);
+
+static struct tty_ldisc_ops tty_ldisc_N_R3964 = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "R3964",
+ .open = r3964_open,
+ .close = r3964_close,
+ .read = r3964_read,
+ .write = r3964_write,
+ .ioctl = r3964_ioctl,
+ .set_termios = r3964_set_termios,
+ .poll = r3964_poll,
+ .receive_buf = r3964_receive_buf,
+};
+
+static void dump_block(const unsigned char *block, unsigned int length)
+{
+ unsigned int i, j;
+ char linebuf[16 * 3 + 1];
+
+ for (i = 0; i < length; i += 16) {
+ for (j = 0; (j < 16) && (j + i < length); j++) {
+ sprintf(linebuf + 3 * j, "%02x ", block[i + j]);
+ }
+ linebuf[3 * j] = '\0';
+ TRACE_PS("%s", linebuf);
+ }
+}
+
+/*************************************************************
+ * Driver initialisation
+ *************************************************************/
+
+/*************************************************************
+ * Module support routines
+ *************************************************************/
+
+static void __exit r3964_exit(void)
+{
+ int status;
+
+ TRACE_M("cleanup_module()");
+
+ status = tty_unregister_ldisc(N_R3964);
+
+ if (status != 0) {
+ printk(KERN_ERR "r3964: error unregistering linediscipline: "
+ "%d\n", status);
+ } else {
+ TRACE_L("linediscipline successfully unregistered");
+ }
+}
+
+static int __init r3964_init(void)
+{
+ int status;
+
+ printk("r3964: Philips r3964 Driver $Revision: 1.10 $\n");
+
+ /*
+ * Register the tty line discipline
+ */
+
+ status = tty_register_ldisc(N_R3964, &tty_ldisc_N_R3964);
+ if (status == 0) {
+ TRACE_L("line discipline %d registered", N_R3964);
+ TRACE_L("flags=%x num=%x", tty_ldisc_N_R3964.flags,
+ tty_ldisc_N_R3964.num);
+ TRACE_L("open=%p", tty_ldisc_N_R3964.open);
+ TRACE_L("tty_ldisc_N_R3964 = %p", &tty_ldisc_N_R3964);
+ } else {
+ printk(KERN_ERR "r3964: error registering line discipline: "
+ "%d\n", status);
+ }
+ return status;
+}
+
+module_init(r3964_init);
+module_exit(r3964_exit);
+
+/*************************************************************
+ * Protocol implementation routines
+ *************************************************************/
+
+static void add_tx_queue(struct r3964_info *pInfo,
+ struct r3964_block_header *pHeader)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pInfo->lock, flags);
+
+ pHeader->next = NULL;
+
+ if (pInfo->tx_last == NULL) {
+ pInfo->tx_first = pInfo->tx_last = pHeader;
+ } else {
+ pInfo->tx_last->next = pHeader;
+ pInfo->tx_last = pHeader;
+ }
+
+ spin_unlock_irqrestore(&pInfo->lock, flags);
+
+ TRACE_Q("add_tx_queue %p, length %d, tx_first = %p",
+ pHeader, pHeader->length, pInfo->tx_first);
+}
+
+static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code)
+{
+ struct r3964_block_header *pHeader;
+ unsigned long flags;
+#ifdef DEBUG_QUEUE
+ struct r3964_block_header *pDump;
+#endif
+
+ pHeader = pInfo->tx_first;
+
+ if (pHeader == NULL)
+ return;
+
+#ifdef DEBUG_QUEUE
+ printk("r3964: remove_from_tx_queue: %p, length %u - ",
+ pHeader, pHeader->length);
+ for (pDump = pHeader; pDump; pDump = pDump->next)
+ printk("%p ", pDump);
+ printk("\n");
+#endif
+
+ if (pHeader->owner) {
+ if (error_code) {
+ add_msg(pHeader->owner, R3964_MSG_ACK, 0,
+ error_code, NULL);
+ } else {
+ add_msg(pHeader->owner, R3964_MSG_ACK, pHeader->length,
+ error_code, NULL);
+ }
+ wake_up_interruptible(&pInfo->read_wait);
+ }
+
+ spin_lock_irqsave(&pInfo->lock, flags);
+
+ pInfo->tx_first = pHeader->next;
+ if (pInfo->tx_first == NULL) {
+ pInfo->tx_last = NULL;
+ }
+
+ spin_unlock_irqrestore(&pInfo->lock, flags);
+
+ kfree(pHeader);
+ TRACE_M("remove_from_tx_queue - kfree %p", pHeader);
+
+ TRACE_Q("remove_from_tx_queue: tx_first = %p, tx_last = %p",
+ pInfo->tx_first, pInfo->tx_last);
+}
+
+static void add_rx_queue(struct r3964_info *pInfo,
+ struct r3964_block_header *pHeader)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pInfo->lock, flags);
+
+ pHeader->next = NULL;
+
+ if (pInfo->rx_last == NULL) {
+ pInfo->rx_first = pInfo->rx_last = pHeader;
+ } else {
+ pInfo->rx_last->next = pHeader;
+ pInfo->rx_last = pHeader;
+ }
+ pInfo->blocks_in_rx_queue++;
+
+ spin_unlock_irqrestore(&pInfo->lock, flags);
+
+ TRACE_Q("add_rx_queue: %p, length = %d, rx_first = %p, count = %d",
+ pHeader, pHeader->length,
+ pInfo->rx_first, pInfo->blocks_in_rx_queue);
+}
+
+static void remove_from_rx_queue(struct r3964_info *pInfo,
+ struct r3964_block_header *pHeader)
+{
+ unsigned long flags;
+ struct r3964_block_header *pFind;
+
+ if (pHeader == NULL)
+ return;
+
+ TRACE_Q("remove_from_rx_queue: rx_first = %p, rx_last = %p, count = %d",
+ pInfo->rx_first, pInfo->rx_last, pInfo->blocks_in_rx_queue);
+ TRACE_Q("remove_from_rx_queue: %p, length %u",
+ pHeader, pHeader->length);
+
+ spin_lock_irqsave(&pInfo->lock, flags);
+
+ if (pInfo->rx_first == pHeader) {
+ /* Remove the first block in the linked list: */
+ pInfo->rx_first = pHeader->next;
+
+ if (pInfo->rx_first == NULL) {
+ pInfo->rx_last = NULL;
+ }
+ pInfo->blocks_in_rx_queue--;
+ } else {
+ /* Find block to remove: */
+ for (pFind = pInfo->rx_first; pFind; pFind = pFind->next) {
+ if (pFind->next == pHeader) {
+ /* Got it. */
+ pFind->next = pHeader->next;
+ pInfo->blocks_in_rx_queue--;
+ if (pFind->next == NULL) {
+ /* Oh, removed the last one! */
+ pInfo->rx_last = pFind;
+ }
+ break;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&pInfo->lock, flags);
+
+ kfree(pHeader);
+ TRACE_M("remove_from_rx_queue - kfree %p", pHeader);
+
+ TRACE_Q("remove_from_rx_queue: rx_first = %p, rx_last = %p, count = %d",
+ pInfo->rx_first, pInfo->rx_last, pInfo->blocks_in_rx_queue);
+}
+
+static void put_char(struct r3964_info *pInfo, unsigned char ch)
+{
+ struct tty_struct *tty = pInfo->tty;
+ /* FIXME: put_char should not be called from an IRQ */
+ tty_put_char(tty, ch);
+ pInfo->bcc ^= ch;
+}
+
+static void flush(struct r3964_info *pInfo)
+{
+ struct tty_struct *tty = pInfo->tty;
+
+ if (tty == NULL || tty->ops->flush_chars == NULL)
+ return;
+ tty->ops->flush_chars(tty);
+}
+
+static void trigger_transmit(struct r3964_info *pInfo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pInfo->lock, flags);
+
+ if ((pInfo->state == R3964_IDLE) && (pInfo->tx_first != NULL)) {
+ pInfo->state = R3964_TX_REQUEST;
+ pInfo->nRetry = 0;
+ pInfo->flags &= ~R3964_ERROR;
+ mod_timer(&pInfo->tmr, jiffies + R3964_TO_QVZ);
+
+ spin_unlock_irqrestore(&pInfo->lock, flags);
+
+ TRACE_PS("trigger_transmit - sent STX");
+
+ put_char(pInfo, STX);
+ flush(pInfo);
+
+ pInfo->bcc = 0;
+ } else {
+ spin_unlock_irqrestore(&pInfo->lock, flags);
+ }
+}
+
+static void retry_transmit(struct r3964_info *pInfo)
+{
+ if (pInfo->nRetry < R3964_MAX_RETRIES) {
+ TRACE_PE("transmission failed. Retry #%d", pInfo->nRetry);
+ pInfo->bcc = 0;
+ put_char(pInfo, STX);
+ flush(pInfo);
+ pInfo->state = R3964_TX_REQUEST;
+ pInfo->nRetry++;
+ mod_timer(&pInfo->tmr, jiffies + R3964_TO_QVZ);
+ } else {
+ TRACE_PE("transmission failed after %d retries",
+ R3964_MAX_RETRIES);
+
+ remove_from_tx_queue(pInfo, R3964_TX_FAIL);
+
+ put_char(pInfo, NAK);
+ flush(pInfo);
+ pInfo->state = R3964_IDLE;
+
+ trigger_transmit(pInfo);
+ }
+}
+
+static void transmit_block(struct r3964_info *pInfo)
+{
+ struct tty_struct *tty = pInfo->tty;
+ struct r3964_block_header *pBlock = pInfo->tx_first;
+ int room = 0;
+
+ if (tty == NULL || pBlock == NULL) {
+ return;
+ }
+
+ room = tty_write_room(tty);
+
+ TRACE_PS("transmit_block %p, room %d, length %d",
+ pBlock, room, pBlock->length);
+
+ while (pInfo->tx_position < pBlock->length) {
+ if (room < 2)
+ break;
+
+ if (pBlock->data[pInfo->tx_position] == DLE) {
+ /* send additional DLE char: */
+ put_char(pInfo, DLE);
+ }
+ put_char(pInfo, pBlock->data[pInfo->tx_position++]);
+
+ room--;
+ }
+
+ if ((pInfo->tx_position == pBlock->length) && (room >= 3)) {
+ put_char(pInfo, DLE);
+ put_char(pInfo, ETX);
+ if (pInfo->flags & R3964_BCC) {
+ put_char(pInfo, pInfo->bcc);
+ }
+ pInfo->state = R3964_WAIT_FOR_TX_ACK;
+ mod_timer(&pInfo->tmr, jiffies + R3964_TO_QVZ);
+ }
+ flush(pInfo);
+}
+
+static void on_receive_block(struct r3964_info *pInfo)
+{
+ unsigned int length;
+ struct r3964_client_info *pClient;
+ struct r3964_block_header *pBlock;
+
+ length = pInfo->rx_position;
+
+ /* compare byte checksum characters: */
+ if (pInfo->flags & R3964_BCC) {
+ if (pInfo->bcc != pInfo->last_rx) {
+ TRACE_PE("checksum error - got %x but expected %x",
+ pInfo->last_rx, pInfo->bcc);
+ pInfo->flags |= R3964_CHECKSUM;
+ }
+ }
+
+ /* check for errors (parity, overrun,...): */
+ if (pInfo->flags & R3964_ERROR) {
+ TRACE_PE("on_receive_block - transmission failed error %x",
+ pInfo->flags & R3964_ERROR);
+
+ put_char(pInfo, NAK);
+ flush(pInfo);
+ if (pInfo->nRetry < R3964_MAX_RETRIES) {
+ pInfo->state = R3964_WAIT_FOR_RX_REPEAT;
+ pInfo->nRetry++;
+ mod_timer(&pInfo->tmr, jiffies + R3964_TO_RX_PANIC);
+ } else {
+ TRACE_PE("on_receive_block - failed after max retries");
+ pInfo->state = R3964_IDLE;
+ }
+ return;
+ }
+
+ /* received block; submit DLE: */
+ put_char(pInfo, DLE);
+ flush(pInfo);
+ del_timer_sync(&pInfo->tmr);
+ TRACE_PS(" rx success: got %d chars", length);
+
+ /* prepare struct r3964_block_header: */
+ pBlock = kmalloc(length + sizeof(struct r3964_block_header),
+ GFP_KERNEL);
+ TRACE_M("on_receive_block - kmalloc %p", pBlock);
+
+ if (pBlock == NULL)
+ return;
+
+ pBlock->length = length;
+ pBlock->data = ((unsigned char *)pBlock) +
+ sizeof(struct r3964_block_header);
+ pBlock->locks = 0;
+ pBlock->next = NULL;
+ pBlock->owner = NULL;
+
+ memcpy(pBlock->data, pInfo->rx_buf, length);
+
+ /* queue block into rx_queue: */
+ add_rx_queue(pInfo, pBlock);
+
+ /* notify attached client processes: */
+ for (pClient = pInfo->firstClient; pClient; pClient = pClient->next) {
+ if (pClient->sig_flags & R3964_SIG_DATA) {
+ add_msg(pClient, R3964_MSG_DATA, length, R3964_OK,
+ pBlock);
+ }
+ }
+ wake_up_interruptible(&pInfo->read_wait);
+
+ pInfo->state = R3964_IDLE;
+
+ trigger_transmit(pInfo);
+}
+
+static void receive_char(struct r3964_info *pInfo, const unsigned char c)
+{
+ switch (pInfo->state) {
+ case R3964_TX_REQUEST:
+ if (c == DLE) {
+ TRACE_PS("TX_REQUEST - got DLE");
+
+ pInfo->state = R3964_TRANSMITTING;
+ pInfo->tx_position = 0;
+
+ transmit_block(pInfo);
+ } else if (c == STX) {
+ if (pInfo->nRetry == 0) {
+ TRACE_PE("TX_REQUEST - init conflict");
+ if (pInfo->priority == R3964_SLAVE) {
+ goto start_receiving;
+ }
+ } else {
+ TRACE_PE("TX_REQUEST - secondary init "
+ "conflict!? Switching to SLAVE mode "
+ "for next rx.");
+ goto start_receiving;
+ }
+ } else {
+ TRACE_PE("TX_REQUEST - char != DLE: %x", c);
+ retry_transmit(pInfo);
+ }
+ break;
+ case R3964_TRANSMITTING:
+ if (c == NAK) {
+ TRACE_PE("TRANSMITTING - got NAK");
+ retry_transmit(pInfo);
+ } else {
+ TRACE_PE("TRANSMITTING - got invalid char");
+
+ pInfo->state = R3964_WAIT_ZVZ_BEFORE_TX_RETRY;
+ mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ);
+ }
+ break;
+ case R3964_WAIT_FOR_TX_ACK:
+ if (c == DLE) {
+ TRACE_PS("WAIT_FOR_TX_ACK - got DLE");
+ remove_from_tx_queue(pInfo, R3964_OK);
+
+ pInfo->state = R3964_IDLE;
+ trigger_transmit(pInfo);
+ } else {
+ retry_transmit(pInfo);
+ }
+ break;
+ case R3964_WAIT_FOR_RX_REPEAT:
+ /* FALLTHROUGH */
+ case R3964_IDLE:
+ if (c == STX) {
+ /* Prevent rx_queue from overflow: */
+ if (pInfo->blocks_in_rx_queue >=
+ R3964_MAX_BLOCKS_IN_RX_QUEUE) {
+ TRACE_PE("IDLE - got STX but no space in "
+ "rx_queue!");
+ pInfo->state = R3964_WAIT_FOR_RX_BUF;
+ mod_timer(&pInfo->tmr,
+ jiffies + R3964_TO_NO_BUF);
+ break;
+ }
+start_receiving:
+ /* Ok, start receiving: */
+ TRACE_PS("IDLE - got STX");
+ pInfo->rx_position = 0;
+ pInfo->last_rx = 0;
+ pInfo->flags &= ~R3964_ERROR;
+ pInfo->state = R3964_RECEIVING;
+ mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ);
+ pInfo->nRetry = 0;
+ put_char(pInfo, DLE);
+ flush(pInfo);
+ pInfo->bcc = 0;
+ }
+ break;
+ case R3964_RECEIVING:
+ if (pInfo->rx_position < RX_BUF_SIZE) {
+ pInfo->bcc ^= c;
+
+ if (c == DLE) {
+ if (pInfo->last_rx == DLE) {
+ pInfo->last_rx = 0;
+ goto char_to_buf;
+ }
+ pInfo->last_rx = DLE;
+ break;
+ } else if ((c == ETX) && (pInfo->last_rx == DLE)) {
+ if (pInfo->flags & R3964_BCC) {
+ pInfo->state = R3964_WAIT_FOR_BCC;
+ mod_timer(&pInfo->tmr,
+ jiffies + R3964_TO_ZVZ);
+ } else {
+ on_receive_block(pInfo);
+ }
+ } else {
+ pInfo->last_rx = c;
+char_to_buf:
+ pInfo->rx_buf[pInfo->rx_position++] = c;
+ mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ);
+ }
+ }
+ /* else: overflow-msg? BUF_SIZE>MTU; should not happen? */
+ break;
+ case R3964_WAIT_FOR_BCC:
+ pInfo->last_rx = c;
+ on_receive_block(pInfo);
+ break;
+ }
+}
+
+static void receive_error(struct r3964_info *pInfo, const char flag)
+{
+ switch (flag) {
+ case TTY_NORMAL:
+ break;
+ case TTY_BREAK:
+ TRACE_PE("received break");
+ pInfo->flags |= R3964_BREAK;
+ break;
+ case TTY_PARITY:
+ TRACE_PE("parity error");
+ pInfo->flags |= R3964_PARITY;
+ break;
+ case TTY_FRAME:
+ TRACE_PE("frame error");
+ pInfo->flags |= R3964_FRAME;
+ break;
+ case TTY_OVERRUN:
+ TRACE_PE("frame overrun");
+ pInfo->flags |= R3964_OVERRUN;
+ break;
+ default:
+ TRACE_PE("receive_error - unknown flag %d", flag);
+ pInfo->flags |= R3964_UNKNOWN;
+ break;
+ }
+}
+
+static void on_timeout(unsigned long priv)
+{
+ struct r3964_info *pInfo = (void *)priv;
+
+ switch (pInfo->state) {
+ case R3964_TX_REQUEST:
+ TRACE_PE("TX_REQUEST - timeout");
+ retry_transmit(pInfo);
+ break;
+ case R3964_WAIT_ZVZ_BEFORE_TX_RETRY:
+ put_char(pInfo, NAK);
+ flush(pInfo);
+ retry_transmit(pInfo);
+ break;
+ case R3964_WAIT_FOR_TX_ACK:
+ TRACE_PE("WAIT_FOR_TX_ACK - timeout");
+ retry_transmit(pInfo);
+ break;
+ case R3964_WAIT_FOR_RX_BUF:
+ TRACE_PE("WAIT_FOR_RX_BUF - timeout");
+ put_char(pInfo, NAK);
+ flush(pInfo);
+ pInfo->state = R3964_IDLE;
+ break;
+ case R3964_RECEIVING:
+ TRACE_PE("RECEIVING - timeout after %d chars",
+ pInfo->rx_position);
+ put_char(pInfo, NAK);
+ flush(pInfo);
+ pInfo->state = R3964_IDLE;
+ break;
+ case R3964_WAIT_FOR_RX_REPEAT:
+ TRACE_PE("WAIT_FOR_RX_REPEAT - timeout");
+ pInfo->state = R3964_IDLE;
+ break;
+ case R3964_WAIT_FOR_BCC:
+ TRACE_PE("WAIT_FOR_BCC - timeout");
+ put_char(pInfo, NAK);
+ flush(pInfo);
+ pInfo->state = R3964_IDLE;
+ break;
+ }
+}
+
+static struct r3964_client_info *findClient(struct r3964_info *pInfo,
+ struct pid *pid)
+{
+ struct r3964_client_info *pClient;
+
+ for (pClient = pInfo->firstClient; pClient; pClient = pClient->next) {
+ if (pClient->pid == pid) {
+ return pClient;
+ }
+ }
+ return NULL;
+}
+
+static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg)
+{
+ struct r3964_client_info *pClient;
+ struct r3964_client_info **ppClient;
+ struct r3964_message *pMsg;
+
+ if ((arg & R3964_SIG_ALL) == 0) {
+ /* Remove client from client list */
+ for (ppClient = &pInfo->firstClient; *ppClient;
+ ppClient = &(*ppClient)->next) {
+ pClient = *ppClient;
+
+ if (pClient->pid == pid) {
+ TRACE_PS("removing client %d from client list",
+ pid_nr(pid));
+ *ppClient = pClient->next;
+ while (pClient->msg_count) {
+ pMsg = remove_msg(pInfo, pClient);
+ if (pMsg) {
+ kfree(pMsg);
+ TRACE_M("enable_signals - msg "
+ "kfree %p", pMsg);
+ }
+ }
+ put_pid(pClient->pid);
+ kfree(pClient);
+ TRACE_M("enable_signals - kfree %p", pClient);
+ return 0;
+ }
+ }
+ return -EINVAL;
+ } else {
+ pClient = findClient(pInfo, pid);
+ if (pClient) {
+ /* update signal options */
+ pClient->sig_flags = arg;
+ } else {
+ /* add client to client list */
+ pClient = kmalloc(sizeof(struct r3964_client_info),
+ GFP_KERNEL);
+ TRACE_M("enable_signals - kmalloc %p", pClient);
+ if (pClient == NULL)
+ return -ENOMEM;
+
+ TRACE_PS("add client %d to client list", pid_nr(pid));
+ spin_lock_init(&pClient->lock);
+ pClient->sig_flags = arg;
+ pClient->pid = get_pid(pid);
+ pClient->next = pInfo->firstClient;
+ pClient->first_msg = NULL;
+ pClient->last_msg = NULL;
+ pClient->next_block_to_read = NULL;
+ pClient->msg_count = 0;
+ pInfo->firstClient = pClient;
+ }
+ }
+
+ return 0;
+}
+
+static int read_telegram(struct r3964_info *pInfo, struct pid *pid,
+ unsigned char __user * buf)
+{
+ struct r3964_client_info *pClient;
+ struct r3964_block_header *block;
+
+ if (!buf) {
+ return -EINVAL;
+ }
+
+ pClient = findClient(pInfo, pid);
+ if (pClient == NULL) {
+ return -EINVAL;
+ }
+
+ block = pClient->next_block_to_read;
+ if (!block) {
+ return 0;
+ } else {
+ if (copy_to_user(buf, block->data, block->length))
+ return -EFAULT;
+
+ remove_client_block(pInfo, pClient);
+ return block->length;
+ }
+
+ return -EINVAL;
+}
+
+static void add_msg(struct r3964_client_info *pClient, int msg_id, int arg,
+ int error_code, struct r3964_block_header *pBlock)
+{
+ struct r3964_message *pMsg;
+ unsigned long flags;
+
+ if (pClient->msg_count < R3964_MAX_MSG_COUNT - 1) {
+queue_the_message:
+
+ pMsg = kmalloc(sizeof(struct r3964_message),
+ error_code ? GFP_ATOMIC : GFP_KERNEL);
+ TRACE_M("add_msg - kmalloc %p", pMsg);
+ if (pMsg == NULL) {
+ return;
+ }
+
+ spin_lock_irqsave(&pClient->lock, flags);
+
+ pMsg->msg_id = msg_id;
+ pMsg->arg = arg;
+ pMsg->error_code = error_code;
+ pMsg->block = pBlock;
+ pMsg->next = NULL;
+
+ if (pClient->last_msg == NULL) {
+ pClient->first_msg = pClient->last_msg = pMsg;
+ } else {
+ pClient->last_msg->next = pMsg;
+ pClient->last_msg = pMsg;
+ }
+
+ pClient->msg_count++;
+
+ if (pBlock != NULL) {
+ pBlock->locks++;
+ }
+ spin_unlock_irqrestore(&pClient->lock, flags);
+ } else {
+ if ((pClient->last_msg->msg_id == R3964_MSG_ACK)
+ && (pClient->last_msg->error_code == R3964_OVERFLOW)) {
+ pClient->last_msg->arg++;
+ TRACE_PE("add_msg - inc prev OVERFLOW-msg");
+ } else {
+ msg_id = R3964_MSG_ACK;
+ arg = 0;
+ error_code = R3964_OVERFLOW;
+ pBlock = NULL;
+ TRACE_PE("add_msg - queue OVERFLOW-msg");
+ goto queue_the_message;
+ }
+ }
+ /* Send SIGIO signal to client process: */
+ if (pClient->sig_flags & R3964_USE_SIGIO) {
+ kill_pid(pClient->pid, SIGIO, 1);
+ }
+}
+
+static struct r3964_message *remove_msg(struct r3964_info *pInfo,
+ struct r3964_client_info *pClient)
+{
+ struct r3964_message *pMsg = NULL;
+ unsigned long flags;
+
+ if (pClient->first_msg) {
+ spin_lock_irqsave(&pClient->lock, flags);
+
+ pMsg = pClient->first_msg;
+ pClient->first_msg = pMsg->next;
+ if (pClient->first_msg == NULL) {
+ pClient->last_msg = NULL;
+ }
+
+ pClient->msg_count--;
+ if (pMsg->block) {
+ remove_client_block(pInfo, pClient);
+ pClient->next_block_to_read = pMsg->block;
+ }
+ spin_unlock_irqrestore(&pClient->lock, flags);
+ }
+ return pMsg;
+}
+
+static void remove_client_block(struct r3964_info *pInfo,
+ struct r3964_client_info *pClient)
+{
+ struct r3964_block_header *block;
+
+ TRACE_PS("remove_client_block PID %d", pid_nr(pClient->pid));
+
+ block = pClient->next_block_to_read;
+ if (block) {
+ block->locks--;
+ if (block->locks == 0) {
+ remove_from_rx_queue(pInfo, block);
+ }
+ }
+ pClient->next_block_to_read = NULL;
+}
+
+/*************************************************************
+ * Line discipline routines
+ *************************************************************/
+
+static int r3964_open(struct tty_struct *tty)
+{
+ struct r3964_info *pInfo;
+
+ TRACE_L("open");
+ TRACE_L("tty=%p, PID=%d, disc_data=%p",
+ tty, current->pid, tty->disc_data);
+
+ pInfo = kmalloc(sizeof(struct r3964_info), GFP_KERNEL);
+ TRACE_M("r3964_open - info kmalloc %p", pInfo);
+
+ if (!pInfo) {
+ printk(KERN_ERR "r3964: failed to alloc info structure\n");
+ return -ENOMEM;
+ }
+
+ pInfo->rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
+ TRACE_M("r3964_open - rx_buf kmalloc %p", pInfo->rx_buf);
+
+ if (!pInfo->rx_buf) {
+ printk(KERN_ERR "r3964: failed to alloc receive buffer\n");
+ kfree(pInfo);
+ TRACE_M("r3964_open - info kfree %p", pInfo);
+ return -ENOMEM;
+ }
+
+ pInfo->tx_buf = kmalloc(TX_BUF_SIZE, GFP_KERNEL);
+ TRACE_M("r3964_open - tx_buf kmalloc %p", pInfo->tx_buf);
+
+ if (!pInfo->tx_buf) {
+ printk(KERN_ERR "r3964: failed to alloc transmit buffer\n");
+ kfree(pInfo->rx_buf);
+ TRACE_M("r3964_open - rx_buf kfree %p", pInfo->rx_buf);
+ kfree(pInfo);
+ TRACE_M("r3964_open - info kfree %p", pInfo);
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&pInfo->lock);
+ pInfo->tty = tty;
+ init_waitqueue_head(&pInfo->read_wait);
+ pInfo->priority = R3964_MASTER;
+ pInfo->rx_first = pInfo->rx_last = NULL;
+ pInfo->tx_first = pInfo->tx_last = NULL;
+ pInfo->rx_position = 0;
+ pInfo->tx_position = 0;
+ pInfo->last_rx = 0;
+ pInfo->blocks_in_rx_queue = 0;
+ pInfo->firstClient = NULL;
+ pInfo->state = R3964_IDLE;
+ pInfo->flags = R3964_DEBUG;
+ pInfo->nRetry = 0;
+
+ tty->disc_data = pInfo;
+ tty->receive_room = 65536;
+
+ setup_timer(&pInfo->tmr, on_timeout, (unsigned long)pInfo);
+
+ return 0;
+}
+
+static void r3964_close(struct tty_struct *tty)
+{
+ struct r3964_info *pInfo = tty->disc_data;
+ struct r3964_client_info *pClient, *pNext;
+ struct r3964_message *pMsg;
+ struct r3964_block_header *pHeader, *pNextHeader;
+ unsigned long flags;
+
+ TRACE_L("close");
+
+ /*
+ * Make sure that our task queue isn't activated. If it
+ * is, take it out of the linked list.
+ */
+ del_timer_sync(&pInfo->tmr);
+
+ /* Remove client-structs and message queues: */
+ pClient = pInfo->firstClient;
+ while (pClient) {
+ pNext = pClient->next;
+ while (pClient->msg_count) {
+ pMsg = remove_msg(pInfo, pClient);
+ if (pMsg) {
+ kfree(pMsg);
+ TRACE_M("r3964_close - msg kfree %p", pMsg);
+ }
+ }
+ put_pid(pClient->pid);
+ kfree(pClient);
+ TRACE_M("r3964_close - client kfree %p", pClient);
+ pClient = pNext;
+ }
+ /* Remove jobs from tx_queue: */
+ spin_lock_irqsave(&pInfo->lock, flags);
+ pHeader = pInfo->tx_first;
+ pInfo->tx_first = pInfo->tx_last = NULL;
+ spin_unlock_irqrestore(&pInfo->lock, flags);
+
+ while (pHeader) {
+ pNextHeader = pHeader->next;
+ kfree(pHeader);
+ pHeader = pNextHeader;
+ }
+
+ /* Free buffers: */
+ wake_up_interruptible(&pInfo->read_wait);
+ kfree(pInfo->rx_buf);
+ TRACE_M("r3964_close - rx_buf kfree %p", pInfo->rx_buf);
+ kfree(pInfo->tx_buf);
+ TRACE_M("r3964_close - tx_buf kfree %p", pInfo->tx_buf);
+ kfree(pInfo);
+ TRACE_M("r3964_close - info kfree %p", pInfo);
+}
+
+static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user * buf, size_t nr)
+{
+ struct r3964_info *pInfo = tty->disc_data;
+ struct r3964_client_info *pClient;
+ struct r3964_message *pMsg;
+ struct r3964_client_message theMsg;
+ int ret;
+
+ TRACE_L("read()");
+
+ tty_lock();
+
+ pClient = findClient(pInfo, task_pid(current));
+ if (pClient) {
+ pMsg = remove_msg(pInfo, pClient);
+ if (pMsg == NULL) {
+ /* no messages available. */
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
+ /* block until there is a message: */
+ wait_event_interruptible_tty(pInfo->read_wait,
+ (pMsg = remove_msg(pInfo, pClient)));
+ }
+
+ /* If we still haven't got a message, we must have been signalled */
+
+ if (!pMsg) {
+ ret = -EINTR;
+ goto unlock;
+ }
+
+ /* deliver msg to client process: */
+ theMsg.msg_id = pMsg->msg_id;
+ theMsg.arg = pMsg->arg;
+ theMsg.error_code = pMsg->error_code;
+ ret = sizeof(struct r3964_client_message);
+
+ kfree(pMsg);
+ TRACE_M("r3964_read - msg kfree %p", pMsg);
+
+ if (copy_to_user(buf, &theMsg, ret)) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ TRACE_PS("read - return %d", ret);
+ goto unlock;
+ }
+ ret = -EPERM;
+unlock:
+ tty_unlock();
+ return ret;
+}
+
+static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *data, size_t count)
+{
+ struct r3964_info *pInfo = tty->disc_data;
+ struct r3964_block_header *pHeader;
+ struct r3964_client_info *pClient;
+ unsigned char *new_data;
+
+ TRACE_L("write request, %d characters", count);
+/*
+ * Verify the pointers
+ */
+
+ if (!pInfo)
+ return -EIO;
+
+/*
+ * Ensure that the caller does not wish to send too much.
+ */
+ if (count > R3964_MTU) {
+ if (pInfo->flags & R3964_DEBUG) {
+ TRACE_L(KERN_WARNING "r3964_write: truncating user "
+ "packet from %u to mtu %d", count, R3964_MTU);
+ }
+ count = R3964_MTU;
+ }
+/*
+ * Allocate a buffer big enough for a block header plus the data, and copy the caller's data in after the header
+ */
+ new_data = kmalloc(count + sizeof(struct r3964_block_header),
+ GFP_KERNEL);
+ TRACE_M("r3964_write - kmalloc %p", new_data);
+ if (new_data == NULL) {
+ if (pInfo->flags & R3964_DEBUG) {
+ printk(KERN_ERR "r3964_write: no memory\n");
+ }
+ return -ENOSPC;
+ }
+
+ pHeader = (struct r3964_block_header *)new_data;
+ pHeader->data = new_data + sizeof(struct r3964_block_header);
+ pHeader->length = count;
+ pHeader->locks = 0;
+ pHeader->owner = NULL;
+
+ tty_lock();
+
+ pClient = findClient(pInfo, task_pid(current));
+ if (pClient) {
+ pHeader->owner = pClient;
+ }
+
+ memcpy(pHeader->data, data, count); /* We already verified this */
+
+ if (pInfo->flags & R3964_DEBUG) {
+ dump_block(pHeader->data, count);
+ }
+
+/*
+ * Add buffer to transmit-queue:
+ */
+ add_tx_queue(pInfo, pHeader);
+ trigger_transmit(pInfo);
+
+ tty_unlock();
+
+ return 0;
+}
+
+static int r3964_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct r3964_info *pInfo = tty->disc_data;
+ if (pInfo == NULL)
+ return -EINVAL;
+ switch (cmd) {
+ case R3964_ENABLE_SIGNALS:
+ return enable_signals(pInfo, task_pid(current), arg);
+ case R3964_SETPRIORITY:
+ if (arg < R3964_MASTER || arg > R3964_SLAVE)
+ return -EINVAL;
+ pInfo->priority = arg & 0xff;
+ return 0;
+ case R3964_USE_BCC:
+ if (arg)
+ pInfo->flags |= R3964_BCC;
+ else
+ pInfo->flags &= ~R3964_BCC;
+ return 0;
+ case R3964_READ_TELEGRAM:
+ return read_telegram(pInfo, task_pid(current),
+ (unsigned char __user *)arg);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old)
+{
+ TRACE_L("set_termios");
+}
+
+/* Called without the kernel lock held - fine */
+static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct r3964_info *pInfo = tty->disc_data;
+ struct r3964_client_info *pClient;
+ struct r3964_message *pMsg = NULL;
+ unsigned long flags;
+ int result = POLLOUT;
+
+ TRACE_L("POLL");
+
+ pClient = findClient(pInfo, task_pid(current));
+ if (pClient) {
+ poll_wait(file, &pInfo->read_wait, wait);
+ spin_lock_irqsave(&pInfo->lock, flags);
+ pMsg = pClient->first_msg;
+ spin_unlock_irqrestore(&pInfo->lock, flags);
+ if (pMsg)
+ result |= POLLIN | POLLRDNORM;
+ } else {
+ result = -EINVAL;
+ }
+ return result;
+}
+
+static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ struct r3964_info *pInfo = tty->disc_data;
+ const unsigned char *p;
+ char *f, flags = 0;
+ int i;
+
+ for (i = count, p = cp, f = fp; i; i--, p++) {
+ if (f)
+ flags = *f++;
+ if (flags == TTY_NORMAL) {
+ receive_char(pInfo, *p);
+ } else {
+ receive_error(pInfo, flags);
+ }
+
+ }
+}
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_R3964);
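For illustration, a minimal userspace client of the R3964 line discipline above might look like the following sketch. The header locations (<linux/tty.h> for N_R3964, <linux/n_r3964.h> for the R3964_* ioctls and struct r3964_client_message), the device path and the receive-buffer size are assumptions rather than anything mandated by the driver, and error handling is mostly omitted.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tty.h>          /* N_R3964 (assumed header location) */
#include <linux/n_r3964.h>      /* R3964_* ioctls, struct r3964_client_message */

int main(void)
{
	int fd = open("/dev/ttyS0", O_RDWR);            /* example device path */
	int ldisc = N_R3964;
	struct r3964_client_message msg;
	unsigned char telegram[4096];                   /* assumed >= largest rx block */

	if (fd < 0)
		return 1;

	ioctl(fd, TIOCSETD, &ldisc);                    /* attach the line discipline */
	ioctl(fd, R3964_ENABLE_SIGNALS, R3964_SIG_ALL); /* register this process as a client */

	write(fd, "hello", 5);                          /* queue one telegram for transmission */

	/* read() on the ldisc delivers r3964_client_message events, not raw data */
	if (read(fd, &msg, sizeof(msg)) == sizeof(msg) && msg.msg_id == R3964_MSG_DATA) {
		int len = ioctl(fd, R3964_READ_TELEGRAM, telegram);
		printf("received telegram of %d bytes\n", len);
	}

	close(fd);
	return 0;
}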
diff --git a/drivers/tty/n_tracerouter.c b/drivers/tty/n_tracerouter.c
new file mode 100644
index 00000000..1f063d3a
--- /dev/null
+++ b/drivers/tty/n_tracerouter.c
@@ -0,0 +1,243 @@
+/*
+ * n_tracerouter.c - Trace data router through tty space
+ *
+ * Copyright (C) Intel 2011
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This trace router uses the Linux line discipline framework to route
+ * trace data coming from a HW Modem to a PTI (Parallel Trace Module) port.
+ * The solution is not specific to a HW modem and this line disciple can
+ * be used to route any stream of data in kernel space.
+ * This is part of a solution for the P1149.7, compact JTAG, standard.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/tty.h>
+#include <linux/tty_ldisc.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <asm-generic/bug.h>
+#include "n_tracesink.h"
+
+/*
+ * Other ldisc drivers use 65536 which basically means,
+ * 'I can always accept 64k' and flow control is off.
+ * This number is deemed appropriate for this driver.
+ */
+#define RECEIVE_ROOM 65536
+#define DRIVERNAME "n_tracerouter"
+
+/*
+ * struct to hold private configuration data for this ldisc.
+ * opencalled is used to hold if this ldisc has been opened.
+ * kref_tty holds the tty reference the ldisc sits on top of.
+ */
+struct tracerouter_data {
+ u8 opencalled;
+ struct tty_struct *kref_tty;
+};
+static struct tracerouter_data *tr_data;
+
+/* lock for when tty reference is being used */
+static DEFINE_MUTEX(routelock);
+
+/**
+ * n_tracerouter_open() - Called when a tty is opened by a SW entity.
+ * @tty: terminal device to the ldisc.
+ *
+ * Return:
+ * 0 for success.
+ *
+ * Caveats: This should only be opened one time per SW entity.
+ */
+static int n_tracerouter_open(struct tty_struct *tty)
+{
+ int retval = -EEXIST;
+
+ mutex_lock(&routelock);
+ if (tr_data->opencalled == 0) {
+
+ tr_data->kref_tty = tty_kref_get(tty);
+ if (tr_data->kref_tty == NULL) {
+ retval = -EFAULT;
+ } else {
+ tr_data->opencalled = 1;
+ tty->disc_data = tr_data;
+ tty->receive_room = RECEIVE_ROOM;
+ tty_driver_flush_buffer(tty);
+ retval = 0;
+ }
+ }
+ mutex_unlock(&routelock);
+ return retval;
+}
+
+/**
+ * n_tracerouter_close() - close connection
+ * @tty: terminal device to the ldisc.
+ *
+ * Called when a software entity wants to close a connection.
+ */
+static void n_tracerouter_close(struct tty_struct *tty)
+{
+ struct tracerouter_data *tptr = tty->disc_data;
+
+ mutex_lock(&routelock);
+ WARN_ON(tptr->kref_tty != tr_data->kref_tty);
+ tty_driver_flush_buffer(tty);
+ tty_kref_put(tr_data->kref_tty);
+ tr_data->kref_tty = NULL;
+ tr_data->opencalled = 0;
+ tty->disc_data = NULL;
+ mutex_unlock(&routelock);
+}
+
+/**
+ * n_tracerouter_read() - read request from user space
+ * @tty: terminal device passed into the ldisc.
+ * @file: pointer to open file object.
+ * @buf: pointer to the data buffer that gets eventually returned.
+ * @nr: number of bytes of the data buffer that is returned.
+ *
+ * function that allows read() functionality in userspace. By default if this
+ * is not implemented it returns -EIO. This module is functioning like a
+ * router via n_tracerouter_receivebuf(), and there is no real requirement
+ * to implement this function. However, an error return value other than
+ * -EIO should be used just to show that there was an intent not to have
+ * this function implemented. Return value based on read() man pages.
+ *
+ * Return:
+ * -EINVAL
+ */
+static ssize_t n_tracerouter_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user *buf, size_t nr) {
+ return -EINVAL;
+}
+
+/**
+ * n_tracerouter_write() - Function that allows write() in userspace.
+ * @tty: terminal device passed into the ldisc.
+ * @file: pointer to open file object.
+ * @buf: pointer to the data buffer that gets eventually returned.
+ * @nr: number of bytes of the data buffer that is returned.
+ *
+ * By default if this is not implemented, it returns -EIO.
+ * This should not be implemented, ever, because
+ * 1. this driver is functioning like a router via
+ * n_tracerouter_receivebuf()
+ * 2. No writes to HW will ever go through this line discipline driver.
+ * However, an error return value other than -EIO should be used
+ * just to show that there was an intent not to have this function
+ * implemented. Return value based on write() man pages.
+ *
+ * Return:
+ * -EINVAL
+ */
+static ssize_t n_tracerouter_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr) {
+ return -EINVAL;
+}
+
+/**
+ * n_tracerouter_receivebuf() - Routing function for driver.
+ * @tty: terminal device passed into the ldisc. It's assumed
+ * tty will never be NULL.
+ * @cp: buffer, block of characters to be eventually read by
+ * someone, somewhere (user read() call or some kernel function).
+ * @fp: flag buffer.
+ * @count: number of characters (aka, bytes) in cp.
+ *
+ * This function takes the input buffer, cp, and passes it to
+ * an external API function for processing.
+ */
+static void n_tracerouter_receivebuf(struct tty_struct *tty,
+ const unsigned char *cp,
+ char *fp, int count)
+{
+ mutex_lock(&routelock);
+ n_tracesink_datadrain((u8 *) cp, count);
+ mutex_unlock(&routelock);
+}
+
+/*
+ * Flush buffer is not implemented as the ldisc has no internal buffering
+ * so the tty_driver_flush_buffer() is sufficient for this driver's needs.
+ */
+
+static struct tty_ldisc_ops tty_ptirouter_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = DRIVERNAME,
+ .open = n_tracerouter_open,
+ .close = n_tracerouter_close,
+ .read = n_tracerouter_read,
+ .write = n_tracerouter_write,
+ .receive_buf = n_tracerouter_receivebuf
+};
+
+/**
+ * n_tracerouter_init - module initialisation
+ *
+ * Registers this module as a line discipline driver.
+ *
+ * Return:
+ * 0 for success, any other value error.
+ */
+static int __init n_tracerouter_init(void)
+{
+ int retval;
+
+ tr_data = kzalloc(sizeof(struct tracerouter_data), GFP_KERNEL);
+ if (tr_data == NULL)
+ return -ENOMEM;
+
+
+ /* Note N_TRACEROUTER is defined in linux/tty.h */
+ retval = tty_register_ldisc(N_TRACEROUTER, &tty_ptirouter_ldisc);
+ if (retval < 0) {
+ pr_err("%s: Registration failed: %d\n", __func__, retval);
+ kfree(tr_data);
+ }
+ return retval;
+}
+
+/**
+ * n_tracerouter_exit - module unload
+ *
+ * Removes this module as a line discipline driver.
+ */
+static void __exit n_tracerouter_exit(void)
+{
+ int retval = tty_unregister_ldisc(N_TRACEROUTER);
+
+ if (retval < 0)
+ pr_err("%s: Unregistration failed: %d\n", __func__, retval);
+ else
+ kfree(tr_data);
+}
+
+module_init(n_tracerouter_init);
+module_exit(n_tracerouter_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jay Freyensee");
+MODULE_ALIAS_LDISC(N_TRACEROUTER);
+MODULE_DESCRIPTION("Trace router ldisc driver");
diff --git a/drivers/tty/n_tracesink.c b/drivers/tty/n_tracesink.c
new file mode 100644
index 00000000..ddce58b9
--- /dev/null
+++ b/drivers/tty/n_tracesink.c
@@ -0,0 +1,238 @@
+/*
+ * n_tracesink.c - Trace data router and sink path through tty space.
+ *
+ * Copyright (C) Intel 2011
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The trace sink uses the Linux line discipline framework to receive
+ * trace data coming from the PTI source line discipline driver
+ * to a user-desired tty port, like USB.
+ * This is to provide a way to extract modem trace data on
+ * devices that do not have a PTI HW module, or just need modem
+ * trace data to come out of a different HW output port.
+ * This is part of a solution for the P1149.7, compact JTAG, standard.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/tty.h>
+#include <linux/tty_ldisc.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <asm-generic/bug.h>
+#include "n_tracesink.h"
+
+/*
+ * Other ldisc drivers use 65536 which basically means,
+ * 'I can always accept 64k' and flow control is off.
+ * This number is deemed appropriate for this driver.
+ */
+#define RECEIVE_ROOM 65536
+#define DRIVERNAME "n_tracesink"
+
+/*
+ * There is a quirk with this ldisc: it can write data
+ * to a tty on behalf of anyone calling its kernel API, which
+ * meets customer requirements in the drivers/misc/pti.c
+ * project. So it needs to know when it can and cannot write when
+ * the API is called. In theory, the API can be called
+ * after an init() but before a successful open(), which
+ * would crash the system if the tty is not checked.
+ */
+static struct tty_struct *this_tty;
+static DEFINE_MUTEX(writelock);
+
+/**
+ * n_tracesink_open() - Called when a tty is opened by a SW entity.
+ * @tty: terminal device to the ldisc.
+ *
+ * Return:
+ * 0 for success,
+ * -EFAULT = couldn't get a tty kref n_tracesink will sit
+ * on top of
+ * -EEXIST = open() called successfully once and it cannot
+ * be called again.
+ *
+ * Caveats: open() should only be successful the first time a
+ * SW entity calls it.
+ */
+static int n_tracesink_open(struct tty_struct *tty)
+{
+ int retval = -EEXIST;
+
+ mutex_lock(&writelock);
+ if (this_tty == NULL) {
+ this_tty = tty_kref_get(tty);
+ if (this_tty == NULL) {
+ retval = -EFAULT;
+ } else {
+ tty->disc_data = this_tty;
+ tty_driver_flush_buffer(tty);
+ retval = 0;
+ }
+ }
+ mutex_unlock(&writelock);
+
+ return retval;
+}
+
+/**
+ * n_tracesink_close() - close connection
+ * @tty: terminal device to the ldisc.
+ *
+ * Called when a software entity wants to close a connection.
+ */
+static void n_tracesink_close(struct tty_struct *tty)
+{
+ mutex_lock(&writelock);
+ tty_driver_flush_buffer(tty);
+ tty_kref_put(this_tty);
+ this_tty = NULL;
+ tty->disc_data = NULL;
+ mutex_unlock(&writelock);
+}
+
+/**
+ * n_tracesink_read() - read request from user space
+ * @tty: terminal device passed into the ldisc.
+ * @file: pointer to open file object.
+ * @buf: pointer to the data buffer that gets eventually returned.
+ * @nr: number of bytes of the data buffer that is returned.
+ *
+ * function that allows read() functionality in userspace. By default if this
+ * is not implemented it returns -EIO. This module is functioning like a
+ * router via n_tracesink_receivebuf(), and there is no real requirement
+ * to implement this function. However, an error return value other than
+ * -EIO should be used just to show that there was an intent not to have
+ * this function implemented. Return value based on read() man pages.
+ *
+ * Return:
+ * -EINVAL
+ */
+static ssize_t n_tracesink_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user *buf, size_t nr) {
+ return -EINVAL;
+}
+
+/**
+ * n_tracesink_write() - Function that allows write() in userspace.
+ * @tty: terminal device passed into the ldisc.
+ * @file: pointer to open file object.
+ * @buf: pointer to the data buffer that gets eventually returned.
+ * @nr: number of bytes of the data buffer that is returned.
+ *
+ * By default if this is not implemented, it returns -EIO.
+ * This should not be implemented, ever, because
+ * 1. this driver is functioning like a router via
+ * n_tracesink_receivebuf()
+ * 2. No writes to HW will ever go through this line discipline driver.
+ * However, an error return value other than -EIO should be used
+ * just to show that there was an intent not to have this function
+ * implemented. Return value based on write() man pages.
+ *
+ * Return:
+ * -EINVAL
+ */
+static ssize_t n_tracesink_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr) {
+ return -EINVAL;
+}
+
+/**
+ * n_tracesink_datadrain() - Kernel API function used to route
+ * trace debugging data to user-defined
+ * port like USB.
+ *
+ * @buf: Trace debugging data buffer to write to tty target
+ * port. Null value will return with no write occurring.
+ * @count: Size of buf. Value of 0 or a negative number will
+ * return with no write occurring.
+ *
+ * Caveat: If this line discipline has not been attached to a tty
+ * via a successful open() call, this function has no tty pointer
+ * and therefore does not call the tty's write().
+ */
+void n_tracesink_datadrain(u8 *buf, int count)
+{
+ mutex_lock(&writelock);
+
+ if ((buf != NULL) && (count > 0) && (this_tty != NULL))
+ this_tty->ops->write(this_tty, buf, count);
+
+ mutex_unlock(&writelock);
+}
+EXPORT_SYMBOL_GPL(n_tracesink_datadrain);
+
+/*
+ * Flush buffer is not implemented as the ldisc has no internal buffering
+ * so the tty_driver_flush_buffer() is sufficient for this driver's needs.
+ */
+
+/*
+ * tty_ldisc function operations for this driver.
+ */
+static struct tty_ldisc_ops tty_n_tracesink = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = DRIVERNAME,
+ .open = n_tracesink_open,
+ .close = n_tracesink_close,
+ .read = n_tracesink_read,
+ .write = n_tracesink_write
+};
+
+/**
+ * n_tracesink_init- module initialisation
+ *
+ * Registers this module as a line discipline driver.
+ *
+ * Return:
+ * 0 for success, any other value error.
+ */
+static int __init n_tracesink_init(void)
+{
+ /* Note N_TRACESINK is defined in linux/tty.h */
+ int retval = tty_register_ldisc(N_TRACESINK, &tty_n_tracesink);
+
+ if (retval < 0)
+ pr_err("%s: Registration failed: %d\n", __func__, retval);
+
+ return retval;
+}
+
+/**
+ * n_tracesink_exit - module unload
+ *
+ * Removes this module as a line discipline driver.
+ */
+static void __exit n_tracesink_exit(void)
+{
+ int retval = tty_unregister_ldisc(N_TRACESINK);
+
+ if (retval < 0)
+ pr_err("%s: Unregistration failed: %d\n", __func__, retval);
+}
+
+module_init(n_tracesink_init);
+module_exit(n_tracesink_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jay Freyensee");
+MODULE_ALIAS_LDISC(N_TRACESINK);
+MODULE_DESCRIPTION("Trace sink ldisc driver");
diff --git a/drivers/tty/n_tracesink.h b/drivers/tty/n_tracesink.h
new file mode 100644
index 00000000..a68bb44f
--- /dev/null
+++ b/drivers/tty/n_tracesink.h
@@ -0,0 +1,36 @@
+/*
+ * n_tracesink.h - Kernel driver API to route trace data in kernel space.
+ *
+ * Copyright (C) Intel 2011
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The PTI (Parallel Trace Interface) driver directs trace data routed from
+ * various parts in the system out through the Intel Penwell PTI port and
+ * out of the mobile device for analysis with a debugging tool
+ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
+ * compact JTAG, standard.
+ *
+ * This header file is used by n_tracerouter to send the data from
+ * its tty port to the tty port this module sits on. This
+ * mechanism can also be used independently of the PTI module.
+ *
+ */
+
+#ifndef N_TRACESINK_H_
+#define N_TRACESINK_H_
+
+void n_tracesink_datadrain(u8 *buf, int count);
+
+#endif
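A kernel module that wants to push its own trace bytes through the sink could call this API as in the minimal sketch below. The module name and sample data are invented, the relative include assumes an in-tree build next to drivers/tty, and only n_tracesink_datadrain() comes from the header above; as noted in its caveat, the call safely drops the data if no tty has yet been attached via open().

#include <linux/module.h>
#include <linux/kernel.h>
#include "n_tracesink.h"

static int __init tracefeed_init(void)
{
	static u8 sample[] = "trace: hello\n";

	/*
	 * n_tracesink_datadrain() checks its internal tty pointer under
	 * the write lock, so this is safe even before N_TRACESINK has
	 * been attached to an output tty; the bytes are simply dropped.
	 */
	n_tracesink_datadrain(sample, sizeof(sample) - 1);
	return 0;
}

static void __exit tracefeed_exit(void)
{
}

module_init(tracefeed_init);
module_exit(tracefeed_exit);
MODULE_LICENSE("GPL");	/* required: n_tracesink_datadrain is EXPORT_SYMBOL_GPL */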
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
new file mode 100644
index 00000000..c3954fbf
--- /dev/null
+++ b/drivers/tty/n_tty.c
@@ -0,0 +1,2128 @@
+/*
+ * n_tty.c --- implements the N_TTY line discipline.
+ *
+ * This code used to be in tty_io.c, but things are getting hairy
+ * enough that it made sense to split things off. (The N_TTY
+ * processing has changed so much that it's hardly recognizable,
+ * anyway...)
+ *
+ * Note that the open routine for N_TTY is guaranteed never to return
+ * an error. This is because Linux will fall back to setting a line
+ * to N_TTY if it can not switch to any other line discipline.
+ *
+ * Written by Theodore Ts'o, Copyright 1994.
+ *
+ * This file also contains code originally written by Linus Torvalds,
+ * Copyright 1991, 1992, 1993, and by Julian Cowley, Copyright 1994.
+ *
+ * This file may be redistributed under the terms of the GNU General Public
+ * License.
+ *
+ * Reduced memory usage for older ARM systems - Russell King.
+ *
+ * 2000/01/20 Fixed SMP locking on put_tty_queue using bits of
+ * the patch by Andrew J. Kroll <ag784@freenet.buffalo.edu>
+ * who actually finally proved there really was a race.
+ *
+ * 2002/03/18 Implemented n_tty_wakeup to send SIGIO POLL_OUTs to
+ * waiting writing processes-Sapan Bhatia <sapan@corewars.org>.
+ * Also fixed a bug in BLOCKING mode where n_tty_write returns
+ * EAGAIN
+ */
+
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/bitops.h>
+#include <linux/audit.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+
+/* select/poll report the tty writable once fewer than this many chars remain in the xmit buffer */
+#define WAKEUP_CHARS 256
+
+/*
+ * This defines the low- and high-watermarks for throttling and
+ * unthrottling the TTY driver. These watermarks are used for
+ * controlling the space in the read buffer.
+ */
+#define TTY_THRESHOLD_THROTTLE 128 /* now based on remaining room */
+#define TTY_THRESHOLD_UNTHROTTLE 128
+
+/*
+ * Special byte codes used in the echo buffer to represent operations
+ * or special handling of characters. Bytes in the echo buffer that
+ * are not part of such special blocks are treated as normal character
+ * codes.
+ */
+#define ECHO_OP_START 0xff
+#define ECHO_OP_MOVE_BACK_COL 0x80
+#define ECHO_OP_SET_CANON_COL 0x81
+#define ECHO_OP_ERASE_TAB 0x82
+
+static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
+ unsigned char __user *ptr)
+{
+ tty_audit_add_data(tty, &x, 1);
+ return put_user(x, ptr);
+}
+
+/**
+ * n_tty_set_room - receive space
+ * @tty: terminal
+ *
+ * Called by the driver to find out how much data it is
+ * permitted to feed to the line discipline without any being lost
+ * and thus to manage flow control. Not serialized. Answers for the
+ * "instant".
+ */
+
+static void n_tty_set_room(struct tty_struct *tty)
+{
+ /* tty->read_cnt is not read locked ? */
+ int left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
+ int old_left;
+
+ /*
+ * If we are doing input canonicalization, and there are no
+ * pending newlines, let characters through without limit, so
+ * that erase characters will be handled. Other excess
+ * characters will be beeped.
+ */
+ if (left <= 0)
+ left = tty->icanon && !tty->canon_data;
+ old_left = tty->receive_room;
+ tty->receive_room = left;
+
+ /* Did this open up the receive buffer? We may need to flip */
+ if (left && !old_left)
+ schedule_work(&tty->buf.work);
+}
+
+static void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty)
+{
+ if (tty->read_cnt < N_TTY_BUF_SIZE) {
+ tty->read_buf[tty->read_head] = c;
+ tty->read_head = (tty->read_head + 1) & (N_TTY_BUF_SIZE-1);
+ tty->read_cnt++;
+ }
+}
+
+/**
+ * put_tty_queue - add character to tty
+ * @c: character
+ * @tty: tty device
+ *
+ * Add a character to the tty read_buf queue. This is done under the
+ * read_lock to serialize character addition and also to protect us
+ * against parallel reads or flushes
+ */
+
+static void put_tty_queue(unsigned char c, struct tty_struct *tty)
+{
+ unsigned long flags;
+ /*
+ * The problem of stomping on the buffers ends here.
+ * Why didn't anyone see this one coming? --AJK
+ */
+ spin_lock_irqsave(&tty->read_lock, flags);
+ put_tty_queue_nolock(c, tty);
+ spin_unlock_irqrestore(&tty->read_lock, flags);
+}
+
+/**
+ * check_unthrottle - allow new receive data
+ * @tty: tty device
+ *
+ * Check whether to call the driver unthrottle functions
+ *
+ * Can sleep, may be called under the atomic_read_lock mutex but
+ * this is not guaranteed.
+ */
+static void check_unthrottle(struct tty_struct *tty)
+{
+ if (tty->count)
+ tty_unthrottle(tty);
+}
+
+/**
+ * reset_buffer_flags - reset buffer state
+ * @tty: terminal to reset
+ *
+ * Reset the read buffer counters, clear the flags,
+ * and make sure the driver is unthrottled. Called
+ * from n_tty_open() and n_tty_flush_buffer().
+ *
+ * Locking: tty_read_lock for read fields.
+ */
+
+static void reset_buffer_flags(struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tty->read_lock, flags);
+ tty->read_head = tty->read_tail = tty->read_cnt = 0;
+ spin_unlock_irqrestore(&tty->read_lock, flags);
+
+ mutex_lock(&tty->echo_lock);
+ tty->echo_pos = tty->echo_cnt = tty->echo_overrun = 0;
+ mutex_unlock(&tty->echo_lock);
+
+ tty->canon_head = tty->canon_data = tty->erasing = 0;
+ memset(&tty->read_flags, 0, sizeof tty->read_flags);
+ n_tty_set_room(tty);
+ check_unthrottle(tty);
+}
+
+/**
+ * n_tty_flush_buffer - clean input queue
+ * @tty: terminal device
+ *
+ * Flush the input buffer. Called when the line discipline is
+ * being closed, when the tty layer wants the buffer flushed (eg
+ * at hangup) or when the N_TTY line discipline internally has to
+ * clean the pending queue (for example some signals).
+ *
+ * Locking: ctrl_lock, read_lock.
+ */
+
+static void n_tty_flush_buffer(struct tty_struct *tty)
+{
+ unsigned long flags;
+ /* clear everything and unthrottle the driver */
+ reset_buffer_flags(tty);
+
+ if (!tty->link)
+ return;
+
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ if (tty->link->packet) {
+ tty->ctrl_status |= TIOCPKT_FLUSHREAD;
+ wake_up_interruptible(&tty->link->read_wait);
+ }
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+}
+
+/**
+ * n_tty_chars_in_buffer - report available bytes
+ * @tty: tty device
+ *
+ * Report the number of characters buffered to be delivered to user
+ * at this instant in time.
+ *
+ * Locking: read_lock
+ */
+
+static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ unsigned long flags;
+ ssize_t n = 0;
+
+ spin_lock_irqsave(&tty->read_lock, flags);
+ if (!tty->icanon) {
+ n = tty->read_cnt;
+ } else if (tty->canon_data) {
+ n = (tty->canon_head > tty->read_tail) ?
+ tty->canon_head - tty->read_tail :
+ tty->canon_head + (N_TTY_BUF_SIZE - tty->read_tail);
+ }
+ spin_unlock_irqrestore(&tty->read_lock, flags);
+ return n;
+}
+
+/**
+ * is_utf8_continuation - utf8 multibyte check
+ * @c: byte to check
+ *
+ * Returns true if the utf8 character 'c' is a multibyte continuation
+ * character. We use this to correctly compute the on screen size
+ * of the character when printing
+ */
+
+static inline int is_utf8_continuation(unsigned char c)
+{
+ return (c & 0xc0) == 0x80;
+}
+
+/**
+ * is_continuation - multibyte check
+ * @c: byte to check
+ *
+ * Returns true if the utf8 character 'c' is a multibyte continuation
+ * character and the terminal is in unicode mode.
+ */
+
+static inline int is_continuation(unsigned char c, struct tty_struct *tty)
+{
+ return I_IUTF8(tty) && is_utf8_continuation(c);
+}
+
+/**
+ * do_output_char - output one character
+ * @c: character (or partial unicode symbol)
+ * @tty: terminal device
+ * @space: space available in tty driver write buffer
+ *
+ * This is a helper function that handles one output character
+ * (including special characters like TAB, CR, LF, etc.),
+ * doing OPOST processing and putting the results in the
+ * tty driver's write buffer.
+ *
+ * Note that Linux currently ignores TABDLY, CRDLY, VTDLY, FFDLY
+ * and NLDLY. They simply aren't relevant in the world today.
+ * If you ever need them, add them here.
+ *
+ * Returns the number of bytes of buffer space used or -1 if
+ * no space left.
+ *
+ * Locking: should be called under the output_lock to protect
+ * the column state and space left in the buffer
+ */
+
+static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
+{
+ int spaces;
+
+ if (!space)
+ return -1;
+
+ switch (c) {
+ case '\n':
+ if (O_ONLRET(tty))
+ tty->column = 0;
+ if (O_ONLCR(tty)) {
+ if (space < 2)
+ return -1;
+ tty->canon_column = tty->column = 0;
+ tty->ops->write(tty, "\r\n", 2);
+ return 2;
+ }
+ tty->canon_column = tty->column;
+ break;
+ case '\r':
+ if (O_ONOCR(tty) && tty->column == 0)
+ return 0;
+ if (O_OCRNL(tty)) {
+ c = '\n';
+ if (O_ONLRET(tty))
+ tty->canon_column = tty->column = 0;
+ break;
+ }
+ tty->canon_column = tty->column = 0;
+ break;
+ case '\t':
+ spaces = 8 - (tty->column & 7);
+ if (O_TABDLY(tty) == XTABS) {
+ if (space < spaces)
+ return -1;
+ tty->column += spaces;
+ tty->ops->write(tty, " ", spaces);
+ return spaces;
+ }
+ tty->column += spaces;
+ break;
+ case '\b':
+ if (tty->column > 0)
+ tty->column--;
+ break;
+ default:
+ if (!iscntrl(c)) {
+ if (O_OLCUC(tty))
+ c = toupper(c);
+ if (!is_continuation(c, tty))
+ tty->column++;
+ }
+ break;
+ }
+
+ tty_put_char(tty, c);
+ return 1;
+}
+
+/**
+ * process_output - output post processor
+ * @c: character (or partial unicode symbol)
+ * @tty: terminal device
+ *
+ * Output one character with OPOST processing.
+ * Returns -1 when the output device is full and the character
+ * must be retried.
+ *
+ * Locking: output_lock to protect column state and space left
+ * (also, this is called from n_tty_write under the
+ * tty layer write lock)
+ */
+
+static int process_output(unsigned char c, struct tty_struct *tty)
+{
+ int space, retval;
+
+ mutex_lock(&tty->output_lock);
+
+ space = tty_write_room(tty);
+ retval = do_output_char(c, tty, space);
+
+ mutex_unlock(&tty->output_lock);
+ if (retval < 0)
+ return -1;
+ else
+ return 0;
+}
+
+/**
+ * process_output_block - block post processor
+ * @tty: terminal device
+ * @buf: character buffer
+ * @nr: number of bytes to output
+ *
+ * Output a block of characters with OPOST processing.
+ * Returns the number of characters output.
+ *
+ * This path is used to speed up block console writes, among other
+ * things when processing blocks of output data. It handles only
+ * the simple cases normally found and helps to generate blocks of
+ * symbols for the console driver and thus improve performance.
+ *
+ * Locking: output_lock to protect column state and space left
+ * (also, this is called from n_tty_write under the
+ * tty layer write lock)
+ */
+
+static ssize_t process_output_block(struct tty_struct *tty,
+ const unsigned char *buf, unsigned int nr)
+{
+ int space;
+ int i;
+ const unsigned char *cp;
+
+ mutex_lock(&tty->output_lock);
+
+ space = tty_write_room(tty);
+ if (!space) {
+ mutex_unlock(&tty->output_lock);
+ return 0;
+ }
+ if (nr > space)
+ nr = space;
+
+ for (i = 0, cp = buf; i < nr; i++, cp++) {
+ unsigned char c = *cp;
+
+ switch (c) {
+ case '\n':
+ if (O_ONLRET(tty))
+ tty->column = 0;
+ if (O_ONLCR(tty))
+ goto break_out;
+ tty->canon_column = tty->column;
+ break;
+ case '\r':
+ if (O_ONOCR(tty) && tty->column == 0)
+ goto break_out;
+ if (O_OCRNL(tty))
+ goto break_out;
+ tty->canon_column = tty->column = 0;
+ break;
+ case '\t':
+ goto break_out;
+ case '\b':
+ if (tty->column > 0)
+ tty->column--;
+ break;
+ default:
+ if (!iscntrl(c)) {
+ if (O_OLCUC(tty))
+ goto break_out;
+ if (!is_continuation(c, tty))
+ tty->column++;
+ }
+ break;
+ }
+ }
+break_out:
+ i = tty->ops->write(tty, buf, i);
+
+ mutex_unlock(&tty->output_lock);
+ return i;
+}
+
+/**
+ * process_echoes - write pending echo characters
+ * @tty: terminal device
+ *
+ * Write previously buffered echo (and other ldisc-generated)
+ * characters to the tty.
+ *
+ * Characters generated by the ldisc (including echoes) need to
+ * be buffered because the driver's write buffer can fill during
+ * heavy program output. Echoing straight to the driver will
+ * often fail under these conditions, causing lost characters and
+ * resulting mismatches of ldisc state information.
+ *
+ * Since the ldisc state must represent the characters actually sent
+ * to the driver at the time of the write, operations like certain
+ * changes in column state are also saved in the buffer and executed
+ * here.
+ *
+ * A circular fifo buffer is used so that the most recent characters
+ * are prioritized. Also, when control characters are echoed with a
+ * prefixed "^", the pair is treated atomically and thus not separated.
+ *
+ * Locking: output_lock to protect column state and space left,
+ * echo_lock to protect the echo buffer
+ */
+
+static void process_echoes(struct tty_struct *tty)
+{
+ int space, nr;
+ unsigned char c;
+ unsigned char *cp, *buf_end;
+
+ if (!tty->echo_cnt)
+ return;
+
+ mutex_lock(&tty->output_lock);
+ mutex_lock(&tty->echo_lock);
+
+ space = tty_write_room(tty);
+
+ buf_end = tty->echo_buf + N_TTY_BUF_SIZE;
+ cp = tty->echo_buf + tty->echo_pos;
+ nr = tty->echo_cnt;
+ while (nr > 0) {
+ c = *cp;
+ if (c == ECHO_OP_START) {
+ unsigned char op;
+ unsigned char *opp;
+ int no_space_left = 0;
+
+ /*
+ * If the buffer byte is the start of a multi-byte
+ * operation, get the next byte, which is either the
+ * op code or a control character value.
+ */
+ opp = cp + 1;
+ if (opp == buf_end)
+ opp -= N_TTY_BUF_SIZE;
+ op = *opp;
+
+ switch (op) {
+ unsigned int num_chars, num_bs;
+
+ case ECHO_OP_ERASE_TAB:
+ if (++opp == buf_end)
+ opp -= N_TTY_BUF_SIZE;
+ num_chars = *opp;
+
+ /*
+ * Determine how many columns to go back
+ * in order to erase the tab.
+ * This depends on the number of columns
+ * used by other characters within the tab
+ * area. If this (modulo 8) count is from
+ * the start of input rather than from a
+ * previous tab, we offset by canon column.
+ * Otherwise, tab spacing is normal.
+ */
+ if (!(num_chars & 0x80))
+ num_chars += tty->canon_column;
+ num_bs = 8 - (num_chars & 7);
+
+ if (num_bs > space) {
+ no_space_left = 1;
+ break;
+ }
+ space -= num_bs;
+ while (num_bs--) {
+ tty_put_char(tty, '\b');
+ if (tty->column > 0)
+ tty->column--;
+ }
+ cp += 3;
+ nr -= 3;
+ break;
+
+ case ECHO_OP_SET_CANON_COL:
+ tty->canon_column = tty->column;
+ cp += 2;
+ nr -= 2;
+ break;
+
+ case ECHO_OP_MOVE_BACK_COL:
+ if (tty->column > 0)
+ tty->column--;
+ cp += 2;
+ nr -= 2;
+ break;
+
+ case ECHO_OP_START:
+ /* This is an escaped echo op start code */
+ if (!space) {
+ no_space_left = 1;
+ break;
+ }
+ tty_put_char(tty, ECHO_OP_START);
+ tty->column++;
+ space--;
+ cp += 2;
+ nr -= 2;
+ break;
+
+ default:
+ /*
+ * If the op is not a special byte code,
+ * it is a ctrl char tagged to be echoed
+ * as "^X" (where X is the letter
+ * representing the control char).
+ * Note that we must ensure there is
+ * enough space for the whole ctrl pair.
+ *
+ */
+ if (space < 2) {
+ no_space_left = 1;
+ break;
+ }
+ tty_put_char(tty, '^');
+ tty_put_char(tty, op ^ 0100);
+ tty->column += 2;
+ space -= 2;
+ cp += 2;
+ nr -= 2;
+ }
+
+ if (no_space_left)
+ break;
+ } else {
+ if (O_OPOST(tty) &&
+ !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
+ int retval = do_output_char(c, tty, space);
+ if (retval < 0)
+ break;
+ space -= retval;
+ } else {
+ if (!space)
+ break;
+ tty_put_char(tty, c);
+ space -= 1;
+ }
+ cp += 1;
+ nr -= 1;
+ }
+
+ /* When end of circular buffer reached, wrap around */
+ if (cp >= buf_end)
+ cp -= N_TTY_BUF_SIZE;
+ }
+
+ if (nr == 0) {
+ tty->echo_pos = 0;
+ tty->echo_cnt = 0;
+ tty->echo_overrun = 0;
+ } else {
+ int num_processed = tty->echo_cnt - nr;
+ tty->echo_pos += num_processed;
+ tty->echo_pos &= N_TTY_BUF_SIZE - 1;
+ tty->echo_cnt = nr;
+ if (num_processed > 0)
+ tty->echo_overrun = 0;
+ }
+
+ mutex_unlock(&tty->echo_lock);
+ mutex_unlock(&tty->output_lock);
+
+ if (tty->ops->flush_chars)
+ tty->ops->flush_chars(tty);
+}
+
+/**
+ * add_echo_byte - add a byte to the echo buffer
+ * @c: unicode byte to echo
+ * @tty: terminal device
+ *
+ * Add a character or operation byte to the echo buffer.
+ *
+ * Should be called under the echo lock to protect the echo buffer.
+ */
+
+static void add_echo_byte(unsigned char c, struct tty_struct *tty)
+{
+ int new_byte_pos;
+
+ if (tty->echo_cnt == N_TTY_BUF_SIZE) {
+ /* Circular buffer is already at capacity */
+ new_byte_pos = tty->echo_pos;
+
+ /*
+ * Since the buffer start position needs to be advanced,
+ * be sure to step by a whole operation byte group.
+ */
+ if (tty->echo_buf[tty->echo_pos] == ECHO_OP_START) {
+ if (tty->echo_buf[(tty->echo_pos + 1) &
+ (N_TTY_BUF_SIZE - 1)] ==
+ ECHO_OP_ERASE_TAB) {
+ tty->echo_pos += 3;
+ tty->echo_cnt -= 2;
+ } else {
+ tty->echo_pos += 2;
+ tty->echo_cnt -= 1;
+ }
+ } else {
+ tty->echo_pos++;
+ }
+ tty->echo_pos &= N_TTY_BUF_SIZE - 1;
+
+ tty->echo_overrun = 1;
+ } else {
+ new_byte_pos = tty->echo_pos + tty->echo_cnt;
+ new_byte_pos &= N_TTY_BUF_SIZE - 1;
+ tty->echo_cnt++;
+ }
+
+ tty->echo_buf[new_byte_pos] = c;
+}
+
+/**
+ * echo_move_back_col - add operation to move back a column
+ * @tty: terminal device
+ *
+ * Add an operation to the echo buffer to move back one column.
+ *
+ * Locking: echo_lock to protect the echo buffer
+ */
+
+static void echo_move_back_col(struct tty_struct *tty)
+{
+ mutex_lock(&tty->echo_lock);
+
+ add_echo_byte(ECHO_OP_START, tty);
+ add_echo_byte(ECHO_OP_MOVE_BACK_COL, tty);
+
+ mutex_unlock(&tty->echo_lock);
+}
+
+/**
+ * echo_set_canon_col - add operation to set the canon column
+ * @tty: terminal device
+ *
+ * Add an operation to the echo buffer to set the canon column
+ * to the current column.
+ *
+ * Locking: echo_lock to protect the echo buffer
+ */
+
+static void echo_set_canon_col(struct tty_struct *tty)
+{
+ mutex_lock(&tty->echo_lock);
+
+ add_echo_byte(ECHO_OP_START, tty);
+ add_echo_byte(ECHO_OP_SET_CANON_COL, tty);
+
+ mutex_unlock(&tty->echo_lock);
+}
+
+/**
+ * echo_erase_tab - add operation to erase a tab
+ * @num_chars: number of character columns already used
+ * @after_tab: true if num_chars starts after a previous tab
+ * @tty: terminal device
+ *
+ * Add an operation to the echo buffer to erase a tab.
+ *
+ * Called by the eraser function, which knows how many character
+ * columns have been used since either a previous tab or the start
+ * of input. This information will be used later, along with
+ * canon column (if applicable), to go back the correct number
+ * of columns.
+ *
+ * Locking: echo_lock to protect the echo buffer
+ */
+
+static void echo_erase_tab(unsigned int num_chars, int after_tab,
+ struct tty_struct *tty)
+{
+ mutex_lock(&tty->echo_lock);
+
+ add_echo_byte(ECHO_OP_START, tty);
+ add_echo_byte(ECHO_OP_ERASE_TAB, tty);
+
+ /* We only need to know this modulo 8 (tab spacing) */
+ num_chars &= 7;
+
+ /* Set the high bit as a flag if num_chars is after a previous tab */
+ if (after_tab)
+ num_chars |= 0x80;
+
+ add_echo_byte(num_chars, tty);
+
+ mutex_unlock(&tty->echo_lock);
+}
+
+/**
+ * echo_char_raw - echo a character raw
+ * @c: unicode byte to echo
+ * @tty: terminal device
+ *
+ * Echo user input back onto the screen. This must be called only when
+ * L_ECHO(tty) is true. Called from the driver receive_buf path.
+ *
+ * This variant does not treat control characters specially.
+ *
+ * Locking: echo_lock to protect the echo buffer
+ */
+
+static void echo_char_raw(unsigned char c, struct tty_struct *tty)
+{
+ mutex_lock(&tty->echo_lock);
+
+ if (c == ECHO_OP_START) {
+ add_echo_byte(ECHO_OP_START, tty);
+ add_echo_byte(ECHO_OP_START, tty);
+ } else {
+ add_echo_byte(c, tty);
+ }
+
+ mutex_unlock(&tty->echo_lock);
+}
+
+/**
+ * echo_char - echo a character
+ * @c: unicode byte to echo
+ * @tty: terminal device
+ *
+ * Echo user input back onto the screen. This must be called only when
+ * L_ECHO(tty) is true. Called from the driver receive_buf path.
+ *
+ * This variant tags control characters to be echoed as "^X"
+ * (where X is the letter representing the control char).
+ *
+ * Locking: echo_lock to protect the echo buffer
+ */
+
+static void echo_char(unsigned char c, struct tty_struct *tty)
+{
+ mutex_lock(&tty->echo_lock);
+
+ if (c == ECHO_OP_START) {
+ add_echo_byte(ECHO_OP_START, tty);
+ add_echo_byte(ECHO_OP_START, tty);
+ } else {
+ if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t')
+ add_echo_byte(ECHO_OP_START, tty);
+ add_echo_byte(c, tty);
+ }
+
+ mutex_unlock(&tty->echo_lock);
+}
+
+/**
+ * finish_erasing - complete erase
+ * @tty: tty doing the erase
+ */
+
+static inline void finish_erasing(struct tty_struct *tty)
+{
+ if (tty->erasing) {
+ echo_char_raw('/', tty);
+ tty->erasing = 0;
+ }
+}
+
+/**
+ * eraser - handle erase function
+ * @c: character input
+ * @tty: terminal device
+ *
+ * Perform erase and necessary output when an erase character is
+ * present in the stream from the driver layer. Handles the complexities
+ * of UTF-8 multibyte symbols.
+ *
+ * Locking: read_lock for tty buffers
+ */
+
+static void eraser(unsigned char c, struct tty_struct *tty)
+{
+ enum { ERASE, WERASE, KILL } kill_type;
+ int head, seen_alnums, cnt;
+ unsigned long flags;
+
+ /* FIXME: locking needed ? */
+ if (tty->read_head == tty->canon_head) {
+ /* process_output('\a', tty); */ /* what do you think? */
+ return;
+ }
+ if (c == ERASE_CHAR(tty))
+ kill_type = ERASE;
+ else if (c == WERASE_CHAR(tty))
+ kill_type = WERASE;
+ else {
+ if (!L_ECHO(tty)) {
+ spin_lock_irqsave(&tty->read_lock, flags);
+ tty->read_cnt -= ((tty->read_head - tty->canon_head) &
+ (N_TTY_BUF_SIZE - 1));
+ tty->read_head = tty->canon_head;
+ spin_unlock_irqrestore(&tty->read_lock, flags);
+ return;
+ }
+ if (!L_ECHOK(tty) || !L_ECHOKE(tty) || !L_ECHOE(tty)) {
+ spin_lock_irqsave(&tty->read_lock, flags);
+ tty->read_cnt -= ((tty->read_head - tty->canon_head) &
+ (N_TTY_BUF_SIZE - 1));
+ tty->read_head = tty->canon_head;
+ spin_unlock_irqrestore(&tty->read_lock, flags);
+ finish_erasing(tty);
+ echo_char(KILL_CHAR(tty), tty);
+ /* Add a newline if ECHOK is on and ECHOKE is off. */
+ if (L_ECHOK(tty))
+ echo_char_raw('\n', tty);
+ return;
+ }
+ kill_type = KILL;
+ }
+
+ seen_alnums = 0;
+ /* FIXME: Locking ?? */
+ while (tty->read_head != tty->canon_head) {
+ head = tty->read_head;
+
+ /* erase a single possibly multibyte character */
+ do {
+ head = (head - 1) & (N_TTY_BUF_SIZE-1);
+ c = tty->read_buf[head];
+ } while (is_continuation(c, tty) && head != tty->canon_head);
+
+ /* do not partially erase */
+ if (is_continuation(c, tty))
+ break;
+
+ if (kill_type == WERASE) {
+ /* Equivalent to BSD's ALTWERASE. */
+ if (isalnum(c) || c == '_')
+ seen_alnums++;
+ else if (seen_alnums)
+ break;
+ }
+ cnt = (tty->read_head - head) & (N_TTY_BUF_SIZE-1);
+ spin_lock_irqsave(&tty->read_lock, flags);
+ tty->read_head = head;
+ tty->read_cnt -= cnt;
+ spin_unlock_irqrestore(&tty->read_lock, flags);
+ if (L_ECHO(tty)) {
+ if (L_ECHOPRT(tty)) {
+ if (!tty->erasing) {
+ echo_char_raw('\\', tty);
+ tty->erasing = 1;
+ }
+ /* if cnt > 1, output a multi-byte character */
+ echo_char(c, tty);
+ while (--cnt > 0) {
+ head = (head+1) & (N_TTY_BUF_SIZE-1);
+ echo_char_raw(tty->read_buf[head], tty);
+ echo_move_back_col(tty);
+ }
+ } else if (kill_type == ERASE && !L_ECHOE(tty)) {
+ echo_char(ERASE_CHAR(tty), tty);
+ } else if (c == '\t') {
+ unsigned int num_chars = 0;
+ int after_tab = 0;
+ unsigned long tail = tty->read_head;
+
+ /*
+ * Count the columns used for characters
+ * since the start of input or after a
+ * previous tab.
+ * This info is used to go back the correct
+ * number of columns.
+ */
+ while (tail != tty->canon_head) {
+ tail = (tail-1) & (N_TTY_BUF_SIZE-1);
+ c = tty->read_buf[tail];
+ if (c == '\t') {
+ after_tab = 1;
+ break;
+ } else if (iscntrl(c)) {
+ if (L_ECHOCTL(tty))
+ num_chars += 2;
+ } else if (!is_continuation(c, tty)) {
+ num_chars++;
+ }
+ }
+ echo_erase_tab(num_chars, after_tab, tty);
+ } else {
+ if (iscntrl(c) && L_ECHOCTL(tty)) {
+ echo_char_raw('\b', tty);
+ echo_char_raw(' ', tty);
+ echo_char_raw('\b', tty);
+ }
+ if (!iscntrl(c) || L_ECHOCTL(tty)) {
+ echo_char_raw('\b', tty);
+ echo_char_raw(' ', tty);
+ echo_char_raw('\b', tty);
+ }
+ }
+ }
+ if (kill_type == ERASE)
+ break;
+ }
+ if (tty->read_head == tty->canon_head && L_ECHO(tty))
+ finish_erasing(tty);
+}
+
+/**
+ * isig - handle the ISIG option
+ * @sig: signal
+ * @tty: terminal
+ * @flush: force flush
+ *
+ * Called when a signal is being sent due to terminal input. This
+ * may cause terminal flushing to take place according to the termios
+ * settings and character used. Called from the driver receive_buf
+ * path so serialized.
+ *
+ * Locking: ctrl_lock, read_lock (both via flush buffer)
+ */
+
+static inline void isig(int sig, struct tty_struct *tty, int flush)
+{
+ if (tty->pgrp)
+ kill_pgrp(tty->pgrp, sig, 1);
+ if (flush || !L_NOFLSH(tty)) {
+ n_tty_flush_buffer(tty);
+ tty_driver_flush_buffer(tty);
+ }
+}
+
+/**
+ * n_tty_receive_break - handle break
+ * @tty: terminal
+ *
+ * An RS232 break event has been hit in the incoming bitstream. This
+ * can cause a variety of events depending upon the termios settings.
+ *
+ * Called from the receive_buf path so single threaded.
+ */
+
+static inline void n_tty_receive_break(struct tty_struct *tty)
+{
+ if (I_IGNBRK(tty))
+ return;
+ if (I_BRKINT(tty)) {
+ isig(SIGINT, tty, 1);
+ return;
+ }
+ if (I_PARMRK(tty)) {
+ put_tty_queue('\377', tty);
+ put_tty_queue('\0', tty);
+ }
+ put_tty_queue('\0', tty);
+ wake_up_interruptible(&tty->read_wait);
+}
+
+/**
+ * n_tty_receive_overrun - handle overrun reporting
+ * @tty: terminal
+ *
+ * Data arrived faster than we could process it. While the tty
+ * driver has flagged this, the bits that were missed are gone
+ * forever.
+ *
+ * Called from the receive_buf path so single threaded. Does not
+ * need locking as num_overrun and overrun_time are function
+ * private.
+ */
+
+static inline void n_tty_receive_overrun(struct tty_struct *tty)
+{
+ char buf[64];
+
+ tty->num_overrun++;
+ if (time_before(tty->overrun_time, jiffies - HZ) ||
+ time_after(tty->overrun_time, jiffies)) {
+ printk(KERN_WARNING "%s: %d input overrun(s)\n",
+ tty_name(tty, buf),
+ tty->num_overrun);
+ tty->overrun_time = jiffies;
+ tty->num_overrun = 0;
+ }
+}
+
+/**
+ * n_tty_receive_parity_error - error notifier
+ * @tty: terminal device
+ * @c: character
+ *
+ * Process a parity error and queue the right data to indicate
+ * the error case if necessary. Locking as per n_tty_receive_buf.
+ */
+static inline void n_tty_receive_parity_error(struct tty_struct *tty,
+ unsigned char c)
+{
+ if (I_IGNPAR(tty))
+ return;
+ if (I_PARMRK(tty)) {
+ put_tty_queue('\377', tty);
+ put_tty_queue('\0', tty);
+ put_tty_queue(c, tty);
+ } else if (I_INPCK(tty))
+ put_tty_queue('\0', tty);
+ else
+ put_tty_queue(c, tty);
+ wake_up_interruptible(&tty->read_wait);
+}
+
+/**
+ * n_tty_receive_char - perform processing
+ * @tty: terminal device
+ * @c: character
+ *
+ * Process an individual character of input received from the driver.
+ * This is serialized with respect to itself by the rules for the
+ * driver above.
+ */
+
+static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
+{
+ unsigned long flags;
+ int parmrk;
+
+ if (tty->raw) {
+ put_tty_queue(c, tty);
+ return;
+ }
+
+ if (I_ISTRIP(tty))
+ c &= 0x7f;
+ if (I_IUCLC(tty) && L_IEXTEN(tty))
+ c = tolower(c);
+
+ if (L_EXTPROC(tty)) {
+ put_tty_queue(c, tty);
+ return;
+ }
+
+ if (tty->stopped && !tty->flow_stopped && I_IXON(tty) &&
+ I_IXANY(tty) && c != START_CHAR(tty) && c != STOP_CHAR(tty) &&
+ c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) && c != SUSP_CHAR(tty)) {
+ start_tty(tty);
+ process_echoes(tty);
+ }
+
+ if (tty->closing) {
+ if (I_IXON(tty)) {
+ if (c == START_CHAR(tty)) {
+ start_tty(tty);
+ process_echoes(tty);
+ } else if (c == STOP_CHAR(tty))
+ stop_tty(tty);
+ }
+ return;
+ }
+
+ /*
+ * If the previous character was LNEXT, or we know that this
+ * character is not one of the characters that we'll have to
+ * handle specially, do shortcut processing to speed things
+ * up.
+ */
+ if (!test_bit(c, tty->process_char_map) || tty->lnext) {
+ tty->lnext = 0;
+ parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 1 : 0;
+ if (tty->read_cnt >= (N_TTY_BUF_SIZE - parmrk - 1)) {
+ /* beep if no space */
+ if (L_ECHO(tty))
+ process_output('\a', tty);
+ return;
+ }
+ if (L_ECHO(tty)) {
+ finish_erasing(tty);
+ /* Record the column of first canon char. */
+ if (tty->canon_head == tty->read_head)
+ echo_set_canon_col(tty);
+ echo_char(c, tty);
+ process_echoes(tty);
+ }
+ if (parmrk)
+ put_tty_queue(c, tty);
+ put_tty_queue(c, tty);
+ return;
+ }
+
+ if (I_IXON(tty)) {
+ if (c == START_CHAR(tty)) {
+ start_tty(tty);
+ process_echoes(tty);
+ return;
+ }
+ if (c == STOP_CHAR(tty)) {
+ stop_tty(tty);
+ return;
+ }
+ }
+
+ if (L_ISIG(tty)) {
+ int signal;
+ signal = SIGINT;
+ if (c == INTR_CHAR(tty))
+ goto send_signal;
+ signal = SIGQUIT;
+ if (c == QUIT_CHAR(tty))
+ goto send_signal;
+ signal = SIGTSTP;
+ if (c == SUSP_CHAR(tty)) {
+send_signal:
+ /*
+ * Note that we do not use isig() here because we want
+ * the order to be:
+ * 1) flush, 2) echo, 3) signal
+ */
+ if (!L_NOFLSH(tty)) {
+ n_tty_flush_buffer(tty);
+ tty_driver_flush_buffer(tty);
+ }
+ if (I_IXON(tty))
+ start_tty(tty);
+ if (L_ECHO(tty)) {
+ echo_char(c, tty);
+ process_echoes(tty);
+ }
+ if (tty->pgrp)
+ kill_pgrp(tty->pgrp, signal, 1);
+ return;
+ }
+ }
+
+ if (c == '\r') {
+ if (I_IGNCR(tty))
+ return;
+ if (I_ICRNL(tty))
+ c = '\n';
+ } else if (c == '\n' && I_INLCR(tty))
+ c = '\r';
+
+ if (tty->icanon) {
+ if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) ||
+ (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) {
+ eraser(c, tty);
+ process_echoes(tty);
+ return;
+ }
+ if (c == LNEXT_CHAR(tty) && L_IEXTEN(tty)) {
+ tty->lnext = 1;
+ if (L_ECHO(tty)) {
+ finish_erasing(tty);
+ if (L_ECHOCTL(tty)) {
+ echo_char_raw('^', tty);
+ echo_char_raw('\b', tty);
+ process_echoes(tty);
+ }
+ }
+ return;
+ }
+ if (c == REPRINT_CHAR(tty) && L_ECHO(tty) &&
+ L_IEXTEN(tty)) {
+ unsigned long tail = tty->canon_head;
+
+ finish_erasing(tty);
+ echo_char(c, tty);
+ echo_char_raw('\n', tty);
+ while (tail != tty->read_head) {
+ echo_char(tty->read_buf[tail], tty);
+ tail = (tail+1) & (N_TTY_BUF_SIZE-1);
+ }
+ process_echoes(tty);
+ return;
+ }
+ if (c == '\n') {
+ if (tty->read_cnt >= N_TTY_BUF_SIZE) {
+ if (L_ECHO(tty))
+ process_output('\a', tty);
+ return;
+ }
+ if (L_ECHO(tty) || L_ECHONL(tty)) {
+ echo_char_raw('\n', tty);
+ process_echoes(tty);
+ }
+ goto handle_newline;
+ }
+ if (c == EOF_CHAR(tty)) {
+ if (tty->read_cnt >= N_TTY_BUF_SIZE)
+ return;
+ if (tty->canon_head != tty->read_head)
+ set_bit(TTY_PUSH, &tty->flags);
+ c = __DISABLED_CHAR;
+ goto handle_newline;
+ }
+ if ((c == EOL_CHAR(tty)) ||
+ (c == EOL2_CHAR(tty) && L_IEXTEN(tty))) {
+ parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty))
+ ? 1 : 0;
+ if (tty->read_cnt >= (N_TTY_BUF_SIZE - parmrk)) {
+ if (L_ECHO(tty))
+ process_output('\a', tty);
+ return;
+ }
+ /*
+ * XXX are EOL_CHAR and EOL2_CHAR echoed?!?
+ */
+ if (L_ECHO(tty)) {
+ /* Record the column of first canon char. */
+ if (tty->canon_head == tty->read_head)
+ echo_set_canon_col(tty);
+ echo_char(c, tty);
+ process_echoes(tty);
+ }
+ /*
+ * XXX does PARMRK doubling happen for
+ * EOL_CHAR and EOL2_CHAR?
+ */
+ if (parmrk)
+ put_tty_queue(c, tty);
+
+handle_newline:
+ spin_lock_irqsave(&tty->read_lock, flags);
+ set_bit(tty->read_head, tty->read_flags);
+ put_tty_queue_nolock(c, tty);
+ tty->canon_head = tty->read_head;
+ tty->canon_data++;
+ spin_unlock_irqrestore(&tty->read_lock, flags);
+ kill_fasync(&tty->fasync, SIGIO, POLL_IN);
+ if (waitqueue_active(&tty->read_wait))
+ wake_up_interruptible(&tty->read_wait);
+ return;
+ }
+ }
+
+ parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 1 : 0;
+ if (tty->read_cnt >= (N_TTY_BUF_SIZE - parmrk - 1)) {
+ /* beep if no space */
+ if (L_ECHO(tty))
+ process_output('\a', tty);
+ return;
+ }
+ if (L_ECHO(tty)) {
+ finish_erasing(tty);
+ if (c == '\n')
+ echo_char_raw('\n', tty);
+ else {
+ /* Record the column of first canon char. */
+ if (tty->canon_head == tty->read_head)
+ echo_set_canon_col(tty);
+ echo_char(c, tty);
+ }
+ process_echoes(tty);
+ }
+
+ if (parmrk)
+ put_tty_queue(c, tty);
+
+ put_tty_queue(c, tty);
+}
+
+
+/**
+ * n_tty_write_wakeup - asynchronous I/O notifier
+ * @tty: tty device
+ *
+ * Required for the ptys, serial driver etc. since processes
+ * that attach themselves to the master and rely on ASYNC
+ * IO must be woken up
+ */
+
+static void n_tty_write_wakeup(struct tty_struct *tty)
+{
+ if (tty->fasync && test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags))
+ kill_fasync(&tty->fasync, SIGIO, POLL_OUT);
+}
+
+/**
+ * n_tty_receive_buf - data receive
+ * @tty: terminal device
+ * @cp: buffer
+ * @fp: flag buffer
+ * @count: characters
+ *
+ * Called by the terminal driver when a block of characters has
+ *	been received. This function must be called from soft contexts,
+ *	not from interrupt context. The driver is responsible for making
+ *	calls one at a time and in order (or using flush_to_ldisc).
+ */
+
+static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ const unsigned char *p;
+ char *f, flags = TTY_NORMAL;
+ int i;
+ char buf[64];
+ unsigned long cpuflags;
+
+ if (!tty->read_buf)
+ return;
+
+ if (tty->real_raw) {
+ spin_lock_irqsave(&tty->read_lock, cpuflags);
+ i = min(N_TTY_BUF_SIZE - tty->read_cnt,
+ N_TTY_BUF_SIZE - tty->read_head);
+ i = min(count, i);
+ memcpy(tty->read_buf + tty->read_head, cp, i);
+ tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
+ tty->read_cnt += i;
+ cp += i;
+ count -= i;
+
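+		/*
+		 * If the first copy stopped at the end of read_buf, copy
+		 * the remainder into the start of the (circular) buffer.
+		 */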
+ i = min(N_TTY_BUF_SIZE - tty->read_cnt,
+ N_TTY_BUF_SIZE - tty->read_head);
+ i = min(count, i);
+ memcpy(tty->read_buf + tty->read_head, cp, i);
+ tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
+ tty->read_cnt += i;
+ spin_unlock_irqrestore(&tty->read_lock, cpuflags);
+ } else {
+ for (i = count, p = cp, f = fp; i; i--, p++) {
+ if (f)
+ flags = *f++;
+ switch (flags) {
+ case TTY_NORMAL:
+ n_tty_receive_char(tty, *p);
+ break;
+ case TTY_BREAK:
+ n_tty_receive_break(tty);
+ break;
+ case TTY_PARITY:
+ case TTY_FRAME:
+ n_tty_receive_parity_error(tty, *p);
+ break;
+ case TTY_OVERRUN:
+ n_tty_receive_overrun(tty);
+ break;
+ default:
+ printk(KERN_ERR "%s: unknown flag %d\n",
+ tty_name(tty, buf), flags);
+ break;
+ }
+ }
+ if (tty->ops->flush_chars)
+ tty->ops->flush_chars(tty);
+ }
+
+ n_tty_set_room(tty);
+
+ if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) ||
+ L_EXTPROC(tty)) {
+ kill_fasync(&tty->fasync, SIGIO, POLL_IN);
+ if (waitqueue_active(&tty->read_wait))
+ wake_up_interruptible(&tty->read_wait);
+ }
+
+ /*
+ * Check the remaining room for the input canonicalization
+ * mode. We don't want to throttle the driver if we're in
+ * canonical mode and don't have a newline yet!
+ */
+ if (tty->receive_room < TTY_THRESHOLD_THROTTLE)
+ tty_throttle(tty);
+}
+
+int is_ignored(int sig)
+{
+ return (sigismember(&current->blocked, sig) ||
+ current->sighand->action[sig-1].sa.sa_handler == SIG_IGN);
+}
+
+/**
+ * n_tty_set_termios - termios data changed
+ * @tty: terminal
+ * @old: previous data
+ *
+ * Called by the tty layer when the user changes termios flags so
+ * that the line discipline can plan ahead. This function cannot sleep
+ * and is protected from re-entry by the tty layer. The user is
+ * guaranteed that this function will not be re-entered or in progress
+ * when the ldisc is closed.
+ *
+ * Locking: Caller holds tty->termios_mutex
+ */
+
+static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
+{
+ int canon_change = 1;
+ BUG_ON(!tty);
+
+ if (old)
+ canon_change = (old->c_lflag ^ tty->termios->c_lflag) & ICANON;
+ if (canon_change) {
+ memset(&tty->read_flags, 0, sizeof tty->read_flags);
+ tty->canon_head = tty->read_tail;
+ tty->canon_data = 0;
+ tty->erasing = 0;
+ }
+
+ if (canon_change && !L_ICANON(tty) && tty->read_cnt)
+ wake_up_interruptible(&tty->read_wait);
+
+ tty->icanon = (L_ICANON(tty) != 0);
+ if (test_bit(TTY_HW_COOK_IN, &tty->flags)) {
+ tty->raw = 1;
+ tty->real_raw = 1;
+ n_tty_set_room(tty);
+ return;
+ }
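+	/*
+	 * Build the bitmap of characters that need special handling in
+	 * n_tty_receive_char(); anything not in the map takes the fast
+	 * path there.
+	 */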
+ if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
+ I_ICRNL(tty) || I_INLCR(tty) || L_ICANON(tty) ||
+ I_IXON(tty) || L_ISIG(tty) || L_ECHO(tty) ||
+ I_PARMRK(tty)) {
+ memset(tty->process_char_map, 0, 256/8);
+
+ if (I_IGNCR(tty) || I_ICRNL(tty))
+ set_bit('\r', tty->process_char_map);
+ if (I_INLCR(tty))
+ set_bit('\n', tty->process_char_map);
+
+ if (L_ICANON(tty)) {
+ set_bit(ERASE_CHAR(tty), tty->process_char_map);
+ set_bit(KILL_CHAR(tty), tty->process_char_map);
+ set_bit(EOF_CHAR(tty), tty->process_char_map);
+ set_bit('\n', tty->process_char_map);
+ set_bit(EOL_CHAR(tty), tty->process_char_map);
+ if (L_IEXTEN(tty)) {
+ set_bit(WERASE_CHAR(tty),
+ tty->process_char_map);
+ set_bit(LNEXT_CHAR(tty),
+ tty->process_char_map);
+ set_bit(EOL2_CHAR(tty),
+ tty->process_char_map);
+ if (L_ECHO(tty))
+ set_bit(REPRINT_CHAR(tty),
+ tty->process_char_map);
+ }
+ }
+ if (I_IXON(tty)) {
+ set_bit(START_CHAR(tty), tty->process_char_map);
+ set_bit(STOP_CHAR(tty), tty->process_char_map);
+ }
+ if (L_ISIG(tty)) {
+ set_bit(INTR_CHAR(tty), tty->process_char_map);
+ set_bit(QUIT_CHAR(tty), tty->process_char_map);
+ set_bit(SUSP_CHAR(tty), tty->process_char_map);
+ }
+ clear_bit(__DISABLED_CHAR, tty->process_char_map);
+ tty->raw = 0;
+ tty->real_raw = 0;
+ } else {
+ tty->raw = 1;
+ if ((I_IGNBRK(tty) || (!I_BRKINT(tty) && !I_PARMRK(tty))) &&
+ (I_IGNPAR(tty) || !I_INPCK(tty)) &&
+ (tty->driver->flags & TTY_DRIVER_REAL_RAW))
+ tty->real_raw = 1;
+ else
+ tty->real_raw = 0;
+ }
+ n_tty_set_room(tty);
+	/* The termios change makes the tty ready for I/O */
+ wake_up_interruptible(&tty->write_wait);
+ wake_up_interruptible(&tty->read_wait);
+}
+
+/**
+ * n_tty_close - close the ldisc for this tty
+ * @tty: device
+ *
+ * Called from the terminal layer when this line discipline is
+ *	being shut down, either because of a close or because of a
+ * discipline change. The function will not be called while other
+ * ldisc methods are in progress.
+ */
+
+static void n_tty_close(struct tty_struct *tty)
+{
+ n_tty_flush_buffer(tty);
+ if (tty->read_buf) {
+ kfree(tty->read_buf);
+ tty->read_buf = NULL;
+ }
+ if (tty->echo_buf) {
+ kfree(tty->echo_buf);
+ tty->echo_buf = NULL;
+ }
+}
+
+/**
+ * n_tty_open - open an ldisc
+ * @tty: terminal to open
+ *
+ * Called when this line discipline is being attached to the
+ * terminal device. Can sleep. Called serialized so that no
+ * other events will occur in parallel. No further open will occur
+ * until a close.
+ */
+
+static int n_tty_open(struct tty_struct *tty)
+{
+ if (!tty)
+ return -EINVAL;
+
+ /* These are ugly. Currently a malloc failure here can panic */
+ if (!tty->read_buf) {
+ tty->read_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
+ if (!tty->read_buf)
+ return -ENOMEM;
+ }
+ if (!tty->echo_buf) {
+ tty->echo_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
+
+ if (!tty->echo_buf)
+ return -ENOMEM;
+ }
+ reset_buffer_flags(tty);
+ tty->column = 0;
+ n_tty_set_termios(tty, NULL);
+ tty->minimum_to_wake = 1;
+ tty->closing = 0;
+ return 0;
+}
+
+static inline int input_available_p(struct tty_struct *tty, int amt)
+{
+ tty_flush_to_ldisc(tty);
+ if (tty->icanon && !L_EXTPROC(tty)) {
+ if (tty->canon_data)
+ return 1;
+ } else if (tty->read_cnt >= (amt ? amt : 1))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * copy_from_read_buf - copy read data directly
+ * @tty: terminal device
+ * @b: user data
+ * @nr: size of data
+ *
+ * Helper function to speed up n_tty_read. It is only called when
+ * ICANON is off; it copies characters straight from the tty queue to
+ * user space directly. It can be profitably called twice; once to
+ * drain the space from the tail pointer to the (physical) end of the
+ * buffer, and once to drain the space from the (physical) beginning of
+ *	the buffer to the head pointer.
+ *
+ * Called under the tty->atomic_read_lock sem
+ *
+ */
+
+static int copy_from_read_buf(struct tty_struct *tty,
+ unsigned char __user **b,
+ size_t *nr)
+
+{
+ int retval;
+ size_t n;
+ unsigned long flags;
+
+ retval = 0;
+ spin_lock_irqsave(&tty->read_lock, flags);
+ n = min(tty->read_cnt, N_TTY_BUF_SIZE - tty->read_tail);
+ n = min(*nr, n);
+ spin_unlock_irqrestore(&tty->read_lock, flags);
+ if (n) {
+ retval = copy_to_user(*b, &tty->read_buf[tty->read_tail], n);
+ n -= retval;
+ tty_audit_add_data(tty, &tty->read_buf[tty->read_tail], n);
+ spin_lock_irqsave(&tty->read_lock, flags);
+ tty->read_tail = (tty->read_tail + n) & (N_TTY_BUF_SIZE-1);
+ tty->read_cnt -= n;
+ /* Turn single EOF into zero-length read */
+ if (L_EXTPROC(tty) && tty->icanon && n == 1) {
+ if (!tty->read_cnt && (*b)[n-1] == EOF_CHAR(tty))
+ n--;
+ }
+ spin_unlock_irqrestore(&tty->read_lock, flags);
+ *b += n;
+ *nr -= n;
+ }
+ return retval;
+}
+
+extern ssize_t redirected_tty_write(struct file *, const char __user *,
+ size_t, loff_t *);
+
+/**
+ * job_control - check job control
+ * @tty: tty
+ * @file: file handle
+ *
+ * Perform job control management checks on this file/tty descriptor
+ * and if appropriate send any needed signals and return a negative
+ * error code if action should be taken.
+ *
+ * FIXME:
+ * Locking: None - redirected write test is safe, testing
+ * current->signal should possibly lock current->sighand
+ * pgrp locking ?
+ */
+
+static int job_control(struct tty_struct *tty, struct file *file)
+{
+ /* Job control check -- must be done at start and after
+ every sleep (POSIX.1 7.1.1.4). */
+ /* NOTE: not yet done after every sleep pending a thorough
+ check of the logic of this change. -- jlc */
+ /* don't stop on /dev/console */
+ if (file->f_op->write != redirected_tty_write &&
+ current->signal->tty == tty) {
+ if (!tty->pgrp)
+ printk(KERN_ERR "n_tty_read: no tty->pgrp!\n");
+ else if (task_pgrp(current) != tty->pgrp) {
+ if (is_ignored(SIGTTIN) ||
+ is_current_pgrp_orphaned())
+ return -EIO;
+ kill_pgrp(task_pgrp(current), SIGTTIN, 1);
+ set_thread_flag(TIF_SIGPENDING);
+ return -ERESTARTSYS;
+ }
+ }
+ return 0;
+}
+
+
+/**
+ * n_tty_read - read function for tty
+ * @tty: tty device
+ * @file: file object
+ * @buf: userspace buffer pointer
+ * @nr: size of I/O
+ *
+ * Perform reads for the line discipline. We are guaranteed that the
+ * line discipline will not be closed under us but we may get multiple
+ * parallel readers and must handle this ourselves. We may also get
+ * a hangup. Always called in user context, may sleep.
+ *
+ * This code must be sure never to sleep through a hangup.
+ */
+
+static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user *buf, size_t nr)
+{
+ unsigned char __user *b = buf;
+ DECLARE_WAITQUEUE(wait, current);
+ int c;
+ int minimum, time;
+ ssize_t retval = 0;
+ ssize_t size;
+ long timeout;
+ unsigned long flags;
+ int packet;
+
+do_it_again:
+
+ BUG_ON(!tty->read_buf);
+
+ c = job_control(tty, file);
+ if (c < 0)
+ return c;
+
+ minimum = time = 0;
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (!tty->icanon) {
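+		/*
+		 * Non-canonical reads follow the termios VMIN/VTIME rules:
+		 * with MIN > 0 we wait for at least MIN characters (TIME,
+		 * if set, acts as an inter-character timer); with MIN == 0
+		 * TIME is an overall timeout, or we return immediately if
+		 * both are zero.
+		 */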
+ time = (HZ / 10) * TIME_CHAR(tty);
+ minimum = MIN_CHAR(tty);
+ if (minimum) {
+ if (time)
+ tty->minimum_to_wake = 1;
+ else if (!waitqueue_active(&tty->read_wait) ||
+ (tty->minimum_to_wake > minimum))
+ tty->minimum_to_wake = minimum;
+ } else {
+ timeout = 0;
+ if (time) {
+ timeout = time;
+ time = 0;
+ }
+ tty->minimum_to_wake = minimum = 1;
+ }
+ }
+
+ /*
+ * Internal serialization of reads.
+ */
+ if (file->f_flags & O_NONBLOCK) {
+ if (!mutex_trylock(&tty->atomic_read_lock))
+ return -EAGAIN;
+ } else {
+ if (mutex_lock_interruptible(&tty->atomic_read_lock))
+ return -ERESTARTSYS;
+ }
+ packet = tty->packet;
+
+ add_wait_queue(&tty->read_wait, &wait);
+ while (nr) {
+ /* First test for status change. */
+ if (packet && tty->link->ctrl_status) {
+ unsigned char cs;
+ if (b != buf)
+ break;
+ spin_lock_irqsave(&tty->link->ctrl_lock, flags);
+ cs = tty->link->ctrl_status;
+ tty->link->ctrl_status = 0;
+ spin_unlock_irqrestore(&tty->link->ctrl_lock, flags);
+ if (tty_put_user(tty, cs, b++)) {
+ retval = -EFAULT;
+ b--;
+ break;
+ }
+ nr--;
+ break;
+ }
+ /* This statement must be first before checking for input
+ so that any interrupt will set the state back to
+ TASK_RUNNING. */
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (((minimum - (b - buf)) < tty->minimum_to_wake) &&
+ ((minimum - (b - buf)) >= 1))
+ tty->minimum_to_wake = (minimum - (b - buf));
+
+ if (!input_available_p(tty, 0)) {
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
+ retval = -EIO;
+ break;
+ }
+ if (tty_hung_up_p(file))
+ break;
+ if (!timeout)
+ break;
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+ /* FIXME: does n_tty_set_room need locking ? */
+ n_tty_set_room(tty);
+ timeout = schedule_timeout(timeout);
+ BUG_ON(!tty->read_buf);
+ continue;
+ }
+ __set_current_state(TASK_RUNNING);
+
+ /* Deal with packet mode. */
+ if (packet && b == buf) {
+ if (tty_put_user(tty, TIOCPKT_DATA, b++)) {
+ retval = -EFAULT;
+ b--;
+ break;
+ }
+ nr--;
+ }
+
+ if (tty->icanon && !L_EXTPROC(tty)) {
+ /* N.B. avoid overrun if nr == 0 */
+ while (nr && tty->read_cnt) {
+ int eol;
+
+ eol = test_and_clear_bit(tty->read_tail,
+ tty->read_flags);
+ c = tty->read_buf[tty->read_tail];
+ spin_lock_irqsave(&tty->read_lock, flags);
+ tty->read_tail = ((tty->read_tail+1) &
+ (N_TTY_BUF_SIZE-1));
+ tty->read_cnt--;
+ if (eol) {
+ /* this test should be redundant:
+ * we shouldn't be reading data if
+ * canon_data is 0
+ */
+ if (--tty->canon_data < 0)
+ tty->canon_data = 0;
+ }
+ spin_unlock_irqrestore(&tty->read_lock, flags);
+
+ if (!eol || (c != __DISABLED_CHAR)) {
+ if (tty_put_user(tty, c, b++)) {
+ retval = -EFAULT;
+ b--;
+ break;
+ }
+ nr--;
+ }
+ if (eol) {
+ tty_audit_push(tty);
+ break;
+ }
+ }
+ if (retval)
+ break;
+ } else {
+ int uncopied;
+ /* The copy function takes the read lock and handles
+ locking internally for this case */
+ uncopied = copy_from_read_buf(tty, &b, &nr);
+ uncopied += copy_from_read_buf(tty, &b, &nr);
+ if (uncopied) {
+ retval = -EFAULT;
+ break;
+ }
+ }
+
+ /* If there is enough space in the read buffer now, let the
+ * low-level driver know. We use n_tty_chars_in_buffer() to
+ * check the buffer, as it now knows about canonical mode.
+ * Otherwise, if the driver is throttled and the line is
+ * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
+ * we won't get any more characters.
+ */
+ if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
+ n_tty_set_room(tty);
+ check_unthrottle(tty);
+ }
+
+ if (b - buf >= minimum)
+ break;
+ if (time)
+ timeout = time;
+ }
+ mutex_unlock(&tty->atomic_read_lock);
+ remove_wait_queue(&tty->read_wait, &wait);
+
+ if (!waitqueue_active(&tty->read_wait))
+ tty->minimum_to_wake = minimum;
+
+ __set_current_state(TASK_RUNNING);
+ size = b - buf;
+ if (size) {
+ retval = size;
+ if (nr)
+ clear_bit(TTY_PUSH, &tty->flags);
+ } else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
+ goto do_it_again;
+
+ n_tty_set_room(tty);
+ return retval;
+}
+
+/**
+ * n_tty_write - write function for tty
+ * @tty: tty device
+ * @file: file object
+ * @buf: userspace buffer pointer
+ * @nr: size of I/O
+ *
+ * Write function of the terminal device. This is serialized with
+ * respect to other write callers but not to termios changes, reads
+ * and other such events. Since the receive code will echo characters,
+ * thus calling driver write methods, the output_lock is used in
+ * the output processing functions called here as well as in the
+ * echo processing function to protect the column state and space
+ * left in the buffer.
+ *
+ * This code must be sure never to sleep through a hangup.
+ *
+ * Locking: output_lock to protect column state and space left
+ * (note that the process_output*() functions take this
+ * lock themselves)
+ */
+
+static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr)
+{
+ const unsigned char *b = buf;
+ DECLARE_WAITQUEUE(wait, current);
+ int c;
+ ssize_t retval = 0;
+
+ /* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
+ if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ }
+
+ /* Write out any echoed characters that are still pending */
+ process_echoes(tty);
+
+ add_wait_queue(&tty->write_wait, &wait);
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+ if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) {
+ retval = -EIO;
+ break;
+ }
+ if (O_OPOST(tty) && !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
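+			/*
+			 * Output post-processing: write as much as possible
+			 * via process_output_block(), then push characters
+			 * one at a time through process_output() until all
+			 * data is written or no more can be accepted.
+			 */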
+ while (nr > 0) {
+ ssize_t num = process_output_block(tty, b, nr);
+ if (num < 0) {
+ if (num == -EAGAIN)
+ break;
+ retval = num;
+ goto break_out;
+ }
+ b += num;
+ nr -= num;
+ if (nr == 0)
+ break;
+ c = *b;
+ if (process_output(c, tty) < 0)
+ break;
+ b++; nr--;
+ }
+ if (tty->ops->flush_chars)
+ tty->ops->flush_chars(tty);
+ } else {
+ while (nr > 0) {
+ c = tty->ops->write(tty, b, nr);
+ if (c < 0) {
+ retval = c;
+ goto break_out;
+ }
+ if (!c)
+ break;
+ b += c;
+ nr -= c;
+ }
+ }
+ if (!nr)
+ break;
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ break;
+ }
+ schedule();
+ }
+break_out:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&tty->write_wait, &wait);
+ if (b - buf != nr && tty->fasync)
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ return (b - buf) ? b - buf : retval;
+}
+
+/**
+ * n_tty_poll - poll method for N_TTY
+ * @tty: terminal device
+ * @file: file accessing it
+ * @wait: poll table
+ *
+ * Called when the line discipline is asked to poll() for data or
+ * for special events. This code is not serialized with respect to
+ * other events save open/close.
+ *
+ * This code must be sure never to sleep through a hangup.
+ * Called without the kernel lock held - fine
+ */
+
+static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
+ poll_table *wait)
+{
+ unsigned int mask = 0;
+
+ poll_wait(file, &tty->read_wait, wait);
+ poll_wait(file, &tty->write_wait, wait);
+ if (input_available_p(tty, TIME_CHAR(tty) ? 0 : MIN_CHAR(tty)))
+ mask |= POLLIN | POLLRDNORM;
+ if (tty->packet && tty->link->ctrl_status)
+ mask |= POLLPRI | POLLIN | POLLRDNORM;
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ mask |= POLLHUP;
+ if (tty_hung_up_p(file))
+ mask |= POLLHUP;
+ if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
+ if (MIN_CHAR(tty) && !TIME_CHAR(tty))
+ tty->minimum_to_wake = MIN_CHAR(tty);
+ else
+ tty->minimum_to_wake = 1;
+ }
+ if (tty->ops->write && !tty_is_writelocked(tty) &&
+ tty_chars_in_buffer(tty) < WAKEUP_CHARS &&
+ tty_write_room(tty) > 0)
+ mask |= POLLOUT | POLLWRNORM;
+ return mask;
+}
+
+static unsigned long inq_canon(struct tty_struct *tty)
+{
+ int nr, head, tail;
+
+ if (!tty->canon_data)
+ return 0;
+ head = tty->canon_head;
+ tail = tty->read_tail;
+ nr = (head - tail) & (N_TTY_BUF_SIZE-1);
+ /* Skip EOF-chars.. */
+ while (head != tail) {
+ if (test_bit(tail, tty->read_flags) &&
+ tty->read_buf[tail] == __DISABLED_CHAR)
+ nr--;
+ tail = (tail+1) & (N_TTY_BUF_SIZE-1);
+ }
+ return nr;
+}
+
+static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int retval;
+
+ switch (cmd) {
+ case TIOCOUTQ:
+ return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
+ case TIOCINQ:
+ /* FIXME: Locking */
+ retval = tty->read_cnt;
+ if (L_ICANON(tty))
+ retval = inq_canon(tty);
+ return put_user(retval, (unsigned int __user *) arg);
+ default:
+ return n_tty_ioctl_helper(tty, file, cmd, arg);
+ }
+}
+
+struct tty_ldisc_ops tty_ldisc_N_TTY = {
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_tty",
+ .open = n_tty_open,
+ .close = n_tty_close,
+ .flush_buffer = n_tty_flush_buffer,
+ .chars_in_buffer = n_tty_chars_in_buffer,
+ .read = n_tty_read,
+ .write = n_tty_write,
+ .ioctl = n_tty_ioctl,
+ .set_termios = n_tty_set_termios,
+ .poll = n_tty_poll,
+ .receive_buf = n_tty_receive_buf,
+ .write_wakeup = n_tty_write_wakeup
+};
+
+/**
+ * n_tty_inherit_ops - inherit N_TTY methods
+ * @ops: struct tty_ldisc_ops where to save N_TTY methods
+ *
+ * Used by a generic struct tty_ldisc_ops to easily inherit N_TTY
+ * methods.
+ */
+
+void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
+{
+ *ops = tty_ldisc_N_TTY;
+ ops->owner = NULL;
+ ops->refcount = ops->flags = 0;
+}
+EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
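+
+/*
+ * Illustrative sketch (not part of this file): a specialised line
+ * discipline built on top of N_TTY would typically inherit these methods
+ * and override only what it needs. The names below (my_ops, my_receive_buf,
+ * N_MY_LDISC) are made up for the example:
+ *
+ *	static struct tty_ldisc_ops my_ops;
+ *
+ *	n_tty_inherit_ops(&my_ops);
+ *	my_ops.owner = THIS_MODULE;
+ *	my_ops.name = "my_ldisc";
+ *	my_ops.receive_buf = my_receive_buf;
+ *	tty_register_ldisc(N_MY_LDISC, &my_ops);
+ */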
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
new file mode 100644
index 00000000..fd347ff3
--- /dev/null
+++ b/drivers/tty/nozomi.c
@@ -0,0 +1,1971 @@
+/*
+ * nozomi.c -- HSDPA driver Broadband Wireless Data Card - Globe Trotter
+ *
+ * Written by: Ulf Jakobsson,
+ *	Jan Åkerfeldt,
+ * Stefan Thomasson,
+ *
+ * Maintained by: Paul Hardwick (p.hardwick@option.com)
+ *
+ * Patches:
+ * Locking code changes for Vodafone by Sphere Systems Ltd,
+ * Andrew Bird (ajb@spheresystems.co.uk )
+ * & Phil Sanderson
+ *
+ * Source has been ported from an implementation made by Filip Aben @ Option
+ *
+ * --------------------------------------------------------------------------
+ *
+ * Copyright (c) 2005,2006 Option Wireless Sweden AB
+ * Copyright (c) 2006 Sphere Systems Ltd
+ * Copyright (c) 2006 Option Wireless n/v
+ * All rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * --------------------------------------------------------------------------
+ */
+
+/* Enable this to have a lot of debug printouts */
+#define DEBUG
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/sched.h>
+#include <linux/serial.h>
+#include <linux/interrupt.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/kfifo.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+
+#include <linux/delay.h>
+
+
+#define VERSION_STRING DRIVER_DESC " 2.1d"
+
+/* Macros definitions */
+
+/* Default debug printout level */
+#define NOZOMI_DEBUG_LEVEL 0x00
+
+#define P_BUF_SIZE 128
+#define NFO(_err_flag_, args...) \
+do { \
+ char tmp[P_BUF_SIZE]; \
+ snprintf(tmp, sizeof(tmp), ##args); \
+ printk(_err_flag_ "[%d] %s(): %s\n", __LINE__, \
+ __func__, tmp); \
+} while (0)
+
+#define DBG1(args...) D_(0x01, ##args)
+#define DBG2(args...) D_(0x02, ##args)
+#define DBG3(args...) D_(0x04, ##args)
+#define DBG4(args...) D_(0x08, ##args)
+#define DBG5(args...) D_(0x10, ##args)
+#define DBG6(args...) D_(0x20, ##args)
+#define DBG7(args...) D_(0x40, ##args)
+#define DBG8(args...) D_(0x80, ##args)
+
+#ifdef DEBUG
+/* Do we need this settable at runtime? */
+static int debug = NOZOMI_DEBUG_LEVEL;
+
+#define D(lvl, args...) do \
+ {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \
+ while (0)
+#define D_(lvl, args...) D(lvl, ##args)
+
+/* These printouts are always printed */
+
+#else
+static int debug;
+#define D_(lvl, args...)
+#endif
+
+/* TODO: rewrite to optimize macros... */
+
+#define TMP_BUF_MAX 256
+
+#define DUMP(buf__,len__) \
+ do { \
+ char tbuf[TMP_BUF_MAX] = {0};\
+ if (len__ > 1) {\
+ snprintf(tbuf, len__ > TMP_BUF_MAX ? TMP_BUF_MAX : len__, "%s", buf__);\
+ if (tbuf[len__-2] == '\r') {\
+ tbuf[len__-2] = 'r';\
+ } \
+ DBG1("SENDING: '%s' (%d+n)", tbuf, len__);\
+ } else {\
+ DBG1("SENDING: '%s' (%d)", tbuf, len__);\
+ } \
+} while (0)
+
+/* Defines */
+#define NOZOMI_NAME "nozomi"
+#define NOZOMI_NAME_TTY "nozomi_tty"
+#define DRIVER_DESC "Nozomi driver"
+
+#define NTTY_TTY_MAXMINORS 256
+#define NTTY_FIFO_BUFFER_SIZE 8192
+
+/* Must be power of 2 */
+#define FIFO_BUFFER_SIZE_UL 8192
+
+/* Size of tmp send buffer to card */
+#define SEND_BUF_MAX 1024
+#define RECEIVE_BUF_MAX 4
+
+
+#define R_IIR 0x0000 /* Interrupt Identity Register */
+#define R_FCR 0x0000 /* Flow Control Register */
+#define R_IER 0x0004 /* Interrupt Enable Register */
+
+#define CONFIG_MAGIC 0xEFEFFEFE
+#define TOGGLE_VALID 0x0000
+
+/* Definition of interrupt tokens */
+#define MDM_DL1 0x0001
+#define MDM_UL1 0x0002
+#define MDM_DL2 0x0004
+#define MDM_UL2 0x0008
+#define DIAG_DL1 0x0010
+#define DIAG_DL2 0x0020
+#define DIAG_UL 0x0040
+#define APP1_DL 0x0080
+#define APP1_UL 0x0100
+#define APP2_DL 0x0200
+#define APP2_UL 0x0400
+#define CTRL_DL 0x0800
+#define CTRL_UL 0x1000
+#define RESET 0x8000
+
+#define MDM_DL (MDM_DL1 | MDM_DL2)
+#define MDM_UL (MDM_UL1 | MDM_UL2)
+#define DIAG_DL (DIAG_DL1 | DIAG_DL2)
+
+/* modem signal definition */
+#define CTRL_DSR 0x0001
+#define CTRL_DCD 0x0002
+#define CTRL_RI 0x0004
+#define CTRL_CTS 0x0008
+
+#define CTRL_DTR 0x0001
+#define CTRL_RTS 0x0002
+
+#define MAX_PORT 4
+#define NOZOMI_MAX_PORTS 5
+#define NOZOMI_MAX_CARDS (NTTY_TTY_MAXMINORS / MAX_PORT)
+
+/* Type definitions */
+
+/*
+ * There are two types of nozomi cards,
+ * one with 2048 bytes of memory and one with 8192 bytes of memory
+ */
+enum card_type {
+ F32_2 = 2048, /* 512 bytes downlink + uplink * 2 -> 2048 */
+ F32_8 = 8192, /* 3072 bytes downl. + 1024 bytes uplink * 2 -> 8192 */
+};
+
+/* Initialization states a card can be in */
+enum card_state {
+ NOZOMI_STATE_UKNOWN = 0,
+ NOZOMI_STATE_ENABLED = 1, /* pci device enabled */
+ NOZOMI_STATE_ALLOCATED = 2, /* config setup done */
+ NOZOMI_STATE_READY = 3, /* flowcontrols received */
+};
+
+/* Two different toggle channels exist */
+enum channel_type {
+ CH_A = 0,
+ CH_B = 1,
+};
+
+/* Port definition for the card regarding flow control */
+enum ctrl_port_type {
+ CTRL_CMD = 0,
+ CTRL_MDM = 1,
+ CTRL_DIAG = 2,
+ CTRL_APP1 = 3,
+ CTRL_APP2 = 4,
+ CTRL_ERROR = -1,
+};
+
+/* Ports that the nozomi has */
+enum port_type {
+ PORT_MDM = 0,
+ PORT_DIAG = 1,
+ PORT_APP1 = 2,
+ PORT_APP2 = 3,
+ PORT_CTRL = 4,
+ PORT_ERROR = -1,
+};
+
+#ifdef __BIG_ENDIAN
+/* Big endian */
+
+struct toggles {
+ unsigned int enabled:5; /*
+ * Toggle fields are valid if enabled is 0,
+ * else A-channels must always be used.
+ */
+ unsigned int diag_dl:1;
+ unsigned int mdm_dl:1;
+ unsigned int mdm_ul:1;
+} __attribute__ ((packed));
+
+/* Configuration table to read at startup of card */
+/* Is for now only needed during initialization phase */
+struct config_table {
+ u32 signature;
+ u16 product_information;
+ u16 version;
+ u8 pad3[3];
+ struct toggles toggle;
+ u8 pad1[4];
+ u16 dl_mdm_len1; /*
+ * If this is 64, it can hold
+ * 60 bytes + 4 that is length field
+ */
+ u16 dl_start;
+
+ u16 dl_diag_len1;
+ u16 dl_mdm_len2; /*
+ * If this is 64, it can hold
+ * 60 bytes + 4 that is length field
+ */
+ u16 dl_app1_len;
+
+ u16 dl_diag_len2;
+ u16 dl_ctrl_len;
+ u16 dl_app2_len;
+ u8 pad2[16];
+ u16 ul_mdm_len1;
+ u16 ul_start;
+ u16 ul_diag_len;
+ u16 ul_mdm_len2;
+ u16 ul_app1_len;
+ u16 ul_app2_len;
+ u16 ul_ctrl_len;
+} __attribute__ ((packed));
+
+/* This stores all control downlink flags */
+struct ctrl_dl {
+ u8 port;
+ unsigned int reserved:4;
+ unsigned int CTS:1;
+ unsigned int RI:1;
+ unsigned int DCD:1;
+ unsigned int DSR:1;
+} __attribute__ ((packed));
+
+/* This stores all control uplink flags */
+struct ctrl_ul {
+ u8 port;
+ unsigned int reserved:6;
+ unsigned int RTS:1;
+ unsigned int DTR:1;
+} __attribute__ ((packed));
+
+#else
+/* Little endian */
+
+/* This represents the toggle information */
+struct toggles {
+ unsigned int mdm_ul:1;
+ unsigned int mdm_dl:1;
+ unsigned int diag_dl:1;
+ unsigned int enabled:5; /*
+ * Toggle fields are valid if enabled is 0,
+ * else A-channels must always be used.
+ */
+} __attribute__ ((packed));
+
+/* Configuration table to read at startup of card */
+struct config_table {
+ u32 signature;
+ u16 version;
+ u16 product_information;
+ struct toggles toggle;
+ u8 pad1[7];
+ u16 dl_start;
+ u16 dl_mdm_len1; /*
+ * If this is 64, it can hold
+ * 60 bytes + 4 that is length field
+ */
+ u16 dl_mdm_len2;
+ u16 dl_diag_len1;
+ u16 dl_diag_len2;
+ u16 dl_app1_len;
+ u16 dl_app2_len;
+ u16 dl_ctrl_len;
+ u8 pad2[16];
+ u16 ul_start;
+ u16 ul_mdm_len2;
+ u16 ul_mdm_len1;
+ u16 ul_diag_len;
+ u16 ul_app1_len;
+ u16 ul_app2_len;
+ u16 ul_ctrl_len;
+} __attribute__ ((packed));
+
+/* This stores all control downlink flags */
+struct ctrl_dl {
+ unsigned int DSR:1;
+ unsigned int DCD:1;
+ unsigned int RI:1;
+ unsigned int CTS:1;
+ unsigned int reserverd:4;
+ u8 port;
+} __attribute__ ((packed));
+
+/* This stores all control uplink flags */
+struct ctrl_ul {
+ unsigned int DTR:1;
+ unsigned int RTS:1;
+ unsigned int reserved:6;
+ u8 port;
+} __attribute__ ((packed));
+#endif
+
+/* This holds all information that is needed regarding a port */
+struct port {
+ struct tty_port port;
+ u8 update_flow_control;
+ struct ctrl_ul ctrl_ul;
+ struct ctrl_dl ctrl_dl;
+ struct kfifo fifo_ul;
+ void __iomem *dl_addr[2];
+ u32 dl_size[2];
+ u8 toggle_dl;
+ void __iomem *ul_addr[2];
+ u32 ul_size[2];
+ u8 toggle_ul;
+ u16 token_dl;
+
+ wait_queue_head_t tty_wait;
+ struct async_icount tty_icount;
+
+ struct nozomi *dc;
+};
+
+/* Private data one for each card in the system */
+struct nozomi {
+ void __iomem *base_addr;
+ unsigned long flip;
+
+ /* Pointers to registers */
+ void __iomem *reg_iir;
+ void __iomem *reg_fcr;
+ void __iomem *reg_ier;
+
+ u16 last_ier;
+ enum card_type card_type;
+ struct config_table config_table; /* Configuration table */
+ struct pci_dev *pdev;
+ struct port port[NOZOMI_MAX_PORTS];
+ u8 *send_buf;
+
+ spinlock_t spin_mutex; /* secures access to registers and tty */
+
+ unsigned int index_start;
+ enum card_state state;
+ u32 open_ttys;
+};
+
+/* This is a data packet that is read or written to/from card */
+struct buffer {
+ u32 size; /* size is the length of the data buffer */
+ u8 *data;
+} __attribute__ ((packed));
+
+/* Global variables */
+static const struct pci_device_id nozomi_pci_tbl[] __devinitconst = {
+ {PCI_DEVICE(0x1931, 0x000c)}, /* Nozomi HSDPA */
+ {},
+};
+
+MODULE_DEVICE_TABLE(pci, nozomi_pci_tbl);
+
+static struct nozomi *ndevs[NOZOMI_MAX_CARDS];
+static struct tty_driver *ntty_driver;
+
+static const struct tty_port_operations noz_tty_port_ops;
+
+/*
+ * find card by tty_index
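+ * (each card owns MAX_PORT consecutive tty minors: index / MAX_PORT
+ * selects the card, index % MAX_PORT the port within it)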
+ */
+static inline struct nozomi *get_dc_by_tty(const struct tty_struct *tty)
+{
+ return tty ? ndevs[tty->index / MAX_PORT] : NULL;
+}
+
+static inline struct port *get_port_by_tty(const struct tty_struct *tty)
+{
+ struct nozomi *ndev = get_dc_by_tty(tty);
+ return ndev ? &ndev->port[tty->index % MAX_PORT] : NULL;
+}
+
+/*
+ * TODO:
+ * -Optimize
+ * -Rewrite cleaner
+ */
+
+static void read_mem32(u32 *buf, const void __iomem *mem_addr_start,
+ u32 size_bytes)
+{
+ u32 i = 0;
+ const u32 __iomem *ptr = mem_addr_start;
+ u16 *buf16;
+
+ if (unlikely(!ptr || !buf))
+ goto out;
+
+ /* shortcut for extremely often used cases */
+ switch (size_bytes) {
+ case 2: /* 2 bytes */
+ buf16 = (u16 *) buf;
+ *buf16 = __le16_to_cpu(readw(ptr));
+ goto out;
+ break;
+ case 4: /* 4 bytes */
+ *(buf) = __le32_to_cpu(readl(ptr));
+ goto out;
+ break;
+ }
+
+ while (i < size_bytes) {
+ if (size_bytes - i == 2) {
+ /* Handle 2 bytes in the end */
+ buf16 = (u16 *) buf;
+ *(buf16) = __le16_to_cpu(readw(ptr));
+ i += 2;
+ } else {
+ /* Read 4 bytes */
+ *(buf) = __le32_to_cpu(readl(ptr));
+ i += 4;
+ }
+ buf++;
+ ptr++;
+ }
+out:
+ return;
+}
+
+/*
+ * TODO:
+ * -Optimize
+ * -Rewrite cleaner
+ */
+static u32 write_mem32(void __iomem *mem_addr_start, const u32 *buf,
+ u32 size_bytes)
+{
+ u32 i = 0;
+ u32 __iomem *ptr = mem_addr_start;
+ const u16 *buf16;
+
+ if (unlikely(!ptr || !buf))
+ return 0;
+
+ /* shortcut for extremely often used cases */
+ switch (size_bytes) {
+ case 2: /* 2 bytes */
+ buf16 = (const u16 *)buf;
+ writew(__cpu_to_le16(*buf16), ptr);
+ return 2;
+ break;
+ case 1: /*
+ * also needs to write 4 bytes in this case
+ * so falling through..
+ */
+ case 4: /* 4 bytes */
+ writel(__cpu_to_le32(*buf), ptr);
+ return 4;
+ break;
+ }
+
+ while (i < size_bytes) {
+ if (size_bytes - i == 2) {
+ /* 2 bytes */
+ buf16 = (const u16 *)buf;
+ writew(__cpu_to_le16(*buf16), ptr);
+ i += 2;
+ } else {
+ /* 4 bytes */
+ writel(__cpu_to_le32(*buf), ptr);
+ i += 4;
+ }
+ buf++;
+ ptr++;
+ }
+ return i;
+}
+
+/* Setup pointers to different channels and also setup buffer sizes. */
+static void setup_memory(struct nozomi *dc)
+{
+ void __iomem *offset = dc->base_addr + dc->config_table.dl_start;
+	/* The length reported includes the 4 byte length field itself,
+	 * hence subtract 4.
+ */
+ const u16 buff_offset = 4;
+
+ /* Modem port dl configuration */
+ dc->port[PORT_MDM].dl_addr[CH_A] = offset;
+ dc->port[PORT_MDM].dl_addr[CH_B] =
+ (offset += dc->config_table.dl_mdm_len1);
+ dc->port[PORT_MDM].dl_size[CH_A] =
+ dc->config_table.dl_mdm_len1 - buff_offset;
+ dc->port[PORT_MDM].dl_size[CH_B] =
+ dc->config_table.dl_mdm_len2 - buff_offset;
+
+ /* Diag port dl configuration */
+ dc->port[PORT_DIAG].dl_addr[CH_A] =
+ (offset += dc->config_table.dl_mdm_len2);
+ dc->port[PORT_DIAG].dl_size[CH_A] =
+ dc->config_table.dl_diag_len1 - buff_offset;
+ dc->port[PORT_DIAG].dl_addr[CH_B] =
+ (offset += dc->config_table.dl_diag_len1);
+ dc->port[PORT_DIAG].dl_size[CH_B] =
+ dc->config_table.dl_diag_len2 - buff_offset;
+
+ /* App1 port dl configuration */
+ dc->port[PORT_APP1].dl_addr[CH_A] =
+ (offset += dc->config_table.dl_diag_len2);
+ dc->port[PORT_APP1].dl_size[CH_A] =
+ dc->config_table.dl_app1_len - buff_offset;
+
+ /* App2 port dl configuration */
+ dc->port[PORT_APP2].dl_addr[CH_A] =
+ (offset += dc->config_table.dl_app1_len);
+ dc->port[PORT_APP2].dl_size[CH_A] =
+ dc->config_table.dl_app2_len - buff_offset;
+
+ /* Ctrl dl configuration */
+ dc->port[PORT_CTRL].dl_addr[CH_A] =
+ (offset += dc->config_table.dl_app2_len);
+ dc->port[PORT_CTRL].dl_size[CH_A] =
+ dc->config_table.dl_ctrl_len - buff_offset;
+
+ offset = dc->base_addr + dc->config_table.ul_start;
+
+ /* Modem Port ul configuration */
+ dc->port[PORT_MDM].ul_addr[CH_A] = offset;
+ dc->port[PORT_MDM].ul_size[CH_A] =
+ dc->config_table.ul_mdm_len1 - buff_offset;
+ dc->port[PORT_MDM].ul_addr[CH_B] =
+ (offset += dc->config_table.ul_mdm_len1);
+ dc->port[PORT_MDM].ul_size[CH_B] =
+ dc->config_table.ul_mdm_len2 - buff_offset;
+
+ /* Diag port ul configuration */
+ dc->port[PORT_DIAG].ul_addr[CH_A] =
+ (offset += dc->config_table.ul_mdm_len2);
+ dc->port[PORT_DIAG].ul_size[CH_A] =
+ dc->config_table.ul_diag_len - buff_offset;
+
+ /* App1 port ul configuration */
+ dc->port[PORT_APP1].ul_addr[CH_A] =
+ (offset += dc->config_table.ul_diag_len);
+ dc->port[PORT_APP1].ul_size[CH_A] =
+ dc->config_table.ul_app1_len - buff_offset;
+
+ /* App2 port ul configuration */
+ dc->port[PORT_APP2].ul_addr[CH_A] =
+ (offset += dc->config_table.ul_app1_len);
+ dc->port[PORT_APP2].ul_size[CH_A] =
+ dc->config_table.ul_app2_len - buff_offset;
+
+ /* Ctrl ul configuration */
+ dc->port[PORT_CTRL].ul_addr[CH_A] =
+ (offset += dc->config_table.ul_app2_len);
+ dc->port[PORT_CTRL].ul_size[CH_A] =
+ dc->config_table.ul_ctrl_len - buff_offset;
+}
+
+/* Dump the config table during the initialization phase */
+#ifdef DEBUG
+static void dump_table(const struct nozomi *dc)
+{
+ DBG3("signature: 0x%08X", dc->config_table.signature);
+ DBG3("version: 0x%04X", dc->config_table.version);
+ DBG3("product_information: 0x%04X", \
+ dc->config_table.product_information);
+ DBG3("toggle enabled: %d", dc->config_table.toggle.enabled);
+ DBG3("toggle up_mdm: %d", dc->config_table.toggle.mdm_ul);
+ DBG3("toggle dl_mdm: %d", dc->config_table.toggle.mdm_dl);
+ DBG3("toggle dl_dbg: %d", dc->config_table.toggle.diag_dl);
+
+ DBG3("dl_start: 0x%04X", dc->config_table.dl_start);
+ DBG3("dl_mdm_len0: 0x%04X, %d", dc->config_table.dl_mdm_len1,
+ dc->config_table.dl_mdm_len1);
+ DBG3("dl_mdm_len1: 0x%04X, %d", dc->config_table.dl_mdm_len2,
+ dc->config_table.dl_mdm_len2);
+ DBG3("dl_diag_len0: 0x%04X, %d", dc->config_table.dl_diag_len1,
+ dc->config_table.dl_diag_len1);
+ DBG3("dl_diag_len1: 0x%04X, %d", dc->config_table.dl_diag_len2,
+ dc->config_table.dl_diag_len2);
+ DBG3("dl_app1_len: 0x%04X, %d", dc->config_table.dl_app1_len,
+ dc->config_table.dl_app1_len);
+ DBG3("dl_app2_len: 0x%04X, %d", dc->config_table.dl_app2_len,
+ dc->config_table.dl_app2_len);
+ DBG3("dl_ctrl_len: 0x%04X, %d", dc->config_table.dl_ctrl_len,
+ dc->config_table.dl_ctrl_len);
+ DBG3("ul_start: 0x%04X, %d", dc->config_table.ul_start,
+ dc->config_table.ul_start);
+ DBG3("ul_mdm_len[0]: 0x%04X, %d", dc->config_table.ul_mdm_len1,
+ dc->config_table.ul_mdm_len1);
+ DBG3("ul_mdm_len[1]: 0x%04X, %d", dc->config_table.ul_mdm_len2,
+ dc->config_table.ul_mdm_len2);
+ DBG3("ul_diag_len: 0x%04X, %d", dc->config_table.ul_diag_len,
+ dc->config_table.ul_diag_len);
+ DBG3("ul_app1_len: 0x%04X, %d", dc->config_table.ul_app1_len,
+ dc->config_table.ul_app1_len);
+ DBG3("ul_app2_len: 0x%04X, %d", dc->config_table.ul_app2_len,
+ dc->config_table.ul_app2_len);
+ DBG3("ul_ctrl_len: 0x%04X, %d", dc->config_table.ul_ctrl_len,
+ dc->config_table.ul_ctrl_len);
+}
+#else
+static inline void dump_table(const struct nozomi *dc) { }
+#endif
+
+/*
+ * Read the configuration table from the card during the initialization phase
+ * Returns 1 if ok, else 0
+ */
+static int nozomi_read_config_table(struct nozomi *dc)
+{
+ read_mem32((u32 *) &dc->config_table, dc->base_addr + 0,
+ sizeof(struct config_table));
+
+ if (dc->config_table.signature != CONFIG_MAGIC) {
+ dev_err(&dc->pdev->dev, "ConfigTable Bad! 0x%08X != 0x%08X\n",
+ dc->config_table.signature, CONFIG_MAGIC);
+ return 0;
+ }
+
+ if ((dc->config_table.version == 0)
+ || (dc->config_table.toggle.enabled == TOGGLE_VALID)) {
+ int i;
+ DBG1("Second phase, configuring card");
+
+ setup_memory(dc);
+
+ dc->port[PORT_MDM].toggle_ul = dc->config_table.toggle.mdm_ul;
+ dc->port[PORT_MDM].toggle_dl = dc->config_table.toggle.mdm_dl;
+ dc->port[PORT_DIAG].toggle_dl = dc->config_table.toggle.diag_dl;
+ DBG1("toggle ports: MDM UL:%d MDM DL:%d, DIAG DL:%d",
+ dc->port[PORT_MDM].toggle_ul,
+ dc->port[PORT_MDM].toggle_dl, dc->port[PORT_DIAG].toggle_dl);
+
+ dump_table(dc);
+
+ for (i = PORT_MDM; i < MAX_PORT; i++) {
+ memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl));
+ memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul));
+ }
+
+ /* Enable control channel */
+ dc->last_ier = dc->last_ier | CTRL_DL;
+ writew(dc->last_ier, dc->reg_ier);
+
+ dc->state = NOZOMI_STATE_ALLOCATED;
+ dev_info(&dc->pdev->dev, "Initialization OK!\n");
+ return 1;
+ }
+
+ if ((dc->config_table.version > 0)
+ && (dc->config_table.toggle.enabled != TOGGLE_VALID)) {
+ u32 offset = 0;
+ DBG1("First phase: pushing upload buffers, clearing download");
+
+ dev_info(&dc->pdev->dev, "Version of card: %d\n",
+ dc->config_table.version);
+
+ /* Here we should disable all I/O over F32. */
+ setup_memory(dc);
+
+ /*
+ * We should send ALL channel pair tokens back along
+ * with reset token
+ */
+
+ /* push upload modem buffers */
+ write_mem32(dc->port[PORT_MDM].ul_addr[CH_A],
+ (u32 *) &offset, 4);
+ write_mem32(dc->port[PORT_MDM].ul_addr[CH_B],
+ (u32 *) &offset, 4);
+
+ writew(MDM_UL | DIAG_DL | MDM_DL, dc->reg_fcr);
+
+ DBG1("First phase done");
+ }
+
+ return 1;
+}
+
+/* Enable uplink interrupts */
+static void enable_transmit_ul(enum port_type port, struct nozomi *dc)
+{
+ static const u16 mask[] = {MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL};
+
+ if (port < NOZOMI_MAX_PORTS) {
+ dc->last_ier |= mask[port];
+ writew(dc->last_ier, dc->reg_ier);
+ } else {
+ dev_err(&dc->pdev->dev, "Called with wrong port?\n");
+ }
+}
+
+/* Disable uplink interrupts */
+static void disable_transmit_ul(enum port_type port, struct nozomi *dc)
+{
+ static const u16 mask[] =
+ {~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL};
+
+ if (port < NOZOMI_MAX_PORTS) {
+ dc->last_ier &= mask[port];
+ writew(dc->last_ier, dc->reg_ier);
+ } else {
+ dev_err(&dc->pdev->dev, "Called with wrong port?\n");
+ }
+}
+
+/* Enable downlink interrupts */
+static void enable_transmit_dl(enum port_type port, struct nozomi *dc)
+{
+ static const u16 mask[] = {MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL};
+
+ if (port < NOZOMI_MAX_PORTS) {
+ dc->last_ier |= mask[port];
+ writew(dc->last_ier, dc->reg_ier);
+ } else {
+ dev_err(&dc->pdev->dev, "Called with wrong port?\n");
+ }
+}
+
+/* Disable downlink interrupts */
+static void disable_transmit_dl(enum port_type port, struct nozomi *dc)
+{
+ static const u16 mask[] =
+ {~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL};
+
+ if (port < NOZOMI_MAX_PORTS) {
+ dc->last_ier &= mask[port];
+ writew(dc->last_ier, dc->reg_ier);
+ } else {
+ dev_err(&dc->pdev->dev, "Called with wrong port?\n");
+ }
+}
+
+/*
+ * Return 1 - send buffer to card and ack.
+ * Return 0 - don't ack, don't send buffer to card.
+ */
+static int send_data(enum port_type index, struct nozomi *dc)
+{
+ u32 size = 0;
+ struct port *port = &dc->port[index];
+ const u8 toggle = port->toggle_ul;
+ void __iomem *addr = port->ul_addr[toggle];
+ const u32 ul_size = port->ul_size[toggle];
+ struct tty_struct *tty = tty_port_tty_get(&port->port);
+
+ /* Get data from tty and place in buf for now */
+ size = kfifo_out(&port->fifo_ul, dc->send_buf,
+ ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX);
+
+ if (size == 0) {
+ DBG4("No more data to send, disable link:");
+ tty_kref_put(tty);
+ return 0;
+ }
+
+ /* DUMP(buf, size); */
+
+ /* Write length + data */
+ write_mem32(addr, (u32 *) &size, 4);
+ write_mem32(addr + 4, (u32 *) dc->send_buf, size);
+
+ if (tty)
+ tty_wakeup(tty);
+
+ tty_kref_put(tty);
+ return 1;
+}
+
+/* If all data has been read, return 1, else 0 */
+static int receive_data(enum port_type index, struct nozomi *dc)
+{
+ u8 buf[RECEIVE_BUF_MAX] = { 0 };
+ int size;
+ u32 offset = 4;
+ struct port *port = &dc->port[index];
+ void __iomem *addr = port->dl_addr[port->toggle_dl];
+ struct tty_struct *tty = tty_port_tty_get(&port->port);
+ int i, ret;
+
+ if (unlikely(!tty)) {
+ DBG1("tty not open for port: %d?", index);
+ return 1;
+ }
+
+ read_mem32((u32 *) &size, addr, 4);
+ /* DBG1( "%d bytes port: %d", size, index); */
+
+ if (test_bit(TTY_THROTTLED, &tty->flags)) {
+ DBG1("No room in tty, don't read data, don't ack interrupt, "
+ "disable interrupt");
+
+ /* disable interrupt in downlink... */
+ disable_transmit_dl(index, dc);
+ ret = 0;
+ goto put;
+ }
+
+ if (unlikely(size == 0)) {
+ dev_err(&dc->pdev->dev, "size == 0?\n");
+ ret = 1;
+ goto put;
+ }
+
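+	/*
+	 * Drain the downlink buffer in RECEIVE_BUF_MAX sized chunks and
+	 * hand the data to the tty flip buffers.
+	 */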
+ while (size > 0) {
+ read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX);
+
+ if (size == 1) {
+ tty_insert_flip_char(tty, buf[0], TTY_NORMAL);
+ size = 0;
+ } else if (size < RECEIVE_BUF_MAX) {
+ size -= tty_insert_flip_string(tty, (char *) buf, size);
+ } else {
+ i = tty_insert_flip_string(tty, \
+ (char *) buf, RECEIVE_BUF_MAX);
+ size -= i;
+ offset += i;
+ }
+ }
+
+ set_bit(index, &dc->flip);
+ ret = 1;
+put:
+ tty_kref_put(tty);
+ return ret;
+}
+
+/* Debug for interrupts */
+#ifdef DEBUG
+static char *interrupt2str(u16 interrupt)
+{
+ static char buf[TMP_BUF_MAX];
+ char *p = buf;
+
+ interrupt & MDM_DL1 ? p += snprintf(p, TMP_BUF_MAX, "MDM_DL1 ") : NULL;
+ interrupt & MDM_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "MDM_DL2 ") : NULL;
+
+ interrupt & MDM_UL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "MDM_UL1 ") : NULL;
+ interrupt & MDM_UL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "MDM_UL2 ") : NULL;
+
+ interrupt & DIAG_DL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "DIAG_DL1 ") : NULL;
+ interrupt & DIAG_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "DIAG_DL2 ") : NULL;
+
+ interrupt & DIAG_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "DIAG_UL ") : NULL;
+
+ interrupt & APP1_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "APP1_DL ") : NULL;
+ interrupt & APP2_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "APP2_DL ") : NULL;
+
+ interrupt & APP1_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "APP1_UL ") : NULL;
+ interrupt & APP2_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "APP2_UL ") : NULL;
+
+ interrupt & CTRL_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "CTRL_DL ") : NULL;
+ interrupt & CTRL_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "CTRL_UL ") : NULL;
+
+ interrupt & RESET ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "RESET ") : NULL;
+
+ return buf;
+}
+#endif
+
+/*
+ * Receive flow control
+ * Return 1 - If ok, else 0
+ */
+static int receive_flow_control(struct nozomi *dc)
+{
+ enum port_type port = PORT_MDM;
+ struct ctrl_dl ctrl_dl;
+ struct ctrl_dl old_ctrl;
+ u16 enable_ier = 0;
+
+ read_mem32((u32 *) &ctrl_dl, dc->port[PORT_CTRL].dl_addr[CH_A], 2);
+
+ switch (ctrl_dl.port) {
+ case CTRL_CMD:
+ DBG1("The Base Band sends this value as a response to a "
+ "request for IMSI detach sent over the control "
+ "channel uplink (see section 7.6.1).");
+ break;
+ case CTRL_MDM:
+ port = PORT_MDM;
+ enable_ier = MDM_DL;
+ break;
+ case CTRL_DIAG:
+ port = PORT_DIAG;
+ enable_ier = DIAG_DL;
+ break;
+ case CTRL_APP1:
+ port = PORT_APP1;
+ enable_ier = APP1_DL;
+ break;
+ case CTRL_APP2:
+ port = PORT_APP2;
+ enable_ier = APP2_DL;
+ if (dc->state == NOZOMI_STATE_ALLOCATED) {
+ /*
+ * After card initialization the flow control
+ * received for APP2 is always the last
+ */
+ dc->state = NOZOMI_STATE_READY;
+ dev_info(&dc->pdev->dev, "Device READY!\n");
+ }
+ break;
+ default:
+ dev_err(&dc->pdev->dev,
+ "ERROR: flow control received for non-existing port\n");
+ return 0;
+ };
+
+ DBG1("0x%04X->0x%04X", *((u16 *)&dc->port[port].ctrl_dl),
+ *((u16 *)&ctrl_dl));
+
+ old_ctrl = dc->port[port].ctrl_dl;
+ dc->port[port].ctrl_dl = ctrl_dl;
+
+ if (old_ctrl.CTS == 1 && ctrl_dl.CTS == 0) {
+ DBG1("Disable interrupt (0x%04X) on port: %d",
+ enable_ier, port);
+ disable_transmit_ul(port, dc);
+
+ } else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) {
+
+ if (kfifo_len(&dc->port[port].fifo_ul)) {
+ DBG1("Enable interrupt (0x%04X) on port: %d",
+ enable_ier, port);
+ DBG1("Data in buffer [%d], enable transmit! ",
+ kfifo_len(&dc->port[port].fifo_ul));
+ enable_transmit_ul(port, dc);
+ } else {
+ DBG1("No data in buffer...");
+ }
+ }
+
+ if (*(u16 *)&old_ctrl == *(u16 *)&ctrl_dl) {
+ DBG1(" No change in mctrl");
+ return 1;
+ }
+ /* Update statistics */
+ if (old_ctrl.CTS != ctrl_dl.CTS)
+ dc->port[port].tty_icount.cts++;
+ if (old_ctrl.DSR != ctrl_dl.DSR)
+ dc->port[port].tty_icount.dsr++;
+ if (old_ctrl.RI != ctrl_dl.RI)
+ dc->port[port].tty_icount.rng++;
+ if (old_ctrl.DCD != ctrl_dl.DCD)
+ dc->port[port].tty_icount.dcd++;
+
+ wake_up_interruptible(&dc->port[port].tty_wait);
+
+ DBG1("port: %d DCD(%d), CTS(%d), RI(%d), DSR(%d)",
+ port,
+ dc->port[port].tty_icount.dcd, dc->port[port].tty_icount.cts,
+ dc->port[port].tty_icount.rng, dc->port[port].tty_icount.dsr);
+
+ return 1;
+}
+
+static enum ctrl_port_type port2ctrl(enum port_type port,
+ const struct nozomi *dc)
+{
+ switch (port) {
+ case PORT_MDM:
+ return CTRL_MDM;
+ case PORT_DIAG:
+ return CTRL_DIAG;
+ case PORT_APP1:
+ return CTRL_APP1;
+ case PORT_APP2:
+ return CTRL_APP2;
+ default:
+ dev_err(&dc->pdev->dev,
+ "ERROR: send flow control " \
+ "received for non-existing port\n");
+ };
+ return CTRL_ERROR;
+}
+
+/*
+ * Send flow control; only one channel can be updated per call.
+ * Return 0 - all pending flow control has been updated
+ * Return 1 - more flow control remains to be updated; ack the current
+ *            update and keep the interrupt enabled
+ */
+static int send_flow_control(struct nozomi *dc)
+{
+ u32 i, more_flow_control_to_be_updated = 0;
+ u16 *ctrl;
+
+ for (i = PORT_MDM; i < MAX_PORT; i++) {
+ if (dc->port[i].update_flow_control) {
+ if (more_flow_control_to_be_updated) {
+ /* We have more flow control to be updated */
+ return 1;
+ }
+ dc->port[i].ctrl_ul.port = port2ctrl(i, dc);
+ ctrl = (u16 *)&dc->port[i].ctrl_ul;
+ write_mem32(dc->port[PORT_CTRL].ul_addr[0], \
+ (u32 *) ctrl, 2);
+ dc->port[i].update_flow_control = 0;
+ more_flow_control_to_be_updated = 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Handle downlink data, ports that are handled are modem and diagnostics
+ * Return 1 - ok
+ * Return 0 - toggle fields are out of sync
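+ *
+ * mask1/mask2 are the IIR tokens for the port's A and B buffers; data is
+ * read first from the buffer the toggle currently points at, then from
+ * the other buffer if it is also flagged.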
+ */
+static int handle_data_dl(struct nozomi *dc, enum port_type port, u8 *toggle,
+ u16 read_iir, u16 mask1, u16 mask2)
+{
+ if (*toggle == 0 && read_iir & mask1) {
+ if (receive_data(port, dc)) {
+ writew(mask1, dc->reg_fcr);
+ *toggle = !(*toggle);
+ }
+
+ if (read_iir & mask2) {
+ if (receive_data(port, dc)) {
+ writew(mask2, dc->reg_fcr);
+ *toggle = !(*toggle);
+ }
+ }
+ } else if (*toggle == 1 && read_iir & mask2) {
+ if (receive_data(port, dc)) {
+ writew(mask2, dc->reg_fcr);
+ *toggle = !(*toggle);
+ }
+
+ if (read_iir & mask1) {
+ if (receive_data(port, dc)) {
+ writew(mask1, dc->reg_fcr);
+ *toggle = !(*toggle);
+ }
+ }
+ } else {
+ dev_err(&dc->pdev->dev, "port out of sync!, toggle:%d\n",
+ *toggle);
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Handle uplink data, this is currently for the modem port
+ * Return 1 - ok
+ * Return 0 - toggle field is out of sync
+ */
+static int handle_data_ul(struct nozomi *dc, enum port_type port, u16 read_iir)
+{
+ u8 *toggle = &(dc->port[port].toggle_ul);
+
+ if (*toggle == 0 && read_iir & MDM_UL1) {
+ dc->last_ier &= ~MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_data(port, dc)) {
+ writew(MDM_UL1, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ *toggle = !*toggle;
+ }
+
+ if (read_iir & MDM_UL2) {
+ dc->last_ier &= ~MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_data(port, dc)) {
+ writew(MDM_UL2, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ *toggle = !*toggle;
+ }
+ }
+
+ } else if (*toggle == 1 && read_iir & MDM_UL2) {
+ dc->last_ier &= ~MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_data(port, dc)) {
+ writew(MDM_UL2, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ *toggle = !*toggle;
+ }
+
+ if (read_iir & MDM_UL1) {
+ dc->last_ier &= ~MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_data(port, dc)) {
+ writew(MDM_UL1, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ *toggle = !*toggle;
+ }
+ }
+ } else {
+ writew(read_iir & MDM_UL, dc->reg_fcr);
+ dev_err(&dc->pdev->dev, "port out of sync!\n");
+ return 0;
+ }
+ return 1;
+}
+
+static irqreturn_t interrupt_handler(int irq, void *dev_id)
+{
+ struct nozomi *dc = dev_id;
+ unsigned int a;
+ u16 read_iir;
+
+ if (!dc)
+ return IRQ_NONE;
+
+ spin_lock(&dc->spin_mutex);
+ read_iir = readw(dc->reg_iir);
+
+ /* Card removed */
+ if (read_iir == (u16)-1)
+ goto none;
+ /*
+	 * Only handle interrupts that are enabled in IER
+ * (by masking with dc->last_ier)
+ */
+ read_iir &= dc->last_ier;
+
+ if (read_iir == 0)
+ goto none;
+
+
+ DBG4("%s irq:0x%04X, prev:0x%04X", interrupt2str(read_iir), read_iir,
+ dc->last_ier);
+
+ if (read_iir & RESET) {
+ if (unlikely(!nozomi_read_config_table(dc))) {
+ dc->last_ier = 0x0;
+ writew(dc->last_ier, dc->reg_ier);
+ dev_err(&dc->pdev->dev, "Could not read status from "
+ "card, we should disable interface\n");
+ } else {
+ writew(RESET, dc->reg_fcr);
+ }
+ /* No more useful info if this was the reset interrupt. */
+ goto exit_handler;
+ }
+ if (read_iir & CTRL_UL) {
+ DBG1("CTRL_UL");
+ dc->last_ier &= ~CTRL_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_flow_control(dc)) {
+ writew(CTRL_UL, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | CTRL_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ }
+ }
+ if (read_iir & CTRL_DL) {
+ receive_flow_control(dc);
+ writew(CTRL_DL, dc->reg_fcr);
+ }
+ if (read_iir & MDM_DL) {
+ if (!handle_data_dl(dc, PORT_MDM,
+ &(dc->port[PORT_MDM].toggle_dl), read_iir,
+ MDM_DL1, MDM_DL2)) {
+ dev_err(&dc->pdev->dev, "MDM_DL out of sync!\n");
+ goto exit_handler;
+ }
+ }
+ if (read_iir & MDM_UL) {
+ if (!handle_data_ul(dc, PORT_MDM, read_iir)) {
+ dev_err(&dc->pdev->dev, "MDM_UL out of sync!\n");
+ goto exit_handler;
+ }
+ }
+ if (read_iir & DIAG_DL) {
+ if (!handle_data_dl(dc, PORT_DIAG,
+ &(dc->port[PORT_DIAG].toggle_dl), read_iir,
+ DIAG_DL1, DIAG_DL2)) {
+ dev_err(&dc->pdev->dev, "DIAG_DL out of sync!\n");
+ goto exit_handler;
+ }
+ }
+ if (read_iir & DIAG_UL) {
+ dc->last_ier &= ~DIAG_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_data(PORT_DIAG, dc)) {
+ writew(DIAG_UL, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | DIAG_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ }
+ }
+ if (read_iir & APP1_DL) {
+ if (receive_data(PORT_APP1, dc))
+ writew(APP1_DL, dc->reg_fcr);
+ }
+ if (read_iir & APP1_UL) {
+ dc->last_ier &= ~APP1_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_data(PORT_APP1, dc)) {
+ writew(APP1_UL, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | APP1_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ }
+ }
+ if (read_iir & APP2_DL) {
+ if (receive_data(PORT_APP2, dc))
+ writew(APP2_DL, dc->reg_fcr);
+ }
+ if (read_iir & APP2_UL) {
+ dc->last_ier &= ~APP2_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_data(PORT_APP2, dc)) {
+ writew(APP2_UL, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | APP2_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ }
+ }
+
+exit_handler:
+ spin_unlock(&dc->spin_mutex);
+ for (a = 0; a < NOZOMI_MAX_PORTS; a++) {
+ struct tty_struct *tty;
+ if (test_and_clear_bit(a, &dc->flip)) {
+ tty = tty_port_tty_get(&dc->port[a].port);
+ if (tty)
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+ }
+ }
+ return IRQ_HANDLED;
+none:
+ spin_unlock(&dc->spin_mutex);
+ return IRQ_NONE;
+}
+
+static void nozomi_get_card_type(struct nozomi *dc)
+{
+ int i;
+ u32 size = 0;
+
+ for (i = 0; i < 6; i++)
+ size += pci_resource_len(dc->pdev, i);
+
+ /* Assume card type F32_8 if no match */
+ dc->card_type = size == 2048 ? F32_2 : F32_8;
+
+ dev_info(&dc->pdev->dev, "Card type is: %d\n", dc->card_type);
+}
+
+static void nozomi_setup_private_data(struct nozomi *dc)
+{
+ void __iomem *offset = dc->base_addr + dc->card_type / 2;
+ unsigned int i;
+
+ dc->reg_fcr = (void __iomem *)(offset + R_FCR);
+ dc->reg_iir = (void __iomem *)(offset + R_IIR);
+ dc->reg_ier = (void __iomem *)(offset + R_IER);
+ dc->last_ier = 0;
+ dc->flip = 0;
+
+ dc->port[PORT_MDM].token_dl = MDM_DL;
+ dc->port[PORT_DIAG].token_dl = DIAG_DL;
+ dc->port[PORT_APP1].token_dl = APP1_DL;
+ dc->port[PORT_APP2].token_dl = APP2_DL;
+
+ for (i = 0; i < MAX_PORT; i++)
+ init_waitqueue_head(&dc->port[i].tty_wait);
+}
+
+static ssize_t card_type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
+
+ return sprintf(buf, "%d\n", dc->card_type);
+}
+static DEVICE_ATTR(card_type, S_IRUGO, card_type_show, NULL);
+
+static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
+
+ return sprintf(buf, "%u\n", dc->open_ttys);
+}
+static DEVICE_ATTR(open_ttys, S_IRUGO, open_ttys_show, NULL);
+
+static void make_sysfs_files(struct nozomi *dc)
+{
+ if (device_create_file(&dc->pdev->dev, &dev_attr_card_type))
+ dev_err(&dc->pdev->dev,
+ "Could not create sysfs file for card_type\n");
+ if (device_create_file(&dc->pdev->dev, &dev_attr_open_ttys))
+ dev_err(&dc->pdev->dev,
+ "Could not create sysfs file for open_ttys\n");
+}
+
+static void remove_sysfs_files(struct nozomi *dc)
+{
+ device_remove_file(&dc->pdev->dev, &dev_attr_card_type);
+ device_remove_file(&dc->pdev->dev, &dev_attr_open_ttys);
+}
+
+/* Allocate memory for one device */
+static int __devinit nozomi_card_init(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ resource_size_t start;
+ int ret;
+ struct nozomi *dc = NULL;
+ int ndev_idx;
+ int i;
+
+ dev_dbg(&pdev->dev, "Init, new card found\n");
+
+ for (ndev_idx = 0; ndev_idx < ARRAY_SIZE(ndevs); ndev_idx++)
+ if (!ndevs[ndev_idx])
+ break;
+
+ if (ndev_idx >= ARRAY_SIZE(ndevs)) {
+ dev_err(&pdev->dev, "no free tty range for this card left\n");
+ ret = -EIO;
+ goto err;
+ }
+
+ dc = kzalloc(sizeof(struct nozomi), GFP_KERNEL);
+ if (unlikely(!dc)) {
+ dev_err(&pdev->dev, "Could not allocate memory\n");
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ dc->pdev = pdev;
+
+ ret = pci_enable_device(dc->pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable PCI Device\n");
+ goto err_free;
+ }
+
+ ret = pci_request_regions(dc->pdev, NOZOMI_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "I/O address 0x%04x already in use\n",
+ (int) /* nozomi_private.io_addr */ 0);
+ goto err_disable_device;
+ }
+
+ start = pci_resource_start(dc->pdev, 0);
+ if (start == 0) {
+ dev_err(&pdev->dev, "No I/O address for card detected\n");
+ ret = -ENODEV;
+ goto err_rel_regs;
+ }
+
+ /* Find out what card type it is */
+ nozomi_get_card_type(dc);
+
+ dc->base_addr = ioremap_nocache(start, dc->card_type);
+ if (!dc->base_addr) {
+ dev_err(&pdev->dev, "Unable to map card MMIO\n");
+ ret = -ENODEV;
+ goto err_rel_regs;
+ }
+
+ dc->send_buf = kmalloc(SEND_BUF_MAX, GFP_KERNEL);
+ if (!dc->send_buf) {
+ dev_err(&pdev->dev, "Could not allocate send buffer?\n");
+ ret = -ENOMEM;
+ goto err_free_sbuf;
+ }
+
+ for (i = PORT_MDM; i < MAX_PORT; i++) {
+ if (kfifo_alloc(&dc->port[i].fifo_ul, FIFO_BUFFER_SIZE_UL,
+ GFP_KERNEL)) {
+ dev_err(&pdev->dev,
+ "Could not allocate kfifo buffer\n");
+ ret = -ENOMEM;
+ goto err_free_kfifo;
+ }
+ }
+
+ spin_lock_init(&dc->spin_mutex);
+
+ nozomi_setup_private_data(dc);
+
+ /* Disable all interrupts */
+ dc->last_ier = 0;
+ writew(dc->last_ier, dc->reg_ier);
+
+ ret = request_irq(pdev->irq, &interrupt_handler, IRQF_SHARED,
+ NOZOMI_NAME, dc);
+ if (unlikely(ret)) {
+ dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
+ goto err_free_kfifo;
+ }
+
+ DBG1("base_addr: %p", dc->base_addr);
+
+ make_sysfs_files(dc);
+
+ dc->index_start = ndev_idx * MAX_PORT;
+ ndevs[ndev_idx] = dc;
+
+ pci_set_drvdata(pdev, dc);
+
+ /* Enable RESET interrupt */
+ dc->last_ier = RESET;
+ iowrite16(dc->last_ier, dc->reg_ier);
+
+ dc->state = NOZOMI_STATE_ENABLED;
+
+ for (i = 0; i < MAX_PORT; i++) {
+ struct device *tty_dev;
+ struct port *port = &dc->port[i];
+ port->dc = dc;
+ tty_port_init(&port->port);
+ port->port.ops = &noz_tty_port_ops;
+ tty_dev = tty_register_device(ntty_driver, dc->index_start + i,
+ &pdev->dev);
+
+ if (IS_ERR(tty_dev)) {
+ ret = PTR_ERR(tty_dev);
+ dev_err(&pdev->dev, "Could not allocate tty?\n");
+ goto err_free_tty;
+ }
+ }
+
+ return 0;
+
+err_free_tty:
+ for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i)
+ tty_unregister_device(ntty_driver, i);
+err_free_kfifo:
+ for (i = 0; i < MAX_PORT; i++)
+ kfifo_free(&dc->port[i].fifo_ul);
+err_free_sbuf:
+ kfree(dc->send_buf);
+ iounmap(dc->base_addr);
+err_rel_regs:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_free:
+ kfree(dc);
+err:
+ return ret;
+}
+
+static void __devexit tty_exit(struct nozomi *dc)
+{
+ unsigned int i;
+
+ DBG1(" ");
+
+ for (i = 0; i < MAX_PORT; ++i) {
+ struct tty_struct *tty = tty_port_tty_get(&dc->port[i].port);
+ if (tty && list_empty(&tty->hangup_work.entry))
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+ /* Racy below - surely should wait for scheduled work to be done or
+ complete off a hangup method ? */
+ while (dc->open_ttys)
+ msleep(1);
+ for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i)
+ tty_unregister_device(ntty_driver, i);
+}
+
+/* Deallocate memory for one device */
+static void __devexit nozomi_card_exit(struct pci_dev *pdev)
+{
+ int i;
+ struct ctrl_ul ctrl;
+ struct nozomi *dc = pci_get_drvdata(pdev);
+
+ /* Disable all interrupts */
+ dc->last_ier = 0;
+ writew(dc->last_ier, dc->reg_ier);
+
+ tty_exit(dc);
+
+	/* Send 0x0001 to command the card to resend the reset token. */
+	/* This is so we get the reset again when the module is reloaded. */
+ ctrl.port = 0x00;
+ ctrl.reserved = 0;
+ ctrl.RTS = 0;
+ ctrl.DTR = 1;
+ DBG1("sending flow control 0x%04X", *((u16 *)&ctrl));
+
+	/* Set up dc->reg addresses so we can use the register defines here */
+ write_mem32(dc->port[PORT_CTRL].ul_addr[0], (u32 *)&ctrl, 2);
+ writew(CTRL_UL, dc->reg_fcr); /* push the token to the card. */
+
+ remove_sysfs_files(dc);
+
+ free_irq(pdev->irq, dc);
+
+ for (i = 0; i < MAX_PORT; i++)
+ kfifo_free(&dc->port[i].fifo_ul);
+
+ kfree(dc->send_buf);
+
+ iounmap(dc->base_addr);
+
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+
+ ndevs[dc->index_start / MAX_PORT] = NULL;
+
+ kfree(dc);
+}
+
+static void set_rts(const struct tty_struct *tty, int rts)
+{
+ struct port *port = get_port_by_tty(tty);
+
+ port->ctrl_ul.RTS = rts;
+ port->update_flow_control = 1;
+ enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty));
+}
+
+static void set_dtr(const struct tty_struct *tty, int dtr)
+{
+ struct port *port = get_port_by_tty(tty);
+
+ DBG1("SETTING DTR index: %d, dtr: %d", tty->index, dtr);
+
+ port->ctrl_ul.DTR = dtr;
+ port->update_flow_control = 1;
+ enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty));
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * TTY code
+ * ----------------------------------------------------------------------------
+ */
+
+static int ntty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct port *port = get_port_by_tty(tty);
+ struct nozomi *dc = get_dc_by_tty(tty);
+ int ret;
+ if (!port || !dc || dc->state != NOZOMI_STATE_READY)
+ return -ENODEV;
+ ret = tty_init_termios(tty);
+ if (ret == 0) {
+ tty_driver_kref_get(driver);
+ tty->count++;
+ tty->driver_data = port;
+ driver->ttys[tty->index] = tty;
+ }
+ return ret;
+}
+
+static void ntty_cleanup(struct tty_struct *tty)
+{
+ tty->driver_data = NULL;
+}
+
+static int ntty_activate(struct tty_port *tport, struct tty_struct *tty)
+{
+ struct port *port = container_of(tport, struct port, port);
+ struct nozomi *dc = port->dc;
+ unsigned long flags;
+
+ DBG1("open: %d", port->token_dl);
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ dc->last_ier = dc->last_ier | port->token_dl;
+ writew(dc->last_ier, dc->reg_ier);
+ dc->open_ttys++;
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+ printk("noz: activated %d: %p\n", tty->index, tport);
+ return 0;
+}
+
+static int ntty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct port *port = tty->driver_data;
+ return tty_port_open(&port->port, tty, filp);
+}
+
+static void ntty_shutdown(struct tty_port *tport)
+{
+ struct port *port = container_of(tport, struct port, port);
+ struct nozomi *dc = port->dc;
+ unsigned long flags;
+
+ DBG1("close: %d", port->token_dl);
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ dc->last_ier &= ~(port->token_dl);
+ writew(dc->last_ier, dc->reg_ier);
+ dc->open_ttys--;
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+ printk("noz: shutdown %p\n", tport);
+}
+
+static void ntty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct port *port = tty->driver_data;
+ if (port)
+ tty_port_close(&port->port, tty, filp);
+}
+
+static void ntty_hangup(struct tty_struct *tty)
+{
+ struct port *port = tty->driver_data;
+ tty_port_hangup(&port->port);
+}
+
+/*
+ * Called when the userspace process writes to the tty (/dev/noz*).
+ * Data is inserted into a FIFO, which is then read and transferred to the modem.
+ */
+static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
+ int count)
+{
+ int rval = -EINVAL;
+ struct nozomi *dc = get_dc_by_tty(tty);
+ struct port *port = tty->driver_data;
+ unsigned long flags;
+
+ /* DBG1( "WRITEx: %d, index = %d", count, index); */
+
+ if (!dc || !port)
+ return -ENODEV;
+
+ rval = kfifo_in(&port->fifo_ul, (unsigned char *)buffer, count);
+
+ /* notify card */
+ if (unlikely(dc == NULL)) {
+ DBG1("No device context?");
+ goto exit;
+ }
+
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ /* CTS is only valid on the modem channel */
+ if (port == &(dc->port[PORT_MDM])) {
+ if (port->ctrl_dl.CTS) {
+ DBG4("Enable interrupt");
+ enable_transmit_ul(tty->index % MAX_PORT, dc);
+ } else {
+ dev_err(&dc->pdev->dev,
+ "CTS not active on modem port?\n");
+ }
+ } else {
+ enable_transmit_ul(tty->index % MAX_PORT, dc);
+ }
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+
+exit:
+ return rval;
+}
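
The uplink path described above (a userspace write lands in a kfifo and is later drained to the card by send_data()) can be exercised from user space with nothing more than open() and write(). A hedged sketch follows; the device node name /dev/noz0 is an assumption based on the driver's "noz" tty name, and error handling is trimmed.

/* Userspace sketch: push an AT command into the modem port's uplink FIFO. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char cmd[] = "AT\r";
        int fd = open("/dev/noz0", O_RDWR | O_NOCTTY);  /* assumed node name */

        if (fd < 0)
                return 1;
        /* The driver copies this into the port's kfifo and kicks the card. */
        if (write(fd, cmd, strlen(cmd)) < 0) {
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}
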
+
+/*
+ * Calculate how much is left in device
+ * This method is called by the upper tty layer.
+ * According to the n_tty.c sources it expects a value >= 0 and
+ * does not check for negative values.
+ *
+ * If the port is unplugged report lots of room and let the bits
+ * dribble away so we don't block anything.
+ */
+static int ntty_write_room(struct tty_struct *tty)
+{
+ struct port *port = tty->driver_data;
+ int room = 4096;
+ const struct nozomi *dc = get_dc_by_tty(tty);
+
+ if (dc)
+ room = kfifo_avail(&port->fifo_ul);
+
+ return room;
+}
+
+/* Gets io control parameters */
+static int ntty_tiocmget(struct tty_struct *tty)
+{
+ const struct port *port = tty->driver_data;
+ const struct ctrl_dl *ctrl_dl = &port->ctrl_dl;
+ const struct ctrl_ul *ctrl_ul = &port->ctrl_ul;
+
+ /* Note: these could change under us but it is not clear this
+ matters if so */
+ return (ctrl_ul->RTS ? TIOCM_RTS : 0) |
+ (ctrl_ul->DTR ? TIOCM_DTR : 0) |
+ (ctrl_dl->DCD ? TIOCM_CAR : 0) |
+ (ctrl_dl->RI ? TIOCM_RNG : 0) |
+ (ctrl_dl->DSR ? TIOCM_DSR : 0) |
+ (ctrl_dl->CTS ? TIOCM_CTS : 0);
+}
+
+/* Sets io control parameters */
+static int ntty_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct nozomi *dc = get_dc_by_tty(tty);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ if (set & TIOCM_RTS)
+ set_rts(tty, 1);
+ else if (clear & TIOCM_RTS)
+ set_rts(tty, 0);
+
+ if (set & TIOCM_DTR)
+ set_dtr(tty, 1);
+ else if (clear & TIOCM_DTR)
+ set_dtr(tty, 0);
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+
+ return 0;
+}
+
+static int ntty_cflags_changed(struct port *port, unsigned long flags,
+ struct async_icount *cprev)
+{
+ const struct async_icount cnow = port->tty_icount;
+ int ret;
+
+ ret = ((flags & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
+ ((flags & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
+ ((flags & TIOCM_CD) && (cnow.dcd != cprev->dcd)) ||
+ ((flags & TIOCM_CTS) && (cnow.cts != cprev->cts));
+
+ *cprev = cnow;
+
+ return ret;
+}
+
+static int ntty_tiocgicount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+{
+ struct port *port = tty->driver_data;
+ const struct async_icount cnow = port->tty_icount;
+
+ icount->cts = cnow.cts;
+ icount->dsr = cnow.dsr;
+ icount->rng = cnow.rng;
+ icount->dcd = cnow.dcd;
+ icount->rx = cnow.rx;
+ icount->tx = cnow.tx;
+ icount->frame = cnow.frame;
+ icount->overrun = cnow.overrun;
+ icount->parity = cnow.parity;
+ icount->brk = cnow.brk;
+ icount->buf_overrun = cnow.buf_overrun;
+ return 0;
+}
+
+static int ntty_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct port *port = tty->driver_data;
+ int rval = -ENOIOCTLCMD;
+
+ DBG1("******** IOCTL, cmd: %d", cmd);
+
+ switch (cmd) {
+ case TIOCMIWAIT: {
+ struct async_icount cprev = port->tty_icount;
+
+ rval = wait_event_interruptible(port->tty_wait,
+ ntty_cflags_changed(port, arg, &cprev));
+ break;
+ }
+ default:
+ DBG1("ERR: 0x%08X, %d", cmd, cmd);
+ break;
+ };
+
+ return rval;
+}
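
The TIOCMIWAIT branch above sleeps on port->tty_wait until one of the modem-status counters named in the argument mask changes. From user space this looks like standard serial-line usage; a hedged example follows, again assuming /dev/noz0 as the device node.

/* Userspace sketch: block until carrier (DCD) changes, then report it. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
        int fd = open("/dev/noz0", O_RDWR | O_NOCTTY);  /* assumed node name */
        int status;

        if (fd < 0)
                return 1;
        /* Sleep until DCD toggles, then read the current line state. */
        if (ioctl(fd, TIOCMIWAIT, TIOCM_CD) == 0 &&
            ioctl(fd, TIOCMGET, &status) == 0)
                printf("carrier is now %s\n", (status & TIOCM_CD) ? "up" : "down");
        close(fd);
        return 0;
}
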
+
+/*
+ * Called by the upper tty layer when tty buffers are ready
+ * to receive data again after a call to throttle.
+ */
+static void ntty_unthrottle(struct tty_struct *tty)
+{
+ struct nozomi *dc = get_dc_by_tty(tty);
+ unsigned long flags;
+
+ DBG1("UNTHROTTLE");
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ enable_transmit_dl(tty->index % MAX_PORT, dc);
+ set_rts(tty, 1);
+
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+}
+
+/*
+ * Called by the upper tty layer when the tty buffers are almost full.
+ * The driver should stop sending more data.
+ */
+static void ntty_throttle(struct tty_struct *tty)
+{
+ struct nozomi *dc = get_dc_by_tty(tty);
+ unsigned long flags;
+
+ DBG1("THROTTLE");
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ set_rts(tty, 0);
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+}
+
+/* Returns number of chars in buffer, called by tty layer */
+static s32 ntty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct port *port = tty->driver_data;
+ struct nozomi *dc = get_dc_by_tty(tty);
+ s32 rval = 0;
+
+ if (unlikely(!dc || !port)) {
+ goto exit_in_buffer;
+ }
+
+ rval = kfifo_len(&port->fifo_ul);
+
+exit_in_buffer:
+ return rval;
+}
+
+static const struct tty_port_operations noz_tty_port_ops = {
+ .activate = ntty_activate,
+ .shutdown = ntty_shutdown,
+};
+
+static const struct tty_operations tty_ops = {
+ .ioctl = ntty_ioctl,
+ .open = ntty_open,
+ .close = ntty_close,
+ .hangup = ntty_hangup,
+ .write = ntty_write,
+ .write_room = ntty_write_room,
+ .unthrottle = ntty_unthrottle,
+ .throttle = ntty_throttle,
+ .chars_in_buffer = ntty_chars_in_buffer,
+ .tiocmget = ntty_tiocmget,
+ .tiocmset = ntty_tiocmset,
+ .get_icount = ntty_tiocgicount,
+ .install = ntty_install,
+ .cleanup = ntty_cleanup,
+};
+
+/* Module initialization */
+static struct pci_driver nozomi_driver = {
+ .name = NOZOMI_NAME,
+ .id_table = nozomi_pci_tbl,
+ .probe = nozomi_card_init,
+ .remove = __devexit_p(nozomi_card_exit),
+};
+
+static __init int nozomi_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Initializing %s\n", VERSION_STRING);
+
+ ntty_driver = alloc_tty_driver(NTTY_TTY_MAXMINORS);
+ if (!ntty_driver)
+ return -ENOMEM;
+
+ ntty_driver->owner = THIS_MODULE;
+ ntty_driver->driver_name = NOZOMI_NAME_TTY;
+ ntty_driver->name = "noz";
+ ntty_driver->major = 0;
+ ntty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ ntty_driver->subtype = SERIAL_TYPE_NORMAL;
+ ntty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ ntty_driver->init_termios = tty_std_termios;
+ ntty_driver->init_termios.c_cflag = B115200 | CS8 | CREAD | \
+ HUPCL | CLOCAL;
+ ntty_driver->init_termios.c_ispeed = 115200;
+ ntty_driver->init_termios.c_ospeed = 115200;
+ tty_set_operations(ntty_driver, &tty_ops);
+
+ ret = tty_register_driver(ntty_driver);
+ if (ret) {
+ printk(KERN_ERR "Nozomi: failed to register ntty driver\n");
+ goto free_tty;
+ }
+
+ ret = pci_register_driver(&nozomi_driver);
+ if (ret) {
+ printk(KERN_ERR "Nozomi: can't register pci driver\n");
+ goto unr_tty;
+ }
+
+ return 0;
+unr_tty:
+ tty_unregister_driver(ntty_driver);
+free_tty:
+ put_tty_driver(ntty_driver);
+ return ret;
+}
+
+static __exit void nozomi_exit(void)
+{
+ printk(KERN_INFO "Unloading %s\n", DRIVER_DESC);
+ pci_unregister_driver(&nozomi_driver);
+ tty_unregister_driver(ntty_driver);
+ put_tty_driver(ntty_driver);
+}
+
+module_init(nozomi_init);
+module_exit(nozomi_exit);
+
+module_param(debug, int, S_IRUGO | S_IWUSR);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
new file mode 100644
index 00000000..e18604b3
--- /dev/null
+++ b/drivers/tty/pty.c
@@ -0,0 +1,796 @@
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Added support for a Unix98-style ptmx device.
+ * -- C. Scott Ananian <cananian@alumni.princeton.edu>, 14-Jan-1998
+ *
+ * When reading this code see also fs/devpts. In particular note that the
+ * driver_data field is used by the devpts side as a binding to the devpts
+ * inode.
+ */
+
+#include <linux/module.h>
+
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/major.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/devpts_fs.h>
+#include <linux/slab.h>
+
+#include <asm/system.h>
+
+#ifdef CONFIG_UNIX98_PTYS
+static struct tty_driver *ptm_driver;
+static struct tty_driver *pts_driver;
+#endif
+
+static void pty_close(struct tty_struct *tty, struct file *filp)
+{
+ BUG_ON(!tty);
+ if (tty->driver->subtype == PTY_TYPE_MASTER)
+ WARN_ON(tty->count > 1);
+ else {
+ if (tty->count > 2)
+ return;
+ }
+ wake_up_interruptible(&tty->read_wait);
+ wake_up_interruptible(&tty->write_wait);
+ tty->packet = 0;
+ if (!tty->link)
+ return;
+ tty->link->packet = 0;
+ set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
+ wake_up_interruptible(&tty->link->read_wait);
+ wake_up_interruptible(&tty->link->write_wait);
+ if (tty->driver->subtype == PTY_TYPE_MASTER) {
+ set_bit(TTY_OTHER_CLOSED, &tty->flags);
+#ifdef CONFIG_UNIX98_PTYS
+ if (tty->driver == ptm_driver)
+ devpts_pty_kill(tty->link);
+#endif
+ tty_unlock();
+ tty_vhangup(tty->link);
+ tty_lock();
+ }
+}
+
+/*
+ * The unthrottle routine is called by the line discipline to signal
+ * that it can receive more characters. For PTY's, the TTY_THROTTLED
+ * flag is always set, to force the line discipline to always call the
+ * unthrottle routine when there are fewer than TTY_THRESHOLD_UNTHROTTLE
+ * characters in the queue. This is necessary since each time this
+ * happens, we need to wake up any sleeping processes that could be
+ * (1) trying to send data to the pty, or (2) waiting in wait_until_sent()
+ * for the pty buffer to be drained.
+ */
+static void pty_unthrottle(struct tty_struct *tty)
+{
+ tty_wakeup(tty->link);
+ set_bit(TTY_THROTTLED, &tty->flags);
+}
+
+/**
+ * pty_space - report space left for writing
+ * @to: tty we are writing into
+ *
+ * The tty buffers allow 64K but we sneak a peek and clip at 8K; this
+ * allows a lot of overspill room for echo and other fun messes to
+ * be handled properly.
+ */
+
+static int pty_space(struct tty_struct *to)
+{
+ int n = 8192 - to->buf.memory_used;
+ if (n < 0)
+ return 0;
+ return n;
+}
+
+/**
+ * pty_write - write to a pty
+ * @tty: the tty we write from
+ * @buf: kernel buffer of data
+ * @count: bytes to write
+ *
+ * Our "hardware" write method. Data is coming from the ldisc which
+ * may be in a non sleeping state. We simply throw this at the other
+ * end of the link as if we were an IRQ handler receiving stuff for
+ * the other side of the pty/tty pair.
+ */
+
+static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
+{
+ struct tty_struct *to = tty->link;
+
+ if (tty->stopped)
+ return 0;
+
+ if (c > 0) {
+ /* Stuff the data into the input queue of the other end */
+ c = tty_insert_flip_string(to, buf, c);
+ /* And shovel */
+ if (c) {
+ tty_flip_buffer_push(to);
+ tty_wakeup(tty);
+ }
+ }
+ return c;
+}
+
+/**
+ * pty_write_room - write space
+ * @tty: tty we are writing from
+ *
+ * Report how many bytes the ldisc can send into the queue for
+ * the other device.
+ */
+
+static int pty_write_room(struct tty_struct *tty)
+{
+ if (tty->stopped)
+ return 0;
+ return pty_space(tty->link);
+}
+
+/**
+ * pty_chars_in_buffer - characters currently in our tx queue
+ * @tty: our tty
+ *
+ * Report how much we have in the transmit queue. As everything is
+ * instantly at the other end this is easy to implement.
+ */
+
+static int pty_chars_in_buffer(struct tty_struct *tty)
+{
+ return 0;
+}
+
+/* Set the lock flag on a pty */
+static int pty_set_lock(struct tty_struct *tty, int __user *arg)
+{
+ int val;
+ if (get_user(val, arg))
+ return -EFAULT;
+ if (val)
+ set_bit(TTY_PTY_LOCK, &tty->flags);
+ else
+ clear_bit(TTY_PTY_LOCK, &tty->flags);
+ return 0;
+}
+
+/* Send a signal to the slave */
+static int pty_signal(struct tty_struct *tty, int sig)
+{
+ unsigned long flags;
+ struct pid *pgrp;
+
+ if (tty->link) {
+ spin_lock_irqsave(&tty->link->ctrl_lock, flags);
+ pgrp = get_pid(tty->link->pgrp);
+ spin_unlock_irqrestore(&tty->link->ctrl_lock, flags);
+
+ kill_pgrp(pgrp, sig, 1);
+ put_pid(pgrp);
+ }
+ return 0;
+}
+
+static void pty_flush_buffer(struct tty_struct *tty)
+{
+ struct tty_struct *to = tty->link;
+ unsigned long flags;
+
+ if (!to)
+ return;
+ /* tty_buffer_flush(to); FIXME */
+ if (to->packet) {
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
+ wake_up_interruptible(&to->read_wait);
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ }
+}
+
+static int pty_open(struct tty_struct *tty, struct file *filp)
+{
+ int retval = -ENODEV;
+
+ if (!tty || !tty->link)
+ goto out;
+
+ retval = -EIO;
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ goto out;
+ if (test_bit(TTY_PTY_LOCK, &tty->link->flags))
+ goto out;
+ if (tty->link->count != 1)
+ goto out;
+
+ clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
+ set_bit(TTY_THROTTLED, &tty->flags);
+ retval = 0;
+out:
+ return retval;
+}
+
+static void pty_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios)
+{
+ tty->termios->c_cflag &= ~(CSIZE | PARENB);
+ tty->termios->c_cflag |= (CS8 | CREAD);
+}
+
+/**
+ * pty_do_resize - resize event
+ * @tty: tty being resized
+ * @ws: window size being set.
+ *
+ * Update the termios variables and send the necessary signals to
+ * perform a terminal resize correctly.
+ */
+
+int pty_resize(struct tty_struct *tty, struct winsize *ws)
+{
+ struct pid *pgrp, *rpgrp;
+ unsigned long flags;
+ struct tty_struct *pty = tty->link;
+
+ /* For a PTY we need to lock the tty side */
+ mutex_lock(&tty->termios_mutex);
+ if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
+ goto done;
+
+ /* Get the PID values and reference them so we can
+ avoid holding the tty ctrl lock while sending signals.
+ We need to lock these individually however. */
+
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ pgrp = get_pid(tty->pgrp);
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
+ spin_lock_irqsave(&pty->ctrl_lock, flags);
+ rpgrp = get_pid(pty->pgrp);
+ spin_unlock_irqrestore(&pty->ctrl_lock, flags);
+
+ if (pgrp)
+ kill_pgrp(pgrp, SIGWINCH, 1);
+ if (rpgrp != pgrp && rpgrp)
+ kill_pgrp(rpgrp, SIGWINCH, 1);
+
+ put_pid(pgrp);
+ put_pid(rpgrp);
+
+ tty->winsize = *ws;
+ pty->winsize = *ws; /* Never used so will go away soon */
+done:
+ mutex_unlock(&tty->termios_mutex);
+ return 0;
+}
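
pty_resize() is reached through the generic TIOCSWINSZ handling (tty->ops->resize); resizing from the master is how terminal emulators propagate window changes to the foreground job on the slave. A hedged userspace sketch of that call, where master_fd is assumed to be an already-open pty master descriptor:

/* Userspace sketch: resize a pty from the master side (delivers SIGWINCH). */
#include <sys/ioctl.h>

static int resize_pty(int master_fd, unsigned short rows, unsigned short cols)
{
        struct winsize ws = {
                .ws_row = rows,
                .ws_col = cols,
        };

        /* Ends up in pty_resize() above, which signals both process groups. */
        return ioctl(master_fd, TIOCSWINSZ, &ws);
}
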
+
+/* Traditional BSD devices */
+#ifdef CONFIG_LEGACY_PTYS
+
+static int pty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct tty_struct *o_tty;
+ int idx = tty->index;
+ int retval;
+
+ o_tty = alloc_tty_struct();
+ if (!o_tty)
+ return -ENOMEM;
+ if (!try_module_get(driver->other->owner)) {
+ /* This cannot in fact currently happen */
+ retval = -ENOMEM;
+ goto err_free_tty;
+ }
+ initialize_tty_struct(o_tty, driver->other, idx);
+
+ /* We always use new tty termios data so we can do this
+ the easy way .. */
+ retval = tty_init_termios(tty);
+ if (retval)
+ goto err_deinit_tty;
+
+ retval = tty_init_termios(o_tty);
+ if (retval)
+ goto err_free_termios;
+
+ /*
+ * Everything allocated ... set up the o_tty structure.
+ */
+ driver->other->ttys[idx] = o_tty;
+ tty_driver_kref_get(driver->other);
+ if (driver->subtype == PTY_TYPE_MASTER)
+ o_tty->count++;
+ /* Establish the links in both directions */
+ tty->link = o_tty;
+ o_tty->link = tty;
+
+ tty_driver_kref_get(driver);
+ tty->count++;
+ driver->ttys[idx] = tty;
+ return 0;
+err_free_termios:
+ tty_free_termios(tty);
+err_deinit_tty:
+ deinitialize_tty_struct(o_tty);
+ module_put(o_tty->driver->owner);
+err_free_tty:
+ free_tty_struct(o_tty);
+ return retval;
+}
+
+static int pty_bsd_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case TIOCSPTLCK: /* Set PT Lock (disallow slave open) */
+ return pty_set_lock(tty, (int __user *) arg);
+ case TIOCSIG: /* Send signal to other side of pty */
+ return pty_signal(tty, (int) arg);
+ }
+ return -ENOIOCTLCMD;
+}
+
+static int legacy_count = CONFIG_LEGACY_PTY_COUNT;
+module_param(legacy_count, int, 0);
+
+/*
+ * The master side of a pty can do TIOCSPTLCK and thus
+ * has pty_bsd_ioctl.
+ */
+static const struct tty_operations master_pty_ops_bsd = {
+ .install = pty_install,
+ .open = pty_open,
+ .close = pty_close,
+ .write = pty_write,
+ .write_room = pty_write_room,
+ .flush_buffer = pty_flush_buffer,
+ .chars_in_buffer = pty_chars_in_buffer,
+ .unthrottle = pty_unthrottle,
+ .set_termios = pty_set_termios,
+ .ioctl = pty_bsd_ioctl,
+ .resize = pty_resize
+};
+
+static const struct tty_operations slave_pty_ops_bsd = {
+ .install = pty_install,
+ .open = pty_open,
+ .close = pty_close,
+ .write = pty_write,
+ .write_room = pty_write_room,
+ .flush_buffer = pty_flush_buffer,
+ .chars_in_buffer = pty_chars_in_buffer,
+ .unthrottle = pty_unthrottle,
+ .set_termios = pty_set_termios,
+ .resize = pty_resize
+};
+
+static void __init legacy_pty_init(void)
+{
+ struct tty_driver *pty_driver, *pty_slave_driver;
+
+ if (legacy_count <= 0)
+ return;
+
+ pty_driver = alloc_tty_driver(legacy_count);
+ if (!pty_driver)
+ panic("Couldn't allocate pty driver");
+
+ pty_slave_driver = alloc_tty_driver(legacy_count);
+ if (!pty_slave_driver)
+ panic("Couldn't allocate pty slave driver");
+
+ pty_driver->owner = THIS_MODULE;
+ pty_driver->driver_name = "pty_master";
+ pty_driver->name = "pty";
+ pty_driver->major = PTY_MASTER_MAJOR;
+ pty_driver->minor_start = 0;
+ pty_driver->type = TTY_DRIVER_TYPE_PTY;
+ pty_driver->subtype = PTY_TYPE_MASTER;
+ pty_driver->init_termios = tty_std_termios;
+ pty_driver->init_termios.c_iflag = 0;
+ pty_driver->init_termios.c_oflag = 0;
+ pty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
+ pty_driver->init_termios.c_lflag = 0;
+ pty_driver->init_termios.c_ispeed = 38400;
+ pty_driver->init_termios.c_ospeed = 38400;
+ pty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW;
+ pty_driver->other = pty_slave_driver;
+ tty_set_operations(pty_driver, &master_pty_ops_bsd);
+
+ pty_slave_driver->owner = THIS_MODULE;
+ pty_slave_driver->driver_name = "pty_slave";
+ pty_slave_driver->name = "ttyp";
+ pty_slave_driver->major = PTY_SLAVE_MAJOR;
+ pty_slave_driver->minor_start = 0;
+ pty_slave_driver->type = TTY_DRIVER_TYPE_PTY;
+ pty_slave_driver->subtype = PTY_TYPE_SLAVE;
+ pty_slave_driver->init_termios = tty_std_termios;
+ pty_slave_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
+ pty_slave_driver->init_termios.c_ispeed = 38400;
+ pty_slave_driver->init_termios.c_ospeed = 38400;
+ pty_slave_driver->flags = TTY_DRIVER_RESET_TERMIOS |
+ TTY_DRIVER_REAL_RAW;
+ pty_slave_driver->other = pty_driver;
+ tty_set_operations(pty_slave_driver, &slave_pty_ops_bsd);
+
+ if (tty_register_driver(pty_driver))
+ panic("Couldn't register pty driver");
+ if (tty_register_driver(pty_slave_driver))
+ panic("Couldn't register pty slave driver");
+}
+#else
+static inline void legacy_pty_init(void) { }
+#endif
+
+/* Unix98 devices */
+#ifdef CONFIG_UNIX98_PTYS
+/*
+ * sysctl support for setting limits on the number of Unix98 ptys allocated.
+ * Otherwise one can eat up all kernel memory by opening /dev/ptmx repeatedly.
+ */
+int pty_limit = NR_UNIX98_PTY_DEFAULT;
+static int pty_limit_min;
+static int pty_limit_max = NR_UNIX98_PTY_MAX;
+static int tty_count;
+static int pty_count;
+
+static inline void pty_inc_count(void)
+{
+ pty_count = (++tty_count) / 2;
+}
+
+static inline void pty_dec_count(void)
+{
+ pty_count = (--tty_count) / 2;
+}
+
+static struct cdev ptmx_cdev;
+
+static struct ctl_table pty_table[] = {
+ {
+ .procname = "max",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .data = &pty_limit,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &pty_limit_min,
+ .extra2 = &pty_limit_max,
+ }, {
+ .procname = "nr",
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .data = &pty_count,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+
+static struct ctl_table pty_kern_table[] = {
+ {
+ .procname = "pty",
+ .mode = 0555,
+ .child = pty_table,
+ },
+ {}
+};
+
+static struct ctl_table pty_root_table[] = {
+ {
+ .procname = "kernel",
+ .mode = 0555,
+ .child = pty_kern_table,
+ },
+ {}
+};
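
The nesting of the three ctl_table arrays above places the limits under kernel.pty, i.e. /proc/sys/kernel/pty/max (writable) and /proc/sys/kernel/pty/nr (read-only). A small userspace sketch that reads both via procfs:

/* Userspace sketch: read the Unix98 pty limit and the current count. */
#include <stdio.h>

static long read_sysctl_long(const char *path)
{
        FILE *f = fopen(path, "r");
        long val = -1;

        if (f) {
                if (fscanf(f, "%ld", &val) != 1)
                        val = -1;
                fclose(f);
        }
        return val;
}

int main(void)
{
        printf("pty limit:   %ld\n", read_sysctl_long("/proc/sys/kernel/pty/max"));
        printf("ptys in use: %ld\n", read_sysctl_long("/proc/sys/kernel/pty/nr"));
        return 0;
}
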
+
+
+static int pty_unix98_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case TIOCSPTLCK: /* Set PT Lock (disallow slave open) */
+ return pty_set_lock(tty, (int __user *)arg);
+ case TIOCGPTN: /* Get PT Number */
+ return put_user(tty->index, (unsigned int __user *)arg);
+ case TIOCSIG: /* Send signal to other side of pty */
+ return pty_signal(tty, (int) arg);
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+/**
+ * ptm_unix98_lookup - find a pty master
+ * @driver: ptm driver
+ * @idx: tty index
+ *
+ * Look up a pty master device. Called under the tty_mutex for now.
+ * This provides our locking.
+ */
+
+static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
+ struct inode *ptm_inode, int idx)
+{
+ struct tty_struct *tty = devpts_get_tty(ptm_inode, idx);
+ if (tty)
+ tty = tty->link;
+ return tty;
+}
+
+/**
+ * pts_unix98_lookup - find a pty slave
+ * @driver: pts driver
+ * @idx: tty index
+ *
+ * Look up a pty slave device. Called under the tty_mutex for now.
+ * This provides our locking.
+ */
+
+static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
+ struct inode *pts_inode, int idx)
+{
+ struct tty_struct *tty = devpts_get_tty(pts_inode, idx);
+ /* Master must be open before slave */
+ if (!tty)
+ return ERR_PTR(-EIO);
+ return tty;
+}
+
+static void pty_unix98_shutdown(struct tty_struct *tty)
+{
+ tty_driver_remove_tty(tty->driver, tty);
+ /* We have our own method as we don't use the tty index */
+ kfree(tty->termios);
+}
+
+/* We have no need to install and remove our tty objects as devpts does all
+ the work for us */
+
+static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct tty_struct *o_tty;
+ int idx = tty->index;
+
+ o_tty = alloc_tty_struct();
+ if (!o_tty)
+ return -ENOMEM;
+ if (!try_module_get(driver->other->owner)) {
+ /* This cannot in fact currently happen */
+ goto err_free_tty;
+ }
+ initialize_tty_struct(o_tty, driver->other, idx);
+
+ tty->termios = kzalloc(sizeof(struct ktermios[2]), GFP_KERNEL);
+ if (tty->termios == NULL)
+ goto err_free_mem;
+ *tty->termios = driver->init_termios;
+ tty->termios_locked = tty->termios + 1;
+
+ o_tty->termios = kzalloc(sizeof(struct ktermios[2]), GFP_KERNEL);
+ if (o_tty->termios == NULL)
+ goto err_free_mem;
+ *o_tty->termios = driver->other->init_termios;
+ o_tty->termios_locked = o_tty->termios + 1;
+
+ tty_driver_kref_get(driver->other);
+ if (driver->subtype == PTY_TYPE_MASTER)
+ o_tty->count++;
+ /* Establish the links in both directions */
+ tty->link = o_tty;
+ o_tty->link = tty;
+ /*
+ * All structures have been allocated, so now we install them.
+ * Failures after this point use release_tty to clean up, so
+ * there's no need to null out the local pointers.
+ */
+ tty_driver_kref_get(driver);
+ tty->count++;
+ pty_inc_count(); /* tty */
+ pty_inc_count(); /* tty->link */
+ return 0;
+err_free_mem:
+ deinitialize_tty_struct(o_tty);
+ kfree(o_tty->termios);
+ kfree(tty->termios);
+ module_put(o_tty->driver->owner);
+err_free_tty:
+ free_tty_struct(o_tty);
+ return -ENOMEM;
+}
+
+static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
+{
+ pty_dec_count();
+}
+
+static const struct tty_operations ptm_unix98_ops = {
+ .lookup = ptm_unix98_lookup,
+ .install = pty_unix98_install,
+ .remove = pty_unix98_remove,
+ .open = pty_open,
+ .close = pty_close,
+ .write = pty_write,
+ .write_room = pty_write_room,
+ .flush_buffer = pty_flush_buffer,
+ .chars_in_buffer = pty_chars_in_buffer,
+ .unthrottle = pty_unthrottle,
+ .set_termios = pty_set_termios,
+ .ioctl = pty_unix98_ioctl,
+ .shutdown = pty_unix98_shutdown,
+ .resize = pty_resize
+};
+
+static const struct tty_operations pty_unix98_ops = {
+ .lookup = pts_unix98_lookup,
+ .install = pty_unix98_install,
+ .remove = pty_unix98_remove,
+ .open = pty_open,
+ .close = pty_close,
+ .write = pty_write,
+ .write_room = pty_write_room,
+ .flush_buffer = pty_flush_buffer,
+ .chars_in_buffer = pty_chars_in_buffer,
+ .unthrottle = pty_unthrottle,
+ .set_termios = pty_set_termios,
+ .shutdown = pty_unix98_shutdown
+};
+
+/**
+ * ptmx_open - open a unix 98 pty master
+ * @inode: inode of device file
+ * @filp: file pointer to tty
+ *
+ * Allocate a unix98 pty master device from the ptmx driver.
+ *
+ * Locking: tty_mutex protects the init_dev work. tty->count should
+ * protect the rest.
+ * allocated_ptys_lock handles the list of free pty numbers
+ */
+
+static int ptmx_open(struct inode *inode, struct file *filp)
+{
+ struct tty_struct *tty;
+ int retval;
+ int index;
+
+ nonseekable_open(inode, filp);
+
+ retval = tty_alloc_file(filp);
+ if (retval)
+ return retval;
+
+ /* find a device that is not in use. */
+ tty_lock();
+ index = devpts_new_index(inode);
+ tty_unlock();
+ if (index < 0) {
+ retval = index;
+ goto err_file;
+ }
+
+ mutex_lock(&tty_mutex);
+ tty_lock();
+ tty = tty_init_dev(ptm_driver, index, 1);
+ mutex_unlock(&tty_mutex);
+
+ if (IS_ERR(tty)) {
+ retval = PTR_ERR(tty);
+ goto out;
+ }
+
+ set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+
+ tty_add_file(tty, filp);
+
+ retval = devpts_pty_new(inode, tty->link);
+ if (retval)
+ goto err_release;
+
+ retval = ptm_driver->ops->open(tty, filp);
+ if (retval)
+ goto err_release;
+
+ tty_unlock();
+ return 0;
+err_release:
+ tty_unlock();
+ tty_release(inode, filp);
+ return retval;
+out:
+ devpts_kill_index(inode, index);
+ tty_unlock();
+err_file:
+ tty_free_file(filp);
+ return retval;
+}
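
Seen from user space, ptmx_open() plus the TIOCSPTLCK/TIOCGPTN ioctls are roughly what glibc's unlockpt() and ptsname() do on Linux. A hedged sketch of the raw sequence follows (error handling trimmed; the ioctl constants come from the kernel headers pulled in by <sys/ioctl.h> on typical systems):

/* Userspace sketch: open a Unix98 pty pair via /dev/ptmx directly. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
        char slave_path[32];
        unsigned int idx;
        int unlock = 0;
        int master, slave;

        master = open("/dev/ptmx", O_RDWR | O_NOCTTY);
        if (master < 0)
                return 1;
        if (ioctl(master, TIOCSPTLCK, &unlock) < 0 ||   /* clear TTY_PTY_LOCK */
            ioctl(master, TIOCGPTN, &idx) < 0)          /* slave index */
                return 1;
        snprintf(slave_path, sizeof(slave_path), "/dev/pts/%u", idx);
        slave = open(slave_path, O_RDWR | O_NOCTTY);
        if (slave < 0)
                return 1;
        printf("master fd %d, slave %s\n", master, slave_path);
        close(slave);
        close(master);
        return 0;
}

Anything written on the master then shows up as input on the slave (and vice versa), which is the hand-off pty_write() implements with tty_insert_flip_string().
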
+
+static struct file_operations ptmx_fops;
+
+static void __init unix98_pty_init(void)
+{
+ ptm_driver = alloc_tty_driver(NR_UNIX98_PTY_MAX);
+ if (!ptm_driver)
+ panic("Couldn't allocate Unix98 ptm driver");
+ pts_driver = alloc_tty_driver(NR_UNIX98_PTY_MAX);
+ if (!pts_driver)
+ panic("Couldn't allocate Unix98 pts driver");
+
+ ptm_driver->owner = THIS_MODULE;
+ ptm_driver->driver_name = "pty_master";
+ ptm_driver->name = "ptm";
+ ptm_driver->major = UNIX98_PTY_MASTER_MAJOR;
+ ptm_driver->minor_start = 0;
+ ptm_driver->type = TTY_DRIVER_TYPE_PTY;
+ ptm_driver->subtype = PTY_TYPE_MASTER;
+ ptm_driver->init_termios = tty_std_termios;
+ ptm_driver->init_termios.c_iflag = 0;
+ ptm_driver->init_termios.c_oflag = 0;
+ ptm_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
+ ptm_driver->init_termios.c_lflag = 0;
+ ptm_driver->init_termios.c_ispeed = 38400;
+ ptm_driver->init_termios.c_ospeed = 38400;
+ ptm_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_DEVPTS_MEM;
+ ptm_driver->other = pts_driver;
+ tty_set_operations(ptm_driver, &ptm_unix98_ops);
+
+ pts_driver->owner = THIS_MODULE;
+ pts_driver->driver_name = "pty_slave";
+ pts_driver->name = "pts";
+ pts_driver->major = UNIX98_PTY_SLAVE_MAJOR;
+ pts_driver->minor_start = 0;
+ pts_driver->type = TTY_DRIVER_TYPE_PTY;
+ pts_driver->subtype = PTY_TYPE_SLAVE;
+ pts_driver->init_termios = tty_std_termios;
+ pts_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
+ pts_driver->init_termios.c_ispeed = 38400;
+ pts_driver->init_termios.c_ospeed = 38400;
+ pts_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_DEVPTS_MEM;
+ pts_driver->other = ptm_driver;
+ tty_set_operations(pts_driver, &pty_unix98_ops);
+
+ if (tty_register_driver(ptm_driver))
+ panic("Couldn't register Unix98 ptm driver");
+ if (tty_register_driver(pts_driver))
+ panic("Couldn't register Unix98 pts driver");
+
+ register_sysctl_table(pty_root_table);
+
+ /* Now create the /dev/ptmx special device */
+ tty_default_fops(&ptmx_fops);
+ ptmx_fops.open = ptmx_open;
+
+ cdev_init(&ptmx_cdev, &ptmx_fops);
+ if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
+ register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
+ panic("Couldn't register /dev/ptmx driver\n");
+ device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 2), NULL, "ptmx");
+}
+
+#else
+static inline void unix98_pty_init(void) { }
+#endif
+
+static int __init pty_init(void)
+{
+ legacy_pty_init();
+ unix98_pty_init();
+ return 0;
+}
+module_init(pty_init);
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
new file mode 100644
index 00000000..13043e8d
--- /dev/null
+++ b/drivers/tty/rocket.c
@@ -0,0 +1,3152 @@
+/*
+ * RocketPort device driver for Linux
+ *
+ * Written by Theodore Ts'o, 1995, 1996, 1997, 1998, 1999, 2000.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2003 by Comtrol, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Kernel Synchronization:
+ *
+ * This driver has two kernel control paths: exception handlers (calls into the driver
+ * from user mode) and the timer bottom half (tasklet). This is a polled driver; interrupts
+ * are not used.
+ *
+ * Critical data:
+ * - rp_table[], accessed through passed "info" pointers, is a global (static) array of
+ * serial port state information and the xmit_buf circular buffer. Protected by
+ * a per port spinlock.
+ * - xmit_flags[], an array of ints indexed by line (port) number, indicating that there
+ * is data to be transmitted. Protected by atomic bit operations.
+ * - rp_num_ports, int indicating number of open ports, protected by atomic operations.
+ *
+ * rp_write() and rp_write_char() functions use a per port semaphore to protect against
+ * simultaneous access to the same port by more than one process.
+ */
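
The xmit_flags[] hand-off mentioned above is a lock-free pattern: the write path sets the port's bit with an atomic bit op and the poll loop consumes it with test_and_clear_bit(). A standalone sketch of the same idea using C11 atomics (an illustration only, not the kernel bitops the driver actually uses):

/* Sketch of the "producer sets a bit, poll loop test-and-clears it" hand-off. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint xmit_pending;        /* one bit per port, like xmit_flags[] */

static void mark_port_has_data(unsigned int port)
{
        atomic_fetch_or(&xmit_pending, 1u << port);     /* write/ioctl path */
}

static bool poll_port_and_clear(unsigned int port)
{
        unsigned int bit = 1u << port;

        /* Poll loop: consume the flag exactly once, no lock required. */
        return atomic_fetch_and(&xmit_pending, ~bit) & bit;
}
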
+
+/****** Defines ******/
+#define ROCKET_PARANOIA_CHECK
+#define ROCKET_DISABLE_SIMUSAGE
+
+#undef ROCKET_SOFT_FLOW
+#undef ROCKET_DEBUG_OPEN
+#undef ROCKET_DEBUG_INTR
+#undef ROCKET_DEBUG_WRITE
+#undef ROCKET_DEBUG_FLOW
+#undef ROCKET_DEBUG_THROTTLE
+#undef ROCKET_DEBUG_WAIT_UNTIL_SENT
+#undef ROCKET_DEBUG_RECEIVE
+#undef ROCKET_DEBUG_HANGUP
+#undef REV_PCI_ORDER
+#undef ROCKET_DEBUG_IO
+
+#define POLL_PERIOD HZ/100 /* Polling period .01 seconds (10ms) */
+
+/****** Kernel includes ******/
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/major.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/mutex.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <asm/atomic.h>
+#include <asm/unaligned.h>
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+
+/****** RocketPort includes ******/
+
+#include "rocket_int.h"
+#include "rocket.h"
+
+#define ROCKET_VERSION "2.09"
+#define ROCKET_DATE "12-June-2003"
+
+/****** RocketPort Local Variables ******/
+
+static void rp_do_poll(unsigned long dummy);
+
+static struct tty_driver *rocket_driver;
+
+static struct rocket_version driver_version = {
+ ROCKET_VERSION, ROCKET_DATE
+};
+
+static struct r_port *rp_table[MAX_RP_PORTS]; /* The main repository of serial port state information. */
+static unsigned int xmit_flags[NUM_BOARDS]; /* Bit significant, indicates port had data to transmit. */
+						/* e.g. bit 0 indicates port 0 has xmit data, ... */
+static atomic_t rp_num_ports_open; /* Number of serial ports open */
+static DEFINE_TIMER(rocket_timer, rp_do_poll, 0, 0);
+
+static unsigned long board1; /* ISA addresses, retrieved from rocketport.conf */
+static unsigned long board2;
+static unsigned long board3;
+static unsigned long board4;
+static unsigned long controller;
+static int support_low_speed;
+static unsigned long modem1;
+static unsigned long modem2;
+static unsigned long modem3;
+static unsigned long modem4;
+static unsigned long pc104_1[8];
+static unsigned long pc104_2[8];
+static unsigned long pc104_3[8];
+static unsigned long pc104_4[8];
+static unsigned long *pc104[4] = { pc104_1, pc104_2, pc104_3, pc104_4 };
+
+static int rp_baud_base[NUM_BOARDS]; /* Board config info (Someday make a per-board structure) */
+static unsigned long rcktpt_io_addr[NUM_BOARDS];
+static int rcktpt_type[NUM_BOARDS];
+static int is_PCI[NUM_BOARDS];
+static rocketModel_t rocketModel[NUM_BOARDS];
+static int max_board;
+static const struct tty_port_operations rocket_port_ops;
+
+/*
+ * The following arrays define the interrupt bits corresponding to each AIOP.
+ * These bits are different between the ISA and regular PCI boards and the
+ * Universal PCI boards.
+ */
+
+static Word_t aiop_intr_bits[AIOP_CTL_SIZE] = {
+ AIOP_INTR_BIT_0,
+ AIOP_INTR_BIT_1,
+ AIOP_INTR_BIT_2,
+ AIOP_INTR_BIT_3
+};
+
+static Word_t upci_aiop_intr_bits[AIOP_CTL_SIZE] = {
+ UPCI_AIOP_INTR_BIT_0,
+ UPCI_AIOP_INTR_BIT_1,
+ UPCI_AIOP_INTR_BIT_2,
+ UPCI_AIOP_INTR_BIT_3
+};
+
+static Byte_t RData[RDATASIZE] = {
+ 0x00, 0x09, 0xf6, 0x82,
+ 0x02, 0x09, 0x86, 0xfb,
+ 0x04, 0x09, 0x00, 0x0a,
+ 0x06, 0x09, 0x01, 0x0a,
+ 0x08, 0x09, 0x8a, 0x13,
+ 0x0a, 0x09, 0xc5, 0x11,
+ 0x0c, 0x09, 0x86, 0x85,
+ 0x0e, 0x09, 0x20, 0x0a,
+ 0x10, 0x09, 0x21, 0x0a,
+ 0x12, 0x09, 0x41, 0xff,
+ 0x14, 0x09, 0x82, 0x00,
+ 0x16, 0x09, 0x82, 0x7b,
+ 0x18, 0x09, 0x8a, 0x7d,
+ 0x1a, 0x09, 0x88, 0x81,
+ 0x1c, 0x09, 0x86, 0x7a,
+ 0x1e, 0x09, 0x84, 0x81,
+ 0x20, 0x09, 0x82, 0x7c,
+ 0x22, 0x09, 0x0a, 0x0a
+};
+
+static Byte_t RRegData[RREGDATASIZE] = {
+ 0x00, 0x09, 0xf6, 0x82, /* 00: Stop Rx processor */
+ 0x08, 0x09, 0x8a, 0x13, /* 04: Tx software flow control */
+ 0x0a, 0x09, 0xc5, 0x11, /* 08: XON char */
+ 0x0c, 0x09, 0x86, 0x85, /* 0c: XANY */
+ 0x12, 0x09, 0x41, 0xff, /* 10: Rx mask char */
+ 0x14, 0x09, 0x82, 0x00, /* 14: Compare/Ignore #0 */
+ 0x16, 0x09, 0x82, 0x7b, /* 18: Compare #1 */
+ 0x18, 0x09, 0x8a, 0x7d, /* 1c: Compare #2 */
+ 0x1a, 0x09, 0x88, 0x81, /* 20: Interrupt #1 */
+ 0x1c, 0x09, 0x86, 0x7a, /* 24: Ignore/Replace #1 */
+ 0x1e, 0x09, 0x84, 0x81, /* 28: Interrupt #2 */
+ 0x20, 0x09, 0x82, 0x7c, /* 2c: Ignore/Replace #2 */
+ 0x22, 0x09, 0x0a, 0x0a /* 30: Rx FIFO Enable */
+};
+
+static CONTROLLER_T sController[CTL_SIZE] = {
+ {-1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0},
+ {0, 0, 0, 0}, {-1, -1, -1, -1}, {0, 0, 0, 0}},
+ {-1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0},
+ {0, 0, 0, 0}, {-1, -1, -1, -1}, {0, 0, 0, 0}},
+ {-1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0},
+ {0, 0, 0, 0}, {-1, -1, -1, -1}, {0, 0, 0, 0}},
+ {-1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0},
+ {0, 0, 0, 0}, {-1, -1, -1, -1}, {0, 0, 0, 0}}
+};
+
+static Byte_t sBitMapClrTbl[8] = {
+ 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f
+};
+
+static Byte_t sBitMapSetTbl[8] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
+};
+
+static int sClockPrescale = 0x14;
+
+/*
+ * Line number is the ttyRx number (x), the minor number. We
+ * assign them sequentially, starting at zero. The following
+ * array keeps track of the line number assigned to a given board/aiop/channel.
+ */
+static unsigned char lineNumbers[MAX_RP_PORTS];
+static unsigned long nextLineNumber;
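
A hedged illustration of what the lineNumbers[]/nextLineNumber pair is for: the first time a (board, aiop, channel) triple is registered it is handed the next free line, and later lookups return that same number. The flat index below (4 AIOPs per board, 8 channels per AIOP) is an assumption for illustration only; the real GetLineNumber()/SetLineNumber() are defined elsewhere in the file.

/* Illustrative only: sequential line-number assignment per board/AIOP/channel. */
static unsigned int flat_index(int board, int aiop, int chan)
{
        return (board * 4 + aiop) * 8 + chan;   /* assumed geometry */
}

static unsigned char assign_line(int board, int aiop, int chan)
{
        lineNumbers[flat_index(board, aiop, chan)] = nextLineNumber;
        return nextLineNumber++;
}
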
+
+/***** RocketPort Static Prototypes *********/
+static int __init init_ISA(int i);
+static void rp_wait_until_sent(struct tty_struct *tty, int timeout);
+static void rp_flush_buffer(struct tty_struct *tty);
+static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model);
+static unsigned char GetLineNumber(int ctrl, int aiop, int ch);
+static unsigned char SetLineNumber(int ctrl, int aiop, int ch);
+static void rp_start(struct tty_struct *tty);
+static int sInitChan(CONTROLLER_T * CtlP, CHANNEL_T * ChP, int AiopNum,
+ int ChanNum);
+static void sSetInterfaceMode(CHANNEL_T * ChP, Byte_t mode);
+static void sFlushRxFIFO(CHANNEL_T * ChP);
+static void sFlushTxFIFO(CHANNEL_T * ChP);
+static void sEnInterrupts(CHANNEL_T * ChP, Word_t Flags);
+static void sDisInterrupts(CHANNEL_T * ChP, Word_t Flags);
+static void sModemReset(CONTROLLER_T * CtlP, int chan, int on);
+static void sPCIModemReset(CONTROLLER_T * CtlP, int chan, int on);
+static int sWriteTxPrioByte(CHANNEL_T * ChP, Byte_t Data);
+static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
+ ByteIO_t * AiopIOList, int AiopIOListSize,
+ WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
+ int PeriodicOnly, int altChanRingIndicator,
+ int UPCIRingInd);
+static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO,
+ ByteIO_t * AiopIOList, int AiopIOListSize,
+ int IRQNum, Byte_t Frequency, int PeriodicOnly);
+static int sReadAiopID(ByteIO_t io);
+static int sReadAiopNumChan(WordIO_t io);
+
+MODULE_AUTHOR("Theodore Ts'o");
+MODULE_DESCRIPTION("Comtrol RocketPort driver");
+module_param(board1, ulong, 0);
+MODULE_PARM_DESC(board1, "I/O port for (ISA) board #1");
+module_param(board2, ulong, 0);
+MODULE_PARM_DESC(board2, "I/O port for (ISA) board #2");
+module_param(board3, ulong, 0);
+MODULE_PARM_DESC(board3, "I/O port for (ISA) board #3");
+module_param(board4, ulong, 0);
+MODULE_PARM_DESC(board4, "I/O port for (ISA) board #4");
+module_param(controller, ulong, 0);
+MODULE_PARM_DESC(controller, "I/O port for (ISA) rocketport controller");
+module_param(support_low_speed, bool, 0);
+MODULE_PARM_DESC(support_low_speed, "1 means support 50 baud, 0 means support 460400 baud");
+module_param(modem1, ulong, 0);
+MODULE_PARM_DESC(modem1, "1 means (ISA) board #1 is a RocketModem");
+module_param(modem2, ulong, 0);
+MODULE_PARM_DESC(modem2, "1 means (ISA) board #2 is a RocketModem");
+module_param(modem3, ulong, 0);
+MODULE_PARM_DESC(modem3, "1 means (ISA) board #3 is a RocketModem");
+module_param(modem4, ulong, 0);
+MODULE_PARM_DESC(modem4, "1 means (ISA) board #4 is a RocketModem");
+module_param_array(pc104_1, ulong, NULL, 0);
+MODULE_PARM_DESC(pc104_1, "set interface types for ISA(PC104) board #1 (e.g. pc104_1=232,232,485,485,...)");
+module_param_array(pc104_2, ulong, NULL, 0);
+MODULE_PARM_DESC(pc104_2, "set interface types for ISA(PC104) board #2 (e.g. pc104_2=232,232,485,485,...)");
+module_param_array(pc104_3, ulong, NULL, 0);
+MODULE_PARM_DESC(pc104_3, "set interface types for ISA(PC104) board #3 (e.g. pc104_3=232,232,485,485,...)");
+module_param_array(pc104_4, ulong, NULL, 0);
+MODULE_PARM_DESC(pc104_4, "set interface types for ISA(PC104) board #4 (e.g. pc104_4=232,232,485,485,...)");
+
+static int rp_init(void);
+static void rp_cleanup_module(void);
+
+module_init(rp_init);
+module_exit(rp_cleanup_module);
+
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+/*************************************************************************/
+/* Module code starts here */
+
+static inline int rocket_paranoia_check(struct r_port *info,
+ const char *routine)
+{
+#ifdef ROCKET_PARANOIA_CHECK
+ if (!info)
+ return 1;
+ if (info->magic != RPORT_MAGIC) {
+ printk(KERN_WARNING "Warning: bad magic number for rocketport "
+ "struct in %s\n", routine);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+
+/* Serial port receive data function. Called (from timer poll) when an AIOPIC signals
+ * that receive data is present on a serial port. Pulls data from FIFO, moves it into the
+ * tty layer.
+ */
+static void rp_do_receive(struct r_port *info,
+ struct tty_struct *tty,
+ CHANNEL_t * cp, unsigned int ChanStatus)
+{
+ unsigned int CharNStat;
+ int ToRecv, wRecv, space;
+ unsigned char *cbuf;
+
+ ToRecv = sGetRxCnt(cp);
+#ifdef ROCKET_DEBUG_INTR
+ printk(KERN_INFO "rp_do_receive(%d)...\n", ToRecv);
+#endif
+ if (ToRecv == 0)
+ return;
+
+ /*
+ * if status indicates there are errored characters in the
+ * FIFO, then enter status mode (a word in FIFO holds
+ * character and status).
+ */
+ if (ChanStatus & (RXFOVERFL | RXBREAK | RXFRAME | RXPARITY)) {
+ if (!(ChanStatus & STATMODE)) {
+#ifdef ROCKET_DEBUG_RECEIVE
+ printk(KERN_INFO "Entering STATMODE...\n");
+#endif
+ ChanStatus |= STATMODE;
+ sEnRxStatusMode(cp);
+ }
+ }
+
+ /*
+ * if we previously entered status mode, then read down the
+ * FIFO one word at a time, pulling apart the character and
+ * the status. Update error counters depending on status
+ */
+ if (ChanStatus & STATMODE) {
+#ifdef ROCKET_DEBUG_RECEIVE
+ printk(KERN_INFO "Ignore %x, read %x...\n",
+ info->ignore_status_mask, info->read_status_mask);
+#endif
+ while (ToRecv) {
+ char flag;
+
+ CharNStat = sInW(sGetTxRxDataIO(cp));
+#ifdef ROCKET_DEBUG_RECEIVE
+ printk(KERN_INFO "%x...\n", CharNStat);
+#endif
+ if (CharNStat & STMBREAKH)
+ CharNStat &= ~(STMFRAMEH | STMPARITYH);
+ if (CharNStat & info->ignore_status_mask) {
+ ToRecv--;
+ continue;
+ }
+ CharNStat &= info->read_status_mask;
+ if (CharNStat & STMBREAKH)
+ flag = TTY_BREAK;
+ else if (CharNStat & STMPARITYH)
+ flag = TTY_PARITY;
+ else if (CharNStat & STMFRAMEH)
+ flag = TTY_FRAME;
+ else if (CharNStat & STMRCVROVRH)
+ flag = TTY_OVERRUN;
+ else
+ flag = TTY_NORMAL;
+ tty_insert_flip_char(tty, CharNStat & 0xff, flag);
+ ToRecv--;
+ }
+
+ /*
+ * after we've emptied the FIFO in status mode, turn
+ * status mode back off
+ */
+ if (sGetRxCnt(cp) == 0) {
+#ifdef ROCKET_DEBUG_RECEIVE
+ printk(KERN_INFO "Status mode off.\n");
+#endif
+ sDisRxStatusMode(cp);
+ }
+ } else {
+ /*
+ * we aren't in status mode, so read down the FIFO two
+ * characters at time by doing repeated word IO
+ * transfer.
+ */
+ space = tty_prepare_flip_string(tty, &cbuf, ToRecv);
+ if (space < ToRecv) {
+#ifdef ROCKET_DEBUG_RECEIVE
+ printk(KERN_INFO "rp_do_receive:insufficient space ToRecv=%d space=%d\n", ToRecv, space);
+#endif
+ if (space <= 0)
+ return;
+ ToRecv = space;
+ }
+ wRecv = ToRecv >> 1;
+ if (wRecv)
+ sInStrW(sGetTxRxDataIO(cp), (unsigned short *) cbuf, wRecv);
+ if (ToRecv & 1)
+ cbuf[ToRecv - 1] = sInB(sGetTxRxDataIO(cp));
+ }
+ /* Push the data up to the tty layer */
+ tty_flip_buffer_push(tty);
+}
+
+/*
+ * Serial port transmit data function. Called from the timer polling loop as a
+ * result of a bit set in xmit_flags[], indicating data (from the tty layer) is ready
+ * to be sent out the serial port. Data is buffered in rp_table[line].xmit_buf and is
+ * moved to the port's xmit FIFO. *info is critical data, protected by spinlocks.
+ */
+static void rp_do_transmit(struct r_port *info)
+{
+ int c;
+ CHANNEL_t *cp = &info->channel;
+ struct tty_struct *tty;
+ unsigned long flags;
+
+#ifdef ROCKET_DEBUG_INTR
+ printk(KERN_DEBUG "%s\n", __func__);
+#endif
+ if (!info)
+ return;
+ tty = tty_port_tty_get(&info->port);
+
+ if (tty == NULL) {
+ printk(KERN_WARNING "rp: WARNING %s called with tty==NULL\n", __func__);
+ clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
+ return;
+ }
+
+ spin_lock_irqsave(&info->slock, flags);
+ info->xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp);
+
+ /* Loop sending data to FIFO until done or FIFO full */
+ while (1) {
+ if (tty->stopped || tty->hw_stopped)
+ break;
+ c = min(info->xmit_fifo_room, info->xmit_cnt);
+ c = min(c, XMIT_BUF_SIZE - info->xmit_tail);
+ if (c <= 0 || info->xmit_fifo_room <= 0)
+ break;
+ sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) (info->xmit_buf + info->xmit_tail), c / 2);
+ if (c & 1)
+ sOutB(sGetTxRxDataIO(cp), info->xmit_buf[info->xmit_tail + c - 1]);
+ info->xmit_tail += c;
+ info->xmit_tail &= XMIT_BUF_SIZE - 1;
+ info->xmit_cnt -= c;
+ info->xmit_fifo_room -= c;
+#ifdef ROCKET_DEBUG_INTR
+ printk(KERN_INFO "tx %d chars...\n", c);
+#endif
+ }
+
+ if (info->xmit_cnt == 0)
+ clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
+
+ if (info->xmit_cnt < WAKEUP_CHARS) {
+ tty_wakeup(tty);
+#ifdef ROCKETPORT_HAVE_POLL_WAIT
+ wake_up_interruptible(&tty->poll_wait);
+#endif
+ }
+
+ spin_unlock_irqrestore(&info->slock, flags);
+ tty_kref_put(tty);
+
+#ifdef ROCKET_DEBUG_INTR
+ printk(KERN_DEBUG "(%d,%d,%d,%d)...\n", info->xmit_cnt, info->xmit_head,
+ info->xmit_tail, info->xmit_fifo_room);
+#endif
+}
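
The xmit_tail arithmetic above relies on XMIT_BUF_SIZE being a power of two, so masking with XMIT_BUF_SIZE - 1 is a cheap modulo. A self-contained sketch of that ring-buffer wrap, with an assumed buffer size:

/* Power-of-two ring buffer index wrap, as used for xmit_buf above. */
#include <assert.h>

#define RING_SIZE 4096                  /* assumed; must be a power of two */

static unsigned int ring_advance(unsigned int tail, unsigned int n)
{
        assert((RING_SIZE & (RING_SIZE - 1)) == 0);
        return (tail + n) & (RING_SIZE - 1);    /* same as (tail + n) % RING_SIZE */
}
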
+
+/*
+ * Called when a serial port signals it has read data in its RX FIFO.
+ * It checks what interrupts are pending and services them, including
+ * receiving serial data.
+ */
+static void rp_handle_port(struct r_port *info)
+{
+ CHANNEL_t *cp;
+ struct tty_struct *tty;
+ unsigned int IntMask, ChanStatus;
+
+ if (!info)
+ return;
+
+ if ((info->port.flags & ASYNC_INITIALIZED) == 0) {
+ printk(KERN_WARNING "rp: WARNING: rp_handle_port called with "
+ "info->flags & NOT_INIT\n");
+ return;
+ }
+ tty = tty_port_tty_get(&info->port);
+ if (!tty) {
+ printk(KERN_WARNING "rp: WARNING: rp_handle_port called with "
+ "tty==NULL\n");
+ return;
+ }
+ cp = &info->channel;
+
+ IntMask = sGetChanIntID(cp) & info->intmask;
+#ifdef ROCKET_DEBUG_INTR
+ printk(KERN_INFO "rp_interrupt %02x...\n", IntMask);
+#endif
+ ChanStatus = sGetChanStatus(cp);
+ if (IntMask & RXF_TRIG) { /* Rx FIFO trigger level */
+ rp_do_receive(info, tty, cp, ChanStatus);
+ }
+ if (IntMask & DELTA_CD) { /* CD change */
+#if (defined(ROCKET_DEBUG_OPEN) || defined(ROCKET_DEBUG_INTR) || defined(ROCKET_DEBUG_HANGUP))
+ printk(KERN_INFO "ttyR%d CD now %s...\n", info->line,
+ (ChanStatus & CD_ACT) ? "on" : "off");
+#endif
+ if (!(ChanStatus & CD_ACT) && info->cd_status) {
+#ifdef ROCKET_DEBUG_HANGUP
+ printk(KERN_INFO "CD drop, calling hangup.\n");
+#endif
+ tty_hangup(tty);
+ }
+ info->cd_status = (ChanStatus & CD_ACT) ? 1 : 0;
+ wake_up_interruptible(&info->port.open_wait);
+ }
+#ifdef ROCKET_DEBUG_INTR
+ if (IntMask & DELTA_CTS) { /* CTS change */
+ printk(KERN_INFO "CTS change...\n");
+ }
+ if (IntMask & DELTA_DSR) { /* DSR change */
+ printk(KERN_INFO "DSR change...\n");
+ }
+#endif
+ tty_kref_put(tty);
+}
+
+/*
+ * The top-level polling routine. Runs every 1/100 of a second (10 ms).
+ */
+static void rp_do_poll(unsigned long dummy)
+{
+ CONTROLLER_t *ctlp;
+ int ctrl, aiop, ch, line;
+ unsigned int xmitmask, i;
+ unsigned int CtlMask;
+ unsigned char AiopMask;
+ Word_t bit;
+
+ /* Walk through all the boards (ctrl's) */
+ for (ctrl = 0; ctrl < max_board; ctrl++) {
+ if (rcktpt_io_addr[ctrl] <= 0)
+ continue;
+
+ /* Get a ptr to the board's control struct */
+ ctlp = sCtlNumToCtlPtr(ctrl);
+
+ /* Get the interrupt status from the board */
+#ifdef CONFIG_PCI
+ if (ctlp->BusType == isPCI)
+ CtlMask = sPCIGetControllerIntStatus(ctlp);
+ else
+#endif
+ CtlMask = sGetControllerIntStatus(ctlp);
+
+ /* Check if any AIOP read bits are set */
+ for (aiop = 0; CtlMask; aiop++) {
+ bit = ctlp->AiopIntrBits[aiop];
+ if (CtlMask & bit) {
+ CtlMask &= ~bit;
+ AiopMask = sGetAiopIntStatus(ctlp, aiop);
+
+ /* Check if any port read bits are set */
+ for (ch = 0; AiopMask; AiopMask >>= 1, ch++) {
+ if (AiopMask & 1) {
+
+ /* Get the line number (/dev/ttyRx number). */
+ /* Read the data from the port. */
+ line = GetLineNumber(ctrl, aiop, ch);
+ rp_handle_port(rp_table[line]);
+ }
+ }
+ }
+ }
+
+ xmitmask = xmit_flags[ctrl];
+
+ /*
+ * xmit_flags contains bit-significant flags, indicating there is data
+ * to xmit on the port. Bit 0 is port 0 on this board, bit 1 is port
+ * 1, ... (32 total possible). The variable i has the aiop and ch
+ * numbers encoded in it (port 0-7 are aiop0, 8-15 are aiop1, etc).
+ */
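+		/* For example, bit i == 10 decodes to aiop = (10 & 0x18) >> 3 = 1 and
+		 * ch = 10 & 0x07 = 2, i.e. the third channel on the second AIOP. */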
+ if (xmitmask) {
+ for (i = 0; i < rocketModel[ctrl].numPorts; i++) {
+ if (xmitmask & (1 << i)) {
+ aiop = (i & 0x18) >> 3;
+ ch = i & 0x07;
+ line = GetLineNumber(ctrl, aiop, ch);
+ rp_do_transmit(rp_table[line]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Reset the timer so we get called at the next clock tick (10ms).
+ */
+ if (atomic_read(&rp_num_ports_open))
+ mod_timer(&rocket_timer, jiffies + POLL_PERIOD);
+}
+
+/*
+ * Initializes the r_port structure for a port, as well as enabling the port on
+ * the board.
+ * Inputs: board, aiop, chan numbers
+ */
+static void init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
+{
+ unsigned rocketMode;
+ struct r_port *info;
+ int line;
+ CONTROLLER_T *ctlp;
+
+ /* Get the next available line number */
+ line = SetLineNumber(board, aiop, chan);
+
+ ctlp = sCtlNumToCtlPtr(board);
+
+	/* Get an r_port struct for the port, fill it in and save it globally, indexed by line number */
+ info = kzalloc(sizeof (struct r_port), GFP_KERNEL);
+ if (!info) {
+ printk(KERN_ERR "Couldn't allocate info struct for line #%d\n",
+ line);
+ return;
+ }
+
+ info->magic = RPORT_MAGIC;
+ info->line = line;
+ info->ctlp = ctlp;
+ info->board = board;
+ info->aiop = aiop;
+ info->chan = chan;
+ tty_port_init(&info->port);
+ info->port.ops = &rocket_port_ops;
+ init_completion(&info->close_wait);
+ info->flags &= ~ROCKET_MODE_MASK;
+ switch (pc104[board][line]) {
+ case 422:
+ info->flags |= ROCKET_MODE_RS422;
+ break;
+ case 485:
+ info->flags |= ROCKET_MODE_RS485;
+ break;
+ case 232:
+ default:
+ info->flags |= ROCKET_MODE_RS232;
+ break;
+ }
+
+ info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR;
+ if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) {
+ printk(KERN_ERR "RocketPort sInitChan(%d, %d, %d) failed!\n",
+ board, aiop, chan);
+ kfree(info);
+ return;
+ }
+
+ rocketMode = info->flags & ROCKET_MODE_MASK;
+
+ if ((info->flags & ROCKET_RTS_TOGGLE) || (rocketMode == ROCKET_MODE_RS485))
+ sEnRTSToggle(&info->channel);
+ else
+ sDisRTSToggle(&info->channel);
+
+ if (ctlp->boardType == ROCKET_TYPE_PC104) {
+ switch (rocketMode) {
+ case ROCKET_MODE_RS485:
+ sSetInterfaceMode(&info->channel, InterfaceModeRS485);
+ break;
+ case ROCKET_MODE_RS422:
+ sSetInterfaceMode(&info->channel, InterfaceModeRS422);
+ break;
+ case ROCKET_MODE_RS232:
+ default:
+ if (info->flags & ROCKET_RTS_TOGGLE)
+ sSetInterfaceMode(&info->channel, InterfaceModeRS232T);
+ else
+ sSetInterfaceMode(&info->channel, InterfaceModeRS232);
+ break;
+ }
+ }
+ spin_lock_init(&info->slock);
+ mutex_init(&info->write_mtx);
+ rp_table[line] = info;
+ tty_register_device(rocket_driver, line, pci_dev ? &pci_dev->dev :
+ NULL);
+}
+
+/*
+ * Configures a RocketPort port according to its termios settings. Called from
+ * user mode into the driver (exception handler). *info CD manipulation is spinlock protected.
+ */
+static void configure_r_port(struct tty_struct *tty, struct r_port *info,
+ struct ktermios *old_termios)
+{
+ unsigned cflag;
+ unsigned long flags;
+ unsigned rocketMode;
+ int bits, baud, divisor;
+ CHANNEL_t *cp;
+ struct ktermios *t = tty->termios;
+
+ cp = &info->channel;
+ cflag = t->c_cflag;
+
+ /* Byte size and parity */
+ if ((cflag & CSIZE) == CS8) {
+ sSetData8(cp);
+ bits = 10;
+ } else {
+ sSetData7(cp);
+ bits = 9;
+ }
+ if (cflag & CSTOPB) {
+ sSetStop2(cp);
+ bits++;
+ } else {
+ sSetStop1(cp);
+ }
+
+ if (cflag & PARENB) {
+ sEnParity(cp);
+ bits++;
+ if (cflag & PARODD) {
+ sSetOddParity(cp);
+ } else {
+ sSetEvenParity(cp);
+ }
+ } else {
+ sDisParity(cp);
+ }
+
+ /* baud rate */
+ baud = tty_get_baud_rate(tty);
+ if (!baud)
+ baud = 9600;
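+	/* Compute the baud rate divisor, rounding to the nearest integer by adding half of baud before dividing. */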
+ divisor = ((rp_baud_base[info->board] + (baud >> 1)) / baud) - 1;
+ if ((divisor >= 8192 || divisor < 0) && old_termios) {
+ baud = tty_termios_baud_rate(old_termios);
+ if (!baud)
+ baud = 9600;
+ divisor = (rp_baud_base[info->board] / baud) - 1;
+ }
+ if (divisor >= 8192 || divisor < 0) {
+ baud = 9600;
+ divisor = (rp_baud_base[info->board] / baud) - 1;
+ }
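+	/* Approximate characters per second: the baud rate divided by the bits per character frame counted above. */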
+ info->cps = baud / bits;
+ sSetBaud(cp, divisor);
+
+ /* FIXME: Should really back compute a baud rate from the divisor */
+ tty_encode_baud_rate(tty, baud, baud);
+
+ if (cflag & CRTSCTS) {
+ info->intmask |= DELTA_CTS;
+ sEnCTSFlowCtl(cp);
+ } else {
+ info->intmask &= ~DELTA_CTS;
+ sDisCTSFlowCtl(cp);
+ }
+ if (cflag & CLOCAL) {
+ info->intmask &= ~DELTA_CD;
+ } else {
+ spin_lock_irqsave(&info->slock, flags);
+ if (sGetChanStatus(cp) & CD_ACT)
+ info->cd_status = 1;
+ else
+ info->cd_status = 0;
+ info->intmask |= DELTA_CD;
+ spin_unlock_irqrestore(&info->slock, flags);
+ }
+
+ /*
+ * Handle software flow control in the board
+ */
+#ifdef ROCKET_SOFT_FLOW
+ if (I_IXON(tty)) {
+ sEnTxSoftFlowCtl(cp);
+ if (I_IXANY(tty)) {
+ sEnIXANY(cp);
+ } else {
+ sDisIXANY(cp);
+ }
+ sSetTxXONChar(cp, START_CHAR(tty));
+ sSetTxXOFFChar(cp, STOP_CHAR(tty));
+ } else {
+ sDisTxSoftFlowCtl(cp);
+ sDisIXANY(cp);
+ sClrTxXOFF(cp);
+ }
+#endif
+
+ /*
+ * Set up ignore/read mask words
+ */
+ info->read_status_mask = STMRCVROVRH | 0xFF;
+ if (I_INPCK(tty))
+ info->read_status_mask |= STMFRAMEH | STMPARITYH;
+ if (I_BRKINT(tty) || I_PARMRK(tty))
+ info->read_status_mask |= STMBREAKH;
+
+ /*
+ * Characters to ignore
+ */
+ info->ignore_status_mask = 0;
+ if (I_IGNPAR(tty))
+ info->ignore_status_mask |= STMFRAMEH | STMPARITYH;
+ if (I_IGNBRK(tty)) {
+ info->ignore_status_mask |= STMBREAKH;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too. (For real raw support).
+ */
+ if (I_IGNPAR(tty))
+ info->ignore_status_mask |= STMRCVROVRH;
+ }
+
+ rocketMode = info->flags & ROCKET_MODE_MASK;
+
+ if ((info->flags & ROCKET_RTS_TOGGLE)
+ || (rocketMode == ROCKET_MODE_RS485))
+ sEnRTSToggle(cp);
+ else
+ sDisRTSToggle(cp);
+
+ sSetRTS(&info->channel);
+
+ if (cp->CtlP->boardType == ROCKET_TYPE_PC104) {
+ switch (rocketMode) {
+ case ROCKET_MODE_RS485:
+ sSetInterfaceMode(cp, InterfaceModeRS485);
+ break;
+ case ROCKET_MODE_RS422:
+ sSetInterfaceMode(cp, InterfaceModeRS422);
+ break;
+ case ROCKET_MODE_RS232:
+ default:
+ if (info->flags & ROCKET_RTS_TOGGLE)
+ sSetInterfaceMode(cp, InterfaceModeRS232T);
+ else
+ sSetInterfaceMode(cp, InterfaceModeRS232);
+ break;
+ }
+ }
+}
+
+static int carrier_raised(struct tty_port *port)
+{
+ struct r_port *info = container_of(port, struct r_port, port);
+ return (sGetChanStatusLo(&info->channel) & CD_ACT) ? 1 : 0;
+}
+
+static void dtr_rts(struct tty_port *port, int on)
+{
+ struct r_port *info = container_of(port, struct r_port, port);
+ if (on) {
+ sSetDTR(&info->channel);
+ sSetRTS(&info->channel);
+ } else {
+ sClrDTR(&info->channel);
+ sClrRTS(&info->channel);
+ }
+}
+
+/*
+ * Exception handler that opens a serial port. Creates xmit_buf storage and fills in
+ * the port's r_port struct. Initializes the port hardware.
+ */
+static int rp_open(struct tty_struct *tty, struct file *filp)
+{
+ struct r_port *info;
+ struct tty_port *port;
+ int line = 0, retval;
+ CHANNEL_t *cp;
+ unsigned long page;
+
+ line = tty->index;
+ if (line < 0 || line >= MAX_RP_PORTS || ((info = rp_table[line]) == NULL))
+ return -ENXIO;
+ port = &info->port;
+
+ page = __get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ if (port->flags & ASYNC_CLOSING) {
+ retval = wait_for_completion_interruptible(&info->close_wait);
+ free_page(page);
+ if (retval)
+ return retval;
+ return ((port->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS);
+ }
+
+ /*
+ * We must not sleep from here until the port is marked fully in use.
+ */
+ if (info->xmit_buf)
+ free_page(page);
+ else
+ info->xmit_buf = (unsigned char *) page;
+
+ tty->driver_data = info;
+ tty_port_tty_set(port, tty);
+
+ if (port->count++ == 0) {
+ atomic_inc(&rp_num_ports_open);
+
+#ifdef ROCKET_DEBUG_OPEN
+ printk(KERN_INFO "rocket mod++ = %d...\n",
+ atomic_read(&rp_num_ports_open));
+#endif
+ }
+#ifdef ROCKET_DEBUG_OPEN
+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
+#endif
+
+ /*
+	 * port->count is now 1, so it is safe to sleep now.
+ */
+ if (!test_bit(ASYNCB_INITIALIZED, &port->flags)) {
+ cp = &info->channel;
+ sSetRxTrigger(cp, TRIG_1);
+ if (sGetChanStatus(cp) & CD_ACT)
+ info->cd_status = 1;
+ else
+ info->cd_status = 0;
+ sDisRxStatusMode(cp);
+ sFlushRxFIFO(cp);
+ sFlushTxFIFO(cp);
+
+ sEnInterrupts(cp, (TXINT_EN | MCINT_EN | RXINT_EN | SRCINT_EN | CHANINT_EN));
+ sSetRxTrigger(cp, TRIG_1);
+
+ sGetChanStatus(cp);
+ sDisRxStatusMode(cp);
+ sClrTxXOFF(cp);
+
+ sDisCTSFlowCtl(cp);
+ sDisTxSoftFlowCtl(cp);
+
+ sEnRxFIFO(cp);
+ sEnTransmit(cp);
+
+ set_bit(ASYNCB_INITIALIZED, &info->port.flags);
+
+ /*
+ * Set up the tty->alt_speed kludge
+ */
+ if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_HI)
+ tty->alt_speed = 57600;
+ if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_VHI)
+ tty->alt_speed = 115200;
+ if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_SHI)
+ tty->alt_speed = 230400;
+ if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_WARP)
+ tty->alt_speed = 460800;
+
+ configure_r_port(tty, info, NULL);
+ if (tty->termios->c_cflag & CBAUD) {
+ sSetDTR(cp);
+ sSetRTS(cp);
+ }
+ }
+	/* Starts (or resets) the maintenance polling loop */
+ mod_timer(&rocket_timer, jiffies + POLL_PERIOD);
+
+ retval = tty_port_block_til_ready(port, tty, filp);
+ if (retval) {
+#ifdef ROCKET_DEBUG_OPEN
+ printk(KERN_INFO "rp_open returning after block_til_ready with %d\n", retval);
+#endif
+ return retval;
+ }
+ return 0;
+}
+
+/*
+ * Exception handler that closes a serial port. info->port.count is considered critical.
+ */
+static void rp_close(struct tty_struct *tty, struct file *filp)
+{
+ struct r_port *info = tty->driver_data;
+ struct tty_port *port = &info->port;
+ int timeout;
+ CHANNEL_t *cp;
+
+ if (rocket_paranoia_check(info, "rp_close"))
+ return;
+
+#ifdef ROCKET_DEBUG_OPEN
+ printk(KERN_INFO "rp_close ttyR%d, count = %d\n", info->line, info->port.count);
+#endif
+
+ if (tty_port_close_start(port, tty, filp) == 0)
+ return;
+
+ mutex_lock(&port->mutex);
+ cp = &info->channel;
+ /*
+ * Before we drop DTR, make sure the UART transmitter
+ * has completely drained; this is especially
+ * important if there is a transmit FIFO!
+ */
+ timeout = (sGetTxCnt(cp) + 1) * HZ / info->cps;
+ if (timeout == 0)
+ timeout = 1;
+ rp_wait_until_sent(tty, timeout);
+ clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
+
+ sDisTransmit(cp);
+ sDisInterrupts(cp, (TXINT_EN | MCINT_EN | RXINT_EN | SRCINT_EN | CHANINT_EN));
+ sDisCTSFlowCtl(cp);
+ sDisTxSoftFlowCtl(cp);
+ sClrTxXOFF(cp);
+ sFlushRxFIFO(cp);
+ sFlushTxFIFO(cp);
+ sClrRTS(cp);
+ if (C_HUPCL(tty))
+ sClrDTR(cp);
+
+ rp_flush_buffer(tty);
+
+ tty_ldisc_flush(tty);
+
+ clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
+
+ /* We can't yet use tty_port_close_end as the buffer handling in this
+ driver is a bit different to the usual */
+
+ if (port->blocked_open) {
+ if (port->close_delay) {
+ msleep_interruptible(jiffies_to_msecs(port->close_delay));
+ }
+ wake_up_interruptible(&port->open_wait);
+ } else {
+ if (info->xmit_buf) {
+ free_page((unsigned long) info->xmit_buf);
+ info->xmit_buf = NULL;
+ }
+ }
+ spin_lock_irq(&port->lock);
+ info->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_CLOSING | ASYNC_NORMAL_ACTIVE);
+ tty->closing = 0;
+ spin_unlock_irq(&port->lock);
+ mutex_unlock(&port->mutex);
+ tty_port_tty_set(port, NULL);
+
+ wake_up_interruptible(&port->close_wait);
+ complete_all(&info->close_wait);
+ atomic_dec(&rp_num_ports_open);
+
+#ifdef ROCKET_DEBUG_OPEN
+ printk(KERN_INFO "rocket mod-- = %d...\n",
+ atomic_read(&rp_num_ports_open));
+ printk(KERN_INFO "rp_close ttyR%d complete shutdown\n", info->line);
+#endif
+
+}
+
+static void rp_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios)
+{
+ struct r_port *info = tty->driver_data;
+ CHANNEL_t *cp;
+ unsigned cflag;
+
+ if (rocket_paranoia_check(info, "rp_set_termios"))
+ return;
+
+ cflag = tty->termios->c_cflag;
+
+ /*
+ * This driver doesn't support CS5 or CS6
+ */
+ if (((cflag & CSIZE) == CS5) || ((cflag & CSIZE) == CS6))
+ tty->termios->c_cflag =
+ ((cflag & ~CSIZE) | (old_termios->c_cflag & CSIZE));
+ /* Or CMSPAR */
+ tty->termios->c_cflag &= ~CMSPAR;
+
+ configure_r_port(tty, info, old_termios);
+
+ cp = &info->channel;
+
+ /* Handle transition to B0 status */
+ if ((old_termios->c_cflag & CBAUD) && !(tty->termios->c_cflag & CBAUD)) {
+ sClrDTR(cp);
+ sClrRTS(cp);
+ }
+
+ /* Handle transition away from B0 status */
+ if (!(old_termios->c_cflag & CBAUD) && (tty->termios->c_cflag & CBAUD)) {
+ if (!tty->hw_stopped || !(tty->termios->c_cflag & CRTSCTS))
+ sSetRTS(cp);
+ sSetDTR(cp);
+ }
+
+ if ((old_termios->c_cflag & CRTSCTS) && !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ rp_start(tty);
+ }
+}
+
+static int rp_break(struct tty_struct *tty, int break_state)
+{
+ struct r_port *info = tty->driver_data;
+ unsigned long flags;
+
+ if (rocket_paranoia_check(info, "rp_break"))
+ return -EINVAL;
+
+ spin_lock_irqsave(&info->slock, flags);
+ if (break_state == -1)
+ sSendBreak(&info->channel);
+ else
+ sClrBreak(&info->channel);
+ spin_unlock_irqrestore(&info->slock, flags);
+ return 0;
+}
+
+/*
+ * sGetChanRI used to be a macro in rocket_int.h. When the functionality for
+ * the UPCI boards was added, it was decided to make this a function because
+ * the macro was getting too complicated. All cases except the first one
+ * (UPCIRingInd) are taken directly from the original macro.
+ */
+static int sGetChanRI(CHANNEL_T * ChP)
+{
+ CONTROLLER_t *CtlP = ChP->CtlP;
+ int ChanNum = ChP->ChanNum;
+ int RingInd = 0;
+
+ if (CtlP->UPCIRingInd)
+ RingInd = !(sInB(CtlP->UPCIRingInd) & sBitMapSetTbl[ChanNum]);
+ else if (CtlP->AltChanRingIndicator)
+ RingInd = sInB((ByteIO_t) (ChP->ChanStat + 8)) & DSR_ACT;
+ else if (CtlP->boardType == ROCKET_TYPE_PC104)
+ RingInd = !(sInB(CtlP->AiopIO[3]) & sBitMapSetTbl[ChanNum]);
+
+ return RingInd;
+}
+
+/********************************************************************************************/
+/* Here are the routines used by rp_ioctl. These are all called from exception handlers. */
+
+/*
+ * Returns the state of the serial modem control lines. These next two functions
+ * are how kernel versions > 2.5 handle modem control lines, rather than via IOCTLs.
+ */
+static int rp_tiocmget(struct tty_struct *tty)
+{
+ struct r_port *info = tty->driver_data;
+ unsigned int control, result, ChanStatus;
+
+ ChanStatus = sGetChanStatusLo(&info->channel);
+ control = info->channel.TxControl[3];
+ result = ((control & SET_RTS) ? TIOCM_RTS : 0) |
+ ((control & SET_DTR) ? TIOCM_DTR : 0) |
+ ((ChanStatus & CD_ACT) ? TIOCM_CAR : 0) |
+ (sGetChanRI(&info->channel) ? TIOCM_RNG : 0) |
+ ((ChanStatus & DSR_ACT) ? TIOCM_DSR : 0) |
+ ((ChanStatus & CTS_ACT) ? TIOCM_CTS : 0);
+
+ return result;
+}
+
+/*
+ * Sets the modem control lines
+ */
+static int rp_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct r_port *info = tty->driver_data;
+
+ if (set & TIOCM_RTS)
+ info->channel.TxControl[3] |= SET_RTS;
+ if (set & TIOCM_DTR)
+ info->channel.TxControl[3] |= SET_DTR;
+ if (clear & TIOCM_RTS)
+ info->channel.TxControl[3] &= ~SET_RTS;
+ if (clear & TIOCM_DTR)
+ info->channel.TxControl[3] &= ~SET_DTR;
+
+ out32(info->channel.IndexAddr, info->channel.TxControl);
+ return 0;
+}
+
+static int get_config(struct r_port *info, struct rocket_config __user *retinfo)
+{
+ struct rocket_config tmp;
+
+ if (!retinfo)
+ return -EFAULT;
+ memset(&tmp, 0, sizeof (tmp));
+ mutex_lock(&info->port.mutex);
+ tmp.line = info->line;
+ tmp.flags = info->flags;
+ tmp.close_delay = info->port.close_delay;
+ tmp.closing_wait = info->port.closing_wait;
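+	/* line >> 5 selects the board; each board can have at most 32 ports. */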
+ tmp.port = rcktpt_io_addr[(info->line >> 5) & 3];
+ mutex_unlock(&info->port.mutex);
+
+ if (copy_to_user(retinfo, &tmp, sizeof (*retinfo)))
+ return -EFAULT;
+ return 0;
+}
+
+static int set_config(struct tty_struct *tty, struct r_port *info,
+ struct rocket_config __user *new_info)
+{
+ struct rocket_config new_serial;
+
+ if (copy_from_user(&new_serial, new_info, sizeof (new_serial)))
+ return -EFAULT;
+
+ mutex_lock(&info->port.mutex);
+ if (!capable(CAP_SYS_ADMIN))
+ {
+ if ((new_serial.flags & ~ROCKET_USR_MASK) != (info->flags & ~ROCKET_USR_MASK)) {
+ mutex_unlock(&info->port.mutex);
+ return -EPERM;
+ }
+ info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK));
+ configure_r_port(tty, info, NULL);
+ mutex_unlock(&info->port.mutex);
+ return 0;
+ }
+
+ info->flags = ((info->flags & ~ROCKET_FLAGS) | (new_serial.flags & ROCKET_FLAGS));
+ info->port.close_delay = new_serial.close_delay;
+ info->port.closing_wait = new_serial.closing_wait;
+
+ if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_HI)
+ tty->alt_speed = 57600;
+ if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_VHI)
+ tty->alt_speed = 115200;
+ if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_SHI)
+ tty->alt_speed = 230400;
+ if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_WARP)
+ tty->alt_speed = 460800;
+ mutex_unlock(&info->port.mutex);
+
+ configure_r_port(tty, info, NULL);
+ return 0;
+}
+
+/*
+ * This function fills in a rocket_ports struct with information
+ * about what boards/ports are in the system. This info is passed
+ * to user space. See setrocket.c where the info is used to create
+ * the /dev/ttyRx ports.
+ */
+static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
+{
+ struct rocket_ports tmp;
+ int board;
+
+ if (!retports)
+ return -EFAULT;
+ memset(&tmp, 0, sizeof (tmp));
+ tmp.tty_major = rocket_driver->major;
+
+ for (board = 0; board < 4; board++) {
+ tmp.rocketModel[board].model = rocketModel[board].model;
+ strcpy(tmp.rocketModel[board].modelString, rocketModel[board].modelString);
+ tmp.rocketModel[board].numPorts = rocketModel[board].numPorts;
+ tmp.rocketModel[board].loadrm2 = rocketModel[board].loadrm2;
+ tmp.rocketModel[board].startingPortNumber = rocketModel[board].startingPortNumber;
+ }
+ if (copy_to_user(retports, &tmp, sizeof (*retports)))
+ return -EFAULT;
+ return 0;
+}
+
+static int reset_rm2(struct r_port *info, void __user *arg)
+{
+ int reset;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&reset, arg, sizeof (int)))
+ return -EFAULT;
+ if (reset)
+ reset = 1;
+
+ if (rcktpt_type[info->board] != ROCKET_TYPE_MODEMII &&
+ rcktpt_type[info->board] != ROCKET_TYPE_MODEMIII)
+ return -EINVAL;
+
+ if (info->ctlp->BusType == isISA)
+ sModemReset(info->ctlp, info->chan, reset);
+ else
+ sPCIModemReset(info->ctlp, info->chan, reset);
+
+ return 0;
+}
+
+static int get_version(struct r_port *info, struct rocket_version __user *retvers)
+{
+ if (copy_to_user(retvers, &driver_version, sizeof (*retvers)))
+ return -EFAULT;
+ return 0;
+}
+
+/* IOCTL call handler into the driver */
+static int rp_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct r_port *info = tty->driver_data;
+ void __user *argp = (void __user *)arg;
+ int ret = 0;
+
+ if (cmd != RCKP_GET_PORTS && rocket_paranoia_check(info, "rp_ioctl"))
+ return -ENXIO;
+
+ switch (cmd) {
+ case RCKP_GET_STRUCT:
+ if (copy_to_user(argp, info, sizeof (struct r_port)))
+ ret = -EFAULT;
+ break;
+ case RCKP_GET_CONFIG:
+ ret = get_config(info, argp);
+ break;
+ case RCKP_SET_CONFIG:
+ ret = set_config(tty, info, argp);
+ break;
+ case RCKP_GET_PORTS:
+ ret = get_ports(info, argp);
+ break;
+ case RCKP_RESET_RM2:
+ ret = reset_rm2(info, argp);
+ break;
+ case RCKP_GET_VERSION:
+ ret = get_version(info, argp);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+ return ret;
+}
+
+static void rp_send_xchar(struct tty_struct *tty, char ch)
+{
+ struct r_port *info = tty->driver_data;
+ CHANNEL_t *cp;
+
+ if (rocket_paranoia_check(info, "rp_send_xchar"))
+ return;
+
+ cp = &info->channel;
+ if (sGetTxCnt(cp))
+ sWriteTxPrioByte(cp, ch);
+ else
+ sWriteTxByte(sGetTxRxDataIO(cp), ch);
+}
+
+static void rp_throttle(struct tty_struct *tty)
+{
+ struct r_port *info = tty->driver_data;
+
+#ifdef ROCKET_DEBUG_THROTTLE
+ printk(KERN_INFO "throttle %s: %d....\n", tty->name,
+ tty->ldisc.chars_in_buffer(tty));
+#endif
+
+ if (rocket_paranoia_check(info, "rp_throttle"))
+ return;
+
+ if (I_IXOFF(tty))
+ rp_send_xchar(tty, STOP_CHAR(tty));
+
+ sClrRTS(&info->channel);
+}
+
+static void rp_unthrottle(struct tty_struct *tty)
+{
+ struct r_port *info = tty->driver_data;
+#ifdef ROCKET_DEBUG_THROTTLE
+ printk(KERN_INFO "unthrottle %s: %d....\n", tty->name,
+ tty->ldisc.chars_in_buffer(tty));
+#endif
+
+	if (rocket_paranoia_check(info, "rp_unthrottle"))
+ return;
+
+ if (I_IXOFF(tty))
+ rp_send_xchar(tty, START_CHAR(tty));
+
+ sSetRTS(&info->channel);
+}
+
+/*
+ * ------------------------------------------------------------
+ * rp_stop() and rp_start()
+ *
+ * These routines are called before setting or resetting tty->stopped.
+ * They enable or disable transmitter interrupts, as necessary.
+ * ------------------------------------------------------------
+ */
+static void rp_stop(struct tty_struct *tty)
+{
+ struct r_port *info = tty->driver_data;
+
+#ifdef ROCKET_DEBUG_FLOW
+ printk(KERN_INFO "stop %s: %d %d....\n", tty->name,
+ info->xmit_cnt, info->xmit_fifo_room);
+#endif
+
+ if (rocket_paranoia_check(info, "rp_stop"))
+ return;
+
+ if (sGetTxCnt(&info->channel))
+ sDisTransmit(&info->channel);
+}
+
+static void rp_start(struct tty_struct *tty)
+{
+ struct r_port *info = tty->driver_data;
+
+#ifdef ROCKET_DEBUG_FLOW
+ printk(KERN_INFO "start %s: %d %d....\n", tty->name,
+ info->xmit_cnt, info->xmit_fifo_room);
+#endif
+
+	if (rocket_paranoia_check(info, "rp_start"))
+ return;
+
+ sEnTransmit(&info->channel);
+ set_bit((info->aiop * 8) + info->chan,
+ (void *) &xmit_flags[info->board]);
+}
+
+/*
+ * rp_wait_until_sent() --- wait until the transmitter is empty
+ */
+static void rp_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct r_port *info = tty->driver_data;
+ CHANNEL_t *cp;
+ unsigned long orig_jiffies;
+ int check_time, exit_time;
+ int txcnt;
+
+ if (rocket_paranoia_check(info, "rp_wait_until_sent"))
+ return;
+
+ cp = &info->channel;
+
+ orig_jiffies = jiffies;
+#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
+ printk(KERN_INFO "In RP_wait_until_sent(%d) (jiff=%lu)...\n", timeout,
+ jiffies);
+ printk(KERN_INFO "cps=%d...\n", info->cps);
+#endif
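+	/* Poll the transmit count, sleeping roughly as long as the remaining
+	 * characters should take to drain at info->cps, until the FIFO and the
+	 * transmit shift register are empty or the timeout expires. */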
+ while (1) {
+ txcnt = sGetTxCnt(cp);
+ if (!txcnt) {
+ if (sGetChanStatusLo(cp) & TXSHRMT)
+ break;
+ check_time = (HZ / info->cps) / 5;
+ } else {
+ check_time = HZ * txcnt / info->cps;
+ }
+ if (timeout) {
+ exit_time = orig_jiffies + timeout - jiffies;
+ if (exit_time <= 0)
+ break;
+ if (exit_time < check_time)
+ check_time = exit_time;
+ }
+ if (check_time == 0)
+ check_time = 1;
+#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
+ printk(KERN_INFO "txcnt = %d (jiff=%lu,check=%d)...\n", txcnt,
+ jiffies, check_time);
+#endif
+ msleep_interruptible(jiffies_to_msecs(check_time));
+ if (signal_pending(current))
+ break;
+ }
+ __set_current_state(TASK_RUNNING);
+#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
+ printk(KERN_INFO "txcnt = %d (jiff=%lu)...done\n", txcnt, jiffies);
+#endif
+}
+
+/*
+ * rp_hangup() --- called by tty_hangup() when a hangup is signaled.
+ */
+static void rp_hangup(struct tty_struct *tty)
+{
+ CHANNEL_t *cp;
+ struct r_port *info = tty->driver_data;
+ unsigned long flags;
+
+ if (rocket_paranoia_check(info, "rp_hangup"))
+ return;
+
+#if (defined(ROCKET_DEBUG_OPEN) || defined(ROCKET_DEBUG_HANGUP))
+ printk(KERN_INFO "rp_hangup of ttyR%d...\n", info->line);
+#endif
+ rp_flush_buffer(tty);
+ spin_lock_irqsave(&info->port.lock, flags);
+ if (info->port.flags & ASYNC_CLOSING) {
+ spin_unlock_irqrestore(&info->port.lock, flags);
+ return;
+ }
+ if (info->port.count)
+ atomic_dec(&rp_num_ports_open);
+ clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
+ spin_unlock_irqrestore(&info->port.lock, flags);
+
+ tty_port_hangup(&info->port);
+
+ cp = &info->channel;
+ sDisRxFIFO(cp);
+ sDisTransmit(cp);
+ sDisInterrupts(cp, (TXINT_EN | MCINT_EN | RXINT_EN | SRCINT_EN | CHANINT_EN));
+ sDisCTSFlowCtl(cp);
+ sDisTxSoftFlowCtl(cp);
+ sClrTxXOFF(cp);
+ clear_bit(ASYNCB_INITIALIZED, &info->port.flags);
+
+ wake_up_interruptible(&info->port.open_wait);
+}
+
+/*
+ * Exception handler - write char routine. The RocketPort driver uses a
+ * double-buffering strategy, with the twist that if the in-memory CPU
+ * buffer is empty and there's space in the transmit FIFO, the
+ * writing routines will write directly to the transmit FIFO.
+ * The write buffer and counters are protected by spinlocks.
+ */
+static int rp_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ struct r_port *info = tty->driver_data;
+ CHANNEL_t *cp;
+ unsigned long flags;
+
+ if (rocket_paranoia_check(info, "rp_put_char"))
+ return 0;
+
+ /*
+ * Grab the port write mutex, locking out other processes that try to
+ * write to this port
+ */
+ mutex_lock(&info->write_mtx);
+
+#ifdef ROCKET_DEBUG_WRITE
+ printk(KERN_INFO "rp_put_char %c...\n", ch);
+#endif
+
+ spin_lock_irqsave(&info->slock, flags);
+ cp = &info->channel;
+
+ if (!tty->stopped && !tty->hw_stopped && info->xmit_fifo_room == 0)
+ info->xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp);
+
+ if (tty->stopped || tty->hw_stopped || info->xmit_fifo_room == 0 || info->xmit_cnt != 0) {
+ info->xmit_buf[info->xmit_head++] = ch;
+ info->xmit_head &= XMIT_BUF_SIZE - 1;
+ info->xmit_cnt++;
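+		/* Flag this port (bit index aiop * 8 + chan) so the polling loop will drain xmit_buf. */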
+ set_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
+ } else {
+ sOutB(sGetTxRxDataIO(cp), ch);
+ info->xmit_fifo_room--;
+ }
+ spin_unlock_irqrestore(&info->slock, flags);
+ mutex_unlock(&info->write_mtx);
+ return 1;
+}
+
+/*
+ * Exception handler - write routine, called when user app writes to the device.
+ * A per-port write mutex is used to protect against another process writing to
+ * this port at the same time. This other process could be running on another CPU
+ * or could gain control of the CPU if copy_from_user() blocks due to a page fault (swapped out).
+ * Spinlocks protect the info xmit members.
+ */
+static int rp_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ struct r_port *info = tty->driver_data;
+ CHANNEL_t *cp;
+ const unsigned char *b;
+ int c, retval = 0;
+ unsigned long flags;
+
+ if (count <= 0 || rocket_paranoia_check(info, "rp_write"))
+ return 0;
+
+ if (mutex_lock_interruptible(&info->write_mtx))
+ return -ERESTARTSYS;
+
+#ifdef ROCKET_DEBUG_WRITE
+ printk(KERN_INFO "rp_write %d chars...\n", count);
+#endif
+ cp = &info->channel;
+
+ if (!tty->stopped && !tty->hw_stopped && info->xmit_fifo_room < count)
+ info->xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp);
+
+ /*
+ * If the write queue for the port is empty, and there is FIFO space, stuff bytes
+ * into FIFO. Use the write queue for temp storage.
+ */
+ if (!tty->stopped && !tty->hw_stopped && info->xmit_cnt == 0 && info->xmit_fifo_room > 0) {
+ c = min(count, info->xmit_fifo_room);
+ b = buf;
+
+ /* Push data into FIFO, 2 bytes at a time */
+ sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) b, c / 2);
+
+ /* If there is a byte remaining, write it */
+ if (c & 1)
+ sOutB(sGetTxRxDataIO(cp), b[c - 1]);
+
+ retval += c;
+ buf += c;
+ count -= c;
+
+ spin_lock_irqsave(&info->slock, flags);
+ info->xmit_fifo_room -= c;
+ spin_unlock_irqrestore(&info->slock, flags);
+ }
+
+ /* If count is zero, we wrote it all and are done */
+ if (!count)
+ goto end;
+
+ /* Write remaining data into the port's xmit_buf */
+ while (1) {
+ /* Hung up ? */
+ if (!test_bit(ASYNCB_NORMAL_ACTIVE, &info->port.flags))
+ goto end;
+ c = min(count, XMIT_BUF_SIZE - info->xmit_cnt - 1);
+ c = min(c, XMIT_BUF_SIZE - info->xmit_head);
+ if (c <= 0)
+ break;
+
+ b = buf;
+ memcpy(info->xmit_buf + info->xmit_head, b, c);
+
+ spin_lock_irqsave(&info->slock, flags);
+ info->xmit_head =
+ (info->xmit_head + c) & (XMIT_BUF_SIZE - 1);
+ info->xmit_cnt += c;
+ spin_unlock_irqrestore(&info->slock, flags);
+
+ buf += c;
+ count -= c;
+ retval += c;
+ }
+
+ if ((retval > 0) && !tty->stopped && !tty->hw_stopped)
+ set_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
+
+end:
+ if (info->xmit_cnt < WAKEUP_CHARS) {
+ tty_wakeup(tty);
+#ifdef ROCKETPORT_HAVE_POLL_WAIT
+ wake_up_interruptible(&tty->poll_wait);
+#endif
+ }
+ mutex_unlock(&info->write_mtx);
+ return retval;
+}
+
+/*
+ * Return the number of characters that can be sent. We estimate
+ * using only the in-memory transmit buffer, and ignore the
+ * potential space in the transmit FIFO.
+ */
+static int rp_write_room(struct tty_struct *tty)
+{
+ struct r_port *info = tty->driver_data;
+ int ret;
+
+ if (rocket_paranoia_check(info, "rp_write_room"))
+ return 0;
+
+ ret = XMIT_BUF_SIZE - info->xmit_cnt - 1;
+ if (ret < 0)
+ ret = 0;
+#ifdef ROCKET_DEBUG_WRITE
+ printk(KERN_INFO "rp_write_room returns %d...\n", ret);
+#endif
+ return ret;
+}
+
+/*
+ * Return the number of characters in the buffer. Again, this only
+ * counts those characters in the in-memory transmit buffer.
+ */
+static int rp_chars_in_buffer(struct tty_struct *tty)
+{
+ struct r_port *info = tty->driver_data;
+
+ if (rocket_paranoia_check(info, "rp_chars_in_buffer"))
+ return 0;
+
+#ifdef ROCKET_DEBUG_WRITE
+ printk(KERN_INFO "rp_chars_in_buffer returns %d...\n", info->xmit_cnt);
+#endif
+ return info->xmit_cnt;
+}
+
+/*
+ * Flushes the TX FIFO for a port and deletes the data in the xmit_buf stored in the
+ * port's r_port struct. Note that spinlocks are used to protect info members;
+ * do not call this function if the spinlock is already held.
+ */
+static void rp_flush_buffer(struct tty_struct *tty)
+{
+ struct r_port *info = tty->driver_data;
+ CHANNEL_t *cp;
+ unsigned long flags;
+
+ if (rocket_paranoia_check(info, "rp_flush_buffer"))
+ return;
+
+ spin_lock_irqsave(&info->slock, flags);
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+ spin_unlock_irqrestore(&info->slock, flags);
+
+#ifdef ROCKETPORT_HAVE_POLL_WAIT
+ wake_up_interruptible(&tty->poll_wait);
+#endif
+ tty_wakeup(tty);
+
+ cp = &info->channel;
+ sFlushTxFIFO(cp);
+}
+
+#ifdef CONFIG_PCI
+
+static struct pci_device_id __devinitdata __used rocket_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_ANY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, rocket_pci_ids);
+
+/*
+ * Called when a PCI card is found. Retrieves and stores model information,
+ * initializes the AIOPIC and serial port hardware.
+ * Inputs: i is the board number (0-n)
+ */
+static __init int register_PCI(int i, struct pci_dev *dev)
+{
+ int num_aiops, aiop, max_num_aiops, num_chan, chan;
+ unsigned int aiopio[MAX_AIOPS_PER_BOARD];
+ CONTROLLER_t *ctlp;
+
+ int fast_clock = 0;
+ int altChanRingIndicator = 0;
+ int ports_per_aiop = 8;
+ WordIO_t ConfigIO = 0;
+ ByteIO_t UPCIRingInd = 0;
+
+ if (!dev || pci_enable_device(dev))
+ return 0;
+
+ rcktpt_io_addr[i] = pci_resource_start(dev, 0);
+
+ rcktpt_type[i] = ROCKET_TYPE_NORMAL;
+ rocketModel[i].loadrm2 = 0;
+ rocketModel[i].startingPortNumber = nextLineNumber;
+
+ /* Depending on the model, set up some config variables */
+ switch (dev->device) {
+ case PCI_DEVICE_ID_RP4QUAD:
+ max_num_aiops = 1;
+ ports_per_aiop = 4;
+ rocketModel[i].model = MODEL_RP4QUAD;
+ strcpy(rocketModel[i].modelString, "RocketPort 4 port w/quad cable");
+ rocketModel[i].numPorts = 4;
+ break;
+ case PCI_DEVICE_ID_RP8OCTA:
+ max_num_aiops = 1;
+ rocketModel[i].model = MODEL_RP8OCTA;
+ strcpy(rocketModel[i].modelString, "RocketPort 8 port w/octa cable");
+ rocketModel[i].numPorts = 8;
+ break;
+ case PCI_DEVICE_ID_URP8OCTA:
+ max_num_aiops = 1;
+ rocketModel[i].model = MODEL_UPCI_RP8OCTA;
+ strcpy(rocketModel[i].modelString, "RocketPort UPCI 8 port w/octa cable");
+ rocketModel[i].numPorts = 8;
+ break;
+ case PCI_DEVICE_ID_RP8INTF:
+ max_num_aiops = 1;
+ rocketModel[i].model = MODEL_RP8INTF;
+ strcpy(rocketModel[i].modelString, "RocketPort 8 port w/external I/F");
+ rocketModel[i].numPorts = 8;
+ break;
+ case PCI_DEVICE_ID_URP8INTF:
+ max_num_aiops = 1;
+ rocketModel[i].model = MODEL_UPCI_RP8INTF;
+ strcpy(rocketModel[i].modelString, "RocketPort UPCI 8 port w/external I/F");
+ rocketModel[i].numPorts = 8;
+ break;
+ case PCI_DEVICE_ID_RP8J:
+ max_num_aiops = 1;
+ rocketModel[i].model = MODEL_RP8J;
+ strcpy(rocketModel[i].modelString, "RocketPort 8 port w/RJ11 connectors");
+ rocketModel[i].numPorts = 8;
+ break;
+ case PCI_DEVICE_ID_RP4J:
+ max_num_aiops = 1;
+ ports_per_aiop = 4;
+ rocketModel[i].model = MODEL_RP4J;
+ strcpy(rocketModel[i].modelString, "RocketPort 4 port w/RJ45 connectors");
+ rocketModel[i].numPorts = 4;
+ break;
+ case PCI_DEVICE_ID_RP8SNI:
+ max_num_aiops = 1;
+ rocketModel[i].model = MODEL_RP8SNI;
+ strcpy(rocketModel[i].modelString, "RocketPort 8 port w/ custom DB78");
+ rocketModel[i].numPorts = 8;
+ break;
+ case PCI_DEVICE_ID_RP16SNI:
+ max_num_aiops = 2;
+ rocketModel[i].model = MODEL_RP16SNI;
+ strcpy(rocketModel[i].modelString, "RocketPort 16 port w/ custom DB78");
+ rocketModel[i].numPorts = 16;
+ break;
+ case PCI_DEVICE_ID_RP16INTF:
+ max_num_aiops = 2;
+ rocketModel[i].model = MODEL_RP16INTF;
+ strcpy(rocketModel[i].modelString, "RocketPort 16 port w/external I/F");
+ rocketModel[i].numPorts = 16;
+ break;
+ case PCI_DEVICE_ID_URP16INTF:
+ max_num_aiops = 2;
+ rocketModel[i].model = MODEL_UPCI_RP16INTF;
+ strcpy(rocketModel[i].modelString, "RocketPort UPCI 16 port w/external I/F");
+ rocketModel[i].numPorts = 16;
+ break;
+ case PCI_DEVICE_ID_CRP16INTF:
+ max_num_aiops = 2;
+ rocketModel[i].model = MODEL_CPCI_RP16INTF;
+ strcpy(rocketModel[i].modelString, "RocketPort Compact PCI 16 port w/external I/F");
+ rocketModel[i].numPorts = 16;
+ break;
+ case PCI_DEVICE_ID_RP32INTF:
+ max_num_aiops = 4;
+ rocketModel[i].model = MODEL_RP32INTF;
+ strcpy(rocketModel[i].modelString, "RocketPort 32 port w/external I/F");
+ rocketModel[i].numPorts = 32;
+ break;
+ case PCI_DEVICE_ID_URP32INTF:
+ max_num_aiops = 4;
+ rocketModel[i].model = MODEL_UPCI_RP32INTF;
+ strcpy(rocketModel[i].modelString, "RocketPort UPCI 32 port w/external I/F");
+ rocketModel[i].numPorts = 32;
+ break;
+ case PCI_DEVICE_ID_RPP4:
+ max_num_aiops = 1;
+ ports_per_aiop = 4;
+ altChanRingIndicator++;
+ fast_clock++;
+ rocketModel[i].model = MODEL_RPP4;
+ strcpy(rocketModel[i].modelString, "RocketPort Plus 4 port");
+ rocketModel[i].numPorts = 4;
+ break;
+ case PCI_DEVICE_ID_RPP8:
+ max_num_aiops = 2;
+ ports_per_aiop = 4;
+ altChanRingIndicator++;
+ fast_clock++;
+ rocketModel[i].model = MODEL_RPP8;
+ strcpy(rocketModel[i].modelString, "RocketPort Plus 8 port");
+ rocketModel[i].numPorts = 8;
+ break;
+ case PCI_DEVICE_ID_RP2_232:
+ max_num_aiops = 1;
+ ports_per_aiop = 2;
+ altChanRingIndicator++;
+ fast_clock++;
+ rocketModel[i].model = MODEL_RP2_232;
+ strcpy(rocketModel[i].modelString, "RocketPort Plus 2 port RS232");
+ rocketModel[i].numPorts = 2;
+ break;
+ case PCI_DEVICE_ID_RP2_422:
+ max_num_aiops = 1;
+ ports_per_aiop = 2;
+ altChanRingIndicator++;
+ fast_clock++;
+ rocketModel[i].model = MODEL_RP2_422;
+ strcpy(rocketModel[i].modelString, "RocketPort Plus 2 port RS422");
+ rocketModel[i].numPorts = 2;
+ break;
+ case PCI_DEVICE_ID_RP6M:
+
+ max_num_aiops = 1;
+ ports_per_aiop = 6;
+
+ /* If revision is 1, the rocketmodem flash must be loaded.
+ * If it is 2 it is a "socketed" version. */
+ if (dev->revision == 1) {
+ rcktpt_type[i] = ROCKET_TYPE_MODEMII;
+ rocketModel[i].loadrm2 = 1;
+ } else {
+ rcktpt_type[i] = ROCKET_TYPE_MODEM;
+ }
+
+ rocketModel[i].model = MODEL_RP6M;
+ strcpy(rocketModel[i].modelString, "RocketModem 6 port");
+ rocketModel[i].numPorts = 6;
+ break;
+ case PCI_DEVICE_ID_RP4M:
+ max_num_aiops = 1;
+ ports_per_aiop = 4;
+ if (dev->revision == 1) {
+ rcktpt_type[i] = ROCKET_TYPE_MODEMII;
+ rocketModel[i].loadrm2 = 1;
+ } else {
+ rcktpt_type[i] = ROCKET_TYPE_MODEM;
+ }
+
+ rocketModel[i].model = MODEL_RP4M;
+ strcpy(rocketModel[i].modelString, "RocketModem 4 port");
+ rocketModel[i].numPorts = 4;
+ break;
+ default:
+ max_num_aiops = 0;
+ break;
+ }
+
+ /*
+ * Check for UPCI boards.
+ */
+
+ switch (dev->device) {
+ case PCI_DEVICE_ID_URP32INTF:
+ case PCI_DEVICE_ID_URP8INTF:
+ case PCI_DEVICE_ID_URP16INTF:
+ case PCI_DEVICE_ID_CRP16INTF:
+ case PCI_DEVICE_ID_URP8OCTA:
+ rcktpt_io_addr[i] = pci_resource_start(dev, 2);
+ ConfigIO = pci_resource_start(dev, 1);
+ if (dev->device == PCI_DEVICE_ID_URP8OCTA) {
+ UPCIRingInd = rcktpt_io_addr[i] + _PCI_9030_RING_IND;
+
+ /*
+ * Check for octa or quad cable.
+ */
+			if (!(sInW(ConfigIO + _PCI_9030_GPIO_CTRL) &
+			      PCI_GPIO_CTRL_8PORT)) {
+ ports_per_aiop = 4;
+ rocketModel[i].numPorts = 4;
+ }
+ }
+ break;
+ case PCI_DEVICE_ID_UPCI_RM3_8PORT:
+ max_num_aiops = 1;
+ rocketModel[i].model = MODEL_UPCI_RM3_8PORT;
+ strcpy(rocketModel[i].modelString, "RocketModem III 8 port");
+ rocketModel[i].numPorts = 8;
+ rcktpt_io_addr[i] = pci_resource_start(dev, 2);
+ UPCIRingInd = rcktpt_io_addr[i] + _PCI_9030_RING_IND;
+ ConfigIO = pci_resource_start(dev, 1);
+ rcktpt_type[i] = ROCKET_TYPE_MODEMIII;
+ break;
+ case PCI_DEVICE_ID_UPCI_RM3_4PORT:
+ max_num_aiops = 1;
+ rocketModel[i].model = MODEL_UPCI_RM3_4PORT;
+ strcpy(rocketModel[i].modelString, "RocketModem III 4 port");
+ rocketModel[i].numPorts = 4;
+ rcktpt_io_addr[i] = pci_resource_start(dev, 2);
+ UPCIRingInd = rcktpt_io_addr[i] + _PCI_9030_RING_IND;
+ ConfigIO = pci_resource_start(dev, 1);
+ rcktpt_type[i] = ROCKET_TYPE_MODEMIII;
+ break;
+ default:
+ break;
+ }
+
+ if (fast_clock) {
+ sClockPrescale = 0x12; /* mod 2 (divide by 3) */
+ rp_baud_base[i] = 921600;
+ } else {
+ /*
+ * If support_low_speed is set, use the slow clock
+ * prescale, which supports 50 bps
+ */
+ if (support_low_speed) {
+ /* mod 9 (divide by 10) prescale */
+ sClockPrescale = 0x19;
+ rp_baud_base[i] = 230400;
+ } else {
+ /* mod 4 (divide by 5) prescale */
+ sClockPrescale = 0x14;
+ rp_baud_base[i] = 460800;
+ }
+ }
+
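+	/* On PCI boards each AIOP occupies a 0x40-byte window of I/O space. */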
+ for (aiop = 0; aiop < max_num_aiops; aiop++)
+ aiopio[aiop] = rcktpt_io_addr[i] + (aiop * 0x40);
+ ctlp = sCtlNumToCtlPtr(i);
+ num_aiops = sPCIInitController(ctlp, i, aiopio, max_num_aiops, ConfigIO, 0, FREQ_DIS, 0, altChanRingIndicator, UPCIRingInd);
+ for (aiop = 0; aiop < max_num_aiops; aiop++)
+ ctlp->AiopNumChan[aiop] = ports_per_aiop;
+
+ dev_info(&dev->dev, "comtrol PCI controller #%d found at "
+ "address %04lx, %d AIOP(s) (%s), creating ttyR%d - %ld\n",
+ i, rcktpt_io_addr[i], num_aiops, rocketModel[i].modelString,
+ rocketModel[i].startingPortNumber,
+ rocketModel[i].startingPortNumber + rocketModel[i].numPorts-1);
+
+ if (num_aiops <= 0) {
+ rcktpt_io_addr[i] = 0;
+ return (0);
+ }
+ is_PCI[i] = 1;
+
+ /* Reset the AIOPIC, init the serial ports */
+ for (aiop = 0; aiop < num_aiops; aiop++) {
+ sResetAiopByNum(ctlp, aiop);
+ num_chan = ports_per_aiop;
+ for (chan = 0; chan < num_chan; chan++)
+ init_r_port(i, aiop, chan, dev);
+ }
+
+ /* Rocket modems must be reset */
+ if ((rcktpt_type[i] == ROCKET_TYPE_MODEM) ||
+ (rcktpt_type[i] == ROCKET_TYPE_MODEMII) ||
+ (rcktpt_type[i] == ROCKET_TYPE_MODEMIII)) {
+ num_chan = ports_per_aiop;
+ for (chan = 0; chan < num_chan; chan++)
+ sPCIModemReset(ctlp, chan, 1);
+ msleep(500);
+ for (chan = 0; chan < num_chan; chan++)
+ sPCIModemReset(ctlp, chan, 0);
+ msleep(500);
+ rmSpeakerReset(ctlp, rocketModel[i].model);
+ }
+ return (1);
+}
+
+/*
+ * Probes for PCI cards and initializes any that are found.
+ * Input: boards_found = number of ISA boards already found, i.e. the
+ * starting board number
+ * Returns: Number of PCI boards found
+ */
+static int __init init_PCI(int boards_found)
+{
+ struct pci_dev *dev = NULL;
+ int count = 0;
+
+ /* Work through the PCI device list, pulling out ours */
+ while ((dev = pci_get_device(PCI_VENDOR_ID_RP, PCI_ANY_ID, dev))) {
+ if (register_PCI(count + boards_found, dev))
+ count++;
+ }
+ return (count);
+}
+
+#endif /* CONFIG_PCI */
+
+/*
+ * Probes for ISA cards
+ * Input: i = the board number to look for
+ * Returns: 1 if board found, 0 otherwise
+ */
+static int __init init_ISA(int i)
+{
+ int num_aiops, num_chan = 0, total_num_chan = 0;
+ int aiop, chan;
+ unsigned int aiopio[MAX_AIOPS_PER_BOARD];
+ CONTROLLER_t *ctlp;
+ char *type_string;
+
+ /* If io_addr is zero, no board configured */
+ if (rcktpt_io_addr[i] == 0)
+ return (0);
+
+ /* Reserve the IO region */
+ if (!request_region(rcktpt_io_addr[i], 64, "Comtrol RocketPort")) {
+ printk(KERN_ERR "Unable to reserve IO region for configured "
+ "ISA RocketPort at address 0x%lx, board not "
+ "installed...\n", rcktpt_io_addr[i]);
+ rcktpt_io_addr[i] = 0;
+ return (0);
+ }
+
+ ctlp = sCtlNumToCtlPtr(i);
+
+ ctlp->boardType = rcktpt_type[i];
+
+ switch (rcktpt_type[i]) {
+ case ROCKET_TYPE_PC104:
+ type_string = "(PC104)";
+ break;
+ case ROCKET_TYPE_MODEM:
+ type_string = "(RocketModem)";
+ break;
+ case ROCKET_TYPE_MODEMII:
+ type_string = "(RocketModem II)";
+ break;
+ default:
+ type_string = "";
+ break;
+ }
+
+ /*
+ * If support_low_speed is set, use the slow clock prescale,
+ * which supports 50 bps
+ */
+ if (support_low_speed) {
+ sClockPrescale = 0x19; /* mod 9 (divide by 10) prescale */
+ rp_baud_base[i] = 230400;
+ } else {
+ sClockPrescale = 0x14; /* mod 4 (divide by 5) prescale */
+ rp_baud_base[i] = 460800;
+ }
+
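+	/* On ISA boards each AIOP occupies a 0x400-byte window of I/O space. */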
+ for (aiop = 0; aiop < MAX_AIOPS_PER_BOARD; aiop++)
+ aiopio[aiop] = rcktpt_io_addr[i] + (aiop * 0x400);
+
+ num_aiops = sInitController(ctlp, i, controller + (i * 0x400), aiopio, MAX_AIOPS_PER_BOARD, 0, FREQ_DIS, 0);
+
+ if (ctlp->boardType == ROCKET_TYPE_PC104) {
+ sEnAiop(ctlp, 2); /* only one AIOPIC, but these */
+ sEnAiop(ctlp, 3); /* CSels used for other stuff */
+ }
+
+	/* If something went wrong initializing the AIOPs, release the ISA IO region */
+ if (num_aiops <= 0) {
+ release_region(rcktpt_io_addr[i], 64);
+ rcktpt_io_addr[i] = 0;
+ return (0);
+ }
+
+ rocketModel[i].startingPortNumber = nextLineNumber;
+
+ for (aiop = 0; aiop < num_aiops; aiop++) {
+ sResetAiopByNum(ctlp, aiop);
+ sEnAiop(ctlp, aiop);
+ num_chan = sGetAiopNumChan(ctlp, aiop);
+ total_num_chan += num_chan;
+ for (chan = 0; chan < num_chan; chan++)
+ init_r_port(i, aiop, chan, NULL);
+ }
+ is_PCI[i] = 0;
+ if ((rcktpt_type[i] == ROCKET_TYPE_MODEM) || (rcktpt_type[i] == ROCKET_TYPE_MODEMII)) {
+ num_chan = sGetAiopNumChan(ctlp, 0);
+ total_num_chan = num_chan;
+ for (chan = 0; chan < num_chan; chan++)
+ sModemReset(ctlp, chan, 1);
+ msleep(500);
+ for (chan = 0; chan < num_chan; chan++)
+ sModemReset(ctlp, chan, 0);
+ msleep(500);
+ strcpy(rocketModel[i].modelString, "RocketModem ISA");
+ } else {
+ strcpy(rocketModel[i].modelString, "RocketPort ISA");
+ }
+ rocketModel[i].numPorts = total_num_chan;
+ rocketModel[i].model = MODEL_ISA;
+
+ printk(KERN_INFO "RocketPort ISA card #%d found at 0x%lx - %d AIOPs %s\n",
+ i, rcktpt_io_addr[i], num_aiops, type_string);
+
+ printk(KERN_INFO "Installing %s, creating /dev/ttyR%d - %ld\n",
+ rocketModel[i].modelString,
+ rocketModel[i].startingPortNumber,
+ rocketModel[i].startingPortNumber +
+ rocketModel[i].numPorts - 1);
+
+ return (1);
+}
+
+static const struct tty_operations rocket_ops = {
+ .open = rp_open,
+ .close = rp_close,
+ .write = rp_write,
+ .put_char = rp_put_char,
+ .write_room = rp_write_room,
+ .chars_in_buffer = rp_chars_in_buffer,
+ .flush_buffer = rp_flush_buffer,
+ .ioctl = rp_ioctl,
+ .throttle = rp_throttle,
+ .unthrottle = rp_unthrottle,
+ .set_termios = rp_set_termios,
+ .stop = rp_stop,
+ .start = rp_start,
+ .hangup = rp_hangup,
+ .break_ctl = rp_break,
+ .send_xchar = rp_send_xchar,
+ .wait_until_sent = rp_wait_until_sent,
+ .tiocmget = rp_tiocmget,
+ .tiocmset = rp_tiocmset,
+};
+
+static const struct tty_port_operations rocket_port_ops = {
+ .carrier_raised = carrier_raised,
+ .dtr_rts = dtr_rts,
+};
+
+/*
+ * The module "startup" routine; it's run when the module is loaded.
+ */
+static int __init rp_init(void)
+{
+ int ret = -ENOMEM, pci_boards_found, isa_boards_found, i;
+
+ printk(KERN_INFO "RocketPort device driver module, version %s, %s\n",
+ ROCKET_VERSION, ROCKET_DATE);
+
+ rocket_driver = alloc_tty_driver(MAX_RP_PORTS);
+ if (!rocket_driver)
+ goto err;
+
+ /*
+	 * If board1 is non-zero, there is at least one ISA board configured. If controller is
+ * zero, use the default controller IO address of board1 + 0x40.
+ */
+ if (board1) {
+ if (controller == 0)
+ controller = board1 + 0x40;
+ } else {
+ controller = 0; /* Used as a flag, meaning no ISA boards */
+ }
+
+ /* If an ISA card is configured, reserve the 4 byte IO space for the Mudbac controller */
+ if (controller && (!request_region(controller, 4, "Comtrol RocketPort"))) {
+ printk(KERN_ERR "Unable to reserve IO region for first "
+ "configured ISA RocketPort controller 0x%lx. "
+ "Driver exiting\n", controller);
+ ret = -EBUSY;
+ goto err_tty;
+ }
+
+	/* Store ISA variables retrieved from the command line or .conf file. */
+ rcktpt_io_addr[0] = board1;
+ rcktpt_io_addr[1] = board2;
+ rcktpt_io_addr[2] = board3;
+ rcktpt_io_addr[3] = board4;
+
+ rcktpt_type[0] = modem1 ? ROCKET_TYPE_MODEM : ROCKET_TYPE_NORMAL;
+ rcktpt_type[0] = pc104_1[0] ? ROCKET_TYPE_PC104 : rcktpt_type[0];
+ rcktpt_type[1] = modem2 ? ROCKET_TYPE_MODEM : ROCKET_TYPE_NORMAL;
+ rcktpt_type[1] = pc104_2[0] ? ROCKET_TYPE_PC104 : rcktpt_type[1];
+ rcktpt_type[2] = modem3 ? ROCKET_TYPE_MODEM : ROCKET_TYPE_NORMAL;
+ rcktpt_type[2] = pc104_3[0] ? ROCKET_TYPE_PC104 : rcktpt_type[2];
+ rcktpt_type[3] = modem4 ? ROCKET_TYPE_MODEM : ROCKET_TYPE_NORMAL;
+ rcktpt_type[3] = pc104_4[0] ? ROCKET_TYPE_PC104 : rcktpt_type[3];
+
+ /*
+ * Set up the tty driver structure and then register this
+ * driver with the tty layer.
+ */
+
+ rocket_driver->owner = THIS_MODULE;
+ rocket_driver->flags = TTY_DRIVER_DYNAMIC_DEV;
+ rocket_driver->name = "ttyR";
+ rocket_driver->driver_name = "Comtrol RocketPort";
+ rocket_driver->major = TTY_ROCKET_MAJOR;
+ rocket_driver->minor_start = 0;
+ rocket_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ rocket_driver->subtype = SERIAL_TYPE_NORMAL;
+ rocket_driver->init_termios = tty_std_termios;
+ rocket_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ rocket_driver->init_termios.c_ispeed = 9600;
+ rocket_driver->init_termios.c_ospeed = 9600;
+#ifdef ROCKET_SOFT_FLOW
+ rocket_driver->flags |= TTY_DRIVER_REAL_RAW;
+#endif
+ tty_set_operations(rocket_driver, &rocket_ops);
+
+ ret = tty_register_driver(rocket_driver);
+ if (ret < 0) {
+ printk(KERN_ERR "Couldn't install tty RocketPort driver\n");
+ goto err_controller;
+ }
+
+#ifdef ROCKET_DEBUG_OPEN
+ printk(KERN_INFO "RocketPort driver is major %d\n", rocket_driver.major);
+#endif
+
+ /*
+ * OK, let's probe each of the controllers looking for boards. Any boards found
+ * will be initialized here.
+ */
+ isa_boards_found = 0;
+ pci_boards_found = 0;
+
+ for (i = 0; i < NUM_BOARDS; i++) {
+ if (init_ISA(i))
+ isa_boards_found++;
+ }
+
+#ifdef CONFIG_PCI
+ if (isa_boards_found < NUM_BOARDS)
+ pci_boards_found = init_PCI(isa_boards_found);
+#endif
+
+ max_board = pci_boards_found + isa_boards_found;
+
+ if (max_board == 0) {
+ printk(KERN_ERR "No rocketport ports found; unloading driver\n");
+ ret = -ENXIO;
+ goto err_ttyu;
+ }
+
+ return 0;
+err_ttyu:
+ tty_unregister_driver(rocket_driver);
+err_controller:
+ if (controller)
+ release_region(controller, 4);
+err_tty:
+ put_tty_driver(rocket_driver);
+err:
+ return ret;
+}
+
+
+static void rp_cleanup_module(void)
+{
+ int retval;
+ int i;
+
+ del_timer_sync(&rocket_timer);
+
+ retval = tty_unregister_driver(rocket_driver);
+ if (retval)
+ printk(KERN_ERR "Error %d while trying to unregister "
+ "rocketport driver\n", -retval);
+
+ for (i = 0; i < MAX_RP_PORTS; i++)
+ if (rp_table[i]) {
+ tty_unregister_device(rocket_driver, i);
+ kfree(rp_table[i]);
+ }
+
+ put_tty_driver(rocket_driver);
+
+ for (i = 0; i < NUM_BOARDS; i++) {
+ if (rcktpt_io_addr[i] <= 0 || is_PCI[i])
+ continue;
+ release_region(rcktpt_io_addr[i], 64);
+ }
+ if (controller)
+ release_region(controller, 4);
+}
+
+/***************************************************************************
+Function: sInitController
+Purpose: Initialization of controller global registers and controller
+ structure.
+Call: sInitController(CtlP,CtlNum,MudbacIO,AiopIOList,AiopIOListSize,
+ IRQNum,Frequency,PeriodicOnly)
+ CONTROLLER_T *CtlP; Ptr to controller structure
+ int CtlNum; Controller number
+ ByteIO_t MudbacIO; Mudbac base I/O address.
+ ByteIO_t *AiopIOList; List of I/O addresses for each AIOP.
+ This list must be in the order the AIOPs will be found on the
+ controller. Once an AIOP in the list is not found, it is
+ assumed that there are no more AIOPs on the controller.
+ int AiopIOListSize; Number of addresses in AiopIOList
+ int IRQNum; Interrupt Request number. Can be any of the following:
+ 0: Disable global interrupts
+ 3: IRQ 3
+ 4: IRQ 4
+ 5: IRQ 5
+ 9: IRQ 9
+ 10: IRQ 10
+ 11: IRQ 11
+ 12: IRQ 12
+ 15: IRQ 15
+ Byte_t Frequency: A flag identifying the frequency
+ of the periodic interrupt, can be any one of the following:
+ FREQ_DIS - periodic interrupt disabled
+ FREQ_137HZ - 137 Hertz
+ FREQ_69HZ - 69 Hertz
+ FREQ_34HZ - 34 Hertz
+ FREQ_17HZ - 17 Hertz
+ FREQ_9HZ - 9 Hertz
+ FREQ_4HZ - 4 Hertz
+ If IRQNum is set to 0 the Frequency parameter is
+ overridden; it is forced to a value of FREQ_DIS.
+ int PeriodicOnly: 1 if all interrupts except the periodic
+ interrupt are to be blocked.
+ 0 if both the periodic interrupt and
+ other channel interrupts are allowed.
+ If IRQNum is set to 0 the PeriodicOnly parameter is
+ overridden; it is forced to a value of 0.
+Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller
+ initialization failed.
+
+Comments:
+ If periodic interrupts are to be disabled but AIOP interrupts
+ are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0.
+
+ If interrupts are to be completely disabled set IRQNum to 0.
+
+ Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an
+ invalid combination.
+
+ This function performs initialization of global interrupt modes,
+ but it does not actually enable global interrupts. To enable
+ and disable global interrupts use functions sEnGlobalInt() and
+ sDisGlobalInt(). Enabling of global interrupts is normally not
+ done until all other initializations are complete.
+
+ Even if interrupts are globally enabled, they must also be
+ individually enabled for each channel that is to generate
+ interrupts.
+
+Warnings: No range checking on any of the parameters is done.
+
+ No context switches are allowed while executing this function.
+
+ After this function all AIOPs on the controller are disabled,
+ they can be enabled with sEnAiop().
+*/
+static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO,
+ ByteIO_t * AiopIOList, int AiopIOListSize,
+ int IRQNum, Byte_t Frequency, int PeriodicOnly)
+{
+ int i;
+ ByteIO_t io;
+ int done;
+
+ CtlP->AiopIntrBits = aiop_intr_bits;
+ CtlP->AltChanRingIndicator = 0;
+ CtlP->CtlNum = CtlNum;
+ CtlP->CtlID = CTLID_0001; /* controller release 1 */
+ CtlP->BusType = isISA;
+ CtlP->MBaseIO = MudbacIO;
+ CtlP->MReg1IO = MudbacIO + 1;
+ CtlP->MReg2IO = MudbacIO + 2;
+ CtlP->MReg3IO = MudbacIO + 3;
+#if 1
+ CtlP->MReg2 = 0; /* interrupt disable */
+ CtlP->MReg3 = 0; /* no periodic interrupts */
+#else
+ if (sIRQMap[IRQNum] == 0) { /* interrupts globally disabled */
+ CtlP->MReg2 = 0; /* interrupt disable */
+ CtlP->MReg3 = 0; /* no periodic interrupts */
+ } else {
+ CtlP->MReg2 = sIRQMap[IRQNum]; /* set IRQ number */
+ CtlP->MReg3 = Frequency; /* set frequency */
+ if (PeriodicOnly) { /* periodic interrupt only */
+ CtlP->MReg3 |= PERIODIC_ONLY;
+ }
+ }
+#endif
+ sOutB(CtlP->MReg2IO, CtlP->MReg2);
+ sOutB(CtlP->MReg3IO, CtlP->MReg3);
+ sControllerEOI(CtlP); /* clear EOI if warm init */
+ /* Init AIOPs */
+ CtlP->NumAiop = 0;
+ for (i = done = 0; i < AiopIOListSize; i++) {
+ io = AiopIOList[i];
+ CtlP->AiopIO[i] = (WordIO_t) io;
+ CtlP->AiopIntChanIO[i] = io + _INT_CHAN;
+ sOutB(CtlP->MReg2IO, CtlP->MReg2 | (i & 0x03)); /* AIOP index */
+ sOutB(MudbacIO, (Byte_t) (io >> 6)); /* set up AIOP I/O in MUDBAC */
+ if (done)
+ continue;
+ sEnAiop(CtlP, i); /* enable the AIOP */
+ CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */
+ if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */
+ done = 1; /* done looking for AIOPs */
+ else {
+ CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */
+ sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */
+ sOutB(io + _INDX_DATA, sClockPrescale);
+ CtlP->NumAiop++; /* bump count of AIOPs */
+ }
+ sDisAiop(CtlP, i); /* disable AIOP */
+ }
+
+ if (CtlP->NumAiop == 0)
+ return (-1);
+ else
+ return (CtlP->NumAiop);
+}
+
+/***************************************************************************
+Function: sPCIInitController
+Purpose: Initialization of controller global registers and controller
+ structure.
+Call: sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize,
+ ConfigIO,IRQNum,Frequency,PeriodicOnly,altChanRingIndicator,UPCIRingInd)
+ CONTROLLER_T *CtlP; Ptr to controller structure
+ int CtlNum; Controller number
+ ByteIO_t *AiopIOList; List of I/O addresses for each AIOP.
+ This list must be in the order the AIOPs will be found on the
+ controller. Once an AIOP in the list is not found, it is
+ assumed that there are no more AIOPs on the controller.
+ int AiopIOListSize; Number of addresses in AiopIOList
+ int IRQNum; Interrupt Request number. Can be any of the following:
+ 0: Disable global interrupts
+ 3: IRQ 3
+ 4: IRQ 4
+ 5: IRQ 5
+ 9: IRQ 9
+ 10: IRQ 10
+ 11: IRQ 11
+ 12: IRQ 12
+ 15: IRQ 15
+ Byte_t Frequency: A flag identifying the frequency
+ of the periodic interrupt, can be any one of the following:
+ FREQ_DIS - periodic interrupt disabled
+ FREQ_137HZ - 137 Hertz
+ FREQ_69HZ - 69 Hertz
+ FREQ_34HZ - 34 Hertz
+ FREQ_17HZ - 17 Hertz
+ FREQ_9HZ - 9 Hertz
+ FREQ_4HZ - 4 Hertz
+ If IRQNum is set to 0 the Frequency parameter is
+ overridden; it is forced to a value of FREQ_DIS.
+ int PeriodicOnly: 1 if all interrupts except the periodic
+ interrupt are to be blocked.
+ 0 if both the periodic interrupt and
+ other channel interrupts are allowed.
+ If IRQNum is set to 0 the PeriodicOnly parameter is
+ overridden; it is forced to a value of 0.
+Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller
+ initialization failed.
+
+Comments:
+ If periodic interrupts are to be disabled but AIOP interrupts
+ are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0.
+
+ If interrupts are to be completely disabled set IRQNum to 0.
+
+ Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an
+ invalid combination.
+
+ This function performs initialization of global interrupt modes,
+ but it does not actually enable global interrupts. To enable
+ and disable global interrupts use functions sEnGlobalInt() and
+ sDisGlobalInt(). Enabling of global interrupts is normally not
+ done until all other initializations are complete.
+
+ Even if interrupts are globally enabled, they must also be
+ individually enabled for each channel that is to generate
+ interrupts.
+
+Warnings: No range checking on any of the parameters is done.
+
+ No context switches are allowed while executing this function.
+
+ After this function all AIOPs on the controller are disabled,
+ they can be enabled with sEnAiop().
+*/
+static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
+ ByteIO_t * AiopIOList, int AiopIOListSize,
+ WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
+ int PeriodicOnly, int altChanRingIndicator,
+ int UPCIRingInd)
+{
+ int i;
+ ByteIO_t io;
+
+ CtlP->AltChanRingIndicator = altChanRingIndicator;
+ CtlP->UPCIRingInd = UPCIRingInd;
+ CtlP->CtlNum = CtlNum;
+ CtlP->CtlID = CTLID_0001; /* controller release 1 */
+ CtlP->BusType = isPCI; /* PCI bus controller */
+
+ if (ConfigIO) {
+ CtlP->isUPCI = 1;
+ CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL;
+ CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL;
+ CtlP->AiopIntrBits = upci_aiop_intr_bits;
+ } else {
+ CtlP->isUPCI = 0;
+ CtlP->PCIIO =
+ (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC);
+ CtlP->AiopIntrBits = aiop_intr_bits;
+ }
+
+ sPCIControllerEOI(CtlP); /* clear EOI if warm init */
+ /* Init AIOPs */
+ CtlP->NumAiop = 0;
+ for (i = 0; i < AiopIOListSize; i++) {
+ io = AiopIOList[i];
+ CtlP->AiopIO[i] = (WordIO_t) io;
+ CtlP->AiopIntChanIO[i] = io + _INT_CHAN;
+
+ CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */
+ if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */
+ break; /* done looking for AIOPs */
+
+ CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */
+ sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */
+ sOutB(io + _INDX_DATA, sClockPrescale);
+ CtlP->NumAiop++; /* bump count of AIOPs */
+ }
+
+ if (CtlP->NumAiop == 0)
+ return (-1);
+ else
+ return (CtlP->NumAiop);
+}
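+/*
+ * Editorial sketch (not part of the driver): one way a PCI probe path
+ * could call sPCIInitController(). The AIOP spacing of 0x40 bytes and
+ * the board base address are assumptions made for illustration only.
+ */
+#if 0
+static int example_pci_setup(CONTROLLER_T *ctlp, ByteIO_t base)
+{
+ ByteIO_t aiopio[MAX_AIOPS_PER_BOARD];
+ int i, num_aiops;
+
+ for (i = 0; i < MAX_AIOPS_PER_BOARD; i++)
+ aiopio[i] = base + i * 0x40; /* assumed AIOP spacing */
+
+ num_aiops = sPCIInitController(ctlp, 0, aiopio, MAX_AIOPS_PER_BOARD,
+ 0 /* ConfigIO: not a UPCI board */, 0, FREQ_DIS, 0, 0, 0);
+ return num_aiops; /* -1 when no AIOPs were found */
+}
+#endif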
+
+/***************************************************************************
+Function: sReadAiopID
+Purpose: Read the AIOP identification number directly from an AIOP.
+Call: sReadAiopID(io)
+ ByteIO_t io: AIOP base I/O address
+Return: int: Flag AIOPID_XXXX if a valid AIOP is found, where X
+ is replaced by an identifying number.
+ Flag AIOPID_NULL if no valid AIOP is found
+Warnings: No context switches are allowed while executing this function.
+
+*/
+static int sReadAiopID(ByteIO_t io)
+{
+ Byte_t AiopID; /* ID byte from AIOP */
+
+ sOutB(io + _CMD_REG, RESET_ALL); /* reset AIOP */
+ sOutB(io + _CMD_REG, 0x0);
+ AiopID = sInW(io + _CHN_STAT0) & 0x07;
+ if (AiopID == 0x06)
+ return (1);
+ else /* AIOP does not exist */
+ return (-1);
+}
+
+/***************************************************************************
+Function: sReadAiopNumChan
+Purpose: Read the number of channels available in an AIOP directly from
+ an AIOP.
+Call: sReadAiopNumChan(io)
+ WordIO_t io: AIOP base I/O address
+Return: int: The number of channels available
+Comments: The number of channels is determined by write/reads from identical
+ offsets within the SRAM address spaces for channels 0 and 4.
+ If the channel 4 space is mirrored to channel 0 it is a 4 channel
+ AIOP, otherwise it is an 8 channel.
+Warnings: No context switches are allowed while executing this function.
+*/
+static int sReadAiopNumChan(WordIO_t io)
+{
+ Word_t x;
+ static Byte_t R[4] = { 0x00, 0x00, 0x34, 0x12 };
+
+ /* write to chan 0 SRAM */
+ out32((DWordIO_t) io + _INDX_ADDR, R);
+ sOutW(io + _INDX_ADDR, 0); /* read from SRAM, chan 0 */
+ x = sInW(io + _INDX_DATA);
+ sOutW(io + _INDX_ADDR, 0x4000); /* read from SRAM, chan 4 */
+ if (x != sInW(io + _INDX_DATA)) /* if different must be 8 chan */
+ return (8);
+ else
+ return (4);
+}
+
+/***************************************************************************
+Function: sInitChan
+Purpose: Initialization of a channel and channel structure
+Call: sInitChan(CtlP,ChP,AiopNum,ChanNum)
+ CONTROLLER_T *CtlP; Ptr to controller structure
+ CHANNEL_T *ChP; Ptr to channel structure
+ int AiopNum; AIOP number within controller
+ int ChanNum; Channel number within AIOP
+Return: int: 1 if initialization succeeded, 0 if it fails because channel
+ number exceeds number of channels available in AIOP.
+Comments: This function must be called before a channel can be used.
+Warnings: No range checking on any of the parameters is done.
+
+ No context switches are allowed while executing this function.
+*/
+static int sInitChan(CONTROLLER_T * CtlP, CHANNEL_T * ChP, int AiopNum,
+ int ChanNum)
+{
+ int i;
+ WordIO_t AiopIO;
+ WordIO_t ChIOOff;
+ Byte_t *ChR;
+ Word_t ChOff;
+ static Byte_t R[4];
+ int brd9600;
+
+ if (ChanNum >= CtlP->AiopNumChan[AiopNum])
+ return 0; /* exceeds num chans in AIOP */
+
+ /* Channel, AIOP, and controller identifiers */
+ ChP->CtlP = CtlP;
+ ChP->ChanID = CtlP->AiopID[AiopNum];
+ ChP->AiopNum = AiopNum;
+ ChP->ChanNum = ChanNum;
+
+ /* Global direct addresses */
+ AiopIO = CtlP->AiopIO[AiopNum];
+ ChP->Cmd = (ByteIO_t) AiopIO + _CMD_REG;
+ ChP->IntChan = (ByteIO_t) AiopIO + _INT_CHAN;
+ ChP->IntMask = (ByteIO_t) AiopIO + _INT_MASK;
+ ChP->IndexAddr = (DWordIO_t) AiopIO + _INDX_ADDR;
+ ChP->IndexData = AiopIO + _INDX_DATA;
+
+ /* Channel direct addresses */
+ ChIOOff = AiopIO + ChP->ChanNum * 2;
+ ChP->TxRxData = ChIOOff + _TD0;
+ ChP->ChanStat = ChIOOff + _CHN_STAT0;
+ ChP->TxRxCount = ChIOOff + _FIFO_CNT0;
+ ChP->IntID = (ByteIO_t) AiopIO + ChP->ChanNum + _INT_ID0;
+
+ /* Initialize the channel from the RData array */
+ for (i = 0; i < RDATASIZE; i += 4) {
+ R[0] = RData[i];
+ R[1] = RData[i + 1] + 0x10 * ChanNum;
+ R[2] = RData[i + 2];
+ R[3] = RData[i + 3];
+ out32(ChP->IndexAddr, R);
+ }
+
+ ChR = ChP->R;
+ for (i = 0; i < RREGDATASIZE; i += 4) {
+ ChR[i] = RRegData[i];
+ ChR[i + 1] = RRegData[i + 1] + 0x10 * ChanNum;
+ ChR[i + 2] = RRegData[i + 2];
+ ChR[i + 3] = RRegData[i + 3];
+ }
+
+ /* Indexed registers */
+ ChOff = (Word_t) ChanNum * 0x1000;
+
+ if (sClockPrescale == 0x14)
+ brd9600 = 47;
+ else
+ brd9600 = 23;
+
+ ChP->BaudDiv[0] = (Byte_t) (ChOff + _BAUD);
+ ChP->BaudDiv[1] = (Byte_t) ((ChOff + _BAUD) >> 8);
+ ChP->BaudDiv[2] = (Byte_t) brd9600;
+ ChP->BaudDiv[3] = (Byte_t) (brd9600 >> 8);
+ out32(ChP->IndexAddr, ChP->BaudDiv);
+
+ ChP->TxControl[0] = (Byte_t) (ChOff + _TX_CTRL);
+ ChP->TxControl[1] = (Byte_t) ((ChOff + _TX_CTRL) >> 8);
+ ChP->TxControl[2] = 0;
+ ChP->TxControl[3] = 0;
+ out32(ChP->IndexAddr, ChP->TxControl);
+
+ ChP->RxControl[0] = (Byte_t) (ChOff + _RX_CTRL);
+ ChP->RxControl[1] = (Byte_t) ((ChOff + _RX_CTRL) >> 8);
+ ChP->RxControl[2] = 0;
+ ChP->RxControl[3] = 0;
+ out32(ChP->IndexAddr, ChP->RxControl);
+
+ ChP->TxEnables[0] = (Byte_t) (ChOff + _TX_ENBLS);
+ ChP->TxEnables[1] = (Byte_t) ((ChOff + _TX_ENBLS) >> 8);
+ ChP->TxEnables[2] = 0;
+ ChP->TxEnables[3] = 0;
+ out32(ChP->IndexAddr, ChP->TxEnables);
+
+ ChP->TxCompare[0] = (Byte_t) (ChOff + _TXCMP1);
+ ChP->TxCompare[1] = (Byte_t) ((ChOff + _TXCMP1) >> 8);
+ ChP->TxCompare[2] = 0;
+ ChP->TxCompare[3] = 0;
+ out32(ChP->IndexAddr, ChP->TxCompare);
+
+ ChP->TxReplace1[0] = (Byte_t) (ChOff + _TXREP1B1);
+ ChP->TxReplace1[1] = (Byte_t) ((ChOff + _TXREP1B1) >> 8);
+ ChP->TxReplace1[2] = 0;
+ ChP->TxReplace1[3] = 0;
+ out32(ChP->IndexAddr, ChP->TxReplace1);
+
+ ChP->TxReplace2[0] = (Byte_t) (ChOff + _TXREP2);
+ ChP->TxReplace2[1] = (Byte_t) ((ChOff + _TXREP2) >> 8);
+ ChP->TxReplace2[2] = 0;
+ ChP->TxReplace2[3] = 0;
+ out32(ChP->IndexAddr, ChP->TxReplace2);
+
+ ChP->TxFIFOPtrs = ChOff + _TXF_OUTP;
+ ChP->TxFIFO = ChOff + _TX_FIFO;
+
+ sOutB(ChP->Cmd, (Byte_t) ChanNum | RESTXFCNT); /* apply reset Tx FIFO count */
+ sOutB(ChP->Cmd, (Byte_t) ChanNum); /* remove reset Tx FIFO count */
+ sOutW((WordIO_t) ChP->IndexAddr, ChP->TxFIFOPtrs); /* clear Tx in/out ptrs */
+ sOutW(ChP->IndexData, 0);
+ ChP->RxFIFOPtrs = ChOff + _RXF_OUTP;
+ ChP->RxFIFO = ChOff + _RX_FIFO;
+
+ sOutB(ChP->Cmd, (Byte_t) ChanNum | RESRXFCNT); /* apply reset Rx FIFO count */
+ sOutB(ChP->Cmd, (Byte_t) ChanNum); /* remove reset Rx FIFO count */
+ sOutW((WordIO_t) ChP->IndexAddr, ChP->RxFIFOPtrs); /* clear Rx out ptr */
+ sOutW(ChP->IndexData, 0);
+ sOutW((WordIO_t) ChP->IndexAddr, ChP->RxFIFOPtrs + 2); /* clear Rx in ptr */
+ sOutW(ChP->IndexData, 0);
+ ChP->TxPrioCnt = ChOff + _TXP_CNT;
+ sOutW((WordIO_t) ChP->IndexAddr, ChP->TxPrioCnt);
+ sOutB(ChP->IndexData, 0);
+ ChP->TxPrioPtr = ChOff + _TXP_PNTR;
+ sOutW((WordIO_t) ChP->IndexAddr, ChP->TxPrioPtr);
+ sOutB(ChP->IndexData, 0);
+ ChP->TxPrioBuf = ChOff + _TXP_BUF;
+ sEnRxProcessor(ChP); /* start the Rx processor */
+
+ return 1;
+}
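+/*
+ * Editorial sketch (not part of the driver): after the controller init
+ * routines above report how many AIOPs exist, every channel is set up
+ * with sInitChan() before it is used. The static channel array below
+ * is an assumption for the example.
+ */
+#if 0
+static CHANNEL_T example_chans[MAX_AIOPS_PER_BOARD][MAX_PORTS_PER_AIOP];
+
+static void example_init_all_channels(CONTROLLER_T *ctlp)
+{
+ int aiop, chan;
+
+ for (aiop = 0; aiop < ctlp->NumAiop; aiop++)
+ for (chan = 0; chan < sGetAiopNumChan(ctlp, aiop); chan++)
+ sInitChan(ctlp, &example_chans[aiop][chan], aiop, chan);
+}
+#endif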
+
+/***************************************************************************
+Function: sStopRxProcessor
+Purpose: Stop the receive processor from processing a channel.
+Call: sStopRxProcessor(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+
+Comments: The receive processor can be started again with sStartRxProcessor().
+ This function causes the receive processor to skip over the
+ stopped channel. It does not stop it from processing other channels.
+
+Warnings: No context switches are allowed while executing this function.
+
+ Do not leave the receive processor stopped for more than one
+ character time.
+
+ After calling this function a delay of 4 uS is required to ensure
+ that the receive processor is no longer processing this channel.
+*/
+static void sStopRxProcessor(CHANNEL_T * ChP)
+{
+ Byte_t R[4];
+
+ R[0] = ChP->R[0];
+ R[1] = ChP->R[1];
+ R[2] = 0x0a;
+ R[3] = ChP->R[3];
+ out32(ChP->IndexAddr, R);
+}
+
+/***************************************************************************
+Function: sFlushRxFIFO
+Purpose: Flush the Rx FIFO
+Call: sFlushRxFIFO(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Return: void
+Comments: To prevent data from being enqueued or dequeued in the Rx FIFO
+ while it is being flushed the Rx FIFO is temporarily disabled.
+ After this a 2 uS delay is done before clearing the pointers to
+ allow the receive processor to stop using the FIFO. These items
+ are handled inside this function.
+Warnings: No context switches are allowed while executing this function.
+*/
+static void sFlushRxFIFO(CHANNEL_T * ChP)
+{
+ int i;
+ Byte_t Ch; /* channel number within AIOP */
+ int RxFIFOEnabled; /* 1 if Rx FIFO enabled */
+
+ if (sGetRxCnt(ChP) == 0) /* Rx FIFO empty */
+ return; /* don't need to flush */
+
+ RxFIFOEnabled = 0;
+ if (ChP->R[0x32] == 0x08) { /* Rx FIFO is enabled */
+ RxFIFOEnabled = 1;
+ sDisRxFIFO(ChP); /* disable it */
+ for (i = 0; i < 2000 / 200; i++) /* delay 2 uS to allow proc to disable FIFO */
+ sInB(ChP->IntChan); /* depends on bus i/o timing */
+ }
+ sGetChanStatus(ChP); /* clear any pending Rx errors in chan stat */
+ Ch = (Byte_t) sGetChanNum(ChP);
+ sOutB(ChP->Cmd, Ch | RESRXFCNT); /* apply reset Rx FIFO count */
+ sOutB(ChP->Cmd, Ch); /* remove reset Rx FIFO count */
+ sOutW((WordIO_t) ChP->IndexAddr, ChP->RxFIFOPtrs); /* clear Rx out ptr */
+ sOutW(ChP->IndexData, 0);
+ sOutW((WordIO_t) ChP->IndexAddr, ChP->RxFIFOPtrs + 2); /* clear Rx in ptr */
+ sOutW(ChP->IndexData, 0);
+ if (RxFIFOEnabled)
+ sEnRxFIFO(ChP); /* enable Rx FIFO */
+}
+
+/***************************************************************************
+Function: sFlushTxFIFO
+Purpose: Flush the Tx FIFO
+Call: sFlushTxFIFO(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Return: void
+Comments: To prevent data from being enqueued or dequeued in the Tx FIFO
+ while it is being flushed the receive processor is stopped
+ and the transmitter is disabled. After these operations a
+ 4 uS delay is done before clearing the pointers to allow
+ the receive processor to stop. These items are handled inside
+ this function.
+Warnings: No context switches are allowed while executing this function.
+*/
+static void sFlushTxFIFO(CHANNEL_T * ChP)
+{
+ int i;
+ Byte_t Ch; /* channel number within AIOP */
+ int TxEnabled; /* 1 if transmitter enabled */
+
+ if (sGetTxCnt(ChP) == 0) /* Tx FIFO empty */
+ return; /* don't need to flush */
+
+ TxEnabled = 0;
+ if (ChP->TxControl[3] & TX_ENABLE) {
+ TxEnabled = 1;
+ sDisTransmit(ChP); /* disable transmitter */
+ }
+ sStopRxProcessor(ChP); /* stop Rx processor */
+ for (i = 0; i < 4000 / 200; i++) /* delay 4 uS to allow proc to stop */
+ sInB(ChP->IntChan); /* depends on bus i/o timing */
+ Ch = (Byte_t) sGetChanNum(ChP);
+ sOutB(ChP->Cmd, Ch | RESTXFCNT); /* apply reset Tx FIFO count */
+ sOutB(ChP->Cmd, Ch); /* remove reset Tx FIFO count */
+ sOutW((WordIO_t) ChP->IndexAddr, ChP->TxFIFOPtrs); /* clear Tx in/out ptrs */
+ sOutW(ChP->IndexData, 0);
+ if (TxEnabled)
+ sEnTransmit(ChP); /* enable transmitter */
+ sStartRxProcessor(ChP); /* restart Rx processor */
+}
+
+/***************************************************************************
+Function: sWriteTxPrioByte
+Purpose: Write a byte of priority transmit data to a channel
+Call: sWriteTxPrioByte(ChP,Data)
+ CHANNEL_T *ChP; Ptr to channel structure
+ Byte_t Data; The transmit data byte
+
+Return: int: 1 if the byte is successfully written, otherwise 0.
+
+Comments: The priority byte is transmitted before any data in the Tx FIFO.
+
+Warnings: No context switches are allowed while executing this function.
+*/
+static int sWriteTxPrioByte(CHANNEL_T * ChP, Byte_t Data)
+{
+ Byte_t DWBuf[4]; /* buffer for double word writes */
+ Word_t *WordPtr; /* must be far because Win SS != DS */
+ register DWordIO_t IndexAddr;
+
+ if (sGetTxCnt(ChP) > 1) { /* write it to Tx priority buffer */
+ IndexAddr = ChP->IndexAddr;
+ sOutW((WordIO_t) IndexAddr, ChP->TxPrioCnt); /* get priority buffer status */
+ if (sInB((ByteIO_t) ChP->IndexData) & PRI_PEND) /* priority buffer busy */
+ return (0); /* nothing sent */
+
+ WordPtr = (Word_t *) (&DWBuf[0]);
+ *WordPtr = ChP->TxPrioBuf; /* data byte address */
+
+ DWBuf[2] = Data; /* data byte value */
+ out32(IndexAddr, DWBuf); /* write it out */
+
+ *WordPtr = ChP->TxPrioCnt; /* Tx priority count address */
+
+ DWBuf[2] = PRI_PEND + 1; /* indicate 1 byte pending */
+ DWBuf[3] = 0; /* priority buffer pointer */
+ out32(IndexAddr, DWBuf); /* write it out */
+ } else { /* write it to Tx FIFO */
+
+ sWriteTxByte(sGetTxRxDataIO(ChP), Data);
+ }
+ return (1); /* 1 byte sent */
+}
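+/*
+ * Editorial sketch (not part of the driver): sWriteTxPrioByte() returns
+ * 0 when the priority buffer is still pending, so a caller retries or
+ * falls back. The XOFF character value used here is an assumption.
+ */
+#if 0
+static int example_send_xoff(CHANNEL_T *chp)
+{
+ Byte_t xoff = 0x13; /* assumed XOFF character (DC3) */
+
+ if (!sWriteTxPrioByte(chp, xoff))
+ return 0; /* priority buffer busy; try again later */
+ return 1;
+}
+#endif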
+
+/***************************************************************************
+Function: sEnInterrupts
+Purpose: Enable one or more interrupts for a channel
+Call: sEnInterrupts(ChP,Flags)
+ CHANNEL_T *ChP; Ptr to channel structure
+ Word_t Flags: Interrupt enable flags, can be any combination
+ of the following flags:
+ TXINT_EN: Interrupt on Tx FIFO empty
+ RXINT_EN: Interrupt on Rx FIFO at trigger level (see
+ sSetRxTrigger())
+ SRCINT_EN: Interrupt on SRC (Special Rx Condition)
+ MCINT_EN: Interrupt on modem input change
+ CHANINT_EN: Allow channel interrupt signal to the AIOP's
+ Interrupt Channel Register.
+Return: void
+Comments: If an interrupt enable flag is set in Flags, that interrupt will be
+ enabled. If an interrupt enable flag is not set in Flags, that
+ interrupt will not be changed. Interrupts can be disabled with
+ function sDisInterrupts().
+
+ This function sets the appropriate bit for the channel in the AIOP's
+ Interrupt Mask Register if the CHANINT_EN flag is set. This allows
+ this channel's bit to be set in the AIOP's Interrupt Channel Register.
+
+ Interrupts must also be globally enabled before channel interrupts
+ will be passed on to the host. This is done with function
+ sEnGlobalInt().
+
+ In some cases it may be desirable to disable interrupts globally but
+ enable channel interrupts. This would allow the global interrupt
+ status register to be used to determine which AIOPs need service.
+*/
+static void sEnInterrupts(CHANNEL_T * ChP, Word_t Flags)
+{
+ Byte_t Mask; /* Interrupt Mask Register */
+
+ ChP->RxControl[2] |=
+ ((Byte_t) Flags & (RXINT_EN | SRCINT_EN | MCINT_EN));
+
+ out32(ChP->IndexAddr, ChP->RxControl);
+
+ ChP->TxControl[2] |= ((Byte_t) Flags & TXINT_EN);
+
+ out32(ChP->IndexAddr, ChP->TxControl);
+
+ if (Flags & CHANINT_EN) {
+ Mask = sInB(ChP->IntMask) | sBitMapSetTbl[ChP->ChanNum];
+ sOutB(ChP->IntMask, Mask);
+ }
+}
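+/*
+ * Editorial sketch (not part of the driver): a typical open path arms
+ * the Rx trigger first, then enables the matching interrupts, ending
+ * with CHANINT_EN so the channel is reported in the AIOP's Interrupt
+ * Channel Register.
+ */
+#if 0
+static void example_arm_rx_interrupts(CHANNEL_T *chp)
+{
+ sSetRxTrigger(chp, TRIG_1);
+ sEnInterrupts(chp, RXINT_EN | SRCINT_EN | MCINT_EN | CHANINT_EN);
+}
+#endif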
+
+/***************************************************************************
+Function: sDisInterrupts
+Purpose: Disable one or more interrupts for a channel
+Call: sDisInterrupts(ChP,Flags)
+ CHANNEL_T *ChP; Ptr to channel structure
+ Word_t Flags: Interrupt flags, can be any combination
+ of the following flags:
+ TXINT_EN: Interrupt on Tx FIFO empty
+ RXINT_EN: Interrupt on Rx FIFO at trigger level (see
+ sSetRxTrigger())
+ SRCINT_EN: Interrupt on SRC (Special Rx Condition)
+ MCINT_EN: Interrupt on modem input change
+ CHANINT_EN: Disable channel interrupt signal to the
+ AIOP's Interrupt Channel Register.
+Return: void
+Comments: If an interrupt flag is set in Flags, that interrupt will be
+ disabled. If an interrupt flag is not set in Flags, that
+ interrupt will not be changed. Interrupts can be enabled with
+ function sEnInterrupts().
+
+ This function clears the appropriate bit for the channel in the AIOP's
+ Interrupt Mask Register if the CHANINT_EN flag is set. This blocks
+ this channel's bit from being set in the AIOP's Interrupt Channel
+ Register.
+*/
+static void sDisInterrupts(CHANNEL_T * ChP, Word_t Flags)
+{
+ Byte_t Mask; /* Interrupt Mask Register */
+
+ ChP->RxControl[2] &=
+ ~((Byte_t) Flags & (RXINT_EN | SRCINT_EN | MCINT_EN));
+ out32(ChP->IndexAddr, ChP->RxControl);
+ ChP->TxControl[2] &= ~((Byte_t) Flags & TXINT_EN);
+ out32(ChP->IndexAddr, ChP->TxControl);
+
+ if (Flags & CHANINT_EN) {
+ Mask = sInB(ChP->IntMask) & sBitMapClrTbl[ChP->ChanNum];
+ sOutB(ChP->IntMask, Mask);
+ }
+}
+
+static void sSetInterfaceMode(CHANNEL_T * ChP, Byte_t mode)
+{
+ sOutB(ChP->CtlP->AiopIO[2], (mode & 0x18) | ChP->ChanNum);
+}
+
+/*
+ * Not an official SSCI function, but how to reset RocketModems.
+ * ISA bus version
+ */
+static void sModemReset(CONTROLLER_T * CtlP, int chan, int on)
+{
+ ByteIO_t addr;
+ Byte_t val;
+
+ addr = CtlP->AiopIO[0] + 0x400;
+ val = sInB(CtlP->MReg3IO);
+ /* if AIOP[1] is not enabled, enable it */
+ if ((val & 2) == 0) {
+ val = sInB(CtlP->MReg2IO);
+ sOutB(CtlP->MReg2IO, (val & 0xfc) | (1 & 0x03));
+ sOutB(CtlP->MBaseIO, (unsigned char) (addr >> 6));
+ }
+
+ sEnAiop(CtlP, 1);
+ if (!on)
+ addr += 8;
+ sOutB(addr + chan, 0); /* apply or remove reset */
+ sDisAiop(CtlP, 1);
+}
+
+/*
+ * Not an official SSCI function, but how to reset RocketModems.
+ * PCI bus version
+ */
+static void sPCIModemReset(CONTROLLER_T * CtlP, int chan, int on)
+{
+ ByteIO_t addr;
+
+ addr = CtlP->AiopIO[0] + 0x40; /* 2nd AIOP */
+ if (!on)
+ addr += 8;
+ sOutB(addr + chan, 0); /* apply or remove reset */
+}
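+/*
+ * Editorial sketch (not part of the driver): the reset is applied, held
+ * briefly, then removed. The 500 ms hold time is an assumption for the
+ * example; the driver performs this sequence during board setup.
+ */
+#if 0
+static void example_reset_pci_modem(CONTROLLER_T *ctlp, int chan)
+{
+ sPCIModemReset(ctlp, chan, 1); /* assert reset */
+ msleep(500); /* assumed hold time */
+ sPCIModemReset(ctlp, chan, 0); /* release reset */
+}
+#endif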
+
+/* Resets the speaker controller on RocketModem II and III devices */
+static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model)
+{
+ ByteIO_t addr;
+
+ /* RocketModem II speaker control is at the 8th port location of offset 0x40 */
+ if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) {
+ addr = CtlP->AiopIO[0] + 0x4F;
+ sOutB(addr, 0);
+ }
+
+ /* RocketModem III speaker control is at the 1st port location of offset 0x80 */
+ if ((model == MODEL_UPCI_RM3_8PORT)
+ || (model == MODEL_UPCI_RM3_4PORT)) {
+ addr = CtlP->AiopIO[0] + 0x88;
+ sOutB(addr, 0);
+ }
+}
+
+/* Returns the line number given the controller (board), aiop and channel number */
+static unsigned char GetLineNumber(int ctrl, int aiop, int ch)
+{
+ return lineNumbers[(ctrl << 5) | (aiop << 3) | ch];
+}
+
+/*
+ * Stores the line number associated with a given controller (board), aiop
+ * and channel number.
+ * Returns: The line number assigned
+ */
+static unsigned char SetLineNumber(int ctrl, int aiop, int ch)
+{
+ lineNumbers[(ctrl << 5) | (aiop << 3) | ch] = nextLineNumber++;
+ return (nextLineNumber - 1);
+}
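+/*
+ * Editorial note: the lookup index packs the coordinates as
+ * (ctrl << 5) | (aiop << 3) | ch, i.e. three bits of channel, two bits
+ * of AIOP and the controller number above that. For example controller
+ * 1, AIOP 2, channel 5 maps to (1 << 5) | (2 << 3) | 5 = 53.
+ */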
diff --git a/drivers/tty/rocket.h b/drivers/tty/rocket.h
new file mode 100644
index 00000000..ec863f35
--- /dev/null
+++ b/drivers/tty/rocket.h
@@ -0,0 +1,111 @@
+/*
+ * rocket.h --- the exported interface of the rocket driver to its configuration program.
+ *
+ * Written by Theodore Ts'o, Copyright 1997.
+ * Copyright 1997 Comtrol Corporation.
+ *
+ */
+
+/* Model Information Struct */
+typedef struct {
+ unsigned long model;
+ char modelString[80];
+ unsigned long numPorts;
+ int loadrm2;
+ int startingPortNumber;
+} rocketModel_t;
+
+struct rocket_config {
+ int line;
+ int flags;
+ int closing_wait;
+ int close_delay;
+ int port;
+ int reserved[32];
+};
+
+struct rocket_ports {
+ int tty_major;
+ int callout_major;
+ rocketModel_t rocketModel[8];
+};
+
+struct rocket_version {
+ char rocket_version[32];
+ char rocket_date[32];
+ char reserved[64];
+};
+
+/*
+ * Rocketport flags
+ */
+/*#define ROCKET_CALLOUT_NOHUP 0x00000001 */
+#define ROCKET_FORCE_CD 0x00000002
+#define ROCKET_HUP_NOTIFY 0x00000004
+#define ROCKET_SPLIT_TERMIOS 0x00000008
+#define ROCKET_SPD_MASK 0x00000070
+#define ROCKET_SPD_HI 0x00000010 /* Use 56000 instead of 38400 bps */
+#define ROCKET_SPD_VHI 0x00000020 /* Use 115200 instead of 38400 bps */
+#define ROCKET_SPD_SHI 0x00000030 /* Use 230400 instead of 38400 bps */
+#define ROCKET_SPD_WARP 0x00000040 /* Use 460800 instead of 38400 bps */
+#define ROCKET_SAK 0x00000080
+#define ROCKET_SESSION_LOCKOUT 0x00000100
+#define ROCKET_PGRP_LOCKOUT 0x00000200
+#define ROCKET_RTS_TOGGLE 0x00000400
+#define ROCKET_MODE_MASK 0x00003000
+#define ROCKET_MODE_RS232 0x00000000
+#define ROCKET_MODE_RS485 0x00001000
+#define ROCKET_MODE_RS422 0x00002000
+#define ROCKET_FLAGS 0x00003FFF
+
+#define ROCKET_USR_MASK 0x0071 /* Legal flags that non-privileged
+ * users can set or reset */
+
+/*
+ * For closing_wait and closing_wait2
+ */
+#define ROCKET_CLOSING_WAIT_NONE ASYNC_CLOSING_WAIT_NONE
+#define ROCKET_CLOSING_WAIT_INF ASYNC_CLOSING_WAIT_INF
+
+/*
+ * Rocketport ioctls -- "RP"
+ */
+#define RCKP_GET_STRUCT 0x00525001
+#define RCKP_GET_CONFIG 0x00525002
+#define RCKP_SET_CONFIG 0x00525003
+#define RCKP_GET_PORTS 0x00525004
+#define RCKP_RESET_RM2 0x00525005
+#define RCKP_GET_VERSION 0x00525006
+
+/* Rocketport Models */
+#define MODEL_RP32INTF 0x0001 /* RP 32 port w/external I/F */
+#define MODEL_RP8INTF 0x0002 /* RP 8 port w/external I/F */
+#define MODEL_RP16INTF 0x0003 /* RP 16 port w/external I/F */
+#define MODEL_RP8OCTA 0x0005 /* RP 8 port w/octa cable */
+#define MODEL_RP4QUAD 0x0004 /* RP 4 port w/quad cable */
+#define MODEL_RP8J 0x0006 /* RP 8 port w/RJ11 connectors */
+#define MODEL_RP4J 0x0007 /* RP 4 port w/RJ45 connectors */
+#define MODEL_RP8SNI 0x0008 /* RP 8 port w/ DB78 SNI connector */
+#define MODEL_RP16SNI 0x0009 /* RP 16 port w/ DB78 SNI connector */
+#define MODEL_RPP4 0x000A /* RP Plus 4 port */
+#define MODEL_RPP8 0x000B /* RP Plus 8 port */
+#define MODEL_RP2_232 0x000E /* RP Plus 2 port RS232 */
+#define MODEL_RP2_422 0x000F /* RP Plus 2 port RS422 */
+
+/* Rocketmodem II Models */
+#define MODEL_RP6M 0x000C /* RM 6 port */
+#define MODEL_RP4M 0x000D /* RM 4 port */
+
+/* Universal PCI boards */
+#define MODEL_UPCI_RP32INTF 0x0801 /* RP UPCI 32 port w/external I/F */
+#define MODEL_UPCI_RP8INTF 0x0802 /* RP UPCI 8 port w/external I/F */
+#define MODEL_UPCI_RP16INTF 0x0803 /* RP UPCI 16 port w/external I/F */
+#define MODEL_UPCI_RP8OCTA 0x0805 /* RP UPCI 8 port w/octa cable */
+#define MODEL_UPCI_RM3_8PORT 0x080C /* RP UPCI Rocketmodem III 8 port */
+#define MODEL_UPCI_RM3_4PORT 0x080C /* RP UPCI Rocketmodem III 4 port */
+
+/* Compact PCI 16 port */
+#define MODEL_CPCI_RP16INTF 0x0903 /* RP Compact PCI 16 port w/external I/F */
+
+/* All ISA boards */
+#define MODEL_ISA 0x1000
diff --git a/drivers/tty/rocket_int.h b/drivers/tty/rocket_int.h
new file mode 100644
index 00000000..67e0f1e7
--- /dev/null
+++ b/drivers/tty/rocket_int.h
@@ -0,0 +1,1214 @@
+/*
+ * rocket_int.h --- internal header file for rocket.c
+ *
+ * Written by Theodore Ts'o, Copyright 1997.
+ * Copyright 1997 Comtrol Corporation.
+ *
+ */
+
+/*
+ * Definition of the types in rcktpt_type
+ */
+#define ROCKET_TYPE_NORMAL 0
+#define ROCKET_TYPE_MODEM 1
+#define ROCKET_TYPE_MODEMII 2
+#define ROCKET_TYPE_MODEMIII 3
+#define ROCKET_TYPE_PC104 4
+
+#include <linux/mutex.h>
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+typedef unsigned char Byte_t;
+typedef unsigned int ByteIO_t;
+
+typedef unsigned int Word_t;
+typedef unsigned int WordIO_t;
+
+typedef unsigned int DWordIO_t;
+
+/*
+ * Note! Normally the Linux I/O macros already take care of
+ * byte-swapping the I/O instructions. However, accesses through
+ * out32() aren't really 32-bit accesses, but should be handled in
+ * byte order. Hence the use of the get_unaligned_le32() helper to
+ * assemble the value and no-op the byte swapping done by the
+ * big-endian outl() instruction.
+ */
+
+static inline void sOutB(unsigned short port, unsigned char value)
+{
+#ifdef ROCKET_DEBUG_IO
+ printk(KERN_DEBUG "sOutB(%x, %x)...\n", port, value);
+#endif
+ outb_p(value, port);
+}
+
+static inline void sOutW(unsigned short port, unsigned short value)
+{
+#ifdef ROCKET_DEBUG_IO
+ printk(KERN_DEBUG "sOutW(%x, %x)...\n", port, value);
+#endif
+ outw_p(value, port);
+}
+
+static inline void out32(unsigned short port, Byte_t *p)
+{
+ u32 value = get_unaligned_le32(p);
+#ifdef ROCKET_DEBUG_IO
+ printk(KERN_DEBUG "out32(%x, %lx)...\n", port, value);
+#endif
+ outl_p(value, port);
+}
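+/*
+ * Editorial note: the 4-byte register images used by the driver
+ * (BaudDiv, TxControl, RxControl, ...) carry a little-endian index
+ * address in bytes 0-1 and the data in bytes 2-3; out32() folds the
+ * image into a single 32-bit write to the AIOP's _INDX_ADDR register,
+ * presenting address and data in one bus access.
+ */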
+
+static inline unsigned char sInB(unsigned short port)
+{
+ return inb_p(port);
+}
+
+static inline unsigned short sInW(unsigned short port)
+{
+ return inw_p(port);
+}
+
+/* This is used to move arrays of bytes so byte swapping isn't appropriate. */
+#define sOutStrW(port, addr, count) if (count) outsw(port, addr, count)
+#define sInStrW(port, addr, count) if (count) insw(port, addr, count)
+
+#define CTL_SIZE 8
+#define AIOP_CTL_SIZE 4
+#define CHAN_AIOP_SIZE 8
+#define MAX_PORTS_PER_AIOP 8
+#define MAX_AIOPS_PER_BOARD 4
+#define MAX_PORTS_PER_BOARD 32
+
+/* Bus type ID */
+#define isISA 0
+#define isPCI 1
+#define isMC 2
+
+/* Controller ID numbers */
+#define CTLID_NULL -1 /* no controller exists */
+#define CTLID_0001 0x0001 /* controller release 1 */
+
+/* AIOP ID numbers, identifies AIOP type implementing channel */
+#define AIOPID_NULL -1 /* no AIOP or channel exists */
+#define AIOPID_0001 0x0001 /* AIOP release 1 */
+
+/************************************************************************
+ Global Register Offsets - Direct Access - Fixed values
+************************************************************************/
+
+#define _CMD_REG 0x38 /* Command Register 8 Write */
+#define _INT_CHAN 0x39 /* Interrupt Channel Register 8 Read */
+#define _INT_MASK 0x3A /* Interrupt Mask Register 8 Read / Write */
+#define _UNUSED 0x3B /* Unused 8 */
+#define _INDX_ADDR 0x3C /* Index Register Address 16 Write */
+#define _INDX_DATA 0x3E /* Index Register Data 8/16 Read / Write */
+
+/************************************************************************
+ Channel Register Offsets for 1st channel in AIOP - Direct Access
+************************************************************************/
+#define _TD0 0x00 /* Transmit Data 16 Write */
+#define _RD0 0x00 /* Receive Data 16 Read */
+#define _CHN_STAT0 0x20 /* Channel Status 8/16 Read / Write */
+#define _FIFO_CNT0 0x10 /* Transmit/Receive FIFO Count 16 Read */
+#define _INT_ID0 0x30 /* Interrupt Identification 8 Read */
+
+/************************************************************************
+ Tx Control Register Offsets - Indexed - External - Fixed
+************************************************************************/
+#define _TX_ENBLS 0x980 /* Tx Processor Enables Register 8 Read / Write */
+#define _TXCMP1 0x988 /* Transmit Compare Value #1 8 Read / Write */
+#define _TXCMP2 0x989 /* Transmit Compare Value #2 8 Read / Write */
+#define _TXREP1B1 0x98A /* Tx Replace Value #1 - Byte 1 8 Read / Write */
+#define _TXREP1B2 0x98B /* Tx Replace Value #1 - Byte 2 8 Read / Write */
+#define _TXREP2 0x98C /* Transmit Replace Value #2 8 Read / Write */
+
+/************************************************************************
+Memory Controller Register Offsets - Indexed - External - Fixed
+************************************************************************/
+#define _RX_FIFO 0x000 /* Rx FIFO */
+#define _TX_FIFO 0x800 /* Tx FIFO */
+#define _RXF_OUTP 0x990 /* Rx FIFO OUT pointer 16 Read / Write */
+#define _RXF_INP 0x992 /* Rx FIFO IN pointer 16 Read / Write */
+#define _TXF_OUTP 0x994 /* Tx FIFO OUT pointer 8 Read / Write */
+#define _TXF_INP 0x995 /* Tx FIFO IN pointer 8 Read / Write */
+#define _TXP_CNT 0x996 /* Tx Priority Count 8 Read / Write */
+#define _TXP_PNTR 0x997 /* Tx Priority Pointer 8 Read / Write */
+
+#define PRI_PEND 0x80 /* Priority data pending (bit7, Tx pri cnt) */
+#define TXFIFO_SIZE 255 /* size of Tx FIFO */
+#define RXFIFO_SIZE 1023 /* size of Rx FIFO */
+
+/************************************************************************
+Tx Priority Buffer - Indexed - External - Fixed
+************************************************************************/
+#define _TXP_BUF 0x9C0 /* Tx Priority Buffer 32 Bytes Read / Write */
+#define TXP_SIZE 0x20 /* 32 bytes */
+
+/************************************************************************
+Channel Register Offsets - Indexed - Internal - Fixed
+************************************************************************/
+
+#define _TX_CTRL 0xFF0 /* Transmit Control 16 Write */
+#define _RX_CTRL 0xFF2 /* Receive Control 8 Write */
+#define _BAUD 0xFF4 /* Baud Rate 16 Write */
+#define _CLK_PRE 0xFF6 /* Clock Prescaler 8 Write */
+
+#define STMBREAK 0x08 /* BREAK */
+#define STMFRAME 0x04 /* framing error */
+#define STMRCVROVR 0x02 /* receiver over run error */
+#define STMPARITY 0x01 /* parity error */
+#define STMERROR (STMBREAK | STMFRAME | STMPARITY)
+#define STMBREAKH 0x800 /* BREAK */
+#define STMFRAMEH 0x400 /* framing error */
+#define STMRCVROVRH 0x200 /* receiver over run error */
+#define STMPARITYH 0x100 /* parity error */
+#define STMERRORH (STMBREAKH | STMFRAMEH | STMPARITYH)
+
+#define CTS_ACT 0x20 /* CTS input asserted */
+#define DSR_ACT 0x10 /* DSR input asserted */
+#define CD_ACT 0x08 /* CD input asserted */
+#define TXFIFOMT 0x04 /* Tx FIFO is empty */
+#define TXSHRMT 0x02 /* Tx shift register is empty */
+#define RDA 0x01 /* Rx data available */
+#define DRAINED (TXFIFOMT | TXSHRMT) /* indicates Tx is drained */
+
+#define STATMODE 0x8000 /* status mode enable bit */
+#define RXFOVERFL 0x2000 /* receive FIFO overflow */
+#define RX2MATCH 0x1000 /* receive compare byte 2 match */
+#define RX1MATCH 0x0800 /* receive compare byte 1 match */
+#define RXBREAK 0x0400 /* received BREAK */
+#define RXFRAME 0x0200 /* received framing error */
+#define RXPARITY 0x0100 /* received parity error */
+#define STATERROR (RXBREAK | RXFRAME | RXPARITY)
+
+#define CTSFC_EN 0x80 /* CTS flow control enable bit */
+#define RTSTOG_EN 0x40 /* RTS toggle enable bit */
+#define TXINT_EN 0x10 /* transmit interrupt enable */
+#define STOP2 0x08 /* enable 2 stop bits (0 = 1 stop) */
+#define PARITY_EN 0x04 /* enable parity (0 = no parity) */
+#define EVEN_PAR 0x02 /* even parity (0 = odd parity) */
+#define DATA8BIT 0x01 /* 8 bit data (0 = 7 bit data) */
+
+#define SETBREAK 0x10 /* send break condition (must clear) */
+#define LOCALLOOP 0x08 /* local loopback set for test */
+#define SET_DTR 0x04 /* assert DTR */
+#define SET_RTS 0x02 /* assert RTS */
+#define TX_ENABLE 0x01 /* enable transmitter */
+
+#define RTSFC_EN 0x40 /* RTS flow control enable */
+#define RXPROC_EN 0x20 /* receive processor enable */
+#define TRIG_NO 0x00 /* Rx FIFO trigger level 0 (no trigger) */
+#define TRIG_1 0x08 /* trigger level 1 char */
+#define TRIG_1_2 0x10 /* trigger level 1/2 */
+#define TRIG_7_8 0x18 /* trigger level 7/8 */
+#define TRIG_MASK 0x18 /* trigger level mask */
+#define SRCINT_EN 0x04 /* special Rx condition interrupt enable */
+#define RXINT_EN 0x02 /* Rx interrupt enable */
+#define MCINT_EN 0x01 /* modem change interrupt enable */
+
+#define RXF_TRIG 0x20 /* Rx FIFO trigger level interrupt */
+#define TXFIFO_MT 0x10 /* Tx FIFO empty interrupt */
+#define SRC_INT 0x08 /* special receive condition interrupt */
+#define DELTA_CD 0x04 /* CD change interrupt */
+#define DELTA_CTS 0x02 /* CTS change interrupt */
+#define DELTA_DSR 0x01 /* DSR change interrupt */
+
+#define REP1W2_EN 0x10 /* replace byte 1 with 2 bytes enable */
+#define IGN2_EN 0x08 /* ignore byte 2 enable */
+#define IGN1_EN 0x04 /* ignore byte 1 enable */
+#define COMP2_EN 0x02 /* compare byte 2 enable */
+#define COMP1_EN 0x01 /* compare byte 1 enable */
+
+#define RESET_ALL 0x80 /* reset AIOP (all channels) */
+#define TXOVERIDE 0x40 /* Transmit software off override */
+#define RESETUART 0x20 /* reset channel's UART */
+#define RESTXFCNT 0x10 /* reset channel's Tx FIFO count register */
+#define RESRXFCNT 0x08 /* reset channel's Rx FIFO count register */
+
+#define INTSTAT0 0x01 /* AIOP 0 interrupt status */
+#define INTSTAT1 0x02 /* AIOP 1 interrupt status */
+#define INTSTAT2 0x04 /* AIOP 2 interrupt status */
+#define INTSTAT3 0x08 /* AIOP 3 interrupt status */
+
+#define INTR_EN 0x08 /* allow interrupts to host */
+#define INT_STROB 0x04 /* strobe and clear interrupt line (EOI) */
+
+/**************************************************************************
+ MUDBAC remapped for PCI
+**************************************************************************/
+
+#define _CFG_INT_PCI 0x40
+#define _PCI_INT_FUNC 0x3A
+
+#define PCI_STROB 0x2000 /* bit 13 of int aiop register */
+#define INTR_EN_PCI 0x0010 /* allow interrupts to host */
+
+/*
+ * Definitions for Universal PCI board registers
+ */
+#define _PCI_9030_INT_CTRL 0x4c /* Offsets from BAR1 */
+#define _PCI_9030_GPIO_CTRL 0x54
+#define PCI_INT_CTRL_AIOP 0x0001
+#define PCI_GPIO_CTRL_8PORT 0x4000
+#define _PCI_9030_RING_IND 0xc0 /* Offsets from BAR1 */
+
+#define CHAN3_EN 0x08 /* enable AIOP 3 */
+#define CHAN2_EN 0x04 /* enable AIOP 2 */
+#define CHAN1_EN 0x02 /* enable AIOP 1 */
+#define CHAN0_EN 0x01 /* enable AIOP 0 */
+#define FREQ_DIS 0x00
+#define FREQ_274HZ 0x60
+#define FREQ_137HZ 0x50
+#define FREQ_69HZ 0x40
+#define FREQ_34HZ 0x30
+#define FREQ_17HZ 0x20
+#define FREQ_9HZ 0x10
+#define PERIODIC_ONLY 0x80 /* only PERIODIC interrupt */
+
+#define CHANINT_EN 0x0100 /* flags to enable/disable channel ints */
+
+#define RDATASIZE 72
+#define RREGDATASIZE 52
+
+/*
+ * AIOP interrupt bits for ISA/PCI boards and UPCI boards.
+ */
+#define AIOP_INTR_BIT_0 0x0001
+#define AIOP_INTR_BIT_1 0x0002
+#define AIOP_INTR_BIT_2 0x0004
+#define AIOP_INTR_BIT_3 0x0008
+
+#define AIOP_INTR_BITS ( \
+ AIOP_INTR_BIT_0 \
+ | AIOP_INTR_BIT_1 \
+ | AIOP_INTR_BIT_2 \
+ | AIOP_INTR_BIT_3)
+
+#define UPCI_AIOP_INTR_BIT_0 0x0004
+#define UPCI_AIOP_INTR_BIT_1 0x0020
+#define UPCI_AIOP_INTR_BIT_2 0x0100
+#define UPCI_AIOP_INTR_BIT_3 0x0800
+
+#define UPCI_AIOP_INTR_BITS ( \
+ UPCI_AIOP_INTR_BIT_0 \
+ | UPCI_AIOP_INTR_BIT_1 \
+ | UPCI_AIOP_INTR_BIT_2 \
+ | UPCI_AIOP_INTR_BIT_3)
+
+/* Controller level information structure */
+typedef struct {
+ int CtlID;
+ int CtlNum;
+ int BusType;
+ int boardType;
+ int isUPCI;
+ WordIO_t PCIIO;
+ WordIO_t PCIIO2;
+ ByteIO_t MBaseIO;
+ ByteIO_t MReg1IO;
+ ByteIO_t MReg2IO;
+ ByteIO_t MReg3IO;
+ Byte_t MReg2;
+ Byte_t MReg3;
+ int NumAiop;
+ int AltChanRingIndicator;
+ ByteIO_t UPCIRingInd;
+ WordIO_t AiopIO[AIOP_CTL_SIZE];
+ ByteIO_t AiopIntChanIO[AIOP_CTL_SIZE];
+ int AiopID[AIOP_CTL_SIZE];
+ int AiopNumChan[AIOP_CTL_SIZE];
+ Word_t *AiopIntrBits;
+} CONTROLLER_T;
+
+typedef CONTROLLER_T CONTROLLER_t;
+
+/* Channel level information structure */
+typedef struct {
+ CONTROLLER_T *CtlP;
+ int AiopNum;
+ int ChanID;
+ int ChanNum;
+ int rtsToggle;
+
+ ByteIO_t Cmd;
+ ByteIO_t IntChan;
+ ByteIO_t IntMask;
+ DWordIO_t IndexAddr;
+ WordIO_t IndexData;
+
+ WordIO_t TxRxData;
+ WordIO_t ChanStat;
+ WordIO_t TxRxCount;
+ ByteIO_t IntID;
+
+ Word_t TxFIFO;
+ Word_t TxFIFOPtrs;
+ Word_t RxFIFO;
+ Word_t RxFIFOPtrs;
+ Word_t TxPrioCnt;
+ Word_t TxPrioPtr;
+ Word_t TxPrioBuf;
+
+ Byte_t R[RREGDATASIZE];
+
+ Byte_t BaudDiv[4];
+ Byte_t TxControl[4];
+ Byte_t RxControl[4];
+ Byte_t TxEnables[4];
+ Byte_t TxCompare[4];
+ Byte_t TxReplace1[4];
+ Byte_t TxReplace2[4];
+} CHANNEL_T;
+
+typedef CHANNEL_T CHANNEL_t;
+typedef CHANNEL_T *CHANPTR_T;
+
+#define InterfaceModeRS232 0x00
+#define InterfaceModeRS422 0x08
+#define InterfaceModeRS485 0x10
+#define InterfaceModeRS232T 0x18
+
+/***************************************************************************
+Function: sClrBreak
+Purpose: Stop sending a transmit BREAK signal
+Call: sClrBreak(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sClrBreak(ChP) \
+do { \
+ (ChP)->TxControl[3] &= ~SETBREAK; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sClrDTR
+Purpose: Clr the DTR output
+Call: sClrDTR(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sClrDTR(ChP) \
+do { \
+ (ChP)->TxControl[3] &= ~SET_DTR; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sClrRTS
+Purpose: Clr the RTS output
+Call: sClrRTS(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sClrRTS(ChP) \
+do { \
+ if ((ChP)->rtsToggle) break; \
+ (ChP)->TxControl[3] &= ~SET_RTS; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sClrTxXOFF
+Purpose: Clear any existing transmit software flow control off condition
+Call: sClrTxXOFF(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sClrTxXOFF(ChP) \
+do { \
+ sOutB((ChP)->Cmd,TXOVERIDE | (Byte_t)(ChP)->ChanNum); \
+ sOutB((ChP)->Cmd,(Byte_t)(ChP)->ChanNum); \
+} while (0)
+
+/***************************************************************************
+Function: sCtlNumToCtlPtr
+Purpose: Convert a controller number to controller structure pointer
+Call: sCtlNumToCtlPtr(CtlNum)
+ int CtlNum; Controller number
+Return: CONTROLLER_T *: Ptr to controller structure
+*/
+#define sCtlNumToCtlPtr(CTLNUM) &sController[CTLNUM]
+
+/***************************************************************************
+Function: sControllerEOI
+Purpose: Strobe the MUDBAC's End Of Interrupt bit.
+Call: sControllerEOI(CtlP)
+ CONTROLLER_T *CtlP; Ptr to controller structure
+*/
+#define sControllerEOI(CTLP) sOutB((CTLP)->MReg2IO,(CTLP)->MReg2 | INT_STROB)
+
+/***************************************************************************
+Function: sPCIControllerEOI
+Purpose: Strobe the PCI End Of Interrupt bit.
+ For the UPCI boards, toggle the AIOP interrupt enable bit
+ (this was taken from the Windows driver).
+Call: sPCIControllerEOI(CtlP)
+ CONTROLLER_T *CtlP; Ptr to controller structure
+*/
+#define sPCIControllerEOI(CTLP) \
+do { \
+ if ((CTLP)->isUPCI) { \
+ Word_t w = sInW((CTLP)->PCIIO); \
+ sOutW((CTLP)->PCIIO, (w ^ PCI_INT_CTRL_AIOP)); \
+ sOutW((CTLP)->PCIIO, w); \
+ } \
+ else { \
+ sOutW((CTLP)->PCIIO, PCI_STROB); \
+ } \
+} while (0)
+
+/***************************************************************************
+Function: sDisAiop
+Purpose: Disable I/O access to an AIOP
+Call: sDisAiop(CtlP,AiopNum)
+ CONTROLLER_T *CtlP; Ptr to controller structure
+ int AiopNum; Number of AIOP on controller
+*/
+#define sDisAiop(CTLP,AIOPNUM) \
+do { \
+ (CTLP)->MReg3 &= sBitMapClrTbl[AIOPNUM]; \
+ sOutB((CTLP)->MReg3IO,(CTLP)->MReg3); \
+} while (0)
+
+/***************************************************************************
+Function: sDisCTSFlowCtl
+Purpose: Disable output flow control using CTS
+Call: sDisCTSFlowCtl(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sDisCTSFlowCtl(ChP) \
+do { \
+ (ChP)->TxControl[2] &= ~CTSFC_EN; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sDisIXANY
+Purpose: Disable IXANY Software Flow Control
+Call: sDisIXANY(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sDisIXANY(ChP) \
+do { \
+ (ChP)->R[0x0e] = 0x86; \
+ out32((ChP)->IndexAddr,&(ChP)->R[0x0c]); \
+} while (0)
+
+/***************************************************************************
+Function: sDisParity
+Purpose: Disable parity
+Call: sDisParity(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Comments: Function sSetParity() can be used in place of functions sEnParity(),
+ sDisParity(), sSetOddParity(), and sSetEvenParity().
+*/
+#define sDisParity(ChP) \
+do { \
+ (ChP)->TxControl[2] &= ~PARITY_EN; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sDisRTSToggle
+Purpose: Disable RTS toggle
+Call: sDisRTSToggle(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sDisRTSToggle(ChP) \
+do { \
+ (ChP)->TxControl[2] &= ~RTSTOG_EN; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+ (ChP)->rtsToggle = 0; \
+} while (0)
+
+/***************************************************************************
+Function: sDisRxFIFO
+Purpose: Disable Rx FIFO
+Call: sDisRxFIFO(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sDisRxFIFO(ChP) \
+do { \
+ (ChP)->R[0x32] = 0x0a; \
+ out32((ChP)->IndexAddr,&(ChP)->R[0x30]); \
+} while (0)
+
+/***************************************************************************
+Function: sDisRxStatusMode
+Purpose: Disable the Rx status mode
+Call: sDisRxStatusMode(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Comments: This takes the channel out of the receive status mode. All
+ subsequent reads of receive data using sReadRxWord() will return
+ two data bytes.
+*/
+#define sDisRxStatusMode(ChP) sOutW((ChP)->ChanStat,0)
+
+/***************************************************************************
+Function: sDisTransmit
+Purpose: Disable transmit
+Call: sDisTransmit(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+ This disables movement of Tx data from the Tx FIFO into the 1 byte
+ Tx buffer. Therefore there could be up to a 2 byte latency
+ between the time sDisTransmit() is called and the transmit buffer
+ and transmit shift register going completely empty.
+*/
+#define sDisTransmit(ChP) \
+do { \
+ (ChP)->TxControl[3] &= ~TX_ENABLE; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sDisTxSoftFlowCtl
+Purpose: Disable Tx Software Flow Control
+Call: sDisTxSoftFlowCtl(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sDisTxSoftFlowCtl(ChP) \
+do { \
+ (ChP)->R[0x06] = 0x8a; \
+ out32((ChP)->IndexAddr,&(ChP)->R[0x04]); \
+} while (0)
+
+/***************************************************************************
+Function: sEnAiop
+Purpose: Enable I/O access to an AIOP
+Call: sEnAiop(CtlP,AiopNum)
+ CONTROLLER_T *CtlP; Ptr to controller structure
+ int AiopNum; Number of AIOP on controller
+*/
+#define sEnAiop(CTLP,AIOPNUM) \
+do { \
+ (CTLP)->MReg3 |= sBitMapSetTbl[AIOPNUM]; \
+ sOutB((CTLP)->MReg3IO,(CTLP)->MReg3); \
+} while (0)
+
+/***************************************************************************
+Function: sEnCTSFlowCtl
+Purpose: Enable output flow control using CTS
+Call: sEnCTSFlowCtl(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sEnCTSFlowCtl(ChP) \
+do { \
+ (ChP)->TxControl[2] |= CTSFC_EN; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sEnIXANY
+Purpose: Enable IXANY Software Flow Control
+Call: sEnIXANY(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sEnIXANY(ChP) \
+do { \
+ (ChP)->R[0x0e] = 0x21; \
+ out32((ChP)->IndexAddr,&(ChP)->R[0x0c]); \
+} while (0)
+
+/***************************************************************************
+Function: sEnParity
+Purpose: Enable parity
+Call: sEnParity(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Comments: Function sSetParity() can be used in place of functions sEnParity(),
+ sDisParity(), sSetOddParity(), and sSetEvenParity().
+
+Warnings: Before enabling parity odd or even parity should be chosen using
+ functions sSetOddParity() or sSetEvenParity().
+*/
+#define sEnParity(ChP) \
+do { \
+ (ChP)->TxControl[2] |= PARITY_EN; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sEnRTSToggle
+Purpose: Enable RTS toggle
+Call: sEnRTSToggle(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Comments: This function will disable RTS flow control and clear the RTS
+ line to allow operation of RTS toggle.
+*/
+#define sEnRTSToggle(ChP) \
+do { \
+ (ChP)->RxControl[2] &= ~RTSFC_EN; \
+ out32((ChP)->IndexAddr,(ChP)->RxControl); \
+ (ChP)->TxControl[2] |= RTSTOG_EN; \
+ (ChP)->TxControl[3] &= ~SET_RTS; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+ (ChP)->rtsToggle = 1; \
+} while (0)
+
+/***************************************************************************
+Function: sEnRxFIFO
+Purpose: Enable Rx FIFO
+Call: sEnRxFIFO(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sEnRxFIFO(ChP) \
+do { \
+ (ChP)->R[0x32] = 0x08; \
+ out32((ChP)->IndexAddr,&(ChP)->R[0x30]); \
+} while (0)
+
+/***************************************************************************
+Function: sEnRxProcessor
+Purpose: Enable the receive processor
+Call: sEnRxProcessor(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Comments: This function is used to start the receive processor. When
+ the channel is in the reset state the receive processor is not
+ running. This is done to prevent the receive processor from
+ executing invalid microcode instructions prior to the
+ downloading of the microcode.
+
+Warnings: This function must be called after valid microcode has been
+ downloaded to the AIOP, and it must not be called before the
+ microcode has been downloaded.
+*/
+#define sEnRxProcessor(ChP) \
+do { \
+ (ChP)->RxControl[2] |= RXPROC_EN; \
+ out32((ChP)->IndexAddr,(ChP)->RxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sEnRxStatusMode
+Purpose: Enable the Rx status mode
+Call: sEnRxStatusMode(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Comments: This places the channel in the receive status mode. All subsequent
+ reads of receive data using sReadRxWord() will return a data byte
+ in the low word and a status byte in the high word.
+
+*/
+#define sEnRxStatusMode(ChP) sOutW((ChP)->ChanStat,STATMODE)
+
+/***************************************************************************
+Function: sEnTransmit
+Purpose: Enable transmit
+Call: sEnTransmit(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sEnTransmit(ChP) \
+do { \
+ (ChP)->TxControl[3] |= TX_ENABLE; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sEnTxSoftFlowCtl
+Purpose: Enable Tx Software Flow Control
+Call: sEnTxSoftFlowCtl(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sEnTxSoftFlowCtl(ChP) \
+do { \
+ (ChP)->R[0x06] = 0xc5; \
+ out32((ChP)->IndexAddr,&(ChP)->R[0x04]); \
+} while (0)
+
+/***************************************************************************
+Function: sGetAiopIntStatus
+Purpose: Get the AIOP interrupt status
+Call: sGetAiopIntStatus(CtlP,AiopNum)
+ CONTROLLER_T *CtlP; Ptr to controller structure
+ int AiopNum; AIOP number
+Return: Byte_t: The AIOP interrupt status. Bits 0 through 7
+ represent channels 0 through 7 respectively. If a
+ bit is set that channel is interrupting.
+*/
+#define sGetAiopIntStatus(CTLP,AIOPNUM) sInB((CTLP)->AiopIntChanIO[AIOPNUM])
+
+/***************************************************************************
+Function: sGetAiopNumChan
+Purpose: Get the number of channels supported by an AIOP
+Call: sGetAiopNumChan(CtlP,AiopNum)
+ CONTROLLER_T *CtlP; Ptr to controller structure
+ int AiopNum; AIOP number
+Return: int: The number of channels supported by the AIOP
+*/
+#define sGetAiopNumChan(CTLP,AIOPNUM) (CTLP)->AiopNumChan[AIOPNUM]
+
+/***************************************************************************
+Function: sGetChanIntID
+Purpose: Get a channel's interrupt identification byte
+Call: sGetChanIntID(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Return: Byte_t: The channel interrupt ID. Can be any
+ combination of the following flags:
+ RXF_TRIG: Rx FIFO trigger level interrupt
+ TXFIFO_MT: Tx FIFO empty interrupt
+ SRC_INT: Special receive condition interrupt
+ DELTA_CD: CD change interrupt
+ DELTA_CTS: CTS change interrupt
+ DELTA_DSR: DSR change interrupt
+*/
+#define sGetChanIntID(ChP) (sInB((ChP)->IntID) & (RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR))
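+
+/*
+ * Editorial sketch (not part of the driver): a per-channel service
+ * routine typically reads the interrupt ID once and dispatches on the
+ * bits; the handler bodies here are placeholders, not driver code.
+ */
+#if 0
+static void example_chan_service(CHANNEL_T *chp)
+{
+ Byte_t id = sGetChanIntID(chp);
+
+ if (id & RXF_TRIG)
+ ; /* drain the Rx FIFO here */
+ if (id & TXFIFO_MT)
+ ; /* refill the Tx FIFO here */
+ if (id & (DELTA_CD | DELTA_CTS | DELTA_DSR))
+ ; /* handle a modem-signal change here */
+}
+#endif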
+
+/***************************************************************************
+Function: sGetChanNum
+Purpose: Get the number of a channel within an AIOP
+Call: sGetChanNum(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Return: int: Channel number within AIOP, or NULLCHAN if channel does
+ not exist.
+*/
+#define sGetChanNum(ChP) (ChP)->ChanNum
+
+/***************************************************************************
+Function: sGetChanStatus
+Purpose: Get the channel status
+Call: sGetChanStatus(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Return: Word_t: The channel status. Can be any combination of
+ the following flags:
+ LOW BYTE FLAGS
+ CTS_ACT: CTS input asserted
+ DSR_ACT: DSR input asserted
+ CD_ACT: CD input asserted
+ TXFIFOMT: Tx FIFO is empty
+ TXSHRMT: Tx shift register is empty
+ RDA: Rx data available
+
+ HIGH BYTE FLAGS
+ STATMODE: status mode enable bit
+ RXFOVERFL: receive FIFO overflow
+ RX2MATCH: receive compare byte 2 match
+ RX1MATCH: receive compare byte 1 match
+ RXBREAK: received BREAK
+ RXFRAME: received framing error
+ RXPARITY: received parity error
+Warnings: This function will clear the high byte flags in the Channel
+ Status Register.
+*/
+#define sGetChanStatus(ChP) sInW((ChP)->ChanStat)
+
+/***************************************************************************
+Function: sGetChanStatusLo
+Purpose: Get the low byte only of the channel status
+Call: sGetChanStatusLo(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Return: Byte_t: The channel status low byte. Can be any combination
+ of the following flags:
+ CTS_ACT: CTS input asserted
+ DSR_ACT: DSR input asserted
+ CD_ACT: CD input asserted
+ TXFIFOMT: Tx FIFO is empty
+ TXSHRMT: Tx shift register is empty
+ RDA: Rx data available
+*/
+#define sGetChanStatusLo(ChP) sInB((ByteIO_t)(ChP)->ChanStat)
+
+/**********************************************************************
+ * Get RI status of channel
+ * Defined as a function in rocket.c -aes
+ */
+#if 0
+#define sGetChanRI(ChP) ((ChP)->CtlP->AltChanRingIndicator ? \
+ (sInB((ByteIO_t)((ChP)->ChanStat+8)) & DSR_ACT) : \
+ (((ChP)->CtlP->boardType == ROCKET_TYPE_PC104) ? \
+ (!(sInB((ChP)->CtlP->AiopIO[3]) & sBitMapSetTbl[(ChP)->ChanNum])) : \
+ 0))
+#endif
+
+/***************************************************************************
+Function: sGetControllerIntStatus
+Purpose: Get the controller interrupt status
+Call: sGetControllerIntStatus(CtlP)
+ CONTROLLER_T *CtlP; Ptr to controller structure
+Return: Byte_t: The controller interrupt status in the lower 4
+ bits. Bits 0 through 3 represent AIOP's 0
+ through 3 respectively. If a bit is set that
+ AIOP is interrupting. Bits 4 through 7 will
+ always be cleared.
+*/
+#define sGetControllerIntStatus(CTLP) (sInB((CTLP)->MReg1IO) & 0x0f)
+
+/***************************************************************************
+Function: sPCIGetControllerIntStatus
+Purpose: Get the controller interrupt status
+Call: sPCIGetControllerIntStatus(CtlP)
+ CONTROLLER_T *CtlP; Ptr to controller structure
+Return: unsigned char: The controller interrupt status in the lower 4
+ bits and bit 4. Bits 0 through 3 represent AIOP's 0
+ through 3 respectively. Bit 4 is set if the int
+ was generated from periodic. If a bit is set the
+ AIOP is interrupting.
+*/
+#define sPCIGetControllerIntStatus(CTLP) \
+ ((CTLP)->isUPCI ? \
+ (sInW((CTLP)->PCIIO2) & UPCI_AIOP_INTR_BITS) : \
+ ((sInW((CTLP)->PCIIO) >> 8) & AIOP_INTR_BITS))
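+
+/*
+ * Editorial sketch (not part of the driver): the board-level handler
+ * reads the controller status, walks the per-AIOP bits in AiopIntrBits
+ * (set up by the init routines), and then asks each interrupting AIOP
+ * which of its channels need service.
+ */
+#if 0
+static void example_poll_board(CONTROLLER_T *ctlp)
+{
+ int aiop;
+ Word_t status = sPCIGetControllerIntStatus(ctlp);
+
+ for (aiop = 0; aiop < ctlp->NumAiop; aiop++) {
+ if (status & ctlp->AiopIntrBits[aiop]) {
+ Byte_t chans = sGetAiopIntStatus(ctlp, aiop);
+ /* service the channels whose bit is set in chans */
+ (void)chans;
+ }
+ }
+}
+#endif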
+
+/***************************************************************************
+
+Function: sGetRxCnt
+Purpose: Get the number of data bytes in the Rx FIFO
+Call: sGetRxCnt(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Return: int: The number of data bytes in the Rx FIFO.
+Comments: Word read of count register is required to obtain Rx count.
+
+*/
+#define sGetRxCnt(ChP) sInW((ChP)->TxRxCount)
+
+/***************************************************************************
+Function: sGetTxCnt
+Purpose: Get the number of data bytes in the Tx FIFO
+Call: sGetTxCnt(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Return: Byte_t: The number of data bytes in the Tx FIFO.
+Comments: Byte read of count register is required to obtain Tx count.
+
+*/
+#define sGetTxCnt(ChP) sInB((ByteIO_t)(ChP)->TxRxCount)
+
+/*****************************************************************************
+Function: sGetTxRxDataIO
+Purpose: Get the I/O address of a channel's TxRx Data register
+Call: sGetTxRxDataIO(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Return: WordIO_t: I/O address of a channel's TxRx Data register
+*/
+#define sGetTxRxDataIO(ChP) (ChP)->TxRxData
+
+/***************************************************************************
+Function: sInitChanDefaults
+Purpose: Initialize a channel structure to its default state.
+Call: sInitChanDefaults(ChP)
+ CHANNEL_T *ChP; Ptr to the channel structure
+Comments: This function must be called once for every channel structure
+ that exists before any other SSCI calls can be made.
+
+*/
+#define sInitChanDefaults(ChP) \
+do { \
+ (ChP)->CtlP = NULLCTLPTR; \
+ (ChP)->AiopNum = NULLAIOP; \
+ (ChP)->ChanID = AIOPID_NULL; \
+ (ChP)->ChanNum = NULLCHAN; \
+} while (0)
+
+/***************************************************************************
+Function: sResetAiopByNum
+Purpose: Reset the AIOP by number
+Call: sResetAiopByNum(CTLP,AIOPNUM)
+ CONTROLLER_T CTLP; Ptr to controller structure
+ AIOPNUM; AIOP index
+*/
+#define sResetAiopByNum(CTLP,AIOPNUM) \
+do { \
+ sOutB((CTLP)->AiopIO[(AIOPNUM)]+_CMD_REG,RESET_ALL); \
+ sOutB((CTLP)->AiopIO[(AIOPNUM)]+_CMD_REG,0x0); \
+} while (0)
+
+/***************************************************************************
+Function: sSendBreak
+Purpose: Send a transmit BREAK signal
+Call: sSendBreak(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sSendBreak(ChP) \
+do { \
+ (ChP)->TxControl[3] |= SETBREAK; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sSetBaud
+Purpose: Set baud rate
+Call: sSetBaud(ChP,Divisor)
+ CHANNEL_T *ChP; Ptr to channel structure
+ Word_t Divisor; 16 bit baud rate divisor for channel
+*/
+#define sSetBaud(ChP,DIVISOR) \
+do { \
+ (ChP)->BaudDiv[2] = (Byte_t)(DIVISOR); \
+ (ChP)->BaudDiv[3] = (Byte_t)((DIVISOR) >> 8); \
+ out32((ChP)->IndexAddr,(ChP)->BaudDiv); \
+} while (0)
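+
+/*
+ * Editorial sketch (not part of the driver): the divisor is derived
+ * from the board's base rate. The 460800 base and the rounding shown
+ * are assumptions for illustration; with them 9600 baud gives the same
+ * divisor of 47 that sInitChan() programs when sClockPrescale is 0x14.
+ */
+#if 0
+static void example_set_9600(CHANNEL_T *chp)
+{
+ unsigned int base = 460800; /* assumed base rate */
+ unsigned int baud = 9600;
+ Word_t divisor = (base + (baud >> 1)) / baud - 1; /* = 47 */
+
+ sSetBaud(chp, divisor);
+}
+#endif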
+
+/***************************************************************************
+Function: sSetData7
+Purpose: Set data bits to 7
+Call: sSetData7(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sSetData7(ChP) \
+do { \
+ (ChP)->TxControl[2] &= ~DATA8BIT; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sSetData8
+Purpose: Set data bits to 8
+Call: sSetData8(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sSetData8(ChP) \
+do { \
+ (ChP)->TxControl[2] |= DATA8BIT; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sSetDTR
+Purpose: Set the DTR output
+Call: sSetDTR(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sSetDTR(ChP) \
+do { \
+ (ChP)->TxControl[3] |= SET_DTR; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sSetEvenParity
+Purpose: Set even parity
+Call: sSetEvenParity(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Comments: Function sSetParity() can be used in place of functions sEnParity(),
+ sDisParity(), sSetOddParity(), and sSetEvenParity().
+
+Warnings: This function has no effect unless parity is enabled with function
+ sEnParity().
+*/
+#define sSetEvenParity(ChP) \
+do { \
+ (ChP)->TxControl[2] |= EVEN_PAR; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sSetOddParity
+Purpose: Set odd parity
+Call: sSetOddParity(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Comments: Function sSetParity() can be used in place of functions sEnParity(),
+ sDisParity(), sSetOddParity(), and sSetEvenParity().
+
+Warnings: This function has no effect unless parity is enabled with function
+ sEnParity().
+*/
+#define sSetOddParity(ChP) \
+do { \
+ (ChP)->TxControl[2] &= ~EVEN_PAR; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sSetRTS
+Purpose: Set the RTS output
+Call: sSetRTS(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sSetRTS(ChP) \
+do { \
+ if ((ChP)->rtsToggle) break; \
+ (ChP)->TxControl[3] |= SET_RTS; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sSetRxTrigger
+Purpose: Set the Rx FIFO trigger level
+Call: sSetRxTrigger(ChP,Level)
+ CHANNEL_T *ChP; Ptr to channel structure
+ Byte_t Level; Number of characters in Rx FIFO at which the
+ interrupt will be generated. Can be any of the following flags:
+
+ TRIG_NO: no trigger
+ TRIG_1: 1 character in FIFO
+ TRIG_1_2: FIFO 1/2 full
+ TRIG_7_8: FIFO 7/8 full
+Comments: An interrupt will be generated when the trigger level is reached
+ only if function sEnInterrupt() has been called with flag
+ RXINT_EN set. The RXF_TRIG flag in the Interrupt Identification
+ register will be set whenever the trigger level is reached
+ regardless of the setting of RXINT_EN.
+
+*/
+#define sSetRxTrigger(ChP,LEVEL) \
+do { \
+ (ChP)->RxControl[2] &= ~TRIG_MASK; \
+ (ChP)->RxControl[2] |= LEVEL; \
+ out32((ChP)->IndexAddr,(ChP)->RxControl); \
+} while (0)
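+
+/*
+ * Illustrative sketch only (not part of the SSCI interface): setting a
+ * trigger level by itself only arms RXF_TRIG; as the comment above notes,
+ * the Rx interrupt still has to be enabled separately with the
+ * sEnInterrupt()/RXINT_EN call it refers to.  The function name is
+ * hypothetical.
+ */
+#if 0
+static void example_arm_rx_trigger(CHANNEL_T *ch)
+{
+	sSetRxTrigger(ch, TRIG_1_2);	/* interrupt/flag at a half-full FIFO */
+	/* ...then enable the Rx interrupt as documented above. */
+}
+#endif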
+
+/***************************************************************************
+Function: sSetStop1
+Purpose: Set stop bits to 1
+Call: sSetStop1(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sSetStop1(ChP) \
+do { \
+ (ChP)->TxControl[2] &= ~STOP2; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sSetStop2
+Purpose: Set stop bits to 2
+Call: sSetStop2(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+*/
+#define sSetStop2(ChP) \
+do { \
+ (ChP)->TxControl[2] |= STOP2; \
+ out32((ChP)->IndexAddr,(ChP)->TxControl); \
+} while (0)
+
+/***************************************************************************
+Function: sSetTxXOFFChar
+Purpose: Set the Tx XOFF flow control character
+Call: sSetTxXOFFChar(ChP,Ch)
+ CHANNEL_T *ChP; Ptr to channel structure
+ Byte_t Ch; The value to set the Tx XOFF character to
+*/
+#define sSetTxXOFFChar(ChP,CH) \
+do { \
+ (ChP)->R[0x07] = (CH); \
+ out32((ChP)->IndexAddr,&(ChP)->R[0x04]); \
+} while (0)
+
+/***************************************************************************
+Function: sSetTxXONChar
+Purpose: Set the Tx XON flow control character
+Call: sSetTxXONChar(ChP,Ch)
+ CHANNEL_T *ChP; Ptr to channel structure
+ Byte_t Ch; The value to set the Tx XON character to
+*/
+#define sSetTxXONChar(ChP,CH) \
+do { \
+ (ChP)->R[0x0b] = (CH); \
+ out32((ChP)->IndexAddr,&(ChP)->R[0x08]); \
+} while (0)
+
+/***************************************************************************
+Function: sStartRxProcessor
+Purpose: Start a channel's receive processor
+Call: sStartRxProcessor(ChP)
+ CHANNEL_T *ChP; Ptr to channel structure
+Comments: This function is used to start a Rx processor after it was
+ stopped with sStopRxProcessor() or sStopSWInFlowCtl(). It
+ will restart both the Rx processor and software input flow control.
+
+*/
+#define sStartRxProcessor(ChP) out32((ChP)->IndexAddr,&(ChP)->R[0])
+
+/***************************************************************************
+Function: sWriteTxByte
+Purpose: Write a transmit data byte to a channel.
+ ByteIO_t io: Channel transmit register I/O address. This can
+ be obtained with sGetTxRxDataIO().
+ Byte_t Data; The transmit data byte.
+Warnings: This function writes the data byte without checking to see if
+ sMaxTxSize is exceeded in the Tx FIFO.
+*/
+#define sWriteTxByte(IO,DATA) sOutB(IO,DATA)
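+
+/*
+ * Illustrative sketch only: as the warning above notes, sWriteTxByte() does
+ * no bounds checking, so a caller is expected to consult sGetTxCnt() first.
+ * The wrapper name and the max_tx limit parameter are hypothetical, not
+ * constants defined by this header.
+ */
+#if 0
+static int example_write_tx_byte_checked(CHANNEL_T *ch, Byte_t data, int max_tx)
+{
+	if (sGetTxCnt(ch) >= max_tx)
+		return -1;			/* Tx FIFO already at the limit */
+
+	sWriteTxByte((ByteIO_t)sGetTxRxDataIO(ch), data);
+	return 0;
+}
+#endif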
+
+/*
+ * Begin Linux specific definitions for the Rocketport driver
+ *
+ * This code is Copyright Theodore Ts'o, 1995-1997
+ */
+
+struct r_port {
+ int magic;
+ struct tty_port port;
+ int line;
+ int flags; /* Don't yet match the ASY_ flags!! */
+ unsigned int board:3;
+ unsigned int aiop:2;
+ unsigned int chan:3;
+ CONTROLLER_t *ctlp;
+ CHANNEL_t channel;
+ int intmask;
+ int xmit_fifo_room; /* room in xmit fifo */
+ unsigned char *xmit_buf;
+ int xmit_head;
+ int xmit_tail;
+ int xmit_cnt;
+ int cd_status;
+ int ignore_status_mask;
+ int read_status_mask;
+ int cps;
+
+ struct completion close_wait; /* Not yet matching the core */
+ spinlock_t slock;
+ struct mutex write_mtx;
+};
+
+#define RPORT_MAGIC 0x525001
+
+#define NUM_BOARDS 8
+#define MAX_RP_PORTS (32*NUM_BOARDS)
+
+/*
+ * The size of the xmit buffer is 1 page, or 4096 bytes
+ */
+#define XMIT_BUF_SIZE 4096
+
+/* number of characters left in xmit buffer before we ask for more */
+#define WAKEUP_CHARS 256
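+
+/*
+ * Illustrative sketch only: xmit_head and xmit_tail in struct r_port above
+ * index a power-of-two ring buffer of XMIT_BUF_SIZE bytes, so wrap-around
+ * can be handled with a mask rather than a modulo.  The helper name is
+ * hypothetical and not used by the driver.
+ */
+#if 0
+static void example_xmit_push(struct r_port *rp, unsigned char c)
+{
+	rp->xmit_buf[rp->xmit_head] = c;
+	rp->xmit_head = (rp->xmit_head + 1) & (XMIT_BUF_SIZE - 1);
+	rp->xmit_cnt++;
+}
+#endif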
+
+/*
+ * Assigned major numbers for the Comtrol Rocketport
+ */
+#define TTY_ROCKET_MAJOR 46
+#define CUA_ROCKET_MAJOR 47
+
+#ifdef PCI_VENDOR_ID_RP
+#undef PCI_VENDOR_ID_RP
+#undef PCI_DEVICE_ID_RP8OCTA
+#undef PCI_DEVICE_ID_RP8INTF
+#undef PCI_DEVICE_ID_RP16INTF
+#undef PCI_DEVICE_ID_RP32INTF
+#undef PCI_DEVICE_ID_URP8OCTA
+#undef PCI_DEVICE_ID_URP8INTF
+#undef PCI_DEVICE_ID_URP16INTF
+#undef PCI_DEVICE_ID_CRP16INTF
+#undef PCI_DEVICE_ID_URP32INTF
+#endif
+
+/* Comtrol PCI Vendor ID */
+#define PCI_VENDOR_ID_RP 0x11fe
+
+/* Comtrol Device ID's */
+#define PCI_DEVICE_ID_RP32INTF 0x0001 /* Rocketport 32 port w/external I/F */
+#define PCI_DEVICE_ID_RP8INTF 0x0002 /* Rocketport 8 port w/external I/F */
+#define PCI_DEVICE_ID_RP16INTF 0x0003 /* Rocketport 16 port w/external I/F */
+#define PCI_DEVICE_ID_RP4QUAD 0x0004 /* Rocketport 4 port w/quad cable */
+#define PCI_DEVICE_ID_RP8OCTA 0x0005 /* Rocketport 8 port w/octa cable */
+#define PCI_DEVICE_ID_RP8J 0x0006 /* Rocketport 8 port w/RJ11 connectors */
+#define PCI_DEVICE_ID_RP4J 0x0007 /* Rocketport 4 port w/RJ11 connectors */
+#define PCI_DEVICE_ID_RP8SNI 0x0008 /* Rocketport 8 port w/ DB78 SNI (Siemens) connector */
+#define PCI_DEVICE_ID_RP16SNI 0x0009 /* Rocketport 16 port w/ DB78 SNI (Siemens) connector */
+#define PCI_DEVICE_ID_RPP4 0x000A /* Rocketport Plus 4 port */
+#define PCI_DEVICE_ID_RPP8 0x000B /* Rocketport Plus 8 port */
+#define PCI_DEVICE_ID_RP6M 0x000C /* RocketModem 6 port */
+#define PCI_DEVICE_ID_RP4M 0x000D /* RocketModem 4 port */
+#define PCI_DEVICE_ID_RP2_232 0x000E /* Rocketport Plus 2 port RS232 */
+#define PCI_DEVICE_ID_RP2_422 0x000F /* Rocketport Plus 2 port RS422 */
+
+/* Universal PCI boards */
+#define PCI_DEVICE_ID_URP32INTF 0x0801 /* Rocketport UPCI 32 port w/external I/F */
+#define PCI_DEVICE_ID_URP8INTF 0x0802 /* Rocketport UPCI 8 port w/external I/F */
+#define PCI_DEVICE_ID_URP16INTF 0x0803 /* Rocketport UPCI 16 port w/external I/F */
+#define PCI_DEVICE_ID_URP8OCTA 0x0805 /* Rocketport UPCI 8 port w/octa cable */
+#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C /* Rocketmodem III 8 port */
+#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D /* Rocketmodem III 4 port */
+
+/* Compact PCI device */
+#define PCI_DEVICE_ID_CRP16INTF 0x0903 /* Rocketport Compact PCI 16 port w/external I/F */
+
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
new file mode 100644
index 00000000..1b37626e
--- /dev/null
+++ b/drivers/tty/serial/21285.c
@@ -0,0 +1,511 @@
+/*
+ * Driver for the serial port on the 21285 StrongArm-110 core logic chip.
+ *
+ * Based on drivers/char/serial.c
+ */
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/device.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+#include <asm/hardware/dec21285.h>
+#include <mach/hardware.h>
+
+#define BAUD_BASE (mem_fclk_21285/64)
+
+#define SERIAL_21285_NAME "ttyFB"
+#define SERIAL_21285_MAJOR 204
+#define SERIAL_21285_MINOR 4
+
+#define RXSTAT_DUMMY_READ 0x80000000
+#define RXSTAT_FRAME (1 << 0)
+#define RXSTAT_PARITY (1 << 1)
+#define RXSTAT_OVERRUN (1 << 2)
+#define RXSTAT_ANYERR (RXSTAT_FRAME|RXSTAT_PARITY|RXSTAT_OVERRUN)
+
+#define H_UBRLCR_BREAK (1 << 0)
+#define H_UBRLCR_PARENB (1 << 1)
+#define H_UBRLCR_PAREVN (1 << 2)
+#define H_UBRLCR_STOPB (1 << 3)
+#define H_UBRLCR_FIFO (1 << 4)
+
+static const char serial21285_name[] = "Footbridge UART";
+
+#define tx_enabled(port) ((port)->unused[0])
+#define rx_enabled(port) ((port)->unused[1])
+
+/*
+ * The documented expression for selecting the divisor is:
+ * BAUD_BASE / baud - 1
+ * However, typically BAUD_BASE is not divisible by baud, so
+ * we want to select the divisor that gives us the minimum
+ * error. Therefore, we want:
+ * int(BAUD_BASE / baud - 0.5) ->
+ * int(BAUD_BASE / baud - (baud >> 1) / baud) ->
+ * int((BAUD_BASE - (baud >> 1)) / baud)
+ */
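+
+/*
+ * Illustrative sketch of the rounding derived above; the helper name is
+ * hypothetical.  The driver itself reaches essentially the same value via
+ * uart_get_divisor() followed by the "quot -= 1" in set_termios below.
+ */
+#if 0
+static unsigned int example_21285_divisor(unsigned int baud)
+{
+	/* Round-to-nearest form of BAUD_BASE / baud - 1, as shown above. */
+	return (BAUD_BASE - (baud >> 1)) / baud;
+}
+#endif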
+
+static void serial21285_stop_tx(struct uart_port *port)
+{
+ if (tx_enabled(port)) {
+ disable_irq_nosync(IRQ_CONTX);
+ tx_enabled(port) = 0;
+ }
+}
+
+static void serial21285_start_tx(struct uart_port *port)
+{
+ if (!tx_enabled(port)) {
+ enable_irq(IRQ_CONTX);
+ tx_enabled(port) = 1;
+ }
+}
+
+static void serial21285_stop_rx(struct uart_port *port)
+{
+ if (rx_enabled(port)) {
+ disable_irq_nosync(IRQ_CONRX);
+ rx_enabled(port) = 0;
+ }
+}
+
+static void serial21285_enable_ms(struct uart_port *port)
+{
+}
+
+static irqreturn_t serial21285_rx_chars(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int status, ch, flag, rxs, max_count = 256;
+
+ status = *CSR_UARTFLG;
+ while (!(status & 0x10) && max_count--) {
+ ch = *CSR_UARTDR;
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ rxs = *CSR_RXSTAT | RXSTAT_DUMMY_READ;
+ if (unlikely(rxs & RXSTAT_ANYERR)) {
+ if (rxs & RXSTAT_PARITY)
+ port->icount.parity++;
+ else if (rxs & RXSTAT_FRAME)
+ port->icount.frame++;
+ if (rxs & RXSTAT_OVERRUN)
+ port->icount.overrun++;
+
+ rxs &= port->read_status_mask;
+
+ if (rxs & RXSTAT_PARITY)
+ flag = TTY_PARITY;
+ else if (rxs & RXSTAT_FRAME)
+ flag = TTY_FRAME;
+ }
+
+ uart_insert_char(port, rxs, RXSTAT_OVERRUN, ch, flag);
+
+ status = *CSR_UARTFLG;
+ }
+ tty_flip_buffer_push(tty);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t serial21285_tx_chars(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ struct circ_buf *xmit = &port->state->xmit;
+ int count = 256;
+
+ if (port->x_char) {
+ *CSR_UARTDR = port->x_char;
+ port->icount.tx++;
+ port->x_char = 0;
+ goto out;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ serial21285_stop_tx(port);
+ goto out;
+ }
+
+ do {
+ *CSR_UARTDR = xmit->buf[xmit->tail];
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0 && !(*CSR_UARTFLG & 0x20));
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ serial21285_stop_tx(port);
+
+ out:
+ return IRQ_HANDLED;
+}
+
+static unsigned int serial21285_tx_empty(struct uart_port *port)
+{
+ return (*CSR_UARTFLG & 8) ? 0 : TIOCSER_TEMT;
+}
+
+/* no modem control lines */
+static unsigned int serial21285_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+}
+
+static void serial21285_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static void serial21285_break_ctl(struct uart_port *port, int break_state)
+{
+ unsigned long flags;
+ unsigned int h_lcr;
+
+ spin_lock_irqsave(&port->lock, flags);
+ h_lcr = *CSR_H_UBRLCR;
+ if (break_state)
+ h_lcr |= H_UBRLCR_BREAK;
+ else
+ h_lcr &= ~H_UBRLCR_BREAK;
+ *CSR_H_UBRLCR = h_lcr;
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int serial21285_startup(struct uart_port *port)
+{
+ int ret;
+
+ tx_enabled(port) = 1;
+ rx_enabled(port) = 1;
+
+ ret = request_irq(IRQ_CONRX, serial21285_rx_chars, 0,
+ serial21285_name, port);
+ if (ret == 0) {
+ ret = request_irq(IRQ_CONTX, serial21285_tx_chars, 0,
+ serial21285_name, port);
+ if (ret)
+ free_irq(IRQ_CONRX, port);
+ }
+
+ return ret;
+}
+
+static void serial21285_shutdown(struct uart_port *port)
+{
+ free_irq(IRQ_CONTX, port);
+ free_irq(IRQ_CONRX, port);
+}
+
+static void
+serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned int baud, quot, h_lcr, b;
+
+ /*
+ * We don't support modem control lines.
+ */
+ termios->c_cflag &= ~(HUPCL | CRTSCTS | CMSPAR);
+ termios->c_cflag |= CLOCAL;
+
+ /*
+ * We don't support BREAK character recognition.
+ */
+ termios->c_iflag &= ~(IGNBRK | BRKINT);
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+ quot = uart_get_divisor(port, baud);
+ b = port->uartclk / (16 * quot);
+ tty_termios_encode_baud_rate(termios, b, b);
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ h_lcr = 0x00;
+ break;
+ case CS6:
+ h_lcr = 0x20;
+ break;
+ case CS7:
+ h_lcr = 0x40;
+ break;
+ default: /* CS8 */
+ h_lcr = 0x60;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ h_lcr |= H_UBRLCR_STOPB;
+ if (termios->c_cflag & PARENB) {
+ h_lcr |= H_UBRLCR_PARENB;
+ if (!(termios->c_cflag & PARODD))
+ h_lcr |= H_UBRLCR_PAREVN;
+ }
+
+ if (port->fifosize)
+ h_lcr |= H_UBRLCR_FIFO;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ /*
+ * Which character status flags are we interested in?
+ */
+ port->read_status_mask = RXSTAT_OVERRUN;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= RXSTAT_FRAME | RXSTAT_PARITY;
+
+ /*
+ * Which character status flags should we ignore?
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= RXSTAT_FRAME | RXSTAT_PARITY;
+ if (termios->c_iflag & IGNBRK && termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= RXSTAT_OVERRUN;
+
+ /*
+ * Ignore all characters if CREAD is not set.
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= RXSTAT_DUMMY_READ;
+
+ quot -= 1;
+
+ *CSR_UARTCON = 0;
+ *CSR_L_UBRLCR = quot & 0xff;
+ *CSR_M_UBRLCR = (quot >> 8) & 0x0f;
+ *CSR_H_UBRLCR = h_lcr;
+ *CSR_UARTCON = 1;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *serial21285_type(struct uart_port *port)
+{
+ return port->type == PORT_21285 ? "DC21285" : NULL;
+}
+
+static void serial21285_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase, 32);
+}
+
+static int serial21285_request_port(struct uart_port *port)
+{
+ return request_mem_region(port->mapbase, 32, serial21285_name)
+ != NULL ? 0 : -EBUSY;
+}
+
+static void serial21285_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE && serial21285_request_port(port) == 0)
+ port->type = PORT_21285;
+}
+
+/*
+ * verify the new serial_struct (for TIOCSSERIAL).
+ */
+static int serial21285_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ int ret = 0;
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_21285)
+ ret = -EINVAL;
+ if (ser->irq != NO_IRQ)
+ ret = -EINVAL;
+ if (ser->baud_base != port->uartclk / 16)
+ ret = -EINVAL;
+ return ret;
+}
+
+static struct uart_ops serial21285_ops = {
+ .tx_empty = serial21285_tx_empty,
+ .get_mctrl = serial21285_get_mctrl,
+ .set_mctrl = serial21285_set_mctrl,
+ .stop_tx = serial21285_stop_tx,
+ .start_tx = serial21285_start_tx,
+ .stop_rx = serial21285_stop_rx,
+ .enable_ms = serial21285_enable_ms,
+ .break_ctl = serial21285_break_ctl,
+ .startup = serial21285_startup,
+ .shutdown = serial21285_shutdown,
+ .set_termios = serial21285_set_termios,
+ .type = serial21285_type,
+ .release_port = serial21285_release_port,
+ .request_port = serial21285_request_port,
+ .config_port = serial21285_config_port,
+ .verify_port = serial21285_verify_port,
+};
+
+static struct uart_port serial21285_port = {
+ .mapbase = 0x42000160,
+ .iotype = UPIO_MEM,
+ .irq = NO_IRQ,
+ .fifosize = 16,
+ .ops = &serial21285_ops,
+ .flags = UPF_BOOT_AUTOCONF,
+};
+
+static void serial21285_setup_ports(void)
+{
+ serial21285_port.uartclk = mem_fclk_21285 / 4;
+}
+
+#ifdef CONFIG_SERIAL_21285_CONSOLE
+static void serial21285_console_putchar(struct uart_port *port, int ch)
+{
+ while (*CSR_UARTFLG & 0x20)
+ barrier();
+ *CSR_UARTDR = ch;
+}
+
+static void
+serial21285_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ uart_console_write(&serial21285_port, s, count, serial21285_console_putchar);
+}
+
+static void __init
+serial21285_get_options(struct uart_port *port, int *baud,
+ int *parity, int *bits)
+{
+ if (*CSR_UARTCON == 1) {
+ unsigned int tmp;
+
+ tmp = *CSR_H_UBRLCR;
+ switch (tmp & 0x60) {
+ case 0x00:
+ *bits = 5;
+ break;
+ case 0x20:
+ *bits = 6;
+ break;
+ case 0x40:
+ *bits = 7;
+ break;
+ default:
+ case 0x60:
+ *bits = 8;
+ break;
+ }
+
+ if (tmp & H_UBRLCR_PARENB) {
+ *parity = 'o';
+ if (tmp & H_UBRLCR_PAREVN)
+ *parity = 'e';
+ }
+
+ tmp = *CSR_L_UBRLCR | (*CSR_M_UBRLCR << 8);
+
+ *baud = port->uartclk / (16 * (tmp + 1));
+ }
+}
+
+static int __init serial21285_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port = &serial21285_port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (machine_is_personal_server())
+ baud = 57600;
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ serial21285_get_options(port, &baud, &parity, &bits);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver serial21285_reg;
+
+static struct console serial21285_console =
+{
+ .name = SERIAL_21285_NAME,
+ .write = serial21285_console_write,
+ .device = uart_console_device,
+ .setup = serial21285_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &serial21285_reg,
+};
+
+static int __init rs285_console_init(void)
+{
+ serial21285_setup_ports();
+ register_console(&serial21285_console);
+ return 0;
+}
+console_initcall(rs285_console_init);
+
+#define SERIAL_21285_CONSOLE &serial21285_console
+#else
+#define SERIAL_21285_CONSOLE NULL
+#endif
+
+static struct uart_driver serial21285_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyFB",
+ .dev_name = "ttyFB",
+ .major = SERIAL_21285_MAJOR,
+ .minor = SERIAL_21285_MINOR,
+ .nr = 1,
+ .cons = SERIAL_21285_CONSOLE,
+};
+
+static int __init serial21285_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Serial: 21285 driver\n");
+
+ serial21285_setup_ports();
+
+ ret = uart_register_driver(&serial21285_reg);
+ if (ret == 0)
+ uart_add_one_port(&serial21285_reg, &serial21285_port);
+
+ return ret;
+}
+
+static void __exit serial21285_exit(void)
+{
+ uart_remove_one_port(&serial21285_reg, &serial21285_port);
+ uart_unregister_driver(&serial21285_reg);
+}
+
+module_init(serial21285_init);
+module_exit(serial21285_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel Footbridge (21285) serial driver");
+MODULE_ALIAS_CHARDEV(SERIAL_21285_MAJOR, SERIAL_21285_MINOR);
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c
new file mode 100644
index 00000000..e0a77540
--- /dev/null
+++ b/drivers/tty/serial/68328serial.c
@@ -0,0 +1,1448 @@
+/* 68328serial.c: Serial port driver for 68328 microcontroller
+ *
+ * Copyright (C) 1995 David S. Miller <davem@caip.rutgers.edu>
+ * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>
+ * Copyright (C) 1998, 1999 D. Jeff Dionne <jeff@uclinux.org>
+ * Copyright (C) 1999 Vladimir Gurevich <vgurevic@cisco.com>
+ * Copyright (C) 2002-2003 David McCullough <davidm@snapgear.com>
+ * Copyright (C) 2002 Greg Ungerer <gerg@snapgear.com>
+ *
+ * VZ Support/Fixes Evan Stawnyczy <e@lineo.ca>
+ * Multiple UART support Daniel Potts <danielp@cse.unsw.edu.au>
+ * Power management support Daniel Potts <danielp@cse.unsw.edu.au>
+ * VZ Second Serial Port enable Phil Wilshire
+ * 2.4/2.5 port David McCullough
+ */
+
+#include <asm/dbg.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/reboot.h>
+#include <linux/keyboard.h>
+#include <linux/init.h>
+#include <linux/pm.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/delay.h>
+#include <asm/uaccess.h>
+
+/* (es) */
+/* note: perhaps we can merge these files, so that you can just
+ * define one of them, and they can sort that out for themselves
+ */
+#if defined(CONFIG_M68EZ328)
+#include <asm/MC68EZ328.h>
+#else
+#if defined(CONFIG_M68VZ328)
+#include <asm/MC68VZ328.h>
+#else
+#include <asm/MC68328.h>
+#endif /* CONFIG_M68VZ328 */
+#endif /* CONFIG_M68EZ328 */
+
+#include "68328serial.h"
+
+/* Turn off usage of real serial interrupt code, to "support" Copilot */
+#ifdef CONFIG_XCOPILOT_BUGS
+#undef USE_INTS
+#else
+#define USE_INTS
+#endif
+
+static struct m68k_serial m68k_soft[NR_PORTS];
+
+static unsigned int uart_irqs[NR_PORTS] = UART_IRQ_DEFNS;
+
+/* multiple ports are contiguous in memory */
+m68328_uart *uart_addr = (m68328_uart *)USTCNT_ADDR;
+
+struct tty_struct m68k_ttys;
+struct m68k_serial *m68k_consinfo = 0;
+
+#define M68K_CLOCK (16667000) /* FIXME: 16MHz is likely wrong */
+
+struct tty_driver *serial_driver;
+
+/* number of characters left in xmit buffer before we ask for more */
+#define WAKEUP_CHARS 256
+
+/* Debugging... DEBUG_INTR is bad to use when one of the zs
+ * lines is your console ;(
+ */
+#undef SERIAL_DEBUG_INTR
+#undef SERIAL_DEBUG_OPEN
+#undef SERIAL_DEBUG_FLOW
+
+#define RS_ISR_PASS_LIMIT 256
+
+static void change_speed(struct m68k_serial *info);
+
+/*
+ * Setup for console. Argument comes from the boot command line.
+ */
+
+/* note: this is messy, but it works; again, perhaps this is defined somewhere else? */
+#ifdef CONFIG_M68VZ328
+#define CONSOLE_BAUD_RATE 19200
+#define DEFAULT_CBAUD B19200
+#endif
+
+
+#ifndef CONSOLE_BAUD_RATE
+#define CONSOLE_BAUD_RATE 9600
+#define DEFAULT_CBAUD B9600
+#endif
+
+
+static int m68328_console_initted = 0;
+static int m68328_console_baud = CONSOLE_BAUD_RATE;
+static int m68328_console_cbaud = DEFAULT_CBAUD;
+
+
+static inline int serial_paranoia_check(struct m68k_serial *info,
+ char *name, const char *routine)
+{
+#ifdef SERIAL_PARANOIA_CHECK
+ static const char *badmagic =
+ "Warning: bad magic number for serial struct %s in %s\n";
+ static const char *badinfo =
+ "Warning: null m68k_serial for %s in %s\n";
+
+ if (!info) {
+ printk(badinfo, name, routine);
+ return 1;
+ }
+ if (info->magic != SERIAL_MAGIC) {
+ printk(badmagic, name, routine);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/*
+ * This is used to figure out the divisor speeds and the timeouts
+ */
+static int baud_table[] = {
+ 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
+ 9600, 19200, 38400, 57600, 115200, 0 };
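+
+/*
+ * Hypothetical helper (not part of the driver) showing how the CBAUD bits
+ * of a termios c_cflag map onto this table, mirroring the lookup that
+ * change_speed() performs below.
+ */
+#if 0
+static int example_cflag_to_baud(unsigned int cflag)
+{
+	int i = cflag & CBAUD;
+
+	if (i & CBAUDEX)
+		i = (i & ~CBAUDEX) + B38400;	/* e.g. B115200 -> index 17 */
+	return baud_table[i];			/* e.g. B9600 -> 9600 */
+}
+#endif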
+
+/* Sets or clears DTR/RTS on the requested line */
+static inline void m68k_rtsdtr(struct m68k_serial *ss, int set)
+{
+ if (set) {
+ /* set the RTS/CTS line */
+ } else {
+ /* clear it */
+ }
+ return;
+}
+
+/* Utility routines */
+static inline int get_baud(struct m68k_serial *ss)
+{
+ unsigned long result = 115200;
+ unsigned short int baud = uart_addr[ss->line].ubaud;
+ if (GET_FIELD(baud, UBAUD_PRESCALER) == 0x38) result = 38400;
+ result >>= GET_FIELD(baud, UBAUD_DIVIDE);
+
+ return result;
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_stop() and rs_start()
+ *
+ * These routines are called before setting or resetting tty->stopped.
+ * They enable or disable transmitter interrupts, as necessary.
+ * ------------------------------------------------------------
+ */
+static void rs_stop(struct tty_struct *tty)
+{
+ struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
+ m68328_uart *uart = &uart_addr[info->line];
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_stop"))
+ return;
+
+ local_irq_save(flags);
+ uart->ustcnt &= ~USTCNT_TXEN;
+ local_irq_restore(flags);
+}
+
+static int rs_put_char(char ch)
+{
+ unsigned long flags;
+ int loops = 0;
+
+ local_irq_save(flags);
+
+ while (!(UTX & UTX_TX_AVAIL) && (loops < 1000)) {
+ loops++;
+ udelay(5);
+ }
+
+ UTX_TXDATA = ch;
+ udelay(5);
+ local_irq_restore(flags);
+ return 1;
+}
+
+static void rs_start(struct tty_struct *tty)
+{
+ struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
+ m68328_uart *uart = &uart_addr[info->line];
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_start"))
+ return;
+
+ local_irq_save(flags);
+ if (info->xmit_cnt && info->xmit_buf && !(uart->ustcnt & USTCNT_TXEN)) {
+#ifdef USE_INTS
+ uart->ustcnt |= USTCNT_TXEN | USTCNT_TX_INTR_MASK;
+#else
+ uart->ustcnt |= USTCNT_TXEN;
+#endif
+ }
+ local_irq_restore(flags);
+}
+
+/* Drop into either the boot monitor or kadb upon receiving a break
+ * from keyboard/console input.
+ */
+static void batten_down_hatches(void)
+{
+ /* Drop into the debugger */
+}
+
+static void status_handle(struct m68k_serial *info, unsigned short status)
+{
+#if 0
+ if(status & DCD) {
+ if((info->port.tty->termios->c_cflag & CRTSCTS) &&
+ ((info->curregs[3] & AUTO_ENAB)==0)) {
+ info->curregs[3] |= AUTO_ENAB;
+ info->pendregs[3] |= AUTO_ENAB;
+ write_zsreg(info->m68k_channel, 3, info->curregs[3]);
+ }
+ } else {
+ if((info->curregs[3] & AUTO_ENAB)) {
+ info->curregs[3] &= ~AUTO_ENAB;
+ info->pendregs[3] &= ~AUTO_ENAB;
+ write_zsreg(info->m68k_channel, 3, info->curregs[3]);
+ }
+ }
+#endif
+ /* If this is console input and this is a
+ * 'break asserted' status change interrupt,
+ * see if we can drop into the debugger.
+ */
+ if((status & URX_BREAK) && info->break_abort)
+ batten_down_hatches();
+ return;
+}
+
+static void receive_chars(struct m68k_serial *info, unsigned short rx)
+{
+ struct tty_struct *tty = info->tty;
+ m68328_uart *uart = &uart_addr[info->line];
+ unsigned char ch, flag;
+
+ /*
+ * This do { } while() loop will get ALL chars out of Rx FIFO
+ */
+#ifndef CONFIG_XCOPILOT_BUGS
+ do {
+#endif
+ ch = GET_FIELD(rx, URX_RXDATA);
+
+ if(info->is_cons) {
+ if(URX_BREAK & rx) { /* whee, break received */
+ status_handle(info, rx);
+ return;
+#ifdef CONFIG_MAGIC_SYSRQ
+ } else if (ch == 0x10) { /* ^P */
+ show_state();
+ show_free_areas(0);
+ show_buffers();
+/* show_net_buffers(); */
+ return;
+ } else if (ch == 0x12) { /* ^R */
+ emergency_restart();
+ return;
+#endif /* CONFIG_MAGIC_SYSRQ */
+ }
+ }
+
+ if(!tty)
+ goto clear_and_exit;
+
+ flag = TTY_NORMAL;
+
+ if(rx & URX_PARITY_ERROR) {
+ flag = TTY_PARITY;
+ status_handle(info, rx);
+ } else if(rx & URX_OVRUN) {
+ flag = TTY_OVERRUN;
+ status_handle(info, rx);
+ } else if(rx & URX_FRAME_ERROR) {
+ flag = TTY_FRAME;
+ status_handle(info, rx);
+ }
+ tty_insert_flip_char(tty, ch, flag);
+#ifndef CONFIG_XCOPILOT_BUGS
+ } while((rx = uart->urx.w) & URX_DATA_READY);
+#endif
+
+ tty_schedule_flip(tty);
+
+clear_and_exit:
+ return;
+}
+
+static void transmit_chars(struct m68k_serial *info)
+{
+ m68328_uart *uart = &uart_addr[info->line];
+
+ if (info->x_char) {
+ /* Send next char */
+ uart->utx.b.txdata = info->x_char;
+ info->x_char = 0;
+ goto clear_and_return;
+ }
+
+ if((info->xmit_cnt <= 0) || info->tty->stopped) {
+ /* That's peculiar... TX ints off */
+ uart->ustcnt &= ~USTCNT_TX_INTR_MASK;
+ goto clear_and_return;
+ }
+
+ /* Send char */
+ uart->utx.b.txdata = info->xmit_buf[info->xmit_tail++];
+ info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
+ info->xmit_cnt--;
+
+ if (info->xmit_cnt < WAKEUP_CHARS)
+ schedule_work(&info->tqueue);
+
+ if(info->xmit_cnt <= 0) {
+ /* All done for now... TX ints off */
+ uart->ustcnt &= ~USTCNT_TX_INTR_MASK;
+ goto clear_and_return;
+ }
+
+clear_and_return:
+ /* Clear interrupt (should be auto)*/
+ return;
+}
+
+/*
+ * This is the serial driver's generic interrupt routine
+ */
+irqreturn_t rs_interrupt(int irq, void *dev_id)
+{
+ struct m68k_serial *info = dev_id;
+ m68328_uart *uart;
+ unsigned short rx;
+ unsigned short tx;
+
+ uart = &uart_addr[info->line];
+ rx = uart->urx.w;
+
+#ifdef USE_INTS
+ tx = uart->utx.w;
+
+ if (rx & URX_DATA_READY) receive_chars(info, rx);
+ if (tx & UTX_TX_AVAIL) transmit_chars(info);
+#else
+ receive_chars(info, rx);
+#endif
+ return IRQ_HANDLED;
+}
+
+static void do_softint(struct work_struct *work)
+{
+ struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue);
+ struct tty_struct *tty;
+
+ tty = info->tty;
+ if (!tty)
+ return;
+#if 0
+ if (clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event)) {
+ tty_wakeup(tty);
+ }
+#endif
+}
+
+static int startup(struct m68k_serial * info)
+{
+ m68328_uart *uart = &uart_addr[info->line];
+ unsigned long flags;
+
+ if (info->flags & S_INITIALIZED)
+ return 0;
+
+ if (!info->xmit_buf) {
+ info->xmit_buf = (unsigned char *) __get_free_page(GFP_KERNEL);
+ if (!info->xmit_buf)
+ return -ENOMEM;
+ }
+
+ local_irq_save(flags);
+
+ /*
+ * Clear the FIFO buffers and disable them
+ * (they will be reenabled in change_speed())
+ */
+
+ uart->ustcnt = USTCNT_UEN;
+ info->xmit_fifo_size = 1;
+ uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_TXEN;
+ (void)uart->urx.w;
+
+ /*
+ * Finally, enable sequencing and interrupts
+ */
+#ifdef USE_INTS
+ uart->ustcnt = USTCNT_UEN | USTCNT_RXEN |
+ USTCNT_RX_INTR_MASK | USTCNT_TX_INTR_MASK;
+#else
+ uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK;
+#endif
+
+ if (info->tty)
+ clear_bit(TTY_IO_ERROR, &info->tty->flags);
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+
+ /*
+ * and set the speed of the serial port
+ */
+
+ change_speed(info);
+
+ info->flags |= S_INITIALIZED;
+ local_irq_restore(flags);
+ return 0;
+}
+
+/*
+ * This routine will shut down a serial port; interrupts are disabled, and
+ * DTR is dropped if the hangup on close termio flag is on.
+ */
+static void shutdown(struct m68k_serial * info)
+{
+ m68328_uart *uart = &uart_addr[info->line];
+ unsigned long flags;
+
+ uart->ustcnt = 0; /* All off! */
+ if (!(info->flags & S_INITIALIZED))
+ return;
+
+ local_irq_save(flags);
+
+ if (info->xmit_buf) {
+ free_page((unsigned long) info->xmit_buf);
+ info->xmit_buf = 0;
+ }
+
+ if (info->tty)
+ set_bit(TTY_IO_ERROR, &info->tty->flags);
+
+ info->flags &= ~S_INITIALIZED;
+ local_irq_restore(flags);
+}
+
+struct {
+ int divisor, prescale;
+}
+#ifndef CONFIG_M68VZ328
+ hw_baud_table[18] = {
+ {0,0}, /* 0 */
+ {0,0}, /* 50 */
+ {0,0}, /* 75 */
+ {0,0}, /* 110 */
+ {0,0}, /* 134 */
+ {0,0}, /* 150 */
+ {0,0}, /* 200 */
+ {7,0x26}, /* 300 */
+ {6,0x26}, /* 600 */
+ {5,0x26}, /* 1200 */
+ {0,0}, /* 1800 */
+ {4,0x26}, /* 2400 */
+ {3,0x26}, /* 4800 */
+ {2,0x26}, /* 9600 */
+ {1,0x26}, /* 19200 */
+ {0,0x26}, /* 38400 */
+ {1,0x38}, /* 57600 */
+ {0,0x38}, /* 115200 */
+};
+#else
+ hw_baud_table[18] = {
+ {0,0}, /* 0 */
+ {0,0}, /* 50 */
+ {0,0}, /* 75 */
+ {0,0}, /* 110 */
+ {0,0}, /* 134 */
+ {0,0}, /* 150 */
+ {0,0}, /* 200 */
+ {0,0}, /* 300 */
+ {7,0x26}, /* 600 */
+ {6,0x26}, /* 1200 */
+ {0,0}, /* 1800 */
+ {5,0x26}, /* 2400 */
+ {4,0x26}, /* 4800 */
+ {3,0x26}, /* 9600 */
+ {2,0x26}, /* 19200 */
+ {1,0x26}, /* 38400 */
+ {0,0x26}, /* 57600 */
+ {1,0x38}, /* 115200 */
+};
+#endif
+/* rate = 1036800 / ((65 - prescale) * (1<<divider)) */
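+
+/*
+ * Quick sanity check of the formula above against two table entries:
+ * prescale 0x26 (38) with divisor 2 gives 1036800 / ((65 - 38) * 4) = 9600,
+ * and prescale 0x38 (56) with divisor 0 gives 1036800 / ((65 - 56) * 1) =
+ * 115200, matching the 9600 and 115200 rows.
+ */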
+
+/*
+ * This routine is called to set the UART divisor registers to match
+ * the specified baud rate for a serial port.
+ */
+static void change_speed(struct m68k_serial *info)
+{
+ m68328_uart *uart = &uart_addr[info->line];
+ unsigned short port;
+ unsigned short ustcnt;
+ unsigned cflag;
+ int i;
+
+ if (!info->tty || !info->tty->termios)
+ return;
+ cflag = info->tty->termios->c_cflag;
+ if (!(port = info->port))
+ return;
+
+ ustcnt = uart->ustcnt;
+ uart->ustcnt = ustcnt & ~USTCNT_TXEN;
+
+ i = cflag & CBAUD;
+ if (i & CBAUDEX) {
+ i = (i & ~CBAUDEX) + B38400;
+ }
+
+ info->baud = baud_table[i];
+ uart->ubaud = PUT_FIELD(UBAUD_DIVIDE, hw_baud_table[i].divisor) |
+ PUT_FIELD(UBAUD_PRESCALER, hw_baud_table[i].prescale);
+
+ ustcnt &= ~(USTCNT_PARITYEN | USTCNT_ODD_EVEN | USTCNT_STOP | USTCNT_8_7);
+
+ if ((cflag & CSIZE) == CS8)
+ ustcnt |= USTCNT_8_7;
+
+ if (cflag & CSTOPB)
+ ustcnt |= USTCNT_STOP;
+
+ if (cflag & PARENB)
+ ustcnt |= USTCNT_PARITYEN;
+ if (cflag & PARODD)
+ ustcnt |= USTCNT_ODD_EVEN;
+
+#ifdef CONFIG_SERIAL_68328_RTS_CTS
+ if (cflag & CRTSCTS) {
+ uart->utx.w &= ~ UTX_NOCTS;
+ } else {
+ uart->utx.w |= UTX_NOCTS;
+ }
+#endif
+
+ ustcnt |= USTCNT_TXEN;
+
+ uart->ustcnt = ustcnt;
+ return;
+}
+
+/*
+ * Fair output driver allows a process to speak.
+ */
+static void rs_fair_output(void)
+{
+ int left; /* Output no more than that */
+ unsigned long flags;
+ struct m68k_serial *info = &m68k_soft[0];
+ char c;
+
+ if (info == 0) return;
+ if (info->xmit_buf == 0) return;
+
+ local_irq_save(flags);
+ left = info->xmit_cnt;
+ while (left != 0) {
+ c = info->xmit_buf[info->xmit_tail];
+ info->xmit_tail = (info->xmit_tail+1) & (SERIAL_XMIT_SIZE-1);
+ info->xmit_cnt--;
+ local_irq_restore(flags);
+
+ rs_put_char(c);
+
+ local_irq_save(flags);
+ left = min(info->xmit_cnt, left-1);
+ }
+
+ /* Last character is being transmitted now (hopefully). */
+ udelay(5);
+
+ local_irq_restore(flags);
+ return;
+}
+
+/*
+ * m68k_console_print is registered for printk.
+ */
+void console_print_68328(const char *p)
+{
+ char c;
+
+ while((c=*(p++)) != 0) {
+ if(c == '\n')
+ rs_put_char('\r');
+ rs_put_char(c);
+ }
+
+ /* Comment this out if you want strictly interrupt-driven output */
+ rs_fair_output();
+
+ return;
+}
+
+static void rs_set_ldisc(struct tty_struct *tty)
+{
+ struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "rs_set_ldisc"))
+ return;
+
+ info->is_cons = (tty->termios->c_line == N_TTY);
+
+ printk("ttyS%d console mode %s\n", info->line, info->is_cons ? "on" : "off");
+}
+
+static void rs_flush_chars(struct tty_struct *tty)
+{
+ struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
+ m68328_uart *uart = &uart_addr[info->line];
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_flush_chars"))
+ return;
+#ifndef USE_INTS
+ for(;;) {
+#endif
+
+ /* Enable transmitter */
+ local_irq_save(flags);
+
+ if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
+ !info->xmit_buf) {
+ local_irq_restore(flags);
+ return;
+ }
+
+#ifdef USE_INTS
+ uart->ustcnt |= USTCNT_TXEN | USTCNT_TX_INTR_MASK;
+#else
+ uart->ustcnt |= USTCNT_TXEN;
+#endif
+
+#ifdef USE_INTS
+ if (uart->utx.w & UTX_TX_AVAIL) {
+#else
+ if (1) {
+#endif
+ /* Send char */
+ uart->utx.b.txdata = info->xmit_buf[info->xmit_tail++];
+ info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
+ info->xmit_cnt--;
+ }
+
+#ifndef USE_INTS
+ while (!(uart->utx.w & UTX_TX_AVAIL)) udelay(5);
+ }
+#endif
+ local_irq_restore(flags);
+}
+
+extern void console_printn(const char * b, int count);
+
+static int rs_write(struct tty_struct * tty,
+ const unsigned char *buf, int count)
+{
+ int c, total = 0;
+ struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
+ m68328_uart *uart = &uart_addr[info->line];
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_write"))
+ return 0;
+
+ if (!tty || !info->xmit_buf)
+ return 0;
+
+ local_save_flags(flags);
+ while (1) {
+ local_irq_disable();
+ c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - info->xmit_head));
+ local_irq_restore(flags);
+
+ if (c <= 0)
+ break;
+
+ memcpy(info->xmit_buf + info->xmit_head, buf, c);
+
+ local_irq_disable();
+ info->xmit_head = (info->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
+ info->xmit_cnt += c;
+ local_irq_restore(flags);
+ buf += c;
+ count -= c;
+ total += c;
+ }
+
+ if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
+ /* Enable transmitter */
+ local_irq_disable();
+#ifndef USE_INTS
+ while(info->xmit_cnt) {
+#endif
+
+ uart->ustcnt |= USTCNT_TXEN;
+#ifdef USE_INTS
+ uart->ustcnt |= USTCNT_TX_INTR_MASK;
+#else
+ while (!(uart->utx.w & UTX_TX_AVAIL)) udelay(5);
+#endif
+ if (uart->utx.w & UTX_TX_AVAIL) {
+ uart->utx.b.txdata = info->xmit_buf[info->xmit_tail++];
+ info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
+ info->xmit_cnt--;
+ }
+
+#ifndef USE_INTS
+ }
+#endif
+ local_irq_restore(flags);
+ }
+
+ return total;
+}
+
+static int rs_write_room(struct tty_struct *tty)
+{
+ struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
+ int ret;
+
+ if (serial_paranoia_check(info, tty->name, "rs_write_room"))
+ return 0;
+ ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
+ if (ret < 0)
+ ret = 0;
+ return ret;
+}
+
+static int rs_chars_in_buffer(struct tty_struct *tty)
+{
+ struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
+ return 0;
+ return info->xmit_cnt;
+}
+
+static void rs_flush_buffer(struct tty_struct *tty)
+{
+ struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
+ unsigned long flags;
+
+ if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
+ return;
+ local_irq_save(flags);
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+ local_irq_restore(flags);
+ tty_wakeup(tty);
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_throttle()
+ *
+ * This routine is called by the upper-layer tty layer to signal that
+ * incoming characters should be throttled.
+ * ------------------------------------------------------------
+ */
+static void rs_throttle(struct tty_struct * tty)
+{
+ struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "rs_throttle"))
+ return;
+
+ if (I_IXOFF(tty))
+ info->x_char = STOP_CHAR(tty);
+
+ /* Turn off RTS line (do this atomic) */
+}
+
+static void rs_unthrottle(struct tty_struct * tty)
+{
+ struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
+ return;
+
+ if (I_IXOFF(tty)) {
+ if (info->x_char)
+ info->x_char = 0;
+ else
+ info->x_char = START_CHAR(tty);
+ }
+
+ /* Assert RTS line (do this atomic) */
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_ioctl() and friends
+ * ------------------------------------------------------------
+ */
+
+static int get_serial_info(struct m68k_serial * info,
+ struct serial_struct * retinfo)
+{
+ struct serial_struct tmp;
+
+ if (!retinfo)
+ return -EFAULT;
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.type = info->type;
+ tmp.line = info->line;
+ tmp.port = info->port;
+ tmp.irq = info->irq;
+ tmp.flags = info->flags;
+ tmp.baud_base = info->baud_base;
+ tmp.close_delay = info->close_delay;
+ tmp.closing_wait = info->closing_wait;
+ tmp.custom_divisor = info->custom_divisor;
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int set_serial_info(struct m68k_serial * info,
+ struct serial_struct * new_info)
+{
+ struct serial_struct new_serial;
+ struct m68k_serial old_info;
+ int retval = 0;
+
+ if (!new_info)
+ return -EFAULT;
+ if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
+ return -EFAULT;
+ old_info = *info;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ if ((new_serial.baud_base != info->baud_base) ||
+ (new_serial.type != info->type) ||
+ (new_serial.close_delay != info->close_delay) ||
+ ((new_serial.flags & ~S_USR_MASK) !=
+ (info->flags & ~S_USR_MASK)))
+ return -EPERM;
+ info->flags = ((info->flags & ~S_USR_MASK) |
+ (new_serial.flags & S_USR_MASK));
+ info->custom_divisor = new_serial.custom_divisor;
+ goto check_and_exit;
+ }
+
+ if (info->count > 1)
+ return -EBUSY;
+
+ /*
+ * OK, past this point, all the error checking has been done.
+ * At this point, we start making changes.....
+ */
+
+ info->baud_base = new_serial.baud_base;
+ info->flags = ((info->flags & ~S_FLAGS) |
+ (new_serial.flags & S_FLAGS));
+ info->type = new_serial.type;
+ info->close_delay = new_serial.close_delay;
+ info->closing_wait = new_serial.closing_wait;
+
+check_and_exit:
+ retval = startup(info);
+ return retval;
+}
+
+/*
+ * get_lsr_info - get line status register info
+ *
+ * Purpose: Let user call ioctl() to get info when the UART physically
+ * is emptied. On bus types like RS485, the transmitter must
+ * release the bus after transmitting. This must be done when
+ * the transmit shift register is empty, not when the
+ * transmit holding register is empty. This functionality
+ * allows an RS485 driver to be written in user space.
+ */
+static int get_lsr_info(struct m68k_serial * info, unsigned int *value)
+{
+#ifdef CONFIG_SERIAL_68328_RTS_CTS
+ m68328_uart *uart = &uart_addr[info->line];
+#endif
+ unsigned char status;
+ unsigned long flags;
+
+ local_irq_save(flags);
+#ifdef CONFIG_SERIAL_68328_RTS_CTS
+ status = (uart->utx.w & UTX_CTS_STAT) ? 1 : 0;
+#else
+ status = 0;
+#endif
+ local_irq_restore(flags);
+ return put_user(status, value);
+}
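+
+/*
+ * Illustrative user-space sketch of the ioctl path described above; the
+ * device node is hypothetical and error handling is minimal.  Note that this
+ * driver fills in the CTS state (or always 0 without
+ * CONFIG_SERIAL_68328_RTS_CTS) rather than a full line status register.
+ *
+ *	#include <stdio.h>
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *
+ *	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
+ *	unsigned int status = 0;
+ *
+ *	if (fd >= 0 && ioctl(fd, TIOCSERGETLSR, &status) == 0)
+ *		printf("status = %u\n", status);
+ */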
+
+/*
+ * This routine sends a break character out the serial port.
+ */
+static void send_break(struct m68k_serial * info, unsigned int duration)
+{
+ m68328_uart *uart = &uart_addr[info->line];
+ unsigned long flags;
+ if (!info->port)
+ return;
+ local_irq_save(flags);
+#ifdef USE_INTS
+ uart->utx.w |= UTX_SEND_BREAK;
+ msleep_interruptible(duration);
+ uart->utx.w &= ~UTX_SEND_BREAK;
+#endif
+ local_irq_restore(flags);
+}
+
+static int rs_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct m68k_serial * info = (struct m68k_serial *)tty->driver_data;
+ int retval;
+
+ if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
+ return -ENODEV;
+
+ if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+ (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGWILD) &&
+ (cmd != TIOCSERSWILD) && (cmd != TIOCSERGSTRUCT)) {
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case TCSBRK: /* SVID version: non-zero arg --> no break */
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ tty_wait_until_sent(tty, 0);
+ if (!arg)
+ send_break(info, 250); /* 1/4 second */
+ return 0;
+ case TCSBRKP: /* support for POSIX tcsendbreak() */
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ tty_wait_until_sent(tty, 0);
+ send_break(info, arg ? arg*(100) : 250);
+ return 0;
+ case TIOCGSERIAL:
+ return get_serial_info(info,
+ (struct serial_struct *) arg);
+ case TIOCSSERIAL:
+ return set_serial_info(info,
+ (struct serial_struct *) arg);
+ case TIOCSERGETLSR: /* Get line status register */
+ return get_lsr_info(info, (unsigned int *) arg);
+ case TIOCSERGSTRUCT:
+ if (copy_to_user((struct m68k_serial *) arg,
+ info, sizeof(struct m68k_serial)))
+ return -EFAULT;
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
+
+ change_speed(info);
+
+ if ((old_termios->c_cflag & CRTSCTS) &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ rs_start(tty);
+ }
+
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_close()
+ *
+ * This routine is called when the serial port gets closed. First, we
+ * wait for the last remaining data to be sent. Then, we unlink its
+ * S structure from the interrupt chain if necessary, and we free
+ * that IRQ if nothing is left in the chain.
+ * ------------------------------------------------------------
+ */
+static void rs_close(struct tty_struct *tty, struct file * filp)
+{
+ struct m68k_serial * info = (struct m68k_serial *)tty->driver_data;
+ m68328_uart *uart = &uart_addr[info->line];
+ unsigned long flags;
+
+ if (!info || serial_paranoia_check(info, tty->name, "rs_close"))
+ return;
+
+ local_irq_save(flags);
+
+ if (tty_hung_up_p(filp)) {
+ local_irq_restore(flags);
+ return;
+ }
+
+ if ((tty->count == 1) && (info->count != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. Info->count should always
+ * be one in these conditions. If it's greater than
+ * one, we've got real problems, since it means the
+ * serial port won't be shutdown.
+ */
+ printk("rs_close: bad serial port count; tty->count is 1, "
+ "info->count is %d\n", info->count);
+ info->count = 1;
+ }
+ if (--info->count < 0) {
+ printk("rs_close: bad serial port count for ttyS%d: %d\n",
+ info->line, info->count);
+ info->count = 0;
+ }
+ if (info->count) {
+ local_irq_restore(flags);
+ return;
+ }
+ info->flags |= S_CLOSING;
+ /*
+ * Now we wait for the transmit buffer to clear; and we notify
+ * the line discipline to only process XON/XOFF characters.
+ */
+ tty->closing = 1;
+ if (info->closing_wait != S_CLOSING_WAIT_NONE)
+ tty_wait_until_sent(tty, info->closing_wait);
+ /*
+ * At this point we stop accepting input. To do this, we
+ * disable the receive line status interrupts, and tell the
+ * interrupt driver to stop checking the data ready bit in the
+ * line status register.
+ */
+
+ uart->ustcnt &= ~USTCNT_RXEN;
+ uart->ustcnt &= ~(USTCNT_RXEN | USTCNT_RX_INTR_MASK);
+
+ shutdown(info);
+ rs_flush_buffer(tty);
+
+ tty_ldisc_flush(tty);
+ tty->closing = 0;
+ info->event = 0;
+ info->tty = NULL;
+#warning "This is not and has never been valid so fix it"
+#if 0
+ if (tty->ldisc.num != ldiscs[N_TTY].num) {
+ if (tty->ldisc.close)
+ (tty->ldisc.close)(tty);
+ tty->ldisc = ldiscs[N_TTY];
+ tty->termios->c_line = N_TTY;
+ if (tty->ldisc.open)
+ (tty->ldisc.open)(tty);
+ }
+#endif
+ if (info->blocked_open) {
+ if (info->close_delay) {
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
+ }
+ wake_up_interruptible(&info->open_wait);
+ }
+ info->flags &= ~(S_NORMAL_ACTIVE|S_CLOSING);
+ wake_up_interruptible(&info->close_wait);
+ local_irq_restore(flags);
+}
+
+/*
+ * rs_hangup() --- called by tty_hangup() when a hangup is signaled.
+ */
+void rs_hangup(struct tty_struct *tty)
+{
+ struct m68k_serial * info = (struct m68k_serial *)tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "rs_hangup"))
+ return;
+
+ rs_flush_buffer(tty);
+ shutdown(info);
+ info->event = 0;
+ info->count = 0;
+ info->flags &= ~S_NORMAL_ACTIVE;
+ info->tty = NULL;
+ wake_up_interruptible(&info->open_wait);
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_open() and friends
+ * ------------------------------------------------------------
+ */
+static int block_til_ready(struct tty_struct *tty, struct file * filp,
+ struct m68k_serial *info)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int retval;
+ int do_clocal = 0;
+
+ /*
+ * If the device is in the middle of being closed, then block
+ * until it's done, and then try again.
+ */
+ if (info->flags & S_CLOSING) {
+ interruptible_sleep_on(&info->close_wait);
+#ifdef SERIAL_DO_RESTART
+ if (info->flags & S_HUP_NOTIFY)
+ return -EAGAIN;
+ else
+ return -ERESTARTSYS;
+#else
+ return -EAGAIN;
+#endif
+ }
+
+ /*
+ * If non-blocking mode is set, or the port is not enabled,
+ * then make the check up front and then exit.
+ */
+ if ((filp->f_flags & O_NONBLOCK) ||
+ (tty->flags & (1 << TTY_IO_ERROR))) {
+ info->flags |= S_NORMAL_ACTIVE;
+ return 0;
+ }
+
+ if (tty->termios->c_cflag & CLOCAL)
+ do_clocal = 1;
+
+ /*
+ * Block waiting for the carrier detect and the line to become
+ * free (i.e., not in use by the callout). While we are in
+ * this loop, info->count is dropped by one, so that
+ * rs_close() knows when to free things. We restore it upon
+ * exit, either normal or abnormal.
+ */
+ retval = 0;
+ add_wait_queue(&info->open_wait, &wait);
+
+ info->count--;
+ info->blocked_open++;
+ while (1) {
+ local_irq_disable();
+ m68k_rtsdtr(info, 1);
+ local_irq_enable();
+ current->state = TASK_INTERRUPTIBLE;
+ if (tty_hung_up_p(filp) ||
+ !(info->flags & S_INITIALIZED)) {
+#ifdef SERIAL_DO_RESTART
+ if (info->flags & S_HUP_NOTIFY)
+ retval = -EAGAIN;
+ else
+ retval = -ERESTARTSYS;
+#else
+ retval = -EAGAIN;
+#endif
+ break;
+ }
+ if (!(info->flags & S_CLOSING) && do_clocal)
+ break;
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+ tty_unlock();
+ schedule();
+ tty_lock();
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&info->open_wait, &wait);
+ if (!tty_hung_up_p(filp))
+ info->count++;
+ info->blocked_open--;
+
+ if (retval)
+ return retval;
+ info->flags |= S_NORMAL_ACTIVE;
+ return 0;
+}
+
+/*
+ * This routine is called whenever a serial port is opened. It
+ * enables interrupts for a serial port, linking its S structure into
+ * the IRQ chain. It also performs the serial-specific
+ * initialization for the tty structure.
+ */
+int rs_open(struct tty_struct *tty, struct file * filp)
+{
+ struct m68k_serial *info;
+ int retval, line;
+
+ line = tty->index;
+
+ if (line >= NR_PORTS || line < 0) /* we have exactly one */
+ return -ENODEV;
+
+ info = &m68k_soft[line];
+
+ if (serial_paranoia_check(info, tty->name, "rs_open"))
+ return -ENODEV;
+
+ info->count++;
+ tty->driver_data = info;
+ info->tty = tty;
+
+ /*
+ * Start up serial port
+ */
+ retval = startup(info);
+ if (retval)
+ return retval;
+
+ return block_til_ready(tty, filp, info);
+}
+
+/* Finally, routines used to initialize the serial driver. */
+
+static void show_serial_version(void)
+{
+ printk("MC68328 serial driver version 1.00\n");
+}
+
+static const struct tty_operations rs_ops = {
+ .open = rs_open,
+ .close = rs_close,
+ .write = rs_write,
+ .flush_chars = rs_flush_chars,
+ .write_room = rs_write_room,
+ .chars_in_buffer = rs_chars_in_buffer,
+ .flush_buffer = rs_flush_buffer,
+ .ioctl = rs_ioctl,
+ .throttle = rs_throttle,
+ .unthrottle = rs_unthrottle,
+ .set_termios = rs_set_termios,
+ .stop = rs_stop,
+ .start = rs_start,
+ .hangup = rs_hangup,
+ .set_ldisc = rs_set_ldisc,
+};
+
+/* rs_init inits the driver */
+static int __init
+rs68328_init(void)
+{
+ unsigned long flags;
+ int i;
+ struct m68k_serial *info;
+
+ serial_driver = alloc_tty_driver(NR_PORTS);
+ if (!serial_driver)
+ return -ENOMEM;
+
+ show_serial_version();
+
+ /* Initialize the tty_driver structure */
+ /* SPARC: Not all of this is exactly right for us. */
+
+ serial_driver->name = "ttyS";
+ serial_driver->major = TTY_MAJOR;
+ serial_driver->minor_start = 64;
+ serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ serial_driver->init_termios = tty_std_termios;
+ serial_driver->init_termios.c_cflag =
+ m68328_console_cbaud | CS8 | CREAD | HUPCL | CLOCAL;
+ serial_driver->flags = TTY_DRIVER_REAL_RAW;
+ tty_set_operations(serial_driver, &rs_ops);
+
+ if (tty_register_driver(serial_driver)) {
+ put_tty_driver(serial_driver);
+ printk(KERN_ERR "Couldn't register serial driver\n");
+ return -ENOMEM;
+ }
+
+ local_irq_save(flags);
+
+ for(i=0;i<NR_PORTS;i++) {
+
+ info = &m68k_soft[i];
+ info->magic = SERIAL_MAGIC;
+ info->port = (int) &uart_addr[i];
+ info->tty = NULL;
+ info->irq = uart_irqs[i];
+ info->custom_divisor = 16;
+ info->close_delay = 50;
+ info->closing_wait = 3000;
+ info->x_char = 0;
+ info->event = 0;
+ info->count = 0;
+ info->blocked_open = 0;
+ INIT_WORK(&info->tqueue, do_softint);
+ init_waitqueue_head(&info->open_wait);
+ init_waitqueue_head(&info->close_wait);
+ info->line = i;
+ info->is_cons = 1; /* Means shortcuts work */
+
+ printk("%s%d at 0x%08x (irq = %d)", serial_driver->name, info->line,
+ info->port, info->irq);
+ printk(" is a builtin MC68328 UART\n");
+
+#ifdef CONFIG_M68VZ328
+ if (i > 0 )
+ PJSEL &= 0xCF; /* PSW enable second port output */
+#endif
+
+ if (request_irq(uart_irqs[i],
+ rs_interrupt,
+ IRQF_DISABLED,
+ "M68328_UART", info))
+ panic("Unable to attach 68328 serial interrupt\n");
+ }
+ local_irq_restore(flags);
+ return 0;
+}
+
+module_init(rs68328_init);
+
+
+
+static void m68328_set_baud(void)
+{
+ unsigned short ustcnt;
+ int i;
+
+ ustcnt = USTCNT;
+ USTCNT = ustcnt & ~USTCNT_TXEN;
+
+again:
+ for (i = 0; i < ARRAY_SIZE(baud_table); i++)
+ if (baud_table[i] == m68328_console_baud)
+ break;
+ if (i >= ARRAY_SIZE(baud_table)) {
+ m68328_console_baud = 9600;
+ goto again;
+ }
+
+ UBAUD = PUT_FIELD(UBAUD_DIVIDE, hw_baud_table[i].divisor) |
+ PUT_FIELD(UBAUD_PRESCALER, hw_baud_table[i].prescale);
+ ustcnt &= ~(USTCNT_PARITYEN | USTCNT_ODD_EVEN | USTCNT_STOP | USTCNT_8_7);
+ ustcnt |= USTCNT_8_7;
+ ustcnt |= USTCNT_TXEN;
+ USTCNT = ustcnt;
+ m68328_console_initted = 1;
+ return;
+}
+
+
+int m68328_console_setup(struct console *cp, char *arg)
+{
+ int i, n = CONSOLE_BAUD_RATE;
+
+ if (!cp)
+ return(-1);
+
+ if (arg)
+ n = simple_strtoul(arg,NULL,0);
+
+ for (i = 0; i < ARRAY_SIZE(baud_table); i++)
+ if (baud_table[i] == n)
+ break;
+ if (i < ARRAY_SIZE(baud_table)) {
+ m68328_console_baud = n;
+ m68328_console_cbaud = 0;
+ if (i > 15) {
+ m68328_console_cbaud |= CBAUDEX;
+ i -= 15;
+ }
+ m68328_console_cbaud |= i;
+ }
+
+ m68328_set_baud(); /* make sure baud rate changes */
+ return(0);
+}
+
+
+static struct tty_driver *m68328_console_device(struct console *c, int *index)
+{
+ *index = c->index;
+ return serial_driver;
+}
+
+
+void m68328_console_write (struct console *co, const char *str,
+ unsigned int count)
+{
+ if (!m68328_console_initted)
+ m68328_set_baud();
+ while (count--) {
+ if (*str == '\n')
+ rs_put_char('\r');
+ rs_put_char( *str++ );
+ }
+}
+
+
+static struct console m68328_driver = {
+ .name = "ttyS",
+ .write = m68328_console_write,
+ .device = m68328_console_device,
+ .setup = m68328_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+
+static int __init m68328_console_init(void)
+{
+ register_console(&m68328_driver);
+ return 0;
+}
+
+console_initcall(m68328_console_init);
diff --git a/drivers/tty/serial/68328serial.h b/drivers/tty/serial/68328serial.h
new file mode 100644
index 00000000..8c9c3c07
--- /dev/null
+++ b/drivers/tty/serial/68328serial.h
@@ -0,0 +1,187 @@
+/* 68328serial.h: Definitions for the mc68328 serial driver.
+ *
+ * Copyright (C) 1995 David S. Miller <davem@caip.rutgers.edu>
+ * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>
+ * Copyright (C) 1998, 1999 D. Jeff Dionne <jeff@uclinux.org>
+ * Copyright (C) 1999 Vladimir Gurevich <vgurevic@cisco.com>
+ *
+ * VZ Support/Fixes Evan Stawnyczy <e@lineo.ca>
+ */
+
+#ifndef _MC683XX_SERIAL_H
+#define _MC683XX_SERIAL_H
+
+
+struct serial_struct {
+ int type;
+ int line;
+ int port;
+ int irq;
+ int flags;
+ int xmit_fifo_size;
+ int custom_divisor;
+ int baud_base;
+ unsigned short close_delay;
+ char reserved_char[2];
+ int hub6; /* FIXME: We don't have AT&T Hub6 boards! */
+ unsigned short closing_wait; /* time to wait before closing */
+ unsigned short closing_wait2; /* no longer used... */
+ int reserved[4];
+};
+
+/*
+ * For the close wait times, 0 means wait forever for serial port to
+ * flush its output. 65535 means don't wait at all.
+ */
+#define S_CLOSING_WAIT_INF 0
+#define S_CLOSING_WAIT_NONE 65535
+
+/*
+ * Definitions for S_struct (and serial_struct) flags field
+ */
+#define S_HUP_NOTIFY 0x0001 /* Notify getty on hangups and closes
+ on the callout port */
+#define S_FOURPORT 0x0002 /* Set OU1, OUT2 per AST Fourport settings */
+#define S_SAK 0x0004 /* Secure Attention Key (Orange book) */
+#define S_SPLIT_TERMIOS 0x0008 /* Separate termios for dialin/callout */
+
+#define S_SPD_MASK 0x0030
+#define S_SPD_HI 0x0010 /* Use 56000 instead of 38400 bps */
+
+#define S_SPD_VHI 0x0020 /* Use 115200 instead of 38400 bps */
+#define S_SPD_CUST 0x0030 /* Use user-specified divisor */
+
+#define S_SKIP_TEST 0x0040 /* Skip UART test during autoconfiguration */
+#define S_AUTO_IRQ 0x0080 /* Do automatic IRQ during autoconfiguration */
+#define S_SESSION_LOCKOUT 0x0100 /* Lock out cua opens based on session */
+#define S_PGRP_LOCKOUT 0x0200 /* Lock out cua opens based on pgrp */
+#define S_CALLOUT_NOHUP 0x0400 /* Don't do hangups for cua device */
+
+#define S_FLAGS 0x0FFF /* Possible legal S flags */
+#define S_USR_MASK 0x0430 /* Legal flags that non-privileged
+ * users can set or reset */
+
+/* Internal flags used only by kernel/chr_drv/serial.c */
+#define S_INITIALIZED 0x80000000 /* Serial port was initialized */
+#define S_CALLOUT_ACTIVE 0x40000000 /* Call out device is active */
+#define S_NORMAL_ACTIVE 0x20000000 /* Normal device is active */
+#define S_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */
+#define S_CLOSING 0x08000000 /* Serial port is closing */
+#define S_CTS_FLOW 0x04000000 /* Do CTS flow control */
+#define S_CHECK_CD 0x02000000 /* i.e., CLOCAL */
+
+/* Software state per channel */
+
+#ifdef __KERNEL__
+
+/*
+ * I believe this is the optimal setting that reduces the number of interrupts.
+ * At high speeds the output might become a little "bursty" (use USTCNT_TXHE
+ * if that bothers you), but in most cases it will not, since we try to
+ * transmit characters every time rs_interrupt is called. Thus, quite often
+ * you'll see that a receive interrupt occurs before the transmit one.
+ * -- Vladimir Gurevich
+ */
+#define USTCNT_TX_INTR_MASK (USTCNT_TXEE)
+
+/*
+ * The 68328 and 68EZ328 UARTs are a little bit different. The EZ328 has a
+ * special "old data interrupt" which occurs whenever data stays in the FIFO
+ * longer than 30 bit times. This allows us to use the FIFO without
+ * compromising latency. The '328 does not have this feature, and without a
+ * real '328-based board to test on, I assume RXRE is the safest setting.
+ *
+ * For the EZ328 I use the RXHE (half empty) interrupt to reduce the number
+ * of interrupts. RXFE (receive queue full) causes the system to lose data
+ * at least at 115200 baud.
+ *
+ * If your board is busy doing other stuff, you might consider using
+ * RXRE (data ready interrupt) instead.
+ *
+ * The other option is to make these INTR masks run-time configurable, so
+ * that people can dynamically adapt them according to the current usage.
+ * -- Vladimir Gurevich
+ */
+
+/* (es) */
+#if defined(CONFIG_M68EZ328) || defined(CONFIG_M68VZ328)
+#define USTCNT_RX_INTR_MASK (USTCNT_RXHE | USTCNT_ODEN)
+#elif defined(CONFIG_M68328)
+#define USTCNT_RX_INTR_MASK (USTCNT_RXRE)
+#else
+#error Please, define the Rx interrupt events for your CPU
+#endif
+/* (/es) */
+
+/*
+ * This is our internal structure for each serial port's state.
+ *
+ * Many fields parallel those in the serial_struct structure.
+ *
+ * For definitions of the flags field, see tty.h
+ */
+
+struct m68k_serial {
+ char soft_carrier; /* Use soft carrier on this channel */
+ char break_abort; /* Is serial console in, so process brk/abrt */
+ char is_cons; /* Is this our console. */
+
+ /* We need to know the current clock divisor
+ * to read the bps rate the chip has currently
+ * loaded.
+ */
+ unsigned char clk_divisor; /* May be 1, 16, 32, or 64 */
+ int baud;
+ int magic;
+ int baud_base;
+ int port;
+ int irq;
+ int flags; /* defined in tty.h */
+ int type; /* UART type */
+ struct tty_struct *tty;
+ int read_status_mask;
+ int ignore_status_mask;
+ int timeout;
+ int xmit_fifo_size;
+ int custom_divisor;
+ int x_char; /* xon/xoff character */
+ int close_delay;
+ unsigned short closing_wait;
+ unsigned short closing_wait2;
+ unsigned long event;
+ unsigned long last_active;
+ int line;
+ int count; /* # of fd on device */
+ int blocked_open; /* # of blocked opens */
+ unsigned char *xmit_buf;
+ int xmit_head;
+ int xmit_tail;
+ int xmit_cnt;
+ struct work_struct tqueue;
+ wait_queue_head_t open_wait;
+ wait_queue_head_t close_wait;
+};
+
+
+#define SERIAL_MAGIC 0x5301
+
+/*
+ * The size of the serial xmit buffer is 1 page, or 4096 bytes
+ */
+#define SERIAL_XMIT_SIZE 4096
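+/* Editorial sketch (not part of the original header): drivers of this vintage
+ * typically advance the xmit_buf ring declared in struct m68k_serial with
+ * power-of-two wrap-around, which is why SERIAL_XMIT_SIZE is one page:
+ *
+ *   info->xmit_buf[info->xmit_head++] = ch;
+ *   info->xmit_head &= SERIAL_XMIT_SIZE - 1;
+ *   info->xmit_cnt++;
+ *
+ * The real producer/consumer code lives in 68328serial.c; this is only an
+ * illustration of how xmit_head, xmit_tail and xmit_cnt are meant to be used.
+ */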
+
+/*
+ * Events are used to schedule things to happen at timer-interrupt
+ * time, instead of at rs interrupt time.
+ */
+#define RS_EVENT_WRITE_WAKEUP 0
+
+/*
+ * Define the number of ports supported and their irqs.
+ */
+#define NR_PORTS 1
+#define UART_IRQ_DEFNS {UART_IRQ_NUM}
+
+#endif /* __KERNEL__ */
+#endif /* !(_MC683XX_SERIAL_H) */
diff --git a/drivers/tty/serial/68360serial.c b/drivers/tty/serial/68360serial.c
new file mode 100644
index 00000000..0a3e8787
--- /dev/null
+++ b/drivers/tty/serial/68360serial.c
@@ -0,0 +1,2979 @@
+/*
+ * UART driver for 68360 CPM SCC or SMC
+ * Copyright (c) 2000 D. Jeff Dionne <jeff@uclinux.org>,
+ * Copyright (c) 2000 Michael Leslie <mleslie@lineo.ca>
+ * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
+ *
+ * I used the serial.c driver as the framework for this driver.
+ * Give credit to those guys.
+ * The original code was written for the MBX860 board. I tried to make
+ * it generic, but there may be some assumptions in the structures that
+ * have to be fixed later.
+ * To save porting time, I did not bother to change any object names
+ * that are not accessed outside of this file.
+ * It still needs lots of work. When it was easy, I included code
+ * to support the SCCs, but this has never been tested, nor is it complete.
+ * Only the SCCs support modem control, so that is not complete either.
+ *
+ * This module exports the following rs232 io functions:
+ *
+ * int rs_360_init(void);
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serialP.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <asm/irq.h>
+#include <asm/m68360.h>
+#include <asm/commproc.h>
+
+
+#ifdef CONFIG_KGDB
+extern void breakpoint(void);
+extern void set_debug_traps(void);
+extern int kgdb_output_string (const char* s, unsigned int count);
+#endif
+
+
+/* #ifdef CONFIG_SERIAL_CONSOLE */ /* This seems to be a post 2.0 thing - mles */
+#include <linux/console.h>
+#include <linux/jiffies.h>
+
+/* this defines the index into rs_table for the port to use
+ */
+#ifndef CONFIG_SERIAL_CONSOLE_PORT
+#define CONFIG_SERIAL_CONSOLE_PORT 1 /* ie SMC2 - note USE_SMC2 must be defined */
+#endif
+/* #endif */
+
+#if 0
+/* SCC2 for console
+ */
+#undef CONFIG_SERIAL_CONSOLE_PORT
+#define CONFIG_SERIAL_CONSOLE_PORT 2
+#endif
+
+
+#define TX_WAKEUP ASYNC_SHARE_IRQ
+
+static char *serial_name = "CPM UART driver";
+static char *serial_version = "0.03";
+
+static struct tty_driver *serial_driver;
+int serial_console_setup(struct console *co, char *options);
+
+/*
+ * Serial driver configuration section. Here are the various options:
+ */
+#define SERIAL_PARANOIA_CHECK
+#define CONFIG_SERIAL_NOPAUSE_IO
+#define SERIAL_DO_RESTART
+
+/* Set of debugging defines */
+
+#undef SERIAL_DEBUG_INTR
+#undef SERIAL_DEBUG_OPEN
+#undef SERIAL_DEBUG_FLOW
+#undef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+
+#define _INLINE_ inline
+
+#define DBG_CNT(s)
+
+/* We overload some of the items in the data structure to meet our
+ * needs. For example, the port address is the CPM parameter ram
+ * offset for the SCC or SMC. The maximum number of ports is 4 SCCs and
+ * 2 SMCs. The "hub6" field is used to indicate the channel number, with
+ * a flag indicating SCC or SMC, and the number is used as an index into
+ * the CPM parameter area for this device.
+ * The "type" field is currently set to 0, for PORT_UNKNOWN. It is
+ * not currently used. I should probably use it to indicate the port
+ * type of SMC or SCC.
+ * The SMCs do not support any modem control signals.
+ */
+#define smc_scc_num hub6
+#define NUM_IS_SCC ((int)0x00010000)
+#define PORT_NUM(P) ((P) & 0x0000ffff)
+
+
+#if defined (CONFIG_UCQUICC)
+
+volatile extern void *_periph_base;
+/* sipex transceiver
+ * mode bits for each SCC are on pins
+ *
+ * SCC2 d16..19
+ * SCC3 d20..23
+ * SCC4 d24..27
+ */
+#define SIPEX_MODE(n,m) ((m & 0x0f)<<(16+4*(n-1)))
+
+static uint sipex_mode_bits = 0x00000000;
+
+#endif
+
+/* There is no `serial_state' defined back here in 2.0.
+ * Try to get by with serial_struct
+ */
+/* #define serial_state serial_struct */
+
+/* 2.4 -> 2.0 portability problem: async_icount in 2.4 has a few
+ * extras: */
+
+#if 0
+struct async_icount_24 {
+ __u32 cts, dsr, rng, dcd, tx, rx;
+ __u32 frame, parity, overrun, brk;
+ __u32 buf_overrun;
+} icount;
+#endif
+
+#if 0
+
+struct serial_state {
+ int magic;
+ int baud_base;
+ unsigned long port;
+ int irq;
+ int flags;
+ int hub6;
+ int type;
+ int line;
+ int revision; /* Chip revision (950) */
+ int xmit_fifo_size;
+ int custom_divisor;
+ int count;
+ u8 *iomem_base;
+ u16 iomem_reg_shift;
+ unsigned short close_delay;
+ unsigned short closing_wait; /* time to wait before closing */
+ struct async_icount_24 icount;
+ int io_type;
+ struct async_struct *info;
+};
+#endif
+
+#define SSTATE_MAGIC 0x5302
+
+
+
+/* SMC2 is sometimes used for low performance TDM interfaces. Define
+ * this as 1 if you want SMC2 as a serial port UART managed by this driver.
+ * Define this as 0 if you wish to use SMC2 for something else.
+ */
+#define USE_SMC2 1
+
+#if 0
+/* Define SCC to ttySx mapping. */
+#define SCC_NUM_BASE (USE_SMC2 + 1) /* SCC base tty "number" */
+
+/* Define which SCC is the first one to use for a serial port. These
+ * are 0-based numbers, i.e. this assumes the first SCC (SCC1) is used
+ * for Ethernet, and the first available SCC for serial UART is SCC2.
+ * NOTE: IF YOU CHANGE THIS, you have to change the PROFF_xxx and
+ * interrupt vectors in the table below to match.
+ */
+#define SCC_IDX_BASE 1 /* table index */
+#endif
+
+
+/* Processors other than the 860 only get SMCs configured by default.
+ * Either they don't have SCCs or they are allocated somewhere else.
+ * Of course, there are now 860s without some SCCs, so we will need to
+ * address that someday.
+ * The Embedded Planet Multimedia I/O cards use TDM interfaces to the
+ * stereo codec parts, and we use SMC2 to help support that.
+ */
+static struct serial_state rs_table[] = {
+/* type line PORT IRQ FLAGS smc_scc_num (F.K.A. hub6) */
+ { 0, 0, PRSLOT_SMC1, CPMVEC_SMC1, 0, 0 } /* SMC1 ttyS0 */
+#if USE_SMC2
+ ,{ 0, 0, PRSLOT_SMC2, CPMVEC_SMC2, 0, 1 } /* SMC2 ttyS1 */
+#endif
+
+#if defined(CONFIG_SERIAL_68360_SCC)
+ ,{ 0, 0, PRSLOT_SCC2, CPMVEC_SCC2, 0, (NUM_IS_SCC | 1) } /* SCC2 ttyS2 */
+ ,{ 0, 0, PRSLOT_SCC3, CPMVEC_SCC3, 0, (NUM_IS_SCC | 2) } /* SCC3 ttyS3 */
+ ,{ 0, 0, PRSLOT_SCC4, CPMVEC_SCC4, 0, (NUM_IS_SCC | 3) } /* SCC4 ttyS4 */
+#endif
+};
+
+#define NR_PORTS (sizeof(rs_table)/sizeof(struct serial_state))
+
+/* The number of buffer descriptors and their sizes.
+ */
+#define RX_NUM_FIFO 4
+#define RX_BUF_SIZE 32
+#define TX_NUM_FIFO 4
+#define TX_BUF_SIZE 32
+
+#define CONSOLE_NUM_FIFO 2
+#define CONSOLE_BUF_SIZE 4
+
+char *console_fifos[CONSOLE_NUM_FIFO * CONSOLE_BUF_SIZE];
+
+/* The async_struct in serial.h does not really give us what we
+ * need, so define our own here.
+ */
+typedef struct serial_info {
+ int magic;
+ int flags;
+
+ struct serial_state *state;
+ /* struct serial_struct *state; */
+ /* struct async_struct *state; */
+
+ struct tty_struct *tty;
+ int read_status_mask;
+ int ignore_status_mask;
+ int timeout;
+ int line;
+ int x_char; /* xon/xoff character */
+ int close_delay;
+ unsigned short closing_wait;
+ unsigned short closing_wait2;
+ unsigned long event;
+ unsigned long last_active;
+ int blocked_open; /* # of blocked opens */
+ struct work_struct tqueue;
+ struct work_struct tqueue_hangup;
+ wait_queue_head_t open_wait;
+ wait_queue_head_t close_wait;
+
+
+/* CPM Buffer Descriptor pointers.
+ */
+ QUICC_BD *rx_bd_base;
+ QUICC_BD *rx_cur;
+ QUICC_BD *tx_bd_base;
+ QUICC_BD *tx_cur;
+} ser_info_t;
+
+
+/* since kmalloc_init() does not get called until much after this initialization: */
+static ser_info_t quicc_ser_info[NR_PORTS];
+static char rx_buf_pool[NR_PORTS * RX_NUM_FIFO * RX_BUF_SIZE];
+static char tx_buf_pool[NR_PORTS * TX_NUM_FIFO * TX_BUF_SIZE];
+
+static void change_speed(ser_info_t *info);
+static void rs_360_wait_until_sent(struct tty_struct *tty, int timeout);
+
+static inline int serial_paranoia_check(ser_info_t *info,
+ char *name, const char *routine)
+{
+#ifdef SERIAL_PARANOIA_CHECK
+ static const char *badmagic =
+ "Warning: bad magic number for serial struct (%s) in %s\n";
+ static const char *badinfo =
+ "Warning: null async_struct for (%s) in %s\n";
+
+ if (!info) {
+ printk(badinfo, name, routine);
+ return 1;
+ }
+ if (info->magic != SERIAL_MAGIC) {
+ printk(badmagic, name, routine);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/*
+ * This is used to figure out the divisor speeds and the timeouts,
+ * indexed by the termio value. The generic CPM functions are responsible
+ * for setting and assigning baud rate generators for us.
+ */
+static int baud_table[] = {
+ 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
+ 9600, 19200, 38400, 57600, 115200, 230400, 460800, 0 };
+
+/* This sucks. There is a better way: */
+#if defined(CONFIG_CONSOLE_9600)
+ #define CONSOLE_BAUDRATE 9600
+#elif defined(CONFIG_CONSOLE_19200)
+ #define CONSOLE_BAUDRATE 19200
+#elif defined(CONFIG_CONSOLE_115200)
+ #define CONSOLE_BAUDRATE 115200
+#else
+ #warning "console baud rate undefined"
+ #define CONSOLE_BAUDRATE 9600
+#endif
+
+/*
+ * ------------------------------------------------------------
+ * rs_stop() and rs_start()
+ *
+ * These routines are called before setting or resetting tty->stopped.
+ * They enable or disable transmitter interrupts, as necessary.
+ * ------------------------------------------------------------
+ */
+static void rs_360_stop(struct tty_struct *tty)
+{
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+ int idx;
+ unsigned long flags;
+ volatile struct scc_regs *sccp;
+ volatile struct smc_regs *smcp;
+
+ if (serial_paranoia_check(info, tty->name, "rs_stop"))
+ return;
+
+ local_irq_save(flags);
+ idx = PORT_NUM(info->state->smc_scc_num);
+ if (info->state->smc_scc_num & NUM_IS_SCC) {
+ sccp = &pquicc->scc_regs[idx];
+ sccp->scc_sccm &= ~UART_SCCM_TX;
+ } else {
+ /* smcp = &cpmp->cp_smc[idx]; */
+ smcp = &pquicc->smc_regs[idx];
+ smcp->smc_smcm &= ~SMCM_TX;
+ }
+ local_irq_restore(flags);
+}
+
+
+static void rs_360_start(struct tty_struct *tty)
+{
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+ int idx;
+ unsigned long flags;
+ volatile struct scc_regs *sccp;
+ volatile struct smc_regs *smcp;
+
+ if (serial_paranoia_check(info, tty->name, "rs_stop"))
+ return;
+
+ local_irq_save(flags);
+ idx = PORT_NUM(info->state->smc_scc_num);
+ if (info->state->smc_scc_num & NUM_IS_SCC) {
+ sccp = &pquicc->scc_regs[idx];
+ sccp->scc_sccm |= UART_SCCM_TX;
+ } else {
+ smcp = &pquicc->smc_regs[idx];
+ smcp->smc_smcm |= SMCM_TX;
+ }
+ local_irq_restore(flags);
+}
+
+/*
+ * ----------------------------------------------------------------------
+ *
+ * Here starts the interrupt handling routines. All of the following
+ * subroutines are declared as inline and are folded into
+ * rs_interrupt(). They were separated out for readability's sake.
+ *
+ * Note: rs_interrupt() is a "fast" interrupt, which means that it
+ * runs with interrupts turned off. People who may want to modify
+ * rs_interrupt() should try to keep the interrupt handler as fast as
+ * possible. After you are done making modifications, it is not a bad
+ * idea to do:
+ *
+ * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c
+ *
+ * and look at the resulting assembly code in serial.s.
+ *
+ * - Ted Ts'o (tytso@mit.edu), 7-Mar-93
+ * -----------------------------------------------------------------------
+ */
+
+static _INLINE_ void receive_chars(ser_info_t *info)
+{
+ struct tty_struct *tty = info->port.tty;
+ unsigned char ch, flag, *cp;
+ /*int ignored = 0;*/
+ int i;
+ ushort status;
+ struct async_icount *icount;
+ /* struct async_icount_24 *icount; */
+ volatile QUICC_BD *bdp;
+
+ icount = &info->state->icount;
+
+ /* Just loop through the closed BDs and copy the characters into
+ * the buffer.
+ */
+ bdp = info->rx_cur;
+ for (;;) {
+ if (bdp->status & BD_SC_EMPTY) /* If this one is empty */
+ break; /* we are all done */
+
+ * The read status mask tells us what we should do with
+ * incoming characters, especially if errors occur.
+ * One special case is the use of BD_SC_EMPTY. If
+ * this is not set, we are supposed to be ignoring
+ * inputs. In this case, just mark the buffer empty and
+ * continue.
+ */
+ if (!(info->read_status_mask & BD_SC_EMPTY)) {
+ bdp->status |= BD_SC_EMPTY;
+ bdp->status &=
+ ~(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV);
+
+ if (bdp->status & BD_SC_WRAP)
+ bdp = info->rx_bd_base;
+ else
+ bdp++;
+ continue;
+ }
+
+ /* Get the number of characters and the buffer pointer.
+ */
+ i = bdp->length;
+ /* cp = (unsigned char *)__va(bdp->buf); */
+ cp = (unsigned char *)bdp->buf;
+ status = bdp->status;
+
+ while (i-- > 0) {
+ ch = *cp++;
+ icount->rx++;
+
+#ifdef SERIAL_DEBUG_INTR
+ printk("DR%02x:%02x...", ch, status);
+#endif
+ flag = TTY_NORMAL;
+
+ if (status & (BD_SC_BR | BD_SC_FR |
+ BD_SC_PR | BD_SC_OV)) {
+ /*
+ * For statistics only
+ */
+ if (status & BD_SC_BR)
+ icount->brk++;
+ else if (status & BD_SC_PR)
+ icount->parity++;
+ else if (status & BD_SC_FR)
+ icount->frame++;
+ if (status & BD_SC_OV)
+ icount->overrun++;
+
+ /*
+ * Now check to see if character should be
+ * ignored, and mask off conditions which
+ * should be ignored.
+ if (status & info->ignore_status_mask) {
+ if (++ignored > 100)
+ break;
+ continue;
+ }
+ */
+ status &= info->read_status_mask;
+
+ if (status & (BD_SC_BR)) {
+#ifdef SERIAL_DEBUG_INTR
+ printk("handling break....");
+#endif
+ flag = TTY_BREAK;
+ if (info->flags & ASYNC_SAK)
+ do_SAK(tty);
+ } else if (status & BD_SC_PR)
+ flag = TTY_PARITY;
+ else if (status & BD_SC_FR)
+ flag = TTY_FRAME;
+ }
+ tty_insert_flip_char(tty, ch, flag);
+ if (status & BD_SC_OV)
+ /*
+ * Overrun is special, since it's
+ * reported immediately, and doesn't
+ * affect the current character
+ */
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+
+ /* This BD is ready to be used again. Clear status.
+ * Get next BD.
+ */
+ bdp->status |= BD_SC_EMPTY;
+ bdp->status &= ~(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV);
+
+ if (bdp->status & BD_SC_WRAP)
+ bdp = info->rx_bd_base;
+ else
+ bdp++;
+ }
+
+ info->rx_cur = (QUICC_BD *)bdp;
+
+ tty_schedule_flip(tty);
+}
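+/* Editorial note (not part of the original driver): the loop above walks the
+ * receive buffer-descriptor ring in the pattern that every BD user in this
+ * file repeats:
+ *
+ *   while (!(bdp->status & BD_SC_EMPTY)) {
+ *       consume bdp->length bytes from bdp->buf;
+ *       bdp->status |= BD_SC_EMPTY;     (hand the BD back to the CPM)
+ *       bdp = (bdp->status & BD_SC_WRAP) ? info->rx_bd_base : bdp + 1;
+ *   }
+ *
+ * Only the error/flag handling differs between callers.
+ */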
+
+static _INLINE_ void receive_break(ser_info_t *info)
+{
+ struct tty_struct *tty = info->port.tty;
+
+ info->state->icount.brk++;
+ /* Check to see if there is room in the tty buffer for
+ * the break. If not, we exit now, losing the break. FIXME
+ */
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
+ tty_schedule_flip(tty);
+}
+
+static _INLINE_ void transmit_chars(ser_info_t *info)
+{
+
+ if ((info->flags & TX_WAKEUP) ||
+ (info->port.tty->flags & (1 << TTY_DO_WRITE_WAKEUP))) {
+ schedule_work(&info->tqueue);
+ }
+
+#ifdef SERIAL_DEBUG_INTR
+ printk("THRE...");
+#endif
+}
+
+#ifdef notdef
+ /* I need to do this for the SCCs, so it is left as a reminder.
+ */
+static _INLINE_ void check_modem_status(struct async_struct *info)
+{
+ int status;
+ /* struct async_icount *icount; */
+ struct async_icount_24 *icount;
+
+ status = serial_in(info, UART_MSR);
+
+ if (status & UART_MSR_ANY_DELTA) {
+ icount = &info->state->icount;
+ /* update input line counters */
+ if (status & UART_MSR_TERI)
+ icount->rng++;
+ if (status & UART_MSR_DDSR)
+ icount->dsr++;
+ if (status & UART_MSR_DDCD) {
+ icount->dcd++;
+#ifdef CONFIG_HARD_PPS
+ if ((info->flags & ASYNC_HARDPPS_CD) &&
+ (status & UART_MSR_DCD))
+ hardpps();
+#endif
+ }
+ if (status & UART_MSR_DCTS)
+ icount->cts++;
+ wake_up_interruptible(&info->delta_msr_wait);
+ }
+
+ if ((info->flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) {
+#if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR))
+ printk("ttys%d CD now %s...", info->line,
+ (status & UART_MSR_DCD) ? "on" : "off");
+#endif
+ if (status & UART_MSR_DCD)
+ wake_up_interruptible(&info->open_wait);
+ else {
+#ifdef SERIAL_DEBUG_OPEN
+ printk("scheduling hangup...");
+#endif
+ queue_task(&info->tqueue_hangup,
+ &tq_scheduler);
+ }
+ }
+ if (info->flags & ASYNC_CTS_FLOW) {
+ if (info->port.tty->hw_stopped) {
+ if (status & UART_MSR_CTS) {
+#if (defined(SERIAL_DEBUG_INTR) || defined(SERIAL_DEBUG_FLOW))
+ printk("CTS tx start...");
+#endif
+ info->port.tty->hw_stopped = 0;
+ info->IER |= UART_IER_THRI;
+ serial_out(info, UART_IER, info->IER);
+ rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
+ return;
+ }
+ } else {
+ if (!(status & UART_MSR_CTS)) {
+#if (defined(SERIAL_DEBUG_INTR) || defined(SERIAL_DEBUG_FLOW))
+ printk("CTS tx stop...");
+#endif
+ info->port.tty->hw_stopped = 1;
+ info->IER &= ~UART_IER_THRI;
+ serial_out(info, UART_IER, info->IER);
+ }
+ }
+ }
+}
+#endif
+
+/*
+ * This is the serial driver's interrupt routine for a single port
+ */
+/* static void rs_360_interrupt(void *dev_id) */ /* until and if we start servicing irqs here */
+static void rs_360_interrupt(int vec, void *dev_id)
+{
+ u_char events;
+ int idx;
+ ser_info_t *info;
+ volatile struct smc_regs *smcp;
+ volatile struct scc_regs *sccp;
+
+ info = dev_id;
+
+ idx = PORT_NUM(info->state->smc_scc_num);
+ if (info->state->smc_scc_num & NUM_IS_SCC) {
+ sccp = &pquicc->scc_regs[idx];
+ events = sccp->scc_scce;
+ if (events & SCCM_RX)
+ receive_chars(info);
+ if (events & SCCM_TX)
+ transmit_chars(info);
+ sccp->scc_scce = events;
+ } else {
+ smcp = &pquicc->smc_regs[idx];
+ events = smcp->smc_smce;
+ if (events & SMCM_BRKE)
+ receive_break(info);
+ if (events & SMCM_RX)
+ receive_chars(info);
+ if (events & SMCM_TX)
+ transmit_chars(info);
+ smcp->smc_smce = events;
+ }
+
+#ifdef SERIAL_DEBUG_INTR
+ printk("rs_interrupt_single(%d, %x)...",
+ info->state->smc_scc_num, events);
+#endif
+#ifdef modem_control
+ check_modem_status(info);
+#endif
+ info->last_active = jiffies;
+#ifdef SERIAL_DEBUG_INTR
+ printk("end.\n");
+#endif
+}
+
+
+/*
+ * -------------------------------------------------------------------
+ * Here ends the serial interrupt routines.
+ * -------------------------------------------------------------------
+ */
+
+
+static void do_softint(void *private_)
+{
+ ser_info_t *info = (ser_info_t *) private_;
+ struct tty_struct *tty;
+
+ tty = info->port.tty;
+ if (!tty)
+ return;
+
+ if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event))
+ tty_wakeup(tty);
+}
+
+
+/*
+ * This routine is called from the scheduler tqueue when the interrupt
+ * routine has signalled that a hangup has occurred. The path of
+ * hangup processing is:
+ *
+ * serial interrupt routine -> (scheduler tqueue) ->
+ * do_serial_hangup() -> tty->hangup() -> rs_hangup()
+ *
+ */
+static void do_serial_hangup(void *private_)
+{
+ struct async_struct *info = (struct async_struct *) private_;
+ struct tty_struct *tty;
+
+ tty = info->port.tty;
+ if (!tty)
+ return;
+
+ tty_hangup(tty);
+}
+
+
+static int startup(ser_info_t *info)
+{
+ unsigned long flags;
+ int retval=0;
+ int idx;
+ /*struct serial_state *state = info->state;*/
+ volatile struct smc_regs *smcp;
+ volatile struct scc_regs *sccp;
+ volatile struct smc_uart_pram *up;
+ volatile struct uart_pram *scup;
+
+
+ local_irq_save(flags);
+
+ if (info->flags & ASYNC_INITIALIZED) {
+ goto errout;
+ }
+
+#ifdef maybe
+ if (!state->port || !state->type) {
+ if (info->port.tty)
+ set_bit(TTY_IO_ERROR, &info->port.tty->flags);
+ goto errout;
+ }
+#endif
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("starting up ttys%d (irq %d)...", info->line, state->irq);
+#endif
+
+
+#ifdef modem_control
+ info->MCR = 0;
+ if (info->port.tty->termios->c_cflag & CBAUD)
+ info->MCR = UART_MCR_DTR | UART_MCR_RTS;
+#endif
+
+ if (info->port.tty)
+ clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
+
+ /*
+ * and set the speed of the serial port
+ */
+ change_speed(info);
+
+ idx = PORT_NUM(info->state->smc_scc_num);
+ if (info->state->smc_scc_num & NUM_IS_SCC) {
+ sccp = &pquicc->scc_regs[idx];
+ scup = &pquicc->pram[info->state->port].scc.pscc.u;
+
+ scup->mrblr = RX_BUF_SIZE;
+ scup->max_idl = RX_BUF_SIZE;
+
+ sccp->scc_sccm |= (UART_SCCM_TX | UART_SCCM_RX);
+ sccp->scc_gsmr.w.low |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+ } else {
+ smcp = &pquicc->smc_regs[idx];
+
+ /* Enable interrupts and I/O.
+ */
+ smcp->smc_smcm |= (SMCM_RX | SMCM_TX);
+ smcp->smc_smcmr |= (SMCMR_REN | SMCMR_TEN);
+
+ /* We can tune the buffer length and idle characters
+ * to take advantage of the entire incoming buffer size.
+ * If mrblr is something other than 1, maxidl has to be
+ * non-zero or we never get an interrupt. The maxidl
+ * is the number of character times we wait after reception
+ * of the last character before we decide no more characters
+ * are coming.
+ */
+ /* up = (smc_uart_t *)&pquicc->cp_dparam[state->port]; */
+ /* holy unionized structures, Batman: */
+ up = &pquicc->pram[info->state->port].scc.pothers.idma_smc.psmc.u;
+
+ up->mrblr = RX_BUF_SIZE;
+ up->max_idl = RX_BUF_SIZE;
+
+ up->brkcr = 1; /* number of break chars */
+ }
+
+ info->flags |= ASYNC_INITIALIZED;
+ local_irq_restore(flags);
+ return 0;
+
+errout:
+ local_irq_restore(flags);
+ return retval;
+}
+
+/*
+ * This routine will shutdown a serial port; interrupts are disabled, and
+ * DTR is dropped if the hangup on close termio flag is on.
+ */
+static void shutdown(ser_info_t *info)
+{
+ unsigned long flags;
+ struct serial_state *state;
+ int idx;
+ volatile struct smc_regs *smcp;
+ volatile struct scc_regs *sccp;
+
+ if (!(info->flags & ASYNC_INITIALIZED))
+ return;
+
+ state = info->state;
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("Shutting down serial port %d (irq %d)....", info->line,
+ state->irq);
+#endif
+
+ local_irq_save(flags);
+
+ idx = PORT_NUM(state->smc_scc_num);
+ if (state->smc_scc_num & NUM_IS_SCC) {
+ sccp = &pquicc->scc_regs[idx];
+ sccp->scc_gsmr.w.low &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+#ifdef CONFIG_SERIAL_CONSOLE
+ /* We can't disable the transmitter if this is the
+ * system console.
+ */
+ if ((state - rs_table) != CONFIG_SERIAL_CONSOLE_PORT)
+#endif
+ sccp->scc_sccm &= ~(UART_SCCM_TX | UART_SCCM_RX);
+ } else {
+ smcp = &pquicc->smc_regs[idx];
+
+ /* Disable interrupts and I/O.
+ */
+ smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX);
+#ifdef CONFIG_SERIAL_CONSOLE
+ /* We can't disable the transmitter if this is the
+ * system console.
+ */
+ if ((state - rs_table) != CONFIG_SERIAL_CONSOLE_PORT)
+#endif
+ smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
+ }
+
+ if (info->port.tty)
+ set_bit(TTY_IO_ERROR, &info->port.tty->flags);
+
+ info->flags &= ~ASYNC_INITIALIZED;
+ local_irq_restore(flags);
+}
+
+/*
+ * This routine is called to set the UART divisor registers to match
+ * the specified baud rate for a serial port.
+ */
+static void change_speed(ser_info_t *info)
+{
+ int baud_rate;
+ unsigned cflag, cval, scval, prev_mode;
+ int i, bits, sbits, idx;
+ unsigned long flags;
+ struct serial_state *state;
+ volatile struct smc_regs *smcp;
+ volatile struct scc_regs *sccp;
+
+ if (!info->port.tty || !info->port.tty->termios)
+ return;
+ cflag = info->port.tty->termios->c_cflag;
+
+ state = info->state;
+
+ /* Character length programmed into the mode register is the
+ * sum of: 1 start bit, number of data bits, 0 or 1 parity bit,
+ * 1 or 2 stop bits, minus 1.
+ * The value 'bits' counts this for us.
+ */
+ cval = 0;
+ scval = 0;
+
+ /* byte size and parity */
+ switch (cflag & CSIZE) {
+ case CS5: bits = 5; break;
+ case CS6: bits = 6; break;
+ case CS7: bits = 7; break;
+ case CS8: bits = 8; break;
+ /* Never happens, but GCC is too dumb to figure it out */
+ default: bits = 8; break;
+ }
+ sbits = bits - 5;
+
+ if (cflag & CSTOPB) {
+ cval |= SMCMR_SL; /* Two stops */
+ scval |= SCU_PMSR_SL;
+ bits++;
+ }
+ if (cflag & PARENB) {
+ cval |= SMCMR_PEN;
+ scval |= SCU_PMSR_PEN;
+ bits++;
+ }
+ if (!(cflag & PARODD)) {
+ cval |= SMCMR_PM_EVEN;
+ scval |= (SCU_PMSR_REVP | SCU_PMSR_TEVP);
+ }
+
+ /* Determine divisor based on baud rate */
+ i = cflag & CBAUD;
+ if (i >= (sizeof(baud_table)/sizeof(int)))
+ baud_rate = 9600;
+ else
+ baud_rate = baud_table[i];
+
+ info->timeout = (TX_BUF_SIZE*HZ*bits);
+ info->timeout += HZ/50; /* Add .02 seconds of slop */
+
+#ifdef modem_control
+ /* CTS flow control flag and modem status interrupts */
+ info->IER &= ~UART_IER_MSI;
+ if (info->flags & ASYNC_HARDPPS_CD)
+ info->IER |= UART_IER_MSI;
+ if (cflag & CRTSCTS) {
+ info->flags |= ASYNC_CTS_FLOW;
+ info->IER |= UART_IER_MSI;
+ } else
+ info->flags &= ~ASYNC_CTS_FLOW;
+ if (cflag & CLOCAL)
+ info->flags &= ~ASYNC_CHECK_CD;
+ else {
+ info->flags |= ASYNC_CHECK_CD;
+ info->IER |= UART_IER_MSI;
+ }
+ serial_out(info, UART_IER, info->IER);
+#endif
+
+ /*
+ * Set up parity check flag
+ */
+ info->read_status_mask = (BD_SC_EMPTY | BD_SC_OV);
+ if (I_INPCK(info->port.tty))
+ info->read_status_mask |= BD_SC_FR | BD_SC_PR;
+ if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
+ info->read_status_mask |= BD_SC_BR;
+
+ /*
+ * Characters to ignore
+ */
+ info->ignore_status_mask = 0;
+ if (I_IGNPAR(info->port.tty))
+ info->ignore_status_mask |= BD_SC_PR | BD_SC_FR;
+ if (I_IGNBRK(info->port.tty)) {
+ info->ignore_status_mask |= BD_SC_BR;
+ /*
+ * If we're ignoring parity and break indicators, ignore
+ * overruns too. (For real raw support).
+ */
+ if (I_IGNPAR(info->port.tty))
+ info->ignore_status_mask |= BD_SC_OV;
+ }
+ /*
+ * !!! ignore all characters if CREAD is not set
+ */
+ if ((cflag & CREAD) == 0)
+ info->read_status_mask &= ~BD_SC_EMPTY;
+ local_irq_save(flags);
+
+ /* Start bit has not been added (so don't, because we would just
+ * subtract it later), and we need to add one for the number of
+ * stop bits (there is always at least one).
+ */
+ bits++;
+ idx = PORT_NUM(state->smc_scc_num);
+ if (state->smc_scc_num & NUM_IS_SCC) {
+ sccp = &pquicc->scc_regs[idx];
+ sccp->scc_psmr = (sbits << 12) | scval;
+ } else {
+ smcp = &pquicc->smc_regs[idx];
+
+ /* Set the mode register. We want to keep a copy of the
+ * enables, because we want to put them back if they were
+ * present.
+ */
+ prev_mode = smcp->smc_smcmr;
+ smcp->smc_smcmr = smcr_mk_clen(bits) | cval | SMCMR_SM_UART;
+ smcp->smc_smcmr |= (prev_mode & (SMCMR_REN | SMCMR_TEN));
+ }
+
+ m360_cpm_setbrg((state - rs_table), baud_rate);
+
+ local_irq_restore(flags);
+}
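+/* Editorial sketch (not part of the original driver): a worked example of the
+ * character-length computation in change_speed() for 8N1:
+ *
+ *   CS8, no parity     -> bits = 8
+ *   one stop bit       -> bits++ (the unconditional increment above)
+ *   smcr_mk_clen(9)    -> start + 8 data + 1 stop, minus 1, per the comment
+ *                         at the top of the function
+ *
+ * The SCC path instead packs (data bits - 5) into the PSMR via sbits.
+ */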
+
+static int rs_360_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+ volatile QUICC_BD *bdp;
+
+ if (serial_paranoia_check(info, tty->name, "rs_put_char"))
+ return 0;
+
+ if (!tty)
+ return 0;
+
+ bdp = info->tx_cur;
+ while (bdp->status & BD_SC_READY);
+
+ /* *((char *)__va(bdp->buf)) = ch; */
+ *((char *)bdp->buf) = ch;
+ bdp->length = 1;
+ bdp->status |= BD_SC_READY;
+
+ /* Get next BD.
+ */
+ if (bdp->status & BD_SC_WRAP)
+ bdp = info->tx_bd_base;
+ else
+ bdp++;
+
+ info->tx_cur = (QUICC_BD *)bdp;
+ return 1;
+
+}
+
+static int rs_360_write(struct tty_struct * tty,
+ const unsigned char *buf, int count)
+{
+ int c, ret = 0;
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+ volatile QUICC_BD *bdp;
+
+#ifdef CONFIG_KGDB
+ /* Try to let stub handle output. Returns true if it did. */
+ if (kgdb_output_string(buf, count))
+ return ret;
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "rs_write"))
+ return 0;
+
+ if (!tty)
+ return 0;
+
+ bdp = info->tx_cur;
+
+ while (1) {
+ c = min(count, TX_BUF_SIZE);
+
+ if (c <= 0)
+ break;
+
+ if (bdp->status & BD_SC_READY) {
+ info->flags |= TX_WAKEUP;
+ break;
+ }
+
+ /* memcpy(__va(bdp->buf), buf, c); */
+ memcpy((void *)bdp->buf, buf, c);
+
+ bdp->length = c;
+ bdp->status |= BD_SC_READY;
+
+ buf += c;
+ count -= c;
+ ret += c;
+
+ /* Get next BD.
+ */
+ if (bdp->status & BD_SC_WRAP)
+ bdp = info->tx_bd_base;
+ else
+ bdp++;
+ info->tx_cur = (QUICC_BD *)bdp;
+ }
+ return ret;
+}
+
+static int rs_360_write_room(struct tty_struct *tty)
+{
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+ int ret;
+
+ if (serial_paranoia_check(info, tty->name, "rs_write_room"))
+ return 0;
+
+ if ((info->tx_cur->status & BD_SC_READY) == 0) {
+ info->flags &= ~TX_WAKEUP;
+ ret = TX_BUF_SIZE;
+ }
+ else {
+ info->flags |= TX_WAKEUP;
+ ret = 0;
+ }
+ return ret;
+}
+
+/* I could track this with transmit counters....maybe later.
+*/
+static int rs_360_chars_in_buffer(struct tty_struct *tty)
+{
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
+ return 0;
+ return 0;
+}
+
+static void rs_360_flush_buffer(struct tty_struct *tty)
+{
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
+ return;
+
+ /* There is nothing to "flush", whatever we gave the CPM
+ * is on its way out.
+ */
+ tty_wakeup(tty);
+ info->flags &= ~TX_WAKEUP;
+}
+
+/*
+ * This function is used to send a high-priority XON/XOFF character to
+ * the device
+ */
+static void rs_360_send_xchar(struct tty_struct *tty, char ch)
+{
+ volatile QUICC_BD *bdp;
+
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+
+ if (serial_paranoia_check(info, tty->name, "rs_send_char"))
+ return;
+
+ bdp = info->tx_cur;
+ while (bdp->status & BD_SC_READY);
+
+ /* *((char *)__va(bdp->buf)) = ch; */
+ *((char *)bdp->buf) = ch;
+ bdp->length = 1;
+ bdp->status |= BD_SC_READY;
+
+ /* Get next BD.
+ */
+ if (bdp->status & BD_SC_WRAP)
+ bdp = info->tx_bd_base;
+ else
+ bdp++;
+
+ info->tx_cur = (QUICC_BD *)bdp;
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_throttle()
+ *
+ * This routine is called by the upper-layer tty layer to signal that
+ * incoming characters should be throttled.
+ * ------------------------------------------------------------
+ */
+static void rs_360_throttle(struct tty_struct * tty)
+{
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+#ifdef SERIAL_DEBUG_THROTTLE
+ char buf[64];
+
+ printk("throttle %s: %d....\n", _tty_name(tty, buf),
+ tty->ldisc.chars_in_buffer(tty));
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "rs_throttle"))
+ return;
+
+ if (I_IXOFF(tty))
+ rs_360_send_xchar(tty, STOP_CHAR(tty));
+
+#ifdef modem_control
+ if (tty->termios->c_cflag & CRTSCTS)
+ info->MCR &= ~UART_MCR_RTS;
+
+ local_irq_disable();
+ serial_out(info, UART_MCR, info->MCR);
+ local_irq_enable();
+#endif
+}
+
+static void rs_360_unthrottle(struct tty_struct * tty)
+{
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+#ifdef SERIAL_DEBUG_THROTTLE
+ char buf[64];
+
+ printk("unthrottle %s: %d....\n", _tty_name(tty, buf),
+ tty->ldisc.chars_in_buffer(tty));
+#endif
+
+ if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
+ return;
+
+ if (I_IXOFF(tty)) {
+ if (info->x_char)
+ info->x_char = 0;
+ else
+ rs_360_send_xchar(tty, START_CHAR(tty));
+ }
+#ifdef modem_control
+ if (tty->termios->c_cflag & CRTSCTS)
+ info->MCR |= UART_MCR_RTS;
+ local_irq_disable();
+ serial_out(info, UART_MCR, info->MCR);
+ local_irq_enable();
+#endif
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_ioctl() and friends
+ * ------------------------------------------------------------
+ */
+
+#ifdef maybe
+/*
+ * get_lsr_info - get line status register info
+ *
+ * Purpose: Let user call ioctl() to get info when the UART physically
+ * is emptied. On bus types like RS485, the transmitter must
+ * release the bus after transmitting. This must be done when
+ * the transmit shift register is empty, not when the
+ * transmit holding register is empty. This functionality
+ * allows an RS485 driver to be written in user space.
+ */
+static int get_lsr_info(struct async_struct * info, unsigned int *value)
+{
+ unsigned char status;
+ unsigned int result;
+
+ local_irq_disable();
+ status = serial_in(info, UART_LSR);
+ local_irq_enable();
+ result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
+ return put_user(result,value);
+}
+#endif
+
+static int rs_360_tiocmget(struct tty_struct *tty)
+{
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+ unsigned int result = 0;
+#ifdef modem_control
+ unsigned char control, status;
+
+ if (serial_paranoia_check(info, tty->name, __func__))
+ return -ENODEV;
+
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+
+ control = info->MCR;
+ local_irq_disable();
+ status = serial_in(info, UART_MSR);
+ local_irq_enable();
+ result = ((control & UART_MCR_RTS) ? TIOCM_RTS : 0)
+ | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0)
+#ifdef TIOCM_OUT1
+ | ((control & UART_MCR_OUT1) ? TIOCM_OUT1 : 0)
+ | ((control & UART_MCR_OUT2) ? TIOCM_OUT2 : 0)
+#endif
+ | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0)
+ | ((status & UART_MSR_RI) ? TIOCM_RNG : 0)
+ | ((status & UART_MSR_DSR) ? TIOCM_DSR : 0)
+ | ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
+#endif
+ return result;
+}
+
+static int rs_360_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+#ifdef modem_control
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+ unsigned int arg;
+
+ if (serial_paranoia_check(info, tty->name, __func__))
+ return -ENODEV;
+
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+ /* FIXME: locking on info->MCR */
+ if (set & TIOCM_RTS)
+ info->MCR |= UART_MCR_RTS;
+ if (set & TIOCM_DTR)
+ info->MCR |= UART_MCR_DTR;
+ if (clear & TIOCM_RTS)
+ info->MCR &= ~UART_MCR_RTS;
+ if (clear & TIOCM_DTR)
+ info->MCR &= ~UART_MCR_DTR;
+
+#ifdef TIOCM_OUT1
+ if (set & TIOCM_OUT1)
+ info->MCR |= UART_MCR_OUT1;
+ if (set & TIOCM_OUT2)
+ info->MCR |= UART_MCR_OUT2;
+ if (clear & TIOCM_OUT1)
+ info->MCR &= ~UART_MCR_OUT1;
+ if (clear & TIOCM_OUT2)
+ info->MCR &= ~UART_MCR_OUT2;
+#endif
+
+ local_irq_disable();
+ serial_out(info, UART_MCR, info->MCR);
+ local_irq_enable();
+#endif
+ return 0;
+}
+
+/* Sending a break is a two step process on the SMC/SCC. It is accomplished
+ * by sending a STOP TRANSMIT command followed by a RESTART TRANSMIT
+ * command. We take advantage of the begin/end functions to make this
+ * happen.
+ */
+static ushort smc_chan_map[] = {
+ CPM_CR_CH_SMC1,
+ CPM_CR_CH_SMC2
+};
+
+static ushort scc_chan_map[] = {
+ CPM_CR_CH_SCC1,
+ CPM_CR_CH_SCC2,
+ CPM_CR_CH_SCC3,
+ CPM_CR_CH_SCC4
+};
+
+static void begin_break(ser_info_t *info)
+{
+ volatile QUICC *cp;
+ ushort chan;
+ int idx;
+
+ cp = pquicc;
+
+ idx = PORT_NUM(info->state->smc_scc_num);
+ if (info->state->smc_scc_num & NUM_IS_SCC)
+ chan = scc_chan_map[idx];
+ else
+ chan = smc_chan_map[idx];
+
+ cp->cp_cr = mk_cr_cmd(chan, CPM_CR_STOP_TX) | CPM_CR_FLG;
+ while (cp->cp_cr & CPM_CR_FLG);
+}
+
+static void end_break(ser_info_t *info)
+{
+ volatile QUICC *cp;
+ ushort chan;
+ int idx;
+
+ cp = pquicc;
+
+ idx = PORT_NUM(info->state->smc_scc_num);
+ if (info->state->smc_scc_num & NUM_IS_SCC)
+ chan = scc_chan_map[idx];
+ else
+ chan = smc_chan_map[idx];
+
+ cp->cp_cr = mk_cr_cmd(chan, CPM_CR_RESTART_TX) | CPM_CR_FLG;
+ while (cp->cp_cr & CPM_CR_FLG);
+}
+
+/*
+ * This routine sends a break character out the serial port.
+ */
+static void send_break(ser_info_t *info, unsigned int duration)
+{
+#ifdef SERIAL_DEBUG_SEND_BREAK
+ printk("rs_send_break(%d) jiff=%lu...", duration, jiffies);
+#endif
+ begin_break(info);
+ msleep_interruptible(duration);
+ end_break(info);
+#ifdef SERIAL_DEBUG_SEND_BREAK
+ printk("done jiffies=%lu\n", jiffies);
+#endif
+}
+
+
+/*
+ * Get counters of input serial line interrupts (DCD, RI, DSR, CTS).
+ * Return: writes the counters to the user-passed counter struct.
+ * NB: both 1->0 and 0->1 transitions are counted except for
+ * RI where only 0->1 is counted.
+ */
+static int rs_360_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+{
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+ struct async_icount cnow;
+
+ local_irq_disable();
+ cnow = info->state->icount;
+ local_irq_enable();
+
+ icount->cts = cnow.cts;
+ icount->dsr = cnow.dsr;
+ icount->rng = cnow.rng;
+ icount->dcd = cnow.dcd;
+
+ return 0;
+}
+
+static int rs_360_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ int error;
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+ int retval;
+ struct async_icount cnow;
+ /* struct async_icount_24 cnow;*/ /* kernel counter temps */
+ struct serial_icounter_struct *p_cuser; /* user space */
+
+ if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
+ return -ENODEV;
+
+ if (cmd != TIOCMIWAIT) {
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case TCSBRK: /* SVID version: non-zero arg --> no break */
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+ return -EINTR;
+ if (!arg) {
+ send_break(info, 250); /* 1/4 second */
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ return 0;
+ case TCSBRKP: /* support for POSIX tcsendbreak() */
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+ return -EINTR;
+ send_break(info, arg ? arg*100 : 250);
+ if (signal_pending(current))
+ return -EINTR;
+ return 0;
+ case TIOCSBRK:
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ tty_wait_until_sent(tty, 0);
+ begin_break(info);
+ return 0;
+ case TIOCCBRK:
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ end_break(info);
+ return 0;
+#ifdef maybe
+ case TIOCSERGETLSR: /* Get line status register */
+ return get_lsr_info(info, (unsigned int *) arg);
+#endif
+ /*
+ * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
+ * - mask passed in arg for lines of interest
+ * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
+ * Caller should use TIOCGICOUNT to see which one it was
+ */
+ case TIOCMIWAIT:
+#ifdef modem_control
+ local_irq_disable();
+ /* note the counters on entry */
+ cprev = info->state->icount;
+ local_irq_enable();
+ while (1) {
+ interruptible_sleep_on(&info->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ local_irq_disable();
+ cnow = info->state->icount; /* atomic copy */
+ local_irq_enable();
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
+ return -EIO; /* no change => error */
+ if ( ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
+ ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
+ ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts)) ) {
+ return 0;
+ }
+ cprev = cnow;
+ }
+ /* NOTREACHED */
+#else
+ return 0;
+#endif
+
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
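+/* Editorial sketch (not part of the original driver): from user space the
+ * break ioctls above are normally reached through tcsendbreak(), e.g.
+ *
+ *   int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);
+ *   tcsendbreak(fd, 0);     issues TCSBRK with arg 0, ~250 ms here
+ *
+ * TIOCSBRK/TIOCCBRK instead give the caller explicit start/stop control via
+ * begin_break()/end_break().
+ */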
+
+/* FIX UP modem control here someday......
+*/
+static void rs_360_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+
+ change_speed(info);
+
+#ifdef modem_control
+ /* Handle transition to B0 status */
+ if ((old_termios->c_cflag & CBAUD) &&
+ !(tty->termios->c_cflag & CBAUD)) {
+ info->MCR &= ~(UART_MCR_DTR|UART_MCR_RTS);
+ local_irq_disable();
+ serial_out(info, UART_MCR, info->MCR);
+ local_irq_enable();
+ }
+
+ /* Handle transition away from B0 status */
+ if (!(old_termios->c_cflag & CBAUD) &&
+ (tty->termios->c_cflag & CBAUD)) {
+ info->MCR |= UART_MCR_DTR;
+ if (!tty->hw_stopped ||
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ info->MCR |= UART_MCR_RTS;
+ }
+ local_irq_disable();
+ serial_out(info, UART_MCR, info->MCR);
+ local_irq_enable();
+ }
+
+ /* Handle turning off CRTSCTS */
+ if ((old_termios->c_cflag & CRTSCTS) &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ rs_360_start(tty);
+ }
+#endif
+
+#if 0
+ /*
+ * No need to wake up processes in open wait, since they
+ * sample the CLOCAL flag once, and don't recheck it.
+ * XXX It's not clear whether the current behavior is correct
+ * or not. Hence, this may change.....
+ */
+ if (!(old_termios->c_cflag & CLOCAL) &&
+ (tty->termios->c_cflag & CLOCAL))
+ wake_up_interruptible(&info->open_wait);
+#endif
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_close()
+ *
+ * This routine is called when the serial port gets closed. First, we
+ * wait for the last remaining data to be sent. Then, we unlink its
+ * async structure from the interrupt chain if necessary, and we free
+ * that IRQ if nothing is left in the chain.
+ * ------------------------------------------------------------
+ */
+static void rs_360_close(struct tty_struct *tty, struct file * filp)
+{
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+ /* struct async_state *state; */
+ struct serial_state *state;
+ unsigned long flags;
+ int idx;
+ volatile struct smc_regs *smcp;
+ volatile struct scc_regs *sccp;
+
+ if (!info || serial_paranoia_check(info, tty->name, "rs_close"))
+ return;
+
+ state = info->state;
+
+ local_irq_save(flags);
+
+ if (tty_hung_up_p(filp)) {
+ DBG_CNT("before DEC-hung");
+ local_irq_restore(flags);
+ return;
+ }
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("rs_close ttys%d, count = %d\n", info->line, state->count);
+#endif
+ if ((tty->count == 1) && (state->count != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. state->count should always
+ * be one in these conditions. If it's greater than
+ * one, we've got real problems, since it means the
+ * serial port won't be shutdown.
+ */
+ printk("rs_close: bad serial port count; tty->count is 1, "
+ "state->count is %d\n", state->count);
+ state->count = 1;
+ }
+ if (--state->count < 0) {
+ printk("rs_close: bad serial port count for ttys%d: %d\n",
+ info->line, state->count);
+ state->count = 0;
+ }
+ if (state->count) {
+ DBG_CNT("before DEC-2");
+ local_irq_restore(flags);
+ return;
+ }
+ info->flags |= ASYNC_CLOSING;
+ /*
+ * Now we wait for the transmit buffer to clear; and we notify
+ * the line discipline to only process XON/XOFF characters.
+ */
+ tty->closing = 1;
+ if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE)
+ tty_wait_until_sent(tty, info->closing_wait);
+ /*
+ * At this point we stop accepting input. To do this, we
+ * disable the receive line status interrupts, and tell the
+ * interrupt driver to stop checking the data ready bit in the
+ * line status register.
+ */
+ info->read_status_mask &= ~BD_SC_EMPTY;
+ if (info->flags & ASYNC_INITIALIZED) {
+
+ idx = PORT_NUM(info->state->smc_scc_num);
+ if (info->state->smc_scc_num & NUM_IS_SCC) {
+ sccp = &pquicc->scc_regs[idx];
+ sccp->scc_sccm &= ~UART_SCCM_RX;
+ sccp->scc_gsmr.w.low &= ~SCC_GSMRL_ENR;
+ } else {
+ smcp = &pquicc->smc_regs[idx];
+ smcp->smc_smcm &= ~SMCM_RX;
+ smcp->smc_smcmr &= ~SMCMR_REN;
+ }
+ /*
+ * Before we drop DTR, make sure the UART transmitter
+ * has completely drained; this is especially
+ * important if there is a transmit FIFO!
+ */
+ rs_360_wait_until_sent(tty, info->timeout);
+ }
+ shutdown(info);
+ rs_360_flush_buffer(tty);
+ tty_ldisc_flush(tty);
+ tty->closing = 0;
+ info->event = 0;
+ info->port.tty = NULL;
+ if (info->blocked_open) {
+ if (info->close_delay) {
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
+ }
+ wake_up_interruptible(&info->open_wait);
+ }
+ info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
+ wake_up_interruptible(&info->close_wait);
+ local_irq_restore(flags);
+}
+
+/*
+ * rs_wait_until_sent() --- wait until the transmitter is empty
+ */
+static void rs_360_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+ unsigned long orig_jiffies, char_time;
+ /*int lsr;*/
+ volatile QUICC_BD *bdp;
+
+ if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent"))
+ return;
+
+#ifdef maybe
+ if (info->state->type == PORT_UNKNOWN)
+ return;
+#endif
+
+ orig_jiffies = jiffies;
+ /*
+ * Set the check interval to be 1/5 of the estimated time to
+ * send a single character, and make it at least 1. The check
+ * interval should also be less than the timeout.
+ *
+ * Note: we have to use pretty tight timings here to satisfy
+ * the NIST-PCTS.
+ */
+ char_time = 1;
+ if (timeout)
+ char_time = min(char_time, (unsigned long)timeout);
+#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+ printk("In rs_wait_until_sent(%d) check=%lu...", timeout, char_time);
+ printk("jiff=%lu...", jiffies);
+#endif
+
+ /* We go through the loop at least once because we can't tell
+ * exactly when the last character exits the shifter. There can
+ * be at least two characters waiting to be sent after the buffers
+ * are empty.
+ */
+ do {
+#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+ printk("lsr = %d (jiff=%lu)...", lsr, jiffies);
+#endif
+/* current->counter = 0; make us low-priority */
+ msleep_interruptible(jiffies_to_msecs(char_time));
+ if (signal_pending(current))
+ break;
+ if (timeout && (time_after(jiffies, orig_jiffies + timeout)))
+ break;
+ /* The 'tx_cur' is really the next buffer to send. We
+ * have to back up to the previous BD and wait for it
+ * to go. This isn't perfect, because all this indicates
+ * is the buffer is available. There are still characters
+ * in the CPM FIFO.
+ */
+ bdp = info->tx_cur;
+ if (bdp == info->tx_bd_base)
+ bdp += (TX_NUM_FIFO-1);
+ else
+ bdp--;
+ } while (bdp->status & BD_SC_READY);
+ current->state = TASK_RUNNING;
+#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
+ printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
+#endif
+}
+
+/*
+ * rs_hangup() --- called by tty_hangup() when a hangup is signaled.
+ */
+static void rs_360_hangup(struct tty_struct *tty)
+{
+ ser_info_t *info = (ser_info_t *)tty->driver_data;
+ struct serial_state *state = info->state;
+
+ if (serial_paranoia_check(info, tty->name, "rs_hangup"))
+ return;
+
+ state = info->state;
+
+ rs_360_flush_buffer(tty);
+ shutdown(info);
+ info->event = 0;
+ state->count = 0;
+ info->flags &= ~ASYNC_NORMAL_ACTIVE;
+ info->port.tty = NULL;
+ wake_up_interruptible(&info->open_wait);
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_open() and friends
+ * ------------------------------------------------------------
+ */
+static int block_til_ready(struct tty_struct *tty, struct file * filp,
+ ser_info_t *info)
+{
+#ifdef DO_THIS_LATER
+ DECLARE_WAITQUEUE(wait, current);
+#endif
+ struct serial_state *state = info->state;
+ int retval;
+ int do_clocal = 0;
+
+ /*
+ * If the device is in the middle of being closed, then block
+ * until it's done, and then try again.
+ */
+ if (tty_hung_up_p(filp) ||
+ (info->flags & ASYNC_CLOSING)) {
+ if (info->flags & ASYNC_CLOSING)
+ interruptible_sleep_on(&info->close_wait);
+#ifdef SERIAL_DO_RESTART
+ if (info->flags & ASYNC_HUP_NOTIFY)
+ return -EAGAIN;
+ else
+ return -ERESTARTSYS;
+#else
+ return -EAGAIN;
+#endif
+ }
+
+ /*
+ * If non-blocking mode is set, or the port is not enabled,
+ * then make the check up front and then exit.
+ * If this is an SMC port, we don't have modem control to wait
+ * for, so just get out here.
+ */
+ if ((filp->f_flags & O_NONBLOCK) ||
+ (tty->flags & (1 << TTY_IO_ERROR)) ||
+ !(info->state->smc_scc_num & NUM_IS_SCC)) {
+ info->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+ }
+
+ if (tty->termios->c_cflag & CLOCAL)
+ do_clocal = 1;
+
+ /*
+ * Block waiting for the carrier detect and the line to become
+ * free (i.e., not in use by the callout). While we are in
+ * this loop, state->count is dropped by one, so that
+ * rs_close() knows when to free things. We restore it upon
+ * exit, either normal or abnormal.
+ */
+ retval = 0;
+#ifdef DO_THIS_LATER
+ add_wait_queue(&info->open_wait, &wait);
+#ifdef SERIAL_DEBUG_OPEN
+ printk("block_til_ready before block: ttys%d, count = %d\n",
+ state->line, state->count);
+#endif
+ local_irq_disable();
+ if (!tty_hung_up_p(filp))
+ state->count--;
+ local_irq_enable();
+ info->blocked_open++;
+ while (1) {
+ local_irq_disable();
+ if (tty->termios->c_cflag & CBAUD)
+ serial_out(info, UART_MCR,
+ serial_inp(info, UART_MCR) |
+ (UART_MCR_DTR | UART_MCR_RTS));
+ local_irq_enable();
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (tty_hung_up_p(filp) ||
+ !(info->flags & ASYNC_INITIALIZED)) {
+#ifdef SERIAL_DO_RESTART
+ if (info->flags & ASYNC_HUP_NOTIFY)
+ retval = -EAGAIN;
+ else
+ retval = -ERESTARTSYS;
+#else
+ retval = -EAGAIN;
+#endif
+ break;
+ }
+ if (!(info->flags & ASYNC_CLOSING) &&
+ (do_clocal || (serial_in(info, UART_MSR) &
+ UART_MSR_DCD)))
+ break;
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+#ifdef SERIAL_DEBUG_OPEN
+ printk("block_til_ready blocking: ttys%d, count = %d\n",
+ info->line, state->count);
+#endif
+ tty_unlock();
+ schedule();
+ tty_lock();
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&info->open_wait, &wait);
+ if (!tty_hung_up_p(filp))
+ state->count++;
+ info->blocked_open--;
+#ifdef SERIAL_DEBUG_OPEN
+ printk("block_til_ready after blocking: ttys%d, count = %d\n",
+ info->line, state->count);
+#endif
+#endif /* DO_THIS_LATER */
+ if (retval)
+ return retval;
+ info->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+}
+
+static int get_async_struct(int line, ser_info_t **ret_info)
+{
+ struct serial_state *sstate;
+
+ sstate = rs_table + line;
+ if (sstate->info) {
+ sstate->count++;
+ *ret_info = (ser_info_t *)sstate->info;
+ return 0;
+ }
+ else {
+ return -ENOMEM;
+ }
+}
+
+/*
+ * This routine is called whenever a serial port is opened. It
+ * enables interrupts for a serial port, linking its async structure into
+ * the IRQ chain. It also performs the serial-specific
+ * initialization for the tty structure.
+ */
+static int rs_360_open(struct tty_struct *tty, struct file * filp)
+{
+ ser_info_t *info;
+ int retval, line;
+
+ line = tty->index;
+ if ((line < 0) || (line >= NR_PORTS))
+ return -ENODEV;
+ retval = get_async_struct(line, &info);
+ if (retval)
+ return retval;
+ if (serial_paranoia_check(info, tty->name, "rs_open"))
+ return -ENODEV;
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("rs_open %s, count = %d\n", tty->name, info->state->count);
+#endif
+ tty->driver_data = info;
+ info->port.tty = tty;
+
+ /*
+ * Start up serial port
+ */
+ retval = startup(info);
+ if (retval)
+ return retval;
+
+ retval = block_til_ready(tty, filp, info);
+ if (retval) {
+#ifdef SERIAL_DEBUG_OPEN
+ printk("rs_open returning after block_til_ready with %d\n",
+ retval);
+#endif
+ return retval;
+ }
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("rs_open %s successful...", tty->name);
+#endif
+ return 0;
+}
+
+/*
+ * /proc fs routines....
+ */
+
+static inline int line_info(char *buf, struct serial_state *state)
+{
+#ifdef notdef
+ struct async_struct *info = state->info, scr_info;
+ char stat_buf[30], control, status;
+#endif
+ int ret;
+
+ ret = sprintf(buf, "%d: uart:%s port:%X irq:%d",
+ state->line,
+ (state->smc_scc_num & NUM_IS_SCC) ? "SCC" : "SMC",
+ (unsigned int)(state->port), state->irq);
+
+ if (!state->port || (state->type == PORT_UNKNOWN)) {
+ ret += sprintf(buf+ret, "\n");
+ return ret;
+ }
+
+#ifdef notdef
+ /*
+ * Figure out the current RS-232 lines
+ */
+ if (!info) {
+ info = &scr_info; /* This is just for serial_{in,out} */
+
+ info->magic = SERIAL_MAGIC;
+ info->port = state->port;
+ info->flags = state->flags;
+ info->quot = 0;
+ info->port.tty = NULL;
+ }
+ local_irq_disable();
+ status = serial_in(info, UART_MSR);
+ control = info ? info->MCR : serial_in(info, UART_MCR);
+ local_irq_enable();
+
+ stat_buf[0] = 0;
+ stat_buf[1] = 0;
+ if (control & UART_MCR_RTS)
+ strcat(stat_buf, "|RTS");
+ if (status & UART_MSR_CTS)
+ strcat(stat_buf, "|CTS");
+ if (control & UART_MCR_DTR)
+ strcat(stat_buf, "|DTR");
+ if (status & UART_MSR_DSR)
+ strcat(stat_buf, "|DSR");
+ if (status & UART_MSR_DCD)
+ strcat(stat_buf, "|CD");
+ if (status & UART_MSR_RI)
+ strcat(stat_buf, "|RI");
+
+ if (info->quot) {
+ ret += sprintf(buf+ret, " baud:%d",
+ state->baud_base / info->quot);
+ }
+
+ ret += sprintf(buf+ret, " tx:%d rx:%d",
+ state->icount.tx, state->icount.rx);
+
+ if (state->icount.frame)
+ ret += sprintf(buf+ret, " fe:%d", state->icount.frame);
+
+ if (state->icount.parity)
+ ret += sprintf(buf+ret, " pe:%d", state->icount.parity);
+
+ if (state->icount.brk)
+ ret += sprintf(buf+ret, " brk:%d", state->icount.brk);
+
+ if (state->icount.overrun)
+ ret += sprintf(buf+ret, " oe:%d", state->icount.overrun);
+
+ /*
+ * Last thing is the RS-232 status lines
+ */
+ ret += sprintf(buf+ret, " %s\n", stat_buf+1);
+#endif
+ return ret;
+}
+
+int rs_360_read_proc(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ int i, len = 0;
+ off_t begin = 0;
+
+ len += sprintf(page, "serinfo:1.0 driver:%s\n", serial_version);
+ for (i = 0; i < NR_PORTS && len < 4000; i++) {
+ len += line_info(page + len, &rs_table[i]);
+ if (len+begin > off+count)
+ goto done;
+ if (len+begin < off) {
+ begin += len;
+ len = 0;
+ }
+ }
+ *eof = 1;
+done:
+ if (off >= len+begin)
+ return 0;
+ *start = page + (begin-off);
+ return ((count < begin+len-off) ? count : begin+len-off);
+}
+
+/*
+ * ---------------------------------------------------------------------
+ * rs_init() and friends
+ *
+ * rs_init() is called at boot-time to initialize the serial driver.
+ * ---------------------------------------------------------------------
+ */
+
+/*
+ * This routine prints out the appropriate serial driver version
+ * number, and identifies which options were configured into this
+ * driver.
+ */
+static _INLINE_ void show_serial_version(void)
+{
+ printk(KERN_INFO "%s version %s\n", serial_name, serial_version);
+}
+
+
+/*
+ * The serial console driver used during boot. Note that these names
+ * clash with those found in "serial.c", so we currently can't support
+ * the 16xxx uarts and these at the same time. I will fix this to become
+ * an indirect function call from tty_io.c (or something).
+ */
+
+#ifdef CONFIG_SERIAL_CONSOLE
+
+/*
+ * Print a string to the serial port trying not to disturb any possible
+ * real use of the port...
+ */
+static void my_console_write(int idx, const char *s,
+ unsigned count)
+{
+ struct serial_state *ser;
+ ser_info_t *info;
+ unsigned i;
+ QUICC_BD *bdp, *bdbase;
+ volatile struct smc_uart_pram *up;
+ volatile u_char *cp;
+
+ ser = rs_table + idx;
+
+
+ /* If the port has been initialized for general use, we have
+ * to use the buffer descriptors allocated there. Otherwise,
+ * we simply use the single buffer allocated.
+ */
+ if ((info = (ser_info_t *)ser->info) != NULL) {
+ bdp = info->tx_cur;
+ bdbase = info->tx_bd_base;
+ }
+ else {
+ /* Pointer to UART in parameter ram.
+ */
+ /* up = (smc_uart_t *)&cpmp->cp_dparam[ser->port]; */
+ up = &pquicc->pram[ser->port].scc.pothers.idma_smc.psmc.u;
+
+ /* Get the address of the host memory buffer.
+ */
+ bdp = bdbase = (QUICC_BD *)((uint)pquicc + (uint)up->tbase);
+ }
+
+ /*
+ * We need to gracefully shut down the transmitter, disable
+ * interrupts, then send our bytes out.
+ */
+
+ /*
+ * Now, do each character. This is not as bad as it looks
+ * since this is a holding FIFO and not a transmitting FIFO.
+ * We could add the complexity of filling the entire transmit
+	 * buffer, but we would just wait longer between accesses.
+ */
+ for (i = 0; i < count; i++, s++) {
+		/* Wait for the transmit buffer descriptor to free up.
+		 * The READY bit means the CPM still owns the buffer and is
+		 * transmitting it, not that it is ready for us to refill.
+ */
+ while (bdp->status & BD_SC_READY);
+
+ /* Send the character out.
+ */
+ cp = bdp->buf;
+ *cp = *s;
+
+ bdp->length = 1;
+ bdp->status |= BD_SC_READY;
+
+ if (bdp->status & BD_SC_WRAP)
+ bdp = bdbase;
+ else
+ bdp++;
+
+ /* if a LF, also do CR... */
+ if (*s == 10) {
+ while (bdp->status & BD_SC_READY);
+ /* cp = __va(bdp->buf); */
+ cp = bdp->buf;
+ *cp = 13;
+ bdp->length = 1;
+ bdp->status |= BD_SC_READY;
+
+ if (bdp->status & BD_SC_WRAP) {
+ bdp = bdbase;
+ }
+ else {
+ bdp++;
+ }
+ }
+ }
+
+ /*
+	 * Finally, wait for the transmitter to finish with the last
+	 * buffer descriptor before returning.
+ */
+ while (bdp->status & BD_SC_READY);
+
+ if (info)
+ info->tx_cur = (QUICC_BD *)bdp;
+}
+
+static void serial_console_write(struct console *c, const char *s,
+ unsigned count)
+{
+#ifdef CONFIG_KGDB
+ /* Try to let stub handle output. Returns true if it did. */
+ if (kgdb_output_string(s, count))
+ return;
+#endif
+ my_console_write(c->index, s, count);
+}
+
+
+
+/*void console_print_68360(const char *p)
+{
+ const char *cp = p;
+ int i;
+
+ for (i=0;cp[i]!=0;i++);
+
+ serial_console_write (p, i);
+
+ //Comment this if you want to have a strict interrupt-driven output
+ //rs_fair_output();
+
+ return;
+}*/
+
+#ifdef CONFIG_XMON
+int
+xmon_360_write(const char *s, unsigned count)
+{
+ my_console_write(0, s, count);
+ return(count);
+}
+#endif
+
+#ifdef CONFIG_KGDB
+void
+putDebugChar(char ch)
+{
+ my_console_write(0, &ch, 1);
+}
+#endif
+
+/*
+ * Receive character from the serial port. This only works well
+ * before the port is initialized for real use.
+ */
+static int my_console_wait_key(int idx, int xmon, char *obuf)
+{
+ struct serial_state *ser;
+ u_char c, *cp;
+ ser_info_t *info;
+ QUICC_BD *bdp;
+ volatile struct smc_uart_pram *up;
+ int i;
+
+ ser = rs_table + idx;
+
+	/* Pointer to UART in parameter ram.
+	 */
+	/* up = (smc_uart_t *)&cpmp->cp_dparam[ser->port]; */
+	up = &pquicc->pram[ser->port].scc.pothers.idma_smc.psmc.u;
+
+	/* Get the address of the host memory buffer.
+	 * If the port has been initialized for general use, we must
+	 * use information from the port structure.
+	 */
+	if ((info = (ser_info_t *)ser->info))
+		bdp = info->rx_cur;
+	else
+		/* bdp = (QUICC_BD *)&cpmp->cp_dpmem[up->smc_rbase]; */
+		bdp = (QUICC_BD *)((uint)pquicc + (uint)up->rbase);
+
+ /*
+ * We need to gracefully shut down the receiver, disable
+ * interrupts, then read the input.
+ * XMON just wants a poll. If no character, return -1, else
+ * return the character.
+ */
+ if (!xmon) {
+ while (bdp->status & BD_SC_EMPTY);
+ }
+ else {
+ if (bdp->status & BD_SC_EMPTY)
+ return -1;
+ }
+
+ cp = (char *)bdp->buf;
+
+ if (obuf) {
+ i = c = bdp->length;
+ while (i-- > 0)
+ *obuf++ = *cp++;
+ }
+ else {
+ c = *cp;
+ }
+ bdp->status |= BD_SC_EMPTY;
+
+ if (info) {
+ if (bdp->status & BD_SC_WRAP) {
+ bdp = info->rx_bd_base;
+ }
+ else {
+ bdp++;
+ }
+ info->rx_cur = (QUICC_BD *)bdp;
+ }
+
+ return((int)c);
+}
+
+static int serial_console_wait_key(struct console *co)
+{
+ return(my_console_wait_key(co->index, 0, NULL));
+}
+
+#ifdef CONFIG_XMON
+int
+xmon_360_read_poll(void)
+{
+ return(my_console_wait_key(0, 1, NULL));
+}
+
+int
+xmon_360_read_char(void)
+{
+ return(my_console_wait_key(0, 0, NULL));
+}
+#endif
+
+#ifdef CONFIG_KGDB
+static char kgdb_buf[RX_BUF_SIZE], *kgdp;
+static int kgdb_chars;
+
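+/*
+ * my_console_wait_key() hands back a whole receive buffer descriptor at
+ * a time, so buffer it in kgdb_buf and feed the characters to kgdb one
+ * at a time.
+ */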
+unsigned char
+getDebugChar(void)
+{
+ if (kgdb_chars <= 0) {
+ kgdb_chars = my_console_wait_key(0, 0, kgdb_buf);
+ kgdp = kgdb_buf;
+ }
+ kgdb_chars--;
+
+ return(*kgdp++);
+}
+
+void kgdb_interruptible(int state)
+{
+}
+void kgdb_map_scc(void)
+{
+ struct serial_state *ser;
+ uint mem_addr;
+ volatile QUICC_BD *bdp;
+ volatile smc_uart_t *up;
+
+ cpmp = (cpm360_t *)&(((immap_t *)IMAP_ADDR)->im_cpm);
+
+ /* To avoid data cache CPM DMA coherency problems, allocate a
+ * buffer in the CPM DPRAM. This will work until the CPM and
+ * serial ports are initialized. At that time a memory buffer
+ * will be allocated.
+	 * The port is already initialized from the boot procedure; all
+	 * we do here is give it a different buffer and make it a FIFO.
+ */
+
+ ser = rs_table;
+
+ /* Right now, assume we are using SMCs.
+ */
+ up = (smc_uart_t *)&cpmp->cp_dparam[ser->port];
+
+ /* Allocate space for an input FIFO, plus a few bytes for output.
+ * Allocate bytes to maintain word alignment.
+ */
+ mem_addr = (uint)(&cpmp->cp_dpmem[0x1000]);
+
+ /* Set the physical address of the host memory buffers in
+ * the buffer descriptors.
+ */
+ bdp = (QUICC_BD *)&cpmp->cp_dpmem[up->smc_rbase];
+ bdp->buf = mem_addr;
+
+ bdp = (QUICC_BD *)&cpmp->cp_dpmem[up->smc_tbase];
+ bdp->buf = mem_addr+RX_BUF_SIZE;
+
+ up->smc_mrblr = RX_BUF_SIZE; /* receive buffer length */
+ up->smc_maxidl = RX_BUF_SIZE;
+}
+#endif
+
+static struct tty_struct *serial_console_device(struct console *c, int *index)
+{
+ *index = c->index;
+ return serial_driver;
+}
+
+
+struct console sercons = {
+ .name = "ttyS",
+ .write = serial_console_write,
+ .device = serial_console_device,
+ .wait_key = serial_console_wait_key,
+ .setup = serial_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = CONFIG_SERIAL_CONSOLE_PORT,
+};
+
+
+
+/*
+ * Register console.
+ */
+long console_360_init(long kmem_start, long kmem_end)
+{
+ register_console(&sercons);
+ /*register_console (console_print_68360); - 2.0.38 only required a write
+ function pointer. */
+ return kmem_start;
+}
+
+#endif
+
+/* Index in baud rate table of the default console baud rate. */
+static int baud_idx;
+
+static const struct tty_operations rs_360_ops = {
+ .owner = THIS_MODULE,
+ .open = rs_360_open,
+ .close = rs_360_close,
+ .write = rs_360_write,
+ .put_char = rs_360_put_char,
+ .write_room = rs_360_write_room,
+ .chars_in_buffer = rs_360_chars_in_buffer,
+ .flush_buffer = rs_360_flush_buffer,
+ .ioctl = rs_360_ioctl,
+ .throttle = rs_360_throttle,
+ .unthrottle = rs_360_unthrottle,
+ /* .send_xchar = rs_360_send_xchar, */
+ .set_termios = rs_360_set_termios,
+ .stop = rs_360_stop,
+ .start = rs_360_start,
+ .hangup = rs_360_hangup,
+ /* .wait_until_sent = rs_360_wait_until_sent, */
+ /* .read_proc = rs_360_read_proc, */
+ .tiocmget = rs_360_tiocmget,
+ .tiocmset = rs_360_tiocmset,
+ .get_icount = rs_360_get_icount,
+};
+
+static int __init rs_360_init(void)
+{
+ struct serial_state * state;
+ ser_info_t *info;
+ void *mem_addr;
+ uint dp_addr, iobits;
+ int i, j, idx;
+ ushort chan;
+ QUICC_BD *bdp;
+ volatile QUICC *cp;
+ volatile struct smc_regs *sp;
+ volatile struct smc_uart_pram *up;
+ volatile struct scc_regs *scp;
+ volatile struct uart_pram *sup;
+ /* volatile immap_t *immap; */
+
+ serial_driver = alloc_tty_driver(NR_PORTS);
+ if (!serial_driver)
+ return -1;
+
+ show_serial_version();
+
+ serial_driver->name = "ttyS";
+ serial_driver->major = TTY_MAJOR;
+ serial_driver->minor_start = 64;
+ serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ serial_driver->init_termios = tty_std_termios;
+ serial_driver->init_termios.c_cflag =
+ baud_idx | CS8 | CREAD | HUPCL | CLOCAL;
+ serial_driver->flags = TTY_DRIVER_REAL_RAW;
+ tty_set_operations(serial_driver, &rs_360_ops);
+
+ if (tty_register_driver(serial_driver))
+ panic("Couldn't register serial driver\n");
+
+ cp = pquicc; /* Get pointer to Communication Processor */
+ /* immap = (immap_t *)IMAP_ADDR; */ /* and to internal registers */
+
+
+ /* Configure SCC2, SCC3, and SCC4 instead of port A parallel I/O.
+ */
+ /* The "standard" configuration through the 860.
+ */
+/* immap->im_ioport.iop_papar |= 0x00fc; */
+/* immap->im_ioport.iop_padir &= ~0x00fc; */
+/* immap->im_ioport.iop_paodr &= ~0x00fc; */
+ cp->pio_papar |= 0x00fc;
+ cp->pio_padir &= ~0x00fc;
+ /* cp->pio_paodr &= ~0x00fc; */
+
+
+ /* Since we don't yet do modem control, connect the port C pins
+ * as general purpose I/O. This will assert CTS and CD for the
+ * SCC ports.
+ */
+ /* FIXME: see 360um p.7-365 and 860um p.34-12
+ * I can't make sense of these bits - mleslie*/
+/* immap->im_ioport.iop_pcdir |= 0x03c6; */
+/* immap->im_ioport.iop_pcpar &= ~0x03c6; */
+
+/* cp->pio_pcdir |= 0x03c6; */
+/* cp->pio_pcpar &= ~0x03c6; */
+
+
+
+ /* Connect SCC2 and SCC3 to NMSI. Connect BRG3 to SCC2 and
+ * BRG4 to SCC3.
+ */
+ cp->si_sicr &= ~0x00ffff00;
+ cp->si_sicr |= 0x001b1200;
+
+#ifdef CONFIG_PP04
+ /* Frequentis PP04 forced to RS-232 until we know better.
+ * Port C 12 and 13 low enables RS-232 on SCC3 and SCC4.
+ */
+ immap->im_ioport.iop_pcdir |= 0x000c;
+ immap->im_ioport.iop_pcpar &= ~0x000c;
+ immap->im_ioport.iop_pcdat &= ~0x000c;
+
+ /* This enables the TX driver.
+ */
+ cp->cp_pbpar &= ~0x6000;
+ cp->cp_pbdat &= ~0x6000;
+#endif
+
+ for (i = 0, state = rs_table; i < NR_PORTS; i++,state++) {
+ state->magic = SSTATE_MAGIC;
+ state->line = i;
+ state->type = PORT_UNKNOWN;
+ state->custom_divisor = 0;
+ state->close_delay = 5*HZ/10;
+ state->closing_wait = 30*HZ;
+ state->icount.cts = state->icount.dsr =
+ state->icount.rng = state->icount.dcd = 0;
+ state->icount.rx = state->icount.tx = 0;
+ state->icount.frame = state->icount.parity = 0;
+ state->icount.overrun = state->icount.brk = 0;
+ printk(KERN_INFO "ttyS%d at irq 0x%02x is an %s\n",
+ i, (unsigned int)(state->irq),
+ (state->smc_scc_num & NUM_IS_SCC) ? "SCC" : "SMC");
+
+#ifdef CONFIG_SERIAL_CONSOLE
+ /* If we just printed the message on the console port, and
+ * we are about to initialize it for general use, we have
+ * to wait a couple of character times for the CR/NL to
+ * make it out of the transmit buffer.
+ */
+ if (i == CONFIG_SERIAL_CONSOLE_PORT)
+ mdelay(8);
+
+
+/* idx = PORT_NUM(info->state->smc_scc_num); */
+/* if (info->state->smc_scc_num & NUM_IS_SCC) */
+/* chan = scc_chan_map[idx]; */
+/* else */
+/* chan = smc_chan_map[idx]; */
+
+/* cp->cp_cr = mk_cr_cmd(chan, CPM_CR_STOP_TX) | CPM_CR_FLG; */
+/* while (cp->cp_cr & CPM_CR_FLG); */
+
+#endif
+ /* info = kmalloc(sizeof(ser_info_t), GFP_KERNEL); */
+ info = &quicc_ser_info[i];
+ if (info) {
+ memset (info, 0, sizeof(ser_info_t));
+ info->magic = SERIAL_MAGIC;
+ info->line = i;
+ info->flags = state->flags;
+ INIT_WORK(&info->tqueue, do_softint, info);
+ INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info);
+ init_waitqueue_head(&info->open_wait);
+ init_waitqueue_head(&info->close_wait);
+ info->state = state;
+ state->info = (struct async_struct *)info;
+
+			/* We need to allocate transmit and receive buffer
+ * descriptors from dual port ram, and a character
+ * buffer area from host mem.
+ */
+ dp_addr = m360_cpm_dpalloc(sizeof(QUICC_BD) * RX_NUM_FIFO);
+
+ /* Allocate space for FIFOs in the host memory.
+ * (for now this is from a static array of buffers :(
+ */
+ /* mem_addr = m360_cpm_hostalloc(RX_NUM_FIFO * RX_BUF_SIZE); */
+ /* mem_addr = kmalloc (RX_NUM_FIFO * RX_BUF_SIZE, GFP_BUFFER); */
+ mem_addr = &rx_buf_pool[i * RX_NUM_FIFO * RX_BUF_SIZE];
+
+ /* Set the physical address of the host memory
+ * buffers in the buffer descriptors, and the
+ * virtual address for us to work with.
+ */
+ bdp = (QUICC_BD *)((uint)pquicc + dp_addr);
+ info->rx_cur = info->rx_bd_base = bdp;
+
+ /* initialize rx buffer descriptors */
+ for (j=0; j<(RX_NUM_FIFO-1); j++) {
+ bdp->buf = &rx_buf_pool[(i * RX_NUM_FIFO + j ) * RX_BUF_SIZE];
+ bdp->status = BD_SC_EMPTY | BD_SC_INTRPT;
+ mem_addr += RX_BUF_SIZE;
+ bdp++;
+ }
+ bdp->buf = &rx_buf_pool[(i * RX_NUM_FIFO + j ) * RX_BUF_SIZE];
+ bdp->status = BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT;
+
+
+ idx = PORT_NUM(info->state->smc_scc_num);
+ if (info->state->smc_scc_num & NUM_IS_SCC) {
+
+#if defined (CONFIG_UCQUICC) && 1
+ /* set the transceiver mode to RS232 */
+ sipex_mode_bits &= ~(uint)SIPEX_MODE(idx,0x0f); /* clear current mode */
+ sipex_mode_bits |= (uint)SIPEX_MODE(idx,0x02);
+ *(uint *)_periph_base = sipex_mode_bits;
+ /* printk ("sipex bits = 0x%08x\n", sipex_mode_bits); */
+#endif
+ }
+
+ dp_addr = m360_cpm_dpalloc(sizeof(QUICC_BD) * TX_NUM_FIFO);
+
+ /* Allocate space for FIFOs in the host memory.
+ */
+ /* mem_addr = m360_cpm_hostalloc(TX_NUM_FIFO * TX_BUF_SIZE); */
+ /* mem_addr = kmalloc (TX_NUM_FIFO * TX_BUF_SIZE, GFP_BUFFER); */
+ mem_addr = &tx_buf_pool[i * TX_NUM_FIFO * TX_BUF_SIZE];
+
+ /* Set the physical address of the host memory
+ * buffers in the buffer descriptors, and the
+ * virtual address for us to work with.
+ */
+ /* bdp = (QUICC_BD *)&cp->cp_dpmem[dp_addr]; */
+ bdp = (QUICC_BD *)((uint)pquicc + dp_addr);
+ info->tx_cur = info->tx_bd_base = (QUICC_BD *)bdp;
+
+ /* initialize tx buffer descriptors */
+ for (j=0; j<(TX_NUM_FIFO-1); j++) {
+ bdp->buf = &tx_buf_pool[(i * TX_NUM_FIFO + j ) * TX_BUF_SIZE];
+ bdp->status = BD_SC_INTRPT;
+ mem_addr += TX_BUF_SIZE;
+ bdp++;
+ }
+ bdp->buf = &tx_buf_pool[(i * TX_NUM_FIFO + j ) * TX_BUF_SIZE];
+ bdp->status = (BD_SC_WRAP | BD_SC_INTRPT);
+
+ if (info->state->smc_scc_num & NUM_IS_SCC) {
+ scp = &pquicc->scc_regs[idx];
+ sup = &pquicc->pram[info->state->port].scc.pscc.u;
+ sup->rbase = dp_addr;
+ sup->tbase = dp_addr;
+
+ /* Set up the uart parameters in the
+ * parameter ram.
+ */
+ sup->rfcr = SMC_EB;
+ sup->tfcr = SMC_EB;
+
+ /* Set this to 1 for now, so we get single
+ * character interrupts. Using idle character
+ * time requires some additional tuning.
+ */
+ sup->mrblr = 1;
+ sup->max_idl = 0;
+ sup->brkcr = 1;
+ sup->parec = 0;
+ sup->frmer = 0;
+ sup->nosec = 0;
+ sup->brkec = 0;
+ sup->uaddr1 = 0;
+ sup->uaddr2 = 0;
+ sup->toseq = 0;
+ {
+ int i;
+ for (i=0;i<8;i++)
+ sup->cc[i] = 0x8000;
+ }
+ sup->rccm = 0xc0ff;
+
+ /* Send the CPM an initialize command.
+ */
+ chan = scc_chan_map[idx];
+
+ /* execute the INIT RX & TX PARAMS command for this channel. */
+ cp->cp_cr = mk_cr_cmd(chan, CPM_CR_INIT_TRX) | CPM_CR_FLG;
+ while (cp->cp_cr & CPM_CR_FLG);
+
+ /* Set UART mode, 8 bit, no parity, one stop.
+ * Enable receive and transmit.
+ */
+ scp->scc_gsmr.w.high = 0;
+ scp->scc_gsmr.w.low =
+ (SCC_GSMRL_MODE_UART | SCC_GSMRL_TDCR_16 | SCC_GSMRL_RDCR_16);
+
+ /* Disable all interrupts and clear all pending
+ * events.
+ */
+ scp->scc_sccm = 0;
+ scp->scc_scce = 0xffff;
+ scp->scc_dsr = 0x7e7e;
+ scp->scc_psmr = 0x3000;
+
+ /* If the port is the console, enable Rx and Tx.
+ */
+#ifdef CONFIG_SERIAL_CONSOLE
+ if (i == CONFIG_SERIAL_CONSOLE_PORT)
+ scp->scc_gsmr.w.low |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+#endif
+ }
+ else {
+ /* Configure SMCs Tx/Rx instead of port B
+ * parallel I/O.
+ */
+ up = &pquicc->pram[info->state->port].scc.pothers.idma_smc.psmc.u;
+ up->rbase = dp_addr;
+
+ iobits = 0xc0 << (idx * 4);
+ cp->pip_pbpar |= iobits;
+ cp->pip_pbdir &= ~iobits;
+ cp->pip_pbodr &= ~iobits;
+
+
+ /* Connect the baud rate generator to the
+ * SMC based upon index in rs_table. Also
+ * make sure it is connected to NMSI.
+ */
+ cp->si_simode &= ~(0xffff << (idx * 16));
+ cp->si_simode |= (i << ((idx * 16) + 12));
+
+ up->tbase = dp_addr;
+
+ /* Set up the uart parameters in the
+ * parameter ram.
+ */
+ up->rfcr = SMC_EB;
+ up->tfcr = SMC_EB;
+
+ /* Set this to 1 for now, so we get single
+ * character interrupts. Using idle character
+ * time requires some additional tuning.
+ */
+ up->mrblr = 1;
+ up->max_idl = 0;
+ up->brkcr = 1;
+
+ /* Send the CPM an initialize command.
+ */
+ chan = smc_chan_map[idx];
+
+ cp->cp_cr = mk_cr_cmd(chan,
+ CPM_CR_INIT_TRX) | CPM_CR_FLG;
+#ifdef CONFIG_SERIAL_CONSOLE
+ if (i == CONFIG_SERIAL_CONSOLE_PORT)
+ printk("");
+#endif
+ while (cp->cp_cr & CPM_CR_FLG);
+
+ /* Set UART mode, 8 bit, no parity, one stop.
+ * Enable receive and transmit.
+ */
+ sp = &cp->smc_regs[idx];
+ sp->smc_smcmr = smcr_mk_clen(9) | SMCMR_SM_UART;
+
+ /* Disable all interrupts and clear all pending
+ * events.
+ */
+ sp->smc_smcm = 0;
+ sp->smc_smce = 0xff;
+
+ /* If the port is the console, enable Rx and Tx.
+ */
+#ifdef CONFIG_SERIAL_CONSOLE
+ if (i == CONFIG_SERIAL_CONSOLE_PORT)
+ sp->smc_smcmr |= SMCMR_REN | SMCMR_TEN;
+#endif
+ }
+
+ /* Install interrupt handler.
+ */
+ /* cpm_install_handler(IRQ_MACHSPEC | state->irq, rs_360_interrupt, info); */
+ /*request_irq(IRQ_MACHSPEC | state->irq, rs_360_interrupt, */
+ request_irq(state->irq, rs_360_interrupt,
+ IRQ_FLG_LOCK, "ttyS", (void *)info);
+
+ /* Set up the baud rate generator.
+ */
+ m360_cpm_setbrg(i, baud_table[baud_idx]);
+
+ }
+ }
+
+ return 0;
+}
+module_init(rs_360_init);
+
+/* This must always be called before the rs_360_init() function, otherwise
+ * it blows away the port control information.
+ */
+//static int __init serial_console_setup( struct console *co, char *options)
+int serial_console_setup( struct console *co, char *options)
+{
+ struct serial_state *ser;
+ uint mem_addr, dp_addr, bidx, idx, iobits;
+ ushort chan;
+ QUICC_BD *bdp;
+ volatile QUICC *cp;
+ volatile struct smc_regs *sp;
+ volatile struct scc_regs *scp;
+ volatile struct smc_uart_pram *up;
+ volatile struct uart_pram *sup;
+
+/* mleslie TODO:
+ * add something to the 68k bootloader to store a desired initial console baud rate */
+
+/* bd_t *bd; */ /* a board info struct used by EPPC-bug */
+/* bd = (bd_t *)__res; */
+
+ for (bidx = 0; bidx < (sizeof(baud_table) / sizeof(int)); bidx++)
+ /* if (bd->bi_baudrate == baud_table[bidx]) */
+ if (CONSOLE_BAUDRATE == baud_table[bidx])
+ break;
+
+ /* co->cflag = CREAD|CLOCAL|bidx|CS8; */
+ baud_idx = bidx;
+
+ ser = rs_table + CONFIG_SERIAL_CONSOLE_PORT;
+
+ cp = pquicc; /* Get pointer to Communication Processor */
+
+ idx = PORT_NUM(ser->smc_scc_num);
+ if (ser->smc_scc_num & NUM_IS_SCC) {
+
+ /* TODO: need to set up SCC pin assignment etc. here */
+
+ }
+ else {
+ iobits = 0xc0 << (idx * 4);
+ cp->pip_pbpar |= iobits;
+ cp->pip_pbdir &= ~iobits;
+ cp->pip_pbodr &= ~iobits;
+
+ /* Connect the baud rate generator to the
+ * SMC based upon index in rs_table. Also
+ * make sure it is connected to NMSI.
+ */
+ cp->si_simode &= ~(0xffff << (idx * 16));
+ cp->si_simode |= (idx << ((idx * 16) + 12));
+ }
+
+ /* When we get here, the CPM has been reset, so we need
+ * to configure the port.
+ * We need to allocate a transmit and receive buffer descriptor
+ * from dual port ram, and a character buffer area from host mem.
+ */
+
+ /* Allocate space for two buffer descriptors in the DP ram.
+ */
+ dp_addr = m360_cpm_dpalloc(sizeof(QUICC_BD) * CONSOLE_NUM_FIFO);
+
+ /* Allocate space for two 2 byte FIFOs in the host memory.
+ */
+ /* mem_addr = m360_cpm_hostalloc(8); */
+ mem_addr = (uint)console_fifos;
+
+
+ /* Set the physical address of the host memory buffers in
+ * the buffer descriptors.
+ */
+ /* bdp = (QUICC_BD *)&cp->cp_dpmem[dp_addr]; */
+ bdp = (QUICC_BD *)((uint)pquicc + dp_addr);
+ bdp->buf = (char *)mem_addr;
+ (bdp+1)->buf = (char *)(mem_addr+4);
+
+ /* For the receive, set empty and wrap.
+ * For transmit, set wrap.
+ */
+ bdp->status = BD_SC_EMPTY | BD_SC_WRAP;
+ (bdp+1)->status = BD_SC_WRAP;
+
+ /* Set up the uart parameters in the parameter ram.
+ */
+ if (ser->smc_scc_num & NUM_IS_SCC) {
+ scp = &cp->scc_regs[idx];
+ /* sup = (scc_uart_t *)&cp->cp_dparam[ser->port]; */
+ sup = &pquicc->pram[ser->port].scc.pscc.u;
+
+ sup->rbase = dp_addr;
+ sup->tbase = dp_addr + sizeof(QUICC_BD);
+
+ /* Set up the uart parameters in the
+ * parameter ram.
+ */
+ sup->rfcr = SMC_EB;
+ sup->tfcr = SMC_EB;
+
+ /* Set this to 1 for now, so we get single
+ * character interrupts. Using idle character
+ * time requires some additional tuning.
+ */
+ sup->mrblr = 1;
+ sup->max_idl = 0;
+ sup->brkcr = 1;
+ sup->parec = 0;
+ sup->frmer = 0;
+ sup->nosec = 0;
+ sup->brkec = 0;
+ sup->uaddr1 = 0;
+ sup->uaddr2 = 0;
+ sup->toseq = 0;
+ {
+ int i;
+ for (i=0;i<8;i++)
+ sup->cc[i] = 0x8000;
+ }
+ sup->rccm = 0xc0ff;
+
+ /* Send the CPM an initialize command.
+ */
+ chan = scc_chan_map[idx];
+
+ cp->cp_cr = mk_cr_cmd(chan, CPM_CR_INIT_TRX) | CPM_CR_FLG;
+ while (cp->cp_cr & CPM_CR_FLG);
+
+ /* Set UART mode, 8 bit, no parity, one stop.
+ * Enable receive and transmit.
+ */
+ scp->scc_gsmr.w.high = 0;
+ scp->scc_gsmr.w.low =
+ (SCC_GSMRL_MODE_UART | SCC_GSMRL_TDCR_16 | SCC_GSMRL_RDCR_16);
+
+ /* Disable all interrupts and clear all pending
+ * events.
+ */
+ scp->scc_sccm = 0;
+ scp->scc_scce = 0xffff;
+ scp->scc_dsr = 0x7e7e;
+ scp->scc_psmr = 0x3000;
+
+ scp->scc_gsmr.w.low |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+ }
+ else {
+ /* up = (smc_uart_t *)&cp->cp_dparam[ser->port]; */
+ up = &pquicc->pram[ser->port].scc.pothers.idma_smc.psmc.u;
+
+ up->rbase = dp_addr; /* Base of receive buffer desc. */
+ up->tbase = dp_addr+sizeof(QUICC_BD); /* Base of xmt buffer desc. */
+ up->rfcr = SMC_EB;
+ up->tfcr = SMC_EB;
+
+ /* Set this to 1 for now, so we get single character interrupts.
+ */
+ up->mrblr = 1; /* receive buffer length */
+ up->max_idl = 0; /* wait forever for next char */
+
+ /* Send the CPM an initialize command.
+ */
+ chan = smc_chan_map[idx];
+ cp->cp_cr = mk_cr_cmd(chan, CPM_CR_INIT_TRX) | CPM_CR_FLG;
+ while (cp->cp_cr & CPM_CR_FLG);
+
+ /* Set UART mode, 8 bit, no parity, one stop.
+ * Enable receive and transmit.
+ */
+ sp = &cp->smc_regs[idx];
+ sp->smc_smcmr = smcr_mk_clen(9) | SMCMR_SM_UART;
+
+ /* And finally, enable Rx and Tx.
+ */
+ sp->smc_smcmr |= SMCMR_REN | SMCMR_TEN;
+ }
+
+ /* Set up the baud rate generator.
+ */
+ /* m360_cpm_setbrg((ser - rs_table), bd->bi_baudrate); */
+ m360_cpm_setbrg((ser - rs_table), CONSOLE_BAUDRATE);
+
+ return 0;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
new file mode 100644
index 00000000..762ce722
--- /dev/null
+++ b/drivers/tty/serial/8250.c
@@ -0,0 +1,3426 @@
+/*
+ * Driver for 8250/16550-type serial ports
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * Copyright (C) 2001 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * A note about mapbase / membase
+ *
+ * mapbase is the physical address of the IO port.
+ * membase is an 'ioremapped' cookie.
+ */
+
+#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/ratelimit.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <linux/nmi.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "8250.h"
+
+#ifdef CONFIG_SPARC
+#include "suncore.h"
+#endif
+
+/*
+ * Configuration:
+ * share_irqs - whether we pass IRQF_SHARED to request_irq(). This option
+ * is unsafe when used on edge-triggered interrupts.
+ */
+static unsigned int share_irqs = SERIAL8250_SHARE_IRQS;
+
+static unsigned int nr_uarts = CONFIG_SERIAL_8250_RUNTIME_UARTS;
+
+static struct uart_driver serial8250_reg;
+
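+/*
+ * The 8250 driver registers its ports starting at minor 64 (ttyS0), so
+ * this normally reduces to port->line; the offset only matters if the
+ * driver is registered at a different base minor.
+ */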
+static int serial_index(struct uart_port *port)
+{
+ return (serial8250_reg.minor - 64) + port->line;
+}
+
+static unsigned int skip_txen_test; /* force skip of txen test at init time */
+
+/*
+ * Debugging.
+ */
+#if 0
+#define DEBUG_AUTOCONF(fmt...) printk(fmt)
+#else
+#define DEBUG_AUTOCONF(fmt...) do { } while (0)
+#endif
+
+#if 0
+#define DEBUG_INTR(fmt...) printk(fmt)
+#else
+#define DEBUG_INTR(fmt...) do { } while (0)
+#endif
+
+#define PASS_LIMIT 256
+
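+/*
+ * The transmitter is completely idle only when both the transmit
+ * holding register (THRE) and the transmit shift register (TEMT) are
+ * empty.
+ */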
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+
+/*
+ * We default to IRQ0 for the "no irq" hack. Some
+ * machine types want others as well - they're free
+ * to redefine this in their header file.
+ */
+#define is_real_interrupt(irq) ((irq) != 0)
+
+#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
+#define CONFIG_SERIAL_DETECT_IRQ 1
+#endif
+#ifdef CONFIG_SERIAL_8250_MANY_PORTS
+#define CONFIG_SERIAL_MANY_PORTS 1
+#endif
+
+/*
+ * HUB6 is always on. This will be removed once the header
+ * files have been cleaned.
+ */
+#define CONFIG_HUB6 1
+
+#include <asm/serial.h>
+/*
+ * SERIAL_PORT_DFNS tells us about built-in ports that have no
+ * standard enumeration mechanism. Platforms that can find all
+ * serial ports via mechanisms like ACPI or PCI need not supply it.
+ */
+#ifndef SERIAL_PORT_DFNS
+#define SERIAL_PORT_DFNS
+#endif
+
+static const struct old_serial_port old_serial_port[] = {
+ SERIAL_PORT_DFNS /* defined in asm/serial.h */
+};
+
+#define UART_NR CONFIG_SERIAL_8250_NR_UARTS
+
+#ifdef CONFIG_SERIAL_8250_RSA
+
+#define PORT_RSA_MAX 4
+static unsigned long probe_rsa[PORT_RSA_MAX];
+static unsigned int probe_rsa_count;
+#endif /* CONFIG_SERIAL_8250_RSA */
+
+struct uart_8250_port {
+ struct uart_port port;
+ struct timer_list timer; /* "no irq" timer */
+ struct list_head list; /* ports on this IRQ */
+ unsigned short capabilities; /* port capabilities */
+ unsigned short bugs; /* port bugs */
+ unsigned int tx_loadsz; /* transmit fifo load size */
+ unsigned char acr;
+ unsigned char ier;
+ unsigned char lcr;
+ unsigned char mcr;
+ unsigned char mcr_mask; /* mask of user bits */
+ unsigned char mcr_force; /* mask of forced bits */
+ unsigned char cur_iotype; /* Running I/O type */
+
+ /*
+ * Some bits in registers are cleared on a read, so they must
+ * be saved whenever the register is read but the bits will not
+ * be immediately processed.
+ */
+#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
+ unsigned char lsr_saved_flags;
+#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
+ unsigned char msr_saved_flags;
+};
+
+struct irq_info {
+ struct hlist_node node;
+ int irq;
+ spinlock_t lock; /* Protects list not the hash */
+ struct list_head *head;
+};
+
+#define NR_IRQ_HASH 32 /* Can be adjusted later */
+static struct hlist_head irq_lists[NR_IRQ_HASH];
+static DEFINE_MUTEX(hash_mutex); /* Used to walk the hash */
+
+/*
+ * Here we define the default xmit fifo size used for each type of UART.
+ */
+static const struct serial8250_config uart_config[] = {
+ [PORT_UNKNOWN] = {
+ .name = "unknown",
+ .fifo_size = 1,
+ .tx_loadsz = 1,
+ },
+ [PORT_8250] = {
+ .name = "8250",
+ .fifo_size = 1,
+ .tx_loadsz = 1,
+ },
+ [PORT_16450] = {
+ .name = "16450",
+ .fifo_size = 1,
+ .tx_loadsz = 1,
+ },
+ [PORT_16550] = {
+ .name = "16550",
+ .fifo_size = 1,
+ .tx_loadsz = 1,
+ },
+ [PORT_16550A] = {
+ .name = "16550A",
+ .fifo_size = 16,
+ .tx_loadsz = 16,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO,
+ },
+ [PORT_CIRRUS] = {
+ .name = "Cirrus",
+ .fifo_size = 1,
+ .tx_loadsz = 1,
+ },
+ [PORT_16650] = {
+ .name = "ST16650",
+ .fifo_size = 1,
+ .tx_loadsz = 1,
+ .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+ },
+ [PORT_16650V2] = {
+ .name = "ST16650V2",
+ .fifo_size = 32,
+ .tx_loadsz = 16,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+ UART_FCR_T_TRIG_00,
+ .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+ },
+ [PORT_16750] = {
+ .name = "TI16750",
+ .fifo_size = 64,
+ .tx_loadsz = 64,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+ UART_FCR7_64BYTE,
+ .flags = UART_CAP_FIFO | UART_CAP_SLEEP | UART_CAP_AFE,
+ },
+ [PORT_STARTECH] = {
+ .name = "Startech",
+ .fifo_size = 1,
+ .tx_loadsz = 1,
+ },
+ [PORT_16C950] = {
+ .name = "16C950/954",
+ .fifo_size = 128,
+ .tx_loadsz = 128,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ /* UART_CAP_EFR breaks billionon CF bluetooth card. */
+ .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
+ },
+ [PORT_16654] = {
+ .name = "ST16654",
+ .fifo_size = 64,
+ .tx_loadsz = 32,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+ UART_FCR_T_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+ },
+ [PORT_16850] = {
+ .name = "XR16850",
+ .fifo_size = 128,
+ .tx_loadsz = 128,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+ },
+ [PORT_RSA] = {
+ .name = "RSA",
+ .fifo_size = 2048,
+ .tx_loadsz = 2048,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11,
+ .flags = UART_CAP_FIFO,
+ },
+ [PORT_NS16550A] = {
+ .name = "NS16550A",
+ .fifo_size = 16,
+ .tx_loadsz = 16,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_NATSEMI,
+ },
+ [PORT_XSCALE] = {
+ .name = "XScale",
+ .fifo_size = 32,
+ .tx_loadsz = 32,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_UUE | UART_CAP_RTOIE,
+ },
+ [PORT_RM9000] = {
+ .name = "RM9000",
+ .fifo_size = 16,
+ .tx_loadsz = 16,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO,
+ },
+ [PORT_OCTEON] = {
+ .name = "OCTEON",
+ .fifo_size = 64,
+ .tx_loadsz = 64,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO,
+ },
+ [PORT_AR7] = {
+ .name = "AR7",
+ .fifo_size = 16,
+ .tx_loadsz = 16,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE,
+ },
+ [PORT_U6_16550A] = {
+ .name = "U6_16550A",
+ .fifo_size = 64,
+ .tx_loadsz = 64,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE,
+ },
+ [PORT_TEGRA] = {
+ .name = "Tegra",
+ .fifo_size = 32,
+ .tx_loadsz = 8,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+ UART_FCR_T_TRIG_01,
+ .flags = UART_CAP_FIFO | UART_CAP_RTOIE,
+ },
+};
+
+#if defined(CONFIG_MIPS_ALCHEMY)
+
+/* Au1x00 UART hardware has a weird register layout */
+static const u8 au_io_in_map[] = {
+ [UART_RX] = 0,
+ [UART_IER] = 2,
+ [UART_IIR] = 3,
+ [UART_LCR] = 5,
+ [UART_MCR] = 6,
+ [UART_LSR] = 7,
+ [UART_MSR] = 8,
+};
+
+static const u8 au_io_out_map[] = {
+ [UART_TX] = 1,
+ [UART_IER] = 2,
+ [UART_FCR] = 4,
+ [UART_LCR] = 5,
+ [UART_MCR] = 6,
+};
+
+/* sane hardware needs no mapping */
+static inline int map_8250_in_reg(struct uart_port *p, int offset)
+{
+ if (p->iotype != UPIO_AU)
+ return offset;
+ return au_io_in_map[offset];
+}
+
+static inline int map_8250_out_reg(struct uart_port *p, int offset)
+{
+ if (p->iotype != UPIO_AU)
+ return offset;
+ return au_io_out_map[offset];
+}
+
+#elif defined(CONFIG_SERIAL_8250_RM9K)
+
+static const u8
+ regmap_in[8] = {
+ [UART_RX] = 0x00,
+ [UART_IER] = 0x0c,
+ [UART_IIR] = 0x14,
+ [UART_LCR] = 0x1c,
+ [UART_MCR] = 0x20,
+ [UART_LSR] = 0x24,
+ [UART_MSR] = 0x28,
+ [UART_SCR] = 0x2c
+ },
+ regmap_out[8] = {
+ [UART_TX] = 0x04,
+ [UART_IER] = 0x0c,
+ [UART_FCR] = 0x18,
+ [UART_LCR] = 0x1c,
+ [UART_MCR] = 0x20,
+ [UART_LSR] = 0x24,
+ [UART_MSR] = 0x28,
+ [UART_SCR] = 0x2c
+ };
+
+static inline int map_8250_in_reg(struct uart_port *p, int offset)
+{
+ if (p->iotype != UPIO_RM9000)
+ return offset;
+ return regmap_in[offset];
+}
+
+static inline int map_8250_out_reg(struct uart_port *p, int offset)
+{
+ if (p->iotype != UPIO_RM9000)
+ return offset;
+ return regmap_out[offset];
+}
+
+#else
+
+/* sane hardware needs no mapping */
+#define map_8250_in_reg(up, offset) (offset)
+#define map_8250_out_reg(up, offset) (offset)
+
+#endif
+
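+/*
+ * The HUB6 card multiplexes several UARTs behind a single pair of I/O
+ * ports: a select byte written to the base port picks the UART and
+ * register, and the data itself is transferred through base + 1.
+ */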
+static unsigned int hub6_serial_in(struct uart_port *p, int offset)
+{
+ offset = map_8250_in_reg(p, offset) << p->regshift;
+ outb(p->hub6 - 1 + offset, p->iobase);
+ return inb(p->iobase + 1);
+}
+
+static void hub6_serial_out(struct uart_port *p, int offset, int value)
+{
+ offset = map_8250_out_reg(p, offset) << p->regshift;
+ outb(p->hub6 - 1 + offset, p->iobase);
+ outb(value, p->iobase + 1);
+}
+
+static unsigned int mem_serial_in(struct uart_port *p, int offset)
+{
+ offset = map_8250_in_reg(p, offset) << p->regshift;
+ return readb(p->membase + offset);
+}
+
+static void mem_serial_out(struct uart_port *p, int offset, int value)
+{
+ offset = map_8250_out_reg(p, offset) << p->regshift;
+ writeb(value, p->membase + offset);
+}
+
+static void mem32_serial_out(struct uart_port *p, int offset, int value)
+{
+ offset = map_8250_out_reg(p, offset) << p->regshift;
+ writel(value, p->membase + offset);
+}
+
+static unsigned int mem32_serial_in(struct uart_port *p, int offset)
+{
+ offset = map_8250_in_reg(p, offset) << p->regshift;
+ return readl(p->membase + offset);
+}
+
+static unsigned int au_serial_in(struct uart_port *p, int offset)
+{
+ offset = map_8250_in_reg(p, offset) << p->regshift;
+ return __raw_readl(p->membase + offset);
+}
+
+static void au_serial_out(struct uart_port *p, int offset, int value)
+{
+ offset = map_8250_out_reg(p, offset) << p->regshift;
+ __raw_writel(value, p->membase + offset);
+}
+
+static unsigned int tsi_serial_in(struct uart_port *p, int offset)
+{
+ unsigned int tmp;
+ offset = map_8250_in_reg(p, offset) << p->regshift;
+ if (offset == UART_IIR) {
+ tmp = readl(p->membase + (UART_IIR & ~3));
+ return (tmp >> 16) & 0xff; /* UART_IIR % 4 == 2 */
+ } else
+ return readb(p->membase + offset);
+}
+
+static void tsi_serial_out(struct uart_port *p, int offset, int value)
+{
+ offset = map_8250_out_reg(p, offset) << p->regshift;
+ if (!((offset == UART_IER) && (value & UART_IER_UUE)))
+ writeb(value, p->membase + offset);
+}
+
+/* Save the LCR value so it can be re-written when a Busy Detect IRQ occurs. */
+static inline void dwapb_save_out_value(struct uart_port *p, int offset,
+ int value)
+{
+ struct uart_8250_port *up =
+ container_of(p, struct uart_8250_port, port);
+
+ if (offset == UART_LCR)
+ up->lcr = value;
+}
+
+/* Read the IER to ensure any interrupt is cleared before returning from ISR. */
+static inline void dwapb_check_clear_ier(struct uart_port *p, int offset)
+{
+ if (offset == UART_TX || offset == UART_IER)
+ p->serial_in(p, UART_IER);
+}
+
+static void dwapb_serial_out(struct uart_port *p, int offset, int value)
+{
+ int save_offset = offset;
+ offset = map_8250_out_reg(p, offset) << p->regshift;
+ dwapb_save_out_value(p, save_offset, value);
+ writeb(value, p->membase + offset);
+ dwapb_check_clear_ier(p, save_offset);
+}
+
+static void dwapb32_serial_out(struct uart_port *p, int offset, int value)
+{
+ int save_offset = offset;
+ offset = map_8250_out_reg(p, offset) << p->regshift;
+ dwapb_save_out_value(p, save_offset, value);
+ writel(value, p->membase + offset);
+ dwapb_check_clear_ier(p, save_offset);
+}
+
+static unsigned int io_serial_in(struct uart_port *p, int offset)
+{
+ offset = map_8250_in_reg(p, offset) << p->regshift;
+ return inb(p->iobase + offset);
+}
+
+static void io_serial_out(struct uart_port *p, int offset, int value)
+{
+ offset = map_8250_out_reg(p, offset) << p->regshift;
+ outb(value, p->iobase + offset);
+}
+
+static void set_io_from_upio(struct uart_port *p)
+{
+ struct uart_8250_port *up =
+ container_of(p, struct uart_8250_port, port);
+ switch (p->iotype) {
+ case UPIO_HUB6:
+ p->serial_in = hub6_serial_in;
+ p->serial_out = hub6_serial_out;
+ break;
+
+ case UPIO_MEM:
+ p->serial_in = mem_serial_in;
+ p->serial_out = mem_serial_out;
+ break;
+
+ case UPIO_RM9000:
+ case UPIO_MEM32:
+ p->serial_in = mem32_serial_in;
+ p->serial_out = mem32_serial_out;
+ break;
+
+ case UPIO_AU:
+ p->serial_in = au_serial_in;
+ p->serial_out = au_serial_out;
+ break;
+
+ case UPIO_TSI:
+ p->serial_in = tsi_serial_in;
+ p->serial_out = tsi_serial_out;
+ break;
+
+ case UPIO_DWAPB:
+ p->serial_in = mem_serial_in;
+ p->serial_out = dwapb_serial_out;
+ break;
+
+ case UPIO_DWAPB32:
+ p->serial_in = mem32_serial_in;
+ p->serial_out = dwapb32_serial_out;
+ break;
+
+ default:
+ p->serial_in = io_serial_in;
+ p->serial_out = io_serial_out;
+ break;
+ }
+ /* Remember loaded iotype */
+ up->cur_iotype = p->iotype;
+}
+
+static void
+serial_out_sync(struct uart_8250_port *up, int offset, int value)
+{
+ struct uart_port *p = &up->port;
+ switch (p->iotype) {
+ case UPIO_MEM:
+ case UPIO_MEM32:
+ case UPIO_AU:
+ case UPIO_DWAPB:
+ case UPIO_DWAPB32:
+ p->serial_out(p, offset, value);
+ p->serial_in(p, UART_LCR); /* safe, no side-effects */
+ break;
+ default:
+ p->serial_out(p, offset, value);
+ }
+}
+
+#define serial_in(up, offset) \
+ (up->port.serial_in(&(up)->port, (offset)))
+#define serial_out(up, offset, value) \
+ (up->port.serial_out(&(up)->port, (offset), (value)))
+/*
+ * We used to support using pause I/O for certain machines. We
+ * haven't supported this for a while, but just in case it's badly
+ * needed for certain old 386 machines, I've left these #define's
+ * in....
+ */
+#define serial_inp(up, offset) serial_in(up, offset)
+#define serial_outp(up, offset, value) serial_out(up, offset, value)
+
+/* Uart divisor latch read */
+static inline int _serial_dl_read(struct uart_8250_port *up)
+{
+ return serial_inp(up, UART_DLL) | serial_inp(up, UART_DLM) << 8;
+}
+
+/* Uart divisor latch write */
+static inline void _serial_dl_write(struct uart_8250_port *up, int value)
+{
+ serial_outp(up, UART_DLL, value & 0xff);
+ serial_outp(up, UART_DLM, value >> 8 & 0xff);
+}
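+
+/*
+ * The divisor latch sets the line speed as uartclk / (16 * divisor);
+ * for example, a 1.8432MHz clock with a divisor of 12 gives
+ * 1843200 / (16 * 12) = 9600 baud.
+ */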
+
+#if defined(CONFIG_MIPS_ALCHEMY)
+/* The Au1x00 hasn't got a standard divisor latch */
+static int serial_dl_read(struct uart_8250_port *up)
+{
+ if (up->port.iotype == UPIO_AU)
+ return __raw_readl(up->port.membase + 0x28);
+ else
+ return _serial_dl_read(up);
+}
+
+static void serial_dl_write(struct uart_8250_port *up, int value)
+{
+ if (up->port.iotype == UPIO_AU)
+ __raw_writel(value, up->port.membase + 0x28);
+ else
+ _serial_dl_write(up, value);
+}
+#elif defined(CONFIG_SERIAL_8250_RM9K)
+static int serial_dl_read(struct uart_8250_port *up)
+{
+ return (up->port.iotype == UPIO_RM9000) ?
+ (((__raw_readl(up->port.membase + 0x10) << 8) |
+ (__raw_readl(up->port.membase + 0x08) & 0xff)) & 0xffff) :
+ _serial_dl_read(up);
+}
+
+static void serial_dl_write(struct uart_8250_port *up, int value)
+{
+ if (up->port.iotype == UPIO_RM9000) {
+ __raw_writel(value, up->port.membase + 0x08);
+ __raw_writel(value >> 8, up->port.membase + 0x10);
+ } else {
+ _serial_dl_write(up, value);
+ }
+}
+#else
+#define serial_dl_read(up) _serial_dl_read(up)
+#define serial_dl_write(up, value) _serial_dl_write(up, value)
+#endif
+
+/*
+ * For the 16C950
+ */
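+/*
+ * The 16C950's indexed control registers are reached indirectly: the
+ * register index is written to SCR and the value to ICR.  Reading one
+ * back requires temporarily setting the ICRRD bit in the ACR.
+ */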
+static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
+{
+ serial_out(up, UART_SCR, offset);
+ serial_out(up, UART_ICR, value);
+}
+
+static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
+{
+ unsigned int value;
+
+ serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
+ serial_out(up, UART_SCR, offset);
+ value = serial_in(up, UART_ICR);
+ serial_icr_write(up, UART_ACR, up->acr);
+
+ return value;
+}
+
+/*
+ * FIFO support.
+ */
+static void serial8250_clear_fifos(struct uart_8250_port *p)
+{
+ if (p->capabilities & UART_CAP_FIFO) {
+ serial_outp(p, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_outp(p, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ serial_outp(p, UART_FCR, 0);
+ }
+}
+
+/*
+ * IER sleep support. UARTs which have EFRs need the "extended
+ * capability" bit enabled. Note that on XR16C850s, we need to
+ * reset LCR to write to IER.
+ */
+static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
+{
+ if (p->capabilities & UART_CAP_SLEEP) {
+ if (p->capabilities & UART_CAP_EFR) {
+ serial_outp(p, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_outp(p, UART_EFR, UART_EFR_ECB);
+ serial_outp(p, UART_LCR, 0);
+ }
+ serial_outp(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
+ if (p->capabilities & UART_CAP_EFR) {
+ serial_outp(p, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_outp(p, UART_EFR, 0);
+ serial_outp(p, UART_LCR, 0);
+ }
+ }
+}
+
+#ifdef CONFIG_SERIAL_8250_RSA
+/*
+ * Attempts to turn on the RSA FIFO. Returns zero on failure.
+ * We set the port uart clock rate if we succeed.
+ */
+static int __enable_rsa(struct uart_8250_port *up)
+{
+ unsigned char mode;
+ int result;
+
+ mode = serial_inp(up, UART_RSA_MSR);
+ result = mode & UART_RSA_MSR_FIFO;
+
+ if (!result) {
+ serial_outp(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO);
+ mode = serial_inp(up, UART_RSA_MSR);
+ result = mode & UART_RSA_MSR_FIFO;
+ }
+
+ if (result)
+ up->port.uartclk = SERIAL_RSA_BAUD_BASE * 16;
+
+ return result;
+}
+
+static void enable_rsa(struct uart_8250_port *up)
+{
+ if (up->port.type == PORT_RSA) {
+ if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
+ spin_lock_irq(&up->port.lock);
+ __enable_rsa(up);
+ spin_unlock_irq(&up->port.lock);
+ }
+ if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
+ serial_outp(up, UART_RSA_FRR, 0);
+ }
+}
+
+/*
+ * Attempts to turn off the RSA FIFO.
+ * It is unknown why the original code disabled interrupts here; this
+ * function preserves that behaviour by taking the port spinlock with
+ * interrupts disabled.
+ */
+static void disable_rsa(struct uart_8250_port *up)
+{
+ unsigned char mode;
+ int result;
+
+ if (up->port.type == PORT_RSA &&
+ up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
+ spin_lock_irq(&up->port.lock);
+
+ mode = serial_inp(up, UART_RSA_MSR);
+ result = !(mode & UART_RSA_MSR_FIFO);
+
+ if (!result) {
+ serial_outp(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO);
+ mode = serial_inp(up, UART_RSA_MSR);
+ result = !(mode & UART_RSA_MSR_FIFO);
+ }
+
+ if (result)
+ up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
+ spin_unlock_irq(&up->port.lock);
+ }
+}
+#endif /* CONFIG_SERIAL_8250_RSA */
+
+/*
+ * This is a quickie test to see how big the FIFO is.
+ * It doesn't work all the time, more's the pity.
+ */
+static int size_fifo(struct uart_8250_port *up)
+{
+ unsigned char old_fcr, old_mcr, old_lcr;
+ unsigned short old_dl;
+ int count;
+
+ old_lcr = serial_inp(up, UART_LCR);
+ serial_outp(up, UART_LCR, 0);
+ old_fcr = serial_inp(up, UART_FCR);
+ old_mcr = serial_inp(up, UART_MCR);
+ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ serial_outp(up, UART_MCR, UART_MCR_LOOP);
+ serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ old_dl = serial_dl_read(up);
+ serial_dl_write(up, 0x0001);
+ serial_outp(up, UART_LCR, 0x03);
+ for (count = 0; count < 256; count++)
+ serial_outp(up, UART_TX, count);
+ mdelay(20);/* FIXME - schedule_timeout */
+ for (count = 0; (serial_inp(up, UART_LSR) & UART_LSR_DR) &&
+ (count < 256); count++)
+ serial_inp(up, UART_RX);
+ serial_outp(up, UART_FCR, old_fcr);
+ serial_outp(up, UART_MCR, old_mcr);
+ serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ serial_dl_write(up, old_dl);
+ serial_outp(up, UART_LCR, old_lcr);
+
+ return count;
+}
+
+/*
+ * Read UART ID using the divisor method - set DLL and DLM to zero
+ * and the revision will be in DLL and device type in DLM. We
+ * preserve the device state across this.
+ */
+static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
+{
+ unsigned char old_dll, old_dlm, old_lcr;
+ unsigned int id;
+
+ old_lcr = serial_inp(p, UART_LCR);
+ serial_outp(p, UART_LCR, UART_LCR_CONF_MODE_A);
+
+ old_dll = serial_inp(p, UART_DLL);
+ old_dlm = serial_inp(p, UART_DLM);
+
+ serial_outp(p, UART_DLL, 0);
+ serial_outp(p, UART_DLM, 0);
+
+ id = serial_inp(p, UART_DLL) | serial_inp(p, UART_DLM) << 8;
+
+ serial_outp(p, UART_DLL, old_dll);
+ serial_outp(p, UART_DLM, old_dlm);
+ serial_outp(p, UART_LCR, old_lcr);
+
+ return id;
+}
+
+/*
+ * This is a helper routine to autodetect StarTech/Exar/Oxsemi UART's.
+ * When this function is called we know it is at least a StarTech
+ * 16650 V2, but it might be one of several StarTech UARTs, or one of
+ * its clones. (We treat the broken original StarTech 16650 V1 as a
+ * 16550, and why not? Startech doesn't seem to even acknowledge its
+ * existence.)
+ *
+ * What evil have men's minds wrought...
+ */
+static void autoconfig_has_efr(struct uart_8250_port *up)
+{
+ unsigned int id1, id2, id3, rev;
+
+ /*
+ * Everything with an EFR has SLEEP
+ */
+ up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
+
+ /*
+ * First we check to see if it's an Oxford Semiconductor UART.
+ *
+	 * We have to do this here because some non-National
+	 * Semiconductor clone chips lock up if you try writing to the
+	 * LSR register (which serial_icr_read does).
+ */
+
+ /*
+ * Check for Oxford Semiconductor 16C950.
+ *
+ * EFR [4] must be set else this test fails.
+ *
+ * This shouldn't be necessary, but Mike Hudson (Exoray@isys.ca)
+ * claims that it's needed for 952 dual UART's (which are not
+ * recommended for new designs).
+ */
+ up->acr = 0;
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, UART_LCR, 0x00);
+ id1 = serial_icr_read(up, UART_ID1);
+ id2 = serial_icr_read(up, UART_ID2);
+ id3 = serial_icr_read(up, UART_ID3);
+ rev = serial_icr_read(up, UART_REV);
+
+ DEBUG_AUTOCONF("950id=%02x:%02x:%02x:%02x ", id1, id2, id3, rev);
+
+ if (id1 == 0x16 && id2 == 0xC9 &&
+ (id3 == 0x50 || id3 == 0x52 || id3 == 0x54)) {
+ up->port.type = PORT_16C950;
+
+ /*
+ * Enable work around for the Oxford Semiconductor 952 rev B
+ * chip which causes it to seriously miscalculate baud rates
+ * when DLL is 0.
+ */
+ if (id3 == 0x52 && rev == 0x01)
+ up->bugs |= UART_BUG_QUOT;
+ return;
+ }
+
+ /*
+ * We check for a XR16C850 by setting DLL and DLM to 0, and then
+ * reading back DLL and DLM. The chip type depends on the DLM
+ * value read back:
+ * 0x10 - XR16C850 and the DLL contains the chip revision.
+ * 0x12 - XR16C2850.
+ * 0x14 - XR16C854.
+ */
+ id1 = autoconfig_read_divisor_id(up);
+ DEBUG_AUTOCONF("850id=%04x ", id1);
+
+ id2 = id1 >> 8;
+ if (id2 == 0x10 || id2 == 0x12 || id2 == 0x14) {
+ up->port.type = PORT_16850;
+ return;
+ }
+
+ /*
+ * It wasn't an XR16C850.
+ *
+ * We distinguish between the '654 and the '650 by counting
+ * how many bytes are in the FIFO. I'm using this for now,
+ * since that's the technique that was sent to me in the
+ * serial driver update, but I'm not convinced this works.
+ * I've had problems doing this in the past. -TYT
+ */
+ if (size_fifo(up) == 64)
+ up->port.type = PORT_16654;
+ else
+ up->port.type = PORT_16650V2;
+}
+
+/*
+ * We detected a chip without a FIFO. Only two fall into
+ * this category - the original 8250 and the 16450. The
+ * 16450 has a scratch register (accessible with LCR=0)
+ */
+static void autoconfig_8250(struct uart_8250_port *up)
+{
+ unsigned char scratch, status1, status2;
+
+ up->port.type = PORT_8250;
+
+ scratch = serial_in(up, UART_SCR);
+ serial_outp(up, UART_SCR, 0xa5);
+ status1 = serial_in(up, UART_SCR);
+ serial_outp(up, UART_SCR, 0x5a);
+ status2 = serial_in(up, UART_SCR);
+ serial_outp(up, UART_SCR, scratch);
+
+ if (status1 == 0xa5 && status2 == 0x5a)
+ up->port.type = PORT_16450;
+}
+
+static int broken_efr(struct uart_8250_port *up)
+{
+ /*
+ * Exar ST16C2550 "A2" devices incorrectly detect as
+ * having an EFR, and report an ID of 0x0201. See
+ * http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html
+ */
+ if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16)
+ return 1;
+
+ return 0;
+}
+
+static inline int ns16550a_goto_highspeed(struct uart_8250_port *up)
+{
+ unsigned char status;
+
+ status = serial_in(up, 0x04); /* EXCR2 */
+#define PRESL(x) ((x) & 0x30)
+ if (PRESL(status) == 0x10) {
+ /* already in high speed mode */
+ return 0;
+ } else {
+ status &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
+ status |= 0x10; /* 1.625 divisor for baud_base --> 921600 */
+ serial_outp(up, 0x04, status);
+ }
+ return 1;
+}
+
+/*
+ * We know that the chip has FIFOs. Does it have an EFR? The
+ * EFR is located in the same register position as the IIR and
+ * we know the top two bits of the IIR are currently set. The
+ * EFR should contain zero. Try to read the EFR.
+ */
+static void autoconfig_16550a(struct uart_8250_port *up)
+{
+ unsigned char status1, status2;
+ unsigned int iersave;
+
+ up->port.type = PORT_16550A;
+ up->capabilities |= UART_CAP_FIFO;
+
+ /*
+ * Check for presence of the EFR when DLAB is set.
+ * Only ST16C650V1 UARTs pass this test.
+ */
+ serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ if (serial_in(up, UART_EFR) == 0) {
+ serial_outp(up, UART_EFR, 0xA8);
+ if (serial_in(up, UART_EFR) != 0) {
+ DEBUG_AUTOCONF("EFRv1 ");
+ up->port.type = PORT_16650;
+ up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
+ } else {
+ DEBUG_AUTOCONF("Motorola 8xxx DUART ");
+ }
+ serial_outp(up, UART_EFR, 0);
+ return;
+ }
+
+ /*
+ * Maybe it requires 0xbf to be written to the LCR.
+ * (other ST16C650V2 UARTs, TI16C752A, etc)
+ */
+ serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ if (serial_in(up, UART_EFR) == 0 && !broken_efr(up)) {
+ DEBUG_AUTOCONF("EFRv2 ");
+ autoconfig_has_efr(up);
+ return;
+ }
+
+ /*
+ * Check for a National Semiconductor SuperIO chip.
+ * Attempt to switch to bank 2, read the value of the LOOP bit
+ * from EXCR1. Switch back to bank 0, change it in MCR. Then
+ * switch back to bank 2, read it from EXCR1 again and check
+ * it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
+ */
+ serial_outp(up, UART_LCR, 0);
+ status1 = serial_in(up, UART_MCR);
+ serial_outp(up, UART_LCR, 0xE0);
+ status2 = serial_in(up, 0x02); /* EXCR1 */
+
+ if (!((status2 ^ status1) & UART_MCR_LOOP)) {
+ serial_outp(up, UART_LCR, 0);
+ serial_outp(up, UART_MCR, status1 ^ UART_MCR_LOOP);
+ serial_outp(up, UART_LCR, 0xE0);
+ status2 = serial_in(up, 0x02); /* EXCR1 */
+ serial_outp(up, UART_LCR, 0);
+ serial_outp(up, UART_MCR, status1);
+
+ if ((status2 ^ status1) & UART_MCR_LOOP) {
+ unsigned short quot;
+
+ serial_outp(up, UART_LCR, 0xE0);
+
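+			/*
+			 * Switching to high-speed mode raises baud_base to
+			 * 921600, eight times the standard 115200, so scale
+			 * the existing divisor by 8 to keep the configured
+			 * baud rate unchanged.
+			 */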
+ quot = serial_dl_read(up);
+ quot <<= 3;
+
+ if (ns16550a_goto_highspeed(up))
+ serial_dl_write(up, quot);
+
+ serial_outp(up, UART_LCR, 0);
+
+ up->port.uartclk = 921600*16;
+ up->port.type = PORT_NS16550A;
+ up->capabilities |= UART_NATSEMI;
+ return;
+ }
+ }
+
+ /*
+ * No EFR. Try to detect a TI16750, which only sets bit 5 of
+ * the IIR when 64 byte FIFO mode is enabled when DLAB is set.
+ * Try setting it with and without DLAB set. Cheap clones
+ * set bit 5 without DLAB set.
+ */
+ serial_outp(up, UART_LCR, 0);
+ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
+ status1 = serial_in(up, UART_IIR) >> 5;
+ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
+ status2 = serial_in(up, UART_IIR) >> 5;
+ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_outp(up, UART_LCR, 0);
+
+ DEBUG_AUTOCONF("iir1=%d iir2=%d ", status1, status2);
+
+ if (status1 == 6 && status2 == 7) {
+ up->port.type = PORT_16750;
+ up->capabilities |= UART_CAP_AFE | UART_CAP_SLEEP;
+ return;
+ }
+
+ /*
+ * Try writing and reading the UART_IER_UUE bit (b6).
+ * If it works, this is probably one of the Xscale platform's
+ * internal UARTs.
+ * We're going to explicitly set the UUE bit to 0 before
+ * trying to write and read a 1 just to make sure it's not
+	 * already a 1 and maybe locked there before we even start.
+ */
+ iersave = serial_in(up, UART_IER);
+ serial_outp(up, UART_IER, iersave & ~UART_IER_UUE);
+ if (!(serial_in(up, UART_IER) & UART_IER_UUE)) {
+ /*
+ * OK it's in a known zero state, try writing and reading
+ * without disturbing the current state of the other bits.
+ */
+ serial_outp(up, UART_IER, iersave | UART_IER_UUE);
+ if (serial_in(up, UART_IER) & UART_IER_UUE) {
+ /*
+ * It's an Xscale.
+ * We'll leave the UART_IER_UUE bit set to 1 (enabled).
+ */
+ DEBUG_AUTOCONF("Xscale ");
+ up->port.type = PORT_XSCALE;
+ up->capabilities |= UART_CAP_UUE | UART_CAP_RTOIE;
+ return;
+ }
+ } else {
+ /*
+ * If we got here we couldn't force the IER_UUE bit to 0.
+ * Log it and continue.
+ */
+ DEBUG_AUTOCONF("Couldn't force IER_UUE to 0 ");
+ }
+ serial_outp(up, UART_IER, iersave);
+
+ /*
+ * We distinguish between 16550A and U6 16550A by counting
+ * how many bytes are in the FIFO.
+ */
+ if (up->port.type == PORT_16550A && size_fifo(up) == 64) {
+ up->port.type = PORT_U6_16550A;
+ up->capabilities |= UART_CAP_AFE;
+ }
+}
+
+/*
+ * This routine is called by rs_init() to initialize a specific serial
+ * port. It determines what type of UART chip this serial port is
+ * using: 8250, 16450, 16550, 16550A. The important question is
+ * whether or not this UART is a 16550A, since that determines
+ * whether we can use its FIFO features.
+ */
+static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
+{
+ unsigned char status1, scratch, scratch2, scratch3;
+ unsigned char save_lcr, save_mcr;
+ unsigned long flags;
+
+ if (!up->port.iobase && !up->port.mapbase && !up->port.membase)
+ return;
+
+ DEBUG_AUTOCONF("ttyS%d: autoconf (0x%04lx, 0x%p): ",
+ serial_index(&up->port), up->port.iobase, up->port.membase);
+
+ /*
+ * We really do need global IRQs disabled here - we're going to
+	 * be frobbing the chip's IRQ enable register to see if it exists.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ up->capabilities = 0;
+ up->bugs = 0;
+
+ if (!(up->port.flags & UPF_BUGGY_UART)) {
+ /*
+ * Do a simple existence test first; if we fail this,
+ * there's no point trying anything else.
+ *
+ * 0x80 is used as a nonsense port to prevent against
+ * false positives due to ISA bus float. The
+ * assumption is that 0x80 is a non-existent port;
+ * which should be safe since include/asm/io.h also
+ * makes this assumption.
+ *
+ * Note: this is safe as long as MCR bit 4 is clear
+ * and the device is in "PC" mode.
+ */
+ scratch = serial_inp(up, UART_IER);
+ serial_outp(up, UART_IER, 0);
+#ifdef __i386__
+ outb(0xff, 0x080);
+#endif
+ /*
+ * Mask out IER[7:4] bits for test as some UARTs (e.g. TL
+		 * 16C754B) only allow them to be modified if an EFR bit is set.
+ */
+ scratch2 = serial_inp(up, UART_IER) & 0x0f;
+ serial_outp(up, UART_IER, 0x0F);
+#ifdef __i386__
+ outb(0, 0x080);
+#endif
+ scratch3 = serial_inp(up, UART_IER) & 0x0f;
+ serial_outp(up, UART_IER, scratch);
+ if (scratch2 != 0 || scratch3 != 0x0F) {
+ /*
+ * We failed; there's nothing here
+ */
+ DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
+ scratch2, scratch3);
+ goto out;
+ }
+ }
+
+ save_mcr = serial_in(up, UART_MCR);
+ save_lcr = serial_in(up, UART_LCR);
+
+ /*
+ * Check to see if a UART is really there. Certain broken
+ * internal modems based on the Rockwell chipset fail this
+ * test, because they apparently don't implement the loopback
+ * test mode. So this test is skipped on the COM 1 through
+ * COM 4 ports. This *should* be safe, since no board
+ * manufacturer would be stupid enough to design a board
+ * that conflicts with COM 1-4 --- we hope!
+ */
+ if (!(up->port.flags & UPF_SKIP_TEST)) {
+ serial_outp(up, UART_MCR, UART_MCR_LOOP | 0x0A);
+ status1 = serial_inp(up, UART_MSR) & 0xF0;
+ serial_outp(up, UART_MCR, save_mcr);
+ if (status1 != 0x90) {
+ DEBUG_AUTOCONF("LOOP test failed (%02x) ",
+ status1);
+ goto out;
+ }
+ }
+
+ /*
+	 * We're pretty sure there's a port here. Let's find out what
+	 * type of port it is. The IIR top two bits allow us to find
+ * out if it's 8250 or 16450, 16550, 16550A or later. This
+ * determines what we test for next.
+ *
+ * We also initialise the EFR (if any) to zero for later. The
+ * EFR occupies the same register location as the FCR and IIR.
+ */
+ serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_outp(up, UART_EFR, 0);
+ serial_outp(up, UART_LCR, 0);
+
+ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ scratch = serial_in(up, UART_IIR) >> 6;
+
+ DEBUG_AUTOCONF("iir=%d ", scratch);
+
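+	/*
+	 * IIR[7:6] after enabling the FIFO: 0 means no FIFO (8250/16450
+	 * family), 1 is unexpected and treated as unknown, 2 means a FIFO
+	 * is present but unusable (16550), and 3 means a working FIFO
+	 * (16550A or better).
+	 */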
+ switch (scratch) {
+ case 0:
+ autoconfig_8250(up);
+ break;
+ case 1:
+ up->port.type = PORT_UNKNOWN;
+ break;
+ case 2:
+ up->port.type = PORT_16550;
+ break;
+ case 3:
+ autoconfig_16550a(up);
+ break;
+ }
+
+#ifdef CONFIG_SERIAL_8250_RSA
+ /*
+ * Only probe for RSA ports if we got the region.
+ */
+ if (up->port.type == PORT_16550A && probeflags & PROBE_RSA) {
+ int i;
+
+ for (i = 0 ; i < probe_rsa_count; ++i) {
+ if (probe_rsa[i] == up->port.iobase &&
+ __enable_rsa(up)) {
+ up->port.type = PORT_RSA;
+ break;
+ }
+ }
+ }
+#endif
+
+ serial_outp(up, UART_LCR, save_lcr);
+
+ if (up->capabilities != uart_config[up->port.type].flags) {
+ printk(KERN_WARNING
+ "ttyS%d: detected caps %08x should be %08x\n",
+ serial_index(&up->port), up->capabilities,
+ uart_config[up->port.type].flags);
+ }
+
+ up->port.fifosize = uart_config[up->port.type].fifo_size;
+ up->capabilities = uart_config[up->port.type].flags;
+ up->tx_loadsz = uart_config[up->port.type].tx_loadsz;
+
+ if (up->port.type == PORT_UNKNOWN)
+ goto out;
+
+ /*
+ * Reset the UART.
+ */
+#ifdef CONFIG_SERIAL_8250_RSA
+ if (up->port.type == PORT_RSA)
+ serial_outp(up, UART_RSA_FRR, 0);
+#endif
+ serial_outp(up, UART_MCR, save_mcr);
+ serial8250_clear_fifos(up);
+ serial_in(up, UART_RX);
+ if (up->capabilities & UART_CAP_UUE)
+ serial_outp(up, UART_IER, UART_IER_UUE);
+ else
+ serial_outp(up, UART_IER, 0);
+
+ out:
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ DEBUG_AUTOCONF("type=%s\n", uart_config[up->port.type].name);
+}
+
+static void autoconfig_irq(struct uart_8250_port *up)
+{
+ unsigned char save_mcr, save_ier;
+ unsigned char save_ICP = 0;
+ unsigned int ICP = 0;
+ unsigned long irqs;
+ int irq;
+
+ if (up->port.flags & UPF_FOURPORT) {
+ ICP = (up->port.iobase & 0xfe0) | 0x1f;
+ save_ICP = inb_p(ICP);
+ outb_p(0x80, ICP);
+ (void) inb_p(ICP);
+ }
+
+ /* forget possible initially masked and pending IRQ */
+ probe_irq_off(probe_irq_on());
+ save_mcr = serial_inp(up, UART_MCR);
+ save_ier = serial_inp(up, UART_IER);
+ serial_outp(up, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2);
+
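+	/*
+	 * With all UART interrupt sources enabled, toggle the modem
+	 * control lines and transmit one character, then let
+	 * probe_irq_off() report which IRQ line actually fired.
+	 */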
+ irqs = probe_irq_on();
+ serial_outp(up, UART_MCR, 0);
+ udelay(10);
+ if (up->port.flags & UPF_FOURPORT) {
+ serial_outp(up, UART_MCR,
+ UART_MCR_DTR | UART_MCR_RTS);
+ } else {
+ serial_outp(up, UART_MCR,
+ UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
+ }
+ serial_outp(up, UART_IER, 0x0f); /* enable all intrs */
+ (void)serial_inp(up, UART_LSR);
+ (void)serial_inp(up, UART_RX);
+ (void)serial_inp(up, UART_IIR);
+ (void)serial_inp(up, UART_MSR);
+ serial_outp(up, UART_TX, 0xFF);
+ udelay(20);
+ irq = probe_irq_off(irqs);
+
+ serial_outp(up, UART_MCR, save_mcr);
+ serial_outp(up, UART_IER, save_ier);
+
+ if (up->port.flags & UPF_FOURPORT)
+ outb_p(save_ICP, ICP);
+
+ up->port.irq = (irq > 0) ? irq : 0;
+}
+
+static inline void __stop_tx(struct uart_8250_port *p)
+{
+ if (p->ier & UART_IER_THRI) {
+ p->ier &= ~UART_IER_THRI;
+ serial_out(p, UART_IER, p->ier);
+ }
+}
+
+static void serial8250_stop_tx(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ __stop_tx(up);
+
+ /*
+ * We really want to stop the transmitter from sending.
+ */
+ if (up->port.type == PORT_16C950) {
+ up->acr |= UART_ACR_TXDIS;
+ serial_icr_write(up, UART_ACR, up->acr);
+ }
+}
+
+static void transmit_chars(struct uart_8250_port *up);
+
+static void serial8250_start_tx(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ if (!(up->ier & UART_IER_THRI)) {
+ up->ier |= UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+
+ if (up->bugs & UART_BUG_TXEN) {
+ unsigned char lsr;
+ lsr = serial_in(up, UART_LSR);
+ up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ if ((up->port.type == PORT_RM9000) ?
+ (lsr & UART_LSR_THRE) :
+ (lsr & UART_LSR_TEMT))
+ transmit_chars(up);
+ }
+ }
+
+ /*
+ * Re-enable the transmitter if we disabled it.
+ */
+ if (up->port.type == PORT_16C950 && up->acr & UART_ACR_TXDIS) {
+ up->acr &= ~UART_ACR_TXDIS;
+ serial_icr_write(up, UART_ACR, up->acr);
+ }
+}
+
+static void serial8250_stop_rx(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ up->ier &= ~UART_IER_RLSI;
+ up->port.read_status_mask &= ~UART_LSR_DR;
+ serial_out(up, UART_IER, up->ier);
+}
+
+static void serial8250_enable_ms(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ /* no MSR capabilities */
+ if (up->bugs & UART_BUG_NOMSR)
+ return;
+
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
+}
+
+/*
+ * Clear the Tegra rx fifo after a break
+ *
+ * FIXME: This needs to become a port specific callback once we have a
+ * framework for this
+ */
+static void clear_rx_fifo(struct uart_8250_port *up)
+{
+ unsigned int status, tmout = 10000;
+ do {
+ status = serial_in(up, UART_LSR);
+ if (status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS))
+ status = serial_in(up, UART_RX);
+ else
+ break;
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ } while (1);
+}
+
+static void
+receive_chars(struct uart_8250_port *up, unsigned int *status)
+{
+ struct tty_struct *tty = up->port.state->port.tty;
+ unsigned char ch, lsr = *status;
+ int max_count = 256;
+ char flag;
+
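+	/*
+	 * Drain the receiver: read up to max_count characters, classify
+	 * each one from the LSR error bits, and hand it to the tty layer.
+	 */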
+ do {
+ if (likely(lsr & UART_LSR_DR))
+ ch = serial_inp(up, UART_RX);
+ else
+ /*
+			 * The Intel 82571 has a Serial Over LAN device that will
+			 * set UART_LSR_BI without setting UART_LSR_DR when
+			 * it receives a break.  To avoid reading from the
+			 * receive buffer without the UART_LSR_DR bit set, we
+			 * just force the read character to be 0.
+ */
+ ch = 0;
+
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+
+ lsr |= up->lsr_saved_flags;
+ up->lsr_saved_flags = 0;
+
+ if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
+ /*
+ * For statistics only
+ */
+ if (lsr & UART_LSR_BI) {
+ lsr &= ~(UART_LSR_FE | UART_LSR_PE);
+ up->port.icount.brk++;
+ /*
+				 * If this is a Tegra port, clear the rx fifo to
+ * accept another break/character.
+ */
+ if (up->port.type == PORT_TEGRA)
+ clear_rx_fifo(up);
+
+ /*
+ * We do the SysRQ and SAK checking
+ * here because otherwise the break
+ * may get masked by ignore_status_mask
+ * or read_status_mask.
+ */
+ if (uart_handle_break(&up->port))
+ goto ignore_char;
+ } else if (lsr & UART_LSR_PE)
+ up->port.icount.parity++;
+ else if (lsr & UART_LSR_FE)
+ up->port.icount.frame++;
+ if (lsr & UART_LSR_OE)
+ up->port.icount.overrun++;
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ lsr &= up->port.read_status_mask;
+
+ if (lsr & UART_LSR_BI) {
+ DEBUG_INTR("handling break....");
+ flag = TTY_BREAK;
+ } else if (lsr & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (lsr & UART_LSR_FE)
+ flag = TTY_FRAME;
+ }
+ if (uart_handle_sysrq_char(&up->port, ch))
+ goto ignore_char;
+
+ uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag);
+
+ignore_char:
+ lsr = serial_inp(up, UART_LSR);
+ } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
+ spin_unlock(&up->port.lock);
+ tty_flip_buffer_push(tty);
+ spin_lock(&up->port.lock);
+ *status = lsr;
+}
+
+static void transmit_chars(struct uart_8250_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int count;
+
+ if (up->port.x_char) {
+ serial_outp(up, UART_TX, up->port.x_char);
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ return;
+ }
+ if (uart_tx_stopped(&up->port)) {
+ serial8250_stop_tx(&up->port);
+ return;
+ }
+ if (uart_circ_empty(xmit)) {
+ __stop_tx(up);
+ return;
+ }
+
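+	/* Refill the transmit FIFO with up to tx_loadsz characters. */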
+ count = up->tx_loadsz;
+ do {
+ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ DEBUG_INTR("THRE...");
+
+ if (uart_circ_empty(xmit))
+ __stop_tx(up);
+}
+
+static unsigned int check_modem_status(struct uart_8250_port *up)
+{
+ unsigned int status = serial_in(up, UART_MSR);
+
+ status |= up->msr_saved_flags;
+ up->msr_saved_flags = 0;
+ if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
+ up->port.state != NULL) {
+ if (status & UART_MSR_TERI)
+ up->port.icount.rng++;
+ if (status & UART_MSR_DDSR)
+ up->port.icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
+ if (status & UART_MSR_DCTS)
+ uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
+
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+ }
+
+ return status;
+}
+
+/*
+ * This handles the interrupt from one port.
+ */
+static void serial8250_handle_port(struct uart_8250_port *up)
+{
+ unsigned int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ status = serial_inp(up, UART_LSR);
+
+ DEBUG_INTR("status = %x...", status);
+
+ if (status & (UART_LSR_DR | UART_LSR_BI))
+ receive_chars(up, &status);
+ check_modem_status(up);
+ if (status & UART_LSR_THRE)
+ transmit_chars(up);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+/*
+ * This is the serial driver's interrupt routine.
+ *
+ * Arjan thinks the old way was overly complex, so it got simplified.
+ * Alan disagrees, saying that we need the complexity to handle the weird
+ * nature of ISA shared interrupts. (This is a special exception.)
+ *
+ * In order to handle ISA shared interrupts properly, we need to check
+ * that all ports have been serviced, and therefore the ISA interrupt
+ * line has been de-asserted.
+ *
+ * This means we need to loop through all ports, checking that they
+ * don't have an interrupt pending.
+ */
+static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
+{
+ struct irq_info *i = dev_id;
+ struct list_head *l, *end = NULL;
+ int pass_counter = 0, handled = 0;
+
+ DEBUG_INTR("serial8250_interrupt(%d)...", irq);
+
+ spin_lock(&i->lock);
+
+ l = i->head;
+ do {
+ struct uart_8250_port *up;
+ unsigned int iir;
+
+ up = list_entry(l, struct uart_8250_port, list);
+
+ iir = serial_in(up, UART_IIR);
+ if (!(iir & UART_IIR_NO_INT)) {
+ serial8250_handle_port(up);
+
+ handled = 1;
+
+ end = NULL;
+ } else if ((up->port.iotype == UPIO_DWAPB ||
+ up->port.iotype == UPIO_DWAPB32) &&
+ (iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
+			/* The DesignWare APB UART has a Busy Detect (0x07)
+ * interrupt meaning an LCR write attempt occurred while the
+ * UART was busy. The interrupt must be cleared by reading
+ * the UART status register (USR) and the LCR re-written. */
+ unsigned int status;
+ status = *(volatile u32 *)up->port.private_data;
+ serial_out(up, UART_LCR, up->lcr);
+
+ handled = 1;
+
+ end = NULL;
+ } else if (end == NULL)
+ end = l;
+
+ l = l->next;
+
+ if (l == i->head && pass_counter++ > PASS_LIMIT) {
+ /* If we hit this, we're dead. */
+ printk_ratelimited(KERN_ERR
+ "serial8250: too much work for irq%d\n", irq);
+ break;
+ }
+ } while (l != end);
+
+ spin_unlock(&i->lock);
+
+ DEBUG_INTR("end.\n");
+
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ * To support ISA shared interrupts, we need to have one interrupt
+ * handler that ensures that the IRQ line has been deasserted
+ * before returning. Failing to do this will result in the IRQ
+ * line being stuck active, and, since ISA irqs are edge triggered,
+ * no more IRQs will be seen.
+ */
+static void serial_do_unlink(struct irq_info *i, struct uart_8250_port *up)
+{
+ spin_lock_irq(&i->lock);
+
+ if (!list_empty(i->head)) {
+ if (i->head == &up->list)
+ i->head = i->head->next;
+ list_del(&up->list);
+ } else {
+ BUG_ON(i->head != &up->list);
+ i->head = NULL;
+ }
+ spin_unlock_irq(&i->lock);
+ /* List empty so throw away the hash node */
+ if (i->head == NULL) {
+ hlist_del(&i->node);
+ kfree(i);
+ }
+}
+
+static int serial_link_irq_chain(struct uart_8250_port *up)
+{
+ struct hlist_head *h;
+ struct hlist_node *n;
+ struct irq_info *i;
+ int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
+
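+	/*
+	 * Find (or allocate) the irq_info bucket for this IRQ; every port
+	 * sharing the line hangs off its list and is serviced by a single
+	 * serial8250_interrupt() handler.
+	 */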
+ mutex_lock(&hash_mutex);
+
+ h = &irq_lists[up->port.irq % NR_IRQ_HASH];
+
+ hlist_for_each(n, h) {
+ i = hlist_entry(n, struct irq_info, node);
+ if (i->irq == up->port.irq)
+ break;
+ }
+
+ if (n == NULL) {
+ i = kzalloc(sizeof(struct irq_info), GFP_KERNEL);
+ if (i == NULL) {
+ mutex_unlock(&hash_mutex);
+ return -ENOMEM;
+ }
+ spin_lock_init(&i->lock);
+ i->irq = up->port.irq;
+ hlist_add_head(&i->node, h);
+ }
+ mutex_unlock(&hash_mutex);
+
+ spin_lock_irq(&i->lock);
+
+ if (i->head) {
+ list_add(&up->list, i->head);
+ spin_unlock_irq(&i->lock);
+
+ ret = 0;
+ } else {
+ INIT_LIST_HEAD(&up->list);
+ i->head = &up->list;
+ spin_unlock_irq(&i->lock);
+ irq_flags |= up->port.irqflags;
+ ret = request_irq(up->port.irq, serial8250_interrupt,
+ irq_flags, "serial", i);
+ if (ret < 0)
+ serial_do_unlink(i, up);
+ }
+
+ return ret;
+}
+
+static void serial_unlink_irq_chain(struct uart_8250_port *up)
+{
+ struct irq_info *i;
+ struct hlist_node *n;
+ struct hlist_head *h;
+
+ mutex_lock(&hash_mutex);
+
+ h = &irq_lists[up->port.irq % NR_IRQ_HASH];
+
+ hlist_for_each(n, h) {
+ i = hlist_entry(n, struct irq_info, node);
+ if (i->irq == up->port.irq)
+ break;
+ }
+
+ BUG_ON(n == NULL);
+ BUG_ON(i->head == NULL);
+
+ if (list_empty(i->head))
+ free_irq(up->port.irq, i);
+
+ serial_do_unlink(i, up);
+ mutex_unlock(&hash_mutex);
+}
+
+/*
+ * This function is used to handle ports that do not have an
+ * interrupt.  This doesn't work very well for 16450s, but gives
+ * barely passable results for a 16550A (although at the expense
+ * of much CPU overhead).
+ */
+static void serial8250_timeout(unsigned long data)
+{
+ struct uart_8250_port *up = (struct uart_8250_port *)data;
+ unsigned int iir;
+
+ iir = serial_in(up, UART_IIR);
+ if (!(iir & UART_IIR_NO_INT))
+ serial8250_handle_port(up);
+ mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port));
+}
+
+static void serial8250_backup_timeout(unsigned long data)
+{
+ struct uart_8250_port *up = (struct uart_8250_port *)data;
+ unsigned int iir, ier = 0, lsr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * Must disable interrupts or else we risk racing with the interrupt
+ * based handler.
+ */
+ if (is_real_interrupt(up->port.irq)) {
+ ier = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, 0);
+ }
+
+ iir = serial_in(up, UART_IIR);
+
+ /*
+ * This should be a safe test for anyone who doesn't trust the
+ * IIR bits on their UART, but it's specifically designed for
+ * the "Diva" UART used on the management processor on many HP
+ * ia64 and parisc boxes.
+ */
+ lsr = serial_in(up, UART_LSR);
+ up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
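+	/*
+	 * If the UART claims no interrupt is pending but the transmitter
+	 * is empty with data still queued, fake a THRI indication so the
+	 * transmit path below keeps the port moving.
+	 */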
+ if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
+ (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&
+ (lsr & UART_LSR_THRE)) {
+ iir &= ~(UART_IIR_ID | UART_IIR_NO_INT);
+ iir |= UART_IIR_THRI;
+ }
+
+ if (!(iir & UART_IIR_NO_INT))
+ transmit_chars(up);
+
+ if (is_real_interrupt(up->port.irq))
+ serial_out(up, UART_IER, ier);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /* Standard timer interval plus 0.2s to keep the port running */
+ mod_timer(&up->timer,
+ jiffies + uart_poll_timeout(&up->port) + HZ / 5);
+}
+
+static unsigned int serial8250_tx_empty(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ unsigned long flags;
+ unsigned int lsr;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ lsr = serial_in(up, UART_LSR);
+ up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int serial8250_get_mctrl(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ unsigned int status;
+ unsigned int ret;
+
+ status = check_modem_status(up);
+
+ ret = 0;
+ if (status & UART_MSR_DCD)
+ ret |= TIOCM_CAR;
+ if (status & UART_MSR_RI)
+ ret |= TIOCM_RNG;
+ if (status & UART_MSR_DSR)
+ ret |= TIOCM_DSR;
+ if (status & UART_MSR_CTS)
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ unsigned char mcr = 0;
+
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_OUT1)
+ mcr |= UART_MCR_OUT1;
+ if (mctrl & TIOCM_OUT2)
+ mcr |= UART_MCR_OUT2;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
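+	/* Merge in any platform-forced bits and the driver's saved MCR state. */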
+ mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
+
+ serial_out(up, UART_MCR, mcr);
+}
+
+static void serial8250_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+ else
+ up->lcr &= ~UART_LCR_SBC;
+ serial_out(up, UART_LCR, up->lcr);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+{
+ unsigned int status, tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ for (;;) {
+ status = serial_in(up, UART_LSR);
+
+ up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
+
+ if ((status & bits) == bits)
+ break;
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ }
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ unsigned int tmout;
+ for (tmout = 1000000; tmout; tmout--) {
+ unsigned int msr = serial_in(up, UART_MSR);
+ up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
+ if (msr & UART_MSR_CTS)
+ break;
+ udelay(1);
+ touch_nmi_watchdog();
+ }
+ }
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+/*
+ * Console polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+static int serial8250_get_poll_char(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ unsigned char lsr = serial_inp(up, UART_LSR);
+
+ if (!(lsr & UART_LSR_DR))
+ return NO_POLL_CHAR;
+
+ return serial_inp(up, UART_RX);
+}
+
+
+static void serial8250_put_poll_char(struct uart_port *port,
+ unsigned char c)
+{
+ unsigned int ier;
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = serial_in(up, UART_IER);
+ if (up->capabilities & UART_CAP_UUE)
+ serial_out(up, UART_IER, UART_IER_UUE);
+ else
+ serial_out(up, UART_IER, 0);
+
+ wait_for_xmitr(up, BOTH_EMPTY);
+ /*
+ * Send the character out.
+ * If a LF, also do CR...
+ */
+ serial_out(up, UART_TX, c);
+ if (c == 10) {
+ wait_for_xmitr(up, BOTH_EMPTY);
+ serial_out(up, UART_TX, 13);
+ }
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up, BOTH_EMPTY);
+ serial_out(up, UART_IER, ier);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
+static int serial8250_startup(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ unsigned long flags;
+ unsigned char lsr, iir;
+ int retval;
+
+ up->port.fifosize = uart_config[up->port.type].fifo_size;
+ up->tx_loadsz = uart_config[up->port.type].tx_loadsz;
+ up->capabilities = uart_config[up->port.type].flags;
+ up->mcr = 0;
+
+ if (up->port.iotype != up->cur_iotype)
+ set_io_from_upio(port);
+
+ if (up->port.type == PORT_16C950) {
+ /* Wake up and initialize UART */
+ up->acr = 0;
+ serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_outp(up, UART_EFR, UART_EFR_ECB);
+ serial_outp(up, UART_IER, 0);
+ serial_outp(up, UART_LCR, 0);
+ serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
+ serial_outp(up, UART_LCR, 0xBF);
+ serial_outp(up, UART_EFR, UART_EFR_ECB);
+ serial_outp(up, UART_LCR, 0);
+ }
+
+#ifdef CONFIG_SERIAL_8250_RSA
+ /*
+ * If this is an RSA port, see if we can kick it up to the
+ * higher speed clock.
+ */
+ enable_rsa(up);
+#endif
+
+ /*
+ * Clear the FIFO buffers and disable them.
+ * (they will be reenabled in set_termios())
+ */
+ serial8250_clear_fifos(up);
+
+ /*
+ * Clear the interrupt registers.
+ */
+ (void) serial_inp(up, UART_LSR);
+ (void) serial_inp(up, UART_RX);
+ (void) serial_inp(up, UART_IIR);
+ (void) serial_inp(up, UART_MSR);
+
+ /*
+ * At this point, there's no way the LSR could still be 0xff;
+ * if it is, then bail out, because there's likely no UART
+ * here.
+ */
+ if (!(up->port.flags & UPF_BUGGY_UART) &&
+ (serial_inp(up, UART_LSR) == 0xff)) {
+ printk(KERN_INFO "ttyS%d: LSR safety check engaged!\n",
+ serial_index(&up->port));
+ return -ENODEV;
+ }
+
+ /*
+	 * For an XR16C850, we need to set the trigger levels.
+ */
+ if (up->port.type == PORT_16850) {
+ unsigned char fctr;
+
+ serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ fctr = serial_inp(up, UART_FCTR) & ~(UART_FCTR_RX|UART_FCTR_TX);
+ serial_outp(up, UART_FCTR, fctr | UART_FCTR_TRGD | UART_FCTR_RX);
+ serial_outp(up, UART_TRG, UART_TRG_96);
+ serial_outp(up, UART_FCTR, fctr | UART_FCTR_TRGD | UART_FCTR_TX);
+ serial_outp(up, UART_TRG, UART_TRG_96);
+
+ serial_outp(up, UART_LCR, 0);
+ }
+
+ if (is_real_interrupt(up->port.irq)) {
+ unsigned char iir1;
+ /*
+ * Test for UARTs that do not reassert THRE when the
+ * transmitter is idle and the interrupt has already
+ * been cleared. Real 16550s should always reassert
+ * this interrupt whenever the transmitter is idle and
+ * the interrupt is enabled. Delays are necessary to
+ * allow register changes to become visible.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (up->port.irqflags & IRQF_SHARED)
+ disable_irq_nosync(up->port.irq);
+
+ wait_for_xmitr(up, UART_LSR_THRE);
+ serial_out_sync(up, UART_IER, UART_IER_THRI);
+ udelay(1); /* allow THRE to set */
+ iir1 = serial_in(up, UART_IIR);
+ serial_out(up, UART_IER, 0);
+ serial_out_sync(up, UART_IER, UART_IER_THRI);
+ udelay(1); /* allow a working UART time to re-assert THRE */
+ iir = serial_in(up, UART_IIR);
+ serial_out(up, UART_IER, 0);
+
+ if (up->port.irqflags & IRQF_SHARED)
+ enable_irq(up->port.irq);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /*
+ * If the interrupt is not reasserted, setup a timer to
+ * kick the UART on a regular basis.
+ */
+ if (!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) {
+ up->bugs |= UART_BUG_THRE;
+ pr_debug("ttyS%d - using backup timer\n",
+ serial_index(port));
+ }
+ }
+
+ /*
+ * The above check will only give an accurate result the first time
+ * the port is opened so this value needs to be preserved.
+ */
+ if (up->bugs & UART_BUG_THRE) {
+ up->timer.function = serial8250_backup_timeout;
+ up->timer.data = (unsigned long)up;
+ mod_timer(&up->timer, jiffies +
+ uart_poll_timeout(port) + HZ / 5);
+ }
+
+ /*
+ * If the "interrupt" for this port doesn't correspond with any
+ * hardware interrupt, we use a timer-based system. The original
+ * driver used to do this with IRQ0.
+ */
+ if (!is_real_interrupt(up->port.irq)) {
+ up->timer.data = (unsigned long)up;
+ mod_timer(&up->timer, jiffies + uart_poll_timeout(port));
+ } else {
+ retval = serial_link_irq_chain(up);
+ if (retval)
+ return retval;
+ }
+
+ /*
+ * Now, initialize the UART
+ */
+ serial_outp(up, UART_LCR, UART_LCR_WLEN8);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (up->port.flags & UPF_FOURPORT) {
+ if (!is_real_interrupt(up->port.irq))
+ up->port.mctrl |= TIOCM_OUT1;
+ } else
+ /*
+ * Most PC uarts need OUT2 raised to enable interrupts.
+ */
+ if (is_real_interrupt(up->port.irq))
+ up->port.mctrl |= TIOCM_OUT2;
+
+ serial8250_set_mctrl(&up->port, up->port.mctrl);
+
+	/* Serial over LAN (SoL) hack:
+	   Intel 8257x Gigabit ethernet chips have a
+	   16550 emulation, to be used for Serial over LAN.
+	   Those chips take a longer time than a normal
+	   serial device to signal that transmit data has
+	   been queued.  Due to that, the above test generally
+	   fails.  One solution would be to delay the reading of
+	   iir.  However, this is not reliable, since the timeout
+	   is variable.  So, let's just not test whether we receive a
+	   TX irq.  This way, we'll never enable UART_BUG_TXEN.
+ */
+ if (skip_txen_test || up->port.flags & UPF_NO_TXEN_TEST)
+ goto dont_test_tx_en;
+
+ /*
+ * Do a quick test to see if we receive an
+ * interrupt when we enable the TX irq.
+ */
+ serial_outp(up, UART_IER, UART_IER_THRI);
+ lsr = serial_in(up, UART_LSR);
+ iir = serial_in(up, UART_IIR);
+ serial_outp(up, UART_IER, 0);
+
+ if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
+ if (!(up->bugs & UART_BUG_TXEN)) {
+ up->bugs |= UART_BUG_TXEN;
+ pr_debug("ttyS%d - enabling bad tx status workarounds\n",
+ serial_index(port));
+ }
+ } else {
+ up->bugs &= ~UART_BUG_TXEN;
+ }
+
+dont_test_tx_en:
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /*
+ * Clear the interrupt registers again for luck, and clear the
+ * saved flags to avoid getting false values from polling
+ * routines or the previous session.
+ */
+ serial_inp(up, UART_LSR);
+ serial_inp(up, UART_RX);
+ serial_inp(up, UART_IIR);
+ serial_inp(up, UART_MSR);
+ up->lsr_saved_flags = 0;
+ up->msr_saved_flags = 0;
+
+ /*
+ * Finally, enable interrupts. Note: Modem status interrupts
+ * are set via set_termios(), which will be occurring imminently
+ * anyway, so we don't enable them here.
+ */
+ up->ier = UART_IER_RLSI | UART_IER_RDI;
+ serial_outp(up, UART_IER, up->ier);
+
+ if (up->port.flags & UPF_FOURPORT) {
+ unsigned int icp;
+ /*
+ * Enable interrupts on the AST Fourport board
+ */
+ icp = (up->port.iobase & 0xfe0) | 0x01f;
+ outb_p(0x80, icp);
+ (void) inb_p(icp);
+ }
+
+ return 0;
+}
+
+static void serial8250_shutdown(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ unsigned long flags;
+
+ /*
+ * Disable interrupts from this port
+ */
+ up->ier = 0;
+ serial_outp(up, UART_IER, 0);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (up->port.flags & UPF_FOURPORT) {
+ /* reset interrupts on the AST Fourport board */
+ inb((up->port.iobase & 0xfe0) | 0x1f);
+ up->port.mctrl |= TIOCM_OUT1;
+ } else
+ up->port.mctrl &= ~TIOCM_OUT2;
+
+ serial8250_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /*
+ * Disable break condition and FIFOs
+ */
+ serial_out(up, UART_LCR, serial_inp(up, UART_LCR) & ~UART_LCR_SBC);
+ serial8250_clear_fifos(up);
+
+#ifdef CONFIG_SERIAL_8250_RSA
+ /*
+ * Reset the RSA board back to 115kbps compat mode.
+ */
+ disable_rsa(up);
+#endif
+
+ /*
+ * Read data port to reset things, and then unlink from
+ * the IRQ chain.
+ */
+ (void) serial_in(up, UART_RX);
+
+ del_timer_sync(&up->timer);
+ up->timer.function = serial8250_timeout;
+ if (is_real_interrupt(up->port.irq))
+ serial_unlink_irq_chain(up);
+}
+
+static unsigned int serial8250_get_divisor(struct uart_port *port, unsigned int baud)
+{
+ unsigned int quot;
+
+ /*
+ * Handle magic divisors for baud rates above baud_base on
+ * SMSC SuperIO chips.
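+	 * A quotient of 0x8001 selects uartclk/4 and 0x8002 selects
+	 * uartclk/8 on those parts.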
+ */
+ if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
+ baud == (port->uartclk/4))
+ quot = 0x8001;
+ else if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
+ baud == (port->uartclk/8))
+ quot = 0x8002;
+ else
+ quot = uart_get_divisor(port, baud);
+
+ return quot;
+}
+
+void
+serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ unsigned char cval, fcr = 0;
+ unsigned long flags;
+ unsigned int baud, quot;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ cval = UART_LCR_WLEN5;
+ break;
+ case CS6:
+ cval = UART_LCR_WLEN6;
+ break;
+ case CS7:
+ cval = UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ cval = UART_LCR_WLEN8;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ cval |= UART_LCR_STOP;
+ if (termios->c_cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(termios->c_cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+#ifdef CMSPAR
+ if (termios->c_cflag & CMSPAR)
+ cval |= UART_LCR_SPAR;
+#endif
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old,
+ port->uartclk / 16 / 0xffff,
+ port->uartclk / 16);
+ quot = serial8250_get_divisor(port, baud);
+
+ /*
+ * Oxford Semi 952 rev B workaround
+ */
+ if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0)
+ quot++;
+
+ if (up->capabilities & UART_CAP_FIFO && up->port.fifosize > 1) {
+ if (baud < 2400)
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
+ else
+ fcr = uart_config[up->port.type].fcr;
+ }
+
+ /*
+ * MCR-based auto flow control. When AFE is enabled, RTS will be
+ * deasserted when the receive FIFO contains more characters than
+ * the trigger, or the MCR RTS bit is cleared. In the case where
+ * the remote UART is not using CTS auto flow control, we must
+ * have sufficient FIFO entries for the latency of the remote
+ * UART to respond. IOW, at least 32 bytes of FIFO.
+ */
+ if (up->capabilities & UART_CAP_AFE && up->port.fifosize >= 32) {
+ up->mcr &= ~UART_MCR_AFE;
+ if (termios->c_cflag & CRTSCTS)
+ up->mcr |= UART_MCR_AFE;
+ }
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ up->port.read_status_mask |= UART_LSR_BI;
+
+ /*
+	 * Characters to ignore
+ */
+ up->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (termios->c_iflag & IGNBRK) {
+ up->port.ignore_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_OE;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= UART_LSR_DR;
+
+ /*
+ * CTS flow control flag and modem status interrupts
+ */
+ up->ier &= ~UART_IER_MSI;
+ if (!(up->bugs & UART_BUG_NOMSR) &&
+ UART_ENABLE_MS(&up->port, termios->c_cflag))
+ up->ier |= UART_IER_MSI;
+ if (up->capabilities & UART_CAP_UUE)
+ up->ier |= UART_IER_UUE;
+ if (up->capabilities & UART_CAP_RTOIE)
+ up->ier |= UART_IER_RTOIE;
+
+ serial_out(up, UART_IER, up->ier);
+
+ if (up->capabilities & UART_CAP_EFR) {
+ unsigned char efr = 0;
+ /*
+ * TI16C752/Startech hardware flow control. FIXME:
+ * - TI16C752 requires control thresholds to be set.
+ * - UART_MCR_RTS is ineffective if auto-RTS mode is enabled.
+ */
+ if (termios->c_cflag & CRTSCTS)
+ efr |= UART_EFR_CTS;
+
+ serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_outp(up, UART_EFR, efr);
+ }
+
+#ifdef CONFIG_ARCH_OMAP
+ /* Workaround to enable 115200 baud on OMAP1510 internal ports */
+ if (cpu_is_omap1510() && is_omap_port(up)) {
+ if (baud == 115200) {
+ quot = 1;
+ serial_out(up, UART_OMAP_OSC_12M_SEL, 1);
+ } else
+ serial_out(up, UART_OMAP_OSC_12M_SEL, 0);
+ }
+#endif
+
+ if (up->capabilities & UART_NATSEMI) {
+ /* Switch to bank 2 not bank 1, to avoid resetting EXCR2 */
+ serial_outp(up, UART_LCR, 0xe0);
+ } else {
+ serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
+ }
+
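+	/* Program the divisor latch with the quotient computed above. */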
+ serial_dl_write(up, quot);
+
+ /*
+ * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
+ * is written without DLAB set, this mode will be disabled.
+ */
+ if (up->port.type == PORT_16750)
+ serial_outp(up, UART_FCR, fcr);
+
+ serial_outp(up, UART_LCR, cval); /* reset DLAB */
+ up->lcr = cval; /* Save LCR */
+ if (up->port.type != PORT_16750) {
+ if (fcr & UART_FCR_ENABLE_FIFO) {
+ /* emulated UARTs (Lucent Venus 167x) need two steps */
+ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ }
+ serial_outp(up, UART_FCR, fcr); /* set fcr */
+ }
+ serial8250_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ /* Don't rewrite B0 */
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+}
+EXPORT_SYMBOL(serial8250_do_set_termios);
+
+static void
+serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ if (port->set_termios)
+ port->set_termios(port, termios, old);
+ else
+ serial8250_do_set_termios(port, termios, old);
+}
+
+static void
+serial8250_set_ldisc(struct uart_port *port, int new)
+{
+ if (new == N_PPS) {
+ port->flags |= UPF_HARDPPS_CD;
+ serial8250_enable_ms(port);
+ } else
+ port->flags &= ~UPF_HARDPPS_CD;
+}
+
+
+void serial8250_do_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct uart_8250_port *p =
+ container_of(port, struct uart_8250_port, port);
+
+ serial8250_set_sleep(p, state != 0);
+}
+EXPORT_SYMBOL(serial8250_do_pm);
+
+static void
+serial8250_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ if (port->pm)
+ port->pm(port, state, oldstate);
+ else
+ serial8250_do_pm(port, state, oldstate);
+}
+
+static unsigned int serial8250_port_size(struct uart_8250_port *pt)
+{
+ if (pt->port.iotype == UPIO_AU)
+ return 0x1000;
+#ifdef CONFIG_ARCH_OMAP
+ if (is_omap_port(pt))
+ return 0x16 << pt->port.regshift;
+#endif
+ return 8 << pt->port.regshift;
+}
+
+/*
+ * Resource handling.
+ */
+static int serial8250_request_std_resource(struct uart_8250_port *up)
+{
+ unsigned int size = serial8250_port_size(up);
+ int ret = 0;
+
+ switch (up->port.iotype) {
+ case UPIO_AU:
+ case UPIO_TSI:
+ case UPIO_MEM32:
+ case UPIO_MEM:
+ case UPIO_DWAPB:
+ case UPIO_DWAPB32:
+ if (!up->port.mapbase)
+ break;
+
+ if (!request_mem_region(up->port.mapbase, size, "serial")) {
+ ret = -EBUSY;
+ break;
+ }
+
+ if (up->port.flags & UPF_IOREMAP) {
+ up->port.membase = ioremap_nocache(up->port.mapbase,
+ size);
+ if (!up->port.membase) {
+ release_mem_region(up->port.mapbase, size);
+ ret = -ENOMEM;
+ }
+ }
+ break;
+
+ case UPIO_HUB6:
+ case UPIO_PORT:
+ if (!request_region(up->port.iobase, size, "serial"))
+ ret = -EBUSY;
+ break;
+ }
+ return ret;
+}
+
+static void serial8250_release_std_resource(struct uart_8250_port *up)
+{
+ unsigned int size = serial8250_port_size(up);
+
+ switch (up->port.iotype) {
+ case UPIO_AU:
+ case UPIO_TSI:
+ case UPIO_MEM32:
+ case UPIO_MEM:
+ case UPIO_DWAPB:
+ case UPIO_DWAPB32:
+ if (!up->port.mapbase)
+ break;
+
+ if (up->port.flags & UPF_IOREMAP) {
+ iounmap(up->port.membase);
+ up->port.membase = NULL;
+ }
+
+ release_mem_region(up->port.mapbase, size);
+ break;
+
+ case UPIO_HUB6:
+ case UPIO_PORT:
+ release_region(up->port.iobase, size);
+ break;
+ }
+}
+
+static int serial8250_request_rsa_resource(struct uart_8250_port *up)
+{
+ unsigned long start = UART_RSA_BASE << up->port.regshift;
+ unsigned int size = 8 << up->port.regshift;
+ int ret = -EINVAL;
+
+ switch (up->port.iotype) {
+ case UPIO_HUB6:
+ case UPIO_PORT:
+ start += up->port.iobase;
+ if (request_region(start, size, "serial-rsa"))
+ ret = 0;
+ else
+ ret = -EBUSY;
+ break;
+ }
+
+ return ret;
+}
+
+static void serial8250_release_rsa_resource(struct uart_8250_port *up)
+{
+ unsigned long offset = UART_RSA_BASE << up->port.regshift;
+ unsigned int size = 8 << up->port.regshift;
+
+ switch (up->port.iotype) {
+ case UPIO_HUB6:
+ case UPIO_PORT:
+ release_region(up->port.iobase + offset, size);
+ break;
+ }
+}
+
+static void serial8250_release_port(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ serial8250_release_std_resource(up);
+ if (up->port.type == PORT_RSA)
+ serial8250_release_rsa_resource(up);
+}
+
+static int serial8250_request_port(struct uart_port *port)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ int ret = 0;
+
+ ret = serial8250_request_std_resource(up);
+ if (ret == 0 && up->port.type == PORT_RSA) {
+ ret = serial8250_request_rsa_resource(up);
+ if (ret < 0)
+ serial8250_release_std_resource(up);
+ }
+
+ return ret;
+}
+
+static void serial8250_config_port(struct uart_port *port, int flags)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+ int probeflags = PROBE_ANY;
+ int ret;
+
+ /*
+ * Find the region that we can probe for. This in turn
+ * tells us whether we can probe for the type of port.
+ */
+ ret = serial8250_request_std_resource(up);
+ if (ret < 0)
+ return;
+
+ ret = serial8250_request_rsa_resource(up);
+ if (ret < 0)
+ probeflags &= ~PROBE_RSA;
+
+ if (up->port.iotype != up->cur_iotype)
+ set_io_from_upio(port);
+
+ if (flags & UART_CONFIG_TYPE)
+ autoconfig(up, probeflags);
+
+ /* if access method is AU, it is a 16550 with a quirk */
+ if (up->port.type == PORT_16550A && up->port.iotype == UPIO_AU)
+ up->bugs |= UART_BUG_NOMSR;
+
+ if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
+ autoconfig_irq(up);
+
+ if (up->port.type != PORT_RSA && probeflags & PROBE_RSA)
+ serial8250_release_rsa_resource(up);
+ if (up->port.type == PORT_UNKNOWN)
+ serial8250_release_std_resource(up);
+}
+
+static int
+serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
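+	/*
+	 * Reject an out-of-range IRQ, an implausibly low baud_base, or a
+	 * port type this driver cannot handle.
+	 */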
+ if (ser->irq >= nr_irqs || ser->irq < 0 ||
+ ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
+ ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS ||
+ ser->type == PORT_STARTECH)
+ return -EINVAL;
+ return 0;
+}
+
+static const char *
+serial8250_type(struct uart_port *port)
+{
+ int type = port->type;
+
+ if (type >= ARRAY_SIZE(uart_config))
+ type = 0;
+ return uart_config[type].name;
+}
+
+static struct uart_ops serial8250_pops = {
+ .tx_empty = serial8250_tx_empty,
+ .set_mctrl = serial8250_set_mctrl,
+ .get_mctrl = serial8250_get_mctrl,
+ .stop_tx = serial8250_stop_tx,
+ .start_tx = serial8250_start_tx,
+ .stop_rx = serial8250_stop_rx,
+ .enable_ms = serial8250_enable_ms,
+ .break_ctl = serial8250_break_ctl,
+ .startup = serial8250_startup,
+ .shutdown = serial8250_shutdown,
+ .set_termios = serial8250_set_termios,
+ .set_ldisc = serial8250_set_ldisc,
+ .pm = serial8250_pm,
+ .type = serial8250_type,
+ .release_port = serial8250_release_port,
+ .request_port = serial8250_request_port,
+ .config_port = serial8250_config_port,
+ .verify_port = serial8250_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = serial8250_get_poll_char,
+ .poll_put_char = serial8250_put_poll_char,
+#endif
+};
+
+static struct uart_8250_port serial8250_ports[UART_NR];
+
+static void (*serial8250_isa_config)(int port, struct uart_port *up,
+ unsigned short *capabilities);
+
+void serial8250_set_isa_configurator(
+ void (*v)(int port, struct uart_port *up, unsigned short *capabilities))
+{
+ serial8250_isa_config = v;
+}
+EXPORT_SYMBOL(serial8250_set_isa_configurator);
+
+static void __init serial8250_isa_init_ports(void)
+{
+ struct uart_8250_port *up;
+ static int first = 1;
+ int i, irqflag = 0;
+
+ if (!first)
+ return;
+ first = 0;
+
+ for (i = 0; i < nr_uarts; i++) {
+ struct uart_8250_port *up = &serial8250_ports[i];
+
+ up->port.line = i;
+ spin_lock_init(&up->port.lock);
+
+ init_timer(&up->timer);
+ up->timer.function = serial8250_timeout;
+
+ /*
+ * ALPHA_KLUDGE_MCR needs to be killed.
+ */
+ up->mcr_mask = ~ALPHA_KLUDGE_MCR;
+ up->mcr_force = ALPHA_KLUDGE_MCR;
+
+ up->port.ops = &serial8250_pops;
+ }
+
+ if (share_irqs)
+ irqflag = IRQF_SHARED;
+
+ for (i = 0, up = serial8250_ports;
+ i < ARRAY_SIZE(old_serial_port) && i < nr_uarts;
+ i++, up++) {
+ up->port.iobase = old_serial_port[i].port;
+ up->port.irq = irq_canonicalize(old_serial_port[i].irq);
+ up->port.irqflags = old_serial_port[i].irqflags;
+ up->port.uartclk = old_serial_port[i].baud_base * 16;
+ up->port.flags = old_serial_port[i].flags;
+ up->port.hub6 = old_serial_port[i].hub6;
+ up->port.membase = old_serial_port[i].iomem_base;
+ up->port.iotype = old_serial_port[i].io_type;
+ up->port.regshift = old_serial_port[i].iomem_reg_shift;
+ set_io_from_upio(&up->port);
+ up->port.irqflags |= irqflag;
+ if (serial8250_isa_config != NULL)
+ serial8250_isa_config(i, &up->port, &up->capabilities);
+
+ }
+}
+
+static void
+serial8250_init_fixed_type_port(struct uart_8250_port *up, unsigned int type)
+{
+ up->port.type = type;
+ up->port.fifosize = uart_config[type].fifo_size;
+ up->capabilities = uart_config[type].flags;
+ up->tx_loadsz = uart_config[type].tx_loadsz;
+}
+
+static void __init
+serial8250_register_ports(struct uart_driver *drv, struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < nr_uarts; i++) {
+ struct uart_8250_port *up = &serial8250_ports[i];
+ up->cur_iotype = 0xFF;
+ }
+
+ serial8250_isa_init_ports();
+
+ for (i = 0; i < nr_uarts; i++) {
+ struct uart_8250_port *up = &serial8250_ports[i];
+
+ up->port.dev = dev;
+
+ if (up->port.flags & UPF_FIXED_TYPE)
+ serial8250_init_fixed_type_port(up, up->port.type);
+
+ uart_add_one_port(drv, &up->port);
+ }
+}
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+
+static void serial8250_console_putchar(struct uart_port *port, int ch)
+{
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ wait_for_xmitr(up, UART_LSR_THRE);
+ serial_out(up, UART_TX, ch);
+}
+
+/*
+ * Print a string to the serial port, trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+static void
+serial8250_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_8250_port *up = &serial8250_ports[co->index];
+ unsigned long flags;
+ unsigned int ier;
+ int locked = 1;
+
+ touch_nmi_watchdog();
+
+ local_irq_save(flags);
+ if (up->port.sysrq) {
+ /* serial8250_handle_port() already took the lock */
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&up->port.lock);
+ } else
+ spin_lock(&up->port.lock);
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = serial_in(up, UART_IER);
+
+ if (up->capabilities & UART_CAP_UUE)
+ serial_out(up, UART_IER, UART_IER_UUE);
+ else
+ serial_out(up, UART_IER, 0);
+
+ uart_console_write(&up->port, s, count, serial8250_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up, BOTH_EMPTY);
+ serial_out(up, UART_IER, ier);
+
+ /*
+ * The receive handling will happen properly because the
+ * receive ready bit will still be set; it is not cleared
+	 * on read.  Modem status handling, however, will not, so we
+	 * must call it ourselves if we saved anything in the flags
+	 * while processing with interrupts off.
+ */
+ if (up->msr_saved_flags)
+ check_modem_status(up);
+
+ if (locked)
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
+}
+
+static int __init serial8250_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index >= nr_uarts)
+ co->index = 0;
+ port = &serial8250_ports[co->index].port;
+ if (!port->iobase && !port->membase)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static int serial8250_console_early_setup(void)
+{
+ return serial8250_find_port_for_earlycon();
+}
+
+static struct console serial8250_console = {
+ .name = "ttyS",
+ .write = serial8250_console_write,
+ .device = uart_console_device,
+ .setup = serial8250_console_setup,
+ .early_setup = serial8250_console_early_setup,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME,
+ .index = -1,
+ .data = &serial8250_reg,
+};
+
+static int __init serial8250_console_init(void)
+{
+ if (nr_uarts > UART_NR)
+ nr_uarts = UART_NR;
+
+ serial8250_isa_init_ports();
+ register_console(&serial8250_console);
+ return 0;
+}
+console_initcall(serial8250_console_init);
+
+int serial8250_find_port(struct uart_port *p)
+{
+ int line;
+ struct uart_port *port;
+
+ for (line = 0; line < nr_uarts; line++) {
+ port = &serial8250_ports[line].port;
+ if (uart_match_port(p, port))
+ return line;
+ }
+ return -ENODEV;
+}
+
+#define SERIAL8250_CONSOLE &serial8250_console
+#else
+#define SERIAL8250_CONSOLE NULL
+#endif
+
+static struct uart_driver serial8250_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "serial",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .cons = SERIAL8250_CONSOLE,
+};
+
+/*
+ * early_serial_setup - early registration for 8250 ports
+ *
+ * Setup an 8250 port structure prior to console initialisation. Use
+ * after console initialisation will cause undefined behaviour.
+ */
+int __init early_serial_setup(struct uart_port *port)
+{
+ struct uart_port *p;
+
+ if (port->line >= ARRAY_SIZE(serial8250_ports))
+ return -ENODEV;
+
+ serial8250_isa_init_ports();
+ p = &serial8250_ports[port->line].port;
+ p->iobase = port->iobase;
+ p->membase = port->membase;
+ p->irq = port->irq;
+ p->irqflags = port->irqflags;
+ p->uartclk = port->uartclk;
+ p->fifosize = port->fifosize;
+ p->regshift = port->regshift;
+ p->iotype = port->iotype;
+ p->flags = port->flags;
+ p->mapbase = port->mapbase;
+ p->private_data = port->private_data;
+ p->type = port->type;
+ p->line = port->line;
+
+ set_io_from_upio(p);
+ if (port->serial_in)
+ p->serial_in = port->serial_in;
+ if (port->serial_out)
+ p->serial_out = port->serial_out;
+
+ return 0;
+}
+
+/**
+ * serial8250_suspend_port - suspend one serial port
+ * @line: serial line number
+ *
+ * Suspend one serial port.
+ */
+void serial8250_suspend_port(int line)
+{
+ uart_suspend_port(&serial8250_reg, &serial8250_ports[line].port);
+}
+
+/**
+ * serial8250_resume_port - resume one serial port
+ * @line: serial line number
+ *
+ * Resume one serial port.
+ */
+void serial8250_resume_port(int line)
+{
+ struct uart_8250_port *up = &serial8250_ports[line];
+
+ if (up->capabilities & UART_NATSEMI) {
+ /* Ensure it's still in high speed mode */
+ serial_outp(up, UART_LCR, 0xE0);
+
+ ns16550a_goto_highspeed(up);
+
+ serial_outp(up, UART_LCR, 0);
+ up->port.uartclk = 921600*16;
+ }
+ uart_resume_port(&serial8250_reg, &up->port);
+}
+
+/*
+ * Register a set of serial devices attached to a platform device. The
+ * list is terminated with a zero flags entry, which means we expect
+ * all entries to have at least UPF_BOOT_AUTOCONF set.
+ */
+static int __devinit serial8250_probe(struct platform_device *dev)
+{
+ struct plat_serial8250_port *p = dev->dev.platform_data;
+ struct uart_port port;
+ int ret, i, irqflag = 0;
+
+ memset(&port, 0, sizeof(struct uart_port));
+
+ if (share_irqs)
+ irqflag = IRQF_SHARED;
+
+ for (i = 0; p && p->flags != 0; p++, i++) {
+ port.iobase = p->iobase;
+ port.membase = p->membase;
+ port.irq = p->irq;
+ port.irqflags = p->irqflags;
+ port.uartclk = p->uartclk;
+ port.regshift = p->regshift;
+ port.iotype = p->iotype;
+ port.flags = p->flags;
+ port.mapbase = p->mapbase;
+ port.hub6 = p->hub6;
+ port.private_data = p->private_data;
+ port.type = p->type;
+ port.serial_in = p->serial_in;
+ port.serial_out = p->serial_out;
+ port.set_termios = p->set_termios;
+ port.pm = p->pm;
+ port.dev = &dev->dev;
+ port.irqflags |= irqflag;
+ ret = serial8250_register_port(&port);
+ if (ret < 0) {
+ dev_err(&dev->dev, "unable to register port at index %d "
+ "(IO%lx MEM%llx IRQ%d): %d\n", i,
+ p->iobase, (unsigned long long)p->mapbase,
+ p->irq, ret);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Remove serial ports registered against a platform device.
+ */
+static int __devexit serial8250_remove(struct platform_device *dev)
+{
+ int i;
+
+ for (i = 0; i < nr_uarts; i++) {
+ struct uart_8250_port *up = &serial8250_ports[i];
+
+ if (up->port.dev == &dev->dev)
+ serial8250_unregister_port(i);
+ }
+ return 0;
+}
+
+static int serial8250_suspend(struct platform_device *dev, pm_message_t state)
+{
+ int i;
+
+ for (i = 0; i < UART_NR; i++) {
+ struct uart_8250_port *up = &serial8250_ports[i];
+
+ if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
+ uart_suspend_port(&serial8250_reg, &up->port);
+ }
+
+ return 0;
+}
+
+static int serial8250_resume(struct platform_device *dev)
+{
+ int i;
+
+ for (i = 0; i < UART_NR; i++) {
+ struct uart_8250_port *up = &serial8250_ports[i];
+
+ if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
+ serial8250_resume_port(i);
+ }
+
+ return 0;
+}
+
+static struct platform_driver serial8250_isa_driver = {
+ .probe = serial8250_probe,
+ .remove = __devexit_p(serial8250_remove),
+ .suspend = serial8250_suspend,
+ .resume = serial8250_resume,
+ .driver = {
+ .name = "serial8250",
+ .owner = THIS_MODULE,
+ },
+};
+
+/*
+ * This "device" covers _all_ ISA 8250-compatible serial devices listed
+ * in the table in include/asm/serial.h
+ */
+static struct platform_device *serial8250_isa_devs;
+
+/*
+ * serial8250_register_port and serial8250_unregister_port allow
+ * 16x50 serial ports to be configured at run-time, to support PCMCIA
+ * modems and PCI multiport cards.
+ */
+static DEFINE_MUTEX(serial_mutex);
+
+static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *port)
+{
+ int i;
+
+ /*
+ * First, find a port entry which matches.
+ */
+ for (i = 0; i < nr_uarts; i++)
+ if (uart_match_port(&serial8250_ports[i].port, port))
+ return &serial8250_ports[i];
+
+ /*
+ * We didn't find a matching entry, so look for the first
+ * free entry. We look for one which hasn't been previously
+ * used (indicated by zero iobase).
+ */
+ for (i = 0; i < nr_uarts; i++)
+ if (serial8250_ports[i].port.type == PORT_UNKNOWN &&
+ serial8250_ports[i].port.iobase == 0)
+ return &serial8250_ports[i];
+
+ /*
+ * That also failed. Last resort is to find any entry which
+ * doesn't have a real port associated with it.
+ */
+ for (i = 0; i < nr_uarts; i++)
+ if (serial8250_ports[i].port.type == PORT_UNKNOWN)
+ return &serial8250_ports[i];
+
+ return NULL;
+}
+
+/**
+ * serial8250_register_port - register a serial port
+ * @port: serial port template
+ *
+ * Configure the serial port specified by the request. If the
+ * port exists and is in use, it is hung up and unregistered
+ * first.
+ *
+ *	The port is then probed and, if necessary, the IRQ is autodetected.
+ * If this fails an error is returned.
+ *
+ * On success the port is ready to use and the line number is returned.
+ */
+int serial8250_register_port(struct uart_port *port)
+{
+ struct uart_8250_port *uart;
+ int ret = -ENOSPC;
+
+ if (port->uartclk == 0)
+ return -EINVAL;
+
+ mutex_lock(&serial_mutex);
+
+ uart = serial8250_find_match_or_unused(port);
+ if (uart) {
+ uart_remove_one_port(&serial8250_reg, &uart->port);
+
+ uart->port.iobase = port->iobase;
+ uart->port.membase = port->membase;
+ uart->port.irq = port->irq;
+ uart->port.irqflags = port->irqflags;
+ uart->port.uartclk = port->uartclk;
+ uart->port.fifosize = port->fifosize;
+ uart->port.regshift = port->regshift;
+ uart->port.iotype = port->iotype;
+ uart->port.flags = port->flags | UPF_BOOT_AUTOCONF;
+ uart->port.mapbase = port->mapbase;
+ uart->port.private_data = port->private_data;
+ if (port->dev)
+ uart->port.dev = port->dev;
+
+ if (port->flags & UPF_FIXED_TYPE)
+ serial8250_init_fixed_type_port(uart, port->type);
+
+ set_io_from_upio(&uart->port);
+ /* Possibly override default I/O functions. */
+ if (port->serial_in)
+ uart->port.serial_in = port->serial_in;
+ if (port->serial_out)
+ uart->port.serial_out = port->serial_out;
+ /* Possibly override set_termios call */
+ if (port->set_termios)
+ uart->port.set_termios = port->set_termios;
+ if (port->pm)
+ uart->port.pm = port->pm;
+
+ if (serial8250_isa_config != NULL)
+ serial8250_isa_config(0, &uart->port,
+ &uart->capabilities);
+
+ ret = uart_add_one_port(&serial8250_reg, &uart->port);
+ if (ret == 0)
+ ret = uart->port.line;
+ }
+ mutex_unlock(&serial_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(serial8250_register_port);
+
+/**
+ * serial8250_unregister_port - remove a 16x50 serial port at runtime
+ * @line: serial line number
+ *
+ * Remove one serial port. This may not be called from interrupt
+ *	context. We hand the port back to our control.
+ */
+void serial8250_unregister_port(int line)
+{
+ struct uart_8250_port *uart = &serial8250_ports[line];
+
+ mutex_lock(&serial_mutex);
+ uart_remove_one_port(&serial8250_reg, &uart->port);
+ if (serial8250_isa_devs) {
+ uart->port.flags &= ~UPF_BOOT_AUTOCONF;
+ uart->port.type = PORT_UNKNOWN;
+ uart->port.dev = &serial8250_isa_devs->dev;
+ uart->capabilities = uart_config[uart->port.type].flags;
+ uart_add_one_port(&serial8250_reg, &uart->port);
+ } else {
+ uart->port.dev = NULL;
+ }
+ mutex_unlock(&serial_mutex);
+}
+EXPORT_SYMBOL(serial8250_unregister_port);
+
+static int __init serial8250_init(void)
+{
+ int ret;
+
+ if (nr_uarts > UART_NR)
+ nr_uarts = UART_NR;
+
+ printk(KERN_INFO "Serial: 8250/16550 driver, "
+ "%d ports, IRQ sharing %sabled\n", nr_uarts,
+ share_irqs ? "en" : "dis");
+
+#ifdef CONFIG_SPARC
+ ret = sunserial_register_minors(&serial8250_reg, UART_NR);
+#else
+ serial8250_reg.nr = UART_NR;
+ ret = uart_register_driver(&serial8250_reg);
+#endif
+ if (ret)
+ goto out;
+
+ serial8250_isa_devs = platform_device_alloc("serial8250",
+ PLAT8250_DEV_LEGACY);
+ if (!serial8250_isa_devs) {
+ ret = -ENOMEM;
+ goto unreg_uart_drv;
+ }
+
+ ret = platform_device_add(serial8250_isa_devs);
+ if (ret)
+ goto put_dev;
+
+ serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev);
+
+ ret = platform_driver_register(&serial8250_isa_driver);
+ if (ret == 0)
+ goto out;
+
+ platform_device_del(serial8250_isa_devs);
+put_dev:
+ platform_device_put(serial8250_isa_devs);
+unreg_uart_drv:
+#ifdef CONFIG_SPARC
+ sunserial_unregister_minors(&serial8250_reg, UART_NR);
+#else
+ uart_unregister_driver(&serial8250_reg);
+#endif
+out:
+ return ret;
+}
+
+static void __exit serial8250_exit(void)
+{
+ struct platform_device *isa_dev = serial8250_isa_devs;
+
+ /*
+ * This tells serial8250_unregister_port() not to re-register
+ * the ports (thereby making serial8250_isa_driver permanently
+	 * in use).
+ */
+ serial8250_isa_devs = NULL;
+
+ platform_driver_unregister(&serial8250_isa_driver);
+ platform_device_unregister(isa_dev);
+
+#ifdef CONFIG_SPARC
+ sunserial_unregister_minors(&serial8250_reg, UART_NR);
+#else
+ uart_unregister_driver(&serial8250_reg);
+#endif
+}
+
+module_init(serial8250_init);
+module_exit(serial8250_exit);
+
+EXPORT_SYMBOL(serial8250_suspend_port);
+EXPORT_SYMBOL(serial8250_resume_port);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Generic 8250/16x50 serial driver");
+
+module_param(share_irqs, uint, 0644);
+MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices"
+ " (unsafe)");
+
+module_param(nr_uarts, uint, 0644);
+MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");
+
+module_param(skip_txen_test, uint, 0644);
+MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time");
+
+#ifdef CONFIG_SERIAL_8250_RSA
+module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444);
+MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
+#endif
+MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
diff --git a/drivers/tty/serial/8250.h b/drivers/tty/serial/8250.h
new file mode 100644
index 00000000..6edf4a6a
--- /dev/null
+++ b/drivers/tty/serial/8250.h
@@ -0,0 +1,79 @@
+/*
+ * Driver for 8250/16550-type serial ports
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * Copyright (C) 2001 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/serial_8250.h>
+
+struct old_serial_port {
+ unsigned int uart;
+ unsigned int baud_base;
+ unsigned int port;
+ unsigned int irq;
+ unsigned int flags;
+ unsigned char hub6;
+ unsigned char io_type;
+ unsigned char *iomem_base;
+ unsigned short iomem_reg_shift;
+ unsigned long irqflags;
+};
+
+/*
+ * This replaces serial_uart_config in include/linux/serial.h
+ */
+struct serial8250_config {
+ const char *name;
+ unsigned short fifo_size;
+ unsigned short tx_loadsz;
+ unsigned char fcr;
+ unsigned int flags;
+};
+
+#define UART_CAP_FIFO (1 << 8) /* UART has FIFO */
+#define UART_CAP_EFR (1 << 9) /* UART has EFR */
+#define UART_CAP_SLEEP (1 << 10) /* UART has IER sleep */
+#define UART_CAP_AFE (1 << 11) /* MCR-based hw flow control */
+#define UART_CAP_UUE (1 << 12) /* UART needs IER bit 6 set (Xscale) */
+#define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */
+
+#define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */
+#define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
+#define UART_BUG_NOMSR (1 << 2) /* UART has buggy MSR status bits (Au1x00) */
+#define UART_BUG_THRE (1 << 3) /* UART has buggy THRE reassertion */
+
+#define PROBE_RSA (1 << 0)
+#define PROBE_ANY (~0)
+
+#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
+
+#ifdef CONFIG_SERIAL_8250_SHARE_IRQ
+#define SERIAL8250_SHARE_IRQS 1
+#else
+#define SERIAL8250_SHARE_IRQS 0
+#endif
+
+#if defined(__alpha__) && !defined(CONFIG_PCI)
+/*
+ * Digital did something really horribly wrong with the OUT1 and OUT2
+ * lines on at least some ALPHA's. The failure mode is that if either
+ * is cleared, the machine locks up with endless interrupts.
+ */
+#define ALPHA_KLUDGE_MCR (UART_MCR_OUT2 | UART_MCR_OUT1)
+#elif defined(CONFIG_SBC8560)
+/*
+ * WindRiver did something similarly broken on their SBC8560 board. The
+ * UART tristates its IRQ output while OUT2 is clear, but they pulled
+ * the interrupt line _up_ instead of down, so if we register the IRQ
+ * while the UART is in that state, we die in an IRQ storm.
+ */
+#define ALPHA_KLUDGE_MCR (UART_MCR_OUT2)
+#else
+#define ALPHA_KLUDGE_MCR 0
+#endif
diff --git a/drivers/tty/serial/8250_accent.c b/drivers/tty/serial/8250_accent.c
new file mode 100644
index 00000000..34b51c65
--- /dev/null
+++ b/drivers/tty/serial/8250_accent.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2005 Russell King.
+ * Data taken from include/asm-i386/serial.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+
+#define PORT(_base,_irq) \
+ { \
+ .iobase = _base, \
+ .irq = _irq, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_PORT, \
+ .flags = UPF_BOOT_AUTOCONF, \
+ }
+
+static struct plat_serial8250_port accent_data[] = {
+ PORT(0x330, 4),
+ PORT(0x338, 4),
+ { },
+};
+
+static struct platform_device accent_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_ACCENT,
+ .dev = {
+ .platform_data = accent_data,
+ },
+};
+
+static int __init accent_init(void)
+{
+ return platform_device_register(&accent_device);
+}
+
+module_init(accent_init);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("8250 serial probe module for Accent Async cards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250_acorn.c b/drivers/tty/serial/8250_acorn.c
new file mode 100644
index 00000000..b0ce8c56
--- /dev/null
+++ b/drivers/tty/serial/8250_acorn.c
@@ -0,0 +1,141 @@
+/*
+ * linux/drivers/serial/acorn.c
+ *
+ * Copyright (C) 1996-2003 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/tty.h>
+#include <linux/serial_core.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/ecard.h>
+#include <asm/string.h>
+
+#include "8250.h"
+
+#define MAX_PORTS 3
+
+struct serial_card_type {
+ unsigned int num_ports;
+ unsigned int uartclk;
+ unsigned int type;
+ unsigned int offset[MAX_PORTS];
+};
+
+struct serial_card_info {
+ unsigned int num_ports;
+ int ports[MAX_PORTS];
+ void __iomem *vaddr;
+};
+
+static int __devinit
+serial_card_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+ struct serial_card_info *info;
+ struct serial_card_type *type = id->data;
+ struct uart_port port;
+ unsigned long bus_addr;
+ unsigned int i;
+
+ info = kzalloc(sizeof(struct serial_card_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->num_ports = type->num_ports;
+
+ bus_addr = ecard_resource_start(ec, type->type);
+ info->vaddr = ecardm_iomap(ec, type->type, 0, 0);
+ if (!info->vaddr) {
+ kfree(info);
+ return -ENOMEM;
+ }
+
+ ecard_set_drvdata(ec, info);
+
+ memset(&port, 0, sizeof(struct uart_port));
+ port.irq = ec->irq;
+ port.flags = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
+ port.uartclk = type->uartclk;
+ port.iotype = UPIO_MEM;
+ port.regshift = 2;
+ port.dev = &ec->dev;
+
+ for (i = 0; i < info->num_ports; i ++) {
+ port.membase = info->vaddr + type->offset[i];
+ port.mapbase = bus_addr + type->offset[i];
+
+ info->ports[i] = serial8250_register_port(&port);
+ }
+
+ return 0;
+}
+
+static void __devexit serial_card_remove(struct expansion_card *ec)
+{
+ struct serial_card_info *info = ecard_get_drvdata(ec);
+ int i;
+
+ ecard_set_drvdata(ec, NULL);
+
+ for (i = 0; i < info->num_ports; i++)
+ if (info->ports[i] > 0)
+ serial8250_unregister_port(info->ports[i]);
+
+ kfree(info);
+}
+
+static struct serial_card_type atomwide_type = {
+ .num_ports = 3,
+ .uartclk = 7372800,
+ .type = ECARD_RES_IOCSLOW,
+ .offset = { 0x2800, 0x2400, 0x2000 },
+};
+
+static struct serial_card_type serport_type = {
+ .num_ports = 2,
+ .uartclk = 3686400,
+ .type = ECARD_RES_IOCSLOW,
+ .offset = { 0x2000, 0x2020 },
+};
+
+static const struct ecard_id serial_cids[] = {
+ { MANU_ATOMWIDE, PROD_ATOMWIDE_3PSERIAL, &atomwide_type },
+ { MANU_SERPORT, PROD_SERPORT_DSPORT, &serport_type },
+ { 0xffff, 0xffff }
+};
+
+static struct ecard_driver serial_card_driver = {
+ .probe = serial_card_probe,
+ .remove = __devexit_p(serial_card_remove),
+ .id_table = serial_cids,
+ .drv = {
+ .name = "8250_acorn",
+ },
+};
+
+static int __init serial_card_init(void)
+{
+ return ecard_register_driver(&serial_card_driver);
+}
+
+static void __exit serial_card_exit(void)
+{
+ ecard_remove_driver(&serial_card_driver);
+}
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("Acorn 8250-compatible serial port expansion card driver");
+MODULE_LICENSE("GPL");
+
+module_init(serial_card_init);
+module_exit(serial_card_exit);
diff --git a/drivers/tty/serial/8250_boca.c b/drivers/tty/serial/8250_boca.c
new file mode 100644
index 00000000..d125dc10
--- /dev/null
+++ b/drivers/tty/serial/8250_boca.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2005 Russell King.
+ * Data taken from include/asm-i386/serial.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+
+#define PORT(_base,_irq) \
+ { \
+ .iobase = _base, \
+ .irq = _irq, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_PORT, \
+ .flags = UPF_BOOT_AUTOCONF, \
+ }
+
+static struct plat_serial8250_port boca_data[] = {
+ PORT(0x100, 12),
+ PORT(0x108, 12),
+ PORT(0x110, 12),
+ PORT(0x118, 12),
+ PORT(0x120, 12),
+ PORT(0x128, 12),
+ PORT(0x130, 12),
+ PORT(0x138, 12),
+ PORT(0x140, 12),
+ PORT(0x148, 12),
+ PORT(0x150, 12),
+ PORT(0x158, 12),
+ PORT(0x160, 12),
+ PORT(0x168, 12),
+ PORT(0x170, 12),
+ PORT(0x178, 12),
+ { },
+};
+
+static struct platform_device boca_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_BOCA,
+ .dev = {
+ .platform_data = boca_data,
+ },
+};
+
+static int __init boca_init(void)
+{
+ return platform_device_register(&boca_device);
+}
+
+module_init(boca_init);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("8250 serial probe module for Boca cards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250_early.c b/drivers/tty/serial/8250_early.c
new file mode 100644
index 00000000..eaafb98d
--- /dev/null
+++ b/drivers/tty/serial/8250_early.c
@@ -0,0 +1,287 @@
+/*
+ * Early serial console for 8250/16550 devices
+ *
+ * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on the 8250.c serial driver, Copyright (C) 2001 Russell King,
+ * and on early_printk.c by Andi Kleen.
+ *
+ * This is for use before the serial driver has initialized, in
+ * particular, before the UARTs have been discovered and named.
+ * Instead of specifying the console device as, e.g., "ttyS0",
+ * we locate the device directly by its MMIO or I/O port address.
+ *
+ * The user can specify the device directly, e.g.,
+ * earlycon=uart8250,io,0x3f8,9600n8
+ * earlycon=uart8250,mmio,0xff5e0000,115200n8
+ * earlycon=uart8250,mmio32,0xff5e0000,115200n8
+ * or
+ * console=uart8250,io,0x3f8,9600n8
+ * console=uart8250,mmio,0xff5e0000,115200n8
+ * console=uart8250,mmio32,0xff5e0000,115200n8
+ */
+
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <asm/io.h>
+#include <asm/serial.h>
+#ifdef CONFIG_FIX_EARLYCON_MEM
+#include <asm/pgtable.h>
+#include <asm/fixmap.h>
+#endif
+
+struct early_serial8250_device {
+ struct uart_port port;
+ char options[16]; /* e.g., 115200n8 */
+ unsigned int baud;
+};
+
+static struct early_serial8250_device early_device;
+
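+/* Minimal register accessors for the early console, honouring the configured iotype. */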
+static unsigned int __init serial_in(struct uart_port *port, int offset)
+{
+ switch (port->iotype) {
+ case UPIO_MEM:
+ return readb(port->membase + offset);
+ case UPIO_MEM32:
+ return readl(port->membase + (offset << 2));
+ case UPIO_PORT:
+ return inb(port->iobase + offset);
+ default:
+ return 0;
+ }
+}
+
+static void __init serial_out(struct uart_port *port, int offset, int value)
+{
+ switch (port->iotype) {
+ case UPIO_MEM:
+ writeb(value, port->membase + offset);
+ break;
+ case UPIO_MEM32:
+ writel(value, port->membase + (offset << 2));
+ break;
+ case UPIO_PORT:
+ outb(value, port->iobase + offset);
+ break;
+ }
+}
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
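+/* Busy-wait until both the transmit holding and shift registers are empty. */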
+static void __init wait_for_xmitr(struct uart_port *port)
+{
+ unsigned int status;
+
+ for (;;) {
+ status = serial_in(port, UART_LSR);
+ if ((status & BOTH_EMPTY) == BOTH_EMPTY)
+ return;
+ cpu_relax();
+ }
+}
+
+static void __init serial_putc(struct uart_port *port, int c)
+{
+ wait_for_xmitr(port);
+ serial_out(port, UART_TX, c);
+}
+
+static void __init early_serial8250_write(struct console *console,
+ const char *s, unsigned int count)
+{
+ struct uart_port *port = &early_device.port;
+ unsigned int ier;
+
+ /* Save the IER and disable interrupts */
+ ier = serial_in(port, UART_IER);
+ serial_out(port, UART_IER, 0);
+
+ uart_console_write(port, s, count, serial_putc);
+
+ /* Wait for transmitter to become empty and restore the IER */
+ wait_for_xmitr(port);
+ serial_out(port, UART_IER, ier);
+}
+
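+/* Read the divisor latch back (with DLAB set) and derive the current baud rate from uartclk. */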
+static unsigned int __init probe_baud(struct uart_port *port)
+{
+ unsigned char lcr, dll, dlm;
+ unsigned int quot;
+
+ lcr = serial_in(port, UART_LCR);
+ serial_out(port, UART_LCR, lcr | UART_LCR_DLAB);
+ dll = serial_in(port, UART_DLL);
+ dlm = serial_in(port, UART_DLM);
+ serial_out(port, UART_LCR, lcr);
+
+ quot = (dlm << 8) | dll;
+ return (port->uartclk / 16) / quot;
+}
+
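+/* Program 8n1, disable interrupts and FIFOs, assert DTR/RTS, then load the divisor for the requested baud rate. */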
+static void __init init_port(struct early_serial8250_device *device)
+{
+ struct uart_port *port = &device->port;
+ unsigned int divisor;
+ unsigned char c;
+
+ serial_out(port, UART_LCR, 0x3); /* 8n1 */
+ serial_out(port, UART_IER, 0); /* no interrupt */
+ serial_out(port, UART_FCR, 0); /* no fifo */
+ serial_out(port, UART_MCR, 0x3); /* DTR + RTS */
+
+ divisor = port->uartclk / (16 * device->baud);
+ c = serial_in(port, UART_LCR);
+ serial_out(port, UART_LCR, c | UART_LCR_DLAB);
+ serial_out(port, UART_DLL, divisor & 0xff);
+ serial_out(port, UART_DLM, (divisor >> 8) & 0xff);
+ serial_out(port, UART_LCR, c & ~UART_LCR_DLAB);
+}
+
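+/* Parse "io|mmio|mmio32,<addr>[,<baud><parity><bits>]", map MMIO registers and record the options. */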
+static int __init parse_options(struct early_serial8250_device *device,
+ char *options)
+{
+ struct uart_port *port = &device->port;
+ int mmio, mmio32, length;
+
+ if (!options)
+ return -ENODEV;
+
+ port->uartclk = BASE_BAUD * 16;
+
+ mmio = !strncmp(options, "mmio,", 5);
+ mmio32 = !strncmp(options, "mmio32,", 7);
+ if (mmio || mmio32) {
+ port->iotype = (mmio ? UPIO_MEM : UPIO_MEM32);
+ port->mapbase = simple_strtoul(options + (mmio ? 5 : 7),
+ &options, 0);
+ if (mmio32)
+ port->regshift = 2;
+#ifdef CONFIG_FIX_EARLYCON_MEM
+ set_fixmap_nocache(FIX_EARLYCON_MEM_BASE,
+ port->mapbase & PAGE_MASK);
+ port->membase =
+ (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
+ port->membase += port->mapbase & ~PAGE_MASK;
+#else
+ port->membase = ioremap_nocache(port->mapbase, 64);
+ if (!port->membase) {
+ printk(KERN_ERR "%s: Couldn't ioremap 0x%llx\n",
+ __func__,
+ (unsigned long long) port->mapbase);
+ return -ENOMEM;
+ }
+#endif
+ } else if (!strncmp(options, "io,", 3)) {
+ port->iotype = UPIO_PORT;
+ port->iobase = simple_strtoul(options + 3, &options, 0);
+ mmio = 0;
+ } else
+ return -EINVAL;
+
+ options = strchr(options, ',');
+ if (options) {
+ options++;
+ device->baud = simple_strtoul(options, NULL, 0);
+ length = min(strcspn(options, " "), sizeof(device->options));
+ strncpy(device->options, options, length);
+ } else {
+ device->baud = probe_baud(port);
+ snprintf(device->options, sizeof(device->options), "%u",
+ device->baud);
+ }
+
+ if (mmio || mmio32)
+ printk(KERN_INFO
+ "Early serial console at MMIO%s 0x%llx (options '%s')\n",
+ mmio32 ? "32" : "",
+ (unsigned long long)port->mapbase,
+ device->options);
+ else
+ printk(KERN_INFO
+ "Early serial console at I/O port 0x%lx (options '%s')\n",
+ port->iobase,
+ device->options);
+
+ return 0;
+}
+
+static struct console early_serial8250_console __initdata = {
+ .name = "uart",
+ .write = early_serial8250_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1,
+};
+
+static int __init early_serial8250_setup(char *options)
+{
+ struct early_serial8250_device *device = &early_device;
+ int err;
+
+ if (device->port.membase || device->port.iobase)
+ return 0;
+
+ err = parse_options(device, options);
+ if (err < 0)
+ return err;
+
+ init_port(device);
+ return 0;
+}
+
+int __init setup_early_serial8250_console(char *cmdline)
+{
+ char *options;
+ int err;
+
+ options = strstr(cmdline, "uart8250,");
+ if (!options) {
+ options = strstr(cmdline, "uart,");
+ if (!options)
+ return 0;
+ }
+
+ options = strchr(cmdline, ',') + 1;
+ err = early_serial8250_setup(options);
+ if (err < 0)
+ return err;
+
+ register_console(&early_serial8250_console);
+
+ return 0;
+}
+
+int serial8250_find_port_for_earlycon(void)
+{
+ struct early_serial8250_device *device = &early_device;
+ struct uart_port *port = &device->port;
+ int line;
+ int ret;
+
+ if (!device->port.membase && !device->port.iobase)
+ return -ENODEV;
+
+ line = serial8250_find_port(port);
+ if (line < 0)
+ return -ENODEV;
+
+ ret = update_console_cmdline("uart", 8250,
+ "ttyS", line, device->options);
+ if (ret < 0)
+ ret = update_console_cmdline("uart", 0,
+ "ttyS", line, device->options);
+
+ return ret;
+}
+
+early_param("earlycon", setup_early_serial8250_console);
diff --git a/drivers/tty/serial/8250_exar_st16c554.c b/drivers/tty/serial/8250_exar_st16c554.c
new file mode 100644
index 00000000..bf53aabf
--- /dev/null
+++ b/drivers/tty/serial/8250_exar_st16c554.c
@@ -0,0 +1,50 @@
+/*
+ * Written by Paul B Schroeder < pschroeder "at" uplogix "dot" com >
+ * Based on 8250_boca.
+ *
+ * Copyright (C) 2005 Russell King.
+ * Data taken from include/asm-i386/serial.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+
+#define PORT(_base,_irq) \
+ { \
+ .iobase = _base, \
+ .irq = _irq, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_PORT, \
+ .flags = UPF_BOOT_AUTOCONF, \
+ }
+
+static struct plat_serial8250_port exar_data[] = {
+ PORT(0x100, 5),
+ PORT(0x108, 5),
+ PORT(0x110, 5),
+ PORT(0x118, 5),
+ { },
+};
+
+static struct platform_device exar_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_EXAR_ST16C554,
+ .dev = {
+ .platform_data = exar_data,
+ },
+};
+
+static int __init exar_init(void)
+{
+ return platform_device_register(&exar_device);
+}
+
+module_init(exar_init);
+
+MODULE_AUTHOR("Paul B Schroeder");
+MODULE_DESCRIPTION("8250 serial probe module for Exar cards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250_fourport.c b/drivers/tty/serial/8250_fourport.c
new file mode 100644
index 00000000..be158260
--- /dev/null
+++ b/drivers/tty/serial/8250_fourport.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2005 Russell King.
+ * Data taken from include/asm-i386/serial.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+
+#define PORT(_base,_irq) \
+ { \
+ .iobase = _base, \
+ .irq = _irq, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_PORT, \
+ .flags = UPF_BOOT_AUTOCONF | UPF_FOURPORT, \
+ }
+
+static struct plat_serial8250_port fourport_data[] = {
+ PORT(0x1a0, 9),
+ PORT(0x1a8, 9),
+ PORT(0x1b0, 9),
+ PORT(0x1b8, 9),
+ PORT(0x2a0, 5),
+ PORT(0x2a8, 5),
+ PORT(0x2b0, 5),
+ PORT(0x2b8, 5),
+ { },
+};
+
+static struct platform_device fourport_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_FOURPORT,
+ .dev = {
+ .platform_data = fourport_data,
+ },
+};
+
+static int __init fourport_init(void)
+{
+ return platform_device_register(&fourport_device);
+}
+
+module_init(fourport_init);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("8250 serial probe module for AST Fourport cards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250_gsc.c b/drivers/tty/serial/8250_gsc.c
new file mode 100644
index 00000000..d8c0ffbf
--- /dev/null
+++ b/drivers/tty/serial/8250_gsc.c
@@ -0,0 +1,122 @@
+/*
+ * Serial Device Initialisation for Lasi/Asp/Wax/Dino
+ *
+ * (c) Copyright Matthew Wilcox <willy@debian.org> 2001-2002
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/serial_core.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+#include <asm/hardware.h>
+#include <asm/parisc-device.h>
+#include <asm/io.h>
+
+#include "8250.h"
+
+static int __init serial_init_chip(struct parisc_device *dev)
+{
+ struct uart_port port;
+ unsigned long address;
+ int err;
+
+ if (!dev->irq) {
+ /* We find some unattached serial ports by walking native
+ * busses. These should be silently ignored. Otherwise,
+ * what we have here is a missing parent device, so tell
+ * the user what they're missing.
+ */
+ if (parisc_parent(dev)->id.hw_type != HPHW_IOA)
+ printk(KERN_INFO
+ "Serial: device 0x%llx not configured.\n"
+ "Enable support for Wax, Lasi, Asp or Dino.\n",
+ (unsigned long long)dev->hpa.start);
+ return -ENODEV;
+ }
+
+ address = dev->hpa.start;
+ if (dev->id.sversion != 0x8d)
+ address += 0x800;
+
+ memset(&port, 0, sizeof(port));
+ port.iotype = UPIO_MEM;
+ /* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */
+ port.uartclk = 7272727;
+ port.mapbase = address;
+ port.membase = ioremap_nocache(address, 16);
+ port.irq = dev->irq;
+ port.flags = UPF_BOOT_AUTOCONF;
+ port.dev = &dev->dev;
+
+ err = serial8250_register_port(&port);
+ if (err < 0) {
+ printk(KERN_WARNING
+ "serial8250_register_port returned error %d\n", err);
+ iounmap(port.membase);
+ return err;
+ }
+
+ return 0;
+}
+
+static struct parisc_device_id serial_tbl[] = {
+ { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 },
+ { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c },
+ { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d },
+ { 0 }
+};
+
+/* Hack. Some machines have SERIAL_0 attached to Lasi and SERIAL_1
+ * attached to Dino. Unfortunately, Dino appears before Lasi in the device
+ * tree. To ensure that ttyS0 == SERIAL_0, we register two drivers; one
+ * which only knows about Lasi and then a second which will find all the
+ * other serial ports. HPUX ignores this problem.
+ */
+static struct parisc_device_id lasi_tbl[] = {
+ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03B, 0x0008C }, /* C1xx/C1xxL */
+ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03C, 0x0008C }, /* B132L */
+ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03D, 0x0008C }, /* B160L */
+ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03E, 0x0008C }, /* B132L+ */
+ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03F, 0x0008C }, /* B180L+ */
+ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x046, 0x0008C }, /* Rocky2 120 */
+ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x047, 0x0008C }, /* Rocky2 150 */
+ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x04E, 0x0008C }, /* Kiji L2 132 */
+ { HPHW_FIO, HVERSION_REV_ANY_ID, 0x056, 0x0008C }, /* Raven+ */
+ { 0 }
+};
+
+
+MODULE_DEVICE_TABLE(parisc, serial_tbl);
+
+static struct parisc_driver lasi_driver = {
+ .name = "serial_1",
+ .id_table = lasi_tbl,
+ .probe = serial_init_chip,
+};
+
+static struct parisc_driver serial_driver = {
+ .name = "serial",
+ .id_table = serial_tbl,
+ .probe = serial_init_chip,
+};
+
+static int __init probe_serial_gsc(void)
+{
+ register_parisc_driver(&lasi_driver);
+ register_parisc_driver(&serial_driver);
+ return 0;
+}
+
+module_init(probe_serial_gsc);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250_hp300.c b/drivers/tty/serial/8250_hp300.c
new file mode 100644
index 00000000..c13438c9
--- /dev/null
+++ b/drivers/tty/serial/8250_hp300.c
@@ -0,0 +1,327 @@
+/*
+ * Driver for the 98626/98644/internal serial interface on hp300/hp400
+ * (based on the National Semiconductor INS8250/NS16550AF/WD16C552 UARTs)
+ *
+ * Ported from 2.2 and modified to use the normal 8250 driver
+ * by Kars de Jong <jongk@linux-m68k.org>, May 2004.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
+#include <linux/delay.h>
+#include <linux/dio.h>
+#include <linux/console.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#include "8250.h"
+
+#if !defined(CONFIG_HPDCA) && !defined(CONFIG_HPAPCI)
+#warning CONFIG_8250 defined but neither CONFIG_HPDCA nor CONFIG_HPAPCI defined, are you sure?
+#endif
+
+#ifdef CONFIG_HPAPCI
+struct hp300_port
+{
+ struct hp300_port *next; /* next port */
+ int line; /* line (tty) number */
+};
+
+static struct hp300_port *hp300_ports;
+#endif
+
+#ifdef CONFIG_HPDCA
+
+static int __devinit hpdca_init_one(struct dio_dev *d,
+ const struct dio_device_id *ent);
+static void __devexit hpdca_remove_one(struct dio_dev *d);
+
+static struct dio_device_id hpdca_dio_tbl[] = {
+ { DIO_ID_DCA0 },
+ { DIO_ID_DCA0REM },
+ { DIO_ID_DCA1 },
+ { DIO_ID_DCA1REM },
+ { 0 }
+};
+
+static struct dio_driver hpdca_driver = {
+ .name = "hpdca",
+ .id_table = hpdca_dio_tbl,
+ .probe = hpdca_init_one,
+ .remove = __devexit_p(hpdca_remove_one),
+};
+
+#endif
+
+static unsigned int num_ports;
+
+extern int hp300_uart_scode;
+
+/* Offset to UART registers from base of DCA */
+#define UART_OFFSET 17
+
+#define DCA_ID 0x01 /* ID (read), reset (write) */
+#define DCA_IC 0x03 /* Interrupt control */
+
+/* Interrupt control */
+#define DCA_IC_IE 0x80 /* Master interrupt enable */
+
+#define HPDCA_BAUD_BASE 153600
+
+/* Base address of the Frodo part */
+#define FRODO_BASE (0x41c000)
+
+/*
+ * Where we find the 8250-like APCI ports, and how far apart they are.
+ */
+#define FRODO_APCIBASE 0x0
+#define FRODO_APCISPACE 0x20
+#define FRODO_APCI_OFFSET(x) (FRODO_APCIBASE + ((x) * FRODO_APCISPACE))
+
+#define HPAPCI_BAUD_BASE 500400
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+/*
+ * Parse the bootinfo to find descriptions for headless console and
+ * debug serial ports and register them with the 8250 driver.
+ * This function should be called before serial_console_init() is called
+ * to make sure the serial console will be available for use. IA-64 kernel
+ * calls this function from setup_arch() after the EFI and ACPI tables have
+ * been parsed.
+ */
+int __init hp300_setup_serial_console(void)
+{
+ int scode;
+ struct uart_port port;
+
+ memset(&port, 0, sizeof(port));
+
+ if (hp300_uart_scode < 0 || hp300_uart_scode > DIO_SCMAX)
+ return 0;
+
+ if (DIO_SCINHOLE(hp300_uart_scode))
+ return 0;
+
+ scode = hp300_uart_scode;
+
+ /* Memory mapped I/O */
+ port.iotype = UPIO_MEM;
+ port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF;
+ port.type = PORT_UNKNOWN;
+
+ /* Check for APCI console */
+ if (scode == 256) {
+#ifdef CONFIG_HPAPCI
+ printk(KERN_INFO "Serial console is HP APCI 1\n");
+
+ port.uartclk = HPAPCI_BAUD_BASE * 16;
+ port.mapbase = (FRODO_BASE + FRODO_APCI_OFFSET(1));
+ port.membase = (char *)(port.mapbase + DIO_VIRADDRBASE);
+ port.regshift = 2;
+ add_preferred_console("ttyS", port.line, "9600n8");
+#else
+ printk(KERN_WARNING "Serial console is APCI but support is disabled (CONFIG_HPAPCI)!\n");
+ return 0;
+#endif
+ } else {
+#ifdef CONFIG_HPDCA
+ unsigned long pa = dio_scodetophysaddr(scode);
+ if (!pa)
+ return 0;
+
+ printk(KERN_INFO "Serial console is HP DCA at select code %d\n", scode);
+
+ port.uartclk = HPDCA_BAUD_BASE * 16;
+ port.mapbase = (pa + UART_OFFSET);
+ port.membase = (char *)(port.mapbase + DIO_VIRADDRBASE);
+ port.regshift = 1;
+ port.irq = DIO_IPL(pa + DIO_VIRADDRBASE);
+
+ /* Enable board-interrupts */
+ out_8(pa + DIO_VIRADDRBASE + DCA_IC, DCA_IC_IE);
+
+ if (DIO_ID(pa + DIO_VIRADDRBASE) & 0x80)
+ add_preferred_console("ttyS", port.line, "9600n8");
+#else
+ printk(KERN_WARNING "Serial console is DCA but support is disabled (CONFIG_HPDCA)!\n");
+ return 0;
+#endif
+ }
+
+ if (early_serial_setup(&port) < 0)
+ printk(KERN_WARNING "hp300_setup_serial_console(): early_serial_setup() failed.\n");
+ return 0;
+}
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
+
+#ifdef CONFIG_HPDCA
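+/*
+ * Probe one DCA card found on the DIO bus: register its UART with the
+ * 8250 core, enable the board interrupt and reset the DCA.
+ */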
+static int __devinit hpdca_init_one(struct dio_dev *d,
+ const struct dio_device_id *ent)
+{
+ struct uart_port port;
+ int line;
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ if (hp300_uart_scode == d->scode) {
+ /* Already got it. */
+ return 0;
+ }
+#endif
+ memset(&port, 0, sizeof(struct uart_port));
+
+ /* Memory mapped I/O */
+ port.iotype = UPIO_MEM;
+ port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF;
+ port.irq = d->ipl;
+ port.uartclk = HPDCA_BAUD_BASE * 16;
+ port.mapbase = (d->resource.start + UART_OFFSET);
+ port.membase = (char *)(port.mapbase + DIO_VIRADDRBASE);
+ port.regshift = 1;
+ port.dev = &d->dev;
+ line = serial8250_register_port(&port);
+
+ if (line < 0) {
+ printk(KERN_NOTICE "8250_hp300: register_serial() DCA scode %d"
+ " irq %d failed\n", d->scode, port.irq);
+ return -ENOMEM;
+ }
+
+ /* Enable board-interrupts */
+ out_8(d->resource.start + DIO_VIRADDRBASE + DCA_IC, DCA_IC_IE);
+ dio_set_drvdata(d, (void *)line);
+
+ /* Reset the DCA */
+ out_8(d->resource.start + DIO_VIRADDRBASE + DCA_ID, 0xff);
+ udelay(100);
+
+ num_ports++;
+
+ return 0;
+}
+#endif
+
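+/*
+ * Register the DCA bus driver and, on machines with the Frodo chip,
+ * register the built-in APCI ports (port 0 is reserved for the keyboard).
+ */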
+static int __init hp300_8250_init(void)
+{
+ static int called;
+#ifdef CONFIG_HPAPCI
+ int line;
+ unsigned long base;
+ struct uart_port uport;
+ struct hp300_port *port;
+ int i;
+#endif
+ if (called)
+ return -ENODEV;
+ called = 1;
+
+ if (!MACH_IS_HP300)
+ return -ENODEV;
+
+#ifdef CONFIG_HPDCA
+ dio_register_driver(&hpdca_driver);
+#endif
+#ifdef CONFIG_HPAPCI
+ if (hp300_model < HP_400) {
+ if (!num_ports)
+ return -ENODEV;
+ return 0;
+ }
+ /* These models have the Frodo chip.
+ * Port 0 is reserved for the Apollo Domain keyboard.
+ * Port 1 is either the console or the DCA.
+ */
+ for (i = 1; i < 4; i++) {
+ /* Port 1 is the console on a 425e, on other machines it's
+ * mapped to DCA.
+ */
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ if (i == 1)
+ continue;
+#endif
+
+ /* Create new serial device */
+ port = kmalloc(sizeof(struct hp300_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ memset(&uport, 0, sizeof(struct uart_port));
+
+ base = (FRODO_BASE + FRODO_APCI_OFFSET(i));
+
+ /* Memory mapped I/O */
+ uport.iotype = UPIO_MEM;
+ uport.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ \
+ | UPF_BOOT_AUTOCONF;
+ /* XXX - no interrupt support yet */
+ uport.irq = 0;
+ uport.uartclk = HPAPCI_BAUD_BASE * 16;
+ uport.mapbase = base;
+ uport.membase = (char *)(base + DIO_VIRADDRBASE);
+ uport.regshift = 2;
+
+ line = serial8250_register_port(&uport);
+
+ if (line < 0) {
+ printk(KERN_NOTICE "8250_hp300: register_serial() APCI"
+ " %d irq %d failed\n", i, uport.irq);
+ kfree(port);
+ continue;
+ }
+
+ port->line = line;
+ port->next = hp300_ports;
+ hp300_ports = port;
+
+ num_ports++;
+ }
+#endif
+
+ /* Any boards found? */
+ if (!num_ports)
+ return -ENODEV;
+
+ return 0;
+}
+
+#ifdef CONFIG_HPDCA
+static void __devexit hpdca_remove_one(struct dio_dev *d)
+{
+ int line;
+
+ line = (int) dio_get_drvdata(d);
+ if (d->resource.start) {
+ /* Disable board-interrupts */
+ out_8(d->resource.start + DIO_VIRADDRBASE + DCA_IC, 0);
+ }
+ serial8250_unregister_port(line);
+}
+#endif
+
+static void __exit hp300_8250_exit(void)
+{
+#ifdef CONFIG_HPAPCI
+ struct hp300_port *port, *to_free;
+
+ for (port = hp300_ports; port; ) {
+ serial8250_unregister_port(port->line);
+ to_free = port;
+ port = port->next;
+ kfree(to_free);
+ }
+
+ hp300_ports = NULL;
+#endif
+#ifdef CONFIG_HPDCA
+ dio_unregister_driver(&hpdca_driver);
+#endif
+}
+
+module_init(hp300_8250_init);
+module_exit(hp300_8250_exit);
+MODULE_DESCRIPTION("HP DCA/APCI serial driver");
+MODULE_AUTHOR("Kars de Jong <jongk@linux-m68k.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250_hub6.c b/drivers/tty/serial/8250_hub6.c
new file mode 100644
index 00000000..a5c778e8
--- /dev/null
+++ b/drivers/tty/serial/8250_hub6.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2005 Russell King.
+ * Data taken from include/asm-i386/serial.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+
+#define HUB6(card,port) \
+ { \
+ .iobase = 0x302, \
+ .irq = 3, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_HUB6, \
+ .flags = UPF_BOOT_AUTOCONF, \
+ .hub6 = (card) << 6 | (port) << 3 | 1, \
+ }
+
+static struct plat_serial8250_port hub6_data[] = {
+ HUB6(0, 0),
+ HUB6(0, 1),
+ HUB6(0, 2),
+ HUB6(0, 3),
+ HUB6(0, 4),
+ HUB6(0, 5),
+ HUB6(1, 0),
+ HUB6(1, 1),
+ HUB6(1, 2),
+ HUB6(1, 3),
+ HUB6(1, 4),
+ HUB6(1, 5),
+ { },
+};
+
+static struct platform_device hub6_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_HUB6,
+ .dev = {
+ .platform_data = hub6_data,
+ },
+};
+
+static int __init hub6_init(void)
+{
+ return platform_device_register(&hub6_device);
+}
+
+module_init(hub6_init);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("8250 serial probe module for Hub6 cards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250_mca.c b/drivers/tty/serial/8250_mca.c
new file mode 100644
index 00000000..d20abf04
--- /dev/null
+++ b/drivers/tty/serial/8250_mca.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2005 Russell King.
+ * Data taken from include/asm-i386/serial.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mca.h>
+#include <linux/serial_8250.h>
+
+/*
+ * FIXME: Should we be doing AUTO_IRQ here?
+ */
+#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
+#define MCA_FLAGS UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ
+#else
+#define MCA_FLAGS UPF_BOOT_AUTOCONF | UPF_SKIP_TEST
+#endif
+
+#define PORT(_base,_irq) \
+ { \
+ .iobase = _base, \
+ .irq = _irq, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_PORT, \
+ .flags = MCA_FLAGS, \
+ }
+
+static struct plat_serial8250_port mca_data[] = {
+ PORT(0x3220, 3),
+ PORT(0x3228, 3),
+ PORT(0x4220, 3),
+ PORT(0x4228, 3),
+ PORT(0x5220, 3),
+ PORT(0x5228, 3),
+ { },
+};
+
+static struct platform_device mca_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_MCA,
+ .dev = {
+ .platform_data = mca_data,
+ },
+};
+
+static int __init mca_init(void)
+{
+ if (!MCA_bus)
+ return -ENODEV;
+ return platform_device_register(&mca_device);
+}
+
+module_init(mca_init);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("8250 serial probe module for MCA ports");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
new file mode 100644
index 00000000..21098ed9
--- /dev/null
+++ b/drivers/tty/serial/8250_pci.c
@@ -0,0 +1,3970 @@
+/*
+ * Probe module for 8250/16550-type PCI serial ports.
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * Copyright (C) 2001 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/serial_core.h>
+#include <linux/8250_pci.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+
+#include "8250.h"
+
+#undef SERIAL_DEBUG_PCI
+
+/*
+ * init function returns:
+ * > 0 - number of ports
+ * = 0 - use board->num_ports
+ * < 0 - error
+ */
+struct pci_serial_quirk {
+ u32 vendor;
+ u32 device;
+ u32 subvendor;
+ u32 subdevice;
+ int (*init)(struct pci_dev *dev);
+ int (*setup)(struct serial_private *,
+ const struct pciserial_board *,
+ struct uart_port *, int);
+ void (*exit)(struct pci_dev *dev);
+};
+
+#define PCI_NUM_BAR_RESOURCES 6
+
+struct serial_private {
+ struct pci_dev *dev;
+ unsigned int nr;
+ void __iomem *remapped_bar[PCI_NUM_BAR_RESOURCES];
+ struct pci_serial_quirk *quirk;
+ int line[0];
+};
+
+static void moan_device(const char *str, struct pci_dev *dev)
+{
+ printk(KERN_WARNING
+ "%s: %s\n"
+ "Please send the output of lspci -vv, this\n"
+ "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
+ "manufacturer and name of serial board or\n"
+ "modem board to rmk+serial@arm.linux.org.uk.\n",
+ pci_name(dev), str, dev->vendor, dev->device,
+ dev->subsystem_vendor, dev->subsystem_device);
+}
+
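+/*
+ * Fill in a uart_port from a PCI BAR: memory BARs are ioremapped (and the
+ * mapping is cached per BAR), I/O BARs are used directly.
+ */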
+static int
+setup_port(struct serial_private *priv, struct uart_port *port,
+ int bar, int offset, int regshift)
+{
+ struct pci_dev *dev = priv->dev;
+ unsigned long base, len;
+
+ if (bar >= PCI_NUM_BAR_RESOURCES)
+ return -EINVAL;
+
+ base = pci_resource_start(dev, bar);
+
+ if (pci_resource_flags(dev, bar) & IORESOURCE_MEM) {
+ len = pci_resource_len(dev, bar);
+
+ if (!priv->remapped_bar[bar])
+ priv->remapped_bar[bar] = ioremap_nocache(base, len);
+ if (!priv->remapped_bar[bar])
+ return -ENOMEM;
+
+ port->iotype = UPIO_MEM;
+ port->iobase = 0;
+ port->mapbase = base + offset;
+ port->membase = priv->remapped_bar[bar] + offset;
+ port->regshift = regshift;
+ } else {
+ port->iotype = UPIO_PORT;
+ port->iobase = base + offset;
+ port->mapbase = 0;
+ port->membase = NULL;
+ port->regshift = 0;
+ }
+ return 0;
+}
+
+/*
+ * ADDI-DATA GmbH communication cards <info@addi-data.com>
+ */
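+/* Ports are laid out two per BAR, starting from the board's base BAR. */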
+static int addidata_apci7800_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ unsigned int bar = 0, offset = board->first_offset;
+ bar = FL_GET_BASE(board->flags);
+
+ if (idx < 2) {
+ offset += idx * board->uart_offset;
+ } else if ((idx >= 2) && (idx < 4)) {
+ bar += 1;
+ offset += ((idx - 2) * board->uart_offset);
+ } else if ((idx >= 4) && (idx < 6)) {
+ bar += 2;
+ offset += ((idx - 4) * board->uart_offset);
+ } else if (idx >= 6) {
+ bar += 3;
+ offset += ((idx - 6) * board->uart_offset);
+ }
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
+/*
+ * AFAVLAB uses a different mixture of BARs and offsets
+ * Not that ugly ;) -- HW
+ */
+static int
+afavlab_setup(struct serial_private *priv, const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ unsigned int bar, offset = board->first_offset;
+
+ bar = FL_GET_BASE(board->flags);
+ if (idx < 4)
+ bar += idx;
+ else {
+ bar = 4;
+ offset += (idx - 4) * board->uart_offset;
+ }
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
+/*
+ * HP's Remote Management Console. The Diva chip came in several
+ * different versions. N-class, L2000 and A500 have two Diva chips, each
+ * with 3 UARTs (the third UART on the second chip is unused). Superdome
+ * and Keystone have one Diva chip with 3 UARTs. Some later machines have
+ * one Diva chip, but it has been expanded to 5 UARTs.
+ */
+static int pci_hp_diva_init(struct pci_dev *dev)
+{
+ int rc = 0;
+
+ switch (dev->subsystem_device) {
+ case PCI_DEVICE_ID_HP_DIVA_TOSCA1:
+ case PCI_DEVICE_ID_HP_DIVA_HALFDOME:
+ case PCI_DEVICE_ID_HP_DIVA_KEYSTONE:
+ case PCI_DEVICE_ID_HP_DIVA_EVEREST:
+ rc = 3;
+ break;
+ case PCI_DEVICE_ID_HP_DIVA_TOSCA2:
+ rc = 2;
+ break;
+ case PCI_DEVICE_ID_HP_DIVA_MAESTRO:
+ rc = 4;
+ break;
+ case PCI_DEVICE_ID_HP_DIVA_POWERBAR:
+ case PCI_DEVICE_ID_HP_DIVA_HURRICANE:
+ rc = 1;
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * HP's Diva chip puts the 4th/5th serial port further out, and
+ * some serial ports are supposed to be hidden on certain models.
+ */
+static int
+pci_hp_diva_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ unsigned int offset = board->first_offset;
+ unsigned int bar = FL_GET_BASE(board->flags);
+
+ switch (priv->dev->subsystem_device) {
+ case PCI_DEVICE_ID_HP_DIVA_MAESTRO:
+ if (idx == 3)
+ idx++;
+ break;
+ case PCI_DEVICE_ID_HP_DIVA_EVEREST:
+ if (idx > 0)
+ idx++;
+ if (idx > 2)
+ idx++;
+ break;
+ }
+ if (idx > 2)
+ offset = 0x18;
+
+ offset += idx * board->uart_offset;
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
+/*
+ * Added for EKF Intel i960 serial boards
+ */
+static int pci_inteli960ni_init(struct pci_dev *dev)
+{
+ unsigned long oldval;
+
+ if (!(dev->subsystem_device & 0x1000))
+ return -ENODEV;
+
+ /* is firmware started? */
+ pci_read_config_dword(dev, 0x44, (void *)&oldval);
+ if (oldval == 0x00001000L) { /* RESET value */
+ printk(KERN_DEBUG "Local i960 firmware missing");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/*
+ * Some PCI serial cards using the PLX 9050 PCI interface chip require
+ * that the card interrupt be explicitly enabled or disabled. This
+ * seems to be mainly needed on cards using the PLX which also use I/O
+ * mapped memory.
+ */
+static int pci_plx9050_init(struct pci_dev *dev)
+{
+ u8 irq_config;
+ void __iomem *p;
+
+ if ((pci_resource_flags(dev, 0) & IORESOURCE_MEM) == 0) {
+ moan_device("no memory in bar 0", dev);
+ return 0;
+ }
+
+ irq_config = 0x41;
+ if (dev->vendor == PCI_VENDOR_ID_PANACOM ||
+ dev->subsystem_vendor == PCI_SUBVENDOR_ID_EXSYS)
+ irq_config = 0x43;
+
+ if ((dev->vendor == PCI_VENDOR_ID_PLX) &&
+ (dev->device == PCI_DEVICE_ID_PLX_ROMULUS))
+ /*
+ * As the megawolf cards have the int pins active
+ * high, and have 2 UART chips, both ints must be
+ * enabled on the 9050. Also, the UARTs are set in
+ * 16450 mode by default, so we have to enable the
+ * 16C950 'enhanced' mode so that we can use the
+ * deep FIFOs
+ */
+ irq_config = 0x5b;
+ /*
+ * enable/disable interrupts
+ */
+ p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+ if (p == NULL)
+ return -ENOMEM;
+ writel(irq_config, p + 0x4c);
+
+ /*
+ * Read the register back to ensure that it took effect.
+ */
+ readl(p + 0x4c);
+ iounmap(p);
+
+ return 0;
+}
+
+static void __devexit pci_plx9050_exit(struct pci_dev *dev)
+{
+ u8 __iomem *p;
+
+ if ((pci_resource_flags(dev, 0) & IORESOURCE_MEM) == 0)
+ return;
+
+ /*
+ * disable interrupts
+ */
+ p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+ if (p != NULL) {
+ writel(0, p + 0x4c);
+
+ /*
+ * Read the register back to ensure that it took effect.
+ */
+ readl(p + 0x4c);
+ iounmap(p);
+ }
+}
+
+#define NI8420_INT_ENABLE_REG 0x38
+#define NI8420_INT_ENABLE_BIT 0x2000
+
+static void __devexit pci_ni8420_exit(struct pci_dev *dev)
+{
+ void __iomem *p;
+ unsigned long base, len;
+ unsigned int bar = 0;
+
+ if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) {
+ moan_device("no memory in bar", dev);
+ return;
+ }
+
+ base = pci_resource_start(dev, bar);
+ len = pci_resource_len(dev, bar);
+ p = ioremap_nocache(base, len);
+ if (p == NULL)
+ return;
+
+ /* Disable the CPU Interrupt */
+ writel(readl(p + NI8420_INT_ENABLE_REG) & ~(NI8420_INT_ENABLE_BIT),
+ p + NI8420_INT_ENABLE_REG);
+ iounmap(p);
+}
+
+
+/* MITE registers */
+#define MITE_IOWBSR1 0xc4
+#define MITE_IOWCR1 0xf4
+#define MITE_LCIMR1 0x08
+#define MITE_LCIMR2 0x10
+
+#define MITE_LCIMR2_CLR_CPU_IE (1 << 30)
+
+static void __devexit pci_ni8430_exit(struct pci_dev *dev)
+{
+ void __iomem *p;
+ unsigned long base, len;
+ unsigned int bar = 0;
+
+ if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) {
+ moan_device("no memory in bar", dev);
+ return;
+ }
+
+ base = pci_resource_start(dev, bar);
+ len = pci_resource_len(dev, bar);
+ p = ioremap_nocache(base, len);
+ if (p == NULL)
+ return;
+
+ /* Disable the CPU Interrupt */
+ writel(MITE_LCIMR2_CLR_CPU_IE, p + MITE_LCIMR2);
+ iounmap(p);
+}
+
+/* SBS Technologies Inc. PMC-OCTPRO and P-OCTAL cards */
+static int
+sbs_setup(struct serial_private *priv, const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ unsigned int bar, offset = board->first_offset;
+
+ bar = 0;
+
+ if (idx < 4) {
+ /* first four channels map to 0, 0x100, 0x200, 0x300 */
+ offset += idx * board->uart_offset;
+ } else if (idx < 8) {
+ /* last four channels map to 0x1000, 0x1100, 0x1200, 0x1300 */
+ offset += idx * board->uart_offset + 0xC00;
+ } else /* we have only 8 ports on PMC-OCTALPRO */
+ return 1;
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
+/*
+ * This does the initialization for PMC OCTALPRO cards: it maps the
+ * device memory, resets the UARTs (needed because, if the module is
+ * removed and inserted again, the card is in sleep mode) and enables
+ * the global interrupt.
+ */
+
+/* global control register offset for SBS PMC-OctalPro */
+#define OCT_REG_CR_OFF 0x500
+
+static int sbs_init(struct pci_dev *dev)
+{
+ u8 __iomem *p;
+
+ p = pci_ioremap_bar(dev, 0);
+
+ if (p == NULL)
+ return -ENOMEM;
+	/* Set bit-4 of the Control Register (UART RESET) to reset the UARTs */
+ writeb(0x10, p + OCT_REG_CR_OFF);
+ udelay(50);
+ writeb(0x0, p + OCT_REG_CR_OFF);
+
+ /* Set bit-2 (INTENABLE) of Control Register */
+ writeb(0x4, p + OCT_REG_CR_OFF);
+ iounmap(p);
+
+ return 0;
+}
+
+/*
+ * Disables the global interrupt of PMC-OctalPro
+ */
+
+static void __devexit sbs_exit(struct pci_dev *dev)
+{
+ u8 __iomem *p;
+
+ p = pci_ioremap_bar(dev, 0);
+ /* FIXME: What if resource_len < OCT_REG_CR_OFF */
+ if (p != NULL)
+ writeb(0, p + OCT_REG_CR_OFF);
+ iounmap(p);
+}
+
+/*
+ * SIIG serial cards have a PCI interface chip which also controls
+ * the UART clocking frequency. Each UART can be clocked independently
+ * (except on cards equipped with 4 UARTs) and the initial clocking settings
+ * are stored in the EEPROM chip. This can cause problems because this
+ * version of the serial driver doesn't support differently clocked UARTs
+ * on a single PCI card. To prevent this, the initialization functions set
+ * high frequency clocking for all UARTs on a given card. It is safe (I
+ * hope) because they don't touch the EEPROM settings, to prevent conflicts
+ * with other OSes (like M$ DOS).
+ *
+ * SIIG support added by Andrey Panin <pazke@donpac.ru>, 10/1999
+ *
+ * There are two families of SIIG serial cards with different PCI
+ * interface chips and different configuration methods:
+ * - 10x cards have control registers in IO and/or memory space;
+ * - 20x cards have control registers in standard PCI configuration space.
+ *
+ * Note: all 10x cards have PCI device ids 0x10..
+ * all 20x cards have PCI device ids 0x20..
+ *
+ * There are also Quartet Serial cards which use Oxford Semiconductor
+ * 16954 quad UART PCI chip clocked by 18.432 MHz quartz.
+ *
+ * Note: some SIIG cards are probed by the parport_serial object.
+ */
+
+#define PCI_DEVICE_ID_SIIG_1S_10x (PCI_DEVICE_ID_SIIG_1S_10x_550 & 0xfffc)
+#define PCI_DEVICE_ID_SIIG_2S_10x (PCI_DEVICE_ID_SIIG_2S_10x_550 & 0xfff8)
+
+static int pci_siig10x_init(struct pci_dev *dev)
+{
+ u16 data;
+ void __iomem *p;
+
+ switch (dev->device & 0xfff8) {
+ case PCI_DEVICE_ID_SIIG_1S_10x: /* 1S */
+ data = 0xffdf;
+ break;
+ case PCI_DEVICE_ID_SIIG_2S_10x: /* 2S, 2S1P */
+ data = 0xf7ff;
+ break;
+ default: /* 1S1P, 4S */
+ data = 0xfffb;
+ break;
+ }
+
+ p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+ if (p == NULL)
+ return -ENOMEM;
+
+ writew(readw(p + 0x28) & data, p + 0x28);
+ readw(p + 0x28);
+ iounmap(p);
+ return 0;
+}
+
+#define PCI_DEVICE_ID_SIIG_2S_20x (PCI_DEVICE_ID_SIIG_2S_20x_550 & 0xfffc)
+#define PCI_DEVICE_ID_SIIG_2S1P_20x (PCI_DEVICE_ID_SIIG_2S1P_20x_550 & 0xfffc)
+
+static int pci_siig20x_init(struct pci_dev *dev)
+{
+ u8 data;
+
+ /* Change clock frequency for the first UART. */
+ pci_read_config_byte(dev, 0x6f, &data);
+ pci_write_config_byte(dev, 0x6f, data & 0xef);
+
+	/* If this card has 2 UARTs, we have to do the same with the second UART. */
+ if (((dev->device & 0xfffc) == PCI_DEVICE_ID_SIIG_2S_20x) ||
+ ((dev->device & 0xfffc) == PCI_DEVICE_ID_SIIG_2S1P_20x)) {
+ pci_read_config_byte(dev, 0x73, &data);
+ pci_write_config_byte(dev, 0x73, data & 0xef);
+ }
+ return 0;
+}
+
+static int pci_siig_init(struct pci_dev *dev)
+{
+ unsigned int type = dev->device & 0xff00;
+
+ if (type == 0x1000)
+ return pci_siig10x_init(dev);
+ else if (type == 0x2000)
+ return pci_siig20x_init(dev);
+
+ moan_device("Unknown SIIG card", dev);
+ return -ENODEV;
+}
+
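+/* The first four ports each get their own BAR; further ports sit at 8-byte offsets within BAR 4. */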
+static int pci_siig_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ unsigned int bar = FL_GET_BASE(board->flags) + idx, offset = 0;
+
+ if (idx > 3) {
+ bar = 4;
+ offset = (idx - 4) * 8;
+ }
+
+ return setup_port(priv, port, bar, offset, 0);
+}
+
+/*
+ * Timedia has an explosion of boards, and to keep the PCI table from
+ * growing *huge*, we use this function to collapse some 70 entries
+ * in the PCI table into one, for sanity's and compactness's sake.
+ */
+static const unsigned short timedia_single_port[] = {
+ 0x4025, 0x4027, 0x4028, 0x5025, 0x5027, 0
+};
+
+static const unsigned short timedia_dual_port[] = {
+ 0x0002, 0x4036, 0x4037, 0x4038, 0x4078, 0x4079, 0x4085,
+ 0x4088, 0x4089, 0x5037, 0x5078, 0x5079, 0x5085, 0x6079,
+ 0x7079, 0x8079, 0x8137, 0x8138, 0x8237, 0x8238, 0x9079,
+ 0x9137, 0x9138, 0x9237, 0x9238, 0xA079, 0xB079, 0xC079,
+ 0xD079, 0
+};
+
+static const unsigned short timedia_quad_port[] = {
+ 0x4055, 0x4056, 0x4095, 0x4096, 0x5056, 0x8156, 0x8157,
+ 0x8256, 0x8257, 0x9056, 0x9156, 0x9157, 0x9158, 0x9159,
+ 0x9256, 0x9257, 0xA056, 0xA157, 0xA158, 0xA159, 0xB056,
+ 0xB157, 0
+};
+
+static const unsigned short timedia_eight_port[] = {
+ 0x4065, 0x4066, 0x5065, 0x5066, 0x8166, 0x9066, 0x9166,
+ 0x9167, 0x9168, 0xA066, 0xA167, 0xA168, 0
+};
+
+static const struct timedia_struct {
+ int num;
+ const unsigned short *ids;
+} timedia_data[] = {
+ { 1, timedia_single_port },
+ { 2, timedia_dual_port },
+ { 4, timedia_quad_port },
+ { 8, timedia_eight_port }
+};
+
+static int pci_timedia_init(struct pci_dev *dev)
+{
+ const unsigned short *ids;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(timedia_data); i++) {
+ ids = timedia_data[i].ids;
+ for (j = 0; ids[j]; j++)
+ if (dev->subsystem_device == ids[j])
+ return timedia_data[i].num;
+ }
+ return 0;
+}
+
+/*
+ * Timedia/SUNIX uses a mixture of BARs and offsets
+ * Ugh, this is ugly as all hell --- TYT
+ */
+static int
+pci_timedia_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ unsigned int bar = 0, offset = board->first_offset;
+
+ switch (idx) {
+ case 0:
+ bar = 0;
+ break;
+ case 1:
+ offset = board->uart_offset;
+ bar = 0;
+ break;
+ case 2:
+ bar = 1;
+ break;
+ case 3:
+ offset = board->uart_offset;
+ /* FALLTHROUGH */
+ case 4: /* BAR 2 */
+ case 5: /* BAR 3 */
+ case 6: /* BAR 4 */
+ case 7: /* BAR 5 */
+ bar = idx - 2;
+ }
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
+/*
+ * Some Titan cards are also a little weird
+ */
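+/* Ports 0 and 1 live in BARs 1 and 2; the remaining ports share BAR 4. */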
+static int
+titan_400l_800l_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ unsigned int bar, offset = board->first_offset;
+
+ switch (idx) {
+ case 0:
+ bar = 1;
+ break;
+ case 1:
+ bar = 2;
+ break;
+ default:
+ bar = 4;
+ offset = (idx - 2) * board->uart_offset;
+ }
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
+static int pci_xircom_init(struct pci_dev *dev)
+{
+ msleep(100);
+ return 0;
+}
+
+static int pci_ni8420_init(struct pci_dev *dev)
+{
+ void __iomem *p;
+ unsigned long base, len;
+ unsigned int bar = 0;
+
+ if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) {
+ moan_device("no memory in bar", dev);
+ return 0;
+ }
+
+ base = pci_resource_start(dev, bar);
+ len = pci_resource_len(dev, bar);
+ p = ioremap_nocache(base, len);
+ if (p == NULL)
+ return -ENOMEM;
+
+ /* Enable CPU Interrupt */
+ writel(readl(p + NI8420_INT_ENABLE_REG) | NI8420_INT_ENABLE_BIT,
+ p + NI8420_INT_ENABLE_REG);
+
+ iounmap(p);
+ return 0;
+}
+
+#define MITE_IOWBSR1_WSIZE 0xa
+#define MITE_IOWBSR1_WIN_OFFSET 0x800
+#define MITE_IOWBSR1_WENAB (1 << 7)
+#define MITE_LCIMR1_IO_IE_0 (1 << 24)
+#define MITE_LCIMR2_SET_CPU_IE (1 << 31)
+#define MITE_IOWCR1_RAMSEL_MASK 0xfffffffe
+
+static int pci_ni8430_init(struct pci_dev *dev)
+{
+ void __iomem *p;
+ unsigned long base, len;
+ u32 device_window;
+ unsigned int bar = 0;
+
+ if ((pci_resource_flags(dev, bar) & IORESOURCE_MEM) == 0) {
+ moan_device("no memory in bar", dev);
+ return 0;
+ }
+
+ base = pci_resource_start(dev, bar);
+ len = pci_resource_len(dev, bar);
+ p = ioremap_nocache(base, len);
+ if (p == NULL)
+ return -ENOMEM;
+
+ /* Set device window address and size in BAR0 */
+ device_window = ((base + MITE_IOWBSR1_WIN_OFFSET) & 0xffffff00)
+ | MITE_IOWBSR1_WENAB | MITE_IOWBSR1_WSIZE;
+ writel(device_window, p + MITE_IOWBSR1);
+
+ /* Set window access to go to RAMSEL IO address space */
+ writel((readl(p + MITE_IOWCR1) & MITE_IOWCR1_RAMSEL_MASK),
+ p + MITE_IOWCR1);
+
+ /* Enable IO Bus Interrupt 0 */
+ writel(MITE_LCIMR1_IO_IE_0, p + MITE_LCIMR1);
+
+ /* Enable CPU Interrupt */
+ writel(MITE_LCIMR2_SET_CPU_IE, p + MITE_LCIMR2);
+
+ iounmap(p);
+ return 0;
+}
+
+/* UART Port Control Register */
+#define NI8430_PORTCON 0x0f
+#define NI8430_PORTCON_TXVR_ENABLE (1 << 3)
+
+static int
+pci_ni8430_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ void __iomem *p;
+ unsigned long base, len;
+ unsigned int bar, offset = board->first_offset;
+
+ if (idx >= board->num_ports)
+ return 1;
+
+ bar = FL_GET_BASE(board->flags);
+ offset += idx * board->uart_offset;
+
+ base = pci_resource_start(priv->dev, bar);
+ len = pci_resource_len(priv->dev, bar);
+ p = ioremap_nocache(base, len);
+
+	/* enable the transceiver */
+ writeb(readb(p + offset + NI8430_PORTCON) | NI8430_PORTCON_TXVR_ENABLE,
+ p + offset + NI8430_PORTCON);
+
+ iounmap(p);
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
+
+static int pci_netmos_init(struct pci_dev *dev)
+{
+ /* subdevice 0x00PS means <P> parallel, <S> serial */
+ unsigned int num_serial = dev->subsystem_device & 0xf;
+
+ if ((dev->device == PCI_DEVICE_ID_NETMOS_9901) ||
+ (dev->device == PCI_DEVICE_ID_NETMOS_9865))
+ return 0;
+ if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
+ dev->subsystem_device == 0x0299)
+ return 0;
+
+ if (num_serial == 0)
+ return -ENODEV;
+ return num_serial;
+}
+
+/*
+ * These chips are available with an optional parallel port and up to
+ * two serial ports. Unfortunately they all have the same product id.
+ *
+ * Basic configuration is done over a region of 32 I/O ports. The base
+ * ioport is called INTA or INTC, depending on docs/other drivers.
+ *
+ * The region of the 32 I/O ports is configured in POSIO0R...
+ */
+
+/* registers */
+#define ITE_887x_MISCR 0x9c
+#define ITE_887x_INTCBAR 0x78
+#define ITE_887x_UARTBAR 0x7c
+#define ITE_887x_PS0BAR 0x10
+#define ITE_887x_POSIO0 0x60
+
+/* I/O space size */
+#define ITE_887x_IOSIZE 32
+/* I/O space size (bits 26-24; 8 bytes = 011b) */
+#define ITE_887x_POSIO_IOSIZE_8 (3 << 24)
+/* I/O space size (bits 26-24; 32 bytes = 101b) */
+#define ITE_887x_POSIO_IOSIZE_32 (5 << 24)
+/* Decoding speed (1 = slow, 2 = medium, 3 = fast) */
+#define ITE_887x_POSIO_SPEED (3 << 29)
+/* enable IO_Space bit */
+#define ITE_887x_POSIO_ENABLE (1 << 31)
+
+static int pci_ite887x_init(struct pci_dev *dev)
+{
+ /* inta_addr are the configuration addresses of the ITE */
+ static const short inta_addr[] = { 0x2a0, 0x2c0, 0x220, 0x240, 0x1e0,
+ 0x200, 0x280, 0 };
+ int ret, i, type;
+ struct resource *iobase = NULL;
+ u32 miscr, uartbar, ioport;
+
+ /* search for the base-ioport */
+ i = 0;
+ while (inta_addr[i] && iobase == NULL) {
+ iobase = request_region(inta_addr[i], ITE_887x_IOSIZE,
+ "ite887x");
+ if (iobase != NULL) {
+ /* write POSIO0R - speed | size | ioport */
+ pci_write_config_dword(dev, ITE_887x_POSIO0,
+ ITE_887x_POSIO_ENABLE | ITE_887x_POSIO_SPEED |
+ ITE_887x_POSIO_IOSIZE_32 | inta_addr[i]);
+ /* write INTCBAR - ioport */
+ pci_write_config_dword(dev, ITE_887x_INTCBAR,
+ inta_addr[i]);
+ ret = inb(inta_addr[i]);
+ if (ret != 0xff) {
+ /* ioport connected */
+ break;
+ }
+ release_region(iobase->start, ITE_887x_IOSIZE);
+ iobase = NULL;
+ }
+ i++;
+ }
+
+ if (!inta_addr[i]) {
+ printk(KERN_ERR "ite887x: could not find iobase\n");
+ return -ENODEV;
+ }
+
+ /* start of undocumented type checking (see parport_pc.c) */
+ type = inb(iobase->start + 0x18) & 0x0f;
+
+ switch (type) {
+ case 0x2: /* ITE8871 (1P) */
+ case 0xa: /* ITE8875 (1P) */
+ ret = 0;
+ break;
+ case 0xe: /* ITE8872 (2S1P) */
+ ret = 2;
+ break;
+ case 0x6: /* ITE8873 (1S) */
+ ret = 1;
+ break;
+ case 0x8: /* ITE8874 (2S) */
+ ret = 2;
+ break;
+ default:
+ moan_device("Unknown ITE887x", dev);
+ ret = -ENODEV;
+ }
+
+ /* configure all serial ports */
+ for (i = 0; i < ret; i++) {
+ /* read the I/O port from the device */
+ pci_read_config_dword(dev, ITE_887x_PS0BAR + (0x4 * (i + 1)),
+ &ioport);
+ ioport &= 0x0000FF00; /* the actual base address */
+ pci_write_config_dword(dev, ITE_887x_POSIO0 + (0x4 * (i + 1)),
+ ITE_887x_POSIO_ENABLE | ITE_887x_POSIO_SPEED |
+ ITE_887x_POSIO_IOSIZE_8 | ioport);
+
+ /* write the ioport to the UARTBAR */
+ pci_read_config_dword(dev, ITE_887x_UARTBAR, &uartbar);
+ uartbar &= ~(0xffff << (16 * i)); /* clear half the reg */
+ uartbar |= (ioport << (16 * i)); /* set the ioport */
+ pci_write_config_dword(dev, ITE_887x_UARTBAR, uartbar);
+
+ /* get current config */
+ pci_read_config_dword(dev, ITE_887x_MISCR, &miscr);
+ /* disable interrupts (UARTx_Routing[3:0]) */
+ miscr &= ~(0xf << (12 - 4 * i));
+ /* activate the UART (UARTx_En) */
+ miscr |= 1 << (23 - i);
+ /* write new config with activated UART */
+ pci_write_config_dword(dev, ITE_887x_MISCR, miscr);
+ }
+
+ if (ret <= 0) {
+ /* the device has no UARTs if we get here */
+ release_region(iobase->start, ITE_887x_IOSIZE);
+ }
+
+ return ret;
+}
+
+static void __devexit pci_ite887x_exit(struct pci_dev *dev)
+{
+ u32 ioport;
+	/* the ioport is in bits 0-15 of POSIO0R */
+ pci_read_config_dword(dev, ITE_887x_POSIO0, &ioport);
+ ioport &= 0xffff;
+ release_region(ioport, ITE_887x_IOSIZE);
+}
+
+/*
+ * Oxford Semiconductor Inc.
+ * Check that the device is part of the Tornado range of devices, then determine
+ * the number of ports available on the device.
+ */
+static int pci_oxsemi_tornado_init(struct pci_dev *dev)
+{
+ u8 __iomem *p;
+ unsigned long deviceID;
+ unsigned int number_uarts = 0;
+
+ /* OxSemi Tornado devices are all 0xCxxx */
+ if (dev->vendor == PCI_VENDOR_ID_OXSEMI &&
+ (dev->device & 0xF000) != 0xC000)
+ return 0;
+
+ p = pci_iomap(dev, 0, 5);
+ if (p == NULL)
+ return -ENOMEM;
+
+ deviceID = ioread32(p);
+ /* Tornado device */
+ if (deviceID == 0x07000200) {
+ number_uarts = ioread8(p + 4);
+ printk(KERN_DEBUG
+ "%d ports detected on Oxford PCI Express device\n",
+ number_uarts);
+ }
+ pci_iounmap(dev, p);
+ return number_uarts;
+}
+
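+/*
+ * Generic setup: derive the BAR and register offset for port 'idx' from the
+ * board flags (FL_BASE_BARS means one BAR per port, otherwise all ports sit
+ * uart_offset apart in a single BAR) and, for FL_REGION_SZ_CAP boards, cap
+ * the port count by the size of the PCI region.
+ */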
+static int
+pci_default_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ unsigned int bar, offset = board->first_offset, maxnr;
+
+ bar = FL_GET_BASE(board->flags);
+ if (board->flags & FL_BASE_BARS)
+ bar += idx;
+ else
+ offset += idx * board->uart_offset;
+
+ maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
+ (board->reg_shift + 3);
+
+ if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
+ return 1;
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
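+/*
+ * Intel CE4100: the UART registers are memory mapped, 32 bits wide and
+ * spaced four bytes apart, so force UPIO_MEM32, PORT_XSCALE and regshift 2.
+ */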
+static int
+ce4100_serial_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ int ret;
+
+ ret = setup_port(priv, port, 0, 0, board->reg_shift);
+ port->iotype = UPIO_MEM32;
+ port->type = PORT_XSCALE;
+ port->flags = (port->flags | UPF_FIXED_PORT | UPF_FIXED_TYPE);
+ port->regshift = 2;
+
+ return ret;
+}
+
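+/* Cronyx Omega: all ports live in BAR 2, eight bytes apart, no reg_shift. */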
+static int
+pci_omegapci_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ return setup_port(priv, port, 2, idx * 8, 0);
+}
+
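+/*
+ * Intel Serial-over-LAN devices: the 8250 core's transmitter-enable test is
+ * unreliable on these parts, so mark the port UPF_NO_TXEN_TEST and fall
+ * through to the default setup.
+ */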
+static int skip_tx_en_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ port->flags |= UPF_NO_TXEN_TEST;
+ printk(KERN_DEBUG "serial8250: skipping TxEn test for device "
+ "[%04x:%04x] subsystem [%04x:%04x]\n",
+ priv->dev->vendor,
+ priv->dev->device,
+ priv->dev->subsystem_vendor,
+ priv->dev->subsystem_device);
+
+ return pci_default_setup(priv, board, port, idx);
+}
+
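+/*
+ * Intel EG20T (and compatible) PCH UARTs are normally driven by the
+ * dedicated pch_uart driver; if that driver is enabled, decline the
+ * device here.
+ */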
+static int pci_eg20t_init(struct pci_dev *dev)
+{
+#if defined(CONFIG_SERIAL_PCH_UART) || defined(CONFIG_SERIAL_PCH_UART_MODULE)
+ return -ENODEV;
+#else
+ return 0;
+#endif
+}
+
+/* This should be in linux/pci_ids.h */
+#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
+#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
+#define PCI_DEVICE_ID_OCTPRO 0x0001
+#define PCI_SUBDEVICE_ID_OCTPRO232 0x0108
+#define PCI_SUBDEVICE_ID_OCTPRO422 0x0208
+#define PCI_SUBDEVICE_ID_POCTAL232 0x0308
+#define PCI_SUBDEVICE_ID_POCTAL422 0x0408
+#define PCI_VENDOR_ID_ADVANTECH 0x13fe
+#define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
+#define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620
+#define PCI_DEVICE_ID_TITAN_200I 0x8028
+#define PCI_DEVICE_ID_TITAN_400I 0x8048
+#define PCI_DEVICE_ID_TITAN_800I 0x8088
+#define PCI_DEVICE_ID_TITAN_800EH 0xA007
+#define PCI_DEVICE_ID_TITAN_800EHB 0xA008
+#define PCI_DEVICE_ID_TITAN_400EH 0xA009
+#define PCI_DEVICE_ID_TITAN_100E 0xA010
+#define PCI_DEVICE_ID_TITAN_200E 0xA012
+#define PCI_DEVICE_ID_TITAN_400E 0xA013
+#define PCI_DEVICE_ID_TITAN_800E 0xA014
+#define PCI_DEVICE_ID_TITAN_200EI 0xA016
+#define PCI_DEVICE_ID_TITAN_200EISI 0xA017
+#define PCI_DEVICE_ID_OXSEMI_16PCI958 0x9538
+#define PCIE_DEVICE_ID_NEO_2_OX_IBM 0x00F6
+#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
+
+/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
+
+/*
+ * Master list of serial port init/setup/exit quirks.
+ * This does not describe the general nature of the port.
+ * (i.e. baud base, number and location of ports, etc.)
+ *
+ * This list is ordered alphabetically by vendor then device.
+ * Specific entries must come before more generic entries.
+ */
+static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ /*
+ * ADDI-DATA GmbH communication cards <info@addi-data.com>
+ */
+ {
+ .vendor = PCI_VENDOR_ID_ADDIDATA_OLD,
+ .device = PCI_DEVICE_ID_ADDIDATA_APCI7800,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = addidata_apci7800_setup,
+ },
+ /*
+ * AFAVLAB cards - these may be called via parport_serial
+ * It is not clear whether this applies to all products.
+ */
+ {
+ .vendor = PCI_VENDOR_ID_AFAVLAB,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = afavlab_setup,
+ },
+ /*
+ * HP Diva
+ */
+ {
+ .vendor = PCI_VENDOR_ID_HP,
+ .device = PCI_DEVICE_ID_HP_DIVA,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_hp_diva_init,
+ .setup = pci_hp_diva_setup,
+ },
+ /*
+ * Intel
+ */
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_80960_RP,
+ .subvendor = 0xe4bf,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_inteli960ni_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_8257X_SOL,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = skip_tx_en_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_82573L_SOL,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = skip_tx_en_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_82573E_SOL,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = skip_tx_en_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CE4100_UART,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = ce4100_serial_setup,
+ },
+ /*
+ * ITE
+ */
+ {
+ .vendor = PCI_VENDOR_ID_ITE,
+ .device = PCI_DEVICE_ID_ITE_8872,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ite887x_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_ite887x_exit),
+ },
+ /*
+ * National Instruments
+ */
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PCI23216,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8420_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_ni8420_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PCI2328,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8420_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_ni8420_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PCI2324,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8420_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_ni8420_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PCI2322,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8420_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_ni8420_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PCI2324I,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8420_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_ni8420_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PCI2322I,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8420_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_ni8420_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PXI8420_23216,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8420_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_ni8420_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PXI8420_2328,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8420_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_ni8420_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PXI8420_2324,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8420_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_ni8420_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PXI8420_2322,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8420_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_ni8420_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PXI8422_2324,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8420_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_ni8420_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PXI8422_2322,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8420_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_ni8420_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8430_init,
+ .setup = pci_ni8430_setup,
+ .exit = __devexit_p(pci_ni8430_exit),
+ },
+ /*
+ * Panacom
+ */
+ {
+ .vendor = PCI_VENDOR_ID_PANACOM,
+ .device = PCI_DEVICE_ID_PANACOM_QUADMODEM,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_plx9050_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_plx9050_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_PANACOM,
+ .device = PCI_DEVICE_ID_PANACOM_DUALMODEM,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_plx9050_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_plx9050_exit),
+ },
+ /*
+ * PLX
+ */
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_PLX_9030,
+ .subvendor = PCI_SUBVENDOR_ID_PERLE,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_PLX_9050,
+ .subvendor = PCI_SUBVENDOR_ID_EXSYS,
+ .subdevice = PCI_SUBDEVICE_ID_EXSYS_4055,
+ .init = pci_plx9050_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_plx9050_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_PLX_9050,
+ .subvendor = PCI_SUBVENDOR_ID_KEYSPAN,
+ .subdevice = PCI_SUBDEVICE_ID_KEYSPAN_SX2,
+ .init = pci_plx9050_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_plx9050_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_PLX_9050,
+ .subvendor = PCI_VENDOR_ID_PLX,
+ .subdevice = PCI_SUBDEVICE_ID_UNKNOWN_0x1584,
+ .init = pci_plx9050_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_plx9050_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_PLX_ROMULUS,
+ .subvendor = PCI_VENDOR_ID_PLX,
+ .subdevice = PCI_DEVICE_ID_PLX_ROMULUS,
+ .init = pci_plx9050_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_plx9050_exit),
+ },
+ /*
+ * SBS Technologies, Inc., PMC-OCTALPRO 232
+ */
+ {
+ .vendor = PCI_VENDOR_ID_SBSMODULARIO,
+ .device = PCI_DEVICE_ID_OCTPRO,
+ .subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO,
+ .subdevice = PCI_SUBDEVICE_ID_OCTPRO232,
+ .init = sbs_init,
+ .setup = sbs_setup,
+ .exit = __devexit_p(sbs_exit),
+ },
+ /*
+ * SBS Technologies, Inc., PMC-OCTALPRO 422
+ */
+ {
+ .vendor = PCI_VENDOR_ID_SBSMODULARIO,
+ .device = PCI_DEVICE_ID_OCTPRO,
+ .subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO,
+ .subdevice = PCI_SUBDEVICE_ID_OCTPRO422,
+ .init = sbs_init,
+ .setup = sbs_setup,
+ .exit = __devexit_p(sbs_exit),
+ },
+ /*
+ * SBS Technologies, Inc., P-Octal 232
+ */
+ {
+ .vendor = PCI_VENDOR_ID_SBSMODULARIO,
+ .device = PCI_DEVICE_ID_OCTPRO,
+ .subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO,
+ .subdevice = PCI_SUBDEVICE_ID_POCTAL232,
+ .init = sbs_init,
+ .setup = sbs_setup,
+ .exit = __devexit_p(sbs_exit),
+ },
+ /*
+ * SBS Technologies, Inc., P-Octal 422
+ */
+ {
+ .vendor = PCI_VENDOR_ID_SBSMODULARIO,
+ .device = PCI_DEVICE_ID_OCTPRO,
+ .subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO,
+ .subdevice = PCI_SUBDEVICE_ID_POCTAL422,
+ .init = sbs_init,
+ .setup = sbs_setup,
+ .exit = __devexit_p(sbs_exit),
+ },
+ /*
+ * SIIG cards - these may be called via parport_serial
+ */
+ {
+ .vendor = PCI_VENDOR_ID_SIIG,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_siig_init,
+ .setup = pci_siig_setup,
+ },
+ /*
+ * Titan cards
+ */
+ {
+ .vendor = PCI_VENDOR_ID_TITAN,
+ .device = PCI_DEVICE_ID_TITAN_400L,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = titan_400l_800l_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_TITAN,
+ .device = PCI_DEVICE_ID_TITAN_800L,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = titan_400l_800l_setup,
+ },
+ /*
+ * Timedia cards
+ */
+ {
+ .vendor = PCI_VENDOR_ID_TIMEDIA,
+ .device = PCI_DEVICE_ID_TIMEDIA_1889,
+ .subvendor = PCI_VENDOR_ID_TIMEDIA,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_timedia_init,
+ .setup = pci_timedia_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_TIMEDIA,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_timedia_setup,
+ },
+ /*
+ * Xircom cards
+ */
+ {
+ .vendor = PCI_VENDOR_ID_XIRCOM,
+ .device = PCI_DEVICE_ID_XIRCOM_X3201_MDM,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_xircom_init,
+ .setup = pci_default_setup,
+ },
+ /*
+ * Netmos cards - these may be called via parport_serial
+ */
+ {
+ .vendor = PCI_VENDOR_ID_NETMOS,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_netmos_init,
+ .setup = pci_default_setup,
+ },
+ /*
+ * For Oxford Semiconductor Tornado based devices
+ */
+ {
+ .vendor = PCI_VENDOR_ID_OXSEMI,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_MAINPINE,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_DIGI,
+ .device = PCIE_DEVICE_ID_NEO_2_OX_IBM,
+ .subvendor = PCI_SUBVENDOR_ID_IBM,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8811,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8812,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8813,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8814,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x8027,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x8028,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x8029,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x800C,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x800D,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ /*
+ * Cronyx Omega PCI (PLX-chip based)
+ */
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_PLX_CRONYX_OMEGA,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_omegapci_setup,
+ },
+ /*
+ * Default "match everything" terminator entry
+ */
+ {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_default_setup,
+ }
+};
+
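+/*
+ * A quirk field set to PCI_ANY_ID matches anything; the catch-all terminator
+ * entry above guarantees that find_quirk() always finds a match.
+ */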
+static inline int quirk_id_matches(u32 quirk_id, u32 dev_id)
+{
+ return quirk_id == PCI_ANY_ID || quirk_id == dev_id;
+}
+
+static struct pci_serial_quirk *find_quirk(struct pci_dev *dev)
+{
+ struct pci_serial_quirk *quirk;
+
+ for (quirk = pci_serial_quirks; ; quirk++)
+ if (quirk_id_matches(quirk->vendor, dev->vendor) &&
+ quirk_id_matches(quirk->device, dev->device) &&
+ quirk_id_matches(quirk->subvendor, dev->subsystem_vendor) &&
+ quirk_id_matches(quirk->subdevice, dev->subsystem_device))
+ break;
+ return quirk;
+}
+
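+/* Boards flagged FL_NOIRQ get no interrupt line (irq 0, i.e. polled). */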
+static inline int get_pci_irq(struct pci_dev *dev,
+ const struct pciserial_board *board)
+{
+ if (board->flags & FL_NOIRQ)
+ return 0;
+ else
+ return dev->irq;
+}
+
+/*
+ * This is the configuration table for all of the PCI serial boards
+ * which we support. It is directly indexed by the pci_board_num_t enum
+ * value, which is encoded in the pci_device_id PCI probe table's
+ * driver_data member.
+ *
+ * The makeup of these names is:
+ * pbn_bn{_bt}_n_baud{_offsetinhex}
+ *
+ * bn = PCI BAR number
+ * bt = Index using PCI BARs
+ * n = number of serial ports
+ * baud = baud rate
+ * offsetinhex = offset for each sequential port (in hex)
+ *
+ * This table is sorted by (in order): bn, bt, baud, offsetinhex, n.
+ *
+ * Please note: in theory, if n = 1 the _bt infix should make no difference;
+ * i.e. pbn_b0_1_115200 is the same as pbn_b0_bt_1_115200.
+ */
+enum pci_board_num_t {
+ pbn_default = 0,
+
+ pbn_b0_1_115200,
+ pbn_b0_2_115200,
+ pbn_b0_4_115200,
+ pbn_b0_5_115200,
+ pbn_b0_8_115200,
+
+ pbn_b0_1_921600,
+ pbn_b0_2_921600,
+ pbn_b0_4_921600,
+
+ pbn_b0_2_1130000,
+
+ pbn_b0_4_1152000,
+
+ pbn_b0_2_1843200,
+ pbn_b0_4_1843200,
+
+ pbn_b0_2_1843200_200,
+ pbn_b0_4_1843200_200,
+ pbn_b0_8_1843200_200,
+
+ pbn_b0_1_4000000,
+
+ pbn_b0_bt_1_115200,
+ pbn_b0_bt_2_115200,
+ pbn_b0_bt_4_115200,
+ pbn_b0_bt_8_115200,
+
+ pbn_b0_bt_1_460800,
+ pbn_b0_bt_2_460800,
+ pbn_b0_bt_4_460800,
+
+ pbn_b0_bt_1_921600,
+ pbn_b0_bt_2_921600,
+ pbn_b0_bt_4_921600,
+ pbn_b0_bt_8_921600,
+
+ pbn_b1_1_115200,
+ pbn_b1_2_115200,
+ pbn_b1_4_115200,
+ pbn_b1_8_115200,
+ pbn_b1_16_115200,
+
+ pbn_b1_1_921600,
+ pbn_b1_2_921600,
+ pbn_b1_4_921600,
+ pbn_b1_8_921600,
+
+ pbn_b1_2_1250000,
+
+ pbn_b1_bt_1_115200,
+ pbn_b1_bt_2_115200,
+ pbn_b1_bt_4_115200,
+
+ pbn_b1_bt_2_921600,
+
+ pbn_b1_1_1382400,
+ pbn_b1_2_1382400,
+ pbn_b1_4_1382400,
+ pbn_b1_8_1382400,
+
+ pbn_b2_1_115200,
+ pbn_b2_2_115200,
+ pbn_b2_4_115200,
+ pbn_b2_8_115200,
+
+ pbn_b2_1_460800,
+ pbn_b2_4_460800,
+ pbn_b2_8_460800,
+ pbn_b2_16_460800,
+
+ pbn_b2_1_921600,
+ pbn_b2_4_921600,
+ pbn_b2_8_921600,
+
+ pbn_b2_8_1152000,
+
+ pbn_b2_bt_1_115200,
+ pbn_b2_bt_2_115200,
+ pbn_b2_bt_4_115200,
+
+ pbn_b2_bt_2_921600,
+ pbn_b2_bt_4_921600,
+
+ pbn_b3_2_115200,
+ pbn_b3_4_115200,
+ pbn_b3_8_115200,
+
+ pbn_b4_bt_2_921600,
+ pbn_b4_bt_4_921600,
+ pbn_b4_bt_8_921600,
+
+ /*
+ * Board-specific versions.
+ */
+ pbn_panacom,
+ pbn_panacom2,
+ pbn_panacom4,
+ pbn_exsys_4055,
+ pbn_plx_romulus,
+ pbn_oxsemi,
+ pbn_oxsemi_1_4000000,
+ pbn_oxsemi_2_4000000,
+ pbn_oxsemi_4_4000000,
+ pbn_oxsemi_8_4000000,
+ pbn_intel_i960,
+ pbn_sgi_ioc3,
+ pbn_computone_4,
+ pbn_computone_6,
+ pbn_computone_8,
+ pbn_sbsxrsio,
+ pbn_exar_XR17C152,
+ pbn_exar_XR17C154,
+ pbn_exar_XR17C158,
+ pbn_exar_ibm_saturn,
+ pbn_pasemi_1682M,
+ pbn_ni8430_2,
+ pbn_ni8430_4,
+ pbn_ni8430_8,
+ pbn_ni8430_16,
+ pbn_ADDIDATA_PCIe_1_3906250,
+ pbn_ADDIDATA_PCIe_2_3906250,
+ pbn_ADDIDATA_PCIe_4_3906250,
+ pbn_ADDIDATA_PCIe_8_3906250,
+ pbn_ce4100_1_115200,
+ pbn_omegapci,
+};
+
+/*
+ * uart_offset - the space between channels
+ * reg_shift - describes how the UART registers are mapped
+ * to PCI memory by the card.
+ * For example, the IER register on the SBS, Inc. PMC-OctPro is located
+ * at offset 0x10 from the UART base, while UART_IER is defined as 1 in
+ * include/linux/serial_reg.h; see the first lines of serial_in() and
+ * serial_out() in 8250.c.
+ */
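+/*
+ * With the default setup, register R of port i is therefore found at
+ * BAR base + first_offset + i * uart_offset + (R << reg_shift).
+ */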
+
+static struct pciserial_board pci_boards[] __devinitdata = {
+ [pbn_default] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b0_1_115200] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b0_2_115200] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b0_4_115200] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b0_5_115200] = {
+ .flags = FL_BASE0,
+ .num_ports = 5,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b0_8_115200] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b0_1_921600] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [pbn_b0_2_921600] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [pbn_b0_4_921600] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+
+ [pbn_b0_2_1130000] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 1130000,
+ .uart_offset = 8,
+ },
+
+ [pbn_b0_4_1152000] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 1152000,
+ .uart_offset = 8,
+ },
+
+ [pbn_b0_2_1843200] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 1843200,
+ .uart_offset = 8,
+ },
+ [pbn_b0_4_1843200] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 1843200,
+ .uart_offset = 8,
+ },
+
+ [pbn_b0_2_1843200_200] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 1843200,
+ .uart_offset = 0x200,
+ },
+ [pbn_b0_4_1843200_200] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 1843200,
+ .uart_offset = 0x200,
+ },
+ [pbn_b0_8_1843200_200] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 1843200,
+ .uart_offset = 0x200,
+ },
+ [pbn_b0_1_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 4000000,
+ .uart_offset = 8,
+ },
+
+ [pbn_b0_bt_1_115200] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b0_bt_2_115200] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b0_bt_4_115200] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b0_bt_8_115200] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 8,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+
+ [pbn_b0_bt_1_460800] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 460800,
+ .uart_offset = 8,
+ },
+ [pbn_b0_bt_2_460800] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 2,
+ .base_baud = 460800,
+ .uart_offset = 8,
+ },
+ [pbn_b0_bt_4_460800] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 4,
+ .base_baud = 460800,
+ .uart_offset = 8,
+ },
+
+ [pbn_b0_bt_1_921600] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [pbn_b0_bt_2_921600] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [pbn_b0_bt_4_921600] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [pbn_b0_bt_8_921600] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 8,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+
+ [pbn_b1_1_115200] = {
+ .flags = FL_BASE1,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b1_2_115200] = {
+ .flags = FL_BASE1,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b1_4_115200] = {
+ .flags = FL_BASE1,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b1_8_115200] = {
+ .flags = FL_BASE1,
+ .num_ports = 8,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b1_16_115200] = {
+ .flags = FL_BASE1,
+ .num_ports = 16,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+
+ [pbn_b1_1_921600] = {
+ .flags = FL_BASE1,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [pbn_b1_2_921600] = {
+ .flags = FL_BASE1,
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [pbn_b1_4_921600] = {
+ .flags = FL_BASE1,
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [pbn_b1_8_921600] = {
+ .flags = FL_BASE1,
+ .num_ports = 8,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [pbn_b1_2_1250000] = {
+ .flags = FL_BASE1,
+ .num_ports = 2,
+ .base_baud = 1250000,
+ .uart_offset = 8,
+ },
+
+ [pbn_b1_bt_1_115200] = {
+ .flags = FL_BASE1|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b1_bt_2_115200] = {
+ .flags = FL_BASE1|FL_BASE_BARS,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b1_bt_4_115200] = {
+ .flags = FL_BASE1|FL_BASE_BARS,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+
+ [pbn_b1_bt_2_921600] = {
+ .flags = FL_BASE1|FL_BASE_BARS,
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+
+ [pbn_b1_1_1382400] = {
+ .flags = FL_BASE1,
+ .num_ports = 1,
+ .base_baud = 1382400,
+ .uart_offset = 8,
+ },
+ [pbn_b1_2_1382400] = {
+ .flags = FL_BASE1,
+ .num_ports = 2,
+ .base_baud = 1382400,
+ .uart_offset = 8,
+ },
+ [pbn_b1_4_1382400] = {
+ .flags = FL_BASE1,
+ .num_ports = 4,
+ .base_baud = 1382400,
+ .uart_offset = 8,
+ },
+ [pbn_b1_8_1382400] = {
+ .flags = FL_BASE1,
+ .num_ports = 8,
+ .base_baud = 1382400,
+ .uart_offset = 8,
+ },
+
+ [pbn_b2_1_115200] = {
+ .flags = FL_BASE2,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b2_2_115200] = {
+ .flags = FL_BASE2,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b2_4_115200] = {
+ .flags = FL_BASE2,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b2_8_115200] = {
+ .flags = FL_BASE2,
+ .num_ports = 8,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+
+ [pbn_b2_1_460800] = {
+ .flags = FL_BASE2,
+ .num_ports = 1,
+ .base_baud = 460800,
+ .uart_offset = 8,
+ },
+ [pbn_b2_4_460800] = {
+ .flags = FL_BASE2,
+ .num_ports = 4,
+ .base_baud = 460800,
+ .uart_offset = 8,
+ },
+ [pbn_b2_8_460800] = {
+ .flags = FL_BASE2,
+ .num_ports = 8,
+ .base_baud = 460800,
+ .uart_offset = 8,
+ },
+ [pbn_b2_16_460800] = {
+ .flags = FL_BASE2,
+ .num_ports = 16,
+ .base_baud = 460800,
+ .uart_offset = 8,
+ },
+
+ [pbn_b2_1_921600] = {
+ .flags = FL_BASE2,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [pbn_b2_4_921600] = {
+ .flags = FL_BASE2,
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [pbn_b2_8_921600] = {
+ .flags = FL_BASE2,
+ .num_ports = 8,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+
+ [pbn_b2_8_1152000] = {
+ .flags = FL_BASE2,
+ .num_ports = 8,
+ .base_baud = 1152000,
+ .uart_offset = 8,
+ },
+
+ [pbn_b2_bt_1_115200] = {
+ .flags = FL_BASE2|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b2_bt_2_115200] = {
+ .flags = FL_BASE2|FL_BASE_BARS,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b2_bt_4_115200] = {
+ .flags = FL_BASE2|FL_BASE_BARS,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+
+ [pbn_b2_bt_2_921600] = {
+ .flags = FL_BASE2|FL_BASE_BARS,
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [pbn_b2_bt_4_921600] = {
+ .flags = FL_BASE2|FL_BASE_BARS,
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+
+ [pbn_b3_2_115200] = {
+ .flags = FL_BASE3,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b3_4_115200] = {
+ .flags = FL_BASE3,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_b3_8_115200] = {
+ .flags = FL_BASE3,
+ .num_ports = 8,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+
+ [pbn_b4_bt_2_921600] = {
+ .flags = FL_BASE4,
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [pbn_b4_bt_4_921600] = {
+ .flags = FL_BASE4,
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [pbn_b4_bt_8_921600] = {
+ .flags = FL_BASE4,
+ .num_ports = 8,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+
+ /*
+ * Entries following this are board-specific.
+ */
+
+ /*
+ * Panacom - IOMEM
+ */
+ [pbn_panacom] = {
+ .flags = FL_BASE2,
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 0x400,
+ .reg_shift = 7,
+ },
+ [pbn_panacom2] = {
+ .flags = FL_BASE2|FL_BASE_BARS,
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 0x400,
+ .reg_shift = 7,
+ },
+ [pbn_panacom4] = {
+ .flags = FL_BASE2|FL_BASE_BARS,
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 0x400,
+ .reg_shift = 7,
+ },
+
+ [pbn_exsys_4055] = {
+ .flags = FL_BASE2,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+
+ /* I think this entry is broken - the first_offset looks wrong --rmk */
+ [pbn_plx_romulus] = {
+ .flags = FL_BASE2,
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 8 << 2,
+ .reg_shift = 2,
+ .first_offset = 0x03,
+ },
+
+ /*
+ * This board uses the size of PCI Base region 0 to
+ * signal how many ports are available
+ */
+ [pbn_oxsemi] = {
+ .flags = FL_BASE0|FL_REGION_SZ_CAP,
+ .num_ports = 32,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [pbn_oxsemi_1_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_oxsemi_2_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_oxsemi_4_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_oxsemi_8_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+
+
+ /*
+ * EKF addition for i960 boards from EKF with serial port.
+ * Max 256 ports.
+ */
+ [pbn_intel_i960] = {
+ .flags = FL_BASE0,
+ .num_ports = 32,
+ .base_baud = 921600,
+ .uart_offset = 8 << 2,
+ .reg_shift = 2,
+ .first_offset = 0x10000,
+ },
+ [pbn_sgi_ioc3] = {
+ .flags = FL_BASE0|FL_NOIRQ,
+ .num_ports = 1,
+ .base_baud = 458333,
+ .uart_offset = 8,
+ .reg_shift = 0,
+ .first_offset = 0x20178,
+ },
+
+ /*
+ * Computone - uses IOMEM.
+ */
+ [pbn_computone_4] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 0x40,
+ .reg_shift = 2,
+ .first_offset = 0x200,
+ },
+ [pbn_computone_6] = {
+ .flags = FL_BASE0,
+ .num_ports = 6,
+ .base_baud = 921600,
+ .uart_offset = 0x40,
+ .reg_shift = 2,
+ .first_offset = 0x200,
+ },
+ [pbn_computone_8] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 921600,
+ .uart_offset = 0x40,
+ .reg_shift = 2,
+ .first_offset = 0x200,
+ },
+ [pbn_sbsxrsio] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 460800,
+ .uart_offset = 256,
+ .reg_shift = 4,
+ },
+ /*
+ * Exar Corp. XR17C15[248] Dual/Quad/Octal UART
+ * Only basic 16550A support.
+ * XR17C15[24] are not tested, but they should work.
+ */
+ [pbn_exar_XR17C152] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 0x200,
+ },
+ [pbn_exar_XR17C154] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 0x200,
+ },
+ [pbn_exar_XR17C158] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 921600,
+ .uart_offset = 0x200,
+ },
+ [pbn_exar_ibm_saturn] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 0x200,
+ },
+
+ /*
+ * PA Semi PWRficient PA6T-1682M on-chip UART
+ */
+ [pbn_pasemi_1682M] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 8333333,
+ },
+ /*
+ * National Instruments 843x
+ */
+ [pbn_ni8430_16] = {
+ .flags = FL_BASE0,
+ .num_ports = 16,
+ .base_baud = 3686400,
+ .uart_offset = 0x10,
+ .first_offset = 0x800,
+ },
+ [pbn_ni8430_8] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 3686400,
+ .uart_offset = 0x10,
+ .first_offset = 0x800,
+ },
+ [pbn_ni8430_4] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 3686400,
+ .uart_offset = 0x10,
+ .first_offset = 0x800,
+ },
+ [pbn_ni8430_2] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 3686400,
+ .uart_offset = 0x10,
+ .first_offset = 0x800,
+ },
+ /*
+ * ADDI-DATA GmbH PCI-Express communication cards <info@addi-data.com>
+ */
+ [pbn_ADDIDATA_PCIe_1_3906250] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 3906250,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_ADDIDATA_PCIe_2_3906250] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 3906250,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_ADDIDATA_PCIe_4_3906250] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 3906250,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_ADDIDATA_PCIe_8_3906250] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 3906250,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_ce4100_1_115200] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .reg_shift = 2,
+ },
+ [pbn_omegapci] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 115200,
+ .uart_offset = 0x200,
+ },
+};
+
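+/*
+ * Soft modems that match the communication-class heuristic below but have
+ * no 8250-compatible UART; serial_pci_guess_board() skips them.
+ */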
+static const struct pci_device_id softmodem_blacklist[] = {
+ { PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */
+ { PCI_VDEVICE(MOTOROLA, 0x3052), }, /* Motorola Si3052-based modem */
+ { PCI_DEVICE(0x1543, 0x3052), }, /* Si3052-based modem, default IDs */
+};
+
+/*
+ * Given a complete unknown PCI device, try to use some heuristics to
+ * guess what the configuration might be, based on the pitiful PCI
+ * serial specs. Returns 0 on success, -ENODEV on failure.
+ */
+static int __devinit
+serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
+{
+ const struct pci_device_id *blacklist;
+ int num_iomem, num_port, first_port = -1, i;
+
+ /*
+ * If it is not a communications device or the programming
+ * interface is greater than 6, give up.
+ *
+ * (Should we try to make guesses for multiport serial devices
+ * later?)
+ */
+ if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) &&
+ ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) ||
+ (dev->class & 0xff) > 6)
+ return -ENODEV;
+
+ /*
+ * Do not access blacklisted devices that are known not to
+ * feature serial ports.
+ */
+ for (blacklist = softmodem_blacklist;
+ blacklist < softmodem_blacklist + ARRAY_SIZE(softmodem_blacklist);
+ blacklist++) {
+ if (dev->vendor == blacklist->vendor &&
+ dev->device == blacklist->device)
+ return -ENODEV;
+ }
+
+ num_iomem = num_port = 0;
+ for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
+ if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
+ num_port++;
+ if (first_port == -1)
+ first_port = i;
+ }
+ if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
+ num_iomem++;
+ }
+
+ /*
+ * If there are 0 or 1 iomem regions and exactly one port,
+ * use it. We guess the number of ports based on the IO
+ * region size.
+ */
+ if (num_iomem <= 1 && num_port == 1) {
+ board->flags = first_port;
+ board->num_ports = pci_resource_len(dev, first_port) / 8;
+ return 0;
+ }
+
+ /*
+ * Now guess if we've got a board which indexes by BARs.
+ * Each IO BAR should be 8 bytes, and they should follow
+ * consecutively.
+ */
+ first_port = -1;
+ num_port = 0;
+ for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
+ if (pci_resource_flags(dev, i) & IORESOURCE_IO &&
+ pci_resource_len(dev, i) == 8 &&
+ (first_port == -1 || (first_port + num_port) == i)) {
+ num_port++;
+ if (first_port == -1)
+ first_port = i;
+ }
+ }
+
+ if (num_port > 1) {
+ board->flags = first_port | FL_BASE_BARS;
+ board->num_ports = num_port;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
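+/*
+ * Compare an explicit board entry with the parameters guessed from the
+ * hardware; used to warn about redundant entries in serial_pci_tbl.
+ */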
+static inline int
+serial_pci_matches(const struct pciserial_board *board,
+ const struct pciserial_board *guessed)
+{
+ return
+ board->num_ports == guessed->num_ports &&
+ board->base_baud == guessed->base_baud &&
+ board->uart_offset == guessed->uart_offset &&
+ board->reg_shift == guessed->reg_shift &&
+ board->first_offset == guessed->first_offset;
+}
+
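+/*
+ * Probe and register every port of one PCI serial device: run the init
+ * quirk, allocate the private data, then apply the setup quirk to each
+ * port and hand it to serial8250_register_port().
+ */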
+struct serial_private *
+pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
+{
+ struct uart_port serial_port;
+ struct serial_private *priv;
+ struct pci_serial_quirk *quirk;
+ int rc, nr_ports, i;
+
+ nr_ports = board->num_ports;
+
+ /*
+ * Find an init and setup quirks.
+ */
+ quirk = find_quirk(dev);
+
+ /*
+ * Run the new-style initialization function.
+ * The initialization function returns:
+ * <0 - error
+ * 0 - use board->num_ports
+ * >0 - number of ports
+ */
+ if (quirk->init) {
+ rc = quirk->init(dev);
+ if (rc < 0) {
+ priv = ERR_PTR(rc);
+ goto err_out;
+ }
+ if (rc)
+ nr_ports = rc;
+ }
+
+ priv = kzalloc(sizeof(struct serial_private) +
+ sizeof(unsigned int) * nr_ports,
+ GFP_KERNEL);
+ if (!priv) {
+ priv = ERR_PTR(-ENOMEM);
+ goto err_deinit;
+ }
+
+ priv->dev = dev;
+ priv->quirk = quirk;
+
+ memset(&serial_port, 0, sizeof(struct uart_port));
+ serial_port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
+ serial_port.uartclk = board->base_baud * 16;
+ serial_port.irq = get_pci_irq(dev, board);
+ serial_port.dev = &dev->dev;
+
+ for (i = 0; i < nr_ports; i++) {
+ if (quirk->setup(priv, board, &serial_port, i))
+ break;
+
+#ifdef SERIAL_DEBUG_PCI
+ printk(KERN_DEBUG "Setup PCI port: port %lx, irq %d, type %d\n",
+ serial_port.iobase, serial_port.irq, serial_port.iotype);
+#endif
+
+ priv->line[i] = serial8250_register_port(&serial_port);
+ if (priv->line[i] < 0) {
+ printk(KERN_WARNING "Couldn't register serial port %s: %d\n", pci_name(dev), priv->line[i]);
+ break;
+ }
+ }
+ priv->nr = i;
+ return priv;
+
+err_deinit:
+ if (quirk->exit)
+ quirk->exit(dev);
+err_out:
+ return priv;
+}
+EXPORT_SYMBOL_GPL(pciserial_init_ports);
+
+void pciserial_remove_ports(struct serial_private *priv)
+{
+ struct pci_serial_quirk *quirk;
+ int i;
+
+ for (i = 0; i < priv->nr; i++)
+ serial8250_unregister_port(priv->line[i]);
+
+ for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
+ if (priv->remapped_bar[i])
+ iounmap(priv->remapped_bar[i]);
+ priv->remapped_bar[i] = NULL;
+ }
+
+ /*
+ * Find the exit quirks.
+ */
+ quirk = find_quirk(priv->dev);
+ if (quirk->exit)
+ quirk->exit(priv->dev);
+
+ kfree(priv);
+}
+EXPORT_SYMBOL_GPL(pciserial_remove_ports);
+
+void pciserial_suspend_ports(struct serial_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->nr; i++)
+ if (priv->line[i] >= 0)
+ serial8250_suspend_port(priv->line[i]);
+}
+EXPORT_SYMBOL_GPL(pciserial_suspend_ports);
+
+void pciserial_resume_ports(struct serial_private *priv)
+{
+ int i;
+
+ /*
+ * Ensure that the board is correctly configured.
+ */
+ if (priv->quirk->init)
+ priv->quirk->init(priv->dev);
+
+ for (i = 0; i < priv->nr; i++)
+ if (priv->line[i] >= 0)
+ serial8250_resume_port(priv->line[i]);
+}
+EXPORT_SYMBOL_GPL(pciserial_resume_ports);
+
+/*
+ * Probe one serial board. Unfortunately, there is no rhyme nor reason
+ * to the arrangement of serial ports on a PCI card.
+ */
+static int __devinit
+pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+ struct serial_private *priv;
+ const struct pciserial_board *board;
+ struct pciserial_board tmp;
+ int rc;
+
+ if (ent->driver_data >= ARRAY_SIZE(pci_boards)) {
+ printk(KERN_ERR "pci_init_one: invalid driver_data: %ld\n",
+ ent->driver_data);
+ return -EINVAL;
+ }
+
+ board = &pci_boards[ent->driver_data];
+
+ rc = pci_enable_device(dev);
+ if (rc)
+ return rc;
+
+ if (ent->driver_data == pbn_default) {
+ /*
+ * Use a copy of the pci_board entry for this;
+ * avoid changing entries in the table.
+ */
+ memcpy(&tmp, board, sizeof(struct pciserial_board));
+ board = &tmp;
+
+ /*
+ * We matched one of our class entries. Try to
+ * determine the parameters of this board.
+ */
+ rc = serial_pci_guess_board(dev, &tmp);
+ if (rc)
+ goto disable;
+ } else {
+ /*
+ * We matched an explicit entry. If we are able to
+ * detect this board's settings with our heuristic,
+ * then we no longer need this entry.
+ */
+ memcpy(&tmp, &pci_boards[pbn_default],
+ sizeof(struct pciserial_board));
+ rc = serial_pci_guess_board(dev, &tmp);
+ if (rc == 0 && serial_pci_matches(board, &tmp))
+ moan_device("Redundant entry in serial pci_table.",
+ dev);
+ }
+
+ priv = pciserial_init_ports(dev, board);
+ if (!IS_ERR(priv)) {
+ pci_set_drvdata(dev, priv);
+ return 0;
+ }
+
+ rc = PTR_ERR(priv);
+
+ disable:
+ pci_disable_device(dev);
+ return rc;
+}
+
+static void __devexit pciserial_remove_one(struct pci_dev *dev)
+{
+ struct serial_private *priv = pci_get_drvdata(dev);
+
+ pci_set_drvdata(dev, NULL);
+
+ pciserial_remove_ports(priv);
+
+ pci_disable_device(dev);
+}
+
+#ifdef CONFIG_PM
+static int pciserial_suspend_one(struct pci_dev *dev, pm_message_t state)
+{
+ struct serial_private *priv = pci_get_drvdata(dev);
+
+ if (priv)
+ pciserial_suspend_ports(priv);
+
+ pci_save_state(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
+ return 0;
+}
+
+static int pciserial_resume_one(struct pci_dev *dev)
+{
+ int err;
+ struct serial_private *priv = pci_get_drvdata(dev);
+
+ pci_set_power_state(dev, PCI_D0);
+ pci_restore_state(dev);
+
+ if (priv) {
+ /*
+ * The device may have been disabled. Re-enable it.
+ */
+ err = pci_enable_device(dev);
+ /* FIXME: We cannot simply error out here */
+ if (err)
+ printk(KERN_ERR "pciserial: Unable to re-enable ports, trying to continue.\n");
+ pciserial_resume_ports(priv);
+ }
+ return 0;
+}
+#endif
+
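+/*
+ * PCI device ID match table. The driver_data of each entry is a
+ * pci_board_num_t index into pci_boards[] above.
+ */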
+static struct pci_device_id serial_pci_tbl[] = {
+ /* Advantech use PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */
+ { PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620,
+ PCI_DEVICE_ID_ADVANTECH_PCI3620, 0x0001, 0, 0,
+ pbn_b2_8_921600 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V960,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232, 0, 0,
+ pbn_b1_8_1382400 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V960,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232, 0, 0,
+ pbn_b1_4_1382400 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V960,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232, 0, 0,
+ pbn_b1_2_1382400 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232, 0, 0,
+ pbn_b1_8_1382400 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232, 0, 0,
+ pbn_b1_4_1382400 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232, 0, 0,
+ pbn_b1_2_1382400 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485, 0, 0,
+ pbn_b1_8_921600 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_4_4, 0, 0,
+ pbn_b1_8_921600 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485, 0, 0,
+ pbn_b1_4_921600 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485_2_2, 0, 0,
+ pbn_b1_4_921600 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_485, 0, 0,
+ pbn_b1_2_921600 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_2_6, 0, 0,
+ pbn_b1_8_921600 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH081101V1, 0, 0,
+ pbn_b1_8_921600 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1, 0, 0,
+ pbn_b1_4_921600 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_20MHZ, 0, 0,
+ pbn_b1_2_1250000 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_2, 0, 0,
+ pbn_b0_2_1843200 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_4, 0, 0,
+ pbn_b0_4_1843200 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
+ PCI_VENDOR_ID_AFAVLAB,
+ PCI_SUBDEVICE_ID_AFAVLAB_P061, 0, 0,
+ pbn_b0_4_1152000 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_232, 0, 0,
+ pbn_b0_2_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C154,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_232, 0, 0,
+ pbn_b0_4_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C158,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_232, 0, 0,
+ pbn_b0_8_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_1_1, 0, 0,
+ pbn_b0_2_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C154,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_2, 0, 0,
+ pbn_b0_4_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C158,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4, 0, 0,
+ pbn_b0_8_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2, 0, 0,
+ pbn_b0_2_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C154,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4, 0, 0,
+ pbn_b0_4_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C158,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8, 0, 0,
+ pbn_b0_8_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_485, 0, 0,
+ pbn_b0_2_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C154,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_485, 0, 0,
+ pbn_b0_4_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C158,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485, 0, 0,
+ pbn_b0_8_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152,
+ PCI_VENDOR_ID_IBM, PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT,
+ 0, 0, pbn_exar_ibm_saturn },
+
+ { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_U530,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_bt_1_115200 },
+ { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_bt_2_115200 },
+ { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM422,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_bt_4_115200 },
+ { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM232,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_bt_2_115200 },
+ { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_COMM4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_bt_4_115200 },
+ { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_COMM8,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_8_115200 },
+ { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_7803,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_8_460800 },
+ { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM8,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_8_115200 },
+
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_GTEK_SERIAL2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_bt_2_115200 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_SPCOM200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_bt_2_921600 },
+ /*
+ * VScom SPCOM800, from sl@s.pl
+ */
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_SPCOM800,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_8_921600 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_1077,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_4_921600 },
+ /* Unknown card - subdevice 0x1584 */
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_VENDOR_ID_PLX,
+ PCI_SUBDEVICE_ID_UNKNOWN_0x1584, 0, 0,
+ pbn_b0_4_115200 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_SUBVENDOR_ID_KEYSPAN,
+ PCI_SUBDEVICE_ID_KEYSPAN_SX2, 0, 0,
+ pbn_panacom },
+ { PCI_VENDOR_ID_PANACOM, PCI_DEVICE_ID_PANACOM_QUADMODEM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_panacom4 },
+ { PCI_VENDOR_ID_PANACOM, PCI_DEVICE_ID_PANACOM_DUALMODEM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_panacom2 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+ PCI_VENDOR_ID_ESDGMBH,
+ PCI_DEVICE_ID_ESDGMBH_CPCIASIO4, 0, 0,
+ pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_SUBVENDOR_ID_CHASE_PCIFAST,
+ PCI_SUBDEVICE_ID_CHASE_PCIFAST4, 0, 0,
+ pbn_b2_4_460800 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_SUBVENDOR_ID_CHASE_PCIFAST,
+ PCI_SUBDEVICE_ID_CHASE_PCIFAST8, 0, 0,
+ pbn_b2_8_460800 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_SUBVENDOR_ID_CHASE_PCIFAST,
+ PCI_SUBDEVICE_ID_CHASE_PCIFAST16, 0, 0,
+ pbn_b2_16_460800 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_SUBVENDOR_ID_CHASE_PCIFAST,
+ PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC, 0, 0,
+ pbn_b2_16_460800 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_SUBVENDOR_ID_CHASE_PCIRAS,
+ PCI_SUBDEVICE_ID_CHASE_PCIRAS4, 0, 0,
+ pbn_b2_4_460800 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_SUBVENDOR_ID_CHASE_PCIRAS,
+ PCI_SUBDEVICE_ID_CHASE_PCIRAS8, 0, 0,
+ pbn_b2_8_460800 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_SUBVENDOR_ID_EXSYS,
+ PCI_SUBDEVICE_ID_EXSYS_4055, 0, 0,
+ pbn_exsys_4055 },
+ /*
+ * Megawolf Romulus PCI Serial Card, from Mike Hudson
+ * (Exoray@isys.ca)
+ */
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_ROMULUS,
+ 0x10b5, 0x106a, 0, 0,
+ pbn_plx_romulus },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSC100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_4_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_2_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESC100D,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_8_115200 },
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESC100M,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_8_115200 },
+ { PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_OXSEMI_16PCI954,
+ PCI_VENDOR_ID_SPECIALIX, PCI_SUBDEVICE_ID_SPECIALIX_SPEED4,
+ 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
+ PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL,
+ 0, 0,
+ pbn_b0_4_1152000 },
+ { PCI_VENDOR_ID_OXSEMI, 0x9505,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_921600 },
+
+ /*
+ * The card below is a little controversial since it is the
+ * subject of a PCI vendor/device ID clash. (See
+ * www.ussg.iu.edu/hypermail/linux/kernel/0303.1/0516.html).
+ * For now, just use the hex ID 0x950a.
+ */
+ { PCI_VENDOR_ID_OXSEMI, 0x950a,
+ PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL, 0, 0,
+ pbn_b0_2_115200 },
+ { PCI_VENDOR_ID_OXSEMI, 0x950a,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_2_1130000 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_C950,
+ PCI_VENDOR_ID_OXSEMI, PCI_SUBDEVICE_ID_OXSEMI_C950, 0, 0,
+ pbn_b0_1_921600 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_115200 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_921600 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI958,
+ PCI_ANY_ID , PCI_ANY_ID, 0, 0,
+ pbn_b2_8_1152000 },
+
+ /*
+ * Oxford Semiconductor Inc. Tornado PCI express device range.
+ */
+ { PCI_VENDOR_ID_OXSEMI, 0xc101, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc105, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc11b, /* OXPCIe952 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc11f, /* OXPCIe952 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc120, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc124, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc138, /* OXPCIe952 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc13d, /* OXPCIe952 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc140, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc141, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc144, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc145, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc158, /* OXPCIe952 2 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_2_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc15d, /* OXPCIe952 2 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_2_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc208, /* OXPCIe954 4 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_4_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc20d, /* OXPCIe954 4 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_4_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc308, /* OXPCIe958 8 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_8_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc30d, /* OXPCIe958 8 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_8_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc40b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc40f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc41b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc41f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc42b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc42f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc43b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc43f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc44b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc44f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc45b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc45f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc46b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc46f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc47b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc47f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc48b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc48f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc49b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc49f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc4ab, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc4af, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc4bb, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc4bf, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc4cb, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc4cf, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ /*
+ * Mainpine Inc. IQ Express "Rev3" utilizing OxSemi Tornado
+ */
+ { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 1 Port V.34 Super-G3 Fax */
+ PCI_VENDOR_ID_MAINPINE, 0x4001, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 2 Port V.34 Super-G3 Fax */
+ PCI_VENDOR_ID_MAINPINE, 0x4002, 0, 0,
+ pbn_oxsemi_2_4000000 },
+ { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 4 Port V.34 Super-G3 Fax */
+ PCI_VENDOR_ID_MAINPINE, 0x4004, 0, 0,
+ pbn_oxsemi_4_4000000 },
+ { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 8 Port V.34 Super-G3 Fax */
+ PCI_VENDOR_ID_MAINPINE, 0x4008, 0, 0,
+ pbn_oxsemi_8_4000000 },
+
+ /*
+ * Digi/IBM PCIe 2-port Async EIA-232 Adapter utilizing OxSemi Tornado
+ */
+ { PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_2_OX_IBM,
+ PCI_SUBVENDOR_ID_IBM, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_2_4000000 },
+
+ /*
+ * SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards,
+ * from skokodyn@yahoo.com
+ */
+ { PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO,
+ PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_OCTPRO232, 0, 0,
+ pbn_sbsxrsio },
+ { PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO,
+ PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_OCTPRO422, 0, 0,
+ pbn_sbsxrsio },
+ { PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO,
+ PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_POCTAL232, 0, 0,
+ pbn_sbsxrsio },
+ { PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO,
+ PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_POCTAL422, 0, 0,
+ pbn_sbsxrsio },
+
+ /*
+ * Digitan DS560-558, from jimd@esoft.com
+ */
+ { PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_ATT_VENUS_MODEM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_1_115200 },
+
+ /*
+ * Titan Electronic cards
+ * The 400L and 800L have a custom setup quirk.
+ */
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_2_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100L,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_1_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200L,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_bt_2_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400L,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800L,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_8_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200I,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b4_bt_2_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400I,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b4_bt_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800I,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b4_bt_8_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400EH,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800EH,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800EHB,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100E,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200E,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_2_4000000 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400E,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_4_4000000 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800E,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_8_4000000 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EI,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_2_4000000 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_2_4000000 },
+
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_10x_550,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_1_460800 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_10x_650,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_1_460800 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_10x_850,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_1_460800 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_10x_550,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_bt_2_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_10x_650,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_bt_2_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_10x_850,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_bt_2_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_10x_550,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_bt_4_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_10x_650,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_bt_4_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_10x_850,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_bt_4_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_20x_550,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_20x_650,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_20x_850,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_20x_550,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_20x_650,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S_20x_850,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_20x_550,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_4_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_20x_650,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_4_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_4S_20x_850,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_4_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_8S_20x_550,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_8_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_8S_20x_650,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_8_921600 },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_8S_20x_850,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_8_921600 },
+
+ /*
+ * Computone devices submitted by Doug McNash dmcnash@computone.com
+ */
+ { PCI_VENDOR_ID_COMPUTONE, PCI_DEVICE_ID_COMPUTONE_PG,
+ PCI_SUBVENDOR_ID_COMPUTONE, PCI_SUBDEVICE_ID_COMPUTONE_PG4,
+ 0, 0, pbn_computone_4 },
+ { PCI_VENDOR_ID_COMPUTONE, PCI_DEVICE_ID_COMPUTONE_PG,
+ PCI_SUBVENDOR_ID_COMPUTONE, PCI_SUBDEVICE_ID_COMPUTONE_PG8,
+ 0, 0, pbn_computone_8 },
+ { PCI_VENDOR_ID_COMPUTONE, PCI_DEVICE_ID_COMPUTONE_PG,
+ PCI_SUBVENDOR_ID_COMPUTONE, PCI_SUBDEVICE_ID_COMPUTONE_PG6,
+ 0, 0, pbn_computone_6 },
+
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI95N,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi },
+ { PCI_VENDOR_ID_TIMEDIA, PCI_DEVICE_ID_TIMEDIA_1889,
+ PCI_VENDOR_ID_TIMEDIA, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_1_921600 },
+
+ /*
+ * AFAVLAB serial card, from Harald Welte <laforge@gnumonks.org>
+ */
+ { PCI_VENDOR_ID_AFAVLAB, PCI_DEVICE_ID_AFAVLAB_P028,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_8_115200 },
+ { PCI_VENDOR_ID_AFAVLAB, PCI_DEVICE_ID_AFAVLAB_P030,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_8_115200 },
+
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_DSERIAL,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_115200 },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATRO_A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_115200 },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATRO_B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_115200 },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_115200 },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_115200 },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_4_460800 },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_4_460800 },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_PORT_PLUS,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_460800 },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUAD_A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_460800 },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUAD_B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_460800 },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_SSERIAL,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_1_115200 },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_PORT_650,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_1_460800 },
+
+ /*
+ * Korenix Jetcard F0/F1 cards (JC1204, JC1208, JC1404, JC1408).
+ * Cards are identified by their subsystem vendor IDs, which
+ * (in hex) match the model number.
+ *
+ * Note that JC140x are RS422/485 cards which require ox950
+ * ACR = 0x10, and as such are not currently fully supported.
+ */
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0,
+ 0x1204, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0,
+ 0x1208, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
+/* { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0,
+ 0x1402, 0x0002, 0, 0,
+ pbn_b0_2_921600 }, */
+/* { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF0,
+ 0x1404, 0x0004, 0, 0,
+ pbn_b0_4_921600 }, */
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF1,
+ 0x1208, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
+
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2,
+ 0x1204, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2,
+ 0x1208, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF3,
+ 0x1208, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
+ /*
+ * Dell Remote Access Card 4 - Tim_T_Murphy@Dell.com
+ */
+ { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_RAC4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_1_1382400 },
+
+ /*
+ * Dell Remote Access Card III - Tim_T_Murphy@Dell.com
+ */
+ { PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_RACIII,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_1_1382400 },
+
+ /*
+ * RAStel 2 port modem, gerg@moreton.com.au
+ */
+ { PCI_VENDOR_ID_MORETON, PCI_DEVICE_ID_RASTEL_2PORT,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_bt_2_115200 },
+
+ /*
+ * EKF addition for i960 boards from EKF with serial port
+ */
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80960_RP,
+ 0xE4BF, PCI_ANY_ID, 0, 0,
+ pbn_intel_i960 },
+
+ /*
+ * Xircom Cardbus/Ethernet combos
+ */
+ { PCI_VENDOR_ID_XIRCOM, PCI_DEVICE_ID_XIRCOM_X3201_MDM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_115200 },
+ /*
+ * Xircom RBM56G cardbus modem - Dirk Arnold (temp entry)
+ */
+ { PCI_VENDOR_ID_XIRCOM, PCI_DEVICE_ID_XIRCOM_RBM56G,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_115200 },
+
+ /*
+ * Untested PCI modems, sent in from various folks...
+ */
+
+ /*
+ * Elsa Model 56K PCI Modem, from Andreas Rath <arh@01019freenet.de>
+ */
+ { PCI_VENDOR_ID_ROCKWELL, 0x1004,
+ 0x1048, 0x1500, 0, 0,
+ pbn_b1_1_115200 },
+
+ { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
+ 0xFF00, 0, 0, 0,
+ pbn_sgi_ioc3 },
+
+ /*
+ * HP Diva card
+ */
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA,
+ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_RMP3, 0, 0,
+ pbn_b1_1_115200 },
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_5_115200 },
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b2_1_115200 },
+
+ { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b3_2_115200 },
+ { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b3_4_115200 },
+ { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM8,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b3_8_115200 },
+
+ /*
+ * Exar Corp. XR17C15[248] Dual/Quad/Octal UART
+ */
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0,
+ 0, pbn_exar_XR17C152 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C154,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0,
+ 0, pbn_exar_XR17C154 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C158,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0,
+ 0, pbn_exar_XR17C158 },
+
+ /*
+ * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
+ */
+ { PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_115200 },
+ /*
+ * ITE
+ */
+ { PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8872,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b1_bt_1_115200 },
+
+ /*
+ * IntaShield IS-200
+ */
+ { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0811 */
+ pbn_b2_2_115200 },
+ /*
+ * IntaShield IS-400
+ */
+ { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
+ pbn_b2_4_115200 },
+ /*
+ * Perle PCI-RAS cards
+ */
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+ PCI_SUBVENDOR_ID_PERLE, PCI_SUBDEVICE_ID_PCI_RAS4,
+ 0, 0, pbn_b2_4_921600 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+ PCI_SUBVENDOR_ID_PERLE, PCI_SUBDEVICE_ID_PCI_RAS8,
+ 0, 0, pbn_b2_8_921600 },
+
+ /*
+ * Mainpine series cards: Fairly standard layout but fools
+ * parts of the autodetect in some cases and uses otherwise
+ * unmatched communications subclasses in the PCI Express case
+ */
+
+ { /* RockForceDUO */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x0200,
+ 0, 0, pbn_b0_2_115200 },
+ { /* RockForceQUATRO */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x0300,
+ 0, 0, pbn_b0_4_115200 },
+ { /* RockForceDUO+ */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x0400,
+ 0, 0, pbn_b0_2_115200 },
+ { /* RockForceQUATRO+ */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x0500,
+ 0, 0, pbn_b0_4_115200 },
+ { /* RockForce+ */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x0600,
+ 0, 0, pbn_b0_2_115200 },
+ { /* RockForce+ */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x0700,
+ 0, 0, pbn_b0_4_115200 },
+ { /* RockForceOCTO+ */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x0800,
+ 0, 0, pbn_b0_8_115200 },
+ { /* RockForceDUO+ */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x0C00,
+ 0, 0, pbn_b0_2_115200 },
+ { /* RockForceQUATRO+ */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x0D00,
+ 0, 0, pbn_b0_4_115200 },
+ { /* RockForceOCTO+ */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x1D00,
+ 0, 0, pbn_b0_8_115200 },
+ { /* RockForceD1 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x2000,
+ 0, 0, pbn_b0_1_115200 },
+ { /* RockForceF1 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x2100,
+ 0, 0, pbn_b0_1_115200 },
+ { /* RockForceD2 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x2200,
+ 0, 0, pbn_b0_2_115200 },
+ { /* RockForceF2 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x2300,
+ 0, 0, pbn_b0_2_115200 },
+ { /* RockForceD4 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x2400,
+ 0, 0, pbn_b0_4_115200 },
+ { /* RockForceF4 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x2500,
+ 0, 0, pbn_b0_4_115200 },
+ { /* RockForceD8 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x2600,
+ 0, 0, pbn_b0_8_115200 },
+ { /* RockForceF8 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x2700,
+ 0, 0, pbn_b0_8_115200 },
+ { /* IQ Express D1 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x3000,
+ 0, 0, pbn_b0_1_115200 },
+ { /* IQ Express F1 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x3100,
+ 0, 0, pbn_b0_1_115200 },
+ { /* IQ Express D2 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x3200,
+ 0, 0, pbn_b0_2_115200 },
+ { /* IQ Express F2 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x3300,
+ 0, 0, pbn_b0_2_115200 },
+ { /* IQ Express D4 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x3400,
+ 0, 0, pbn_b0_4_115200 },
+ { /* IQ Express F4 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x3500,
+ 0, 0, pbn_b0_4_115200 },
+ { /* IQ Express D8 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x3C00,
+ 0, 0, pbn_b0_8_115200 },
+ { /* IQ Express F8 */
+ PCI_VENDOR_ID_MAINPINE, PCI_DEVICE_ID_MAINPINE_PBRIDGE,
+ PCI_VENDOR_ID_MAINPINE, 0x3D00,
+ 0, 0, pbn_b0_8_115200 },
+
+
+ /*
+ * PA Semi PA6T-1682M on-chip UART
+ */
+ { PCI_VENDOR_ID_PASEMI, 0xa004,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pasemi_1682M },
+
+ /*
+ * National Instruments
+ */
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI23216,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_16_115200 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2328,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_8_115200 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2324,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_bt_4_115200 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2322,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_bt_2_115200 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2324I,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_bt_4_115200 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI2322I,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_bt_2_115200 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8420_23216,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_16_115200 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8420_2328,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_8_115200 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8420_2324,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_bt_4_115200 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8420_2322,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_bt_2_115200 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8422_2324,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_bt_4_115200 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8422_2322,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_bt_2_115200 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8430_2322,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_2 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8430_2322,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_2 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8430_2324,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_4 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8430_2324,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_4 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8430_2328,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_8 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8430_2328,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_8 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8430_23216,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_16 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8430_23216,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_16 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8432_2322,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_2 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8432_2322,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_2 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8432_2324,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_4 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8432_2324,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_4 },
+
+ /*
+ * ADDI-DATA GmbH communication cards <info@addi-data.com>
+ */
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7500,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_4_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7420,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_2_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7300,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA_OLD,
+ PCI_DEVICE_ID_ADDIDATA_APCI7800,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b1_8_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7500_2,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_4_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7420_2,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_2_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7300_2,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7500_3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_4_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7420_3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_2_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7300_3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7800_3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_8_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCIe7500,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_ADDIDATA_PCIe_4_3906250 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCIe7420,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_ADDIDATA_PCIe_2_3906250 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCIe7300,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_ADDIDATA_PCIe_1_3906250 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCIe7800,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_ADDIDATA_PCIe_8_3906250 },
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835,
+ PCI_VENDOR_ID_IBM, 0x0299,
+ 0, 0, pbn_b0_bt_2_115200 },
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
+ /*
+ * Best Connectivity and Rosewill PCI Multi I/O cards
+ */
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x3002,
+ 0, 0, pbn_b0_bt_2_115200 },
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x3004,
+ 0, 0, pbn_b0_bt_4_115200 },
+ /* Intel CE4100 */
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CE4100_UART,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ce4100_1_115200 },
+
+ /*
+ * Cronyx Omega PCI
+ */
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_CRONYX_OMEGA,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_omegapci },
+
+ /*
+ * These entries match devices with class COMMUNICATION_SERIAL,
+ * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
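+ * (the class value is shifted left by 8 to line up with the 24-bit PCI
+ * class code, and the 0xffff00 class_mask compares only the base-class
+ * and sub-class bytes, ignoring the programming-interface byte)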
+ */
+ { PCI_ANY_ID, PCI_ANY_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL << 8,
+ 0xffff00, pbn_default },
+ { PCI_ANY_ID, PCI_ANY_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_MODEM << 8,
+ 0xffff00, pbn_default },
+ { PCI_ANY_ID, PCI_ANY_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8,
+ 0xffff00, pbn_default },
+ { 0, }
+};
+
+static struct pci_driver serial_pci_driver = {
+ .name = "serial",
+ .probe = pciserial_init_one,
+ .remove = __devexit_p(pciserial_remove_one),
+#ifdef CONFIG_PM
+ .suspend = pciserial_suspend_one,
+ .resume = pciserial_resume_one,
+#endif
+ .id_table = serial_pci_tbl,
+};
+
+static int __init serial8250_pci_init(void)
+{
+ return pci_register_driver(&serial_pci_driver);
+}
+
+static void __exit serial8250_pci_exit(void)
+{
+ pci_unregister_driver(&serial_pci_driver);
+}
+
+module_init(serial8250_pci_init);
+module_exit(serial8250_pci_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Generic 8250/16x50 PCI serial probe module");
+MODULE_DEVICE_TABLE(pci, serial_pci_tbl);
diff --git a/drivers/tty/serial/8250_pnp.c b/drivers/tty/serial/8250_pnp.c
new file mode 100644
index 00000000..a2f23651
--- /dev/null
+++ b/drivers/tty/serial/8250_pnp.c
@@ -0,0 +1,524 @@
+/*
+ * Probe module for 8250/16550-type ISAPNP serial ports.
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * Copyright (C) 2001 Russell King, All Rights Reserved.
+ *
+ * Ported to the Linux PnP Layer - (C) Adam Belay.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pnp.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/serial_core.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+
+#include "8250.h"
+
+#define UNKNOWN_DEV 0x3000
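+/* UNKNOWN_DEV marks the catch-all "PNPCXXX"/"PNPDXXX" entries below;
+ * when set in driver_data, the probe falls back to serial_pnp_guess_board(). */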
+
+
+static const struct pnp_device_id pnp_dev_table[] = {
+ /* Archtek America Corp. */
+ /* Archtek SmartLink Modem 3334BT Plug & Play */
+ { "AAC000F", 0 },
+ /* Anchor Datacomm BV */
+ /* SXPro 144 External Data Fax Modem Plug & Play */
+ { "ADC0001", 0 },
+ /* SXPro 288 External Data Fax Modem Plug & Play */
+ { "ADC0002", 0 },
+ /* PROLiNK 1456VH ISA PnP K56flex Fax Modem */
+ { "AEI0250", 0 },
+ /* Actiontec ISA PNP 56K X2 Fax Modem */
+ { "AEI1240", 0 },
+ /* Rockwell 56K ACF II Fax+Data+Voice Modem */
+ { "AKY1021", 0 /*SPCI_FL_NO_SHIRQ*/ },
+ /* AZT3005 PnP SOUND DEVICE */
+ { "AZT4001", 0 },
+ /* Best Data Products Inc. Smart One 336F PnP Modem */
+ { "BDP3336", 0 },
+ /* Boca Research */
+ /* Boca Complete Ofc Communicator 14.4 Data-FAX */
+ { "BRI0A49", 0 },
+ /* Boca Research 33,600 ACF Modem */
+ { "BRI1400", 0 },
+ /* Boca 33.6 Kbps Internal FD34FSVD */
+ { "BRI3400", 0 },
+ /* Computer Peripherals Inc */
+ /* EuroViVa CommCenter-33.6 SP PnP */
+ { "CPI4050", 0 },
+ /* Creative Labs */
+ /* Creative Labs Phone Blaster 28.8 DSVD PnP Voice */
+ { "CTL3001", 0 },
+ /* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */
+ { "CTL3011", 0 },
+ /* Davicom ISA 33.6K Modem */
+ { "DAV0336", 0 },
+ /* Creative */
+ /* Creative Modem Blaster Flash56 DI5601-1 */
+ { "DMB1032", 0 },
+ /* Creative Modem Blaster V.90 DI5660 */
+ { "DMB2001", 0 },
+ /* E-Tech */
+ /* E-Tech CyberBULLET PC56RVP */
+ { "ETT0002", 0 },
+ /* FUJITSU */
+ /* Fujitsu 33600 PnP-I2 R Plug & Play */
+ { "FUJ0202", 0 },
+ /* Fujitsu FMV-FX431 Plug & Play */
+ { "FUJ0205", 0 },
+ /* Fujitsu 33600 PnP-I4 R Plug & Play */
+ { "FUJ0206", 0 },
+ /* Fujitsu Fax Voice 33600 PNP-I5 R Plug & Play */
+ { "FUJ0209", 0 },
+ /* Archtek America Corp. */
+ /* Archtek SmartLink Modem 3334BT Plug & Play */
+ { "GVC000F", 0 },
+ /* Archtek SmartLink Modem 3334BRV 33.6K Data Fax Voice */
+ { "GVC0303", 0 },
+ /* Hayes */
+ /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */
+ { "HAY0001", 0 },
+ /* Hayes Optima 336 V.34 + FAX + Voice PnP */
+ { "HAY000C", 0 },
+ /* Hayes Optima 336B V.34 + FAX + Voice PnP */
+ { "HAY000D", 0 },
+ /* Hayes Accura 56K Ext Fax Modem PnP */
+ { "HAY5670", 0 },
+ /* Hayes Accura 56K Ext Fax Modem PnP */
+ { "HAY5674", 0 },
+ /* Hayes Accura 56K Fax Modem PnP */
+ { "HAY5675", 0 },
+ /* Hayes 288, V.34 + FAX */
+ { "HAYF000", 0 },
+ /* Hayes Optima 288 V.34 + FAX + Voice, Plug & Play */
+ { "HAYF001", 0 },
+ /* IBM */
+ /* IBM Thinkpad 701 Internal Modem Voice */
+ { "IBM0033", 0 },
+ /* Intermec */
+ /* Intermec CV60 touchscreen port */
+ { "PNP4972", 0 },
+ /* Intertex */
+ /* Intertex 28k8 33k6 Voice EXT PnP */
+ { "IXDC801", 0 },
+ /* Intertex 33k6 56k Voice EXT PnP */
+ { "IXDC901", 0 },
+ /* Intertex 28k8 33k6 Voice SP EXT PnP */
+ { "IXDD801", 0 },
+ /* Intertex 33k6 56k Voice SP EXT PnP */
+ { "IXDD901", 0 },
+ /* Intertex 28k8 33k6 Voice SP INT PnP */
+ { "IXDF401", 0 },
+ /* Intertex 28k8 33k6 Voice SP EXT PnP */
+ { "IXDF801", 0 },
+ /* Intertex 33k6 56k Voice SP EXT PnP */
+ { "IXDF901", 0 },
+ /* Kortex International */
+ /* KORTEX 28800 Externe PnP */
+ { "KOR4522", 0 },
+ /* KXPro 33.6 Vocal ASVD PnP */
+ { "KORF661", 0 },
+ /* Lasat */
+ /* LASAT Internet 33600 PnP */
+ { "LAS4040", 0 },
+ /* Lasat Safire 560 PnP */
+ { "LAS4540", 0 },
+ /* Lasat Safire 336 PnP */
+ { "LAS5440", 0 },
+ /* Microcom, Inc. */
+ /* Microcom TravelPorte FAST V.34 Plug & Play */
+ { "MNP0281", 0 },
+ /* Microcom DeskPorte V.34 FAST or FAST+ Plug & Play */
+ { "MNP0336", 0 },
+ /* Microcom DeskPorte FAST EP 28.8 Plug & Play */
+ { "MNP0339", 0 },
+ /* Microcom DeskPorte 28.8P Plug & Play */
+ { "MNP0342", 0 },
+ /* Microcom DeskPorte FAST ES 28.8 Plug & Play */
+ { "MNP0500", 0 },
+ /* Microcom DeskPorte FAST ES 28.8 Plug & Play */
+ { "MNP0501", 0 },
+ /* Microcom DeskPorte 28.8S Internal Plug & Play */
+ { "MNP0502", 0 },
+ /* Motorola */
+ /* Motorola BitSURFR Plug & Play */
+ { "MOT1105", 0 },
+ /* Motorola TA210 Plug & Play */
+ { "MOT1111", 0 },
+ /* Motorola HMTA 200 (ISDN) Plug & Play */
+ { "MOT1114", 0 },
+ /* Motorola BitSURFR Plug & Play */
+ { "MOT1115", 0 },
+ /* Motorola Lifestyle 28.8 Internal */
+ { "MOT1190", 0 },
+ /* Motorola V.3400 Plug & Play */
+ { "MOT1501", 0 },
+ /* Motorola Lifestyle 28.8 V.34 Plug & Play */
+ { "MOT1502", 0 },
+ /* Motorola Power 28.8 V.34 Plug & Play */
+ { "MOT1505", 0 },
+ /* Motorola ModemSURFR External 28.8 Plug & Play */
+ { "MOT1509", 0 },
+ /* Motorola Premier 33.6 Desktop Plug & Play */
+ { "MOT150A", 0 },
+ /* Motorola VoiceSURFR 56K External PnP */
+ { "MOT150F", 0 },
+ /* Motorola ModemSURFR 56K External PnP */
+ { "MOT1510", 0 },
+ /* Motorola ModemSURFR 56K Internal PnP */
+ { "MOT1550", 0 },
+ /* Motorola ModemSURFR Internal 28.8 Plug & Play */
+ { "MOT1560", 0 },
+ /* Motorola Premier 33.6 Internal Plug & Play */
+ { "MOT1580", 0 },
+ /* Motorola OnlineSURFR 28.8 Internal Plug & Play */
+ { "MOT15B0", 0 },
+ /* Motorola VoiceSURFR 56K Internal PnP */
+ { "MOT15F0", 0 },
+ /* Com 1 */
+ /* Deskline K56 Phone System PnP */
+ { "MVX00A1", 0 },
+ /* PC Rider K56 Phone System PnP */
+ { "MVX00F2", 0 },
+ /* NEC 98NOTE SPEAKER PHONE FAX MODEM(33600bps) */
+ { "nEC8241", 0 },
+ /* Pace 56 Voice Internal Plug & Play Modem */
+ { "PMC2430", 0 },
+ /* Generic */
+ /* Generic standard PC COM port */
+ { "PNP0500", 0 },
+ /* Generic 16550A-compatible COM port */
+ { "PNP0501", 0 },
+ /* Compaq 14400 Modem */
+ { "PNPC000", 0 },
+ /* Compaq 2400/9600 Modem */
+ { "PNPC001", 0 },
+ /* Dial-Up Networking Serial Cable between 2 PCs */
+ { "PNPC031", 0 },
+ /* Dial-Up Networking Parallel Cable between 2 PCs */
+ { "PNPC032", 0 },
+ /* Standard 9600 bps Modem */
+ { "PNPC100", 0 },
+ /* Standard 14400 bps Modem */
+ { "PNPC101", 0 },
+ /* Standard 28800 bps Modem */
+ { "PNPC102", 0 },
+ /* Standard Modem */
+ { "PNPC103", 0 },
+ /* Standard 9600 bps Modem */
+ { "PNPC104", 0 },
+ /* Standard 14400 bps Modem */
+ { "PNPC105", 0 },
+ /* Standard 28800 bps Modem */
+ { "PNPC106", 0 },
+ /* Standard Modem */
+ { "PNPC107", 0 },
+ /* Standard 9600 bps Modem */
+ { "PNPC108", 0 },
+ /* Standard 14400 bps Modem */
+ { "PNPC109", 0 },
+ /* Standard 28800 bps Modem */
+ { "PNPC10A", 0 },
+ /* Standard Modem */
+ { "PNPC10B", 0 },
+ /* Standard 9600 bps Modem */
+ { "PNPC10C", 0 },
+ /* Standard 14400 bps Modem */
+ { "PNPC10D", 0 },
+ /* Standard 28800 bps Modem */
+ { "PNPC10E", 0 },
+ /* Standard Modem */
+ { "PNPC10F", 0 },
+ /* Standard PCMCIA Card Modem */
+ { "PNP2000", 0 },
+ /* Rockwell */
+ /* Modular Technology */
+ /* Rockwell 33.6 DPF Internal PnP */
+ /* Modular Technology 33.6 Internal PnP */
+ { "ROK0030", 0 },
+ /* Kortex International */
+ /* KORTEX 14400 Externe PnP */
+ { "ROK0100", 0 },
+ /* Rockwell 28.8 */
+ { "ROK4120", 0 },
+ /* Viking Components, Inc */
+ /* Viking 28.8 INTERNAL Fax+Data+Voice PnP */
+ { "ROK4920", 0 },
+ /* Rockwell */
+ /* British Telecom */
+ /* Modular Technology */
+ /* Rockwell 33.6 DPF External PnP */
+ /* BT Prologue 33.6 External PnP */
+ /* Modular Technology 33.6 External PnP */
+ { "RSS00A0", 0 },
+ /* Viking 56K FAX INT */
+ { "RSS0262", 0 },
+ /* K56 par,VV,Voice,Speakphone,AudioSpan,PnP */
+ { "RSS0250", 0 },
+ /* SupraExpress 28.8 Data/Fax PnP modem */
+ { "SUP1310", 0 },
+ /* SupraExpress 336i PnP Voice Modem */
+ { "SUP1381", 0 },
+ /* SupraExpress 33.6 Data/Fax PnP modem */
+ { "SUP1421", 0 },
+ /* SupraExpress 33.6 Data/Fax PnP modem */
+ { "SUP1590", 0 },
+ /* SupraExpress 336i Sp ASVD */
+ { "SUP1620", 0 },
+ /* SupraExpress 33.6 Data/Fax PnP modem */
+ { "SUP1760", 0 },
+ /* SupraExpress 56i Sp Intl */
+ { "SUP2171", 0 },
+ /* Phoebe Micro */
+ /* Phoebe Micro 33.6 Data Fax 1433VQH Plug & Play */
+ { "TEX0011", 0 },
+ /* Archtek America Corp. */
+ /* Archtek SmartLink Modem 3334BT Plug & Play */
+ { "UAC000F", 0 },
+ /* 3Com Corp. */
+ /* Gateway Telepath IIvi 33.6 */
+ { "USR0000", 0 },
+ /* U.S. Robotics Sportster 33.6K Fax INT PnP */
+ { "USR0002", 0 },
+ /* Sportster Vi 14.4 PnP FAX Voicemail */
+ { "USR0004", 0 },
+ /* U.S. Robotics 33.6K Voice INT PnP */
+ { "USR0006", 0 },
+ /* U.S. Robotics 33.6K Voice EXT PnP */
+ { "USR0007", 0 },
+ /* U.S. Robotics Courier V.Everything INT PnP */
+ { "USR0009", 0 },
+ /* U.S. Robotics 33.6K Voice INT PnP */
+ { "USR2002", 0 },
+ /* U.S. Robotics 56K Voice INT PnP */
+ { "USR2070", 0 },
+ /* U.S. Robotics 56K Voice EXT PnP */
+ { "USR2080", 0 },
+ /* U.S. Robotics 56K FAX INT */
+ { "USR3031", 0 },
+ /* U.S. Robotics 56K FAX INT */
+ { "USR3050", 0 },
+ /* U.S. Robotics 56K Voice INT PnP */
+ { "USR3070", 0 },
+ /* U.S. Robotics 56K Voice EXT PnP */
+ { "USR3080", 0 },
+ /* U.S. Robotics 56K Voice INT PnP */
+ { "USR3090", 0 },
+ /* U.S. Robotics 56K Message */
+ { "USR9100", 0 },
+ /* U.S. Robotics 56K FAX EXT PnP */
+ { "USR9160", 0 },
+ /* U.S. Robotics 56K FAX INT PnP */
+ { "USR9170", 0 },
+ /* U.S. Robotics 56K Voice EXT PnP */
+ { "USR9180", 0 },
+ /* U.S. Robotics 56K Voice INT PnP */
+ { "USR9190", 0 },
+ /* Wacom tablets */
+ { "WACFXXX", 0 },
+ /* Compaq touchscreen */
+ { "FPI2002", 0 },
+ /* Fujitsu Stylistic touchscreens */
+ { "FUJ02B2", 0 },
+ { "FUJ02B3", 0 },
+ /* Fujitsu Stylistic LT touchscreens */
+ { "FUJ02B4", 0 },
+ /* Passive Fujitsu Stylistic touchscreens */
+ { "FUJ02B6", 0 },
+ { "FUJ02B7", 0 },
+ { "FUJ02B8", 0 },
+ { "FUJ02B9", 0 },
+ { "FUJ02BC", 0 },
+ /* Fujitsu Wacom Tablet PC device */
+ { "FUJ02E5", 0 },
+ /* Fujitsu P-series tablet PC device */
+ { "FUJ02E6", 0 },
+ /* Fujitsu Wacom 2FGT Tablet PC device */
+ { "FUJ02E7", 0 },
+ /* Fujitsu Wacom 1FGT Tablet PC device */
+ { "FUJ02E9", 0 },
+ /*
+ * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
+ * disguise)
+ */
+ { "LTS0001", 0 },
+ /* Rockwell's (PORALiNK) 33600 INT PNP */
+ { "WCI0003", 0 },
+ /* Unknown PnP modems */
+ { "PNPCXXX", UNKNOWN_DEV },
+ /* More unknown PnP modems */
+ { "PNPDXXX", UNKNOWN_DEV },
+ { "", 0 }
+};
+
+MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
+
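+/* Name fragments that check_name() looks for to spot a likely modem */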
+static char *modem_names[] __devinitdata = {
+ "MODEM", "Modem", "modem", "FAX", "Fax", "fax",
+ "56K", "56k", "K56", "33.6", "28.8", "14.4",
+ "33,600", "28,800", "14,400", "33.600", "28.800", "14.400",
+ "33600", "28800", "14400", "V.90", "V.34", "V.32", NULL
+};
+
+static int __devinit check_name(char *name)
+{
+ char **tmp;
+
+ for (tmp = modem_names; *tmp; tmp++)
+ if (strstr(name, *tmp))
+ return 1;
+
+ return 0;
+}
+
+static int __devinit check_resources(struct pnp_dev *dev)
+{
+ resource_size_t base[] = {0x2f8, 0x3f8, 0x2e8, 0x3e8};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(base); i++) {
+ if (pnp_possible_config(dev, IORESOURCE_IO, base[i], 8))
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Given a completely unknown PnP device, try to use some heuristics to
+ * detect modems. The current heuristics are:
+ * - dev->name or dev->card->name must contain a modem-related substring
+ *   (see modem_names[] above);
+ * - the device must have only one I/O region (8 bytes long) with base
+ *   address 0x2e8, 0x3e8, 0x2f8 or 0x3f8.
+ *
+ * Such detection is ugly, but it can identify at least some of the many
+ * PnP modems; the alternative would be to hardcode every modem in the
+ * pnp_dev_table[] above.
+ */
+static int __devinit serial_pnp_guess_board(struct pnp_dev *dev, int *flags)
+{
+ if (!(check_name(pnp_dev_name(dev)) ||
+ (dev->card && check_name(dev->card->name))))
+ return -ENODEV;
+
+ if (check_resources(dev))
+ return 0;
+
+ return -ENODEV;
+}
+
+static int __devinit
+serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
+{
+ struct uart_port port;
+ int ret, line, flags = dev_id->driver_data;
+
+ if (flags & UNKNOWN_DEV) {
+ ret = serial_pnp_guess_board(dev, &flags);
+ if (ret < 0)
+ return ret;
+ }
+
+ memset(&port, 0, sizeof(struct uart_port));
+ if (pnp_irq_valid(dev, 0))
+ port.irq = pnp_irq(dev, 0);
+ if (pnp_port_valid(dev, 0)) {
+ port.iobase = pnp_port_start(dev, 0);
+ port.iotype = UPIO_PORT;
+ } else if (pnp_mem_valid(dev, 0)) {
+ port.mapbase = pnp_mem_start(dev, 0);
+ port.iotype = UPIO_MEM;
+ port.flags = UPF_IOREMAP;
+ } else
+ return -ENODEV;
+
+#ifdef SERIAL_DEBUG_PNP
+ printk(KERN_DEBUG
+ "Setup PNP port: port %x, mem 0x%lx, irq %d, type %d\n",
+ port.iobase, port.mapbase, port.irq, port.iotype);
+#endif
+
+ port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
+ if (pnp_irq_flags(dev, 0) & IORESOURCE_IRQ_SHAREABLE)
+ port.flags |= UPF_SHARE_IRQ;
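+	/* Assume the standard 1.8432 MHz UART input clock */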
+ port.uartclk = 1843200;
+ port.dev = &dev->dev;
+
+ line = serial8250_register_port(&port);
+ if (line < 0)
+ return -ENODEV;
+
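+	/* Store line + 1 so that drvdata == 0 means "no port registered" */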
+ pnp_set_drvdata(dev, (void *)((long)line + 1));
+ return 0;
+}
+
+static void __devexit serial_pnp_remove(struct pnp_dev *dev)
+{
+ long line = (long)pnp_get_drvdata(dev);
+ if (line)
+ serial8250_unregister_port(line - 1);
+}
+
+#ifdef CONFIG_PM
+static int serial_pnp_suspend(struct pnp_dev *dev, pm_message_t state)
+{
+ long line = (long)pnp_get_drvdata(dev);
+
+ if (!line)
+ return -ENODEV;
+ serial8250_suspend_port(line - 1);
+ return 0;
+}
+
+static int serial_pnp_resume(struct pnp_dev *dev)
+{
+ long line = (long)pnp_get_drvdata(dev);
+
+ if (!line)
+ return -ENODEV;
+ serial8250_resume_port(line - 1);
+ return 0;
+}
+#else
+#define serial_pnp_suspend NULL
+#define serial_pnp_resume NULL
+#endif /* CONFIG_PM */
+
+static struct pnp_driver serial_pnp_driver = {
+ .name = "serial",
+ .probe = serial_pnp_probe,
+ .remove = __devexit_p(serial_pnp_remove),
+ .suspend = serial_pnp_suspend,
+ .resume = serial_pnp_resume,
+ .id_table = pnp_dev_table,
+};
+
+static int __init serial8250_pnp_init(void)
+{
+ return pnp_register_driver(&serial_pnp_driver);
+}
+
+static void __exit serial8250_pnp_exit(void)
+{
+ pnp_unregister_driver(&serial_pnp_driver);
+}
+
+module_init(serial8250_pnp_init);
+module_exit(serial8250_pnp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Generic 8250/16x50 PnP serial driver");
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
new file mode 100644
index 00000000..9789293a
--- /dev/null
+++ b/drivers/tty/serial/Kconfig
@@ -0,0 +1,1638 @@
+#
+# Serial device configuration
+#
+
+menu "Serial drivers"
+ depends on HAS_IOMEM
+
+#
+# The new 8250/16550 serial drivers
+config SERIAL_8250
+ tristate "8250/16550 and compatible serial support"
+ select SERIAL_CORE
+ ---help---
+ This selects whether you want to include the driver for the standard
+ serial ports. The standard answer is Y. People who might say N
+ here are those that are setting up dedicated Ethernet WWW/FTP
+ servers, or users that have one of the various bus mice instead of a
+ serial mouse and don't intend to use their machine's standard serial
+ port for anything. (Note that the Cyclades and Stallion multi
+ serial port drivers do not need this driver built in for them to
+ work.)
+
+ To compile this driver as a module, choose M here: the
+ module will be called 8250.
+ [WARNING: Do not compile this driver as a module if you are using
+ non-standard serial ports, since the configuration information will
+ be lost when the driver is unloaded. This limitation may be lifted
+ in the future.]
+
+ BTW1: If you have a mouseman serial mouse which is not recognized by
+ the X window system, try running gpm first.
+
+ BTW2: If you intend to use a software modem (also called Winmodem)
+ under Linux, forget it. These modems are crippled and require
+ proprietary drivers which are only available under Windows.
+
+ Most people will say Y or M here, so that they can use serial mice,
+ modems and similar devices connecting to the standard serial ports.
+
+config SERIAL_8250_CONSOLE
+ bool "Console on 8250/16550 and compatible serial port"
+ depends on SERIAL_8250=y
+ select SERIAL_CORE_CONSOLE
+ ---help---
+ If you say Y here, it will be possible to use a serial port as the
+ system console (the system console is the device which receives all
+ kernel messages and warnings and which allows logins in single user
+ mode). This could be useful if some terminal or printer is connected
+ to that serial port.
+
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttyS1". (Try "man bootparam" or see the documentation of
+ your boot loader (grub or lilo or loadlin) about how to pass options
+ to the kernel at boot time.)
+
+ If you don't have a VGA card installed and you say Y here, the
+ kernel will automatically use the first serial line, /dev/ttyS0, as
+ system console.
+
+ You can set that using a kernel command line option such as
+ "console=uart8250,io,0x3f8,9600n8" or
+ "console=uart8250,mmio,0xff5e0000,115200n8"; the kernel will then
+ switch to the normal serial console once the corresponding port is
+ ready. If you only want the early boot console, use
+ "earlycon=uart8250,io,0x3f8,9600n8" or
+ "earlycon=uart8250,mmio,0xff5e0000,115200n8" instead.
+
+ If unsure, say N.
+
+config FIX_EARLYCON_MEM
+ bool
+ depends on X86
+ default y
+
+config SERIAL_8250_GSC
+ tristate
+ depends on SERIAL_8250 && GSC
+ default SERIAL_8250
+
+config SERIAL_8250_PCI
+ tristate "8250/16550 PCI device support" if EXPERT
+ depends on SERIAL_8250 && PCI
+ default SERIAL_8250
+ help
+ This builds standard PCI serial support. You may be able to
+ disable this feature if you only need legacy serial support.
+ Saves about 9K.
+
+config SERIAL_8250_PNP
+ tristate "8250/16550 PNP device support" if EXPERT
+ depends on SERIAL_8250 && PNP
+ default SERIAL_8250
+ help
+ This builds standard PNP serial support. You may be able to
+ disable this feature if you only need legacy serial support.
+
+config SERIAL_8250_HP300
+ tristate
+ depends on SERIAL_8250 && HP300
+ default SERIAL_8250
+
+config SERIAL_8250_CS
+ tristate "8250/16550 PCMCIA device support"
+ depends on PCMCIA && SERIAL_8250
+ ---help---
+ Say Y here to enable support for 16-bit PCMCIA serial devices,
+ including serial port cards, modems, and the modem functions of
+ multi-function Ethernet/modem cards. (PCMCIA- or PC-cards are
+ credit-card size devices often used with laptops.)
+
+ To compile this driver as a module, choose M here: the
+ module will be called serial_cs.
+
+ If unsure, say N.
+
+config SERIAL_8250_NR_UARTS
+ int "Maximum number of 8250/16550 serial ports"
+ depends on SERIAL_8250
+ default "4"
+ help
+ Set this to the number of serial ports you want the driver
+ to support. This includes any ports discovered via ACPI or
+ PCI enumeration and any ports that may be added at run-time
+ via hot-plug, or any ISA multi-port serial cards.
+
+config SERIAL_8250_RUNTIME_UARTS
+ int "Number of 8250/16550 serial ports to register at runtime"
+ depends on SERIAL_8250
+ range 0 SERIAL_8250_NR_UARTS
+ default "4"
+ help
+ Set this to the maximum number of serial ports you want
+ the kernel to register at boot time. This can be overridden
+ with the module parameter "nr_uarts", or the boot-time parameter
+ "8250.nr_uarts".
+
+config SERIAL_8250_EXTENDED
+ bool "Extended 8250/16550 serial driver options"
+ depends on SERIAL_8250
+ help
+ If you wish to use any non-standard features of the standard "dumb"
+ driver, say Y here. This includes HUB6 support, shared serial
+ interrupts, special multiport support, support for more than the
+ four COM 1/2/3/4 boards, etc.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about serial driver options. If unsure, say N.
+
+config SERIAL_8250_MANY_PORTS
+ bool "Support more than 4 legacy serial ports"
+ depends on SERIAL_8250_EXTENDED && !IA64
+ help
+ Say Y here if you have dumb serial boards other than the four
+ standard COM 1/2/3/4 ports. This may happen if you have an AST
+ FourPort, Accent Async, Boca (read the Boca mini-HOWTO, available
+ from <http://www.tldp.org/docs.html#howto>), or other custom
+ serial port hardware which acts similar to standard serial port
+ hardware. If you only use the standard COM 1/2/3/4 ports, you can
+ say N here to save some memory. You can also say Y if you have an
+ "intelligent" multiport card such as Cyclades, Digiboards, etc.
+
+#
+# Multi-port serial cards
+#
+
+config SERIAL_8250_FOURPORT
+ tristate "Support Fourport cards"
+ depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+ help
+ Say Y here if you have an AST FourPort serial board.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_fourport.
+
+config SERIAL_8250_ACCENT
+ tristate "Support Accent cards"
+ depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+ help
+ Say Y here if you have an Accent Async serial board.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_accent.
+
+config SERIAL_8250_BOCA
+ tristate "Support Boca cards"
+ depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+ help
+ Say Y here if you have a Boca serial board. Please read the Boca
+ mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_boca.
+
+config SERIAL_8250_EXAR_ST16C554
+ tristate "Support Exar ST16C554/554D Quad UART"
+ depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+ help
+ The Uplogix Envoy TU301 uses this Exar Quad UART. If you are
+ tinkering with your Envoy TU301, or have a machine with this UART,
+ say Y here.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_exar_st16c554.
+
+config SERIAL_8250_HUB6
+ tristate "Support Hub6 cards"
+ depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+ help
+ Say Y here if you have a HUB6 serial board.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_hub6.
+
+config SERIAL_8250_SHARE_IRQ
+ bool "Support for sharing serial interrupts"
+ depends on SERIAL_8250_EXTENDED
+ help
+ Some serial boards have hardware support which allows multiple dumb
+ serial ports on the same board to share a single IRQ. To enable
+ support for this in the serial driver, say Y here.
+
+config SERIAL_8250_DETECT_IRQ
+ bool "Autodetect IRQ on standard ports (unsafe)"
+ depends on SERIAL_8250_EXTENDED
+ help
+ Say Y here if you want the kernel to try to guess which IRQ
+ to use for your serial port.
+
+ This is considered unsafe; it is far better to configure the IRQ in
+ a boot script using the setserial command.
+
+ If unsure, say N.
+
+config SERIAL_8250_RSA
+ bool "Support RSA serial ports"
+ depends on SERIAL_8250_EXTENDED
+ help
+ ::: To be written :::
+
+config SERIAL_8250_MCA
+ tristate "Support 8250-type ports on MCA buses"
+ depends on SERIAL_8250 != n && MCA
+ help
+ Say Y here if you have MCA serial ports.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_mca.
+
+config SERIAL_8250_ACORN
+ tristate "Acorn expansion card serial port support"
+ depends on ARCH_ACORN && SERIAL_8250
+ help
+ If you have an Atomwide Serial card or Serial Port card for an Acorn
+ system, say Y to this option. The driver can handle 1, 2, or 3 port
+ cards. If unsure, say N.
+
+config SERIAL_8250_RM9K
+ bool "Support for MIPS RM9xxx integrated serial port"
+ depends on SERIAL_8250 != n && SERIAL_RM9000
+ select SERIAL_8250_SHARE_IRQ
+ help
+ Selecting this option will add support for the integrated serial
+ port hardware found on MIPS RM9122 and similar processors.
+ If unsure, say N.
+
+comment "Non-8250 serial port support"
+
+config SERIAL_AMBA_PL010
+ tristate "ARM AMBA PL010 serial port support"
+ depends on ARM_AMBA && (BROKEN || !ARCH_VERSATILE)
+ select SERIAL_CORE
+ help
+ This selects the ARM(R) AMBA(R) PrimeCell PL010 UART. If you have
+ an Integrator/AP or Integrator/PP2 platform, or if you have a
+ Cirrus Logic EP93xx CPU, say Y or M here.
+
+ If unsure, say N.
+
+config SERIAL_AMBA_PL010_CONSOLE
+ bool "Support for console on AMBA serial port"
+ depends on SERIAL_AMBA_PL010=y
+ select SERIAL_CORE_CONSOLE
+ ---help---
+ Say Y here if you wish to use an AMBA PrimeCell UART as the system
+ console (the system console is the device which receives all kernel
+ messages and warnings and which allows logins in single user mode).
+
+ Even if you say Y here, the currently visible framebuffer console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttyAM0". (Try "man bootparam" or see the documentation of
+ your boot loader (lilo or loadlin) about how to pass options to the
+ kernel at boot time.)
+
+config SERIAL_AMBA_PL011
+ tristate "ARM AMBA PL011 serial port support"
+ depends on ARM_AMBA
+ select SERIAL_CORE
+ help
+ This selects the ARM(R) AMBA(R) PrimeCell PL011 UART. If you have
+ an Integrator/PP2, Integrator/CP or Versatile platform, say Y or M
+ here.
+
+ If unsure, say N.
+
+config SERIAL_AMBA_PL011_CONSOLE
+ bool "Support for console on AMBA serial port"
+ depends on SERIAL_AMBA_PL011=y
+ select SERIAL_CORE_CONSOLE
+ ---help---
+ Say Y here if you wish to use an AMBA PrimeCell UART as the system
+ console (the system console is the device which receives all kernel
+ messages and warnings and which allows logins in single user mode).
+
+ Even if you say Y here, the currently visible framebuffer console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttyAMA0". (Try "man bootparam" or see the documentation of
+ your boot loader (lilo or loadlin) about how to pass options to the
+ kernel at boot time.)
+
+config SERIAL_SB1250_DUART
+ tristate "BCM1xxx on-chip DUART serial support"
+ depends on SIBYTE_SB1xxx_SOC=y
+ select SERIAL_CORE
+ default y
+ ---help---
+ Support for the asynchronous serial interface (DUART) included in
+ the BCM1250 and derived System-On-a-Chip (SOC) devices. Note that
+ the letter D in DUART stands for "dual", which is how the device
+ is implemented. Depending on the SOC configuration there may be
+ one or more DUARTs available, all of which are handled.
+
+ If unsure, say Y. To compile this driver as a module, choose M here:
+ the module will be called sb1250-duart.
+
+config SERIAL_SB1250_DUART_CONSOLE
+ bool "Support for console on a BCM1xxx DUART serial port"
+ depends on SERIAL_SB1250_DUART=y
+ select SERIAL_CORE_CONSOLE
+ default y
+ ---help---
+ If you say Y here, it will be possible to use a serial port as the
+ system console (the system console is the device which receives all
+ kernel messages and warnings and which allows logins in single user
+ mode).
+
+ If unsure, say Y.
+
+config SERIAL_ATMEL
+ bool "AT91 / AT32 on-chip serial port support"
+ depends on (ARM && ARCH_AT91) || AVR32
+ select SERIAL_CORE
+ help
+ This enables the driver for the on-chip UARTs of the Atmel
+ AT91 and AT32 processors.
+
+config SERIAL_ATMEL_CONSOLE
+ bool "Support for console on AT91 / AT32 serial port"
+ depends on SERIAL_ATMEL=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Say Y here if you wish to use an on-chip UART on a Atmel
+ AT91 or AT32 processor as the system console (the system
+ console is the device which receives all kernel messages and
+ warnings and which allows logins in single user mode).
+
+config SERIAL_ATMEL_PDC
+ bool "Support DMA transfers on AT91 / AT32 serial port"
+ depends on SERIAL_ATMEL
+ default y
+ help
+ Say Y here if you wish to use the PDC to do DMA transfers to
+ and from the Atmel AT91 / AT32 serial port. In order to
+ actually use DMA transfers, make sure that the use_dma_tx
+ and use_dma_rx members in the atmel_uart_data struct are set
+ appropriately for each port.
+
+ Note that break and error handling currently doesn't work
+ properly when DMA is enabled. Make sure that ports where
+ this matters don't use DMA.
+
+config SERIAL_ATMEL_TTYAT
+ bool "Install as device ttyATn instead of ttySn"
+ depends on SERIAL_ATMEL=y
+ help
+ Say Y here if you wish to have the internal AT91 / AT32 UARTs
+ appear as /dev/ttyATn (major 204, minor starting at 154)
+ instead of the normal /dev/ttySn (major 4, minor starting at
+ 64). This is necessary if you also want other UARTs, such as
+ external 8250/16C550 compatible UARTs.
+ The ttySn nodes are legally reserved for the 8250 serial driver
+ but are often misused by other serial drivers.
+
+ To use this, you should create suitable ttyATn device nodes in
+ /dev/, and pass "console=ttyATn" to the kernel.
+
+ Say Y if you have an external 8250/16C550 UART. If unsure, say N.
+
+config SERIAL_KS8695
+ bool "Micrel KS8695 (Centaur) serial port support"
+ depends on ARCH_KS8695
+ select SERIAL_CORE
+ help
+ This selects the Micrel Centaur KS8695 UART. Say Y here.
+
+config SERIAL_KS8695_CONSOLE
+ bool "Support for console on KS8695 (Centaur) serial port"
+ depends on SERIAL_KS8695=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Say Y here if you wish to use a KS8695 (Centaur) UART as the
+ system console (the system console is the device which
+ receives all kernel messages and warnings and which allows
+ logins in single user mode).
+
+config SERIAL_CLPS711X
+ tristate "CLPS711X serial port support"
+ depends on ARM && ARCH_CLPS711X
+ select SERIAL_CORE
+ help
+ ::: To be written :::
+
+config SERIAL_CLPS711X_CONSOLE
+ bool "Support for console on CLPS711X serial port"
+ depends on SERIAL_CLPS711X=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttyCL1". (Try "man bootparam" or see the documentation of
+ your boot loader (lilo or loadlin) about how to pass options to the
+ kernel at boot time.)
+
+config SERIAL_SAMSUNG
+ tristate "Samsung SoC serial support"
+ depends on ARM && PLAT_SAMSUNG
+ select SERIAL_CORE
+ help
+ Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
+ providing /dev/ttySAC0, 1 and 2 (note, some machines may not
+ provide all of these ports, depending on how the serial port
+ pins are configured).
+
+config SERIAL_SAMSUNG_UARTS_4
+ bool
+ depends on ARM && PLAT_SAMSUNG
+ default y if CPU_S3C2443
+ help
+ Internal node for the common case of 4 Samsung compatible UARTs
+
+config SERIAL_SAMSUNG_UARTS
+ int
+ depends on ARM && PLAT_SAMSUNG
+ default 2 if ARCH_S3C2400
+ default 6 if ARCH_S5P6450
+ default 4 if SERIAL_SAMSUNG_UARTS_4
+ default 3
+ help
+ Select the number of available UART ports for the Samsung S3C
+ serial driver
+
+config SERIAL_SAMSUNG_DEBUG
+ bool "Samsung SoC serial debug"
+ depends on SERIAL_SAMSUNG && DEBUG_LL
+ help
+ Add support for debugging the serial driver. Since this is
+ generally being used as a console, we use our own output
+ routines that go via the low-level debug printascii()
+ function.
+
+config SERIAL_SAMSUNG_CONSOLE
+ bool "Support for console on Samsung SoC serial port"
+ depends on SERIAL_SAMSUNG=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Allow selection of the S3C24XX on-board serial ports for use as
+ a virtual console.
+
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttySACx". (Try "man bootparam" or see the documentation of
+ your boot loader about how to pass options to the kernel at
+ boot time.)
+
+config SERIAL_S3C2400
+ tristate "Samsung S3C2410 Serial port support"
+ depends on ARM && SERIAL_SAMSUNG && CPU_S3C2400
+ default y if CPU_S3C2400
+ help
+ Serial port support for the Samsung S3C2400 SoC
+
+config SERIAL_S3C2410
+ tristate "Samsung S3C2410 Serial port support"
+ depends on SERIAL_SAMSUNG && CPU_S3C2410
+ default y if CPU_S3C2410
+ help
+ Serial port support for the Samsung S3C2410 SoC
+
+config SERIAL_S3C2412
+ tristate "Samsung S3C2412/S3C2413 Serial port support"
+ depends on SERIAL_SAMSUNG && CPU_S3C2412
+ default y if CPU_S3C2412
+ help
+ Serial port support for the Samsung S3C2412 and S3C2413 SoC
+
+config SERIAL_S3C2440
+ tristate "Samsung S3C2440/S3C2442/S3C2416 Serial port support"
+ depends on SERIAL_SAMSUNG && (CPU_S3C2440 || CPU_S3C2442 || CPU_S3C2416)
+ default y if CPU_S3C2440
+ default y if CPU_S3C2442
+ select SERIAL_SAMSUNG_UARTS_4 if CPU_S3C2416
+ help
+ Serial port support for the Samsung S3C2440, S3C2416 and S3C2442 SoC
+
+config SERIAL_S3C24A0
+ tristate "Samsung S3C24A0 Serial port support"
+ depends on SERIAL_SAMSUNG && CPU_S3C24A0
+ default y if CPU_S3C24A0
+ help
+ Serial port support for the Samsung S3C24A0 SoC
+
+config SERIAL_S3C6400
+ tristate "Samsung S3C6400/S3C6410/S5P6440/S5P6450/S5PC100 Serial port support"
+ depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410 || CPU_S5P6440 || CPU_S5P6450 || CPU_S5PC100)
+ select SERIAL_SAMSUNG_UARTS_4
+ default y
+ help
+ Serial port support for the Samsung S3C6400, S3C6410, S5P6440, S5P6450
+ and S5PC100 SoCs
+
+config SERIAL_S5PV210
+ tristate "Samsung S5PV210 Serial port support"
+ depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_EXYNOS4210)
+ select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_EXYNOS4210)
+ default y
+ help
+ Serial port support for Samsung's S5P family of SoCs
+
+
+config SERIAL_MAX3100
+ tristate "MAX3100 support"
+ depends on SPI
+ select SERIAL_CORE
+ help
+ MAX3100 chip support
+
+config SERIAL_MAX3107
+ tristate "MAX3107 support"
+ depends on SPI
+ select SERIAL_CORE
+ help
+ MAX3107 chip support
+
+config SERIAL_MAX3107_AAVA
+ tristate "MAX3107 AAVA platform support"
+ depends on X86_MRST && SERIAL_MAX3107 && GPIOLIB
+ select SERIAL_CORE
+ help
+ Support for the MAX3107 chip configuration found on the AAVA
+ platform. Includes the extra initialisation and GPIO support
+ needed for this device.
+
+config SERIAL_DZ
+ bool "DECstation DZ serial driver"
+ depends on MACH_DECSTATION && 32BIT
+ select SERIAL_CORE
+ default y
+ ---help---
+ DZ11-family serial controllers for DECstations and VAXstations,
+ including the DC7085, M7814, and M7819.
+
+config SERIAL_DZ_CONSOLE
+ bool "Support console on DECstation DZ serial driver"
+ depends on SERIAL_DZ=y
+ select SERIAL_CORE_CONSOLE
+ default y
+ ---help---
+ If you say Y here, it will be possible to use a serial port as the
+ system console (the system console is the device which receives all
+ kernel messages and warnings and which allows logins in single user
+ mode).
+
+ Note that the firmware uses ttyS3 as the serial console on
+ DECstations that use this driver.
+
+ If unsure, say Y.
+
+config SERIAL_ZS
+ tristate "DECstation Z85C30 serial support"
+ depends on MACH_DECSTATION
+ select SERIAL_CORE
+ default y
+ ---help---
+ Support for the Zilog Z85C30 serial communications controller used
+ for serial ports in newer DECstation systems. These include the
+ DECsystem 5900 and all models of the DECstation and DECsystem 5000
+ systems except for the model 200.
+
+ If unsure, say Y. To compile this driver as a module, choose M here:
+ the module will be called zs.
+
+config SERIAL_ZS_CONSOLE
+ bool "Support for console on a DECstation Z85C30 serial port"
+ depends on SERIAL_ZS=y
+ select SERIAL_CORE_CONSOLE
+ default y
+ ---help---
+ If you say Y here, it will be possible to use a serial port as the
+ system console (the system console is the device which receives all
+ kernel messages and warnings and which allows logins in single user
+ mode).
+
+ Note that the firmware uses ttyS1 as the serial console on the
+ Maxine and ttyS3 on the others using this driver.
+
+ If unsure, say Y.
+
+config SERIAL_21285
+ tristate "DC21285 serial port support"
+ depends on ARM && FOOTBRIDGE
+ select SERIAL_CORE
+ help
+ If you have a machine based on a 21285 (Footbridge) StrongARM(R)/
+ PCI bridge you can enable its onboard serial port by enabling this
+ option.
+
+config SERIAL_21285_CONSOLE
+ bool "Console on DC21285 serial port"
+ depends on SERIAL_21285=y
+ select SERIAL_CORE_CONSOLE
+ help
+ If you have enabled the serial port on the 21285 footbridge you can
+ make it the console by answering Y to this option.
+
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttyFB". (Try "man bootparam" or see the documentation of
+ your boot loader (lilo or loadlin) about how to pass options to the
+ kernel at boot time.)
+
+config SERIAL_MPSC
+ bool "Marvell MPSC serial port support"
+ depends on PPC32 && MV64X60
+ select SERIAL_CORE
+ help
+ Say Y here if you want to use the Marvell MPSC serial controller.
+
+config SERIAL_MPSC_CONSOLE
+ bool "Support for console on Marvell MPSC serial port"
+ depends on SERIAL_MPSC
+ select SERIAL_CORE_CONSOLE
+ help
+ Say Y here if you want to support a serial console on a Marvell MPSC.
+
+config SERIAL_PXA
+ bool "PXA serial port support"
+ depends on ARCH_PXA || ARCH_MMP
+ select SERIAL_CORE
+ help
+ If you have a machine based on an Intel XScale PXA2xx CPU you
+ can enable its onboard serial ports by enabling this option.
+
+config SERIAL_PXA_CONSOLE
+ bool "Console on PXA serial port"
+ depends on SERIAL_PXA
+ select SERIAL_CORE_CONSOLE
+ help
+ If you have enabled the serial port on the Intel XScale PXA
+ CPU you can make it the console by answering Y to this option.
+
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttySA0". (Try "man bootparam" or see the documentation of
+ your boot loader (lilo or loadlin) about how to pass options to the
+ kernel at boot time.)
+
+config SERIAL_SA1100
+ bool "SA1100 serial port support"
+ depends on ARM && ARCH_SA1100
+ select SERIAL_CORE
+ help
+ If you have a machine based on a SA1100/SA1110 StrongARM(R) CPU you
+ can enable its onboard serial port by enabling this option.
+ Please read <file:Documentation/arm/SA1100/serial_UART> for further
+ info.
+
+config SERIAL_SA1100_CONSOLE
+ bool "Console on SA1100 serial port"
+ depends on SERIAL_SA1100
+ select SERIAL_CORE_CONSOLE
+ help
+ If you have enabled the serial port on the SA1100/SA1110 StrongARM
+ CPU you can make it the console by answering Y to this option.
+
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttySA0". (Try "man bootparam" or see the documentation of
+ your boot loader (lilo or loadlin) about how to pass options to the
+ kernel at boot time.)
+
+config SERIAL_MRST_MAX3110
+ tristate "SPI UART driver for Max3110"
+ depends on SPI_DW_PCI
+ select SERIAL_CORE
+ select SERIAL_CORE_CONSOLE
+ help
+ This is the UART protocol driver for the MAX3110 device on
+ the Intel Moorestown platform. On other systems use the max3100
+ driver.
+
+config SERIAL_MFD_HSU
+ tristate "Medfield High Speed UART support"
+ depends on PCI
+ select SERIAL_CORE
+
+config SERIAL_MFD_HSU_CONSOLE
+ boolean "Medfile HSU serial console support"
+ depends on SERIAL_MFD_HSU=y
+ select SERIAL_CORE_CONSOLE
+
+config SERIAL_BFIN
+ tristate "Blackfin serial port support"
+ depends on BLACKFIN
+ select SERIAL_CORE
+ select SERIAL_BFIN_UART0 if (BF531 || BF532 || BF533 || BF561)
+ help
+ Add support for the built-in UARTs on the Blackfin.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bfin_5xx.
+
+config SERIAL_BFIN_CONSOLE
+ bool "Console on Blackfin serial port"
+ depends on SERIAL_BFIN=y
+ select SERIAL_CORE_CONSOLE
+
+choice
+ prompt "UART Mode"
+ depends on SERIAL_BFIN
+ default SERIAL_BFIN_DMA
+ help
+ This driver supports the built-in serial ports of the Blackfin family
+ of CPUs
+
+config SERIAL_BFIN_DMA
+ bool "DMA mode"
+ depends on !DMA_UNCACHED_NONE && KGDB_SERIAL_CONSOLE=n
+ help
+ This driver works under DMA mode. If this option is selected, the
+ Blackfin simple DMA driver is also enabled.
+
+config SERIAL_BFIN_PIO
+ bool "PIO mode"
+ help
+ This driver works under PIO mode.
+
+endchoice
+
+config SERIAL_BFIN_UART0
+ bool "Enable UART0"
+ depends on SERIAL_BFIN
+ help
+ Enable UART0
+
+config BFIN_UART0_CTSRTS
+ bool "Enable UART0 hardware flow control"
+ depends on SERIAL_BFIN_UART0
+ help
+ Enable hardware flow control in the driver.
+
+config SERIAL_BFIN_UART1
+ bool "Enable UART1"
+ depends on SERIAL_BFIN && (!BF531 && !BF532 && !BF533 && !BF561)
+ help
+ Enable UART1
+
+config BFIN_UART1_CTSRTS
+ bool "Enable UART1 hardware flow control"
+ depends on SERIAL_BFIN_UART1
+ help
+ Enable hardware flow control in the driver.
+
+config SERIAL_BFIN_UART2
+ bool "Enable UART2"
+ depends on SERIAL_BFIN && (BF54x || BF538 || BF539)
+ help
+ Enable UART2
+
+config BFIN_UART2_CTSRTS
+ bool "Enable UART2 hardware flow control"
+ depends on SERIAL_BFIN_UART2
+ help
+ Enable hardware flow control in the driver.
+
+config SERIAL_BFIN_UART3
+ bool "Enable UART3"
+ depends on SERIAL_BFIN && (BF54x)
+ help
+ Enable UART3
+
+config BFIN_UART3_CTSRTS
+ bool "Enable UART3 hardware flow control"
+ depends on SERIAL_BFIN_UART3
+ help
+ Enable hardware flow control in the driver.
+
+config SERIAL_IMX
+ bool "IMX serial port support"
+ depends on ARM && (ARCH_IMX || ARCH_MXC)
+ select SERIAL_CORE
+ select RATIONAL
+ help
+ If you have a machine based on a Motorola IMX CPU you
+ can enable its onboard serial port by enabling this option.
+
+config SERIAL_IMX_CONSOLE
+ bool "Console on IMX serial port"
+ depends on SERIAL_IMX
+ select SERIAL_CORE_CONSOLE
+ help
+ If you have enabled the serial port on the Motorola IMX
+ CPU you can make it the console by answering Y to this option.
+
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttySA0". (Try "man bootparam" or see the documentation of
+ your boot loader (lilo or loadlin) about how to pass options to the
+ kernel at boot time.)
+
+config SERIAL_UARTLITE
+ tristate "Xilinx uartlite serial port support"
+ depends on PPC32 || MICROBLAZE || MFD_TIMBERDALE
+ select SERIAL_CORE
+ help
+ Say Y here if you want to use the Xilinx uartlite serial controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called uartlite.
+
+config SERIAL_UARTLITE_CONSOLE
+ bool "Support for console on Xilinx uartlite serial port"
+ depends on SERIAL_UARTLITE=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Say Y here if you wish to use a Xilinx uartlite as the system
+ console (the system console is the device which receives all kernel
+ messages and warnings and which allows logins in single user mode).
+
+config SERIAL_SUNCORE
+ bool
+ depends on SPARC
+ select SERIAL_CORE
+ select SERIAL_CORE_CONSOLE
+ default y
+
+config SERIAL_SUNZILOG
+ tristate "Sun Zilog8530 serial support"
+ depends on SPARC
+ help
+ This driver supports the Zilog8530 serial ports found on many Sparc
+ systems. Say Y or M if you want to be able to use these serial ports.
+
+config SERIAL_SUNZILOG_CONSOLE
+ bool "Console on Sun Zilog8530 serial port"
+ depends on SERIAL_SUNZILOG=y
+ help
+ If you would like to be able to use the Zilog8530 serial port
+ on your Sparc system as the console, you can do so by answering
+ Y to this option.
+
+config SERIAL_SUNSU
+ tristate "Sun SU serial support"
+ depends on SPARC && PCI
+ help
+ This driver supports the 8250 serial ports that run the keyboard and
+ mouse on (PCI) UltraSPARC systems. Say Y or M if you want to be able
+ to use these serial ports.
+
+config SERIAL_SUNSU_CONSOLE
+ bool "Console on Sun SU serial port"
+ depends on SERIAL_SUNSU=y
+ help
+ If you would like to be able to use the SU serial port
+ on your Sparc system as the console, you can do so by answering
+ Y to this option.
+
+config SERIAL_MUX
+ tristate "Serial MUX support"
+ depends on GSC
+ select SERIAL_CORE
+ default y
+ ---help---
+ Saying Y here will enable the hardware MUX serial driver for
+ the Nova, K class systems and D class with a 'remote control card'.
+ The hardware MUX is not 8250/16550 compatible therefore the
+ /dev/ttyB0 device is shared between the Serial MUX and the PDC
+ software console. The following steps need to be completed to use
+ the Serial MUX:
+
+ 1. create the device entry (mknod /dev/ttyB0 c 11 0)
+ 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0
+ (an example entry is shown below)
+ 3. Add device ttyB0 to /etc/securetty (if you want to log on as
+ root on this console.)
+ 4. Change the kernel command line console parameter to: console=ttyB0
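+
+ As an illustration only (the exact getty invocation varies between
+ distributions), the /etc/inittab entry for step 2 could look like:
+
+ T0:2345:respawn:/sbin/getty -L ttyB0 9600 vt100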
+
+config SERIAL_MUX_CONSOLE
+ bool "Support for console on serial MUX"
+ depends on SERIAL_MUX=y
+ select SERIAL_CORE_CONSOLE
+ default y
+
+config PDC_CONSOLE
+ bool "PDC software console support"
+ depends on PARISC && !SERIAL_MUX && VT
+ default n
+ help
+ Saying Y here will enable the software based PDC console to be
+ used as the system console. This is useful for machines in
+ which the hardware based console has not been written yet. The
+ following steps must be completed to use the PDC console:
+
+ 1. create the device entry (mknod /dev/ttyB0 c 11 0)
+ 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0
+ 3. Add device ttyB0 to /etc/securetty (if you want to log on as
+ root on this console.)
+ 4. Change the kernel command console parameter to: console=ttyB0
+
+config SERIAL_SUNSAB
+ tristate "Sun Siemens SAB82532 serial support"
+ depends on SPARC && PCI
+ help
+ This driver supports the Siemens SAB82532 DUSCC serial ports on newer
+ (PCI) UltraSPARC systems. Say Y or M if you want to be able to use these
+ serial ports.
+
+config SERIAL_SUNSAB_CONSOLE
+ bool "Console on Sun Siemens SAB82532 serial port"
+ depends on SERIAL_SUNSAB=y
+ help
+ If you would like to be able to use the SAB82532 serial port
+ on your Sparc system as the console, you can do so by answering
+ Y to this option.
+
+config SERIAL_SUNHV
+ bool "Sun4v Hypervisor Console support"
+ depends on SPARC64
+ help
+ This driver supports the console device found on SUN4V Sparc
+ systems. Say Y if you want to be able to use this device.
+
+config SERIAL_IP22_ZILOG
+ tristate "SGI Zilog8530 serial support"
+ depends on SGI_HAS_ZILOG
+ select SERIAL_CORE
+ help
+ This driver supports the Zilog8530 serial ports found on SGI
+ systems. Say Y or M if you want to be able to use these serial ports.
+
+config SERIAL_IP22_ZILOG_CONSOLE
+ bool "Console on SGI Zilog8530 serial port"
+ depends on SERIAL_IP22_ZILOG=y
+ select SERIAL_CORE_CONSOLE
+
+config SERIAL_SH_SCI
+ tristate "SuperH SCI(F) serial port support"
+ depends on HAVE_CLK && (SUPERH || H8300 || ARCH_SHMOBILE)
+ select SERIAL_CORE
+
+config SERIAL_SH_SCI_NR_UARTS
+ int "Maximum number of SCI(F) serial ports"
+ depends on SERIAL_SH_SCI
+ default "2"
+
+config SERIAL_SH_SCI_CONSOLE
+ bool "Support for console on SuperH SCI(F)"
+ depends on SERIAL_SH_SCI=y
+ select SERIAL_CORE_CONSOLE
+
+config SERIAL_SH_SCI_DMA
+ bool "DMA support"
+ depends on SERIAL_SH_SCI && SH_DMAE && EXPERIMENTAL
+
+config SERIAL_PNX8XXX
+ bool "Enable PNX8XXX SoCs' UART Support"
+ depends on MIPS && (SOC_PNX8550 || SOC_PNX833X)
+ select SERIAL_CORE
+ help
+ If you have a MIPS-based Philips SoC such as PNX8550 or PNX8330
+ and you want to use serial ports, say Y. Otherwise, say N.
+
+config SERIAL_PNX8XXX_CONSOLE
+ bool "Enable PNX8XX0 serial console"
+ depends on SERIAL_PNX8XXX
+ select SERIAL_CORE_CONSOLE
+ help
+ If you have a MIPS-based Philips SoC such as PNX8550 or PNX8330
+ and you want to use a serial console, say Y. Otherwise, say N.
+
+config SERIAL_CORE
+ tristate
+
+config SERIAL_CORE_CONSOLE
+ bool
+
+config CONSOLE_POLL
+ bool
+
+config SERIAL_68328
+ bool "68328 serial support"
+ depends on M68328 || M68EZ328 || M68VZ328
+ help
+ This driver supports the built-in serial port of the Motorola 68328
+ (standard, EZ and VZ varieties).
+
+config SERIAL_68328_RTS_CTS
+ bool "Support RTS/CTS on 68328 serial port"
+ depends on SERIAL_68328
+
+config SERIAL_MCF
+ bool "Coldfire serial support"
+ depends on COLDFIRE
+ select SERIAL_CORE
+ help
+ This serial driver supports the Freescale ColdFire serial ports.
+
+config SERIAL_MCF_BAUDRATE
+ int "Default baudrate for Coldfire serial ports"
+ depends on SERIAL_MCF
+ default 19200
+ help
+ This setting lets you define what the default baudrate is for the
+ ColdFire serial ports. The usual default varies from board to board,
+ and this setting is a way of catering for that.
+
+config SERIAL_MCF_CONSOLE
+ bool "Coldfire serial console support"
+ depends on SERIAL_MCF
+ select SERIAL_CORE_CONSOLE
+ help
+ Enable a ColdFire internal serial port to be the system console.
+
+config SERIAL_68360_SMC
+ bool "68360 SMC uart support"
+ depends on M68360
+ help
+ This driver supports the SMC serial ports of the Motorola 68360 CPU.
+
+config SERIAL_68360_SCC
+ bool "68360 SCC uart support"
+ depends on M68360
+ help
+ This driver supports the SCC serial ports of the Motorola 68360 CPU.
+
+config SERIAL_68360
+ bool
+ depends on SERIAL_68360_SMC || SERIAL_68360_SCC
+ default y
+
+config SERIAL_PMACZILOG
+ tristate "Mac or PowerMac z85c30 ESCC support"
+ depends on (M68K && MAC) || (PPC_OF && PPC_PMAC)
+ select SERIAL_CORE
+ help
+ This driver supports the Zilog z85C30 serial ports found on
+ (Power)Mac machines.
+ Say Y or M if you want to be able to use these serial ports.
+
+config SERIAL_PMACZILOG_TTYS
+ bool "Use ttySn device nodes for Zilog z85c30"
+ depends on SERIAL_PMACZILOG
+ help
+ The pmac_zilog driver for the z85C30 chip on many powermacs
+ historically used the device numbers for /dev/ttySn. The
+ 8250 serial port driver also uses these numbers, which means
+ the two drivers cannot coexist; you could not use
+ both z85C30 and 8250 type ports at the same time.
+
+ If this option is not selected, the pmac_zilog driver will
+ use the device numbers allocated for /dev/ttyPZn. This allows
+ the pmac_zilog and 8250 drivers to co-exist, but may cause
+ existing userspace setups to break. Programs that need to
+ access the built-in serial ports on powermacs will need to
+ be reconfigured to use /dev/ttyPZn instead of /dev/ttySn.
+
+ If you enable this option, any z85c30 ports in the system will
+ be registered as ttyS0 onwards as in the past, and you will be
+ unable to use the 8250 module for PCMCIA or other 16C550-style
+ UARTs.
+
+ Say N unless you need the z85c30 ports on your (Power)Mac
+ to appear as /dev/ttySn.
+
+config SERIAL_PMACZILOG_CONSOLE
+ bool "Console on Mac or PowerMac z85c30 serial port"
+ depends on SERIAL_PMACZILOG=y
+ select SERIAL_CORE_CONSOLE
+ help
+ If you would like to be able to use the z85c30 serial port
+ on your (Power)Mac as the console, you can do so by answering
+ Y to this option.
+
+config SERIAL_CPM
+ tristate "CPM SCC/SMC serial port support"
+ depends on CPM2 || 8xx
+ select SERIAL_CORE
+ help
+ This driver supports the SCC and SMC serial ports on Motorola
+ embedded PowerPC processors that contain a CPM1 (8xx) or CPM2 (8xxx).
+
+config SERIAL_CPM_CONSOLE
+ bool "Support for console on CPM SCC/SMC serial port"
+ depends on SERIAL_CPM=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Say Y here if you wish to use a SCC or SMC CPM UART as the system
+ console (the system console is the device which receives all kernel
+ messages and warnings and which allows logins in single user mode).
+
+ Even if you say Y here, the currently visible framebuffer console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttyCPM0". (Try "man bootparam" or see the documentation of
+ your boot loader (lilo or loadlin) about how to pass options to the
+ kernel at boot time.)
+
+config SERIAL_SGI_L1_CONSOLE
+ bool "SGI Altix L1 serial console support"
+ depends on IA64_GENERIC || IA64_SGI_SN2
+ select SERIAL_CORE
+ select SERIAL_CORE_CONSOLE
+ help
+ If you have an SGI Altix and you would like to use the system
+ controller serial port as your console (you want this!),
+ say Y. Otherwise, say N.
+
+config SERIAL_MPC52xx
+ tristate "Freescale MPC52xx/MPC512x family PSC serial support"
+ depends on PPC_MPC52xx || PPC_MPC512x
+ select SERIAL_CORE
+ help
+ This driver supports MPC52xx and MPC512x PSC serial ports. If you would
+ like to use them, you must answer Y or M to this option. Note that
+ for use as a console, it must be built into the kernel and not as a
+ module.
+
+config SERIAL_MPC52xx_CONSOLE
+ bool "Console on a Freescale MPC52xx/MPC512x family PSC serial port"
+ depends on SERIAL_MPC52xx=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Select this option if you'd like to use one of the PSC serial ports
+ of the Freescale MPC52xx family as a console.
+
+config SERIAL_MPC52xx_CONSOLE_BAUD
+ int "Freescale MPC52xx/MPC512x family PSC serial port baud"
+ depends on SERIAL_MPC52xx_CONSOLE=y
+ default "9600"
+ help
+ Select the MPC52xx console baud rate.
+ This value is only used if the bootloader doesn't pass in the
+ console baudrate
+
+config SERIAL_ICOM
+ tristate "IBM Multiport Serial Adapter"
+ depends on PCI && (PPC_ISERIES || PPC_PSERIES)
+ select SERIAL_CORE
+ select FW_LOADER
+ help
+ This driver is for a family of multiport serial adapters
+ including 2 port RVX, 2 port internal modem, 4 port internal
+ modem and a split 1 port RVX and 1 port internal modem.
+
+ This driver can also be built as a module. If so, the module
+ will be called icom.
+
+config SERIAL_M32R_SIO
+ bool "M32R SIO I/F"
+ depends on M32R
+ default y
+ select SERIAL_CORE
+ help
+ Say Y here if you want to use the M32R serial controller.
+
+config SERIAL_M32R_SIO_CONSOLE
+ bool "use SIO console"
+ depends on SERIAL_M32R_SIO=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Say Y here if you want to support a serial console.
+
+ If you use an M3T-M32700UT or an OPSPUT platform,
+ please say also y for SERIAL_M32R_PLDSIO.
+
+config SERIAL_M32R_PLDSIO
+ bool "M32R SIO I/F on a PLD"
+ depends on SERIAL_M32R_SIO=y && (PLAT_OPSPUT || PLAT_USRV || PLAT_M32700UT)
+ default n
+ help
+ Say Y here if you want to use the M32R serial controller
+ on a PLD (Programmable Logic Device).
+
+ If you use an M3T-M32700UT or an OPSPUT platform,
+ please say Y.
+
+config SERIAL_TXX9
+ bool "TMPTX39XX/49XX SIO support"
+ depends on HAS_TXX9_SERIAL
+ select SERIAL_CORE
+ default y
+
+config HAS_TXX9_SERIAL
+ bool
+
+config SERIAL_TXX9_NR_UARTS
+ int "Maximum number of TMPTX39XX/49XX SIO ports"
+ depends on SERIAL_TXX9
+ default "6"
+
+config SERIAL_TXX9_CONSOLE
+ bool "TMPTX39XX/49XX SIO Console support"
+ depends on SERIAL_TXX9=y
+ select SERIAL_CORE_CONSOLE
+
+config SERIAL_TXX9_STDSERIAL
+ bool "TX39XX/49XX SIO act as standard serial"
+ depends on !SERIAL_8250 && SERIAL_TXX9
+
+config SERIAL_VR41XX
+ tristate "NEC VR4100 series Serial Interface Unit support"
+ depends on CPU_VR41XX
+ select SERIAL_CORE
+ help
+ If you have a NEC VR4100 series processor and you want to use
+ the Serial Interface Unit (SIU) or Debug Serial Interface Unit (DSIU)
+ (not including the VR4111/VR4121 DSIU), say Y. Otherwise, say N.
+
+config SERIAL_VR41XX_CONSOLE
+ bool "Enable NEC VR4100 series Serial Interface Unit console"
+ depends on SERIAL_VR41XX=y
+ select SERIAL_CORE_CONSOLE
+ help
+ If you have a NEC VR4100 series processor and you want to use
+ a console on a serial port, say Y. Otherwise, say N.
+
+config SERIAL_JSM
+ tristate "Digi International NEO PCI Support"
+ depends on PCI
+ select SERIAL_CORE
+ help
+ This is a driver for Digi International's Neo series
+ of cards which provide multiple serial ports. You would need
+ something like this to connect more than two modems to your Linux
+ box, for instance in order to become a dial-in server. This driver
+ supports PCI boards only.
+
+ If you have a card like this, say Y here, otherwise say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called jsm.
+
+config SERIAL_SGI_IOC4
+ tristate "SGI IOC4 controller serial support"
+ depends on (IA64_GENERIC || IA64_SGI_SN2) && SGI_IOC4
+ select SERIAL_CORE
+ help
+ If you have an SGI Altix with an IOC4 based Base IO card
+ and wish to use the serial ports on this card, say Y.
+ Otherwise, say N.
+
+config SERIAL_SGI_IOC3
+ tristate "SGI Altix IOC3 serial support"
+ depends on (IA64_GENERIC || IA64_SGI_SN2) && SGI_IOC3
+ select SERIAL_CORE
+ help
+ If you have an SGI Altix with an IOC3 serial card,
+ say Y or M. Otherwise, say N.
+
+config SERIAL_MSM
+ bool "MSM on-chip serial port support"
+ depends on ARM && ARCH_MSM
+ select SERIAL_CORE
+
+config SERIAL_MSM_CONSOLE
+ bool "MSM serial console support"
+ depends on SERIAL_MSM=y
+ select SERIAL_CORE_CONSOLE
+
+config SERIAL_MSM_HS
+ tristate "MSM UART High Speed: Serial Driver"
+ depends on ARCH_MSM
+ select SERIAL_CORE
+ help
+ If you have a machine based on MSM family of SoCs, you
+ can enable its onboard high speed serial port by enabling
+ this option.
+
+ Choose M here to compile it as a module. The module will be
+ called msm_serial_hs.
+
+config SERIAL_VT8500
+ bool "VIA VT8500 on-chip serial port support"
+ depends on ARM && ARCH_VT8500
+ select SERIAL_CORE
+
+config SERIAL_VT8500_CONSOLE
+ bool "VIA VT8500 serial console support"
+ depends on SERIAL_VT8500=y
+ select SERIAL_CORE_CONSOLE
+
+config SERIAL_NETX
+ tristate "NetX serial port support"
+ depends on ARM && ARCH_NETX
+ select SERIAL_CORE
+ help
+ If you have a machine based on a Hilscher NetX SoC you
+ can enable its onboard serial port by enabling this option.
+
+ To compile this driver as a module, choose M here: the
+ module will be called netx-serial.
+
+config SERIAL_NETX_CONSOLE
+ bool "Console on NetX serial port"
+ depends on SERIAL_NETX=y
+ select SERIAL_CORE_CONSOLE
+ help
+ If you have enabled the serial port on the Hilscher NetX SoC
+ you can make it the console by answering Y to this option.
+
+config SERIAL_OF_PLATFORM
+ tristate "Serial port on Open Firmware platform bus"
+ depends on OF
+ depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL
+ help
+ If you have a PowerPC based system that has serial ports
+ on a platform specific bus, you should enable this option.
+ Currently, only 8250 compatible ports are supported, but
+ others can easily be added.
+
+config SERIAL_OMAP
+ tristate "OMAP serial port support"
+ depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4
+ select SERIAL_CORE
+ help
+ If you have a machine based on a Texas Instruments OMAP CPU you
+ can enable its onboard serial ports by enabling this option.
+
+ By enabling this option you take advantage of the DMA feature available
+ with the omap-serial driver. DMA support can be enabled from platform
+ data.
+
+config SERIAL_OMAP_CONSOLE
+ bool "Console on OMAP serial port"
+ depends on SERIAL_OMAP
+ select SERIAL_CORE_CONSOLE
+ help
+ Select this option if you would like to use omap serial port as
+ console.
+
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttyOx". (Try "man bootparam" or see the documentation of
+ your boot loader about how to pass options to the kernel at
+ boot time.)
+
+config SERIAL_OF_PLATFORM_NWPSERIAL
+ tristate "NWP serial port driver"
+ depends on PPC_OF && PPC_DCR
+ select SERIAL_OF_PLATFORM
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_CORE
+ help
+ This driver supports the Cell network processor NWP serial
+ device.
+
+config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
+ bool "Console on NWP serial port"
+ depends on SERIAL_OF_PLATFORM_NWPSERIAL=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Support for Console on the NWP serial ports.
+
+config SERIAL_LANTIQ
+ bool "Lantiq serial driver"
+ depends on LANTIQ
+ select SERIAL_CORE
+ select SERIAL_CORE_CONSOLE
+ help
+ Support for console and UART on Lantiq SoCs.
+
+config SERIAL_QE
+ tristate "Freescale QUICC Engine serial port support"
+ depends on QUICC_ENGINE
+ select SERIAL_CORE
+ select FW_LOADER
+ default n
+ help
+ This driver supports the QE serial ports on Freescale embedded
+ PowerPC that contain a QUICC Engine.
+
+config SERIAL_SC26XX
+ tristate "SC2681/SC2692 serial port support"
+ depends on SNI_RM
+ select SERIAL_CORE
+ help
+ This is a driver for the onboard serial ports of
+ older RM400 machines.
+
+config SERIAL_SC26XX_CONSOLE
+ bool "Console on SC2681/SC2692 serial port"
+ depends on SERIAL_SC26XX=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Support for Console on SC2681/SC2692 serial ports.
+
+config SERIAL_BFIN_SPORT
+ tristate "Blackfin SPORT emulate UART"
+ depends on BLACKFIN
+ select SERIAL_CORE
+ help
+ Enable the SPORT to emulate a UART on the Blackfin series.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bfin_sport_uart.
+
+config SERIAL_BFIN_SPORT_CONSOLE
+ bool "Console on Blackfin sport emulated uart"
+ depends on SERIAL_BFIN_SPORT=y
+ select SERIAL_CORE_CONSOLE
+
+config SERIAL_BFIN_SPORT0_UART
+ bool "Enable UART over SPORT0"
+ depends on SERIAL_BFIN_SPORT && !(BF542 || BF544)
+ help
+ Enable UART over SPORT0
+
+config SERIAL_BFIN_SPORT0_UART_CTSRTS
+ bool "Enable UART over SPORT0 hardware flow control"
+ depends on SERIAL_BFIN_SPORT0_UART
+ help
+ Enable hardware flow control in the driver.
+
+config SERIAL_BFIN_SPORT1_UART
+ bool "Enable UART over SPORT1"
+ depends on SERIAL_BFIN_SPORT
+ help
+ Enable UART over SPORT1
+
+config SERIAL_BFIN_SPORT1_UART_CTSRTS
+ bool "Enable UART over SPORT1 hardware flow control"
+ depends on SERIAL_BFIN_SPORT1_UART
+ help
+ Enable hardware flow control in the driver.
+
+config SERIAL_BFIN_SPORT2_UART
+ bool "Enable UART over SPORT2"
+ depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
+ help
+ Enable UART over SPORT2
+
+config SERIAL_BFIN_SPORT2_UART_CTSRTS
+ bool "Enable UART over SPORT2 hardware flow control"
+ depends on SERIAL_BFIN_SPORT2_UART
+ help
+ Enable hardware flow control in the driver.
+
+config SERIAL_BFIN_SPORT3_UART
+ bool "Enable UART over SPORT3"
+ depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
+ help
+ Enable UART over SPORT3
+
+config SERIAL_BFIN_SPORT3_UART_CTSRTS
+ bool "Enable UART over SPORT3 hardware flow control"
+ depends on SERIAL_BFIN_SPORT3_UART
+ help
+ Enable hardware flow control in the driver.
+
+config SERIAL_TIMBERDALE
+ tristate "Support for timberdale UART"
+ select SERIAL_CORE
+ ---help---
+ Add support for UART controller on timberdale.
+
+config SERIAL_BCM63XX
+ tristate "bcm63xx serial port support"
+ select SERIAL_CORE
+ depends on BCM63XX
+ help
+ If you have a bcm63xx CPU, you can enable its onboard
+ serial port by enabling this option.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bcm963xx_uart.
+
+config SERIAL_BCM63XX_CONSOLE
+ bool "Console on bcm63xx serial port"
+ depends on SERIAL_BCM63XX=y
+ select SERIAL_CORE_CONSOLE
+ help
+ If you have enabled the serial port on the bcm63xx CPU
+ you can make it the console by answering Y to this option.
+
+config SERIAL_GRLIB_GAISLER_APBUART
+ tristate "GRLIB APBUART serial support"
+ depends on OF && SPARC
+ select SERIAL_CORE
+ ---help---
+ Add support for the GRLIB APBUART serial port.
+
+config SERIAL_GRLIB_GAISLER_APBUART_CONSOLE
+ bool "Console on GRLIB APBUART serial port"
+ depends on SERIAL_GRLIB_GAISLER_APBUART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Support for running a console on the GRLIB APBUART
+
+config SERIAL_ALTERA_JTAGUART
+ tristate "Altera JTAG UART support"
+ select SERIAL_CORE
+ help
+ This driver supports the Altera JTAG UART port.
+
+config SERIAL_ALTERA_JTAGUART_CONSOLE
+ bool "Altera JTAG UART console support"
+ depends on SERIAL_ALTERA_JTAGUART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Enable an Altera JTAG UART port to be the system console.
+
+config SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS
+ bool "Bypass output when no connection"
+ depends on SERIAL_ALTERA_JTAGUART_CONSOLE
+ select SERIAL_CORE_CONSOLE
+ help
+ Bypass console output and keep going even if there is no
+ JTAG terminal connection with the host.
+
+config SERIAL_ALTERA_UART
+ tristate "Altera UART support"
+ select SERIAL_CORE
+ help
+ This driver supports the Altera softcore UART port.
+
+config SERIAL_ALTERA_UART_MAXPORTS
+ int "Maximum number of Altera UART ports"
+ depends on SERIAL_ALTERA_UART
+ default 4
+ help
+ This setting lets you define the maximum number of Altera
+ UART ports. The usual default varies from board to board, and
+ this setting is a way of catering for that.
+
+config SERIAL_ALTERA_UART_BAUDRATE
+ int "Default baudrate for Altera UART ports"
+ depends on SERIAL_ALTERA_UART
+ default 115200
+ help
+ This setting lets you define what the default baudrate is for the
+ Altera UART ports. The usual default varies from board to board,
+ and this setting is a way of catering for that.
+
+config SERIAL_ALTERA_UART_CONSOLE
+ bool "Altera UART console support"
+ depends on SERIAL_ALTERA_UART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Enable an Altera UART port to be the system console.
+
+config SERIAL_IFX6X60
+ tristate "SPI protocol driver for Infineon 6x60 modem (EXPERIMENTAL)"
+ depends on GPIOLIB && SPI && EXPERIMENTAL
+ help
+ Support for the IFX6x60 modem devices on Intel MID platforms.
+
+config SERIAL_PCH_UART
+ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) UART"
+ depends on PCI
+ select SERIAL_CORE
+ help
+ This driver is for the PCH (Platform Controller Hub) UART of the Intel
+ EG20T, which is an IOH (Input/Output Hub) for x86 embedded processors.
+ If PCH_DMA is enabled, this PCH UART can operate in DMA mode.
+
+ This driver can also be used for the LAPIS Semiconductor IOHs
+ (Input/Output Hubs) ML7213, ML7223 and ML7831.
+ The ML7213 IOH is for IVI (In-Vehicle Infotainment) use, the ML7223 IOH
+ is for MP (Media Phone) use and the ML7831 IOH is for general purpose use.
+ The ML7213/ML7223/ML7831 are companion chips for the Intel Atom E6xx
+ series and are fully compatible with the Intel EG20T PCH.
+
+config SERIAL_MSM_SMD
+ bool "Enable tty device interface for some SMD ports"
+ default n
+ depends on MSM_SMD
+ help
+ Enables userspace clients to read and write to some streaming SMD
+ ports via a tty device interface on MSM chipsets.
+
+config SERIAL_MXS_AUART
+ depends on ARCH_MXS
+ tristate "MXS AUART support"
+ select SERIAL_CORE
+ help
+ This driver supports the MXS Application UART (AUART) port.
+
+config SERIAL_MXS_AUART_CONSOLE
+ bool "MXS AUART console support"
+ depends on SERIAL_MXS_AUART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Enable an MXS AUART port to be the system console.
+
+config SERIAL_XILINX_PS_UART
+ tristate "Xilinx PS UART support"
+ select SERIAL_CORE
+ help
+ This driver supports the Xilinx PS UART port.
+
+config SERIAL_XILINX_PS_UART_CONSOLE
+ bool "Xilinx PS UART console support"
+ depends on SERIAL_XILINX_PS_UART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Enable a Xilinx PS UART port to be the system console.
+
+endmenu
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
new file mode 100644
index 00000000..5f9f9e69
--- /dev/null
+++ b/drivers/tty/serial/Makefile
@@ -0,0 +1,99 @@
+#
+# Makefile for the kernel serial device drivers.
+#
+
+obj-$(CONFIG_SERIAL_CORE) += serial_core.o
+obj-$(CONFIG_SERIAL_21285) += 21285.o
+
+# These Sparc drivers have to appear before others such as 8250
+# which share ttySx minor node space. Otherwise console device
+# names change and other unpleasantries occur.
+obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o
+obj-$(CONFIG_SERIAL_SUNHV) += sunhv.o
+obj-$(CONFIG_SERIAL_SUNZILOG) += sunzilog.o
+obj-$(CONFIG_SERIAL_SUNSU) += sunsu.o
+obj-$(CONFIG_SERIAL_SUNSAB) += sunsab.o
+
+obj-$(CONFIG_SERIAL_8250) += 8250.o
+obj-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
+obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
+obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
+obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
+obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o
+obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o
+obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o
+obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o
+obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o
+obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o
+obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
+obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
+obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o
+obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o
+obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o
+obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
+obj-$(CONFIG_SERIAL_PXA) += pxa.o
+obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
+obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
+obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
+obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o
+obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o
+obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
+obj-$(CONFIG_SERIAL_S3C2400) += s3c2400.o
+obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o
+obj-$(CONFIG_SERIAL_S3C2412) += s3c2412.o
+obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o
+obj-$(CONFIG_SERIAL_S3C24A0) += s3c24a0.o
+obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
+obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o
+obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
+obj-$(CONFIG_SERIAL_MAX3107) += max3107.o
+obj-$(CONFIG_SERIAL_MAX3107_AAVA) += max3107-aava.o
+obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
+obj-$(CONFIG_SERIAL_MUX) += mux.o
+obj-$(CONFIG_SERIAL_68328) += 68328serial.o
+obj-$(CONFIG_SERIAL_68360) += 68360serial.o
+obj-$(CONFIG_SERIAL_MCF) += mcf.o
+obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o
+obj-$(CONFIG_SERIAL_DZ) += dz.o
+obj-$(CONFIG_SERIAL_ZS) += zs.o
+obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
+obj-$(CONFIG_SERIAL_SGI_L1_CONSOLE) += sn_console.o
+obj-$(CONFIG_SERIAL_CPM) += cpm_uart/
+obj-$(CONFIG_SERIAL_IMX) += imx.o
+obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o
+obj-$(CONFIG_SERIAL_ICOM) += icom.o
+obj-$(CONFIG_SERIAL_M32R_SIO) += m32r_sio.o
+obj-$(CONFIG_SERIAL_MPSC) += mpsc.o
+obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
+obj-$(CONFIG_ETRAX_SERIAL) += crisv10.o
+obj-$(CONFIG_SERIAL_SC26XX) += sc26xx.o
+obj-$(CONFIG_SERIAL_JSM) += jsm/
+obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
+obj-$(CONFIG_SERIAL_VR41XX) += vr41xx_siu.o
+obj-$(CONFIG_SERIAL_SGI_IOC4) += ioc4_serial.o
+obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o
+obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
+obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
+obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
+obj-$(CONFIG_SERIAL_MSM_HS) += msm_serial_hs.o
+obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
+obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
+obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
+obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
+obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
+obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
+obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
+obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
+obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
+obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
+obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
+obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o
+obj-$(CONFIG_SERIAL_MRST_MAX3110) += mrst_max3110.o
+obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o
+obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o
+obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
+obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
+obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
+obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o
+obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o
+obj-$(CONFIG_SERIAL_IMX_CONSOLE) += mxc_uart_early.o
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
new file mode 100644
index 00000000..60e049b0
--- /dev/null
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -0,0 +1,516 @@
+/*
+ * altera_jtaguart.c -- Altera JTAG UART driver
+ *
+ * Based on mcf.c -- Freescale ColdFire UART driver
+ *
+ * (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
+ * (C) Copyright 2008, Thomas Chou <thomas@wytron.com.tw>
+ * (C) Copyright 2010, Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/altera_jtaguart.h>
+
+#define DRV_NAME "altera_jtaguart"
+
+/*
+ * Altera JTAG UART register definitions according to the Altera JTAG UART
+ * datasheet: http://www.altera.com/literature/hb/nios2/n2cpu_nii51009.pdf
+ */
+
+#define ALTERA_JTAGUART_SIZE 8
+
+#define ALTERA_JTAGUART_DATA_REG 0
+
+#define ALTERA_JTAGUART_DATA_DATA_MSK 0x000000FF
+#define ALTERA_JTAGUART_DATA_RVALID_MSK 0x00008000
+#define ALTERA_JTAGUART_DATA_RAVAIL_MSK 0xFFFF0000
+#define ALTERA_JTAGUART_DATA_RAVAIL_OFF 16
+
+#define ALTERA_JTAGUART_CONTROL_REG 4
+
+#define ALTERA_JTAGUART_CONTROL_RE_MSK 0x00000001
+#define ALTERA_JTAGUART_CONTROL_WE_MSK 0x00000002
+#define ALTERA_JTAGUART_CONTROL_RI_MSK 0x00000100
+#define ALTERA_JTAGUART_CONTROL_RI_OFF 8
+#define ALTERA_JTAGUART_CONTROL_WI_MSK 0x00000200
+#define ALTERA_JTAGUART_CONTROL_AC_MSK 0x00000400
+#define ALTERA_JTAGUART_CONTROL_WSPACE_MSK 0xFFFF0000
+#define ALTERA_JTAGUART_CONTROL_WSPACE_OFF 16
+
+/*
+ * Local per-uart structure.
+ */
+struct altera_jtaguart {
+ struct uart_port port;
+ unsigned int sigs; /* Local copy of line sigs */
+ unsigned long imr; /* Local IMR mirror */
+};
+
+static unsigned int altera_jtaguart_tx_empty(struct uart_port *port)
+{
+ return (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
+ ALTERA_JTAGUART_CONTROL_WSPACE_MSK) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int altera_jtaguart_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+}
+
+static void altera_jtaguart_set_mctrl(struct uart_port *port, unsigned int sigs)
+{
+}
+
+static void altera_jtaguart_start_tx(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+
+ pp->imr |= ALTERA_JTAGUART_CONTROL_WE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static void altera_jtaguart_stop_tx(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+
+ pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static void altera_jtaguart_stop_rx(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+
+ pp->imr &= ~ALTERA_JTAGUART_CONTROL_RE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static void altera_jtaguart_break_ctl(struct uart_port *port, int break_state)
+{
+}
+
+static void altera_jtaguart_enable_ms(struct uart_port *port)
+{
+}
+
+static void altera_jtaguart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ /* Just copy the old termios settings back */
+ if (old)
+ tty_termios_copy_hw(termios, old);
+}
+
+static void altera_jtaguart_rx_chars(struct altera_jtaguart *pp)
+{
+ struct uart_port *port = &pp->port;
+ unsigned char ch, flag;
+ unsigned long status;
+
+ while ((status = readl(port->membase + ALTERA_JTAGUART_DATA_REG)) &
+ ALTERA_JTAGUART_DATA_RVALID_MSK) {
+ ch = status & ALTERA_JTAGUART_DATA_DATA_MSK;
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+ uart_insert_char(port, 0, 0, ch, flag);
+ }
+
+ tty_flip_buffer_push(port->state->port.tty);
+}
+
+static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp)
+{
+ struct uart_port *port = &pp->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned int pending, count;
+
+ if (port->x_char) {
+ /* Send special char - probably flow control */
+ writel(port->x_char, port->membase + ALTERA_JTAGUART_DATA_REG);
+ port->x_char = 0;
+ port->icount.tx++;
+ return;
+ }
+
+ pending = uart_circ_chars_pending(xmit);
+ if (pending > 0) {
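+ /*
+ * The upper 16 bits of the control register report how many bytes
+ * of space are left in the write FIFO, so never push more than
+ * that in one go.
+ */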
+ count = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
+ ALTERA_JTAGUART_CONTROL_WSPACE_MSK) >>
+ ALTERA_JTAGUART_CONTROL_WSPACE_OFF;
+ if (count > pending)
+ count = pending;
+ if (count > 0) {
+ pending -= count;
+ while (count--) {
+ writel(xmit->buf[xmit->tail],
+ port->membase + ALTERA_JTAGUART_DATA_REG);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+ if (pending < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+ }
+ }
+
+ if (pending == 0) {
+ pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+ }
+}
+
+static irqreturn_t altera_jtaguart_interrupt(int irq, void *data)
+{
+ struct uart_port *port = data;
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+ unsigned int isr;
+
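+ /*
+ * The read/write interrupt pending bits sit at bits 8 and 9 of the
+ * control register; shifting them down by RI_OFF lines them up with
+ * the RE/WE enable bits mirrored in pp->imr.
+ */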
+ isr = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) >>
+ ALTERA_JTAGUART_CONTROL_RI_OFF) & pp->imr;
+
+ spin_lock(&port->lock);
+
+ if (isr & ALTERA_JTAGUART_CONTROL_RE_MSK)
+ altera_jtaguart_rx_chars(pp);
+ if (isr & ALTERA_JTAGUART_CONTROL_WE_MSK)
+ altera_jtaguart_tx_chars(pp);
+
+ spin_unlock(&port->lock);
+
+ return IRQ_RETVAL(isr);
+}
+
+static void altera_jtaguart_config_port(struct uart_port *port, int flags)
+{
+ port->type = PORT_ALTERA_JTAGUART;
+
+ /* Clear mask, so no surprise interrupts. */
+ writel(0, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static int altera_jtaguart_startup(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+ unsigned long flags;
+ int ret;
+
+ ret = request_irq(port->irq, altera_jtaguart_interrupt, IRQF_DISABLED,
+ DRV_NAME, port);
+ if (ret) {
+ pr_err(DRV_NAME ": unable to attach Altera JTAG UART %d "
+ "interrupt vector=%d\n", port->line, port->irq);
+ return ret;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Enable RX interrupts now */
+ pp->imr = ALTERA_JTAGUART_CONTROL_RE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+static void altera_jtaguart_shutdown(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Disable all interrupts now */
+ pp->imr = 0;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ free_irq(port->irq, port);
+}
+
+static const char *altera_jtaguart_type(struct uart_port *port)
+{
+ return (port->type == PORT_ALTERA_JTAGUART) ? "Altera JTAG UART" : NULL;
+}
+
+static int altera_jtaguart_request_port(struct uart_port *port)
+{
+ /* UARTs always present */
+ return 0;
+}
+
+static void altera_jtaguart_release_port(struct uart_port *port)
+{
+ /* Nothing to release... */
+}
+
+static int altera_jtaguart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_ALTERA_JTAGUART)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Define the basic serial functions we support.
+ */
+static struct uart_ops altera_jtaguart_ops = {
+ .tx_empty = altera_jtaguart_tx_empty,
+ .get_mctrl = altera_jtaguart_get_mctrl,
+ .set_mctrl = altera_jtaguart_set_mctrl,
+ .start_tx = altera_jtaguart_start_tx,
+ .stop_tx = altera_jtaguart_stop_tx,
+ .stop_rx = altera_jtaguart_stop_rx,
+ .enable_ms = altera_jtaguart_enable_ms,
+ .break_ctl = altera_jtaguart_break_ctl,
+ .startup = altera_jtaguart_startup,
+ .shutdown = altera_jtaguart_shutdown,
+ .set_termios = altera_jtaguart_set_termios,
+ .type = altera_jtaguart_type,
+ .request_port = altera_jtaguart_request_port,
+ .release_port = altera_jtaguart_release_port,
+ .config_port = altera_jtaguart_config_port,
+ .verify_port = altera_jtaguart_verify_port,
+};
+
+#define ALTERA_JTAGUART_MAXPORTS 1
+static struct altera_jtaguart altera_jtaguart_ports[ALTERA_JTAGUART_MAXPORTS];
+
+#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE)
+
+#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS)
+static void altera_jtaguart_console_putc(struct console *co, const char c)
+{
+ struct uart_port *port = &(altera_jtaguart_ports + co->index)->port;
+ unsigned long status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ while (((status = readl(port->membase + ALTERA_JTAGUART_CONTROL_REG)) &
+ ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0) {
+ if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ return; /* no connection activity */
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ cpu_relax();
+ spin_lock_irqsave(&port->lock, flags);
+ }
+ writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+#else
+static void altera_jtaguart_console_putc(struct console *co, const char c)
+{
+ struct uart_port *port = &(altera_jtaguart_ports + co->index)->port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ while ((readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
+ ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ cpu_relax();
+ spin_lock_irqsave(&port->lock, flags);
+ }
+ writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+#endif
+
+static void altera_jtaguart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ for (; count; count--, s++) {
+ altera_jtaguart_console_putc(co, *s);
+ if (*s == '\n')
+ altera_jtaguart_console_putc(co, '\r');
+ }
+}
+
+static int __init altera_jtaguart_console_setup(struct console *co,
+ char *options)
+{
+ struct uart_port *port;
+
+ if (co->index < 0 || co->index >= ALTERA_JTAGUART_MAXPORTS)
+ return -EINVAL;
+ port = &altera_jtaguart_ports[co->index].port;
+ if (port->membase == NULL)
+ return -ENODEV;
+ return 0;
+}
+
+static struct uart_driver altera_jtaguart_driver;
+
+static struct console altera_jtaguart_console = {
+ .name = "ttyJ",
+ .write = altera_jtaguart_console_write,
+ .device = uart_console_device,
+ .setup = altera_jtaguart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &altera_jtaguart_driver,
+};
+
+static int __init altera_jtaguart_console_init(void)
+{
+ register_console(&altera_jtaguart_console);
+ return 0;
+}
+
+console_initcall(altera_jtaguart_console_init);
+
+#define ALTERA_JTAGUART_CONSOLE (&altera_jtaguart_console)
+
+#else
+
+#define ALTERA_JTAGUART_CONSOLE NULL
+
+#endif /* CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE */
+
+static struct uart_driver altera_jtaguart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "altera_jtaguart",
+ .dev_name = "ttyJ",
+ .major = ALTERA_JTAGUART_MAJOR,
+ .minor = ALTERA_JTAGUART_MINOR,
+ .nr = ALTERA_JTAGUART_MAXPORTS,
+ .cons = ALTERA_JTAGUART_CONSOLE,
+};
+
+static int __devinit altera_jtaguart_probe(struct platform_device *pdev)
+{
+ struct altera_jtaguart_platform_uart *platp = pdev->dev.platform_data;
+ struct uart_port *port;
+ struct resource *res_irq, *res_mem;
+ int i = pdev->id;
+
+ /* -1 emphasizes that the platform must have one port, no .N suffix */
+ if (i == -1)
+ i = 0;
+
+ if (i >= ALTERA_JTAGUART_MAXPORTS)
+ return -EINVAL;
+
+ port = &altera_jtaguart_ports[i].port;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem)
+ port->mapbase = res_mem->start;
+ else if (platp)
+ port->mapbase = platp->mapbase;
+ else
+ return -ENODEV;
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res_irq)
+ port->irq = res_irq->start;
+ else if (platp)
+ port->irq = platp->irq;
+ else
+ return -ENODEV;
+
+ port->membase = ioremap(port->mapbase, ALTERA_JTAGUART_SIZE);
+ if (!port->membase)
+ return -ENOMEM;
+
+ port->line = i;
+ port->type = PORT_ALTERA_JTAGUART;
+ port->iotype = SERIAL_IO_MEM;
+ port->ops = &altera_jtaguart_ops;
+ port->flags = UPF_BOOT_AUTOCONF;
+
+ uart_add_one_port(&altera_jtaguart_driver, port);
+
+ return 0;
+}
+
+static int __devexit altera_jtaguart_remove(struct platform_device *pdev)
+{
+ struct uart_port *port;
+ int i = pdev->id;
+
+ if (i == -1)
+ i = 0;
+
+ port = &altera_jtaguart_ports[i].port;
+ uart_remove_one_port(&altera_jtaguart_driver, port);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id altera_jtaguart_match[] = {
+ { .compatible = "ALTR,juart-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_jtaguart_match);
+#else
+#define altera_jtaguart_match NULL
+#endif /* CONFIG_OF */
+
+static struct platform_driver altera_jtaguart_platform_driver = {
+ .probe = altera_jtaguart_probe,
+ .remove = __devexit_p(altera_jtaguart_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = altera_jtaguart_match,
+ },
+};
+
+static int __init altera_jtaguart_init(void)
+{
+ int rc;
+
+ rc = uart_register_driver(&altera_jtaguart_driver);
+ if (rc)
+ return rc;
+ rc = platform_driver_register(&altera_jtaguart_platform_driver);
+ if (rc) {
+ uart_unregister_driver(&altera_jtaguart_driver);
+ return rc;
+ }
+ return 0;
+}
+
+static void __exit altera_jtaguart_exit(void)
+{
+ platform_driver_unregister(&altera_jtaguart_platform_driver);
+ uart_unregister_driver(&altera_jtaguart_driver);
+}
+
+module_init(altera_jtaguart_init);
+module_exit(altera_jtaguart_exit);
+
+MODULE_DESCRIPTION("Altera JTAG UART driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
new file mode 100644
index 00000000..37db1d58
--- /dev/null
+++ b/drivers/tty/serial/altera_uart.c
@@ -0,0 +1,661 @@
+/*
+ * altera_uart.c -- Altera UART driver
+ *
+ * Based on mcf.c -- Freescale ColdFire UART driver
+ *
+ * (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
+ * (C) Copyright 2008, Thomas Chou <thomas@wytron.com.tw>
+ * (C) Copyright 2010, Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/altera_uart.h>
+
+#define DRV_NAME "altera_uart"
+#define SERIAL_ALTERA_MAJOR 204
+#define SERIAL_ALTERA_MINOR 213
+
+/*
+ * Altera UART register definitions according to the Nios UART datasheet:
+ * http://www.altera.com/literature/ds/ds_nios_uart.pdf
+ */
+
+#define ALTERA_UART_SIZE 32
+
+#define ALTERA_UART_RXDATA_REG 0
+#define ALTERA_UART_TXDATA_REG 4
+#define ALTERA_UART_STATUS_REG 8
+#define ALTERA_UART_CONTROL_REG 12
+#define ALTERA_UART_DIVISOR_REG 16
+#define ALTERA_UART_EOP_REG 20
+
+#define ALTERA_UART_STATUS_PE_MSK 0x0001 /* parity error */
+#define ALTERA_UART_STATUS_FE_MSK 0x0002 /* framing error */
+#define ALTERA_UART_STATUS_BRK_MSK 0x0004 /* break */
+#define ALTERA_UART_STATUS_ROE_MSK 0x0008 /* RX overrun error */
+#define ALTERA_UART_STATUS_TOE_MSK 0x0010 /* TX overrun error */
+#define ALTERA_UART_STATUS_TMT_MSK 0x0020 /* TX shift register state */
+#define ALTERA_UART_STATUS_TRDY_MSK 0x0040 /* TX ready */
+#define ALTERA_UART_STATUS_RRDY_MSK 0x0080 /* RX ready */
+#define ALTERA_UART_STATUS_E_MSK 0x0100 /* exception condition */
+#define ALTERA_UART_STATUS_DCTS_MSK 0x0400 /* CTS logic-level change */
+#define ALTERA_UART_STATUS_CTS_MSK 0x0800 /* CTS logic state */
+#define ALTERA_UART_STATUS_EOP_MSK 0x1000 /* EOP written/read */
+
+ /* Enable interrupt on... */
+#define ALTERA_UART_CONTROL_PE_MSK 0x0001 /* ...parity error */
+#define ALTERA_UART_CONTROL_FE_MSK 0x0002 /* ...framing error */
+#define ALTERA_UART_CONTROL_BRK_MSK 0x0004 /* ...break */
+#define ALTERA_UART_CONTROL_ROE_MSK 0x0008 /* ...RX overrun */
+#define ALTERA_UART_CONTROL_TOE_MSK 0x0010 /* ...TX overrun */
+#define ALTERA_UART_CONTROL_TMT_MSK 0x0020 /* ...TX shift register empty */
+#define ALTERA_UART_CONTROL_TRDY_MSK 0x0040 /* ...TX ready */
+#define ALTERA_UART_CONTROL_RRDY_MSK 0x0080 /* ...RX ready */
+#define ALTERA_UART_CONTROL_E_MSK 0x0100 /* ...exception*/
+
+#define ALTERA_UART_CONTROL_TRBK_MSK 0x0200 /* TX break */
+#define ALTERA_UART_CONTROL_DCTS_MSK 0x0400 /* Interrupt on CTS change */
+#define ALTERA_UART_CONTROL_RTS_MSK 0x0800 /* RTS signal */
+#define ALTERA_UART_CONTROL_EOP_MSK 0x1000 /* Interrupt on EOP */
+
+/*
+ * Local per-uart structure.
+ */
+struct altera_uart {
+ struct uart_port port;
+ struct timer_list tmr;
+ unsigned int sigs; /* Local copy of line sigs */
+ unsigned short imr; /* Local IMR mirror */
+};
+
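+/*
+ * Register accessors: the register offset is shifted left by
+ * port->regshift so that platforms which map the UART registers
+ * with a wider stride can still be handled.
+ */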
+static u32 altera_uart_readl(struct uart_port *port, int reg)
+{
+ return readl(port->membase + (reg << port->regshift));
+}
+
+static void altera_uart_writel(struct uart_port *port, u32 dat, int reg)
+{
+ writel(dat, port->membase + (reg << port->regshift));
+}
+
+static unsigned int altera_uart_tx_empty(struct uart_port *port)
+{
+ return (altera_uart_readl(port, ALTERA_UART_STATUS_REG) &
+ ALTERA_UART_STATUS_TMT_MSK) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int altera_uart_get_mctrl(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned int sigs;
+
+ sigs = (altera_uart_readl(port, ALTERA_UART_STATUS_REG) &
+ ALTERA_UART_STATUS_CTS_MSK) ? TIOCM_CTS : 0;
+ sigs |= (pp->sigs & TIOCM_RTS);
+
+ return sigs;
+}
+
+static void altera_uart_set_mctrl(struct uart_port *port, unsigned int sigs)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+
+ pp->sigs = sigs;
+ if (sigs & TIOCM_RTS)
+ pp->imr |= ALTERA_UART_CONTROL_RTS_MSK;
+ else
+ pp->imr &= ~ALTERA_UART_CONTROL_RTS_MSK;
+ altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
+}
+
+static void altera_uart_start_tx(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+
+ pp->imr |= ALTERA_UART_CONTROL_TRDY_MSK;
+ altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
+}
+
+static void altera_uart_stop_tx(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+
+ pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
+ altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
+}
+
+static void altera_uart_stop_rx(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+
+ pp->imr &= ~ALTERA_UART_CONTROL_RRDY_MSK;
+ altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
+}
+
+static void altera_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (break_state == -1)
+ pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK;
+ else
+ pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK;
+ altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_enable_ms(struct uart_port *port)
+{
+}
+
+static void altera_uart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned int baud, baudclk;
+
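+ /*
+ * The divisor programmed below is simply uartclk / baud. Illustrative
+ * example, assuming a 50 MHz reference clock: 50000000 / 115200
+ * gives a divisor of 434.
+ */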
+ baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ baudclk = port->uartclk / baud;
+
+ if (old)
+ tty_termios_copy_hw(termios, old);
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ spin_lock_irqsave(&port->lock, flags);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_rx_chars(struct altera_uart *pp)
+{
+ struct uart_port *port = &pp->port;
+ unsigned char ch, flag;
+ unsigned short status;
+
+ while ((status = altera_uart_readl(port, ALTERA_UART_STATUS_REG)) &
+ ALTERA_UART_STATUS_RRDY_MSK) {
+ ch = altera_uart_readl(port, ALTERA_UART_RXDATA_REG);
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (status & ALTERA_UART_STATUS_E_MSK) {
+ altera_uart_writel(port, status,
+ ALTERA_UART_STATUS_REG);
+
+ if (status & ALTERA_UART_STATUS_BRK_MSK) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (status & ALTERA_UART_STATUS_PE_MSK) {
+ port->icount.parity++;
+ } else if (status & ALTERA_UART_STATUS_ROE_MSK) {
+ port->icount.overrun++;
+ } else if (status & ALTERA_UART_STATUS_FE_MSK) {
+ port->icount.frame++;
+ }
+
+ status &= port->read_status_mask;
+
+ if (status & ALTERA_UART_STATUS_BRK_MSK)
+ flag = TTY_BREAK;
+ else if (status & ALTERA_UART_STATUS_PE_MSK)
+ flag = TTY_PARITY;
+ else if (status & ALTERA_UART_STATUS_FE_MSK)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+ uart_insert_char(port, status, ALTERA_UART_STATUS_ROE_MSK, ch,
+ flag);
+ }
+
+ tty_flip_buffer_push(port->state->port.tty);
+}
+
+static void altera_uart_tx_chars(struct altera_uart *pp)
+{
+ struct uart_port *port = &pp->port;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (port->x_char) {
+ /* Send special char - probably flow control */
+ altera_uart_writel(port, port->x_char, ALTERA_UART_TXDATA_REG);
+ port->x_char = 0;
+ port->icount.tx++;
+ return;
+ }
+
+ while (altera_uart_readl(port, ALTERA_UART_STATUS_REG) &
+ ALTERA_UART_STATUS_TRDY_MSK) {
+ if (xmit->head == xmit->tail)
+ break;
+ altera_uart_writel(port, xmit->buf[xmit->tail],
+ ALTERA_UART_TXDATA_REG);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (xmit->head == xmit->tail) {
+ pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
+ altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
+ }
+}
+
+static irqreturn_t altera_uart_interrupt(int irq, void *data)
+{
+ struct uart_port *port = data;
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned int isr;
+
+ isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr;
+
+ spin_lock(&port->lock);
+ if (isr & ALTERA_UART_STATUS_RRDY_MSK)
+ altera_uart_rx_chars(pp);
+ if (isr & ALTERA_UART_STATUS_TRDY_MSK)
+ altera_uart_tx_chars(pp);
+ spin_unlock(&port->lock);
+
+ return IRQ_RETVAL(isr);
+}
+
+static void altera_uart_timer(unsigned long data)
+{
+ struct uart_port *port = (void *)data;
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+
+ altera_uart_interrupt(0, port);
+ mod_timer(&pp->tmr, jiffies + uart_poll_timeout(port));
+}
+
+static void altera_uart_config_port(struct uart_port *port, int flags)
+{
+ port->type = PORT_ALTERA_UART;
+
+ /* Clear mask, so no surprise interrupts. */
+ altera_uart_writel(port, 0, ALTERA_UART_CONTROL_REG);
+ /* Clear status register */
+ altera_uart_writel(port, 0, ALTERA_UART_STATUS_REG);
+}
+
+static int altera_uart_startup(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+ int ret;
+
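+ /*
+ * Ports registered without an interrupt line fall back to polled
+ * operation: the timer below periodically invokes the interrupt
+ * handler with a dummy irq number of 0.
+ */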
+ if (!port->irq) {
+ setup_timer(&pp->tmr, altera_uart_timer, (unsigned long)port);
+ mod_timer(&pp->tmr, jiffies + uart_poll_timeout(port));
+ return 0;
+ }
+
+ ret = request_irq(port->irq, altera_uart_interrupt, IRQF_DISABLED,
+ DRV_NAME, port);
+ if (ret) {
+ pr_err(DRV_NAME ": unable to attach Altera UART %d "
+ "interrupt vector=%d\n", port->line, port->irq);
+ return ret;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Enable RX interrupts now */
+ pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
+ altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+static void altera_uart_shutdown(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Disable all interrupts now */
+ pp->imr = 0;
+ altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (port->irq)
+ free_irq(port->irq, port);
+ else
+ del_timer_sync(&pp->tmr);
+}
+
+static const char *altera_uart_type(struct uart_port *port)
+{
+ return (port->type == PORT_ALTERA_UART) ? "Altera UART" : NULL;
+}
+
+static int altera_uart_request_port(struct uart_port *port)
+{
+ /* UARTs always present */
+ return 0;
+}
+
+static void altera_uart_release_port(struct uart_port *port)
+{
+ /* Nothing to release... */
+}
+
+static int altera_uart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if ((ser->type != PORT_UNKNOWN) && (ser->type != PORT_ALTERA_UART))
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Define the basic serial functions we support.
+ */
+static struct uart_ops altera_uart_ops = {
+ .tx_empty = altera_uart_tx_empty,
+ .get_mctrl = altera_uart_get_mctrl,
+ .set_mctrl = altera_uart_set_mctrl,
+ .start_tx = altera_uart_start_tx,
+ .stop_tx = altera_uart_stop_tx,
+ .stop_rx = altera_uart_stop_rx,
+ .enable_ms = altera_uart_enable_ms,
+ .break_ctl = altera_uart_break_ctl,
+ .startup = altera_uart_startup,
+ .shutdown = altera_uart_shutdown,
+ .set_termios = altera_uart_set_termios,
+ .type = altera_uart_type,
+ .request_port = altera_uart_request_port,
+ .release_port = altera_uart_release_port,
+ .config_port = altera_uart_config_port,
+ .verify_port = altera_uart_verify_port,
+};
+
+static struct altera_uart altera_uart_ports[CONFIG_SERIAL_ALTERA_UART_MAXPORTS];
+
+#if defined(CONFIG_SERIAL_ALTERA_UART_CONSOLE)
+
+int __init early_altera_uart_setup(struct altera_uart_platform_uart *platp)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS && platp[i].mapbase; i++) {
+ port = &altera_uart_ports[i].port;
+
+ port->line = i;
+ port->type = PORT_ALTERA_UART;
+ port->mapbase = platp[i].mapbase;
+ port->membase = ioremap(port->mapbase, ALTERA_UART_SIZE);
+ port->iotype = SERIAL_IO_MEM;
+ port->irq = platp[i].irq;
+ port->uartclk = platp[i].uartclk;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->ops = &altera_uart_ops;
+ port->private_data = platp;
+ }
+
+ return 0;
+}
+
+static void altera_uart_console_putc(struct uart_port *port, const char c)
+{
+ while (!(altera_uart_readl(port, ALTERA_UART_STATUS_REG) &
+ ALTERA_UART_STATUS_TRDY_MSK))
+ cpu_relax();
+
+ altera_uart_writel(port, c, ALTERA_UART_TXDATA_REG);
+}
+
+static void altera_uart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port = &(altera_uart_ports + co->index)->port;
+
+ for (; count; count--, s++) {
+ altera_uart_console_putc(port, *s);
+ if (*s == '\n')
+ altera_uart_console_putc(port, '\r');
+ }
+}
+
+static int __init altera_uart_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = CONFIG_SERIAL_ALTERA_UART_BAUDRATE;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index < 0 || co->index >= CONFIG_SERIAL_ALTERA_UART_MAXPORTS)
+ return -EINVAL;
+ port = &altera_uart_ports[co->index].port;
+ if (!port->membase)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver altera_uart_driver;
+
+static struct console altera_uart_console = {
+ .name = "ttyAL",
+ .write = altera_uart_console_write,
+ .device = uart_console_device,
+ .setup = altera_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &altera_uart_driver,
+};
+
+static int __init altera_uart_console_init(void)
+{
+ register_console(&altera_uart_console);
+ return 0;
+}
+
+console_initcall(altera_uart_console_init);
+
+#define ALTERA_UART_CONSOLE (&altera_uart_console)
+
+#else
+
+#define ALTERA_UART_CONSOLE NULL
+
+#endif /* CONFIG_SERIAL_ALTERA_UART_CONSOLE */
+
+/*
+ * Define the altera_uart UART driver structure.
+ */
+static struct uart_driver altera_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = DRV_NAME,
+ .dev_name = "ttyAL",
+ .major = SERIAL_ALTERA_MAJOR,
+ .minor = SERIAL_ALTERA_MINOR,
+ .nr = CONFIG_SERIAL_ALTERA_UART_MAXPORTS,
+ .cons = ALTERA_UART_CONSOLE,
+};
+
+#ifdef CONFIG_OF
+static int altera_uart_get_of_uartclk(struct platform_device *pdev,
+ struct uart_port *port)
+{
+ int len;
+ const __be32 *clk;
+
+ clk = of_get_property(pdev->dev.of_node, "clock-frequency", &len);
+ if (!clk || len < sizeof(__be32))
+ return -ENODEV;
+
+ port->uartclk = be32_to_cpup(clk);
+
+ return 0;
+}
+#else
+static int altera_uart_get_of_uartclk(struct platform_device *pdev,
+ struct uart_port *port)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_OF */
+
+static int __devinit altera_uart_probe(struct platform_device *pdev)
+{
+ struct altera_uart_platform_uart *platp = pdev->dev.platform_data;
+ struct uart_port *port;
+ struct resource *res_mem;
+ struct resource *res_irq;
+ int i = pdev->id;
+ int ret;
+
+ /* if id is -1 scan for a free id and use that one */
+ if (i == -1) {
+ for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS; i++)
+ if (altera_uart_ports[i].port.mapbase == 0)
+ break;
+ }
+
+ if (i < 0 || i >= CONFIG_SERIAL_ALTERA_UART_MAXPORTS)
+ return -EINVAL;
+
+ port = &altera_uart_ports[i].port;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem)
+ port->mapbase = res_mem->start;
+ else if (platp)
+ port->mapbase = platp->mapbase;
+ else
+ return -EINVAL;
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res_irq)
+ port->irq = res_irq->start;
+ else if (platp)
+ port->irq = platp->irq;
+
+ /* Check platform data first so we can override device node data */
+ if (platp)
+ port->uartclk = platp->uartclk;
+ else {
+ ret = altera_uart_get_of_uartclk(pdev, port);
+ if (ret)
+ return ret;
+ }
+
+ port->membase = ioremap(port->mapbase, ALTERA_UART_SIZE);
+ if (!port->membase)
+ return -ENOMEM;
+
+ if (platp)
+ port->regshift = platp->bus_shift;
+ else
+ port->regshift = 0;
+
+ port->line = i;
+ port->type = PORT_ALTERA_UART;
+ port->iotype = SERIAL_IO_MEM;
+ port->ops = &altera_uart_ops;
+ port->flags = UPF_BOOT_AUTOCONF;
+
+ dev_set_drvdata(&pdev->dev, port);
+
+ uart_add_one_port(&altera_uart_driver, port);
+
+ return 0;
+}
+
+static int __devexit altera_uart_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = dev_get_drvdata(&pdev->dev);
+
+ if (port) {
+ uart_remove_one_port(&altera_uart_driver, port);
+ dev_set_drvdata(&pdev->dev, NULL);
+ port->mapbase = 0;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id altera_uart_match[] = {
+ { .compatible = "ALTR,uart-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_uart_match);
+#else
+#define altera_uart_match NULL
+#endif /* CONFIG_OF */
+
+static struct platform_driver altera_uart_platform_driver = {
+ .probe = altera_uart_probe,
+ .remove = __devexit_p(altera_uart_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = altera_uart_match,
+ },
+};
+
+static int __init altera_uart_init(void)
+{
+ int rc;
+
+ rc = uart_register_driver(&altera_uart_driver);
+ if (rc)
+ return rc;
+ rc = platform_driver_register(&altera_uart_platform_driver);
+ if (rc) {
+ uart_unregister_driver(&altera_uart_driver);
+ return rc;
+ }
+ return 0;
+}
+
+static void __exit altera_uart_exit(void)
+{
+ platform_driver_unregister(&altera_uart_platform_driver);
+ uart_unregister_driver(&altera_uart_driver);
+}
+
+module_init(altera_uart_init);
+module_exit(altera_uart_exit);
+
+MODULE_DESCRIPTION("Altera UART driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_ALTERA_MAJOR);
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
new file mode 100644
index 00000000..c0d10c4d
--- /dev/null
+++ b/drivers/tty/serial/amba-pl010.c
@@ -0,0 +1,823 @@
+/*
+ * Driver for AMBA serial ports
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * Copyright 1999 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This is a generic driver for ARM AMBA-type serial ports. They
+ * have a lot of 16550-like features, but are not register compatible.
+ * Note that although they do have CTS, DCD and DSR inputs, they do
+ * not have an RI input, nor do they have DTR or RTS outputs. If
+ * required, these have to be supplied via some other means (eg, GPIO)
+ * and hooked into this driver.
+ */
+
+#if defined(CONFIG_SERIAL_AMBA_PL010_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/serial.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+
+#define UART_NR 8
+
+#define SERIAL_AMBA_MAJOR 204
+#define SERIAL_AMBA_MINOR 16
+#define SERIAL_AMBA_NR UART_NR
+
+#define AMBA_ISR_PASS_LIMIT 256
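+/* Bound on interrupt handler loop passes, so a continuously busy port cannot hog the CPU */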
+
+#define UART_RX_DATA(s) (((s) & UART01x_FR_RXFE) == 0)
+#define UART_TX_READY(s) (((s) & UART01x_FR_TXFF) == 0)
+
+#define UART_DUMMY_RSR_RX 256
+#define UART_PORT_SIZE 64
+
+/*
+ * We wrap our port structure around the generic uart_port.
+ */
+struct uart_amba_port {
+ struct uart_port port;
+ struct clk *clk;
+ struct amba_device *dev;
+ struct amba_pl010_data *data;
+ unsigned int old_status;
+};
+
+static void pl010_stop_tx(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int cr;
+
+ cr = readb(uap->port.membase + UART010_CR);
+ cr &= ~UART010_CR_TIE;
+ writel(cr, uap->port.membase + UART010_CR);
+}
+
+static void pl010_start_tx(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int cr;
+
+ cr = readb(uap->port.membase + UART010_CR);
+ cr |= UART010_CR_TIE;
+ writel(cr, uap->port.membase + UART010_CR);
+}
+
+static void pl010_stop_rx(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int cr;
+
+ cr = readb(uap->port.membase + UART010_CR);
+ cr &= ~(UART010_CR_RIE | UART010_CR_RTIE);
+ writel(cr, uap->port.membase + UART010_CR);
+}
+
+static void pl010_enable_ms(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int cr;
+
+ cr = readb(uap->port.membase + UART010_CR);
+ cr |= UART010_CR_MSIE;
+ writel(cr, uap->port.membase + UART010_CR);
+}
+
+static void pl010_rx_chars(struct uart_amba_port *uap)
+{
+ struct tty_struct *tty = uap->port.state->port.tty;
+ unsigned int status, ch, flag, rsr, max_count = 256;
+
+ status = readb(uap->port.membase + UART01x_FR);
+ while (UART_RX_DATA(status) && max_count--) {
+ ch = readb(uap->port.membase + UART01x_DR);
+ flag = TTY_NORMAL;
+
+ uap->port.icount.rx++;
+
+ /*
+ * Note that the error handling code is
+ * out of the main execution path
+ */
+ rsr = readb(uap->port.membase + UART01x_RSR) | UART_DUMMY_RSR_RX;
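+ /*
+ * UART_DUMMY_RSR_RX (bit 8) sits above the real RSR error bits and
+ * flags "a character was received"; set_termios() adds it to
+ * ignore_status_mask when CREAD is clear, so every character is
+ * silently dropped by uart_insert_char().
+ */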
+ if (unlikely(rsr & UART01x_RSR_ANY)) {
+ writel(0, uap->port.membase + UART01x_ECR);
+
+ if (rsr & UART01x_RSR_BE) {
+ rsr &= ~(UART01x_RSR_FE | UART01x_RSR_PE);
+ uap->port.icount.brk++;
+ if (uart_handle_break(&uap->port))
+ goto ignore_char;
+ } else if (rsr & UART01x_RSR_PE)
+ uap->port.icount.parity++;
+ else if (rsr & UART01x_RSR_FE)
+ uap->port.icount.frame++;
+ if (rsr & UART01x_RSR_OE)
+ uap->port.icount.overrun++;
+
+ rsr &= uap->port.read_status_mask;
+
+ if (rsr & UART01x_RSR_BE)
+ flag = TTY_BREAK;
+ else if (rsr & UART01x_RSR_PE)
+ flag = TTY_PARITY;
+ else if (rsr & UART01x_RSR_FE)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(&uap->port, ch))
+ goto ignore_char;
+
+ uart_insert_char(&uap->port, rsr, UART01x_RSR_OE, ch, flag);
+
+ ignore_char:
+ status = readb(uap->port.membase + UART01x_FR);
+ }
+ spin_unlock(&uap->port.lock);
+ tty_flip_buffer_push(tty);
+ spin_lock(&uap->port.lock);
+}
+
+static void pl010_tx_chars(struct uart_amba_port *uap)
+{
+ struct circ_buf *xmit = &uap->port.state->xmit;
+ int count;
+
+ if (uap->port.x_char) {
+ writel(uap->port.x_char, uap->port.membase + UART01x_DR);
+ uap->port.icount.tx++;
+ uap->port.x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
+ pl010_stop_tx(&uap->port);
+ return;
+ }
+
+ count = uap->port.fifosize >> 1;
+ do {
+ writel(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ uap->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&uap->port);
+
+ if (uart_circ_empty(xmit))
+ pl010_stop_tx(&uap->port);
+}
+
+static void pl010_modem_status(struct uart_amba_port *uap)
+{
+ unsigned int status, delta;
+
+ writel(0, uap->port.membase + UART010_ICR);
+
+ status = readb(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
+
+ delta = status ^ uap->old_status;
+ uap->old_status = status;
+
+ if (!delta)
+ return;
+
+ if (delta & UART01x_FR_DCD)
+ uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
+
+ if (delta & UART01x_FR_DSR)
+ uap->port.icount.dsr++;
+
+ if (delta & UART01x_FR_CTS)
+ uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);
+
+ wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
+}
+
+static irqreturn_t pl010_int(int irq, void *dev_id)
+{
+ struct uart_amba_port *uap = dev_id;
+ unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
+ int handled = 0;
+
+ spin_lock(&uap->port.lock);
+
+ status = readb(uap->port.membase + UART010_IIR);
+ if (status) {
+ do {
+ if (status & (UART010_IIR_RTIS | UART010_IIR_RIS))
+ pl010_rx_chars(uap);
+ if (status & UART010_IIR_MIS)
+ pl010_modem_status(uap);
+ if (status & UART010_IIR_TIS)
+ pl010_tx_chars(uap);
+
+ if (pass_counter-- == 0)
+ break;
+
+ status = readb(uap->port.membase + UART010_IIR);
+ } while (status & (UART010_IIR_RTIS | UART010_IIR_RIS |
+ UART010_IIR_TIS));
+ handled = 1;
+ }
+
+ spin_unlock(&uap->port.lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+static unsigned int pl010_tx_empty(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int status = readb(uap->port.membase + UART01x_FR);
+ return status & UART01x_FR_BUSY ? 0 : TIOCSER_TEMT;
+}
+
+static unsigned int pl010_get_mctrl(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int result = 0;
+ unsigned int status;
+
+ status = readb(uap->port.membase + UART01x_FR);
+ if (status & UART01x_FR_DCD)
+ result |= TIOCM_CAR;
+ if (status & UART01x_FR_DSR)
+ result |= TIOCM_DSR;
+ if (status & UART01x_FR_CTS)
+ result |= TIOCM_CTS;
+
+ return result;
+}
+
+static void pl010_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+ if (uap->data)
+ uap->data->set_mctrl(uap->dev, uap->port.membase, mctrl);
+}
+
+static void pl010_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned long flags;
+ unsigned int lcr_h;
+
+ spin_lock_irqsave(&uap->port.lock, flags);
+ lcr_h = readb(uap->port.membase + UART010_LCRH);
+ if (break_state == -1)
+ lcr_h |= UART01x_LCRH_BRK;
+ else
+ lcr_h &= ~UART01x_LCRH_BRK;
+ writel(lcr_h, uap->port.membase + UART010_LCRH);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+}
+
+static int pl010_startup(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ int retval;
+
+ /*
+ * Try to enable the clock producer.
+ */
+ retval = clk_enable(uap->clk);
+ if (retval)
+ goto out;
+
+ uap->port.uartclk = clk_get_rate(uap->clk);
+
+ /*
+ * Allocate the IRQ
+ */
+ retval = request_irq(uap->port.irq, pl010_int, 0, "uart-pl010", uap);
+ if (retval)
+ goto clk_dis;
+
+ /*
+ * initialise the old status of the modem signals
+ */
+ uap->old_status = readb(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
+
+ /*
+ * Finally, enable interrupts
+ */
+ writel(UART01x_CR_UARTEN | UART010_CR_RIE | UART010_CR_RTIE,
+ uap->port.membase + UART010_CR);
+
+ return 0;
+
+ clk_dis:
+ clk_disable(uap->clk);
+ out:
+ return retval;
+}
+
+static void pl010_shutdown(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+ /*
+ * Free the interrupt
+ */
+ free_irq(uap->port.irq, uap);
+
+ /*
+ * disable all interrupts, disable the port
+ */
+ writel(0, uap->port.membase + UART010_CR);
+
+ /* disable break condition and fifos */
+ writel(readb(uap->port.membase + UART010_LCRH) &
+ ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN),
+ uap->port.membase + UART010_LCRH);
+
+ /*
+ * Shut down the clock producer
+ */
+ clk_disable(uap->clk);
+}
+
+static void
+pl010_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int lcr_h, old_cr;
+ unsigned long flags;
+ unsigned int baud, quot;
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 0, uap->port.uartclk/16);
+ quot = uart_get_divisor(port, baud);
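+ /*
+ * quot is roughly uartclk / (16 * baud). Illustrative example,
+ * assuming a 3.6864 MHz reference clock at 115200 baud:
+ * 3686400 / (16 * 115200) = 2, and 1 (quot - 1) is what ends up in
+ * the LCRM/LCRL divisor latches further down.
+ */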
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ lcr_h = UART01x_LCRH_WLEN_5;
+ break;
+ case CS6:
+ lcr_h = UART01x_LCRH_WLEN_6;
+ break;
+ case CS7:
+ lcr_h = UART01x_LCRH_WLEN_7;
+ break;
+ default: // CS8
+ lcr_h = UART01x_LCRH_WLEN_8;
+ break;
+ }
+ if (termios->c_cflag & CSTOPB)
+ lcr_h |= UART01x_LCRH_STP2;
+ if (termios->c_cflag & PARENB) {
+ lcr_h |= UART01x_LCRH_PEN;
+ if (!(termios->c_cflag & PARODD))
+ lcr_h |= UART01x_LCRH_EPS;
+ }
+ if (uap->port.fifosize > 1)
+ lcr_h |= UART01x_LCRH_FEN;
+
+ spin_lock_irqsave(&uap->port.lock, flags);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ uap->port.read_status_mask = UART01x_RSR_OE;
+ if (termios->c_iflag & INPCK)
+ uap->port.read_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ uap->port.read_status_mask |= UART01x_RSR_BE;
+
+ /*
+ * Characters to ignore
+ */
+ uap->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ uap->port.ignore_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE;
+ if (termios->c_iflag & IGNBRK) {
+ uap->port.ignore_status_mask |= UART01x_RSR_BE;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ uap->port.ignore_status_mask |= UART01x_RSR_OE;
+ }
+
+ /*
+ * Ignore all characters if CREAD is not set.
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ uap->port.ignore_status_mask |= UART_DUMMY_RSR_RX;
+
+ /* first, disable everything */
+ old_cr = readb(uap->port.membase + UART010_CR) & ~UART010_CR_MSIE;
+
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+ old_cr |= UART010_CR_MSIE;
+
+ writel(0, uap->port.membase + UART010_CR);
+
+ /* Set baud rate */
+ quot -= 1;
+ writel((quot & 0xf00) >> 8, uap->port.membase + UART010_LCRM);
+ writel(quot & 0xff, uap->port.membase + UART010_LCRL);
+
+ /*
+ * ----------v----------v----------v----------v-----
+ * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L
+ * ----------^----------^----------^----------^-----
+ */
+ writel(lcr_h, uap->port.membase + UART010_LCRH);
+ writel(old_cr, uap->port.membase + UART010_CR);
+
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+}
+
+static void pl010_set_ldisc(struct uart_port *port, int new)
+{
+ if (new == N_PPS) {
+ port->flags |= UPF_HARDPPS_CD;
+ pl010_enable_ms(port);
+ } else
+ port->flags &= ~UPF_HARDPPS_CD;
+}
+
+static const char *pl010_type(struct uart_port *port)
+{
+ return port->type == PORT_AMBA ? "AMBA" : NULL;
+}
+
+/*
+ * Release the memory region(s) being used by 'port'
+ */
+static void pl010_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase, UART_PORT_SIZE);
+}
+
+/*
+ * Request the memory region(s) being used by 'port'
+ */
+static int pl010_request_port(struct uart_port *port)
+{
+ return request_mem_region(port->mapbase, UART_PORT_SIZE, "uart-pl010")
+ != NULL ? 0 : -EBUSY;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void pl010_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_AMBA;
+ pl010_request_port(port);
+ }
+}
+
+/*
+ * verify the new serial_struct (for TIOCSSERIAL).
+ */
+static int pl010_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ int ret = 0;
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
+ ret = -EINVAL;
+ if (ser->irq < 0 || ser->irq >= nr_irqs)
+ ret = -EINVAL;
+ if (ser->baud_base < 9600)
+ ret = -EINVAL;
+ return ret;
+}
+
+static struct uart_ops amba_pl010_pops = {
+ .tx_empty = pl010_tx_empty,
+ .set_mctrl = pl010_set_mctrl,
+ .get_mctrl = pl010_get_mctrl,
+ .stop_tx = pl010_stop_tx,
+ .start_tx = pl010_start_tx,
+ .stop_rx = pl010_stop_rx,
+ .enable_ms = pl010_enable_ms,
+ .break_ctl = pl010_break_ctl,
+ .startup = pl010_startup,
+ .shutdown = pl010_shutdown,
+ .set_termios = pl010_set_termios,
+ .set_ldisc = pl010_set_ldisc,
+ .type = pl010_type,
+ .release_port = pl010_release_port,
+ .request_port = pl010_request_port,
+ .config_port = pl010_config_port,
+ .verify_port = pl010_verify_port,
+};
+
+static struct uart_amba_port *amba_ports[UART_NR];
+
+#ifdef CONFIG_SERIAL_AMBA_PL010_CONSOLE
+
+static void pl010_console_putchar(struct uart_port *port, int ch)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int status;
+
+ do {
+ status = readb(uap->port.membase + UART01x_FR);
+ barrier();
+ } while (!UART_TX_READY(status));
+ writel(ch, uap->port.membase + UART01x_DR);
+}
+
+static void
+pl010_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_amba_port *uap = amba_ports[co->index];
+ unsigned int status, old_cr;
+
+ clk_enable(uap->clk);
+
+ /*
+ * First save the CR then disable the interrupts
+ */
+ old_cr = readb(uap->port.membase + UART010_CR);
+ writel(UART01x_CR_UARTEN, uap->port.membase + UART010_CR);
+
+ uart_console_write(&uap->port, s, count, pl010_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the CR
+ */
+ do {
+ status = readb(uap->port.membase + UART01x_FR);
+ barrier();
+ } while (status & UART01x_FR_BUSY);
+ writel(old_cr, uap->port.membase + UART010_CR);
+
+ clk_disable(uap->clk);
+}
+
+static void __init
+pl010_console_get_options(struct uart_amba_port *uap, int *baud,
+ int *parity, int *bits)
+{
+ if (readb(uap->port.membase + UART010_CR) & UART01x_CR_UARTEN) {
+ unsigned int lcr_h, quot;
+ lcr_h = readb(uap->port.membase + UART010_LCRH);
+
+ *parity = 'n';
+ if (lcr_h & UART01x_LCRH_PEN) {
+ if (lcr_h & UART01x_LCRH_EPS)
+ *parity = 'e';
+ else
+ *parity = 'o';
+ }
+
+ if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
+ *bits = 7;
+ else
+ *bits = 8;
+
+ quot = readb(uap->port.membase + UART010_LCRL) |
+ readb(uap->port.membase + UART010_LCRM) << 8;
+ *baud = uap->port.uartclk / (16 * (quot + 1));
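+ /* The inverse of the "quot - 1" value programmed in pl010_set_termios() */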
+ }
+}
+
+static int __init pl010_console_setup(struct console *co, char *options)
+{
+ struct uart_amba_port *uap;
+ int baud = 38400;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index >= UART_NR)
+ co->index = 0;
+ uap = amba_ports[co->index];
+ if (!uap)
+ return -ENODEV;
+
+ uap->port.uartclk = clk_get_rate(uap->clk);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ pl010_console_get_options(uap, &baud, &parity, &bits);
+
+ return uart_set_options(&uap->port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver amba_reg;
+static struct console amba_console = {
+ .name = "ttyAM",
+ .write = pl010_console_write,
+ .device = uart_console_device,
+ .setup = pl010_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &amba_reg,
+};
+
+#define AMBA_CONSOLE &amba_console
+#else
+#define AMBA_CONSOLE NULL
+#endif
+
+static struct uart_driver amba_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyAM",
+ .dev_name = "ttyAM",
+ .major = SERIAL_AMBA_MAJOR,
+ .minor = SERIAL_AMBA_MINOR,
+ .nr = UART_NR,
+ .cons = AMBA_CONSOLE,
+};
+
+static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
+{
+ struct uart_amba_port *uap;
+ void __iomem *base;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
+ if (amba_ports[i] == NULL)
+ break;
+
+ if (i == ARRAY_SIZE(amba_ports)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ uap = kzalloc(sizeof(struct uart_amba_port), GFP_KERNEL);
+ if (!uap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ base = ioremap(dev->res.start, resource_size(&dev->res));
+ if (!base) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ uap->clk = clk_get(&dev->dev, NULL);
+ if (IS_ERR(uap->clk)) {
+ ret = PTR_ERR(uap->clk);
+ goto unmap;
+ }
+
+ uap->port.dev = &dev->dev;
+ uap->port.mapbase = dev->res.start;
+ uap->port.membase = base;
+ uap->port.iotype = UPIO_MEM;
+ uap->port.irq = dev->irq[0];
+ uap->port.fifosize = 16;
+ uap->port.ops = &amba_pl010_pops;
+ uap->port.flags = UPF_BOOT_AUTOCONF;
+ uap->port.line = i;
+ uap->dev = dev;
+ uap->data = dev->dev.platform_data;
+
+ amba_ports[i] = uap;
+
+ amba_set_drvdata(dev, uap);
+ ret = uart_add_one_port(&amba_reg, &uap->port);
+ if (ret) {
+ amba_set_drvdata(dev, NULL);
+ amba_ports[i] = NULL;
+ clk_put(uap->clk);
+ unmap:
+ iounmap(base);
+ free:
+ kfree(uap);
+ }
+ out:
+ return ret;
+}
+
+static int pl010_remove(struct amba_device *dev)
+{
+ struct uart_amba_port *uap = amba_get_drvdata(dev);
+ int i;
+
+ amba_set_drvdata(dev, NULL);
+
+ uart_remove_one_port(&amba_reg, &uap->port);
+
+ for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
+ if (amba_ports[i] == uap)
+ amba_ports[i] = NULL;
+
+ iounmap(uap->port.membase);
+ clk_put(uap->clk);
+ kfree(uap);
+ return 0;
+}
+
+static int pl010_suspend(struct amba_device *dev, pm_message_t state)
+{
+ struct uart_amba_port *uap = amba_get_drvdata(dev);
+
+ if (uap)
+ uart_suspend_port(&amba_reg, &uap->port);
+
+ return 0;
+}
+
+static int pl010_resume(struct amba_device *dev)
+{
+ struct uart_amba_port *uap = amba_get_drvdata(dev);
+
+ if (uap)
+ uart_resume_port(&amba_reg, &uap->port);
+
+ return 0;
+}
+
+static struct amba_id pl010_ids[] = {
+ {
+ .id = 0x00041010,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver pl010_driver = {
+ .drv = {
+ .name = "uart-pl010",
+ },
+ .id_table = pl010_ids,
+ .probe = pl010_probe,
+ .remove = pl010_remove,
+ .suspend = pl010_suspend,
+ .resume = pl010_resume,
+};
+
+static int __init pl010_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Serial: AMBA driver\n");
+
+ ret = uart_register_driver(&amba_reg);
+ if (ret == 0) {
+ ret = amba_driver_register(&pl010_driver);
+ if (ret)
+ uart_unregister_driver(&amba_reg);
+ }
+ return ret;
+}
+
+static void __exit pl010_exit(void)
+{
+ amba_driver_unregister(&pl010_driver);
+ uart_unregister_driver(&amba_reg);
+}
+
+module_init(pl010_init);
+module_exit(pl010_exit);
+
+MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
+MODULE_DESCRIPTION("ARM AMBA serial port driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
new file mode 100644
index 00000000..21dc4b76
--- /dev/null
+++ b/drivers/tty/serial/amba-pl011.c
@@ -0,0 +1,2048 @@
+/*
+ * Driver for AMBA serial ports
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * Copyright 1999 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * Copyright (C) 2010 ST-Ericsson SA
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This is a generic driver for ARM AMBA-type serial ports. They
+ * have a lot of 16550-like features, but are not register compatible.
+ * Note that although they do have CTS, DCD and DSR inputs, they do
+ * not have an RI input, nor do they have DTR or RTS outputs. If
+ * required, these have to be supplied via some other means (eg, GPIO)
+ * and hooked into this driver.
+ */
+
+#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/serial.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/sizes.h>
+
+#define UART_NR 14
+
+#define SERIAL_AMBA_MAJOR 204
+#define SERIAL_AMBA_MINOR 64
+#define SERIAL_AMBA_NR UART_NR
+
+#define AMBA_ISR_PASS_LIMIT 256
+
+#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
+#define UART_DUMMY_DR_RX (1 << 16)
+
+
+#define UART_WA_SAVE_NR 14
+
+static void pl011_lockup_wa(unsigned long data);
+static const u32 uart_wa_reg[UART_WA_SAVE_NR] = {
+ ST_UART011_DMAWM,
+ ST_UART011_TIMEOUT,
+ ST_UART011_LCRH_RX,
+ UART011_IBRD,
+ UART011_FBRD,
+ ST_UART011_LCRH_TX,
+ UART011_IFLS,
+ ST_UART011_XFCR,
+ ST_UART011_XON1,
+ ST_UART011_XON2,
+ ST_UART011_XOFF1,
+ ST_UART011_XOFF2,
+ UART011_CR,
+ UART011_IMSC
+};
+
+static u32 uart_wa_regdata[UART_WA_SAVE_NR];
+static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa, 0);
+
+/* There is by now at least one vendor with differing details, so handle it */
+struct vendor_data {
+ unsigned int ifls;
+ unsigned int fifosize;
+ unsigned int lcrh_tx;
+ unsigned int lcrh_rx;
+ bool oversampling;
+ bool interrupt_may_hang; /* vendor-specific */
+ bool dma_threshold;
+};
+
+static struct vendor_data vendor_arm = {
+ .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
+ .fifosize = 16,
+ .lcrh_tx = UART011_LCRH,
+ .lcrh_rx = UART011_LCRH,
+ .oversampling = false,
+ .dma_threshold = false,
+};
+
+static struct vendor_data vendor_st = {
+ .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
+ .fifosize = 64,
+ .lcrh_tx = ST_UART011_LCRH_TX,
+ .lcrh_rx = ST_UART011_LCRH_RX,
+ .oversampling = true,
+ .interrupt_may_hang = true,
+ .dma_threshold = true,
+};
+
+static struct uart_amba_port *amba_ports[UART_NR];
+
+/* Deals with DMA transactions */
+
+struct pl011_sgbuf {
+ struct scatterlist sg;
+ char *buf;
+};
+
+struct pl011_dmarx_data {
+ struct dma_chan *chan;
+ struct completion complete;
+ bool use_buf_b;
+ struct pl011_sgbuf sgbuf_a;
+ struct pl011_sgbuf sgbuf_b;
+ dma_cookie_t cookie;
+ bool running;
+};
+
+struct pl011_dmatx_data {
+ struct dma_chan *chan;
+ struct scatterlist sg;
+ char *buf;
+ bool queued;
+};
+
+/*
+ * We wrap our port structure around the generic uart_port.
+ */
+struct uart_amba_port {
+ struct uart_port port;
+ struct clk *clk;
+ const struct vendor_data *vendor;
+ unsigned int dmacr; /* dma control reg */
+ unsigned int im; /* interrupt mask */
+ unsigned int old_status;
+ unsigned int fifosize; /* vendor-specific */
+ unsigned int lcrh_tx; /* vendor-specific */
+ unsigned int lcrh_rx; /* vendor-specific */
+ bool autorts;
+ char type[12];
+ bool interrupt_may_hang; /* vendor-specific */
+#ifdef CONFIG_DMA_ENGINE
+ /* DMA stuff */
+ bool using_tx_dma;
+ bool using_rx_dma;
+ struct pl011_dmarx_data dmarx;
+ struct pl011_dmatx_data dmatx;
+#endif
+};
+
+/*
+ * Reads up to 256 characters from the FIFO or until it's empty and
+ * inserts them into the TTY layer. Returns the number of characters
+ * read from the FIFO.
+ */
+static int pl011_fifo_to_tty(struct uart_amba_port *uap)
+{
+ u16 status, ch;
+ unsigned int flag, max_count = 256;
+ int fifotaken = 0;
+
+ while (max_count--) {
+ status = readw(uap->port.membase + UART01x_FR);
+ if (status & UART01x_FR_RXFE)
+ break;
+
+ /* Take chars from the FIFO and update status */
+ ch = readw(uap->port.membase + UART01x_DR) |
+ UART_DUMMY_DR_RX;
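+ /*
+ * The data register is read 16 bits wide so the error flags in
+ * bits 8-11 travel with the data byte; UART_DUMMY_DR_RX (bit 16)
+ * additionally tags the word as received data, letting the whole
+ * word be masked off via ignore_status_mask (e.g. when CREAD is
+ * clear).
+ */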
+ flag = TTY_NORMAL;
+ uap->port.icount.rx++;
+ fifotaken++;
+
+ if (unlikely(ch & UART_DR_ERROR)) {
+ if (ch & UART011_DR_BE) {
+ ch &= ~(UART011_DR_FE | UART011_DR_PE);
+ uap->port.icount.brk++;
+ if (uart_handle_break(&uap->port))
+ continue;
+ } else if (ch & UART011_DR_PE)
+ uap->port.icount.parity++;
+ else if (ch & UART011_DR_FE)
+ uap->port.icount.frame++;
+ if (ch & UART011_DR_OE)
+ uap->port.icount.overrun++;
+
+ ch &= uap->port.read_status_mask;
+
+ if (ch & UART011_DR_BE)
+ flag = TTY_BREAK;
+ else if (ch & UART011_DR_PE)
+ flag = TTY_PARITY;
+ else if (ch & UART011_DR_FE)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(&uap->port, ch & 255))
+ continue;
+
+ uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+ }
+
+ return fifotaken;
+}
+
+
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface,
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+
+#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
+
+static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
+ enum dma_data_direction dir)
+{
+ sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+ if (!sg->buf)
+ return -ENOMEM;
+
+ sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
+
+ if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
+ kfree(sg->buf);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
+ enum dma_data_direction dir)
+{
+ if (sg->buf) {
+ dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
+ kfree(sg->buf);
+ }
+}
+
+static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
+{
+ /* DMA is the sole user of the platform data right now */
+ struct amba_pl011_data *plat = uap->port.dev->platform_data;
+ struct dma_slave_config tx_conf = {
+ .dst_addr = uap->port.mapbase + UART01x_DR,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .direction = DMA_TO_DEVICE,
+ .dst_maxburst = uap->fifosize >> 1,
+ };
+ struct dma_chan *chan;
+ dma_cap_mask_t mask;
+
+ /* We need platform data */
+ if (!plat || !plat->dma_filter) {
+ dev_info(uap->port.dev, "no DMA platform data\n");
+ return;
+ }
+
+ /* Try to acquire a generic DMA engine slave TX channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = dma_request_channel(mask, plat->dma_filter, plat->dma_tx_param);
+ if (!chan) {
+ dev_err(uap->port.dev, "no TX DMA channel!\n");
+ return;
+ }
+
+ dmaengine_slave_config(chan, &tx_conf);
+ uap->dmatx.chan = chan;
+
+ dev_info(uap->port.dev, "DMA channel TX %s\n",
+ dma_chan_name(uap->dmatx.chan));
+
+ /* Optionally make use of an RX channel as well */
+ if (plat->dma_rx_param) {
+ struct dma_slave_config rx_conf = {
+ .src_addr = uap->port.mapbase + UART01x_DR,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .direction = DMA_FROM_DEVICE,
+ .src_maxburst = uap->fifosize >> 1,
+ };
+
+ chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
+ if (!chan) {
+ dev_err(uap->port.dev, "no RX DMA channel!\n");
+ return;
+ }
+
+ dmaengine_slave_config(chan, &rx_conf);
+ uap->dmarx.chan = chan;
+
+ dev_info(uap->port.dev, "DMA channel RX %s\n",
+ dma_chan_name(uap->dmarx.chan));
+ }
+}
+
+#ifndef MODULE
+/*
+ * Stack up the UARTs and let the above initcall be done at device
+ * initcall time, because the serial driver is called as an arch
+ * initcall, and at this time the DMA subsystem is not yet registered.
+ * At this point the driver will switch over to using DMA where desired.
+ */
+struct dma_uap {
+ struct list_head node;
+ struct uart_amba_port *uap;
+};
+
+static LIST_HEAD(pl011_dma_uarts);
+
+static int __init pl011_dma_initcall(void)
+{
+ struct list_head *node, *tmp;
+
+ list_for_each_safe(node, tmp, &pl011_dma_uarts) {
+ struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
+ pl011_dma_probe_initcall(dmau->uap);
+ list_del(node);
+ kfree(dmau);
+ }
+ return 0;
+}
+
+device_initcall(pl011_dma_initcall);
+
+static void pl011_dma_probe(struct uart_amba_port *uap)
+{
+ struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
+ if (dmau) {
+ dmau->uap = uap;
+ list_add_tail(&dmau->node, &pl011_dma_uarts);
+ }
+}
+#else
+static void pl011_dma_probe(struct uart_amba_port *uap)
+{
+ pl011_dma_probe_initcall(uap);
+}
+#endif
+
+static void pl011_dma_remove(struct uart_amba_port *uap)
+{
+ /* TODO: remove the initcall if it has not yet executed */
+ if (uap->dmatx.chan)
+ dma_release_channel(uap->dmatx.chan);
+ if (uap->dmarx.chan)
+ dma_release_channel(uap->dmarx.chan);
+}
+
+/* Forward declare this for the refill routine */
+static int pl011_dma_tx_refill(struct uart_amba_port *uap);
+
+/*
+ * The current DMA TX buffer has been sent.
+ * Try to queue up another DMA buffer.
+ */
+static void pl011_dma_tx_callback(void *data)
+{
+ struct uart_amba_port *uap = data;
+ struct pl011_dmatx_data *dmatx = &uap->dmatx;
+ unsigned long flags;
+ u16 dmacr;
+
+ spin_lock_irqsave(&uap->port.lock, flags);
+ if (uap->dmatx.queued)
+ dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
+ DMA_TO_DEVICE);
+
+ dmacr = uap->dmacr;
+ uap->dmacr = dmacr & ~UART011_TXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+
+ /*
+ * If TX DMA was disabled, it means that we've stopped the DMA for
+ * some reason (eg, XOFF received, or we want to send an X-char.)
+ *
+ * Note: we need to be careful here of a potential race between DMA
+ * and the rest of the driver - if the driver disables TX DMA while
+ * a TX buffer is completing, we must update the tx queued status to
+ * get further refills (hence we check dmacr).
+ */
+ if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
+ uart_circ_empty(&uap->port.state->xmit)) {
+ uap->dmatx.queued = false;
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+ return;
+ }
+
+ if (pl011_dma_tx_refill(uap) <= 0) {
+ /*
+ * We didn't queue a DMA buffer for some reason, but we
+ * have data pending to be sent. Re-enable the TX IRQ.
+ */
+ uap->im |= UART011_TXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ }
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+}
+
+/*
+ * Try to refill the TX DMA buffer.
+ * Locking: called with port lock held and IRQs disabled.
+ * Returns:
+ * 1 if we queued up a TX DMA buffer.
+ * 0 if we didn't want to handle this by DMA
+ * <0 on error
+ */
+static int pl011_dma_tx_refill(struct uart_amba_port *uap)
+{
+ struct pl011_dmatx_data *dmatx = &uap->dmatx;
+ struct dma_chan *chan = dmatx->chan;
+ struct dma_device *dma_dev = chan->device;
+ struct dma_async_tx_descriptor *desc;
+ struct circ_buf *xmit = &uap->port.state->xmit;
+ unsigned int count;
+
+ /*
+ * Try to avoid the overhead involved in using DMA if the
+ * transaction fits in the first half of the FIFO, by using
+ * the standard interrupt handling. This ensures that we
+ * issue a uart_write_wakeup() at the appropriate time.
+ */
+ count = uart_circ_chars_pending(xmit);
+ if (count < (uap->fifosize >> 1)) {
+ uap->dmatx.queued = false;
+ return 0;
+ }
+
+ /*
+ * Bodge: don't send the last character by DMA, as this
+ * will prevent XON from notifying us to restart DMA.
+ */
+ count -= 1;
+
+ /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
+ if (count > PL011_DMA_BUFFER_SIZE)
+ count = PL011_DMA_BUFFER_SIZE;
+
+ if (xmit->tail < xmit->head)
+ memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
+ else {
+ size_t first = UART_XMIT_SIZE - xmit->tail;
+ size_t second = xmit->head;
+
+ memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
+ if (second)
+ memcpy(&dmatx->buf[first], &xmit->buf[0], second);
+ }
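+ /*
+ * The copies above linearise the circular TX buffer into the DMA
+ * bounce buffer: first the run from tail to the end of the ring,
+ * then (if the data wrapped) the remainder from the start of the
+ * ring.
+ */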
+
+ dmatx->sg.length = count;
+
+ if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
+ uap->dmatx.queued = false;
+ dev_dbg(uap->port.dev, "unable to map TX DMA\n");
+ return -EBUSY;
+ }
+
+ desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
+ uap->dmatx.queued = false;
+ /*
+ * If DMA cannot be used right now, we complete this
+ * transaction via IRQ and let the TTY layer retry.
+ */
+ dev_dbg(uap->port.dev, "TX DMA busy\n");
+ return -EBUSY;
+ }
+
+ /* Some data to go along to the callback */
+ desc->callback = pl011_dma_tx_callback;
+ desc->callback_param = uap;
+
+ /* All errors should happen at prepare time */
+ dmaengine_submit(desc);
+
+ /* Fire the DMA transaction */
+ dma_dev->device_issue_pending(chan);
+
+ uap->dmacr |= UART011_TXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+ uap->dmatx.queued = true;
+
+ /*
+ * Now we know that DMA will fire, so advance the ring buffer
+ * with the stuff we just dispatched.
+ */
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uap->port.icount.tx += count;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&uap->port);
+
+ return 1;
+}
+
+/*
+ * We received a transmit interrupt without a pending X-char but with
+ * pending characters.
+ * Locking: called with port lock held and IRQs disabled.
+ * Returns:
+ * false if we want to use PIO to transmit
+ * true if we queued a DMA buffer
+ */
+static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
+{
+ if (!uap->using_tx_dma)
+ return false;
+
+ /*
+ * If we already have a TX buffer queued, but received a
+ * TX interrupt, it will be because we've just sent an X-char.
+ * Ensure the TX DMA is enabled and the TX IRQ is disabled.
+ */
+ if (uap->dmatx.queued) {
+ uap->dmacr |= UART011_TXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+ uap->im &= ~UART011_TXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ return true;
+ }
+
+ /*
+ * We don't have a TX buffer queued, so try to queue one.
+ * If we successfully queued a buffer, mask the TX IRQ.
+ */
+ if (pl011_dma_tx_refill(uap) > 0) {
+ uap->im &= ~UART011_TXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Stop the DMA transmit (eg, due to received XOFF).
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
+{
+ if (uap->dmatx.queued) {
+ uap->dmacr &= ~UART011_TXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+ }
+}
+
+/*
+ * Try to start a DMA transmit, or in the case of an XON/OFF
+ * character queued for send, try to get that character out ASAP.
+ * Locking: called with port lock held and IRQs disabled.
+ * Returns:
+ * false if we want the TX IRQ to be enabled
+ * true if we have a buffer queued
+ */
+static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
+{
+ u16 dmacr;
+
+ if (!uap->using_tx_dma)
+ return false;
+
+ if (!uap->port.x_char) {
+ /* no X-char, try to push chars out in DMA mode */
+ bool ret = true;
+
+ if (!uap->dmatx.queued) {
+ if (pl011_dma_tx_refill(uap) > 0) {
+ uap->im &= ~UART011_TXIM;
+ ret = true;
+ } else {
+ uap->im |= UART011_TXIM;
+ ret = false;
+ }
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ } else if (!(uap->dmacr & UART011_TXDMAE)) {
+ uap->dmacr |= UART011_TXDMAE;
+ writew(uap->dmacr,
+ uap->port.membase + UART011_DMACR);
+ }
+ return ret;
+ }
+
+ /*
+ * We have an X-char to send. Disable DMA to prevent it loading
+ * the TX fifo, and then see if we can stuff it into the FIFO.
+ */
+ dmacr = uap->dmacr;
+ uap->dmacr &= ~UART011_TXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+
+ if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
+ /*
+ * No space in the FIFO, so enable the transmit interrupt
+ * so we know when there is space. Note that once we've
+ * loaded the character, we should just re-enable DMA.
+ */
+ return false;
+ }
+
+ writew(uap->port.x_char, uap->port.membase + UART01x_DR);
+ uap->port.icount.tx++;
+ uap->port.x_char = 0;
+
+ /* Success - restore the DMA state */
+ uap->dmacr = dmacr;
+ writew(dmacr, uap->port.membase + UART011_DMACR);
+
+ return true;
+}
+
+/*
+ * Flush the transmit buffer.
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static void pl011_dma_flush_buffer(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+ if (!uap->using_tx_dma)
+ return;
+
+ /* Avoid deadlock with the DMA engine callback */
+ spin_unlock(&uap->port.lock);
+ dmaengine_terminate_all(uap->dmatx.chan);
+ spin_lock(&uap->port.lock);
+ if (uap->dmatx.queued) {
+ dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+ DMA_TO_DEVICE);
+ uap->dmatx.queued = false;
+ uap->dmacr &= ~UART011_TXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+ }
+}
+
+static void pl011_dma_rx_callback(void *data);
+
+static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+{
+ struct dma_chan *rxchan = uap->dmarx.chan;
+ struct dma_device *dma_dev;
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_async_tx_descriptor *desc;
+ struct pl011_sgbuf *sgbuf;
+
+ if (!rxchan)
+ return -EIO;
+
+ /* Start the RX DMA job */
+ sgbuf = uap->dmarx.use_buf_b ?
+ &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ dma_dev = rxchan->device;
+ desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ /*
+ * If the DMA engine is busy and cannot prepare a
+ * channel, no big deal, the driver will fall back
+ * to interrupt mode as a result of this error code.
+ */
+ if (!desc) {
+ uap->dmarx.running = false;
+ dmaengine_terminate_all(rxchan);
+ return -EBUSY;
+ }
+
+ /* Some data to go along to the callback */
+ desc->callback = pl011_dma_rx_callback;
+ desc->callback_param = uap;
+ dmarx->cookie = dmaengine_submit(desc);
+ dma_async_issue_pending(rxchan);
+
+ uap->dmacr |= UART011_RXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+ uap->dmarx.running = true;
+
+ uap->im &= ~UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+
+ return 0;
+}
+
+/*
+ * This is called when either the DMA job is complete, or
+ * the FIFO timeout interrupt occurred. This must be called
+ * with the port spinlock uap->port.lock held.
+ */
+static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+ u32 pending, bool use_buf_b,
+ bool readfifo)
+{
+ struct tty_struct *tty = uap->port.state->port.tty;
+ struct pl011_sgbuf *sgbuf = use_buf_b ?
+ &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ struct device *dev = uap->dmarx.chan->device->dev;
+ int dma_count = 0;
+ u32 fifotaken = 0; /* only used for vdbg() */
+
+ /* Pick everything from the DMA first */
+ if (pending) {
+ /* Sync in buffer */
+ dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+
+ /*
+ * First take all chars in the DMA pipe, then look in the FIFO.
+ * Note that tty_insert_flip_string() tries to take as many chars
+ * as it can.
+ */
+ dma_count = tty_insert_flip_string(uap->port.state->port.tty,
+ sgbuf->buf, pending);
+
+ /* Return buffer to device */
+ dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+
+ uap->port.icount.rx += dma_count;
+ if (dma_count < pending)
+ dev_warn(uap->port.dev,
+ "couldn't insert all characters (TTY is full?)\n");
+ }
+
+ /*
+ * Only continue with trying to read the FIFO if all DMA chars have
+ * been taken first.
+ */
+ if (dma_count == pending && readfifo) {
+ /* Clear any error flags */
+ writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
+ uap->port.membase + UART011_ICR);
+
+ /*
+ * If we read all the DMA'd characters, and we had an
+ * incomplete buffer, that could be due to an rx error, or
+ * maybe we just timed out. Read any pending chars and check
+ * the error status.
+ *
+ * Error conditions will only occur in the FIFO, these will
+ * trigger an immediate interrupt and stop the DMA job, so we
+ * will always find the error in the FIFO, never in the DMA
+ * buffer.
+ */
+ fifotaken = pl011_fifo_to_tty(uap);
+ }
+
+ spin_unlock(&uap->port.lock);
+ dev_vdbg(uap->port.dev,
+ "Took %d chars from DMA buffer and %d chars from the FIFO\n",
+ dma_count, fifotaken);
+ tty_flip_buffer_push(tty);
+ spin_lock(&uap->port.lock);
+}
+
+static void pl011_dma_rx_irq(struct uart_amba_port *uap)
+{
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_chan *rxchan = dmarx->chan;
+ struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+ &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+ size_t pending;
+ struct dma_tx_state state;
+ enum dma_status dmastat;
+
+ /*
+ * Pause the transfer so we can trust the current counter,
+ * do this before we pause the PL011 block, else we may
+ * overflow the FIFO.
+ */
+ if (dmaengine_pause(rxchan))
+ dev_err(uap->port.dev, "unable to pause DMA transfer\n");
+ dmastat = rxchan->device->device_tx_status(rxchan,
+ dmarx->cookie, &state);
+ if (dmastat != DMA_PAUSED)
+ dev_err(uap->port.dev, "unable to pause DMA transfer\n");
+
+ /* Disable RX DMA - incoming data will wait in the FIFO */
+ uap->dmacr &= ~UART011_RXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+ uap->dmarx.running = false;
+
+ pending = sgbuf->sg.length - state.residue;
+ BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+ /* Then we terminate the transfer - we now know our residue */
+ dmaengine_terminate_all(rxchan);
+
+ /*
+ * This will take the chars we have so far and insert
+ * into the framework.
+ */
+ pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
+
+ /* Switch buffer & re-trigger DMA job */
+ dmarx->use_buf_b = !dmarx->use_buf_b;
+ if (pl011_dma_rx_trigger_dma(uap)) {
+ dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
+ "fall back to interrupt mode\n");
+ uap->im |= UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ }
+}
+
+static void pl011_dma_rx_callback(void *data)
+{
+ struct uart_amba_port *uap = data;
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ bool lastbuf = dmarx->use_buf_b;
+ int ret;
+
+ /*
+ * This completion interrupt occurs typically when the
+ * RX buffer is totally stuffed but no timeout has yet
+ * occurred. When that happens, we just want the RX
+ * routine to flush out the secondary DMA buffer while
+ * we immediately trigger the next DMA job.
+ */
+ spin_lock_irq(&uap->port.lock);
+ uap->dmarx.running = false;
+ dmarx->use_buf_b = !lastbuf;
+ ret = pl011_dma_rx_trigger_dma(uap);
+
+ pl011_dma_rx_chars(uap, PL011_DMA_BUFFER_SIZE, lastbuf, false);
+ spin_unlock_irq(&uap->port.lock);
+ /*
+ * Do this check after we picked the DMA chars so we don't
+ * get some IRQ immediately from RX.
+ */
+ if (ret) {
+ dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
+ "fall back to interrupt mode\n");
+ uap->im |= UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ }
+}
+
+/*
+ * Stop accepting received characters, when we're shutting down or
+ * suspending this port.
+ * Locking: called with port lock held and IRQs disabled.
+ */
+static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+{
+ /* FIXME. Just disable the DMA enable */
+ uap->dmacr &= ~UART011_RXDMAE;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+}
+
+static void pl011_dma_startup(struct uart_amba_port *uap)
+{
+ int ret;
+
+ if (!uap->dmatx.chan)
+ return;
+
+ uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+ if (!uap->dmatx.buf) {
+ dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
+ uap->port.fifosize = uap->fifosize;
+ return;
+ }
+
+ sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
+
+ /* The DMA buffer is now the FIFO the TTY subsystem can use */
+ uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
+ uap->using_tx_dma = true;
+
+ if (!uap->dmarx.chan)
+ goto skip_rx;
+
+ /* Allocate and map DMA RX buffers */
+ ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+ "RX buffer A", ret);
+ goto skip_rx;
+ }
+
+ ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+ "RX buffer B", ret);
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ DMA_FROM_DEVICE);
+ goto skip_rx;
+ }
+
+ uap->using_rx_dma = true;
+
+skip_rx:
+ /* Turn on DMA error (RX/TX will be enabled on demand) */
+ uap->dmacr |= UART011_DMAONERR;
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+
+ /*
+ * ST Micro variants have a specific DMA burst threshold
+ * compensation. Set this to 16 bytes, so bursts will only
+ * be issued above/below 16 bytes.
+ */
+ if (uap->vendor->dma_threshold)
+ writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
+ uap->port.membase + ST_UART011_DMAWM);
+
+ if (uap->using_rx_dma) {
+ if (pl011_dma_rx_trigger_dma(uap))
+ dev_dbg(uap->port.dev, "could not trigger initial "
+ "RX DMA job, fall back to interrupt mode\n");
+ }
+}
+
+static void pl011_dma_shutdown(struct uart_amba_port *uap)
+{
+ if (!(uap->using_tx_dma || uap->using_rx_dma))
+ return;
+
+ /* Disable RX and TX DMA */
+ while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
+ barrier();
+
+ spin_lock_irq(&uap->port.lock);
+ uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
+ writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+ spin_unlock_irq(&uap->port.lock);
+
+ if (uap->using_tx_dma) {
+ /* In theory, this should already be done by pl011_dma_flush_buffer */
+ dmaengine_terminate_all(uap->dmatx.chan);
+ if (uap->dmatx.queued) {
+ dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+ DMA_TO_DEVICE);
+ uap->dmatx.queued = false;
+ }
+
+ kfree(uap->dmatx.buf);
+ uap->using_tx_dma = false;
+ }
+
+ if (uap->using_rx_dma) {
+ dmaengine_terminate_all(uap->dmarx.chan);
+ /* Clean up the RX DMA */
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
+ pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+ uap->using_rx_dma = false;
+ }
+}
+
+static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
+{
+ return uap->using_rx_dma;
+}
+
+static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
+{
+ return uap->using_rx_dma && uap->dmarx.running;
+}
+
+
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void pl011_dma_probe(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_remove(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_startup(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
+{
+}
+
+static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
+{
+ return false;
+}
+
+static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
+{
+}
+
+static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
+{
+ return false;
+}
+
+static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
+{
+}
+
+static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+{
+}
+
+static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+{
+ return -EIO;
+}
+
+static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
+{
+ return false;
+}
+
+static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
+{
+ return false;
+}
+
+#define pl011_dma_flush_buffer NULL
+#endif
+
+
+/*
+ * pl011_lockup_wa
+ * This workaround aims to break the deadlock that can occur
+ * after a long transfer over the UART with hardware flow
+ * control, when the UART interrupt registers can no longer
+ * be cleared and transfers therefore stall.
+ *
+ * During such a deadlock the ICR does not get cleared even
+ * after multiple writes. This causes pass_counter to count
+ * down and finally reach zero, which is taken as the trigger
+ * point to run this UART_BT_WA.
+ */
+static void pl011_lockup_wa(unsigned long data)
+{
+ struct uart_amba_port *uap = amba_ports[0];
+ void __iomem *base = uap->port.membase;
+ struct circ_buf *xmit = &uap->port.state->xmit;
+ struct tty_struct *tty = uap->port.state->port.tty;
+ int buf_empty_retries = 200;
+ int loop;
+
+ /* Stop HCI layer from submitting data for tx */
+ tty->hw_stopped = 1;
+ while (!uart_circ_empty(xmit)) {
+ if (buf_empty_retries-- == 0)
+ break;
+ udelay(100);
+ }
+
+ /* Backup registers */
+ for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
+ uart_wa_regdata[loop] = readl(base + uart_wa_reg[loop]);
+
+ /* Disable UART so that FIFO data is flushed out */
+ writew(0x00, uap->port.membase + UART011_CR);
+
+ /* Soft reset UART module */
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->reset)
+ plat->reset();
+ }
+
+ /* Restore registers */
+ for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
+ writew(uart_wa_regdata[loop],
+ uap->port.membase + uart_wa_reg[loop]);
+
+ /* Initialise the old status of the modem signals */
+ uap->old_status = readw(uap->port.membase + UART01x_FR) &
+ UART01x_FR_MODEM_ANY;
+
+ if (readl(base + UART011_MIS) & 0x2)
+ printk(KERN_EMERG "UART_BT_WA: ***FAILED***\n");
+
+ /* Start Tx/Rx */
+ tty->hw_stopped = 0;
+}
+
+static void pl011_stop_tx(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+ uap->im &= ~UART011_TXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ pl011_dma_tx_stop(uap);
+}
+
+static void pl011_start_tx(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+ if (!pl011_dma_tx_start(uap)) {
+ uap->im |= UART011_TXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ }
+}
+
+static void pl011_stop_rx(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+ uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
+ UART011_PEIM|UART011_BEIM|UART011_OEIM);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+
+ pl011_dma_rx_stop(uap);
+}
+
+static void pl011_enable_ms(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+ uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+}
+
+static void pl011_rx_chars(struct uart_amba_port *uap)
+{
+ struct tty_struct *tty = uap->port.state->port.tty;
+
+ pl011_fifo_to_tty(uap);
+
+ spin_unlock(&uap->port.lock);
+ tty_flip_buffer_push(tty);
+ /*
+ * If we were temporarily out of DMA mode for a while,
+ * attempt to switch back to DMA mode again.
+ */
+ if (pl011_dma_rx_available(uap)) {
+ if (pl011_dma_rx_trigger_dma(uap)) {
+ dev_dbg(uap->port.dev, "could not trigger RX DMA job "
+ "fall back to interrupt mode again\n");
+ uap->im |= UART011_RXIM;
+ } else
+ uap->im &= ~UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ }
+ spin_lock(&uap->port.lock);
+}
+
+static void pl011_tx_chars(struct uart_amba_port *uap)
+{
+ struct circ_buf *xmit = &uap->port.state->xmit;
+ int count;
+
+ if (uap->port.x_char) {
+ writew(uap->port.x_char, uap->port.membase + UART01x_DR);
+ uap->port.icount.tx++;
+ uap->port.x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
+ pl011_stop_tx(&uap->port);
+ return;
+ }
+
+ /* If we are using DMA mode, try to send some characters. */
+ if (pl011_dma_tx_irq(uap))
+ return;
+
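+ /*
+ * The TX interrupt asserts at the FIFO watermark programmed via
+ * IFLS at startup (typically half full), so writing at most
+ * fifosize/2 characters here tops the FIFO back up without
+ * overflowing it.
+ */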
+ count = uap->fifosize >> 1;
+ do {
+ writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ uap->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&uap->port);
+
+ if (uart_circ_empty(xmit))
+ pl011_stop_tx(&uap->port);
+}
+
+static void pl011_modem_status(struct uart_amba_port *uap)
+{
+ unsigned int status, delta;
+
+ status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
+
+ delta = status ^ uap->old_status;
+ uap->old_status = status;
+
+ if (!delta)
+ return;
+
+ if (delta & UART01x_FR_DCD)
+ uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
+
+ if (delta & UART01x_FR_DSR)
+ uap->port.icount.dsr++;
+
+ if (delta & UART01x_FR_CTS)
+ uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);
+
+ wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
+}
+
+static irqreturn_t pl011_int(int irq, void *dev_id)
+{
+ struct uart_amba_port *uap = dev_id;
+ unsigned long flags;
+ unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
+ int handled = 0;
+
+ spin_lock_irqsave(&uap->port.lock, flags);
+
+ status = readw(uap->port.membase + UART011_MIS);
+ if (status) {
+ do {
+ writew(status & ~(UART011_TXIS|UART011_RTIS|
+ UART011_RXIS),
+ uap->port.membase + UART011_ICR);
+
+ if (status & (UART011_RTIS|UART011_RXIS)) {
+ if (pl011_dma_rx_running(uap))
+ pl011_dma_rx_irq(uap);
+ else
+ pl011_rx_chars(uap);
+ }
+ if (status & (UART011_DSRMIS|UART011_DCDMIS|
+ UART011_CTSMIS|UART011_RIMIS))
+ pl011_modem_status(uap);
+ if (status & UART011_TXIS)
+ pl011_tx_chars(uap);
+
+ if (pass_counter-- == 0) {
+ if (uap->interrupt_may_hang)
+ tasklet_schedule(&pl011_lockup_tlet);
+ break;
+ }
+
+ status = readw(uap->port.membase + UART011_MIS);
+ } while (status != 0);
+ handled = 1;
+ }
+
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+static unsigned int pl01x_tx_empty(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int status = readw(uap->port.membase + UART01x_FR);
+ return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
+}
+
+static unsigned int pl01x_get_mctrl(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int result = 0;
+ unsigned int status = readw(uap->port.membase + UART01x_FR);
+
+#define TIOCMBIT(uartbit, tiocmbit) \
+ if (status & uartbit) \
+ result |= tiocmbit
+
+ TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
+ TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
+ TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
+ TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
+#undef TIOCMBIT
+ return result;
+}
+
+static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int cr;
+
+ cr = readw(uap->port.membase + UART011_CR);
+
+#define TIOCMBIT(tiocmbit, uartbit) \
+ if (mctrl & tiocmbit) \
+ cr |= uartbit; \
+ else \
+ cr &= ~uartbit
+
+ TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
+ TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
+ TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
+ TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
+ TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
+
+ if (uap->autorts) {
+ /* We need to disable auto-RTS if we want to turn RTS off */
+ TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
+ }
+#undef TIOCMBIT
+
+ writew(cr, uap->port.membase + UART011_CR);
+}
+
+static void pl011_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned long flags;
+ unsigned int lcr_h;
+
+ spin_lock_irqsave(&uap->port.lock, flags);
+ lcr_h = readw(uap->port.membase + uap->lcrh_tx);
+ if (break_state == -1)
+ lcr_h |= UART01x_LCRH_BRK;
+ else
+ lcr_h &= ~UART01x_LCRH_BRK;
+ writew(lcr_h, uap->port.membase + uap->lcrh_tx);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int pl010_get_poll_char(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int status;
+
+ status = readw(uap->port.membase + UART01x_FR);
+ if (status & UART01x_FR_RXFE)
+ return NO_POLL_CHAR;
+
+ return readw(uap->port.membase + UART01x_DR);
+}
+
+static void pl010_put_poll_char(struct uart_port *port,
+ unsigned char ch)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+ while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
+ barrier();
+
+ writew(ch, uap->port.membase + UART01x_DR);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
+static int pl011_startup(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int cr;
+ int retval;
+
+ /*
+ * Try to enable the clock producer.
+ */
+ retval = clk_enable(uap->clk);
+ if (retval)
+ goto out;
+
+ uap->port.uartclk = clk_get_rate(uap->clk);
+
+ /* Clear pending error and receive interrupts */
+ writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
+ UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);
+
+ /*
+ * Allocate the IRQ
+ */
+ retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
+ if (retval)
+ goto clk_dis;
+
+ writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
+
+ /*
+ * Provoke TX FIFO interrupt into asserting.
+ */
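+ /*
+ * Note: loopback (LBE) is enabled for this sequence so the dummy
+ * character written to the data register below is not sent out on
+ * the wire, and IBRD=1/FBRD=0 selects the fastest baud rate so the
+ * BUSY wait that follows completes quickly.
+ */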
+ cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
+ writew(cr, uap->port.membase + UART011_CR);
+ writew(0, uap->port.membase + UART011_FBRD);
+ writew(1, uap->port.membase + UART011_IBRD);
+ writew(0, uap->port.membase + uap->lcrh_rx);
+ if (uap->lcrh_tx != uap->lcrh_rx) {
+ int i;
+ /*
+ * Wait 10 PCLKs before writing the LCRH_TX register;
+ * to get this delay, write a read-only register 10 times.
+ */
+ for (i = 0; i < 10; ++i)
+ writew(0xff, uap->port.membase + UART011_MIS);
+ writew(0, uap->port.membase + uap->lcrh_tx);
+ }
+ writew(0, uap->port.membase + UART01x_DR);
+ while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
+ barrier();
+
+ cr = UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
+ writew(cr, uap->port.membase + UART011_CR);
+
+ /*
+ * initialise the old status of the modem signals
+ */
+ uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
+
+ /* Startup DMA */
+ pl011_dma_startup(uap);
+
+ /*
+ * Finally, enable interrupts. When using DMA, only the RX
+ * timeout interrupt is needed; if the initial RX DMA job
+ * failed, start in interrupt mode as well.
+ */
+ spin_lock_irq(&uap->port.lock);
+ /* Clear out any spuriously appearing RX interrupts */
+ writew(UART011_RTIS | UART011_RXIS,
+ uap->port.membase + UART011_ICR);
+ uap->im = UART011_RTIM;
+ if (!pl011_dma_rx_running(uap))
+ uap->im |= UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ spin_unlock_irq(&uap->port.lock);
+
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->init)
+ plat->init();
+ }
+
+ return 0;
+
+ clk_dis:
+ clk_disable(uap->clk);
+ out:
+ return retval;
+}
+
+static void pl011_shutdown_channel(struct uart_amba_port *uap,
+ unsigned int lcrh)
+{
+ unsigned long val;
+
+ val = readw(uap->port.membase + lcrh);
+ val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
+ writew(val, uap->port.membase + lcrh);
+}
+
+static void pl011_shutdown(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+ /*
+ * disable all interrupts
+ */
+ spin_lock_irq(&uap->port.lock);
+ uap->im = 0;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ writew(0xffff, uap->port.membase + UART011_ICR);
+ spin_unlock_irq(&uap->port.lock);
+
+ pl011_dma_shutdown(uap);
+
+ /*
+ * Free the interrupt
+ */
+ free_irq(uap->port.irq, uap);
+
+ /*
+ * disable the port
+ */
+ uap->autorts = false;
+ writew(UART01x_CR_UARTEN | UART011_CR_TXE, uap->port.membase + UART011_CR);
+
+ /*
+ * disable break condition and fifos
+ */
+ pl011_shutdown_channel(uap, uap->lcrh_rx);
+ if (uap->lcrh_rx != uap->lcrh_tx)
+ pl011_shutdown_channel(uap, uap->lcrh_tx);
+
+ /*
+ * Shut down the clock producer
+ */
+ clk_disable(uap->clk);
+
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->exit)
+ plat->exit();
+ }
+
+}
+
+static void
+pl011_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int lcr_h, old_cr;
+ unsigned long flags;
+ unsigned int baud, quot, clkdiv;
+
+ if (uap->vendor->oversampling)
+ clkdiv = 8;
+ else
+ clkdiv = 16;
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 0,
+ port->uartclk / clkdiv);
+
+ if (baud > port->uartclk/16)
+ quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
+ else
+ quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
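+ /*
+ * quot is the divisor UARTCLK / (16 * baud) expressed in 1/64ths,
+ * i.e. with a 6-bit fractional part; the *8 branch covers the
+ * oversampling-by-8 case. For example, a 24 MHz clock at 115200
+ * baud gives quot = 24000000 * 4 / 115200 = 833, which is split
+ * into IBRD = 833 >> 6 = 13 and FBRD = 833 & 0x3f = 1 when the
+ * divisor registers are written further down.
+ */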
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ lcr_h = UART01x_LCRH_WLEN_5;
+ break;
+ case CS6:
+ lcr_h = UART01x_LCRH_WLEN_6;
+ break;
+ case CS7:
+ lcr_h = UART01x_LCRH_WLEN_7;
+ break;
+ default: /* CS8 */
+ lcr_h = UART01x_LCRH_WLEN_8;
+ break;
+ }
+ if (termios->c_cflag & CSTOPB)
+ lcr_h |= UART01x_LCRH_STP2;
+ if (termios->c_cflag & PARENB) {
+ lcr_h |= UART01x_LCRH_PEN;
+ if (!(termios->c_cflag & PARODD))
+ lcr_h |= UART01x_LCRH_EPS;
+ }
+ if (uap->fifosize > 1)
+ lcr_h |= UART01x_LCRH_FEN;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ port->read_status_mask = UART011_DR_OE | 255;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= UART011_DR_BE;
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= UART011_DR_BE;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= UART011_DR_OE;
+ }
+
+ /*
+ * Ignore all characters if CREAD is not set.
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= UART_DUMMY_DR_RX;
+
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+ pl011_enable_ms(port);
+
+ /* first, disable everything */
+ old_cr = readw(port->membase + UART011_CR);
+ writew(0, port->membase + UART011_CR);
+
+ if (termios->c_cflag & CRTSCTS) {
+ if (old_cr & UART011_CR_RTS)
+ old_cr |= UART011_CR_RTSEN;
+
+ old_cr |= UART011_CR_CTSEN;
+ uap->autorts = true;
+ } else {
+ old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
+ uap->autorts = false;
+ }
+
+ if (uap->vendor->oversampling) {
+ if (baud > port->uartclk / 16)
+ old_cr |= ST_UART011_CR_OVSFACT;
+ else
+ old_cr &= ~ST_UART011_CR_OVSFACT;
+ }
+
+ /* Set baud rate */
+ writew(quot & 0x3f, port->membase + UART011_FBRD);
+ writew(quot >> 6, port->membase + UART011_IBRD);
+
+ /*
+ * ----------v----------v----------v----------v-----
+ * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L
+ * ----------^----------^----------^----------^-----
+ */
+ writew(lcr_h, port->membase + uap->lcrh_rx);
+ if (uap->lcrh_rx != uap->lcrh_tx) {
+ int i;
+ /*
+ * Wait 10 PCLKs before writing the LCRH_TX register;
+ * to get this delay, write a read-only register 10 times.
+ */
+ for (i = 0; i < 10; ++i)
+ writew(0xff, uap->port.membase + UART011_MIS);
+ writew(lcr_h, port->membase + uap->lcrh_tx);
+ }
+ writew(old_cr, port->membase + UART011_CR);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *pl011_type(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ return uap->port.type == PORT_AMBA ? uap->type : NULL;
+}
+
+/*
+ * Release the memory region(s) being used by 'port'
+ */
+static void pl010_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase, SZ_4K);
+}
+
+/*
+ * Request the memory region(s) being used by 'port'
+ */
+static int pl010_request_port(struct uart_port *port)
+{
+ return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
+ != NULL ? 0 : -EBUSY;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void pl010_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_AMBA;
+ pl010_request_port(port);
+ }
+}
+
+/*
+ * verify the new serial_struct (for TIOCSSERIAL).
+ */
+static int pl010_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ int ret = 0;
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
+ ret = -EINVAL;
+ if (ser->irq < 0 || ser->irq >= nr_irqs)
+ ret = -EINVAL;
+ if (ser->baud_base < 9600)
+ ret = -EINVAL;
+ return ret;
+}
+
+static struct uart_ops amba_pl011_pops = {
+ .tx_empty = pl01x_tx_empty,
+ .set_mctrl = pl011_set_mctrl,
+ .get_mctrl = pl01x_get_mctrl,
+ .stop_tx = pl011_stop_tx,
+ .start_tx = pl011_start_tx,
+ .stop_rx = pl011_stop_rx,
+ .enable_ms = pl011_enable_ms,
+ .break_ctl = pl011_break_ctl,
+ .startup = pl011_startup,
+ .shutdown = pl011_shutdown,
+ .flush_buffer = pl011_dma_flush_buffer,
+ .set_termios = pl011_set_termios,
+ .type = pl011_type,
+ .release_port = pl010_release_port,
+ .request_port = pl010_request_port,
+ .config_port = pl010_config_port,
+ .verify_port = pl010_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = pl010_get_poll_char,
+ .poll_put_char = pl010_put_poll_char,
+#endif
+};
+
+static struct uart_amba_port *amba_ports[UART_NR];
+
+#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
+
+static void pl011_console_putchar(struct uart_port *port, int ch)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+ while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
+ barrier();
+ writew(ch, uap->port.membase + UART01x_DR);
+}
+
+static void
+pl011_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_amba_port *uap = amba_ports[co->index];
+ unsigned int status, old_cr, new_cr;
+ unsigned long flags;
+ int locked = 1;
+
+ clk_enable(uap->clk);
+
+ local_irq_save(flags);
+ if (uap->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock(&uap->port.lock);
+ else
+ spin_lock(&uap->port.lock);
+
+ /*
+ * First save the CR then disable the interrupts
+ */
+ old_cr = readw(uap->port.membase + UART011_CR);
+ new_cr = old_cr & ~UART011_CR_CTSEN;
+ new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
+ writew(new_cr, uap->port.membase + UART011_CR);
+
+ uart_console_write(&uap->port, s, count, pl011_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the CR
+ */
+ do {
+ status = readw(uap->port.membase + UART01x_FR);
+ } while (status & UART01x_FR_BUSY);
+ writew(old_cr, uap->port.membase + UART011_CR);
+
+ if (locked)
+ spin_unlock(&uap->port.lock);
+ local_irq_restore(flags);
+
+ clk_disable(uap->clk);
+}
+
+static void __init
+pl011_console_get_options(struct uart_amba_port *uap, int *baud,
+ int *parity, int *bits)
+{
+ if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
+ unsigned int lcr_h, ibrd, fbrd;
+
+ lcr_h = readw(uap->port.membase + uap->lcrh_tx);
+
+ *parity = 'n';
+ if (lcr_h & UART01x_LCRH_PEN) {
+ if (lcr_h & UART01x_LCRH_EPS)
+ *parity = 'e';
+ else
+ *parity = 'o';
+ }
+
+ if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
+ *bits = 7;
+ else
+ *bits = 8;
+
+ ibrd = readw(uap->port.membase + UART011_IBRD);
+ fbrd = readw(uap->port.membase + UART011_FBRD);
+
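+ /*
+ * Invert the IBRD/FBRD packing used in pl011_set_termios():
+ * UARTCLK / (16 * baud) = ibrd + fbrd/64, hence the formula
+ * below. E.g. ibrd = 13 and fbrd = 1 with a 24 MHz clock
+ * gives 96000000 / 833, i.e. roughly 115200 baud.
+ */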
+ *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
+
+ if (uap->vendor->oversampling) {
+ if (readw(uap->port.membase + UART011_CR)
+ & ST_UART011_CR_OVSFACT)
+ *baud *= 2;
+ }
+ }
+}
+
+static int __init pl011_console_setup(struct console *co, char *options)
+{
+ struct uart_amba_port *uap;
+ int baud = 38400;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index >= UART_NR)
+ co->index = 0;
+ uap = amba_ports[co->index];
+ if (!uap)
+ return -ENODEV;
+
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->init)
+ plat->init();
+ }
+
+ uap->port.uartclk = clk_get_rate(uap->clk);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ pl011_console_get_options(uap, &baud, &parity, &bits);
+
+ return uart_set_options(&uap->port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver amba_reg;
+static struct console amba_console = {
+ .name = "ttyAMA",
+ .write = pl011_console_write,
+ .device = uart_console_device,
+ .setup = pl011_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &amba_reg,
+};
+
+#define AMBA_CONSOLE (&amba_console)
+#else
+#define AMBA_CONSOLE NULL
+#endif
+
+static struct uart_driver amba_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyAMA",
+ .dev_name = "ttyAMA",
+ .major = SERIAL_AMBA_MAJOR,
+ .minor = SERIAL_AMBA_MINOR,
+ .nr = UART_NR,
+ .cons = AMBA_CONSOLE,
+};
+
+static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
+{
+ struct uart_amba_port *uap;
+ struct vendor_data *vendor = id->data;
+ void __iomem *base;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
+ if (amba_ports[i] == NULL)
+ break;
+
+ if (i == ARRAY_SIZE(amba_ports)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ uap = kzalloc(sizeof(struct uart_amba_port), GFP_KERNEL);
+ if (uap == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ base = ioremap(dev->res.start, resource_size(&dev->res));
+ if (!base) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ uap->clk = clk_get(&dev->dev, NULL);
+ if (IS_ERR(uap->clk)) {
+ ret = PTR_ERR(uap->clk);
+ goto unmap;
+ }
+
+ uap->vendor = vendor;
+ uap->lcrh_rx = vendor->lcrh_rx;
+ uap->lcrh_tx = vendor->lcrh_tx;
+ uap->fifosize = vendor->fifosize;
+ uap->interrupt_may_hang = vendor->interrupt_may_hang;
+ uap->port.dev = &dev->dev;
+ uap->port.mapbase = dev->res.start;
+ uap->port.membase = base;
+ uap->port.iotype = UPIO_MEM;
+ uap->port.irq = dev->irq[0];
+ uap->port.fifosize = uap->fifosize;
+ uap->port.ops = &amba_pl011_pops;
+ uap->port.flags = UPF_BOOT_AUTOCONF;
+ uap->port.line = i;
+ pl011_dma_probe(uap);
+
+ /* Ensure interrupts from this UART are masked and cleared */
+ writew(0, uap->port.membase + UART011_IMSC);
+ writew(0xffff, uap->port.membase + UART011_ICR);
+
+ snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
+
+ amba_ports[i] = uap;
+
+ amba_set_drvdata(dev, uap);
+ ret = uart_add_one_port(&amba_reg, &uap->port);
+ if (ret) {
+ amba_set_drvdata(dev, NULL);
+ amba_ports[i] = NULL;
+ pl011_dma_remove(uap);
+ clk_put(uap->clk);
+ unmap:
+ iounmap(base);
+ free:
+ kfree(uap);
+ }
+ out:
+ return ret;
+}
+
+static int pl011_remove(struct amba_device *dev)
+{
+ struct uart_amba_port *uap = amba_get_drvdata(dev);
+ int i;
+
+ amba_set_drvdata(dev, NULL);
+
+ uart_remove_one_port(&amba_reg, &uap->port);
+
+ for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
+ if (amba_ports[i] == uap)
+ amba_ports[i] = NULL;
+
+ pl011_dma_remove(uap);
+ iounmap(uap->port.membase);
+ clk_put(uap->clk);
+ kfree(uap);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int pl011_suspend(struct amba_device *dev, pm_message_t state)
+{
+ struct uart_amba_port *uap = amba_get_drvdata(dev);
+
+ if (!uap)
+ return -EINVAL;
+
+ return uart_suspend_port(&amba_reg, &uap->port);
+}
+
+static int pl011_resume(struct amba_device *dev)
+{
+ struct uart_amba_port *uap = amba_get_drvdata(dev);
+
+ if (!uap)
+ return -EINVAL;
+
+ return uart_resume_port(&amba_reg, &uap->port);
+}
+#endif
+
+static struct amba_id pl011_ids[] = {
+ {
+ .id = 0x00041011,
+ .mask = 0x000fffff,
+ .data = &vendor_arm,
+ },
+ {
+ .id = 0x00380802,
+ .mask = 0x00ffffff,
+ .data = &vendor_st,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver pl011_driver = {
+ .drv = {
+ .name = "uart-pl011",
+ },
+ .id_table = pl011_ids,
+ .probe = pl011_probe,
+ .remove = pl011_remove,
+#ifdef CONFIG_PM
+ .suspend = pl011_suspend,
+ .resume = pl011_resume,
+#endif
+};
+
+static int __init pl011_init(void)
+{
+ int ret;
+ printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
+
+ ret = uart_register_driver(&amba_reg);
+ if (ret == 0) {
+ ret = amba_driver_register(&pl011_driver);
+ if (ret)
+ uart_unregister_driver(&amba_reg);
+ }
+ return ret;
+}
+
+static void __exit pl011_exit(void)
+{
+ amba_driver_unregister(&pl011_driver);
+ uart_unregister_driver(&amba_reg);
+}
+
+/*
+ * While this can be a module, if builtin it's most likely the console.
+ * So let's leave module_exit but move module_init to an earlier place.
+ */
+arch_initcall(pl011_init);
+module_exit(pl011_exit);
+
+MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
+MODULE_DESCRIPTION("ARM AMBA serial port driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
new file mode 100644
index 00000000..19a94369
--- /dev/null
+++ b/drivers/tty/serial/apbuart.c
@@ -0,0 +1,696 @@
+/*
+ * Driver for GRLIB serial ports (APBUART)
+ *
+ * Based on linux/drivers/serial/amba.c
+ *
+ * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * Copyright (C) 2003 Konrad Eisele <eiselekd@web.de>
+ * Copyright (C) 2006 Daniel Hellstrom <daniel@gaisler.com>, Aeroflex Gaisler AB
+ * Copyright (C) 2008 Gilead Kutnick <kutnickg@zin-tech.com>
+ * Copyright (C) 2009 Kristoffer Glembo <kristoffer@gaisler.com>, Aeroflex Gaisler AB
+ */
+
+#if defined(CONFIG_SERIAL_GRLIB_GAISLER_APBUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/kthread.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/serial_core.h>
+#include <asm/irq.h>
+
+#include "apbuart.h"
+
+#define SERIAL_APBUART_MAJOR TTY_MAJOR
+#define SERIAL_APBUART_MINOR 64
+#define UART_DUMMY_RSR_RX 0x8000 /* to ignore all reads */
+
+static void apbuart_tx_chars(struct uart_port *port);
+
+static void apbuart_stop_tx(struct uart_port *port)
+{
+ unsigned int cr;
+
+ cr = UART_GET_CTRL(port);
+ cr &= ~UART_CTRL_TI;
+ UART_PUT_CTRL(port, cr);
+}
+
+static void apbuart_start_tx(struct uart_port *port)
+{
+ unsigned int cr;
+
+ cr = UART_GET_CTRL(port);
+ cr |= UART_CTRL_TI;
+ UART_PUT_CTRL(port, cr);
+
+ if (UART_GET_STATUS(port) & UART_STATUS_THE)
+ apbuart_tx_chars(port);
+}
+
+static void apbuart_stop_rx(struct uart_port *port)
+{
+ unsigned int cr;
+
+ cr = UART_GET_CTRL(port);
+ cr &= ~(UART_CTRL_RI);
+ UART_PUT_CTRL(port, cr);
+}
+
+static void apbuart_enable_ms(struct uart_port *port)
+{
+ /* No modem status change interrupts for APBUART */
+}
+
+static void apbuart_rx_chars(struct uart_port *port)
+{
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int status, ch, rsr, flag;
+ unsigned int max_chars = port->fifosize;
+
+ status = UART_GET_STATUS(port);
+
+ while (UART_RX_DATA(status) && (max_chars--)) {
+
+ ch = UART_GET_CHAR(port);
+ flag = TTY_NORMAL;
+
+ port->icount.rx++;
+
+ rsr = UART_GET_STATUS(port) | UART_DUMMY_RSR_RX;
+ UART_PUT_STATUS(port, 0);
+ if (rsr & UART_STATUS_ERR) {
+
+ if (rsr & UART_STATUS_BR) {
+ rsr &= ~(UART_STATUS_FE | UART_STATUS_PE);
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ goto ignore_char;
+ } else if (rsr & UART_STATUS_PE) {
+ port->icount.parity++;
+ } else if (rsr & UART_STATUS_FE) {
+ port->icount.frame++;
+ }
+ if (rsr & UART_STATUS_OE)
+ port->icount.overrun++;
+
+ rsr &= port->read_status_mask;
+
+ if (rsr & UART_STATUS_PE)
+ flag = TTY_PARITY;
+ else if (rsr & UART_STATUS_FE)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(port, ch))
+ goto ignore_char;
+
+ uart_insert_char(port, rsr, UART_STATUS_OE, ch, flag);
+
+
+ ignore_char:
+ status = UART_GET_STATUS(port);
+ }
+
+ tty_flip_buffer_push(tty);
+}
+
+static void apbuart_tx_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ int count;
+
+ if (port->x_char) {
+ UART_PUT_CHAR(port, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ apbuart_stop_tx(port);
+ return;
+ }
+
+ /* amba: fill FIFO */
+ count = port->fifosize >> 1;
+ do {
+ UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ apbuart_stop_tx(port);
+}
+
+static irqreturn_t apbuart_int(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ unsigned int status;
+
+ spin_lock(&port->lock);
+
+ status = UART_GET_STATUS(port);
+ if (status & UART_STATUS_DR)
+ apbuart_rx_chars(port);
+ if (status & UART_STATUS_THE)
+ apbuart_tx_chars(port);
+
+ spin_unlock(&port->lock);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int apbuart_tx_empty(struct uart_port *port)
+{
+ unsigned int status = UART_GET_STATUS(port);
+ return status & UART_STATUS_THE ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int apbuart_get_mctrl(struct uart_port *port)
+{
+ /* The GRLIB APBUART handles flow control in hardware */
+ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+}
+
+static void apbuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ /* The GRLIB APBUART handles flow control in hardware */
+}
+
+static void apbuart_break_ctl(struct uart_port *port, int break_state)
+{
+ /* We don't support sending break */
+}
+
+static int apbuart_startup(struct uart_port *port)
+{
+ int retval;
+ unsigned int cr;
+
+ /* Allocate the IRQ */
+ retval = request_irq(port->irq, apbuart_int, 0, "apbuart", port);
+ if (retval)
+ return retval;
+
+ /* Finally, enable interrupts */
+ cr = UART_GET_CTRL(port);
+ UART_PUT_CTRL(port,
+ cr | UART_CTRL_RE | UART_CTRL_TE |
+ UART_CTRL_RI | UART_CTRL_TI);
+
+ return 0;
+}
+
+static void apbuart_shutdown(struct uart_port *port)
+{
+ unsigned int cr;
+
+ /* disable all interrupts, disable the port */
+ cr = UART_GET_CTRL(port);
+ UART_PUT_CTRL(port,
+ cr & ~(UART_CTRL_RE | UART_CTRL_TE |
+ UART_CTRL_RI | UART_CTRL_TI));
+
+ /* Free the interrupt */
+ free_irq(port->irq, port);
+}
+
+static void apbuart_set_termios(struct uart_port *port,
+ struct ktermios *termios, struct ktermios *old)
+{
+ unsigned int cr;
+ unsigned long flags;
+ unsigned int baud, quot;
+
+ /* Ask the core to calculate the divisor for us. */
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
+ if (baud == 0)
+ panic("invalid baudrate %i\n", port->uartclk / 16);
+
+ /* uart_get_divisor() assumes a 16x sampling clock; the APBUART uses 8x, hence the *2 */
+ quot = (uart_get_divisor(port, baud)) * 2;
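+ /*
+ * quot is now roughly uartclk / (8 * baud); quot - 1 is what gets
+ * written to the scaler register below. For example, a 40 MHz
+ * clock at 38400 baud gives quot of about 2 * 65 = 130 and a
+ * scaler value of 129.
+ */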
+ cr = UART_GET_CTRL(port);
+ cr &= ~(UART_CTRL_PE | UART_CTRL_PS);
+
+ if (termios->c_cflag & PARENB) {
+ cr |= UART_CTRL_PE;
+ if ((termios->c_cflag & PARODD))
+ cr |= UART_CTRL_PS;
+ }
+
+ /* Enable flow control. */
+ if (termios->c_cflag & CRTSCTS)
+ cr |= UART_CTRL_FL;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Update the per-port timeout. */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ port->read_status_mask = UART_STATUS_OE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UART_STATUS_FE | UART_STATUS_PE;
+
+ /* Characters to ignore */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= UART_STATUS_FE | UART_STATUS_PE;
+
+ /* Ignore all characters if CREAD is not set. */
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= UART_DUMMY_RSR_RX;
+
+ /* Set baud rate */
+ quot -= 1;
+ UART_PUT_SCAL(port, quot);
+ UART_PUT_CTRL(port, cr);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *apbuart_type(struct uart_port *port)
+{
+ return port->type == PORT_APBUART ? "GRLIB/APBUART" : NULL;
+}
+
+static void apbuart_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase, 0x100);
+}
+
+static int apbuart_request_port(struct uart_port *port)
+{
+ return request_mem_region(port->mapbase, 0x100, "grlib-apbuart")
+ != NULL ? 0 : -EBUSY;
+}
+
+/* Configure/autoconfigure the port */
+static void apbuart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_APBUART;
+ apbuart_request_port(port);
+ }
+}
+
+/* Verify the new serial_struct (for TIOCSSERIAL) */
+static int apbuart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ int ret = 0;
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_APBUART)
+ ret = -EINVAL;
+ if (ser->irq < 0 || ser->irq >= NR_IRQS)
+ ret = -EINVAL;
+ if (ser->baud_base < 9600)
+ ret = -EINVAL;
+ return ret;
+}
+
+static struct uart_ops grlib_apbuart_ops = {
+ .tx_empty = apbuart_tx_empty,
+ .set_mctrl = apbuart_set_mctrl,
+ .get_mctrl = apbuart_get_mctrl,
+ .stop_tx = apbuart_stop_tx,
+ .start_tx = apbuart_start_tx,
+ .stop_rx = apbuart_stop_rx,
+ .enable_ms = apbuart_enable_ms,
+ .break_ctl = apbuart_break_ctl,
+ .startup = apbuart_startup,
+ .shutdown = apbuart_shutdown,
+ .set_termios = apbuart_set_termios,
+ .type = apbuart_type,
+ .release_port = apbuart_release_port,
+ .request_port = apbuart_request_port,
+ .config_port = apbuart_config_port,
+ .verify_port = apbuart_verify_port,
+};
+
+static struct uart_port grlib_apbuart_ports[UART_NR];
+static struct device_node *grlib_apbuart_nodes[UART_NR];
+
+static int apbuart_scan_fifo_size(struct uart_port *port, int portnumber)
+{
+ int ctrl, loop = 0;
+ int status;
+ int fifosize;
+ unsigned long flags;
+
+ ctrl = UART_GET_CTRL(port);
+
+ /*
+ * Enable the transceiver and wait for it to be ready to send data.
+ * Disable interrupts so that this process will not be externally
+ * interrupted in the middle (which could cause the transceiver to
+ * drain prematurely).
+ */
+
+ local_irq_save(flags);
+
+ UART_PUT_CTRL(port, ctrl | UART_CTRL_TE);
+
+ while (!UART_TX_READY(UART_GET_STATUS(port)))
+ loop++;
+
+ /*
+ * Disable the transceiver so no data is actually sent during the
+ * test.
+ */
+
+ UART_PUT_CTRL(port, ctrl & ~(UART_CTRL_TE));
+
+ fifosize = 1;
+ UART_PUT_CHAR(port, 0);
+
+ /*
+ * So long as transmitting a character increments the transceiver FIFO
+ * count, the FIFO must be at least that big. These bytes will
+ * automatically drain off of the FIFO.
+ */
+
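+ /*
+ * Bits [25:20] of the status word are used here as the current TX
+ * FIFO count; keep pushing characters for as long as the count
+ * tracks the number written.
+ */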
+ status = UART_GET_STATUS(port);
+ while (((status >> 20) & 0x3F) == fifosize) {
+ fifosize++;
+ UART_PUT_CHAR(port, 0);
+ status = UART_GET_STATUS(port);
+ }
+
+ fifosize--;
+
+ UART_PUT_CTRL(port, ctrl);
+ local_irq_restore(flags);
+
+ if (fifosize == 0)
+ fifosize = 1;
+
+ return fifosize;
+}
+
+static void apbuart_flush_fifo(struct uart_port *port)
+{
+ int i;
+
+ for (i = 0; i < port->fifosize; i++)
+ UART_GET_CHAR(port);
+}
+
+
+/* ======================================================================== */
+/* Console driver, if enabled */
+/* ======================================================================== */
+
+#ifdef CONFIG_SERIAL_GRLIB_GAISLER_APBUART_CONSOLE
+
+static void apbuart_console_putchar(struct uart_port *port, int ch)
+{
+ unsigned int status;
+ do {
+ status = UART_GET_STATUS(port);
+ } while (!UART_TX_READY(status));
+ UART_PUT_CHAR(port, ch);
+}
+
+static void
+apbuart_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_port *port = &grlib_apbuart_ports[co->index];
+ unsigned int status, old_cr, new_cr;
+
+ /* First save the CR then disable the interrupts */
+ old_cr = UART_GET_CTRL(port);
+ new_cr = old_cr & ~(UART_CTRL_RI | UART_CTRL_TI);
+ UART_PUT_CTRL(port, new_cr);
+
+ uart_console_write(port, s, count, apbuart_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the control register
+ */
+ do {
+ status = UART_GET_STATUS(port);
+ } while (!UART_TX_READY(status));
+ UART_PUT_CTRL(port, old_cr);
+}
+
+static void __init
+apbuart_console_get_options(struct uart_port *port, int *baud,
+ int *parity, int *bits)
+{
+ if (UART_GET_CTRL(port) & (UART_CTRL_RE | UART_CTRL_TE)) {
+
+ unsigned int quot, status;
+ status = UART_GET_STATUS(port);
+
+ *parity = 'n';
+ if (status & UART_CTRL_PE) {
+ if ((status & UART_CTRL_PS) == 0)
+ *parity = 'e';
+ else
+ *parity = 'o';
+ }
+
+ *bits = 8;
+ quot = UART_GET_SCAL(port) / 8;
+ *baud = port->uartclk / (16 * (quot + 1));
+ }
+}
+
+static int __init apbuart_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 38400;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ pr_debug("apbuart_console_setup co=%p, co->index=%i, options=%s\n",
+ co, co->index, options);
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index >= grlib_apbuart_port_nr)
+ co->index = 0;
+
+ port = &grlib_apbuart_ports[co->index];
+
+ spin_lock_init(&port->lock);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ apbuart_console_get_options(port, &baud, &parity, &bits);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver grlib_apbuart_driver;
+
+static struct console grlib_apbuart_console = {
+ .name = "ttyS",
+ .write = apbuart_console_write,
+ .device = uart_console_device,
+ .setup = apbuart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &grlib_apbuart_driver,
+};
+
+
+static int grlib_apbuart_configure(void);
+
+static int __init apbuart_console_init(void)
+{
+ if (grlib_apbuart_configure())
+ return -ENODEV;
+ register_console(&grlib_apbuart_console);
+ return 0;
+}
+
+console_initcall(apbuart_console_init);
+
+#define APBUART_CONSOLE (&grlib_apbuart_console)
+#else
+#define APBUART_CONSOLE NULL
+#endif
+
+static struct uart_driver grlib_apbuart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "serial",
+ .dev_name = "ttyS",
+ .major = SERIAL_APBUART_MAJOR,
+ .minor = SERIAL_APBUART_MINOR,
+ .nr = UART_NR,
+ .cons = APBUART_CONSOLE,
+};
+
+
+/* ======================================================================== */
+/* OF Platform Driver */
+/* ======================================================================== */
+
+static int __devinit apbuart_probe(struct platform_device *op)
+{
+ int i;
+ struct uart_port *port = NULL;
+
+ for (i = 0; i < grlib_apbuart_port_nr; i++) {
+ if (op->dev.of_node == grlib_apbuart_nodes[i])
+ break;
+ }
+
+ port = &grlib_apbuart_ports[i];
+ port->dev = &op->dev;
+ port->irq = op->archdata.irqs[0];
+
+ uart_add_one_port(&grlib_apbuart_driver, (struct uart_port *) port);
+
+ apbuart_flush_fifo((struct uart_port *) port);
+
+ printk(KERN_INFO "grlib-apbuart at 0x%llx, irq %d\n",
+ (unsigned long long) port->mapbase, port->irq);
+ return 0;
+}
+
+static struct of_device_id __initdata apbuart_match[] = {
+ {
+ .name = "GAISLER_APBUART",
+ },
+ {
+ .name = "01_00c",
+ },
+ {},
+};
+
+static struct platform_driver grlib_apbuart_of_driver = {
+ .probe = apbuart_probe,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "grlib-apbuart",
+ .of_match_table = apbuart_match,
+ },
+};
+
+
+static int grlib_apbuart_configure(void)
+{
+ struct device_node *np;
+ int line = 0;
+
+ for_each_matching_node(np, apbuart_match) {
+ const int *ampopts;
+ const u32 *freq_hz;
+ const struct amba_prom_registers *regs;
+ struct uart_port *port;
+ unsigned long addr;
+
+ ampopts = of_get_property(np, "ampopts", NULL);
+ if (ampopts && (*ampopts == 0))
+ continue; /* Ignore if used by another OS instance */
+ regs = of_get_property(np, "reg", NULL);
+ /* Frequency of APB Bus is frequency of UART */
+ freq_hz = of_get_property(np, "freq", NULL);
+
+ if (!regs || !freq_hz || (*freq_hz == 0))
+ continue;
+
+ grlib_apbuart_nodes[line] = np;
+
+ addr = regs->phys_addr;
+
+ port = &grlib_apbuart_ports[line];
+
+ port->mapbase = addr;
+ port->membase = ioremap(addr, sizeof(struct grlib_apbuart_regs_map));
+ port->irq = 0;
+ port->iotype = UPIO_MEM;
+ port->ops = &grlib_apbuart_ops;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->line = line;
+ port->uartclk = *freq_hz;
+ port->fifosize = apbuart_scan_fifo_size((struct uart_port *) port, line);
+ line++;
+
+ /* We support maximum UART_NR uarts ... */
+ if (line == UART_NR)
+ break;
+ }
+
+ grlib_apbuart_driver.nr = grlib_apbuart_port_nr = line;
+ return line ? 0 : -ENODEV;
+}
+
+static int __init grlib_apbuart_init(void)
+{
+ int ret;
+
+ /* Find all APBUARTs in the device tree and initialize their ports */
+ ret = grlib_apbuart_configure();
+ if (ret)
+ return ret;
+
+ printk(KERN_INFO "Serial: GRLIB APBUART driver\n");
+
+ ret = uart_register_driver(&grlib_apbuart_driver);
+
+ if (ret) {
+ printk(KERN_ERR "%s: uart_register_driver failed (%i)\n",
+ __FILE__, ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&grlib_apbuart_of_driver);
+ if (ret) {
+ printk(KERN_ERR
+ "%s: platform_driver_register failed (%i)\n",
+ __FILE__, ret);
+ uart_unregister_driver(&grlib_apbuart_driver);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void __exit grlib_apbuart_exit(void)
+{
+ int i;
+
+ for (i = 0; i < grlib_apbuart_port_nr; i++)
+ uart_remove_one_port(&grlib_apbuart_driver,
+ &grlib_apbuart_ports[i]);
+
+ uart_unregister_driver(&grlib_apbuart_driver);
+ platform_driver_unregister(&grlib_apbuart_of_driver);
+}
+
+module_init(grlib_apbuart_init);
+module_exit(grlib_apbuart_exit);
+
+MODULE_AUTHOR("Aeroflex Gaisler AB");
+MODULE_DESCRIPTION("GRLIB APBUART serial driver");
+MODULE_VERSION("2.1");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/apbuart.h b/drivers/tty/serial/apbuart.h
new file mode 100644
index 00000000..5faf87c8
--- /dev/null
+++ b/drivers/tty/serial/apbuart.h
@@ -0,0 +1,64 @@
+#ifndef __GRLIB_APBUART_H__
+#define __GRLIB_APBUART_H__
+
+#include <asm/io.h>
+
+#define UART_NR 8
+static int grlib_apbuart_port_nr;
+
+struct grlib_apbuart_regs_map {
+ u32 data;
+ u32 status;
+ u32 ctrl;
+ u32 scaler;
+};
+
+struct amba_prom_registers {
+ unsigned int phys_addr;
+ unsigned int reg_size;
+};
+
+/*
+ * The following defines the bits in the APBUART Status Registers.
+ */
+#define UART_STATUS_DR 0x00000001 /* Data Ready */
+#define UART_STATUS_TSE 0x00000002 /* TX Send Register Empty */
+#define UART_STATUS_THE 0x00000004 /* TX Hold Register Empty */
+#define UART_STATUS_BR 0x00000008 /* Break Error */
+#define UART_STATUS_OE 0x00000010 /* RX Overrun Error */
+#define UART_STATUS_PE 0x00000020 /* RX Parity Error */
+#define UART_STATUS_FE 0x00000040 /* RX Framing Error */
+#define UART_STATUS_ERR 0x00000078 /* Error Mask */
+
+/*
+ * The following defines the bits in the APBUART Ctrl Registers.
+ */
+#define UART_CTRL_RE 0x00000001 /* Receiver enable */
+#define UART_CTRL_TE 0x00000002 /* Transmitter enable */
+#define UART_CTRL_RI 0x00000004 /* Receiver interrupt enable */
+#define UART_CTRL_TI 0x00000008 /* Transmitter irq */
+#define UART_CTRL_PS 0x00000010 /* Parity select */
+#define UART_CTRL_PE 0x00000020 /* Parity enable */
+#define UART_CTRL_FL 0x00000040 /* Flow control enable */
+#define UART_CTRL_LB 0x00000080 /* Loopback enable */
+
+#define APBBASE(port) ((struct grlib_apbuart_regs_map *)((port)->membase))
+
+#define APBBASE_DATA_P(port) (&(APBBASE(port)->data))
+#define APBBASE_STATUS_P(port) (&(APBBASE(port)->status))
+#define APBBASE_CTRL_P(port) (&(APBBASE(port)->ctrl))
+#define APBBASE_SCALAR_P(port) (&(APBBASE(port)->scaler))
+
+#define UART_GET_CHAR(port) (__raw_readl(APBBASE_DATA_P(port)))
+#define UART_PUT_CHAR(port, v) (__raw_writel(v, APBBASE_DATA_P(port)))
+#define UART_GET_STATUS(port) (__raw_readl(APBBASE_STATUS_P(port)))
+#define UART_PUT_STATUS(port, v)(__raw_writel(v, APBBASE_STATUS_P(port)))
+#define UART_GET_CTRL(port) (__raw_readl(APBBASE_CTRL_P(port)))
+#define UART_PUT_CTRL(port, v) (__raw_writel(v, APBBASE_CTRL_P(port)))
+#define UART_GET_SCAL(port) (__raw_readl(APBBASE_SCALAR_P(port)))
+#define UART_PUT_SCAL(port, v) (__raw_writel(v, APBBASE_SCALAR_P(port)))
+
+#define UART_RX_DATA(s) (((s) & UART_STATUS_DR) != 0)
+#define UART_TX_READY(s) (((s) & UART_STATUS_THE) != 0)
+
+#endif /* __GRLIB_APBUART_H__ */
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
new file mode 100644
index 00000000..b989495c
--- /dev/null
+++ b/drivers/tty/serial/atmel_serial.c
@@ -0,0 +1,1829 @@
+/*
+ * Driver for Atmel AT91 / AT32 Serial ports
+ * Copyright (C) 2003 Rick Bronson
+ *
+ * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * DMA support added by Chip Coldwell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/tty_flip.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/atmel_pdc.h>
+#include <linux/atmel_serial.h>
+#include <linux/uaccess.h>
+
+#include <asm/io.h>
+#include <asm/ioctls.h>
+
+#include <asm/mach/serial_at91.h>
+#include <mach/board.h>
+
+#ifdef CONFIG_ARM
+#include <mach/cpu.h>
+#include <mach/gpio.h>
+#endif
+
+#define PDC_BUFFER_SIZE 512
+/* Revisit: We should calculate this based on the actual port settings */
+#define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */
+
+#if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+
+static void atmel_start_rx(struct uart_port *port);
+static void atmel_stop_rx(struct uart_port *port);
+
+#ifdef CONFIG_SERIAL_ATMEL_TTYAT
+
+/* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
+ * should coexist with the 8250 driver, such as if we have an external 16C550
+ * UART. */
+#define SERIAL_ATMEL_MAJOR 204
+#define MINOR_START 154
+#define ATMEL_DEVICENAME "ttyAT"
+
+#else
+
+/* Use device name ttyS, major 4, minor 64-68. This is the usual serial port
+ * name, but it is legally reserved for the 8250 driver. */
+#define SERIAL_ATMEL_MAJOR TTY_MAJOR
+#define MINOR_START 64
+#define ATMEL_DEVICENAME "ttyS"
+
+#endif
+
+#define ATMEL_ISR_PASS_LIMIT 256
+
+/* UART registers. CR is write-only, hence no GET macro */
+#define UART_PUT_CR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_CR)
+#define UART_GET_MR(port) __raw_readl((port)->membase + ATMEL_US_MR)
+#define UART_PUT_MR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_MR)
+#define UART_PUT_IER(port,v) __raw_writel(v, (port)->membase + ATMEL_US_IER)
+#define UART_PUT_IDR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_IDR)
+#define UART_GET_IMR(port) __raw_readl((port)->membase + ATMEL_US_IMR)
+#define UART_GET_CSR(port) __raw_readl((port)->membase + ATMEL_US_CSR)
+#define UART_GET_CHAR(port) __raw_readl((port)->membase + ATMEL_US_RHR)
+#define UART_PUT_CHAR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_THR)
+#define UART_GET_BRGR(port) __raw_readl((port)->membase + ATMEL_US_BRGR)
+#define UART_PUT_BRGR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_BRGR)
+#define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR)
+#define UART_PUT_TTGR(port, v) __raw_writel(v, (port)->membase + ATMEL_US_TTGR)
+
+ /* PDC registers */
+#define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
+#define UART_GET_PTSR(port) __raw_readl((port)->membase + ATMEL_PDC_PTSR)
+
+#define UART_PUT_RPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RPR)
+#define UART_GET_RPR(port) __raw_readl((port)->membase + ATMEL_PDC_RPR)
+#define UART_PUT_RCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RCR)
+#define UART_PUT_RNPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RNPR)
+#define UART_PUT_RNCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RNCR)
+
+#define UART_PUT_TPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TPR)
+#define UART_PUT_TCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TCR)
+#define UART_GET_TCR(port) __raw_readl((port)->membase + ATMEL_PDC_TCR)
+
+static int (*atmel_open_hook)(struct uart_port *);
+static void (*atmel_close_hook)(struct uart_port *);
+
+struct atmel_dma_buffer {
+ unsigned char *buf;
+ dma_addr_t dma_addr;
+ unsigned int dma_size;
+ unsigned int ofs;
+};
+
+struct atmel_uart_char {
+ u16 status;
+ u16 ch;
+};
+
+#define ATMEL_SERIAL_RINGSIZE 1024
+
+/*
+ * We wrap our port structure around the generic uart_port.
+ */
+struct atmel_uart_port {
+ struct uart_port uart; /* uart */
+ struct clk *clk; /* uart clock */
+ int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */
+ u32 backup_imr; /* IMR saved during suspend */
+ int break_active; /* break being received */
+
+ short use_dma_rx; /* enable PDC receiver */
+ short pdc_rx_idx; /* current PDC RX buffer */
+ struct atmel_dma_buffer pdc_rx[2]; /* PDC receiver */
+
+ short use_dma_tx; /* enable PDC transmitter */
+ struct atmel_dma_buffer pdc_tx; /* PDC transmitter */
+
+ struct tasklet_struct tasklet;
+ unsigned int irq_status;
+ unsigned int irq_status_prev;
+
+ struct circ_buf rx_ring;
+
+ struct serial_rs485 rs485; /* rs485 settings */
+ unsigned int tx_done_mask;
+};
+
+static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
+
+#ifdef SUPPORT_SYSRQ
+static struct console atmel_console;
+#endif
+
+static inline struct atmel_uart_port *
+to_atmel_uart_port(struct uart_port *uart)
+{
+ return container_of(uart, struct atmel_uart_port, uart);
+}
+
+#ifdef CONFIG_SERIAL_ATMEL_PDC
+static bool atmel_use_dma_rx(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ return atmel_port->use_dma_rx;
+}
+
+static bool atmel_use_dma_tx(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ return atmel_port->use_dma_tx;
+}
+#else
+static bool atmel_use_dma_rx(struct uart_port *port)
+{
+ return false;
+}
+
+static bool atmel_use_dma_tx(struct uart_port *port)
+{
+ return false;
+}
+#endif
+
+/* Enable or disable the rs485 support */
+void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ unsigned int mode;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Disable interrupts */
+ UART_PUT_IDR(port, atmel_port->tx_done_mask);
+
+ mode = UART_GET_MR(port);
+
+ /* Resetting serial mode to RS232 (0x0) */
+ mode &= ~ATMEL_US_USMODE;
+
+ atmel_port->rs485 = *rs485conf;
+
+ if (rs485conf->flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+ if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
+ UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
+ mode |= ATMEL_US_USMODE_RS485;
+ } else {
+ dev_dbg(port->dev, "Setting UART to RS232\n");
+ if (atmel_use_dma_tx(port))
+ atmel_port->tx_done_mask = ATMEL_US_ENDTX |
+ ATMEL_US_TXBUFE;
+ else
+ atmel_port->tx_done_mask = ATMEL_US_TXRDY;
+ }
+ UART_PUT_MR(port, mode);
+
+ /* Enable interrupts */
+ UART_PUT_IER(port, atmel_port->tx_done_mask);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+}
+
+/*
+ * Return TIOCSER_TEMT when the transmitter FIFO and shift register are empty.
+ */
+static u_int atmel_tx_empty(struct uart_port *port)
+{
+ return (UART_GET_CSR(port) & ATMEL_US_TXEMPTY) ? TIOCSER_TEMT : 0;
+}
+
+/*
+ * Set state of the modem control output lines
+ */
+static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
+{
+ unsigned int control = 0;
+ unsigned int mode;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+#ifdef CONFIG_ARCH_AT91RM9200
+ if (cpu_is_at91rm9200()) {
+ /*
+ * AT91RM9200 Errata #39: RTS0 is not internally connected
+ * to PA21. We need to drive the pin manually.
+ */
+ if (port->mapbase == AT91RM9200_BASE_US0) {
+ if (mctrl & TIOCM_RTS)
+ at91_set_gpio_value(AT91_PIN_PA21, 0);
+ else
+ at91_set_gpio_value(AT91_PIN_PA21, 1);
+ }
+ }
+#endif
+
+ if (mctrl & TIOCM_RTS)
+ control |= ATMEL_US_RTSEN;
+ else
+ control |= ATMEL_US_RTSDIS;
+
+ if (mctrl & TIOCM_DTR)
+ control |= ATMEL_US_DTREN;
+ else
+ control |= ATMEL_US_DTRDIS;
+
+ UART_PUT_CR(port, control);
+
+ /* Local loopback mode? */
+ mode = UART_GET_MR(port) & ~ATMEL_US_CHMODE;
+ if (mctrl & TIOCM_LOOP)
+ mode |= ATMEL_US_CHMODE_LOC_LOOP;
+ else
+ mode |= ATMEL_US_CHMODE_NORMAL;
+
+ /* Resetting serial mode to RS232 (0x0) */
+ mode &= ~ATMEL_US_USMODE;
+
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
+ UART_PUT_TTGR(port,
+ atmel_port->rs485.delay_rts_after_send);
+ mode |= ATMEL_US_USMODE_RS485;
+ } else {
+ dev_dbg(port->dev, "Setting UART to RS232\n");
+ }
+ UART_PUT_MR(port, mode);
+}
+
+/*
+ * Get state of the modem control input lines
+ */
+static u_int atmel_get_mctrl(struct uart_port *port)
+{
+ unsigned int status, ret = 0;
+
+ status = UART_GET_CSR(port);
+
+ /*
+ * The control signals are active low.
+ */
+ if (!(status & ATMEL_US_DCD))
+ ret |= TIOCM_CD;
+ if (!(status & ATMEL_US_CTS))
+ ret |= TIOCM_CTS;
+ if (!(status & ATMEL_US_DSR))
+ ret |= TIOCM_DSR;
+ if (!(status & ATMEL_US_RI))
+ ret |= TIOCM_RI;
+
+ return ret;
+}
+
+/*
+ * Stop transmitting.
+ */
+static void atmel_stop_tx(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ if (atmel_use_dma_tx(port)) {
+ /* disable PDC transmit */
+ UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
+ }
+ /* Disable interrupts */
+ UART_PUT_IDR(port, atmel_port->tx_done_mask);
+
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+ atmel_start_rx(port);
+}
+
+/*
+ * Start transmitting.
+ */
+static void atmel_start_tx(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ if (atmel_use_dma_tx(port)) {
+ if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN)
+			/*
+			 * The transmitter is already running.  Yes, we
+			 * really need this.
+			 */
+ return;
+
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+ atmel_stop_rx(port);
+
+ /* re-enable PDC transmit */
+ UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
+ }
+ /* Enable interrupts */
+ UART_PUT_IER(port, atmel_port->tx_done_mask);
+}
+
+/*
+ * Start receiving - the port is in the process of being opened.
+ */
+static void atmel_start_rx(struct uart_port *port)
+{
+ UART_PUT_CR(port, ATMEL_US_RSTSTA); /* reset status and receiver */
+
+ if (atmel_use_dma_rx(port)) {
+ /* enable PDC controller */
+ UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
+ port->read_status_mask);
+ UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
+ } else {
+ UART_PUT_IER(port, ATMEL_US_RXRDY);
+ }
+}
+
+/*
+ * Stop receiving - the port is in the process of being closed.
+ */
+static void atmel_stop_rx(struct uart_port *port)
+{
+ if (atmel_use_dma_rx(port)) {
+ /* disable PDC receive */
+ UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
+ UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
+ port->read_status_mask);
+ } else {
+ UART_PUT_IDR(port, ATMEL_US_RXRDY);
+ }
+}
+
+/*
+ * Enable modem status interrupts
+ */
+static void atmel_enable_ms(struct uart_port *port)
+{
+ UART_PUT_IER(port, ATMEL_US_RIIC | ATMEL_US_DSRIC
+ | ATMEL_US_DCDIC | ATMEL_US_CTSIC);
+}
+
+/*
+ * Control the transmission of a break signal
+ */
+static void atmel_break_ctl(struct uart_port *port, int break_state)
+{
+ if (break_state != 0)
+ UART_PUT_CR(port, ATMEL_US_STTBRK); /* start break */
+ else
+ UART_PUT_CR(port, ATMEL_US_STPBRK); /* stop break */
+}
+
+/*
+ * Stores the incoming character in the ring buffer
+ */
+static void
+atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
+ unsigned int ch)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct circ_buf *ring = &atmel_port->rx_ring;
+ struct atmel_uart_char *c;
+
+ if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
+ /* Buffer overflow, ignore char */
+ return;
+
+ c = &((struct atmel_uart_char *)ring->buf)[ring->head];
+ c->status = status;
+ c->ch = ch;
+
+ /* Make sure the character is stored before we update head. */
+ smp_wmb();
+
+ ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
+}
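+
+/*
+ * The ring above is a single-producer/single-consumer buffer: the interrupt
+ * path (atmel_rx_chars -> atmel_buffer_rx_char) only advances head, while
+ * the tasklet path (atmel_rx_from_ring) only advances tail.  The smp_wmb()
+ * here pairs with the smp_rmb() in atmel_rx_from_ring() so the consumer
+ * never sees the new head before the character it indexes.
+ */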
+
+/*
+ * Deal with parity, framing and overrun errors.
+ */
+static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
+{
+ /* clear error */
+ UART_PUT_CR(port, ATMEL_US_RSTSTA);
+
+ if (status & ATMEL_US_RXBRK) {
+ /* ignore side-effect */
+ status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
+ port->icount.brk++;
+ }
+ if (status & ATMEL_US_PARE)
+ port->icount.parity++;
+ if (status & ATMEL_US_FRAME)
+ port->icount.frame++;
+ if (status & ATMEL_US_OVRE)
+ port->icount.overrun++;
+}
+
+/*
+ * Characters received (called from interrupt handler)
+ */
+static void atmel_rx_chars(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ unsigned int status, ch;
+
+ status = UART_GET_CSR(port);
+ while (status & ATMEL_US_RXRDY) {
+ ch = UART_GET_CHAR(port);
+
+ /*
+ * note that the error handling code is
+ * out of the main execution path
+ */
+ if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
+ | ATMEL_US_OVRE | ATMEL_US_RXBRK)
+ || atmel_port->break_active)) {
+
+ /* clear error */
+ UART_PUT_CR(port, ATMEL_US_RSTSTA);
+
+ if (status & ATMEL_US_RXBRK
+ && !atmel_port->break_active) {
+ atmel_port->break_active = 1;
+ UART_PUT_IER(port, ATMEL_US_RXBRK);
+ } else {
+ /*
+ * This is either the end-of-break
+ * condition or we've received at
+ * least one character without RXBRK
+ * being set. In both cases, the next
+ * RXBRK will indicate start-of-break.
+ */
+ UART_PUT_IDR(port, ATMEL_US_RXBRK);
+ status &= ~ATMEL_US_RXBRK;
+ atmel_port->break_active = 0;
+ }
+ }
+
+ atmel_buffer_rx_char(port, status, ch);
+ status = UART_GET_CSR(port);
+ }
+
+ tasklet_schedule(&atmel_port->tasklet);
+}
+
+/*
+ * Transmit characters (called from tasklet with TXRDY interrupt
+ * disabled)
+ */
+static void atmel_tx_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ if (port->x_char && UART_GET_CSR(port) & atmel_port->tx_done_mask) {
+ UART_PUT_CHAR(port, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+ return;
+
+ while (UART_GET_CSR(port) & atmel_port->tx_done_mask) {
+ UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (!uart_circ_empty(xmit))
+ /* Enable interrupts */
+ UART_PUT_IER(port, atmel_port->tx_done_mask);
+}
+
+/*
+ * receive interrupt handler.
+ */
+static void
+atmel_handle_receive(struct uart_port *port, unsigned int pending)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ if (atmel_use_dma_rx(port)) {
+ /*
+ * PDC receive. Just schedule the tasklet and let it
+ * figure out the details.
+ *
+ * TODO: We're not handling error flags correctly at
+ * the moment.
+ */
+ if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
+ UART_PUT_IDR(port, (ATMEL_US_ENDRX
+ | ATMEL_US_TIMEOUT));
+ tasklet_schedule(&atmel_port->tasklet);
+ }
+
+ if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
+ ATMEL_US_FRAME | ATMEL_US_PARE))
+ atmel_pdc_rxerr(port, pending);
+ }
+
+ /* Interrupt receive */
+ if (pending & ATMEL_US_RXRDY)
+ atmel_rx_chars(port);
+ else if (pending & ATMEL_US_RXBRK) {
+ /*
+ * End of break detected. If it came along with a
+ * character, atmel_rx_chars will handle it.
+ */
+ UART_PUT_CR(port, ATMEL_US_RSTSTA);
+ UART_PUT_IDR(port, ATMEL_US_RXBRK);
+ atmel_port->break_active = 0;
+ }
+}
+
+/*
+ * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
+ */
+static void
+atmel_handle_transmit(struct uart_port *port, unsigned int pending)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ if (pending & atmel_port->tx_done_mask) {
+ /* Either PDC or interrupt transmission */
+ UART_PUT_IDR(port, atmel_port->tx_done_mask);
+ tasklet_schedule(&atmel_port->tasklet);
+ }
+}
+
+/*
+ * status flags interrupt handler.
+ */
+static void
+atmel_handle_status(struct uart_port *port, unsigned int pending,
+ unsigned int status)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
+ | ATMEL_US_CTSIC)) {
+ atmel_port->irq_status = status;
+ tasklet_schedule(&atmel_port->tasklet);
+ }
+}
+
+/*
+ * Interrupt handler
+ */
+static irqreturn_t atmel_interrupt(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ unsigned int status, pending, pass_counter = 0;
+
+ do {
+ status = UART_GET_CSR(port);
+ pending = status & UART_GET_IMR(port);
+ if (!pending)
+ break;
+
+ atmel_handle_receive(port, pending);
+ atmel_handle_status(port, pending, status);
+ atmel_handle_transmit(port, pending);
+ } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
+
+ return pass_counter ? IRQ_HANDLED : IRQ_NONE;
+}
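+
+/*
+ * atmel_interrupt() itself never takes port->lock; it masks or records the
+ * pending sources and defers the heavy lifting (pushing characters to the
+ * tty layer, PDC bookkeeping, modem-status changes) to atmel_tasklet_func(),
+ * which runs with the lock held.
+ */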
+
+/*
+ * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
+ */
+static void atmel_tx_dma(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct circ_buf *xmit = &port->state->xmit;
+ struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
+ int count;
+
+ /* nothing left to transmit? */
+ if (UART_GET_TCR(port))
+ return;
+
+ xmit->tail += pdc->ofs;
+ xmit->tail &= UART_XMIT_SIZE - 1;
+
+ port->icount.tx += pdc->ofs;
+ pdc->ofs = 0;
+
+ /* more to transmit - setup next transfer */
+
+ /* disable PDC transmit */
+ UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
+
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
+ dma_sync_single_for_device(port->dev,
+ pdc->dma_addr,
+ pdc->dma_size,
+ DMA_TO_DEVICE);
+
+ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ pdc->ofs = count;
+
+ UART_PUT_TPR(port, pdc->dma_addr + xmit->tail);
+ UART_PUT_TCR(port, count);
+ /* re-enable PDC transmit */
+ UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
+ /* Enable interrupts */
+ UART_PUT_IER(port, atmel_port->tx_done_mask);
+ } else {
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ /* DMA done, stop TX, start RX for RS485 */
+ atmel_start_rx(port);
+ }
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+static void atmel_rx_from_ring(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct circ_buf *ring = &atmel_port->rx_ring;
+ unsigned int flg;
+ unsigned int status;
+
+ while (ring->head != ring->tail) {
+ struct atmel_uart_char c;
+
+ /* Make sure c is loaded after head. */
+ smp_rmb();
+
+ c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
+
+ ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
+
+ port->icount.rx++;
+ status = c.status;
+ flg = TTY_NORMAL;
+
+ /*
+ * note that the error handling code is
+ * out of the main execution path
+ */
+ if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
+ | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
+ if (status & ATMEL_US_RXBRK) {
+ /* ignore side-effect */
+ status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
+
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ }
+ if (status & ATMEL_US_PARE)
+ port->icount.parity++;
+ if (status & ATMEL_US_FRAME)
+ port->icount.frame++;
+ if (status & ATMEL_US_OVRE)
+ port->icount.overrun++;
+
+ status &= port->read_status_mask;
+
+ if (status & ATMEL_US_RXBRK)
+ flg = TTY_BREAK;
+ else if (status & ATMEL_US_PARE)
+ flg = TTY_PARITY;
+ else if (status & ATMEL_US_FRAME)
+ flg = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(port, c.ch))
+ continue;
+
+ uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
+ }
+
+ /*
+ * Drop the lock here since it might end up calling
+ * uart_start(), which takes the lock.
+ */
+ spin_unlock(&port->lock);
+ tty_flip_buffer_push(port->state->port.tty);
+ spin_lock(&port->lock);
+}
+
+static void atmel_rx_from_dma(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct tty_struct *tty = port->state->port.tty;
+ struct atmel_dma_buffer *pdc;
+ int rx_idx = atmel_port->pdc_rx_idx;
+ unsigned int head;
+ unsigned int tail;
+ unsigned int count;
+
+ do {
+ /* Reset the UART timeout early so that we don't miss one */
+ UART_PUT_CR(port, ATMEL_US_STTTO);
+
+ pdc = &atmel_port->pdc_rx[rx_idx];
+ head = UART_GET_RPR(port) - pdc->dma_addr;
+ tail = pdc->ofs;
+
+ /* If the PDC has switched buffers, RPR won't contain
+ * any address within the current buffer. Since head
+ * is unsigned, we just need a one-way comparison to
+ * find out.
+ *
+ * In this case, we just need to consume the entire
+ * buffer and resubmit it for DMA. This will clear the
+ * ENDRX bit as well, so that we can safely re-enable
+ * all interrupts below.
+ */
+ head = min(head, pdc->dma_size);
+
+ if (likely(head != tail)) {
+ dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
+ pdc->dma_size, DMA_FROM_DEVICE);
+
+ /*
+ * head will only wrap around when we recycle
+ * the DMA buffer, and when that happens, we
+ * explicitly set tail to 0. So head will
+ * always be greater than tail.
+ */
+ count = head - tail;
+
+ tty_insert_flip_string(tty, pdc->buf + pdc->ofs, count);
+
+ dma_sync_single_for_device(port->dev, pdc->dma_addr,
+ pdc->dma_size, DMA_FROM_DEVICE);
+
+ port->icount.rx += count;
+ pdc->ofs = head;
+ }
+
+ /*
+ * If the current buffer is full, we need to check if
+ * the next one contains any additional data.
+ */
+ if (head >= pdc->dma_size) {
+ pdc->ofs = 0;
+ UART_PUT_RNPR(port, pdc->dma_addr);
+ UART_PUT_RNCR(port, pdc->dma_size);
+
+ rx_idx = !rx_idx;
+ atmel_port->pdc_rx_idx = rx_idx;
+ }
+ } while (head >= pdc->dma_size);
+
+ /*
+ * Drop the lock here since it might end up calling
+ * uart_start(), which takes the lock.
+ */
+ spin_unlock(&port->lock);
+ tty_flip_buffer_push(tty);
+ spin_lock(&port->lock);
+
+ UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
+}
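+
+/*
+ * PDC receive uses pdc_rx[0] and pdc_rx[1] in a ping-pong arrangement:
+ * RPR/RCR describe the buffer currently being filled and RNPR/RNCR the next
+ * one (see atmel_startup()).  When the loop above drains a full buffer it
+ * resubmits it through RNPR/RNCR and flips pdc_rx_idx, so the hardware can
+ * keep receiving into one buffer while the CPU empties the other.
+ */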
+
+/*
+ * tasklet handling tty stuff outside the interrupt handler.
+ */
+static void atmel_tasklet_func(unsigned long data)
+{
+ struct uart_port *port = (struct uart_port *)data;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ unsigned int status;
+ unsigned int status_change;
+
+ /* The interrupt handler does not take the lock */
+ spin_lock(&port->lock);
+
+ if (atmel_use_dma_tx(port))
+ atmel_tx_dma(port);
+ else
+ atmel_tx_chars(port);
+
+ status = atmel_port->irq_status;
+ status_change = status ^ atmel_port->irq_status_prev;
+
+ if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
+ | ATMEL_US_DCD | ATMEL_US_CTS)) {
+ /* TODO: All reads to CSR will clear these interrupts! */
+ if (status_change & ATMEL_US_RI)
+ port->icount.rng++;
+ if (status_change & ATMEL_US_DSR)
+ port->icount.dsr++;
+ if (status_change & ATMEL_US_DCD)
+ uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
+ if (status_change & ATMEL_US_CTS)
+ uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
+
+ wake_up_interruptible(&port->state->port.delta_msr_wait);
+
+ atmel_port->irq_status_prev = status;
+ }
+
+ if (atmel_use_dma_rx(port))
+ atmel_rx_from_dma(port);
+ else
+ atmel_rx_from_ring(port);
+
+ spin_unlock(&port->lock);
+}
+
+/*
+ * Perform initialization and enable port for reception
+ */
+static int atmel_startup(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct tty_struct *tty = port->state->port.tty;
+ int retval;
+
+ /*
+ * Ensure that no interrupts are enabled otherwise when
+ * request_irq() is called we could get stuck trying to
+ * handle an unexpected interrupt
+ */
+ UART_PUT_IDR(port, -1);
+
+ /*
+ * Allocate the IRQ
+ */
+ retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED,
+ tty ? tty->name : "atmel_serial", port);
+ if (retval) {
+		printk(KERN_ERR "atmel_serial: atmel_startup - Can't get irq\n");
+ return retval;
+ }
+
+ /*
+ * Initialize DMA (if necessary)
+ */
+ if (atmel_use_dma_rx(port)) {
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
+
+ pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
+ if (pdc->buf == NULL) {
+ if (i != 0) {
+ dma_unmap_single(port->dev,
+ atmel_port->pdc_rx[0].dma_addr,
+ PDC_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ kfree(atmel_port->pdc_rx[0].buf);
+ }
+ free_irq(port->irq, port);
+ return -ENOMEM;
+ }
+ pdc->dma_addr = dma_map_single(port->dev,
+ pdc->buf,
+ PDC_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ pdc->dma_size = PDC_BUFFER_SIZE;
+ pdc->ofs = 0;
+ }
+
+ atmel_port->pdc_rx_idx = 0;
+
+ UART_PUT_RPR(port, atmel_port->pdc_rx[0].dma_addr);
+ UART_PUT_RCR(port, PDC_BUFFER_SIZE);
+
+ UART_PUT_RNPR(port, atmel_port->pdc_rx[1].dma_addr);
+ UART_PUT_RNCR(port, PDC_BUFFER_SIZE);
+ }
+ if (atmel_use_dma_tx(port)) {
+ struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ pdc->buf = xmit->buf;
+ pdc->dma_addr = dma_map_single(port->dev,
+ pdc->buf,
+ UART_XMIT_SIZE,
+ DMA_TO_DEVICE);
+ pdc->dma_size = UART_XMIT_SIZE;
+ pdc->ofs = 0;
+ }
+
+ /*
+ * If there is a specific "open" function (to register
+ * control line interrupts)
+ */
+ if (atmel_open_hook) {
+ retval = atmel_open_hook(port);
+ if (retval) {
+ free_irq(port->irq, port);
+ return retval;
+ }
+ }
+
+ /* Save current CSR for comparison in atmel_tasklet_func() */
+ atmel_port->irq_status_prev = UART_GET_CSR(port);
+ atmel_port->irq_status = atmel_port->irq_status_prev;
+
+ /*
+ * Finally, enable the serial port
+ */
+ UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
+ /* enable xmit & rcvr */
+ UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
+
+ if (atmel_use_dma_rx(port)) {
+ /* set UART timeout */
+ UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
+ UART_PUT_CR(port, ATMEL_US_STTTO);
+
+ UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
+ /* enable PDC controller */
+ UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
+ } else {
+ /* enable receive only */
+ UART_PUT_IER(port, ATMEL_US_RXRDY);
+ }
+
+ return 0;
+}
+
+/*
+ * Disable the port
+ */
+static void atmel_shutdown(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ /*
+ * Ensure everything is stopped.
+ */
+ atmel_stop_rx(port);
+ atmel_stop_tx(port);
+
+ /*
+ * Shut-down the DMA.
+ */
+ if (atmel_use_dma_rx(port)) {
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
+
+ dma_unmap_single(port->dev,
+ pdc->dma_addr,
+ pdc->dma_size,
+ DMA_FROM_DEVICE);
+ kfree(pdc->buf);
+ }
+ }
+ if (atmel_use_dma_tx(port)) {
+ struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
+
+ dma_unmap_single(port->dev,
+ pdc->dma_addr,
+ pdc->dma_size,
+ DMA_TO_DEVICE);
+ }
+
+ /*
+ * Disable all interrupts, port and break condition.
+ */
+ UART_PUT_CR(port, ATMEL_US_RSTSTA);
+ UART_PUT_IDR(port, -1);
+
+ /*
+ * Free the interrupt
+ */
+ free_irq(port->irq, port);
+
+ /*
+ * If there is a specific "close" function (to unregister
+ * control line interrupts)
+ */
+ if (atmel_close_hook)
+ atmel_close_hook(port);
+}
+
+/*
+ * Flush any TX data submitted for DMA. Called when the TX circular
+ * buffer is reset.
+ */
+static void atmel_flush_buffer(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ if (atmel_use_dma_tx(port)) {
+ UART_PUT_TCR(port, 0);
+ atmel_port->pdc_tx.ofs = 0;
+ }
+}
+
+/*
+ * Power / Clock management.
+ */
+static void atmel_serial_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ switch (state) {
+ case 0:
+ /*
+ * Enable the peripheral clock for this serial port.
+ * This is called on uart_open() or a resume event.
+ */
+ clk_enable(atmel_port->clk);
+
+ /* re-enable interrupts if we disabled some on suspend */
+ UART_PUT_IER(port, atmel_port->backup_imr);
+ break;
+ case 3:
+ /* Back up the interrupt mask and disable all interrupts */
+ atmel_port->backup_imr = UART_GET_IMR(port);
+ UART_PUT_IDR(port, -1);
+
+ /*
+ * Disable the peripheral clock for this serial port.
+ * This is called on uart_close() or a suspend event.
+ */
+ clk_disable(atmel_port->clk);
+ break;
+ default:
+ printk(KERN_ERR "atmel_serial: unknown pm %d\n", state);
+ }
+}
+
+/*
+ * Change the port parameters
+ */
+static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned int mode, imr, quot, baud;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ /* Get current mode register */
+ mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL
+ | ATMEL_US_NBSTOP | ATMEL_US_PAR
+ | ATMEL_US_USMODE);
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
+ quot = uart_get_divisor(port, baud);
+
+ if (quot > 65535) { /* BRGR is 16-bit, so switch to slower clock */
+ quot /= 8;
+ mode |= ATMEL_US_USCLKS_MCK_DIV8;
+ }
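+
+	/*
+	 * Illustrative example, assuming a 48 MHz USART clock: at 115200
+	 * baud, quot = 48000000 / (16 * 115200) ~= 26, which fits in the
+	 * 16-bit BRGR, so the MCK/8 fallback above is not needed.
+	 */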
+
+ /* byte size */
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ mode |= ATMEL_US_CHRL_5;
+ break;
+ case CS6:
+ mode |= ATMEL_US_CHRL_6;
+ break;
+ case CS7:
+ mode |= ATMEL_US_CHRL_7;
+ break;
+ default:
+ mode |= ATMEL_US_CHRL_8;
+ break;
+ }
+
+ /* stop bits */
+ if (termios->c_cflag & CSTOPB)
+ mode |= ATMEL_US_NBSTOP_2;
+
+ /* parity */
+ if (termios->c_cflag & PARENB) {
+ /* Mark or Space parity */
+ if (termios->c_cflag & CMSPAR) {
+ if (termios->c_cflag & PARODD)
+ mode |= ATMEL_US_PAR_MARK;
+ else
+ mode |= ATMEL_US_PAR_SPACE;
+ } else if (termios->c_cflag & PARODD)
+ mode |= ATMEL_US_PAR_ODD;
+ else
+ mode |= ATMEL_US_PAR_EVEN;
+ } else
+ mode |= ATMEL_US_PAR_NONE;
+
+ /* hardware handshake (RTS/CTS) */
+ if (termios->c_cflag & CRTSCTS)
+ mode |= ATMEL_US_USMODE_HWHS;
+ else
+ mode |= ATMEL_US_USMODE_NORMAL;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ port->read_status_mask = ATMEL_US_OVRE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= ATMEL_US_RXBRK;
+
+ if (atmel_use_dma_rx(port))
+ /* need to enable error interrupts */
+ UART_PUT_IER(port, port->read_status_mask);
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= ATMEL_US_RXBRK;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= ATMEL_US_OVRE;
+ }
+	/* TODO: Ignore all characters if CREAD is not set. */
+
+ /* update the per-port timeout */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ /*
+ * save/disable interrupts. The tty layer will ensure that the
+ * transmitter is empty if requested by the caller, so there's
+ * no need to wait for it here.
+ */
+ imr = UART_GET_IMR(port);
+ UART_PUT_IDR(port, -1);
+
+ /* disable receiver and transmitter */
+ UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
+
+ /* Resetting serial mode to RS232 (0x0) */
+ mode &= ~ATMEL_US_USMODE;
+
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
+ UART_PUT_TTGR(port,
+ atmel_port->rs485.delay_rts_after_send);
+ mode |= ATMEL_US_USMODE_RS485;
+ } else {
+ dev_dbg(port->dev, "Setting UART to RS232\n");
+ }
+
+ /* set the parity, stop bits and data size */
+ UART_PUT_MR(port, mode);
+
+ /* set the baud rate */
+ UART_PUT_BRGR(port, quot);
+ UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
+ UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
+
+ /* restore interrupts */
+ UART_PUT_IER(port, imr);
+
+ /* CTS flow-control and modem-status interrupts */
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+ port->ops->enable_ms(port);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void atmel_set_ldisc(struct uart_port *port, int new)
+{
+ int line = port->line;
+
+ if (line >= port->state->port.tty->driver->num)
+ return;
+
+ if (port->state->port.tty->ldisc->ops->num == N_PPS) {
+ port->flags |= UPF_HARDPPS_CD;
+ atmel_enable_ms(port);
+ } else {
+ port->flags &= ~UPF_HARDPPS_CD;
+ }
+}
+
+/*
+ * Return string describing the specified port
+ */
+static const char *atmel_type(struct uart_port *port)
+{
+ return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
+}
+
+/*
+ * Release the memory region(s) being used by 'port'.
+ */
+static void atmel_release_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ int size = pdev->resource[0].end - pdev->resource[0].start + 1;
+
+ release_mem_region(port->mapbase, size);
+
+ if (port->flags & UPF_IOREMAP) {
+ iounmap(port->membase);
+ port->membase = NULL;
+ }
+}
+
+/*
+ * Request the memory region(s) being used by 'port'.
+ */
+static int atmel_request_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ int size = pdev->resource[0].end - pdev->resource[0].start + 1;
+
+ if (!request_mem_region(port->mapbase, size, "atmel_serial"))
+ return -EBUSY;
+
+ if (port->flags & UPF_IOREMAP) {
+ port->membase = ioremap(port->mapbase, size);
+ if (port->membase == NULL) {
+ release_mem_region(port->mapbase, size);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void atmel_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_ATMEL;
+ atmel_request_port(port);
+ }
+}
+
+/*
+ * Verify the new serial_struct (for TIOCSSERIAL).
+ */
+static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ int ret = 0;
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
+ ret = -EINVAL;
+ if (port->irq != ser->irq)
+ ret = -EINVAL;
+ if (ser->io_type != SERIAL_IO_MEM)
+ ret = -EINVAL;
+ if (port->uartclk / 16 != ser->baud_base)
+ ret = -EINVAL;
+ if ((void *)port->mapbase != ser->iomem_base)
+ ret = -EINVAL;
+ if (port->iobase != ser->port)
+ ret = -EINVAL;
+ if (ser->hub6 != 0)
+ ret = -EINVAL;
+ return ret;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int atmel_poll_get_char(struct uart_port *port)
+{
+ while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY))
+ cpu_relax();
+
+ return UART_GET_CHAR(port);
+}
+
+static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
+{
+ while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
+ cpu_relax();
+
+ UART_PUT_CHAR(port, ch);
+}
+#endif
+
+static int
+atmel_ioctl(struct uart_port *port, unsigned int cmd, unsigned long arg)
+{
+ struct serial_rs485 rs485conf;
+
+ switch (cmd) {
+ case TIOCSRS485:
+ if (copy_from_user(&rs485conf, (struct serial_rs485 *) arg,
+ sizeof(rs485conf)))
+ return -EFAULT;
+
+ atmel_config_rs485(port, &rs485conf);
+ break;
+
+ case TIOCGRS485:
+ if (copy_to_user((struct serial_rs485 *) arg,
+ &(to_atmel_uart_port(port)->rs485),
+ sizeof(rs485conf)))
+ return -EFAULT;
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
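+
+/*
+ * Illustrative user-space sketch (not part of this driver) showing how the
+ * RS485 ioctls handled above are typically exercised; "fd" is assumed to be
+ * an already-open serial port file descriptor:
+ *
+ *	#include <sys/ioctl.h>
+ *	#include <linux/serial.h>
+ *
+ *	struct serial_rs485 conf = { .flags = SER_RS485_ENABLED };
+ *	ioctl(fd, TIOCSRS485, &conf);		// switch the port to RS485
+ *	ioctl(fd, TIOCGRS485, &conf);		// read the settings back
+ */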
+
+static struct uart_ops atmel_pops = {
+ .tx_empty = atmel_tx_empty,
+ .set_mctrl = atmel_set_mctrl,
+ .get_mctrl = atmel_get_mctrl,
+ .stop_tx = atmel_stop_tx,
+ .start_tx = atmel_start_tx,
+ .stop_rx = atmel_stop_rx,
+ .enable_ms = atmel_enable_ms,
+ .break_ctl = atmel_break_ctl,
+ .startup = atmel_startup,
+ .shutdown = atmel_shutdown,
+ .flush_buffer = atmel_flush_buffer,
+ .set_termios = atmel_set_termios,
+ .set_ldisc = atmel_set_ldisc,
+ .type = atmel_type,
+ .release_port = atmel_release_port,
+ .request_port = atmel_request_port,
+ .config_port = atmel_config_port,
+ .verify_port = atmel_verify_port,
+ .pm = atmel_serial_pm,
+ .ioctl = atmel_ioctl,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = atmel_poll_get_char,
+ .poll_put_char = atmel_poll_put_char,
+#endif
+};
+
+/*
+ * Configure the port from the platform device resource info.
+ */
+static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
+ struct platform_device *pdev)
+{
+ struct uart_port *port = &atmel_port->uart;
+ struct atmel_uart_data *data = pdev->dev.platform_data;
+
+ port->iotype = UPIO_MEM;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->ops = &atmel_pops;
+ port->fifosize = 1;
+ port->line = data->num;
+ port->dev = &pdev->dev;
+ port->mapbase = pdev->resource[0].start;
+ port->irq = pdev->resource[1].start;
+
+ tasklet_init(&atmel_port->tasklet, atmel_tasklet_func,
+ (unsigned long)port);
+
+ memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
+
+ if (data->regs)
+ /* Already mapped by setup code */
+ port->membase = data->regs;
+ else {
+ port->flags |= UPF_IOREMAP;
+ port->membase = NULL;
+ }
+
+ /* for console, the clock could already be configured */
+ if (!atmel_port->clk) {
+ atmel_port->clk = clk_get(&pdev->dev, "usart");
+ clk_enable(atmel_port->clk);
+ port->uartclk = clk_get_rate(atmel_port->clk);
+ clk_disable(atmel_port->clk);
+ /* only enable clock when USART is in use */
+ }
+
+ atmel_port->use_dma_rx = data->use_dma_rx;
+ atmel_port->use_dma_tx = data->use_dma_tx;
+ atmel_port->rs485 = data->rs485;
+	/* TX-done interrupt: TXEMPTY for RS485, otherwise TXRDY or ENDTX|TXBUFE */
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+ atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+ else if (atmel_use_dma_tx(port)) {
+ port->fifosize = PDC_BUFFER_SIZE;
+ atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
+ } else {
+ atmel_port->tx_done_mask = ATMEL_US_TXRDY;
+ }
+}
+
+/*
+ * Register board-specific modem-control line handlers.
+ */
+void __init atmel_register_uart_fns(struct atmel_port_fns *fns)
+{
+ if (fns->enable_ms)
+ atmel_pops.enable_ms = fns->enable_ms;
+ if (fns->get_mctrl)
+ atmel_pops.get_mctrl = fns->get_mctrl;
+ if (fns->set_mctrl)
+ atmel_pops.set_mctrl = fns->set_mctrl;
+ atmel_open_hook = fns->open;
+ atmel_close_hook = fns->close;
+ atmel_pops.pm = fns->pm;
+ atmel_pops.set_wake = fns->set_wake;
+}
+
+#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
+static void atmel_console_putchar(struct uart_port *port, int ch)
+{
+ while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
+ cpu_relax();
+ UART_PUT_CHAR(port, ch);
+}
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void atmel_console_write(struct console *co, const char *s, u_int count)
+{
+ struct uart_port *port = &atmel_ports[co->index].uart;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ unsigned int status, imr;
+ unsigned int pdc_tx;
+
+ /*
+ * First, save IMR and then disable interrupts
+ */
+ imr = UART_GET_IMR(port);
+ UART_PUT_IDR(port, ATMEL_US_RXRDY | atmel_port->tx_done_mask);
+
+ /* Store PDC transmit status and disable it */
+ pdc_tx = UART_GET_PTSR(port) & ATMEL_PDC_TXTEN;
+ UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
+
+ uart_console_write(port, s, count, atmel_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore IMR
+ */
+ do {
+ status = UART_GET_CSR(port);
+ } while (!(status & ATMEL_US_TXRDY));
+
+ /* Restore PDC transmit status */
+ if (pdc_tx)
+ UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
+
+ /* set interrupts back the way they were */
+ UART_PUT_IER(port, imr);
+}
+
+/*
+ * If the port was already initialised (eg, by a boot loader),
+ * try to determine the current setup.
+ */
+static void __init atmel_console_get_options(struct uart_port *port, int *baud,
+ int *parity, int *bits)
+{
+ unsigned int mr, quot;
+
+ /*
+ * If the baud rate generator isn't running, the port wasn't
+ * initialized by the boot loader.
+ */
+ quot = UART_GET_BRGR(port) & ATMEL_US_CD;
+ if (!quot)
+ return;
+
+ mr = UART_GET_MR(port) & ATMEL_US_CHRL;
+ if (mr == ATMEL_US_CHRL_8)
+ *bits = 8;
+ else
+ *bits = 7;
+
+ mr = UART_GET_MR(port) & ATMEL_US_PAR;
+ if (mr == ATMEL_US_PAR_EVEN)
+ *parity = 'e';
+ else if (mr == ATMEL_US_PAR_ODD)
+ *parity = 'o';
+
+ /*
+ * The serial core only rounds down when matching this to a
+ * supported baud rate. Make sure we don't end up slightly
+ * lower than one of those, as it would make us fall through
+ * to a much lower baud rate than we really want.
+ */
+ *baud = port->uartclk / (16 * (quot - 1));
+}
+
+static int __init atmel_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port = &atmel_ports[co->index].uart;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (port->membase == NULL) {
+ /* Port not initialized yet - delay setup */
+ return -ENODEV;
+ }
+
+ clk_enable(atmel_ports[co->index].clk);
+
+ UART_PUT_IDR(port, -1);
+ UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
+ UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ atmel_console_get_options(port, &baud, &parity, &bits);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver atmel_uart;
+
+static struct console atmel_console = {
+ .name = ATMEL_DEVICENAME,
+ .write = atmel_console_write,
+ .device = uart_console_device,
+ .setup = atmel_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &atmel_uart,
+};
+
+#define ATMEL_CONSOLE_DEVICE (&atmel_console)
+
+/*
+ * Early console initialization (before VM subsystem initialized).
+ */
+static int __init atmel_console_init(void)
+{
+ if (atmel_default_console_device) {
+ add_preferred_console(ATMEL_DEVICENAME,
+ atmel_default_console_device->id, NULL);
+ atmel_init_port(&atmel_ports[atmel_default_console_device->id],
+ atmel_default_console_device);
+ register_console(&atmel_console);
+ }
+
+ return 0;
+}
+
+console_initcall(atmel_console_init);
+
+/*
+ * Late console initialization.
+ */
+static int __init atmel_late_console_init(void)
+{
+ if (atmel_default_console_device
+ && !(atmel_console.flags & CON_ENABLED))
+ register_console(&atmel_console);
+
+ return 0;
+}
+
+core_initcall(atmel_late_console_init);
+
+static inline bool atmel_is_console_port(struct uart_port *port)
+{
+ return port->cons && port->cons->index == port->line;
+}
+
+#else
+#define ATMEL_CONSOLE_DEVICE NULL
+
+static inline bool atmel_is_console_port(struct uart_port *port)
+{
+ return false;
+}
+#endif
+
+static struct uart_driver atmel_uart = {
+ .owner = THIS_MODULE,
+ .driver_name = "atmel_serial",
+ .dev_name = ATMEL_DEVICENAME,
+ .major = SERIAL_ATMEL_MAJOR,
+ .minor = MINOR_START,
+ .nr = ATMEL_MAX_UART,
+ .cons = ATMEL_CONSOLE_DEVICE,
+};
+
+#ifdef CONFIG_PM
+static bool atmel_serial_clk_will_stop(void)
+{
+#ifdef CONFIG_ARCH_AT91
+ return at91_suspend_entering_slow_clock();
+#else
+ return false;
+#endif
+}
+
+static int atmel_serial_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ if (atmel_is_console_port(port) && console_suspend_enabled) {
+ /* Drain the TX shifter */
+ while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY))
+ cpu_relax();
+ }
+
+ /* we can not wake up if we're running on slow clock */
+ atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
+ if (atmel_serial_clk_will_stop())
+ device_set_wakeup_enable(&pdev->dev, 0);
+
+ uart_suspend_port(&atmel_uart, port);
+
+ return 0;
+}
+
+static int atmel_serial_resume(struct platform_device *pdev)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ uart_resume_port(&atmel_uart, port);
+ device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
+
+ return 0;
+}
+#else
+#define atmel_serial_suspend NULL
+#define atmel_serial_resume NULL
+#endif
+
+static int __devinit atmel_serial_probe(struct platform_device *pdev)
+{
+ struct atmel_uart_port *port;
+ struct atmel_uart_data *pdata = pdev->dev.platform_data;
+ void *data;
+ int ret;
+
+ BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
+
+ port = &atmel_ports[pdata->num];
+ port->backup_imr = 0;
+
+ atmel_init_port(port, pdev);
+
+ if (!atmel_use_dma_rx(&port->uart)) {
+ ret = -ENOMEM;
+ data = kmalloc(sizeof(struct atmel_uart_char)
+ * ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
+ if (!data)
+ goto err_alloc_ring;
+ port->rx_ring.buf = data;
+ }
+
+ ret = uart_add_one_port(&atmel_uart, &port->uart);
+ if (ret)
+ goto err_add_port;
+
+#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
+ if (atmel_is_console_port(&port->uart)
+ && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
+ /*
+ * The serial core enabled the clock for us, so undo
+ * the clk_enable() in atmel_console_setup()
+ */
+ clk_disable(port->clk);
+ }
+#endif
+
+ device_init_wakeup(&pdev->dev, 1);
+ platform_set_drvdata(pdev, port);
+
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
+ UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
+ }
+
+ return 0;
+
+err_add_port:
+ kfree(port->rx_ring.buf);
+ port->rx_ring.buf = NULL;
+err_alloc_ring:
+ if (!atmel_is_console_port(&port->uart)) {
+ clk_put(port->clk);
+ port->clk = NULL;
+ }
+
+ return ret;
+}
+
+static int __devexit atmel_serial_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ int ret = 0;
+
+ device_init_wakeup(&pdev->dev, 0);
+ platform_set_drvdata(pdev, NULL);
+
+ ret = uart_remove_one_port(&atmel_uart, port);
+
+ tasklet_kill(&atmel_port->tasklet);
+ kfree(atmel_port->rx_ring.buf);
+
+ /* "port" is allocated statically, so we shouldn't free it */
+
+ clk_put(atmel_port->clk);
+
+ return ret;
+}
+
+static struct platform_driver atmel_serial_driver = {
+ .probe = atmel_serial_probe,
+ .remove = __devexit_p(atmel_serial_remove),
+ .suspend = atmel_serial_suspend,
+ .resume = atmel_serial_resume,
+ .driver = {
+ .name = "atmel_usart",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init atmel_serial_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&atmel_uart);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&atmel_serial_driver);
+ if (ret)
+ uart_unregister_driver(&atmel_uart);
+
+ return ret;
+}
+
+static void __exit atmel_serial_exit(void)
+{
+ platform_driver_unregister(&atmel_serial_driver);
+ uart_unregister_driver(&atmel_uart);
+}
+
+module_init(atmel_serial_init);
+module_exit(atmel_serial_exit);
+
+MODULE_AUTHOR("Rick Bronson");
+MODULE_DESCRIPTION("Atmel AT91 / AT32 serial port driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:atmel_usart");
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
new file mode 100644
index 00000000..c0b68b9c
--- /dev/null
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -0,0 +1,901 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from many drivers using generic_serial interface.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ *
+ * Serial driver for BCM63xx integrated UART.
+ *
+ * Hardware flow control was _not_ tested since I only have RX/TX on
+ * my board.
+ */
+
+#if defined(CONFIG_SERIAL_BCM63XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/clk.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/sysrq.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+
+#include <bcm63xx_clk.h>
+#include <bcm63xx_irq.h>
+#include <bcm63xx_regs.h>
+#include <bcm63xx_io.h>
+
+#define BCM63XX_NR_UARTS 2
+
+static struct uart_port ports[BCM63XX_NR_UARTS];
+
+/*
+ * rx interrupt mask / stat
+ *
+ * mask:
+ * - rx fifo full
+ * - rx fifo above threshold
+ * - rx fifo not empty for too long
+ */
+#define UART_RX_INT_MASK (UART_IR_MASK(UART_IR_RXOVER) | \
+ UART_IR_MASK(UART_IR_RXTHRESH) | \
+ UART_IR_MASK(UART_IR_RXTIMEOUT))
+
+#define UART_RX_INT_STAT (UART_IR_STAT(UART_IR_RXOVER) | \
+ UART_IR_STAT(UART_IR_RXTHRESH) | \
+ UART_IR_STAT(UART_IR_RXTIMEOUT))
+
+/*
+ * tx interrupt mask / stat
+ *
+ * mask:
+ * - tx fifo empty
+ * - tx fifo below threshold
+ */
+#define UART_TX_INT_MASK (UART_IR_MASK(UART_IR_TXEMPTY) | \
+ UART_IR_MASK(UART_IR_TXTRESH))
+
+#define UART_TX_INT_STAT (UART_IR_STAT(UART_IR_TXEMPTY) | \
+ UART_IR_STAT(UART_IR_TXTRESH))
+
+/*
+ * external input interrupt
+ *
+ * mask: any edge on CTS, DCD
+ */
+#define UART_EXTINP_INT_MASK (UART_EXTINP_IRMASK(UART_EXTINP_IR_CTS) | \
+ UART_EXTINP_IRMASK(UART_EXTINP_IR_DCD))
+
+/*
+ * handy uart register accessors
+ */
+static inline unsigned int bcm_uart_readl(struct uart_port *port,
+ unsigned int offset)
+{
+ return bcm_readl(port->membase + offset);
+}
+
+static inline void bcm_uart_writel(struct uart_port *port,
+ unsigned int value, unsigned int offset)
+{
+ bcm_writel(value, port->membase + offset);
+}
+
+/*
+ * serial core request to check if uart tx fifo is empty
+ */
+static unsigned int bcm_uart_tx_empty(struct uart_port *port)
+{
+ unsigned int val;
+
+ val = bcm_uart_readl(port, UART_IR_REG);
+ return (val & UART_IR_STAT(UART_IR_TXEMPTY)) ? 1 : 0;
+}
+
+/*
+ * serial core request to set RTS and DTR pin state and loopback mode
+ */
+static void bcm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ unsigned int val;
+
+ val = bcm_uart_readl(port, UART_MCTL_REG);
+ val &= ~(UART_MCTL_DTR_MASK | UART_MCTL_RTS_MASK);
+	/* the inverse of the written value is reflected on the pin */
+ if (!(mctrl & TIOCM_DTR))
+ val |= UART_MCTL_DTR_MASK;
+ if (!(mctrl & TIOCM_RTS))
+ val |= UART_MCTL_RTS_MASK;
+ bcm_uart_writel(port, val, UART_MCTL_REG);
+
+ val = bcm_uart_readl(port, UART_CTL_REG);
+ if (mctrl & TIOCM_LOOP)
+ val |= UART_CTL_LOOPBACK_MASK;
+ else
+ val &= ~UART_CTL_LOOPBACK_MASK;
+ bcm_uart_writel(port, val, UART_CTL_REG);
+}
+
+/*
+ * serial core request to return RI, CTS, DCD and DSR pin state
+ */
+static unsigned int bcm_uart_get_mctrl(struct uart_port *port)
+{
+ unsigned int val, mctrl;
+
+ mctrl = 0;
+ val = bcm_uart_readl(port, UART_EXTINP_REG);
+ if (val & UART_EXTINP_RI_MASK)
+ mctrl |= TIOCM_RI;
+ if (val & UART_EXTINP_CTS_MASK)
+ mctrl |= TIOCM_CTS;
+ if (val & UART_EXTINP_DCD_MASK)
+ mctrl |= TIOCM_CD;
+ if (val & UART_EXTINP_DSR_MASK)
+ mctrl |= TIOCM_DSR;
+ return mctrl;
+}
+
+/*
+ * serial core request to disable tx ASAP (used for flow control)
+ */
+static void bcm_uart_stop_tx(struct uart_port *port)
+{
+ unsigned int val;
+
+ val = bcm_uart_readl(port, UART_CTL_REG);
+ val &= ~(UART_CTL_TXEN_MASK);
+ bcm_uart_writel(port, val, UART_CTL_REG);
+
+ val = bcm_uart_readl(port, UART_IR_REG);
+ val &= ~UART_TX_INT_MASK;
+ bcm_uart_writel(port, val, UART_IR_REG);
+}
+
+/*
+ * serial core request to (re)enable tx
+ */
+static void bcm_uart_start_tx(struct uart_port *port)
+{
+ unsigned int val;
+
+ val = bcm_uart_readl(port, UART_IR_REG);
+ val |= UART_TX_INT_MASK;
+ bcm_uart_writel(port, val, UART_IR_REG);
+
+ val = bcm_uart_readl(port, UART_CTL_REG);
+ val |= UART_CTL_TXEN_MASK;
+ bcm_uart_writel(port, val, UART_CTL_REG);
+}
+
+/*
+ * serial core request to stop rx, called before port shutdown
+ */
+static void bcm_uart_stop_rx(struct uart_port *port)
+{
+ unsigned int val;
+
+ val = bcm_uart_readl(port, UART_IR_REG);
+ val &= ~UART_RX_INT_MASK;
+ bcm_uart_writel(port, val, UART_IR_REG);
+}
+
+/*
+ * serial core request to enable modem status interrupt reporting
+ */
+static void bcm_uart_enable_ms(struct uart_port *port)
+{
+ unsigned int val;
+
+ val = bcm_uart_readl(port, UART_IR_REG);
+ val |= UART_IR_MASK(UART_IR_EXTIP);
+ bcm_uart_writel(port, val, UART_IR_REG);
+}
+
+/*
+ * serial core request to start/stop emitting break char
+ */
+static void bcm_uart_break_ctl(struct uart_port *port, int ctl)
+{
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ val = bcm_uart_readl(port, UART_CTL_REG);
+ if (ctl)
+ val |= UART_CTL_XMITBRK_MASK;
+ else
+ val &= ~UART_CTL_XMITBRK_MASK;
+ bcm_uart_writel(port, val, UART_CTL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/*
+ * return port type in string format
+ */
+static const char *bcm_uart_type(struct uart_port *port)
+{
+ return (port->type == PORT_BCM63XX) ? "bcm63xx_uart" : NULL;
+}
+
+/*
+ * read all chars in rx fifo and send them to core
+ */
+static void bcm_uart_do_rx(struct uart_port *port)
+{
+ struct tty_struct *tty;
+ unsigned int max_count;
+
+	/* limit the number of chars read per interrupt; it should not be
+	 * higher than the fifo size anyway since we're much faster than
+	 * the serial port */
+ max_count = 32;
+ tty = port->state->port.tty;
+ do {
+ unsigned int iestat, c, cstat;
+ char flag;
+
+		/* get overrun/fifo-empty information from the IR
+		 * register */
+ iestat = bcm_uart_readl(port, UART_IR_REG);
+
+ if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
+ unsigned int val;
+
+ /* fifo reset is required to clear
+ * interrupt */
+ val = bcm_uart_readl(port, UART_CTL_REG);
+ val |= UART_CTL_RSTRXFIFO_MASK;
+ bcm_uart_writel(port, val, UART_CTL_REG);
+
+ port->icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+
+ if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
+ break;
+
+ cstat = c = bcm_uart_readl(port, UART_FIFO_REG);
+ port->icount.rx++;
+ flag = TTY_NORMAL;
+ c &= 0xff;
+
+ if (unlikely((cstat & UART_FIFO_ANYERR_MASK))) {
+ /* do stats first */
+ if (cstat & UART_FIFO_BRKDET_MASK) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ }
+
+ if (cstat & UART_FIFO_PARERR_MASK)
+ port->icount.parity++;
+ if (cstat & UART_FIFO_FRAMEERR_MASK)
+ port->icount.frame++;
+
+ /* update flag wrt read_status_mask */
+ cstat &= port->read_status_mask;
+ if (cstat & UART_FIFO_BRKDET_MASK)
+ flag = TTY_BREAK;
+ if (cstat & UART_FIFO_FRAMEERR_MASK)
+ flag = TTY_FRAME;
+ if (cstat & UART_FIFO_PARERR_MASK)
+ flag = TTY_PARITY;
+ }
+
+ if (uart_handle_sysrq_char(port, c))
+ continue;
+
+
+ if ((cstat & port->ignore_status_mask) == 0)
+ tty_insert_flip_char(tty, c, flag);
+
+ } while (--max_count);
+
+ tty_flip_buffer_push(tty);
+}
+
+/*
+ * fill tx fifo with chars to send, stop when fifo is about to be full
+ * or when all chars have been sent.
+ */
+static void bcm_uart_do_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit;
+ unsigned int val, max_count;
+
+ if (port->x_char) {
+ bcm_uart_writel(port, port->x_char, UART_FIFO_REG);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if (uart_tx_stopped(port)) {
+ bcm_uart_stop_tx(port);
+ return;
+ }
+
+ xmit = &port->state->xmit;
+ if (uart_circ_empty(xmit))
+ goto txq_empty;
+
+ val = bcm_uart_readl(port, UART_MCTL_REG);
+ val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT;
+ max_count = port->fifosize - val;
+
+ while (max_count--) {
+ unsigned int c;
+
+ c = xmit->buf[xmit->tail];
+ bcm_uart_writel(port, c, UART_FIFO_REG);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ goto txq_empty;
+ return;
+
+txq_empty:
+ /* nothing to send, disable transmit interrupt */
+ val = bcm_uart_readl(port, UART_IR_REG);
+ val &= ~UART_TX_INT_MASK;
+ bcm_uart_writel(port, val, UART_IR_REG);
+ return;
+}
+
+/*
+ * process uart interrupt
+ */
+static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
+{
+ struct uart_port *port;
+ unsigned int irqstat;
+
+ port = dev_id;
+ spin_lock(&port->lock);
+
+ irqstat = bcm_uart_readl(port, UART_IR_REG);
+ if (irqstat & UART_RX_INT_STAT)
+ bcm_uart_do_rx(port);
+
+ if (irqstat & UART_TX_INT_STAT)
+ bcm_uart_do_tx(port);
+
+ if (irqstat & UART_IR_MASK(UART_IR_EXTIP)) {
+ unsigned int estat;
+
+ estat = bcm_uart_readl(port, UART_EXTINP_REG);
+ if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_CTS))
+ uart_handle_cts_change(port,
+ estat & UART_EXTINP_CTS_MASK);
+ if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_DCD))
+ uart_handle_dcd_change(port,
+ estat & UART_EXTINP_DCD_MASK);
+ }
+
+ spin_unlock(&port->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * enable rx & tx operation on uart
+ */
+static void bcm_uart_enable(struct uart_port *port)
+{
+ unsigned int val;
+
+ val = bcm_uart_readl(port, UART_CTL_REG);
+ val |= (UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK | UART_CTL_RXEN_MASK);
+ bcm_uart_writel(port, val, UART_CTL_REG);
+}
+
+/*
+ * disable rx & tx operation on uart
+ */
+static void bcm_uart_disable(struct uart_port *port)
+{
+ unsigned int val;
+
+ val = bcm_uart_readl(port, UART_CTL_REG);
+ val &= ~(UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK |
+ UART_CTL_RXEN_MASK);
+ bcm_uart_writel(port, val, UART_CTL_REG);
+}
+
+/*
+ * clear all unread data in rx fifo and unsent data in tx fifo
+ */
+static void bcm_uart_flush(struct uart_port *port)
+{
+ unsigned int val;
+
+ /* empty rx and tx fifo */
+ val = bcm_uart_readl(port, UART_CTL_REG);
+ val |= UART_CTL_RSTRXFIFO_MASK | UART_CTL_RSTTXFIFO_MASK;
+ bcm_uart_writel(port, val, UART_CTL_REG);
+
+	/* read any pending char to make sure all irq status bits are
+	 * cleared */
+ (void)bcm_uart_readl(port, UART_FIFO_REG);
+}
+
+/*
+ * serial core request to initialize uart and start rx operation
+ */
+static int bcm_uart_startup(struct uart_port *port)
+{
+ unsigned int val;
+ int ret;
+
+ /* mask all irq and flush port */
+ bcm_uart_disable(port);
+ bcm_uart_writel(port, 0, UART_IR_REG);
+ bcm_uart_flush(port);
+
+ /* clear any pending external input interrupt */
+ (void)bcm_uart_readl(port, UART_EXTINP_REG);
+
+ /* set rx/tx fifo thresh to fifo half size */
+ val = bcm_uart_readl(port, UART_MCTL_REG);
+ val &= ~(UART_MCTL_RXFIFOTHRESH_MASK | UART_MCTL_TXFIFOTHRESH_MASK);
+ val |= (port->fifosize / 2) << UART_MCTL_RXFIFOTHRESH_SHIFT;
+ val |= (port->fifosize / 2) << UART_MCTL_TXFIFOTHRESH_SHIFT;
+ bcm_uart_writel(port, val, UART_MCTL_REG);
+
+ /* set rx fifo timeout to 1 char time */
+ val = bcm_uart_readl(port, UART_CTL_REG);
+ val &= ~UART_CTL_RXTMOUTCNT_MASK;
+ val |= 1 << UART_CTL_RXTMOUTCNT_SHIFT;
+ bcm_uart_writel(port, val, UART_CTL_REG);
+
+ /* report any edge on dcd and cts */
+ val = UART_EXTINP_INT_MASK;
+ val |= UART_EXTINP_DCD_NOSENSE_MASK;
+ val |= UART_EXTINP_CTS_NOSENSE_MASK;
+ bcm_uart_writel(port, val, UART_EXTINP_REG);
+
+ /* register irq and enable rx interrupts */
+ ret = request_irq(port->irq, bcm_uart_interrupt, 0,
+ bcm_uart_type(port), port);
+ if (ret)
+ return ret;
+ bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG);
+ bcm_uart_enable(port);
+ return 0;
+}
+
+/*
+ * serial core request to flush & disable uart
+ */
+static void bcm_uart_shutdown(struct uart_port *port)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ bcm_uart_writel(port, 0, UART_IR_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ bcm_uart_disable(port);
+ bcm_uart_flush(port);
+ free_irq(port->irq, port);
+}
+
+/*
+ * serial core request to change current uart setting
+ */
+static void bcm_uart_set_termios(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
+{
+ unsigned int ctl, baud, quot, ier;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* disable uart while changing speed */
+ bcm_uart_disable(port);
+ bcm_uart_flush(port);
+
+ /* update Control register */
+ ctl = bcm_uart_readl(port, UART_CTL_REG);
+ ctl &= ~UART_CTL_BITSPERSYM_MASK;
+
+ switch (new->c_cflag & CSIZE) {
+ case CS5:
+ ctl |= (0 << UART_CTL_BITSPERSYM_SHIFT);
+ break;
+ case CS6:
+ ctl |= (1 << UART_CTL_BITSPERSYM_SHIFT);
+ break;
+ case CS7:
+ ctl |= (2 << UART_CTL_BITSPERSYM_SHIFT);
+ break;
+ default:
+ ctl |= (3 << UART_CTL_BITSPERSYM_SHIFT);
+ break;
+ }
+
+ ctl &= ~UART_CTL_STOPBITS_MASK;
+ if (new->c_cflag & CSTOPB)
+ ctl |= UART_CTL_STOPBITS_2;
+ else
+ ctl |= UART_CTL_STOPBITS_1;
+
+ ctl &= ~(UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK);
+ if (new->c_cflag & PARENB)
+ ctl |= (UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK);
+ ctl &= ~(UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK);
+ if (new->c_cflag & PARODD)
+ ctl |= (UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK);
+ bcm_uart_writel(port, ctl, UART_CTL_REG);
+
+ /* update Baudword register */
+ baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
+ quot = uart_get_divisor(port, baud) - 1;
+ bcm_uart_writel(port, quot, UART_BAUD_REG);
+
+ /* update Interrupt register */
+ ier = bcm_uart_readl(port, UART_IR_REG);
+
+ ier &= ~UART_IR_MASK(UART_IR_EXTIP);
+ if (UART_ENABLE_MS(port, new->c_cflag))
+ ier |= UART_IR_MASK(UART_IR_EXTIP);
+
+ bcm_uart_writel(port, ier, UART_IR_REG);
+
+ /* update read/ignore mask */
+ port->read_status_mask = UART_FIFO_VALID_MASK;
+ if (new->c_iflag & INPCK) {
+ port->read_status_mask |= UART_FIFO_FRAMEERR_MASK;
+ port->read_status_mask |= UART_FIFO_PARERR_MASK;
+ }
+ if (new->c_iflag & (BRKINT))
+ port->read_status_mask |= UART_FIFO_BRKDET_MASK;
+
+ port->ignore_status_mask = 0;
+ if (new->c_iflag & IGNPAR)
+ port->ignore_status_mask |= UART_FIFO_PARERR_MASK;
+ if (new->c_iflag & IGNBRK)
+ port->ignore_status_mask |= UART_FIFO_BRKDET_MASK;
+ if (!(new->c_cflag & CREAD))
+ port->ignore_status_mask |= UART_FIFO_VALID_MASK;
+
+ uart_update_timeout(port, new->c_cflag, baud);
+ bcm_uart_enable(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/*
+ * serial core request to claim uart iomem
+ */
+static int bcm_uart_request_port(struct uart_port *port)
+{
+ unsigned int size;
+
+ size = RSET_UART_SIZE;
+ if (!request_mem_region(port->mapbase, size, "bcm63xx")) {
+ dev_err(port->dev, "Memory region busy\n");
+ return -EBUSY;
+ }
+
+ port->membase = ioremap(port->mapbase, size);
+ if (!port->membase) {
+ dev_err(port->dev, "Unable to map registers\n");
+ release_mem_region(port->mapbase, size);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/*
+ * serial core request to release uart iomem
+ */
+static void bcm_uart_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase, RSET_UART_SIZE);
+ iounmap(port->membase);
+}
+
+/*
+ * serial core request to do any port required autoconfiguration
+ */
+static void bcm_uart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ if (bcm_uart_request_port(port))
+ return;
+ port->type = PORT_BCM63XX;
+ }
+}
+
+/*
+ * serial core request to check that the port information in serinfo is
+ * suitable
+ */
+static int bcm_uart_verify_port(struct uart_port *port,
+ struct serial_struct *serinfo)
+{
+ if (port->type != PORT_BCM63XX)
+ return -EINVAL;
+ if (port->irq != serinfo->irq)
+ return -EINVAL;
+ if (port->iotype != serinfo->io_type)
+ return -EINVAL;
+ if (port->mapbase != (unsigned long)serinfo->iomem_base)
+ return -EINVAL;
+ return 0;
+}
+
+/* serial core callbacks */
+static struct uart_ops bcm_uart_ops = {
+ .tx_empty = bcm_uart_tx_empty,
+ .get_mctrl = bcm_uart_get_mctrl,
+ .set_mctrl = bcm_uart_set_mctrl,
+ .start_tx = bcm_uart_start_tx,
+ .stop_tx = bcm_uart_stop_tx,
+ .stop_rx = bcm_uart_stop_rx,
+ .enable_ms = bcm_uart_enable_ms,
+ .break_ctl = bcm_uart_break_ctl,
+ .startup = bcm_uart_startup,
+ .shutdown = bcm_uart_shutdown,
+ .set_termios = bcm_uart_set_termios,
+ .type = bcm_uart_type,
+ .release_port = bcm_uart_release_port,
+ .request_port = bcm_uart_request_port,
+ .config_port = bcm_uart_config_port,
+ .verify_port = bcm_uart_verify_port,
+};
+
+
+
+#ifdef CONFIG_SERIAL_BCM63XX_CONSOLE
+static inline void wait_for_xmitr(struct uart_port *port)
+{
+ unsigned int tmout;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ tmout = 10000;
+ while (--tmout) {
+ unsigned int val;
+
+ val = bcm_uart_readl(port, UART_IR_REG);
+ if (val & UART_IR_STAT(UART_IR_TXEMPTY))
+ break;
+ udelay(1);
+ }
+
+ /* Wait up to 1s for flow control if necessary */
+ if (port->flags & UPF_CONS_FLOW) {
+ tmout = 1000000;
+ while (--tmout) {
+ unsigned int val;
+
+ val = bcm_uart_readl(port, UART_EXTINP_REG);
+ if (val & UART_EXTINP_CTS_MASK)
+ break;
+ udelay(1);
+ }
+ }
+}
+
+/*
+ * output given char
+ */
+static void bcm_console_putchar(struct uart_port *port, int ch)
+{
+ wait_for_xmitr(port);
+ bcm_uart_writel(port, ch, UART_FIFO_REG);
+}
+
+/*
+ * console core request to output given string
+ */
+static void bcm_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port;
+ unsigned long flags;
+ int locked;
+
+ port = &ports[co->index];
+
+ local_irq_save(flags);
+ if (port->sysrq) {
+ /* bcm_uart_interrupt() already took the lock */
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&port->lock);
+ } else {
+ spin_lock(&port->lock);
+ locked = 1;
+ }
+
+ /* call helper to deal with \r\n */
+ uart_console_write(port, s, count, bcm_console_putchar);
+
+ /* and wait for char to be transmitted */
+ wait_for_xmitr(port);
+
+ if (locked)
+ spin_unlock(&port->lock);
+ local_irq_restore(flags);
+}
+
+/*
+ * console core request to setup given console, find matching uart
+ * port and set it up.
+ */
+static int bcm_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index < 0 || co->index >= BCM63XX_NR_UARTS)
+ return -EINVAL;
+ port = &ports[co->index];
+ if (!port->membase)
+ return -ENODEV;
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver bcm_uart_driver;
+
+static struct console bcm63xx_console = {
+ .name = "ttyS",
+ .write = bcm_console_write,
+ .device = uart_console_device,
+ .setup = bcm_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &bcm_uart_driver,
+};
+
+static int __init bcm63xx_console_init(void)
+{
+ register_console(&bcm63xx_console);
+ return 0;
+}
+
+console_initcall(bcm63xx_console_init);
+
+#define BCM63XX_CONSOLE (&bcm63xx_console)
+#else
+#define BCM63XX_CONSOLE NULL
+#endif /* CONFIG_SERIAL_BCM63XX_CONSOLE */
+
+static struct uart_driver bcm_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "bcm63xx_uart",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .nr = BCM63XX_NR_UARTS,
+ .cons = BCM63XX_CONSOLE,
+};
+
+/*
+ * platform driver probe/remove callback
+ */
+static int __devinit bcm_uart_probe(struct platform_device *pdev)
+{
+ struct resource *res_mem, *res_irq;
+ struct uart_port *port;
+ struct clk *clk;
+ int ret;
+
+ if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS)
+ return -EINVAL;
+
+ if (ports[pdev->id].membase)
+ return -EBUSY;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_mem)
+ return -ENODEV;
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ return -ENODEV;
+
+ clk = clk_get(&pdev->dev, "periph");
+ if (IS_ERR(clk))
+ return -ENODEV;
+
+ port = &ports[pdev->id];
+ memset(port, 0, sizeof(*port));
+ port->iotype = UPIO_MEM;
+ port->mapbase = res_mem->start;
+ port->irq = res_irq->start;
+ port->ops = &bcm_uart_ops;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->dev = &pdev->dev;
+ port->fifosize = 16;
+ port->uartclk = clk_get_rate(clk) / 2;
+ port->line = pdev->id;
+ clk_put(clk);
+
+ ret = uart_add_one_port(&bcm_uart_driver, port);
+ if (ret) {
+ ports[pdev->id].membase = NULL;
+ return ret;
+ }
+ platform_set_drvdata(pdev, port);
+ return 0;
+}
+
+static int __devexit bcm_uart_remove(struct platform_device *pdev)
+{
+ struct uart_port *port;
+
+ port = platform_get_drvdata(pdev);
+ uart_remove_one_port(&bcm_uart_driver, port);
+ platform_set_drvdata(pdev, NULL);
+ /* mark port as free */
+ ports[pdev->id].membase = NULL;
+ return 0;
+}
+
+/*
+ * platform driver stuff
+ */
+static struct platform_driver bcm_uart_platform_driver = {
+ .probe = bcm_uart_probe,
+ .remove = __devexit_p(bcm_uart_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bcm63xx_uart",
+ },
+};
+
+static int __init bcm_uart_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&bcm_uart_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&bcm_uart_platform_driver);
+ if (ret)
+ uart_unregister_driver(&bcm_uart_driver);
+
+ return ret;
+}
+
+static void __exit bcm_uart_exit(void)
+{
+ platform_driver_unregister(&bcm_uart_platform_driver);
+ uart_unregister_driver(&bcm_uart_driver);
+}
+
+module_init(bcm_uart_init);
+module_exit(bcm_uart_exit);
+
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_DESCRIPTION("Broadcom 63<xx integrated uart driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/bfin_5xx.c b/drivers/tty/serial/bfin_5xx.c
new file mode 100644
index 00000000..9b1ff2b6
--- /dev/null
+++ b/drivers/tty/serial/bfin_5xx.c
@@ -0,0 +1,1597 @@
+/*
+ * Blackfin On-Chip Serial Driver
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#define DRIVER_NAME "bfin-uart"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/kgdb.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/portmux.h>
+#include <asm/cacheflush.h>
+#include <asm/dma.h>
+
+#define port_membase(uart) (((struct bfin_serial_port *)(uart))->port.membase)
+#define get_lsr_cache(uart) (((struct bfin_serial_port *)(uart))->lsr)
+#define put_lsr_cache(uart, v) (((struct bfin_serial_port *)(uart))->lsr = (v))
+#include <asm/bfin_serial.h>
+
+#ifdef CONFIG_SERIAL_BFIN_MODULE
+# undef CONFIG_EARLY_PRINTK
+#endif
+
+/* UART name and device definitions */
+#define BFIN_SERIAL_DEV_NAME "ttyBF"
+#define BFIN_SERIAL_MAJOR 204
+#define BFIN_SERIAL_MINOR 64
+
+static struct bfin_serial_port *bfin_serial_ports[BFIN_UART_NR_PORTS];
+
+#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
+ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
+
+# ifndef CONFIG_SERIAL_BFIN_PIO
+# error KGDB only supports UART in PIO mode.
+# endif
+
+static int kgdboc_port_line;
+static int kgdboc_break_enabled;
+#endif
+/*
+ * Setup for console. Argument comes from the menuconfig
+ */
+#define DMA_RX_XCOUNT 512
+#define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT)
+
+#define DMA_RX_FLUSH_JIFFIES (HZ / 50)
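+/*
+ * Rough sketch of the RX DMA geometry, assuming PAGE_SIZE is 4096:
+ * the coherent buffer is split into DMA_RX_YCOUNT = 8 rows of
+ * DMA_RX_XCOUNT = 512 bytes, and the flush timer fires every HZ/50
+ * jiffies (about 20 ms) to pick up partially filled rows.
+ */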
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart);
+#else
+static void bfin_serial_tx_chars(struct bfin_serial_port *uart);
+#endif
+
+static void bfin_serial_reset_irda(struct uart_port *port);
+
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
+ defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+static unsigned int bfin_serial_get_mctrl(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ if (uart->cts_pin < 0)
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+
+ /* The CTS pin is active-low. */
+ if (UART_GET_CTS(uart))
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+ else
+ return TIOCM_DSR | TIOCM_CAR;
+}
+
+static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ if (uart->rts_pin < 0)
+ return;
+
+ /* The RTS pin is active-low. */
+ if (mctrl & TIOCM_RTS)
+ UART_ENABLE_RTS(uart);
+ else
+ UART_DISABLE_RTS(uart);
+}
+
+/*
+ * Handle any change of modem status signal.
+ */
+static irqreturn_t bfin_serial_mctrl_cts_int(int irq, void *dev_id)
+{
+ struct bfin_serial_port *uart = dev_id;
+ unsigned int status;
+
+ status = bfin_serial_get_mctrl(&uart->port);
+ uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+ uart->scts = 1;
+ UART_CLEAR_SCTS(uart);
+ UART_CLEAR_IER(uart, EDSSI);
+#endif
+
+ return IRQ_HANDLED;
+}
+#else
+static unsigned int bfin_serial_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+}
+
+static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+#endif
+
+/*
+ * interrupts are disabled on entry
+ */
+static void bfin_serial_stop_tx(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ struct circ_buf *xmit = &uart->port.state->xmit;
+#endif
+
+ while (!(UART_GET_LSR(uart) & TEMT))
+ cpu_relax();
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ disable_dma(uart->tx_dma_channel);
+ xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
+ uart->port.icount.tx += uart->tx_count;
+ uart->tx_count = 0;
+ uart->tx_done = 1;
+#else
+#ifdef CONFIG_BF54x
+ /* Clear TFI bit */
+ UART_PUT_LSR(uart, TFI);
+#endif
+ UART_CLEAR_IER(uart, ETBEI);
+#endif
+}
+
+/*
+ * port is locked and interrupts are disabled
+ */
+static void bfin_serial_start_tx(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ struct tty_struct *tty = uart->port.state->port.tty;
+
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+ if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
+ uart->scts = 0;
+ uart_handle_cts_change(&uart->port, uart->scts);
+ }
+#endif
+
+ /*
+ * To avoid losing RX interrupts, reset the IR function
+ * before sending data.
+ */
+ if (tty->termios->c_line == N_IRDA)
+ bfin_serial_reset_irda(port);
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ if (uart->tx_done)
+ bfin_serial_dma_tx_chars(uart);
+#else
+ UART_SET_IER(uart, ETBEI);
+ bfin_serial_tx_chars(uart);
+#endif
+}
+
+/*
+ * Interrupts are enabled
+ */
+static void bfin_serial_stop_rx(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+
+ UART_CLEAR_IER(uart, ERBFI);
+}
+
+/*
+ * Set the modem control timer to fire immediately.
+ */
+static void bfin_serial_enable_ms(struct uart_port *port)
+{
+}
+
+
+#if ANOMALY_05000363 && defined(CONFIG_SERIAL_BFIN_PIO)
+# define UART_GET_ANOMALY_THRESHOLD(uart) ((uart)->anomaly_threshold)
+# define UART_SET_ANOMALY_THRESHOLD(uart, v) ((uart)->anomaly_threshold = (v))
+#else
+# define UART_GET_ANOMALY_THRESHOLD(uart) 0
+# define UART_SET_ANOMALY_THRESHOLD(uart, v)
+#endif
+
+#ifdef CONFIG_SERIAL_BFIN_PIO
+static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
+{
+ struct tty_struct *tty = NULL;
+ unsigned int status, ch, flg;
+ static struct timeval anomaly_start = { .tv_sec = 0 };
+
+ status = UART_GET_LSR(uart);
+ UART_CLEAR_LSR(uart);
+
+ ch = UART_GET_CHAR(uart);
+ uart->port.icount.rx++;
+
+#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
+ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
+ if (kgdb_connected && kgdboc_port_line == uart->port.line
+ && kgdboc_break_enabled)
+ if (ch == 0x3) {/* Ctrl + C */
+ kgdb_breakpoint();
+ return;
+ }
+
+ if (!uart->port.state || !uart->port.state->port.tty)
+ return;
+#endif
+ tty = uart->port.state->port.tty;
+
+ if (ANOMALY_05000363) {
+ /* The BF533 (and BF561) family of processors have a nice anomaly
+ * where they continuously generate characters for a "single" break.
+ * We have to basically ignore this flood until the "next" valid
+ * character comes across. Due to the nature of the flood, it is
+ * not possible to reliably catch bytes that are sent too quickly
+ * after this break. So application code talking to the Blackfin
+ * which sends a break signal must allow at least 1.5 character
+ * times after the end of the break for things to stabilize. This
+ * timeout was picked as it must absolutely be larger than 1
+ * character time +/- some percent. So 1.5 sounds good. All other
+ * Blackfin families operate properly. Woo.
+ */
+ if (anomaly_start.tv_sec) {
+ struct timeval curr;
+ suseconds_t usecs;
+
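+ /* The test below is non-zero unless the low byte of ch is a run of
+  * 1s above a run of 0s (0x00, 0x80, 0xc0, ..., 0xff), which appear
+  * to be the only patterns the break flood can produce; any other
+  * value must be a genuine character. */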
+ if ((~ch & (~ch + 1)) & 0xff)
+ goto known_good_char;
+
+ do_gettimeofday(&curr);
+ if (curr.tv_sec - anomaly_start.tv_sec > 1)
+ goto known_good_char;
+
+ usecs = 0;
+ if (curr.tv_sec != anomaly_start.tv_sec)
+ usecs += USEC_PER_SEC;
+ usecs += curr.tv_usec - anomaly_start.tv_usec;
+
+ if (usecs > UART_GET_ANOMALY_THRESHOLD(uart))
+ goto known_good_char;
+
+ if (ch)
+ anomaly_start.tv_sec = 0;
+ else
+ anomaly_start = curr;
+
+ return;
+
+ known_good_char:
+ status &= ~BI;
+ anomaly_start.tv_sec = 0;
+ }
+ }
+
+ if (status & BI) {
+ if (ANOMALY_05000363)
+ if (bfin_revid() < 5)
+ do_gettimeofday(&anomaly_start);
+ uart->port.icount.brk++;
+ if (uart_handle_break(&uart->port))
+ goto ignore_char;
+ status &= ~(PE | FE);
+ }
+ if (status & PE)
+ uart->port.icount.parity++;
+ if (status & OE)
+ uart->port.icount.overrun++;
+ if (status & FE)
+ uart->port.icount.frame++;
+
+ status &= uart->port.read_status_mask;
+
+ if (status & BI)
+ flg = TTY_BREAK;
+ else if (status & PE)
+ flg = TTY_PARITY;
+ else if (status & FE)
+ flg = TTY_FRAME;
+ else
+ flg = TTY_NORMAL;
+
+ if (uart_handle_sysrq_char(&uart->port, ch))
+ goto ignore_char;
+
+ uart_insert_char(&uart->port, status, OE, ch, flg);
+
+ ignore_char:
+ tty_flip_buffer_push(tty);
+}
+
+static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
+{
+ struct circ_buf *xmit = &uart->port.state->xmit;
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
+#ifdef CONFIG_BF54x
+ /* Clear TFI bit */
+ UART_PUT_LSR(uart, TFI);
+#endif
+ /* Anomaly notes:
+ * 05000215 - we always clear ETBEI within last UART TX
+ * interrupt to end a string. It is always set
+ * when starting a new tx.
+ */
+ UART_CLEAR_IER(uart, ETBEI);
+ return;
+ }
+
+ if (uart->port.x_char) {
+ UART_PUT_CHAR(uart, uart->port.x_char);
+ uart->port.icount.tx++;
+ uart->port.x_char = 0;
+ }
+
+ while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) {
+ UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ uart->port.icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&uart->port);
+}
+
+static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
+{
+ struct bfin_serial_port *uart = dev_id;
+
+ while (UART_GET_LSR(uart) & DR)
+ bfin_serial_rx_chars(uart);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
+{
+ struct bfin_serial_port *uart = dev_id;
+
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+ if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
+ uart->scts = 0;
+ uart_handle_cts_change(&uart->port, uart->scts);
+ }
+#endif
+ spin_lock(&uart->port.lock);
+ if (UART_GET_LSR(uart) & THRE)
+ bfin_serial_tx_chars(uart);
+ spin_unlock(&uart->port.lock);
+
+ return IRQ_HANDLED;
+}
+#endif
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
+{
+ struct circ_buf *xmit = &uart->port.state->xmit;
+
+ uart->tx_done = 0;
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
+ uart->tx_count = 0;
+ uart->tx_done = 1;
+ return;
+ }
+
+ if (uart->port.x_char) {
+ UART_PUT_CHAR(uart, uart->port.x_char);
+ uart->port.icount.tx++;
+ uart->port.x_char = 0;
+ }
+
+ uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail))
+ uart->tx_count = UART_XMIT_SIZE - xmit->tail;
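+ /* The DMA engine transfers a linear region only, so the count is
+  * clamped at the circular buffer wrap point; the remainder is sent
+  * from the next TX-complete interrupt. */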
+ blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail),
+ (unsigned long)(xmit->buf+xmit->tail+uart->tx_count));
+ set_dma_config(uart->tx_dma_channel,
+ set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
+ INTR_ON_BUF,
+ DIMENSION_LINEAR,
+ DATA_SIZE_8,
+ DMA_SYNC_RESTART));
+ set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail));
+ set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
+ set_dma_x_modify(uart->tx_dma_channel, 1);
+ SSYNC();
+ enable_dma(uart->tx_dma_channel);
+
+ UART_SET_IER(uart, ETBEI);
+}
+
+static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
+{
+ struct tty_struct *tty = uart->port.state->port.tty;
+ int i, flg, status;
+
+ status = UART_GET_LSR(uart);
+ UART_CLEAR_LSR(uart);
+
+ uart->port.icount.rx +=
+ CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail,
+ UART_XMIT_SIZE);
+
+ if (status & BI) {
+ uart->port.icount.brk++;
+ if (uart_handle_break(&uart->port))
+ goto dma_ignore_char;
+ status &= ~(PE | FE);
+ }
+ if (status & PE)
+ uart->port.icount.parity++;
+ if (status & OE)
+ uart->port.icount.overrun++;
+ if (status & FE)
+ uart->port.icount.frame++;
+
+ status &= uart->port.read_status_mask;
+
+ if (status & BI)
+ flg = TTY_BREAK;
+ else if (status & PE)
+ flg = TTY_PARITY;
+ else if (status & FE)
+ flg = TTY_FRAME;
+ else
+ flg = TTY_NORMAL;
+
+ for (i = uart->rx_dma_buf.tail; ; i++) {
+ if (i >= UART_XMIT_SIZE)
+ i = 0;
+ if (i == uart->rx_dma_buf.head)
+ break;
+ if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i]))
+ uart_insert_char(&uart->port, status, OE,
+ uart->rx_dma_buf.buf[i], flg);
+ }
+
+ dma_ignore_char:
+ tty_flip_buffer_push(tty);
+}
+
+void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
+{
+ int x_pos, pos;
+
+ dma_disable_irq_nosync(uart->rx_dma_channel);
+ spin_lock_bh(&uart->rx_lock);
+
+ /* A 2D DMA RX buffer ring is used. Because curr_y_count and
+ * curr_x_count cannot be read atomically, curr_y_count must be
+ * read before curr_x_count. By the time curr_x_count is read,
+ * curr_y_count may already indicate the next buffer line, while
+ * the position calculated here still refers to the old line.
+ * Such a stale position may be smaller than the current buffer
+ * tail, and garbage would be received if it were not rejected.
+ */
+ uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
+ x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
+ uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
+ if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
+ uart->rx_dma_nrows = 0;
+ x_pos = DMA_RX_XCOUNT - x_pos;
+ if (x_pos == DMA_RX_XCOUNT)
+ x_pos = 0;
+
+ pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
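+ /* pos is a byte offset into the ring; for example, 100 bytes into
+  * row 3 gives pos = 3 * DMA_RX_XCOUNT + 100 = 1636 (illustrative). */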
+ /* Ignore the received data if the new position is in the same line
+ * as the current buffer tail but behind it.
+ */
+ if (pos > uart->rx_dma_buf.tail ||
+ uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
+ uart->rx_dma_buf.head = pos;
+ bfin_serial_dma_rx_chars(uart);
+ uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
+ }
+
+ spin_unlock_bh(&uart->rx_lock);
+ dma_enable_irq(uart->rx_dma_channel);
+
+ mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
+}
+
+static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
+{
+ struct bfin_serial_port *uart = dev_id;
+ struct circ_buf *xmit = &uart->port.state->xmit;
+
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+ if (uart->scts && !(bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
+ uart->scts = 0;
+ uart_handle_cts_change(&uart->port, uart->scts);
+ }
+#endif
+
+ spin_lock(&uart->port.lock);
+ if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
+ disable_dma(uart->tx_dma_channel);
+ clear_dma_irqstat(uart->tx_dma_channel);
+ /* Anomaly notes:
+ * 05000215 - we always clear ETBEI within last UART TX
+ * interrupt to end a string. It is always set
+ * when starting a new tx.
+ */
+ UART_CLEAR_IER(uart, ETBEI);
+ xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
+ uart->port.icount.tx += uart->tx_count;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&uart->port);
+
+ bfin_serial_dma_tx_chars(uart);
+ }
+
+ spin_unlock(&uart->port.lock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
+{
+ struct bfin_serial_port *uart = dev_id;
+ unsigned short irqstat;
+ int x_pos, pos;
+
+ spin_lock(&uart->rx_lock);
+ irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
+ clear_dma_irqstat(uart->rx_dma_channel);
+
+ uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
+ x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
+ uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
+ if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
+ uart->rx_dma_nrows = 0;
+
+ pos = uart->rx_dma_nrows * DMA_RX_XCOUNT;
+ if (pos > uart->rx_dma_buf.tail ||
+ uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
+ uart->rx_dma_buf.head = pos;
+ bfin_serial_dma_rx_chars(uart);
+ uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
+ }
+
+ spin_unlock(&uart->rx_lock);
+
+ return IRQ_HANDLED;
+}
+#endif
+
+/*
+ * Return TIOCSER_TEMT when transmitter is not busy.
+ */
+static unsigned int bfin_serial_tx_empty(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ unsigned short lsr;
+
+ lsr = UART_GET_LSR(uart);
+ if (lsr & TEMT)
+ return TIOCSER_TEMT;
+ else
+ return 0;
+}
+
+static void bfin_serial_break_ctl(struct uart_port *port, int break_state)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ u16 lcr = UART_GET_LCR(uart);
+ if (break_state)
+ lcr |= SB;
+ else
+ lcr &= ~SB;
+ UART_PUT_LCR(uart, lcr);
+ SSYNC();
+}
+
+static int bfin_serial_startup(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ dma_addr_t dma_handle;
+
+ if (request_dma(uart->rx_dma_channel, "BFIN_UART_RX") < 0) {
+ printk(KERN_NOTICE "Unable to attach Blackfin UART RX DMA channel\n");
+ return -EBUSY;
+ }
+
+ if (request_dma(uart->tx_dma_channel, "BFIN_UART_TX") < 0) {
+ printk(KERN_NOTICE "Unable to attach Blackfin UART TX DMA channel\n");
+ free_dma(uart->rx_dma_channel);
+ return -EBUSY;
+ }
+
+ set_dma_callback(uart->rx_dma_channel, bfin_serial_dma_rx_int, uart);
+ set_dma_callback(uart->tx_dma_channel, bfin_serial_dma_tx_int, uart);
+
+ uart->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA);
+ uart->rx_dma_buf.head = 0;
+ uart->rx_dma_buf.tail = 0;
+ uart->rx_dma_nrows = 0;
+
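+ /* The RX DMA is a 2D autobuffer ring: DMA_RX_YCOUNT rows of
+  * DMA_RX_XCOUNT bytes covering the coherent page, interrupting once
+  * per completed row; rx_dma_timer flushes partially filled rows. */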
+ set_dma_config(uart->rx_dma_channel,
+ set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
+ INTR_ON_ROW, DIMENSION_2D,
+ DATA_SIZE_8,
+ DMA_SYNC_RESTART));
+ set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT);
+ set_dma_x_modify(uart->rx_dma_channel, 1);
+ set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT);
+ set_dma_y_modify(uart->rx_dma_channel, 1);
+ set_dma_start_addr(uart->rx_dma_channel, (unsigned long)uart->rx_dma_buf.buf);
+ enable_dma(uart->rx_dma_channel);
+
+ uart->rx_dma_timer.data = (unsigned long)(uart);
+ uart->rx_dma_timer.function = (void *)bfin_serial_rx_dma_timeout;
+ uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES;
+ add_timer(&(uart->rx_dma_timer));
+#else
+# if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
+ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
+ if (kgdboc_port_line == uart->port.line && kgdboc_break_enabled)
+ kgdboc_break_enabled = 0;
+ else {
+# endif
+ if (request_irq(uart->port.irq, bfin_serial_rx_int, IRQF_DISABLED,
+ "BFIN_UART_RX", uart)) {
+ printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n");
+ return -EBUSY;
+ }
+
+ if (request_irq
+ (uart->port.irq+1, bfin_serial_tx_int, IRQF_DISABLED,
+ "BFIN_UART_TX", uart)) {
+ printk(KERN_NOTICE "Unable to attach BlackFin UART TX interrupt\n");
+ free_irq(uart->port.irq, uart);
+ return -EBUSY;
+ }
+
+# ifdef CONFIG_BF54x
+ {
+ /*
+ * UART2 and UART3 on the BF548 share interrupt pins and DMA
+ * controllers with SPORT2 and SPORT3. UART RX and TX interrupts
+ * are generated in PIO mode only when the peripheral mapping
+ * registers are configured properly, which means the corresponding
+ * DMA channels must be requested in PIO mode as well.
+ */
+ unsigned uart_dma_ch_rx, uart_dma_ch_tx;
+
+ switch (uart->port.irq) {
+ case IRQ_UART3_RX:
+ uart_dma_ch_rx = CH_UART3_RX;
+ uart_dma_ch_tx = CH_UART3_TX;
+ break;
+ case IRQ_UART2_RX:
+ uart_dma_ch_rx = CH_UART2_RX;
+ uart_dma_ch_tx = CH_UART2_TX;
+ break;
+ default:
+ uart_dma_ch_rx = uart_dma_ch_tx = 0;
+ break;
+ }
+
+ if (uart_dma_ch_rx &&
+ request_dma(uart_dma_ch_rx, "BFIN_UART_RX") < 0) {
+ printk(KERN_NOTICE"Fail to attach UART interrupt\n");
+ free_irq(uart->port.irq, uart);
+ free_irq(uart->port.irq + 1, uart);
+ return -EBUSY;
+ }
+ if (uart_dma_ch_tx &&
+ request_dma(uart_dma_ch_tx, "BFIN_UART_TX") < 0) {
+ printk(KERN_NOTICE "Fail to attach UART interrupt\n");
+ free_dma(uart_dma_ch_rx);
+ free_irq(uart->port.irq, uart);
+ free_irq(uart->port.irq + 1, uart);
+ return -EBUSY;
+ }
+ }
+# endif
+# if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
+ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
+ }
+# endif
+#endif
+
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+ if (uart->cts_pin >= 0) {
+ if (request_irq(gpio_to_irq(uart->cts_pin),
+ bfin_serial_mctrl_cts_int,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_DISABLED, "BFIN_UART_CTS", uart)) {
+ uart->cts_pin = -1;
+ pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
+ }
+ }
+ if (uart->rts_pin >= 0) {
+ gpio_direction_output(uart->rts_pin, 0);
+ }
+#endif
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+ if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
+ bfin_serial_mctrl_cts_int,
+ IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
+ uart->cts_pin = -1;
+ pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
+ }
+
+ /* The CTS and RTS pins are active-low. */
+ UART_PUT_MCR(uart, ACTS);
+ UART_SET_IER(uart, EDSSI);
+#endif
+
+ UART_SET_IER(uart, ERBFI);
+ return 0;
+}
+
+static void bfin_serial_shutdown(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ disable_dma(uart->tx_dma_channel);
+ free_dma(uart->tx_dma_channel);
+ disable_dma(uart->rx_dma_channel);
+ free_dma(uart->rx_dma_channel);
+ del_timer(&(uart->rx_dma_timer));
+ dma_free_coherent(NULL, PAGE_SIZE, uart->rx_dma_buf.buf, 0);
+#else
+#ifdef CONFIG_BF54x
+ switch (uart->port.irq) {
+ case IRQ_UART3_RX:
+ free_dma(CH_UART3_RX);
+ free_dma(CH_UART3_TX);
+ break;
+ case IRQ_UART2_RX:
+ free_dma(CH_UART2_RX);
+ free_dma(CH_UART2_TX);
+ break;
+ default:
+ break;
+ }
+#endif
+ free_irq(uart->port.irq, uart);
+ free_irq(uart->port.irq+1, uart);
+#endif
+
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+ if (uart->cts_pin >= 0)
+ free_irq(gpio_to_irq(uart->cts_pin), uart);
+#endif
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+ if (uart->cts_pin >= 0)
+ free_irq(uart->status_irq, uart);
+#endif
+}
+
+static void
+bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ unsigned long flags;
+ unsigned int baud, quot;
+ unsigned short val, ier, lcr = 0;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS8:
+ lcr = WLS(8);
+ break;
+ case CS7:
+ lcr = WLS(7);
+ break;
+ case CS6:
+ lcr = WLS(6);
+ break;
+ case CS5:
+ lcr = WLS(5);
+ break;
+ default:
+ printk(KERN_ERR "%s: word lengh not supported\n",
+ __func__);
+ }
+
+ /* Anomaly notes:
+ * 05000231 - the STOP bit is always set to 1 regardless of what the user configures.
+ */
+ if (termios->c_cflag & CSTOPB) {
+ if (ANOMALY_05000231)
+ printk(KERN_WARNING "STOP bits other than 1 is not "
+ "supported in case of anomaly 05000231.\n");
+ else
+ lcr |= STB;
+ }
+ if (termios->c_cflag & PARENB)
+ lcr |= PEN;
+ if (!(termios->c_cflag & PARODD))
+ lcr |= EPS;
+ if (termios->c_cflag & CMSPAR)
+ lcr |= STP;
+
+ spin_lock_irqsave(&uart->port.lock, flags);
+
+ port->read_status_mask = OE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= (FE | PE);
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= BI;
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= FE | PE;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= OE;
+ }
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+ quot = uart_get_divisor(port, baud);
+
+ /* If discipline is not IRDA, apply ANOMALY_05000230 */
+ if (termios->c_line != N_IRDA)
+ quot -= ANOMALY_05000230;
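+ /* Illustrative numbers only (the system clock is board-dependent):
+  * with a 133 MHz sclk and 57600 baud, uart_get_divisor() returns
+  * about 144, which is split into DLL/DLH below. */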
+
+ UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15);
+
+ /* Disable UART */
+ ier = UART_GET_IER(uart);
+ UART_DISABLE_INTS(uart);
+
+ /* Set DLAB in LCR to Access DLL and DLH */
+ UART_SET_DLAB(uart);
+
+ UART_PUT_DLL(uart, quot & 0xFF);
+ UART_PUT_DLH(uart, (quot >> 8) & 0xFF);
+ SSYNC();
+
+ /* Clear DLAB in LCR to Access THR RBR IER */
+ UART_CLEAR_DLAB(uart);
+
+ UART_PUT_LCR(uart, lcr);
+
+ /* Enable UART */
+ UART_ENABLE_INTS(uart, ier);
+
+ val = UART_GET_GCTL(uart);
+ val |= UCEN;
+ UART_PUT_GCTL(uart, val);
+
+ /* Port speed changed, update the per-port timeout. */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_unlock_irqrestore(&uart->port.lock, flags);
+}
+
+static const char *bfin_serial_type(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+
+ return uart->port.type == PORT_BFIN ? "BFIN-UART" : NULL;
+}
+
+/*
+ * Release the memory region(s) being used by 'port'.
+ */
+static void bfin_serial_release_port(struct uart_port *port)
+{
+}
+
+/*
+ * Request the memory region(s) being used by 'port'.
+ */
+static int bfin_serial_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void bfin_serial_config_port(struct uart_port *port, int flags)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+
+ if (flags & UART_CONFIG_TYPE &&
+ bfin_serial_request_port(&uart->port) == 0)
+ uart->port.type = PORT_BFIN;
+}
+
+/*
+ * Verify the new serial_struct (for TIOCSSERIAL).
+ * The only changes we allow are to the flags and type, and
+ * even then only between PORT_BFIN and PORT_UNKNOWN
+ */
+static int
+bfin_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ return 0;
+}
+
+/*
+ * Enable the IrDA function if tty->ldisc.num is N_IRDA.
+ * In other cases, disable IrDA function.
+ */
+static void bfin_serial_set_ldisc(struct uart_port *port, int ld)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ unsigned short val;
+
+ switch (ld) {
+ case N_IRDA:
+ val = UART_GET_GCTL(uart);
+ val |= (IREN | RPOLC);
+ UART_PUT_GCTL(uart, val);
+ break;
+ default:
+ val = UART_GET_GCTL(uart);
+ val &= ~(IREN | RPOLC);
+ UART_PUT_GCTL(uart, val);
+ }
+}
+
+static void bfin_serial_reset_irda(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ unsigned short val;
+
+ val = UART_GET_GCTL(uart);
+ val &= ~(IREN | RPOLC);
+ UART_PUT_GCTL(uart, val);
+ SSYNC();
+ val |= (IREN | RPOLC);
+ UART_PUT_GCTL(uart, val);
+ SSYNC();
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+/* Anomaly notes:
+ * 05000099 - Because we only use THRE in poll_put and DR in poll_get,
+ * losing other bits of UART_LSR is not a problem here.
+ */
+static void bfin_serial_poll_put_char(struct uart_port *port, unsigned char chr)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+
+ while (!(UART_GET_LSR(uart) & THRE))
+ cpu_relax();
+
+ UART_CLEAR_DLAB(uart);
+ UART_PUT_CHAR(uart, (unsigned char)chr);
+}
+
+static int bfin_serial_poll_get_char(struct uart_port *port)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ unsigned char chr;
+
+ while (!(UART_GET_LSR(uart) & DR))
+ cpu_relax();
+
+ UART_CLEAR_DLAB(uart);
+ chr = UART_GET_CHAR(uart);
+
+ return chr;
+}
+#endif
+
+#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
+ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
+static void bfin_kgdboc_port_shutdown(struct uart_port *port)
+{
+ if (kgdboc_break_enabled) {
+ kgdboc_break_enabled = 0;
+ bfin_serial_shutdown(port);
+ }
+}
+
+static int bfin_kgdboc_port_startup(struct uart_port *port)
+{
+ kgdboc_port_line = port->line;
+ kgdboc_break_enabled = !bfin_serial_startup(port);
+ return 0;
+}
+#endif
+
+static struct uart_ops bfin_serial_pops = {
+ .tx_empty = bfin_serial_tx_empty,
+ .set_mctrl = bfin_serial_set_mctrl,
+ .get_mctrl = bfin_serial_get_mctrl,
+ .stop_tx = bfin_serial_stop_tx,
+ .start_tx = bfin_serial_start_tx,
+ .stop_rx = bfin_serial_stop_rx,
+ .enable_ms = bfin_serial_enable_ms,
+ .break_ctl = bfin_serial_break_ctl,
+ .startup = bfin_serial_startup,
+ .shutdown = bfin_serial_shutdown,
+ .set_termios = bfin_serial_set_termios,
+ .set_ldisc = bfin_serial_set_ldisc,
+ .type = bfin_serial_type,
+ .release_port = bfin_serial_release_port,
+ .request_port = bfin_serial_request_port,
+ .config_port = bfin_serial_config_port,
+ .verify_port = bfin_serial_verify_port,
+#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
+ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
+ .kgdboc_port_startup = bfin_kgdboc_port_startup,
+ .kgdboc_port_shutdown = bfin_kgdboc_port_shutdown,
+#endif
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_put_char = bfin_serial_poll_put_char,
+ .poll_get_char = bfin_serial_poll_get_char,
+#endif
+};
+
+#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
+/*
+ * If the port was already initialised (eg, by a boot loader),
+ * try to determine the current setup.
+ */
+static void __init
+bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud,
+ int *parity, int *bits)
+{
+ unsigned short status;
+
+ status = UART_GET_IER(uart) & (ERBFI | ETBEI);
+ if (status == (ERBFI | ETBEI)) {
+ /* ok, the port was enabled */
+ u16 lcr, dlh, dll;
+
+ lcr = UART_GET_LCR(uart);
+
+ *parity = 'n';
+ if (lcr & PEN) {
+ if (lcr & EPS)
+ *parity = 'e';
+ else
+ *parity = 'o';
+ }
+ switch (lcr & 0x03) {
+ case 0: *bits = 5; break;
+ case 1: *bits = 6; break;
+ case 2: *bits = 7; break;
+ case 3: *bits = 8; break;
+ }
+ /* Set DLAB in LCR to Access DLL and DLH */
+ UART_SET_DLAB(uart);
+
+ dll = UART_GET_DLL(uart);
+ dlh = UART_GET_DLH(uart);
+
+ /* Clear DLAB in LCR to Access THR RBR IER */
+ UART_CLEAR_DLAB(uart);
+
+ *baud = get_sclk() / (16*(dll | dlh << 8));
+ }
+ pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __func__, *baud, *parity, *bits);
+}
+
+static struct uart_driver bfin_serial_reg;
+
+static void bfin_serial_console_putchar(struct uart_port *port, int ch)
+{
+ struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+ while (!(UART_GET_LSR(uart) & THRE))
+ barrier();
+ UART_PUT_CHAR(uart, ch);
+}
+
+#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
+ defined (CONFIG_EARLY_PRINTK) */
+
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+#define CLASS_BFIN_CONSOLE "bfin-console"
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct bfin_serial_port *uart = bfin_serial_ports[co->index];
+ unsigned long flags;
+
+ spin_lock_irqsave(&uart->port.lock, flags);
+ uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
+ spin_unlock_irqrestore(&uart->port.lock, flags);
+
+}
+
+static int __init
+bfin_serial_console_setup(struct console *co, char *options)
+{
+ struct bfin_serial_port *uart;
+ int baud = 57600;
+ int bits = 8;
+ int parity = 'n';
+# if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
+ defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+ int flow = 'r';
+# else
+ int flow = 'n';
+# endif
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index < 0 || co->index >= BFIN_UART_NR_PORTS)
+ return -ENODEV;
+
+ uart = bfin_serial_ports[co->index];
+ if (!uart)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ bfin_serial_console_get_options(uart, &baud, &parity, &bits);
+
+ return uart_set_options(&uart->port, co, baud, parity, bits, flow);
+}
+
+static struct console bfin_serial_console = {
+ .name = BFIN_SERIAL_DEV_NAME,
+ .write = bfin_serial_console_write,
+ .device = uart_console_device,
+ .setup = bfin_serial_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &bfin_serial_reg,
+};
+#define BFIN_SERIAL_CONSOLE &bfin_serial_console
+#else
+#define BFIN_SERIAL_CONSOLE NULL
+#endif /* CONFIG_SERIAL_BFIN_CONSOLE */
+
+#ifdef CONFIG_EARLY_PRINTK
+static struct bfin_serial_port bfin_earlyprintk_port;
+#define CLASS_BFIN_EARLYPRINTK "bfin-earlyprintk"
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+bfin_earlyprintk_console_write(struct console *co, const char *s, unsigned int count)
+{
+ unsigned long flags;
+
+ if (bfin_earlyprintk_port.port.line != co->index)
+ return;
+
+ spin_lock_irqsave(&bfin_earlyprintk_port.port.lock, flags);
+ uart_console_write(&bfin_earlyprintk_port.port, s, count,
+ bfin_serial_console_putchar);
+ spin_unlock_irqrestore(&bfin_earlyprintk_port.port.lock, flags);
+}
+
+/*
+ * This should have a .setup or .early_setup in it, but then things get called
+ * without the command line options, and the baud rate gets messed up - so
+ * don't let the common infrastructure play with things. (see calls to setup
+ * & earlysetup in ./kernel/printk.c:register_console())
+ */
+static struct __initdata console bfin_early_serial_console = {
+ .name = "early_BFuart",
+ .write = bfin_earlyprintk_console_write,
+ .device = uart_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &bfin_serial_reg,
+};
+#endif
+
+static struct uart_driver bfin_serial_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = DRIVER_NAME,
+ .dev_name = BFIN_SERIAL_DEV_NAME,
+ .major = BFIN_SERIAL_MAJOR,
+ .minor = BFIN_SERIAL_MINOR,
+ .nr = BFIN_UART_NR_PORTS,
+ .cons = BFIN_SERIAL_CONSOLE,
+};
+
+static int bfin_serial_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+ return uart_suspend_port(&bfin_serial_reg, &uart->port);
+}
+
+static int bfin_serial_resume(struct platform_device *pdev)
+{
+ struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+ return uart_resume_port(&bfin_serial_reg, &uart->port);
+}
+
+static int bfin_serial_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct bfin_serial_port *uart = NULL;
+ int ret = 0;
+
+ if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
+ dev_err(&pdev->dev, "Wrong bfin uart platform device id.\n");
+ return -ENOENT;
+ }
+
+ if (bfin_serial_ports[pdev->id] == NULL) {
+
+ uart = kzalloc(sizeof(*uart), GFP_KERNEL);
+ if (!uart) {
+ dev_err(&pdev->dev,
+ "fail to malloc bfin_serial_port\n");
+ return -ENOMEM;
+ }
+ bfin_serial_ports[pdev->id] = uart;
+
+#ifdef CONFIG_EARLY_PRINTK
+ if (!(bfin_earlyprintk_port.port.membase
+ && bfin_earlyprintk_port.port.line == pdev->id)) {
+ /*
+ * If the peripheral pins of the current port were already allocated
+ * during the earlyprintk probe stage, don't request them again.
+ */
+#endif
+ ret = peripheral_request_list(
+ (unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail to request bfin serial peripherals\n");
+ goto out_error_free_mem;
+ }
+#ifdef CONFIG_EARLY_PRINTK
+ }
+#endif
+
+ spin_lock_init(&uart->port.lock);
+ uart->port.uartclk = get_sclk();
+ uart->port.fifosize = BFIN_UART_TX_FIFO_SIZE;
+ uart->port.ops = &bfin_serial_pops;
+ uart->port.line = pdev->id;
+ uart->port.iotype = UPIO_MEM;
+ uart->port.flags = UPF_BOOT_AUTOCONF;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ ret = -ENOENT;
+ goto out_error_free_peripherals;
+ }
+
+ uart->port.membase = ioremap(res->start,
+ resource_size(res));
+ if (!uart->port.membase) {
+ dev_err(&pdev->dev, "Cannot map uart IO\n");
+ ret = -ENXIO;
+ goto out_error_free_peripherals;
+ }
+ uart->port.mapbase = res->start;
+
+ uart->port.irq = platform_get_irq(pdev, 0);
+ if (uart->port.irq < 0) {
+ dev_err(&pdev->dev, "No uart RX/TX IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+
+ uart->status_irq = platform_get_irq(pdev, 1);
+ if (uart->status_irq < 0) {
+ dev_err(&pdev->dev, "No uart status IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+ spin_lock_init(&uart->rx_lock);
+ uart->tx_done = 1;
+ uart->tx_count = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "No uart TX DMA channel specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+ uart->tx_dma_channel = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "No uart RX DMA channel specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+ uart->rx_dma_channel = res->start;
+
+ init_timer(&(uart->rx_dma_timer));
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
+ defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (res == NULL)
+ uart->cts_pin = -1;
+ else
+ uart->cts_pin = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+ if (res == NULL)
+ uart->rts_pin = -1;
+ else
+ uart->rts_pin = res->start;
+# if defined(CONFIG_SERIAL_BFIN_CTSRTS)
+ if (uart->rts_pin >= 0)
+ gpio_request(uart->rts_pin, DRIVER_NAME);
+# endif
+#endif
+ }
+
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+ if (!is_early_platform_device(pdev)) {
+#endif
+ uart = bfin_serial_ports[pdev->id];
+ uart->port.dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, uart);
+ ret = uart_add_one_port(&bfin_serial_reg, &uart->port);
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+ }
+#endif
+
+ if (!ret)
+ return 0;
+
+ if (uart) {
+out_error_unmap:
+ iounmap(uart->port.membase);
+out_error_free_peripherals:
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+out_error_free_mem:
+ kfree(uart);
+ bfin_serial_ports[pdev->id] = NULL;
+ }
+
+ return ret;
+}
+
+static int __devexit bfin_serial_remove(struct platform_device *pdev)
+{
+ struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (uart) {
+ uart_remove_one_port(&bfin_serial_reg, &uart->port);
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+ if (uart->rts_pin >= 0)
+ gpio_free(uart->rts_pin);
+#endif
+ iounmap(uart->port.membase);
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+ kfree(uart);
+ bfin_serial_ports[pdev->id] = NULL;
+ }
+
+ return 0;
+}
+
+static struct platform_driver bfin_serial_driver = {
+ .probe = bfin_serial_probe,
+ .remove = __devexit_p(bfin_serial_remove),
+ .suspend = bfin_serial_suspend,
+ .resume = bfin_serial_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+#if defined(CONFIG_SERIAL_BFIN_CONSOLE)
+static __initdata struct early_platform_driver early_bfin_serial_driver = {
+ .class_str = CLASS_BFIN_CONSOLE,
+ .pdrv = &bfin_serial_driver,
+ .requested_id = EARLY_PLATFORM_ID_UNSET,
+};
+
+static int __init bfin_serial_rs_console_init(void)
+{
+ early_platform_driver_register(&early_bfin_serial_driver, DRIVER_NAME);
+
+ early_platform_driver_probe(CLASS_BFIN_CONSOLE, BFIN_UART_NR_PORTS, 0);
+
+ register_console(&bfin_serial_console);
+
+ return 0;
+}
+console_initcall(bfin_serial_rs_console_init);
+#endif
+
+#ifdef CONFIG_EARLY_PRINTK
+/*
+ * Memory can't be allocated dynamically during the earlyprintk init stage,
+ * so probe earlyprintk separately using a static uart port variable.
+ */
+static int bfin_earlyprintk_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+
+ if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
+ dev_err(&pdev->dev, "Wrong earlyprintk platform device id.\n");
+ return -ENOENT;
+ }
+
+ ret = peripheral_request_list(
+ (unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail to request bfin serial peripherals\n");
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ ret = -ENOENT;
+ goto out_error_free_peripherals;
+ }
+
+ bfin_earlyprintk_port.port.membase = ioremap(res->start,
+ resource_size(res));
+ if (!bfin_earlyprintk_port.port.membase) {
+ dev_err(&pdev->dev, "Cannot map uart IO\n");
+ ret = -ENXIO;
+ goto out_error_free_peripherals;
+ }
+ bfin_earlyprintk_port.port.mapbase = res->start;
+ bfin_earlyprintk_port.port.line = pdev->id;
+ bfin_earlyprintk_port.port.uartclk = get_sclk();
+ bfin_earlyprintk_port.port.fifosize = BFIN_UART_TX_FIFO_SIZE;
+ spin_lock_init(&bfin_earlyprintk_port.port.lock);
+
+ return 0;
+
+out_error_free_peripherals:
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+
+ return ret;
+}
+
+static struct platform_driver bfin_earlyprintk_driver = {
+ .probe = bfin_earlyprintk_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static __initdata struct early_platform_driver early_bfin_earlyprintk_driver = {
+ .class_str = CLASS_BFIN_EARLYPRINTK,
+ .pdrv = &bfin_earlyprintk_driver,
+ .requested_id = EARLY_PLATFORM_ID_UNSET,
+};
+
+struct console __init *bfin_earlyserial_init(unsigned int port,
+ unsigned int cflag)
+{
+ struct ktermios t;
+ char port_name[20];
+
+ if (port < 0 || port >= BFIN_UART_NR_PORTS)
+ return NULL;
+
+ /*
+ * Only probe the resources of the port given in the earlyprintk boot arg.
+ * The expected port id is encoded in the port name string.
+ */
+ snprintf(port_name, 20, DRIVER_NAME ".%d", port);
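+ /* The early platform device is matched by name and id; for port 1
+  * this builds the string "bfin-uart.1". */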
+ early_platform_driver_register(&early_bfin_earlyprintk_driver,
+ port_name);
+ early_platform_driver_probe(CLASS_BFIN_EARLYPRINTK, 1, 0);
+
+ if (!bfin_earlyprintk_port.port.membase)
+ return NULL;
+
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+ /*
+ * If we are using early serial, don't let the normal console rewind
+ * log buffer, since that causes things to be printed multiple times
+ */
+ bfin_serial_console.flags &= ~CON_PRINTBUFFER;
+#endif
+
+ bfin_early_serial_console.index = port;
+ t.c_cflag = cflag;
+ t.c_iflag = 0;
+ t.c_oflag = 0;
+ t.c_lflag = ICANON;
+ t.c_line = port;
+ bfin_serial_set_termios(&bfin_earlyprintk_port.port, &t, &t);
+
+ return &bfin_early_serial_console;
+}
+#endif /* CONFIG_EARLY_PRINTK */
+
+static int __init bfin_serial_init(void)
+{
+ int ret;
+
+ pr_info("Blackfin serial driver\n");
+
+ ret = uart_register_driver(&bfin_serial_reg);
+ if (ret) {
+ pr_err("failed to register %s:%d\n",
+ bfin_serial_reg.driver_name, ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&bfin_serial_driver);
+ if (ret) {
+ pr_err("fail to register bfin uart\n");
+ uart_unregister_driver(&bfin_serial_reg);
+ }
+
+ return ret;
+}
+
+static void __exit bfin_serial_exit(void)
+{
+ platform_driver_unregister(&bfin_serial_driver);
+ uart_unregister_driver(&bfin_serial_reg);
+}
+
+
+module_init(bfin_serial_init);
+module_exit(bfin_serial_exit);
+
+MODULE_AUTHOR("Sonic Zhang, Aubrey Li");
+MODULE_DESCRIPTION("Blackfin generic serial port driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(BFIN_SERIAL_MAJOR);
+MODULE_ALIAS("platform:bfin-uart");
diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
new file mode 100644
index 00000000..891d194a
--- /dev/null
+++ b/drivers/tty/serial/bfin_sport_uart.c
@@ -0,0 +1,934 @@
+/*
+ * Blackfin On-Chip Sport Emulated UART Driver
+ *
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+/*
+ * This driver and the hardware it supports follow application note EE-191 from ADI.
+ * http://www.analog.com/static/imported-files/application_notes/EE191.pdf
+ * The application note describes how to implement a UART on a SHARC DSP,
+ * but this driver is implemented on a Blackfin processor.
+ * Transmit Frame Sync is not used by this driver to transfer data out.
+ */
+
+/* #define DEBUG */
+
+#define DRV_NAME "bfin-sport-uart"
+#define DEVICE_NAME "ttySS"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+
+#include <asm/bfin_sport.h>
+#include <asm/delay.h>
+#include <asm/portmux.h>
+
+#include "bfin_sport_uart.h"
+
+struct sport_uart_port {
+ struct uart_port port;
+ int err_irq;
+ unsigned short csize;
+ unsigned short rxmask;
+ unsigned short txmask1;
+ unsigned short txmask2;
+ unsigned char stopb;
+/* unsigned char parib; */
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ int cts_pin;
+ int rts_pin;
+#endif
+};
+
+static int sport_uart_tx_chars(struct sport_uart_port *up);
+static void sport_stop_tx(struct uart_port *port);
+
+static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
+{
+ pr_debug("%s value:%x, mask1=0x%x, mask2=0x%x\n", __func__, value,
+ up->txmask1, up->txmask2);
+
+ /* Place Start and Stop bits */
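+ /* The shift below makes bit 0 a zero start bit, txmask1 keeps the
+  * data bits, and txmask2 sets the stop bit(s) above them, so the
+  * SPORT shifts out a software-framed UART character LSB first. */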
+ __asm__ __volatile__ (
+ "%[val] <<= 1;"
+ "%[val] = %[val] & %[mask1];"
+ "%[val] = %[val] | %[mask2];"
+ : [val]"+d"(value)
+ : [mask1]"d"(up->txmask1), [mask2]"d"(up->txmask2)
+ : "ASTAT"
+ );
+ pr_debug("%s value:%x\n", __func__, value);
+
+ SPORT_PUT_TX(up, value);
+}
+
+static inline unsigned char rx_one_byte(struct sport_uart_port *up)
+{
+ unsigned int value;
+ unsigned char extract;
+ u32 tmp_mask1, tmp_mask2, tmp_shift, tmp;
+
+ if ((up->csize + up->stopb) > 7)
+ value = SPORT_GET_RX32(up);
+ else
+ value = SPORT_GET_RX(up);
+
+ pr_debug("%s value:%x, cs=%d, mask=0x%x\n", __func__, value,
+ up->csize, up->rxmask);
+
+ /* Extract data */
+ __asm__ __volatile__ (
+ "%[extr] = 0;"
+ "%[mask1] = %[rxmask];"
+ "%[mask2] = 0x0200(Z);"
+ "%[shift] = 0;"
+ "LSETUP(.Lloop_s, .Lloop_e) LC0 = %[lc];"
+ ".Lloop_s:"
+ "%[tmp] = extract(%[val], %[mask1].L)(Z);"
+ "%[tmp] <<= %[shift];"
+ "%[extr] = %[extr] | %[tmp];"
+ "%[mask1] = %[mask1] - %[mask2];"
+ ".Lloop_e:"
+ "%[shift] += 1;"
+ : [extr]"=&d"(extract), [shift]"=&d"(tmp_shift), [tmp]"=&d"(tmp),
+ [mask1]"=&d"(tmp_mask1), [mask2]"=&d"(tmp_mask2)
+ : [val]"d"(value), [rxmask]"d"(up->rxmask), [lc]"a"(up->csize)
+ : "ASTAT", "LB0", "LC0", "LT0"
+ );
+
+ pr_debug(" extract:%x\n", extract);
+ return extract;
+}
+
+static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
+{
+ int tclkdiv, rclkdiv;
+ unsigned int sclk = get_sclk();
+
+ /* Set TCR1 and TCR2, TFSR is not enabled for uart */
+ SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
+ SPORT_PUT_TCR2(up, size + 1);
+ pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
+
+ /* Set RCR1 and RCR2 */
+ SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK));
+ SPORT_PUT_RCR2(up, (size + 1) * 2 - 1);
+ pr_debug("%s RCR1:%x, RCR2:%x\n", __func__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up));
+
+ tclkdiv = sclk / (2 * baud_rate) - 1;
+ /* The actual uart baud rate of devices varies by +/-2%. The SPORT
+ * RX sample rate should be faster than double the worst case,
+ * otherwise wrong data are received. So set the SPORT RX clock
+ * to be 3% faster.
+ */
+ rclkdiv = sclk / (2 * baud_rate * 2 * 97 / 100) - 1;
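+ /* Illustrative numbers only (sclk is board-dependent): with a
+  * 133 MHz sclk and 57600 baud, tclkdiv = 133000000 / 115200 - 1 = 1153
+  * and rclkdiv works out to roughly 594. */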
+ SPORT_PUT_TCLKDIV(up, tclkdiv);
+ SPORT_PUT_RCLKDIV(up, rclkdiv);
+ SSYNC();
+ pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, rclkdiv:%d\n",
+ __func__, sclk, baud_rate, tclkdiv, rclkdiv);
+
+ return 0;
+}
+
+static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id)
+{
+ struct sport_uart_port *up = dev_id;
+ struct tty_struct *tty = up->port.state->port.tty;
+ unsigned int ch;
+
+ spin_lock(&up->port.lock);
+
+ while (SPORT_GET_STAT(up) & RXNE) {
+ ch = rx_one_byte(up);
+ up->port.icount.rx++;
+
+ if (!uart_handle_sysrq_char(&up->port, ch))
+ tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ }
+ tty_flip_buffer_push(tty);
+
+ spin_unlock(&up->port.lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id)
+{
+ struct sport_uart_port *up = dev_id;
+
+ spin_lock(&up->port.lock);
+ sport_uart_tx_chars(up);
+ spin_unlock(&up->port.lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
+{
+ struct sport_uart_port *up = dev_id;
+ struct tty_struct *tty = up->port.state->port.tty;
+ unsigned int stat = SPORT_GET_STAT(up);
+
+ spin_lock(&up->port.lock);
+
+ /* Overflow in RX FIFO */
+ if (stat & ROVF) {
+ up->port.icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ SPORT_PUT_STAT(up, ROVF); /* Clear ROVF bit */
+ }
+ /* These should not happen */
+ if (stat & (TOVF | TUVF | RUVF)) {
+ pr_err("SPORT Error:%s %s %s\n",
+ (stat & TOVF) ? "TX overflow" : "",
+ (stat & TUVF) ? "TX underflow" : "",
+ (stat & RUVF) ? "RX underflow" : "");
+ SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
+ SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
+ }
+ SSYNC();
+
+ spin_unlock(&up->port.lock);
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+static unsigned int sport_get_mctrl(struct uart_port *port)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+ if (up->cts_pin < 0)
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+
+ /* The CTS pin is active-low. */
+ if (SPORT_UART_GET_CTS(up))
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+ else
+ return TIOCM_DSR | TIOCM_CAR;
+}
+
+static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+ if (up->rts_pin < 0)
+ return;
+
+ /* The RTS pin is active-low. */
+ if (mctrl & TIOCM_RTS)
+ SPORT_UART_ENABLE_RTS(up);
+ else
+ SPORT_UART_DISABLE_RTS(up);
+}
+
+/*
+ * Handle any change of modem status signal.
+ */
+static irqreturn_t sport_mctrl_cts_int(int irq, void *dev_id)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)dev_id;
+ unsigned int status;
+
+ status = sport_get_mctrl(&up->port);
+ uart_handle_cts_change(&up->port, status & TIOCM_CTS);
+
+ return IRQ_HANDLED;
+}
+#else
+static unsigned int sport_get_mctrl(struct uart_port *port)
+{
+ pr_debug("%s enter\n", __func__);
+ return TIOCM_CTS | TIOCM_CD | TIOCM_DSR;
+}
+
+static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ pr_debug("%s enter\n", __func__);
+}
+#endif
+
+/* Request IRQs and set up the clock */
+static int sport_startup(struct uart_port *port)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+ int ret;
+
+ pr_debug("%s enter\n", __func__);
+ ret = request_irq(up->port.irq, sport_uart_rx_irq, 0,
+ "SPORT_UART_RX", up);
+ if (ret) {
+ dev_err(port->dev, "unable to request SPORT RX interrupt\n");
+ return ret;
+ }
+
+ ret = request_irq(up->port.irq+1, sport_uart_tx_irq, 0,
+ "SPORT_UART_TX", up);
+ if (ret) {
+ dev_err(port->dev, "unable to request SPORT TX interrupt\n");
+ goto fail1;
+ }
+
+ ret = request_irq(up->err_irq, sport_uart_err_irq, 0,
+ "SPORT_UART_STATUS", up);
+ if (ret) {
+ dev_err(port->dev, "unable to request SPORT status interrupt\n");
+ goto fail2;
+ }
+
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ if (up->cts_pin >= 0) {
+ if (request_irq(gpio_to_irq(up->cts_pin),
+ sport_mctrl_cts_int,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_DISABLED, "BFIN_SPORT_UART_CTS", up)) {
+ up->cts_pin = -1;
+ dev_info(port->dev, "Unable to attach BlackFin UART over SPORT CTS interrupt. So, disable it.\n");
+ }
+ }
+ if (up->rts_pin >= 0)
+ gpio_direction_output(up->rts_pin, 0);
+#endif
+
+ return 0;
+ fail2:
+ free_irq(up->port.irq+1, up);
+ fail1:
+ free_irq(up->port.irq, up);
+
+ return ret;
+}
+
+/*
+ * sport_uart_tx_chars
+ *
+ * ret 1 means need to enable sport.
+ * ret 0 means do nothing.
+ */
+static int sport_uart_tx_chars(struct sport_uart_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+
+ if (SPORT_GET_STAT(up) & TXF)
+ return 0;
+
+ if (up->port.x_char) {
+ tx_one_byte(up, up->port.x_char);
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ return 1;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+ /* The waiting loop used to stop SPORT TX from the TX interrupt is
+ * too long. It may block SPORT RX interrupts and cause RX FIFO
+ * overflow. So only stop SPORT TX after the last char in the TX
+ * FIFO has moved into the shift register.
+ */
+ if (SPORT_GET_STAT(up) & TXHRE)
+ sport_stop_tx(&up->port);
+ return 0;
+ }
+
+ while (!(SPORT_GET_STAT(up) & TXF) && !uart_circ_empty(xmit)) {
+ tx_one_byte(up, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ return 1;
+}
+
+static unsigned int sport_tx_empty(struct uart_port *port)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+ unsigned int stat;
+
+ stat = SPORT_GET_STAT(up);
+ pr_debug("%s stat:%04x\n", __func__, stat);
+ if (stat & TXHRE)
+ return TIOCSER_TEMT;
+ else
+ return 0;
+}
+
+static void sport_stop_tx(struct uart_port *port)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+ pr_debug("%s enter\n", __func__);
+
+ if (!(SPORT_GET_TCR1(up) & TSPEN))
+ return;
+
+ /* Although the hold register is empty, the last byte is still in
+ * the shift register and has not been sent out yet. So put a dummy
+ * word into the TX FIFO; SPORT TX then stops once the last byte has
+ * been shifted out and the dummy word has moved into the shift
+ * register.
+ */
+ SPORT_PUT_TX(up, 0xffff);
+ while (!(SPORT_GET_STAT(up) & TXHRE))
+ cpu_relax();
+
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
+ SSYNC();
+
+ return;
+}
+
+static void sport_start_tx(struct uart_port *port)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+ pr_debug("%s enter\n", __func__);
+
+ /* Write data into the SPORT FIFO before enabling the SPORT transmitter */
+ if (sport_uart_tx_chars(up)) {
+ /* Enable transmit; an interrupt will then be generated */
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
+ SSYNC();
+ }
+
+ pr_debug("%s exit\n", __func__);
+}
+
+static void sport_stop_rx(struct uart_port *port)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+ pr_debug("%s enter\n", __func__);
+ /* Disable sport to stop rx */
+ SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN));
+ SSYNC();
+}
+
+static void sport_enable_ms(struct uart_port *port)
+{
+ pr_debug("%s enter\n", __func__);
+}
+
+static void sport_break_ctl(struct uart_port *port, int break_state)
+{
+ pr_debug("%s enter\n", __func__);
+}
+
+static void sport_shutdown(struct uart_port *port)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+ dev_dbg(port->dev, "%s enter\n", __func__);
+
+ /* Disable sport */
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
+ SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN));
+ SSYNC();
+
+ free_irq(up->port.irq, up);
+ free_irq(up->port.irq+1, up);
+ free_irq(up->err_irq, up);
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ if (up->cts_pin >= 0)
+ free_irq(gpio_to_irq(up->cts_pin), up);
+#endif
+}
+
+static const char *sport_type(struct uart_port *port)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+ pr_debug("%s enter\n", __func__);
+ return up->port.type == PORT_BFIN_SPORT ? "BFIN-SPORT-UART" : NULL;
+}
+
+static void sport_release_port(struct uart_port *port)
+{
+ pr_debug("%s enter\n", __func__);
+}
+
+static int sport_request_port(struct uart_port *port)
+{
+ pr_debug("%s enter\n", __func__);
+ return 0;
+}
+
+static void sport_config_port(struct uart_port *port, int flags)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+ pr_debug("%s enter\n", __func__);
+ up->port.type = PORT_BFIN_SPORT;
+}
+
+static int sport_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ pr_debug("%s enter\n", __func__);
+ return 0;
+}
+
+static void sport_set_termios(struct uart_port *port,
+ struct ktermios *termios, struct ktermios *old)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+ unsigned long flags;
+ int i;
+
+ pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag);
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS8:
+ up->csize = 8;
+ break;
+ case CS7:
+ up->csize = 7;
+ break;
+ case CS6:
+ up->csize = 6;
+ break;
+ case CS5:
+ up->csize = 5;
+ break;
+ default:
+ pr_warning("requested word length not supported\n");
+ }
+
+ if (termios->c_cflag & CSTOPB) {
+ up->stopb = 1;
+ }
+ if (termios->c_cflag & PARENB) {
+ pr_warning("PAREN bits is not supported yet\n");
+ /* up->parib = 1; */
+ }
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ port->read_status_mask = 0;
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+
+ /* RX extract mask */
+ up->rxmask = 0x01 | (((up->csize + up->stopb) * 2 - 1) << 0x8);
+ /* TX masks, 8 bit data and 1 bit stop for example:
+ * mask1 = b#0111111110
+ * mask2 = b#1000000000
+ */
+ for (i = 0, up->txmask1 = 0; i < up->csize; i++)
+ up->txmask1 |= (1<<i);
+ up->txmask2 = (1<<i);
+ if (up->stopb) {
+ ++i;
+ up->txmask2 |= (1<<i);
+ }
+ up->txmask1 <<= 1;
+ up->txmask2 <<= 1;
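+ /* Note (an interpretation, not stated in the original comment): the
+ * final shifts presumably leave bit 0 free for the start bit of the
+ * emulated frame, so txmask1 covers the data bits and txmask2 the
+ * stop bit(s), matching the 8N1 example above.
+ */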
+ /* uart baud rate */
+ port->uartclk = uart_get_baud_rate(port, termios, old, 0, get_sclk()/16);
+
+ /* Disable UART */
+ SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
+ SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
+
+ sport_uart_setup(up, up->csize + up->stopb, port->uartclk);
+
+ /* Drive the TX line high after configuration; one dummy word is
+ * needed so the SPORT stops after shifting out one byte.
+ */
+ SPORT_PUT_TX(up, 0xffff);
+ SPORT_PUT_TX(up, 0xffff);
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
+ SSYNC();
+ while (!(SPORT_GET_STAT(up) & TXHRE))
+ cpu_relax();
+ SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
+ SSYNC();
+
+ /* Port speed changed, update the per-port timeout. */
+ uart_update_timeout(port, termios->c_cflag, port->uartclk);
+
+ /* Enable sport rx */
+ SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) | RSPEN);
+ SSYNC();
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+struct uart_ops sport_uart_ops = {
+ .tx_empty = sport_tx_empty,
+ .set_mctrl = sport_set_mctrl,
+ .get_mctrl = sport_get_mctrl,
+ .stop_tx = sport_stop_tx,
+ .start_tx = sport_start_tx,
+ .stop_rx = sport_stop_rx,
+ .enable_ms = sport_enable_ms,
+ .break_ctl = sport_break_ctl,
+ .startup = sport_startup,
+ .shutdown = sport_shutdown,
+ .set_termios = sport_set_termios,
+ .type = sport_type,
+ .release_port = sport_release_port,
+ .request_port = sport_request_port,
+ .config_port = sport_config_port,
+ .verify_port = sport_verify_port,
+};
+
+#define BFIN_SPORT_UART_MAX_PORTS 4
+
+static struct sport_uart_port *bfin_sport_uart_ports[BFIN_SPORT_UART_MAX_PORTS];
+
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+#define CLASS_BFIN_SPORT_CONSOLE "bfin-sport-console"
+
+static int __init
+sport_uart_console_setup(struct console *co, char *options)
+{
+ struct sport_uart_port *up;
+ int baud = 57600;
+ int bits = 8;
+ int parity = 'n';
+# ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ int flow = 'r';
+# else
+ int flow = 'n';
+# endif
+
+ /* Check whether an invalid uart number has been specified */
+ if (co->index < 0 || co->index >= BFIN_SPORT_UART_MAX_PORTS)
+ return -ENODEV;
+
+ up = bfin_sport_uart_ports[co->index];
+ if (!up)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&up->port, co, baud, parity, bits, flow);
+}
+
+static void sport_uart_console_putchar(struct uart_port *port, int ch)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+ while (SPORT_GET_STAT(up) & TXF)
+ barrier();
+
+ tx_one_byte(up, ch);
+}
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+sport_uart_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct sport_uart_port *up = bfin_sport_uart_ports[co->index];
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ if (SPORT_GET_TCR1(up) & TSPEN)
+ uart_console_write(&up->port, s, count, sport_uart_console_putchar);
+ else {
+ /* dummy data to start sport */
+ while (SPORT_GET_STAT(up) & TXF)
+ barrier();
+ SPORT_PUT_TX(up, 0xffff);
+ /* Enable transmit; an interrupt will then be generated */
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
+ SSYNC();
+
+ uart_console_write(&up->port, s, count, sport_uart_console_putchar);
+
+ /* Although the hold register is empty, the last byte is still in
+ * the shift register and has not been sent out yet. So put a dummy
+ * word into the TX FIFO; SPORT TX then stops once the last byte has
+ * been shifted out and the dummy word has moved into the shift
+ * register.
+ */
+ while (SPORT_GET_STAT(up) & TXF)
+ barrier();
+ SPORT_PUT_TX(up, 0xffff);
+ while (!(SPORT_GET_STAT(up) & TXHRE))
+ barrier();
+
+ /* Stop sport tx transfer */
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
+ SSYNC();
+ }
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static struct uart_driver sport_uart_reg;
+
+static struct console sport_uart_console = {
+ .name = DEVICE_NAME,
+ .write = sport_uart_console_write,
+ .device = uart_console_device,
+ .setup = sport_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sport_uart_reg,
+};
+
+#define SPORT_UART_CONSOLE (&sport_uart_console)
+#else
+#define SPORT_UART_CONSOLE NULL
+#endif /* CONFIG_SERIAL_BFIN_SPORT_CONSOLE */
+
+
+static struct uart_driver sport_uart_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = DRV_NAME,
+ .dev_name = DEVICE_NAME,
+ .major = 204,
+ .minor = 84,
+ .nr = BFIN_SPORT_UART_MAX_PORTS,
+ .cons = SPORT_UART_CONSOLE,
+};
+
+#ifdef CONFIG_PM
+static int sport_uart_suspend(struct device *dev)
+{
+ struct sport_uart_port *sport = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s enter\n", __func__);
+ if (sport)
+ uart_suspend_port(&sport_uart_reg, &sport->port);
+
+ return 0;
+}
+
+static int sport_uart_resume(struct device *dev)
+{
+ struct sport_uart_port *sport = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s enter\n", __func__);
+ if (sport)
+ uart_resume_port(&sport_uart_reg, &sport->port);
+
+ return 0;
+}
+
+static struct dev_pm_ops bfin_sport_uart_dev_pm_ops = {
+ .suspend = sport_uart_suspend,
+ .resume = sport_uart_resume,
+};
+#endif
+
+static int __devinit sport_uart_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct sport_uart_port *sport;
+ int ret = 0;
+
+ dev_dbg(&pdev->dev, "%s enter\n", __func__);
+
+ if (pdev->id < 0 || pdev->id >= BFIN_SPORT_UART_MAX_PORTS) {
+ dev_err(&pdev->dev, "Wrong sport uart platform device id.\n");
+ return -ENOENT;
+ }
+
+ if (bfin_sport_uart_ports[pdev->id] == NULL) {
+ bfin_sport_uart_ports[pdev->id] =
+ kzalloc(sizeof(struct sport_uart_port), GFP_KERNEL);
+ sport = bfin_sport_uart_ports[pdev->id];
+ if (!sport) {
+ dev_err(&pdev->dev,
+ "Fail to malloc sport_uart_port\n");
+ return -ENOMEM;
+ }
+
+ ret = peripheral_request_list(
+ (unsigned short *)pdev->dev.platform_data, DRV_NAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Fail to request SPORT peripherals\n");
+ goto out_error_free_mem;
+ }
+
+ spin_lock_init(&sport->port.lock);
+ sport->port.fifosize = SPORT_TX_FIFO_SIZE;
+ sport->port.ops = &sport_uart_ops;
+ sport->port.line = pdev->id;
+ sport->port.iotype = UPIO_MEM;
+ sport->port.flags = UPF_BOOT_AUTOCONF;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ ret = -ENOENT;
+ goto out_error_free_peripherals;
+ }
+
+ sport->port.membase = ioremap(res->start, resource_size(res));
+ if (!sport->port.membase) {
+ dev_err(&pdev->dev, "Cannot map sport IO\n");
+ ret = -ENXIO;
+ goto out_error_free_peripherals;
+ }
+ sport->port.mapbase = res->start;
+
+ sport->port.irq = platform_get_irq(pdev, 0);
+ if ((int)sport->port.irq < 0) {
+ dev_err(&pdev->dev, "No sport RX/TX IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+
+ sport->err_irq = platform_get_irq(pdev, 1);
+ if (sport->err_irq < 0) {
+ dev_err(&pdev->dev, "No sport status IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (res == NULL)
+ sport->cts_pin = -1;
+ else
+ sport->cts_pin = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+ if (res == NULL)
+ sport->rts_pin = -1;
+ else
+ sport->rts_pin = res->start;
+
+ if (sport->rts_pin >= 0)
+ gpio_request(sport->rts_pin, DRV_NAME);
+#endif
+ }
+
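+ /*
+ * When probed as an early platform device for the console, skip
+ * registration with the serial core here; the port is added later,
+ * on the regular platform probe pass.
+ */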
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+ if (!is_early_platform_device(pdev)) {
+#endif
+ sport = bfin_sport_uart_ports[pdev->id];
+ sport->port.dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, sport);
+ ret = uart_add_one_port(&sport_uart_reg, &sport->port);
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+ }
+#endif
+ if (!ret)
+ return 0;
+
+ if (sport) {
+out_error_unmap:
+ iounmap(sport->port.membase);
+out_error_free_peripherals:
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+out_error_free_mem:
+ kfree(sport);
+ bfin_sport_uart_ports[pdev->id] = NULL;
+ }
+
+ return ret;
+}
+
+static int __devexit sport_uart_remove(struct platform_device *pdev)
+{
+ struct sport_uart_port *sport = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s enter\n", __func__);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (sport) {
+ uart_remove_one_port(&sport_uart_reg, &sport->port);
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ if (sport->rts_pin >= 0)
+ gpio_free(sport->rts_pin);
+#endif
+ iounmap(sport->port.membase);
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+ kfree(sport);
+ bfin_sport_uart_ports[pdev->id] = NULL;
+ }
+
+ return 0;
+}
+
+static struct platform_driver sport_uart_driver = {
+ .probe = sport_uart_probe,
+ .remove = __devexit_p(sport_uart_remove),
+ .driver = {
+ .name = DRV_NAME,
+#ifdef CONFIG_PM
+ .pm = &bfin_sport_uart_dev_pm_ops,
+#endif
+ },
+};
+
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+static __initdata struct early_platform_driver early_sport_uart_driver = {
+ .class_str = CLASS_BFIN_SPORT_CONSOLE,
+ .pdrv = &sport_uart_driver,
+ .requested_id = EARLY_PLATFORM_ID_UNSET,
+};
+
+static int __init sport_uart_rs_console_init(void)
+{
+ early_platform_driver_register(&early_sport_uart_driver, DRV_NAME);
+
+ early_platform_driver_probe(CLASS_BFIN_SPORT_CONSOLE,
+ BFIN_SPORT_UART_MAX_PORTS, 0);
+
+ register_console(&sport_uart_console);
+
+ return 0;
+}
+console_initcall(sport_uart_rs_console_init);
+#endif
+
+static int __init sport_uart_init(void)
+{
+ int ret;
+
+ pr_info("Blackfin uart over sport driver\n");
+
+ ret = uart_register_driver(&sport_uart_reg);
+ if (ret) {
+ pr_err("failed to register %s:%d\n",
+ sport_uart_reg.driver_name, ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&sport_uart_driver);
+ if (ret) {
+ pr_err("failed to register sport uart driver:%d\n", ret);
+ uart_unregister_driver(&sport_uart_reg);
+ }
+
+ return ret;
+}
+module_init(sport_uart_init);
+
+static void __exit sport_uart_exit(void)
+{
+ platform_driver_unregister(&sport_uart_driver);
+ uart_unregister_driver(&sport_uart_reg);
+}
+module_exit(sport_uart_exit);
+
+MODULE_AUTHOR("Sonic Zhang, Roy Huang");
+MODULE_DESCRIPTION("Blackfin serial over SPORT driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/bfin_sport_uart.h b/drivers/tty/serial/bfin_sport_uart.h
new file mode 100644
index 00000000..6d06ce1d
--- /dev/null
+++ b/drivers/tty/serial/bfin_sport_uart.h
@@ -0,0 +1,86 @@
+/*
+ * Blackfin On-Chip Sport Emulated UART Driver
+ *
+ * Copyright 2006-2008 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+/*
+ * This driver and the supported hardware follow ADI application note EE-191:
+ * http://www.analog.com/static/imported-files/application_notes/EE191.pdf
+ * The application note describes how to implement a UART on a SHARC DSP,
+ * but this driver is implemented for the Blackfin processor.
+ * Transmit Frame Sync is not used by this driver to transfer data out.
+ */
+
+#ifndef _BFIN_SPORT_UART_H
+#define _BFIN_SPORT_UART_H
+
+#define OFFSET_TCR1 0x00 /* Transmit Configuration 1 Register */
+#define OFFSET_TCR2 0x04 /* Transmit Configuration 2 Register */
+#define OFFSET_TCLKDIV 0x08 /* Transmit Serial Clock Divider Register */
+#define OFFSET_TFSDIV 0x0C /* Transmit Frame Sync Divider Register */
+#define OFFSET_TX 0x10 /* Transmit Data Register */
+#define OFFSET_RX 0x18 /* Receive Data Register */
+#define OFFSET_RCR1 0x20 /* Receive Configuration 1 Register */
+#define OFFSET_RCR2 0x24 /* Receive Configuration 2 Register */
+#define OFFSET_RCLKDIV 0x28 /* Receive Serial Clock Divider Register */
+#define OFFSET_RFSDIV 0x2c /* Receive Frame Sync Divider Register */
+#define OFFSET_STAT 0x30 /* Status Register */
+
+#define SPORT_GET_TCR1(sport) bfin_read16(((sport)->port.membase + OFFSET_TCR1))
+#define SPORT_GET_TCR2(sport) bfin_read16(((sport)->port.membase + OFFSET_TCR2))
+#define SPORT_GET_TCLKDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_TCLKDIV))
+#define SPORT_GET_TFSDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_TFSDIV))
+#define SPORT_GET_TX(sport) bfin_read16(((sport)->port.membase + OFFSET_TX))
+#define SPORT_GET_RX(sport) bfin_read16(((sport)->port.membase + OFFSET_RX))
+/*
+ * If another interrupt fires while doing a 32-bit read from RX FIFO,
+ * a fake RX underflow error will be generated. So disable interrupts
+ * to prevent interruption while reading the FIFO.
+ */
+#define SPORT_GET_RX32(sport) \
+({ \
+ unsigned int __ret; \
+ if (ANOMALY_05000473) \
+ local_irq_disable(); \
+ __ret = bfin_read32((sport)->port.membase + OFFSET_RX); \
+ if (ANOMALY_05000473) \
+ local_irq_enable(); \
+ __ret; \
+})
+#define SPORT_GET_RCR1(sport) bfin_read16(((sport)->port.membase + OFFSET_RCR1))
+#define SPORT_GET_RCR2(sport) bfin_read16(((sport)->port.membase + OFFSET_RCR2))
+#define SPORT_GET_RCLKDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_RCLKDIV))
+#define SPORT_GET_RFSDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_RFSDIV))
+#define SPORT_GET_STAT(sport) bfin_read16(((sport)->port.membase + OFFSET_STAT))
+
+#define SPORT_PUT_TCR1(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TCR1), v)
+#define SPORT_PUT_TCR2(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TCR2), v)
+#define SPORT_PUT_TCLKDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TCLKDIV), v)
+#define SPORT_PUT_TFSDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TFSDIV), v)
+#define SPORT_PUT_TX(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TX), v)
+#define SPORT_PUT_RX(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RX), v)
+#define SPORT_PUT_RCR1(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCR1), v)
+#define SPORT_PUT_RCR2(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCR2), v)
+#define SPORT_PUT_RCLKDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCLKDIV), v)
+#define SPORT_PUT_RFSDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RFSDIV), v)
+#define SPORT_PUT_STAT(sport, v) bfin_write16(((sport)->port.membase + OFFSET_STAT), v)
+
+#define SPORT_TX_FIFO_SIZE 8
+
+#define SPORT_UART_GET_CTS(x) gpio_get_value(x->cts_pin)
+#define SPORT_UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
+#define SPORT_UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
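+/* RTS/CTS are handled as active-low GPIO lines, hence the inverted values above. */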
+
+#if defined(CONFIG_SERIAL_BFIN_SPORT0_UART_CTSRTS) \
+ || defined(CONFIG_SERIAL_BFIN_SPORT1_UART_CTSRTS) \
+ || defined(CONFIG_SERIAL_BFIN_SPORT2_UART_CTSRTS) \
+ || defined(CONFIG_SERIAL_BFIN_SPORT3_UART_CTSRTS)
+# define CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+#endif
+
+#endif /* _BFIN_SPORT_UART_H */
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
new file mode 100644
index 00000000..836fe273
--- /dev/null
+++ b/drivers/tty/serial/clps711x.c
@@ -0,0 +1,579 @@
+/*
+ * Driver for CLPS711x serial ports
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * Copyright 1999 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#if defined(CONFIG_SERIAL_CLPS711X_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <asm/irq.h>
+#include <asm/hardware/clps7111.h>
+
+#define UART_NR 2
+
+#define SERIAL_CLPS711X_MAJOR 204
+#define SERIAL_CLPS711X_MINOR 40
+#define SERIAL_CLPS711X_NR UART_NR
+
+/*
+ * We use the relevant SYSCON register as a base address for these ports.
+ */
+#define UBRLCR(port) ((port)->iobase + UBRLCR1 - SYSCON1)
+#define UARTDR(port) ((port)->iobase + UARTDR1 - SYSCON1)
+#define SYSFLG(port) ((port)->iobase + SYSFLG1 - SYSCON1)
+#define SYSCON(port) ((port)->iobase + SYSCON1 - SYSCON1)
+
+#define TX_IRQ(port) ((port)->irq)
+#define RX_IRQ(port) ((port)->irq + 1)
+
+#define UART_ANY_ERR (UARTDR_FRMERR | UARTDR_PARERR | UARTDR_OVERR)
+
+#define tx_enabled(port) ((port)->unused[0])
+
+static void clps711xuart_stop_tx(struct uart_port *port)
+{
+ if (tx_enabled(port)) {
+ disable_irq(TX_IRQ(port));
+ tx_enabled(port) = 0;
+ }
+}
+
+static void clps711xuart_start_tx(struct uart_port *port)
+{
+ if (!tx_enabled(port)) {
+ enable_irq(TX_IRQ(port));
+ tx_enabled(port) = 1;
+ }
+}
+
+static void clps711xuart_stop_rx(struct uart_port *port)
+{
+ disable_irq(RX_IRQ(port));
+}
+
+static void clps711xuart_enable_ms(struct uart_port *port)
+{
+}
+
+static irqreturn_t clps711xuart_int_rx(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int status, ch, flg;
+
+ status = clps_readl(SYSFLG(port));
+ while (!(status & SYSFLG_URXFE)) {
+ ch = clps_readl(UARTDR(port));
+
+ port->icount.rx++;
+
+ flg = TTY_NORMAL;
+
+ /*
+ * Note that the error handling code is
+ * out of the main execution path
+ */
+ if (unlikely(ch & UART_ANY_ERR)) {
+ if (ch & UARTDR_PARERR)
+ port->icount.parity++;
+ else if (ch & UARTDR_FRMERR)
+ port->icount.frame++;
+ if (ch & UARTDR_OVERR)
+ port->icount.overrun++;
+
+ ch &= port->read_status_mask;
+
+ if (ch & UARTDR_PARERR)
+ flg = TTY_PARITY;
+ else if (ch & UARTDR_FRMERR)
+ flg = TTY_FRAME;
+
+#ifdef SUPPORT_SYSRQ
+ port->sysrq = 0;
+#endif
+ }
+
+ if (uart_handle_sysrq_char(port, ch))
+ goto ignore_char;
+
+ /*
+ * CHECK: does overrun affect the current character?
+ * ASSUMPTION: it does not.
+ */
+ uart_insert_char(port, ch, UARTDR_OVERR, ch, flg);
+
+ ignore_char:
+ status = clps_readl(SYSFLG(port));
+ }
+ tty_flip_buffer_push(tty);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ struct circ_buf *xmit = &port->state->xmit;
+ int count;
+
+ if (port->x_char) {
+ clps_writel(port->x_char, UARTDR(port));
+ port->icount.tx++;
+ port->x_char = 0;
+ return IRQ_HANDLED;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+ goto disable_tx_irq;
+
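+	/* Refill at most half the FIFO per TX interrupt. */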
+ count = port->fifosize >> 1;
+ do {
+ clps_writel(xmit->buf[xmit->tail], UARTDR(port));
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit)) {
+ disable_tx_irq:
+ disable_irq_nosync(TX_IRQ(port));
+ tx_enabled(port) = 0;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int clps711xuart_tx_empty(struct uart_port *port)
+{
+ unsigned int status = clps_readl(SYSFLG(port));
+ return status & SYSFLG_UBUSY ? 0 : TIOCSER_TEMT;
+}
+
+static unsigned int clps711xuart_get_mctrl(struct uart_port *port)
+{
+ unsigned int port_addr;
+ unsigned int result = 0;
+ unsigned int status;
+
+ port_addr = SYSFLG(port);
+ if (port_addr == SYSFLG1) {
+ status = clps_readl(SYSFLG1);
+ if (status & SYSFLG1_DCD)
+ result |= TIOCM_CAR;
+ if (status & SYSFLG1_DSR)
+ result |= TIOCM_DSR;
+ if (status & SYSFLG1_CTS)
+ result |= TIOCM_CTS;
+ }
+
+ return result;
+}
+
+static void
+clps711xuart_set_mctrl_null(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static void clps711xuart_break_ctl(struct uart_port *port, int break_state)
+{
+ unsigned long flags;
+ unsigned int ubrlcr;
+
+ spin_lock_irqsave(&port->lock, flags);
+ ubrlcr = clps_readl(UBRLCR(port));
+ if (break_state == -1)
+ ubrlcr |= UBRLCR_BREAK;
+ else
+ ubrlcr &= ~UBRLCR_BREAK;
+ clps_writel(ubrlcr, UBRLCR(port));
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int clps711xuart_startup(struct uart_port *port)
+{
+ unsigned int syscon;
+ int retval;
+
+ tx_enabled(port) = 1;
+
+ /*
+ * Allocate the IRQs
+ */
+ retval = request_irq(TX_IRQ(port), clps711xuart_int_tx, 0,
+ "clps711xuart_tx", port);
+ if (retval)
+ return retval;
+
+ retval = request_irq(RX_IRQ(port), clps711xuart_int_rx, 0,
+ "clps711xuart_rx", port);
+ if (retval) {
+ free_irq(TX_IRQ(port), port);
+ return retval;
+ }
+
+ /*
+ * enable the port
+ */
+ syscon = clps_readl(SYSCON(port));
+ syscon |= SYSCON_UARTEN;
+ clps_writel(syscon, SYSCON(port));
+
+ return 0;
+}
+
+static void clps711xuart_shutdown(struct uart_port *port)
+{
+ unsigned int ubrlcr, syscon;
+
+ /*
+ * Free the interrupt
+ */
+ free_irq(TX_IRQ(port), port); /* TX interrupt */
+ free_irq(RX_IRQ(port), port); /* RX interrupt */
+
+ /*
+ * disable the port
+ */
+ syscon = clps_readl(SYSCON(port));
+ syscon &= ~SYSCON_UARTEN;
+ clps_writel(syscon, SYSCON(port));
+
+ /*
+ * disable break condition and fifos
+ */
+ ubrlcr = clps_readl(UBRLCR(port));
+ ubrlcr &= ~(UBRLCR_FIFOEN | UBRLCR_BREAK);
+ clps_writel(ubrlcr, UBRLCR(port));
+}
+
+static void
+clps711xuart_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int ubrlcr, baud, quot;
+ unsigned long flags;
+
+ /*
+ * We don't implement CREAD.
+ */
+ termios->c_cflag |= CREAD;
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+ quot = uart_get_divisor(port, baud);
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ ubrlcr = UBRLCR_WRDLEN5;
+ break;
+ case CS6:
+ ubrlcr = UBRLCR_WRDLEN6;
+ break;
+ case CS7:
+ ubrlcr = UBRLCR_WRDLEN7;
+ break;
+ default: /* CS8 */
+ ubrlcr = UBRLCR_WRDLEN8;
+ break;
+ }
+ if (termios->c_cflag & CSTOPB)
+ ubrlcr |= UBRLCR_XSTOP;
+ if (termios->c_cflag & PARENB) {
+ ubrlcr |= UBRLCR_PRTEN;
+ if (!(termios->c_cflag & PARODD))
+ ubrlcr |= UBRLCR_EVENPRT;
+ }
+ if (port->fifosize > 1)
+ ubrlcr |= UBRLCR_FIFOEN;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ port->read_status_mask = UARTDR_OVERR;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UARTDR_PARERR | UARTDR_FRMERR;
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= UARTDR_FRMERR | UARTDR_PARERR;
+ if (termios->c_iflag & IGNBRK) {
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= UARTDR_OVERR;
+ }
+
+ quot -= 1;
+
+ clps_writel(ubrlcr | quot, UBRLCR(port));
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *clps711xuart_type(struct uart_port *port)
+{
+ return port->type == PORT_CLPS711X ? "CLPS711x" : NULL;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void clps711xuart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE)
+ port->type = PORT_CLPS711X;
+}
+
+static void clps711xuart_release_port(struct uart_port *port)
+{
+}
+
+static int clps711xuart_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static struct uart_ops clps711x_pops = {
+ .tx_empty = clps711xuart_tx_empty,
+ .set_mctrl = clps711xuart_set_mctrl_null,
+ .get_mctrl = clps711xuart_get_mctrl,
+ .stop_tx = clps711xuart_stop_tx,
+ .start_tx = clps711xuart_start_tx,
+ .stop_rx = clps711xuart_stop_rx,
+ .enable_ms = clps711xuart_enable_ms,
+ .break_ctl = clps711xuart_break_ctl,
+ .startup = clps711xuart_startup,
+ .shutdown = clps711xuart_shutdown,
+ .set_termios = clps711xuart_set_termios,
+ .type = clps711xuart_type,
+ .config_port = clps711xuart_config_port,
+ .release_port = clps711xuart_release_port,
+ .request_port = clps711xuart_request_port,
+};
+
+static struct uart_port clps711x_ports[UART_NR] = {
+ {
+ .iobase = SYSCON1,
+ .irq = IRQ_UTXINT1, /* IRQ_URXINT1, IRQ_UMSINT */
+ .uartclk = 3686400,
+ .fifosize = 16,
+ .ops = &clps711x_pops,
+ .line = 0,
+ .flags = UPF_BOOT_AUTOCONF,
+ },
+ {
+ .iobase = SYSCON2,
+ .irq = IRQ_UTXINT2, /* IRQ_URXINT2 */
+ .uartclk = 3686400,
+ .fifosize = 16,
+ .ops = &clps711x_pops,
+ .line = 1,
+ .flags = UPF_BOOT_AUTOCONF,
+ }
+};
+
+#ifdef CONFIG_SERIAL_CLPS711X_CONSOLE
+static void clps711xuart_console_putchar(struct uart_port *port, int ch)
+{
+ while (clps_readl(SYSFLG(port)) & SYSFLG_UTXFF)
+ barrier();
+ clps_writel(ch, UARTDR(port));
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ *
+ * Note that this is called with interrupts already disabled
+ */
+static void
+clps711xuart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port = clps711x_ports + co->index;
+ unsigned int status, syscon;
+
+ /*
+ * Ensure that the port is enabled.
+ */
+ syscon = clps_readl(SYSCON(port));
+ clps_writel(syscon | SYSCON_UARTEN, SYSCON(port));
+
+ uart_console_write(port, s, count, clps711xuart_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the uart state.
+ */
+ do {
+ status = clps_readl(SYSFLG(port));
+ } while (status & SYSFLG_UBUSY);
+
+ clps_writel(syscon, SYSCON(port));
+}
+
+static void __init
+clps711xuart_console_get_options(struct uart_port *port, int *baud,
+ int *parity, int *bits)
+{
+ if (clps_readl(SYSCON(port)) & SYSCON_UARTEN) {
+ unsigned int ubrlcr, quot;
+
+ ubrlcr = clps_readl(UBRLCR(port));
+
+ *parity = 'n';
+ if (ubrlcr & UBRLCR_PRTEN) {
+ if (ubrlcr & UBRLCR_EVENPRT)
+ *parity = 'e';
+ else
+ *parity = 'o';
+ }
+
+ if ((ubrlcr & UBRLCR_WRDLEN_MASK) == UBRLCR_WRDLEN7)
+ *bits = 7;
+ else
+ *bits = 8;
+
+ quot = ubrlcr & UBRLCR_BAUD_MASK;
+ *baud = port->uartclk / (16 * (quot + 1));
+ }
+}
+
+static int __init clps711xuart_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 38400;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ port = uart_get_console(clps711x_ports, UART_NR, co);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ clps711xuart_console_get_options(port, &baud, &parity, &bits);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver clps711x_reg;
+static struct console clps711x_console = {
+ .name = "ttyCL",
+ .write = clps711xuart_console_write,
+ .device = uart_console_device,
+ .setup = clps711xuart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &clps711x_reg,
+};
+
+static int __init clps711xuart_console_init(void)
+{
+ register_console(&clps711x_console);
+ return 0;
+}
+console_initcall(clps711xuart_console_init);
+
+#define CLPS711X_CONSOLE &clps711x_console
+#else
+#define CLPS711X_CONSOLE NULL
+#endif
+
+static struct uart_driver clps711x_reg = {
+ .driver_name = "ttyCL",
+ .dev_name = "ttyCL",
+ .major = SERIAL_CLPS711X_MAJOR,
+ .minor = SERIAL_CLPS711X_MINOR,
+ .nr = UART_NR,
+
+ .cons = CLPS711X_CONSOLE,
+};
+
+static int __init clps711xuart_init(void)
+{
+ int ret, i;
+
+ printk(KERN_INFO "Serial: CLPS711x driver\n");
+
+ ret = uart_register_driver(&clps711x_reg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < UART_NR; i++)
+ uart_add_one_port(&clps711x_reg, &clps711x_ports[i]);
+
+ return 0;
+}
+
+static void __exit clps711xuart_exit(void)
+{
+ int i;
+
+ for (i = 0; i < UART_NR; i++)
+ uart_remove_one_port(&clps711x_reg, &clps711x_ports[i]);
+
+ uart_unregister_driver(&clps711x_reg);
+}
+
+module_init(clps711xuart_init);
+module_exit(clps711xuart_exit);
+
+MODULE_AUTHOR("Deep Blue Solutions Ltd");
+MODULE_DESCRIPTION("CLPS-711x generic serial driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV(SERIAL_CLPS711X_MAJOR, SERIAL_CLPS711X_MINOR);
diff --git a/drivers/tty/serial/cpm_uart/Makefile b/drivers/tty/serial/cpm_uart/Makefile
new file mode 100644
index 00000000..e072724e
--- /dev/null
+++ b/drivers/tty/serial/cpm_uart/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the CPM (SCC/SMC) UART driver
+#
+
+obj-$(CONFIG_SERIAL_CPM) += cpm_uart.o
+
+# Select the correct platform objects.
+cpm_uart-objs-$(CONFIG_CPM2) += cpm_uart_cpm2.o
+cpm_uart-objs-$(CONFIG_8xx) += cpm_uart_cpm1.o
+
+cpm_uart-objs := cpm_uart_core.o $(cpm_uart-objs-y)
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
new file mode 100644
index 00000000..cf34d26f
--- /dev/null
+++ b/drivers/tty/serial/cpm_uart/cpm_uart.h
@@ -0,0 +1,145 @@
+/*
+ * Driver for CPM (SCC/SMC) serial ports
+ *
+ * Copyright (C) 2004 Freescale Semiconductor, Inc.
+ *
+ * 2006 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+#ifndef CPM_UART_H
+#define CPM_UART_H
+
+#include <linux/platform_device.h>
+#include <linux/fs_uart_pd.h>
+
+#if defined(CONFIG_CPM2)
+#include "cpm_uart_cpm2.h"
+#elif defined(CONFIG_8xx)
+#include "cpm_uart_cpm1.h"
+#endif
+
+#define SERIAL_CPM_MAJOR 204
+#define SERIAL_CPM_MINOR 46
+
+#define IS_SMC(pinfo) (pinfo->flags & FLAG_SMC)
+#define IS_DISCARDING(pinfo) (pinfo->flags & FLAG_DISCARDING)
+#define FLAG_DISCARDING 0x00000004 /* when set, don't discard */
+#define FLAG_SMC 0x00000002
+#define FLAG_CONSOLE 0x00000001
+
+#define UART_SMC1 fsid_smc1_uart
+#define UART_SMC2 fsid_smc2_uart
+#define UART_SCC1 fsid_scc1_uart
+#define UART_SCC2 fsid_scc2_uart
+#define UART_SCC3 fsid_scc3_uart
+#define UART_SCC4 fsid_scc4_uart
+
+#define UART_NR fs_uart_nr
+
+#define RX_NUM_FIFO 4
+#define RX_BUF_SIZE 32
+#define TX_NUM_FIFO 4
+#define TX_BUF_SIZE 32
+
+#define SCC_WAIT_CLOSING 100
+
+#define GPIO_CTS 0
+#define GPIO_RTS 1
+#define GPIO_DCD 2
+#define GPIO_DSR 3
+#define GPIO_DTR 4
+#define GPIO_RI 5
+
+#define NUM_GPIOS (GPIO_RI+1)
+
+struct uart_cpm_port {
+ struct uart_port port;
+ u16 rx_nrfifos;
+ u16 rx_fifosize;
+ u16 tx_nrfifos;
+ u16 tx_fifosize;
+ smc_t __iomem *smcp;
+ smc_uart_t __iomem *smcup;
+ scc_t __iomem *sccp;
+ scc_uart_t __iomem *sccup;
+ cbd_t __iomem *rx_bd_base;
+ cbd_t __iomem *rx_cur;
+ cbd_t __iomem *tx_bd_base;
+ cbd_t __iomem *tx_cur;
+ unsigned char *tx_buf;
+ unsigned char *rx_buf;
+ u32 flags;
+ struct clk *clk;
+ u8 brg;
+ uint dp_addr;
+ void *mem_addr;
+ dma_addr_t dma_addr;
+ u32 mem_size;
+ /* wait on close if needed */
+ int wait_closing;
+ /* value to combine with opcode to form cpm command */
+ u32 command;
+ int gpios[NUM_GPIOS];
+};
+
+extern int cpm_uart_nr;
+extern struct uart_cpm_port cpm_uart_ports[UART_NR];
+
+/* these are located in their respective files */
+void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd);
+void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port,
+ struct device_node *np);
+void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram);
+int cpm_uart_init_portdesc(void);
+int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con);
+void cpm_uart_freebuf(struct uart_cpm_port *pinfo);
+
+void smc1_lineif(struct uart_cpm_port *pinfo);
+void smc2_lineif(struct uart_cpm_port *pinfo);
+void scc1_lineif(struct uart_cpm_port *pinfo);
+void scc2_lineif(struct uart_cpm_port *pinfo);
+void scc3_lineif(struct uart_cpm_port *pinfo);
+void scc4_lineif(struct uart_cpm_port *pinfo);
+
+/*
+ * Virtual to physical address translation
+ */
+static inline unsigned long cpu2cpm_addr(void *addr,
+ struct uart_cpm_port *pinfo)
+{
+ int offset;
+ u32 val = (u32)addr;
+ u32 mem = (u32)pinfo->mem_addr;
+ /* sanity check */
+ if (likely(val >= mem && val < mem + pinfo->mem_size)) {
+ offset = val - mem;
+ return pinfo->dma_addr + offset;
+ }
+ /* something nasty happened */
+ BUG();
+ return 0;
+}
+
+static inline void *cpm2cpu_addr(unsigned long addr,
+ struct uart_cpm_port *pinfo)
+{
+ int offset;
+ u32 val = addr;
+ u32 dma = (u32)pinfo->dma_addr;
+ /* sanity check */
+ if (likely(val >= dma && val < dma + pinfo->mem_size)) {
+ offset = val - dma;
+ return pinfo->mem_addr + offset;
+ }
+ /* something nasty happened */
+ BUG();
+ return NULL;
+}
+
+
+#endif /* CPM_UART_H */
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
new file mode 100644
index 00000000..9488da74
--- /dev/null
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -0,0 +1,1440 @@
+/*
+ * Driver for CPM (SCC/SMC) serial ports; core driver
+ *
+ * Based on arch/ppc/cpm2_io/uart.c by Dan Malek
+ * Based on ppc8xx.c by Thomas Gleixner
+ * Based on drivers/serial/amba.c by Russell King
+ *
+ * Maintainer: Kumar Gala (galak@kernel.crashing.org) (CPM2)
+ * Pantelis Antoniou (panto@intracom.gr) (CPM1)
+ *
+ * Copyright (C) 2004, 2007 Freescale Semiconductor, Inc.
+ * (C) 2004 Intracom, S.A.
+ * (C) 2005-2006 MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/device.h>
+#include <linux/bootmem.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs_uart_pd.h>
+#include <linux/of_platform.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/clk.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/delay.h>
+#include <asm/fs_pd.h>
+#include <asm/udbg.h>
+
+#if defined(CONFIG_SERIAL_CPM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+#include <linux/kernel.h>
+
+#include "cpm_uart.h"
+
+
+/**************************************************************/
+
+static int cpm_uart_tx_pump(struct uart_port *port);
+static void cpm_uart_init_smc(struct uart_cpm_port *pinfo);
+static void cpm_uart_init_scc(struct uart_cpm_port *pinfo);
+static void cpm_uart_initbd(struct uart_cpm_port *pinfo);
+
+/**************************************************************/
+
+#define HW_BUF_SPD_THRESHOLD 9600
+
+/*
+ * Check whether all transmit buffers have been processed: walk the TX BD
+ * ring and report empty only if no descriptor before the WRAP descriptor
+ * is still marked READY.
+ */
+static unsigned int cpm_uart_tx_empty(struct uart_port *port)
+{
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ cbd_t __iomem *bdp = pinfo->tx_bd_base;
+ int ret = 0;
+
+ while (1) {
+ if (in_be16(&bdp->cbd_sc) & BD_SC_READY)
+ break;
+
+ if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) {
+ ret = TIOCSER_TEMT;
+ break;
+ }
+ bdp++;
+ }
+
+ pr_debug("CPM uart[%d]:tx_empty: %d\n", port->line, ret);
+
+ return ret;
+}
+
+static void cpm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+
+ if (pinfo->gpios[GPIO_RTS] >= 0)
+ gpio_set_value(pinfo->gpios[GPIO_RTS], !(mctrl & TIOCM_RTS));
+
+ if (pinfo->gpios[GPIO_DTR] >= 0)
+ gpio_set_value(pinfo->gpios[GPIO_DTR], !(mctrl & TIOCM_DTR));
+}
+
+static unsigned int cpm_uart_get_mctrl(struct uart_port *port)
+{
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ unsigned int mctrl = TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+
+ if (pinfo->gpios[GPIO_CTS] >= 0) {
+ if (gpio_get_value(pinfo->gpios[GPIO_CTS]))
+ mctrl &= ~TIOCM_CTS;
+ }
+
+ if (pinfo->gpios[GPIO_DSR] >= 0) {
+ if (gpio_get_value(pinfo->gpios[GPIO_DSR]))
+ mctrl &= ~TIOCM_DSR;
+ }
+
+ if (pinfo->gpios[GPIO_DCD] >= 0) {
+ if (gpio_get_value(pinfo->gpios[GPIO_DCD]))
+ mctrl &= ~TIOCM_CAR;
+ }
+
+ if (pinfo->gpios[GPIO_RI] >= 0) {
+ if (!gpio_get_value(pinfo->gpios[GPIO_RI]))
+ mctrl |= TIOCM_RNG;
+ }
+
+ return mctrl;
+}
+
+/*
+ * Stop transmitter
+ */
+static void cpm_uart_stop_tx(struct uart_port *port)
+{
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ smc_t __iomem *smcp = pinfo->smcp;
+ scc_t __iomem *sccp = pinfo->sccp;
+
+ pr_debug("CPM uart[%d]:stop tx\n", port->line);
+
+ if (IS_SMC(pinfo))
+ clrbits8(&smcp->smc_smcm, SMCM_TX);
+ else
+ clrbits16(&sccp->scc_sccm, UART_SCCM_TX);
+}
+
+/*
+ * Start transmitter
+ */
+static void cpm_uart_start_tx(struct uart_port *port)
+{
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ smc_t __iomem *smcp = pinfo->smcp;
+ scc_t __iomem *sccp = pinfo->sccp;
+
+ pr_debug("CPM uart[%d]:start tx\n", port->line);
+
+ if (IS_SMC(pinfo)) {
+ if (in_8(&smcp->smc_smcm) & SMCM_TX)
+ return;
+ } else {
+ if (in_be16(&sccp->scc_sccm) & UART_SCCM_TX)
+ return;
+ }
+
+ if (cpm_uart_tx_pump(port) != 0) {
+ if (IS_SMC(pinfo)) {
+ setbits8(&smcp->smc_smcm, SMCM_TX);
+ } else {
+ setbits16(&sccp->scc_sccm, UART_SCCM_TX);
+ }
+ }
+}
+
+/*
+ * Stop receiver
+ */
+static void cpm_uart_stop_rx(struct uart_port *port)
+{
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ smc_t __iomem *smcp = pinfo->smcp;
+ scc_t __iomem *sccp = pinfo->sccp;
+
+ pr_debug("CPM uart[%d]:stop rx\n", port->line);
+
+ if (IS_SMC(pinfo))
+ clrbits8(&smcp->smc_smcm, SMCM_RX);
+ else
+ clrbits16(&sccp->scc_sccm, UART_SCCM_RX);
+}
+
+/*
+ * Enable Modem status interrupts
+ */
+static void cpm_uart_enable_ms(struct uart_port *port)
+{
+ pr_debug("CPM uart[%d]:enable ms\n", port->line);
+}
+
+/*
+ * Generate a break.
+ */
+static void cpm_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+
+ pr_debug("CPM uart[%d]:break ctrl, break_state: %d\n", port->line,
+ break_state);
+
+ if (break_state)
+ cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX);
+ else
+ cpm_line_cr_cmd(pinfo, CPM_CR_RESTART_TX);
+}
+
+/*
+ * Transmit characters, refill buffer descriptor, if possible
+ */
+static void cpm_uart_int_tx(struct uart_port *port)
+{
+ pr_debug("CPM uart[%d]:TX INT\n", port->line);
+
+ cpm_uart_tx_pump(port);
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int serial_polled;
+#endif
+
+/*
+ * Receive characters
+ */
+static void cpm_uart_int_rx(struct uart_port *port)
+{
+ int i;
+ unsigned char ch;
+ u8 *cp;
+ struct tty_struct *tty = port->state->port.tty;
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ cbd_t __iomem *bdp;
+ u16 status;
+ unsigned int flg;
+
+ pr_debug("CPM uart[%d]:RX INT\n", port->line);
+
+ /* Just loop through the closed BDs and copy the characters into
+ * the buffer.
+ */
+ bdp = pinfo->rx_cur;
+ for (;;) {
+#ifdef CONFIG_CONSOLE_POLL
+ if (unlikely(serial_polled)) {
+ serial_polled = 0;
+ return;
+ }
+#endif
+ /* get status */
+ status = in_be16(&bdp->cbd_sc);
+ /* If this one is empty, return happy */
+ if (status & BD_SC_EMPTY)
+ break;
+
+ /* get the number of characters and check space in the flip buffer */
+ i = in_be16(&bdp->cbd_datlen);
+
+ /* If there is not enough room in the tty flip buffer, try again
+ * later, on the next RX interrupt or a timeout.
+ */
+ if (tty_buffer_request_room(tty, i) < i) {
+ printk(KERN_WARNING "No room in flip buffer\n");
+ return;
+ }
+
+ /* get pointer */
+ cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
+
+ /* loop through the buffer */
+ while (i-- > 0) {
+ ch = *cp++;
+ port->icount.rx++;
+ flg = TTY_NORMAL;
+
+ if (status &
+ (BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV))
+ goto handle_error;
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+#ifdef CONFIG_CONSOLE_POLL
+ if (unlikely(serial_polled)) {
+ serial_polled = 0;
+ return;
+ }
+#endif
+ error_return:
+ tty_insert_flip_char(tty, ch, flg);
+
+ } /* End while (i--) */
+
+ /* This BD is ready to be used again. Clear the status and get the next one. */
+ clrbits16(&bdp->cbd_sc, BD_SC_BR | BD_SC_FR | BD_SC_PR |
+ BD_SC_OV | BD_SC_ID);
+ setbits16(&bdp->cbd_sc, BD_SC_EMPTY);
+
+ if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+ bdp = pinfo->rx_bd_base;
+ else
+ bdp++;
+
+ } /* End for (;;) */
+
+ /* Write back buffer pointer */
+ pinfo->rx_cur = bdp;
+
+ /* activate BH processing */
+ tty_flip_buffer_push(tty);
+
+ return;
+
+ /* Error processing */
+
+ handle_error:
+ /* Statistics */
+ if (status & BD_SC_BR)
+ port->icount.brk++;
+ if (status & BD_SC_PR)
+ port->icount.parity++;
+ if (status & BD_SC_FR)
+ port->icount.frame++;
+ if (status & BD_SC_OV)
+ port->icount.overrun++;
+
+ /* Mask out ignored conditions */
+ status &= port->read_status_mask;
+
+ /* Handle the remaining ones */
+ if (status & BD_SC_BR)
+ flg = TTY_BREAK;
+ else if (status & BD_SC_PR)
+ flg = TTY_PARITY;
+ else if (status & BD_SC_FR)
+ flg = TTY_FRAME;
+
+ /* overrun does not affect the current character ! */
+ if (status & BD_SC_OV) {
+ ch = 0;
+ flg = TTY_OVERRUN;
+ /* We skip this buffer */
+ /* CHECK: is there really nothing sensible left in this buffer? */
+ /* ASSUMPTION: it contains nothing valid */
+ i = 0;
+ }
+#ifdef SUPPORT_SYSRQ
+ port->sysrq = 0;
+#endif
+ goto error_return;
+}
+
+/*
+ * Asynchronous mode interrupt handler
+ */
+static irqreturn_t cpm_uart_int(int irq, void *data)
+{
+ u8 events;
+ struct uart_port *port = data;
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ smc_t __iomem *smcp = pinfo->smcp;
+ scc_t __iomem *sccp = pinfo->sccp;
+
+ pr_debug("CPM uart[%d]:IRQ\n", port->line);
+
+ if (IS_SMC(pinfo)) {
+ events = in_8(&smcp->smc_smce);
+ out_8(&smcp->smc_smce, events);
+ if (events & SMCM_BRKE)
+ uart_handle_break(port);
+ if (events & SMCM_RX)
+ cpm_uart_int_rx(port);
+ if (events & SMCM_TX)
+ cpm_uart_int_tx(port);
+ } else {
+ events = in_be16(&sccp->scc_scce);
+ out_be16(&sccp->scc_scce, events);
+ if (events & UART_SCCM_BRKE)
+ uart_handle_break(port);
+ if (events & UART_SCCM_RX)
+ cpm_uart_int_rx(port);
+ if (events & UART_SCCM_TX)
+ cpm_uart_int_tx(port);
+ }
+ return (events) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int cpm_uart_startup(struct uart_port *port)
+{
+ int retval;
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+
+ pr_debug("CPM uart[%d]:startup\n", port->line);
+
+ /* If the port is not the console, make sure rx is disabled. */
+ if (!(pinfo->flags & FLAG_CONSOLE)) {
+ /* Disable UART rx */
+ if (IS_SMC(pinfo)) {
+ clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN);
+ clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX);
+ } else {
+ clrbits32(&pinfo->sccp->scc_gsmrl, SCC_GSMRL_ENR);
+ clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX);
+ }
+ cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+ }
+ /* Install interrupt handler. */
+ retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port);
+ if (retval)
+ return retval;
+
+ /* Startup rx-int */
+ if (IS_SMC(pinfo)) {
+ setbits8(&pinfo->smcp->smc_smcm, SMCM_RX);
+ setbits16(&pinfo->smcp->smc_smcmr, (SMCMR_REN | SMCMR_TEN));
+ } else {
+ setbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX);
+ setbits32(&pinfo->sccp->scc_gsmrl, (SCC_GSMRL_ENR | SCC_GSMRL_ENT));
+ }
+
+ return 0;
+}
+
+inline void cpm_uart_wait_until_send(struct uart_cpm_port *pinfo)
+{
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(pinfo->wait_closing);
+}
+
+/*
+ * Shutdown the uart
+ */
+static void cpm_uart_shutdown(struct uart_port *port)
+{
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+
+ pr_debug("CPM uart[%d]:shutdown\n", port->line);
+
+ /* free interrupt handler */
+ free_irq(port->irq, port);
+
+ /* If the port is not the console, disable Rx and Tx. */
+ if (!(pinfo->flags & FLAG_CONSOLE)) {
+ /* Wait for all the BDs marked sent */
+ while(!cpm_uart_tx_empty(port)) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(2);
+ }
+
+ if (pinfo->wait_closing)
+ cpm_uart_wait_until_send(pinfo);
+
+ /* Stop uarts */
+ if (IS_SMC(pinfo)) {
+ smc_t __iomem *smcp = pinfo->smcp;
+ clrbits16(&smcp->smc_smcmr, SMCMR_REN | SMCMR_TEN);
+ clrbits8(&smcp->smc_smcm, SMCM_RX | SMCM_TX);
+ } else {
+ scc_t __iomem *sccp = pinfo->sccp;
+ clrbits32(&sccp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+ clrbits16(&sccp->scc_sccm, UART_SCCM_TX | UART_SCCM_RX);
+ }
+
+ /* Shut them really down and reinit buffer descriptors */
+ if (IS_SMC(pinfo)) {
+ out_be16(&pinfo->smcup->smc_brkcr, 0);
+ cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX);
+ } else {
+ out_be16(&pinfo->sccup->scc_brkcr, 0);
+ cpm_line_cr_cmd(pinfo, CPM_CR_GRA_STOP_TX);
+ }
+
+ cpm_uart_initbd(pinfo);
+ }
+}
+
+static void cpm_uart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ int baud;
+ unsigned long flags;
+ u16 cval, scval, prev_mode;
+ int bits, sbits;
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ smc_t __iomem *smcp = pinfo->smcp;
+ scc_t __iomem *sccp = pinfo->sccp;
+
+ pr_debug("CPM uart[%d]:set_termios\n", port->line);
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
+ if (baud <= HW_BUF_SPD_THRESHOLD ||
+ (pinfo->port.state && pinfo->port.state->port.tty->low_latency))
+ pinfo->rx_fifosize = 1;
+ else
+ pinfo->rx_fifosize = RX_BUF_SIZE;
+
+ /* Character length programmed into the mode register is the
+ * sum of: 1 start bit, number of data bits, 0 or 1 parity bit,
+ * 1 or 2 stop bits, minus 1.
+ * The value 'bits' counts this for us.
+ */
+ cval = 0;
+ scval = 0;
+
+ /* byte size */
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ bits = 5;
+ break;
+ case CS6:
+ bits = 6;
+ break;
+ case CS7:
+ bits = 7;
+ break;
+ case CS8:
+ bits = 8;
+ break;
+ /* Never happens, but GCC is too dumb to figure it out */
+ default:
+ bits = 8;
+ break;
+ }
+ sbits = bits - 5;
+
+ if (termios->c_cflag & CSTOPB) {
+ cval |= SMCMR_SL; /* Two stops */
+ scval |= SCU_PSMR_SL;
+ bits++;
+ }
+
+ if (termios->c_cflag & PARENB) {
+ cval |= SMCMR_PEN;
+ scval |= SCU_PSMR_PEN;
+ bits++;
+ if (!(termios->c_cflag & PARODD)) {
+ cval |= SMCMR_PM_EVEN;
+ scval |= (SCU_PSMR_REVP | SCU_PSMR_TEVP);
+ }
+ }
+
+ /*
+ * Update the timeout
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ /*
+ * Set up parity check flag
+ */
+#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
+
+ port->read_status_mask = (BD_SC_EMPTY | BD_SC_OV);
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= BD_SC_FR | BD_SC_PR;
+ if ((termios->c_iflag & BRKINT) || (termios->c_iflag & PARMRK))
+ port->read_status_mask |= BD_SC_BR;
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= BD_SC_PR | BD_SC_FR;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= BD_SC_BR;
+ /*
+ * If we're ignoring parity and break indicators, ignore
+ * overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= BD_SC_OV;
+ }
+ /*
+ * !!! ignore all characters if CREAD is not set
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ port->read_status_mask &= ~BD_SC_EMPTY;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* The start bit has not been added (don't add it here, since we would
+ * just subtract it again later), and we need to add one for the number
+ * of stop bits (there is always at least one).
+ */
+ bits++;
+ if (IS_SMC(pinfo)) {
+ /*
+ * MRBLR can be changed while an SMC/SCC is operating only
+ * if it is done in a single bus cycle with one 16-bit move
+ * (not two 8-bit bus cycles back-to-back). This occurs when
+ * the cp shifts control to the next RxBD, so the change does
+ * not take effect immediately. To guarantee the exact RxBD
+ * on which the change occurs, change MRBLR only while the
+ * SMC/SCC receiver is disabled.
+ */
+ out_be16(&pinfo->smcup->smc_mrblr, pinfo->rx_fifosize);
+
+ /* Set the mode register. We want to keep a copy of the
+ * enables, because we want to put them back if they were
+ * present.
+ */
+ prev_mode = in_be16(&smcp->smc_smcmr) & (SMCMR_REN | SMCMR_TEN);
+ /* Output in *one* operation, so we don't interrupt RX/TX if they
+ * were already enabled. */
+ out_be16(&smcp->smc_smcmr, smcr_mk_clen(bits) | cval |
+ SMCMR_SM_UART | prev_mode);
+ } else {
+ out_be16(&pinfo->sccup->scc_genscc.scc_mrblr, pinfo->rx_fifosize);
+ out_be16(&sccp->scc_psmr, (sbits << 12) | scval);
+ }
+
+ if (pinfo->clk)
+ clk_set_rate(pinfo->clk, baud);
+ else
+ cpm_set_brg(pinfo->brg - 1, baud);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *cpm_uart_type(struct uart_port *port)
+{
+ pr_debug("CPM uart[%d]:uart_type\n", port->line);
+
+ return port->type == PORT_CPM ? "CPM UART" : NULL;
+}
+
+/*
+ * verify the new serial_struct (for TIOCSSERIAL).
+ */
+static int cpm_uart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ int ret = 0;
+
+ pr_debug("CPM uart[%d]:verify_port\n", port->line);
+
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM)
+ ret = -EINVAL;
+ if (ser->irq < 0 || ser->irq >= nr_irqs)
+ ret = -EINVAL;
+ if (ser->baud_base < 9600)
+ ret = -EINVAL;
+ return ret;
+}
+
+/*
+ * Transmit characters, refill buffer descriptor, if possible
+ */
+static int cpm_uart_tx_pump(struct uart_port *port)
+{
+ cbd_t __iomem *bdp;
+ u8 *p;
+ int count;
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ /* Handle xon/xoff */
+ if (port->x_char) {
+ /* Pick next descriptor and fill from buffer */
+ bdp = pinfo->tx_cur;
+
+ p = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
+
+ *p++ = port->x_char;
+
+ out_be16(&bdp->cbd_datlen, 1);
+ setbits16(&bdp->cbd_sc, BD_SC_READY);
+ /* Get next BD. */
+ if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+ bdp = pinfo->tx_bd_base;
+ else
+ bdp++;
+ pinfo->tx_cur = bdp;
+
+ port->icount.tx++;
+ port->x_char = 0;
+ return 1;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ cpm_uart_stop_tx(port);
+ return 0;
+ }
+
+ /* Pick next descriptor and fill from buffer */
+ bdp = pinfo->tx_cur;
+
+ while (!(in_be16(&bdp->cbd_sc) & BD_SC_READY) &&
+ xmit->tail != xmit->head) {
+ count = 0;
+ p = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
+ while (count < pinfo->tx_fifosize) {
+ *p++ = xmit->buf[xmit->tail];
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ count++;
+ if (xmit->head == xmit->tail)
+ break;
+ }
+ out_be16(&bdp->cbd_datlen, count);
+ setbits16(&bdp->cbd_sc, BD_SC_READY);
+ /* Get next BD. */
+ if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+ bdp = pinfo->tx_bd_base;
+ else
+ bdp++;
+ }
+ pinfo->tx_cur = bdp;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit)) {
+ cpm_uart_stop_tx(port);
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * init buffer descriptors
+ */
+static void cpm_uart_initbd(struct uart_cpm_port *pinfo)
+{
+ int i;
+ u8 *mem_addr;
+ cbd_t __iomem *bdp;
+
+ pr_debug("CPM uart[%d]:initbd\n", pinfo->port.line);
+
+ /* Set the physical address of the host memory
+ * buffers in the buffer descriptors, and the
+ * virtual address for us to work with.
+ */
+ mem_addr = pinfo->mem_addr;
+ bdp = pinfo->rx_cur = pinfo->rx_bd_base;
+ for (i = 0; i < (pinfo->rx_nrfifos - 1); i++, bdp++) {
+ out_be32(&bdp->cbd_bufaddr, cpu2cpm_addr(mem_addr, pinfo));
+ out_be16(&bdp->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT);
+ mem_addr += pinfo->rx_fifosize;
+ }
+
+ out_be32(&bdp->cbd_bufaddr, cpu2cpm_addr(mem_addr, pinfo));
+ out_be16(&bdp->cbd_sc, BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT);
+
+ /* Set the physical address of the host memory
+ * buffers in the buffer descriptors, and the
+ * virtual address for us to work with.
+ */
+ mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize);
+ bdp = pinfo->tx_cur = pinfo->tx_bd_base;
+ for (i = 0; i < (pinfo->tx_nrfifos - 1); i++, bdp++) {
+ out_be32(&bdp->cbd_bufaddr, cpu2cpm_addr(mem_addr, pinfo));
+ out_be16(&bdp->cbd_sc, BD_SC_INTRPT);
+ mem_addr += pinfo->tx_fifosize;
+ }
+
+ out_be32(&bdp->cbd_bufaddr, cpu2cpm_addr(mem_addr, pinfo));
+ out_be16(&bdp->cbd_sc, BD_SC_WRAP | BD_SC_INTRPT);
+}
+
+static void cpm_uart_init_scc(struct uart_cpm_port *pinfo)
+{
+ scc_t __iomem *scp;
+ scc_uart_t __iomem *sup;
+
+ pr_debug("CPM uart[%d]:init_scc\n", pinfo->port.line);
+
+ scp = pinfo->sccp;
+ sup = pinfo->sccup;
+
+ /* Store address */
+ out_be16(&pinfo->sccup->scc_genscc.scc_rbase,
+ (u8 __iomem *)pinfo->rx_bd_base - DPRAM_BASE);
+ out_be16(&pinfo->sccup->scc_genscc.scc_tbase,
+ (u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE);
+
+ /* Set up the uart parameters in the
+ * parameter ram.
+ */
+
+ cpm_set_scc_fcr(sup);
+
+ out_be16(&sup->scc_genscc.scc_mrblr, pinfo->rx_fifosize);
+ out_be16(&sup->scc_maxidl, pinfo->rx_fifosize);
+ out_be16(&sup->scc_brkcr, 1);
+ out_be16(&sup->scc_parec, 0);
+ out_be16(&sup->scc_frmec, 0);
+ out_be16(&sup->scc_nosec, 0);
+ out_be16(&sup->scc_brkec, 0);
+ out_be16(&sup->scc_uaddr1, 0);
+ out_be16(&sup->scc_uaddr2, 0);
+ out_be16(&sup->scc_toseq, 0);
+ out_be16(&sup->scc_char1, 0x8000);
+ out_be16(&sup->scc_char2, 0x8000);
+ out_be16(&sup->scc_char3, 0x8000);
+ out_be16(&sup->scc_char4, 0x8000);
+ out_be16(&sup->scc_char5, 0x8000);
+ out_be16(&sup->scc_char6, 0x8000);
+ out_be16(&sup->scc_char7, 0x8000);
+ out_be16(&sup->scc_char8, 0x8000);
+ out_be16(&sup->scc_rccm, 0xc0ff);
+
+ /* Send the CPM an initialize command.
+ */
+ cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+
+ /* Set UART mode, 8 bit, no parity, one stop.
+ * Enable receive and transmit.
+ */
+ out_be32(&scp->scc_gsmrh, 0);
+ out_be32(&scp->scc_gsmrl,
+ SCC_GSMRL_MODE_UART | SCC_GSMRL_TDCR_16 | SCC_GSMRL_RDCR_16);
+
+ /* Enable rx interrupts and clear all pending events. */
+ out_be16(&scp->scc_sccm, 0);
+ out_be16(&scp->scc_scce, 0xffff);
+ out_be16(&scp->scc_dsr, 0x7e7e);
+ out_be16(&scp->scc_psmr, 0x3000);
+
+ setbits32(&scp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+}
+
+static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
+{
+ smc_t __iomem *sp;
+ smc_uart_t __iomem *up;
+
+ pr_debug("CPM uart[%d]:init_smc\n", pinfo->port.line);
+
+ sp = pinfo->smcp;
+ up = pinfo->smcup;
+
+ /* Store address */
+ out_be16(&pinfo->smcup->smc_rbase,
+ (u8 __iomem *)pinfo->rx_bd_base - DPRAM_BASE);
+ out_be16(&pinfo->smcup->smc_tbase,
+ (u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE);
+
+/*
+ * In case SMC1 is being relocated...
+ */
+#if defined (CONFIG_I2C_SPI_SMC1_UCODE_PATCH)
+ out_be16(&up->smc_rbptr, in_be16(&pinfo->smcup->smc_rbase));
+ out_be16(&up->smc_tbptr, in_be16(&pinfo->smcup->smc_tbase));
+ out_be32(&up->smc_rstate, 0);
+ out_be32(&up->smc_tstate, 0);
+ out_be16(&up->smc_brkcr, 1); /* number of break chars */
+ out_be16(&up->smc_brkec, 0);
+#endif
+
+ /* Set up the uart parameters in the
+ * parameter ram.
+ */
+ cpm_set_smc_fcr(up);
+
+ /* Using idle character time requires some additional tuning. */
+ out_be16(&up->smc_mrblr, pinfo->rx_fifosize);
+ out_be16(&up->smc_maxidl, pinfo->rx_fifosize);
+ out_be16(&up->smc_brklen, 0);
+ out_be16(&up->smc_brkec, 0);
+ out_be16(&up->smc_brkcr, 1);
+
+ cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+
+ /* Set UART mode, 8 bit, no parity, one stop.
+ * Enable receive and transmit.
+ */
+ out_be16(&sp->smc_smcmr, smcr_mk_clen(9) | SMCMR_SM_UART);
+
+	/* Enable only rx interrupts and clear all pending events. */
+ out_8(&sp->smc_smcm, 0);
+ out_8(&sp->smc_smce, 0xff);
+
+ setbits16(&sp->smc_smcmr, SMCMR_REN | SMCMR_TEN);
+}
+
+/*
+ * Initialize port. This is called from early_console stuff
+ * so we have to be careful here !
+ */
+static int cpm_uart_request_port(struct uart_port *port)
+{
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ int ret;
+
+ pr_debug("CPM uart[%d]:request port\n", port->line);
+
+ if (pinfo->flags & FLAG_CONSOLE)
+ return 0;
+
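+	/* Quiesce the channel (mask its interrupts and disable RX/TX)
+	 * before setting up the buffer descriptors.
+	 */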
+ if (IS_SMC(pinfo)) {
+ clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX | SMCM_TX);
+ clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN | SMCMR_TEN);
+ } else {
+ clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_TX | UART_SCCM_RX);
+ clrbits32(&pinfo->sccp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+ }
+
+ ret = cpm_uart_allocbuf(pinfo, 0);
+
+ if (ret)
+ return ret;
+
+ cpm_uart_initbd(pinfo);
+ if (IS_SMC(pinfo))
+ cpm_uart_init_smc(pinfo);
+ else
+ cpm_uart_init_scc(pinfo);
+
+ return 0;
+}
+
+static void cpm_uart_release_port(struct uart_port *port)
+{
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+
+ if (!(pinfo->flags & FLAG_CONSOLE))
+ cpm_uart_freebuf(pinfo);
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void cpm_uart_config_port(struct uart_port *port, int flags)
+{
+ pr_debug("CPM uart[%d]:config_port\n", port->line);
+
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_CPM;
+ cpm_uart_request_port(port);
+ }
+}
+
+#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_CPM_CONSOLE)
+/*
+ * Write a string to the serial port
+ * Note that this is called with interrupts already disabled
+ */
+static void cpm_uart_early_write(struct uart_cpm_port *pinfo,
+ const char *string, u_int count)
+{
+ unsigned int i;
+ cbd_t __iomem *bdp, *bdbase;
+ unsigned char *cpm_outp_addr;
+
+ /* Get the address of the host memory buffer.
+ */
+ bdp = pinfo->tx_cur;
+ bdbase = pinfo->tx_bd_base;
+
+ /*
+ * Now, do each character. This is not as bad as it looks
+ * since this is a holding FIFO and not a transmitting FIFO.
+ * We could add the complexity of filling the entire transmit
+ * buffer, but we would just wait longer between accesses......
+ */
+ for (i = 0; i < count; i++, string++) {
+		/* Wait for the transmit descriptor to come free.
+		 * BD_SC_READY set means the CPM still owns the descriptor
+		 * and is transmitting it, not that it is ready for us to fill.
+		 */
+ while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+ ;
+
+ /* Send the character out.
+ * If the buffer address is in the CPM DPRAM, don't
+ * convert it.
+ */
+ cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
+ pinfo);
+ *cpm_outp_addr = *string;
+
+ out_be16(&bdp->cbd_datlen, 1);
+ setbits16(&bdp->cbd_sc, BD_SC_READY);
+
+ if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+ bdp = bdbase;
+ else
+ bdp++;
+
+ /* if a LF, also do CR... */
+ if (*string == 10) {
+ while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+ ;
+
+ cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
+ pinfo);
+ *cpm_outp_addr = 13;
+
+ out_be16(&bdp->cbd_datlen, 1);
+ setbits16(&bdp->cbd_sc, BD_SC_READY);
+
+ if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
+ bdp = bdbase;
+ else
+ bdp++;
+ }
+ }
+
+	/*
+	 * Finally, wait for the next descriptor in the ring to drain
+	 * before recording it as the new current position.
+	 */
+ while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
+ ;
+
+ pinfo->tx_cur = bdp;
+}
+#endif
+
+#ifdef CONFIG_CONSOLE_POLL
+/* Serial polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+#define GDB_BUF_SIZE 512 /* power of 2, please */
+
+static char poll_buf[GDB_BUF_SIZE];
+static char *pollp;
+static int poll_chars;
+
+static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
+{
+ u_char c, *cp;
+ volatile cbd_t *bdp;
+ int i;
+
+ /* Get the address of the host memory buffer.
+ */
+ bdp = pinfo->rx_cur;
+ while (bdp->cbd_sc & BD_SC_EMPTY)
+ ;
+
+ /* If the buffer address is in the CPM DPRAM, don't
+ * convert it.
+ */
+ cp = cpm2cpu_addr(bdp->cbd_bufaddr, pinfo);
+
+ if (obuf) {
+ i = c = bdp->cbd_datlen;
+ while (i-- > 0)
+ *obuf++ = *cp++;
+ } else
+ c = *cp;
+ bdp->cbd_sc &= ~(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID);
+ bdp->cbd_sc |= BD_SC_EMPTY;
+
+ if (bdp->cbd_sc & BD_SC_WRAP)
+ bdp = pinfo->rx_bd_base;
+ else
+ bdp++;
+ pinfo->rx_cur = (cbd_t *)bdp;
+
+ return (int)c;
+}
+
+static int cpm_get_poll_char(struct uart_port *port)
+{
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+
+ if (!serial_polled) {
+ serial_polled = 1;
+ poll_chars = 0;
+ }
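+	/* Refill the local poll buffer from the next receive BD once it
+	 * is exhausted, then hand back one character per call.
+	 */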
+ if (poll_chars <= 0) {
+ poll_chars = poll_wait_key(poll_buf, pinfo);
+ pollp = poll_buf;
+ }
+ poll_chars--;
+ return *pollp++;
+}
+
+static void cpm_put_poll_char(struct uart_port *port,
+ unsigned char c)
+{
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ static char ch[2];
+
+ ch[0] = (char)c;
+ cpm_uart_early_write(pinfo, ch, 1);
+}
+#endif /* CONFIG_CONSOLE_POLL */
+
+static struct uart_ops cpm_uart_pops = {
+ .tx_empty = cpm_uart_tx_empty,
+ .set_mctrl = cpm_uart_set_mctrl,
+ .get_mctrl = cpm_uart_get_mctrl,
+ .stop_tx = cpm_uart_stop_tx,
+ .start_tx = cpm_uart_start_tx,
+ .stop_rx = cpm_uart_stop_rx,
+ .enable_ms = cpm_uart_enable_ms,
+ .break_ctl = cpm_uart_break_ctl,
+ .startup = cpm_uart_startup,
+ .shutdown = cpm_uart_shutdown,
+ .set_termios = cpm_uart_set_termios,
+ .type = cpm_uart_type,
+ .release_port = cpm_uart_release_port,
+ .request_port = cpm_uart_request_port,
+ .config_port = cpm_uart_config_port,
+ .verify_port = cpm_uart_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = cpm_get_poll_char,
+ .poll_put_char = cpm_put_poll_char,
+#endif
+};
+
+struct uart_cpm_port cpm_uart_ports[UART_NR];
+
+static int cpm_uart_init_port(struct device_node *np,
+ struct uart_cpm_port *pinfo)
+{
+ const u32 *data;
+ void __iomem *mem, *pram;
+ int len;
+ int ret;
+ int i;
+
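+	/* Prefer a named clock from the "clock" property; fall back to a
+	 * BRG index from "fsl,cpm-brg" when no such clock is available.
+	 */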
+ data = of_get_property(np, "clock", NULL);
+ if (data) {
+ struct clk *clk = clk_get(NULL, (const char*)data);
+ if (!IS_ERR(clk))
+ pinfo->clk = clk;
+ }
+ if (!pinfo->clk) {
+ data = of_get_property(np, "fsl,cpm-brg", &len);
+ if (!data || len != 4) {
+ printk(KERN_ERR "CPM UART %s has no/invalid "
+ "fsl,cpm-brg property.\n", np->name);
+ return -EINVAL;
+ }
+ pinfo->brg = *data;
+ }
+
+ data = of_get_property(np, "fsl,cpm-command", &len);
+ if (!data || len != 4) {
+ printk(KERN_ERR "CPM UART %s has no/invalid "
+ "fsl,cpm-command property.\n", np->name);
+ return -EINVAL;
+ }
+ pinfo->command = *data;
+
+ mem = of_iomap(np, 0);
+ if (!mem)
+ return -ENOMEM;
+
+ if (of_device_is_compatible(np, "fsl,cpm1-scc-uart") ||
+ of_device_is_compatible(np, "fsl,cpm2-scc-uart")) {
+ pinfo->sccp = mem;
+ pinfo->sccup = pram = cpm_uart_map_pram(pinfo, np);
+ } else if (of_device_is_compatible(np, "fsl,cpm1-smc-uart") ||
+ of_device_is_compatible(np, "fsl,cpm2-smc-uart")) {
+ pinfo->flags |= FLAG_SMC;
+ pinfo->smcp = mem;
+ pinfo->smcup = pram = cpm_uart_map_pram(pinfo, np);
+ } else {
+ ret = -ENODEV;
+ goto out_mem;
+ }
+
+ if (!pram) {
+ ret = -ENOMEM;
+ goto out_mem;
+ }
+
+ pinfo->tx_nrfifos = TX_NUM_FIFO;
+ pinfo->tx_fifosize = TX_BUF_SIZE;
+ pinfo->rx_nrfifos = RX_NUM_FIFO;
+ pinfo->rx_fifosize = RX_BUF_SIZE;
+
+ pinfo->port.uartclk = ppc_proc_freq;
+ pinfo->port.mapbase = (unsigned long)mem;
+ pinfo->port.type = PORT_CPM;
+	pinfo->port.ops = &cpm_uart_pops;
+ pinfo->port.iotype = UPIO_MEM;
+ pinfo->port.fifosize = pinfo->tx_nrfifos * pinfo->tx_fifosize;
+ spin_lock_init(&pinfo->port.lock);
+
+ pinfo->port.irq = of_irq_to_resource(np, 0, NULL);
+ if (pinfo->port.irq == NO_IRQ) {
+ ret = -EINVAL;
+ goto out_pram;
+ }
+
+ for (i = 0; i < NUM_GPIOS; i++)
+ pinfo->gpios[i] = of_get_gpio(np, i);
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
+ udbg_putc = NULL;
+#endif
+
+ return cpm_uart_request_port(&pinfo->port);
+
+out_pram:
+ cpm_uart_unmap_pram(pinfo, pram);
+out_mem:
+ iounmap(mem);
+ return ret;
+}
+
+#ifdef CONFIG_SERIAL_CPM_CONSOLE
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * Note that this is called with interrupts already disabled
+ */
+static void cpm_uart_console_write(struct console *co, const char *s,
+ u_int count)
+{
+ struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
+ unsigned long flags;
+ int nolock = oops_in_progress;
+
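+	/* If an oops is in progress the port lock may already be held,
+	 * so only disable local interrupts instead of taking it again.
+	 */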
+ if (unlikely(nolock)) {
+ local_irq_save(flags);
+ } else {
+ spin_lock_irqsave(&pinfo->port.lock, flags);
+ }
+
+ cpm_uart_early_write(pinfo, s, count);
+
+ if (unlikely(nolock)) {
+ local_irq_restore(flags);
+ } else {
+ spin_unlock_irqrestore(&pinfo->port.lock, flags);
+ }
+}
+
+
+static int __init cpm_uart_console_setup(struct console *co, char *options)
+{
+ int baud = 38400;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+ struct uart_cpm_port *pinfo;
+ struct uart_port *port;
+
+ struct device_node *np = NULL;
+ int i = 0;
+
+ if (co->index >= UART_NR) {
+ printk(KERN_ERR "cpm_uart: console index %d too high\n",
+ co->index);
+ return -ENODEV;
+ }
+
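+	/* Walk the device tree's "serial" nodes, counting only CPM
+	 * SCC/SMC UARTs, until we reach the node for this console index.
+	 */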
+ do {
+ np = of_find_node_by_type(np, "serial");
+ if (!np)
+ return -ENODEV;
+
+ if (!of_device_is_compatible(np, "fsl,cpm1-smc-uart") &&
+ !of_device_is_compatible(np, "fsl,cpm1-scc-uart") &&
+ !of_device_is_compatible(np, "fsl,cpm2-smc-uart") &&
+ !of_device_is_compatible(np, "fsl,cpm2-scc-uart"))
+ i--;
+ } while (i++ != co->index);
+
+ pinfo = &cpm_uart_ports[co->index];
+
+ pinfo->flags |= FLAG_CONSOLE;
+ port = &pinfo->port;
+
+ ret = cpm_uart_init_port(np, pinfo);
+ of_node_put(np);
+ if (ret)
+ return ret;
+
+ if (options) {
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ } else {
+ if ((baud = uart_baudrate()) == -1)
+ baud = 9600;
+ }
+
+ if (IS_SMC(pinfo)) {
+ out_be16(&pinfo->smcup->smc_brkcr, 0);
+ cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX);
+ clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX | SMCM_TX);
+ clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN | SMCMR_TEN);
+ } else {
+ out_be16(&pinfo->sccup->scc_brkcr, 0);
+ cpm_line_cr_cmd(pinfo, CPM_CR_GRA_STOP_TX);
+ clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_TX | UART_SCCM_RX);
+ clrbits32(&pinfo->sccp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+ }
+
+ ret = cpm_uart_allocbuf(pinfo, 1);
+
+ if (ret)
+ return ret;
+
+ cpm_uart_initbd(pinfo);
+
+ if (IS_SMC(pinfo))
+ cpm_uart_init_smc(pinfo);
+ else
+ cpm_uart_init_scc(pinfo);
+
+ uart_set_options(port, co, baud, parity, bits, flow);
+ cpm_line_cr_cmd(pinfo, CPM_CR_RESTART_TX);
+
+ return 0;
+}
+
+static struct uart_driver cpm_reg;
+static struct console cpm_scc_uart_console = {
+ .name = "ttyCPM",
+ .write = cpm_uart_console_write,
+ .device = uart_console_device,
+ .setup = cpm_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &cpm_reg,
+};
+
+static int __init cpm_uart_console_init(void)
+{
+ register_console(&cpm_scc_uart_console);
+ return 0;
+}
+
+console_initcall(cpm_uart_console_init);
+
+#define CPM_UART_CONSOLE &cpm_scc_uart_console
+#else
+#define CPM_UART_CONSOLE NULL
+#endif
+
+static struct uart_driver cpm_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyCPM",
+ .dev_name = "ttyCPM",
+ .major = SERIAL_CPM_MAJOR,
+ .minor = SERIAL_CPM_MINOR,
+ .cons = CPM_UART_CONSOLE,
+ .nr = UART_NR,
+};
+
+static int probe_index;
+
+static int __devinit cpm_uart_probe(struct platform_device *ofdev)
+{
+	int index = probe_index++;
+	struct uart_cpm_port *pinfo;
+	int ret;
+
+	if (index >= UART_NR)
+		return -ENODEV;
+
+	pinfo = &cpm_uart_ports[index];
+	pinfo->port.line = index;
+
+ dev_set_drvdata(&ofdev->dev, pinfo);
+
+ /* initialize the device pointer for the port */
+ pinfo->port.dev = &ofdev->dev;
+
+ ret = cpm_uart_init_port(ofdev->dev.of_node, pinfo);
+ if (ret)
+ return ret;
+
+ return uart_add_one_port(&cpm_reg, &pinfo->port);
+}
+
+static int __devexit cpm_uart_remove(struct platform_device *ofdev)
+{
+ struct uart_cpm_port *pinfo = dev_get_drvdata(&ofdev->dev);
+ return uart_remove_one_port(&cpm_reg, &pinfo->port);
+}
+
+static struct of_device_id cpm_uart_match[] = {
+ {
+ .compatible = "fsl,cpm1-smc-uart",
+ },
+ {
+ .compatible = "fsl,cpm1-scc-uart",
+ },
+ {
+ .compatible = "fsl,cpm2-smc-uart",
+ },
+ {
+ .compatible = "fsl,cpm2-scc-uart",
+ },
+ {}
+};
+
+static struct platform_driver cpm_uart_driver = {
+ .driver = {
+ .name = "cpm_uart",
+ .owner = THIS_MODULE,
+ .of_match_table = cpm_uart_match,
+ },
+ .probe = cpm_uart_probe,
+ .remove = cpm_uart_remove,
+ };
+
+static int __init cpm_uart_init(void)
+{
+ int ret = uart_register_driver(&cpm_reg);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&cpm_uart_driver);
+ if (ret)
+ uart_unregister_driver(&cpm_reg);
+
+ return ret;
+}
+
+static void __exit cpm_uart_exit(void)
+{
+ platform_driver_unregister(&cpm_uart_driver);
+ uart_unregister_driver(&cpm_reg);
+}
+
+module_init(cpm_uart_init);
+module_exit(cpm_uart_exit);
+
+MODULE_AUTHOR("Kumar Gala/Antoniou Pantelis");
+MODULE_DESCRIPTION("CPM SCC/SMC port driver $Revision: 0.01 $");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV(SERIAL_CPM_MAJOR, SERIAL_CPM_MINOR);
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
new file mode 100644
index 00000000..18f79575
--- /dev/null
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
@@ -0,0 +1,136 @@
+/*
+ * Driver for CPM (SCC/SMC) serial ports; CPM1 definitions
+ *
+ * Maintainer: Kumar Gala (galak@kernel.crashing.org) (CPM2)
+ * Pantelis Antoniou (panto@intracom.gr) (CPM1)
+ *
+ * Copyright (C) 2004 Freescale Semiconductor, Inc.
+ * (C) 2004 Intracom, S.A.
+ * (C) 2006 MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/gfp.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/device.h>
+#include <linux/bootmem.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/fs_pd.h>
+
+#include <linux/serial_core.h>
+#include <linux/kernel.h>
+
+#include <linux/of.h>
+
+#include "cpm_uart.h"
+
+/**************************************************************/
+
+void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd)
+{
+ cpm_command(port->command, cmd);
+}
+
+void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port,
+ struct device_node *np)
+{
+ return of_iomap(np, 1);
+}
+
+void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram)
+{
+ iounmap(pram);
+}
+
+/*
+ * Allocate DP-Ram and memory buffers. We need to allocate a transmit and
+ * receive buffer descriptors from dual port ram, and a character
+ * buffer area from host mem. If we are allocating for the console we need
+ * to do it from bootmem
+ */
+int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con)
+{
+ int dpmemsz, memsz;
+ u8 *dp_mem;
+ unsigned long dp_offset;
+ u8 *mem_addr;
+ dma_addr_t dma_addr = 0;
+
+ pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line);
+
+ dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos);
+ dp_offset = cpm_dpalloc(dpmemsz, 8);
+ if (IS_ERR_VALUE(dp_offset)) {
+ printk(KERN_ERR
+ "cpm_uart_cpm1.c: could not allocate buffer descriptors\n");
+ return -ENOMEM;
+ }
+ dp_mem = cpm_dpram_addr(dp_offset);
+
+ memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
+ L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
+	if (is_con) {
+		/* This used to be a hostalloc, but that blew away the
+		 * large TLB mapping used when pinning the kernel area,
+		 * so allocate from dual-port RAM instead. */
+ mem_addr = (u8 *) cpm_dpram_addr(cpm_dpalloc(memsz, 8));
+ dma_addr = (u32)cpm_dpram_phys(mem_addr);
+ } else
+ mem_addr = dma_alloc_coherent(pinfo->port.dev, memsz, &dma_addr,
+ GFP_KERNEL);
+
+ if (mem_addr == NULL) {
+ cpm_dpfree(dp_offset);
+ printk(KERN_ERR
+ "cpm_uart_cpm1.c: could not allocate coherent memory\n");
+ return -ENOMEM;
+ }
+
+ pinfo->dp_addr = dp_offset;
+ pinfo->mem_addr = mem_addr; /* virtual address*/
+ pinfo->dma_addr = dma_addr; /* physical address*/
+ pinfo->mem_size = memsz;
+
+ pinfo->rx_buf = mem_addr;
+ pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos
+ * pinfo->rx_fifosize);
+
+ pinfo->rx_bd_base = (cbd_t __iomem __force *)dp_mem;
+ pinfo->tx_bd_base = pinfo->rx_bd_base + pinfo->rx_nrfifos;
+
+ return 0;
+}
+
+void cpm_uart_freebuf(struct uart_cpm_port *pinfo)
+{
+ dma_free_coherent(pinfo->port.dev, L1_CACHE_ALIGN(pinfo->rx_nrfifos *
+ pinfo->rx_fifosize) +
+ L1_CACHE_ALIGN(pinfo->tx_nrfifos *
+ pinfo->tx_fifosize), pinfo->mem_addr,
+ pinfo->dma_addr);
+
+ cpm_dpfree(pinfo->dp_addr);
+}
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
new file mode 100644
index 00000000..60c7e94c
--- /dev/null
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
@@ -0,0 +1,32 @@
+/*
+ * Driver for CPM (SCC/SMC) serial ports
+ *
+ * definitions for cpm1
+ *
+ */
+
+#ifndef CPM_UART_CPM1_H
+#define CPM_UART_CPM1_H
+
+#include <asm/cpm1.h>
+
+static inline void cpm_set_brg(int brg, int baud)
+{
+ cpm_setbrg(brg, baud);
+}
+
+static inline void cpm_set_scc_fcr(scc_uart_t __iomem * sup)
+{
+ out_8(&sup->scc_genscc.scc_rfcr, SMC_EB);
+ out_8(&sup->scc_genscc.scc_tfcr, SMC_EB);
+}
+
+static inline void cpm_set_smc_fcr(smc_uart_t __iomem * up)
+{
+ out_8(&up->smc_rfcr, SMC_EB);
+ out_8(&up->smc_tfcr, SMC_EB);
+}
+
+#define DPRAM_BASE ((u8 __iomem __force *)cpm_dpram_addr(0))
+
+#endif
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
new file mode 100644
index 00000000..a4927e66
--- /dev/null
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
@@ -0,0 +1,172 @@
+/*
+ * Driver for CPM (SCC/SMC) serial ports; CPM2 definitions
+ *
+ * Maintainer: Kumar Gala (galak@kernel.crashing.org) (CPM2)
+ * Pantelis Antoniou (panto@intracom.gr) (CPM1)
+ *
+ * Copyright (C) 2004 Freescale Semiconductor, Inc.
+ * (C) 2004 Intracom, S.A.
+ * (C) 2006 MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/device.h>
+#include <linux/bootmem.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/fs_pd.h>
+#include <asm/prom.h>
+
+#include <linux/serial_core.h>
+#include <linux/kernel.h>
+
+#include "cpm_uart.h"
+
+/**************************************************************/
+
+void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd)
+{
+ cpm_command(port->command, cmd);
+}
+
+void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port,
+ struct device_node *np)
+{
+ void __iomem *pram;
+ unsigned long offset;
+ struct resource res;
+ resource_size_t len;
+
+ /* Don't remap parameter RAM if it has already been initialized
+ * during console setup.
+ */
+ if (IS_SMC(port) && port->smcup)
+ return port->smcup;
+ else if (!IS_SMC(port) && port->sccup)
+ return port->sccup;
+
+ if (of_address_to_resource(np, 1, &res))
+ return NULL;
+
+ len = resource_size(&res);
+ pram = ioremap(res.start, len);
+ if (!pram)
+ return NULL;
+
+ if (!IS_SMC(port))
+ return pram;
+
+ if (len != 2) {
+ printk(KERN_WARNING "cpm_uart[%d]: device tree references "
+ "SMC pram, using boot loader/wrapper pram mapping. "
+ "Please fix your device tree to reference the pram "
+ "base register instead.\n",
+ port->port.line);
+ return pram;
+ }
+
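+	/* The device tree points at the two-byte SMC parameter RAM base
+	 * register; allocate real parameter RAM from the dual-port RAM
+	 * and store its offset there instead.
+	 */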
+ offset = cpm_dpalloc(PROFF_SMC_SIZE, 64);
+ out_be16(pram, offset);
+ iounmap(pram);
+ return cpm_muram_addr(offset);
+}
+
+void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram)
+{
+ if (!IS_SMC(port))
+ iounmap(pram);
+}
+
+/*
+ * Allocate DP-Ram and memory buffers. We need to allocate a transmit and
+ * receive buffer descriptors from dual port ram, and a character
+ * buffer area from host mem. If we are allocating for the console we need
+ * to do it from bootmem
+ */
+int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con)
+{
+ int dpmemsz, memsz;
+ u8 __iomem *dp_mem;
+ unsigned long dp_offset;
+ u8 *mem_addr;
+ dma_addr_t dma_addr = 0;
+
+ pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line);
+
+ dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos);
+ dp_offset = cpm_dpalloc(dpmemsz, 8);
+ if (IS_ERR_VALUE(dp_offset)) {
+ printk(KERN_ERR
+		       "cpm_uart_cpm2.c: could not allocate buffer descriptors\n");
+ return -ENOMEM;
+ }
+
+ dp_mem = cpm_dpram_addr(dp_offset);
+
+ memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
+ L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
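+	/* The console path uses an early kzalloc rather than
+	 * dma_alloc_coherent, presumably because console setup can run
+	 * before the device is fully probed.
+	 */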
+ if (is_con) {
+ mem_addr = kzalloc(memsz, GFP_NOWAIT);
+ dma_addr = virt_to_bus(mem_addr);
+	} else
+ mem_addr = dma_alloc_coherent(pinfo->port.dev, memsz, &dma_addr,
+ GFP_KERNEL);
+
+ if (mem_addr == NULL) {
+ cpm_dpfree(dp_offset);
+ printk(KERN_ERR
+		       "cpm_uart_cpm2.c: could not allocate coherent memory\n");
+ return -ENOMEM;
+ }
+
+ pinfo->dp_addr = dp_offset;
+ pinfo->mem_addr = mem_addr;
+ pinfo->dma_addr = dma_addr;
+ pinfo->mem_size = memsz;
+
+ pinfo->rx_buf = mem_addr;
+ pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos
+ * pinfo->rx_fifosize);
+
+ pinfo->rx_bd_base = (cbd_t __iomem *)dp_mem;
+ pinfo->tx_bd_base = pinfo->rx_bd_base + pinfo->rx_nrfifos;
+
+ return 0;
+}
+
+void cpm_uart_freebuf(struct uart_cpm_port *pinfo)
+{
+ dma_free_coherent(pinfo->port.dev, L1_CACHE_ALIGN(pinfo->rx_nrfifos *
+ pinfo->rx_fifosize) +
+ L1_CACHE_ALIGN(pinfo->tx_nrfifos *
+ pinfo->tx_fifosize), (void __force *)pinfo->mem_addr,
+ pinfo->dma_addr);
+
+ cpm_dpfree(pinfo->dp_addr);
+}
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
new file mode 100644
index 00000000..51e651a6
--- /dev/null
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
@@ -0,0 +1,32 @@
+/*
+ * Driver for CPM (SCC/SMC) serial ports
+ *
+ * definitions for cpm2
+ *
+ */
+
+#ifndef CPM_UART_CPM2_H
+#define CPM_UART_CPM2_H
+
+#include <asm/cpm2.h>
+
+static inline void cpm_set_brg(int brg, int baud)
+{
+ cpm_setbrg(brg, baud);
+}
+
+static inline void cpm_set_scc_fcr(scc_uart_t __iomem *sup)
+{
+ out_8(&sup->scc_genscc.scc_rfcr, CPMFCR_GBL | CPMFCR_EB);
+ out_8(&sup->scc_genscc.scc_tfcr, CPMFCR_GBL | CPMFCR_EB);
+}
+
+static inline void cpm_set_smc_fcr(smc_uart_t __iomem *up)
+{
+ out_8(&up->smc_rfcr, CPMFCR_GBL | CPMFCR_EB);
+ out_8(&up->smc_tfcr, CPMFCR_GBL | CPMFCR_EB);
+}
+
+#define DPRAM_BASE ((u8 __iomem __force *)cpm_dpram_addr(0))
+
+#endif
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
new file mode 100644
index 00000000..58be7159
--- /dev/null
+++ b/drivers/tty/serial/crisv10.c
@@ -0,0 +1,4572 @@
+/*
+ * Serial port driver for the ETRAX 100LX chip
+ *
+ * Copyright (C) 1998-2007 Axis Communications AB
+ *
+ * Many, many authors. Based once upon a time on serial.c for 16x50.
+ *
+ */
+
+static char *serial_version = "$Revision: 1.25 $";
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/system.h>
+
+#include <arch/svinto.h>
+
+/* non-arch dependent serial structures are in linux/serial.h */
+#include <linux/serial.h>
+/* while we keep our own stuff (struct e100_serial) in a local .h file */
+#include "crisv10.h"
+#include <asm/fasttimer.h>
+#include <arch/io_interface_mux.h>
+
+#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
+#ifndef CONFIG_ETRAX_FAST_TIMER
+#error "Enable FAST_TIMER to use SERIAL_FAST_TIMER"
+#endif
+#endif
+
+#if defined(CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS) && \
+ (CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS == 0)
+#error "RX_TIMEOUT_TICKS == 0 not allowed, use 1"
+#endif
+
+#if defined(CONFIG_ETRAX_RS485_ON_PA) && defined(CONFIG_ETRAX_RS485_ON_PORT_G)
+#error "Disable either CONFIG_ETRAX_RS485_ON_PA or CONFIG_ETRAX_RS485_ON_PORT_G"
+#endif
+
+/*
+ * All of the compatibility code so we can compile serial.c against
+ * older kernels is hidden in serial_compat.h
+ */
+#if defined(LOCAL_HEADERS)
+#include "serial_compat.h"
+#endif
+
+struct tty_driver *serial_driver;
+
+/* number of characters left in xmit buffer before we ask for more */
+#define WAKEUP_CHARS 256
+
+//#define SERIAL_DEBUG_INTR
+//#define SERIAL_DEBUG_OPEN
+//#define SERIAL_DEBUG_FLOW
+//#define SERIAL_DEBUG_DATA
+//#define SERIAL_DEBUG_THROTTLE
+//#define SERIAL_DEBUG_IO /* Debug for Extra control and status pins */
+//#define SERIAL_DEBUG_LINE 0 /* What serport we want to debug */
+
+/* Enable this to use serial interrupts to handle the case where
+   the first received event on the serial port is expected to
+   be an error, break or similar. Used to be able to flash IRMA
+   from eLinux */
+#define SERIAL_HANDLE_EARLY_ERRORS
+
+/* Currently 16 descriptors x 128 bytes = 2048 bytes */
+#define SERIAL_DESCR_BUF_SIZE 256
+
+#define SERIAL_PRESCALE_BASE 3125000 /* 3.125MHz */
+#define DEF_BAUD_BASE SERIAL_PRESCALE_BASE
+
+/* We don't want to load the system with massive fast timer interrupts
+ * at high baudrates, so limit the flush rate to 250 us (4 kHz) */
+#define MIN_FLUSH_TIME_USEC 250
+
+/* Add an x here to log a lot of timer stuff */
+#define TIMERD(x)
+/* Debug details of interrupt handling */
+#define DINTR1(x) /* irq on/off, errors */
+#define DINTR2(x) /* tx and rx */
+/* Debug flip buffer stuff */
+#define DFLIP(x)
+/* Debug flow control and overview of data flow */
+#define DFLOW(x)
+#define DBAUD(x)
+#define DLOG_INT_TRIG(x)
+
+//#define DEBUG_LOG_INCLUDED
+#ifndef DEBUG_LOG_INCLUDED
+#define DEBUG_LOG(line, string, value)
+#else
+struct debug_log_info
+{
+ unsigned long time;
+ unsigned long timer_data;
+// int line;
+ const char *string;
+ int value;
+};
+#define DEBUG_LOG_SIZE 4096
+
+struct debug_log_info debug_log[DEBUG_LOG_SIZE];
+int debug_log_pos = 0;
+
+#define DEBUG_LOG(_line, _string, _value) do { \
+ if ((_line) == SERIAL_DEBUG_LINE) {\
+ debug_log_func(_line, _string, _value); \
+ }\
+}while(0)
+
+void debug_log_func(int line, const char *string, int value)
+{
+ if (debug_log_pos < DEBUG_LOG_SIZE) {
+ debug_log[debug_log_pos].time = jiffies;
+ debug_log[debug_log_pos].timer_data = *R_TIMER_DATA;
+// debug_log[debug_log_pos].line = line;
+ debug_log[debug_log_pos].string = string;
+ debug_log[debug_log_pos].value = value;
+ debug_log_pos++;
+ }
+ /*printk(string, value);*/
+}
+#endif
+
+#ifndef CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS
+/* Default number of timer ticks before flushing rx fifo
+ * For "little data, low latency" applications: use 0
+ * For "much data" applications (e.g. PPP): use ~5
+ */
+#define CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS 5
+#endif
+
+unsigned long timer_data_to_ns(unsigned long timer_data);
+
+static void change_speed(struct e100_serial *info);
+static void rs_throttle(struct tty_struct * tty);
+static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
+static int rs_write(struct tty_struct *tty,
+ const unsigned char *buf, int count);
+#ifdef CONFIG_ETRAX_RS485
+static int e100_write_rs485(struct tty_struct *tty,
+ const unsigned char *buf, int count);
+#endif
+static int get_lsr_info(struct e100_serial *info, unsigned int *value);
+
+
+#define DEF_BAUD 115200 /* 115.2 kbit/s */
+#define STD_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
+#define DEF_RX 0x20 /* or SERIAL_CTRL_W >> 8 */
+/* Default value of tx_ctrl register: has txd(bit 7)=1 (idle) as default */
+#define DEF_TX 0x80 /* or SERIAL_CTRL_B */
+
+/* offsets from R_SERIALx_CTRL */
+
+#define REG_DATA 0
+#define REG_DATA_STATUS32 0 /* this is the 32 bit register R_SERIALx_READ */
+#define REG_TR_DATA 0
+#define REG_STATUS 1
+#define REG_TR_CTRL 1
+#define REG_REC_CTRL 2
+#define REG_BAUD 3
+#define REG_XOFF 4 /* this is a 32 bit register */
+
+/* The bitfields are the same for all serial ports */
+#define SER_RXD_MASK IO_MASK(R_SERIAL0_STATUS, rxd)
+#define SER_DATA_AVAIL_MASK IO_MASK(R_SERIAL0_STATUS, data_avail)
+#define SER_FRAMING_ERR_MASK IO_MASK(R_SERIAL0_STATUS, framing_err)
+#define SER_PAR_ERR_MASK IO_MASK(R_SERIAL0_STATUS, par_err)
+#define SER_OVERRUN_MASK IO_MASK(R_SERIAL0_STATUS, overrun)
+
+#define SER_ERROR_MASK (SER_OVERRUN_MASK | SER_PAR_ERR_MASK | SER_FRAMING_ERR_MASK)
+
+/* Values for info->errorcode */
+#define ERRCODE_SET_BREAK (TTY_BREAK)
+#define ERRCODE_INSERT 0x100
+#define ERRCODE_INSERT_BREAK (ERRCODE_INSERT | TTY_BREAK)
+
+#define FORCE_EOP(info) *R_SET_EOP = 1U << info->iseteop;
+
+/*
+ * General note regarding the use of IO_* macros in this file:
+ *
+ * We will use the bits defined for DMA channel 6 when using various
+ * IO_* macros (e.g. IO_STATE, IO_MASK, IO_EXTRACT) and _assume_ they are
+ * the same for all channels (which of course they are).
+ *
+ * We will also use the bits defined for serial port 0 when writing commands
+ * to the different ports, as these bits too are the same for all ports.
+ */
+
+
+/* Mask for the irqs possibly enabled in R_IRQ_MASK1_RD etc. */
+static const unsigned long e100_ser_int_mask = 0
+#ifdef CONFIG_ETRAX_SERIAL_PORT0
+| IO_MASK(R_IRQ_MASK1_RD, ser0_data) | IO_MASK(R_IRQ_MASK1_RD, ser0_ready)
+#endif
+#ifdef CONFIG_ETRAX_SERIAL_PORT1
+| IO_MASK(R_IRQ_MASK1_RD, ser1_data) | IO_MASK(R_IRQ_MASK1_RD, ser1_ready)
+#endif
+#ifdef CONFIG_ETRAX_SERIAL_PORT2
+| IO_MASK(R_IRQ_MASK1_RD, ser2_data) | IO_MASK(R_IRQ_MASK1_RD, ser2_ready)
+#endif
+#ifdef CONFIG_ETRAX_SERIAL_PORT3
+| IO_MASK(R_IRQ_MASK1_RD, ser3_data) | IO_MASK(R_IRQ_MASK1_RD, ser3_ready)
+#endif
+;
+unsigned long r_alt_ser_baudrate_shadow = 0;
+
+/* this is the data for the four serial ports in the etrax100 */
+/* DMA2(ser2), DMA4(ser3), DMA6(ser0) or DMA8(ser1) */
+/* R_DMA_CHx_CLR_INTR, R_DMA_CHx_FIRST, R_DMA_CHx_CMD */
+
+static struct e100_serial rs_table[] = {
+ { .baud = DEF_BAUD,
+ .ioport = (unsigned char *)R_SERIAL0_CTRL,
+ .irq = 1U << 12, /* uses DMA 6 and 7 */
+ .oclrintradr = R_DMA_CH6_CLR_INTR,
+ .ofirstadr = R_DMA_CH6_FIRST,
+ .ocmdadr = R_DMA_CH6_CMD,
+ .ostatusadr = R_DMA_CH6_STATUS,
+ .iclrintradr = R_DMA_CH7_CLR_INTR,
+ .ifirstadr = R_DMA_CH7_FIRST,
+ .icmdadr = R_DMA_CH7_CMD,
+ .idescradr = R_DMA_CH7_DESCR,
+ .flags = STD_FLAGS,
+ .rx_ctrl = DEF_RX,
+ .tx_ctrl = DEF_TX,
+ .iseteop = 2,
+ .dma_owner = dma_ser0,
+ .io_if = if_serial_0,
+#ifdef CONFIG_ETRAX_SERIAL_PORT0
+ .enabled = 1,
+#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
+ .dma_out_enabled = 1,
+ .dma_out_nbr = SER0_TX_DMA_NBR,
+ .dma_out_irq_nbr = SER0_DMA_TX_IRQ_NBR,
+ .dma_out_irq_flags = IRQF_DISABLED,
+ .dma_out_irq_description = "serial 0 dma tr",
+#else
+ .dma_out_enabled = 0,
+ .dma_out_nbr = UINT_MAX,
+ .dma_out_irq_nbr = 0,
+ .dma_out_irq_flags = 0,
+ .dma_out_irq_description = NULL,
+#endif
+#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
+ .dma_in_enabled = 1,
+ .dma_in_nbr = SER0_RX_DMA_NBR,
+ .dma_in_irq_nbr = SER0_DMA_RX_IRQ_NBR,
+ .dma_in_irq_flags = IRQF_DISABLED,
+ .dma_in_irq_description = "serial 0 dma rec",
+#else
+ .dma_in_enabled = 0,
+ .dma_in_nbr = UINT_MAX,
+ .dma_in_irq_nbr = 0,
+ .dma_in_irq_flags = 0,
+ .dma_in_irq_description = NULL,
+#endif
+#else
+ .enabled = 0,
+ .io_if_description = NULL,
+ .dma_out_enabled = 0,
+ .dma_in_enabled = 0
+#endif
+
+}, /* ttyS0 */
+#ifndef CONFIG_SVINTO_SIM
+ { .baud = DEF_BAUD,
+ .ioport = (unsigned char *)R_SERIAL1_CTRL,
+ .irq = 1U << 16, /* uses DMA 8 and 9 */
+ .oclrintradr = R_DMA_CH8_CLR_INTR,
+ .ofirstadr = R_DMA_CH8_FIRST,
+ .ocmdadr = R_DMA_CH8_CMD,
+ .ostatusadr = R_DMA_CH8_STATUS,
+ .iclrintradr = R_DMA_CH9_CLR_INTR,
+ .ifirstadr = R_DMA_CH9_FIRST,
+ .icmdadr = R_DMA_CH9_CMD,
+ .idescradr = R_DMA_CH9_DESCR,
+ .flags = STD_FLAGS,
+ .rx_ctrl = DEF_RX,
+ .tx_ctrl = DEF_TX,
+ .iseteop = 3,
+ .dma_owner = dma_ser1,
+ .io_if = if_serial_1,
+#ifdef CONFIG_ETRAX_SERIAL_PORT1
+ .enabled = 1,
+ .io_if_description = "ser1",
+#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT
+ .dma_out_enabled = 1,
+ .dma_out_nbr = SER1_TX_DMA_NBR,
+ .dma_out_irq_nbr = SER1_DMA_TX_IRQ_NBR,
+ .dma_out_irq_flags = IRQF_DISABLED,
+ .dma_out_irq_description = "serial 1 dma tr",
+#else
+ .dma_out_enabled = 0,
+ .dma_out_nbr = UINT_MAX,
+ .dma_out_irq_nbr = 0,
+ .dma_out_irq_flags = 0,
+ .dma_out_irq_description = NULL,
+#endif
+#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN
+ .dma_in_enabled = 1,
+ .dma_in_nbr = SER1_RX_DMA_NBR,
+ .dma_in_irq_nbr = SER1_DMA_RX_IRQ_NBR,
+ .dma_in_irq_flags = IRQF_DISABLED,
+ .dma_in_irq_description = "serial 1 dma rec",
+#else
+	.dma_in_enabled = 0,
+ .dma_in_nbr = UINT_MAX,
+ .dma_in_irq_nbr = 0,
+ .dma_in_irq_flags = 0,
+ .dma_in_irq_description = NULL,
+#endif
+#else
+ .enabled = 0,
+ .io_if_description = NULL,
+ .dma_in_irq_nbr = 0,
+ .dma_out_enabled = 0,
+ .dma_in_enabled = 0
+#endif
+}, /* ttyS1 */
+
+ { .baud = DEF_BAUD,
+ .ioport = (unsigned char *)R_SERIAL2_CTRL,
+ .irq = 1U << 4, /* uses DMA 2 and 3 */
+ .oclrintradr = R_DMA_CH2_CLR_INTR,
+ .ofirstadr = R_DMA_CH2_FIRST,
+ .ocmdadr = R_DMA_CH2_CMD,
+ .ostatusadr = R_DMA_CH2_STATUS,
+ .iclrintradr = R_DMA_CH3_CLR_INTR,
+ .ifirstadr = R_DMA_CH3_FIRST,
+ .icmdadr = R_DMA_CH3_CMD,
+ .idescradr = R_DMA_CH3_DESCR,
+ .flags = STD_FLAGS,
+ .rx_ctrl = DEF_RX,
+ .tx_ctrl = DEF_TX,
+ .iseteop = 0,
+ .dma_owner = dma_ser2,
+ .io_if = if_serial_2,
+#ifdef CONFIG_ETRAX_SERIAL_PORT2
+ .enabled = 1,
+ .io_if_description = "ser2",
+#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
+ .dma_out_enabled = 1,
+ .dma_out_nbr = SER2_TX_DMA_NBR,
+ .dma_out_irq_nbr = SER2_DMA_TX_IRQ_NBR,
+ .dma_out_irq_flags = IRQF_DISABLED,
+ .dma_out_irq_description = "serial 2 dma tr",
+#else
+ .dma_out_enabled = 0,
+ .dma_out_nbr = UINT_MAX,
+ .dma_out_irq_nbr = 0,
+ .dma_out_irq_flags = 0,
+ .dma_out_irq_description = NULL,
+#endif
+#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
+ .dma_in_enabled = 1,
+ .dma_in_nbr = SER2_RX_DMA_NBR,
+ .dma_in_irq_nbr = SER2_DMA_RX_IRQ_NBR,
+ .dma_in_irq_flags = IRQF_DISABLED,
+ .dma_in_irq_description = "serial 2 dma rec",
+#else
+ .dma_in_enabled = 0,
+ .dma_in_nbr = UINT_MAX,
+ .dma_in_irq_nbr = 0,
+ .dma_in_irq_flags = 0,
+ .dma_in_irq_description = NULL,
+#endif
+#else
+ .enabled = 0,
+ .io_if_description = NULL,
+ .dma_out_enabled = 0,
+ .dma_in_enabled = 0
+#endif
+ }, /* ttyS2 */
+
+ { .baud = DEF_BAUD,
+ .ioport = (unsigned char *)R_SERIAL3_CTRL,
+ .irq = 1U << 8, /* uses DMA 4 and 5 */
+ .oclrintradr = R_DMA_CH4_CLR_INTR,
+ .ofirstadr = R_DMA_CH4_FIRST,
+ .ocmdadr = R_DMA_CH4_CMD,
+ .ostatusadr = R_DMA_CH4_STATUS,
+ .iclrintradr = R_DMA_CH5_CLR_INTR,
+ .ifirstadr = R_DMA_CH5_FIRST,
+ .icmdadr = R_DMA_CH5_CMD,
+ .idescradr = R_DMA_CH5_DESCR,
+ .flags = STD_FLAGS,
+ .rx_ctrl = DEF_RX,
+ .tx_ctrl = DEF_TX,
+ .iseteop = 1,
+ .dma_owner = dma_ser3,
+ .io_if = if_serial_3,
+#ifdef CONFIG_ETRAX_SERIAL_PORT3
+ .enabled = 1,
+ .io_if_description = "ser3",
+#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT
+ .dma_out_enabled = 1,
+ .dma_out_nbr = SER3_TX_DMA_NBR,
+ .dma_out_irq_nbr = SER3_DMA_TX_IRQ_NBR,
+ .dma_out_irq_flags = IRQF_DISABLED,
+ .dma_out_irq_description = "serial 3 dma tr",
+#else
+ .dma_out_enabled = 0,
+ .dma_out_nbr = UINT_MAX,
+ .dma_out_irq_nbr = 0,
+ .dma_out_irq_flags = 0,
+ .dma_out_irq_description = NULL,
+#endif
+#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN
+ .dma_in_enabled = 1,
+ .dma_in_nbr = SER3_RX_DMA_NBR,
+ .dma_in_irq_nbr = SER3_DMA_RX_IRQ_NBR,
+ .dma_in_irq_flags = IRQF_DISABLED,
+ .dma_in_irq_description = "serial 3 dma rec",
+#else
+ .dma_in_enabled = 0,
+ .dma_in_nbr = UINT_MAX,
+ .dma_in_irq_nbr = 0,
+ .dma_in_irq_flags = 0,
+ .dma_in_irq_description = NULL
+#endif
+#else
+ .enabled = 0,
+ .io_if_description = NULL,
+ .dma_out_enabled = 0,
+ .dma_in_enabled = 0
+#endif
+ } /* ttyS3 */
+#endif
+};
+
+
+#define NR_PORTS (sizeof(rs_table)/sizeof(struct e100_serial))
+
+#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
+static struct fast_timer fast_timers[NR_PORTS];
+#endif
+
+#ifdef CONFIG_ETRAX_SERIAL_PROC_ENTRY
+#define PROCSTAT(x) x
+struct ser_statistics_type {
+ int overrun_cnt;
+ int early_errors_cnt;
+ int ser_ints_ok_cnt;
+ int errors_cnt;
+ unsigned long int processing_flip;
+ unsigned long processing_flip_still_room;
+ unsigned long int timeout_flush_cnt;
+ int rx_dma_ints;
+ int tx_dma_ints;
+ int rx_tot;
+ int tx_tot;
+};
+
+static struct ser_statistics_type ser_stat[NR_PORTS];
+
+#else
+
+#define PROCSTAT(x)
+
+#endif /* CONFIG_ETRAX_SERIAL_PROC_ENTRY */
+
+/* RS-485 */
+#if defined(CONFIG_ETRAX_RS485)
+#ifdef CONFIG_ETRAX_FAST_TIMER
+static struct fast_timer fast_timers_rs485[NR_PORTS];
+#endif
+#if defined(CONFIG_ETRAX_RS485_ON_PA)
+static int rs485_pa_bit = CONFIG_ETRAX_RS485_ON_PA_BIT;
+#endif
+#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
+static int rs485_port_g_bit = CONFIG_ETRAX_RS485_ON_PORT_G_BIT;
+#endif
+#endif
+
+/* Info and macros needed for each port's extra control/status signals. */
+#define E100_STRUCT_PORT(line, pinname) \
+ ((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \
+ (R_PORT_PA_DATA): ( \
+ (CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? \
+ (R_PORT_PB_DATA):&dummy_ser[line]))
+
+#define E100_STRUCT_SHADOW(line, pinname) \
+ ((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \
+ (&port_pa_data_shadow): ( \
+ (CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? \
+ (&port_pb_data_shadow):&dummy_ser[line]))
+#define E100_STRUCT_MASK(line, pinname) \
+ ((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \
+ (1<<CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT): ( \
+ (CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? \
+ (1<<CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT):DUMMY_##pinname##_MASK))
+
+#define DUMMY_DTR_MASK 1
+#define DUMMY_RI_MASK 2
+#define DUMMY_DSR_MASK 4
+#define DUMMY_CD_MASK 8
+static unsigned char dummy_ser[NR_PORTS] = {0xFF, 0xFF, 0xFF,0xFF};
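+/* dummy_ser is all ones so that, with the signals being active low, a
+ * disabled pin reads back as inactive through the DUMMY_*_MASK entries.
+ */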
+
+/* If not all status pins are used or disabled, use mixed mode.
+ * Each *_BITSUM below is -4 only when all four signals on that port are
+ * configured as -1 (unused); otherwise any remaining -1 entry forces the
+ * mixed DTR/RI/DSR/CD handling. */
+#ifdef CONFIG_ETRAX_SERIAL_PORT0
+
+#define SER0_PA_BITSUM (CONFIG_ETRAX_SER0_DTR_ON_PA_BIT+CONFIG_ETRAX_SER0_RI_ON_PA_BIT+CONFIG_ETRAX_SER0_DSR_ON_PA_BIT+CONFIG_ETRAX_SER0_CD_ON_PA_BIT)
+
+#if SER0_PA_BITSUM != -4
+# if CONFIG_ETRAX_SER0_DTR_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER0_RI_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER0_DSR_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER0_CD_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+#endif
+
+#define SER0_PB_BITSUM (CONFIG_ETRAX_SER0_DTR_ON_PB_BIT+CONFIG_ETRAX_SER0_RI_ON_PB_BIT+CONFIG_ETRAX_SER0_DSR_ON_PB_BIT+CONFIG_ETRAX_SER0_CD_ON_PB_BIT)
+
+#if SER0_PB_BITSUM != -4
+# if CONFIG_ETRAX_SER0_DTR_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER0_RI_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER0_DSR_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER0_CD_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+#endif
+
+#endif /* PORT0 */
+
+
+#ifdef CONFIG_ETRAX_SERIAL_PORT1
+
+#define SER1_PA_BITSUM (CONFIG_ETRAX_SER1_DTR_ON_PA_BIT+CONFIG_ETRAX_SER1_RI_ON_PA_BIT+CONFIG_ETRAX_SER1_DSR_ON_PA_BIT+CONFIG_ETRAX_SER1_CD_ON_PA_BIT)
+
+#if SER1_PA_BITSUM != -4
+# if CONFIG_ETRAX_SER1_DTR_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER1_RI_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER1_DSR_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER1_CD_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+#endif
+
+#define SER1_PB_BITSUM (CONFIG_ETRAX_SER1_DTR_ON_PB_BIT+CONFIG_ETRAX_SER1_RI_ON_PB_BIT+CONFIG_ETRAX_SER1_DSR_ON_PB_BIT+CONFIG_ETRAX_SER1_CD_ON_PB_BIT)
+
+#if SER1_PB_BITSUM != -4
+# if CONFIG_ETRAX_SER1_DTR_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER1_RI_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER1_DSR_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER1_CD_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+#endif
+
+#endif /* PORT1 */
+
+#ifdef CONFIG_ETRAX_SERIAL_PORT2
+
+#define SER2_PA_BITSUM (CONFIG_ETRAX_SER2_DTR_ON_PA_BIT+CONFIG_ETRAX_SER2_RI_ON_PA_BIT+CONFIG_ETRAX_SER2_DSR_ON_PA_BIT+CONFIG_ETRAX_SER2_CD_ON_PA_BIT)
+
+#if SER2_PA_BITSUM != -4
+# if CONFIG_ETRAX_SER2_DTR_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER2_RI_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER2_DSR_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER2_CD_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+#endif
+
+#define SER2_PB_BITSUM (CONFIG_ETRAX_SER2_DTR_ON_PB_BIT+CONFIG_ETRAX_SER2_RI_ON_PB_BIT+CONFIG_ETRAX_SER2_DSR_ON_PB_BIT+CONFIG_ETRAX_SER2_CD_ON_PB_BIT)
+
+#if SER2_PB_BITSUM != -4
+# if CONFIG_ETRAX_SER2_DTR_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER2_RI_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER2_DSR_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER2_CD_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+#endif
+
+#endif /* PORT2 */
+
+#ifdef CONFIG_ETRAX_SERIAL_PORT3
+
+#define SER3_PA_BITSUM (CONFIG_ETRAX_SER3_DTR_ON_PA_BIT+CONFIG_ETRAX_SER3_RI_ON_PA_BIT+CONFIG_ETRAX_SER3_DSR_ON_PA_BIT+CONFIG_ETRAX_SER3_CD_ON_PA_BIT)
+
+#if SER3_PA_BITSUM != -4
+# if CONFIG_ETRAX_SER3_DTR_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER3_RI_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER3_DSR_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER3_CD_ON_PA_BIT == -1
+# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+#endif
+
+#define SER3_PB_BITSUM (CONFIG_ETRAX_SER3_DTR_ON_PB_BIT+CONFIG_ETRAX_SER3_RI_ON_PB_BIT+CONFIG_ETRAX_SER3_DSR_ON_PB_BIT+CONFIG_ETRAX_SER3_CD_ON_PB_BIT)
+
+#if SER3_PB_BITSUM != -4
+# if CONFIG_ETRAX_SER3_DTR_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER3_RI_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER3_DSR_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+# if CONFIG_ETRAX_SER3_CD_ON_PB_BIT == -1
+# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
+# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
+# endif
+# endif
+#endif
+
+#endif /* PORT3 */
+
+
+#if defined(CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED) || \
+ defined(CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED) || \
+ defined(CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED) || \
+ defined(CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED)
+#define CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED
+#endif
+
+#ifdef CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED
+/* The pins can be mixed on PA and PB */
+#define CONTROL_PINS_PORT_NOT_USED(line) \
+ &dummy_ser[line], &dummy_ser[line], \
+ &dummy_ser[line], &dummy_ser[line], \
+ &dummy_ser[line], &dummy_ser[line], \
+ &dummy_ser[line], &dummy_ser[line], \
+ DUMMY_DTR_MASK, DUMMY_RI_MASK, DUMMY_DSR_MASK, DUMMY_CD_MASK
+
+
+struct control_pins
+{
+ volatile unsigned char *dtr_port;
+ unsigned char *dtr_shadow;
+ volatile unsigned char *ri_port;
+ unsigned char *ri_shadow;
+ volatile unsigned char *dsr_port;
+ unsigned char *dsr_shadow;
+ volatile unsigned char *cd_port;
+ unsigned char *cd_shadow;
+
+ unsigned char dtr_mask;
+ unsigned char ri_mask;
+ unsigned char dsr_mask;
+ unsigned char cd_mask;
+};
+
+static const struct control_pins e100_modem_pins[NR_PORTS] =
+{
+ /* Ser 0 */
+ {
+#ifdef CONFIG_ETRAX_SERIAL_PORT0
+ E100_STRUCT_PORT(0,DTR), E100_STRUCT_SHADOW(0,DTR),
+ E100_STRUCT_PORT(0,RI), E100_STRUCT_SHADOW(0,RI),
+ E100_STRUCT_PORT(0,DSR), E100_STRUCT_SHADOW(0,DSR),
+ E100_STRUCT_PORT(0,CD), E100_STRUCT_SHADOW(0,CD),
+ E100_STRUCT_MASK(0,DTR),
+ E100_STRUCT_MASK(0,RI),
+ E100_STRUCT_MASK(0,DSR),
+ E100_STRUCT_MASK(0,CD)
+#else
+ CONTROL_PINS_PORT_NOT_USED(0)
+#endif
+ },
+
+ /* Ser 1 */
+ {
+#ifdef CONFIG_ETRAX_SERIAL_PORT1
+ E100_STRUCT_PORT(1,DTR), E100_STRUCT_SHADOW(1,DTR),
+ E100_STRUCT_PORT(1,RI), E100_STRUCT_SHADOW(1,RI),
+ E100_STRUCT_PORT(1,DSR), E100_STRUCT_SHADOW(1,DSR),
+ E100_STRUCT_PORT(1,CD), E100_STRUCT_SHADOW(1,CD),
+ E100_STRUCT_MASK(1,DTR),
+ E100_STRUCT_MASK(1,RI),
+ E100_STRUCT_MASK(1,DSR),
+ E100_STRUCT_MASK(1,CD)
+#else
+ CONTROL_PINS_PORT_NOT_USED(1)
+#endif
+ },
+
+ /* Ser 2 */
+ {
+#ifdef CONFIG_ETRAX_SERIAL_PORT2
+ E100_STRUCT_PORT(2,DTR), E100_STRUCT_SHADOW(2,DTR),
+ E100_STRUCT_PORT(2,RI), E100_STRUCT_SHADOW(2,RI),
+ E100_STRUCT_PORT(2,DSR), E100_STRUCT_SHADOW(2,DSR),
+ E100_STRUCT_PORT(2,CD), E100_STRUCT_SHADOW(2,CD),
+ E100_STRUCT_MASK(2,DTR),
+ E100_STRUCT_MASK(2,RI),
+ E100_STRUCT_MASK(2,DSR),
+ E100_STRUCT_MASK(2,CD)
+#else
+ CONTROL_PINS_PORT_NOT_USED(2)
+#endif
+ },
+
+ /* Ser 3 */
+ {
+#ifdef CONFIG_ETRAX_SERIAL_PORT3
+ E100_STRUCT_PORT(3,DTR), E100_STRUCT_SHADOW(3,DTR),
+ E100_STRUCT_PORT(3,RI), E100_STRUCT_SHADOW(3,RI),
+ E100_STRUCT_PORT(3,DSR), E100_STRUCT_SHADOW(3,DSR),
+ E100_STRUCT_PORT(3,CD), E100_STRUCT_SHADOW(3,CD),
+ E100_STRUCT_MASK(3,DTR),
+ E100_STRUCT_MASK(3,RI),
+ E100_STRUCT_MASK(3,DSR),
+ E100_STRUCT_MASK(3,CD)
+#else
+ CONTROL_PINS_PORT_NOT_USED(3)
+#endif
+ }
+};
+#else /* CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED */
+
+/* All pins are on either PA or PB for each serial port */
+#define CONTROL_PINS_PORT_NOT_USED(line) \
+ &dummy_ser[line], &dummy_ser[line], \
+ DUMMY_DTR_MASK, DUMMY_RI_MASK, DUMMY_DSR_MASK, DUMMY_CD_MASK
+
+
+struct control_pins
+{
+ volatile unsigned char *port;
+ unsigned char *shadow;
+
+ unsigned char dtr_mask;
+ unsigned char ri_mask;
+ unsigned char dsr_mask;
+ unsigned char cd_mask;
+};
+
+#define dtr_port port
+#define dtr_shadow shadow
+#define ri_port port
+#define ri_shadow shadow
+#define dsr_port port
+#define dsr_shadow shadow
+#define cd_port port
+#define cd_shadow shadow
+
+static const struct control_pins e100_modem_pins[NR_PORTS] =
+{
+ /* Ser 0 */
+ {
+#ifdef CONFIG_ETRAX_SERIAL_PORT0
+ E100_STRUCT_PORT(0,DTR), E100_STRUCT_SHADOW(0,DTR),
+ E100_STRUCT_MASK(0,DTR),
+ E100_STRUCT_MASK(0,RI),
+ E100_STRUCT_MASK(0,DSR),
+ E100_STRUCT_MASK(0,CD)
+#else
+ CONTROL_PINS_PORT_NOT_USED(0)
+#endif
+ },
+
+ /* Ser 1 */
+ {
+#ifdef CONFIG_ETRAX_SERIAL_PORT1
+ E100_STRUCT_PORT(1,DTR), E100_STRUCT_SHADOW(1,DTR),
+ E100_STRUCT_MASK(1,DTR),
+ E100_STRUCT_MASK(1,RI),
+ E100_STRUCT_MASK(1,DSR),
+ E100_STRUCT_MASK(1,CD)
+#else
+ CONTROL_PINS_PORT_NOT_USED(1)
+#endif
+ },
+
+ /* Ser 2 */
+ {
+#ifdef CONFIG_ETRAX_SERIAL_PORT2
+ E100_STRUCT_PORT(2,DTR), E100_STRUCT_SHADOW(2,DTR),
+ E100_STRUCT_MASK(2,DTR),
+ E100_STRUCT_MASK(2,RI),
+ E100_STRUCT_MASK(2,DSR),
+ E100_STRUCT_MASK(2,CD)
+#else
+ CONTROL_PINS_PORT_NOT_USED(2)
+#endif
+ },
+
+ /* Ser 3 */
+ {
+#ifdef CONFIG_ETRAX_SERIAL_PORT3
+ E100_STRUCT_PORT(3,DTR), E100_STRUCT_SHADOW(3,DTR),
+ E100_STRUCT_MASK(3,DTR),
+ E100_STRUCT_MASK(3,RI),
+ E100_STRUCT_MASK(3,DSR),
+ E100_STRUCT_MASK(3,CD)
+#else
+ CONTROL_PINS_PORT_NOT_USED(3)
+#endif
+ }
+};
+#endif /* !CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED */
+
+#define E100_RTS_MASK 0x20
+#define E100_CTS_MASK 0x40
+
+/* All serial port signals are active low:
+ * active = 0 -> 3.3V to RS-232 driver -> -12V on RS-232 level
+ * inactive = 1 -> 0V to RS-232 driver -> +12V on RS-232 level
+ *
+ * These macros return the pin value: 0 = 0V, >=1 = 3.3V on the ETRAX chip
+ */
+
+/* Output */
+#define E100_RTS_GET(info) ((info)->rx_ctrl & E100_RTS_MASK)
+/* Input */
+#define E100_CTS_GET(info) ((info)->ioport[REG_STATUS] & E100_CTS_MASK)
+
+/* These are typically PA or PB and 0 means 0V, 1 means 3.3V */
+/* Is an output */
+#define E100_DTR_GET(info) ((*e100_modem_pins[(info)->line].dtr_shadow) & e100_modem_pins[(info)->line].dtr_mask)
+
+/* Normally inputs */
+#define E100_RI_GET(info) ((*e100_modem_pins[(info)->line].ri_port) & e100_modem_pins[(info)->line].ri_mask)
+#define E100_CD_GET(info) ((*e100_modem_pins[(info)->line].cd_port) & e100_modem_pins[(info)->line].cd_mask)
+
+/* Input */
+#define E100_DSR_GET(info) ((*e100_modem_pins[(info)->line].dsr_port) & e100_modem_pins[(info)->line].dsr_mask)
+
+
+/*
+ * tmp_buf is used as a temporary buffer by serial_write. We need to
+ * lock it in case the memcpy_fromfs blocks while swapping in a page,
+ * and some other program tries to do a serial write at the same time.
+ * Since the lock will only come under contention when the system is
+ * swapping and available memory is low, it makes sense to share one
+ * buffer across all the serial ports, since it significantly saves
+ * memory if large numbers of serial ports are open.
+ */
+static unsigned char *tmp_buf;
+static DEFINE_MUTEX(tmp_buf_mutex);
+
+/* Calculate the character time depending on baudrate, number of bits, etc. */
+static void update_char_time(struct e100_serial * info)
+{
+ tcflag_t cflags = info->port.tty->termios->c_cflag;
+ int bits;
+
+ /* calc. number of bits / data byte */
+ /* databits + startbit and 1 stopbit */
+ if ((cflags & CSIZE) == CS7)
+ bits = 9;
+ else
+ bits = 10;
+
+ if (cflags & CSTOPB) /* 2 stopbits ? */
+ bits++;
+
+ if (cflags & PARENB) /* parity bit ? */
+ bits++;
+
+ /* calc timeout */
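+	/* e.g. 10 bits at 115200 baud gives ((10 * 1000000) / 115200) + 1 = 87 us */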
+ info->char_time_usec = ((bits * 1000000) / info->baud) + 1;
+ info->flush_time_usec = 4*info->char_time_usec;
+ if (info->flush_time_usec < MIN_FLUSH_TIME_USEC)
+ info->flush_time_usec = MIN_FLUSH_TIME_USEC;
+
+}
+
+/*
+ * This function maps from the Bxxxx defines in asm/termbits.h into real
+ * baud rates.
+ */
+
+static int
+cflag_to_baud(unsigned int cflag)
+{
+ static int baud_table[] = {
+ 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 };
+
+ static int ext_baud_table[] = {
+ 0, 57600, 115200, 230400, 460800, 921600, 1843200, 6250000,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ if (cflag & CBAUDEX)
+ return ext_baud_table[(cflag & CBAUD) & ~CBAUDEX];
+ else
+ return baud_table[cflag & CBAUD];
+}
+
+/* and this maps to an etrax100 hardware baud constant */
+
+static unsigned char
+cflag_to_etrax_baud(unsigned int cflag)
+{
+ char retval;
+
+ static char baud_table[] = {
+ -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, 4, 5, 6, 7 };
+
+ static char ext_baud_table[] = {
+ -1, 8, 9, 10, 11, 12, 13, 14, -1, -1, -1, -1, -1, -1, -1, -1 };
+
+ if (cflag & CBAUDEX)
+ retval = ext_baud_table[(cflag & CBAUD) & ~CBAUDEX];
+ else
+ retval = baud_table[cflag & CBAUD];
+
+ if (retval < 0) {
+ printk(KERN_WARNING "serdriver tried setting invalid baud rate, flags %x.\n", cflag);
+ retval = 5; /* choose default 9600 instead */
+ }
+
+ return retval | (retval << 4); /* choose same for both TX and RX */
+}
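+
+/* Example of the mapping above, assuming the usual asm/termbits.h encoding:
+ * a B9600 cflag (index 13, CBAUDEX clear) gives 9600 from cflag_to_baud()
+ * and 5 from the table in cflag_to_etrax_baud(), which returns 0x55 so TX
+ * and RX use the same rate; a B115200 cflag (CBAUDEX set, index 2) gives
+ * 115200 and the hardware constant 0x99.
+ */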
+
+
+/* Various static support functions */
+
+/* Functions to set or clear DTR/RTS on the requested line */
+/* It is complicated by the fact that RTS is a serial port register, while
+ * DTR might not be implemented in the HW at all, and if it is, it can be on
+ * any general port.
+ */
+
+
+static inline void
+e100_dtr(struct e100_serial *info, int set)
+{
+#ifndef CONFIG_SVINTO_SIM
+ unsigned char mask = e100_modem_pins[info->line].dtr_mask;
+
+#ifdef SERIAL_DEBUG_IO
+ printk("ser%i dtr %i mask: 0x%02X\n", info->line, set, mask);
+ printk("ser%i shadow before 0x%02X get: %i\n",
+ info->line, *e100_modem_pins[info->line].dtr_shadow,
+ E100_DTR_GET(info));
+#endif
+ /* DTR is active low */
+ {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *e100_modem_pins[info->line].dtr_shadow &= ~mask;
+ *e100_modem_pins[info->line].dtr_shadow |= (set ? 0 : mask);
+ *e100_modem_pins[info->line].dtr_port = *e100_modem_pins[info->line].dtr_shadow;
+ local_irq_restore(flags);
+ }
+
+#ifdef SERIAL_DEBUG_IO
+ printk("ser%i shadow after 0x%02X get: %i\n",
+ info->line, *e100_modem_pins[info->line].dtr_shadow,
+ E100_DTR_GET(info));
+#endif
+#endif
+}
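+
+#if 0
+/* Illustrative sketch (not compiled, not part of the driver) of the
+ * shadow-register pattern used by e100_dtr() above: a software shadow of
+ * the general port register is updated under local_irq_save() and then
+ * written to the hardware in one go.  The helper name and parameters
+ * below are generic placeholders.
+ */
+static inline void shadowed_pin_write(volatile unsigned char *port,
+				       unsigned char *shadow,
+				       unsigned char mask, int active)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*shadow &= ~mask;		/* clear the pin's bit in the shadow */
+	if (!active)			/* signals are active low ... */
+		*shadow |= mask;	/* ... so inactive means driving high */
+	*port = *shadow;		/* write the complete shadow back */
+	local_irq_restore(flags);
+}
+#endif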
+
+/* set = 0 means 3.3V on the pin: the register bit value is 0=active,
+ * 1=inactive, which corresponds to 0=0V and 1=3.3V on the pin
+ */
+static inline void
+e100_rts(struct e100_serial *info, int set)
+{
+#ifndef CONFIG_SVINTO_SIM
+ unsigned long flags;
+ local_irq_save(flags);
+ info->rx_ctrl &= ~E100_RTS_MASK;
+ info->rx_ctrl |= (set ? 0 : E100_RTS_MASK); /* RTS is active low */
+ info->ioport[REG_REC_CTRL] = info->rx_ctrl;
+ local_irq_restore(flags);
+#ifdef SERIAL_DEBUG_IO
+ printk("ser%i rts %i\n", info->line, set);
+#endif
+#endif
+}
+
+
+/* If this behaves as a modem, RI and CD are outputs */
+static inline void
+e100_ri_out(struct e100_serial *info, int set)
+{
+#ifndef CONFIG_SVINTO_SIM
+ /* RI is active low */
+ {
+ unsigned char mask = e100_modem_pins[info->line].ri_mask;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *e100_modem_pins[info->line].ri_shadow &= ~mask;
+ *e100_modem_pins[info->line].ri_shadow |= (set ? 0 : mask);
+ *e100_modem_pins[info->line].ri_port = *e100_modem_pins[info->line].ri_shadow;
+ local_irq_restore(flags);
+ }
+#endif
+}
+static inline void
+e100_cd_out(struct e100_serial *info, int set)
+{
+#ifndef CONFIG_SVINTO_SIM
+ /* CD is active low */
+ {
+ unsigned char mask = e100_modem_pins[info->line].cd_mask;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *e100_modem_pins[info->line].cd_shadow &= ~mask;
+ *e100_modem_pins[info->line].cd_shadow |= (set ? 0 : mask);
+ *e100_modem_pins[info->line].cd_port = *e100_modem_pins[info->line].cd_shadow;
+ local_irq_restore(flags);
+ }
+#endif
+}
+
+static inline void
+e100_disable_rx(struct e100_serial *info)
+{
+#ifndef CONFIG_SVINTO_SIM
+ /* disable the receiver */
+ info->ioport[REG_REC_CTRL] =
+ (info->rx_ctrl &= ~IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
+#endif
+}
+
+static inline void
+e100_enable_rx(struct e100_serial *info)
+{
+#ifndef CONFIG_SVINTO_SIM
+ /* enable the receiver */
+ info->ioport[REG_REC_CTRL] =
+ (info->rx_ctrl |= IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
+#endif
+}
+
+/* the rx DMA uses both the dma_descr and the dma_eop interrupts */
+
+static inline void
+e100_disable_rxdma_irq(struct e100_serial *info)
+{
+#ifdef SERIAL_DEBUG_INTR
+ printk("rxdma_irq(%d): 0\n",info->line);
+#endif
+ DINTR1(DEBUG_LOG(info->line,"IRQ disable_rxdma_irq %i\n", info->line));
+ *R_IRQ_MASK2_CLR = (info->irq << 2) | (info->irq << 3);
+}
+
+static inline void
+e100_enable_rxdma_irq(struct e100_serial *info)
+{
+#ifdef SERIAL_DEBUG_INTR
+ printk("rxdma_irq(%d): 1\n",info->line);
+#endif
+ DINTR1(DEBUG_LOG(info->line,"IRQ enable_rxdma_irq %i\n", info->line));
+ *R_IRQ_MASK2_SET = (info->irq << 2) | (info->irq << 3);
+}
+
+/* the tx DMA uses only dma_descr interrupt */
+
+static void e100_disable_txdma_irq(struct e100_serial *info)
+{
+#ifdef SERIAL_DEBUG_INTR
+ printk("txdma_irq(%d): 0\n",info->line);
+#endif
+ DINTR1(DEBUG_LOG(info->line,"IRQ disable_txdma_irq %i\n", info->line));
+ *R_IRQ_MASK2_CLR = info->irq;
+}
+
+static void e100_enable_txdma_irq(struct e100_serial *info)
+{
+#ifdef SERIAL_DEBUG_INTR
+ printk("txdma_irq(%d): 1\n",info->line);
+#endif
+ DINTR1(DEBUG_LOG(info->line,"IRQ enable_txdma_irq %i\n", info->line));
+ *R_IRQ_MASK2_SET = info->irq;
+}
+
+static void e100_disable_txdma_channel(struct e100_serial *info)
+{
+ unsigned long flags;
+
+ /* Disable output DMA channel for the serial port in question
+ * ( set to something other than serialX)
+ */
+ local_irq_save(flags);
+ DFLOW(DEBUG_LOG(info->line, "disable_txdma_channel %i\n", info->line));
+ if (info->line == 0) {
+ if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma6)) ==
+ IO_STATE(R_GEN_CONFIG, dma6, serial0)) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma6);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma6, unused);
+ }
+ } else if (info->line == 1) {
+ if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma8)) ==
+ IO_STATE(R_GEN_CONFIG, dma8, serial1)) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma8);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma8, usb);
+ }
+ } else if (info->line == 2) {
+ if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma2)) ==
+ IO_STATE(R_GEN_CONFIG, dma2, serial2)) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma2);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma2, par0);
+ }
+ } else if (info->line == 3) {
+ if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma4)) ==
+ IO_STATE(R_GEN_CONFIG, dma4, serial3)) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma4);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, par1);
+ }
+ }
+ *R_GEN_CONFIG = genconfig_shadow;
+ local_irq_restore(flags);
+}
+
+
+static void e100_enable_txdma_channel(struct e100_serial *info)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ DFLOW(DEBUG_LOG(info->line, "enable_txdma_channel %i\n", info->line));
+ /* Enable output DMA channel for the serial port in question */
+ if (info->line == 0) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma6);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma6, serial0);
+ } else if (info->line == 1) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma8);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma8, serial1);
+ } else if (info->line == 2) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma2);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma2, serial2);
+ } else if (info->line == 3) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma4);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, serial3);
+ }
+ *R_GEN_CONFIG = genconfig_shadow;
+ local_irq_restore(flags);
+}
+
+static void e100_disable_rxdma_channel(struct e100_serial *info)
+{
+ unsigned long flags;
+
+ /* Disable input DMA channel for the serial port in question
+ * ( set to something other than serialX)
+ */
+ local_irq_save(flags);
+ if (info->line == 0) {
+ if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma7)) ==
+ IO_STATE(R_GEN_CONFIG, dma7, serial0)) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma7, unused);
+ }
+ } else if (info->line == 1) {
+ if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma9)) ==
+ IO_STATE(R_GEN_CONFIG, dma9, serial1)) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma9);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma9, usb);
+ }
+ } else if (info->line == 2) {
+ if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma3)) ==
+ IO_STATE(R_GEN_CONFIG, dma3, serial2)) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma3);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma3, par0);
+ }
+ } else if (info->line == 3) {
+ if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma5)) ==
+ IO_STATE(R_GEN_CONFIG, dma5, serial3)) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma5);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, par1);
+ }
+ }
+ *R_GEN_CONFIG = genconfig_shadow;
+ local_irq_restore(flags);
+}
+
+
+static void e100_enable_rxdma_channel(struct e100_serial *info)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ /* Enable input DMA channel for the serial port in question */
+ if (info->line == 0) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma7, serial0);
+ } else if (info->line == 1) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma9);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma9, serial1);
+ } else if (info->line == 2) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma3);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma3, serial2);
+ } else if (info->line == 3) {
+ genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma5);
+ genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, serial3);
+ }
+ *R_GEN_CONFIG = genconfig_shadow;
+ local_irq_restore(flags);
+}
+
+#ifdef SERIAL_HANDLE_EARLY_ERRORS
+/* in order to detect and fix errors on the first byte
+ we have to use the serial interrupts as well. */
+
+static inline void
+e100_disable_serial_data_irq(struct e100_serial *info)
+{
+#ifdef SERIAL_DEBUG_INTR
+ printk("ser_irq(%d): 0\n",info->line);
+#endif
+ DINTR1(DEBUG_LOG(info->line,"IRQ disable data_irq %i\n", info->line));
+ *R_IRQ_MASK1_CLR = (1U << (8+2*info->line));
+}
+
+static inline void
+e100_enable_serial_data_irq(struct e100_serial *info)
+{
+#ifdef SERIAL_DEBUG_INTR
+ printk("ser_irq(%d): 1\n",info->line);
+ printk("**** %d = %d\n",
+ (8+2*info->line),
+ (1U << (8+2*info->line)));
+#endif
+ DINTR1(DEBUG_LOG(info->line,"IRQ enable data_irq %i\n", info->line));
+ *R_IRQ_MASK1_SET = (1U << (8+2*info->line));
+}
+#endif
+
+static inline void
+e100_disable_serial_tx_ready_irq(struct e100_serial *info)
+{
+#ifdef SERIAL_DEBUG_INTR
+ printk("ser_tx_irq(%d): 0\n",info->line);
+#endif
+ DINTR1(DEBUG_LOG(info->line,"IRQ disable ready_irq %i\n", info->line));
+ *R_IRQ_MASK1_CLR = (1U << (8+1+2*info->line));
+}
+
+static inline void
+e100_enable_serial_tx_ready_irq(struct e100_serial *info)
+{
+#ifdef SERIAL_DEBUG_INTR
+ printk("ser_tx_irq(%d): 1\n",info->line);
+ printk("**** %d = %d\n",
+ (8+1+2*info->line),
+ (1U << (8+1+2*info->line)));
+#endif
+ DINTR2(DEBUG_LOG(info->line,"IRQ enable ready_irq %i\n", info->line));
+ *R_IRQ_MASK1_SET = (1U << (8+1+2*info->line));
+}
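+
+/* R_IRQ_MASK1 layout assumed by the helpers above: bit 8 + 2*line is the
+ * data_avail interrupt and bit 9 + 2*line the tr_ready interrupt for
+ * serial port "line", i.e. ser0 uses bits 8/9, ser1 bits 10/11, ser2 bits
+ * 12/13 and ser3 bits 14/15.
+ */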
+
+static inline void e100_enable_rx_irq(struct e100_serial *info)
+{
+ if (info->uses_dma_in)
+ e100_enable_rxdma_irq(info);
+ else
+ e100_enable_serial_data_irq(info);
+}
+static inline void e100_disable_rx_irq(struct e100_serial *info)
+{
+ if (info->uses_dma_in)
+ e100_disable_rxdma_irq(info);
+ else
+ e100_disable_serial_data_irq(info);
+}
+
+#if defined(CONFIG_ETRAX_RS485)
+/* Enable RS-485 mode on selected port. This is UGLY. */
+static int
+e100_enable_rs485(struct tty_struct *tty, struct serial_rs485 *r)
+{
+ struct e100_serial * info = (struct e100_serial *)tty->driver_data;
+
+#if defined(CONFIG_ETRAX_RS485_ON_PA)
+ *R_PORT_PA_DATA = port_pa_data_shadow |= (1 << rs485_pa_bit);
+#endif
+#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
+ REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
+ rs485_port_g_bit, 1);
+#endif
+#if defined(CONFIG_ETRAX_RS485_LTC1387)
+ REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
+ CONFIG_ETRAX_RS485_LTC1387_DXEN_PORT_G_BIT, 1);
+ REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
+ CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 1);
+#endif
+
+ info->rs485 = *r;
+
+	/* Maximum delay before RTS is capped at 1000 */
+ if (info->rs485.delay_rts_before_send >= 1000)
+ info->rs485.delay_rts_before_send = 1000;
+
+/* printk("rts: on send = %i, after = %i, enabled = %i",
+ info->rs485.rts_on_send,
+ info->rs485.rts_after_sent,
+ info->rs485.enabled
+ );
+*/
+ return 0;
+}
+
+static int
+e100_write_rs485(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ struct e100_serial * info = (struct e100_serial *)tty->driver_data;
+ int old_value = (info->rs485.flags) & SER_RS485_ENABLED;
+
+ /* rs485 is always implicitly enabled if we're using the ioctl()
+ * but it doesn't have to be set in the serial_rs485
+ * (to be backward compatible with old apps)
+ * So we store, set and restore it.
+ */
+ info->rs485.flags |= SER_RS485_ENABLED;
+ /* rs_write now deals with RS485 if enabled */
+ count = rs_write(tty, buf, count);
+ if (!old_value)
+ info->rs485.flags &= ~(SER_RS485_ENABLED);
+ return count;
+}
+
+#ifdef CONFIG_ETRAX_FAST_TIMER
+/* Timer function to toggle RTS when using FAST_TIMER */
+static void rs485_toggle_rts_timer_function(unsigned long data)
+{
+ struct e100_serial *info = (struct e100_serial *)data;
+
+ fast_timers_rs485[info->line].function = NULL;
+ e100_rts(info, (info->rs485.flags & SER_RS485_RTS_AFTER_SEND));
+#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
+ e100_enable_rx(info);
+ e100_enable_rx_irq(info);
+#endif
+}
+#endif
+#endif /* CONFIG_ETRAX_RS485 */
+
+/*
+ * ------------------------------------------------------------
+ * rs_stop() and rs_start()
+ *
+ * These routines are called before setting or resetting tty->stopped.
+ * They enable or disable the transmitter using the XOFF registers, as necessary.
+ * ------------------------------------------------------------
+ */
+
+static void
+rs_stop(struct tty_struct *tty)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+ if (info) {
+ unsigned long flags;
+ unsigned long xoff;
+
+ local_irq_save(flags);
+ DFLOW(DEBUG_LOG(info->line, "XOFF rs_stop xmit %i\n",
+ CIRC_CNT(info->xmit.head,
+ info->xmit.tail,SERIAL_XMIT_SIZE)));
+
+ xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char,
+ STOP_CHAR(info->port.tty));
+ xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, stop);
+ if (tty->termios->c_iflag & IXON ) {
+ xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
+ }
+
+ *((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
+ local_irq_restore(flags);
+ }
+}
+
+static void
+rs_start(struct tty_struct *tty)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+ if (info) {
+ unsigned long flags;
+ unsigned long xoff;
+
+ local_irq_save(flags);
+ DFLOW(DEBUG_LOG(info->line, "XOFF rs_start xmit %i\n",
+ CIRC_CNT(info->xmit.head,
+ info->xmit.tail,SERIAL_XMIT_SIZE)));
+ xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char, STOP_CHAR(tty));
+ xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, enable);
+ if (tty->termios->c_iflag & IXON ) {
+ xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
+ }
+
+ *((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
+ if (!info->uses_dma_out &&
+ info->xmit.head != info->xmit.tail && info->xmit.buf)
+ e100_enable_serial_tx_ready_irq(info);
+
+ local_irq_restore(flags);
+ }
+}
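+
+/* Example: with IXON set and the default ^S (0x13) stop character,
+ * rs_stop() writes an XOFF word with xoff_char = 0x13, tx_stop = stop and
+ * auto_xoff enabled, halting the transmitter; rs_start() rewrites it with
+ * tx_stop = enable and, for the non-DMA case, re-arms the tx-ready
+ * interrupt if there is buffered data to send.
+ */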
+
+/*
+ * ----------------------------------------------------------------------
+ *
+ * Here starts the interrupt handling routines. All of the following
+ * subroutines are declared as inline and are folded into
+ * rs_interrupt(). They were separated out for readability's sake.
+ *
+ * Note: rs_interrupt() is a "fast" interrupt, which means that it
+ * runs with interrupts turned off. People who may want to modify
+ * rs_interrupt() should try to keep the interrupt handler as fast as
+ * possible. After you are done making modifications, it is not a bad
+ * idea to do:
+ *
+ * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c
+ *
+ * and look at the resulting assembly code in serial.s.
+ *
+ * - Ted Ts'o (tytso@mit.edu), 7-Mar-93
+ * -----------------------------------------------------------------------
+ */
+
+/*
+ * This routine is used by the interrupt handler to schedule
+ * processing in the software interrupt portion of the driver.
+ */
+static void rs_sched_event(struct e100_serial *info, int event)
+{
+ if (info->event & (1 << event))
+ return;
+ info->event |= 1 << event;
+ schedule_work(&info->work);
+}
+
+/* The output DMA channel is free - use it to send as many chars as possible
+ * NOTES:
+ * We don't pay attention to info->x_char, which means if the TTY wants to
+ * use XON/XOFF it will set info->x_char but we won't send any X char!
+ *
+ * To implement this, we'd just start a DMA send of 1 byte pointing at a
+ * buffer containing the X char, and skip updating xmit. We'd also have to
+ * check if the last sent char was the X char when we enter this function
+ * the next time, to avoid updating xmit with the sent X value.
+ */
+
+static void
+transmit_chars_dma(struct e100_serial *info)
+{
+ unsigned int c, sentl;
+ struct etrax_dma_descr *descr;
+
+#ifdef CONFIG_SVINTO_SIM
+	/* This will output too little if tail is not always 0, since
+	 * we don't loop again to send the remaining part. Anyway this SHOULD be a
+ * no-op - transmit_chars_dma would never really be called during sim
+ * since rs_write does not write into the xmit buffer then.
+ */
+ if (info->xmit.tail)
+ printk("Error in serial.c:transmit_chars-dma(), tail!=0\n");
+ if (info->xmit.head != info->xmit.tail) {
+ SIMCOUT(info->xmit.buf + info->xmit.tail,
+ CIRC_CNT(info->xmit.head,
+ info->xmit.tail,
+ SERIAL_XMIT_SIZE));
+ info->xmit.head = info->xmit.tail; /* move back head */
+ info->tr_running = 0;
+ }
+ return;
+#endif
+ /* acknowledge both dma_descr and dma_eop irq in R_DMA_CHx_CLR_INTR */
+ *info->oclrintradr =
+ IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
+ IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
+
+#ifdef SERIAL_DEBUG_INTR
+ if (info->line == SERIAL_DEBUG_LINE)
+ printk("tc\n");
+#endif
+ if (!info->tr_running) {
+ /* weirdo... we shouldn't get here! */
+ printk(KERN_WARNING "Achtung: transmit_chars_dma with !tr_running\n");
+ return;
+ }
+
+ descr = &info->tr_descr;
+
+ /* first get the amount of bytes sent during the last DMA transfer,
+ and update xmit accordingly */
+
+ /* if the stop bit was not set, all data has been sent */
+ if (!(descr->status & d_stop)) {
+ sentl = descr->sw_len;
+ } else
+ /* otherwise we find the amount of data sent here */
+ sentl = descr->hw_len;
+
+ DFLOW(DEBUG_LOG(info->line, "TX %i done\n", sentl));
+
+ /* update stats */
+ info->icount.tx += sentl;
+
+ /* update xmit buffer */
+ info->xmit.tail = (info->xmit.tail + sentl) & (SERIAL_XMIT_SIZE - 1);
+
+	/* if there are only a few chars left in the buf, wake up the blocked
+	   write, if any */
+ if (CIRC_CNT(info->xmit.head,
+ info->xmit.tail,
+ SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
+ rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
+
+	/* find out the largest number of consecutive bytes we want to send now */
+
+ c = CIRC_CNT_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+
+ /* Don't send all in one DMA transfer - divide it so we wake up
+ * application before all is sent
+ */
+
+ if (c >= 4*WAKEUP_CHARS)
+ c = c/2;
+
+ if (c <= 0) {
+ /* our job here is done, don't schedule any new DMA transfer */
+ info->tr_running = 0;
+
+#if defined(CONFIG_ETRAX_RS485) && defined(CONFIG_ETRAX_FAST_TIMER)
+ if (info->rs485.flags & SER_RS485_ENABLED) {
+ /* Set a short timer to toggle RTS */
+ start_one_shot_timer(&fast_timers_rs485[info->line],
+ rs485_toggle_rts_timer_function,
+ (unsigned long)info,
+ info->char_time_usec*2,
+ "RS-485");
+ }
+#endif /* RS485 */
+ return;
+ }
+
+ /* ok we can schedule a dma send of c chars starting at info->xmit.tail */
+ /* set up the descriptor correctly for output */
+ DFLOW(DEBUG_LOG(info->line, "TX %i\n", c));
+ descr->ctrl = d_int | d_eol | d_wait; /* Wait needed for tty_wait_until_sent() */
+ descr->sw_len = c;
+ descr->buf = virt_to_phys(info->xmit.buf + info->xmit.tail);
+ descr->status = 0;
+
+ *info->ofirstadr = virt_to_phys(descr); /* write to R_DMAx_FIRST */
+ *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, start);
+
+ /* DMA is now running (hopefully) */
+} /* transmit_chars_dma */
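+
+#if 0
+/* Sketch (not compiled) of the X-char handling described in the NOTES
+ * above transmit_chars_dma(): queue a one-byte descriptor pointing at a
+ * private buffer and remember a flag so the next completion does not
+ * advance info->xmit.  x_char_buf and x_char_pending are hypothetical
+ * names, not existing driver symbols.
+ */
+static unsigned char x_char_buf;
+static int x_char_pending;
+
+static void send_x_char_dma(struct e100_serial *info)
+{
+	struct etrax_dma_descr *descr = &info->tr_descr;
+
+	x_char_buf = info->x_char;
+	info->x_char = 0;
+	x_char_pending = 1;	/* checked on the next tr_interrupt */
+
+	descr->ctrl = d_int | d_eol | d_wait;
+	descr->sw_len = 1;
+	descr->buf = virt_to_phys(&x_char_buf);
+	descr->status = 0;
+
+	*info->ofirstadr = virt_to_phys(descr);	/* write to R_DMAx_FIRST */
+	*info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, start);
+}
+#endif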
+
+static void
+start_transmit(struct e100_serial *info)
+{
+#if 0
+ if (info->line == SERIAL_DEBUG_LINE)
+ printk("x\n");
+#endif
+
+ info->tr_descr.sw_len = 0;
+ info->tr_descr.hw_len = 0;
+ info->tr_descr.status = 0;
+ info->tr_running = 1;
+ if (info->uses_dma_out)
+ transmit_chars_dma(info);
+ else
+ e100_enable_serial_tx_ready_irq(info);
+} /* start_transmit */
+
+#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
+static int serial_fast_timer_started = 0;
+static int serial_fast_timer_expired = 0;
+static void flush_timeout_function(unsigned long data);
+#define START_FLUSH_FAST_TIMER_TIME(info, string, usec) {\
+ unsigned long timer_flags; \
+ local_irq_save(timer_flags); \
+ if (fast_timers[info->line].function == NULL) { \
+ serial_fast_timer_started++; \
+ TIMERD(DEBUG_LOG(info->line, "start_timer %i ", info->line)); \
+ TIMERD(DEBUG_LOG(info->line, "num started: %i\n", serial_fast_timer_started)); \
+ start_one_shot_timer(&fast_timers[info->line], \
+ flush_timeout_function, \
+ (unsigned long)info, \
+ (usec), \
+ string); \
+ } \
+ else { \
+ TIMERD(DEBUG_LOG(info->line, "timer %i already running\n", info->line)); \
+ } \
+ local_irq_restore(timer_flags); \
+}
+#define START_FLUSH_FAST_TIMER(info, string) START_FLUSH_FAST_TIMER_TIME(info, string, info->flush_time_usec)
+
+#else
+#define START_FLUSH_FAST_TIMER_TIME(info, string, usec)
+#define START_FLUSH_FAST_TIMER(info, string)
+#endif
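+
+/* Usage note: START_FLUSH_FAST_TIMER(info, "rx") arms a one-shot fast
+ * timer for info->flush_time_usec (roughly four character times, see
+ * update_char_time()) unless one is already pending for this line;
+ * flush_timeout_function() then flips the received data to the tty.
+ */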
+
+static struct etrax_recv_buffer *
+alloc_recv_buffer(unsigned int size)
+{
+ struct etrax_recv_buffer *buffer;
+
+ if (!(buffer = kmalloc(sizeof *buffer + size, GFP_ATOMIC)))
+ return NULL;
+
+ buffer->next = NULL;
+ buffer->length = 0;
+ buffer->error = TTY_NORMAL;
+
+ return buffer;
+}
+
+static void
+append_recv_buffer(struct e100_serial *info, struct etrax_recv_buffer *buffer)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ if (!info->first_recv_buffer)
+ info->first_recv_buffer = buffer;
+ else
+ info->last_recv_buffer->next = buffer;
+
+ info->last_recv_buffer = buffer;
+
+ info->recv_cnt += buffer->length;
+ if (info->recv_cnt > info->max_recv_cnt)
+ info->max_recv_cnt = info->recv_cnt;
+
+ local_irq_restore(flags);
+}
+
+static int
+add_char_and_flag(struct e100_serial *info, unsigned char data, unsigned char flag)
+{
+ struct etrax_recv_buffer *buffer;
+ if (info->uses_dma_in) {
+ if (!(buffer = alloc_recv_buffer(4)))
+ return 0;
+
+ buffer->length = 1;
+ buffer->error = flag;
+ buffer->buffer[0] = data;
+
+ append_recv_buffer(info, buffer);
+
+ info->icount.rx++;
+ } else {
+ struct tty_struct *tty = info->port.tty;
+ tty_insert_flip_char(tty, data, flag);
+ info->icount.rx++;
+ }
+
+ return 1;
+}
+
+static unsigned int handle_descr_data(struct e100_serial *info,
+ struct etrax_dma_descr *descr,
+ unsigned int recvl)
+{
+ struct etrax_recv_buffer *buffer = phys_to_virt(descr->buf) - sizeof *buffer;
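+	/* descr->buf points at buffer->buffer, the data area that directly
+	 * follows the struct etrax_recv_buffer header allocated by
+	 * alloc_recv_buffer(); stepping back sizeof *buffer recovers the
+	 * header itself.
+	 */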
+
+ if (info->recv_cnt + recvl > 65536) {
+ printk(KERN_CRIT
+ "%s: Too much pending incoming serial data! Dropping %u bytes.\n", __func__, recvl);
+ return 0;
+ }
+
+ buffer->length = recvl;
+
+ if (info->errorcode == ERRCODE_SET_BREAK)
+ buffer->error = TTY_BREAK;
+ info->errorcode = 0;
+
+ append_recv_buffer(info, buffer);
+
+ if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
+ panic("%s: Failed to allocate memory for receive buffer!\n", __func__);
+
+ descr->buf = virt_to_phys(buffer->buffer);
+
+ return recvl;
+}
+
+static unsigned int handle_all_descr_data(struct e100_serial *info)
+{
+ struct etrax_dma_descr *descr;
+ unsigned int recvl;
+ unsigned int ret = 0;
+
+ while (1)
+ {
+ descr = &info->rec_descr[info->cur_rec_descr];
+
+ if (descr == phys_to_virt(*info->idescradr))
+ break;
+
+ if (++info->cur_rec_descr == SERIAL_RECV_DESCRIPTORS)
+ info->cur_rec_descr = 0;
+
+ /* find out how many bytes were read */
+
+ /* if the eop bit was not set, all data has been received */
+ if (!(descr->status & d_eop)) {
+ recvl = descr->sw_len;
+ } else {
+ /* otherwise we find the amount of data received here */
+ recvl = descr->hw_len;
+ }
+
+ /* Reset the status information */
+ descr->status = 0;
+
+ DFLOW( DEBUG_LOG(info->line, "RX %lu\n", recvl);
+ if (info->port.tty->stopped) {
+ unsigned char *buf = phys_to_virt(descr->buf);
+ DEBUG_LOG(info->line, "rx 0x%02X\n", buf[0]);
+ DEBUG_LOG(info->line, "rx 0x%02X\n", buf[1]);
+ DEBUG_LOG(info->line, "rx 0x%02X\n", buf[2]);
+ }
+ );
+
+ /* update stats */
+ info->icount.rx += recvl;
+
+ ret += handle_descr_data(info, descr, recvl);
+ }
+
+ return ret;
+}
+
+static void receive_chars_dma(struct e100_serial *info)
+{
+ struct tty_struct *tty;
+ unsigned char rstat;
+
+#ifdef CONFIG_SVINTO_SIM
+	/* No receive in the simulator. This will probably be added when the rest
+	 * of the serial interface works, and this piece will then just be removed.
+ */
+ return;
+#endif
+
+ /* Acknowledge both dma_descr and dma_eop irq in R_DMA_CHx_CLR_INTR */
+ *info->iclrintradr =
+ IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
+ IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
+
+ tty = info->port.tty;
+ if (!tty) /* Something wrong... */
+ return;
+
+#ifdef SERIAL_HANDLE_EARLY_ERRORS
+ if (info->uses_dma_in)
+ e100_enable_serial_data_irq(info);
+#endif
+
+ if (info->errorcode == ERRCODE_INSERT_BREAK)
+ add_char_and_flag(info, '\0', TTY_BREAK);
+
+ handle_all_descr_data(info);
+
+ /* Read the status register to detect errors */
+ rstat = info->ioport[REG_STATUS];
+ if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) {
+ DFLOW(DEBUG_LOG(info->line, "XOFF detect stat %x\n", rstat));
+ }
+
+ if (rstat & SER_ERROR_MASK) {
+ /* If we got an error, we must reset it by reading the
+ * data_in field
+ */
+ unsigned char data = info->ioport[REG_DATA];
+
+ PROCSTAT(ser_stat[info->line].errors_cnt++);
+ DEBUG_LOG(info->line, "#dERR: s d 0x%04X\n",
+ ((rstat & SER_ERROR_MASK) << 8) | data);
+
+ if (rstat & SER_PAR_ERR_MASK)
+ add_char_and_flag(info, data, TTY_PARITY);
+ else if (rstat & SER_OVERRUN_MASK)
+ add_char_and_flag(info, data, TTY_OVERRUN);
+ else if (rstat & SER_FRAMING_ERR_MASK)
+ add_char_and_flag(info, data, TTY_FRAME);
+ }
+
+ START_FLUSH_FAST_TIMER(info, "receive_chars");
+
+ /* Restart the receiving DMA */
+ *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart);
+}
+
+static int start_recv_dma(struct e100_serial *info)
+{
+ struct etrax_dma_descr *descr = info->rec_descr;
+ struct etrax_recv_buffer *buffer;
+ int i;
+
+ /* Set up the receiving descriptors */
+ for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++) {
+ if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
+ panic("%s: Failed to allocate memory for receive buffer!\n", __func__);
+
+ descr[i].ctrl = d_int;
+ descr[i].buf = virt_to_phys(buffer->buffer);
+ descr[i].sw_len = SERIAL_DESCR_BUF_SIZE;
+ descr[i].hw_len = 0;
+ descr[i].status = 0;
+ descr[i].next = virt_to_phys(&descr[i+1]);
+ }
+
+ /* Link the last descriptor to the first */
+ descr[i-1].next = virt_to_phys(&descr[0]);
+
+ /* Start with the first descriptor in the list */
+ info->cur_rec_descr = 0;
+
+ /* Start the DMA */
+ *info->ifirstadr = virt_to_phys(&descr[info->cur_rec_descr]);
+ *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, start);
+
+ /* Input DMA should be running now */
+ return 1;
+}
+
+static void
+start_receive(struct e100_serial *info)
+{
+#ifdef CONFIG_SVINTO_SIM
+	/* No receive in the simulator. This will probably be added when the rest
+	 * of the serial interface works, and this piece will then just be removed.
+ */
+ return;
+#endif
+ if (info->uses_dma_in) {
+ /* reset the input dma channel to be sure it works */
+
+ *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
+ while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->icmdadr) ==
+ IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
+
+ start_recv_dma(info);
+ }
+}
+
+
+/* the bits in the MASK2 register are laid out like this:
+ DMAI_EOP DMAI_DESCR DMAO_EOP DMAO_DESCR
+ where I is the input channel and O is the output channel for the port.
+ info->irq is the bit number for the DMAO_DESCR so to check the others we
+ shift info->irq to the left.
+*/
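+
+/* Example: with info->irq being the DMAO_DESCR bit, info->irq << 1 is
+ * DMAO_EOP, info->irq << 2 is DMAI_DESCR and info->irq << 3 is DMAI_EOP;
+ * that is why the rx helpers test (info->irq << 2) | (info->irq << 3)
+ * while the tx path only tests info->irq.
+ */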
+
+/* dma output channel interrupt handler
+ this interrupt is called from DMA2(ser2), DMA4(ser3), DMA6(ser0) or
+ DMA8(ser1) when they have finished a descriptor with the intr flag set.
+*/
+
+static irqreturn_t
+tr_interrupt(int irq, void *dev_id)
+{
+ struct e100_serial *info;
+ unsigned long ireg;
+ int i;
+ int handled = 0;
+
+#ifdef CONFIG_SVINTO_SIM
+	/* No receive in the simulator. This will probably be added when the rest
+	 * of the serial interface works, and this piece will then just be removed.
+ */
+ {
+ const char *s = "What? tr_interrupt in simulator??\n";
+ SIMCOUT(s,strlen(s));
+ }
+ return IRQ_HANDLED;
+#endif
+
+ /* find out the line that caused this irq and get it from rs_table */
+
+ ireg = *R_IRQ_MASK2_RD; /* get the active irq bits for the dma channels */
+
+ for (i = 0; i < NR_PORTS; i++) {
+ info = rs_table + i;
+ if (!info->enabled || !info->uses_dma_out)
+ continue;
+		/* check for dma_descr (don't need to check for dma_eop in output dma for serial) */
+ if (ireg & info->irq) {
+ handled = 1;
+ /* we can send a new dma bunch. make it so. */
+ DINTR2(DEBUG_LOG(info->line, "tr_interrupt %i\n", i));
+ /* Read jiffies_usec first,
+ * we want this time to be as late as possible
+ */
+ PROCSTAT(ser_stat[info->line].tx_dma_ints++);
+ info->last_tx_active_usec = GET_JIFFIES_USEC();
+ info->last_tx_active = jiffies;
+ transmit_chars_dma(info);
+ }
+
+ /* FIXME: here we should really check for a change in the
+ status lines and if so call status_handle(info) */
+ }
+ return IRQ_RETVAL(handled);
+} /* tr_interrupt */
+
+/* dma input channel interrupt handler */
+
+static irqreturn_t
+rec_interrupt(int irq, void *dev_id)
+{
+ struct e100_serial *info;
+ unsigned long ireg;
+ int i;
+ int handled = 0;
+
+#ifdef CONFIG_SVINTO_SIM
+	/* No receive in the simulator. This will probably be added when the rest
+	 * of the serial interface works, and this piece will then just be removed.
+ */
+ {
+ const char *s = "What? rec_interrupt in simulator??\n";
+ SIMCOUT(s,strlen(s));
+ }
+ return IRQ_HANDLED;
+#endif
+
+ /* find out the line that caused this irq and get it from rs_table */
+
+ ireg = *R_IRQ_MASK2_RD; /* get the active irq bits for the dma channels */
+
+ for (i = 0; i < NR_PORTS; i++) {
+ info = rs_table + i;
+ if (!info->enabled || !info->uses_dma_in)
+ continue;
+ /* check for both dma_eop and dma_descr for the input dma channel */
+ if (ireg & ((info->irq << 2) | (info->irq << 3))) {
+ handled = 1;
+ /* we have received something */
+ receive_chars_dma(info);
+ }
+
+ /* FIXME: here we should really check for a change in the
+ status lines and if so call status_handle(info) */
+ }
+ return IRQ_RETVAL(handled);
+} /* rec_interrupt */
+
+static int force_eop_if_needed(struct e100_serial *info)
+{
+ /* We check data_avail bit to determine if data has
+ * arrived since last time
+ */
+ unsigned char rstat = info->ioport[REG_STATUS];
+
+ /* error or datavail? */
+ if (rstat & SER_ERROR_MASK) {
+ /* Some error has occurred. If there has been valid data, an
+ * EOP interrupt will be made automatically. If no data, the
+ * normal ser_interrupt should be enabled and handle it.
+ * So do nothing!
+ */
+ DEBUG_LOG(info->line, "timeout err: rstat 0x%03X\n",
+ rstat | (info->line << 8));
+ return 0;
+ }
+
+ if (rstat & SER_DATA_AVAIL_MASK) {
+ /* Ok data, no error, count it */
+ TIMERD(DEBUG_LOG(info->line, "timeout: rstat 0x%03X\n",
+ rstat | (info->line << 8)));
+ /* Read data to clear status flags */
+ (void)info->ioport[REG_DATA];
+
+ info->forced_eop = 0;
+ START_FLUSH_FAST_TIMER(info, "magic");
+ return 0;
+ }
+
+ /* hit the timeout, force an EOP for the input
+ * dma channel if we haven't already
+ */
+ if (!info->forced_eop) {
+ info->forced_eop = 1;
+ PROCSTAT(ser_stat[info->line].timeout_flush_cnt++);
+ TIMERD(DEBUG_LOG(info->line, "timeout EOP %i\n", info->line));
+ FORCE_EOP(info);
+ }
+
+ return 1;
+}
+
+static void flush_to_flip_buffer(struct e100_serial *info)
+{
+ struct tty_struct *tty;
+ struct etrax_recv_buffer *buffer;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ tty = info->port.tty;
+
+ if (!tty) {
+ local_irq_restore(flags);
+ return;
+ }
+
+ while ((buffer = info->first_recv_buffer) != NULL) {
+ unsigned int count = buffer->length;
+
+ tty_insert_flip_string(tty, buffer->buffer, count);
+ info->recv_cnt -= count;
+
+ if (count == buffer->length) {
+ info->first_recv_buffer = buffer->next;
+ kfree(buffer);
+ } else {
+ buffer->length -= count;
+ memmove(buffer->buffer, buffer->buffer + count, buffer->length);
+ buffer->error = TTY_NORMAL;
+ }
+ }
+
+ if (!info->first_recv_buffer)
+ info->last_recv_buffer = NULL;
+
+ local_irq_restore(flags);
+
+ /* This includes a check for low-latency */
+ tty_flip_buffer_push(tty);
+}
+
+static void check_flush_timeout(struct e100_serial *info)
+{
+ /* Flip what we've got (if we can) */
+ flush_to_flip_buffer(info);
+
+	/* We might need to flip later, but not too fast
+ * since the system is busy processing input... */
+ if (info->first_recv_buffer)
+ START_FLUSH_FAST_TIMER_TIME(info, "flip", 2000);
+
+ /* Force eop last, since data might have come while we're processing
+ * and if we started the slow timer above, we won't start a fast
+ * below.
+ */
+ force_eop_if_needed(info);
+}
+
+#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
+static void flush_timeout_function(unsigned long data)
+{
+ struct e100_serial *info = (struct e100_serial *)data;
+
+ fast_timers[info->line].function = NULL;
+ serial_fast_timer_expired++;
+	TIMERD(DEBUG_LOG(info->line, "flush_timeout %i ", info->line));
+ TIMERD(DEBUG_LOG(info->line, "num expired: %i\n", serial_fast_timer_expired));
+ check_flush_timeout(info);
+}
+
+#else
+
+/* dma fifo/buffer timeout handler
+ forces an end-of-packet for the dma input channel if no chars
+ have been received for CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS/100 s.
+*/
+
+static struct timer_list flush_timer;
+
+static void
+timed_flush_handler(unsigned long ptr)
+{
+ struct e100_serial *info;
+ int i;
+
+#ifdef CONFIG_SVINTO_SIM
+ return;
+#endif
+
+ for (i = 0; i < NR_PORTS; i++) {
+ info = rs_table + i;
+ if (info->uses_dma_in)
+ check_flush_timeout(info);
+ }
+
+ /* restart flush timer */
+ mod_timer(&flush_timer, jiffies + CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS);
+}
+#endif
+
+#ifdef SERIAL_HANDLE_EARLY_ERRORS
+
+/* If there is an error (i.e. a break) while the DMA is running and
+ * there are no bytes in the fifo, the DMA is stopped and we get no
+ * eop interrupt. Thus we have to monitor the first bytes of a DMA
+ * transfer, and if they are without error we can turn the serial
+ * interrupts off.
+ */
+
+/*
+BREAK handling on ETRAX 100:
+ETRAX will generate an interrupt even though there is no stop bit between
+the characters.
+
+Depending on how long the break sequence is, the end of the break sequence
+will look different:
+| indicates start/end of a character.
+
+B= Break character (0x00) with framing error.
+E= Error byte with parity error received after B characters.
+F= "Faked" valid byte received immediately after B characters.
+V= Valid byte
+
+1.
+ B BL ___________________________ V
+.._|__________|__________| |valid data |
+
+Multiple frame errors with data == 0x00 (B),
+the timing matches up "perfectly" so no extra ending char is detected.
+If the RXD pin is 1 in the last interrupt, we set
+info->errorcode = ERRCODE_INSERT_BREAK, but we can't really
+know whether another byte will come and this really is case 2. below
+(e.g. F=0xFF or 0xFE).
+If the RXD pin is 0 we can expect another character (see 2. below).
+
+
+2.
+
+ B B E or F__________________..__ V
+.._|__________|__________|______ | |valid data
+ "valid" or
+ parity error
+
+Multiple frame errors with data == 0x00 (B),
+but the trailing part of the break is interpreted as a start bit (and possibly
+some 0 bits followed by a number of 1 bits and a stop bit).
+Depending on parity settings etc. this last character can be either
+a fake "valid" char (F) or have a parity error (E).
+
+If the character is valid it will be put in the buffer and
+we set info->errorcode = ERRCODE_SET_BREAK so the receive interrupt
+will set the flags and the tty will handle it;
+if it's an error byte it will not be put in the buffer
+and we set info->errorcode = ERRCODE_INSERT_BREAK.
+
+To distinguish a V byte in 1. from an F byte in 2. we keep a timestamp
+of the last faulty char (B) and compare it with the current time:
+If the elapsed time is less than 2*char_time_usec we assume
+it's a faked F char and not a valid char, and set
+info->errorcode = ERRCODE_SET_BREAK.
+
+Flaws in the above solution:
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We use the timer to distinguish an F character from a V character;
+if a V character comes too close after the break we might make the wrong decision.
+
+TODO: The break will be delayed until an F or V character is received.
+
+*/
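+
+/* Worked example: at 9600 baud with 8N1, char_time_usec is about 1042 us
+ * (see update_char_time()), so the 2*char_time_usec window used in
+ * handle_ser_rx_interrupt() is roughly 2.1 ms; a byte that completes
+ * within that window after the last break character is treated as the
+ * faked F terminator (ERRCODE_SET_BREAK), while a later byte is taken as
+ * a genuine valid V byte.
+ */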
+
+static
+struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info)
+{
+ unsigned long data_read;
+ struct tty_struct *tty = info->port.tty;
+
+ if (!tty) {
+ printk("!NO TTY!\n");
+ return info;
+ }
+
+ /* Read data and status at the same time */
+ data_read = *((unsigned long *)&info->ioport[REG_DATA_STATUS32]);
+more_data:
+ if (data_read & IO_MASK(R_SERIAL0_READ, xoff_detect) ) {
+ DFLOW(DEBUG_LOG(info->line, "XOFF detect\n", 0));
+ }
+ DINTR2(DEBUG_LOG(info->line, "ser_rx %c\n", IO_EXTRACT(R_SERIAL0_READ, data_in, data_read)));
+
+ if (data_read & ( IO_MASK(R_SERIAL0_READ, framing_err) |
+ IO_MASK(R_SERIAL0_READ, par_err) |
+ IO_MASK(R_SERIAL0_READ, overrun) )) {
+ /* An error */
+ info->last_rx_active_usec = GET_JIFFIES_USEC();
+ info->last_rx_active = jiffies;
+ DINTR1(DEBUG_LOG(info->line, "ser_rx err stat_data %04X\n", data_read));
+ DLOG_INT_TRIG(
+ if (!log_int_trig1_pos) {
+ log_int_trig1_pos = log_int_pos;
+ log_int(rdpc(), 0, 0);
+ }
+ );
+
+
+ if ( ((data_read & IO_MASK(R_SERIAL0_READ, data_in)) == 0) &&
+ (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) ) {
+ /* Most likely a break, but we get interrupts over and
+ * over again.
+ */
+
+ if (!info->break_detected_cnt) {
+ DEBUG_LOG(info->line, "#BRK start\n", 0);
+ }
+ if (data_read & IO_MASK(R_SERIAL0_READ, rxd)) {
+ /* The RX pin is high now, so the break
+ * must be over, but....
+ * we can't really know if we will get another
+ * last byte ending the break or not.
+ * And we don't know if the byte (if any) will
+ * have an error or look valid.
+ */
+ DEBUG_LOG(info->line, "# BL BRK\n", 0);
+ info->errorcode = ERRCODE_INSERT_BREAK;
+ }
+ info->break_detected_cnt++;
+ } else {
+ /* The error does not look like a break, but could be
+ * the end of one
+ */
+ if (info->break_detected_cnt) {
+ DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt);
+ info->errorcode = ERRCODE_INSERT_BREAK;
+ } else {
+ unsigned char data = IO_EXTRACT(R_SERIAL0_READ,
+ data_in, data_read);
+ char flag = TTY_NORMAL;
+ if (info->errorcode == ERRCODE_INSERT_BREAK) {
+ struct tty_struct *tty = info->port.tty;
+ tty_insert_flip_char(tty, 0, flag);
+ info->icount.rx++;
+ }
+
+ if (data_read & IO_MASK(R_SERIAL0_READ, par_err)) {
+ info->icount.parity++;
+ flag = TTY_PARITY;
+ } else if (data_read & IO_MASK(R_SERIAL0_READ, overrun)) {
+ info->icount.overrun++;
+ flag = TTY_OVERRUN;
+ } else if (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) {
+ info->icount.frame++;
+ flag = TTY_FRAME;
+ }
+ tty_insert_flip_char(tty, data, flag);
+ info->errorcode = 0;
+ }
+ info->break_detected_cnt = 0;
+ }
+ } else if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) {
+ /* No error */
+ DLOG_INT_TRIG(
+ if (!log_int_trig1_pos) {
+ if (log_int_pos >= log_int_size) {
+ log_int_pos = 0;
+ }
+ log_int_trig0_pos = log_int_pos;
+ log_int(rdpc(), 0, 0);
+ }
+ );
+ tty_insert_flip_char(tty,
+ IO_EXTRACT(R_SERIAL0_READ, data_in, data_read),
+ TTY_NORMAL);
+ } else {
+ DEBUG_LOG(info->line, "ser_rx int but no data_avail %08lX\n", data_read);
+ }
+
+
+ info->icount.rx++;
+ data_read = *((unsigned long *)&info->ioport[REG_DATA_STATUS32]);
+ if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) {
+ DEBUG_LOG(info->line, "ser_rx %c in loop\n", IO_EXTRACT(R_SERIAL0_READ, data_in, data_read));
+ goto more_data;
+ }
+
+ tty_flip_buffer_push(info->port.tty);
+ return info;
+}
+
+static struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info)
+{
+ unsigned char rstat;
+
+#ifdef SERIAL_DEBUG_INTR
+	printk("Interrupt from serport %d\n", info->line);
+#endif
+/* DEBUG_LOG(info->line, "ser_interrupt stat %03X\n", rstat | (i << 8)); */
+ if (!info->uses_dma_in) {
+ return handle_ser_rx_interrupt_no_dma(info);
+ }
+ /* DMA is used */
+ rstat = info->ioport[REG_STATUS];
+ if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) {
+ DFLOW(DEBUG_LOG(info->line, "XOFF detect\n", 0));
+ }
+
+ if (rstat & SER_ERROR_MASK) {
+ unsigned char data;
+
+ info->last_rx_active_usec = GET_JIFFIES_USEC();
+ info->last_rx_active = jiffies;
+ /* If we got an error, we must reset it by reading the
+ * data_in field
+ */
+ data = info->ioport[REG_DATA];
+ DINTR1(DEBUG_LOG(info->line, "ser_rx! %c\n", data));
+ DINTR1(DEBUG_LOG(info->line, "ser_rx err stat %02X\n", rstat));
+ if (!data && (rstat & SER_FRAMING_ERR_MASK)) {
+ /* Most likely a break, but we get interrupts over and
+ * over again.
+ */
+
+ if (!info->break_detected_cnt) {
+ DEBUG_LOG(info->line, "#BRK start\n", 0);
+ }
+ if (rstat & SER_RXD_MASK) {
+ /* The RX pin is high now, so the break
+ * must be over, but....
+ * we can't really know if we will get another
+ * last byte ending the break or not.
+ * And we don't know if the byte (if any) will
+ * have an error or look valid.
+ */
+ DEBUG_LOG(info->line, "# BL BRK\n", 0);
+ info->errorcode = ERRCODE_INSERT_BREAK;
+ }
+ info->break_detected_cnt++;
+ } else {
+ /* The error does not look like a break, but could be
+ * the end of one
+ */
+ if (info->break_detected_cnt) {
+ DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt);
+ info->errorcode = ERRCODE_INSERT_BREAK;
+ } else {
+ if (info->errorcode == ERRCODE_INSERT_BREAK) {
+ info->icount.brk++;
+ add_char_and_flag(info, '\0', TTY_BREAK);
+ }
+
+ if (rstat & SER_PAR_ERR_MASK) {
+ info->icount.parity++;
+ add_char_and_flag(info, data, TTY_PARITY);
+ } else if (rstat & SER_OVERRUN_MASK) {
+ info->icount.overrun++;
+ add_char_and_flag(info, data, TTY_OVERRUN);
+ } else if (rstat & SER_FRAMING_ERR_MASK) {
+ info->icount.frame++;
+ add_char_and_flag(info, data, TTY_FRAME);
+ }
+
+ info->errorcode = 0;
+ }
+ info->break_detected_cnt = 0;
+ DEBUG_LOG(info->line, "#iERR s d %04X\n",
+ ((rstat & SER_ERROR_MASK) << 8) | data);
+ }
+ PROCSTAT(ser_stat[info->line].early_errors_cnt++);
+ } else { /* It was a valid byte, now let the DMA do the rest */
+ unsigned long curr_time_u = GET_JIFFIES_USEC();
+ unsigned long curr_time = jiffies;
+
+ if (info->break_detected_cnt) {
+ /* Detect if this character is a new valid char or the
+ * last char in a break sequence: If LSBits are 0 and
+ * MSBits are high AND the time is close to the
+ * previous interrupt we should discard it.
+ */
+ long elapsed_usec =
+ (curr_time - info->last_rx_active) * (1000000/HZ) +
+ curr_time_u - info->last_rx_active_usec;
+ if (elapsed_usec < 2*info->char_time_usec) {
+ DEBUG_LOG(info->line, "FBRK %i\n", info->line);
+ /* Report as BREAK (error) and let
+ * receive_chars_dma() handle it
+ */
+ info->errorcode = ERRCODE_SET_BREAK;
+ } else {
+ DEBUG_LOG(info->line, "Not end of BRK (V)%i\n", info->line);
+ }
+ DEBUG_LOG(info->line, "num brk %i\n", info->break_detected_cnt);
+ }
+
+#ifdef SERIAL_DEBUG_INTR
+ printk("** OK, disabling ser_interrupts\n");
+#endif
+ e100_disable_serial_data_irq(info);
+ DINTR2(DEBUG_LOG(info->line, "ser_rx OK %d\n", info->line));
+ info->break_detected_cnt = 0;
+
+ PROCSTAT(ser_stat[info->line].ser_ints_ok_cnt++);
+ }
+ /* Restarting the DMA never hurts */
+ *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart);
+ START_FLUSH_FAST_TIMER(info, "ser_int");
+ return info;
+} /* handle_ser_rx_interrupt */
+
+static void handle_ser_tx_interrupt(struct e100_serial *info)
+{
+ unsigned long flags;
+
+ if (info->x_char) {
+ unsigned char rstat;
+ DFLOW(DEBUG_LOG(info->line, "tx_int: xchar 0x%02X\n", info->x_char));
+ local_irq_save(flags);
+ rstat = info->ioport[REG_STATUS];
+ DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
+
+ info->ioport[REG_TR_DATA] = info->x_char;
+ info->icount.tx++;
+ info->x_char = 0;
+ /* We must enable since it is disabled in ser_interrupt */
+ e100_enable_serial_tx_ready_irq(info);
+ local_irq_restore(flags);
+ return;
+ }
+ if (info->uses_dma_out) {
+ unsigned char rstat;
+ int i;
+ /* We only use normal tx interrupt when sending x_char */
+ DFLOW(DEBUG_LOG(info->line, "tx_int: xchar sent\n", 0));
+ local_irq_save(flags);
+ rstat = info->ioport[REG_STATUS];
+ DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
+ e100_disable_serial_tx_ready_irq(info);
+ if (info->port.tty->stopped)
+ rs_stop(info->port.tty);
+ /* Enable the DMA channel and tell it to continue */
+ e100_enable_txdma_channel(info);
+ /* Wait 12 cycles before doing the DMA command */
+ for(i = 6; i > 0; i--)
+ nop();
+
+ *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, continue);
+ local_irq_restore(flags);
+ return;
+ }
+ /* Normal char-by-char interrupt */
+ if (info->xmit.head == info->xmit.tail
+ || info->port.tty->stopped
+ || info->port.tty->hw_stopped) {
+ DFLOW(DEBUG_LOG(info->line, "tx_int: stopped %i\n",
+ info->port.tty->stopped));
+ e100_disable_serial_tx_ready_irq(info);
+ info->tr_running = 0;
+ return;
+ }
+ DINTR2(DEBUG_LOG(info->line, "tx_int %c\n", info->xmit.buf[info->xmit.tail]));
+	/* Send a byte, rs485 timing is critical so turn off interrupts */
+ local_irq_save(flags);
+ info->ioport[REG_TR_DATA] = info->xmit.buf[info->xmit.tail];
+ info->xmit.tail = (info->xmit.tail + 1) & (SERIAL_XMIT_SIZE-1);
+ info->icount.tx++;
+ if (info->xmit.head == info->xmit.tail) {
+#if defined(CONFIG_ETRAX_RS485) && defined(CONFIG_ETRAX_FAST_TIMER)
+ if (info->rs485.flags & SER_RS485_ENABLED) {
+ /* Set a short timer to toggle RTS */
+ start_one_shot_timer(&fast_timers_rs485[info->line],
+ rs485_toggle_rts_timer_function,
+ (unsigned long)info,
+ info->char_time_usec*2,
+ "RS-485");
+ }
+#endif /* RS485 */
+ info->last_tx_active_usec = GET_JIFFIES_USEC();
+ info->last_tx_active = jiffies;
+ e100_disable_serial_tx_ready_irq(info);
+ info->tr_running = 0;
+ DFLOW(DEBUG_LOG(info->line, "tx_int: stop2\n", 0));
+ } else {
+ /* We must enable since it is disabled in ser_interrupt */
+ e100_enable_serial_tx_ready_irq(info);
+ }
+ local_irq_restore(flags);
+
+ if (CIRC_CNT(info->xmit.head,
+ info->xmit.tail,
+ SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
+ rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
+
+} /* handle_ser_tx_interrupt */
+
+/* result of time measurements:
+ * RX duration 54-60 us when doing something, otherwise 6-9 us
+ * ser_int duration: just sending: 8-15 us normally, up to 73 us
+ */
+static irqreturn_t
+ser_interrupt(int irq, void *dev_id)
+{
+ static volatile int tx_started = 0;
+ struct e100_serial *info;
+ int i;
+ unsigned long flags;
+ unsigned long irq_mask1_rd;
+ unsigned long data_mask = (1 << (8+2*0)); /* ser0 data_avail */
+ int handled = 0;
+ static volatile unsigned long reentered_ready_mask = 0;
+
+ local_irq_save(flags);
+ irq_mask1_rd = *R_IRQ_MASK1_RD;
+ /* First handle all rx interrupts with ints disabled */
+ info = rs_table;
+ irq_mask1_rd &= e100_ser_int_mask;
+ for (i = 0; i < NR_PORTS; i++) {
+ /* Which line caused the data irq? */
+ if (irq_mask1_rd & data_mask) {
+ handled = 1;
+ handle_ser_rx_interrupt(info);
+ }
+ info += 1;
+ data_mask <<= 2;
+ }
+ /* Handle tx interrupts with interrupts enabled so we
+ * can take care of new data interrupts while transmitting
+ * We protect the tx part with the tx_started flag.
+ * We disable the tr_ready interrupts we are about to handle and
+ * unblock the serial interrupt so new serial interrupts may come.
+ *
+ * If we get a new interrupt:
+	 *   - it might be due to synchronous serial ports.
+ * - serial irq will be blocked by general irq handler.
+ * - async data will be handled above (sync will be ignored).
+ * - tx_started flag will prevent us from trying to send again and
+ * we will exit fast - no need to unblock serial irq.
+	 *   - Next (sync) serial interrupt handler will be run with
+	 *     interrupts disabled due to local_irq_restore() at the end of this function,
+ * so sync handler will not be preempted or reentered.
+ */
+ if (!tx_started) {
+ unsigned long ready_mask;
+		tx_started = 1;
+ /* Only the tr_ready interrupts left */
+ irq_mask1_rd &= (IO_MASK(R_IRQ_MASK1_RD, ser0_ready) |
+ IO_MASK(R_IRQ_MASK1_RD, ser1_ready) |
+ IO_MASK(R_IRQ_MASK1_RD, ser2_ready) |
+ IO_MASK(R_IRQ_MASK1_RD, ser3_ready));
+ while (irq_mask1_rd) {
+ /* Disable those we are about to handle */
+ *R_IRQ_MASK1_CLR = irq_mask1_rd;
+ /* Unblock the serial interrupt */
+ *R_VECT_MASK_SET = IO_STATE(R_VECT_MASK_SET, serial, set);
+
+ local_irq_enable();
+ ready_mask = (1 << (8+1+2*0)); /* ser0 tr_ready */
+ info = rs_table;
+ for (i = 0; i < NR_PORTS; i++) {
+ /* Which line caused the ready irq? */
+ if (irq_mask1_rd & ready_mask) {
+ handled = 1;
+ handle_ser_tx_interrupt(info);
+ }
+ info += 1;
+ ready_mask <<= 2;
+ }
+ /* handle_ser_tx_interrupt enables tr_ready interrupts */
+ local_irq_disable();
+ /* Handle reentered TX interrupt */
+ irq_mask1_rd = reentered_ready_mask;
+ }
+ local_irq_disable();
+ tx_started = 0;
+ } else {
+ unsigned long ready_mask;
+ ready_mask = irq_mask1_rd & (IO_MASK(R_IRQ_MASK1_RD, ser0_ready) |
+ IO_MASK(R_IRQ_MASK1_RD, ser1_ready) |
+ IO_MASK(R_IRQ_MASK1_RD, ser2_ready) |
+ IO_MASK(R_IRQ_MASK1_RD, ser3_ready));
+ if (ready_mask) {
+ reentered_ready_mask |= ready_mask;
+ /* Disable those we are about to handle */
+ *R_IRQ_MASK1_CLR = ready_mask;
+ DFLOW(DEBUG_LOG(SERIAL_DEBUG_LINE, "ser_int reentered with TX %X\n", ready_mask));
+ }
+ }
+
+ local_irq_restore(flags);
+ return IRQ_RETVAL(handled);
+} /* ser_interrupt */
+#endif
+
+/*
+ * -------------------------------------------------------------------
+ * Here ends the serial interrupt routines.
+ * -------------------------------------------------------------------
+ */
+
+/*
+ * This routine is used to handle the "bottom half" processing for the
+ * serial driver, known also as the "software interrupt" processing.
+ * This processing is done at the kernel interrupt level, after the
+ * rs_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON. This
+ * is where time-consuming activities which can not be done in the
+ * interrupt driver proper are done; the interrupt driver schedules
+ * them using rs_sched_event(), and they get done here.
+ */
+static void
+do_softint(struct work_struct *work)
+{
+ struct e100_serial *info;
+ struct tty_struct *tty;
+
+ info = container_of(work, struct e100_serial, work);
+
+ tty = info->port.tty;
+ if (!tty)
+ return;
+
+ if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event))
+ tty_wakeup(tty);
+}
+
+static int
+startup(struct e100_serial * info)
+{
+ unsigned long flags;
+ unsigned long xmit_page;
+ int i;
+
+ xmit_page = get_zeroed_page(GFP_KERNEL);
+ if (!xmit_page)
+ return -ENOMEM;
+
+ local_irq_save(flags);
+
+ /* if it was already initialized, skip this */
+
+ if (info->flags & ASYNC_INITIALIZED) {
+ local_irq_restore(flags);
+ free_page(xmit_page);
+ return 0;
+ }
+
+ if (info->xmit.buf)
+ free_page(xmit_page);
+ else
+ info->xmit.buf = (unsigned char *) xmit_page;
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("starting up ttyS%d (xmit_buf 0x%p)...\n", info->line, info->xmit.buf);
+#endif
+
+#ifdef CONFIG_SVINTO_SIM
+ /* Bits and pieces collected from below. Better to have them
+ in one ifdef:ed clause than to mix in a lot of ifdefs,
+ right? */
+ if (info->port.tty)
+ clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
+
+ info->xmit.head = info->xmit.tail = 0;
+ info->first_recv_buffer = info->last_recv_buffer = NULL;
+ info->recv_cnt = info->max_recv_cnt = 0;
+
+ for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
+ info->rec_descr[i].buf = NULL;
+
+	/* No real action in the simulator, but this may set info fields
+	   that are important to ioctl. */
+ change_speed(info);
+#else
+
+ /*
+ * Clear the FIFO buffers and disable them
+ * (they will be reenabled in change_speed())
+ */
+
+ /*
+ * Reset the DMA channels and make sure their interrupts are cleared
+ */
+
+ if (info->dma_in_enabled) {
+ info->uses_dma_in = 1;
+ e100_enable_rxdma_channel(info);
+
+ *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
+
+ /* Wait until reset cycle is complete */
+ while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->icmdadr) ==
+ IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
+
+ /* Make sure the irqs are cleared */
+ *info->iclrintradr =
+ IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
+ IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
+ } else {
+ e100_disable_rxdma_channel(info);
+ }
+
+ if (info->dma_out_enabled) {
+ info->uses_dma_out = 1;
+ e100_enable_txdma_channel(info);
+ *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
+
+ while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->ocmdadr) ==
+ IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
+
+ /* Make sure the irqs are cleared */
+ *info->oclrintradr =
+ IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
+ IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
+ } else {
+ e100_disable_txdma_channel(info);
+ }
+
+ if (info->port.tty)
+ clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
+
+ info->xmit.head = info->xmit.tail = 0;
+ info->first_recv_buffer = info->last_recv_buffer = NULL;
+ info->recv_cnt = info->max_recv_cnt = 0;
+
+ for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
+ info->rec_descr[i].buf = 0;
+
+ /*
+ * and set the speed and other flags of the serial port
+ * this will start the rx/tx as well
+ */
+#ifdef SERIAL_HANDLE_EARLY_ERRORS
+ e100_enable_serial_data_irq(info);
+#endif
+ change_speed(info);
+
+ /* dummy read to reset any serial errors */
+
+ (void)info->ioport[REG_DATA];
+
+ /* enable the interrupts */
+ if (info->uses_dma_out)
+ e100_enable_txdma_irq(info);
+
+ e100_enable_rx_irq(info);
+
+ info->tr_running = 0; /* to be sure we don't lock up the transmitter */
+
+ /* setup the dma input descriptor and start dma */
+
+ start_receive(info);
+
+	/* for safety, make sure the descriptor's last result is 0 bytes written */
+
+ info->tr_descr.sw_len = 0;
+ info->tr_descr.hw_len = 0;
+ info->tr_descr.status = 0;
+
+ /* enable RTS/DTR last */
+
+ e100_rts(info, 1);
+ e100_dtr(info, 1);
+
+#endif /* CONFIG_SVINTO_SIM */
+
+ info->flags |= ASYNC_INITIALIZED;
+
+ local_irq_restore(flags);
+ return 0;
+}
+
+/*
+ * This routine will shutdown a serial port; interrupts are disabled, and
+ * DTR is dropped if the hangup on close termio flag is on.
+ */
+static void
+shutdown(struct e100_serial * info)
+{
+ unsigned long flags;
+ struct etrax_dma_descr *descr = info->rec_descr;
+ struct etrax_recv_buffer *buffer;
+ int i;
+
+#ifndef CONFIG_SVINTO_SIM
+ /* shut down the transmitter and receiver */
+ DFLOW(DEBUG_LOG(info->line, "shutdown %i\n", info->line));
+ e100_disable_rx(info);
+ info->ioport[REG_TR_CTRL] = (info->tx_ctrl &= ~0x40);
+
+ /* disable interrupts, reset dma channels */
+ if (info->uses_dma_in) {
+ e100_disable_rxdma_irq(info);
+ *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
+ info->uses_dma_in = 0;
+ } else {
+ e100_disable_serial_data_irq(info);
+ }
+
+ if (info->uses_dma_out) {
+ e100_disable_txdma_irq(info);
+ info->tr_running = 0;
+ *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
+ info->uses_dma_out = 0;
+ } else {
+ e100_disable_serial_tx_ready_irq(info);
+ info->tr_running = 0;
+ }
+
+#endif /* CONFIG_SVINTO_SIM */
+
+ if (!(info->flags & ASYNC_INITIALIZED))
+ return;
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("Shutting down serial port %d (irq %d)....\n", info->line,
+ info->irq);
+#endif
+
+ local_irq_save(flags);
+
+ if (info->xmit.buf) {
+ free_page((unsigned long)info->xmit.buf);
+ info->xmit.buf = NULL;
+ }
+
+ for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
+ if (descr[i].buf) {
+ buffer = phys_to_virt(descr[i].buf) - sizeof *buffer;
+ kfree(buffer);
+ descr[i].buf = 0;
+ }
+
+ if (!info->port.tty || (info->port.tty->termios->c_cflag & HUPCL)) {
+ /* hang up DTR and RTS if HUPCL is enabled */
+ e100_dtr(info, 0);
+ e100_rts(info, 0); /* could check CRTSCTS before doing this */
+ }
+
+ if (info->port.tty)
+ set_bit(TTY_IO_ERROR, &info->port.tty->flags);
+
+ info->flags &= ~ASYNC_INITIALIZED;
+ local_irq_restore(flags);
+}
+
+
+/* change baud rate and other assorted parameters */
+
+static void
+change_speed(struct e100_serial *info)
+{
+ unsigned int cflag;
+ unsigned long xoff;
+ unsigned long flags;
+ /* first some safety checks */
+
+ if (!info->port.tty || !info->port.tty->termios)
+ return;
+ if (!info->ioport)
+ return;
+
+ cflag = info->port.tty->termios->c_cflag;
+
+ /* possibly, the tx/rx should be disabled first to do this safely */
+
+ /* change baud-rate and write it to the hardware */
+ if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) {
+ /* Special baudrate */
+ u32 mask = 0xFF << (info->line*8); /* Each port has 8 bits */
+ unsigned long alt_source =
+ IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, normal) |
+ IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, normal);
+ /* R_ALT_SER_BAUDRATE selects the source */
+ DBAUD(printk("Custom baudrate: baud_base/divisor %lu/%i\n",
+ (unsigned long)info->baud_base, info->custom_divisor));
+ if (info->baud_base == SERIAL_PRESCALE_BASE) {
+ /* 0, 2-65535 (0=65536) */
+ u16 divisor = info->custom_divisor;
+ /* R_SERIAL_PRESCALE (upper 16 bits of R_CLOCK_PRESCALE) */
+ /* baudrate is 3.125MHz/custom_divisor */
+ alt_source =
+ IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, prescale) |
+ IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, prescale);
+ alt_source = 0x11;
+ DBAUD(printk("Writing SERIAL_PRESCALE: divisor %i\n", divisor));
+ *R_SERIAL_PRESCALE = divisor;
+ info->baud = SERIAL_PRESCALE_BASE/divisor;
+ }
+#ifdef CONFIG_ETRAX_EXTERN_PB6CLK_ENABLED
+ else if ((info->baud_base==CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8 &&
+ info->custom_divisor == 1) ||
+ (info->baud_base==CONFIG_ETRAX_EXTERN_PB6CLK_FREQ &&
+ info->custom_divisor == 8)) {
+ /* ext_clk selected */
+ alt_source =
+ IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, extern) |
+ IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, extern);
+ DBAUD(printk("using external baudrate: %lu\n", CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8));
+ info->baud = CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8;
+ }
+#endif
+ else
+ {
+ /* Bad baudbase, we don't support using timer0
+ * for baudrate.
+ */
+ printk(KERN_WARNING "Bad baud_base/custom_divisor: %lu/%i\n",
+ (unsigned long)info->baud_base, info->custom_divisor);
+ }
+ r_alt_ser_baudrate_shadow &= ~mask;
+ r_alt_ser_baudrate_shadow |= (alt_source << (info->line*8));
+ *R_ALT_SER_BAUDRATE = r_alt_ser_baudrate_shadow;
+ } else {
+ /* Normal baudrate */
+ /* Make sure we use normal baudrate */
+ u32 mask = 0xFF << (info->line*8); /* Each port has 8 bits */
+ unsigned long alt_source =
+ IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, normal) |
+ IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, normal);
+ r_alt_ser_baudrate_shadow &= ~mask;
+ r_alt_ser_baudrate_shadow |= (alt_source << (info->line*8));
+#ifndef CONFIG_SVINTO_SIM
+ *R_ALT_SER_BAUDRATE = r_alt_ser_baudrate_shadow;
+#endif /* CONFIG_SVINTO_SIM */
+
+ info->baud = cflag_to_baud(cflag);
+#ifndef CONFIG_SVINTO_SIM
+ info->ioport[REG_BAUD] = cflag_to_etrax_baud(cflag);
+#endif /* CONFIG_SVINTO_SIM */
+ }
+
+#ifndef CONFIG_SVINTO_SIM
+ /* start with default settings and then fill in changes */
+ local_irq_save(flags);
+ /* 8 bit, no/even parity */
+ info->rx_ctrl &= ~(IO_MASK(R_SERIAL0_REC_CTRL, rec_bitnr) |
+ IO_MASK(R_SERIAL0_REC_CTRL, rec_par_en) |
+ IO_MASK(R_SERIAL0_REC_CTRL, rec_par));
+
+ /* 8 bit, no/even parity, 1 stop bit, no cts */
+ info->tx_ctrl &= ~(IO_MASK(R_SERIAL0_TR_CTRL, tr_bitnr) |
+ IO_MASK(R_SERIAL0_TR_CTRL, tr_par_en) |
+ IO_MASK(R_SERIAL0_TR_CTRL, tr_par) |
+ IO_MASK(R_SERIAL0_TR_CTRL, stop_bits) |
+ IO_MASK(R_SERIAL0_TR_CTRL, auto_cts));
+
+ if ((cflag & CSIZE) == CS7) {
+ /* set 7 bit mode */
+ info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_bitnr, tr_7bit);
+ info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_bitnr, rec_7bit);
+ }
+
+ if (cflag & CSTOPB) {
+ /* set 2 stop bit mode */
+ info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, stop_bits, two_bits);
+ }
+
+ if (cflag & PARENB) {
+ /* enable parity */
+ info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_par_en, enable);
+ info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_par_en, enable);
+ }
+
+ if (cflag & CMSPAR) {
+		/* enable stick parity; PARODD means Mark, which matches ETRAX */
+ info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_stick_par, stick);
+ info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_stick_par, stick);
+ }
+ if (cflag & PARODD) {
+ /* set odd parity (or Mark if CMSPAR) */
+ info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_par, odd);
+ info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_par, odd);
+ }
+
+ if (cflag & CRTSCTS) {
+ /* enable automatic CTS handling */
+ DFLOW(DEBUG_LOG(info->line, "FLOW auto_cts enabled\n", 0));
+ info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, auto_cts, active);
+ }
+
+ /* make sure the tx and rx are enabled */
+
+ info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_enable, enable);
+ info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_enable, enable);
+
+ /* actually write the control regs to the hardware */
+
+ info->ioport[REG_TR_CTRL] = info->tx_ctrl;
+ info->ioport[REG_REC_CTRL] = info->rx_ctrl;
+ xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char, STOP_CHAR(info->port.tty));
+ xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, enable);
+ if (info->port.tty->termios->c_iflag & IXON ) {
+ DFLOW(DEBUG_LOG(info->line, "FLOW XOFF enabled 0x%02X\n",
+ STOP_CHAR(info->port.tty)));
+ xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
+ }
+
+ *((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
+ local_irq_restore(flags);
+#endif /* !CONFIG_SVINTO_SIM */
+
+ update_char_time(info);
+
+} /* change_speed */
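+
+/* Illustrative sketch, not part of the driver: as the comments above say,
+ * with baud_base == SERIAL_PRESCALE_BASE the bit rate is
+ * 3.125 MHz / custom_divisor, selected through the usual ASYNC_SPD_CUST
+ * mechanism.  Assuming SERIAL_PRESCALE_BASE really is 3125000 and using
+ * 31250 baud purely as an example, user space would do roughly:
+ *
+ *	struct serial_struct ss;
+ *
+ *	ioctl(fd, TIOCGSERIAL, &ss);
+ *	ss.flags = (ss.flags & ~ASYNC_SPD_MASK) | ASYNC_SPD_CUST;
+ *	ss.custom_divisor = ss.baud_base / 31250;   // 3125000 / 31250 = 100
+ *	ioctl(fd, TIOCSSERIAL, &ss);
+ *
+ * change_speed() then writes the divisor to R_SERIAL_PRESCALE and routes the
+ * port's baud clock to the prescaler via R_ALT_SER_BAUDRATE.
+ */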
+
+/* start transmitting chars NOW */
+
+static void
+rs_flush_chars(struct tty_struct *tty)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+ unsigned long flags;
+
+ if (info->tr_running ||
+ info->xmit.head == info->xmit.tail ||
+ tty->stopped ||
+ tty->hw_stopped ||
+ !info->xmit.buf)
+ return;
+
+#ifdef SERIAL_DEBUG_FLOW
+ printk("rs_flush_chars\n");
+#endif
+
+ /* this protection might not exactly be necessary here */
+
+ local_irq_save(flags);
+ start_transmit(info);
+ local_irq_restore(flags);
+}
+
+static int rs_raw_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ int c, ret = 0;
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+ unsigned long flags;
+
+ /* first some sanity checks */
+
+ if (!tty || !info->xmit.buf || !tmp_buf)
+ return 0;
+
+#ifdef SERIAL_DEBUG_DATA
+ if (info->line == SERIAL_DEBUG_LINE)
+ printk("rs_raw_write (%d), status %d\n",
+ count, info->ioport[REG_STATUS]);
+#endif
+
+#ifdef CONFIG_SVINTO_SIM
+ /* Really simple. The output is here and now. */
+ SIMCOUT(buf, count);
+ return count;
+#endif
+ local_save_flags(flags);
+ DFLOW(DEBUG_LOG(info->line, "write count %i ", count));
+ DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty)));
+
+
+	/* The local_irq_disable()/local_irq_restore() pair below is needed
+	 * because the DMA interrupt handler moves the info->xmit values.
+	 * The memcpy needs to be inside the critical region, unfortunately,
+	 * because we need to read the xmit values, memcpy, and write the
+	 * xmit values back in one atomic operation... this could perhaps be
+	 * avoided by a more clever design.
+	 */
+ local_irq_disable();
+ while (count) {
+ c = CIRC_SPACE_TO_END(info->xmit.head,
+ info->xmit.tail,
+ SERIAL_XMIT_SIZE);
+
+ if (count < c)
+ c = count;
+ if (c <= 0)
+ break;
+
+ memcpy(info->xmit.buf + info->xmit.head, buf, c);
+ info->xmit.head = (info->xmit.head + c) &
+ (SERIAL_XMIT_SIZE-1);
+ buf += c;
+ count -= c;
+ ret += c;
+ }
+ local_irq_restore(flags);
+
+ /* enable transmitter if not running, unless the tty is stopped
+ * this does not need IRQ protection since if tr_running == 0
+ * the IRQ's are not running anyway for this port.
+ */
+ DFLOW(DEBUG_LOG(info->line, "write ret %i\n", ret));
+
+ if (info->xmit.head != info->xmit.tail &&
+ !tty->stopped &&
+ !tty->hw_stopped &&
+ !info->tr_running) {
+ start_transmit(info);
+ }
+
+ return ret;
+} /* rs_raw_write() */
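+
+/* Illustrative sketch, not part of the driver: the copy loop above is the
+ * usual <linux/circ_buf.h> producer pattern.  With the power-of-two
+ * SERIAL_XMIT_SIZE, CIRC_SPACE_TO_END() tells how much fits before the
+ * buffer wraps, so at most two copies are needed per call, and
+ * CIRC_CNT() + CIRC_SPACE() is always SERIAL_XMIT_SIZE - 1 (one slot stays
+ * free to distinguish "full" from "empty").  A stand-alone producer over a
+ * hypothetical 16-byte buffer would look like:
+ *
+ *	char buf[16];			// size must be a power of two
+ *	int head, tail;			// the consumer advances tail
+ *
+ *	int put(const char *src, int n)
+ *	{
+ *		int done = 0, c;
+ *
+ *		while (n) {
+ *			c = CIRC_SPACE_TO_END(head, tail, sizeof(buf));
+ *			if (c > n)
+ *				c = n;
+ *			if (c <= 0)
+ *				break;
+ *			memcpy(buf + head, src, c);
+ *			head = (head + c) & (sizeof(buf) - 1);
+ *			src += c; n -= c; done += c;
+ *		}
+ *		return done;
+ *	}
+ */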
+
+static int
+rs_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+#if defined(CONFIG_ETRAX_RS485)
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+
+ if (info->rs485.flags & SER_RS485_ENABLED)
+ {
+ /* If we are in RS-485 mode, we need to toggle RTS and disable
+ * the receiver before initiating a DMA transfer
+ */
+#ifdef CONFIG_ETRAX_FAST_TIMER
+ /* Abort any started timer */
+ fast_timers_rs485[info->line].function = NULL;
+ del_fast_timer(&fast_timers_rs485[info->line]);
+#endif
+ e100_rts(info, (info->rs485.flags & SER_RS485_RTS_ON_SEND));
+#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
+ e100_disable_rx(info);
+ e100_enable_rx_irq(info);
+#endif
+ if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) &&
+ (info->rs485.delay_rts_before_send > 0))
+ msleep(info->rs485.delay_rts_before_send);
+ }
+#endif /* CONFIG_ETRAX_RS485 */
+
+ count = rs_raw_write(tty, buf, count);
+
+#if defined(CONFIG_ETRAX_RS485)
+ if (info->rs485.flags & SER_RS485_ENABLED)
+ {
+ unsigned int val;
+ /* If we are in RS-485 mode the following has to be done:
+ * wait until DMA is ready
+ * wait on transmit shift register
+ * toggle RTS
+ * enable the receiver
+ */
+
+ /* Sleep until all sent */
+ tty_wait_until_sent(tty, 0);
+#ifdef CONFIG_ETRAX_FAST_TIMER
+ /* Now sleep a little more so that shift register is empty */
+ schedule_usleep(info->char_time_usec * 2);
+#endif
+ /* wait on transmit shift register */
+		do {
+			get_lsr_info(info, &val);
+		} while (!(val & TIOCSER_TEMT));
+
+ e100_rts(info, (info->rs485.flags & SER_RS485_RTS_AFTER_SEND));
+
+#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
+ e100_enable_rx(info);
+ e100_enable_rxdma_irq(info);
+#endif
+ }
+#endif /* CONFIG_ETRAX_RS485 */
+
+ return count;
+} /* rs_write */
+
+
+/* how much space is available in the xmit buffer? */
+
+static int
+rs_write_room(struct tty_struct *tty)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+
+ return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+}
+
+/* How many chars are in the xmit buffer?
+ * This does not include any chars in the transmitter FIFO.
+ * Use wait_until_sent for waiting for FIFO drain.
+ */
+
+static int
+rs_chars_in_buffer(struct tty_struct *tty)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+
+ return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+}
+
+/* discard everything in the xmit buffer */
+
+static void
+rs_flush_buffer(struct tty_struct *tty)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ info->xmit.head = info->xmit.tail = 0;
+ local_irq_restore(flags);
+
+ tty_wakeup(tty);
+}
+
+/*
+ * This function is used to send a high-priority XON/XOFF character to
+ * the device
+ *
+ * Since we use DMA we don't check for info->x_char in transmit_chars_dma(),
+ * but we do it in handle_ser_tx_interrupt().
+ * We disable the DMA channel, enable the tx ready interrupt, and write the
+ * character when possible.
+ */
+static void rs_send_xchar(struct tty_struct *tty, char ch)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+ unsigned long flags;
+ local_irq_save(flags);
+ if (info->uses_dma_out) {
+ /* Put the DMA on hold and disable the channel */
+ *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, hold);
+ while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->ocmdadr) !=
+ IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, hold));
+ e100_disable_txdma_channel(info);
+ }
+
+ /* Must make sure transmitter is not stopped before we can transmit */
+ if (tty->stopped)
+ rs_start(tty);
+
+ /* Enable manual transmit interrupt and send from there */
+ DFLOW(DEBUG_LOG(info->line, "rs_send_xchar 0x%02X\n", ch));
+ info->x_char = ch;
+ e100_enable_serial_tx_ready_irq(info);
+ local_irq_restore(flags);
+}
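+
+/* Note, illustrative only: user space reaches this path through tcflow(),
+ * e.g.
+ *
+ *	tcflow(fd, TCIOFF);	// driver transmits the STOP (XOFF) character
+ *	tcflow(fd, TCION);	// driver transmits the START (XON) character
+ *
+ * which the tty layer turns into send_xchar() calls with STOP_CHAR/START_CHAR.
+ */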
+
+/*
+ * ------------------------------------------------------------
+ * rs_throttle()
+ *
+ * This routine is called by the upper-layer tty layer to signal that
+ * incoming characters should be throttled.
+ * ------------------------------------------------------------
+ */
+static void
+rs_throttle(struct tty_struct * tty)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+#ifdef SERIAL_DEBUG_THROTTLE
+ char buf[64];
+
+ printk("throttle %s: %lu....\n", tty_name(tty, buf),
+ (unsigned long)tty->ldisc.chars_in_buffer(tty));
+#endif
+ DFLOW(DEBUG_LOG(info->line,"rs_throttle %lu\n", tty->ldisc.chars_in_buffer(tty)));
+
+ /* Do RTS before XOFF since XOFF might take some time */
+ if (tty->termios->c_cflag & CRTSCTS) {
+ /* Turn off RTS line */
+ e100_rts(info, 0);
+ }
+ if (I_IXOFF(tty))
+ rs_send_xchar(tty, STOP_CHAR(tty));
+
+}
+
+static void
+rs_unthrottle(struct tty_struct * tty)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+#ifdef SERIAL_DEBUG_THROTTLE
+ char buf[64];
+
+ printk("unthrottle %s: %lu....\n", tty_name(tty, buf),
+ (unsigned long)tty->ldisc.chars_in_buffer(tty));
+#endif
+ DFLOW(DEBUG_LOG(info->line,"rs_unthrottle ldisc %d\n", tty->ldisc.chars_in_buffer(tty)));
+ DFLOW(DEBUG_LOG(info->line,"rs_unthrottle flip.count: %i\n", tty->flip.count));
+ /* Do RTS before XOFF since XOFF might take some time */
+ if (tty->termios->c_cflag & CRTSCTS) {
+ /* Assert RTS line */
+ e100_rts(info, 1);
+ }
+
+ if (I_IXOFF(tty)) {
+ if (info->x_char)
+ info->x_char = 0;
+ else
+ rs_send_xchar(tty, START_CHAR(tty));
+ }
+
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_ioctl() and friends
+ * ------------------------------------------------------------
+ */
+
+static int
+get_serial_info(struct e100_serial * info,
+ struct serial_struct * retinfo)
+{
+ struct serial_struct tmp;
+
+	/* this is all probably wrong, there are a lot of fields
+	 * here that we don't have in e100_serial, and maybe we
+	 * should set some of them to something other than 0.
+	 */
+
+ if (!retinfo)
+ return -EFAULT;
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.type = info->type;
+ tmp.line = info->line;
+ tmp.port = (int)info->ioport;
+ tmp.irq = info->irq;
+ tmp.flags = info->flags;
+ tmp.baud_base = info->baud_base;
+ tmp.close_delay = info->close_delay;
+ tmp.closing_wait = info->closing_wait;
+ tmp.custom_divisor = info->custom_divisor;
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+ return 0;
+}
+
+static int
+set_serial_info(struct e100_serial *info,
+ struct serial_struct *new_info)
+{
+ struct serial_struct new_serial;
+ struct e100_serial old_info;
+ int retval = 0;
+
+ if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
+ return -EFAULT;
+
+ old_info = *info;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ if ((new_serial.type != info->type) ||
+ (new_serial.close_delay != info->close_delay) ||
+ ((new_serial.flags & ~ASYNC_USR_MASK) !=
+ (info->flags & ~ASYNC_USR_MASK)))
+ return -EPERM;
+ info->flags = ((info->flags & ~ASYNC_USR_MASK) |
+ (new_serial.flags & ASYNC_USR_MASK));
+ goto check_and_exit;
+ }
+
+ if (info->count > 1)
+ return -EBUSY;
+
+ /*
+ * OK, past this point, all the error checking has been done.
+ * At this point, we start making changes.....
+ */
+
+ info->baud_base = new_serial.baud_base;
+ info->flags = ((info->flags & ~ASYNC_FLAGS) |
+ (new_serial.flags & ASYNC_FLAGS));
+ info->custom_divisor = new_serial.custom_divisor;
+ info->type = new_serial.type;
+ info->close_delay = new_serial.close_delay;
+ info->closing_wait = new_serial.closing_wait;
+ info->port.tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
+ check_and_exit:
+ if (info->flags & ASYNC_INITIALIZED) {
+ change_speed(info);
+ } else
+ retval = startup(info);
+ return retval;
+}
+
+/*
+ * get_lsr_info - get line status register info
+ *
+ * Purpose: Let user call ioctl() to get info when the UART physically
+ * is emptied. On bus types like RS485, the transmitter must
+ * release the bus after transmitting. This must be done when
+ *	    the transmit shift register is empty, not when the
+ * transmit holding register is empty. This functionality
+ * allows an RS485 driver to be written in user space.
+ */
+static int
+get_lsr_info(struct e100_serial * info, unsigned int *value)
+{
+ unsigned int result = TIOCSER_TEMT;
+#ifndef CONFIG_SVINTO_SIM
+ unsigned long curr_time = jiffies;
+ unsigned long curr_time_usec = GET_JIFFIES_USEC();
+ unsigned long elapsed_usec =
+ (curr_time - info->last_tx_active) * 1000000/HZ +
+ curr_time_usec - info->last_tx_active_usec;
+
+ if (info->xmit.head != info->xmit.tail ||
+ elapsed_usec < 2*info->char_time_usec) {
+ result = 0;
+ }
+#endif
+
+ if (copy_to_user(value, &result, sizeof(int)))
+ return -EFAULT;
+ return 0;
+}
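+
+/* Illustrative sketch, not part of the driver: the "RS485 driver in user
+ * space" mentioned above would poll TIOCSERGETLSR until the shift register
+ * is really empty before releasing the bus, roughly:
+ *
+ *	unsigned int lsr;
+ *
+ *	write(fd, frame, len);
+ *	tcdrain(fd);				// wait for the xmit buffer
+ *	do {
+ *		ioctl(fd, TIOCSERGETLSR, &lsr);
+ *	} while (!(lsr & TIOCSER_TEMT));	// shift register drained
+ *	// now it is safe to drop RTS / turn the transceiver around
+ */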
+
+#ifdef SERIAL_DEBUG_IO
+struct state_str
+{
+ int state;
+ const char *str;
+};
+
+const struct state_str control_state_str[] = {
+ {TIOCM_DTR, "DTR" },
+ {TIOCM_RTS, "RTS"},
+ {TIOCM_ST, "ST?" },
+ {TIOCM_SR, "SR?" },
+ {TIOCM_CTS, "CTS" },
+ {TIOCM_CD, "CD" },
+ {TIOCM_RI, "RI" },
+ {TIOCM_DSR, "DSR" },
+ {0, NULL }
+};
+
+char *get_control_state_str(int MLines, char *s)
+{
+ int i = 0;
+
+ s[0]='\0';
+ while (control_state_str[i].str != NULL) {
+ if (MLines & control_state_str[i].state) {
+ if (s[0] != '\0') {
+ strcat(s, ", ");
+ }
+ strcat(s, control_state_str[i].str);
+ }
+ i++;
+ }
+ return s;
+}
+#endif
+
+static int
+rs_break(struct tty_struct *tty, int break_state)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+ unsigned long flags;
+
+ if (!info->ioport)
+ return -EIO;
+
+ local_irq_save(flags);
+ if (break_state == -1) {
+ /* Go to manual mode and set the txd pin to 0 */
+ /* Clear bit 7 (txd) and 6 (tr_enable) */
+ info->tx_ctrl &= 0x3F;
+ } else {
+ /* Set bit 7 (txd) and 6 (tr_enable) */
+ info->tx_ctrl |= (0x80 | 0x40);
+ }
+ info->ioport[REG_TR_CTRL] = info->tx_ctrl;
+ local_irq_restore(flags);
+ return 0;
+}
+
+static int
+rs_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ if (clear & TIOCM_RTS)
+ e100_rts(info, 0);
+ if (clear & TIOCM_DTR)
+ e100_dtr(info, 0);
+	/* Handle "FEMALE" (DCE side) behaviour: RI and CD are driven as outputs */
+ if (clear & TIOCM_RI)
+ e100_ri_out(info, 0);
+ if (clear & TIOCM_CD)
+ e100_cd_out(info, 0);
+
+ if (set & TIOCM_RTS)
+ e100_rts(info, 1);
+ if (set & TIOCM_DTR)
+ e100_dtr(info, 1);
+	/* Handle "FEMALE" (DCE side) behaviour: RI and CD are driven as outputs */
+ if (set & TIOCM_RI)
+ e100_ri_out(info, 1);
+ if (set & TIOCM_CD)
+ e100_cd_out(info, 1);
+
+ local_irq_restore(flags);
+ return 0;
+}
+
+static int
+rs_tiocmget(struct tty_struct *tty)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+ unsigned int result;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ result =
+ (!E100_RTS_GET(info) ? TIOCM_RTS : 0)
+ | (!E100_DTR_GET(info) ? TIOCM_DTR : 0)
+ | (!E100_RI_GET(info) ? TIOCM_RNG : 0)
+ | (!E100_DSR_GET(info) ? TIOCM_DSR : 0)
+ | (!E100_CD_GET(info) ? TIOCM_CAR : 0)
+ | (!E100_CTS_GET(info) ? TIOCM_CTS : 0);
+
+ local_irq_restore(flags);
+
+#ifdef SERIAL_DEBUG_IO
+ printk(KERN_DEBUG "ser%i: modem state: %i 0x%08X\n",
+ info->line, result, result);
+ {
+ char s[100];
+
+ get_control_state_str(result, s);
+ printk(KERN_DEBUG "state: %s\n", s);
+ }
+#endif
+ return result;
+
+}
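+
+/* Illustrative sketch, not part of the driver: rs_tiocmget()/rs_tiocmset()
+ * back the standard modem-control ioctls, so user space can do e.g.:
+ *
+ *	int bits;
+ *
+ *	ioctl(fd, TIOCMGET, &bits);	// read RTS/DTR/CTS/CD/RI/DSR state
+ *	bits = TIOCM_RTS;
+ *	ioctl(fd, TIOCMBIS, &bits);	// assert RTS
+ *	ioctl(fd, TIOCMBIC, &bits);	// drop RTS again
+ */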
+
+
+static int
+rs_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct e100_serial * info = (struct e100_serial *)tty->driver_data;
+
+ if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+ (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGWILD) &&
+ (cmd != TIOCSERSWILD) && (cmd != TIOCSERGSTRUCT)) {
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case TIOCGSERIAL:
+ return get_serial_info(info,
+ (struct serial_struct *) arg);
+ case TIOCSSERIAL:
+ return set_serial_info(info,
+ (struct serial_struct *) arg);
+ case TIOCSERGETLSR: /* Get line status register */
+ return get_lsr_info(info, (unsigned int *) arg);
+
+ case TIOCSERGSTRUCT:
+ if (copy_to_user((struct e100_serial *) arg,
+ info, sizeof(struct e100_serial)))
+ return -EFAULT;
+ return 0;
+
+#if defined(CONFIG_ETRAX_RS485)
+ case TIOCSERSETRS485:
+ {
+ /* In this ioctl we still use the old structure
+ * rs485_control for backward compatibility
+ * (if we use serial_rs485, then old user-level code
+ * wouldn't work anymore...).
+ * The use of this ioctl is deprecated: use TIOCSRS485
+ * instead.*/
+ struct rs485_control rs485ctrl;
+ struct serial_rs485 rs485data;
+ printk(KERN_DEBUG "The use of this ioctl is deprecated. Use TIOCSRS485 instead\n");
+ if (copy_from_user(&rs485ctrl, (struct rs485_control *)arg,
+ sizeof(rs485ctrl)))
+ return -EFAULT;
+
+ rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send;
+ rs485data.flags = 0;
+ if (rs485data.delay_rts_before_send != 0)
+ rs485data.flags |= SER_RS485_RTS_BEFORE_SEND;
+ else
+ rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
+
+ if (rs485ctrl.enabled)
+ rs485data.flags |= SER_RS485_ENABLED;
+ else
+ rs485data.flags &= ~(SER_RS485_ENABLED);
+
+ if (rs485ctrl.rts_on_send)
+ rs485data.flags |= SER_RS485_RTS_ON_SEND;
+ else
+ rs485data.flags &= ~(SER_RS485_RTS_ON_SEND);
+
+ if (rs485ctrl.rts_after_sent)
+ rs485data.flags |= SER_RS485_RTS_AFTER_SEND;
+ else
+ rs485data.flags &= ~(SER_RS485_RTS_AFTER_SEND);
+
+ return e100_enable_rs485(tty, &rs485data);
+ }
+
+ case TIOCSRS485:
+ {
+ /* This is the new version of TIOCSRS485, with new
+ * data structure serial_rs485 */
+ struct serial_rs485 rs485data;
+		if (copy_from_user(&rs485data, (struct serial_rs485 *)arg,
+ sizeof(rs485data)))
+ return -EFAULT;
+
+ return e100_enable_rs485(tty, &rs485data);
+ }
+
+ case TIOCGRS485:
+ {
+ struct serial_rs485 *rs485data =
+ &(((struct e100_serial *)tty->driver_data)->rs485);
+ /* This is the ioctl to get RS485 data from user-space */
+ if (copy_to_user((struct serial_rs485 *) arg,
+ rs485data,
+ sizeof(struct serial_rs485)))
+ return -EFAULT;
+ break;
+ }
+
+ case TIOCSERWRRS485:
+ {
+ struct rs485_write rs485wr;
+ if (copy_from_user(&rs485wr, (struct rs485_write *)arg,
+ sizeof(rs485wr)))
+ return -EFAULT;
+
+ return e100_write_rs485(tty, rs485wr.outc, rs485wr.outc_size);
+ }
+#endif
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
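+
+/* Illustrative sketch, not part of the driver: the non-deprecated RS-485
+ * ioctls above use the generic struct serial_rs485 from <linux/serial.h>,
+ * so enabling RS-485 with RTS asserted while sending looks roughly like:
+ *
+ *	struct serial_rs485 rs485 = {0};
+ *
+ *	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
+ *	if (ioctl(fd, TIOCSRS485, &rs485) < 0)
+ *		perror("TIOCSRS485");
+ *	ioctl(fd, TIOCGRS485, &rs485);	// read the settings back
+ */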
+
+static void
+rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+
+ change_speed(info);
+
+ /* Handle turning off CRTSCTS */
+ if ((old_termios->c_cflag & CRTSCTS) &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ rs_start(tty);
+ }
+
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_close()
+ *
+ * This routine is called when the serial port gets closed. First, we
+ * wait for the last remaining data to be sent. Then, we unlink its
+ * serial structure from the interrupt chain if necessary, and we free
+ * that IRQ if nothing is left in the chain.
+ * ------------------------------------------------------------
+ */
+static void
+rs_close(struct tty_struct *tty, struct file * filp)
+{
+ struct e100_serial * info = (struct e100_serial *)tty->driver_data;
+ unsigned long flags;
+
+ if (!info)
+ return;
+
+ /* interrupts are disabled for this entire function */
+
+ local_irq_save(flags);
+
+ if (tty_hung_up_p(filp)) {
+ local_irq_restore(flags);
+ return;
+ }
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("[%d] rs_close ttyS%d, count = %d\n", current->pid,
+ info->line, info->count);
+#endif
+ if ((tty->count == 1) && (info->count != 1)) {
+ /*
+		 * Uh, oh. tty->count is 1, which means that the tty
+		 * structure will be freed. info->count should always
+		 * be one in these conditions. If it's greater than
+		 * one, we've got real problems, since it means the
+		 * serial port won't be shut down.
+		 */
+ printk(KERN_CRIT
+ "rs_close: bad serial port count; tty->count is 1, "
+ "info->count is %d\n", info->count);
+ info->count = 1;
+ }
+ if (--info->count < 0) {
+ printk(KERN_CRIT "rs_close: bad serial port count for ttyS%d: %d\n",
+ info->line, info->count);
+ info->count = 0;
+ }
+ if (info->count) {
+ local_irq_restore(flags);
+ return;
+ }
+ info->flags |= ASYNC_CLOSING;
+ /*
+ * Save the termios structure, since this port may have
+ * separate termios for callout and dialin.
+ */
+ if (info->flags & ASYNC_NORMAL_ACTIVE)
+ info->normal_termios = *tty->termios;
+ /*
+ * Now we wait for the transmit buffer to clear; and we notify
+ * the line discipline to only process XON/XOFF characters.
+ */
+ tty->closing = 1;
+ if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE)
+ tty_wait_until_sent(tty, info->closing_wait);
+ /*
+ * At this point we stop accepting input. To do this, we
+ * disable the serial receiver and the DMA receive interrupt.
+ */
+#ifdef SERIAL_HANDLE_EARLY_ERRORS
+ e100_disable_serial_data_irq(info);
+#endif
+
+#ifndef CONFIG_SVINTO_SIM
+ e100_disable_rx(info);
+ e100_disable_rx_irq(info);
+
+ if (info->flags & ASYNC_INITIALIZED) {
+ /*
+ * Before we drop DTR, make sure the UART transmitter
+ * has completely drained; this is especially
+ * important as we have a transmit FIFO!
+ */
+ rs_wait_until_sent(tty, HZ);
+ }
+#endif
+
+ shutdown(info);
+ rs_flush_buffer(tty);
+ tty_ldisc_flush(tty);
+ tty->closing = 0;
+ info->event = 0;
+ info->port.tty = NULL;
+ if (info->blocked_open) {
+ if (info->close_delay)
+ schedule_timeout_interruptible(info->close_delay);
+ wake_up_interruptible(&info->open_wait);
+ }
+ info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
+ wake_up_interruptible(&info->close_wait);
+ local_irq_restore(flags);
+
+ /* port closed */
+
+#if defined(CONFIG_ETRAX_RS485)
+ if (info->rs485.flags & SER_RS485_ENABLED) {
+ info->rs485.flags &= ~(SER_RS485_ENABLED);
+#if defined(CONFIG_ETRAX_RS485_ON_PA)
+ *R_PORT_PA_DATA = port_pa_data_shadow &= ~(1 << rs485_pa_bit);
+#endif
+#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
+ REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
+ rs485_port_g_bit, 0);
+#endif
+#if defined(CONFIG_ETRAX_RS485_LTC1387)
+ REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
+ CONFIG_ETRAX_RS485_LTC1387_DXEN_PORT_G_BIT, 0);
+ REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
+ CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 0);
+#endif
+ }
+#endif
+
+ /*
+ * Release any allocated DMA irq's.
+ */
+ if (info->dma_in_enabled) {
+ free_irq(info->dma_in_irq_nbr, info);
+ cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
+ info->uses_dma_in = 0;
+#ifdef SERIAL_DEBUG_OPEN
+ printk(KERN_DEBUG "DMA irq '%s' freed\n",
+ info->dma_in_irq_description);
+#endif
+ }
+ if (info->dma_out_enabled) {
+ free_irq(info->dma_out_irq_nbr, info);
+ cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
+ info->uses_dma_out = 0;
+#ifdef SERIAL_DEBUG_OPEN
+ printk(KERN_DEBUG "DMA irq '%s' freed\n",
+ info->dma_out_irq_description);
+#endif
+ }
+}
+
+/*
+ * rs_wait_until_sent() --- wait until the transmitter is empty
+ */
+static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ unsigned long orig_jiffies;
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+ unsigned long curr_time = jiffies;
+ unsigned long curr_time_usec = GET_JIFFIES_USEC();
+ long elapsed_usec =
+ (curr_time - info->last_tx_active) * (1000000/HZ) +
+ curr_time_usec - info->last_tx_active_usec;
+
+ /*
+ * Check R_DMA_CHx_STATUS bit 0-6=number of available bytes in FIFO
+ * R_DMA_CHx_HWSW bit 31-16=nbr of bytes left in DMA buffer (0=64k)
+ */
+ orig_jiffies = jiffies;
+ while (info->xmit.head != info->xmit.tail || /* More in send queue */
+ (*info->ostatusadr & 0x007f) || /* more in FIFO */
+ (elapsed_usec < 2*info->char_time_usec)) {
+ schedule_timeout_interruptible(1);
+ if (signal_pending(current))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ break;
+ curr_time = jiffies;
+ curr_time_usec = GET_JIFFIES_USEC();
+ elapsed_usec =
+ (curr_time - info->last_tx_active) * (1000000/HZ) +
+ curr_time_usec - info->last_tx_active_usec;
+ }
+ set_current_state(TASK_RUNNING);
+}
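+
+/* Worked example, not from the source: char_time_usec is the duration of one
+ * character on the wire, so at 115200 baud with 8N1 framing (1 start +
+ * 8 data + 1 stop = 10 bits) one character takes 10 / 115200 s ~= 87 us.
+ * The "elapsed_usec < 2*info->char_time_usec" test above therefore keeps
+ * waiting until roughly two character times (~174 us) have passed since the
+ * last tx activity, i.e. until the shift register has certainly drained.
+ */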
+
+/*
+ * rs_hangup() --- called by tty_hangup() when a hangup is signaled.
+ */
+void
+rs_hangup(struct tty_struct *tty)
+{
+ struct e100_serial * info = (struct e100_serial *)tty->driver_data;
+
+ rs_flush_buffer(tty);
+ shutdown(info);
+ info->event = 0;
+ info->count = 0;
+ info->flags &= ~ASYNC_NORMAL_ACTIVE;
+ info->port.tty = NULL;
+ wake_up_interruptible(&info->open_wait);
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_open() and friends
+ * ------------------------------------------------------------
+ */
+static int
+block_til_ready(struct tty_struct *tty, struct file * filp,
+ struct e100_serial *info)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long flags;
+ int retval;
+ int do_clocal = 0, extra_count = 0;
+
+ /*
+ * If the device is in the middle of being closed, then block
+ * until it's done, and then try again.
+ */
+ if (tty_hung_up_p(filp) ||
+ (info->flags & ASYNC_CLOSING)) {
+ wait_event_interruptible_tty(info->close_wait,
+ !(info->flags & ASYNC_CLOSING));
+#ifdef SERIAL_DO_RESTART
+ if (info->flags & ASYNC_HUP_NOTIFY)
+ return -EAGAIN;
+ else
+ return -ERESTARTSYS;
+#else
+ return -EAGAIN;
+#endif
+ }
+
+ /*
+ * If non-blocking mode is set, or the port is not enabled,
+ * then make the check up front and then exit.
+ */
+ if ((filp->f_flags & O_NONBLOCK) ||
+ (tty->flags & (1 << TTY_IO_ERROR))) {
+ info->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+ }
+
+ if (tty->termios->c_cflag & CLOCAL) {
+ do_clocal = 1;
+ }
+
+ /*
+ * Block waiting for the carrier detect and the line to become
+ * free (i.e., not in use by the callout). While we are in
+ * this loop, info->count is dropped by one, so that
+ * rs_close() knows when to free things. We restore it upon
+ * exit, either normal or abnormal.
+ */
+ retval = 0;
+ add_wait_queue(&info->open_wait, &wait);
+#ifdef SERIAL_DEBUG_OPEN
+ printk("block_til_ready before block: ttyS%d, count = %d\n",
+ info->line, info->count);
+#endif
+ local_irq_save(flags);
+ if (!tty_hung_up_p(filp)) {
+ extra_count++;
+ info->count--;
+ }
+ local_irq_restore(flags);
+ info->blocked_open++;
+ while (1) {
+ local_irq_save(flags);
+ /* assert RTS and DTR */
+ e100_rts(info, 1);
+ e100_dtr(info, 1);
+ local_irq_restore(flags);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (tty_hung_up_p(filp) ||
+ !(info->flags & ASYNC_INITIALIZED)) {
+#ifdef SERIAL_DO_RESTART
+ if (info->flags & ASYNC_HUP_NOTIFY)
+ retval = -EAGAIN;
+ else
+ retval = -ERESTARTSYS;
+#else
+ retval = -EAGAIN;
+#endif
+ break;
+ }
+ if (!(info->flags & ASYNC_CLOSING) && do_clocal)
+ /* && (do_clocal || DCD_IS_ASSERTED) */
+ break;
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+#ifdef SERIAL_DEBUG_OPEN
+ printk("block_til_ready blocking: ttyS%d, count = %d\n",
+ info->line, info->count);
+#endif
+ tty_unlock();
+ schedule();
+ tty_lock();
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&info->open_wait, &wait);
+ if (extra_count)
+ info->count++;
+ info->blocked_open--;
+#ifdef SERIAL_DEBUG_OPEN
+ printk("block_til_ready after blocking: ttyS%d, count = %d\n",
+ info->line, info->count);
+#endif
+ if (retval)
+ return retval;
+ info->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+}
+
+static void
+deinit_port(struct e100_serial *info)
+{
+ if (info->dma_out_enabled) {
+ cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
+ free_irq(info->dma_out_irq_nbr, info);
+ }
+ if (info->dma_in_enabled) {
+ cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
+ free_irq(info->dma_in_irq_nbr, info);
+ }
+}
+
+/*
+ * This routine is called whenever a serial port is opened.
+ * It performs the serial-specific initialization for the tty structure.
+ */
+static int
+rs_open(struct tty_struct *tty, struct file * filp)
+{
+ struct e100_serial *info;
+ int retval, line;
+ unsigned long page;
+ int allocated_resources = 0;
+
+ /* find which port we want to open */
+ line = tty->index;
+
+ if (line < 0 || line >= NR_PORTS)
+ return -ENODEV;
+
+ /* find the corresponding e100_serial struct in the table */
+ info = rs_table + line;
+
+ /* don't allow the opening of ports that are not enabled in the HW config */
+ if (!info->enabled)
+ return -ENODEV;
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("[%d] rs_open %s, count = %d\n", current->pid, tty->name,
+ info->count);
+#endif
+
+ info->count++;
+ tty->driver_data = info;
+ info->port.tty = tty;
+
+ info->port.tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
+ if (!tmp_buf) {
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page) {
+ return -ENOMEM;
+ }
+ if (tmp_buf)
+ free_page(page);
+ else
+ tmp_buf = (unsigned char *) page;
+ }
+
+ /*
+ * If the port is in the middle of closing, bail out now
+ */
+ if (tty_hung_up_p(filp) ||
+ (info->flags & ASYNC_CLOSING)) {
+ wait_event_interruptible_tty(info->close_wait,
+ !(info->flags & ASYNC_CLOSING));
+#ifdef SERIAL_DO_RESTART
+ return ((info->flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS);
+#else
+ return -EAGAIN;
+#endif
+ }
+
+ /*
+ * If DMA is enabled try to allocate the irq's.
+ */
+ if (info->count == 1) {
+ allocated_resources = 1;
+ if (info->dma_in_enabled) {
+ if (request_irq(info->dma_in_irq_nbr,
+ rec_interrupt,
+ info->dma_in_irq_flags,
+ info->dma_in_irq_description,
+ info)) {
+ printk(KERN_WARNING "DMA irq '%s' busy; "
+ "falling back to non-DMA mode\n",
+ info->dma_in_irq_description);
+ /* Make sure we never try to use DMA in */
+ /* for the port again. */
+ info->dma_in_enabled = 0;
+ } else if (cris_request_dma(info->dma_in_nbr,
+ info->dma_in_irq_description,
+ DMA_VERBOSE_ON_ERROR,
+ info->dma_owner)) {
+ free_irq(info->dma_in_irq_nbr, info);
+ printk(KERN_WARNING "DMA '%s' busy; "
+ "falling back to non-DMA mode\n",
+ info->dma_in_irq_description);
+ /* Make sure we never try to use DMA in */
+ /* for the port again. */
+ info->dma_in_enabled = 0;
+ }
+#ifdef SERIAL_DEBUG_OPEN
+ else
+ printk(KERN_DEBUG "DMA irq '%s' allocated\n",
+ info->dma_in_irq_description);
+#endif
+ }
+ if (info->dma_out_enabled) {
+ if (request_irq(info->dma_out_irq_nbr,
+ tr_interrupt,
+ info->dma_out_irq_flags,
+ info->dma_out_irq_description,
+ info)) {
+ printk(KERN_WARNING "DMA irq '%s' busy; "
+ "falling back to non-DMA mode\n",
+ info->dma_out_irq_description);
+ /* Make sure we never try to use DMA out */
+ /* for the port again. */
+ info->dma_out_enabled = 0;
+ } else if (cris_request_dma(info->dma_out_nbr,
+ info->dma_out_irq_description,
+ DMA_VERBOSE_ON_ERROR,
+ info->dma_owner)) {
+ free_irq(info->dma_out_irq_nbr, info);
+ printk(KERN_WARNING "DMA '%s' busy; "
+ "falling back to non-DMA mode\n",
+ info->dma_out_irq_description);
+ /* Make sure we never try to use DMA out */
+ /* for the port again. */
+ info->dma_out_enabled = 0;
+ }
+#ifdef SERIAL_DEBUG_OPEN
+ else
+ printk(KERN_DEBUG "DMA irq '%s' allocated\n",
+ info->dma_out_irq_description);
+#endif
+ }
+ }
+
+ /*
+ * Start up the serial port
+ */
+
+ retval = startup(info);
+ if (retval) {
+ if (allocated_resources)
+ deinit_port(info);
+
+ /* FIXME Decrease count info->count here too? */
+ return retval;
+ }
+
+
+ retval = block_til_ready(tty, filp, info);
+ if (retval) {
+#ifdef SERIAL_DEBUG_OPEN
+ printk("rs_open returning after block_til_ready with %d\n",
+ retval);
+#endif
+ if (allocated_resources)
+ deinit_port(info);
+
+ return retval;
+ }
+
+ if ((info->count == 1) && (info->flags & ASYNC_SPLIT_TERMIOS)) {
+ *tty->termios = info->normal_termios;
+ change_speed(info);
+ }
+
+#ifdef SERIAL_DEBUG_OPEN
+ printk("rs_open ttyS%d successful...\n", info->line);
+#endif
+ DLOG_INT_TRIG( log_int_pos = 0);
+
+ DFLIP( if (info->line == SERIAL_DEBUG_LINE) {
+ info->icount.rx = 0;
+ } );
+
+ return 0;
+}
+
+#ifdef CONFIG_PROC_FS
+/*
+ * /proc fs routines....
+ */
+
+static void seq_line_info(struct seq_file *m, struct e100_serial *info)
+{
+ unsigned long tmp;
+
+ seq_printf(m, "%d: uart:E100 port:%lX irq:%d",
+ info->line, (unsigned long)info->ioport, info->irq);
+
+ if (!info->ioport || (info->type == PORT_UNKNOWN)) {
+ seq_printf(m, "\n");
+ return;
+ }
+
+ seq_printf(m, " baud:%d", info->baud);
+ seq_printf(m, " tx:%lu rx:%lu",
+ (unsigned long)info->icount.tx,
+ (unsigned long)info->icount.rx);
+ tmp = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
+ if (tmp)
+ seq_printf(m, " tx_pend:%lu/%lu",
+ (unsigned long)tmp,
+ (unsigned long)SERIAL_XMIT_SIZE);
+
+ seq_printf(m, " rx_pend:%lu/%lu",
+ (unsigned long)info->recv_cnt,
+ (unsigned long)info->max_recv_cnt);
+
+#if 1
+ if (info->port.tty) {
+ if (info->port.tty->stopped)
+ seq_printf(m, " stopped:%i",
+ (int)info->port.tty->stopped);
+ if (info->port.tty->hw_stopped)
+ seq_printf(m, " hw_stopped:%i",
+ (int)info->port.tty->hw_stopped);
+ }
+
+ {
+ unsigned char rstat = info->ioport[REG_STATUS];
+ if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect))
+ seq_printf(m, " xoff_detect:1");
+ }
+
+#endif
+
+ if (info->icount.frame)
+ seq_printf(m, " fe:%lu", (unsigned long)info->icount.frame);
+
+ if (info->icount.parity)
+ seq_printf(m, " pe:%lu", (unsigned long)info->icount.parity);
+
+ if (info->icount.brk)
+ seq_printf(m, " brk:%lu", (unsigned long)info->icount.brk);
+
+ if (info->icount.overrun)
+ seq_printf(m, " oe:%lu", (unsigned long)info->icount.overrun);
+
+ /*
+ * Last thing is the RS-232 status lines
+ */
+ if (!E100_RTS_GET(info))
+ seq_puts(m, "|RTS");
+ if (!E100_CTS_GET(info))
+ seq_puts(m, "|CTS");
+ if (!E100_DTR_GET(info))
+ seq_puts(m, "|DTR");
+ if (!E100_DSR_GET(info))
+ seq_puts(m, "|DSR");
+ if (!E100_CD_GET(info))
+ seq_puts(m, "|CD");
+ if (!E100_RI_GET(info))
+ seq_puts(m, "|RI");
+ seq_puts(m, "\n");
+}
+
+
+static int crisv10_proc_show(struct seq_file *m, void *v)
+{
+ int i;
+
+ seq_printf(m, "serinfo:1.0 driver:%s\n", serial_version);
+
+ for (i = 0; i < NR_PORTS; i++) {
+ if (!rs_table[i].enabled)
+ continue;
+ seq_line_info(m, &rs_table[i]);
+ }
+#ifdef DEBUG_LOG_INCLUDED
+ for (i = 0; i < debug_log_pos; i++) {
+ seq_printf(m, "%-4i %lu.%lu ",
+ i, debug_log[i].time,
+ timer_data_to_ns(debug_log[i].timer_data));
+ seq_printf(m, debug_log[i].string, debug_log[i].value);
+ }
+ seq_printf(m, "debug_log %i/%i\n", i, DEBUG_LOG_SIZE);
+ debug_log_pos = 0;
+#endif
+ return 0;
+}
+
+static int crisv10_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, crisv10_proc_show, NULL);
+}
+
+static const struct file_operations crisv10_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = crisv10_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+
+/* Finally, routines used to initialize the serial driver. */
+
+static void show_serial_version(void)
+{
+ printk(KERN_INFO
+ "ETRAX 100LX serial-driver %s, "
+ "(c) 2000-2004 Axis Communications AB\r\n",
+ &serial_version[11]); /* "$Revision: x.yy" */
+}
+
+/* rs_init inits the driver at boot (using the module_init chain) */
+
+static const struct tty_operations rs_ops = {
+ .open = rs_open,
+ .close = rs_close,
+ .write = rs_write,
+ .flush_chars = rs_flush_chars,
+ .write_room = rs_write_room,
+ .chars_in_buffer = rs_chars_in_buffer,
+ .flush_buffer = rs_flush_buffer,
+ .ioctl = rs_ioctl,
+ .throttle = rs_throttle,
+ .unthrottle = rs_unthrottle,
+ .set_termios = rs_set_termios,
+ .stop = rs_stop,
+ .start = rs_start,
+ .hangup = rs_hangup,
+ .break_ctl = rs_break,
+ .send_xchar = rs_send_xchar,
+ .wait_until_sent = rs_wait_until_sent,
+ .tiocmget = rs_tiocmget,
+ .tiocmset = rs_tiocmset,
+#ifdef CONFIG_PROC_FS
+ .proc_fops = &crisv10_proc_fops,
+#endif
+};
+
+static int __init rs_init(void)
+{
+ int i;
+ struct e100_serial *info;
+ struct tty_driver *driver = alloc_tty_driver(NR_PORTS);
+
+ if (!driver)
+ return -ENOMEM;
+
+ show_serial_version();
+
+ /* Setup the timed flush handler system */
+
+#if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
+ setup_timer(&flush_timer, timed_flush_handler, 0);
+ mod_timer(&flush_timer, jiffies + 5);
+#endif
+
+#if defined(CONFIG_ETRAX_RS485)
+#if defined(CONFIG_ETRAX_RS485_ON_PA)
+ if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit,
+ rs485_pa_bit)) {
+ printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
+ "RS485 pin\n");
+ put_tty_driver(driver);
+ return -EBUSY;
+ }
+#endif
+#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
+ if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit,
+ rs485_port_g_bit)) {
+ printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
+ "RS485 pin\n");
+ put_tty_driver(driver);
+ return -EBUSY;
+ }
+#endif
+#endif
+
+ /* Initialize the tty_driver structure */
+
+ driver->driver_name = "serial";
+ driver->name = "ttyS";
+ driver->major = TTY_MAJOR;
+ driver->minor_start = 64;
+ driver->type = TTY_DRIVER_TYPE_SERIAL;
+ driver->subtype = SERIAL_TYPE_NORMAL;
+ driver->init_termios = tty_std_termios;
+ driver->init_termios.c_cflag =
+ B115200 | CS8 | CREAD | HUPCL | CLOCAL; /* is normally B9600 default... */
+ driver->init_termios.c_ispeed = 115200;
+ driver->init_termios.c_ospeed = 115200;
+ driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+
+ tty_set_operations(driver, &rs_ops);
+ serial_driver = driver;
+ if (tty_register_driver(driver))
+ panic("Couldn't register serial driver\n");
+ /* do some initializing for the separate ports */
+
+ for (i = 0, info = rs_table; i < NR_PORTS; i++,info++) {
+ if (info->enabled) {
+ if (cris_request_io_interface(info->io_if,
+ info->io_if_description)) {
+ printk(KERN_CRIT "ETRAX100LX async serial: "
+ "Could not allocate IO pins for "
+ "%s, port %d\n",
+ info->io_if_description, i);
+ info->enabled = 0;
+ }
+ }
+ info->uses_dma_in = 0;
+ info->uses_dma_out = 0;
+ info->line = i;
+ info->port.tty = NULL;
+ info->type = PORT_ETRAX;
+ info->tr_running = 0;
+ info->forced_eop = 0;
+ info->baud_base = DEF_BAUD_BASE;
+ info->custom_divisor = 0;
+ info->flags = 0;
+ info->close_delay = 5*HZ/10;
+ info->closing_wait = 30*HZ;
+ info->x_char = 0;
+ info->event = 0;
+ info->count = 0;
+ info->blocked_open = 0;
+ info->normal_termios = driver->init_termios;
+ init_waitqueue_head(&info->open_wait);
+ init_waitqueue_head(&info->close_wait);
+ info->xmit.buf = NULL;
+ info->xmit.tail = info->xmit.head = 0;
+ info->first_recv_buffer = info->last_recv_buffer = NULL;
+ info->recv_cnt = info->max_recv_cnt = 0;
+ info->last_tx_active_usec = 0;
+ info->last_tx_active = 0;
+
+#if defined(CONFIG_ETRAX_RS485)
+ /* Set sane defaults */
+ info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND);
+ info->rs485.flags |= SER_RS485_RTS_AFTER_SEND;
+ info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
+ info->rs485.delay_rts_before_send = 0;
+ info->rs485.flags &= ~(SER_RS485_ENABLED);
+#endif
+ INIT_WORK(&info->work, do_softint);
+
+ if (info->enabled) {
+ printk(KERN_INFO "%s%d at %p is a builtin UART with DMA\n",
+ serial_driver->name, info->line, info->ioport);
+ }
+ }
+#ifdef CONFIG_ETRAX_FAST_TIMER
+#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
+ memset(fast_timers, 0, sizeof(fast_timers));
+#endif
+#ifdef CONFIG_ETRAX_RS485
+ memset(fast_timers_rs485, 0, sizeof(fast_timers_rs485));
+#endif
+ fast_timer_init();
+#endif
+
+#ifndef CONFIG_SVINTO_SIM
+#ifndef CONFIG_ETRAX_KGDB
+ /* Not needed in simulator. May only complicate stuff. */
+ /* hook the irq's for DMA channel 6 and 7, serial output and input, and some more... */
+
+ if (request_irq(SERIAL_IRQ_NBR, ser_interrupt,
+ IRQF_SHARED | IRQF_DISABLED, "serial ", driver))
+ panic("%s: Failed to request irq8", __func__);
+
+#endif
+#endif /* CONFIG_SVINTO_SIM */
+
+ return 0;
+}
+
+/* this makes sure that rs_init is called during kernel boot */
+
+module_init(rs_init);
diff --git a/drivers/tty/serial/crisv10.h b/drivers/tty/serial/crisv10.h
new file mode 100644
index 00000000..ea0beb46
--- /dev/null
+++ b/drivers/tty/serial/crisv10.h
@@ -0,0 +1,147 @@
+/*
+ * serial.h: Arch-dep definitions for the Etrax100 serial driver.
+ *
+ * Copyright (C) 1998-2007 Axis Communications AB
+ */
+
+#ifndef _ETRAX_SERIAL_H
+#define _ETRAX_SERIAL_H
+
+#include <linux/circ_buf.h>
+#include <asm/termios.h>
+#include <asm/dma.h>
+#include <arch/io_interface_mux.h>
+
+/* Software state per channel */
+
+#ifdef __KERNEL__
+/*
+ * This is our internal structure for each serial port's state.
+ *
+ * Many fields are paralleled by those in the serial_struct
+ * structure.
+ *
+ * For definitions of the flags field, see tty.h
+ */
+
+#define SERIAL_RECV_DESCRIPTORS 8
+
+struct etrax_recv_buffer {
+ struct etrax_recv_buffer *next;
+ unsigned short length;
+ unsigned char error;
+ unsigned char pad;
+
+ unsigned char buffer[0];
+};
+
+struct e100_serial {
+ struct tty_port port;
+ int baud;
+ volatile u8 *ioport; /* R_SERIALx_CTRL */
+ u32 irq; /* bitnr in R_IRQ_MASK2 for dmaX_descr */
+
+ /* Output registers */
+ volatile u8 *oclrintradr; /* adr to R_DMA_CHx_CLR_INTR */
+ volatile u32 *ofirstadr; /* adr to R_DMA_CHx_FIRST */
+ volatile u8 *ocmdadr; /* adr to R_DMA_CHx_CMD */
+ const volatile u8 *ostatusadr; /* adr to R_DMA_CHx_STATUS */
+
+ /* Input registers */
+ volatile u8 *iclrintradr; /* adr to R_DMA_CHx_CLR_INTR */
+ volatile u32 *ifirstadr; /* adr to R_DMA_CHx_FIRST */
+ volatile u8 *icmdadr; /* adr to R_DMA_CHx_CMD */
+ volatile u32 *idescradr; /* adr to R_DMA_CHx_DESCR */
+
+ int flags; /* defined in tty.h */
+
+ u8 rx_ctrl; /* shadow for R_SERIALx_REC_CTRL */
+ u8 tx_ctrl; /* shadow for R_SERIALx_TR_CTRL */
+ u8 iseteop; /* bit number for R_SET_EOP for the input dma */
+ int enabled; /* Set to 1 if the port is enabled in HW config */
+
+ u8 dma_out_enabled; /* Set to 1 if DMA should be used */
+ u8 dma_in_enabled; /* Set to 1 if DMA should be used */
+
+ /* end of fields defined in rs_table[] in .c-file */
+ int dma_owner;
+ unsigned int dma_in_nbr;
+ unsigned int dma_out_nbr;
+ unsigned int dma_in_irq_nbr;
+ unsigned int dma_out_irq_nbr;
+ unsigned long dma_in_irq_flags;
+ unsigned long dma_out_irq_flags;
+ char *dma_in_irq_description;
+ char *dma_out_irq_description;
+
+ enum cris_io_interface io_if;
+ char *io_if_description;
+
+ u8 uses_dma_in; /* Set to 1 if DMA is used */
+ u8 uses_dma_out; /* Set to 1 if DMA is used */
+ u8 forced_eop; /* a fifo eop has been forced */
+ int baud_base; /* For special baudrates */
+ int custom_divisor; /* For special baudrates */
+ struct etrax_dma_descr tr_descr;
+ struct etrax_dma_descr rec_descr[SERIAL_RECV_DESCRIPTORS];
+ int cur_rec_descr;
+
+ volatile int tr_running; /* 1 if output is running */
+
+ struct tty_struct *tty;
+ int read_status_mask;
+ int ignore_status_mask;
+ int x_char; /* xon/xoff character */
+ int close_delay;
+ unsigned short closing_wait;
+ unsigned short closing_wait2;
+ unsigned long event;
+ unsigned long last_active;
+ int line;
+ int type; /* PORT_ETRAX */
+ int count; /* # of fd on device */
+ int blocked_open; /* # of blocked opens */
+ struct circ_buf xmit;
+ struct etrax_recv_buffer *first_recv_buffer;
+ struct etrax_recv_buffer *last_recv_buffer;
+ unsigned int recv_cnt;
+ unsigned int max_recv_cnt;
+
+ struct work_struct work;
+ struct async_icount icount; /* error-statistics etc.*/
+ struct ktermios normal_termios;
+ struct ktermios callout_termios;
+ wait_queue_head_t open_wait;
+ wait_queue_head_t close_wait;
+
+ unsigned long char_time_usec; /* The time for 1 char, in usecs */
+ unsigned long flush_time_usec; /* How often we should flush */
+ unsigned long last_tx_active_usec; /* Last tx usec in the jiffies */
+ unsigned long last_tx_active; /* Last tx time in jiffies */
+ unsigned long last_rx_active_usec; /* Last rx usec in the jiffies */
+ unsigned long last_rx_active; /* Last rx time in jiffies */
+
+ int break_detected_cnt;
+ int errorcode;
+
+#ifdef CONFIG_ETRAX_RS485
+ struct serial_rs485 rs485; /* RS-485 support */
+#endif
+};
+
+/* This PORT type is not in the standard serial.h. It's not actually used for
+ * anything, since we only have one type of async serial port anyway in this
+ * system.
+ */
+
+#define PORT_ETRAX 1
+
+/*
+ * Events are used to schedule things to happen at timer-interrupt
+ * time, instead of at rs interrupt time.
+ */
+#define RS_EVENT_WRITE_WAKEUP 0
+
+#endif /* __KERNEL__ */
+
+#endif /* !_ETRAX_SERIAL_H */
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
new file mode 100644
index 00000000..57421d77
--- /dev/null
+++ b/drivers/tty/serial/dz.c
@@ -0,0 +1,955 @@
+/*
+ * dz.c: Serial port driver for DECstations equipped
+ * with the DZ chipset.
+ *
+ * Copyright (C) 1998 Olivier A. D. Lebaillif
+ *
+ * Email: olivier.lebaillif@ifrsys.com
+ *
+ * Copyright (C) 2004, 2006, 2007 Maciej W. Rozycki
+ *
+ * [31-AUG-98] triemer
+ * Changed IRQ to use Harald's dec internals interrupts.h
+ * removed base_addr code - moving address assignment to setup.c
+ * Changed name of dz_init to rs_init to be consistent with tc code
+ * [13-NOV-98] triemer fixed code to receive characters
+ * after patches by harald to irq code.
+ * [09-JAN-99] triemer minor fix for schedule - due to removal of timeout
+ * field from "current" - somewhere between 2.1.121 and 2.1.131
+ * Wed Jun 27 15:02:26 BRT 2001
+ * [27-JUN-2001] Arnaldo Carvalho de Melo <acme@conectiva.com.br> - cleanups
+ *
+ * Parts (C) 1999 David Airlie, airlied@linux.ie
+ * [07-SEP-99] Bugfixes
+ *
+ * [06-Jan-2002] Russell King <rmk@arm.linux.org.uk>
+ * Converted to new serial core
+ */
+
+#undef DEBUG_DZ
+
+#if defined(CONFIG_SERIAL_DZ_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+
+#include <asm/atomic.h>
+#include <asm/bootinfo.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <asm/dec/interrupts.h>
+#include <asm/dec/kn01.h>
+#include <asm/dec/kn02.h>
+#include <asm/dec/machtype.h>
+#include <asm/dec/prom.h>
+#include <asm/dec/system.h>
+
+#include "dz.h"
+
+
+MODULE_DESCRIPTION("DECstation DZ serial driver");
+MODULE_LICENSE("GPL");
+
+
+static char dz_name[] __initdata = "DECstation DZ serial driver version ";
+static char dz_version[] __initdata = "1.04";
+
+struct dz_port {
+ struct dz_mux *mux;
+ struct uart_port port;
+ unsigned int cflag;
+};
+
+struct dz_mux {
+ struct dz_port dport[DZ_NB_PORT];
+ atomic_t map_guard;
+ atomic_t irq_guard;
+ int initialised;
+};
+
+static struct dz_mux dz_mux;
+
+static inline struct dz_port *to_dport(struct uart_port *uport)
+{
+ return container_of(uport, struct dz_port, port);
+}
+
+/*
+ * ------------------------------------------------------------
+ * dz_in () and dz_out ()
+ *
+ * These routines are used to access the registers of the DZ
+ * chip, hiding relocation differences between implementations.
+ * ------------------------------------------------------------
+ */
+
+static u16 dz_in(struct dz_port *dport, unsigned offset)
+{
+ void __iomem *addr = dport->port.membase + offset;
+
+ return readw(addr);
+}
+
+static void dz_out(struct dz_port *dport, unsigned offset, u16 value)
+{
+ void __iomem *addr = dport->port.membase + offset;
+
+ writew(value, addr);
+}
+
+/*
+ * ------------------------------------------------------------
+ * rs_stop () and rs_start ()
+ *
+ * These routines are called before setting or resetting
+ * tty->stopped. They enable or disable transmitter interrupts,
+ * as necessary.
+ * ------------------------------------------------------------
+ */
+
+static void dz_stop_tx(struct uart_port *uport)
+{
+ struct dz_port *dport = to_dport(uport);
+ u16 tmp, mask = 1 << dport->port.line;
+
+ tmp = dz_in(dport, DZ_TCR); /* read the TX flag */
+ tmp &= ~mask; /* clear the TX flag */
+ dz_out(dport, DZ_TCR, tmp);
+}
+
+static void dz_start_tx(struct uart_port *uport)
+{
+ struct dz_port *dport = to_dport(uport);
+ u16 tmp, mask = 1 << dport->port.line;
+
+ tmp = dz_in(dport, DZ_TCR); /* read the TX flag */
+ tmp |= mask; /* set the TX flag */
+ dz_out(dport, DZ_TCR, tmp);
+}
+
+static void dz_stop_rx(struct uart_port *uport)
+{
+ struct dz_port *dport = to_dport(uport);
+
+ dport->cflag &= ~DZ_RXENAB;
+ dz_out(dport, DZ_LPR, dport->cflag);
+}
+
+static void dz_enable_ms(struct uart_port *uport)
+{
+ /* nothing to do */
+}
+
+/*
+ * ------------------------------------------------------------
+ *
+ * Here start the interrupt handling routines. All of the following
+ * subroutines are declared as inline and are folded into
+ * dz_interrupt. They were separated out for readability's sake.
+ *
+ * Note: dz_interrupt() is a "fast" interrupt, which means that it
+ * runs with interrupts turned off. People who may want to modify
+ * dz_interrupt() should try to keep the interrupt handler as fast as
+ * possible. After you are done making modifications, it is not a bad
+ * idea to do:
+ *
+ *	make drivers/tty/serial/dz.s
+ *
+ * and look at the resulting assembly code in dz.s.
+ *
+ * ------------------------------------------------------------
+ */
+
+/*
+ * ------------------------------------------------------------
+ * receive_char ()
+ *
+ * This routine deals with inputs from any lines.
+ * ------------------------------------------------------------
+ */
+static inline void dz_receive_chars(struct dz_mux *mux)
+{
+ struct uart_port *uport;
+ struct dz_port *dport = &mux->dport[0];
+ struct tty_struct *tty = NULL;
+ struct uart_icount *icount;
+ int lines_rx[DZ_NB_PORT] = { [0 ... DZ_NB_PORT - 1] = 0 };
+ unsigned char ch, flag;
+ u16 status;
+ int i;
+
+ while ((status = dz_in(dport, DZ_RBUF)) & DZ_DVAL) {
+ dport = &mux->dport[LINE(status)];
+ uport = &dport->port;
+ tty = uport->state->port.tty; /* point to the proper dev */
+
+ ch = UCHAR(status); /* grab the char */
+ flag = TTY_NORMAL;
+
+ icount = &uport->icount;
+ icount->rx++;
+
+ if (unlikely(status & (DZ_OERR | DZ_FERR | DZ_PERR))) {
+
+ /*
+ * There is no separate BREAK status bit, so treat
+			 * null characters with framing errors as BREAKs and
+			 * other characters normally.  To do this, move the
+			 * Framing Error bit to a simulated BREAK bit.
+ */
+ if (!ch) {
+ status |= (status & DZ_FERR) >>
+ (ffs(DZ_FERR) - ffs(DZ_BREAK));
+ status &= ~DZ_FERR;
+ }
+
+ /* Handle SysRq/SAK & keep track of the statistics. */
+ if (status & DZ_BREAK) {
+ icount->brk++;
+ if (uart_handle_break(uport))
+ continue;
+ } else if (status & DZ_FERR)
+ icount->frame++;
+ else if (status & DZ_PERR)
+ icount->parity++;
+ if (status & DZ_OERR)
+ icount->overrun++;
+
+ status &= uport->read_status_mask;
+ if (status & DZ_BREAK)
+ flag = TTY_BREAK;
+ else if (status & DZ_FERR)
+ flag = TTY_FRAME;
+ else if (status & DZ_PERR)
+ flag = TTY_PARITY;
+
+ }
+
+ if (uart_handle_sysrq_char(uport, ch))
+ continue;
+
+ uart_insert_char(uport, status, DZ_OERR, ch, flag);
+ lines_rx[LINE(status)] = 1;
+ }
+ for (i = 0; i < DZ_NB_PORT; i++)
+ if (lines_rx[i])
+ tty_flip_buffer_push(mux->dport[i].port.state->port.tty);
+}
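+
+/* Worked example with hypothetical bit numbers (not taken from dz.h): the
+ * expression
+ *	status |= (status & DZ_FERR) >> (ffs(DZ_FERR) - ffs(DZ_BREAK));
+ * simply relocates the framing-error bit onto the simulated BREAK bit.  If
+ * DZ_FERR were bit 14 (0x4000) and DZ_BREAK bit 11 (0x0800), then
+ * ffs(0x4000) - ffs(0x0800) = 15 - 12 = 3, and a set FERR bit shifted right
+ * by 3 lands exactly on the BREAK position.
+ */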
+
+/*
+ * ------------------------------------------------------------
+ * transmit_char ()
+ *
+ * This routine deals with outputs to any lines.
+ * ------------------------------------------------------------
+ */
+static inline void dz_transmit_chars(struct dz_mux *mux)
+{
+ struct dz_port *dport = &mux->dport[0];
+ struct circ_buf *xmit;
+ unsigned char tmp;
+ u16 status;
+
+ status = dz_in(dport, DZ_CSR);
+ dport = &mux->dport[LINE(status)];
+ xmit = &dport->port.state->xmit;
+
+ if (dport->port.x_char) { /* XON/XOFF chars */
+ dz_out(dport, DZ_TDR, dport->port.x_char);
+ dport->port.icount.tx++;
+ dport->port.x_char = 0;
+ return;
+ }
+ /* If nothing to do or stopped or hardware stopped. */
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
+ spin_lock(&dport->port.lock);
+ dz_stop_tx(&dport->port);
+ spin_unlock(&dport->port.lock);
+ return;
+ }
+
+ /*
+ * If something to do... (remember the dz has no output fifo,
+ * so we go one char at a time) :-<
+ */
+ tmp = xmit->buf[xmit->tail];
+ xmit->tail = (xmit->tail + 1) & (DZ_XMIT_SIZE - 1);
+ dz_out(dport, DZ_TDR, tmp);
+ dport->port.icount.tx++;
+
+ if (uart_circ_chars_pending(xmit) < DZ_WAKEUP_CHARS)
+ uart_write_wakeup(&dport->port);
+
+	/* Are we done? */
+ if (uart_circ_empty(xmit)) {
+ spin_lock(&dport->port.lock);
+ dz_stop_tx(&dport->port);
+ spin_unlock(&dport->port.lock);
+ }
+}
+
+/*
+ * ------------------------------------------------------------
+ * check_modem_status()
+ *
+ * DS 3100 & 5100: Only valid for the MODEM line, duh!
+ * DS 5000/200: Valid for the MODEM and PRINTER line.
+ * ------------------------------------------------------------
+ */
+static inline void check_modem_status(struct dz_port *dport)
+{
+ /*
+ * FIXME:
+ * 1. No status change interrupt; use a timer.
+ * 2. Handle the 3100/5000 as appropriate. --macro
+ */
+ u16 status;
+
+ /* If not the modem line just return. */
+ if (dport->port.line != DZ_MODEM)
+ return;
+
+ status = dz_in(dport, DZ_MSR);
+
+ /* it's easy, since DSR2 is the only bit in the register */
+ if (status)
+ dport->port.icount.dsr++;
+}
+
+/*
+ * ------------------------------------------------------------
+ * dz_interrupt ()
+ *
+ * This is the main interrupt routine for the DZ chip.
+ * It deals with the multiple ports.
+ * ------------------------------------------------------------
+ */
+static irqreturn_t dz_interrupt(int irq, void *dev_id)
+{
+ struct dz_mux *mux = dev_id;
+ struct dz_port *dport = &mux->dport[0];
+ u16 status;
+
+ /* get the reason why we just got an irq */
+ status = dz_in(dport, DZ_CSR);
+
+ if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE))
+ dz_receive_chars(mux);
+
+ if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE))
+ dz_transmit_chars(mux);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * -------------------------------------------------------------------
+ * Here ends the DZ interrupt routines.
+ * -------------------------------------------------------------------
+ */
+
+static unsigned int dz_get_mctrl(struct uart_port *uport)
+{
+ /*
+ * FIXME: Handle the 3100/5000 as appropriate. --macro
+ */
+ struct dz_port *dport = to_dport(uport);
+ unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+
+ if (dport->port.line == DZ_MODEM) {
+ if (dz_in(dport, DZ_MSR) & DZ_MODEM_DSR)
+ mctrl &= ~TIOCM_DSR;
+ }
+
+ return mctrl;
+}
+
+static void dz_set_mctrl(struct uart_port *uport, unsigned int mctrl)
+{
+ /*
+ * FIXME: Handle the 3100/5000 as appropriate. --macro
+ */
+ struct dz_port *dport = to_dport(uport);
+ u16 tmp;
+
+ if (dport->port.line == DZ_MODEM) {
+ tmp = dz_in(dport, DZ_TCR);
+ if (mctrl & TIOCM_DTR)
+ tmp &= ~DZ_MODEM_DTR;
+ else
+ tmp |= DZ_MODEM_DTR;
+ dz_out(dport, DZ_TCR, tmp);
+ }
+}
+
+/*
+ * -------------------------------------------------------------------
+ * startup ()
+ *
+ * various initialization tasks
+ * -------------------------------------------------------------------
+ */
+static int dz_startup(struct uart_port *uport)
+{
+ struct dz_port *dport = to_dport(uport);
+ struct dz_mux *mux = dport->mux;
+ unsigned long flags;
+ int irq_guard;
+ int ret;
+ u16 tmp;
+
+ irq_guard = atomic_add_return(1, &mux->irq_guard);
+ if (irq_guard != 1)
+ return 0;
+
+ ret = request_irq(dport->port.irq, dz_interrupt,
+ IRQF_SHARED, "dz", mux);
+ if (ret) {
+ atomic_add(-1, &mux->irq_guard);
+ printk(KERN_ERR "dz: Cannot get IRQ %d!\n", dport->port.irq);
+ return ret;
+ }
+
+ spin_lock_irqsave(&dport->port.lock, flags);
+
+ /* Enable interrupts. */
+ tmp = dz_in(dport, DZ_CSR);
+ tmp |= DZ_RIE | DZ_TIE;
+ dz_out(dport, DZ_CSR, tmp);
+
+ spin_unlock_irqrestore(&dport->port.lock, flags);
+
+ return 0;
+}
+
+/*
+ * -------------------------------------------------------------------
+ * shutdown ()
+ *
+ * This routine will shut down a serial port; interrupts are disabled, and
+ * DTR is dropped if the hangup-on-close termio flag is on.
+ * -------------------------------------------------------------------
+ */
+static void dz_shutdown(struct uart_port *uport)
+{
+ struct dz_port *dport = to_dport(uport);
+ struct dz_mux *mux = dport->mux;
+ unsigned long flags;
+ int irq_guard;
+ u16 tmp;
+
+ spin_lock_irqsave(&dport->port.lock, flags);
+ dz_stop_tx(&dport->port);
+ spin_unlock_irqrestore(&dport->port.lock, flags);
+
+ irq_guard = atomic_add_return(-1, &mux->irq_guard);
+ if (!irq_guard) {
+ /* Disable interrupts. */
+ tmp = dz_in(dport, DZ_CSR);
+ tmp &= ~(DZ_RIE | DZ_TIE);
+ dz_out(dport, DZ_CSR, tmp);
+
+ free_irq(dport->port.irq, mux);
+ }
+}
+
+/*
+ * -------------------------------------------------------------------
+ * dz_tx_empty() -- get the transmitter empty status
+ *
+ * Purpose: Let user call ioctl() to get info when the UART physically
+ * is emptied. On bus types like RS485, the transmitter must
+ * release the bus after transmitting. This must be done when
+ *          the transmit shift register is empty, not when the transmit
+ *          holding register is empty.  This functionality
+ * allows an RS485 driver to be written in user space.
+ * -------------------------------------------------------------------
+ */
+static unsigned int dz_tx_empty(struct uart_port *uport)
+{
+ struct dz_port *dport = to_dport(uport);
+ unsigned short tmp, mask = 1 << dport->port.line;
+
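+	/* The per-line bit in TCR enables transmission on that line;
+	   report the transmitter empty once that bit has been cleared. */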
+ tmp = dz_in(dport, DZ_TCR);
+ tmp &= mask;
+
+ return tmp ? 0 : TIOCSER_TEMT;
+}
+
+static void dz_break_ctl(struct uart_port *uport, int break_state)
+{
+ /*
+ * FIXME: Can't access BREAK bits in TDR easily;
+ * reuse the code for polled TX. --macro
+ */
+ struct dz_port *dport = to_dport(uport);
+ unsigned long flags;
+ unsigned short tmp, mask = 1 << dport->port.line;
+
+ spin_lock_irqsave(&uport->lock, flags);
+ tmp = dz_in(dport, DZ_TCR);
+ if (break_state)
+ tmp |= mask;
+ else
+ tmp &= ~mask;
+ dz_out(dport, DZ_TCR, tmp);
+ spin_unlock_irqrestore(&uport->lock, flags);
+}
+
+static int dz_encode_baud_rate(unsigned int baud)
+{
+ switch (baud) {
+ case 50:
+ return DZ_B50;
+ case 75:
+ return DZ_B75;
+ case 110:
+ return DZ_B110;
+ case 134:
+ return DZ_B134;
+ case 150:
+ return DZ_B150;
+ case 300:
+ return DZ_B300;
+ case 600:
+ return DZ_B600;
+ case 1200:
+ return DZ_B1200;
+ case 1800:
+ return DZ_B1800;
+ case 2000:
+ return DZ_B2000;
+ case 2400:
+ return DZ_B2400;
+ case 3600:
+ return DZ_B3600;
+ case 4800:
+ return DZ_B4800;
+ case 7200:
+ return DZ_B7200;
+ case 9600:
+ return DZ_B9600;
+ default:
+ return -1;
+ }
+}
+
+
+static void dz_reset(struct dz_port *dport)
+{
+ struct dz_mux *mux = dport->mux;
+
+ if (mux->initialised)
+ return;
+
+ dz_out(dport, DZ_CSR, DZ_CLR);
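+	/* DZ_CLR is self-clearing; wait for the master reset to complete. */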
+ while (dz_in(dport, DZ_CSR) & DZ_CLR);
+ iob();
+
+ /* Enable scanning. */
+ dz_out(dport, DZ_CSR, DZ_MSE);
+
+ mux->initialised = 1;
+}
+
+static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
+ struct ktermios *old_termios)
+{
+ struct dz_port *dport = to_dport(uport);
+ unsigned long flags;
+ unsigned int cflag, baud;
+ int bflag;
+
+ cflag = dport->port.line;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ cflag |= DZ_CS5;
+ break;
+ case CS6:
+ cflag |= DZ_CS6;
+ break;
+ case CS7:
+ cflag |= DZ_CS7;
+ break;
+ case CS8:
+ default:
+ cflag |= DZ_CS8;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ cflag |= DZ_CSTOPB;
+ if (termios->c_cflag & PARENB)
+ cflag |= DZ_PARENB;
+ if (termios->c_cflag & PARODD)
+ cflag |= DZ_PARODD;
+
+ baud = uart_get_baud_rate(uport, termios, old_termios, 50, 9600);
+ bflag = dz_encode_baud_rate(baud);
+ if (bflag < 0) { /* Try to keep unchanged. */
+ baud = uart_get_baud_rate(uport, old_termios, NULL, 50, 9600);
+ bflag = dz_encode_baud_rate(baud);
+ if (bflag < 0) { /* Resort to 9600. */
+ baud = 9600;
+ bflag = DZ_B9600;
+ }
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ }
+ cflag |= bflag;
+
+ if (termios->c_cflag & CREAD)
+ cflag |= DZ_RXENAB;
+
+ spin_lock_irqsave(&dport->port.lock, flags);
+
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
+ dz_out(dport, DZ_LPR, cflag);
+ dport->cflag = cflag;
+
+ /* setup accept flag */
+ dport->port.read_status_mask = DZ_OERR;
+ if (termios->c_iflag & INPCK)
+ dport->port.read_status_mask |= DZ_FERR | DZ_PERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ dport->port.read_status_mask |= DZ_BREAK;
+
+ /* characters to ignore */
+ uport->ignore_status_mask = 0;
+ if ((termios->c_iflag & (IGNPAR | IGNBRK)) == (IGNPAR | IGNBRK))
+ dport->port.ignore_status_mask |= DZ_OERR;
+ if (termios->c_iflag & IGNPAR)
+ dport->port.ignore_status_mask |= DZ_FERR | DZ_PERR;
+ if (termios->c_iflag & IGNBRK)
+ dport->port.ignore_status_mask |= DZ_BREAK;
+
+ spin_unlock_irqrestore(&dport->port.lock, flags);
+}
+
+/*
+ * Hack alert!
+ * Required solely so that the initial PROM-based console
+ * works undisturbed in parallel with this one.
+ */
+static void dz_pm(struct uart_port *uport, unsigned int state,
+ unsigned int oldstate)
+{
+ struct dz_port *dport = to_dport(uport);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dport->port.lock, flags);
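+	/* The serial core passes power state 0 to bring the port up and 3
+	   to shut it off, hence the test below. */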
+ if (state < 3)
+ dz_start_tx(&dport->port);
+ else
+ dz_stop_tx(&dport->port);
+ spin_unlock_irqrestore(&dport->port.lock, flags);
+}
+
+
+static const char *dz_type(struct uart_port *uport)
+{
+ return "DZ";
+}
+
+static void dz_release_port(struct uart_port *uport)
+{
+ struct dz_mux *mux = to_dport(uport)->mux;
+ int map_guard;
+
+ iounmap(uport->membase);
+ uport->membase = NULL;
+
+ map_guard = atomic_add_return(-1, &mux->map_guard);
+ if (!map_guard)
+ release_mem_region(uport->mapbase, dec_kn_slot_size);
+}
+
+static int dz_map_port(struct uart_port *uport)
+{
+ if (!uport->membase)
+ uport->membase = ioremap_nocache(uport->mapbase,
+ dec_kn_slot_size);
+ if (!uport->membase) {
+ printk(KERN_ERR "dz: Cannot map MMIO\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int dz_request_port(struct uart_port *uport)
+{
+ struct dz_mux *mux = to_dport(uport)->mux;
+ int map_guard;
+ int ret;
+
+ map_guard = atomic_add_return(1, &mux->map_guard);
+ if (map_guard == 1) {
+ if (!request_mem_region(uport->mapbase, dec_kn_slot_size,
+ "dz")) {
+ atomic_add(-1, &mux->map_guard);
+ printk(KERN_ERR
+ "dz: Unable to reserve MMIO resource\n");
+ return -EBUSY;
+ }
+ }
+ ret = dz_map_port(uport);
+ if (ret) {
+ map_guard = atomic_add_return(-1, &mux->map_guard);
+ if (!map_guard)
+ release_mem_region(uport->mapbase, dec_kn_slot_size);
+ return ret;
+ }
+ return 0;
+}
+
+static void dz_config_port(struct uart_port *uport, int flags)
+{
+ struct dz_port *dport = to_dport(uport);
+
+ if (flags & UART_CONFIG_TYPE) {
+ if (dz_request_port(uport))
+ return;
+
+ uport->type = PORT_DZ;
+
+ dz_reset(dport);
+ }
+}
+
+/*
+ * Verify the new serial_struct (for TIOCSSERIAL).
+ */
+static int dz_verify_port(struct uart_port *uport, struct serial_struct *ser)
+{
+ int ret = 0;
+
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_DZ)
+ ret = -EINVAL;
+ if (ser->irq != uport->irq)
+ ret = -EINVAL;
+ return ret;
+}
+
+static struct uart_ops dz_ops = {
+ .tx_empty = dz_tx_empty,
+ .get_mctrl = dz_get_mctrl,
+ .set_mctrl = dz_set_mctrl,
+ .stop_tx = dz_stop_tx,
+ .start_tx = dz_start_tx,
+ .stop_rx = dz_stop_rx,
+ .enable_ms = dz_enable_ms,
+ .break_ctl = dz_break_ctl,
+ .startup = dz_startup,
+ .shutdown = dz_shutdown,
+ .set_termios = dz_set_termios,
+ .pm = dz_pm,
+ .type = dz_type,
+ .release_port = dz_release_port,
+ .request_port = dz_request_port,
+ .config_port = dz_config_port,
+ .verify_port = dz_verify_port,
+};
+
+static void __init dz_init_ports(void)
+{
+ static int first = 1;
+ unsigned long base;
+ int line;
+
+ if (!first)
+ return;
+ first = 0;
+
+ if (mips_machtype == MACH_DS23100 || mips_machtype == MACH_DS5100)
+ base = dec_kn_slot_base + KN01_DZ11;
+ else
+ base = dec_kn_slot_base + KN02_DZ11;
+
+ for (line = 0; line < DZ_NB_PORT; line++) {
+ struct dz_port *dport = &dz_mux.dport[line];
+ struct uart_port *uport = &dport->port;
+
+ dport->mux = &dz_mux;
+
+ uport->irq = dec_interrupt[DEC_IRQ_DZ11];
+ uport->fifosize = 1;
+ uport->iotype = UPIO_MEM;
+ uport->flags = UPF_BOOT_AUTOCONF;
+ uport->ops = &dz_ops;
+ uport->line = line;
+ uport->mapbase = base;
+ }
+}
+
+#ifdef CONFIG_SERIAL_DZ_CONSOLE
+/*
+ * -------------------------------------------------------------------
+ * dz_console_putchar() -- transmit a character
+ *
+ * Polled transmission. This is tricky. We need to mask transmit
+ * interrupts so that they do not interfere, enable the transmitter
+ * for the line requested and then wait till the transmit scanner
+ * requests data for this line. But it may request data for another
+ * line first, in which case we have to disable its transmitter and
+ * repeat waiting till our line pops up.  Only then may the character
+ * be transmitted.  Finally, the state of the transmitter mask is
+ * restored. Welcome to the world of PDP-11!
+ * -------------------------------------------------------------------
+ */
+static void dz_console_putchar(struct uart_port *uport, int ch)
+{
+ struct dz_port *dport = to_dport(uport);
+ unsigned long flags;
+ unsigned short csr, tcr, trdy, mask;
+ int loops = 10000;
+
+ spin_lock_irqsave(&dport->port.lock, flags);
+ csr = dz_in(dport, DZ_CSR);
+ dz_out(dport, DZ_CSR, csr & ~DZ_TIE);
+ tcr = dz_in(dport, DZ_TCR);
+ tcr |= 1 << dport->port.line;
+ mask = tcr;
+ dz_out(dport, DZ_TCR, mask);
+ iob();
+ spin_unlock_irqrestore(&dport->port.lock, flags);
+
+ do {
+ trdy = dz_in(dport, DZ_CSR);
+ if (!(trdy & DZ_TRDY))
+ continue;
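+		/* DZ_TLINE (CSR bits 9:8) holds the line the scanner wants data for. */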
+ trdy = (trdy & DZ_TLINE) >> 8;
+ if (trdy == dport->port.line)
+ break;
+ mask &= ~(1 << trdy);
+ dz_out(dport, DZ_TCR, mask);
+ iob();
+ udelay(2);
+ } while (--loops);
+
+ if (loops) /* Cannot send otherwise. */
+ dz_out(dport, DZ_TDR, ch);
+
+ dz_out(dport, DZ_TCR, tcr);
+ dz_out(dport, DZ_CSR, csr);
+}
+
+/*
+ * -------------------------------------------------------------------
+ * dz_console_print ()
+ *
+ * dz_console_print is registered for printk.
+ * The console must be locked when we get here.
+ * -------------------------------------------------------------------
+ */
+static void dz_console_print(struct console *co,
+ const char *str,
+ unsigned int count)
+{
+ struct dz_port *dport = &dz_mux.dport[co->index];
+#ifdef DEBUG_DZ
+ prom_printf((char *) str);
+#endif
+ uart_console_write(&dport->port, str, count, dz_console_putchar);
+}
+
+static int __init dz_console_setup(struct console *co, char *options)
+{
+ struct dz_port *dport = &dz_mux.dport[co->index];
+ struct uart_port *uport = &dport->port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+
+ ret = dz_map_port(uport);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&dport->port.lock); /* For dz_pm(). */
+
+ dz_reset(dport);
+ dz_pm(uport, 0, -1);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&dport->port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver dz_reg;
+static struct console dz_console = {
+ .name = "ttyS",
+ .write = dz_console_print,
+ .device = uart_console_device,
+ .setup = dz_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &dz_reg,
+};
+
+static int __init dz_serial_console_init(void)
+{
+ if (!IOASIC) {
+ dz_init_ports();
+ register_console(&dz_console);
+ return 0;
+ } else
+ return -ENXIO;
+}
+
+console_initcall(dz_serial_console_init);
+
+#define SERIAL_DZ_CONSOLE &dz_console
+#else
+#define SERIAL_DZ_CONSOLE NULL
+#endif /* CONFIG_SERIAL_DZ_CONSOLE */
+
+static struct uart_driver dz_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "serial",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .nr = DZ_NB_PORT,
+ .cons = SERIAL_DZ_CONSOLE,
+};
+
+static int __init dz_init(void)
+{
+ int ret, i;
+
+ if (IOASIC)
+ return -ENXIO;
+
+ printk("%s%s\n", dz_name, dz_version);
+
+ dz_init_ports();
+
+ ret = uart_register_driver(&dz_reg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DZ_NB_PORT; i++)
+ uart_add_one_port(&dz_reg, &dz_mux.dport[i].port);
+
+ return 0;
+}
+
+module_init(dz_init);
diff --git a/drivers/tty/serial/dz.h b/drivers/tty/serial/dz.h
new file mode 100644
index 00000000..faf169ed
--- /dev/null
+++ b/drivers/tty/serial/dz.h
@@ -0,0 +1,129 @@
+/*
+ * dz.h: Serial port driver for DECstations equipped
+ * with the DZ chipset.
+ *
+ * Copyright (C) 1998 Olivier A. D. Lebaillif
+ *
+ * Email: olivier.lebaillif@ifrsys.com
+ *
+ * Copyright (C) 2004, 2006 Maciej W. Rozycki
+ */
+#ifndef DZ_SERIAL_H
+#define DZ_SERIAL_H
+
+/*
+ * Definitions for the Control and Status Register.
+ */
+#define DZ_TRDY 0x8000 /* Transmitter empty */
+#define DZ_TIE 0x4000 /* Transmitter Interrupt Enbl */
+#define DZ_TLINE 0x0300 /* Transmitter Line Number */
+#define DZ_RDONE 0x0080 /* Receiver data ready */
+#define DZ_RIE 0x0040 /* Receive Interrupt Enable */
+#define DZ_MSE 0x0020 /* Master Scan Enable */
+#define DZ_CLR 0x0010 /* Master reset */
+#define DZ_MAINT 0x0008 /* Loop Back Mode */
+
+/*
+ * Definitions for the Receiver Buffer Register.
+ */
+#define DZ_RBUF_MASK 0x00FF /* Data Mask */
+#define DZ_LINE_MASK 0x0300 /* Line Mask */
+#define DZ_DVAL 0x8000 /* Valid Data indicator */
+#define DZ_OERR 0x4000 /* Overrun error indicator */
+#define DZ_FERR 0x2000 /* Frame error indicator */
+#define DZ_PERR 0x1000 /* Parity error indicator */
+
+#define DZ_BREAK 0x0800 /* BREAK event software flag */
+
+#define LINE(x) ((x & DZ_LINE_MASK) >> 8) /* Get the line number
+ from the input buffer */
+#define UCHAR(x) ((unsigned char)(x & DZ_RBUF_MASK))
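+/* Example: an RBUF word of 0x8241 has DZ_DVAL set and decodes to
+   line 2 via LINE() and character 0x41 via UCHAR(). */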
+
+/*
+ * Definitions for the Transmit Control Register.
+ */
+#define DZ_LINE_KEYBOARD 0x0001
+#define DZ_LINE_MOUSE 0x0002
+#define DZ_LINE_MODEM 0x0004
+#define DZ_LINE_PRINTER 0x0008
+
+#define DZ_MODEM_RTS 0x0800 /* RTS for the modem line (2) */
+#define DZ_MODEM_DTR 0x0400 /* DTR for the modem line (2) */
+#define DZ_PRINT_RTS 0x0200 /* RTS for the prntr line (3) */
+#define DZ_PRINT_DTR 0x0100 /* DTR for the prntr line (3) */
+#define DZ_LNENB 0x000f /* Transmitter Line Enable */
+
+/*
+ * Definitions for the Modem Status Register.
+ */
+#define DZ_MODEM_RI 0x0800 /* RI for the modem line (2) */
+#define DZ_MODEM_CD 0x0400 /* CD for the modem line (2) */
+#define DZ_MODEM_DSR 0x0200 /* DSR for the modem line (2) */
+#define DZ_MODEM_CTS 0x0100 /* CTS for the modem line (2) */
+#define DZ_PRINT_RI 0x0008 /* RI for the printer line (3) */
+#define DZ_PRINT_CD 0x0004 /* CD for the printer line (3) */
+#define DZ_PRINT_DSR 0x0002 /* DSR for the prntr line (3) */
+#define DZ_PRINT_CTS 0x0001 /* CTS for the prntr line (3) */
+
+/*
+ * Definitions for the Transmit Data Register.
+ */
+#define DZ_BRK0 0x0100 /* Break assertion for line 0 */
+#define DZ_BRK1 0x0200 /* Break assertion for line 1 */
+#define DZ_BRK2 0x0400 /* Break assertion for line 2 */
+#define DZ_BRK3 0x0800 /* Break assertion for line 3 */
+
+/*
+ * Definitions for the Line Parameter Register.
+ */
+#define DZ_KEYBOARD 0x0000 /* line 0 = keyboard */
+#define DZ_MOUSE 0x0001 /* line 1 = mouse */
+#define DZ_MODEM 0x0002 /* line 2 = modem */
+#define DZ_PRINTER 0x0003 /* line 3 = printer */
+
+#define DZ_CSIZE 0x0018 /* Number of bits per byte (mask) */
+#define DZ_CS5 0x0000 /* 5 bits per byte */
+#define DZ_CS6 0x0008 /* 6 bits per byte */
+#define DZ_CS7 0x0010 /* 7 bits per byte */
+#define DZ_CS8 0x0018 /* 8 bits per byte */
+
+#define DZ_CSTOPB 0x0020 /* 2 stop bits instead of one */
+
+#define DZ_PARENB 0x0040 /* Parity enable */
+#define DZ_PARODD 0x0080 /* Odd parity instead of even */
+
+#define DZ_CBAUD 0x0E00 /* Baud Rate (mask) */
+#define DZ_B50 0x0000
+#define DZ_B75 0x0100
+#define DZ_B110 0x0200
+#define DZ_B134 0x0300
+#define DZ_B150 0x0400
+#define DZ_B300 0x0500
+#define DZ_B600 0x0600
+#define DZ_B1200 0x0700
+#define DZ_B1800 0x0800
+#define DZ_B2000 0x0900
+#define DZ_B2400 0x0A00
+#define DZ_B3600 0x0B00
+#define DZ_B4800 0x0C00
+#define DZ_B7200 0x0D00
+#define DZ_B9600 0x0E00
+
+#define DZ_RXENAB 0x1000 /* Receiver Enable */
+
+/*
+ * Addresses for the DZ registers
+ */
+#define DZ_CSR 0x00 /* Control and Status Register */
+#define DZ_RBUF 0x08 /* Receive Buffer */
+#define DZ_LPR 0x08 /* Line Parameters Register */
+#define DZ_TCR 0x10 /* Transmitter Control Register */
+#define DZ_MSR 0x18 /* Modem Status Register */
+#define DZ_TDR 0x18 /* Transmit Data Register */
+
+#define DZ_NB_PORT 4
+
+#define DZ_XMIT_SIZE 4096 /* buffer size */
+#define DZ_WAKEUP_CHARS         (DZ_XMIT_SIZE / 4)
+
+#endif /* DZ_SERIAL_H */
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
new file mode 100644
index 00000000..8a869e58
--- /dev/null
+++ b/drivers/tty/serial/icom.c
@@ -0,0 +1,1658 @@
+/*
+ * icom.c
+ *
+ * Copyright (C) 2001 IBM Corporation. All rights reserved.
+ *
+ * Serial device driver.
+ *
+ * Based on code from serial.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#define SERIAL_DO_RESTART
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/termios.h>
+#include <linux/fs.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_reg.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/kref.h>
+#include <linux/firmware.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "icom.h"
+
+/*#define ICOM_TRACE enable port trace capabilities */
+
+#define ICOM_DRIVER_NAME "icom"
+#define ICOM_VERSION_STR "1.3.1"
+#define NR_PORTS 128
+#define ICOM_PORT ((struct icom_port *)port)
+#define to_icom_adapter(d) container_of(d, struct icom_adapter, kref)
+
+static const struct pci_device_id icom_pci_table[] = {
+ {
+ .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = ADAPTER_V1,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
+ .subvendor = PCI_VENDOR_ID_IBM,
+ .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX,
+ .driver_data = ADAPTER_V2,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
+ .subvendor = PCI_VENDOR_ID_IBM,
+ .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM,
+ .driver_data = ADAPTER_V2,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
+ .subvendor = PCI_VENDOR_ID_IBM,
+ .subdevice = PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL,
+ .driver_data = ADAPTER_V2,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
+ .subvendor = PCI_VENDOR_ID_IBM,
+ .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE,
+ .driver_data = ADAPTER_V2,
+ },
+ {}
+};
+
+struct lookup_proc_table start_proc[4] = {
+ {NULL, ICOM_CONTROL_START_A},
+ {NULL, ICOM_CONTROL_START_B},
+ {NULL, ICOM_CONTROL_START_C},
+ {NULL, ICOM_CONTROL_START_D}
+};
+
+
+struct lookup_proc_table stop_proc[4] = {
+ {NULL, ICOM_CONTROL_STOP_A},
+ {NULL, ICOM_CONTROL_STOP_B},
+ {NULL, ICOM_CONTROL_STOP_C},
+ {NULL, ICOM_CONTROL_STOP_D}
+};
+
+struct lookup_int_table int_mask_tbl[4] = {
+ {NULL, ICOM_INT_MASK_PRC_A},
+ {NULL, ICOM_INT_MASK_PRC_B},
+ {NULL, ICOM_INT_MASK_PRC_C},
+ {NULL, ICOM_INT_MASK_PRC_D},
+};
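+
+/*
+ * The register pointers in the three tables above are filled in at
+ * run time (see start_processor(), stop_processor(), startup() and
+ * shutdown()), since ports 0/1 and ports 2/3 live behind different
+ * global registers.
+ */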
+
+
+MODULE_DEVICE_TABLE(pci, icom_pci_table);
+
+static LIST_HEAD(icom_adapter_head);
+
+/* spinlock for adapter initialization and changing adapter operations */
+static spinlock_t icom_lock;
+
+#ifdef ICOM_TRACE
+static inline void trace(struct icom_port *icom_port, char *trace_pt,
+ unsigned long trace_data)
+{
+ dev_info(&icom_port->adapter->pci_dev->dev, ":%d:%s - %lx\n",
+ icom_port->port, trace_pt, trace_data);
+}
+#else
+static inline void trace(struct icom_port *icom_port, char *trace_pt, unsigned long trace_data) {};
+#endif
+static void icom_kref_release(struct kref *kref);
+
+static void free_port_memory(struct icom_port *icom_port)
+{
+ struct pci_dev *dev = icom_port->adapter->pci_dev;
+
+ trace(icom_port, "RET_PORT_MEM", 0);
+ if (icom_port->recv_buf) {
+ pci_free_consistent(dev, 4096, icom_port->recv_buf,
+ icom_port->recv_buf_pci);
+ icom_port->recv_buf = NULL;
+ }
+ if (icom_port->xmit_buf) {
+ pci_free_consistent(dev, 4096, icom_port->xmit_buf,
+ icom_port->xmit_buf_pci);
+ icom_port->xmit_buf = NULL;
+ }
+ if (icom_port->statStg) {
+ pci_free_consistent(dev, 4096, icom_port->statStg,
+ icom_port->statStg_pci);
+ icom_port->statStg = NULL;
+ }
+
+ if (icom_port->xmitRestart) {
+ pci_free_consistent(dev, 4096, icom_port->xmitRestart,
+ icom_port->xmitRestart_pci);
+ icom_port->xmitRestart = NULL;
+ }
+}
+
+static int __devinit get_port_memory(struct icom_port *icom_port)
+{
+ int index;
+ unsigned long stgAddr;
+ unsigned long startStgAddr;
+ unsigned long offset;
+ struct pci_dev *dev = icom_port->adapter->pci_dev;
+
+ icom_port->xmit_buf =
+ pci_alloc_consistent(dev, 4096, &icom_port->xmit_buf_pci);
+ if (!icom_port->xmit_buf) {
+ dev_err(&dev->dev, "Can not allocate Transmit buffer\n");
+ return -ENOMEM;
+ }
+
+ trace(icom_port, "GET_PORT_MEM",
+ (unsigned long) icom_port->xmit_buf);
+
+ icom_port->recv_buf =
+ pci_alloc_consistent(dev, 4096, &icom_port->recv_buf_pci);
+ if (!icom_port->recv_buf) {
+ dev_err(&dev->dev, "Can not allocate Receive buffer\n");
+ free_port_memory(icom_port);
+ return -ENOMEM;
+ }
+ trace(icom_port, "GET_PORT_MEM",
+ (unsigned long) icom_port->recv_buf);
+
+ icom_port->statStg =
+ pci_alloc_consistent(dev, 4096, &icom_port->statStg_pci);
+ if (!icom_port->statStg) {
+ dev_err(&dev->dev, "Can not allocate Status buffer\n");
+ free_port_memory(icom_port);
+ return -ENOMEM;
+ }
+ trace(icom_port, "GET_PORT_MEM",
+ (unsigned long) icom_port->statStg);
+
+ icom_port->xmitRestart =
+ pci_alloc_consistent(dev, 4096, &icom_port->xmitRestart_pci);
+ if (!icom_port->xmitRestart) {
+ dev_err(&dev->dev,
+ "Can not allocate xmit Restart buffer\n");
+ free_port_memory(icom_port);
+ return -ENOMEM;
+ }
+
+ memset(icom_port->statStg, 0, 4096);
+
+	/*
+	 * FODs: Frame Out Descriptor queue, a FIFO of descriptors for
+	 * frames that are to be transmitted.
+	 */
+
+ stgAddr = (unsigned long) icom_port->statStg;
+ for (index = 0; index < NUM_XBUFFS; index++) {
+ trace(icom_port, "FOD_ADDR", stgAddr);
+ stgAddr = stgAddr + sizeof(icom_port->statStg->xmit[0]);
+ if (index < (NUM_XBUFFS - 1)) {
+ memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
+ icom_port->statStg->xmit[index].leLengthASD =
+ (unsigned short int) cpu_to_le16(XMIT_BUFF_SZ);
+ trace(icom_port, "FOD_ADDR", stgAddr);
+ trace(icom_port, "FOD_XBUFF",
+ (unsigned long) icom_port->xmit_buf);
+ icom_port->statStg->xmit[index].leBuffer =
+ cpu_to_le32(icom_port->xmit_buf_pci);
+ } else if (index == (NUM_XBUFFS - 1)) {
+ memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
+ icom_port->statStg->xmit[index].leLengthASD =
+ (unsigned short int) cpu_to_le16(XMIT_BUFF_SZ);
+ trace(icom_port, "FOD_XBUFF",
+ (unsigned long) icom_port->xmit_buf);
+ icom_port->statStg->xmit[index].leBuffer =
+ cpu_to_le32(icom_port->xmit_buf_pci);
+ } else {
+ memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
+ }
+ }
+	/* FIDs: receive-frame descriptors, chained into a circular list
+	   (the receive counterpart of the FODs above) */
+ startStgAddr = stgAddr;
+
+ /* fill in every entry, even if no buffer */
+ for (index = 0; index < NUM_RBUFFS; index++) {
+ trace(icom_port, "FID_ADDR", stgAddr);
+ stgAddr = stgAddr + sizeof(icom_port->statStg->rcv[0]);
+ icom_port->statStg->rcv[index].leLength = 0;
+ icom_port->statStg->rcv[index].WorkingLength =
+ (unsigned short int) cpu_to_le16(RCV_BUFF_SZ);
+ if (index < (NUM_RBUFFS - 1) ) {
+ offset = stgAddr - (unsigned long) icom_port->statStg;
+ icom_port->statStg->rcv[index].leNext =
+ cpu_to_le32(icom_port-> statStg_pci + offset);
+ trace(icom_port, "FID_RBUFF",
+ (unsigned long) icom_port->recv_buf);
+ icom_port->statStg->rcv[index].leBuffer =
+ cpu_to_le32(icom_port->recv_buf_pci);
+ } else if (index == (NUM_RBUFFS -1) ) {
+ offset = startStgAddr - (unsigned long) icom_port->statStg;
+ icom_port->statStg->rcv[index].leNext =
+ cpu_to_le32(icom_port-> statStg_pci + offset);
+ trace(icom_port, "FID_RBUFF",
+ (unsigned long) icom_port->recv_buf + 2048);
+ icom_port->statStg->rcv[index].leBuffer =
+ cpu_to_le32(icom_port->recv_buf_pci + 2048);
+ } else {
+ icom_port->statStg->rcv[index].leNext = 0;
+ icom_port->statStg->rcv[index].leBuffer = 0;
+ }
+ }
+
+ return 0;
+}
+
+static void stop_processor(struct icom_port *icom_port)
+{
+ unsigned long temp;
+ unsigned long flags;
+ int port;
+
+ spin_lock_irqsave(&icom_lock, flags);
+
+ port = icom_port->port;
+ if (port == 0 || port == 1)
+ stop_proc[port].global_control_reg = &icom_port->global_reg->control;
+ else
+ stop_proc[port].global_control_reg = &icom_port->global_reg->control_2;
+
+
+ if (port < 4) {
+ temp = readl(stop_proc[port].global_control_reg);
+ temp =
+ (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id;
+ writel(temp, stop_proc[port].global_control_reg);
+
+ /* write flush */
+ readl(stop_proc[port].global_control_reg);
+ } else {
+ dev_err(&icom_port->adapter->pci_dev->dev,
+ "Invalid port assignment\n");
+ }
+
+ spin_unlock_irqrestore(&icom_lock, flags);
+}
+
+static void start_processor(struct icom_port *icom_port)
+{
+ unsigned long temp;
+ unsigned long flags;
+ int port;
+
+ spin_lock_irqsave(&icom_lock, flags);
+
+ port = icom_port->port;
+ if (port == 0 || port == 1)
+ start_proc[port].global_control_reg = &icom_port->global_reg->control;
+ else
+ start_proc[port].global_control_reg = &icom_port->global_reg->control_2;
+ if (port < 4) {
+ temp = readl(start_proc[port].global_control_reg);
+ temp =
+ (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id;
+ writel(temp, start_proc[port].global_control_reg);
+
+ /* write flush */
+ readl(start_proc[port].global_control_reg);
+ } else {
+ dev_err(&icom_port->adapter->pci_dev->dev,
+ "Invalid port assignment\n");
+ }
+
+ spin_unlock_irqrestore(&icom_lock, flags);
+}
+
+static void load_code(struct icom_port *icom_port)
+{
+ const struct firmware *fw;
+ char __iomem *iram_ptr;
+ int index;
+ int status = 0;
+ void __iomem *dram_ptr = icom_port->dram;
+ dma_addr_t temp_pci;
+ unsigned char *new_page = NULL;
+ unsigned char cable_id = NO_CABLE;
+ struct pci_dev *dev = icom_port->adapter->pci_dev;
+
+ /* Clear out any pending interrupts */
+ writew(0x3FFF, icom_port->int_reg);
+
+ trace(icom_port, "CLEAR_INTERRUPTS", 0);
+
+ /* Stop processor */
+ stop_processor(icom_port);
+
+ /* Zero out DRAM */
+ memset_io(dram_ptr, 0, 512);
+
+ /* Load Call Setup into Adapter */
+ if (request_firmware(&fw, "icom_call_setup.bin", &dev->dev) < 0) {
+ dev_err(&dev->dev,"Unable to load icom_call_setup.bin firmware image\n");
+ status = -1;
+ goto load_code_exit;
+ }
+
+ if (fw->size > ICOM_DCE_IRAM_OFFSET) {
+ dev_err(&dev->dev, "Invalid firmware image for icom_call_setup.bin found.\n");
+ release_firmware(fw);
+ status = -1;
+ goto load_code_exit;
+ }
+
+ iram_ptr = (char __iomem *)icom_port->dram + ICOM_IRAM_OFFSET;
+ for (index = 0; index < fw->size; index++)
+ writeb(fw->data[index], &iram_ptr[index]);
+
+ release_firmware(fw);
+
+ /* Load Resident DCE portion of Adapter */
+ if (request_firmware(&fw, "icom_res_dce.bin", &dev->dev) < 0) {
+ dev_err(&dev->dev,"Unable to load icom_res_dce.bin firmware image\n");
+ status = -1;
+ goto load_code_exit;
+ }
+
+ if (fw->size > ICOM_IRAM_SIZE) {
+ dev_err(&dev->dev, "Invalid firmware image for icom_res_dce.bin found.\n");
+ release_firmware(fw);
+ status = -1;
+ goto load_code_exit;
+ }
+
+ iram_ptr = (char __iomem *) icom_port->dram + ICOM_IRAM_OFFSET;
+ for (index = ICOM_DCE_IRAM_OFFSET; index < fw->size; index++)
+ writeb(fw->data[index], &iram_ptr[index]);
+
+ release_firmware(fw);
+
+ /* Set Hardware level */
+ if (icom_port->adapter->version == ADAPTER_V2)
+ writeb(V2_HARDWARE, &(icom_port->dram->misc_flags));
+
+ /* Start the processor in Adapter */
+ start_processor(icom_port);
+
+ writeb((HDLC_PPP_PURE_ASYNC | HDLC_FF_FILL),
+ &(icom_port->dram->HDLCConfigReg));
+ writeb(0x04, &(icom_port->dram->FlagFillIdleTimer)); /* 0.5 seconds */
+ writeb(0x00, &(icom_port->dram->CmdReg));
+ writeb(0x10, &(icom_port->dram->async_config3));
+ writeb((ICOM_ACFG_DRIVE1 | ICOM_ACFG_NO_PARITY | ICOM_ACFG_8BPC |
+ ICOM_ACFG_1STOP_BIT), &(icom_port->dram->async_config2));
+
+	/*
+	 * Set up data in icom DRAM to indicate where the personality
+	 * code is located and its length.
+	 */
+ new_page = pci_alloc_consistent(dev, 4096, &temp_pci);
+
+ if (!new_page) {
+ dev_err(&dev->dev, "Can not allocate DMA buffer\n");
+ status = -1;
+ goto load_code_exit;
+ }
+
+ if (request_firmware(&fw, "icom_asc.bin", &dev->dev) < 0) {
+ dev_err(&dev->dev,"Unable to load icom_asc.bin firmware image\n");
+ status = -1;
+ goto load_code_exit;
+ }
+
+ if (fw->size > ICOM_DCE_IRAM_OFFSET) {
+ dev_err(&dev->dev, "Invalid firmware image for icom_asc.bin found.\n");
+ release_firmware(fw);
+ status = -1;
+ goto load_code_exit;
+ }
+
+ for (index = 0; index < fw->size; index++)
+ new_page[index] = fw->data[index];
+
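+	/* The adapter takes the personality-code length in 16-byte units. */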
+	writeb((char) ((fw->size + 16)/16), &icom_port->dram->mac_length);
+	writel(temp_pci, &icom_port->dram->mac_load_addr);
+
+	release_firmware(fw);
+
+	/*
+	 * Setting the sync register to 0x80 causes the adapter to start
+	 * downloading the personality code into adapter instruction RAM.
+	 * Once the code is loaded, it will begin executing and, based on
+	 * the information provided above, will start DMAing data from
+	 * shared memory to adapter DRAM.
+	 *
+	 * The wait loop below verifies that this write operation has been
+	 * done and processed.
+	 */
+ writeb(START_DOWNLOAD, &icom_port->dram->sync);
+
+ /* Wait max 1 Sec for data download and processor to start */
+ for (index = 0; index < 10; index++) {
+ msleep(100);
+ if (readb(&icom_port->dram->misc_flags) & ICOM_HDW_ACTIVE)
+ break;
+ }
+
+ if (index == 10)
+ status = -1;
+
+ /*
+ * check Cable ID
+ */
+ cable_id = readb(&icom_port->dram->cable_id);
+
+ if (cable_id & ICOM_CABLE_ID_VALID) {
+ /* Get cable ID into the lower 4 bits (standard form) */
+ cable_id = (cable_id & ICOM_CABLE_ID_MASK) >> 4;
+ icom_port->cable_id = cable_id;
+ } else {
+ dev_err(&dev->dev,"Invalid or no cable attached\n");
+ icom_port->cable_id = NO_CABLE;
+ }
+
+ load_code_exit:
+
+ if (status != 0) {
+ /* Clear out any pending interrupts */
+ writew(0x3FFF, icom_port->int_reg);
+
+ /* Turn off port */
+ writeb(ICOM_DISABLE, &(icom_port->dram->disable));
+
+ /* Stop processor */
+ stop_processor(icom_port);
+
+		dev_err(&icom_port->adapter->pci_dev->dev,"Port not operational\n");
+ }
+
+ if (new_page != NULL)
+ pci_free_consistent(dev, 4096, new_page, temp_pci);
+}
+
+static int startup(struct icom_port *icom_port)
+{
+ unsigned long temp;
+ unsigned char cable_id, raw_cable_id;
+ unsigned long flags;
+ int port;
+
+ trace(icom_port, "STARTUP", 0);
+
+ if (!icom_port->dram) {
+ /* should NEVER be NULL */
+ dev_err(&icom_port->adapter->pci_dev->dev,
+ "Unusable Port, port configuration missing\n");
+ return -ENODEV;
+ }
+
+ /*
+ * check Cable ID
+ */
+ raw_cable_id = readb(&icom_port->dram->cable_id);
+ trace(icom_port, "CABLE_ID", raw_cable_id);
+
+ /* Get cable ID into the lower 4 bits (standard form) */
+ cable_id = (raw_cable_id & ICOM_CABLE_ID_MASK) >> 4;
+
+ /* Check for valid Cable ID */
+ if (!(raw_cable_id & ICOM_CABLE_ID_VALID) ||
+ (cable_id != icom_port->cable_id)) {
+
+ /* reload adapter code, pick up any potential changes in cable id */
+ load_code(icom_port);
+
+ /* still no sign of cable, error out */
+ raw_cable_id = readb(&icom_port->dram->cable_id);
+ cable_id = (raw_cable_id & ICOM_CABLE_ID_MASK) >> 4;
+ if (!(raw_cable_id & ICOM_CABLE_ID_VALID) ||
+ (icom_port->cable_id == NO_CABLE))
+ return -EIO;
+ }
+
+ /*
+ * Finally, clear and enable interrupts
+ */
+ spin_lock_irqsave(&icom_lock, flags);
+ port = icom_port->port;
+ if (port == 0 || port == 1)
+ int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask;
+ else
+ int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask_2;
+
+ if (port == 0 || port == 2)
+ writew(0x00FF, icom_port->int_reg);
+ else
+ writew(0x3F00, icom_port->int_reg);
+ if (port < 4) {
+ temp = readl(int_mask_tbl[port].global_int_mask);
+ writel(temp & ~int_mask_tbl[port].processor_id, int_mask_tbl[port].global_int_mask);
+
+ /* write flush */
+ readl(int_mask_tbl[port].global_int_mask);
+ } else {
+ dev_err(&icom_port->adapter->pci_dev->dev,
+ "Invalid port assignment\n");
+ }
+
+ spin_unlock_irqrestore(&icom_lock, flags);
+ return 0;
+}
+
+static void shutdown(struct icom_port *icom_port)
+{
+ unsigned long temp;
+ unsigned char cmdReg;
+ unsigned long flags;
+ int port;
+
+ spin_lock_irqsave(&icom_lock, flags);
+ trace(icom_port, "SHUTDOWN", 0);
+
+ /*
+ * disable all interrupts
+ */
+ port = icom_port->port;
+ if (port == 0 || port == 1)
+ int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask;
+ else
+ int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask_2;
+
+ if (port < 4) {
+ temp = readl(int_mask_tbl[port].global_int_mask);
+ writel(temp | int_mask_tbl[port].processor_id, int_mask_tbl[port].global_int_mask);
+
+ /* write flush */
+ readl(int_mask_tbl[port].global_int_mask);
+ } else {
+ dev_err(&icom_port->adapter->pci_dev->dev,
+ "Invalid port assignment\n");
+ }
+ spin_unlock_irqrestore(&icom_lock, flags);
+
+ /*
+ * disable break condition
+ */
+ cmdReg = readb(&icom_port->dram->CmdReg);
+ if (cmdReg & CMD_SND_BREAK) {
+ writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg);
+ }
+}
+
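+/*
+ * Copy as much pending data as fits from the serial core's circular
+ * buffer into the port's DMA transmit buffer and tell the adapter to
+ * start sending.  Returns the number of bytes queued; 0 if the
+ * previous transmit is still in progress or there was nothing to send.
+ */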
+static int icom_write(struct uart_port *port)
+{
+ unsigned long data_count;
+ unsigned char cmdReg;
+ unsigned long offset;
+ int temp_tail = port->state->xmit.tail;
+
+ trace(ICOM_PORT, "WRITE", 0);
+
+ if (cpu_to_le16(ICOM_PORT->statStg->xmit[0].flags) &
+ SA_FLAGS_READY_TO_XMIT) {
+ trace(ICOM_PORT, "WRITE_FULL", 0);
+ return 0;
+ }
+
+ data_count = 0;
+ while ((port->state->xmit.head != temp_tail) &&
+ (data_count <= XMIT_BUFF_SZ)) {
+
+ ICOM_PORT->xmit_buf[data_count++] =
+ port->state->xmit.buf[temp_tail];
+
+ temp_tail++;
+ temp_tail &= (UART_XMIT_SIZE - 1);
+ }
+
+ if (data_count) {
+ ICOM_PORT->statStg->xmit[0].flags =
+ cpu_to_le16(SA_FLAGS_READY_TO_XMIT);
+ ICOM_PORT->statStg->xmit[0].leLength =
+ cpu_to_le16(data_count);
+ offset =
+ (unsigned long) &ICOM_PORT->statStg->xmit[0] -
+ (unsigned long) ICOM_PORT->statStg;
+ *ICOM_PORT->xmitRestart =
+ cpu_to_le32(ICOM_PORT->statStg_pci + offset);
+ cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+ writeb(cmdReg | CMD_XMIT_RCV_ENABLE,
+ &ICOM_PORT->dram->CmdReg);
+ writeb(START_XMIT, &ICOM_PORT->dram->StartXmitCmd);
+ trace(ICOM_PORT, "WRITE_START", data_count);
+ /* write flush */
+ readb(&ICOM_PORT->dram->StartXmitCmd);
+ }
+
+ return data_count;
+}
+
+static inline void check_modem_status(struct icom_port *icom_port)
+{
+ static char old_status = 0;
+ char delta_status;
+ unsigned char status;
+
+ spin_lock(&icom_port->uart_port.lock);
+
+ /*modem input register */
+ status = readb(&icom_port->dram->isr);
+ trace(icom_port, "CHECK_MODEM", status);
+ delta_status = status ^ old_status;
+ if (delta_status) {
+ if (delta_status & ICOM_RI)
+ icom_port->uart_port.icount.rng++;
+ if (delta_status & ICOM_DSR)
+ icom_port->uart_port.icount.dsr++;
+ if (delta_status & ICOM_DCD)
+ uart_handle_dcd_change(&icom_port->uart_port,
+ delta_status & ICOM_DCD);
+ if (delta_status & ICOM_CTS)
+ uart_handle_cts_change(&icom_port->uart_port,
+ delta_status & ICOM_CTS);
+
+ wake_up_interruptible(&icom_port->uart_port.state->
+ port.delta_msr_wait);
+ old_status = status;
+ }
+ spin_unlock(&icom_port->uart_port.lock);
+}
+
+static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
+{
+ unsigned short int count;
+ int i;
+
+ if (port_int_reg & (INT_XMIT_COMPLETED)) {
+ trace(icom_port, "XMIT_COMPLETE", 0);
+
+ /* clear buffer in use bit */
+ icom_port->statStg->xmit[0].flags &=
+ cpu_to_le16(~SA_FLAGS_READY_TO_XMIT);
+
+ count = (unsigned short int)
+ cpu_to_le16(icom_port->statStg->xmit[0].leLength);
+ icom_port->uart_port.icount.tx += count;
+
+ for (i=0; i<count &&
+ !uart_circ_empty(&icom_port->uart_port.state->xmit); i++) {
+
+ icom_port->uart_port.state->xmit.tail++;
+ icom_port->uart_port.state->xmit.tail &=
+ (UART_XMIT_SIZE - 1);
+ }
+
+ if (!icom_write(&icom_port->uart_port))
+ /* activate write queue */
+ uart_write_wakeup(&icom_port->uart_port);
+ } else
+ trace(icom_port, "XMIT_DISABLED", 0);
+}
+
+static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
+{
+ short int count, rcv_buff;
+ struct tty_struct *tty = icom_port->uart_port.state->port.tty;
+ unsigned short int status;
+ struct uart_icount *icount;
+ unsigned long offset;
+ unsigned char flag;
+
+ trace(icom_port, "RCV_COMPLETE", 0);
+ rcv_buff = icom_port->next_rcv;
+
+ status = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].flags);
+ while (status & SA_FL_RCV_DONE) {
+ int first = -1;
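+		/* First byte of the frame, captured below so that a framing
+		   error on a NUL can be reported as a BREAK. */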
+
+ trace(icom_port, "FID_STATUS", status);
+ count = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].leLength);
+
+ trace(icom_port, "RCV_COUNT", count);
+
+ trace(icom_port, "REAL_COUNT", count);
+
+ offset =
+ cpu_to_le32(icom_port->statStg->rcv[rcv_buff].leBuffer) -
+ icom_port->recv_buf_pci;
+
+ /* Block copy all but the last byte as this may have status */
+ if (count > 0) {
+ first = icom_port->recv_buf[offset];
+ tty_insert_flip_string(tty, icom_port->recv_buf + offset, count - 1);
+ }
+
+ icount = &icom_port->uart_port.icount;
+ icount->rx += count;
+
+ /* Break detect logic */
+ if ((status & SA_FLAGS_FRAME_ERROR)
+ && first == 0) {
+ status &= ~SA_FLAGS_FRAME_ERROR;
+ status |= SA_FLAGS_BREAK_DET;
+ trace(icom_port, "BREAK_DET", 0);
+ }
+
+ flag = TTY_NORMAL;
+
+ if (status &
+ (SA_FLAGS_BREAK_DET | SA_FLAGS_PARITY_ERROR |
+ SA_FLAGS_FRAME_ERROR | SA_FLAGS_OVERRUN)) {
+
+ if (status & SA_FLAGS_BREAK_DET)
+ icount->brk++;
+ if (status & SA_FLAGS_PARITY_ERROR)
+ icount->parity++;
+ if (status & SA_FLAGS_FRAME_ERROR)
+ icount->frame++;
+ if (status & SA_FLAGS_OVERRUN)
+ icount->overrun++;
+
+ /*
+ * Now check to see if character should be
+ * ignored, and mask off conditions which
+ * should be ignored.
+ */
+ if (status & icom_port->ignore_status_mask) {
+ trace(icom_port, "IGNORE_CHAR", 0);
+ goto ignore_char;
+ }
+
+ status &= icom_port->read_status_mask;
+
+ if (status & SA_FLAGS_BREAK_DET) {
+ flag = TTY_BREAK;
+ } else if (status & SA_FLAGS_PARITY_ERROR) {
+ trace(icom_port, "PARITY_ERROR", 0);
+ flag = TTY_PARITY;
+ } else if (status & SA_FLAGS_FRAME_ERROR)
+ flag = TTY_FRAME;
+
+ }
+
+ tty_insert_flip_char(tty, *(icom_port->recv_buf + offset + count - 1), flag);
+
+ if (status & SA_FLAGS_OVERRUN)
+ /*
+ * Overrun is special, since it's
+ * reported immediately, and doesn't
+ * affect the current character
+ */
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ignore_char:
+ icom_port->statStg->rcv[rcv_buff].flags = 0;
+ icom_port->statStg->rcv[rcv_buff].leLength = 0;
+ icom_port->statStg->rcv[rcv_buff].WorkingLength =
+ (unsigned short int) cpu_to_le16(RCV_BUFF_SZ);
+
+ rcv_buff++;
+ if (rcv_buff == NUM_RBUFFS)
+ rcv_buff = 0;
+
+ status = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].flags);
+ }
+ icom_port->next_rcv = rcv_buff;
+ tty_flip_buffer_push(tty);
+}
+
+static void process_interrupt(u16 port_int_reg,
+ struct icom_port *icom_port)
+{
+
+ spin_lock(&icom_port->uart_port.lock);
+ trace(icom_port, "INTERRUPT", port_int_reg);
+
+ if (port_int_reg & (INT_XMIT_COMPLETED | INT_XMIT_DISABLED))
+ xmit_interrupt(port_int_reg, icom_port);
+
+ if (port_int_reg & INT_RCV_COMPLETED)
+ recv_interrupt(port_int_reg, icom_port);
+
+ spin_unlock(&icom_port->uart_port.lock);
+}
+
+static irqreturn_t icom_interrupt(int irq, void *dev_id)
+{
+ void __iomem * int_reg;
+ u32 adapter_interrupts;
+ u16 port_int_reg;
+ struct icom_adapter *icom_adapter;
+ struct icom_port *icom_port;
+
+ /* find icom_port for this interrupt */
+ icom_adapter = (struct icom_adapter *) dev_id;
+
+ if (icom_adapter->version == ADAPTER_V2) {
+ int_reg = icom_adapter->base_addr + 0x8024;
+
+ adapter_interrupts = readl(int_reg);
+
+ if (adapter_interrupts & 0x00003FFF) {
+ /* port 2 interrupt, NOTE: for all ADAPTER_V2, port 2 will be active */
+ icom_port = &icom_adapter->port_info[2];
+ port_int_reg = (u16) adapter_interrupts;
+ process_interrupt(port_int_reg, icom_port);
+ check_modem_status(icom_port);
+ }
+ if (adapter_interrupts & 0x3FFF0000) {
+ /* port 3 interrupt */
+ icom_port = &icom_adapter->port_info[3];
+ if (icom_port->status == ICOM_PORT_ACTIVE) {
+ port_int_reg =
+ (u16) (adapter_interrupts >> 16);
+ process_interrupt(port_int_reg, icom_port);
+ check_modem_status(icom_port);
+ }
+ }
+
+ /* Clear out any pending interrupts */
+ writel(adapter_interrupts, int_reg);
+
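+		/* Ports 0 and 1 report through a second interrupt register. */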
+ int_reg = icom_adapter->base_addr + 0x8004;
+ } else {
+ int_reg = icom_adapter->base_addr + 0x4004;
+ }
+
+ adapter_interrupts = readl(int_reg);
+
+ if (adapter_interrupts & 0x00003FFF) {
+ /* port 0 interrupt, NOTE: for all adapters, port 0 will be active */
+ icom_port = &icom_adapter->port_info[0];
+ port_int_reg = (u16) adapter_interrupts;
+ process_interrupt(port_int_reg, icom_port);
+ check_modem_status(icom_port);
+ }
+ if (adapter_interrupts & 0x3FFF0000) {
+ /* port 1 interrupt */
+ icom_port = &icom_adapter->port_info[1];
+ if (icom_port->status == ICOM_PORT_ACTIVE) {
+ port_int_reg = (u16) (adapter_interrupts >> 16);
+ process_interrupt(port_int_reg, icom_port);
+ check_modem_status(icom_port);
+ }
+ }
+
+ /* Clear out any pending interrupts */
+ writel(adapter_interrupts, int_reg);
+
+ /* flush the write */
+ adapter_interrupts = readl(int_reg);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ------------------------------------------------------------------
+ * Begin serial-core API
+ * ------------------------------------------------------------------
+ */
+static unsigned int icom_tx_empty(struct uart_port *port)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (cpu_to_le16(ICOM_PORT->statStg->xmit[0].flags) &
+ SA_FLAGS_READY_TO_XMIT)
+ ret = TIOCSER_TEMT;
+ else
+ ret = 0;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+static void icom_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ unsigned char local_osr;
+
+ trace(ICOM_PORT, "SET_MODEM", 0);
+ local_osr = readb(&ICOM_PORT->dram->osr);
+
+ if (mctrl & TIOCM_RTS) {
+ trace(ICOM_PORT, "RAISE_RTS", 0);
+ local_osr |= ICOM_RTS;
+ } else {
+ trace(ICOM_PORT, "LOWER_RTS", 0);
+ local_osr &= ~ICOM_RTS;
+ }
+
+ if (mctrl & TIOCM_DTR) {
+ trace(ICOM_PORT, "RAISE_DTR", 0);
+ local_osr |= ICOM_DTR;
+ } else {
+ trace(ICOM_PORT, "LOWER_DTR", 0);
+ local_osr &= ~ICOM_DTR;
+ }
+
+ writeb(local_osr, &ICOM_PORT->dram->osr);
+}
+
+static unsigned int icom_get_mctrl(struct uart_port *port)
+{
+ unsigned char status;
+ unsigned int result;
+
+ trace(ICOM_PORT, "GET_MODEM", 0);
+
+ status = readb(&ICOM_PORT->dram->isr);
+
+ result = ((status & ICOM_DCD) ? TIOCM_CAR : 0)
+ | ((status & ICOM_RI) ? TIOCM_RNG : 0)
+ | ((status & ICOM_DSR) ? TIOCM_DSR : 0)
+ | ((status & ICOM_CTS) ? TIOCM_CTS : 0);
+ return result;
+}
+
+static void icom_stop_tx(struct uart_port *port)
+{
+ unsigned char cmdReg;
+
+ trace(ICOM_PORT, "STOP", 0);
+ cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+ writeb(cmdReg | CMD_HOLD_XMIT, &ICOM_PORT->dram->CmdReg);
+}
+
+static void icom_start_tx(struct uart_port *port)
+{
+ unsigned char cmdReg;
+
+ trace(ICOM_PORT, "START", 0);
+ cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+ if ((cmdReg & CMD_HOLD_XMIT) == CMD_HOLD_XMIT)
+ writeb(cmdReg & ~CMD_HOLD_XMIT,
+ &ICOM_PORT->dram->CmdReg);
+
+ icom_write(port);
+}
+
+static void icom_send_xchar(struct uart_port *port, char ch)
+{
+ unsigned char xdata;
+ int index;
+ unsigned long flags;
+
+ trace(ICOM_PORT, "SEND_XCHAR", ch);
+
+ /* wait .1 sec to send char */
+ for (index = 0; index < 10; index++) {
+ spin_lock_irqsave(&port->lock, flags);
+ xdata = readb(&ICOM_PORT->dram->xchar);
+ if (xdata == 0x00) {
+ trace(ICOM_PORT, "QUICK_WRITE", 0);
+ writeb(ch, &ICOM_PORT->dram->xchar);
+
+ /* flush write operation */
+ xdata = readb(&ICOM_PORT->dram->xchar);
+ spin_unlock_irqrestore(&port->lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ msleep(10);
+ }
+}
+
+static void icom_stop_rx(struct uart_port *port)
+{
+ unsigned char cmdReg;
+
+ cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+ writeb(cmdReg & ~CMD_RCV_ENABLE, &ICOM_PORT->dram->CmdReg);
+}
+
+static void icom_enable_ms(struct uart_port *port)
+{
+ /* no-op */
+}
+
+static void icom_break(struct uart_port *port, int break_state)
+{
+ unsigned char cmdReg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ trace(ICOM_PORT, "BREAK", 0);
+ cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+ if (break_state == -1) {
+ writeb(cmdReg | CMD_SND_BREAK, &ICOM_PORT->dram->CmdReg);
+ } else {
+ writeb(cmdReg & ~CMD_SND_BREAK, &ICOM_PORT->dram->CmdReg);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int icom_open(struct uart_port *port)
+{
+ int retval;
+
+ kref_get(&ICOM_PORT->adapter->kref);
+ retval = startup(ICOM_PORT);
+
+ if (retval) {
+ kref_put(&ICOM_PORT->adapter->kref, icom_kref_release);
+ trace(ICOM_PORT, "STARTUP_ERROR", 0);
+ return retval;
+ }
+
+ return 0;
+}
+
+static void icom_close(struct uart_port *port)
+{
+ unsigned char cmdReg;
+
+ trace(ICOM_PORT, "CLOSE", 0);
+
+ /* stop receiver */
+ cmdReg = readb(&ICOM_PORT->dram->CmdReg);
+ writeb(cmdReg & (unsigned char) ~CMD_RCV_ENABLE,
+ &ICOM_PORT->dram->CmdReg);
+
+ shutdown(ICOM_PORT);
+
+ kref_put(&ICOM_PORT->adapter->kref, icom_kref_release);
+}
+
+static void icom_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old_termios)
+{
+ int baud;
+ unsigned cflag, iflag;
+ char new_config2;
+ char new_config3 = 0;
+ char tmp_byte;
+ int index;
+ int rcv_buff, xmit_buff;
+ unsigned long offset;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ trace(ICOM_PORT, "CHANGE_SPEED", 0);
+
+ cflag = termios->c_cflag;
+ iflag = termios->c_iflag;
+
+ new_config2 = ICOM_ACFG_DRIVE1;
+
+ /* byte size and parity */
+ switch (cflag & CSIZE) {
+ case CS5: /* 5 bits/char */
+ new_config2 |= ICOM_ACFG_5BPC;
+ break;
+ case CS6: /* 6 bits/char */
+ new_config2 |= ICOM_ACFG_6BPC;
+ break;
+ case CS7: /* 7 bits/char */
+ new_config2 |= ICOM_ACFG_7BPC;
+ break;
+ case CS8: /* 8 bits/char */
+ new_config2 |= ICOM_ACFG_8BPC;
+ break;
+ default:
+ break;
+ }
+ if (cflag & CSTOPB) {
+ /* 2 stop bits */
+ new_config2 |= ICOM_ACFG_2STOP_BIT;
+ }
+ if (cflag & PARENB) {
+ /* parity bit enabled */
+ new_config2 |= ICOM_ACFG_PARITY_ENAB;
+ trace(ICOM_PORT, "PARENB", 0);
+ }
+ if (cflag & PARODD) {
+ /* odd parity */
+ new_config2 |= ICOM_ACFG_PARITY_ODD;
+ trace(ICOM_PORT, "PARODD", 0);
+ }
+
+	/* Determine the index into the baud-rate table for the requested rate */
+ baud = uart_get_baud_rate(port, termios, old_termios,
+ icom_acfg_baud[0],
+ icom_acfg_baud[BAUD_TABLE_LIMIT]);
+ if (!baud)
+ baud = 9600; /* B0 transition handled in rs_set_termios */
+
+ for (index = 0; index < BAUD_TABLE_LIMIT; index++) {
+ if (icom_acfg_baud[index] == baud) {
+ new_config3 = index;
+ break;
+ }
+ }
+
+ uart_update_timeout(port, cflag, baud);
+
+ /* CTS flow control flag and modem status interrupts */
+ tmp_byte = readb(&(ICOM_PORT->dram->HDLCConfigReg));
+ if (cflag & CRTSCTS)
+ tmp_byte |= HDLC_HDW_FLOW;
+ else
+ tmp_byte &= ~HDLC_HDW_FLOW;
+ writeb(tmp_byte, &(ICOM_PORT->dram->HDLCConfigReg));
+
+ /*
+ * Set up parity check flag
+ */
+ ICOM_PORT->read_status_mask = SA_FLAGS_OVERRUN | SA_FL_RCV_DONE;
+ if (iflag & INPCK)
+ ICOM_PORT->read_status_mask |=
+ SA_FLAGS_FRAME_ERROR | SA_FLAGS_PARITY_ERROR;
+
+ if ((iflag & BRKINT) || (iflag & PARMRK))
+ ICOM_PORT->read_status_mask |= SA_FLAGS_BREAK_DET;
+
+ /*
+ * Characters to ignore
+ */
+ ICOM_PORT->ignore_status_mask = 0;
+ if (iflag & IGNPAR)
+ ICOM_PORT->ignore_status_mask |=
+ SA_FLAGS_PARITY_ERROR | SA_FLAGS_FRAME_ERROR;
+ if (iflag & IGNBRK) {
+ ICOM_PORT->ignore_status_mask |= SA_FLAGS_BREAK_DET;
+ /*
+		 * If we're ignoring parity and break indicators, ignore
+		 * overruns too (for real raw support).
+ */
+ if (iflag & IGNPAR)
+ ICOM_PORT->ignore_status_mask |= SA_FLAGS_OVERRUN;
+ }
+
+ /*
+ * !!! ignore all characters if CREAD is not set
+ */
+ if ((cflag & CREAD) == 0)
+ ICOM_PORT->ignore_status_mask |= SA_FL_RCV_DONE;
+
+ /* Turn off Receiver to prepare for reset */
+ writeb(CMD_RCV_DISABLE, &ICOM_PORT->dram->CmdReg);
+
+ for (index = 0; index < 10; index++) {
+ if (readb(&ICOM_PORT->dram->PrevCmdReg) == 0x00) {
+ break;
+ }
+ }
+
+ /* clear all current buffers of data */
+ for (rcv_buff = 0; rcv_buff < NUM_RBUFFS; rcv_buff++) {
+ ICOM_PORT->statStg->rcv[rcv_buff].flags = 0;
+ ICOM_PORT->statStg->rcv[rcv_buff].leLength = 0;
+ ICOM_PORT->statStg->rcv[rcv_buff].WorkingLength =
+ (unsigned short int) cpu_to_le16(RCV_BUFF_SZ);
+ }
+
+ for (xmit_buff = 0; xmit_buff < NUM_XBUFFS; xmit_buff++) {
+ ICOM_PORT->statStg->xmit[xmit_buff].flags = 0;
+ }
+
+ /* activate changes and start xmit and receiver here */
+ /* Enable the receiver */
+ writeb(new_config3, &(ICOM_PORT->dram->async_config3));
+ writeb(new_config2, &(ICOM_PORT->dram->async_config2));
+ tmp_byte = readb(&(ICOM_PORT->dram->HDLCConfigReg));
+ tmp_byte |= HDLC_PPP_PURE_ASYNC | HDLC_FF_FILL;
+ writeb(tmp_byte, &(ICOM_PORT->dram->HDLCConfigReg));
+ writeb(0x04, &(ICOM_PORT->dram->FlagFillIdleTimer)); /* 0.5 seconds */
+ writeb(0xFF, &(ICOM_PORT->dram->ier)); /* enable modem signal interrupts */
+
+ /* reset processor */
+ writeb(CMD_RESTART, &ICOM_PORT->dram->CmdReg);
+
+ for (index = 0; index < 10; index++) {
+ if (readb(&ICOM_PORT->dram->CmdReg) == 0x00) {
+ break;
+ }
+ }
+
+ /* Enable Transmitter and Receiver */
+ offset =
+ (unsigned long) &ICOM_PORT->statStg->rcv[0] -
+ (unsigned long) ICOM_PORT->statStg;
+ writel(ICOM_PORT->statStg_pci + offset,
+ &ICOM_PORT->dram->RcvStatusAddr);
+ ICOM_PORT->next_rcv = 0;
+ ICOM_PORT->put_length = 0;
+ *ICOM_PORT->xmitRestart = 0;
+ writel(ICOM_PORT->xmitRestart_pci,
+ &ICOM_PORT->dram->XmitStatusAddr);
+ trace(ICOM_PORT, "XR_ENAB", 0);
+ writeb(CMD_XMIT_RCV_ENABLE, &ICOM_PORT->dram->CmdReg);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *icom_type(struct uart_port *port)
+{
+ return "icom";
+}
+
+static void icom_release_port(struct uart_port *port)
+{
+}
+
+static int icom_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void icom_config_port(struct uart_port *port, int flags)
+{
+ port->type = PORT_ICOM;
+}
+
+static struct uart_ops icom_ops = {
+ .tx_empty = icom_tx_empty,
+ .set_mctrl = icom_set_mctrl,
+ .get_mctrl = icom_get_mctrl,
+ .stop_tx = icom_stop_tx,
+ .start_tx = icom_start_tx,
+ .send_xchar = icom_send_xchar,
+ .stop_rx = icom_stop_rx,
+ .enable_ms = icom_enable_ms,
+ .break_ctl = icom_break,
+ .startup = icom_open,
+ .shutdown = icom_close,
+ .set_termios = icom_set_termios,
+ .type = icom_type,
+ .release_port = icom_release_port,
+ .request_port = icom_request_port,
+ .config_port = icom_config_port,
+};
+
+#define ICOM_CONSOLE NULL
+
+static struct uart_driver icom_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = ICOM_DRIVER_NAME,
+ .dev_name = "ttyA",
+ .major = ICOM_MAJOR,
+ .minor = ICOM_MINOR_START,
+ .nr = NR_PORTS,
+ .cons = ICOM_CONSOLE,
+};
+
+static int __devinit icom_init_ports(struct icom_adapter *icom_adapter)
+{
+ u32 subsystem_id = icom_adapter->subsystem_id;
+ int i;
+ struct icom_port *icom_port;
+
+ if (icom_adapter->version == ADAPTER_V1) {
+ icom_adapter->numb_ports = 2;
+
+ for (i = 0; i < 2; i++) {
+ icom_port = &icom_adapter->port_info[i];
+ icom_port->port = i;
+ icom_port->status = ICOM_PORT_ACTIVE;
+ icom_port->imbed_modem = ICOM_UNKNOWN;
+ }
+ } else {
+ if (subsystem_id == PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL) {
+ icom_adapter->numb_ports = 4;
+
+ for (i = 0; i < 4; i++) {
+ icom_port = &icom_adapter->port_info[i];
+
+ icom_port->port = i;
+ icom_port->status = ICOM_PORT_ACTIVE;
+ icom_port->imbed_modem = ICOM_IMBED_MODEM;
+ }
+ } else {
+ icom_adapter->numb_ports = 4;
+
+ icom_adapter->port_info[0].port = 0;
+ icom_adapter->port_info[0].status = ICOM_PORT_ACTIVE;
+
+ if (subsystem_id ==
+ PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM) {
+ icom_adapter->port_info[0].imbed_modem = ICOM_IMBED_MODEM;
+ } else {
+ icom_adapter->port_info[0].imbed_modem = ICOM_RVX;
+ }
+
+ icom_adapter->port_info[1].status = ICOM_PORT_OFF;
+
+ icom_adapter->port_info[2].port = 2;
+ icom_adapter->port_info[2].status = ICOM_PORT_ACTIVE;
+ icom_adapter->port_info[2].imbed_modem = ICOM_RVX;
+ icom_adapter->port_info[3].status = ICOM_PORT_OFF;
+ }
+ }
+
+ return 0;
+}
+
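+/*
+ * Fill in the per-port global and interrupt register pointers.  The
+ * 16-bit per-port interrupt registers of a port pair are packed into
+ * a single 32-bit word, hence the "+ 2 - 2 * port" offsets.
+ */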
+static void icom_port_active(struct icom_port *icom_port, struct icom_adapter *icom_adapter, int port_num)
+{
+ if (icom_adapter->version == ADAPTER_V1) {
+ icom_port->global_reg = icom_adapter->base_addr + 0x4000;
+ icom_port->int_reg = icom_adapter->base_addr +
+ 0x4004 + 2 - 2 * port_num;
+ } else {
+ icom_port->global_reg = icom_adapter->base_addr + 0x8000;
+ if (icom_port->port < 2)
+ icom_port->int_reg = icom_adapter->base_addr +
+ 0x8004 + 2 - 2 * icom_port->port;
+ else
+ icom_port->int_reg = icom_adapter->base_addr +
+ 0x8024 + 2 - 2 * (icom_port->port - 2);
+ }
+}
+static int __devinit icom_load_ports(struct icom_adapter *icom_adapter)
+{
+ struct icom_port *icom_port;
+ int port_num;
+
+ for (port_num = 0; port_num < icom_adapter->numb_ports; port_num++) {
+
+ icom_port = &icom_adapter->port_info[port_num];
+
+ if (icom_port->status == ICOM_PORT_ACTIVE) {
+ icom_port_active(icom_port, icom_adapter, port_num);
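+ /* each port owns a 0x2000-byte window of adapter DRAM */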
+ icom_port->dram = icom_adapter->base_addr +
+ 0x2000 * icom_port->port;
+
+ icom_port->adapter = icom_adapter;
+
+ /* get port memory */
+ if (get_port_memory(icom_port) != 0) {
+ dev_err(&icom_port->adapter->pci_dev->dev,
+ "Memory allocation for port FAILED\n");
+ }
+ }
+ }
+ return 0;
+}
+
+static int __devinit icom_alloc_adapter(struct icom_adapter
+ **icom_adapter_ref)
+{
+ int adapter_count = 0;
+ struct icom_adapter *icom_adapter;
+ struct icom_adapter *cur_adapter_entry;
+ struct list_head *tmp;
+
+ icom_adapter = (struct icom_adapter *)
+ kzalloc(sizeof(struct icom_adapter), GFP_KERNEL);
+
+ if (!icom_adapter) {
+ return -ENOMEM;
+ }
+
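+ /*
+ * Walk the index-ordered adapter list and stop at the first gap so
+ * the new adapter reuses the lowest free index; list_add_tail()
+ * below then inserts it just before the entry we stopped at,
+ * keeping the list ordered.
+ */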
+ list_for_each(tmp, &icom_adapter_head) {
+ cur_adapter_entry =
+ list_entry(tmp, struct icom_adapter,
+ icom_adapter_entry);
+ if (cur_adapter_entry->index != adapter_count) {
+ break;
+ }
+ adapter_count++;
+ }
+
+ icom_adapter->index = adapter_count;
+ list_add_tail(&icom_adapter->icom_adapter_entry, tmp);
+
+ *icom_adapter_ref = icom_adapter;
+ return 0;
+}
+
+static void icom_free_adapter(struct icom_adapter *icom_adapter)
+{
+ list_del(&icom_adapter->icom_adapter_entry);
+ kfree(icom_adapter);
+}
+
+static void icom_remove_adapter(struct icom_adapter *icom_adapter)
+{
+ struct icom_port *icom_port;
+ int index;
+
+ for (index = 0; index < icom_adapter->numb_ports; index++) {
+ icom_port = &icom_adapter->port_info[index];
+
+ if (icom_port->status == ICOM_PORT_ACTIVE) {
+ dev_info(&icom_adapter->pci_dev->dev,
+ "Device removed\n");
+
+ uart_remove_one_port(&icom_uart_driver,
+ &icom_port->uart_port);
+
+ /* be sure that DTR and RTS are dropped */
+ writeb(0x00, &icom_port->dram->osr);
+
+ /* Wait 0.1 Sec for simple Init to complete */
+ msleep(100);
+
+ /* Stop processor */
+ stop_processor(icom_port);
+
+ free_port_memory(icom_port);
+ }
+ }
+
+ free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter);
+ iounmap(icom_adapter->base_addr);
+ pci_release_regions(icom_adapter->pci_dev);
+ icom_free_adapter(icom_adapter);
+}
+
+static void icom_kref_release(struct kref *kref)
+{
+ struct icom_adapter *icom_adapter;
+
+ icom_adapter = to_icom_adapter(kref);
+ icom_remove_adapter(icom_adapter);
+}
+
+static int __devinit icom_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ int index;
+ unsigned int command_reg;
+ int retval;
+ struct icom_adapter *icom_adapter;
+ struct icom_port *icom_port;
+
+ retval = pci_enable_device(dev);
+ if (retval) {
+ dev_err(&dev->dev, "Device enable FAILED\n");
+ return retval;
+ }
+
+ if ( (retval = pci_request_regions(dev, "icom"))) {
+ dev_err(&dev->dev, "pci_request_regions FAILED\n");
+ pci_disable_device(dev);
+ return retval;
+ }
+
+ pci_set_master(dev);
+
+ if ( (retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg))) {
+ dev_err(&dev->dev, "PCI Config read FAILED\n");
+ return retval;
+ }
+
+ pci_write_config_dword(dev, PCI_COMMAND,
+ command_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
+ | PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+
+ if (ent->driver_data == ADAPTER_V1) {
+ pci_write_config_dword(dev, 0x44, 0x8300830A);
+ } else {
+ pci_write_config_dword(dev, 0x44, 0x42004200);
+ pci_write_config_dword(dev, 0x48, 0x42004200);
+ }
+
+
+ retval = icom_alloc_adapter(&icom_adapter);
+ if (retval) {
+ dev_err(&dev->dev, "icom_alloc_adapter FAILED\n");
+ retval = -EIO;
+ goto probe_exit0;
+ }
+
+ icom_adapter->base_addr_pci = pci_resource_start(dev, 0);
+ icom_adapter->pci_dev = dev;
+ icom_adapter->version = ent->driver_data;
+ icom_adapter->subsystem_id = ent->subdevice;
+
+
+ retval = icom_init_ports(icom_adapter);
+ if (retval) {
+ dev_err(&dev->dev, "Port configuration failed\n");
+ goto probe_exit1;
+ }
+
+ icom_adapter->base_addr = pci_ioremap_bar(dev, 0);
+
+ if (!icom_adapter->base_addr)
+ goto probe_exit1;
+
+ /* save off irq and request irq line */
+ if ( (retval = request_irq(dev->irq, icom_interrupt,
+ IRQF_DISABLED | IRQF_SHARED, ICOM_DRIVER_NAME,
+ (void *) icom_adapter))) {
+ goto probe_exit2;
+ }
+
+ retval = icom_load_ports(icom_adapter);
+
+ for (index = 0; index < icom_adapter->numb_ports; index++) {
+ icom_port = &icom_adapter->port_info[index];
+
+ if (icom_port->status == ICOM_PORT_ACTIVE) {
+ icom_port->uart_port.irq = icom_port->adapter->pci_dev->irq;
+ icom_port->uart_port.type = PORT_ICOM;
+ icom_port->uart_port.iotype = UPIO_MEM;
+ icom_port->uart_port.membase =
+ (char *) icom_adapter->base_addr_pci;
+ icom_port->uart_port.fifosize = 16;
+ icom_port->uart_port.ops = &icom_ops;
+ icom_port->uart_port.line =
+ icom_port->port + icom_adapter->index * 4;
+ if (uart_add_one_port (&icom_uart_driver, &icom_port->uart_port)) {
+ icom_port->status = ICOM_PORT_OFF;
+ dev_err(&dev->dev, "Device add failed\n");
+ } else
+ dev_info(&dev->dev, "Device added\n");
+ }
+ }
+
+ kref_init(&icom_adapter->kref);
+ return 0;
+
+probe_exit2:
+ iounmap(icom_adapter->base_addr);
+probe_exit1:
+ icom_free_adapter(icom_adapter);
+
+probe_exit0:
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+
+ return retval;
+}
+
+static void __devexit icom_remove(struct pci_dev *dev)
+{
+ struct icom_adapter *icom_adapter;
+ struct list_head *tmp;
+
+ list_for_each(tmp, &icom_adapter_head) {
+ icom_adapter = list_entry(tmp, struct icom_adapter,
+ icom_adapter_entry);
+ if (icom_adapter->pci_dev == dev) {
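+ /* drop the reference taken at probe time; the adapter and
+ * its ports are torn down in icom_kref_release() once the
+ * last reference is gone */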
+ kref_put(&icom_adapter->kref, icom_kref_release);
+ return;
+ }
+ }
+
+ dev_err(&dev->dev, "Unable to find device to remove\n");
+}
+
+static struct pci_driver icom_pci_driver = {
+ .name = ICOM_DRIVER_NAME,
+ .id_table = icom_pci_table,
+ .probe = icom_probe,
+ .remove = __devexit_p(icom_remove),
+};
+
+static int __init icom_init(void)
+{
+ int ret;
+
+ spin_lock_init(&icom_lock);
+
+ ret = uart_register_driver(&icom_uart_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&icom_pci_driver);
+
+ if (ret < 0)
+ uart_unregister_driver(&icom_uart_driver);
+
+ return ret;
+}
+
+static void __exit icom_exit(void)
+{
+ pci_unregister_driver(&icom_pci_driver);
+ uart_unregister_driver(&icom_uart_driver);
+}
+
+module_init(icom_init);
+module_exit(icom_exit);
+
+MODULE_AUTHOR("Michael Anderson <mjanders@us.ibm.com>");
+MODULE_DESCRIPTION("IBM iSeries Serial IOA driver");
+MODULE_SUPPORTED_DEVICE
+ ("IBM iSeries 2745, 2771, 2772, 2742, 2793 and 2805 Communications adapters");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("icom_call_setup.bin");
+MODULE_FIRMWARE("icom_res_dce.bin");
+MODULE_FIRMWARE("icom_asc.bin");
diff --git a/drivers/tty/serial/icom.h b/drivers/tty/serial/icom.h
new file mode 100644
index 00000000..c8029e00
--- /dev/null
+++ b/drivers/tty/serial/icom.h
@@ -0,0 +1,287 @@
+/*
+ * icom.h
+ *
+ * Copyright (C) 2001 Michael Anderson, IBM Corporation
+ *
+ * Serial device driver include file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/serial_core.h>
+
+#define BAUD_TABLE_LIMIT ((sizeof(icom_acfg_baud)/sizeof(int)) - 1)
+static int icom_acfg_baud[] = {
+ 300,
+ 600,
+ 900,
+ 1200,
+ 1800,
+ 2400,
+ 3600,
+ 4800,
+ 7200,
+ 9600,
+ 14400,
+ 19200,
+ 28800,
+ 38400,
+ 57600,
+ 76800,
+ 115200,
+ 153600,
+ 230400,
+ 307200,
+ 460800,
+};
+
+struct icom_regs {
+ u32 control; /* Adapter Control Register */
+ u32 interrupt; /* Adapter Interrupt Register */
+ u32 int_mask; /* Adapter Interrupt Mask Reg */
+ u32 int_pri; /* Adapter Interrupt Priority Reg */
+ u32 int_reg_b; /* Adapter non-masked Interrupt */
+ u32 resvd01;
+ u32 resvd02;
+ u32 resvd03;
+ u32 control_2; /* Adapter Control Register 2 */
+ u32 interrupt_2; /* Adapter Interrupt Register 2 */
+ u32 int_mask_2; /* Adapter Interrupt Mask 2 */
+ u32 int_pri_2; /* Adapter Interrupt Prior 2 */
+ u32 int_reg_2b; /* Adapter non-masked 2 */
+};
+
+struct func_dram {
+ u32 reserved[108]; /* 0-1B0 reserved by personality code */
+ u32 RcvStatusAddr; /* 1B0-1B3 Status Address for Next rcv */
+ u8 RcvStnAddr; /* 1B4 Receive Station Addr */
+ u8 IdleState; /* 1B5 Idle State */
+ u8 IdleMonitor; /* 1B6 Idle Monitor */
+ u8 FlagFillIdleTimer; /* 1B7 Flag Fill Idle Timer */
+ u32 XmitStatusAddr; /* 1B8-1BB Transmit Status Address */
+ u8 StartXmitCmd; /* 1BC Start Xmit Command */
+ u8 HDLCConfigReg; /* 1BD Reserved */
+ u8 CauseCode; /* 1BE Cause code for fatal error */
+ u8 xchar; /* 1BF High priority send */
+ u32 reserved3; /* 1C0-1C3 Reserved */
+ u8 PrevCmdReg; /* 1C4 Reserved */
+ u8 CmdReg; /* 1C5 Command Register */
+ u8 async_config2; /* 1C6 Async Config Byte 2 */
+ u8 async_config3; /* 1C7 Async Config Byte 3 */
+ u8 dce_resvd[20]; /* 1C8-1DB DCE Rsvd */
+ u8 dce_resvd21; /* 1DC DCE Rsvd (21st byte) */
+ u8 misc_flags; /* 1DD misc flags */
+#define V2_HARDWARE 0x40
+#define ICOM_HDW_ACTIVE 0x01
+ u8 call_length; /* 1DE Phone #/CFI buff ln */
+ u8 call_length2; /* 1DF Upper byte (unused) */
+ u32 call_addr; /* 1E0-1E3 Phn #/CFI buff addr */
+ u16 timer_value; /* 1E4-1E5 general timer value */
+ u8 timer_command; /* 1E6 general timer cmd */
+ u8 dce_command; /* 1E7 dce command reg */
+ u8 dce_cmd_status; /* 1E8 dce command stat */
+ u8 x21_r1_ioff; /* 1E9 dce ready counter */
+ u8 x21_r0_ioff; /* 1EA dce not ready ctr */
+ u8 x21_ralt_ioff; /* 1EB dce CNR counter */
+ u8 x21_r1_ion; /* 1EC dce ready I on ctr */
+ u8 rsvd_ier; /* 1ED Rsvd for IER (if needed) */
+ u8 ier; /* 1EE Interrupt Enable */
+ u8 isr; /* 1EF Input Signal Reg */
+ u8 osr; /* 1F0 Output Signal Reg */
+ u8 reset; /* 1F1 Reset/Reload Reg */
+ u8 disable; /* 1F2 Disable Reg */
+ u8 sync; /* 1F3 Sync Reg */
+ u8 error_stat; /* 1F4 Error Status */
+ u8 cable_id; /* 1F5 Cable ID */
+ u8 cs_length; /* 1F6 CS Load Length */
+ u8 mac_length; /* 1F7 Mac Load Length */
+ u32 cs_load_addr; /* 1F8-1FB Call Load PCI Addr */
+ u32 mac_load_addr; /* 1FC-1FF Mac Load PCI Addr */
+};
+
+/*
+ * adapter defines and structures
+ */
+#define ICOM_CONTROL_START_A 0x00000008
+#define ICOM_CONTROL_STOP_A 0x00000004
+#define ICOM_CONTROL_START_B 0x00000002
+#define ICOM_CONTROL_STOP_B 0x00000001
+#define ICOM_CONTROL_START_C 0x00000008
+#define ICOM_CONTROL_STOP_C 0x00000004
+#define ICOM_CONTROL_START_D 0x00000002
+#define ICOM_CONTROL_STOP_D 0x00000001
+#define ICOM_IRAM_OFFSET 0x1000
+#define ICOM_IRAM_SIZE 0x0C00
+#define ICOM_DCE_IRAM_OFFSET 0x0A00
+#define ICOM_CABLE_ID_VALID 0x01
+#define ICOM_CABLE_ID_MASK 0xF0
+#define ICOM_DISABLE 0x80
+#define CMD_XMIT_RCV_ENABLE 0xC0
+#define CMD_XMIT_ENABLE 0x40
+#define CMD_RCV_DISABLE 0x00
+#define CMD_RCV_ENABLE 0x80
+#define CMD_RESTART 0x01
+#define CMD_HOLD_XMIT 0x02
+#define CMD_SND_BREAK 0x04
+#define RS232_CABLE 0x06
+#define V24_CABLE 0x0E
+#define V35_CABLE 0x0C
+#define V36_CABLE 0x02
+#define NO_CABLE 0x00
+#define START_DOWNLOAD 0x80
+#define ICOM_INT_MASK_PRC_A 0x00003FFF
+#define ICOM_INT_MASK_PRC_B 0x3FFF0000
+#define ICOM_INT_MASK_PRC_C 0x00003FFF
+#define ICOM_INT_MASK_PRC_D 0x3FFF0000
+#define INT_RCV_COMPLETED 0x1000
+#define INT_XMIT_COMPLETED 0x2000
+#define INT_IDLE_DETECT 0x0800
+#define INT_RCV_DISABLED 0x0400
+#define INT_XMIT_DISABLED 0x0200
+#define INT_RCV_XMIT_SHUTDOWN 0x0100
+#define INT_FATAL_ERROR 0x0080
+#define INT_CABLE_PULL 0x0020
+#define INT_SIGNAL_CHANGE 0x0010
+#define HDLC_PPP_PURE_ASYNC 0x02
+#define HDLC_FF_FILL 0x00
+#define HDLC_HDW_FLOW 0x01
+#define START_XMIT 0x80
+#define ICOM_ACFG_DRIVE1 0x20
+#define ICOM_ACFG_NO_PARITY 0x00
+#define ICOM_ACFG_PARITY_ENAB 0x02
+#define ICOM_ACFG_PARITY_ODD 0x01
+#define ICOM_ACFG_8BPC 0x00
+#define ICOM_ACFG_7BPC 0x04
+#define ICOM_ACFG_6BPC 0x08
+#define ICOM_ACFG_5BPC 0x0C
+#define ICOM_ACFG_1STOP_BIT 0x00
+#define ICOM_ACFG_2STOP_BIT 0x10
+#define ICOM_DTR 0x80
+#define ICOM_RTS 0x40
+#define ICOM_RI 0x08
+#define ICOM_DSR 0x80
+#define ICOM_DCD 0x20
+#define ICOM_CTS 0x40
+
+#define NUM_XBUFFS 1
+#define NUM_RBUFFS 2
+#define RCV_BUFF_SZ 0x0200
+#define XMIT_BUFF_SZ 0x1000
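+/*
+ * Each port shares one transmit segment and two receive segments with
+ * the adapter; the statusArea structure below is DMA-mapped via
+ * statStg_pci in struct icom_port.
+ */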
+struct statusArea {
+ /**********************************************/
+ /* Transmit Status Area */
+ /**********************************************/
+ struct xmit_status_area{
+ u32 leNext; /* Next entry in Little Endian on Adapter */
+ u32 leNextASD;
+ u32 leBuffer; /* Buffer for entry in LE for Adapter */
+ u16 leLengthASD;
+ u16 leOffsetASD;
+ u16 leLength; /* Length of data in segment */
+ u16 flags;
+#define SA_FLAGS_DONE 0x0080 /* Done with Segment */
+#define SA_FLAGS_CONTINUED 0x8000 /* More Segments */
+#define SA_FLAGS_IDLE 0x4000 /* Mark IDLE after frm */
+#define SA_FLAGS_READY_TO_XMIT 0x0800
+#define SA_FLAGS_STAT_MASK 0x007F
+ } xmit[NUM_XBUFFS];
+
+ /**********************************************/
+ /* Receive Status Area */
+ /**********************************************/
+ struct {
+ u32 leNext; /* Next entry in Little Endian on Adapter */
+ u32 leNextASD;
+ u32 leBuffer; /* Buffer for entry in LE for Adapter */
+ u16 WorkingLength; /* size of segment */
+ u16 reserv01;
+ u16 leLength; /* Length of data in segment */
+ u16 flags;
+#define SA_FL_RCV_DONE 0x0010 /* Data ready */
+#define SA_FLAGS_OVERRUN 0x0040
+#define SA_FLAGS_PARITY_ERROR 0x0080
+#define SA_FLAGS_FRAME_ERROR 0x0001
+#define SA_FLAGS_FRAME_TRUNC 0x0002
+#define SA_FLAGS_BREAK_DET 0x0004 /* set conditionally by device driver, not hardware */
+#define SA_FLAGS_RCV_MASK 0xFFE6
+ } rcv[NUM_RBUFFS];
+};
+
+struct icom_adapter;
+
+
+#define ICOM_MAJOR 243
+#define ICOM_MINOR_START 0
+
+struct icom_port {
+ struct uart_port uart_port;
+ u8 imbed_modem;
+#define ICOM_UNKNOWN 1
+#define ICOM_RVX 2
+#define ICOM_IMBED_MODEM 3
+ unsigned char cable_id;
+ unsigned char read_status_mask;
+ unsigned char ignore_status_mask;
+ void __iomem * int_reg;
+ struct icom_regs __iomem *global_reg;
+ struct func_dram __iomem *dram;
+ int port;
+ struct statusArea *statStg;
+ dma_addr_t statStg_pci;
+ u32 *xmitRestart;
+ dma_addr_t xmitRestart_pci;
+ unsigned char *xmit_buf;
+ dma_addr_t xmit_buf_pci;
+ unsigned char *recv_buf;
+ dma_addr_t recv_buf_pci;
+ int next_rcv;
+ int put_length;
+ int status;
+#define ICOM_PORT_ACTIVE 1 /* Port exists. */
+#define ICOM_PORT_OFF 0 /* Port does not exist. */
+ int load_in_progress;
+ struct icom_adapter *adapter;
+};
+
+struct icom_adapter {
+ void __iomem * base_addr;
+ unsigned long base_addr_pci;
+ struct pci_dev *pci_dev;
+ struct icom_port port_info[4];
+ int index;
+ int version;
+#define ADAPTER_V1 0x0001
+#define ADAPTER_V2 0x0002
+ u32 subsystem_id;
+#define FOUR_PORT_MODEL 0x0252
+#define V2_TWO_PORTS_RVX 0x021A
+#define V2_ONE_PORT_RVX_ONE_PORT_IMBED_MDM 0x0251
+ int numb_ports;
+ struct list_head icom_adapter_entry;
+ struct kref kref;
+};
+
+/* prototype */
+extern void iCom_sercons_init(void);
+
+struct lookup_proc_table {
+ u32 __iomem *global_control_reg;
+ unsigned long processor_id;
+};
+
+struct lookup_int_table {
+ u32 __iomem *global_int_mask;
+ unsigned long processor_id;
+};
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
new file mode 100644
index 00000000..426434e5
--- /dev/null
+++ b/drivers/tty/serial/ifx6x60.c
@@ -0,0 +1,1415 @@
+/****************************************************************************
+ *
+ * Driver for the IFX 6x60 spi modem.
+ *
+ * Copyright (C) 2008 Option International
+ * Copyright (C) 2008 Filip Aben <f.aben@option.com>
+ * Denis Joseph Barrow <d.barow@option.com>
+ * Jan Dumon <j.dumon@option.com>
+ *
+ * Copyright (C) 2009, 2010 Intel Corp
+ * Russ Gorby <russ.gorby@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA
+ *
+ * Driver modified by Intel from Option gtm501l_spi.c
+ *
+ * Notes
+ * o The driver currently assumes a single device only. If you need to
+ * change this then look for saved_ifx_dev and add a device lookup
+ * o The driver is intended to be big-endian safe but has never been
+ * tested that way (no suitable hardware). There are a couple of FIXME
+ * notes by areas that may need addressing
+ * o Some of the GPIO naming/setup assumptions may need revisiting if
+ * you need to use this driver for another platform.
+ *
+ *****************************************************************************/
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/termios.h>
+#include <linux/tty.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/kfifo.h>
+#include <linux/tty_flip.h>
+#include <linux/timer.h>
+#include <linux/serial.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/rfkill.h>
+#include <linux/fs.h>
+#include <linux/ip.h>
+#include <linux/dmapool.h>
+#include <linux/gpio.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/ifx_modem.h>
+#include <linux/delay.h>
+
+#include "ifx6x60.h"
+
+#define IFX_SPI_MORE_MASK 0x10
+#define IFX_SPI_MORE_BIT 12 /* bit position in u16 */
+#define IFX_SPI_CTS_BIT 13 /* bit position in u16 */
+#define IFX_SPI_MODE SPI_MODE_1
+#define IFX_SPI_TTY_ID 0
+#define IFX_SPI_TIMEOUT_SEC 2
+#define IFX_SPI_HEADER_0 (-1)
+#define IFX_SPI_HEADER_F (-2)
+
+/* forward reference */
+static void ifx_spi_handle_srdy(struct ifx_spi_device *ifx_dev);
+
+/* local variables */
+static int spi_bpw = 16; /* 8, 16 or 32 bit word length */
+static struct tty_driver *tty_drv;
+static struct ifx_spi_device *saved_ifx_dev;
+static struct lock_class_key ifx_spi_key;
+
+/* GPIO/GPE settings */
+
+/**
+ * mrdy_set_high - set MRDY GPIO
+ * @ifx: device we are controlling
+ *
+ */
+static inline void mrdy_set_high(struct ifx_spi_device *ifx)
+{
+ gpio_set_value(ifx->gpio.mrdy, 1);
+}
+
+/**
+ * mrdy_set_low - clear MRDY GPIO
+ * @ifx: device we are controlling
+ *
+ */
+static inline void mrdy_set_low(struct ifx_spi_device *ifx)
+{
+ gpio_set_value(ifx->gpio.mrdy, 0);
+}
+
+/**
+ * ifx_spi_power_state_set
+ * @ifx_dev: our SPI device
+ * @val: bits to set
+ *
+ * Set bit in power status and signal power system if status becomes non-0
+ */
+static void
+ifx_spi_power_state_set(struct ifx_spi_device *ifx_dev, unsigned char val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ifx_dev->power_lock, flags);
+
+ /*
+ * if power status is already non-0, just update, else
+ * tell power system
+ */
+ if (!ifx_dev->power_status)
+ pm_runtime_get(&ifx_dev->spi_dev->dev);
+ ifx_dev->power_status |= val;
+
+ spin_unlock_irqrestore(&ifx_dev->power_lock, flags);
+}
+
+/**
+ * ifx_spi_power_state_clear - clear power bit
+ * @ifx_dev: our SPI device
+ * @val: bits to clear
+ *
+ * clear bit in power status and signal power system if status becomes 0
+ */
+static void
+ifx_spi_power_state_clear(struct ifx_spi_device *ifx_dev, unsigned char val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ifx_dev->power_lock, flags);
+
+ if (ifx_dev->power_status) {
+ ifx_dev->power_status &= ~val;
+ if (!ifx_dev->power_status)
+ pm_runtime_put(&ifx_dev->spi_dev->dev);
+ }
+
+ spin_unlock_irqrestore(&ifx_dev->power_lock, flags);
+}
+
+/**
+ * swap_buf
+ * @buf: our buffer
+ * @len: number of bytes (not words) in the buffer
+ * @end: end of buffer
+ *
+ * Swap the contents of a buffer into big endian format
+ */
+static inline void swap_buf(u16 *buf, int len, void *end)
+{
+ int n;
+
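+ /* convert the byte count into a u16 word count, rounding up */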
+ len = ((len + 1) >> 1);
+ if ((void *)&buf[len] > end) {
+ pr_err("swap_buf: swap exceeds boundary (%p > %p)!",
+ &buf[len], end);
+ return;
+ }
+ for (n = 0; n < len; n++) {
+ *buf = cpu_to_be16(*buf);
+ buf++;
+ }
+}
+
+/**
+ * mrdy_assert - assert MRDY line
+ * @ifx_dev: our SPI device
+ *
+ * Assert mrdy and set timer to wait for SRDY interrupt, if SRDY is low
+ * now.
+ *
+ * FIXME: Can SRDY even go high as we are running this code ?
+ */
+static void mrdy_assert(struct ifx_spi_device *ifx_dev)
+{
+ int val = gpio_get_value(ifx_dev->gpio.srdy);
+ if (!val) {
+ if (!test_and_set_bit(IFX_SPI_STATE_TIMER_PENDING,
+ &ifx_dev->flags)) {
+ ifx_dev->spi_timer.expires =
+ jiffies + IFX_SPI_TIMEOUT_SEC*HZ;
+ add_timer(&ifx_dev->spi_timer);
+
+ }
+ }
+ ifx_spi_power_state_set(ifx_dev, IFX_SPI_POWER_DATA_PENDING);
+ mrdy_set_high(ifx_dev);
+}
+
+/**
+ * ifx_spi_ttyhangup - hang up an IFX device
+ * @ifx_dev: our SPI device
+ *
+ * Hang up the tty attached to the IFX device if one is currently
+ * open. If not, take no action.
+ */
+static void ifx_spi_ttyhangup(struct ifx_spi_device *ifx_dev)
+{
+ struct tty_port *pport = &ifx_dev->tty_port;
+ struct tty_struct *tty = tty_port_tty_get(pport);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+}
+
+/**
+ * ifx_spi_timeout - SPI timeout
+ * @arg: our SPI device
+ *
+ * The SPI has timed out: hang up the tty. Users will then see a hangup
+ * and error events.
+ */
+static void ifx_spi_timeout(unsigned long arg)
+{
+ struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *)arg;
+
+ dev_warn(&ifx_dev->spi_dev->dev, "*** SPI Timeout ***");
+ ifx_spi_ttyhangup(ifx_dev);
+ mrdy_set_low(ifx_dev);
+ clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
+}
+
+/* char/tty operations */
+
+/**
+ * ifx_spi_tiocmget - get modem lines
+ * @tty: our tty device
+ * @filp: file handle issuing the request
+ *
+ * Map the signal state into Linux modem flags and report the value
+ * in Linux terms
+ */
+static int ifx_spi_tiocmget(struct tty_struct *tty)
+{
+ unsigned int value;
+ struct ifx_spi_device *ifx_dev = tty->driver_data;
+
+ value =
+ (test_bit(IFX_SPI_RTS, &ifx_dev->signal_state) ? TIOCM_RTS : 0) |
+ (test_bit(IFX_SPI_DTR, &ifx_dev->signal_state) ? TIOCM_DTR : 0) |
+ (test_bit(IFX_SPI_CTS, &ifx_dev->signal_state) ? TIOCM_CTS : 0) |
+ (test_bit(IFX_SPI_DSR, &ifx_dev->signal_state) ? TIOCM_DSR : 0) |
+ (test_bit(IFX_SPI_DCD, &ifx_dev->signal_state) ? TIOCM_CAR : 0) |
+ (test_bit(IFX_SPI_RI, &ifx_dev->signal_state) ? TIOCM_RNG : 0);
+ return value;
+}
+
+/**
+ * ifx_spi_tiocmset - set modem bits
+ * @tty: the tty structure
+ * @set: bits to set
+ * @clear: bits to clear
+ *
+ * The IFX6x60 only supports DTR and RTS. Set them accordingly
+ * and flag that an update to the modem is needed.
+ *
+ * FIXME: do we need to kick the transfers when we do this ?
+ */
+static int ifx_spi_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct ifx_spi_device *ifx_dev = tty->driver_data;
+
+ if (set & TIOCM_RTS)
+ set_bit(IFX_SPI_RTS, &ifx_dev->signal_state);
+ if (set & TIOCM_DTR)
+ set_bit(IFX_SPI_DTR, &ifx_dev->signal_state);
+ if (clear & TIOCM_RTS)
+ clear_bit(IFX_SPI_RTS, &ifx_dev->signal_state);
+ if (clear & TIOCM_DTR)
+ clear_bit(IFX_SPI_DTR, &ifx_dev->signal_state);
+
+ set_bit(IFX_SPI_UPDATE, &ifx_dev->signal_state);
+ return 0;
+}
+
+/**
+ * ifx_spi_open - called on tty open
+ * @tty: our tty device
+ * @filp: file handle being associated with the tty
+ *
+ * Open the tty interface. We let the tty_port layer do all the work
+ * for us.
+ *
+ * FIXME: Remove single device assumption and saved_ifx_dev
+ */
+static int ifx_spi_open(struct tty_struct *tty, struct file *filp)
+{
+ return tty_port_open(&saved_ifx_dev->tty_port, tty, filp);
+}
+
+/**
+ * ifx_spi_close - called when our tty closes
+ * @tty: the tty being closed
+ * @filp: the file handle being closed
+ *
+ * Perform the close of the tty. We use the tty_port layer to do all
+ * our hard work.
+ */
+static void ifx_spi_close(struct tty_struct *tty, struct file *filp)
+{
+ struct ifx_spi_device *ifx_dev = tty->driver_data;
+ tty_port_close(&ifx_dev->tty_port, tty, filp);
+ /* FIXME: should we do an ifx_spi_reset here ? */
+}
+
+/**
+ * ifx_spi_decode_spi_header - decode received header
+ * @buffer: the received data
+ * @length: decoded length
+ * @more: decoded more flag
+ * @received_cts: status of cts we received
+ *
+ * Note how received_cts is handled: if the header is all F it is left
+ * as it was, if the header is all 0 it is set to 0, and otherwise it
+ * is taken from the incoming header.
+ *
+ * FIXME: endianness
+ */
+static int ifx_spi_decode_spi_header(unsigned char *buffer, int *length,
+ unsigned char *more, unsigned char *received_cts)
+{
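+ /*
+ * The four header bytes are read as two 16-bit words.  All-zero and
+ * all-ones headers are treated as special "no payload" markers;
+ * otherwise the low 12 bits of the first word give the payload
+ * length and the MORE and slave-CTS flags are picked out of the
+ * upper header bytes via IFX_SPI_MORE_BIT and IFX_SPI_CTS_BIT.
+ */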
+ u16 h1;
+ u16 h2;
+ u16 *in_buffer = (u16 *)buffer;
+
+ h1 = *in_buffer;
+ h2 = *(in_buffer+1);
+
+ if (h1 == 0 && h2 == 0) {
+ *received_cts = 0;
+ return IFX_SPI_HEADER_0;
+ } else if (h1 == 0xffff && h2 == 0xffff) {
+ /* spi_slave_cts remains as it was */
+ return IFX_SPI_HEADER_F;
+ }
+
+ *length = h1 & 0xfff; /* upper bits of byte are flags */
+ *more = (buffer[1] >> IFX_SPI_MORE_BIT) & 1;
+ *received_cts = (buffer[3] >> IFX_SPI_CTS_BIT) & 1;
+ return 0;
+}
+
+/**
+ * ifx_spi_setup_spi_header - set header fields
+ * @txbuffer: pointer to start of SPI buffer
+ * @tx_count: bytes
+ * @more: indicate if more to follow
+ *
+ * Format up an SPI header for a transfer
+ *
+ * FIXME: endianness?
+ */
+static void ifx_spi_setup_spi_header(unsigned char *txbuffer, int tx_count,
+ unsigned char more)
+{
+ *(u16 *)(txbuffer) = tx_count;
+ *(u16 *)(txbuffer+2) = IFX_SPI_PAYLOAD_SIZE;
+ txbuffer[1] |= (more << IFX_SPI_MORE_BIT) & IFX_SPI_MORE_MASK;
+}
+
+/**
+ * ifx_spi_wakeup_serial - SPI space made
+ * @ifx_dev: our SPI device
+ *
+ * We have emptied the FIFO enough that we want to get more data
+ * queued into it. Poke the line discipline via tty_wakeup so that
+ * it will feed us more bits
+ */
+static void ifx_spi_wakeup_serial(struct ifx_spi_device *ifx_dev)
+{
+ struct tty_struct *tty;
+
+ tty = tty_port_tty_get(&ifx_dev->tty_port);
+ if (!tty)
+ return;
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+}
+
+/**
+ * ifx_spi_prepare_tx_buffer - prepare transmit frame
+ * @ifx_dev: our SPI device
+ *
+ * The transmit buffer needs a header and various other bits of
+ * information followed by as much data as we can pull from the FIFO
+ * and transfer. This function formats up a suitable buffer in the
+ * ifx_dev->tx_buffer
+ *
+ * FIXME: performance - should we wake the tty when the queue is half
+ * empty ?
+ */
+static int ifx_spi_prepare_tx_buffer(struct ifx_spi_device *ifx_dev)
+{
+ int temp_count;
+ int queue_length;
+ int tx_count;
+ unsigned char *tx_buffer;
+
+ tx_buffer = ifx_dev->tx_buffer;
+ memset(tx_buffer, 0, IFX_SPI_TRANSFER_SIZE);
+
+ /* make room for required SPI header */
+ tx_buffer += IFX_SPI_HEADER_OVERHEAD;
+ tx_count = IFX_SPI_HEADER_OVERHEAD;
+
+ /* clear to signal no more data if this turns out to be the
+ * last buffer sent in a sequence */
+ ifx_dev->spi_more = 0;
+
+ /* if modem cts is set, just send empty buffer */
+ if (!ifx_dev->spi_slave_cts) {
+ /* see if there's tx data */
+ queue_length = kfifo_len(&ifx_dev->tx_fifo);
+ if (queue_length != 0) {
+ /* data to mux -- see if there's room for it */
+ temp_count = min(queue_length, IFX_SPI_PAYLOAD_SIZE);
+ temp_count = kfifo_out_locked(&ifx_dev->tx_fifo,
+ tx_buffer, temp_count,
+ &ifx_dev->fifo_lock);
+
+ /* update buffer pointer and data count in message */
+ tx_buffer += temp_count;
+ tx_count += temp_count;
+ if (temp_count == queue_length)
+ /* poke port to get more data */
+ ifx_spi_wakeup_serial(ifx_dev);
+ else /* more data in port, use next SPI message */
+ ifx_dev->spi_more = 1;
+ }
+ }
+ /* have data and info for header -- set up SPI header in buffer */
+ /* spi header needs payload size, not entire buffer size */
+ ifx_spi_setup_spi_header(ifx_dev->tx_buffer,
+ tx_count-IFX_SPI_HEADER_OVERHEAD,
+ ifx_dev->spi_more);
+ /* swap actual data in the buffer */
+ swap_buf((u16 *)(ifx_dev->tx_buffer), tx_count,
+ &ifx_dev->tx_buffer[IFX_SPI_TRANSFER_SIZE]);
+ return tx_count;
+}
+
+/**
+ * ifx_spi_write - line discipline write
+ * @tty: our tty device
+ * @buf: pointer to buffer to write (kernel space)
+ * @count: size of buffer
+ *
+ * Write the characters we have been given into the FIFO. If the device
+ * is not active then activate it, when the SRDY line is asserted back
+ * this will commence I/O
+ */
+static int ifx_spi_write(struct tty_struct *tty, const unsigned char *buf,
+ int count)
+{
+ struct ifx_spi_device *ifx_dev = tty->driver_data;
+ unsigned char *tmp_buf = (unsigned char *)buf;
+ int tx_count = kfifo_in_locked(&ifx_dev->tx_fifo, tmp_buf, count,
+ &ifx_dev->fifo_lock);
+ mrdy_assert(ifx_dev);
+ return tx_count;
+}
+
+/**
+ * ifx_spi_write_room - line discipline helper
+ * @tty: our tty device
+ *
+ * Report how much data we can accept before we drop bytes. As we use
+ * a simple FIFO this is nice and easy.
+ */
+static int ifx_spi_write_room(struct tty_struct *tty)
+{
+ struct ifx_spi_device *ifx_dev = tty->driver_data;
+ return IFX_SPI_FIFO_SIZE - kfifo_len(&ifx_dev->tx_fifo);
+}
+
+/**
+ * ifx_spi_chars_in_buffer - line discipline helper
+ * @tty: our tty device
+ *
+ * Report how many characters we have buffered. In our case this is the
+ * number of bytes sitting in our transmit FIFO.
+ */
+static int ifx_spi_chars_in_buffer(struct tty_struct *tty)
+{
+ struct ifx_spi_device *ifx_dev = tty->driver_data;
+ return kfifo_len(&ifx_dev->tx_fifo);
+}
+
+/**
+ * ifx_spi_hangup
+ * @tty: our tty device
+ *
+ * tty port hang up. Called when tty_hangup processing is invoked either
+ * by loss of carrier, or by software (eg vhangup). Serialized against
+ * activate/shutdown by the tty layer.
+ */
+static void ifx_spi_hangup(struct tty_struct *tty)
+{
+ struct ifx_spi_device *ifx_dev = tty->driver_data;
+ tty_port_hangup(&ifx_dev->tty_port);
+}
+
+/**
+ * ifx_port_activate
+ * @port: our tty port
+ *
+ * tty port activate method - called for first open. Serialized
+ * with hangup and shutdown by the tty layer.
+ */
+static int ifx_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct ifx_spi_device *ifx_dev =
+ container_of(port, struct ifx_spi_device, tty_port);
+
+ /* clear any old data; can't do this in 'close' */
+ kfifo_reset(&ifx_dev->tx_fifo);
+
+ /* put port data into this tty */
+ tty->driver_data = ifx_dev;
+
+ /* allows flip string push from int context */
+ tty->low_latency = 1;
+
+ return 0;
+}
+
+/**
+ * ifx_port_shutdown
+ * @port: our tty port
+ *
+ * tty port shutdown method - called for last port close. Serialized
+ * with hangup and activate by the tty layer.
+ */
+static void ifx_port_shutdown(struct tty_port *port)
+{
+ struct ifx_spi_device *ifx_dev =
+ container_of(port, struct ifx_spi_device, tty_port);
+
+ mrdy_set_low(ifx_dev);
+ clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
+ tasklet_kill(&ifx_dev->io_work_tasklet);
+}
+
+static const struct tty_port_operations ifx_tty_port_ops = {
+ .activate = ifx_port_activate,
+ .shutdown = ifx_port_shutdown,
+};
+
+static const struct tty_operations ifx_spi_serial_ops = {
+ .open = ifx_spi_open,
+ .close = ifx_spi_close,
+ .write = ifx_spi_write,
+ .hangup = ifx_spi_hangup,
+ .write_room = ifx_spi_write_room,
+ .chars_in_buffer = ifx_spi_chars_in_buffer,
+ .tiocmget = ifx_spi_tiocmget,
+ .tiocmset = ifx_spi_tiocmset,
+};
+
+/**
+ * ifx_spi_insert_flip_string - queue received data
+ * @ifx_dev: our SPI device
+ * @chars: buffer we have received
+ * @size: number of chars received
+ *
+ * Queue bytes to the tty assuming the tty side is currently open. If
+ * not, discard the data.
+ */
+static void ifx_spi_insert_flip_string(struct ifx_spi_device *ifx_dev,
+ unsigned char *chars, size_t size)
+{
+ struct tty_struct *tty = tty_port_tty_get(&ifx_dev->tty_port);
+ if (!tty)
+ return;
+ tty_insert_flip_string(tty, chars, size);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+}
+
+/**
+ * ifx_spi_complete - SPI transfer completed
+ * @ctx: our SPI device
+ *
+ * An SPI transfer has completed. Process any received data and kick off
+ * any further transmits we can commence.
+ */
+static void ifx_spi_complete(void *ctx)
+{
+ struct ifx_spi_device *ifx_dev = ctx;
+ struct tty_struct *tty;
+ struct tty_ldisc *ldisc = NULL;
+ int length;
+ int actual_length;
+ unsigned char more;
+ unsigned char cts;
+ int local_write_pending = 0;
+ int queue_length;
+ int srdy;
+ int decode_result;
+
+ mrdy_set_low(ifx_dev);
+
+ if (!ifx_dev->spi_msg.status) {
+ /* check header validity, get comm flags */
+ swap_buf((u16 *)ifx_dev->rx_buffer, IFX_SPI_HEADER_OVERHEAD,
+ &ifx_dev->rx_buffer[IFX_SPI_HEADER_OVERHEAD]);
+ decode_result = ifx_spi_decode_spi_header(ifx_dev->rx_buffer,
+ &length, &more, &cts);
+ if (decode_result == IFX_SPI_HEADER_0) {
+ dev_dbg(&ifx_dev->spi_dev->dev,
+ "ignore input: invalid header 0");
+ ifx_dev->spi_slave_cts = 0;
+ goto complete_exit;
+ } else if (decode_result == IFX_SPI_HEADER_F) {
+ dev_dbg(&ifx_dev->spi_dev->dev,
+ "ignore input: invalid header F");
+ goto complete_exit;
+ }
+
+ ifx_dev->spi_slave_cts = cts;
+
+ actual_length = min((unsigned int)length,
+ ifx_dev->spi_msg.actual_length);
+ swap_buf((u16 *)(ifx_dev->rx_buffer + IFX_SPI_HEADER_OVERHEAD),
+ actual_length,
+ &ifx_dev->rx_buffer[IFX_SPI_TRANSFER_SIZE]);
+ ifx_spi_insert_flip_string(
+ ifx_dev,
+ ifx_dev->rx_buffer + IFX_SPI_HEADER_OVERHEAD,
+ (size_t)actual_length);
+ } else {
+ dev_dbg(&ifx_dev->spi_dev->dev, "SPI transfer error %d",
+ ifx_dev->spi_msg.status);
+ }
+
+complete_exit:
+ if (ifx_dev->write_pending) {
+ ifx_dev->write_pending = 0;
+ local_write_pending = 1;
+ }
+
+ clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &(ifx_dev->flags));
+
+ queue_length = kfifo_len(&ifx_dev->tx_fifo);
+ srdy = gpio_get_value(ifx_dev->gpio.srdy);
+ if (!srdy)
+ ifx_spi_power_state_clear(ifx_dev, IFX_SPI_POWER_SRDY);
+
+ /* schedule output if there is more to do */
+ if (test_and_clear_bit(IFX_SPI_STATE_IO_READY, &ifx_dev->flags))
+ tasklet_schedule(&ifx_dev->io_work_tasklet);
+ else {
+ if (more || ifx_dev->spi_more || queue_length > 0 ||
+ local_write_pending) {
+ if (ifx_dev->spi_slave_cts) {
+ if (more)
+ mrdy_assert(ifx_dev);
+ } else
+ mrdy_assert(ifx_dev);
+ } else {
+ /*
+ * poke line discipline driver if any for more data
+ * may or may not get more data to write
+ * for now, say not busy
+ */
+ ifx_spi_power_state_clear(ifx_dev,
+ IFX_SPI_POWER_DATA_PENDING);
+ tty = tty_port_tty_get(&ifx_dev->tty_port);
+ if (tty) {
+ ldisc = tty_ldisc_ref(tty);
+ if (ldisc) {
+ ldisc->ops->write_wakeup(tty);
+ tty_ldisc_deref(ldisc);
+ }
+ tty_kref_put(tty);
+ }
+ }
+ }
+}
+
+/**
+ * ifx_spi_io - I/O tasklet
+ * @data: our SPI device
+ *
+ * Queue data for transmission if possible and then kick off the
+ * transfer.
+ */
+static void ifx_spi_io(unsigned long data)
+{
+ int retval;
+ struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *) data;
+
+ if (!test_and_set_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags)) {
+ if (ifx_dev->gpio.unack_srdy_int_nb > 0)
+ ifx_dev->gpio.unack_srdy_int_nb--;
+
+ ifx_spi_prepare_tx_buffer(ifx_dev);
+
+ spi_message_init(&ifx_dev->spi_msg);
+ INIT_LIST_HEAD(&ifx_dev->spi_msg.queue);
+
+ ifx_dev->spi_msg.context = ifx_dev;
+ ifx_dev->spi_msg.complete = ifx_spi_complete;
+
+ /* set up our spi transfer */
+ /* note len is BYTES, not transfers */
+ ifx_dev->spi_xfer.len = IFX_SPI_TRANSFER_SIZE;
+ ifx_dev->spi_xfer.cs_change = 0;
+ ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz;
+ /* ifx_dev->spi_xfer.speed_hz = 390625; */
+ ifx_dev->spi_xfer.bits_per_word = spi_bpw;
+
+ ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer;
+ ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer;
+
+ /*
+ * setup dma pointers
+ */
+ if (ifx_dev->use_dma) {
+ ifx_dev->spi_msg.is_dma_mapped = 1;
+ ifx_dev->tx_dma = ifx_dev->tx_bus;
+ ifx_dev->rx_dma = ifx_dev->rx_bus;
+ ifx_dev->spi_xfer.tx_dma = ifx_dev->tx_dma;
+ ifx_dev->spi_xfer.rx_dma = ifx_dev->rx_dma;
+ } else {
+ ifx_dev->spi_msg.is_dma_mapped = 0;
+ ifx_dev->tx_dma = (dma_addr_t)0;
+ ifx_dev->rx_dma = (dma_addr_t)0;
+ ifx_dev->spi_xfer.tx_dma = (dma_addr_t)0;
+ ifx_dev->spi_xfer.rx_dma = (dma_addr_t)0;
+ }
+
+ spi_message_add_tail(&ifx_dev->spi_xfer, &ifx_dev->spi_msg);
+
+ /* Assert MRDY. This may have already been done by the write
+ * routine.
+ */
+ mrdy_assert(ifx_dev);
+
+ retval = spi_async(ifx_dev->spi_dev, &ifx_dev->spi_msg);
+ if (retval) {
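+ /* submission failed: clear the in-progress flag and
+ * reschedule the tasklet to try again */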
+ clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS,
+ &ifx_dev->flags);
+ tasklet_schedule(&ifx_dev->io_work_tasklet);
+ return;
+ }
+ } else
+ ifx_dev->write_pending = 1;
+}
+
+/**
+ * ifx_spi_free_port - free up the tty side
+ * @ifx_dev: IFX device going away
+ *
+ * Unregister and free up a port when the device goes away
+ */
+static void ifx_spi_free_port(struct ifx_spi_device *ifx_dev)
+{
+ if (ifx_dev->tty_dev)
+ tty_unregister_device(tty_drv, ifx_dev->minor);
+ kfifo_free(&ifx_dev->tx_fifo);
+}
+
+/**
+ * ifx_spi_create_port - create a new port
+ * @ifx_dev: our spi device
+ *
+ * Allocate and initialise the tty port that goes with this interface
+ * and add it to the tty layer so that it can be opened.
+ */
+static int ifx_spi_create_port(struct ifx_spi_device *ifx_dev)
+{
+ int ret = 0;
+ struct tty_port *pport = &ifx_dev->tty_port;
+
+ spin_lock_init(&ifx_dev->fifo_lock);
+ lockdep_set_class_and_subclass(&ifx_dev->fifo_lock,
+ &ifx_spi_key, 0);
+
+ if (kfifo_alloc(&ifx_dev->tx_fifo, IFX_SPI_FIFO_SIZE, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ tty_port_init(pport);
+ pport->ops = &ifx_tty_port_ops;
+ ifx_dev->minor = IFX_SPI_TTY_ID;
+ ifx_dev->tty_dev = tty_register_device(tty_drv, ifx_dev->minor,
+ &ifx_dev->spi_dev->dev);
+ if (IS_ERR(ifx_dev->tty_dev)) {
+ dev_dbg(&ifx_dev->spi_dev->dev,
+ "%s: registering tty device failed", __func__);
+ ret = PTR_ERR(ifx_dev->tty_dev);
+ goto error_ret;
+ }
+ return 0;
+
+error_ret:
+ ifx_spi_free_port(ifx_dev);
+ return ret;
+}
+
+/**
+ * ifx_spi_handle_srdy - handle SRDY
+ * @ifx_dev: device asserting SRDY
+ *
+ * Check our device state and see what we need to kick off when SRDY
+ * is asserted. This usually means killing the timer and firing off the
+ * I/O processing.
+ */
+static void ifx_spi_handle_srdy(struct ifx_spi_device *ifx_dev)
+{
+ if (test_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags)) {
+ del_timer_sync(&ifx_dev->spi_timer);
+ clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
+ }
+
+ ifx_spi_power_state_set(ifx_dev, IFX_SPI_POWER_SRDY);
+
+ if (!test_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags))
+ tasklet_schedule(&ifx_dev->io_work_tasklet);
+ else
+ set_bit(IFX_SPI_STATE_IO_READY, &ifx_dev->flags);
+}
+
+/**
+ * ifx_spi_srdy_interrupt - SRDY asserted
+ * @irq: our IRQ number
+ * @dev: our ifx device
+ *
+ * The modem asserted SRDY. Handle the srdy event
+ */
+static irqreturn_t ifx_spi_srdy_interrupt(int irq, void *dev)
+{
+ struct ifx_spi_device *ifx_dev = dev;
+ ifx_dev->gpio.unack_srdy_int_nb++;
+ ifx_spi_handle_srdy(ifx_dev);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ifx_spi_reset_interrupt - Modem has changed reset state
+ * @irq: interrupt number
+ * @dev: our device pointer
+ *
+ * The modem has either entered or left reset state. Check the GPIO
+ * line to see which.
+ *
+ * FIXME: review locking on MR_INPROGRESS versus
+ * parallel unsolicited reset/solicited reset
+ */
+static irqreturn_t ifx_spi_reset_interrupt(int irq, void *dev)
+{
+ struct ifx_spi_device *ifx_dev = dev;
+ int val = gpio_get_value(ifx_dev->gpio.reset_out);
+ int solreset = test_bit(MR_START, &ifx_dev->mdm_reset_state);
+
+ if (val == 0) {
+ /* entered reset */
+ set_bit(MR_INPROGRESS, &ifx_dev->mdm_reset_state);
+ if (!solreset) {
+ /* unsolicited reset */
+ ifx_spi_ttyhangup(ifx_dev);
+ }
+ } else {
+ /* exited reset */
+ clear_bit(MR_INPROGRESS, &ifx_dev->mdm_reset_state);
+ if (solreset) {
+ set_bit(MR_COMPLETE, &ifx_dev->mdm_reset_state);
+ wake_up(&ifx_dev->mdm_reset_wait);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * ifx_spi_free_device - free device
+ * @ifx_dev: device to free
+ *
+ * Free the IFX device
+ */
+static void ifx_spi_free_device(struct ifx_spi_device *ifx_dev)
+{
+ ifx_spi_free_port(ifx_dev);
+ dma_free_coherent(&ifx_dev->spi_dev->dev,
+ IFX_SPI_TRANSFER_SIZE,
+ ifx_dev->tx_buffer,
+ ifx_dev->tx_bus);
+ dma_free_coherent(&ifx_dev->spi_dev->dev,
+ IFX_SPI_TRANSFER_SIZE,
+ ifx_dev->rx_buffer,
+ ifx_dev->rx_bus);
+}
+
+/**
+ * ifx_spi_reset - reset modem
+ * @ifx_dev: modem to reset
+ *
+ * Perform a reset on the modem
+ */
+static int ifx_spi_reset(struct ifx_spi_device *ifx_dev)
+{
+ int ret;
+ /*
+ * set up modem power, reset
+ *
+ * delays are required on some platforms for the modem
+ * to reset properly
+ */
+ set_bit(MR_START, &ifx_dev->mdm_reset_state);
+ gpio_set_value(ifx_dev->gpio.po, 0);
+ gpio_set_value(ifx_dev->gpio.reset, 0);
+ msleep(25);
+ gpio_set_value(ifx_dev->gpio.reset, 1);
+ msleep(1);
+ gpio_set_value(ifx_dev->gpio.po, 1);
+ msleep(1);
+ gpio_set_value(ifx_dev->gpio.po, 0);
+ ret = wait_event_timeout(ifx_dev->mdm_reset_wait,
+ test_bit(MR_COMPLETE,
+ &ifx_dev->mdm_reset_state),
+ IFX_RESET_TIMEOUT);
+ if (!ret)
+ dev_warn(&ifx_dev->spi_dev->dev, "Modem reset timeout: (state:%lx)",
+ ifx_dev->mdm_reset_state);
+
+ ifx_dev->mdm_reset_state = 0;
+ return ret;
+}
+
+/**
+ * ifx_spi_spi_probe - probe callback
+ * @spi: our possible matching SPI device
+ *
+ * Probe for a 6x60 modem on SPI bus. Perform any needed device and
+ * GPIO setup.
+ *
+ * FIXME:
+ * - Support for multiple devices
+ * - Split out MID specific GPIO handling eventually
+ */
+
+static int ifx_spi_spi_probe(struct spi_device *spi)
+{
+ int ret;
+ int srdy;
+ struct ifx_modem_platform_data *pl_data;
+ struct ifx_spi_device *ifx_dev;
+
+ if (saved_ifx_dev) {
+ dev_dbg(&spi->dev, "ignoring subsequent detection");
+ return -ENODEV;
+ }
+
+ pl_data = (struct ifx_modem_platform_data *)spi->dev.platform_data;
+ if (!pl_data) {
+ dev_err(&spi->dev, "missing platform data!");
+ return -ENODEV;
+ }
+
+ /* initialize structure to hold our device variables */
+ ifx_dev = kzalloc(sizeof(struct ifx_spi_device), GFP_KERNEL);
+ if (!ifx_dev) {
+ dev_err(&spi->dev, "spi device allocation failed");
+ return -ENOMEM;
+ }
+ saved_ifx_dev = ifx_dev;
+ ifx_dev->spi_dev = spi;
+ clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags);
+ spin_lock_init(&ifx_dev->write_lock);
+ spin_lock_init(&ifx_dev->power_lock);
+ ifx_dev->power_status = 0;
+ init_timer(&ifx_dev->spi_timer);
+ ifx_dev->spi_timer.function = ifx_spi_timeout;
+ ifx_dev->spi_timer.data = (unsigned long)ifx_dev;
+ ifx_dev->modem = pl_data->modem_type;
+ ifx_dev->use_dma = pl_data->use_dma;
+ ifx_dev->max_hz = pl_data->max_hz;
+ /* initialize spi mode, etc */
+ spi->max_speed_hz = ifx_dev->max_hz;
+ spi->mode = IFX_SPI_MODE | (SPI_LOOP & spi->mode);
+ spi->bits_per_word = spi_bpw;
+ ret = spi_setup(spi);
+ if (ret) {
+ dev_err(&spi->dev, "SPI setup wasn't successful %d", ret);
+ return -ENODEV;
+ }
+
+ /* ensure SPI protocol flags are initialized to enable transfer */
+ ifx_dev->spi_more = 0;
+ ifx_dev->spi_slave_cts = 0;
+
+ /* initialize transfer and DMA buffers */
+ ifx_dev->tx_buffer = dma_alloc_coherent(ifx_dev->spi_dev->dev.parent,
+ IFX_SPI_TRANSFER_SIZE,
+ &ifx_dev->tx_bus,
+ GFP_KERNEL);
+ if (!ifx_dev->tx_buffer) {
+ dev_err(&spi->dev, "DMA-TX buffer allocation failed");
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ ifx_dev->rx_buffer = dma_alloc_coherent(ifx_dev->spi_dev->dev.parent,
+ IFX_SPI_TRANSFER_SIZE,
+ &ifx_dev->rx_bus,
+ GFP_KERNEL);
+ if (!ifx_dev->rx_buffer) {
+ dev_err(&spi->dev, "DMA-RX buffer allocation failed");
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ /* initialize waitq for modem reset */
+ init_waitqueue_head(&ifx_dev->mdm_reset_wait);
+
+ spi_set_drvdata(spi, ifx_dev);
+ tasklet_init(&ifx_dev->io_work_tasklet, ifx_spi_io,
+ (unsigned long)ifx_dev);
+
+ set_bit(IFX_SPI_STATE_PRESENT, &ifx_dev->flags);
+
+ /* create our tty port */
+ ret = ifx_spi_create_port(ifx_dev);
+ if (ret != 0) {
+ dev_err(&spi->dev, "create default tty port failed");
+ goto error_ret;
+ }
+
+ ifx_dev->gpio.reset = pl_data->rst_pmu;
+ ifx_dev->gpio.po = pl_data->pwr_on;
+ ifx_dev->gpio.mrdy = pl_data->mrdy;
+ ifx_dev->gpio.srdy = pl_data->srdy;
+ ifx_dev->gpio.reset_out = pl_data->rst_out;
+
+ dev_info(&spi->dev, "gpios %d, %d, %d, %d, %d",
+ ifx_dev->gpio.reset, ifx_dev->gpio.po, ifx_dev->gpio.mrdy,
+ ifx_dev->gpio.srdy, ifx_dev->gpio.reset_out);
+
+ /* Configure gpios */
+ ret = gpio_request(ifx_dev->gpio.reset, "ifxModem");
+ if (ret < 0) {
+ dev_err(&spi->dev, "Unable to allocate GPIO%d (RESET)",
+ ifx_dev->gpio.reset);
+ goto error_ret;
+ }
+ ret += gpio_direction_output(ifx_dev->gpio.reset, 0);
+ ret += gpio_export(ifx_dev->gpio.reset, 1);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to configure GPIO%d (RESET)",
+ ifx_dev->gpio.reset);
+ ret = -EBUSY;
+ goto error_ret2;
+ }
+
+ ret = gpio_request(ifx_dev->gpio.po, "ifxModem");
+ ret += gpio_direction_output(ifx_dev->gpio.po, 0);
+ ret += gpio_export(ifx_dev->gpio.po, 1);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to configure GPIO%d (ON)",
+ ifx_dev->gpio.po);
+ ret = -EBUSY;
+ goto error_ret3;
+ }
+
+ ret = gpio_request(ifx_dev->gpio.mrdy, "ifxModem");
+ if (ret < 0) {
+ dev_err(&spi->dev, "Unable to allocate GPIO%d (MRDY)",
+ ifx_dev->gpio.mrdy);
+ goto error_ret3;
+ }
+ ret += gpio_export(ifx_dev->gpio.mrdy, 1);
+ ret += gpio_direction_output(ifx_dev->gpio.mrdy, 0);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to configure GPIO%d (MRDY)",
+ ifx_dev->gpio.mrdy);
+ ret = -EBUSY;
+ goto error_ret4;
+ }
+
+ ret = gpio_request(ifx_dev->gpio.srdy, "ifxModem");
+ if (ret < 0) {
+ dev_err(&spi->dev, "Unable to allocate GPIO%d (SRDY)",
+ ifx_dev->gpio.srdy);
+ ret = -EBUSY;
+ goto error_ret4;
+ }
+ ret += gpio_export(ifx_dev->gpio.srdy, 1);
+ ret += gpio_direction_input(ifx_dev->gpio.srdy);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to configure GPIO%d (SRDY)",
+ ifx_dev->gpio.srdy);
+ ret = -EBUSY;
+ goto error_ret5;
+ }
+
+ ret = gpio_request(ifx_dev->gpio.reset_out, "ifxModem");
+ if (ret < 0) {
+ dev_err(&spi->dev, "Unable to allocate GPIO%d (RESET_OUT)",
+ ifx_dev->gpio.reset_out);
+ goto error_ret5;
+ }
+ ret += gpio_export(ifx_dev->gpio.reset_out, 1);
+ ret += gpio_direction_input(ifx_dev->gpio.reset_out);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to configure GPIO%d (RESET_OUT)",
+ ifx_dev->gpio.reset_out);
+ ret = -EBUSY;
+ goto error_ret6;
+ }
+
+ ret = request_irq(gpio_to_irq(ifx_dev->gpio.reset_out),
+ ifx_spi_reset_interrupt,
+ IRQF_TRIGGER_RISING|IRQF_TRIGGER_FALLING, DRVNAME,
+ (void *)ifx_dev);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to get irq %x\n",
+ gpio_to_irq(ifx_dev->gpio.reset_out));
+ goto error_ret6;
+ }
+
+ ret = ifx_spi_reset(ifx_dev);
+
+ ret = request_irq(gpio_to_irq(ifx_dev->gpio.srdy),
+ ifx_spi_srdy_interrupt,
+ IRQF_TRIGGER_RISING, DRVNAME,
+ (void *)ifx_dev);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to get irq %x",
+ gpio_to_irq(ifx_dev->gpio.srdy));
+ goto error_ret7;
+ }
+
+ /* set pm runtime power state and register with power system */
+ pm_runtime_set_active(&spi->dev);
+ pm_runtime_enable(&spi->dev);
+
+ /* handle case that modem is already signaling SRDY */
+ /* no outgoing tty open at this point, this just satisfies the
+ * modem's read and should reset communication properly
+ */
+ srdy = gpio_get_value(ifx_dev->gpio.srdy);
+
+ if (srdy) {
+ mrdy_assert(ifx_dev);
+ ifx_spi_handle_srdy(ifx_dev);
+ } else
+ mrdy_set_low(ifx_dev);
+ return 0;
+
+error_ret7:
+ free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), (void *)ifx_dev);
+error_ret6:
+ gpio_free(ifx_dev->gpio.srdy);
+error_ret5:
+ gpio_free(ifx_dev->gpio.mrdy);
+error_ret4:
+ gpio_free(ifx_dev->gpio.reset);
+error_ret3:
+ gpio_free(ifx_dev->gpio.po);
+error_ret2:
+ gpio_free(ifx_dev->gpio.reset_out);
+error_ret:
+ ifx_spi_free_device(ifx_dev);
+ saved_ifx_dev = NULL;
+ return ret;
+}
+
+/**
+ * ifx_spi_spi_remove - SPI device was removed
+ * @spi: SPI device
+ *
+ * FIXME: We should be shutting the device down here not in
+ * the module unload path.
+ */
+
+static int ifx_spi_spi_remove(struct spi_device *spi)
+{
+ struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
+ /* stop activity */
+ tasklet_kill(&ifx_dev->io_work_tasklet);
+ /* free irq */
+ free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), (void *)ifx_dev);
+ free_irq(gpio_to_irq(ifx_dev->gpio.srdy), (void *)ifx_dev);
+
+ gpio_free(ifx_dev->gpio.srdy);
+ gpio_free(ifx_dev->gpio.mrdy);
+ gpio_free(ifx_dev->gpio.reset);
+ gpio_free(ifx_dev->gpio.po);
+ gpio_free(ifx_dev->gpio.reset_out);
+
+ /* free allocations */
+ ifx_spi_free_device(ifx_dev);
+
+ saved_ifx_dev = NULL;
+ return 0;
+}
+
+/**
+ * ifx_spi_spi_shutdown - called on SPI shutdown
+ * @spi: SPI device
+ *
+ * No action needs to be taken here
+ */
+
+static void ifx_spi_spi_shutdown(struct spi_device *spi)
+{
+}
+
+/*
+ * various suspends and resumes have nothing to do
+ * no hardware to save state for
+ */
+
+/**
+ * ifx_spi_spi_suspend - suspend SPI on system suspend
+ * @dev: device being suspended
+ *
+ * Suspend the SPI side. No action needed on Intel MID platforms, may
+ * need extending for other systems.
+ */
+static int ifx_spi_spi_suspend(struct spi_device *spi, pm_message_t msg)
+{
+ return 0;
+}
+
+/**
+ * ifx_spi_spi_resume - resume SPI side on system resume
+ * @spi: SPI device being resumed
+ *
+ * Resume the SPI side. No action needed on Intel MID platforms, may
+ * need extending for other systems.
+ */
+static int ifx_spi_spi_resume(struct spi_device *spi)
+{
+ return 0;
+}
+
+/**
+ * ifx_spi_pm_suspend - suspend modem on system suspend
+ * @dev: device being suspended
+ *
+ * Suspend the modem. No action needed on Intel MID platforms, may
+ * need extending for other systems.
+ */
+static int ifx_spi_pm_suspend(struct device *dev)
+{
+ return 0;
+}
+
+/**
+ * ifx_spi_pm_resume - resume modem on system resume
+ * @dev: device being resumed
+ *
+ * Allow the modem to resume. No action needed.
+ *
+ * FIXME: do we need to reset anything here ?
+ */
+static int ifx_spi_pm_resume(struct device *dev)
+{
+ return 0;
+}
+
+/**
+ * ifx_spi_pm_runtime_resume - resume modem
+ * @dev: device being resumed
+ *
+ * Allow the modem to resume. No action needed.
+ */
+static int ifx_spi_pm_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+/**
+ * ifx_spi_pm_runtime_suspend - suspend modem
+ * @dev: device being suspended
+ *
+ * Allow the modem to suspend and thus allow the suspend to continue
+ * up the device tree.
+ */
+static int ifx_spi_pm_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+/**
+ * ifx_spi_pm_runtime_idle - check if modem idle
+ * @dev: our device
+ *
+ * Check conditions and queue runtime suspend if idle.
+ */
+static int ifx_spi_pm_runtime_idle(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
+
+ if (!ifx_dev->power_status)
+ pm_runtime_suspend(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ifx_spi_pm = {
+ .resume = ifx_spi_pm_resume,
+ .suspend = ifx_spi_pm_suspend,
+ .runtime_resume = ifx_spi_pm_runtime_resume,
+ .runtime_suspend = ifx_spi_pm_runtime_suspend,
+ .runtime_idle = ifx_spi_pm_runtime_idle
+};
+
+static const struct spi_device_id ifx_id_table[] = {
+ {"ifx6160", 0},
+ {"ifx6260", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ifx_id_table);
+
+/* spi operations */
+static const struct spi_driver ifx_spi_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .bus = &spi_bus_type,
+ .pm = &ifx_spi_pm,
+ .owner = THIS_MODULE},
+ .probe = ifx_spi_spi_probe,
+ .shutdown = ifx_spi_spi_shutdown,
+ .remove = __devexit_p(ifx_spi_spi_remove),
+ .suspend = ifx_spi_spi_suspend,
+ .resume = ifx_spi_spi_resume,
+ .id_table = ifx_id_table
+};
+
+/**
+ * ifx_spi_exit - module exit
+ *
+ * Unload the module.
+ */
+
+static void __exit ifx_spi_exit(void)
+{
+ /* unregister */
+ tty_unregister_driver(tty_drv);
+ spi_unregister_driver((void *)&ifx_spi_driver);
+}
+
+/**
+ * ifx_spi_init - module entry point
+ *
+ * Initialise the SPI and tty interfaces for the IFX SPI driver.
+ * We need to initialize the upper-edge SPI driver after the tty
+ * driver because otherwise the SPI probe would race with the tty
+ * driver registration.
+ */
+
+static int __init ifx_spi_init(void)
+{
+ int result;
+
+ tty_drv = alloc_tty_driver(1);
+ if (!tty_drv) {
+ pr_err("%s: alloc_tty_driver failed", DRVNAME);
+ return -ENOMEM;
+ }
+
+ tty_drv->magic = TTY_DRIVER_MAGIC;
+ tty_drv->owner = THIS_MODULE;
+ tty_drv->driver_name = DRVNAME;
+ tty_drv->name = TTYNAME;
+ tty_drv->minor_start = IFX_SPI_TTY_ID;
+ tty_drv->num = 1;
+ tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
+ tty_drv->subtype = SERIAL_TYPE_NORMAL;
+ tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_drv->init_termios = tty_std_termios;
+
+ tty_set_operations(tty_drv, &ifx_spi_serial_ops);
+
+ result = tty_register_driver(tty_drv);
+ if (result) {
+ pr_err("%s: tty_register_driver failed(%d)",
+ DRVNAME, result);
+ put_tty_driver(tty_drv);
+ return result;
+ }
+
+ result = spi_register_driver((void *)&ifx_spi_driver);
+ if (result) {
+ pr_err("%s: spi_register_driver failed(%d)",
+ DRVNAME, result);
+ tty_unregister_driver(tty_drv);
+ }
+ return result;
+}
+
+module_init(ifx_spi_init);
+module_exit(ifx_spi_exit);
+
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("IFX6x60 spi driver");
+MODULE_LICENSE("GPL");
+MODULE_INFO(Version, "0.1-IFX6x60");
diff --git a/drivers/tty/serial/ifx6x60.h b/drivers/tty/serial/ifx6x60.h
new file mode 100644
index 00000000..e8464baf
--- /dev/null
+++ b/drivers/tty/serial/ifx6x60.h
@@ -0,0 +1,129 @@
+/****************************************************************************
+ *
+ * Driver for the IFX spi modem.
+ *
+ * Copyright (C) 2009, 2010 Intel Corp
+ * Jim Stanley <jim.stanley@intel.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA
+ *
+ *
+ *
+ *****************************************************************************/
+#ifndef _IFX6X60_H
+#define _IFX6X60_H
+
+#define DRVNAME "ifx6x60"
+#define TTYNAME "ttyIFX"
+
+#define IFX_SPI_MAX_MINORS 1
+#define IFX_SPI_TRANSFER_SIZE 2048
+#define IFX_SPI_FIFO_SIZE 4096
+
+#define IFX_SPI_HEADER_OVERHEAD 4
+#define IFX_RESET_TIMEOUT msecs_to_jiffies(50)
+
+/* device flags bitfield definitions */
+#define IFX_SPI_STATE_PRESENT 0
+#define IFX_SPI_STATE_IO_IN_PROGRESS 1
+#define IFX_SPI_STATE_IO_READY 2
+#define IFX_SPI_STATE_TIMER_PENDING 3
+
+/* flow control bitfields */
+#define IFX_SPI_DCD 0
+#define IFX_SPI_CTS 1
+#define IFX_SPI_DSR 2
+#define IFX_SPI_RI 3
+#define IFX_SPI_DTR 4
+#define IFX_SPI_RTS 5
+#define IFX_SPI_TX_FC 6
+#define IFX_SPI_RX_FC 7
+#define IFX_SPI_UPDATE 8
+
+#define IFX_SPI_PAYLOAD_SIZE (IFX_SPI_TRANSFER_SIZE - \
+ IFX_SPI_HEADER_OVERHEAD)
+
+#define IFX_SPI_IRQ_TYPE DETECT_EDGE_RISING
+#define IFX_SPI_GPIO_TARGET 0
+#define IFX_SPI_GPIO0 0x105
+
+#define IFX_SPI_STATUS_TIMEOUT (2000*HZ)
+
+/* values for bits in power status byte */
+#define IFX_SPI_POWER_DATA_PENDING 1
+#define IFX_SPI_POWER_SRDY 2
+
+struct ifx_spi_device {
+ /* Our SPI device */
+ struct spi_device *spi_dev;
+
+ /* Port specific data */
+ struct kfifo tx_fifo;
+ spinlock_t fifo_lock;
+ unsigned long signal_state;
+
+ /* TTY Layer logic */
+ struct tty_port tty_port;
+ struct device *tty_dev;
+ int minor;
+
+ /* Low level I/O work */
+ struct tasklet_struct io_work_tasklet;
+ unsigned long flags;
+ dma_addr_t rx_dma;
+ dma_addr_t tx_dma;
+
+ int modem; /* Modem type */
+ int use_dma; /* provide dma-able addrs in SPI msg */
+ long max_hz; /* max SPI frequency */
+
+ spinlock_t write_lock;
+ int write_pending;
+ spinlock_t power_lock;
+ unsigned char power_status;
+
+ unsigned char *rx_buffer;
+ unsigned char *tx_buffer;
+ dma_addr_t rx_bus;
+ dma_addr_t tx_bus;
+ unsigned char spi_more;
+ unsigned char spi_slave_cts;
+
+ struct timer_list spi_timer;
+
+ struct spi_message spi_msg;
+ struct spi_transfer spi_xfer;
+
+ struct {
+ /* gpio lines */
+ unsigned short srdy; /* slave-ready gpio */
+ unsigned short mrdy; /* master-ready gpio */
+ unsigned short reset; /* modem-reset gpio */
+ unsigned short po; /* modem-on gpio */
+ unsigned short reset_out; /* modem-in-reset gpio */
+ /* state/stats */
+ int unack_srdy_int_nb;
+ } gpio;
+
+ /* modem reset */
+ unsigned long mdm_reset_state;
+#define MR_START 0
+#define MR_INPROGRESS 1
+#define MR_COMPLETE 2
+ wait_queue_head_t mdm_reset_wait;
+};
+
+#endif /* _IFX6X60_H */
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
new file mode 100644
index 00000000..a512a766
--- /dev/null
+++ b/drivers/tty/serial/imx.c
@@ -0,0 +1,1909 @@
+/*
+ * Driver for Motorola IMX serial ports
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * Author: Sascha Hauer <sascha@saschahauer.de>
+ * Copyright (C) 2004 Pengutronix
+ *
+ * Copyright (C) 2009 emlix GmbH
+ * Author: Fabian Godehardt (added IrDA support for iMX)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * [29-Mar-2005] Mike Lee
+ * Added hardware handshake
+ */
+
+#if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/rational.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <mach/dma.h>
+#include <mach/hardware.h>
+#include <mach/imx-uart.h>
+
+
+/* Register definitions */
+#define URXD0 0x0 /* Receiver Register */
+#define URTX0 0x40 /* Transmitter Register */
+#define UCR1 0x80 /* Control Register 1 */
+#define UCR2 0x84 /* Control Register 2 */
+#define UCR3 0x88 /* Control Register 3 */
+#define UCR4 0x8c /* Control Register 4 */
+#define UFCR 0x90 /* FIFO Control Register */
+#define USR1 0x94 /* Status Register 1 */
+#define USR2 0x98 /* Status Register 2 */
+#define UESC 0x9c /* Escape Character Register */
+#define UTIM 0xa0 /* Escape Timer Register */
+#define UBIR 0xa4 /* BRM Incremental Register */
+#define UBMR 0xa8 /* BRM Modulator Register */
+#define UBRC 0xac /* Baud Rate Count Register */
+#define MX2_ONEMS 0xb0 /* One Millisecond register */
+#define UTS (cpu_is_mx1() ? 0xd0 : 0xb4) /* UART Test Register */
+
+/* UART Control Register Bit Fields.*/
+#define URXD_CHARRDY (1<<15)
+#define URXD_ERR (1<<14)
+#define URXD_OVRRUN (1<<13)
+#define URXD_FRMERR (1<<12)
+#define URXD_BRK (1<<11)
+#define URXD_PRERR (1<<10)
+#define UCR1_ADEN (1<<15) /* Auto detect interrupt */
+#define UCR1_ADBR (1<<14) /* Auto detect baud rate */
+#define UCR1_TRDYEN (1<<13) /* Transmitter ready interrupt enable */
+#define UCR1_IDEN (1<<12) /* Idle condition interrupt */
+#define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */
+#define UCR1_RRDYEN (1<<9) /* Recv ready interrupt enable */
+#define UCR1_RDMAEN (1<<8) /* Recv ready DMA enable */
+#define UCR1_IREN (1<<7) /* Infrared interface enable */
+#define UCR1_TXMPTYEN (1<<6) /* Transmitter empty interrupt enable */
+#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
+#define UCR1_SNDBRK (1<<4) /* Send break */
+#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
+#define MX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, mx1 only */
+#define UCR1_ATDMAEN (1<<2) /* Aging DMA Timer Enable */
+#define UCR1_DOZE (1<<1) /* Doze */
+#define UCR1_UARTEN (1<<0) /* UART enabled */
+#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */
+#define UCR2_IRTS (1<<14) /* Ignore RTS pin */
+#define UCR2_CTSC (1<<13) /* CTS pin control */
+#define UCR2_CTS (1<<12) /* Clear to send */
+#define UCR2_ESCEN (1<<11) /* Escape enable */
+#define UCR2_PREN (1<<8) /* Parity enable */
+#define UCR2_PROE (1<<7) /* Parity odd/even */
+#define UCR2_STPB (1<<6) /* Stop */
+#define UCR2_WS (1<<5) /* Word size */
+#define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */
+#define UCR2_ATEN (1<<3) /* Aging Timer Enable */
+#define UCR2_TXEN (1<<2) /* Transmitter enabled */
+#define UCR2_RXEN (1<<1) /* Receiver enabled */
+#define UCR2_SRST (1<<0) /* SW reset */
+#define UCR3_DTREN (1<<13) /* DTR interrupt enable */
+#define UCR3_PARERREN (1<<12) /* Parity error interrupt enable */
+#define UCR3_FRAERREN (1<<11) /* Frame error interrupt enable */
+#define UCR3_DSR (1<<10) /* Data set ready */
+#define UCR3_DCD (1<<9) /* Data carrier detect */
+#define UCR3_RI (1<<8) /* Ring indicator */
+#define UCR3_TIMEOUTEN (1<<7) /* Timeout interrupt enable */
+#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
+#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
+#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
+#define MX1_UCR3_REF25 (1<<3) /* Ref freq 25 MHz, only on mx1 */
+#define MX1_UCR3_REF30 (1<<2) /* Ref Freq 30 MHz, only on mx1 */
+#define MX2_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select, on mx2/mx3 */
+#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
+#define UCR3_BPEN (1<<0) /* Preset registers enable */
+#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
+#define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */
+#define UCR4_INVR (1<<9) /* Inverted infrared reception */
+#define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
+#define UCR4_WKEN (1<<7) /* Wake interrupt enable */
+#define UCR4_REF16 (1<<6) /* Ref freq 16 MHz */
+#define UCR4_IDDMAEN (1<<6) /* DMA IDLE Condition Detected */
+#define UCR4_IRSC (1<<5) /* IR special case */
+#define UCR4_TCEN (1<<3) /* Transmit complete interrupt enable */
+#define UCR4_BKEN (1<<2) /* Break condition interrupt enable */
+#define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */
+#define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
+#define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
+#define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
+#define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
+#define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */
+#define UFCR_DCEDTE (1<<6)
+#define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */
+#define USR1_RTSS (1<<14) /* RTS pin status */
+#define USR1_TRDY (1<<13) /* Transmitter ready interrupt/dma flag */
+#define USR1_RTSD (1<<12) /* RTS delta */
+#define USR1_ESCF (1<<11) /* Escape seq interrupt flag */
+#define USR1_FRAMERR (1<<10) /* Frame error interrupt flag */
+#define USR1_RRDY (1<<9) /* Receiver ready interrupt/dma flag */
+#define USR1_TIMEOUT (1<<7) /* Receive timeout interrupt status */
+#define USR1_RXDS (1<<6) /* Receiver idle interrupt flag */
+#define USR1_AIRINT (1<<5) /* Async IR wake interrupt flag */
+#define USR1_AWAKE (1<<4) /* Async wake interrupt flag */
+#define USR2_ADET (1<<15) /* Auto baud rate detect complete */
+#define USR2_TXFE (1<<14) /* Transmit buffer FIFO empty */
+#define USR2_DTRF (1<<13) /* DTR edge interrupt flag */
+#define USR2_IDLE (1<<12) /* Idle condition */
+#define USR2_IRINT (1<<8) /* Serial infrared interrupt flag */
+#define USR2_WAKE (1<<7) /* Wake */
+#define USR2_RTSF (1<<4) /* RTS edge interrupt flag */
+#define USR2_TXDC (1<<3) /* Transmitter complete */
+#define USR2_BRCD (1<<2) /* Break condition */
+#define USR2_ORE (1<<1) /* Overrun error */
+#define USR2_RDR (1<<0) /* Recv data ready */
+#define UTS_FRCPERR (1<<13) /* Force parity error */
+#define UTS_LOOP (1<<12) /* Loop tx and rx */
+#define UTS_TXEMPTY (1<<6) /* TxFIFO empty */
+#define UTS_RXEMPTY (1<<5) /* RxFIFO empty */
+#define UTS_TXFULL (1<<4) /* TxFIFO full */
+#define UTS_RXFULL (1<<3) /* RxFIFO full */
+#define UTS_SOFTRST (1<<0) /* Software reset */
+
+/* We've been assigned a range on the "Low-density serial ports" major */
+#define SERIAL_IMX_MAJOR 207
+#define MINOR_START 16
+#define DEV_NAME "ttymxc"
+#define MAX_INTERNAL_IRQ MXC_INTERNAL_IRQS
+
+/*
+ * This determines how often we check the modem status signals
+ * for any change. They generally aren't connected to an IRQ
+ * so we have to poll them. We also check immediately before
+ * filling the TX fifo in case CTS has been dropped.
+ */
+#define MCTRL_TIMEOUT (250*HZ/1000)
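+
+/*
+ * For illustration (assumed values, not part of the original source):
+ * MCTRL_TIMEOUT is 250 ms expressed in jiffies, so with HZ = 100 the
+ * modem-status poll timer fires every 250*100/1000 = 25 jiffies.
+ */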
+
+#define DRIVER_NAME "IMX-uart"
+
+#define UART_NR 8
+
+struct imx_port {
+ struct uart_port port;
+ struct timer_list timer;
+ unsigned int old_status;
+ int txirq,rxirq,rtsirq;
+ unsigned int have_rtscts:1;
+ unsigned int use_dcedte:1;
+ unsigned int use_irda:1;
+ unsigned int irda_inv_rx:1;
+ unsigned int irda_inv_tx:1;
+ unsigned short trcv_delay; /* transceiver delay */
+ struct clk *clk;
+
+ /* DMA fields */
+ int enable_dma;
+ struct imx_dma_data dma_data;
+ struct dma_chan *dma_chan_rx, *dma_chan_tx;
+ struct scatterlist rx_sgl, tx_sgl[2];
+ void *rx_buf;
+ unsigned int rx_bytes, tx_bytes;
+ struct work_struct tsk_dma_rx, tsk_dma_tx;
+ unsigned int dma_tx_nents;
+ bool dma_is_rxing;
+ wait_queue_head_t dma_wait;
+};
+
+struct imx_port_ucrs {
+ unsigned int ucr1;
+ unsigned int ucr2;
+ unsigned int ucr3;
+};
+
+#ifdef CONFIG_IRDA
+#define USE_IRDA(sport) ((sport)->use_irda)
+#else
+#define USE_IRDA(sport) (0)
+#endif
+
+/*
+ * Save and restore functions for UCR1, UCR2 and UCR3 registers
+ */
+static void imx_port_ucrs_save(struct uart_port *port,
+ struct imx_port_ucrs *ucr)
+{
+ /* save control registers */
+ ucr->ucr1 = readl(port->membase + UCR1);
+ ucr->ucr2 = readl(port->membase + UCR2);
+ ucr->ucr3 = readl(port->membase + UCR3);
+}
+
+static void imx_port_ucrs_restore(struct uart_port *port,
+ struct imx_port_ucrs *ucr)
+{
+ /* restore control registers */
+ writel(ucr->ucr1, port->membase + UCR1);
+ writel(ucr->ucr2, port->membase + UCR2);
+ writel(ucr->ucr3, port->membase + UCR3);
+}
+
+/*
+ * Handle any change of modem status signal since we were last called.
+ */
+static void imx_mctrl_check(struct imx_port *sport)
+{
+ unsigned int status, changed;
+
+ status = sport->port.ops->get_mctrl(&sport->port);
+ changed = status ^ sport->old_status;
+
+ if (changed == 0)
+ return;
+
+ sport->old_status = status;
+
+ if (changed & TIOCM_RI)
+ sport->port.icount.rng++;
+ if (changed & TIOCM_DSR)
+ sport->port.icount.dsr++;
+ if (changed & TIOCM_CAR)
+ uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
+ if (changed & TIOCM_CTS)
+ uart_handle_cts_change(&sport->port, status & TIOCM_CTS);
+
+ wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
+}
+
+/*
+ * This is our per-port timeout handler, for checking the
+ * modem status signals.
+ */
+static void imx_timeout(unsigned long data)
+{
+ struct imx_port *sport = (struct imx_port *)data;
+ unsigned long flags;
+
+ if (sport->port.state) {
+ spin_lock_irqsave(&sport->port.lock, flags);
+ imx_mctrl_check(sport);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
+ }
+}
+
+/*
+ * interrupts disabled on entry
+ */
+static void imx_stop_tx(struct uart_port *port)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+ unsigned long temp;
+
+ if (USE_IRDA(sport)) {
+ /* half duplex - wait for end of transmission */
+ int n = 256;
+ while ((--n > 0) &&
+ !(readl(sport->port.membase + USR2) & USR2_TXDC)) {
+ udelay(5);
+ barrier();
+ }
+ /*
+ * irda transceiver - wait a bit more to avoid
+ * cutoff, hardware dependent
+ */
+ udelay(sport->trcv_delay);
+
+ /*
+ * half duplex - reactivate receive mode,
+ * flush receive pipe echo crap
+ */
+ if (readl(sport->port.membase + USR2) & USR2_TXDC) {
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~(UCR1_TXMPTYEN | UCR1_TRDYEN);
+ writel(temp, sport->port.membase + UCR1);
+
+ temp = readl(sport->port.membase + UCR4);
+ temp &= ~(UCR4_TCEN);
+ writel(temp, sport->port.membase + UCR4);
+
+ while (readl(sport->port.membase + URXD0) &
+ URXD_CHARRDY)
+ barrier();
+
+ temp = readl(sport->port.membase + UCR1);
+ temp |= UCR1_RRDYEN;
+ writel(temp, sport->port.membase + UCR1);
+
+ temp = readl(sport->port.membase + UCR4);
+ temp |= UCR4_DREN;
+ writel(temp, sport->port.membase + UCR4);
+ }
+ return;
+ }
+
+ temp = readl(sport->port.membase + UCR1);
+ writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1);
+}
+
+/*
+ * interrupts disabled on entry
+ */
+static void imx_stop_rx(struct uart_port *port)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+ unsigned long temp;
+
+ /*
+ * We are in SMP now, so if the DMA RX thread is running,
+ * we have to wait for it to finish.
+ */
+ if (sport->enable_dma && sport->dma_is_rxing)
+ return;
+
+ temp = readl(sport->port.membase + UCR2);
+ writel(temp &~ UCR2_RXEN, sport->port.membase + UCR2);
+}
+
+/*
+ * Set the modem control timer to fire immediately.
+ */
+static void imx_enable_ms(struct uart_port *port)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+
+ mod_timer(&sport->timer, jiffies);
+}
+
+static inline void imx_transmit_buffer(struct imx_port *sport)
+{
+ struct circ_buf *xmit = &sport->port.state->xmit;
+
+ while (!uart_circ_empty(xmit) &&
+ !(readl(sport->port.membase + UTS) & UTS_TXFULL)) {
+ /* send xmit->buf[xmit->tail]
+ * out the port here */
+ writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ sport->port.icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+
+ if (uart_circ_empty(xmit))
+ imx_stop_tx(&sport->port);
+}
+
+static void dma_tx_callback(void *data)
+{
+ struct imx_port *sport = data;
+ struct scatterlist *sgl = &sport->tx_sgl[0];
+ struct circ_buf *xmit = &sport->port.state->xmit;
+
+ dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+
+ /* update the stat */
+ spin_lock(&sport->port.lock);
+ xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
+ sport->port.icount.tx += sport->tx_bytes;
+ spin_unlock(&sport->port.lock);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+ schedule_work(&sport->tsk_dma_tx);
+}
+
+static void dma_tx_work(struct work_struct *w)
+{
+ struct imx_port *sport = container_of(w, struct imx_port, tsk_dma_tx);
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ struct scatterlist *sgl = &sport->tx_sgl[0];
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan = sport->dma_chan_tx;
+ enum dma_status status;
+ unsigned long flags;
+ int ret;
+
+ status = chan->device->device_tx_status(chan, (dma_cookie_t)NULL, NULL);
+ if (DMA_IN_PROGRESS == status)
+ return;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ sport->tx_bytes = uart_circ_chars_pending(xmit);
+ if (sport->tx_bytes > 0) {
+ if (xmit->tail > xmit->head) {
+ sport->dma_tx_nents = 2;
+ sg_init_table(sgl, 2);
+ sg_set_buf(sgl, xmit->buf + xmit->tail,
+ UART_XMIT_SIZE - xmit->tail);
+ sg_set_buf(&sgl[1], xmit->buf, xmit->head);
+ } else {
+ sport->dma_tx_nents = 1;
+ sg_init_one(sgl, xmit->buf + xmit->tail,
+ sport->tx_bytes);
+ }
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ ret = dma_map_sg(sport->port.dev, sgl,
+ sport->dma_tx_nents, DMA_TO_DEVICE);
+ if (ret == 0) {
+ pr_err("DMA mapping error for TX.\n");
+ return;
+ }
+ desc = chan->device->device_prep_slave_sg(chan, sgl,
+ sport->dma_tx_nents, DMA_TO_DEVICE, 0);
+ if (!desc) {
+ pr_err("We cannot prepare for the TX slave dma!\n");
+ return;
+ }
+ desc->callback = dma_tx_callback;
+ desc->callback_param = sport;
+
+ /* fire it */
+ dmaengine_submit(desc);
+ return;
+ }
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ return;
+}
+
+/*
+ * interrupts disabled on entry
+ */
+static void imx_start_tx(struct uart_port *port)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+ unsigned long temp;
+
+ if (USE_IRDA(sport)) {
+ /* half duplex in IrDA mode; have to disable receive mode */
+ temp = readl(sport->port.membase + UCR4);
+ temp &= ~(UCR4_DREN);
+ writel(temp, sport->port.membase + UCR4);
+
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~(UCR1_RRDYEN);
+ writel(temp, sport->port.membase + UCR1);
+ }
+
+ if (!sport->enable_dma) {
+ temp = readl(sport->port.membase + UCR1);
+ writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
+ }
+
+ if (USE_IRDA(sport)) {
+ temp = readl(sport->port.membase + UCR1);
+ temp |= UCR1_TRDYEN;
+ writel(temp, sport->port.membase + UCR1);
+
+ temp = readl(sport->port.membase + UCR4);
+ temp |= UCR4_TCEN;
+ writel(temp, sport->port.membase + UCR4);
+ }
+
+ if (sport->enable_dma) {
+ schedule_work(&sport->tsk_dma_tx);
+ return;
+ }
+
+ if (readl(sport->port.membase + UTS) & UTS_TXEMPTY)
+ imx_transmit_buffer(sport);
+}
+
+static irqreturn_t imx_rtsint(int irq, void *dev_id)
+{
+ struct imx_port *sport = dev_id;
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ writel(USR1_RTSD, sport->port.membase + USR1);
+ val = readl(sport->port.membase + USR1) & USR1_RTSS;
+ uart_handle_cts_change(&sport->port, !!val);
+ wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t imx_txint(int irq, void *dev_id)
+{
+ struct imx_port *sport = dev_id;
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sport->port.lock,flags);
+ if (sport->port.x_char)
+ {
+ /* Send next char */
+ writel(sport->port.x_char, sport->port.membase + URTX0);
+ goto out;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
+ imx_stop_tx(&sport->port);
+ goto out;
+ }
+
+ imx_transmit_buffer(sport);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+
+out:
+ spin_unlock_irqrestore(&sport->port.lock,flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t imx_rxint(int irq, void *dev_id)
+{
+ struct imx_port *sport = dev_id;
+ unsigned int rx,flg,ignored = 0;
+ struct tty_struct *tty = sport->port.state->port.tty;
+ unsigned long flags, temp;
+
+ spin_lock_irqsave(&sport->port.lock,flags);
+
+ while (readl(sport->port.membase + USR2) & USR2_RDR) {
+ flg = TTY_NORMAL;
+ sport->port.icount.rx++;
+
+ rx = readl(sport->port.membase + URXD0);
+
+ temp = readl(sport->port.membase + USR2);
+ if (temp & USR2_BRCD) {
+ writel(USR2_BRCD, sport->port.membase + USR2);
+ if (uart_handle_break(&sport->port))
+ continue;
+ }
+
+ if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
+ continue;
+
+ if (unlikely(rx & URXD_ERR)) {
+ if (rx & URXD_BRK)
+ sport->port.icount.brk++;
+ else if (rx & URXD_PRERR)
+ sport->port.icount.parity++;
+ else if (rx & URXD_FRMERR)
+ sport->port.icount.frame++;
+ if (rx & URXD_OVRRUN)
+ sport->port.icount.overrun++;
+
+ if (rx & sport->port.ignore_status_mask) {
+ if (++ignored > 100)
+ goto out;
+ continue;
+ }
+
+ rx &= sport->port.read_status_mask;
+
+ if (rx & URXD_BRK)
+ flg = TTY_BREAK;
+ else if (rx & URXD_PRERR)
+ flg = TTY_PARITY;
+ else if (rx & URXD_FRMERR)
+ flg = TTY_FRAME;
+ if (rx & URXD_OVRRUN)
+ flg = TTY_OVERRUN;
+
+#ifdef SUPPORT_SYSRQ
+ sport->port.sysrq = 0;
+#endif
+ }
+
+ tty_insert_flip_char(tty, rx, flg);
+ }
+
+out:
+ spin_unlock_irqrestore(&sport->port.lock,flags);
+ tty_flip_buffer_push(tty);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Wait until the RXFIFO is filled with some data, and then
+ * start a DMA operation to receive the data.
+ */
+static void imx_dma_rxint(struct imx_port *sport)
+{
+ unsigned long temp;
+
+ temp = readl(sport->port.membase + USR2);
+ if ((temp & USR2_RDR) && !sport->dma_is_rxing) {
+ sport->dma_is_rxing = true;
+
+ /* disable the Receiver Ready Interrupt */
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~(UCR1_RRDYEN);
+ writel(temp, sport->port.membase + UCR1);
+
+ /* tell the DMA to receive the data. */
+ schedule_work(&sport->tsk_dma_rx);
+ }
+}
+
+static irqreturn_t imx_int(int irq, void *dev_id)
+{
+ struct imx_port *sport = dev_id;
+ unsigned int sts;
+
+ sts = readl(sport->port.membase + USR1);
+
+ if (sts & USR1_RRDY) {
+ if (sport->enable_dma)
+ imx_dma_rxint(sport);
+ else
+ imx_rxint(irq, dev_id);
+ }
+
+ if (sts & USR1_TRDY &&
+ readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN)
+ imx_txint(irq, dev_id);
+
+ if (sts & USR1_RTSD)
+ imx_rtsint(irq, dev_id);
+
+ if (sts & USR1_AWAKE)
+ writel(USR1_AWAKE, sport->port.membase + USR1);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Return TIOCSER_TEMT when transmitter is not busy.
+ */
+static unsigned int imx_tx_empty(struct uart_port *port)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+
+ return (readl(sport->port.membase + USR2) & USR2_TXDC) ? TIOCSER_TEMT : 0;
+}
+
+/*
+ * We have a modem side uart, so the meanings of RTS and CTS are inverted.
+ */
+static unsigned int imx_get_mctrl(struct uart_port *port)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+ unsigned int tmp = TIOCM_DSR | TIOCM_CAR;
+
+ if (readl(sport->port.membase + USR1) & USR1_RTSS)
+ tmp |= TIOCM_CTS;
+
+ if (readl(sport->port.membase + UCR2) & UCR2_CTS)
+ tmp |= TIOCM_RTS;
+
+ if (readl(sport->port.membase + UTS) & UTS_LOOP)
+ tmp |= TIOCM_LOOP;
+
+ return tmp;
+}
+
+static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+ unsigned long temp;
+
+ temp = readl(sport->port.membase + UCR2) & ~UCR2_CTS;
+
+ if (mctrl & TIOCM_RTS)
+ temp |= UCR2_CTS;
+
+ writel(temp, sport->port.membase + UCR2);
+
+ if (mctrl & TIOCM_LOOP) {
+ temp = readl(sport->port.membase + UTS) & ~UTS_LOOP;
+ temp |= UTS_LOOP;
+ writel(temp, sport->port.membase + UTS);
+ } else {
+ temp = readl(sport->port.membase + UTS) & ~UTS_LOOP;
+ writel(temp, sport->port.membase + UTS);
+ }
+}
+
+/*
+ * Interrupts always disabled.
+ */
+static void imx_break_ctl(struct uart_port *port, int break_state)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+ unsigned long flags, temp;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ temp = readl(sport->port.membase + UCR1) & ~UCR1_SNDBRK;
+
+ if ( break_state != 0 )
+ temp |= UCR1_SNDBRK;
+
+ writel(temp, sport->port.membase + UCR1);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+#define TXTL 2 /* reset default */
+#define RXTL 1 /* reset default */
+
+static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
+{
+ unsigned int val;
+ unsigned int ufcr_rfdiv;
+
+ /* set receiver / transmitter trigger level.
+ * RFDIV is set in such a way as to satisfy the requested uartclk value
+ */
+ val = TXTL << 10 | RXTL;
+ ufcr_rfdiv = (clk_get_rate(sport->clk) + sport->port.uartclk / 2)
+ / sport->port.uartclk;
+
+ if(!ufcr_rfdiv)
+ ufcr_rfdiv = 1;
+
+ val |= UFCR_RFDIV_REG(ufcr_rfdiv);
+
+ writel(val, sport->port.membase + UFCR);
+
+ return 0;
+}
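+
+/*
+ * Worked example for the RFDIV computation above (illustrative numbers,
+ * not taken from any particular board): with a module clock of 48 MHz and
+ * a requested uartclk of 16 MHz, ufcr_rfdiv = (48000000 + 8000000) / 16000000
+ * = 3, and UFCR_RFDIV_REG(3) encodes a divide-by-3 as (6 - 3) << 7.
+ */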
+
+static bool imx_uart_filter(struct dma_chan *chan, void *param)
+{
+ struct imx_port *sport = param;
+
+ if (!imx_dma_is_general_purpose(chan))
+ return false;
+ chan->private = &sport->dma_data;
+ return true;
+}
+
+#define RX_BUF_SIZE (PAGE_SIZE)
+static int start_rx_dma(struct imx_port *sport);
+
+static void dma_rx_work(struct work_struct *w)
+{
+ struct imx_port *sport = container_of(w, struct imx_port, tsk_dma_rx);
+ struct tty_struct *tty = sport->port.state->port.tty;
+
+ if (sport->rx_bytes) {
+ tty_insert_flip_string(tty, sport->rx_buf, sport->rx_bytes);
+ tty_flip_buffer_push(tty);
+ sport->rx_bytes = 0;
+ }
+
+ if (sport->dma_is_rxing)
+ start_rx_dma(sport);
+}
+
+static void imx_finish_dma(struct imx_port *sport)
+{
+ unsigned long temp;
+
+ /* Enable the interrupt when the RXFIFO is not empty. */
+ temp = readl(sport->port.membase + UCR1);
+ temp |= UCR1_RRDYEN;
+ writel(temp, sport->port.membase + UCR1);
+
+ sport->dma_is_rxing = false;
+ if (waitqueue_active(&sport->dma_wait))
+ wake_up(&sport->dma_wait);
+}
+
+/*
+ * There are three kinds of RX DMA interrupts:
+ * [1] the RX DMA buffer is full.
+ * [2] the aging timer expires (after waiting for 8 byte periods)
+ * [3] the Idle Condition Detect (enabled by UCR4_IDDMAEN).
+ *
+ * [2] triggers when a character has been sitting in the FIFO for a
+ * while, whereas [3] can wait for as long as 32 byte periods when the
+ * RX line is in the IDLE state and the RxFIFO is empty.
+ * in a specific period of time so we use both.
+ */
+static void dma_rx_callback(void *data)
+{
+ struct imx_port *sport = data;
+ struct dma_chan *chan = sport->dma_chan_rx;
+ unsigned int count;
+ struct tty_struct *tty;
+ struct scatterlist *sgl;
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ tty = sport->port.state->port.tty;
+ sgl = &sport->rx_sgl;
+
+ /* unmap it first */
+ dma_unmap_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE);
+
+ /* If we have finished reading, we will not accept any more data. */
+ if (tty->closing) {
+ imx_finish_dma(sport);
+ return;
+ }
+
+ status = chan->device->device_tx_status(chan,
+ (dma_cookie_t)NULL, &state);
+ count = RX_BUF_SIZE - state.residue;
+ if (count) {
+ sport->rx_bytes = count;
+ schedule_work(&sport->tsk_dma_rx);
+ } else
+ imx_finish_dma(sport);
+}
+
+static int start_rx_dma(struct imx_port *sport)
+{
+ struct scatterlist *sgl = &sport->rx_sgl;
+ struct dma_chan *chan = sport->dma_chan_rx;
+ struct dma_async_tx_descriptor *desc;
+ int ret;
+
+ sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
+ ret = dma_map_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE);
+ if (ret == 0) {
+ pr_err("DMA mapping error for RX.\n");
+ return -EINVAL;
+ }
+ desc = chan->device->device_prep_slave_sg(chan,
+ sgl, 1, DMA_FROM_DEVICE, 0);
+ if (!desc) {
+ pr_err("We cannot prepare for the RX slave dma!\n");
+ return -EINVAL;
+ }
+ desc->callback = dma_rx_callback;
+ desc->callback_param = sport;
+
+ dmaengine_submit(desc);
+ return 0;
+}
+
+static void imx_uart_dma_exit(struct imx_port *sport)
+{
+ if (sport->dma_chan_rx) {
+ dma_release_channel(sport->dma_chan_rx);
+ sport->dma_chan_rx = NULL;
+
+ kfree(sport->rx_buf);
+ sport->rx_buf = NULL;
+ }
+
+ if (sport->dma_chan_tx) {
+ dma_release_channel(sport->dma_chan_tx);
+ sport->dma_chan_tx = NULL;
+ }
+}
+
+/* see the "i.MX61 SDMA Scripts User Manual.doc" for the parameters */
+static int imx_uart_dma_init(struct imx_port *sport)
+{
+ struct imxuart_platform_data *pdata = sport->port.dev->platform_data;
+ struct dma_slave_config slave_config;
+ dma_cap_mask_t mask;
+ int ret;
+
+ /* prepare for RX : */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ sport->dma_data.priority = DMA_PRIO_HIGH;
+ sport->dma_data.dma_request = pdata->dma_req_rx;
+ sport->dma_data.peripheral_type = IMX_DMATYPE_UART;
+
+ sport->dma_chan_rx = dma_request_channel(mask, imx_uart_filter, sport);
+ if (!sport->dma_chan_rx) {
+ pr_err("cannot get the DMA channel.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ slave_config.direction = DMA_DEV_TO_MEM;
+ slave_config.src_addr = sport->port.mapbase + URXD0;
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.src_maxburst = RXTL; /* fix me */
+ ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config);
+ if (ret) {
+ pr_err("error in RX dma configuration.\n");
+ goto err;
+ }
+
+ sport->rx_buf = kzalloc(PAGE_SIZE, GFP_DMA);
+ if (!sport->rx_buf) {
+ pr_err("cannot alloc DMA buffer.\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ sport->rx_bytes = 0;
+
+ /* prepare for TX : */
+ sport->dma_data.dma_request = pdata->dma_req_tx;
+ sport->dma_chan_tx = dma_request_channel(mask, imx_uart_filter, sport);
+ if (!sport->dma_chan_tx) {
+ pr_err("cannot get the TX DMA channel!\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ slave_config.direction = DMA_MEM_TO_DEV;
+ slave_config.dst_addr = sport->port.mapbase + URTX0;
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.dst_maxburst = TXTL; /* fix me */
+ ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config);
+ if (ret) {
+ pr_err("error in TX dma configuration.");
+ goto err;
+ }
+
+ return 0;
+err:
+ imx_uart_dma_exit(sport);
+ return ret;
+}
+
+/* half the RX buffer size */
+#define CTSTL 16
+
+static int imx_startup(struct uart_port *port)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+ int retval;
+ unsigned long flags, temp;
+ struct tty_struct *tty;
+
+ clk_enable(sport->clk);
+
+#ifndef CONFIG_SERIAL_CORE_CONSOLE
+ imx_setup_ufcr(sport, 0);
+#endif
+
+ /* disable the DREN bit (Data Ready interrupt enable) before
+ * requesting IRQs
+ */
+ temp = readl(sport->port.membase + UCR4);
+
+ if (USE_IRDA(sport))
+ temp |= UCR4_IRSC;
+
+ /* set the trigger level for CTS */
+ temp &= ~(UCR4_CTSTL_MASK<< UCR4_CTSTL_SHF);
+ temp |= CTSTL<< UCR4_CTSTL_SHF;
+
+ writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
+
+ if (USE_IRDA(sport)) {
+ /* reset fifo's and state machines */
+ int i = 100;
+ temp = readl(sport->port.membase + UCR2);
+ temp &= ~UCR2_SRST;
+ writel(temp, sport->port.membase + UCR2);
+ while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) &&
+ (--i > 0)) {
+ udelay(1);
+ }
+ }
+
+ /*
+ * Allocate the IRQ(s). i.MX1 has three interrupts whereas later
+ * chips only have one interrupt.
+ */
+ if (sport->txirq > 0) {
+ retval = request_irq(sport->rxirq, imx_rxint, 0,
+ DRIVER_NAME, sport);
+ if (retval)
+ goto error_out1;
+
+ retval = request_irq(sport->txirq, imx_txint, 0,
+ DRIVER_NAME, sport);
+ if (retval)
+ goto error_out2;
+
+ /* do not use RTS IRQ on IrDA */
+ if (!USE_IRDA(sport)) {
+ retval = request_irq(sport->rtsirq, imx_rtsint,
+ (sport->rtsirq < MAX_INTERNAL_IRQ) ? 0 :
+ IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING,
+ DRIVER_NAME, sport);
+ if (retval)
+ goto error_out3;
+ }
+ } else {
+ retval = request_irq(sport->port.irq, imx_int, 0,
+ DRIVER_NAME, sport);
+ if (retval)
+ goto error_out1;
+ }
+
+ /* Enable the SDMA for uart. */
+ if (sport->enable_dma) {
+ int ret;
+ ret = imx_uart_dma_init(sport);
+ if (ret)
+ goto error_out3;
+
+ sport->port.flags |= UPF_LOW_LATENCY;
+ INIT_WORK(&sport->tsk_dma_tx, dma_tx_work);
+ INIT_WORK(&sport->tsk_dma_rx, dma_rx_work);
+ init_waitqueue_head(&sport->dma_wait);
+ }
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ /*
+ * Finally, clear and enable interrupts
+ */
+ writel(USR1_RTSD, sport->port.membase + USR1);
+
+ temp = readl(sport->port.membase + UCR1);
+ temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
+ if (sport->enable_dma) {
+ temp |= UCR1_RDMAEN | UCR1_TDMAEN;
+ /* ICD: wait for 4 idle frames; also enable the aging timer */
+ temp |= UCR1_ICD_REG(0)|UCR1_ATDMAEN;
+ }
+
+ if (USE_IRDA(sport)) {
+ temp |= UCR1_IREN;
+ temp &= ~(UCR1_RTSDEN);
+ }
+
+ writel(temp, sport->port.membase + UCR1);
+
+ temp = readl(sport->port.membase + UCR2);
+ temp |= (UCR2_RXEN | UCR2_TXEN);
+ writel(temp, sport->port.membase + UCR2);
+
+ if (USE_IRDA(sport)) {
+ /* clear RX-FIFO */
+ int i = 64;
+ while ((--i > 0) &&
+ (readl(sport->port.membase + URXD0) & URXD_CHARRDY)) {
+ barrier();
+ }
+ }
+
+ if (!cpu_is_mx1()) {
+ temp = readl(sport->port.membase + UCR3);
+ temp |= MX2_UCR3_RXDMUXSEL;
+ writel(temp, sport->port.membase + UCR3);
+ }
+
+ if (USE_IRDA(sport)) {
+ temp = readl(sport->port.membase + UCR4);
+ if (sport->irda_inv_rx)
+ temp |= UCR4_INVR;
+ else
+ temp &= ~(UCR4_INVR);
+ writel(temp | UCR4_DREN, sport->port.membase + UCR4);
+
+ temp = readl(sport->port.membase + UCR3);
+ if (sport->irda_inv_tx)
+ temp |= UCR3_INVT;
+ else
+ temp &= ~(UCR3_INVT);
+ writel(temp, sport->port.membase + UCR3);
+ }
+
+ if (sport->enable_dma) {
+ temp = readl(sport->port.membase + UCR4);
+ temp |= UCR4_IDDMAEN;
+ writel(temp, sport->port.membase + UCR4);
+ }
+
+ /*
+ * Enable modem status interrupts
+ */
+ imx_enable_ms(&sport->port);
+ spin_unlock_irqrestore(&sport->port.lock,flags);
+
+ if (USE_IRDA(sport)) {
+ struct imxuart_platform_data *pdata;
+ pdata = sport->port.dev->platform_data;
+ sport->irda_inv_rx = pdata->irda_inv_rx;
+ sport->irda_inv_tx = pdata->irda_inv_tx;
+ sport->trcv_delay = pdata->transceiver_delay;
+ if (pdata->irda_enable)
+ pdata->irda_enable(1);
+ }
+
+ tty = sport->port.state->port.tty;
+
+ return 0;
+
+error_out3:
+ if (sport->txirq)
+ free_irq(sport->txirq, sport);
+error_out2:
+ if (sport->rxirq)
+ free_irq(sport->rxirq, sport);
+error_out1:
+ return retval;
+}
+
+static void imx_shutdown(struct uart_port *port)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+ unsigned long temp;
+ unsigned long flags;
+
+ if (sport->enable_dma) {
+ /* We have to wait for the DMA to finish. */
+ wait_event(sport->dma_wait, !sport->dma_is_rxing);
+ imx_stop_rx(port);
+ imx_uart_dma_exit(sport);
+ }
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ temp = readl(sport->port.membase + UCR2);
+ temp &= ~(UCR2_TXEN);
+ writel(temp, sport->port.membase + UCR2);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ if (USE_IRDA(sport)) {
+ struct imxuart_platform_data *pdata;
+ pdata = sport->port.dev->platform_data;
+ if (pdata->irda_enable)
+ pdata->irda_enable(0);
+ }
+
+ /*
+ * Stop our timer.
+ */
+ del_timer_sync(&sport->timer);
+
+ /*
+ * Free the interrupts
+ */
+ if (sport->txirq > 0) {
+ if (!USE_IRDA(sport))
+ free_irq(sport->rtsirq, sport);
+ free_irq(sport->txirq, sport);
+ free_irq(sport->rxirq, sport);
+ } else
+ free_irq(sport->port.irq, sport);
+
+ /*
+ * Disable all interrupts, port and break condition.
+ */
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
+ if (USE_IRDA(sport))
+ temp &= ~(UCR1_IREN);
+ if (sport->enable_dma)
+ temp &= ~(UCR1_RDMAEN | UCR1_TDMAEN);
+ writel(temp, sport->port.membase + UCR1);
+
+ if (sport->enable_dma) {
+ temp = readl(sport->port.membase + UCR4);
+ temp &= ~UCR4_IDDMAEN;
+ writel(temp, sport->port.membase + UCR4);
+ }
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ clk_disable(sport->clk);
+}
+
+static void
+imx_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+ unsigned long flags;
+ unsigned int ucr2, old_ucr1, old_txrxen, baud, quot;
+ unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
+ unsigned int div, ufcr;
+ unsigned long num, denom;
+ uint64_t tdiv64;
+
+ /*
+ * If we don't support modem control lines, don't allow
+ * these to be set.
+ */
+ if (0) {
+ termios->c_cflag &= ~(HUPCL | CRTSCTS | CMSPAR);
+ termios->c_cflag |= CLOCAL;
+ }
+
+ /*
+ * We only support CS7 and CS8.
+ */
+ while ((termios->c_cflag & CSIZE) != CS7 &&
+ (termios->c_cflag & CSIZE) != CS8) {
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= old_csize;
+ old_csize = CS8;
+ }
+
+ if ((termios->c_cflag & CSIZE) == CS8)
+ ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
+ else
+ ucr2 = UCR2_SRST | UCR2_IRTS;
+
+ if (termios->c_cflag & CRTSCTS) {
+ if( sport->have_rtscts ) {
+ ucr2 &= ~UCR2_IRTS;
+ ucr2 |= UCR2_CTSC;
+ } else {
+ termios->c_cflag &= ~CRTSCTS;
+ }
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ ucr2 |= UCR2_STPB;
+ if (termios->c_cflag & PARENB) {
+ ucr2 |= UCR2_PREN;
+ if (termios->c_cflag & PARODD)
+ ucr2 |= UCR2_PROE;
+ }
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+ quot = uart_get_divisor(port, baud);
+
+ del_timer_sync(&sport->timer);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ sport->port.read_status_mask = 0;
+ if (termios->c_iflag & INPCK)
+ sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ sport->port.read_status_mask |= URXD_BRK;
+
+ /*
+ * Characters to ignore
+ */
+ sport->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ sport->port.ignore_status_mask |= URXD_PRERR;
+ if (termios->c_iflag & IGNBRK) {
+ sport->port.ignore_status_mask |= URXD_BRK;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ sport->port.ignore_status_mask |= URXD_OVRRUN;
+ }
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ /*
+ * disable interrupts and drain transmitter
+ */
+ old_ucr1 = readl(sport->port.membase + UCR1);
+ writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN),
+ sport->port.membase + UCR1);
+
+ while ( !(readl(sport->port.membase + USR2) & USR2_TXDC))
+ barrier();
+
+ /* then, disable everything */
+ old_txrxen = readl(sport->port.membase + UCR2);
+ writel(old_txrxen & ~( UCR2_TXEN | UCR2_RXEN),
+ sport->port.membase + UCR2);
+ old_txrxen &= (UCR2_TXEN | UCR2_RXEN);
+
+ if (USE_IRDA(sport)) {
+ /*
+ * use maximum available submodule frequency to
+ * avoid missing short pulses due to low sampling rate
+ */
+ div = 1;
+ } else {
+ div = sport->port.uartclk / (baud * 16);
+ if (div > 7)
+ div = 7;
+ if (!div)
+ div = 1;
+ }
+
+ rational_best_approximation(16 * div * baud, sport->port.uartclk,
+ 1 << 16, 1 << 16, &num, &denom);
+
+ tdiv64 = sport->port.uartclk;
+ tdiv64 *= num;
+ do_div(tdiv64, denom * 16 * div);
+ tty_termios_encode_baud_rate(termios,
+ (speed_t)tdiv64, (speed_t)tdiv64);
+
+ num -= 1;
+ denom -= 1;
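+
+ /*
+ * Worked example (illustrative values only): with uartclk = 16 MHz,
+ * div = 1 and baud = 115200, rational_best_approximation() reduces
+ * 1843200/16000000 to num/denom = 72/625, tdiv64 becomes
+ * 16000000 * 72 / (625 * 16) = 115200, and UBIR/UBMR are programmed
+ * with 71 and 624 respectively.
+ */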
+
+ ufcr = readl(sport->port.membase + UFCR);
+ ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
+
+ if (sport->use_dcedte)
+ ufcr |= UFCR_DCEDTE;
+
+ writel(ufcr, sport->port.membase + UFCR);
+
+ writel(num, sport->port.membase + UBIR);
+ writel(denom, sport->port.membase + UBMR);
+
+ if (!cpu_is_mx1())
+ writel(sport->port.uartclk / div / 1000,
+ sport->port.membase + MX2_ONEMS);
+
+ writel(old_ucr1, sport->port.membase + UCR1);
+
+ /* set the parity, stop bits and data size */
+ writel(ucr2 | old_txrxen, sport->port.membase + UCR2);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
+ imx_enable_ms(&sport->port);
+
+}
+
+static const char *imx_type(struct uart_port *port)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+
+ return sport->port.type == PORT_IMX ? "IMX" : NULL;
+}
+
+/*
+ * Release the memory region(s) being used by 'port'.
+ */
+static void imx_release_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *mmres;
+
+ mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mmres->start, mmres->end - mmres->start + 1);
+}
+
+/*
+ * Request the memory region(s) being used by 'port'.
+ */
+static int imx_request_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *mmres;
+ void *ret;
+
+ mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mmres)
+ return -ENODEV;
+
+ ret = request_mem_region(mmres->start, mmres->end - mmres->start + 1,
+ "imx-uart");
+
+ return ret ? 0 : -EBUSY;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void imx_config_port(struct uart_port *port, int flags)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+
+ if (flags & UART_CONFIG_TYPE &&
+ imx_request_port(&sport->port) == 0)
+ sport->port.type = PORT_IMX;
+}
+
+/*
+ * Verify the new serial_struct (for TIOCSSERIAL).
+ * The only changes we allow are to the flags and type, and
+ * even then only between PORT_IMX and PORT_UNKNOWN
+ */
+static int
+imx_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+ int ret = 0;
+
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX)
+ ret = -EINVAL;
+ if (sport->port.irq != ser->irq)
+ ret = -EINVAL;
+ if (ser->io_type != UPIO_MEM)
+ ret = -EINVAL;
+ if (sport->port.uartclk / 16 != ser->baud_base)
+ ret = -EINVAL;
+ if ((void *)sport->port.mapbase != ser->iomem_base)
+ ret = -EINVAL;
+ if (sport->port.iobase != ser->port)
+ ret = -EINVAL;
+ if (ser->hub6 != 0)
+ ret = -EINVAL;
+ return ret;
+}
+
+#if defined(CONFIG_CONSOLE_POLL)
+static int imx_poll_get_char(struct uart_port *port)
+{
+ struct imx_port_ucrs old_ucr;
+ unsigned int status;
+ unsigned char c;
+
+ /* save control registers */
+ imx_port_ucrs_save(port, &old_ucr);
+
+ /* disable interrupts */
+ writel(UCR1_UARTEN, port->membase + UCR1);
+ writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
+ port->membase + UCR2);
+ writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
+ port->membase + UCR3);
+
+ /* poll */
+ do {
+ status = readl(port->membase + USR2);
+ } while (~status & USR2_RDR);
+
+ /* read */
+ c = readl(port->membase + URXD0);
+
+ /* restore control registers */
+ imx_port_ucrs_restore(port, &old_ucr);
+
+ return c;
+}
+
+static void imx_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ struct imx_port_ucrs old_ucr;
+ unsigned int status;
+
+ /* save control registers */
+ imx_port_ucrs_save(port, &old_ucr);
+
+ /* disable interrupts */
+ writel(UCR1_UARTEN, port->membase + UCR1);
+ writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
+ port->membase + UCR2);
+ writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
+ port->membase + UCR3);
+
+ /* drain */
+ do {
+ status = readl(port->membase + USR1);
+ } while (~status & USR1_TRDY);
+
+ /* write */
+ writel(c, port->membase + URTX0);
+
+ /* flush */
+ do {
+ status = readl(port->membase + USR2);
+ } while (~status & USR2_TXDC);
+
+ /* restore control registers */
+ imx_port_ucrs_restore(port, &old_ucr);
+}
+#endif
+
+static struct uart_ops imx_pops = {
+ .tx_empty = imx_tx_empty,
+ .set_mctrl = imx_set_mctrl,
+ .get_mctrl = imx_get_mctrl,
+ .stop_tx = imx_stop_tx,
+ .start_tx = imx_start_tx,
+ .stop_rx = imx_stop_rx,
+ .enable_ms = imx_enable_ms,
+ .break_ctl = imx_break_ctl,
+ .startup = imx_startup,
+ .shutdown = imx_shutdown,
+ .set_termios = imx_set_termios,
+ .type = imx_type,
+ .release_port = imx_release_port,
+ .request_port = imx_request_port,
+ .config_port = imx_config_port,
+ .verify_port = imx_verify_port,
+#if defined(CONFIG_CONSOLE_POLL)
+ .poll_get_char = imx_poll_get_char,
+ .poll_put_char = imx_poll_put_char,
+#endif
+};
+
+static struct imx_port *imx_ports[UART_NR];
+
+#ifdef CONFIG_SERIAL_IMX_CONSOLE
+static void imx_console_putchar(struct uart_port *port, int ch)
+{
+ struct imx_port *sport = (struct imx_port *)port;
+
+ while (readl(sport->port.membase + UTS) & UTS_TXFULL)
+ barrier();
+
+ writel(ch, sport->port.membase + URTX0);
+}
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+imx_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct imx_port *sport = imx_ports[co->index];
+ struct imx_port_ucrs old_ucr;
+ unsigned int ucr1;
+ unsigned long flags;
+ int locked = 1;
+
+ local_irq_save(flags);
+ if (sport->port.sysrq)
+ locked = 0;
+ else
+ spin_lock(&sport->port.lock);
+
+ /*
+ * First, save UCR1/2/3 and then disable interrupts
+ */
+ imx_port_ucrs_save(&sport->port, &old_ucr);
+ ucr1 = old_ucr.ucr1;
+
+ if (cpu_is_mx1())
+ ucr1 |= MX1_UCR1_UARTCLKEN;
+ ucr1 |= UCR1_UARTEN;
+ ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN);
+
+ writel(ucr1, sport->port.membase + UCR1);
+
+ writel(old_ucr.ucr2 | UCR2_TXEN, sport->port.membase + UCR2);
+
+ uart_console_write(&sport->port, s, count, imx_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore UCR1/2/3
+ */
+ while (!(readl(sport->port.membase + USR2) & USR2_TXDC));
+
+ imx_port_ucrs_restore(&sport->port, &old_ucr);
+
+ if (locked)
+ spin_unlock(&sport->port.lock);
+ local_irq_restore(flags);
+}
+
+/*
+ * If the port was already initialised (eg, by a boot loader),
+ * try to determine the current setup.
+ */
+static void __init
+imx_console_get_options(struct imx_port *sport, int *baud,
+ int *parity, int *bits)
+{
+
+ if (readl(sport->port.membase + UCR1) & UCR1_UARTEN) {
+ /* ok, the port was enabled */
+ unsigned int ucr2, ubir,ubmr, uartclk;
+ unsigned int baud_raw;
+ unsigned int ucfr_rfdiv;
+
+ ucr2 = readl(sport->port.membase + UCR2);
+
+ *parity = 'n';
+ if (ucr2 & UCR2_PREN) {
+ if (ucr2 & UCR2_PROE)
+ *parity = 'o';
+ else
+ *parity = 'e';
+ }
+
+ if (ucr2 & UCR2_WS)
+ *bits = 8;
+ else
+ *bits = 7;
+
+ ubir = readl(sport->port.membase + UBIR) & 0xffff;
+ ubmr = readl(sport->port.membase + UBMR) & 0xffff;
+
+ ucfr_rfdiv = (readl(sport->port.membase + UFCR) & UFCR_RFDIV) >> 7;
+ if (ucfr_rfdiv == 6)
+ ucfr_rfdiv = 7;
+ else
+ ucfr_rfdiv = 6 - ucfr_rfdiv;
+
+ uartclk = clk_get_rate(sport->clk);
+ uartclk /= ucfr_rfdiv;
+
+ { /*
+ * The next code provides exact computation of
+ * baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1))
+ * without the need for float support or long long division,
+ * which would otherwise be required to prevent 32-bit arithmetic overflow
+ */
+ unsigned int mul = ubir + 1;
+ unsigned int div = 16 * (ubmr + 1);
+ unsigned int rem = uartclk % div;
+
+ baud_raw = (uartclk / div) * mul;
+ baud_raw += (rem * mul + div / 2) / div;
+ *baud = (baud_raw + 50) / 100 * 100;
+ }
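+
+ /*
+ * Worked example (illustrative values, assuming a 16 MHz reference
+ * clock after RFDIV): with UBIR = 71 and UBMR = 624, mul = 72,
+ * div = 16 * 625 = 10000, so baud_raw = (16000000 / 10000) * 72
+ * = 115200, which rounds to *baud = 115200.
+ */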
+
+ if(*baud != baud_raw)
+ printk(KERN_INFO "Serial: Console IMX rounded baud rate from %d to %d\n",
+ baud_raw, *baud);
+ }
+}
+
+static int __init
+imx_console_setup(struct console *co, char *options)
+{
+ struct imx_port *sport;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports))
+ co->index = 0;
+ sport = imx_ports[co->index];
+ if(sport == NULL)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ imx_console_get_options(sport, &baud, &parity, &bits);
+
+ imx_setup_ufcr(sport, 0);
+
+ return uart_set_options(&sport->port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver imx_reg;
+static struct console imx_console = {
+ .name = DEV_NAME,
+ .write = imx_console_write,
+ .device = uart_console_device,
+ .setup = imx_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &imx_reg,
+};
+
+#define IMX_CONSOLE &imx_console
+#else
+#define IMX_CONSOLE NULL
+#endif
+
+static struct uart_driver imx_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = DRIVER_NAME,
+ .dev_name = DEV_NAME,
+ .major = SERIAL_IMX_MAJOR,
+ .minor = MINOR_START,
+ .nr = ARRAY_SIZE(imx_ports),
+ .cons = IMX_CONSOLE,
+};
+
+static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct imx_port *sport = platform_get_drvdata(dev);
+ unsigned int val;
+
+ if (sport) {
+ /* Enable i.MX UART wakeup */
+ val = readl(sport->port.membase + UCR3);
+ val |= UCR3_AWAKEN;
+ writel(val, sport->port.membase + UCR3);
+
+ uart_suspend_port(&imx_reg, &sport->port);
+ }
+
+ return 0;
+}
+
+static int serial_imx_resume(struct platform_device *dev)
+{
+ struct imx_port *sport = platform_get_drvdata(dev);
+ unsigned int val;
+
+ if (sport) {
+ uart_resume_port(&imx_reg, &sport->port);
+
+ /* Disable i.MX UART wakeup */
+ val = readl(sport->port.membase + UCR3);
+ val &= ~UCR3_AWAKEN;
+ writel(val, sport->port.membase + UCR3);
+ }
+
+ return 0;
+}
+
+extern int uart_at_24;
+
+static int serial_imx_probe(struct platform_device *pdev)
+{
+ struct imx_port *sport;
+ struct imxuart_platform_data *pdata;
+ void __iomem *base;
+ int ret = 0;
+ struct resource *res;
+
+ sport = kzalloc(sizeof(*sport), GFP_KERNEL);
+ if (!sport)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto free;
+ }
+
+ base = ioremap(res->start, PAGE_SIZE);
+ if (!base) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ sport->port.dev = &pdev->dev;
+ sport->port.mapbase = res->start;
+ sport->port.membase = base;
+ sport->port.type = PORT_IMX;
+ sport->port.iotype = UPIO_MEM;
+ sport->port.irq = platform_get_irq(pdev, 0);
+ sport->rxirq = platform_get_irq(pdev, 0);
+ sport->txirq = platform_get_irq(pdev, 1);
+ sport->rtsirq = platform_get_irq(pdev, 2);
+ sport->port.fifosize = 32;
+ sport->port.ops = &imx_pops;
+ sport->port.flags = UPF_BOOT_AUTOCONF;
+ sport->port.line = pdev->id;
+ init_timer(&sport->timer);
+ sport->timer.function = imx_timeout;
+ sport->timer.data = (unsigned long)sport;
+
+ sport->clk = clk_get(&pdev->dev, "uart");
+ if (IS_ERR(sport->clk)) {
+ ret = PTR_ERR(sport->clk);
+ goto unmap;
+ }
+ if (uart_at_24)
+ clk_set_parent(sport->clk, clk_get(NULL, "osc"));
+
+ clk_enable(sport->clk);
+
+ sport->port.uartclk = clk_get_rate(sport->clk);
+
+ imx_ports[pdev->id] = sport;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata && (pdata->flags & IMXUART_HAVE_RTSCTS))
+ sport->have_rtscts = 1;
+ if (pdata && (pdata->flags & IMXUART_USE_DCEDTE))
+ sport->use_dcedte = 1;
+ if (pdata && (pdata->flags & IMXUART_SDMA))
+ sport->enable_dma = 1;
+
+#ifdef CONFIG_IRDA
+ if (pdata && (pdata->flags & IMXUART_IRDA))
+ sport->use_irda = 1;
+#endif
+
+ if (pdata && pdata->init) {
+ ret = pdata->init(pdev);
+ if (ret)
+ goto clkput;
+ }
+
+ ret = uart_add_one_port(&imx_reg, &sport->port);
+ if (ret)
+ goto deinit;
+ platform_set_drvdata(pdev, &sport->port);
+
+ clk_disable(sport->clk);
+ return 0;
+deinit:
+ if (pdata && pdata->exit)
+ pdata->exit(pdev);
+clkput:
+ clk_put(sport->clk);
+ clk_disable(sport->clk);
+unmap:
+ iounmap(sport->port.membase);
+free:
+ kfree(sport);
+
+ return ret;
+}
+
+static int serial_imx_remove(struct platform_device *pdev)
+{
+ struct imxuart_platform_data *pdata;
+ struct imx_port *sport = platform_get_drvdata(pdev);
+
+ pdata = pdev->dev.platform_data;
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (sport) {
+ uart_remove_one_port(&imx_reg, &sport->port);
+ clk_put(sport->clk);
+ }
+
+ if (pdata && pdata->exit)
+ pdata->exit(pdev);
+
+ iounmap(sport->port.membase);
+ kfree(sport);
+
+ return 0;
+}
+
+static struct platform_driver serial_imx_driver = {
+ .probe = serial_imx_probe,
+ .remove = serial_imx_remove,
+
+ .suspend = serial_imx_suspend,
+ .resume = serial_imx_resume,
+ .driver = {
+ .name = "imx-uart",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init imx_serial_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Serial: IMX driver\n");
+
+ ret = uart_register_driver(&imx_reg);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&serial_imx_driver);
+ if (ret != 0)
+ uart_unregister_driver(&imx_reg);
+
+ return ret;
+}
+
+static void __exit imx_serial_exit(void)
+{
+ platform_driver_unregister(&serial_imx_driver);
+ uart_unregister_driver(&imx_reg);
+}
+
+module_init(imx_serial_init);
+module_exit(imx_serial_exit);
+
+MODULE_AUTHOR("Sascha Hauer");
+MODULE_DESCRIPTION("IMX generic serial port driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-uart");
diff --git a/drivers/tty/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c
new file mode 100644
index 00000000..ee43efc7
--- /dev/null
+++ b/drivers/tty/serial/ioc3_serial.c
@@ -0,0 +1,2199 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+/*
+ * This file contains a module version of the ioc3 serial driver. It
+ * includes all the support functions needed as well as the serial
+ * driver itself.
+ */
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/circ_buf.h>
+#include <linux/serial_reg.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/serial_core.h>
+#include <linux/ioc3.h>
+#include <linux/slab.h>
+
+/*
+ * Interesting things about the ioc3
+ */
+
+#define LOGICAL_PORTS 2 /* rs232(0) and rs422(1) */
+#define PORTS_PER_CARD 2
+#define LOGICAL_PORTS_PER_CARD (PORTS_PER_CARD * LOGICAL_PORTS)
+#define MAX_CARDS 8
+#define MAX_LOGICAL_PORTS (LOGICAL_PORTS_PER_CARD * MAX_CARDS)
+
+/* determine given the sio_ir what port it applies to */
+#define GET_PORT_FROM_SIO_IR(_x) (((_x) & SIO_IR_SA) ? 0 : 1)
+
+
+/*
+ * we have 2 logical ports (rs232, rs422) for each physical port
+ * evens are rs232, odds are rs422
+ */
+#define GET_PHYSICAL_PORT(_x) ((_x) >> 1)
+#define GET_LOGICAL_PORT(_x) ((_x) & 1)
+#define IS_PHYSICAL_PORT(_x) !((_x) & 1)
+#define IS_RS232(_x) !((_x) & 1)
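+
+/*
+ * Example of the mapping (illustration only): logical port 3 lives on
+ * physical port 3 >> 1 = 1 and, since 3 & 1 == 1, is the RS422 side;
+ * logical port 2 is the RS232 side of the same physical port.
+ */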
+
+static unsigned int Num_of_ioc3_cards;
+static unsigned int Submodule_slot;
+
+/* defining this will get you LOTS of great debug info */
+//#define DEBUG_INTERRUPTS
+#define DPRINT_CONFIG(_x...) ;
+//#define DPRINT_CONFIG(_x...) printk _x
+#define NOT_PROGRESS() ;
+//#define NOT_PROGRESS() printk("%s : fails %d\n", __func__, __LINE__)
+
+/* number of characters we want to transmit to the lower level at a time */
+#define MAX_CHARS 256
+#define FIFO_SIZE (MAX_CHARS-1) /* it's a uchar */
+
+/* Device name we're using */
+#define DEVICE_NAME "ttySIOC"
+#define DEVICE_MAJOR 204
+#define DEVICE_MINOR 116
+
+/* flags for next_char_state */
+#define NCS_BREAK 0x1
+#define NCS_PARITY 0x2
+#define NCS_FRAMING 0x4
+#define NCS_OVERRUN 0x8
+
+/* cause we need SOME parameters ... */
+#define MIN_BAUD_SUPPORTED 1200
+#define MAX_BAUD_SUPPORTED 115200
+
+/* protocol types supported */
+#define PROTO_RS232 0
+#define PROTO_RS422 1
+
+/* Notification types */
+#define N_DATA_READY 0x01
+#define N_OUTPUT_LOWAT 0x02
+#define N_BREAK 0x04
+#define N_PARITY_ERROR 0x08
+#define N_FRAMING_ERROR 0x10
+#define N_OVERRUN_ERROR 0x20
+#define N_DDCD 0x40
+#define N_DCTS 0x80
+
+#define N_ALL_INPUT (N_DATA_READY | N_BREAK \
+ | N_PARITY_ERROR | N_FRAMING_ERROR \
+ | N_OVERRUN_ERROR | N_DDCD | N_DCTS)
+
+#define N_ALL_OUTPUT N_OUTPUT_LOWAT
+
+#define N_ALL_ERRORS (N_PARITY_ERROR | N_FRAMING_ERROR \
+ | N_OVERRUN_ERROR)
+
+#define N_ALL (N_DATA_READY | N_OUTPUT_LOWAT | N_BREAK \
+ | N_PARITY_ERROR | N_FRAMING_ERROR \
+ | N_OVERRUN_ERROR | N_DDCD | N_DCTS)
+
+#define SER_CLK_SPEED(prediv) ((22000000 << 1) / prediv)
+#define SER_DIVISOR(x, clk) (((clk) + (x) * 8) / ((x) * 16))
+#define DIVISOR_TO_BAUD(div, clk) ((clk) / 16 / (div))
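+
+/*
+ * Worked example (illustrative only): with prediv = 6 the reference
+ * clock is SER_CLK_SPEED(6) = (22000000 << 1) / 6 ~= 7.33 MHz, so for
+ * 115200 baud SER_DIVISOR() rounds 7333333 / (16 * 115200) to 4 and
+ * DIVISOR_TO_BAUD(4, 7333333) gives back roughly 114583 baud; set_baud()
+ * below walks the prediv values looking for the closest match.
+ */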
+
+/* Some masks */
+#define LCR_MASK_BITS_CHAR (UART_LCR_WLEN5 | UART_LCR_WLEN6 \
+ | UART_LCR_WLEN7 | UART_LCR_WLEN8)
+#define LCR_MASK_STOP_BITS (UART_LCR_STOP)
+
+#define PENDING(_a, _p) (readl(&(_p)->vma->sio_ir) & (_a)->ic_enable)
+
+#define RING_BUF_SIZE 4096
+#define BUF_SIZE_BIT SBBR_L_SIZE
+#define PROD_CONS_MASK PROD_CONS_PTR_4K
+
+#define TOTAL_RING_BUF_SIZE (RING_BUF_SIZE * 4)
+
+/* driver specific - one per card */
+struct ioc3_card {
+ struct {
+ /* uart ports are allocated here */
+ struct uart_port icp_uart_port[LOGICAL_PORTS];
+ /* the ioc3_port used for this port */
+ struct ioc3_port *icp_port;
+ } ic_port[PORTS_PER_CARD];
+ /* currently enabled interrupts */
+ uint32_t ic_enable;
+};
+
+/* Local port info for each IOC3 serial port */
+struct ioc3_port {
+ /* handy reference material */
+ struct uart_port *ip_port;
+ struct ioc3_card *ip_card;
+ struct ioc3_driver_data *ip_idd;
+ struct ioc3_submodule *ip_is;
+
+ /* pci mem addresses for this port */
+ struct ioc3_serialregs __iomem *ip_serial_regs;
+ struct ioc3_uartregs __iomem *ip_uart_regs;
+
+ /* Ring buffer page for this port */
+ dma_addr_t ip_dma_ringbuf;
+ /* vaddr of ring buffer */
+ struct ring_buffer *ip_cpu_ringbuf;
+
+ /* Rings for this port */
+ struct ring *ip_inring;
+ struct ring *ip_outring;
+
+ /* Hook to port specific values */
+ struct port_hooks *ip_hooks;
+
+ spinlock_t ip_lock;
+
+ /* Various rx/tx parameters */
+ int ip_baud;
+ int ip_tx_lowat;
+ int ip_rx_timeout;
+
+ /* Copy of notification bits */
+ int ip_notify;
+
+ /* Shadow copies of various registers so we don't need to PIO
+ * read them constantly
+ */
+ uint32_t ip_sscr;
+ uint32_t ip_tx_prod;
+ uint32_t ip_rx_cons;
+ unsigned char ip_flags;
+};
+
+/* tx low water mark. We need to notify the driver whenever tx is getting
+ * close to empty so it can refill the tx buffer and keep things going.
+ * Let's assume that if we interrupt 1 ms before the tx goes idle, we'll
+ * have no trouble getting in more chars in time (I certainly hope so).
+ */
+#define TX_LOWAT_LATENCY 1000
+#define TX_LOWAT_HZ (1000000 / TX_LOWAT_LATENCY)
+#define TX_LOWAT_CHARS(baud) (baud / 10 / TX_LOWAT_HZ)
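+/* For example (assuming the usual ~10 bits per character on the wire):
+ * TX_LOWAT_CHARS(115200) = 115200 / 10 / 1000 = 11, i.e. the lowat
+ * notification is requested roughly 11 characters (~1 ms of transmit time)
+ * before the transmitter would go idle.
+ */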
+
+/* Flags per port */
+#define INPUT_HIGH 0x01
+ /* used to signify that we have turned off the rx_high
+ * temporarily - we need to drain the fifo and don't
+ * want to get blasted with interrupts.
+ */
+#define DCD_ON 0x02
+ /* DCD state is on */
+#define LOWAT_WRITTEN 0x04
+#define READ_ABORTED 0x08
+	/* the read was aborted - used to avoid infinite looping
+ * in the interrupt handler
+ */
+#define INPUT_ENABLE 0x10
+
+/* Since each port has different register offsets and bitmasks
+ * for everything, we'll store those that we need in tables so we
+ * don't have to be constantly checking the port we are dealing with.
+ */
+struct port_hooks {
+ uint32_t intr_delta_dcd;
+ uint32_t intr_delta_cts;
+ uint32_t intr_tx_mt;
+ uint32_t intr_rx_timer;
+ uint32_t intr_rx_high;
+ uint32_t intr_tx_explicit;
+ uint32_t intr_clear;
+ uint32_t intr_all;
+ char rs422_select_pin;
+};
+
+static struct port_hooks hooks_array[PORTS_PER_CARD] = {
+ /* values for port A */
+ {
+ .intr_delta_dcd = SIO_IR_SA_DELTA_DCD,
+ .intr_delta_cts = SIO_IR_SA_DELTA_CTS,
+ .intr_tx_mt = SIO_IR_SA_TX_MT,
+ .intr_rx_timer = SIO_IR_SA_RX_TIMER,
+ .intr_rx_high = SIO_IR_SA_RX_HIGH,
+ .intr_tx_explicit = SIO_IR_SA_TX_EXPLICIT,
+ .intr_clear = (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL
+ | SIO_IR_SA_RX_HIGH
+ | SIO_IR_SA_RX_TIMER
+ | SIO_IR_SA_DELTA_DCD
+ | SIO_IR_SA_DELTA_CTS
+ | SIO_IR_SA_INT
+ | SIO_IR_SA_TX_EXPLICIT
+ | SIO_IR_SA_MEMERR),
+ .intr_all = SIO_IR_SA,
+ .rs422_select_pin = GPPR_UARTA_MODESEL_PIN,
+ },
+
+ /* values for port B */
+ {
+ .intr_delta_dcd = SIO_IR_SB_DELTA_DCD,
+ .intr_delta_cts = SIO_IR_SB_DELTA_CTS,
+ .intr_tx_mt = SIO_IR_SB_TX_MT,
+ .intr_rx_timer = SIO_IR_SB_RX_TIMER,
+ .intr_rx_high = SIO_IR_SB_RX_HIGH,
+ .intr_tx_explicit = SIO_IR_SB_TX_EXPLICIT,
+ .intr_clear = (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL
+ | SIO_IR_SB_RX_HIGH
+ | SIO_IR_SB_RX_TIMER
+ | SIO_IR_SB_DELTA_DCD
+ | SIO_IR_SB_DELTA_CTS
+ | SIO_IR_SB_INT
+ | SIO_IR_SB_TX_EXPLICIT
+ | SIO_IR_SB_MEMERR),
+ .intr_all = SIO_IR_SB,
+ .rs422_select_pin = GPPR_UARTB_MODESEL_PIN,
+ }
+};
+
+struct ring_entry {
+ union {
+ struct {
+ uint32_t alldata;
+ uint32_t allsc;
+ } all;
+ struct {
+ char data[4]; /* data bytes */
+ char sc[4]; /* status/control */
+ } s;
+ } u;
+};
+
+/* Test the valid bits in any of the 4 sc chars using "allsc" member */
+#define RING_ANY_VALID \
+ ((uint32_t)(RXSB_MODEM_VALID | RXSB_DATA_VALID) * 0x01010101)
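+/* (Multiplying the one-byte mask by 0x01010101 replicates it into all four
+ * byte lanes, so a single 32-bit test covers the four sc bytes at once.)
+ */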
+
+#define ring_sc u.s.sc
+#define ring_data u.s.data
+#define ring_allsc u.all.allsc
+
+/* Number of entries per ring buffer. */
+#define ENTRIES_PER_RING (RING_BUF_SIZE / (int) sizeof(struct ring_entry))
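+/* Rough sizing sketch (assuming the 8-byte struct ring_entry above):
+ * 4096 / 8 = 512 entries per ring, 4 data bytes each, i.e. 2048 characters
+ * of buffering per ring; TOTAL_RING_BUF_SIZE (16384 bytes) then covers the
+ * four rings of struct ring_buffer below.
+ */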
+
+/* An individual ring */
+struct ring {
+ struct ring_entry entries[ENTRIES_PER_RING];
+};
+
+/* The whole enchilada */
+struct ring_buffer {
+ struct ring TX_A;
+ struct ring RX_A;
+ struct ring TX_B;
+ struct ring RX_B;
+};
+
+/* Get a ring from a port struct */
+#define RING(_p, _wh) &(((struct ring_buffer *)((_p)->ip_cpu_ringbuf))->_wh)
+
+/* for Infinite loop detection */
+#define MAXITER 10000000
+
+
+/**
+ * set_baud - Baud rate setting code
+ * @port: port to set
+ * @baud: baud rate to use
+ */
+static int set_baud(struct ioc3_port *port, int baud)
+{
+ int divisor;
+ int actual_baud;
+ int diff;
+ int lcr, prediv;
+ struct ioc3_uartregs __iomem *uart;
+
+ for (prediv = 6; prediv < 64; prediv++) {
+ divisor = SER_DIVISOR(baud, SER_CLK_SPEED(prediv));
+ if (!divisor)
+ continue; /* invalid divisor */
+ actual_baud = DIVISOR_TO_BAUD(divisor, SER_CLK_SPEED(prediv));
+
+ diff = actual_baud - baud;
+ if (diff < 0)
+ diff = -diff;
+
+ /* if we're within 1% we've found a match */
+ if (diff * 100 <= actual_baud)
+ break;
+ }
+
+	/* if the loop above ran to completion, no prediv gave a baud rate
+	 * within 1% of the request. give up.
+ */
+ if (prediv == 64) {
+ NOT_PROGRESS();
+ return 1;
+ }
+
+ uart = port->ip_uart_regs;
+ lcr = readb(&uart->iu_lcr);
+
+ writeb(lcr | UART_LCR_DLAB, &uart->iu_lcr);
+ writeb((unsigned char)divisor, &uart->iu_dll);
+ writeb((unsigned char)(divisor >> 8), &uart->iu_dlm);
+ writeb((unsigned char)prediv, &uart->iu_scr);
+ writeb((unsigned char)lcr, &uart->iu_lcr);
+
+ return 0;
+}
+
+/**
+ * get_ioc3_port - given a uart port, return the control structure
+ * @the_port: uart port to find
+ */
+static struct ioc3_port *get_ioc3_port(struct uart_port *the_port)
+{
+ struct ioc3_driver_data *idd = dev_get_drvdata(the_port->dev);
+ struct ioc3_card *card_ptr = idd->data[Submodule_slot];
+ int ii, jj;
+
+ if (!card_ptr) {
+ NOT_PROGRESS();
+ return NULL;
+ }
+ for (ii = 0; ii < PORTS_PER_CARD; ii++) {
+ for (jj = 0; jj < LOGICAL_PORTS; jj++) {
+ if (the_port == &card_ptr->ic_port[ii].icp_uart_port[jj])
+ return card_ptr->ic_port[ii].icp_port;
+ }
+ }
+ NOT_PROGRESS();
+ return NULL;
+}
+
+/**
+ * port_init - Initialize the sio and ioc3 hardware for a given port
+ * called per port from attach...
+ * @port: port to initialize
+ */
+static inline int port_init(struct ioc3_port *port)
+{
+ uint32_t sio_cr;
+ struct port_hooks *hooks = port->ip_hooks;
+ struct ioc3_uartregs __iomem *uart;
+ int reset_loop_counter = 0xfffff;
+ struct ioc3_driver_data *idd = port->ip_idd;
+
+ /* Idle the IOC3 serial interface */
+ writel(SSCR_RESET, &port->ip_serial_regs->sscr);
+
+ /* Wait until any pending bus activity for this port has ceased */
+ do {
+ sio_cr = readl(&idd->vma->sio_cr);
+ if (reset_loop_counter-- <= 0) {
+ printk(KERN_WARNING
+ "IOC3 unable to come out of reset"
+ " scr 0x%x\n", sio_cr);
+ return -1;
+ }
+ } while (!(sio_cr & SIO_CR_ARB_DIAG_IDLE) &&
+ (((sio_cr &= SIO_CR_ARB_DIAG) == SIO_CR_ARB_DIAG_TXA)
+ || sio_cr == SIO_CR_ARB_DIAG_TXB
+ || sio_cr == SIO_CR_ARB_DIAG_RXA
+ || sio_cr == SIO_CR_ARB_DIAG_RXB));
+
+ /* Finish reset sequence */
+ writel(0, &port->ip_serial_regs->sscr);
+
+ /* Once RESET is done, reload cached tx_prod and rx_cons values
+ * and set rings to empty by making prod == cons
+ */
+ port->ip_tx_prod = readl(&port->ip_serial_regs->stcir) & PROD_CONS_MASK;
+ writel(port->ip_tx_prod, &port->ip_serial_regs->stpir);
+ port->ip_rx_cons = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;
+ writel(port->ip_rx_cons | SRCIR_ARM, &port->ip_serial_regs->srcir);
+
+ /* Disable interrupts for this 16550 */
+ uart = port->ip_uart_regs;
+ writeb(0, &uart->iu_lcr);
+ writeb(0, &uart->iu_ier);
+
+ /* Set the default baud */
+ set_baud(port, port->ip_baud);
+
+ /* Set line control to 8 bits no parity */
+ writeb(UART_LCR_WLEN8 | 0, &uart->iu_lcr);
+	/* UART_LCR_STOP not set => 1 stop bit */
+
+ /* Enable the FIFOs */
+ writeb(UART_FCR_ENABLE_FIFO, &uart->iu_fcr);
+ /* then reset 16550 FIFOs */
+ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
+ &uart->iu_fcr);
+
+ /* Clear modem control register */
+ writeb(0, &uart->iu_mcr);
+
+ /* Clear deltas in modem status register */
+ writel(0, &port->ip_serial_regs->shadow);
+
+ /* Only do this once per port pair */
+ if (port->ip_hooks == &hooks_array[0]) {
+ unsigned long ring_pci_addr;
+ uint32_t __iomem *sbbr_l, *sbbr_h;
+
+ sbbr_l = &idd->vma->sbbr_l;
+ sbbr_h = &idd->vma->sbbr_h;
+ ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf;
+ DPRINT_CONFIG(("%s: ring_pci_addr 0x%p\n",
+ __func__, (void *)ring_pci_addr));
+
+ writel((unsigned int)((uint64_t) ring_pci_addr >> 32), sbbr_h);
+ writel((unsigned int)ring_pci_addr | BUF_SIZE_BIT, sbbr_l);
+ }
+
+ /* Set the receive timeout value to 10 msec */
+ writel(SRTR_HZ / 100, &port->ip_serial_regs->srtr);
+
+ /* Set rx threshold, enable DMA */
+ /* Set high water mark at 3/4 of full ring */
+ port->ip_sscr = (ENTRIES_PER_RING * 3 / 4);
+
+	/* The uart experiences pauses at high baud rates, reducing actual
+	 * throughput by 10% or so, unless we enable high speed polling.
+	 * XXX: when this hardware bug is resolved we should revert to the
+	 * normal polling speed.
+ */
+ port->ip_sscr |= SSCR_HIGH_SPD;
+
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+
+ /* Disable and clear all serial related interrupt bits */
+ port->ip_card->ic_enable &= ~hooks->intr_clear;
+ ioc3_disable(port->ip_is, idd, hooks->intr_clear);
+ ioc3_ack(port->ip_is, idd, hooks->intr_clear);
+ return 0;
+}
+
+/**
+ * enable_intrs - enable interrupts
+ * @port: port to enable
+ * @mask: mask to use
+ */
+static void enable_intrs(struct ioc3_port *port, uint32_t mask)
+{
+ if ((port->ip_card->ic_enable & mask) != mask) {
+ port->ip_card->ic_enable |= mask;
+ ioc3_enable(port->ip_is, port->ip_idd, mask);
+ }
+}
+
+/**
+ * local_open - local open a port
+ * @port: port to open
+ */
+static inline int local_open(struct ioc3_port *port)
+{
+ int spiniter = 0;
+
+ port->ip_flags = INPUT_ENABLE;
+
+ /* Pause the DMA interface if necessary */
+ if (port->ip_sscr & SSCR_DMA_EN) {
+ writel(port->ip_sscr | SSCR_DMA_PAUSE,
+ &port->ip_serial_regs->sscr);
+ while ((readl(&port->ip_serial_regs->sscr)
+ & SSCR_PAUSE_STATE) == 0) {
+ spiniter++;
+ if (spiniter > MAXITER) {
+ NOT_PROGRESS();
+ return -1;
+ }
+ }
+ }
+
+ /* Reset the input fifo. If the uart received chars while the port
+ * was closed and DMA is not enabled, the uart may have a bunch of
+ * chars hanging around in its rx fifo which will not be discarded
+ * by rclr in the upper layer. We must get rid of them here.
+ */
+ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR,
+ &port->ip_uart_regs->iu_fcr);
+
+ writeb(UART_LCR_WLEN8, &port->ip_uart_regs->iu_lcr);
+	/* UART_LCR_STOP not set => 1 stop bit */
+
+ /* Re-enable DMA, set default threshold to intr whenever there is
+ * data available.
+ */
+ port->ip_sscr &= ~SSCR_RX_THRESHOLD;
+ port->ip_sscr |= 1; /* default threshold */
+
+ /* Plug in the new sscr. This implicitly clears the DMA_PAUSE
+ * flag if it was set above
+ */
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+ port->ip_tx_lowat = 1;
+ return 0;
+}
+
+/**
+ * set_rx_timeout - Set rx timeout and threshold values.
+ * @port: port to use
+ * @timeout: timeout value in ticks
+ */
+static inline int set_rx_timeout(struct ioc3_port *port, int timeout)
+{
+ int threshold;
+
+ port->ip_rx_timeout = timeout;
+
+ /* Timeout is in ticks. Let's figure out how many chars we
+ * can receive at the current baud rate in that interval
+ * and set the rx threshold to that amount. There are 4 chars
+ * per ring entry, so we'll divide the number of chars that will
+ * arrive in timeout by 4.
+ * So .... timeout * baud / 10 / HZ / 4, with HZ = 100.
+ */
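+	/* For instance (illustrative only): the default timeout of 2 ticks
+	 * at 9600 baud gives 2 * 9600 / 4000 = 4 ring entries, i.e. a 16
+	 * character threshold.
+	 */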
+ threshold = timeout * port->ip_baud / 4000;
+ if (threshold == 0)
+ threshold = 1; /* otherwise we'll intr all the time! */
+
+ if ((unsigned)threshold > (unsigned)SSCR_RX_THRESHOLD)
+ return 1;
+
+ port->ip_sscr &= ~SSCR_RX_THRESHOLD;
+ port->ip_sscr |= threshold;
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+
+ /* Now set the rx timeout to the given value
+ * again timeout * SRTR_HZ / HZ
+ */
+ timeout = timeout * SRTR_HZ / 100;
+ if (timeout > SRTR_CNT)
+ timeout = SRTR_CNT;
+ writel(timeout, &port->ip_serial_regs->srtr);
+ return 0;
+}
+
+/**
+ * config_port - config the hardware
+ * @port: port to config
+ * @baud: baud rate for the port
+ * @byte_size: data size
+ * @stop_bits: number of stop bits
+ * @parenb: parity enable ?
+ * @parodd: odd parity ?
+ */
+static inline int
+config_port(struct ioc3_port *port,
+ int baud, int byte_size, int stop_bits, int parenb, int parodd)
+{
+ char lcr, sizebits;
+ int spiniter = 0;
+
+ DPRINT_CONFIG(("%s: line %d baud %d byte_size %d stop %d parenb %d "
+ "parodd %d\n",
+ __func__, ((struct uart_port *)port->ip_port)->line,
+ baud, byte_size, stop_bits, parenb, parodd));
+
+ if (set_baud(port, baud))
+ return 1;
+
+ switch (byte_size) {
+ case 5:
+ sizebits = UART_LCR_WLEN5;
+ break;
+ case 6:
+ sizebits = UART_LCR_WLEN6;
+ break;
+ case 7:
+ sizebits = UART_LCR_WLEN7;
+ break;
+ case 8:
+ sizebits = UART_LCR_WLEN8;
+ break;
+ default:
+ return 1;
+ }
+
+ /* Pause the DMA interface if necessary */
+ if (port->ip_sscr & SSCR_DMA_EN) {
+ writel(port->ip_sscr | SSCR_DMA_PAUSE,
+ &port->ip_serial_regs->sscr);
+ while ((readl(&port->ip_serial_regs->sscr)
+ & SSCR_PAUSE_STATE) == 0) {
+ spiniter++;
+ if (spiniter > MAXITER)
+ return -1;
+ }
+ }
+
+ /* Clear relevant fields in lcr */
+ lcr = readb(&port->ip_uart_regs->iu_lcr);
+ lcr &= ~(LCR_MASK_BITS_CHAR | UART_LCR_EPAR |
+ UART_LCR_PARITY | LCR_MASK_STOP_BITS);
+
+ /* Set byte size in lcr */
+ lcr |= sizebits;
+
+ /* Set parity */
+ if (parenb) {
+ lcr |= UART_LCR_PARITY;
+ if (!parodd)
+ lcr |= UART_LCR_EPAR;
+ }
+
+ /* Set stop bits */
+ if (stop_bits)
+ lcr |= UART_LCR_STOP /* 2 stop bits */ ;
+
+ writeb(lcr, &port->ip_uart_regs->iu_lcr);
+
+ /* Re-enable the DMA interface if necessary */
+ if (port->ip_sscr & SSCR_DMA_EN) {
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+ }
+ port->ip_baud = baud;
+
+ /* When we get within this number of ring entries of filling the
+ * entire ring on tx, place an EXPLICIT intr to generate a lowat
+ * notification when output has drained.
+ */
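+	/* e.g. (sketch): at 115200 baud TX_LOWAT_CHARS() is 11, so
+	 * ip_tx_lowat becomes (11 + 3) / 4 = 3 ring entries; at 9600 baud it
+	 * would round down to 0 and is forced up to 1 below.
+	 */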
+ port->ip_tx_lowat = (TX_LOWAT_CHARS(baud) + 3) / 4;
+ if (port->ip_tx_lowat == 0)
+ port->ip_tx_lowat = 1;
+
+ set_rx_timeout(port, 2);
+ return 0;
+}
+
+/**
+ * do_write - Write bytes to the port. Returns the number of bytes
+ * actually written. Called from transmit_chars
+ * @port: port to use
+ * @buf: the stuff to write
+ * @len: how many bytes in 'buf'
+ */
+static inline int do_write(struct ioc3_port *port, char *buf, int len)
+{
+ int prod_ptr, cons_ptr, total = 0;
+ struct ring *outring;
+ struct ring_entry *entry;
+ struct port_hooks *hooks = port->ip_hooks;
+
+ BUG_ON(!(len >= 0));
+
+ prod_ptr = port->ip_tx_prod;
+ cons_ptr = readl(&port->ip_serial_regs->stcir) & PROD_CONS_MASK;
+ outring = port->ip_outring;
+
+	/* Maintain a 1-entry red-zone.  The ring buffer is full when
+	 * (cons - prod) % ring_size is one ring entry.  Rather than do this
+	 * subtraction in the body of the loop, do it once now.
+ */
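+	/* After this adjustment, prod_ptr == cons_ptr means the ring already
+	 * holds ENTRIES_PER_RING - 1 in-flight entries (the red-zone entry is
+	 * the only free one), so the copy loop below writes nothing.
+	 */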
+ cons_ptr = (cons_ptr - (int)sizeof(struct ring_entry)) & PROD_CONS_MASK;
+
+ /* Stuff the bytes into the output */
+ while ((prod_ptr != cons_ptr) && (len > 0)) {
+ int xx;
+
+ /* Get 4 bytes (one ring entry) at a time */
+ entry = (struct ring_entry *)((caddr_t) outring + prod_ptr);
+
+ /* Invalidate all entries */
+ entry->ring_allsc = 0;
+
+ /* Copy in some bytes */
+ for (xx = 0; (xx < 4) && (len > 0); xx++) {
+ entry->ring_data[xx] = *buf++;
+ entry->ring_sc[xx] = TXCB_VALID;
+ len--;
+ total++;
+ }
+
+ /* If we are within some small threshold of filling up the
+ * entire ring buffer, we must place an EXPLICIT intr here
+ * to generate a lowat interrupt in case we subsequently
+ * really do fill up the ring and the caller goes to sleep.
+ * No need to place more than one though.
+ */
+ if (!(port->ip_flags & LOWAT_WRITTEN) &&
+ ((cons_ptr - prod_ptr) & PROD_CONS_MASK)
+ <= port->ip_tx_lowat * (int)sizeof(struct ring_entry)) {
+ port->ip_flags |= LOWAT_WRITTEN;
+ entry->ring_sc[0] |= TXCB_INT_WHEN_DONE;
+ }
+
+ /* Go on to next entry */
+ prod_ptr += sizeof(struct ring_entry);
+ prod_ptr &= PROD_CONS_MASK;
+ }
+
+ /* If we sent something, start DMA if necessary */
+ if (total > 0 && !(port->ip_sscr & SSCR_DMA_EN)) {
+ port->ip_sscr |= SSCR_DMA_EN;
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+ }
+
+ /* Store the new producer pointer. If tx is disabled, we stuff the
+ * data into the ring buffer, but we don't actually start tx.
+ */
+ if (!uart_tx_stopped(port->ip_port)) {
+ writel(prod_ptr, &port->ip_serial_regs->stpir);
+
+ /* If we are now transmitting, enable tx_mt interrupt so we
+ * can disable DMA if necessary when the tx finishes.
+ */
+ if (total > 0)
+ enable_intrs(port, hooks->intr_tx_mt);
+ }
+ port->ip_tx_prod = prod_ptr;
+
+ return total;
+}
+
+/**
+ * disable_intrs - disable interrupts
+ * @port: port to enable
+ * @mask: mask to use
+ */
+static inline void disable_intrs(struct ioc3_port *port, uint32_t mask)
+{
+ if (port->ip_card->ic_enable & mask) {
+ ioc3_disable(port->ip_is, port->ip_idd, mask);
+ port->ip_card->ic_enable &= ~mask;
+ }
+}
+
+/**
+ * set_notification - Modify event notification
+ * @port: port to use
+ * @mask: events mask
+ * @set_on: set ?
+ */
+static int set_notification(struct ioc3_port *port, int mask, int set_on)
+{
+ struct port_hooks *hooks = port->ip_hooks;
+ uint32_t intrbits, sscrbits;
+
+ BUG_ON(!mask);
+
+ intrbits = sscrbits = 0;
+
+ if (mask & N_DATA_READY)
+ intrbits |= (hooks->intr_rx_timer | hooks->intr_rx_high);
+ if (mask & N_OUTPUT_LOWAT)
+ intrbits |= hooks->intr_tx_explicit;
+ if (mask & N_DDCD) {
+ intrbits |= hooks->intr_delta_dcd;
+ sscrbits |= SSCR_RX_RING_DCD;
+ }
+ if (mask & N_DCTS)
+ intrbits |= hooks->intr_delta_cts;
+
+ if (set_on) {
+ enable_intrs(port, intrbits);
+ port->ip_notify |= mask;
+ port->ip_sscr |= sscrbits;
+ } else {
+ disable_intrs(port, intrbits);
+ port->ip_notify &= ~mask;
+ port->ip_sscr &= ~sscrbits;
+ }
+
+ /* We require DMA if either DATA_READY or DDCD notification is
+ * currently requested. If neither of these is requested and
+ * there is currently no tx in progress, DMA may be disabled.
+ */
+ if (port->ip_notify & (N_DATA_READY | N_DDCD))
+ port->ip_sscr |= SSCR_DMA_EN;
+ else if (!(port->ip_card->ic_enable & hooks->intr_tx_mt))
+ port->ip_sscr &= ~SSCR_DMA_EN;
+
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+ return 0;
+}
+
+/**
+ * set_mcr - set the master control reg
+ * @the_port: port to use
+ * @mask1: mcr mask
+ * @mask2: shadow mask
+ */
+static inline int set_mcr(struct uart_port *the_port,
+ int mask1, int mask2)
+{
+ struct ioc3_port *port = get_ioc3_port(the_port);
+ uint32_t shadow;
+ int spiniter = 0;
+ char mcr;
+
+ if (!port)
+ return -1;
+
+ /* Pause the DMA interface if necessary */
+ if (port->ip_sscr & SSCR_DMA_EN) {
+ writel(port->ip_sscr | SSCR_DMA_PAUSE,
+ &port->ip_serial_regs->sscr);
+ while ((readl(&port->ip_serial_regs->sscr)
+ & SSCR_PAUSE_STATE) == 0) {
+ spiniter++;
+ if (spiniter > MAXITER)
+ return -1;
+ }
+ }
+ shadow = readl(&port->ip_serial_regs->shadow);
+ mcr = (shadow & 0xff000000) >> 24;
+
+ /* Set new value */
+ mcr |= mask1;
+ shadow |= mask2;
+ writeb(mcr, &port->ip_uart_regs->iu_mcr);
+ writel(shadow, &port->ip_serial_regs->shadow);
+
+ /* Re-enable the DMA interface if necessary */
+ if (port->ip_sscr & SSCR_DMA_EN) {
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+ }
+ return 0;
+}
+
+/**
+ * ioc3_set_proto - set the protocol for the port
+ * @port: port to use
+ * @proto: protocol to use
+ */
+static int ioc3_set_proto(struct ioc3_port *port, int proto)
+{
+ struct port_hooks *hooks = port->ip_hooks;
+
+ switch (proto) {
+ default:
+ case PROTO_RS232:
+ /* Clear the appropriate GIO pin */
+ DPRINT_CONFIG(("%s: rs232\n", __func__));
+ writel(0, (&port->ip_idd->vma->gppr[0]
+ + hooks->rs422_select_pin));
+ break;
+
+ case PROTO_RS422:
+ /* Set the appropriate GIO pin */
+ DPRINT_CONFIG(("%s: rs422\n", __func__));
+ writel(1, (&port->ip_idd->vma->gppr[0]
+ + hooks->rs422_select_pin));
+ break;
+ }
+ return 0;
+}
+
+/**
+ * transmit_chars - upper level write, called with the_port->lock
+ * @the_port: port to write
+ */
+static void transmit_chars(struct uart_port *the_port)
+{
+ int xmit_count, tail, head;
+ int result;
+ char *start;
+ struct tty_struct *tty;
+ struct ioc3_port *port = get_ioc3_port(the_port);
+ struct uart_state *state;
+
+ if (!the_port)
+ return;
+ if (!port)
+ return;
+
+ state = the_port->state;
+ tty = state->port.tty;
+
+ if (uart_circ_empty(&state->xmit) || uart_tx_stopped(the_port)) {
+ /* Nothing to do or hw stopped */
+ set_notification(port, N_ALL_OUTPUT, 0);
+ return;
+ }
+
+ head = state->xmit.head;
+ tail = state->xmit.tail;
+ start = (char *)&state->xmit.buf[tail];
+
+ /* write out all the data or until the end of the buffer */
+ xmit_count = (head < tail) ? (UART_XMIT_SIZE - tail) : (head - tail);
+ if (xmit_count > 0) {
+ result = do_write(port, start, xmit_count);
+ if (result > 0) {
+			/* bookkeeping */
+ xmit_count -= result;
+ the_port->icount.tx += result;
+ /* advance the pointers */
+ tail += result;
+ tail &= UART_XMIT_SIZE - 1;
+ state->xmit.tail = tail;
+ start = (char *)&state->xmit.buf[tail];
+ }
+ }
+ if (uart_circ_chars_pending(&state->xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(the_port);
+
+ if (uart_circ_empty(&state->xmit)) {
+ set_notification(port, N_OUTPUT_LOWAT, 0);
+ } else {
+ set_notification(port, N_OUTPUT_LOWAT, 1);
+ }
+}
+
+/**
+ * ioc3_change_speed - change the speed of the port
+ * @the_port: port to change
+ * @new_termios: new termios settings
+ * @old_termios: old termios settings
+ */
+static void
+ioc3_change_speed(struct uart_port *the_port,
+ struct ktermios *new_termios, struct ktermios *old_termios)
+{
+ struct ioc3_port *port = get_ioc3_port(the_port);
+ unsigned int cflag, iflag;
+ int baud;
+ int new_parity = 0, new_parity_enable = 0, new_stop = 0, new_data = 8;
+ struct uart_state *state = the_port->state;
+
+ cflag = new_termios->c_cflag;
+ iflag = new_termios->c_iflag;
+
+ switch (cflag & CSIZE) {
+ case CS5:
+ new_data = 5;
+ break;
+ case CS6:
+ new_data = 6;
+ break;
+ case CS7:
+ new_data = 7;
+ break;
+ case CS8:
+ new_data = 8;
+ break;
+ default:
+		/* because we always need a default ... */
+ new_data = 5;
+ break;
+ }
+ if (cflag & CSTOPB) {
+ new_stop = 1;
+ }
+ if (cflag & PARENB) {
+ new_parity_enable = 1;
+ if (cflag & PARODD)
+ new_parity = 1;
+ }
+ baud = uart_get_baud_rate(the_port, new_termios, old_termios,
+ MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED);
+ DPRINT_CONFIG(("%s: returned baud %d for line %d\n", __func__, baud,
+ the_port->line));
+
+ if (!the_port->fifosize)
+ the_port->fifosize = FIFO_SIZE;
+ uart_update_timeout(the_port, cflag, baud);
+
+ the_port->ignore_status_mask = N_ALL_INPUT;
+
+ state->port.tty->low_latency = 1;
+
+ if (iflag & IGNPAR)
+ the_port->ignore_status_mask &= ~(N_PARITY_ERROR
+ | N_FRAMING_ERROR);
+ if (iflag & IGNBRK) {
+ the_port->ignore_status_mask &= ~N_BREAK;
+ if (iflag & IGNPAR)
+ the_port->ignore_status_mask &= ~N_OVERRUN_ERROR;
+ }
+ if (!(cflag & CREAD)) {
+ /* ignore everything */
+ the_port->ignore_status_mask &= ~N_DATA_READY;
+ }
+
+ if (cflag & CRTSCTS) {
+ /* enable hardware flow control */
+ port->ip_sscr |= SSCR_HFC_EN;
+ }
+ else {
+ /* disable hardware flow control */
+ port->ip_sscr &= ~SSCR_HFC_EN;
+ }
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+
+ /* Set the configuration and proper notification call */
+ DPRINT_CONFIG(("%s : port 0x%p line %d cflag 0%o "
+ "config_port(baud %d data %d stop %d penable %d "
+ " parity %d), notification 0x%x\n",
+ __func__, (void *)port, the_port->line, cflag, baud,
+ new_data, new_stop, new_parity_enable, new_parity,
+ the_port->ignore_status_mask));
+
+ if ((config_port(port, baud, /* baud */
+ new_data, /* byte size */
+ new_stop, /* stop bits */
+ new_parity_enable, /* set parity */
+ new_parity)) >= 0) { /* parity 1==odd */
+ set_notification(port, the_port->ignore_status_mask, 1);
+ }
+}
+
+/**
+ * ic3_startup_local - Start up the serial port - returns >= 0 if no errors
+ * @the_port: Port to operate on
+ */
+static inline int ic3_startup_local(struct uart_port *the_port)
+{
+ struct ioc3_port *port;
+
+ if (!the_port) {
+ NOT_PROGRESS();
+ return -1;
+ }
+
+ port = get_ioc3_port(the_port);
+ if (!port) {
+ NOT_PROGRESS();
+ return -1;
+ }
+
+ local_open(port);
+
+ /* set the protocol */
+ ioc3_set_proto(port, IS_RS232(the_port->line) ? PROTO_RS232 :
+ PROTO_RS422);
+ return 0;
+}
+
+/*
+ * ioc3_cb_output_lowat - called when the output low water mark is hit
+ * @port: port to output
+ */
+static void ioc3_cb_output_lowat(struct ioc3_port *port)
+{
+ unsigned long pflags;
+
+ /* the_port->lock is set on the call here */
+ if (port->ip_port) {
+ spin_lock_irqsave(&port->ip_port->lock, pflags);
+ transmit_chars(port->ip_port);
+ spin_unlock_irqrestore(&port->ip_port->lock, pflags);
+ }
+}
+
+/*
+ * ioc3_cb_post_ncs - called for some basic errors
+ * @port: port to use
+ * @ncs: event
+ */
+static void ioc3_cb_post_ncs(struct uart_port *the_port, int ncs)
+{
+ struct uart_icount *icount;
+
+ icount = &the_port->icount;
+
+ if (ncs & NCS_BREAK)
+ icount->brk++;
+ if (ncs & NCS_FRAMING)
+ icount->frame++;
+ if (ncs & NCS_OVERRUN)
+ icount->overrun++;
+ if (ncs & NCS_PARITY)
+ icount->parity++;
+}
+
+/**
+ * do_read - Read in bytes from the port. Return the number of bytes
+ * actually read.
+ * @the_port: port to use
+ * @buf: place to put the stuff we read
+ * @len: how big 'buf' is
+ */
+
+static inline int do_read(struct uart_port *the_port, char *buf, int len)
+{
+ int prod_ptr, cons_ptr, total;
+ struct ioc3_port *port = get_ioc3_port(the_port);
+ struct ring *inring;
+ struct ring_entry *entry;
+ struct port_hooks *hooks = port->ip_hooks;
+ int byte_num;
+ char *sc;
+ int loop_counter;
+
+ BUG_ON(!(len >= 0));
+ BUG_ON(!port);
+
+ /* There is a nasty timing issue in the IOC3. When the rx_timer
+ * expires or the rx_high condition arises, we take an interrupt.
+ * At some point while servicing the interrupt, we read bytes from
+ * the ring buffer and re-arm the rx_timer. However the rx_timer is
+ * not started until the first byte is received *after* it is armed,
+ * and any bytes pending in the rx construction buffers are not drained
+ * to memory until either there are 4 bytes available or the rx_timer
+ * expires. This leads to a potential situation where data is left
+ * in the construction buffers forever - 1 to 3 bytes were received
+ * after the interrupt was generated but before the rx_timer was
+ * re-armed. At that point as long as no subsequent bytes are received
+ * the timer will never be started and the bytes will remain in the
+ * construction buffer forever. The solution is to execute a DRAIN
+ * command after rearming the timer. This way any bytes received before
+ * the DRAIN will be drained to memory, and any bytes received after
+ * the DRAIN will start the TIMER and be drained when it expires.
+ * Luckily, this only needs to be done when the DMA buffer is empty
+ * since there is no requirement that this function return all
+ * available data as long as it returns some.
+ */
+ /* Re-arm the timer */
+
+ writel(port->ip_rx_cons | SRCIR_ARM, &port->ip_serial_regs->srcir);
+
+ prod_ptr = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;
+ cons_ptr = port->ip_rx_cons;
+
+ if (prod_ptr == cons_ptr) {
+ int reset_dma = 0;
+
+ /* Input buffer appears empty, do a flush. */
+
+ /* DMA must be enabled for this to work. */
+ if (!(port->ip_sscr & SSCR_DMA_EN)) {
+ port->ip_sscr |= SSCR_DMA_EN;
+ reset_dma = 1;
+ }
+
+ /* Potential race condition: we must reload the srpir after
+ * issuing the drain command, otherwise we could think the rx
+ * buffer is empty, then take a very long interrupt, and when
+ * we come back it's full and we wait forever for the drain to
+ * complete.
+ */
+ writel(port->ip_sscr | SSCR_RX_DRAIN,
+ &port->ip_serial_regs->sscr);
+ prod_ptr = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;
+
+ /* We must not wait for the DRAIN to complete unless there are
+ * at least 8 bytes (2 ring entries) available to receive the
+ * data otherwise the DRAIN will never complete and we'll
+ * deadlock here.
+ * In fact, to make things easier, I'll just ignore the flush if
+ * there is any data at all now available.
+ */
+ if (prod_ptr == cons_ptr) {
+ loop_counter = 0;
+ while (readl(&port->ip_serial_regs->sscr) &
+ SSCR_RX_DRAIN) {
+ loop_counter++;
+ if (loop_counter > MAXITER)
+ return -1;
+ }
+
+ /* SIGH. We have to reload the prod_ptr *again* since
+ * the drain may have caused it to change
+ */
+ prod_ptr = readl(&port->ip_serial_regs->srpir)
+ & PROD_CONS_MASK;
+ }
+ if (reset_dma) {
+ port->ip_sscr &= ~SSCR_DMA_EN;
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+ }
+ }
+ inring = port->ip_inring;
+ port->ip_flags &= ~READ_ABORTED;
+
+ total = 0;
+ loop_counter = 0xfffff; /* to avoid hangs */
+
+ /* Grab bytes from the hardware */
+ while ((prod_ptr != cons_ptr) && (len > 0)) {
+ entry = (struct ring_entry *)((caddr_t) inring + cons_ptr);
+
+ if (loop_counter-- <= 0) {
+ printk(KERN_WARNING "IOC3 serial: "
+ "possible hang condition/"
+ "port stuck on read (line %d).\n",
+ the_port->line);
+ break;
+ }
+
+ /* According to the producer pointer, this ring entry
+ * must contain some data. But if the PIO happened faster
+ * than the DMA, the data may not be available yet, so let's
+ * wait until it arrives.
+ */
+ if ((entry->ring_allsc & RING_ANY_VALID) == 0) {
+ /* Indicate the read is aborted so we don't disable
+ * the interrupt thinking that the consumer is
+ * congested.
+ */
+ port->ip_flags |= READ_ABORTED;
+ len = 0;
+ break;
+ }
+
+ /* Load the bytes/status out of the ring entry */
+ for (byte_num = 0; byte_num < 4 && len > 0; byte_num++) {
+ sc = &(entry->ring_sc[byte_num]);
+
+ /* Check for change in modem state or overrun */
+ if ((*sc & RXSB_MODEM_VALID)
+ && (port->ip_notify & N_DDCD)) {
+ /* Notify upper layer if DCD dropped */
+ if ((port->ip_flags & DCD_ON)
+ && !(*sc & RXSB_DCD)) {
+ /* If we have already copied some data,
+ * return it. We'll pick up the carrier
+ * drop on the next pass. That way we
+ * don't throw away the data that has
+ * already been copied back to
+ * the caller's buffer.
+ */
+ if (total > 0) {
+ len = 0;
+ break;
+ }
+ port->ip_flags &= ~DCD_ON;
+
+ /* Turn off this notification so the
+ * carrier drop protocol won't see it
+ * again when it does a read.
+ */
+ *sc &= ~RXSB_MODEM_VALID;
+
+ /* To keep things consistent, we need
+ * to update the consumer pointer so
+ * the next reader won't come in and
+ * try to read the same ring entries
+ * again. This must be done here before
+ * the dcd change.
+ */
+
+ if ((entry->ring_allsc & RING_ANY_VALID)
+ == 0) {
+ cons_ptr += (int)sizeof
+ (struct ring_entry);
+ cons_ptr &= PROD_CONS_MASK;
+ }
+ writel(cons_ptr,
+ &port->ip_serial_regs->srcir);
+ port->ip_rx_cons = cons_ptr;
+
+ /* Notify upper layer of carrier drop */
+ if ((port->ip_notify & N_DDCD)
+ && port->ip_port) {
+ uart_handle_dcd_change
+ (port->ip_port, 0);
+ wake_up_interruptible
+ (&the_port->state->
+ port.delta_msr_wait);
+ }
+
+ /* If we had any data to return, we
+ * would have returned it above.
+ */
+ return 0;
+ }
+ }
+ if (*sc & RXSB_MODEM_VALID) {
+ /* Notify that an input overrun occurred */
+ if ((*sc & RXSB_OVERRUN)
+ && (port->ip_notify & N_OVERRUN_ERROR)) {
+ ioc3_cb_post_ncs(the_port, NCS_OVERRUN);
+ }
+ /* Don't look at this byte again */
+ *sc &= ~RXSB_MODEM_VALID;
+ }
+
+ /* Check for valid data or RX errors */
+ if ((*sc & RXSB_DATA_VALID) &&
+ ((*sc & (RXSB_PAR_ERR
+ | RXSB_FRAME_ERR | RXSB_BREAK))
+ && (port->ip_notify & (N_PARITY_ERROR
+ | N_FRAMING_ERROR
+ | N_BREAK)))) {
+ /* There is an error condition on the next byte.
+ * If we have already transferred some bytes,
+ * we'll stop here. Otherwise if this is the
+ * first byte to be read, we'll just transfer
+ * it alone after notifying the
+ * upper layer of its status.
+ */
+ if (total > 0) {
+ len = 0;
+ break;
+ } else {
+ if ((*sc & RXSB_PAR_ERR) &&
+ (port->
+ ip_notify & N_PARITY_ERROR)) {
+ ioc3_cb_post_ncs(the_port,
+ NCS_PARITY);
+ }
+ if ((*sc & RXSB_FRAME_ERR) &&
+ (port->
+ ip_notify & N_FRAMING_ERROR)) {
+ ioc3_cb_post_ncs(the_port,
+ NCS_FRAMING);
+ }
+ if ((*sc & RXSB_BREAK)
+ && (port->ip_notify & N_BREAK)) {
+ ioc3_cb_post_ncs
+ (the_port, NCS_BREAK);
+ }
+ len = 1;
+ }
+ }
+ if (*sc & RXSB_DATA_VALID) {
+ *sc &= ~RXSB_DATA_VALID;
+ *buf = entry->ring_data[byte_num];
+ buf++;
+ len--;
+ total++;
+ }
+ }
+
+ /* If we used up this entry entirely, go on to the next one,
+ * otherwise we must have run out of buffer space, so
+ * leave the consumer pointer here for the next read in case
+ * there are still unread bytes in this entry.
+ */
+ if ((entry->ring_allsc & RING_ANY_VALID) == 0) {
+ cons_ptr += (int)sizeof(struct ring_entry);
+ cons_ptr &= PROD_CONS_MASK;
+ }
+ }
+
+ /* Update consumer pointer and re-arm rx timer interrupt */
+ writel(cons_ptr, &port->ip_serial_regs->srcir);
+ port->ip_rx_cons = cons_ptr;
+
+ /* If we have now dipped below the rx high water mark and we have
+ * rx_high interrupt turned off, we can now turn it back on again.
+ */
+	if ((port->ip_flags & INPUT_HIGH) &&
+	    (((prod_ptr - cons_ptr) & PROD_CONS_MASK) <
+	     ((port->ip_sscr & SSCR_RX_THRESHOLD) << PROD_CONS_PTR_OFF))) {
+ port->ip_flags &= ~INPUT_HIGH;
+ enable_intrs(port, hooks->intr_rx_high);
+ }
+ return total;
+}
+
+/**
+ * receive_chars - upper level read.
+ * @the_port: port to read from
+ */
+static int receive_chars(struct uart_port *the_port)
+{
+ struct tty_struct *tty;
+ unsigned char ch[MAX_CHARS];
+ int read_count = 0, read_room, flip = 0;
+ struct uart_state *state = the_port->state;
+ struct ioc3_port *port = get_ioc3_port(the_port);
+ unsigned long pflags;
+
+ /* Make sure all the pointers are "good" ones */
+ if (!state)
+ return 0;
+ if (!state->port.tty)
+ return 0;
+
+ if (!(port->ip_flags & INPUT_ENABLE))
+ return 0;
+
+ spin_lock_irqsave(&the_port->lock, pflags);
+ tty = state->port.tty;
+
+ read_count = do_read(the_port, ch, MAX_CHARS);
+ if (read_count > 0) {
+ flip = 1;
+ read_room = tty_insert_flip_string(tty, ch, read_count);
+ the_port->icount.rx += read_count;
+ }
+ spin_unlock_irqrestore(&the_port->lock, pflags);
+
+ if (flip)
+ tty_flip_buffer_push(tty);
+
+ return read_count;
+}
+
+/**
+ * ioc3uart_intr_one - lowest level (per port) interrupt handler.
+ * @is : submodule
+ * @idd: driver data
+ * @pending: interrupts to handle
+ */
+
+static inline int
+ioc3uart_intr_one(struct ioc3_submodule *is,
+ struct ioc3_driver_data *idd,
+ unsigned int pending)
+{
+ int port_num = GET_PORT_FROM_SIO_IR(pending);
+ struct port_hooks *hooks;
+ unsigned int rx_high_rd_aborted = 0;
+ unsigned long flags;
+ struct uart_port *the_port;
+ struct ioc3_port *port;
+ int loop_counter;
+ struct ioc3_card *card_ptr;
+ unsigned int sio_ir;
+
+ card_ptr = idd->data[is->id];
+ port = card_ptr->ic_port[port_num].icp_port;
+ hooks = port->ip_hooks;
+
+	/* Possible race condition here: the tx_mt interrupt bit may be
+	 * cleared without the intervention of the interrupt handler,
+	 * e.g. by a write.  If the top level interrupt handler reads a
+	 * tx_mt, then some other processor does a write, starting up
+	 * output, then we come in here, see the tx_mt and stop DMA, the
+	 * output started by the other processor will hang.  Thus we can
+	 * only rely on tx_mt being legitimate if it is read while the
+	 * port lock is held.  The mask passed in was read by the top level
+	 * handler without the port lock held, so clear tx_mt from it for
+	 * now and reload it later once we hold the lock.
+	 */
+
+ sio_ir = pending & ~(hooks->intr_tx_mt);
+ spin_lock_irqsave(&port->ip_lock, flags);
+
+ loop_counter = MAXITER; /* to avoid hangs */
+
+ do {
+ uint32_t shadow;
+
+ if (loop_counter-- <= 0) {
+ printk(KERN_WARNING "IOC3 serial: "
+ "possible hang condition/"
+ "port stuck on interrupt (line %d).\n",
+ ((struct uart_port *)port->ip_port)->line);
+ break;
+ }
+ /* Handle a DCD change */
+ if (sio_ir & hooks->intr_delta_dcd) {
+ ioc3_ack(is, idd, hooks->intr_delta_dcd);
+ shadow = readl(&port->ip_serial_regs->shadow);
+
+ if ((port->ip_notify & N_DDCD)
+ && (shadow & SHADOW_DCD)
+ && (port->ip_port)) {
+ the_port = port->ip_port;
+ uart_handle_dcd_change(the_port,
+ shadow & SHADOW_DCD);
+ wake_up_interruptible
+ (&the_port->state->port.delta_msr_wait);
+ } else if ((port->ip_notify & N_DDCD)
+ && !(shadow & SHADOW_DCD)) {
+ /* Flag delta DCD/no DCD */
+ uart_handle_dcd_change(port->ip_port,
+ shadow & SHADOW_DCD);
+ port->ip_flags |= DCD_ON;
+ }
+ }
+
+ /* Handle a CTS change */
+ if (sio_ir & hooks->intr_delta_cts) {
+ ioc3_ack(is, idd, hooks->intr_delta_cts);
+ shadow = readl(&port->ip_serial_regs->shadow);
+
+ if ((port->ip_notify & N_DCTS) && (port->ip_port)) {
+ the_port = port->ip_port;
+ uart_handle_cts_change(the_port, shadow
+ & SHADOW_CTS);
+ wake_up_interruptible
+ (&the_port->state->port.delta_msr_wait);
+ }
+ }
+
+ /* rx timeout interrupt. Must be some data available. Put this
+ * before the check for rx_high since servicing this condition
+ * may cause that condition to clear.
+ */
+ if (sio_ir & hooks->intr_rx_timer) {
+ ioc3_ack(is, idd, hooks->intr_rx_timer);
+ if ((port->ip_notify & N_DATA_READY)
+ && (port->ip_port)) {
+ receive_chars(port->ip_port);
+ }
+ }
+
+ /* rx high interrupt. Must be after rx_timer. */
+ else if (sio_ir & hooks->intr_rx_high) {
+ /* Data available, notify upper layer */
+ if ((port->ip_notify & N_DATA_READY) && port->ip_port) {
+ receive_chars(port->ip_port);
+ }
+
+ /* We can't ACK this interrupt. If receive_chars didn't
+ * cause the condition to clear, we'll have to disable
+ * the interrupt until the data is drained.
+ * If the read was aborted, don't disable the interrupt
+ * as this may cause us to hang indefinitely. An
+ * aborted read generally means that this interrupt
+ * hasn't been delivered to the cpu yet anyway, even
+ * though we see it as asserted when we read the sio_ir.
+ */
+ if ((sio_ir = PENDING(card_ptr, idd))
+ & hooks->intr_rx_high) {
+ if (port->ip_flags & READ_ABORTED) {
+ rx_high_rd_aborted++;
+ }
+ else {
+ card_ptr->ic_enable &= ~hooks->intr_rx_high;
+ port->ip_flags |= INPUT_HIGH;
+ }
+ }
+ }
+
+ /* We got a low water interrupt: notify upper layer to
+ * send more data. Must come before tx_mt since servicing
+ * this condition may cause that condition to clear.
+ */
+ if (sio_ir & hooks->intr_tx_explicit) {
+ port->ip_flags &= ~LOWAT_WRITTEN;
+ ioc3_ack(is, idd, hooks->intr_tx_explicit);
+ if (port->ip_notify & N_OUTPUT_LOWAT)
+ ioc3_cb_output_lowat(port);
+ }
+
+ /* Handle tx_mt. Must come after tx_explicit. */
+ else if (sio_ir & hooks->intr_tx_mt) {
+ /* If we are expecting a lowat notification
+ * and we get to this point it probably means that for
+ * some reason the tx_explicit didn't work as expected
+ * (that can legitimately happen if the output buffer is
+ * filled up in just the right way).
+ * So send the notification now.
+ */
+ if (port->ip_notify & N_OUTPUT_LOWAT) {
+ ioc3_cb_output_lowat(port);
+
+ /* We need to reload the sio_ir since the lowat
+ * call may have caused another write to occur,
+ * clearing the tx_mt condition.
+ */
+ sio_ir = PENDING(card_ptr, idd);
+ }
+
+ /* If the tx_mt condition still persists even after the
+ * lowat call, we've got some work to do.
+ */
+ if (sio_ir & hooks->intr_tx_mt) {
+ /* If we are not currently expecting DMA input,
+ * and the transmitter has just gone idle,
+ * there is no longer any reason for DMA, so
+ * disable it.
+ */
+ if (!(port->ip_notify
+ & (N_DATA_READY | N_DDCD))) {
+ BUG_ON(!(port->ip_sscr
+ & SSCR_DMA_EN));
+ port->ip_sscr &= ~SSCR_DMA_EN;
+ writel(port->ip_sscr,
+ &port->ip_serial_regs->sscr);
+ }
+ /* Prevent infinite tx_mt interrupt */
+ card_ptr->ic_enable &= ~hooks->intr_tx_mt;
+ }
+ }
+ sio_ir = PENDING(card_ptr, idd);
+
+ /* if the read was aborted and only hooks->intr_rx_high,
+ * clear hooks->intr_rx_high, so we do not loop forever.
+ */
+
+ if (rx_high_rd_aborted && (sio_ir == hooks->intr_rx_high)) {
+ sio_ir &= ~hooks->intr_rx_high;
+ }
+ } while (sio_ir & hooks->intr_all);
+
+ spin_unlock_irqrestore(&port->ip_lock, flags);
+ ioc3_enable(is, idd, card_ptr->ic_enable);
+ return 0;
+}
+
+/**
+ * ioc3uart_intr - field all serial interrupts
+ * @is : submodule
+ * @idd: driver data
+ * @pending: interrupts to handle
+ *
+ */
+
+static int ioc3uart_intr(struct ioc3_submodule *is,
+ struct ioc3_driver_data *idd,
+ unsigned int pending)
+{
+ int ret = 0;
+
+ /*
+ * The upper level interrupt handler sends interrupts for both ports
+ * here. So we need to call for each port with its interrupts.
+ */
+
+ if (pending & SIO_IR_SA)
+ ret |= ioc3uart_intr_one(is, idd, pending & SIO_IR_SA);
+ if (pending & SIO_IR_SB)
+ ret |= ioc3uart_intr_one(is, idd, pending & SIO_IR_SB);
+
+ return ret;
+}
+
+/**
+ * ic3_type
+ * @port: Port to operate on (only used to pick rs232 vs rs422)
+ *
+ */
+static const char *ic3_type(struct uart_port *the_port)
+{
+ if (IS_RS232(the_port->line))
+ return "SGI IOC3 Serial [rs232]";
+ else
+ return "SGI IOC3 Serial [rs422]";
+}
+
+/**
+ * ic3_tx_empty - Is the transmitter empty?
+ * @port: Port to operate on
+ *
+ */
+static unsigned int ic3_tx_empty(struct uart_port *the_port)
+{
+ unsigned int ret = 0;
+ struct ioc3_port *port = get_ioc3_port(the_port);
+
+ if (readl(&port->ip_serial_regs->shadow) & SHADOW_TEMT)
+ ret = TIOCSER_TEMT;
+ return ret;
+}
+
+/**
+ * ic3_stop_tx - stop the transmitter
+ * @port: Port to operate on
+ *
+ */
+static void ic3_stop_tx(struct uart_port *the_port)
+{
+ struct ioc3_port *port = get_ioc3_port(the_port);
+
+ if (port)
+ set_notification(port, N_OUTPUT_LOWAT, 0);
+}
+
+/**
+ * ic3_stop_rx - stop the receiver
+ * @port: Port to operate on
+ *
+ */
+static void ic3_stop_rx(struct uart_port *the_port)
+{
+ struct ioc3_port *port = get_ioc3_port(the_port);
+
+ if (port)
+ port->ip_flags &= ~INPUT_ENABLE;
+}
+
+/**
+ * null_void_function
+ * @port: Port to operate on
+ *
+ */
+static void null_void_function(struct uart_port *the_port)
+{
+}
+
+/**
+ * ic3_shutdown - shut down the port - free irq and disable
+ * @port: port to shut down
+ *
+ */
+static void ic3_shutdown(struct uart_port *the_port)
+{
+ unsigned long port_flags;
+ struct ioc3_port *port;
+ struct uart_state *state;
+
+ port = get_ioc3_port(the_port);
+ if (!port)
+ return;
+
+ state = the_port->state;
+ wake_up_interruptible(&state->port.delta_msr_wait);
+
+ spin_lock_irqsave(&the_port->lock, port_flags);
+ set_notification(port, N_ALL, 0);
+ spin_unlock_irqrestore(&the_port->lock, port_flags);
+}
+
+/**
+ * ic3_set_mctrl - set control lines (dtr, rts, etc)
+ * @port: Port to operate on
+ * @mctrl: Lines to set/unset
+ *
+ */
+static void ic3_set_mctrl(struct uart_port *the_port, unsigned int mctrl)
+{
+ unsigned char mcr = 0;
+
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_OUT1)
+ mcr |= UART_MCR_OUT1;
+ if (mctrl & TIOCM_OUT2)
+ mcr |= UART_MCR_OUT2;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+ set_mcr(the_port, mcr, SHADOW_DTR);
+}
+
+/**
+ * ic3_get_mctrl - get control line info
+ * @port: port to operate on
+ *
+ */
+static unsigned int ic3_get_mctrl(struct uart_port *the_port)
+{
+ struct ioc3_port *port = get_ioc3_port(the_port);
+ uint32_t shadow;
+ unsigned int ret = 0;
+
+ if (!port)
+ return 0;
+
+ shadow = readl(&port->ip_serial_regs->shadow);
+ if (shadow & SHADOW_DCD)
+ ret |= TIOCM_CD;
+ if (shadow & SHADOW_DR)
+ ret |= TIOCM_DSR;
+ if (shadow & SHADOW_CTS)
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+/**
+ * ic3_start_tx - Start transmitter. Called with the_port->lock
+ * @port: Port to operate on
+ *
+ */
+static void ic3_start_tx(struct uart_port *the_port)
+{
+ struct ioc3_port *port = get_ioc3_port(the_port);
+
+ if (port) {
+ set_notification(port, N_OUTPUT_LOWAT, 1);
+ enable_intrs(port, port->ip_hooks->intr_tx_mt);
+ }
+}
+
+/**
+ * ic3_break_ctl - handle breaks
+ * @port: Port to operate on
+ * @break_state: Break state
+ *
+ */
+static void ic3_break_ctl(struct uart_port *the_port, int break_state)
+{
+}
+
+/**
+ * ic3_startup - Start up the serial port - returns 0 on success or a
+ * negative value on error
+ * @port: Port to operate on
+ *
+ */
+static int ic3_startup(struct uart_port *the_port)
+{
+ int retval;
+ struct ioc3_port *port;
+ struct ioc3_card *card_ptr;
+ unsigned long port_flags;
+
+ if (!the_port) {
+ NOT_PROGRESS();
+ return -ENODEV;
+ }
+ port = get_ioc3_port(the_port);
+ if (!port) {
+ NOT_PROGRESS();
+ return -ENODEV;
+ }
+ card_ptr = port->ip_card;
+ port->ip_port = the_port;
+
+ if (!card_ptr) {
+ NOT_PROGRESS();
+ return -ENODEV;
+ }
+
+ /* Start up the serial port */
+ spin_lock_irqsave(&the_port->lock, port_flags);
+ retval = ic3_startup_local(the_port);
+ spin_unlock_irqrestore(&the_port->lock, port_flags);
+ return retval;
+}
+
+/**
+ * ic3_set_termios - set the termios settings
+ * @port: port to operate on
+ * @termios: New settings
+ * @old_termios: Old settings
+ *
+ */
+static void
+ic3_set_termios(struct uart_port *the_port,
+ struct ktermios *termios, struct ktermios *old_termios)
+{
+ unsigned long port_flags;
+
+ spin_lock_irqsave(&the_port->lock, port_flags);
+ ioc3_change_speed(the_port, termios, old_termios);
+ spin_unlock_irqrestore(&the_port->lock, port_flags);
+}
+
+/**
+ * ic3_request_port - allocate resources for port - no op....
+ * @port: port to operate on
+ *
+ */
+static int ic3_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+/* Associate the uart functions above - given to serial core */
+static struct uart_ops ioc3_ops = {
+ .tx_empty = ic3_tx_empty,
+ .set_mctrl = ic3_set_mctrl,
+ .get_mctrl = ic3_get_mctrl,
+ .stop_tx = ic3_stop_tx,
+ .start_tx = ic3_start_tx,
+ .stop_rx = ic3_stop_rx,
+ .enable_ms = null_void_function,
+ .break_ctl = ic3_break_ctl,
+ .startup = ic3_startup,
+ .shutdown = ic3_shutdown,
+ .set_termios = ic3_set_termios,
+ .type = ic3_type,
+ .release_port = null_void_function,
+ .request_port = ic3_request_port,
+};
+
+/*
+ * Boot-time initialization code
+ */
+
+static struct uart_driver ioc3_uart = {
+ .owner = THIS_MODULE,
+ .driver_name = "ioc3_serial",
+ .dev_name = DEVICE_NAME,
+ .major = DEVICE_MAJOR,
+ .minor = DEVICE_MINOR,
+ .nr = MAX_LOGICAL_PORTS
+};
+
+/**
+ * ioc3_serial_core_attach - register with serial core
+ * This is done during pci probing
+ * @is: submodule struct for this
+ * @idd: handle for this card
+ */
+static inline int ioc3_serial_core_attach(struct ioc3_submodule *is,
+ struct ioc3_driver_data *idd)
+{
+ struct ioc3_port *port;
+ struct uart_port *the_port;
+ struct ioc3_card *card_ptr = idd->data[is->id];
+ int ii, phys_port;
+ struct pci_dev *pdev = idd->pdev;
+
+ DPRINT_CONFIG(("%s: attach pdev 0x%p - card_ptr 0x%p\n",
+ __func__, pdev, (void *)card_ptr));
+
+ if (!card_ptr)
+ return -ENODEV;
+
+ /* once around for each logical port on this card */
+ for (ii = 0; ii < LOGICAL_PORTS_PER_CARD; ii++) {
+ phys_port = GET_PHYSICAL_PORT(ii);
+ the_port = &card_ptr->ic_port[phys_port].
+ icp_uart_port[GET_LOGICAL_PORT(ii)];
+ port = card_ptr->ic_port[phys_port].icp_port;
+ port->ip_port = the_port;
+
+ DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p [%d/%d]\n",
+ __func__, (void *)the_port, (void *)port,
+ phys_port, ii));
+
+ /* membase, iobase and mapbase just need to be non-0 */
+ the_port->membase = (unsigned char __iomem *)1;
+ the_port->iobase = (pdev->bus->number << 16) | ii;
+ the_port->line = (Num_of_ioc3_cards << 2) | ii;
+ the_port->mapbase = 1;
+ the_port->type = PORT_16550A;
+ the_port->fifosize = FIFO_SIZE;
+ the_port->ops = &ioc3_ops;
+ the_port->irq = idd->irq_io;
+ the_port->dev = &pdev->dev;
+
+ if (uart_add_one_port(&ioc3_uart, the_port) < 0) {
+ printk(KERN_WARNING
+ "%s: unable to add port %d bus %d\n",
+ __func__, the_port->line, pdev->bus->number);
+ } else {
+ DPRINT_CONFIG(("IOC3 serial port %d irq %d bus %d\n",
+ the_port->line, the_port->irq, pdev->bus->number));
+ }
+
+ /* all ports are rs232 for now */
+ if (IS_PHYSICAL_PORT(ii))
+ ioc3_set_proto(port, PROTO_RS232);
+ }
+ return 0;
+}
+
+/**
+ * ioc3uart_remove - register detach function
+ * @is: submodule struct for this submodule
+ * @idd: ioc3 driver data for this submodule
+ */
+
+static int ioc3uart_remove(struct ioc3_submodule *is,
+ struct ioc3_driver_data *idd)
+{
+ struct ioc3_card *card_ptr = idd->data[is->id];
+ struct uart_port *the_port;
+ struct ioc3_port *port;
+ int ii;
+
+ if (card_ptr) {
+ for (ii = 0; ii < LOGICAL_PORTS_PER_CARD; ii++) {
+ the_port = &card_ptr->ic_port[GET_PHYSICAL_PORT(ii)].
+ icp_uart_port[GET_LOGICAL_PORT(ii)];
+ if (the_port)
+ uart_remove_one_port(&ioc3_uart, the_port);
+ port = card_ptr->ic_port[GET_PHYSICAL_PORT(ii)].icp_port;
+ if (port && IS_PHYSICAL_PORT(ii)
+ && (GET_PHYSICAL_PORT(ii) == 0)) {
+ pci_free_consistent(port->ip_idd->pdev,
+ TOTAL_RING_BUF_SIZE,
+ (void *)port->ip_cpu_ringbuf,
+ port->ip_dma_ringbuf);
+ kfree(port);
+ card_ptr->ic_port[GET_PHYSICAL_PORT(ii)].
+ icp_port = NULL;
+ }
+ }
+ kfree(card_ptr);
+ idd->data[is->id] = NULL;
+ }
+ return 0;
+}
+
+/**
+ * ioc3uart_probe - card probe function called from shim driver
+ * @is: submodule struct for this submodule
+ * @idd: ioc3 driver data for this card
+ */
+
+static int __devinit
+ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
+{
+ struct pci_dev *pdev = idd->pdev;
+ struct ioc3_card *card_ptr;
+ int ret = 0;
+ struct ioc3_port *port;
+ struct ioc3_port *ports[PORTS_PER_CARD];
+ int phys_port;
+ int cnt;
+
+ DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, is, idd));
+
+ card_ptr = kzalloc(sizeof(struct ioc3_card), GFP_KERNEL);
+ if (!card_ptr) {
+ printk(KERN_WARNING "ioc3_attach_one"
+ ": unable to get memory for the IOC3\n");
+ return -ENOMEM;
+ }
+ idd->data[is->id] = card_ptr;
+ Submodule_slot = is->id;
+
+ writel(((UARTA_BASE >> 3) << SIO_CR_SER_A_BASE_SHIFT) |
+ ((UARTB_BASE >> 3) << SIO_CR_SER_B_BASE_SHIFT) |
+ (0xf << SIO_CR_CMD_PULSE_SHIFT), &idd->vma->sio_cr);
+
+ pci_write_config_dword(pdev, PCI_LAT, 0xff00);
+
+ /* Enable serial port mode select generic PIO pins as outputs */
+ ioc3_gpcr_set(idd, GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL);
+
+ /* Create port structures for each port */
+ for (phys_port = 0; phys_port < PORTS_PER_CARD; phys_port++) {
+ port = kzalloc(sizeof(struct ioc3_port), GFP_KERNEL);
+ if (!port) {
+ printk(KERN_WARNING
+ "IOC3 serial memory not available for port\n");
+ ret = -ENOMEM;
+ goto out4;
+ }
+ spin_lock_init(&port->ip_lock);
+
+		/* we need to remember the previous ones so we can point back
+		 * to them farther down, when setting up the ring buffers.
+ */
+ ports[phys_port] = port;
+
+ /* init to something useful */
+ card_ptr->ic_port[phys_port].icp_port = port;
+ port->ip_is = is;
+ port->ip_idd = idd;
+ port->ip_baud = 9600;
+ port->ip_card = card_ptr;
+ port->ip_hooks = &hooks_array[phys_port];
+
+ /* Setup each port */
+ if (phys_port == 0) {
+ port->ip_serial_regs = &idd->vma->port_a;
+ port->ip_uart_regs = &idd->vma->sregs.uarta;
+
+ DPRINT_CONFIG(("%s : Port A ip_serial_regs 0x%p "
+ "ip_uart_regs 0x%p\n",
+ __func__,
+ (void *)port->ip_serial_regs,
+ (void *)port->ip_uart_regs));
+
+ /* setup ring buffers */
+ port->ip_cpu_ringbuf = pci_alloc_consistent(pdev,
+ TOTAL_RING_BUF_SIZE, &port->ip_dma_ringbuf);
+
+ BUG_ON(!((((int64_t) port->ip_dma_ringbuf) &
+ (TOTAL_RING_BUF_SIZE - 1)) == 0));
+ port->ip_inring = RING(port, RX_A);
+ port->ip_outring = RING(port, TX_A);
+ DPRINT_CONFIG(("%s : Port A ip_cpu_ringbuf 0x%p "
+ "ip_dma_ringbuf 0x%p, ip_inring 0x%p "
+ "ip_outring 0x%p\n",
+ __func__,
+ (void *)port->ip_cpu_ringbuf,
+ (void *)port->ip_dma_ringbuf,
+ (void *)port->ip_inring,
+ (void *)port->ip_outring));
+ }
+ else {
+ port->ip_serial_regs = &idd->vma->port_b;
+ port->ip_uart_regs = &idd->vma->sregs.uartb;
+
+ DPRINT_CONFIG(("%s : Port B ip_serial_regs 0x%p "
+ "ip_uart_regs 0x%p\n",
+ __func__,
+ (void *)port->ip_serial_regs,
+ (void *)port->ip_uart_regs));
+
+ /* share the ring buffers */
+ port->ip_dma_ringbuf =
+ ports[phys_port - 1]->ip_dma_ringbuf;
+ port->ip_cpu_ringbuf =
+ ports[phys_port - 1]->ip_cpu_ringbuf;
+ port->ip_inring = RING(port, RX_B);
+ port->ip_outring = RING(port, TX_B);
+ DPRINT_CONFIG(("%s : Port B ip_cpu_ringbuf 0x%p "
+ "ip_dma_ringbuf 0x%p, ip_inring 0x%p "
+ "ip_outring 0x%p\n",
+ __func__,
+ (void *)port->ip_cpu_ringbuf,
+ (void *)port->ip_dma_ringbuf,
+ (void *)port->ip_inring,
+ (void *)port->ip_outring));
+ }
+
+ DPRINT_CONFIG(("%s : port %d [addr 0x%p] card_ptr 0x%p",
+ __func__,
+ phys_port, (void *)port, (void *)card_ptr));
+ DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n",
+ (void *)port->ip_serial_regs,
+ (void *)port->ip_uart_regs));
+
+ /* Initialize the hardware for IOC3 */
+ port_init(port);
+
+ DPRINT_CONFIG(("%s: phys_port %d port 0x%p inring 0x%p "
+ "outring 0x%p\n",
+ __func__,
+ phys_port, (void *)port,
+ (void *)port->ip_inring,
+ (void *)port->ip_outring));
+
+ }
+
+ /* register port with the serial core */
+
+ if ((ret = ioc3_serial_core_attach(is, idd)))
+ goto out4;
+
+ Num_of_ioc3_cards++;
+
+ return ret;
+
+ /* error exits that give back resources */
+out4:
+ for (cnt = 0; cnt < phys_port; cnt++)
+ kfree(ports[cnt]);
+
+ kfree(card_ptr);
+ return ret;
+}
+
+static struct ioc3_submodule ioc3uart_ops = {
+ .name = "IOC3uart",
+ .probe = ioc3uart_probe,
+ .remove = ioc3uart_remove,
+ /* call .intr for both ports initially */
+ .irq_mask = SIO_IR_SA | SIO_IR_SB,
+ .intr = ioc3uart_intr,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * ioc3uart_init - module initialization
+ */
+static int __init ioc3uart_init(void)
+{
+ int ret;
+
+ /* register with serial core */
+ if ((ret = uart_register_driver(&ioc3_uart)) < 0) {
+ printk(KERN_WARNING
+ "%s: Couldn't register IOC3 uart serial driver\n",
+ __func__);
+ return ret;
+ }
+ ret = ioc3_register_submodule(&ioc3uart_ops);
+ if (ret)
+ uart_unregister_driver(&ioc3_uart);
+ return ret;
+}
+
+static void __exit ioc3uart_exit(void)
+{
+ ioc3_unregister_submodule(&ioc3uart_ops);
+ uart_unregister_driver(&ioc3_uart);
+}
+
+module_init(ioc3uart_init);
+module_exit(ioc3uart_exit);
+
+MODULE_AUTHOR("Pat Gefre - Silicon Graphics Inc. (SGI) <pfg@sgi.com>");
+MODULE_DESCRIPTION("Serial PCI driver module for SGI IOC3 card");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
new file mode 100644
index 00000000..fcfe8265
--- /dev/null
+++ b/drivers/tty/serial/ioc4_serial.c
@@ -0,0 +1,2953 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003-2006 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+
+/*
+ * This file contains a module version of the ioc4 serial driver. This
+ * includes all the support functions needed (support functions, etc.)
+ * and the serial driver itself.
+ */
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/serialP.h>
+#include <linux/circ_buf.h>
+#include <linux/serial_reg.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ioc4.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+
+/*
+ * interesting things about the ioc4
+ */
+
+#define IOC4_NUM_SERIAL_PORTS 4 /* max ports per card */
+#define IOC4_NUM_CARDS 8 /* max cards per partition */
+
+#define GET_SIO_IR(_n) (_n == 0) ? (IOC4_SIO_IR_S0) : \
+ (_n == 1) ? (IOC4_SIO_IR_S1) : \
+ (_n == 2) ? (IOC4_SIO_IR_S2) : \
+ (IOC4_SIO_IR_S3)
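+/* e.g. GET_SIO_IR(2) evaluates to IOC4_SIO_IR_S2. */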
+
+#define GET_OTHER_IR(_n) (_n == 0) ? (IOC4_OTHER_IR_S0_MEMERR) : \
+ (_n == 1) ? (IOC4_OTHER_IR_S1_MEMERR) : \
+ (_n == 2) ? (IOC4_OTHER_IR_S2_MEMERR) : \
+ (IOC4_OTHER_IR_S3_MEMERR)
+
+
+/*
+ * All IOC4 registers are 32 bits wide.
+ */
+
+/*
+ * PCI Memory Space Map
+ */
+#define IOC4_PCI_ERR_ADDR_L 0x000 /* Low Error Address */
+#define IOC4_PCI_ERR_ADDR_VLD (0x1 << 0)
+#define IOC4_PCI_ERR_ADDR_MST_ID_MSK (0xf << 1)
+#define IOC4_PCI_ERR_ADDR_MST_NUM_MSK (0xe << 1)
+#define IOC4_PCI_ERR_ADDR_MST_TYP_MSK (0x1 << 1)
+#define IOC4_PCI_ERR_ADDR_MUL_ERR (0x1 << 5)
+#define IOC4_PCI_ERR_ADDR_ADDR_MSK (0x3ffffff << 6)
+
+/* Interrupt types */
+#define IOC4_SIO_INTR_TYPE 0
+#define IOC4_OTHER_INTR_TYPE 1
+#define IOC4_NUM_INTR_TYPES 2
+
+/* Bitmasks for IOC4_SIO_IR, IOC4_SIO_IEC, and IOC4_SIO_IES */
+#define IOC4_SIO_IR_S0_TX_MT 0x00000001 /* Serial port 0 TX empty */
+#define IOC4_SIO_IR_S0_RX_FULL 0x00000002 /* Port 0 RX buf full */
+#define IOC4_SIO_IR_S0_RX_HIGH 0x00000004 /* Port 0 RX hiwat */
+#define IOC4_SIO_IR_S0_RX_TIMER 0x00000008 /* Port 0 RX timeout */
+#define IOC4_SIO_IR_S0_DELTA_DCD 0x00000010 /* Port 0 delta DCD */
+#define IOC4_SIO_IR_S0_DELTA_CTS 0x00000020 /* Port 0 delta CTS */
+#define IOC4_SIO_IR_S0_INT 0x00000040 /* Port 0 pass-thru intr */
+#define IOC4_SIO_IR_S0_TX_EXPLICIT 0x00000080 /* Port 0 explicit TX thru */
+#define IOC4_SIO_IR_S1_TX_MT 0x00000100 /* Serial port 1 */
+#define IOC4_SIO_IR_S1_RX_FULL 0x00000200 /* */
+#define IOC4_SIO_IR_S1_RX_HIGH 0x00000400 /* */
+#define IOC4_SIO_IR_S1_RX_TIMER 0x00000800 /* */
+#define IOC4_SIO_IR_S1_DELTA_DCD 0x00001000 /* */
+#define IOC4_SIO_IR_S1_DELTA_CTS 0x00002000 /* */
+#define IOC4_SIO_IR_S1_INT 0x00004000 /* */
+#define IOC4_SIO_IR_S1_TX_EXPLICIT 0x00008000 /* */
+#define IOC4_SIO_IR_S2_TX_MT 0x00010000 /* Serial port 2 */
+#define IOC4_SIO_IR_S2_RX_FULL 0x00020000 /* */
+#define IOC4_SIO_IR_S2_RX_HIGH 0x00040000 /* */
+#define IOC4_SIO_IR_S2_RX_TIMER 0x00080000 /* */
+#define IOC4_SIO_IR_S2_DELTA_DCD 0x00100000 /* */
+#define IOC4_SIO_IR_S2_DELTA_CTS 0x00200000 /* */
+#define IOC4_SIO_IR_S2_INT 0x00400000 /* */
+#define IOC4_SIO_IR_S2_TX_EXPLICIT 0x00800000 /* */
+#define IOC4_SIO_IR_S3_TX_MT 0x01000000 /* Serial port 3 */
+#define IOC4_SIO_IR_S3_RX_FULL 0x02000000 /* */
+#define IOC4_SIO_IR_S3_RX_HIGH 0x04000000 /* */
+#define IOC4_SIO_IR_S3_RX_TIMER 0x08000000 /* */
+#define IOC4_SIO_IR_S3_DELTA_DCD 0x10000000 /* */
+#define IOC4_SIO_IR_S3_DELTA_CTS 0x20000000 /* */
+#define IOC4_SIO_IR_S3_INT 0x40000000 /* */
+#define IOC4_SIO_IR_S3_TX_EXPLICIT 0x80000000 /* */
+
+/* Per device interrupt masks */
+#define IOC4_SIO_IR_S0 (IOC4_SIO_IR_S0_TX_MT | \
+ IOC4_SIO_IR_S0_RX_FULL | \
+ IOC4_SIO_IR_S0_RX_HIGH | \
+ IOC4_SIO_IR_S0_RX_TIMER | \
+ IOC4_SIO_IR_S0_DELTA_DCD | \
+ IOC4_SIO_IR_S0_DELTA_CTS | \
+ IOC4_SIO_IR_S0_INT | \
+ IOC4_SIO_IR_S0_TX_EXPLICIT)
+#define IOC4_SIO_IR_S1 (IOC4_SIO_IR_S1_TX_MT | \
+ IOC4_SIO_IR_S1_RX_FULL | \
+ IOC4_SIO_IR_S1_RX_HIGH | \
+ IOC4_SIO_IR_S1_RX_TIMER | \
+ IOC4_SIO_IR_S1_DELTA_DCD | \
+ IOC4_SIO_IR_S1_DELTA_CTS | \
+ IOC4_SIO_IR_S1_INT | \
+ IOC4_SIO_IR_S1_TX_EXPLICIT)
+#define IOC4_SIO_IR_S2 (IOC4_SIO_IR_S2_TX_MT | \
+ IOC4_SIO_IR_S2_RX_FULL | \
+ IOC4_SIO_IR_S2_RX_HIGH | \
+ IOC4_SIO_IR_S2_RX_TIMER | \
+ IOC4_SIO_IR_S2_DELTA_DCD | \
+ IOC4_SIO_IR_S2_DELTA_CTS | \
+ IOC4_SIO_IR_S2_INT | \
+ IOC4_SIO_IR_S2_TX_EXPLICIT)
+#define IOC4_SIO_IR_S3 (IOC4_SIO_IR_S3_TX_MT | \
+ IOC4_SIO_IR_S3_RX_FULL | \
+ IOC4_SIO_IR_S3_RX_HIGH | \
+ IOC4_SIO_IR_S3_RX_TIMER | \
+ IOC4_SIO_IR_S3_DELTA_DCD | \
+ IOC4_SIO_IR_S3_DELTA_CTS | \
+ IOC4_SIO_IR_S3_INT | \
+ IOC4_SIO_IR_S3_TX_EXPLICIT)
+
+/* Bitmasks for IOC4_OTHER_IR, IOC4_OTHER_IEC, and IOC4_OTHER_IES */
+#define IOC4_OTHER_IR_ATA_INT 0x00000001 /* ATAPI intr pass-thru */
+#define IOC4_OTHER_IR_ATA_MEMERR 0x00000002 /* ATAPI DMA PCI error */
+#define IOC4_OTHER_IR_S0_MEMERR 0x00000004 /* Port 0 PCI error */
+#define IOC4_OTHER_IR_S1_MEMERR 0x00000008 /* Port 1 PCI error */
+#define IOC4_OTHER_IR_S2_MEMERR 0x00000010 /* Port 2 PCI error */
+#define IOC4_OTHER_IR_S3_MEMERR 0x00000020 /* Port 3 PCI error */
+#define IOC4_OTHER_IR_KBD_INT 0x00000040 /* Keyboard/mouse */
+#define IOC4_OTHER_IR_RESERVED 0x007fff80 /* Reserved */
+#define IOC4_OTHER_IR_RT_INT 0x00800000 /* INT_OUT section output */
+#define IOC4_OTHER_IR_GEN_INT 0xff000000 /* Generic pins */
+
+#define IOC4_OTHER_IR_SER_MEMERR (IOC4_OTHER_IR_S0_MEMERR | IOC4_OTHER_IR_S1_MEMERR | \
+ IOC4_OTHER_IR_S2_MEMERR | IOC4_OTHER_IR_S3_MEMERR)
+
+/* Bitmasks for IOC4_SIO_CR */
+#define IOC4_SIO_CR_CMD_PULSE_SHIFT 0 /* byte bus strobe shift */
+#define IOC4_SIO_CR_ARB_DIAG_TX0 0x00000000
+#define IOC4_SIO_CR_ARB_DIAG_RX0 0x00000010
+#define IOC4_SIO_CR_ARB_DIAG_TX1 0x00000020
+#define IOC4_SIO_CR_ARB_DIAG_RX1 0x00000030
+#define IOC4_SIO_CR_ARB_DIAG_TX2 0x00000040
+#define IOC4_SIO_CR_ARB_DIAG_RX2 0x00000050
+#define IOC4_SIO_CR_ARB_DIAG_TX3 0x00000060
+#define IOC4_SIO_CR_ARB_DIAG_RX3 0x00000070
+#define IOC4_SIO_CR_SIO_DIAG_IDLE 0x00000080 /* 0 -> active request among
+ serial ports (ro) */
+/* Defs for some of the generic I/O pins */
+#define IOC4_GPCR_UART0_MODESEL 0x10 /* Pin is output to port 0
+ mode sel */
+#define IOC4_GPCR_UART1_MODESEL 0x20 /* Pin is output to port 1
+ mode sel */
+#define IOC4_GPCR_UART2_MODESEL 0x40 /* Pin is output to port 2
+ mode sel */
+#define IOC4_GPCR_UART3_MODESEL 0x80 /* Pin is output to port 3
+ mode sel */
+
+#define IOC4_GPPR_UART0_MODESEL_PIN 4 /* GIO pin controlling
+ uart 0 mode select */
+#define IOC4_GPPR_UART1_MODESEL_PIN 5 /* GIO pin controlling
+ uart 1 mode select */
+#define IOC4_GPPR_UART2_MODESEL_PIN 6 /* GIO pin controlling
+ uart 2 mode select */
+#define IOC4_GPPR_UART3_MODESEL_PIN 7 /* GIO pin controlling
+ uart 3 mode select */
+
+/* Bitmasks for serial RX status byte */
+#define IOC4_RXSB_OVERRUN 0x01 /* Char(s) lost */
+#define IOC4_RXSB_PAR_ERR 0x02 /* Parity error */
+#define IOC4_RXSB_FRAME_ERR 0x04 /* Framing error */
+#define IOC4_RXSB_BREAK 0x08 /* Break character */
+#define IOC4_RXSB_CTS 0x10 /* State of CTS */
+#define IOC4_RXSB_DCD 0x20 /* State of DCD */
+#define IOC4_RXSB_MODEM_VALID 0x40 /* DCD, CTS, and OVERRUN are valid */
+#define IOC4_RXSB_DATA_VALID 0x80 /* Data byte, FRAME_ERR PAR_ERR
+ * & BREAK valid */
+
+/* Bitmasks for serial TX control byte */
+#define IOC4_TXCB_INT_WHEN_DONE 0x20 /* Interrupt after this byte is sent */
+#define IOC4_TXCB_INVALID 0x00 /* Byte is invalid */
+#define IOC4_TXCB_VALID 0x40 /* Byte is valid */
+#define IOC4_TXCB_MCR 0x80 /* Data<7:0> to modem control reg */
+#define IOC4_TXCB_DELAY 0xc0 /* Delay data<7:0> mSec */
+
+/* Bitmasks for IOC4_SBBR_L */
+#define IOC4_SBBR_L_SIZE 0x00000001 /* 0 == 1KB rings, 1 == 4KB rings */
+
+/* Bitmasks for IOC4_SSCR_<3:0> */
+#define IOC4_SSCR_RX_THRESHOLD 0x000001ff /* Hiwater mark */
+#define IOC4_SSCR_TX_TIMER_BUSY 0x00010000 /* TX timer in progress */
+#define IOC4_SSCR_HFC_EN 0x00020000 /* Hardware flow control enabled */
+#define IOC4_SSCR_RX_RING_DCD 0x00040000 /* Post RX record on delta-DCD */
+#define IOC4_SSCR_RX_RING_CTS 0x00080000 /* Post RX record on delta-CTS */
+#define IOC4_SSCR_DIAG 0x00200000 /* Bypass clock divider for sim */
+#define IOC4_SSCR_RX_DRAIN 0x08000000 /* Drain RX buffer to memory */
+#define IOC4_SSCR_DMA_EN 0x10000000 /* Enable ring buffer DMA */
+#define IOC4_SSCR_DMA_PAUSE 0x20000000 /* Pause DMA */
+#define IOC4_SSCR_PAUSE_STATE 0x40000000 /* Sets when PAUSE takes effect */
+#define IOC4_SSCR_RESET 0x80000000 /* Reset DMA channels */
+
+/* All producer/consumer pointers are the same bitfield */
+#define IOC4_PROD_CONS_PTR_4K 0x00000ff8 /* For 4K buffers */
+#define IOC4_PROD_CONS_PTR_1K 0x000003f8 /* For 1K buffers */
+#define IOC4_PROD_CONS_PTR_OFF 3
+
+/* Bitmasks for IOC4_SRCIR_<3:0> */
+#define IOC4_SRCIR_ARM 0x80000000 /* Arm RX timer */
+
+/* Bitmasks for IOC4_SHADOW_<3:0> */
+#define IOC4_SHADOW_DR 0x00000001 /* Data ready */
+#define IOC4_SHADOW_OE 0x00000002 /* Overrun error */
+#define IOC4_SHADOW_PE 0x00000004 /* Parity error */
+#define IOC4_SHADOW_FE 0x00000008 /* Framing error */
+#define IOC4_SHADOW_BI 0x00000010 /* Break interrupt */
+#define IOC4_SHADOW_THRE 0x00000020 /* Xmit holding register empty */
+#define IOC4_SHADOW_TEMT 0x00000040 /* Xmit shift register empty */
+#define IOC4_SHADOW_RFCE 0x00000080 /* Char in RX fifo has an error */
+#define IOC4_SHADOW_DCTS 0x00010000 /* Delta clear to send */
+#define IOC4_SHADOW_DDCD 0x00080000 /* Delta data carrier detect */
+#define IOC4_SHADOW_CTS 0x00100000 /* Clear to send */
+#define IOC4_SHADOW_DCD 0x00800000 /* Data carrier detect */
+#define IOC4_SHADOW_DTR 0x01000000 /* Data terminal ready */
+#define IOC4_SHADOW_RTS 0x02000000 /* Request to send */
+#define IOC4_SHADOW_OUT1 0x04000000 /* 16550 OUT1 bit */
+#define IOC4_SHADOW_OUT2 0x08000000 /* 16550 OUT2 bit */
+#define IOC4_SHADOW_LOOP 0x10000000 /* Loopback enabled */
+
+/* Bitmasks for IOC4_SRTR_<3:0> */
+#define IOC4_SRTR_CNT 0x00000fff /* Reload value for RX timer */
+#define IOC4_SRTR_CNT_VAL 0x0fff0000 /* Current value of RX timer */
+#define IOC4_SRTR_CNT_VAL_SHIFT 16
+#define IOC4_SRTR_HZ 16000 /* SRTR clock frequency */
+
+/* Serial port register map used for DMA and PIO serial I/O */
+struct ioc4_serialregs {
+ uint32_t sscr;
+ uint32_t stpir;
+ uint32_t stcir;
+ uint32_t srpir;
+ uint32_t srcir;
+ uint32_t srtr;
+ uint32_t shadow;
+};
+
+/* IOC4 UART register map */
+struct ioc4_uartregs {
+ char i4u_lcr;
+ union {
+ char iir; /* read only */
+ char fcr; /* write only */
+ } u3;
+ union {
+ char ier; /* DLAB == 0 */
+ char dlm; /* DLAB == 1 */
+ } u2;
+ union {
+ char rbr; /* read only, DLAB == 0 */
+ char thr; /* write only, DLAB == 0 */
+ char dll; /* DLAB == 1 */
+ } u1;
+ char i4u_scr;
+ char i4u_msr;
+ char i4u_lsr;
+ char i4u_mcr;
+};
+
+/* short names */
+#define i4u_dll u1.dll
+#define i4u_ier u2.ier
+#define i4u_dlm u2.dlm
+#define i4u_fcr u3.fcr
+
+/* Serial port registers used for DMA serial I/O */
+struct ioc4_serial {
+ uint32_t sbbr01_l;
+ uint32_t sbbr01_h;
+ uint32_t sbbr23_l;
+ uint32_t sbbr23_h;
+
+ struct ioc4_serialregs port_0;
+ struct ioc4_serialregs port_1;
+ struct ioc4_serialregs port_2;
+ struct ioc4_serialregs port_3;
+ struct ioc4_uartregs uart_0;
+ struct ioc4_uartregs uart_1;
+ struct ioc4_uartregs uart_2;
+ struct ioc4_uartregs uart_3;
+} ioc4_serial;
+
+/* UART clock speed */
+#define IOC4_SER_XIN_CLK_66 66666667
+#define IOC4_SER_XIN_CLK_33 33333333
+
+#define IOC4_W_IES 0
+#define IOC4_W_IEC 1
+
+typedef void ioc4_intr_func_f(void *, uint32_t);
+typedef ioc4_intr_func_f *ioc4_intr_func_t;
+
+static unsigned int Num_of_ioc4_cards;
+
+/* defining this will get you LOTS of great debug info */
+//#define DEBUG_INTERRUPTS
+#define DPRINT_CONFIG(_x...) ;
+//#define DPRINT_CONFIG(_x...) printk _x
+
+/* number of characters left in xmit buffer before we ask for more */
+#define WAKEUP_CHARS 256
+
+/* number of characters we want to transmit to the lower level at a time */
+#define IOC4_MAX_CHARS 256
+#define IOC4_FIFO_CHARS 255
+
+/* Device name we're using */
+#define DEVICE_NAME_RS232 "ttyIOC"
+#define DEVICE_NAME_RS422 "ttyAIOC"
+#define DEVICE_MAJOR 204
+#define DEVICE_MINOR_RS232 50
+#define DEVICE_MINOR_RS422 84
+
+
+/* register offsets */
+#define IOC4_SERIAL_OFFSET 0x300
+
+/* flags for next_char_state */
+#define NCS_BREAK 0x1
+#define NCS_PARITY 0x2
+#define NCS_FRAMING 0x4
+#define NCS_OVERRUN 0x8
+
+/* because we need SOME parameters ... */
+#define MIN_BAUD_SUPPORTED 1200
+#define MAX_BAUD_SUPPORTED 115200
+
+/* protocol types supported */
+#define PROTO_RS232 3
+#define PROTO_RS422 7
+
+/* Notification types */
+#define N_DATA_READY 0x01
+#define N_OUTPUT_LOWAT 0x02
+#define N_BREAK 0x04
+#define N_PARITY_ERROR 0x08
+#define N_FRAMING_ERROR 0x10
+#define N_OVERRUN_ERROR 0x20
+#define N_DDCD 0x40
+#define N_DCTS 0x80
+
+#define N_ALL_INPUT (N_DATA_READY | N_BREAK | \
+ N_PARITY_ERROR | N_FRAMING_ERROR | \
+ N_OVERRUN_ERROR | N_DDCD | N_DCTS)
+
+#define N_ALL_OUTPUT N_OUTPUT_LOWAT
+
+#define N_ALL_ERRORS (N_PARITY_ERROR | N_FRAMING_ERROR | N_OVERRUN_ERROR)
+
+#define N_ALL (N_DATA_READY | N_OUTPUT_LOWAT | N_BREAK | \
+ N_PARITY_ERROR | N_FRAMING_ERROR | \
+ N_OVERRUN_ERROR | N_DDCD | N_DCTS)
+
+#define SER_DIVISOR(_x, clk) (((clk) + (_x) * 8) / ((_x) * 16))
+#define DIVISOR_TO_BAUD(div, clk) ((clk) / 16 / (div))
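+
+/* Worked example (illustrative): with the 66 MHz serial clock,
+ * SER_DIVISOR(9600, 66666667) = (66666667 + 76800) / 153600 = 434, and
+ * DIVISOR_TO_BAUD(434, 66666667) = 66666667 / 16 / 434 = 9600, so
+ * set_baud() accepts that rate with no error.
+ */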
+
+/* Some masks */
+#define LCR_MASK_BITS_CHAR (UART_LCR_WLEN5 | UART_LCR_WLEN6 \
+ | UART_LCR_WLEN7 | UART_LCR_WLEN8)
+#define LCR_MASK_STOP_BITS (UART_LCR_STOP)
+
+#define PENDING(_p) (readl(&(_p)->ip_mem->sio_ir.raw) & _p->ip_ienb)
+#define READ_SIO_IR(_p) readl(&(_p)->ip_mem->sio_ir.raw)
+
+/* Default to 4k buffers */
+#ifdef IOC4_1K_BUFFERS
+#define RING_BUF_SIZE 1024
+#define IOC4_BUF_SIZE_BIT 0
+#define PROD_CONS_MASK IOC4_PROD_CONS_PTR_1K
+#else
+#define RING_BUF_SIZE 4096
+#define IOC4_BUF_SIZE_BIT IOC4_SBBR_L_SIZE
+#define PROD_CONS_MASK IOC4_PROD_CONS_PTR_4K
+#endif
+
+#define TOTAL_RING_BUF_SIZE (RING_BUF_SIZE * 4)
+
+/*
+ * This is the entry saved by the driver - one per card
+ */
+
+#define UART_PORT_MIN 0
+#define UART_PORT_RS232 UART_PORT_MIN
+#define UART_PORT_RS422 1
+#define UART_PORT_COUNT 2 /* one for each mode */
+
+struct ioc4_control {
+ int ic_irq;
+ struct {
+ /* uart ports are allocated here - 1 for rs232, 1 for rs422 */
+ struct uart_port icp_uart_port[UART_PORT_COUNT];
+ /* Handy reference material */
+ struct ioc4_port *icp_port;
+ } ic_port[IOC4_NUM_SERIAL_PORTS];
+ struct ioc4_soft *ic_soft;
+};
+
+/*
+ * per-IOC4 data structure
+ */
+#define MAX_IOC4_INTR_ENTS (8 * sizeof(uint32_t))
+struct ioc4_soft {
+ struct ioc4_misc_regs __iomem *is_ioc4_misc_addr;
+ struct ioc4_serial __iomem *is_ioc4_serial_addr;
+
+ /* Each interrupt type has an entry in the array */
+ struct ioc4_intr_type {
+
+ /*
+ * Each in-use entry in this array contains at least
+ * one nonzero bit in sd_bits; no two entries in this
+ * array have overlapping sd_bits values.
+ */
+ struct ioc4_intr_info {
+ uint32_t sd_bits;
+ ioc4_intr_func_f *sd_intr;
+ void *sd_info;
+ } is_intr_info[MAX_IOC4_INTR_ENTS];
+
+ /* Number of entries active in the above array */
+ atomic_t is_num_intrs;
+ } is_intr_type[IOC4_NUM_INTR_TYPES];
+
+ /* is_ir_lock must be held while
+ * modifying sio_ie values, so
+ * we can be sure that sio_ie is
+ * not changing when we read it
+ * along with sio_ir.
+ */
+ spinlock_t is_ir_lock; /* SIO_IE[SC] mod lock */
+};
+
+/* Local port info for each IOC4 serial port */
+struct ioc4_port {
+ struct uart_port *ip_port; /* current active port ptr */
+ /* Ptrs for all ports */
+ struct uart_port *ip_all_ports[UART_PORT_COUNT];
+ /* Back ptrs for this port */
+ struct ioc4_control *ip_control;
+ struct pci_dev *ip_pdev;
+ struct ioc4_soft *ip_ioc4_soft;
+
+ /* pci mem addresses */
+ struct ioc4_misc_regs __iomem *ip_mem;
+ struct ioc4_serial __iomem *ip_serial;
+ struct ioc4_serialregs __iomem *ip_serial_regs;
+ struct ioc4_uartregs __iomem *ip_uart_regs;
+
+ /* Ring buffer page for this port */
+ dma_addr_t ip_dma_ringbuf;
+ /* vaddr of ring buffer */
+ struct ring_buffer *ip_cpu_ringbuf;
+
+ /* Rings for this port */
+ struct ring *ip_inring;
+ struct ring *ip_outring;
+
+ /* Hook to port specific values */
+ struct hooks *ip_hooks;
+
+ spinlock_t ip_lock;
+
+ /* Various rx/tx parameters */
+ int ip_baud;
+ int ip_tx_lowat;
+ int ip_rx_timeout;
+
+ /* Copy of notification bits */
+ int ip_notify;
+
+ /* Shadow copies of various registers so we don't need to PIO
+ * read them constantly
+ */
+ uint32_t ip_ienb; /* Enabled interrupts */
+ uint32_t ip_sscr;
+ uint32_t ip_tx_prod;
+ uint32_t ip_rx_cons;
+ int ip_pci_bus_speed;
+ unsigned char ip_flags;
+};
+
+/* tx low water mark. We need to notify the driver whenever tx is getting
+ * close to empty so it can refill the tx buffer and keep things going.
+ * Let's assume that if we interrupt 1 ms before the tx goes idle, we'll
+ * have no trouble getting in more chars in time (I certainly hope so).
+ */
+#define TX_LOWAT_LATENCY 1000
+#define TX_LOWAT_HZ (1000000 / TX_LOWAT_LATENCY)
+#define TX_LOWAT_CHARS(baud) (baud / 10 / TX_LOWAT_HZ)
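+
+/* Worked example (illustrative): at 115200 baud, TX_LOWAT_CHARS(115200) =
+ * 115200 / 10 / 1000 = 11 characters, roughly 1 ms of transmit time;
+ * config_port() converts this to ring entries and never lets it drop
+ * below one entry at low baud rates.
+ */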
+
+/* Flags per port */
+#define INPUT_HIGH 0x01
+#define DCD_ON 0x02
+#define LOWAT_WRITTEN 0x04
+#define READ_ABORTED 0x08
+#define PORT_ACTIVE 0x10
+#define PORT_INACTIVE 0 /* This is the value when "off" */
+
+
+/* Since each port has different register offsets and bitmasks
+ * for everything, we'll store those that we need in tables so we
+ * don't have to be constantly checking the port we are dealing with.
+ */
+struct hooks {
+ uint32_t intr_delta_dcd;
+ uint32_t intr_delta_cts;
+ uint32_t intr_tx_mt;
+ uint32_t intr_rx_timer;
+ uint32_t intr_rx_high;
+ uint32_t intr_tx_explicit;
+ uint32_t intr_dma_error;
+ uint32_t intr_clear;
+ uint32_t intr_all;
+ int rs422_select_pin;
+};
+
+static struct hooks hooks_array[IOC4_NUM_SERIAL_PORTS] = {
+ /* Values for port 0 */
+ {
+ IOC4_SIO_IR_S0_DELTA_DCD, IOC4_SIO_IR_S0_DELTA_CTS,
+ IOC4_SIO_IR_S0_TX_MT, IOC4_SIO_IR_S0_RX_TIMER,
+ IOC4_SIO_IR_S0_RX_HIGH, IOC4_SIO_IR_S0_TX_EXPLICIT,
+ IOC4_OTHER_IR_S0_MEMERR,
+ (IOC4_SIO_IR_S0_TX_MT | IOC4_SIO_IR_S0_RX_FULL |
+ IOC4_SIO_IR_S0_RX_HIGH | IOC4_SIO_IR_S0_RX_TIMER |
+ IOC4_SIO_IR_S0_DELTA_DCD | IOC4_SIO_IR_S0_DELTA_CTS |
+ IOC4_SIO_IR_S0_INT | IOC4_SIO_IR_S0_TX_EXPLICIT),
+ IOC4_SIO_IR_S0, IOC4_GPPR_UART0_MODESEL_PIN,
+ },
+
+ /* Values for port 1 */
+ {
+ IOC4_SIO_IR_S1_DELTA_DCD, IOC4_SIO_IR_S1_DELTA_CTS,
+ IOC4_SIO_IR_S1_TX_MT, IOC4_SIO_IR_S1_RX_TIMER,
+ IOC4_SIO_IR_S1_RX_HIGH, IOC4_SIO_IR_S1_TX_EXPLICIT,
+ IOC4_OTHER_IR_S1_MEMERR,
+ (IOC4_SIO_IR_S1_TX_MT | IOC4_SIO_IR_S1_RX_FULL |
+ IOC4_SIO_IR_S1_RX_HIGH | IOC4_SIO_IR_S1_RX_TIMER |
+ IOC4_SIO_IR_S1_DELTA_DCD | IOC4_SIO_IR_S1_DELTA_CTS |
+ IOC4_SIO_IR_S1_INT | IOC4_SIO_IR_S1_TX_EXPLICIT),
+ IOC4_SIO_IR_S1, IOC4_GPPR_UART1_MODESEL_PIN,
+ },
+
+ /* Values for port 2 */
+ {
+ IOC4_SIO_IR_S2_DELTA_DCD, IOC4_SIO_IR_S2_DELTA_CTS,
+ IOC4_SIO_IR_S2_TX_MT, IOC4_SIO_IR_S2_RX_TIMER,
+ IOC4_SIO_IR_S2_RX_HIGH, IOC4_SIO_IR_S2_TX_EXPLICIT,
+ IOC4_OTHER_IR_S2_MEMERR,
+ (IOC4_SIO_IR_S2_TX_MT | IOC4_SIO_IR_S2_RX_FULL |
+ IOC4_SIO_IR_S2_RX_HIGH | IOC4_SIO_IR_S2_RX_TIMER |
+ IOC4_SIO_IR_S2_DELTA_DCD | IOC4_SIO_IR_S2_DELTA_CTS |
+ IOC4_SIO_IR_S2_INT | IOC4_SIO_IR_S2_TX_EXPLICIT),
+ IOC4_SIO_IR_S2, IOC4_GPPR_UART2_MODESEL_PIN,
+ },
+
+ /* Values for port 3 */
+ {
+ IOC4_SIO_IR_S3_DELTA_DCD, IOC4_SIO_IR_S3_DELTA_CTS,
+ IOC4_SIO_IR_S3_TX_MT, IOC4_SIO_IR_S3_RX_TIMER,
+ IOC4_SIO_IR_S3_RX_HIGH, IOC4_SIO_IR_S3_TX_EXPLICIT,
+ IOC4_OTHER_IR_S3_MEMERR,
+ (IOC4_SIO_IR_S3_TX_MT | IOC4_SIO_IR_S3_RX_FULL |
+ IOC4_SIO_IR_S3_RX_HIGH | IOC4_SIO_IR_S3_RX_TIMER |
+ IOC4_SIO_IR_S3_DELTA_DCD | IOC4_SIO_IR_S3_DELTA_CTS |
+ IOC4_SIO_IR_S3_INT | IOC4_SIO_IR_S3_TX_EXPLICIT),
+ IOC4_SIO_IR_S3, IOC4_GPPR_UART3_MODESEL_PIN,
+ }
+};
+
+/* A ring buffer entry */
+struct ring_entry {
+ union {
+ struct {
+ uint32_t alldata;
+ uint32_t allsc;
+ } all;
+ struct {
+ char data[4]; /* data bytes */
+ char sc[4]; /* status/control */
+ } s;
+ } u;
+};
+
+/* Test the valid bits in any of the 4 sc chars using "allsc" member */
+#define RING_ANY_VALID \
+ ((uint32_t)(IOC4_RXSB_MODEM_VALID | IOC4_RXSB_DATA_VALID) * 0x01010101)
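+
+/* The multiply replicates the per-byte mask into every status/control byte:
+ * (0x40 | 0x80) * 0x01010101 == 0xc0c0c0c0, so one 32-bit compare of
+ * ring_allsc checks all four bytes of an entry at once.
+ */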
+
+#define ring_sc u.s.sc
+#define ring_data u.s.data
+#define ring_allsc u.all.allsc
+
+/* Number of entries per ring buffer. */
+#define ENTRIES_PER_RING (RING_BUF_SIZE / (int) sizeof(struct ring_entry))
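+
+/* Illustrative sizing: a ring_entry is 8 bytes (4 data + 4 status/control),
+ * so the default 4 KB ring holds 512 entries and TOTAL_RING_BUF_SIZE is
+ * 16 KB for the four rings shared by a pair of ports.
+ */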
+
+/* An individual ring */
+struct ring {
+ struct ring_entry entries[ENTRIES_PER_RING];
+};
+
+/* The whole enchilada */
+struct ring_buffer {
+ struct ring TX_0_OR_2;
+ struct ring RX_0_OR_2;
+ struct ring TX_1_OR_3;
+ struct ring RX_1_OR_3;
+};
+
+/* Get a ring from a port struct */
+#define RING(_p, _wh) &(((struct ring_buffer *)((_p)->ip_cpu_ringbuf))->_wh)
+
+/* Infinite loop detection.
+ */
+#define MAXITER 10000000
+
+/* Prototypes */
+static void receive_chars(struct uart_port *);
+static void handle_intr(void *arg, uint32_t sio_ir);
+
+/*
+ * port_is_active - determines if this port is currently active
+ * @port: ptr to soft struct for this port
+ * @uart_port: uart port to test for
+ */
+static inline int port_is_active(struct ioc4_port *port,
+ struct uart_port *uart_port)
+{
+ if (port) {
+ if ((port->ip_flags & PORT_ACTIVE)
+ && (port->ip_port == uart_port))
+ return 1;
+ }
+ return 0;
+}
+
+
+/**
+ * write_ireg - write the interrupt regs
+ * @ioc4_soft: ptr to soft struct for this port
+ * @val: value to write
+ * @which: which register
+ * @type: which ireg set
+ */
+static inline void
+write_ireg(struct ioc4_soft *ioc4_soft, uint32_t val, int which, int type)
+{
+ struct ioc4_misc_regs __iomem *mem = ioc4_soft->is_ioc4_misc_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc4_soft->is_ir_lock, flags);
+
+ switch (type) {
+ case IOC4_SIO_INTR_TYPE:
+ switch (which) {
+ case IOC4_W_IES:
+ writel(val, &mem->sio_ies.raw);
+ break;
+
+ case IOC4_W_IEC:
+ writel(val, &mem->sio_iec.raw);
+ break;
+ }
+ break;
+
+ case IOC4_OTHER_INTR_TYPE:
+ switch (which) {
+ case IOC4_W_IES:
+ writel(val, &mem->other_ies.raw);
+ break;
+
+ case IOC4_W_IEC:
+ writel(val, &mem->other_iec.raw);
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&ioc4_soft->is_ir_lock, flags);
+}
+
+/**
+ * set_baud - Baud rate setting code
+ * @port: port to set
+ * @baud: baud rate to use
+ */
+static int set_baud(struct ioc4_port *port, int baud)
+{
+ int actual_baud;
+ int diff;
+ int lcr;
+ unsigned short divisor;
+ struct ioc4_uartregs __iomem *uart;
+
+ divisor = SER_DIVISOR(baud, port->ip_pci_bus_speed);
+ if (!divisor)
+ return 1;
+ actual_baud = DIVISOR_TO_BAUD(divisor, port->ip_pci_bus_speed);
+
+ diff = actual_baud - baud;
+ if (diff < 0)
+ diff = -diff;
+
+ /* If we're within 1%, we've found a match */
+ if (diff * 100 > actual_baud)
+ return 1;
+
+ uart = port->ip_uart_regs;
+ lcr = readb(&uart->i4u_lcr);
+ writeb(lcr | UART_LCR_DLAB, &uart->i4u_lcr);
+ writeb((unsigned char)divisor, &uart->i4u_dll);
+ writeb((unsigned char)(divisor >> 8), &uart->i4u_dlm);
+ writeb(lcr, &uart->i4u_lcr);
+ return 0;
+}
+
+
+/**
+ * get_ioc4_port - given a uart port, return the matching ioc4_port structure
+ * @the_port: uart port
+ * @set: set this port as current
+ */
+static struct ioc4_port *get_ioc4_port(struct uart_port *the_port, int set)
+{
+ struct ioc4_driver_data *idd = dev_get_drvdata(the_port->dev);
+ struct ioc4_control *control = idd->idd_serial_data;
+ struct ioc4_port *port;
+ int port_num, port_type;
+
+ if (control) {
+ for ( port_num = 0; port_num < IOC4_NUM_SERIAL_PORTS;
+ port_num++ ) {
+ port = control->ic_port[port_num].icp_port;
+ if (!port)
+ continue;
+ for (port_type = UART_PORT_MIN;
+ port_type < UART_PORT_COUNT;
+ port_type++) {
+ if (the_port == port->ip_all_ports
+ [port_type]) {
+ /* set local copy */
+ if (set) {
+ port->ip_port = the_port;
+ }
+ return port;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+/* The IOC4 hardware provides no atomic way to determine if interrupts
+ * are pending since two reads are required to do so. The handler must
+ * read the SIO_IR and the SIO_IES, and take the logical and of the
+ * two. When this value is zero, all interrupts have been serviced and
+ * the handler may return.
+ *
+ * This has the unfortunate "hole" that, if some other CPU or
+ * some other thread or some higher level interrupt manages to
+ * modify SIO_IE between our reads of SIO_IR and SIO_IE, we may
+ * think we have observed SIO_IR&SIO_IE==0 when in fact this
+ * condition never really occurred.
+ *
+ * To solve this, we use a simple spinlock that must be held
+ * whenever modifying SIO_IE; holding this lock while observing
+ * both SIO_IR and SIO_IE guarantees that we do not falsely
+ * conclude that no enabled interrupts are pending.
+ */
+
+static inline uint32_t
+pending_intrs(struct ioc4_soft *soft, int type)
+{
+ struct ioc4_misc_regs __iomem *mem = soft->is_ioc4_misc_addr;
+ unsigned long flag;
+ uint32_t intrs = 0;
+
+ BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
+ || (type == IOC4_OTHER_INTR_TYPE)));
+
+ spin_lock_irqsave(&soft->is_ir_lock, flag);
+
+ switch (type) {
+ case IOC4_SIO_INTR_TYPE:
+ intrs = readl(&mem->sio_ir.raw) & readl(&mem->sio_ies.raw);
+ break;
+
+ case IOC4_OTHER_INTR_TYPE:
+ intrs = readl(&mem->other_ir.raw) & readl(&mem->other_ies.raw);
+
+ /* Don't process any ATA interrupts */
+ intrs &= ~(IOC4_OTHER_IR_ATA_INT | IOC4_OTHER_IR_ATA_MEMERR);
+ break;
+
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&soft->is_ir_lock, flag);
+ return intrs;
+}
+
+/**
+ * port_init - Initialize the sio and ioc4 hardware for a given port
+ * called per port from attach...
+ * @port: port to initialize
+ */
+static inline int port_init(struct ioc4_port *port)
+{
+ uint32_t sio_cr;
+ struct hooks *hooks = port->ip_hooks;
+ struct ioc4_uartregs __iomem *uart;
+
+ /* Idle the IOC4 serial interface */
+ writel(IOC4_SSCR_RESET, &port->ip_serial_regs->sscr);
+
+ /* Wait until any pending bus activity for this port has ceased */
+ do
+ sio_cr = readl(&port->ip_mem->sio_cr.raw);
+ while (!(sio_cr & IOC4_SIO_CR_SIO_DIAG_IDLE));
+
+ /* Finish reset sequence */
+ writel(0, &port->ip_serial_regs->sscr);
+
+ /* Once RESET is done, reload cached tx_prod and rx_cons values
+ * and set rings to empty by making prod == cons
+ */
+ port->ip_tx_prod = readl(&port->ip_serial_regs->stcir) & PROD_CONS_MASK;
+ writel(port->ip_tx_prod, &port->ip_serial_regs->stpir);
+ port->ip_rx_cons = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;
+ writel(port->ip_rx_cons | IOC4_SRCIR_ARM, &port->ip_serial_regs->srcir);
+
+ /* Disable interrupts for this 16550 */
+ uart = port->ip_uart_regs;
+ writeb(0, &uart->i4u_lcr);
+ writeb(0, &uart->i4u_ier);
+
+ /* Set the default baud */
+ set_baud(port, port->ip_baud);
+
+ /* Set line control to 8 bits no parity */
+ writeb(UART_LCR_WLEN8 | 0, &uart->i4u_lcr);
+ /* UART_LCR_STOP == 1 stop */
+
+ /* Enable the FIFOs */
+ writeb(UART_FCR_ENABLE_FIFO, &uart->i4u_fcr);
+ /* then reset 16550 FIFOs */
+ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
+ &uart->i4u_fcr);
+
+ /* Clear modem control register */
+ writeb(0, &uart->i4u_mcr);
+
+ /* Clear deltas in modem status register */
+ readb(&uart->i4u_msr);
+
+ /* Only do this once per port pair */
+ if (port->ip_hooks == &hooks_array[0]
+ || port->ip_hooks == &hooks_array[2]) {
+ unsigned long ring_pci_addr;
+ uint32_t __iomem *sbbr_l;
+ uint32_t __iomem *sbbr_h;
+
+ if (port->ip_hooks == &hooks_array[0]) {
+ sbbr_l = &port->ip_serial->sbbr01_l;
+ sbbr_h = &port->ip_serial->sbbr01_h;
+ } else {
+ sbbr_l = &port->ip_serial->sbbr23_l;
+ sbbr_h = &port->ip_serial->sbbr23_h;
+ }
+
+ ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf;
+ DPRINT_CONFIG(("%s: ring_pci_addr 0x%lx\n",
+ __func__, ring_pci_addr));
+
+ writel((unsigned int)((uint64_t)ring_pci_addr >> 32), sbbr_h);
+ writel((unsigned int)ring_pci_addr | IOC4_BUF_SIZE_BIT, sbbr_l);
+ }
+
+ /* Set the receive timeout value to 10 msec */
+ writel(IOC4_SRTR_HZ / 100, &port->ip_serial_regs->srtr);
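+ /* (16000 / 100 = 160 counts of the 16 kHz SRTR clock, i.e. 10 ms) */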
+
+ /* Set rx threshold, enable DMA */
+ /* Set high water mark at 3/4 of full ring */
+ port->ip_sscr = (ENTRIES_PER_RING * 3 / 4);
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+
+ /* Disable and clear all serial related interrupt bits */
+ write_ireg(port->ip_ioc4_soft, hooks->intr_clear,
+ IOC4_W_IEC, IOC4_SIO_INTR_TYPE);
+ port->ip_ienb &= ~hooks->intr_clear;
+ writel(hooks->intr_clear, &port->ip_mem->sio_ir.raw);
+ return 0;
+}
+
+/**
+ * handle_dma_error_intr - service any pending DMA error interrupts for the
+ * given port - 2nd level called via sd_intr
+ * @arg: handler arg
+ * @other_ir: ioc4regs
+ */
+static void handle_dma_error_intr(void *arg, uint32_t other_ir)
+{
+ struct ioc4_port *port = (struct ioc4_port *)arg;
+ struct hooks *hooks = port->ip_hooks;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->ip_lock, flags);
+
+ /* ACK the interrupt */
+ writel(hooks->intr_dma_error, &port->ip_mem->other_ir.raw);
+
+ if (readl(&port->ip_mem->pci_err_addr_l.raw) & IOC4_PCI_ERR_ADDR_VLD) {
+ printk(KERN_ERR
+ "PCI error address is 0x%llx, "
+ "master is serial port %c %s\n",
+ (((uint64_t)readl(&port->ip_mem->pci_err_addr_h)
+ << 32)
+ | readl(&port->ip_mem->pci_err_addr_l.raw))
+ & IOC4_PCI_ERR_ADDR_ADDR_MSK, '1' +
+ ((char)(readl(&port->ip_mem->pci_err_addr_l.raw) &
+ IOC4_PCI_ERR_ADDR_MST_NUM_MSK) >> 1),
+ (readl(&port->ip_mem->pci_err_addr_l.raw)
+ & IOC4_PCI_ERR_ADDR_MST_TYP_MSK)
+ ? "RX" : "TX");
+
+ if (readl(&port->ip_mem->pci_err_addr_l.raw)
+ & IOC4_PCI_ERR_ADDR_MUL_ERR) {
+ printk(KERN_ERR
+ "Multiple errors occurred\n");
+ }
+ }
+ spin_unlock_irqrestore(&port->ip_lock, flags);
+
+ /* Re-enable DMA error interrupts */
+ write_ireg(port->ip_ioc4_soft, hooks->intr_dma_error, IOC4_W_IES,
+ IOC4_OTHER_INTR_TYPE);
+}
+
+/**
+ * intr_connect - interrupt connect function
+ * @soft: soft struct for this card
+ * @type: interrupt type
+ * @intrbits: bit pattern to set
+ * @intr: handler function
+ * @info: handler arg
+ */
+static void
+intr_connect(struct ioc4_soft *soft, int type,
+ uint32_t intrbits, ioc4_intr_func_f * intr, void *info)
+{
+ int i;
+ struct ioc4_intr_info *intr_ptr;
+
+ BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
+ || (type == IOC4_OTHER_INTR_TYPE)));
+
+ i = atomic_inc_return(&soft->is_intr_type[type].is_num_intrs) - 1;
+ BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
+
+ /* Save off the lower level interrupt handler */
+ intr_ptr = &soft->is_intr_type[type].is_intr_info[i];
+ intr_ptr->sd_bits = intrbits;
+ intr_ptr->sd_intr = intr;
+ intr_ptr->sd_info = info;
+}
+
+/**
+ * ioc4_intr - Top level IOC4 interrupt handler.
+ * @irq: irq value
+ * @arg: handler arg
+ */
+
+static irqreturn_t ioc4_intr(int irq, void *arg)
+{
+ struct ioc4_soft *soft;
+ uint32_t this_ir, this_mir;
+ int xx, num_intrs = 0;
+ int intr_type;
+ int handled = 0;
+ struct ioc4_intr_info *intr_info;
+
+ soft = arg;
+ for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
+ num_intrs = (int)atomic_read(
+ &soft->is_intr_type[intr_type].is_num_intrs);
+
+ this_mir = this_ir = pending_intrs(soft, intr_type);
+
+ /* Farm out the interrupt to the various drivers depending on
+ * which interrupt bits are set.
+ */
+ for (xx = 0; xx < num_intrs; xx++) {
+ intr_info = &soft->is_intr_type[intr_type].is_intr_info[xx];
+ if ((this_mir = this_ir & intr_info->sd_bits)) {
+ /* Disable owned interrupts, call handler */
+ handled++;
+ write_ireg(soft, intr_info->sd_bits, IOC4_W_IEC,
+ intr_type);
+ intr_info->sd_intr(intr_info->sd_info, this_mir);
+ this_ir &= ~this_mir;
+ }
+ }
+ }
+#ifdef DEBUG_INTERRUPTS
+ {
+ struct ioc4_misc_regs __iomem *mem = soft->is_ioc4_misc_addr;
+ unsigned long flag;
+
+ spin_lock_irqsave(&soft->is_ir_lock, flag);
+ printk ("%s : %d : mem 0x%p sio_ir 0x%x sio_ies 0x%x "
+ "other_ir 0x%x other_ies 0x%x mask 0x%x\n",
+ __func__, __LINE__,
+ (void *)mem, readl(&mem->sio_ir.raw),
+ readl(&mem->sio_ies.raw),
+ readl(&mem->other_ir.raw),
+ readl(&mem->other_ies.raw),
+ IOC4_OTHER_IR_ATA_INT | IOC4_OTHER_IR_ATA_MEMERR);
+ spin_unlock_irqrestore(&soft->is_ir_lock, flag);
+ }
+#endif
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/**
+ * ioc4_attach_local - Device initialization.
+ * Called at *_attach() time for each
+ * IOC4 with serial ports in the system.
+ * @idd: Master module data for this IOC4
+ */
+static inline int ioc4_attach_local(struct ioc4_driver_data *idd)
+{
+ struct ioc4_port *port;
+ struct ioc4_port *ports[IOC4_NUM_SERIAL_PORTS];
+ int port_number;
+ uint16_t ioc4_revid_min = 62;
+ uint16_t ioc4_revid;
+ struct pci_dev *pdev = idd->idd_pdev;
+ struct ioc4_control* control = idd->idd_serial_data;
+ struct ioc4_soft *soft = control->ic_soft;
+ void __iomem *ioc4_misc = idd->idd_misc_regs;
+ void __iomem *ioc4_serial = soft->is_ioc4_serial_addr;
+
+ /* IOC4 firmware must be at least rev 62 */
+ pci_read_config_word(pdev, PCI_COMMAND_SPECIAL, &ioc4_revid);
+
+ printk(KERN_INFO "IOC4 firmware revision %d\n", ioc4_revid);
+ if (ioc4_revid < ioc4_revid_min) {
+ printk(KERN_WARNING
+ "IOC4 serial not supported on firmware rev %d, "
+ "please upgrade to rev %d or higher\n",
+ ioc4_revid, ioc4_revid_min);
+ return -EPERM;
+ }
+ BUG_ON(ioc4_misc == NULL);
+ BUG_ON(ioc4_serial == NULL);
+
+ /* Create port structures for each port */
+ for (port_number = 0; port_number < IOC4_NUM_SERIAL_PORTS;
+ port_number++) {
+ port = kzalloc(sizeof(struct ioc4_port), GFP_KERNEL);
+ if (!port) {
+ printk(KERN_WARNING
+ "IOC4 serial memory not available for port\n");
+ return -ENOMEM;
+ }
+ spin_lock_init(&port->ip_lock);
+
+ /* we need to remember the previous ones, to point back to
+ * them farther down - setting up the ring buffers.
+ */
+ ports[port_number] = port;
+
+ /* Allocate buffers and jumpstart the hardware. */
+ control->ic_port[port_number].icp_port = port;
+ port->ip_ioc4_soft = soft;
+ port->ip_pdev = pdev;
+ port->ip_ienb = 0;
+ /* Use baud rate calculations based on detected PCI
+ * bus speed. Simply test whether the PCI clock is
+ * running closer to 66MHz or 33MHz.
+ */
+ if (idd->count_period/IOC4_EXTINT_COUNT_DIVISOR < 20) {
+ port->ip_pci_bus_speed = IOC4_SER_XIN_CLK_66;
+ } else {
+ port->ip_pci_bus_speed = IOC4_SER_XIN_CLK_33;
+ }
+ port->ip_baud = 9600;
+ port->ip_control = control;
+ port->ip_mem = ioc4_misc;
+ port->ip_serial = ioc4_serial;
+
+ /* point to the right hook */
+ port->ip_hooks = &hooks_array[port_number];
+
+ /* Get direct hooks to the serial regs and uart regs
+ * for this port
+ */
+ switch (port_number) {
+ case 0:
+ port->ip_serial_regs = &(port->ip_serial->port_0);
+ port->ip_uart_regs = &(port->ip_serial->uart_0);
+ break;
+ case 1:
+ port->ip_serial_regs = &(port->ip_serial->port_1);
+ port->ip_uart_regs = &(port->ip_serial->uart_1);
+ break;
+ case 2:
+ port->ip_serial_regs = &(port->ip_serial->port_2);
+ port->ip_uart_regs = &(port->ip_serial->uart_2);
+ break;
+ default:
+ case 3:
+ port->ip_serial_regs = &(port->ip_serial->port_3);
+ port->ip_uart_regs = &(port->ip_serial->uart_3);
+ break;
+ }
+
+ /* ring buffers are 1 to a pair of ports */
+ if (port_number && (port_number & 1)) {
+ /* odd ports share the even port's buffer */
+ port->ip_dma_ringbuf =
+ ports[port_number - 1]->ip_dma_ringbuf;
+ port->ip_cpu_ringbuf =
+ ports[port_number - 1]->ip_cpu_ringbuf;
+ port->ip_inring = RING(port, RX_1_OR_3);
+ port->ip_outring = RING(port, TX_1_OR_3);
+
+ } else {
+ if (port->ip_dma_ringbuf == 0) {
+ port->ip_cpu_ringbuf = pci_alloc_consistent
+ (pdev, TOTAL_RING_BUF_SIZE,
+ &port->ip_dma_ringbuf);
+
+ }
+ BUG_ON(!((((int64_t)port->ip_dma_ringbuf) &
+ (TOTAL_RING_BUF_SIZE - 1)) == 0));
+ DPRINT_CONFIG(("%s : ip_cpu_ringbuf 0x%p "
+ "ip_dma_ringbuf 0x%p\n",
+ __func__,
+ (void *)port->ip_cpu_ringbuf,
+ (void *)port->ip_dma_ringbuf));
+ port->ip_inring = RING(port, RX_0_OR_2);
+ port->ip_outring = RING(port, TX_0_OR_2);
+ }
+ DPRINT_CONFIG(("%s : port %d [addr 0x%p] control 0x%p",
+ __func__,
+ port_number, (void *)port, (void *)control));
+ DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n",
+ (void *)port->ip_serial_regs,
+ (void *)port->ip_uart_regs));
+
+ /* Initialize the hardware for IOC4 */
+ port_init(port);
+
+ DPRINT_CONFIG(("%s: port_number %d port 0x%p inring 0x%p "
+ "outring 0x%p\n",
+ __func__,
+ port_number, (void *)port,
+ (void *)port->ip_inring,
+ (void *)port->ip_outring));
+
+ /* Attach interrupt handlers */
+ intr_connect(soft, IOC4_SIO_INTR_TYPE,
+ GET_SIO_IR(port_number),
+ handle_intr, port);
+
+ intr_connect(soft, IOC4_OTHER_INTR_TYPE,
+ GET_OTHER_IR(port_number),
+ handle_dma_error_intr, port);
+ }
+ return 0;
+}
+
+/**
+ * enable_intrs - enable interrupts
+ * @port: port to enable
+ * @mask: mask to use
+ */
+static void enable_intrs(struct ioc4_port *port, uint32_t mask)
+{
+ struct hooks *hooks = port->ip_hooks;
+
+ if ((port->ip_ienb & mask) != mask) {
+ write_ireg(port->ip_ioc4_soft, mask, IOC4_W_IES,
+ IOC4_SIO_INTR_TYPE);
+ port->ip_ienb |= mask;
+ }
+
+ if (port->ip_ienb)
+ write_ireg(port->ip_ioc4_soft, hooks->intr_dma_error,
+ IOC4_W_IES, IOC4_OTHER_INTR_TYPE);
+}
+
+/**
+ * local_open - local open a port
+ * @port: port to open
+ */
+static inline int local_open(struct ioc4_port *port)
+{
+ int spiniter = 0;
+
+ port->ip_flags = PORT_ACTIVE;
+
+ /* Pause the DMA interface if necessary */
+ if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
+ writel(port->ip_sscr | IOC4_SSCR_DMA_PAUSE,
+ &port->ip_serial_regs->sscr);
+ while((readl(&port->ip_serial_regs-> sscr)
+ & IOC4_SSCR_PAUSE_STATE) == 0) {
+ spiniter++;
+ if (spiniter > MAXITER) {
+ port->ip_flags = PORT_INACTIVE;
+ return -1;
+ }
+ }
+ }
+
+ /* Reset the input fifo. If the uart received chars while the port
+ * was closed and DMA is not enabled, the uart may have a bunch of
+ * chars hanging around in its rx fifo which will not be discarded
+ * by rclr in the upper layer. We must get rid of them here.
+ */
+ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR,
+ &port->ip_uart_regs->i4u_fcr);
+
+ writeb(UART_LCR_WLEN8, &port->ip_uart_regs->i4u_lcr);
+ /* UART_LCR_STOP == 1 stop */
+
+ /* Re-enable DMA, set default threshold to intr whenever there is
+ * data available.
+ */
+ port->ip_sscr &= ~IOC4_SSCR_RX_THRESHOLD;
+ port->ip_sscr |= 1; /* default threshold */
+
+ /* Plug in the new sscr. This implicitly clears the DMA_PAUSE
+ * flag if it was set above
+ */
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+ port->ip_tx_lowat = 1;
+ return 0;
+}
+
+/**
+ * set_rx_timeout - Set rx timeout and threshold values.
+ * @port: port to use
+ * @timeout: timeout value in ticks
+ */
+static inline int set_rx_timeout(struct ioc4_port *port, int timeout)
+{
+ int threshold;
+
+ port->ip_rx_timeout = timeout;
+
+ /* Timeout is in ticks. Let's figure out how many chars we
+ * can receive at the current baud rate in that interval
+ * and set the rx threshold to that amount. There are 4 chars
+ * per ring entry, so we'll divide the number of chars that will
+ * arrive in timeout by 4.
+ * So .... timeout * baud / 10 / HZ / 4, with HZ = 100.
+ */
+ threshold = timeout * port->ip_baud / 4000;
+ if (threshold == 0)
+ threshold = 1; /* otherwise we'll intr all the time! */
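+
+ /* Illustrative: config_port() below passes a 2-tick (20 ms at HZ == 100)
+ * timeout, so at 9600 baud the threshold works out to 2 * 9600 / 4000 = 4
+ * ring entries, i.e. roughly 16 characters.
+ */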
+
+ if ((unsigned)threshold > (unsigned)IOC4_SSCR_RX_THRESHOLD)
+ return 1;
+
+ port->ip_sscr &= ~IOC4_SSCR_RX_THRESHOLD;
+ port->ip_sscr |= threshold;
+
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+
+ /* Now set the rx timeout to the given value
+ * again timeout * IOC4_SRTR_HZ / HZ
+ */
+ timeout = timeout * IOC4_SRTR_HZ / 100;
+ if (timeout > IOC4_SRTR_CNT)
+ timeout = IOC4_SRTR_CNT;
+
+ writel(timeout, &port->ip_serial_regs->srtr);
+ return 0;
+}
+
+/**
+ * config_port - config the hardware
+ * @port: port to config
+ * @baud: baud rate for the port
+ * @byte_size: data size
+ * @stop_bits: number of stop bits
+ * @parenb: parity enable ?
+ * @parodd: odd parity ?
+ */
+static inline int
+config_port(struct ioc4_port *port,
+ int baud, int byte_size, int stop_bits, int parenb, int parodd)
+{
+ char lcr, sizebits;
+ int spiniter = 0;
+
+ DPRINT_CONFIG(("%s: baud %d byte_size %d stop %d parenb %d parodd %d\n",
+ __func__, baud, byte_size, stop_bits, parenb, parodd));
+
+ if (set_baud(port, baud))
+ return 1;
+
+ switch (byte_size) {
+ case 5:
+ sizebits = UART_LCR_WLEN5;
+ break;
+ case 6:
+ sizebits = UART_LCR_WLEN6;
+ break;
+ case 7:
+ sizebits = UART_LCR_WLEN7;
+ break;
+ case 8:
+ sizebits = UART_LCR_WLEN8;
+ break;
+ default:
+ return 1;
+ }
+
+ /* Pause the DMA interface if necessary */
+ if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
+ writel(port->ip_sscr | IOC4_SSCR_DMA_PAUSE,
+ &port->ip_serial_regs->sscr);
+ while((readl(&port->ip_serial_regs->sscr)
+ & IOC4_SSCR_PAUSE_STATE) == 0) {
+ spiniter++;
+ if (spiniter > MAXITER)
+ return -1;
+ }
+ }
+
+ /* Clear relevant fields in lcr */
+ lcr = readb(&port->ip_uart_regs->i4u_lcr);
+ lcr &= ~(LCR_MASK_BITS_CHAR | UART_LCR_EPAR |
+ UART_LCR_PARITY | LCR_MASK_STOP_BITS);
+
+ /* Set byte size in lcr */
+ lcr |= sizebits;
+
+ /* Set parity */
+ if (parenb) {
+ lcr |= UART_LCR_PARITY;
+ if (!parodd)
+ lcr |= UART_LCR_EPAR;
+ }
+
+ /* Set stop bits */
+ if (stop_bits)
+ lcr |= UART_LCR_STOP /* 2 stop bits */ ;
+
+ writeb(lcr, &port->ip_uart_regs->i4u_lcr);
+
+ /* Re-enable the DMA interface if necessary */
+ if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+ }
+ port->ip_baud = baud;
+
+ /* When we get within this number of ring entries of filling the
+ * entire ring on tx, place an EXPLICIT intr to generate a lowat
+ * notification when output has drained.
+ */
+ port->ip_tx_lowat = (TX_LOWAT_CHARS(baud) + 3) / 4;
+ if (port->ip_tx_lowat == 0)
+ port->ip_tx_lowat = 1;
+
+ set_rx_timeout(port, 2);
+
+ return 0;
+}
+
+/**
+ * do_write - Write bytes to the port. Returns the number of bytes
+ * actually written. Called from transmit_chars
+ * @port: port to use
+ * @buf: the stuff to write
+ * @len: how many bytes in 'buf'
+ */
+static inline int do_write(struct ioc4_port *port, char *buf, int len)
+{
+ int prod_ptr, cons_ptr, total = 0;
+ struct ring *outring;
+ struct ring_entry *entry;
+ struct hooks *hooks = port->ip_hooks;
+
+ BUG_ON(!(len >= 0));
+
+ prod_ptr = port->ip_tx_prod;
+ cons_ptr = readl(&port->ip_serial_regs->stcir) & PROD_CONS_MASK;
+ outring = port->ip_outring;
+
+ /* Maintain a 1-entry red-zone. The ring buffer is full when
+ * (cons - prod) % ring_size is 1. Rather than do this subtraction
+ * in the body of the loop, I'll do it now.
+ */
+ cons_ptr = (cons_ptr - (int)sizeof(struct ring_entry)) & PROD_CONS_MASK;
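+
+ /* Illustrative: with the default 4 KB rings (512 eight-byte entries) the
+ * ring is treated as full once the producer is one entry behind the
+ * consumer, so at most 511 entries (2044 data bytes) are ever queued.
+ */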
+
+ /* Stuff the bytes into the output */
+ while ((prod_ptr != cons_ptr) && (len > 0)) {
+ int xx;
+
+ /* Get 4 bytes (one ring entry) at a time */
+ entry = (struct ring_entry *)((caddr_t) outring + prod_ptr);
+
+ /* Invalidate all entries */
+ entry->ring_allsc = 0;
+
+ /* Copy in some bytes */
+ for (xx = 0; (xx < 4) && (len > 0); xx++) {
+ entry->ring_data[xx] = *buf++;
+ entry->ring_sc[xx] = IOC4_TXCB_VALID;
+ len--;
+ total++;
+ }
+
+ /* If we are within some small threshold of filling up the
+ * entire ring buffer, we must place an EXPLICIT intr here
+ * to generate a lowat interrupt in case we subsequently
+ * really do fill up the ring and the caller goes to sleep.
+ * No need to place more than one though.
+ */
+ if (!(port->ip_flags & LOWAT_WRITTEN) &&
+ ((cons_ptr - prod_ptr) & PROD_CONS_MASK)
+ <= port->ip_tx_lowat
+ * (int)sizeof(struct ring_entry)) {
+ port->ip_flags |= LOWAT_WRITTEN;
+ entry->ring_sc[0] |= IOC4_TXCB_INT_WHEN_DONE;
+ }
+
+ /* Go on to next entry */
+ prod_ptr += sizeof(struct ring_entry);
+ prod_ptr &= PROD_CONS_MASK;
+ }
+
+ /* If we sent something, start DMA if necessary */
+ if (total > 0 && !(port->ip_sscr & IOC4_SSCR_DMA_EN)) {
+ port->ip_sscr |= IOC4_SSCR_DMA_EN;
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+ }
+
+ /* Store the new producer pointer. If tx is disabled, we stuff the
+ * data into the ring buffer, but we don't actually start tx.
+ */
+ if (!uart_tx_stopped(port->ip_port)) {
+ writel(prod_ptr, &port->ip_serial_regs->stpir);
+
+ /* If we are now transmitting, enable tx_mt interrupt so we
+ * can disable DMA if necessary when the tx finishes.
+ */
+ if (total > 0)
+ enable_intrs(port, hooks->intr_tx_mt);
+ }
+ port->ip_tx_prod = prod_ptr;
+ return total;
+}
+
+/**
+ * disable_intrs - disable interrupts
+ * @port: port to enable
+ * @mask: mask to use
+ */
+static void disable_intrs(struct ioc4_port *port, uint32_t mask)
+{
+ struct hooks *hooks = port->ip_hooks;
+
+ if (port->ip_ienb & mask) {
+ write_ireg(port->ip_ioc4_soft, mask, IOC4_W_IEC,
+ IOC4_SIO_INTR_TYPE);
+ port->ip_ienb &= ~mask;
+ }
+
+ if (!port->ip_ienb)
+ write_ireg(port->ip_ioc4_soft, hooks->intr_dma_error,
+ IOC4_W_IEC, IOC4_OTHER_INTR_TYPE);
+}
+
+/**
+ * set_notification - Modify event notification
+ * @port: port to use
+ * @mask: events mask
+ * @set_on: set ?
+ */
+static int set_notification(struct ioc4_port *port, int mask, int set_on)
+{
+ struct hooks *hooks = port->ip_hooks;
+ uint32_t intrbits, sscrbits;
+
+ BUG_ON(!mask);
+
+ intrbits = sscrbits = 0;
+
+ if (mask & N_DATA_READY)
+ intrbits |= (hooks->intr_rx_timer | hooks->intr_rx_high);
+ if (mask & N_OUTPUT_LOWAT)
+ intrbits |= hooks->intr_tx_explicit;
+ if (mask & N_DDCD) {
+ intrbits |= hooks->intr_delta_dcd;
+ sscrbits |= IOC4_SSCR_RX_RING_DCD;
+ }
+ if (mask & N_DCTS)
+ intrbits |= hooks->intr_delta_cts;
+
+ if (set_on) {
+ enable_intrs(port, intrbits);
+ port->ip_notify |= mask;
+ port->ip_sscr |= sscrbits;
+ } else {
+ disable_intrs(port, intrbits);
+ port->ip_notify &= ~mask;
+ port->ip_sscr &= ~sscrbits;
+ }
+
+ /* We require DMA if either DATA_READY or DDCD notification is
+ * currently requested. If neither of these is requested and
+ * there is currently no tx in progress, DMA may be disabled.
+ */
+ if (port->ip_notify & (N_DATA_READY | N_DDCD))
+ port->ip_sscr |= IOC4_SSCR_DMA_EN;
+ else if (!(port->ip_ienb & hooks->intr_tx_mt))
+ port->ip_sscr &= ~IOC4_SSCR_DMA_EN;
+
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+ return 0;
+}
+
+/**
+ * set_mcr - set the master control reg
+ * @the_port: port to use
+ * @mask1: mcr mask
+ * @mask2: shadow mask
+ */
+static inline int set_mcr(struct uart_port *the_port,
+ int mask1, int mask2)
+{
+ struct ioc4_port *port = get_ioc4_port(the_port, 0);
+ uint32_t shadow;
+ int spiniter = 0;
+ char mcr;
+
+ if (!port)
+ return -1;
+
+ /* Pause the DMA interface if necessary */
+ if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
+ writel(port->ip_sscr | IOC4_SSCR_DMA_PAUSE,
+ &port->ip_serial_regs->sscr);
+ while ((readl(&port->ip_serial_regs->sscr)
+ & IOC4_SSCR_PAUSE_STATE) == 0) {
+ spiniter++;
+ if (spiniter > MAXITER)
+ return -1;
+ }
+ }
+ shadow = readl(&port->ip_serial_regs->shadow);
+ mcr = (shadow & 0xff000000) >> 24;
+
+ /* Set new value */
+ mcr |= mask1;
+ shadow |= mask2;
+
+ writeb(mcr, &port->ip_uart_regs->i4u_mcr);
+ writel(shadow, &port->ip_serial_regs->shadow);
+
+ /* Re-enable the DMA interface if necessary */
+ if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+ }
+ return 0;
+}
+
+/**
+ * ioc4_set_proto - set the protocol for the port
+ * @port: port to use
+ * @proto: protocol to use
+ */
+static int ioc4_set_proto(struct ioc4_port *port, int proto)
+{
+ struct hooks *hooks = port->ip_hooks;
+
+ switch (proto) {
+ case PROTO_RS232:
+ /* Clear the appropriate GIO pin */
+ writel(0, (&port->ip_mem->gppr[hooks->rs422_select_pin].raw));
+ break;
+
+ case PROTO_RS422:
+ /* Set the appropriate GIO pin */
+ writel(1, (&port->ip_mem->gppr[hooks->rs422_select_pin].raw));
+ break;
+
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * transmit_chars - upper level write, called with ip_lock
+ * @the_port: port to write
+ */
+static void transmit_chars(struct uart_port *the_port)
+{
+ int xmit_count, tail, head;
+ int result;
+ char *start;
+ struct tty_struct *tty;
+ struct ioc4_port *port = get_ioc4_port(the_port, 0);
+ struct uart_state *state;
+
+ if (!the_port)
+ return;
+ if (!port)
+ return;
+
+ state = the_port->state;
+ tty = state->port.tty;
+
+ if (uart_circ_empty(&state->xmit) || uart_tx_stopped(the_port)) {
+ /* Nothing to do or hw stopped */
+ set_notification(port, N_ALL_OUTPUT, 0);
+ return;
+ }
+
+ head = state->xmit.head;
+ tail = state->xmit.tail;
+ start = (char *)&state->xmit.buf[tail];
+
+ /* write out all the data or until the end of the buffer */
+ xmit_count = (head < tail) ? (UART_XMIT_SIZE - tail) : (head - tail);
+ if (xmit_count > 0) {
+ result = do_write(port, start, xmit_count);
+ if (result > 0) {
+ /* bookkeeping */
+ xmit_count -= result;
+ the_port->icount.tx += result;
+ /* advance the pointers */
+ tail += result;
+ tail &= UART_XMIT_SIZE - 1;
+ state->xmit.tail = tail;
+ start = (char *)&state->xmit.buf[tail];
+ }
+ }
+ if (uart_circ_chars_pending(&state->xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(the_port);
+
+ if (uart_circ_empty(&state->xmit)) {
+ set_notification(port, N_OUTPUT_LOWAT, 0);
+ } else {
+ set_notification(port, N_OUTPUT_LOWAT, 1);
+ }
+}
+
+/**
+ * ioc4_change_speed - change the speed of the port
+ * @the_port: port to change
+ * @new_termios: new termios settings
+ * @old_termios: old termios settings
+ */
+static void
+ioc4_change_speed(struct uart_port *the_port,
+ struct ktermios *new_termios, struct ktermios *old_termios)
+{
+ struct ioc4_port *port = get_ioc4_port(the_port, 0);
+ int baud, bits;
+ unsigned cflag, iflag;
+ int new_parity = 0, new_parity_enable = 0, new_stop = 0, new_data = 8;
+ struct uart_state *state = the_port->state;
+
+ cflag = new_termios->c_cflag;
+ iflag = new_termios->c_iflag;
+
+ switch (cflag & CSIZE) {
+ case CS5:
+ new_data = 5;
+ bits = 7;
+ break;
+ case CS6:
+ new_data = 6;
+ bits = 8;
+ break;
+ case CS7:
+ new_data = 7;
+ bits = 9;
+ break;
+ case CS8:
+ new_data = 8;
+ bits = 10;
+ break;
+ default:
+ /* because we always need a default ... */
+ new_data = 5;
+ bits = 7;
+ break;
+ }
+ if (cflag & CSTOPB) {
+ bits++;
+ new_stop = 1;
+ }
+ if (cflag & PARENB) {
+ bits++;
+ new_parity_enable = 1;
+ if (cflag & PARODD)
+ new_parity = 1;
+ }
+ baud = uart_get_baud_rate(the_port, new_termios, old_termios,
+ MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED);
+ DPRINT_CONFIG(("%s: returned baud %d\n", __func__, baud));
+
+ /* default is 9600 */
+ if (!baud)
+ baud = 9600;
+
+ if (!the_port->fifosize)
+ the_port->fifosize = IOC4_FIFO_CHARS;
+ the_port->timeout = ((the_port->fifosize * HZ * bits) / (baud / 10));
+ the_port->timeout += HZ / 50; /* Add .02 seconds of slop */
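+
+ /* Illustrative: for an 8N1 line (bits == 10) at 9600 baud with the default
+ * 255-character fifosize and HZ == 100 this is 255 * 100 * 10 / 960 + 2 =
+ * 267 jiffies.
+ */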
+
+ the_port->ignore_status_mask = N_ALL_INPUT;
+
+ state->port.tty->low_latency = 1;
+
+ if (iflag & IGNPAR)
+ the_port->ignore_status_mask &= ~(N_PARITY_ERROR
+ | N_FRAMING_ERROR);
+ if (iflag & IGNBRK) {
+ the_port->ignore_status_mask &= ~N_BREAK;
+ if (iflag & IGNPAR)
+ the_port->ignore_status_mask &= ~N_OVERRUN_ERROR;
+ }
+ if (!(cflag & CREAD)) {
+ /* ignore everything */
+ the_port->ignore_status_mask &= ~N_DATA_READY;
+ }
+
+ if (cflag & CRTSCTS) {
+ port->ip_sscr |= IOC4_SSCR_HFC_EN;
+ }
+ else {
+ port->ip_sscr &= ~IOC4_SSCR_HFC_EN;
+ }
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+
+ /* Set the configuration and proper notification call */
+ DPRINT_CONFIG(("%s : port 0x%p cflag 0%o "
+ "config_port(baud %d data %d stop %d p enable %d parity %d),"
+ " notification 0x%x\n",
+ __func__, (void *)port, cflag, baud, new_data, new_stop,
+ new_parity_enable, new_parity, the_port->ignore_status_mask));
+
+ if ((config_port(port, baud, /* baud */
+ new_data, /* byte size */
+ new_stop, /* stop bits */
+ new_parity_enable, /* set parity */
+ new_parity)) >= 0) { /* parity 1==odd */
+ set_notification(port, the_port->ignore_status_mask, 1);
+ }
+}
+
+/**
+ * ic4_startup_local - Start up the serial port - returns >= 0 if no errors
+ * @the_port: Port to operate on
+ */
+static inline int ic4_startup_local(struct uart_port *the_port)
+{
+ struct ioc4_port *port;
+ struct uart_state *state;
+
+ if (!the_port)
+ return -1;
+
+ port = get_ioc4_port(the_port, 0);
+ if (!port)
+ return -1;
+
+ state = the_port->state;
+
+ local_open(port);
+
+ /* set the protocol - mapbase has the port type */
+ ioc4_set_proto(port, the_port->mapbase);
+
+ /* set the speed of the serial port */
+ ioc4_change_speed(the_port, state->port.tty->termios,
+ (struct ktermios *)0);
+
+ return 0;
+}
+
+/*
+ * ioc4_cb_output_lowat - called when the output low water mark is hit
+ * @the_port: port to output
+ */
+static void ioc4_cb_output_lowat(struct uart_port *the_port)
+{
+ unsigned long pflags;
+
+ /* ip_lock is set on the call here */
+ if (the_port) {
+ spin_lock_irqsave(&the_port->lock, pflags);
+ transmit_chars(the_port);
+ spin_unlock_irqrestore(&the_port->lock, pflags);
+ }
+}
+
+/**
+ * handle_intr - service any interrupts for the given port - 2nd level
+ * called via sd_intr
+ * @arg: handler arg
+ * @sio_ir: ioc4regs
+ */
+static void handle_intr(void *arg, uint32_t sio_ir)
+{
+ struct ioc4_port *port = (struct ioc4_port *)arg;
+ struct hooks *hooks = port->ip_hooks;
+ unsigned int rx_high_rd_aborted = 0;
+ unsigned long flags;
+ struct uart_port *the_port;
+ int loop_counter;
+
+ /* Possible race condition here: The tx_mt interrupt bit may be
+ * cleared without the intervention of the interrupt handler,
+ * e.g. by a write. If the top level interrupt handler reads a
+ * tx_mt, then some other processor does a write, starting up
+ * output, then we come in here, see the tx_mt and stop DMA, the
+ * output started by the other processor will hang. Thus we can
+ * only rely on tx_mt being legitimate if it is read while the
+ * port lock is held. Therefore this bit must be ignored in the
+ * passed in interrupt mask which was read by the top level
+ * interrupt handler since the port lock was not held at the time
+ * it was read. We can only rely on this bit being accurate if it
+ * is read while the port lock is held. So we'll clear it for now,
+ * and reload it later once we have the port lock.
+ */
+ sio_ir &= ~(hooks->intr_tx_mt);
+
+ spin_lock_irqsave(&port->ip_lock, flags);
+
+ loop_counter = MAXITER; /* to avoid hangs */
+
+ do {
+ uint32_t shadow;
+
+ if ( loop_counter-- <= 0 ) {
+ printk(KERN_WARNING "IOC4 serial: "
+ "possible hang condition/"
+ "port stuck on interrupt.\n");
+ break;
+ }
+
+ /* Handle a DCD change */
+ if (sio_ir & hooks->intr_delta_dcd) {
+ /* ACK the interrupt */
+ writel(hooks->intr_delta_dcd,
+ &port->ip_mem->sio_ir.raw);
+
+ shadow = readl(&port->ip_serial_regs->shadow);
+
+ if ((port->ip_notify & N_DDCD)
+ && (shadow & IOC4_SHADOW_DCD)
+ && (port->ip_port)) {
+ the_port = port->ip_port;
+ the_port->icount.dcd = 1;
+ wake_up_interruptible
+ (&the_port->state->port.delta_msr_wait);
+ } else if ((port->ip_notify & N_DDCD)
+ && !(shadow & IOC4_SHADOW_DCD)) {
+ /* Flag delta DCD/no DCD */
+ port->ip_flags |= DCD_ON;
+ }
+ }
+
+ /* Handle a CTS change */
+ if (sio_ir & hooks->intr_delta_cts) {
+ /* ACK the interrupt */
+ writel(hooks->intr_delta_cts,
+ &port->ip_mem->sio_ir.raw);
+
+ shadow = readl(&port->ip_serial_regs->shadow);
+
+ if ((port->ip_notify & N_DCTS)
+ && (port->ip_port)) {
+ the_port = port->ip_port;
+ the_port->icount.cts =
+ (shadow & IOC4_SHADOW_CTS) ? 1 : 0;
+ wake_up_interruptible
+ (&the_port->state->port.delta_msr_wait);
+ }
+ }
+
+ /* rx timeout interrupt. Must be some data available. Put this
+ * before the check for rx_high since servicing this condition
+ * may cause that condition to clear.
+ */
+ if (sio_ir & hooks->intr_rx_timer) {
+ /* ACK the interrupt */
+ writel(hooks->intr_rx_timer,
+ &port->ip_mem->sio_ir.raw);
+
+ if ((port->ip_notify & N_DATA_READY)
+ && (port->ip_port)) {
+ /* ip_lock is set on call here */
+ receive_chars(port->ip_port);
+ }
+ }
+
+ /* rx high interrupt. Must be after rx_timer. */
+ else if (sio_ir & hooks->intr_rx_high) {
+ /* Data available, notify upper layer */
+ if ((port->ip_notify & N_DATA_READY)
+ && port->ip_port) {
+ /* ip_lock is set on call here */
+ receive_chars(port->ip_port);
+ }
+
+ /* We can't ACK this interrupt. If receive_chars didn't
+ * cause the condition to clear, we'll have to disable
+ * the interrupt until the data is drained.
+ * If the read was aborted, don't disable the interrupt
+ * as this may cause us to hang indefinitely. An
+ * aborted read generally means that this interrupt
+ * hasn't been delivered to the cpu yet anyway, even
+ * though we see it as asserted when we read the sio_ir.
+ */
+ if ((sio_ir = PENDING(port)) & hooks->intr_rx_high) {
+ if ((port->ip_flags & READ_ABORTED) == 0) {
+ port->ip_ienb &= ~hooks->intr_rx_high;
+ port->ip_flags |= INPUT_HIGH;
+ } else {
+ rx_high_rd_aborted++;
+ }
+ }
+ }
+
+ /* We got a low water interrupt: notify upper layer to
+ * send more data. Must come before tx_mt since servicing
+ * this condition may cause that condition to clear.
+ */
+ if (sio_ir & hooks->intr_tx_explicit) {
+ port->ip_flags &= ~LOWAT_WRITTEN;
+
+ /* ACK the interrupt */
+ writel(hooks->intr_tx_explicit,
+ &port->ip_mem->sio_ir.raw);
+
+ if (port->ip_notify & N_OUTPUT_LOWAT)
+ ioc4_cb_output_lowat(port->ip_port);
+ }
+
+ /* Handle tx_mt. Must come after tx_explicit. */
+ else if (sio_ir & hooks->intr_tx_mt) {
+ /* If we are expecting a lowat notification
+ * and we get to this point it probably means that for
+ * some reason the tx_explicit didn't work as expected
+ * (that can legitimately happen if the output buffer is
+ * filled up in just the right way).
+ * So send the notification now.
+ */
+ if (port->ip_notify & N_OUTPUT_LOWAT) {
+ ioc4_cb_output_lowat(port->ip_port);
+
+ /* We need to reload the sio_ir since the lowat
+ * call may have caused another write to occur,
+ * clearing the tx_mt condition.
+ */
+ sio_ir = PENDING(port);
+ }
+
+ /* If the tx_mt condition still persists even after the
+ * lowat call, we've got some work to do.
+ */
+ if (sio_ir & hooks->intr_tx_mt) {
+
+ /* If we are not currently expecting DMA input,
+ * and the transmitter has just gone idle,
+ * there is no longer any reason for DMA, so
+ * disable it.
+ */
+ if (!(port->ip_notify
+ & (N_DATA_READY | N_DDCD))) {
+ BUG_ON(!(port->ip_sscr
+ & IOC4_SSCR_DMA_EN));
+ port->ip_sscr &= ~IOC4_SSCR_DMA_EN;
+ writel(port->ip_sscr,
+ &port->ip_serial_regs->sscr);
+ }
+
+ /* Prevent infinite tx_mt interrupt */
+ port->ip_ienb &= ~hooks->intr_tx_mt;
+ }
+ }
+ sio_ir = PENDING(port);
+
+ /* if the read was aborted and only hooks->intr_rx_high,
+ * clear hooks->intr_rx_high, so we do not loop forever.
+ */
+
+ if (rx_high_rd_aborted && (sio_ir == hooks->intr_rx_high)) {
+ sio_ir &= ~hooks->intr_rx_high;
+ }
+ } while (sio_ir & hooks->intr_all);
+
+ spin_unlock_irqrestore(&port->ip_lock, flags);
+
+ /* Re-enable interrupts before returning from interrupt handler.
+ * Getting interrupted here is okay. It'll just v() our semaphore, and
+ * we'll come through the loop again.
+ */
+
+ write_ireg(port->ip_ioc4_soft, port->ip_ienb, IOC4_W_IES,
+ IOC4_SIO_INTR_TYPE);
+}
+
+/*
+ * ioc4_cb_post_ncs - called for some basic errors
+ * @the_port: port to use
+ * @ncs: event
+ */
+static void ioc4_cb_post_ncs(struct uart_port *the_port, int ncs)
+{
+ struct uart_icount *icount;
+
+ icount = &the_port->icount;
+
+ if (ncs & NCS_BREAK)
+ icount->brk++;
+ if (ncs & NCS_FRAMING)
+ icount->frame++;
+ if (ncs & NCS_OVERRUN)
+ icount->overrun++;
+ if (ncs & NCS_PARITY)
+ icount->parity++;
+}
+
+/**
+ * do_read - Read in bytes from the port. Return the number of bytes
+ * actually read.
+ * @the_port: port to use
+ * @buf: place to put the stuff we read
+ * @len: how big 'buf' is
+ */
+
+static inline int do_read(struct uart_port *the_port, unsigned char *buf,
+ int len)
+{
+ int prod_ptr, cons_ptr, total;
+ struct ioc4_port *port = get_ioc4_port(the_port, 0);
+ struct ring *inring;
+ struct ring_entry *entry;
+ struct hooks *hooks = port->ip_hooks;
+ int byte_num;
+ char *sc;
+ int loop_counter;
+
+ BUG_ON(!(len >= 0));
+ BUG_ON(!port);
+
+ /* There is a nasty timing issue in the IOC4. When the rx_timer
+ * expires or the rx_high condition arises, we take an interrupt.
+ * At some point while servicing the interrupt, we read bytes from
+ * the ring buffer and re-arm the rx_timer. However the rx_timer is
+ * not started until the first byte is received *after* it is armed,
+ * and any bytes pending in the rx construction buffers are not drained
+ * to memory until either there are 4 bytes available or the rx_timer
+ * expires. This leads to a potential situation where data is left
+ * in the construction buffers forever - 1 to 3 bytes were received
+ * after the interrupt was generated but before the rx_timer was
+ * re-armed. At that point as long as no subsequent bytes are received
+ * the timer will never be started and the bytes will remain in the
+ * construction buffer forever. The solution is to execute a DRAIN
+ * command after rearming the timer. This way any bytes received before
+ * the DRAIN will be drained to memory, and any bytes received after
+ * the DRAIN will start the TIMER and be drained when it expires.
+ * Luckily, this only needs to be done when the DMA buffer is empty
+ * since there is no requirement that this function return all
+ * available data as long as it returns some.
+ */
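+	/* Concretely, the code below re-arms the timer via SRCIR, samples
+	 * SRPIR, and if the ring still looks empty issues an RX_DRAIN,
+	 * re-samples SRPIR, and if necessary polls (bounded by MAXITER)
+	 * for the drain to complete.
+	 */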
+ /* Re-arm the timer */
+ writel(port->ip_rx_cons | IOC4_SRCIR_ARM, &port->ip_serial_regs->srcir);
+
+ prod_ptr = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;
+ cons_ptr = port->ip_rx_cons;
+
+ if (prod_ptr == cons_ptr) {
+ int reset_dma = 0;
+
+ /* Input buffer appears empty, do a flush. */
+
+ /* DMA must be enabled for this to work. */
+ if (!(port->ip_sscr & IOC4_SSCR_DMA_EN)) {
+ port->ip_sscr |= IOC4_SSCR_DMA_EN;
+ reset_dma = 1;
+ }
+
+ /* Potential race condition: we must reload the srpir after
+ * issuing the drain command, otherwise we could think the rx
+ * buffer is empty, then take a very long interrupt, and when
+ * we come back it's full and we wait forever for the drain to
+ * complete.
+ */
+ writel(port->ip_sscr | IOC4_SSCR_RX_DRAIN,
+ &port->ip_serial_regs->sscr);
+ prod_ptr = readl(&port->ip_serial_regs->srpir)
+ & PROD_CONS_MASK;
+
+ /* We must not wait for the DRAIN to complete unless there are
+ * at least 8 bytes (2 ring entries) available to receive the
+		 * data, otherwise the DRAIN will never complete and we'll
+ * deadlock here.
+ * In fact, to make things easier, I'll just ignore the flush if
+ * there is any data at all now available.
+ */
+ if (prod_ptr == cons_ptr) {
+ loop_counter = 0;
+ while (readl(&port->ip_serial_regs->sscr) &
+ IOC4_SSCR_RX_DRAIN) {
+ loop_counter++;
+ if (loop_counter > MAXITER)
+ return -1;
+ }
+
+ /* SIGH. We have to reload the prod_ptr *again* since
+ * the drain may have caused it to change
+ */
+ prod_ptr = readl(&port->ip_serial_regs->srpir)
+ & PROD_CONS_MASK;
+ }
+ if (reset_dma) {
+ port->ip_sscr &= ~IOC4_SSCR_DMA_EN;
+ writel(port->ip_sscr, &port->ip_serial_regs->sscr);
+ }
+ }
+ inring = port->ip_inring;
+ port->ip_flags &= ~READ_ABORTED;
+
+ total = 0;
+ loop_counter = 0xfffff; /* to avoid hangs */
+
+ /* Grab bytes from the hardware */
+ while ((prod_ptr != cons_ptr) && (len > 0)) {
+ entry = (struct ring_entry *)((caddr_t)inring + cons_ptr);
+
+		if (loop_counter-- <= 0) {
+ printk(KERN_WARNING "IOC4 serial: "
+ "possible hang condition/"
+ "port stuck on read.\n");
+ break;
+ }
+
+ /* According to the producer pointer, this ring entry
+ * must contain some data. But if the PIO happened faster
+		 * than the DMA, the data may not be available yet; abort this
+		 * read and pick the entry up again once the data has arrived.
+ */
+ if ((entry->ring_allsc & RING_ANY_VALID) == 0) {
+ /* Indicate the read is aborted so we don't disable
+ * the interrupt thinking that the consumer is
+ * congested.
+ */
+ port->ip_flags |= READ_ABORTED;
+ len = 0;
+ break;
+ }
+
+ /* Load the bytes/status out of the ring entry */
+ for (byte_num = 0; byte_num < 4 && len > 0; byte_num++) {
+ sc = &(entry->ring_sc[byte_num]);
+
+ /* Check for change in modem state or overrun */
+ if ((*sc & IOC4_RXSB_MODEM_VALID)
+ && (port->ip_notify & N_DDCD)) {
+ /* Notify upper layer if DCD dropped */
+
+ if ((port->ip_flags & DCD_ON)
+ && !(*sc & IOC4_RXSB_DCD)) {
+
+ /* If we have already copied some data,
+ * return it. We'll pick up the carrier
+ * drop on the next pass. That way we
+ * don't throw away the data that has
+ * already been copied back to
+ * the caller's buffer.
+ */
+ if (total > 0) {
+ len = 0;
+ break;
+ }
+ port->ip_flags &= ~DCD_ON;
+
+ /* Turn off this notification so the
+ * carrier drop protocol won't see it
+ * again when it does a read.
+ */
+ *sc &= ~IOC4_RXSB_MODEM_VALID;
+
+ /* To keep things consistent, we need
+ * to update the consumer pointer so
+ * the next reader won't come in and
+ * try to read the same ring entries
+ * again. This must be done here before
+ * the dcd change.
+ */
+
+ if ((entry->ring_allsc & RING_ANY_VALID)
+ == 0) {
+ cons_ptr += (int)sizeof
+ (struct ring_entry);
+ cons_ptr &= PROD_CONS_MASK;
+ }
+ writel(cons_ptr,
+ &port->ip_serial_regs->srcir);
+ port->ip_rx_cons = cons_ptr;
+
+ /* Notify upper layer of carrier drop */
+ if ((port->ip_notify & N_DDCD)
+ && port->ip_port) {
+ the_port->icount.dcd = 0;
+ wake_up_interruptible
+ (&the_port->state->
+ port.delta_msr_wait);
+ }
+
+ /* If we had any data to return, we
+ * would have returned it above.
+ */
+ return 0;
+ }
+ }
+ if (*sc & IOC4_RXSB_MODEM_VALID) {
+ /* Notify that an input overrun occurred */
+ if ((*sc & IOC4_RXSB_OVERRUN)
+ && (port->ip_notify & N_OVERRUN_ERROR)) {
+ ioc4_cb_post_ncs(the_port, NCS_OVERRUN);
+ }
+ /* Don't look at this byte again */
+ *sc &= ~IOC4_RXSB_MODEM_VALID;
+ }
+
+ /* Check for valid data or RX errors */
+ if ((*sc & IOC4_RXSB_DATA_VALID) &&
+ ((*sc & (IOC4_RXSB_PAR_ERR
+ | IOC4_RXSB_FRAME_ERR
+ | IOC4_RXSB_BREAK))
+ && (port->ip_notify & (N_PARITY_ERROR
+ | N_FRAMING_ERROR
+ | N_BREAK)))) {
+ /* There is an error condition on the next byte.
+ * If we have already transferred some bytes,
+ * we'll stop here. Otherwise if this is the
+ * first byte to be read, we'll just transfer
+ * it alone after notifying the
+ * upper layer of its status.
+ */
+ if (total > 0) {
+ len = 0;
+ break;
+ } else {
+ if ((*sc & IOC4_RXSB_PAR_ERR) &&
+ (port->ip_notify & N_PARITY_ERROR)) {
+ ioc4_cb_post_ncs(the_port,
+ NCS_PARITY);
+ }
+ if ((*sc & IOC4_RXSB_FRAME_ERR) &&
+ (port->ip_notify & N_FRAMING_ERROR)){
+ ioc4_cb_post_ncs(the_port,
+ NCS_FRAMING);
+ }
+ if ((*sc & IOC4_RXSB_BREAK)
+ && (port->ip_notify & N_BREAK)) {
+ ioc4_cb_post_ncs
+ (the_port,
+ NCS_BREAK);
+ }
+ len = 1;
+ }
+ }
+ if (*sc & IOC4_RXSB_DATA_VALID) {
+ *sc &= ~IOC4_RXSB_DATA_VALID;
+ *buf = entry->ring_data[byte_num];
+ buf++;
+ len--;
+ total++;
+ }
+ }
+
+ /* If we used up this entry entirely, go on to the next one,
+ * otherwise we must have run out of buffer space, so
+ * leave the consumer pointer here for the next read in case
+ * there are still unread bytes in this entry.
+ */
+ if ((entry->ring_allsc & RING_ANY_VALID) == 0) {
+ cons_ptr += (int)sizeof(struct ring_entry);
+ cons_ptr &= PROD_CONS_MASK;
+ }
+ }
+
+ /* Update consumer pointer and re-arm rx timer interrupt */
+ writel(cons_ptr, &port->ip_serial_regs->srcir);
+ port->ip_rx_cons = cons_ptr;
+
+ /* If we have now dipped below the rx high water mark and we have
+ * rx_high interrupt turned off, we can now turn it back on again.
+ */
+ if ((port->ip_flags & INPUT_HIGH) && (((prod_ptr - cons_ptr)
+ & PROD_CONS_MASK) < ((port->ip_sscr &
+ IOC4_SSCR_RX_THRESHOLD)
+ << IOC4_PROD_CONS_PTR_OFF))) {
+ port->ip_flags &= ~INPUT_HIGH;
+ enable_intrs(port, hooks->intr_rx_high);
+ }
+ return total;
+}
+
+/**
+ * receive_chars - upper level read. Called with ip_lock.
+ * @the_port: port to read from
+ */
+static void receive_chars(struct uart_port *the_port)
+{
+ struct tty_struct *tty;
+ unsigned char ch[IOC4_MAX_CHARS];
+ int read_count, request_count = IOC4_MAX_CHARS;
+ struct uart_icount *icount;
+ struct uart_state *state = the_port->state;
+ unsigned long pflags;
+
+ /* Make sure all the pointers are "good" ones */
+ if (!state)
+ return;
+ if (!state->port.tty)
+ return;
+
+ spin_lock_irqsave(&the_port->lock, pflags);
+ tty = state->port.tty;
+
+ request_count = tty_buffer_request_room(tty, IOC4_MAX_CHARS);
+
+ if (request_count > 0) {
+ icount = &the_port->icount;
+ read_count = do_read(the_port, ch, request_count);
+ if (read_count > 0) {
+ tty_insert_flip_string(tty, ch, read_count);
+ icount->rx += read_count;
+ }
+ }
+
+ spin_unlock_irqrestore(&the_port->lock, pflags);
+
+ tty_flip_buffer_push(tty);
+}
+
+/**
+ * ic4_type - Return a string describing the port type
+ * @the_port: Port to query; its mapbase records whether it is rs232 or rs422
+ *
+ */
+static const char *ic4_type(struct uart_port *the_port)
+{
+ if (the_port->mapbase == PROTO_RS232)
+ return "SGI IOC4 Serial [rs232]";
+ else
+ return "SGI IOC4 Serial [rs422]";
+}
+
+/**
+ * ic4_tx_empty - Is the transmitter empty?
+ * @port: Port to operate on
+ *
+ */
+static unsigned int ic4_tx_empty(struct uart_port *the_port)
+{
+ struct ioc4_port *port = get_ioc4_port(the_port, 0);
+ unsigned int ret = 0;
+
+ if (port_is_active(port, the_port)) {
+ if (readl(&port->ip_serial_regs->shadow) & IOC4_SHADOW_TEMT)
+ ret = TIOCSER_TEMT;
+ }
+ return ret;
+}
+
+/**
+ * ic4_stop_tx - stop the transmitter
+ * @port: Port to operate on
+ *
+ */
+static void ic4_stop_tx(struct uart_port *the_port)
+{
+ struct ioc4_port *port = get_ioc4_port(the_port, 0);
+
+ if (port_is_active(port, the_port))
+ set_notification(port, N_OUTPUT_LOWAT, 0);
+}
+
+/**
+ * null_void_function - no-op for uart_ops hooks that need no action
+ * @port: Port to operate on
+ *
+ */
+static void null_void_function(struct uart_port *the_port)
+{
+}
+
+/**
+ * ic4_shutdown - shut down the port - free irq and disable
+ * @port: Port to shut down
+ *
+ */
+static void ic4_shutdown(struct uart_port *the_port)
+{
+ unsigned long port_flags;
+ struct ioc4_port *port;
+ struct uart_state *state;
+
+ port = get_ioc4_port(the_port, 0);
+ if (!port)
+ return;
+
+ state = the_port->state;
+ port->ip_port = NULL;
+
+ wake_up_interruptible(&state->port.delta_msr_wait);
+
+ if (state->port.tty)
+ set_bit(TTY_IO_ERROR, &state->port.tty->flags);
+
+ spin_lock_irqsave(&the_port->lock, port_flags);
+ set_notification(port, N_ALL, 0);
+ port->ip_flags = PORT_INACTIVE;
+ spin_unlock_irqrestore(&the_port->lock, port_flags);
+}
+
+/**
+ * ic4_set_mctrl - set control lines (dtr, rts, etc)
+ * @port: Port to operate on
+ * @mctrl: Lines to set/unset
+ *
+ */
+static void ic4_set_mctrl(struct uart_port *the_port, unsigned int mctrl)
+{
+ unsigned char mcr = 0;
+ struct ioc4_port *port;
+
+ port = get_ioc4_port(the_port, 0);
+ if (!port_is_active(port, the_port))
+ return;
+
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_OUT1)
+ mcr |= UART_MCR_OUT1;
+ if (mctrl & TIOCM_OUT2)
+ mcr |= UART_MCR_OUT2;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+ set_mcr(the_port, mcr, IOC4_SHADOW_DTR);
+}
+
+/**
+ * ic4_get_mctrl - get control line info
+ * @port: port to operate on
+ *
+ */
+static unsigned int ic4_get_mctrl(struct uart_port *the_port)
+{
+ struct ioc4_port *port = get_ioc4_port(the_port, 0);
+ uint32_t shadow;
+ unsigned int ret = 0;
+
+ if (!port_is_active(port, the_port))
+ return 0;
+
+ shadow = readl(&port->ip_serial_regs->shadow);
+ if (shadow & IOC4_SHADOW_DCD)
+ ret |= TIOCM_CAR;
+ if (shadow & IOC4_SHADOW_DR)
+ ret |= TIOCM_DSR;
+ if (shadow & IOC4_SHADOW_CTS)
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+/**
+ * ic4_start_tx - Start transmitter, flush any output
+ * @port: Port to operate on
+ *
+ */
+static void ic4_start_tx(struct uart_port *the_port)
+{
+ struct ioc4_port *port = get_ioc4_port(the_port, 0);
+
+ if (port_is_active(port, the_port)) {
+ set_notification(port, N_OUTPUT_LOWAT, 1);
+ enable_intrs(port, port->ip_hooks->intr_tx_mt);
+ }
+}
+
+/**
+ * ic4_break_ctl - handle breaks
+ * @port: Port to operate on
+ * @break_state: Break state
+ *
+ */
+static void ic4_break_ctl(struct uart_port *the_port, int break_state)
+{
+}
+
+/**
+ * ic4_startup - Start up the serial port
+ * @port: Port to operate on
+ *
+ */
+static int ic4_startup(struct uart_port *the_port)
+{
+ int retval;
+ struct ioc4_port *port;
+ struct ioc4_control *control;
+ struct uart_state *state;
+ unsigned long port_flags;
+
+ if (!the_port)
+ return -ENODEV;
+ port = get_ioc4_port(the_port, 1);
+ if (!port)
+ return -ENODEV;
+ state = the_port->state;
+
+ control = port->ip_control;
+ if (!control) {
+ port->ip_port = NULL;
+ return -ENODEV;
+ }
+
+ /* Start up the serial port */
+ spin_lock_irqsave(&the_port->lock, port_flags);
+ retval = ic4_startup_local(the_port);
+ spin_unlock_irqrestore(&the_port->lock, port_flags);
+ return retval;
+}
+
+/**
+ * ic4_set_termios - set termios stuff
+ * @port: port to operate on
+ * @termios: New settings
+ * @old_termios: Old settings
+ *
+ */
+static void
+ic4_set_termios(struct uart_port *the_port,
+ struct ktermios *termios, struct ktermios *old_termios)
+{
+ unsigned long port_flags;
+
+ spin_lock_irqsave(&the_port->lock, port_flags);
+ ioc4_change_speed(the_port, termios, old_termios);
+ spin_unlock_irqrestore(&the_port->lock, port_flags);
+}
+
+/**
+ * ic4_request_port - allocate resources for port - no op....
+ * @port: port to operate on
+ *
+ */
+static int ic4_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+/* Associate the uart functions above - given to serial core */
+
+static struct uart_ops ioc4_ops = {
+ .tx_empty = ic4_tx_empty,
+ .set_mctrl = ic4_set_mctrl,
+ .get_mctrl = ic4_get_mctrl,
+ .stop_tx = ic4_stop_tx,
+ .start_tx = ic4_start_tx,
+ .stop_rx = null_void_function,
+ .enable_ms = null_void_function,
+ .break_ctl = ic4_break_ctl,
+ .startup = ic4_startup,
+ .shutdown = ic4_shutdown,
+ .set_termios = ic4_set_termios,
+ .type = ic4_type,
+ .release_port = null_void_function,
+ .request_port = ic4_request_port,
+};
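+
+/* Note that .stop_rx and .enable_ms are wired to null_void_function: input
+ * data and modem-status changes are delivered through the IOC4 notification
+ * machinery (N_DATA_READY, N_DDCD and friends) in the interrupt path rather
+ * than through these uart_ops hooks.
+ */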
+
+/*
+ * Boot-time initialization code
+ */
+
+static struct uart_driver ioc4_uart_rs232 = {
+ .owner = THIS_MODULE,
+ .driver_name = "ioc4_serial_rs232",
+ .dev_name = DEVICE_NAME_RS232,
+ .major = DEVICE_MAJOR,
+ .minor = DEVICE_MINOR_RS232,
+ .nr = IOC4_NUM_CARDS * IOC4_NUM_SERIAL_PORTS,
+};
+
+static struct uart_driver ioc4_uart_rs422 = {
+ .owner = THIS_MODULE,
+ .driver_name = "ioc4_serial_rs422",
+ .dev_name = DEVICE_NAME_RS422,
+ .major = DEVICE_MAJOR,
+ .minor = DEVICE_MINOR_RS422,
+ .nr = IOC4_NUM_CARDS * IOC4_NUM_SERIAL_PORTS,
+};
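+
+/* Each physical IOC4 port is registered with the serial core twice, once
+ * through each of the drivers above (see ioc4_serial_core_attach()); the
+ * rs232/rs422 personality of a given uart_port is recorded in its mapbase
+ * and reported by ic4_type().
+ */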
+
+
+/**
+ * ioc4_serial_remove_one - detach function
+ *
+ * @idd: IOC4 master module data for this IOC4
+ */
+
+static int ioc4_serial_remove_one(struct ioc4_driver_data *idd)
+{
+ int port_num, port_type;
+ struct ioc4_control *control;
+ struct uart_port *the_port;
+ struct ioc4_port *port;
+ struct ioc4_soft *soft;
+
+ /* If serial driver did not attach, don't try to detach */
+ control = idd->idd_serial_data;
+ if (!control)
+ return 0;
+
+ for (port_num = 0; port_num < IOC4_NUM_SERIAL_PORTS; port_num++) {
+ for (port_type = UART_PORT_MIN;
+ port_type < UART_PORT_COUNT;
+ port_type++) {
+ the_port = &control->ic_port[port_num].icp_uart_port
+ [port_type];
+ if (the_port) {
+ switch (port_type) {
+ case UART_PORT_RS422:
+ uart_remove_one_port(&ioc4_uart_rs422,
+ the_port);
+ break;
+ default:
+ case UART_PORT_RS232:
+ uart_remove_one_port(&ioc4_uart_rs232,
+ the_port);
+ break;
+ }
+ }
+ }
+ port = control->ic_port[port_num].icp_port;
+ /* we allocate in pairs */
+ if (!(port_num & 1) && port) {
+ pci_free_consistent(port->ip_pdev,
+ TOTAL_RING_BUF_SIZE,
+ port->ip_cpu_ringbuf,
+ port->ip_dma_ringbuf);
+ kfree(port);
+ }
+ }
+ soft = control->ic_soft;
+ if (soft) {
+ free_irq(control->ic_irq, soft);
+ if (soft->is_ioc4_serial_addr) {
+ iounmap(soft->is_ioc4_serial_addr);
+ release_mem_region((unsigned long)
+ soft->is_ioc4_serial_addr,
+ sizeof(struct ioc4_serial));
+ }
+ kfree(soft);
+ }
+ kfree(control);
+ idd->idd_serial_data = NULL;
+
+ return 0;
+}
+
+
+/**
+ * ioc4_serial_core_attach - register one protocol's ports with the serial core
+ * This is done during pci probing
+ * @pdev: handle for this card
+ * @port_type: PROTO_RS232 or PROTO_RS422
+ */
+static inline int
+ioc4_serial_core_attach(struct pci_dev *pdev, int port_type)
+{
+ struct ioc4_port *port;
+ struct uart_port *the_port;
+ struct ioc4_driver_data *idd = pci_get_drvdata(pdev);
+ struct ioc4_control *control = idd->idd_serial_data;
+ int port_num;
+ int port_type_idx;
+ struct uart_driver *u_driver;
+
+
+ DPRINT_CONFIG(("%s: attach pdev 0x%p - control 0x%p\n",
+ __func__, pdev, (void *)control));
+
+ if (!control)
+ return -ENODEV;
+
+ port_type_idx = (port_type == PROTO_RS232) ? UART_PORT_RS232
+ : UART_PORT_RS422;
+
+ u_driver = (port_type == PROTO_RS232) ? &ioc4_uart_rs232
+ : &ioc4_uart_rs422;
+
+ /* once around for each port on this card */
+ for (port_num = 0; port_num < IOC4_NUM_SERIAL_PORTS; port_num++) {
+ the_port = &control->ic_port[port_num].icp_uart_port
+ [port_type_idx];
+ port = control->ic_port[port_num].icp_port;
+ port->ip_all_ports[port_type_idx] = the_port;
+
+ DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p : type %s\n",
+ __func__, (void *)the_port,
+ (void *)port,
+ port_type == PROTO_RS232 ? "rs232" : "rs422"));
+
+ /* membase, iobase and mapbase just need to be non-0 */
+ the_port->membase = (unsigned char __iomem *)1;
+ the_port->iobase = (pdev->bus->number << 16) | port_num;
+ the_port->line = (Num_of_ioc4_cards << 2) | port_num;
+ the_port->mapbase = port_type;
+ the_port->type = PORT_16550A;
+ the_port->fifosize = IOC4_FIFO_CHARS;
+ the_port->ops = &ioc4_ops;
+ the_port->irq = control->ic_irq;
+ the_port->dev = &pdev->dev;
+ spin_lock_init(&the_port->lock);
+ if (uart_add_one_port(u_driver, the_port) < 0) {
+ printk(KERN_WARNING
+ "%s: unable to add port %d bus %d\n",
+ __func__, the_port->line, pdev->bus->number);
+ } else {
+ DPRINT_CONFIG(
+ ("IOC4 serial port %d irq = %d, bus %d\n",
+ the_port->line, the_port->irq, pdev->bus->number));
+ }
+ }
+ return 0;
+}
+
+/**
+ * ioc4_serial_attach_one - register attach function
+ * called per card found from IOC4 master module.
+ * @idd: Master module data for this IOC4
+ */
+int
+ioc4_serial_attach_one(struct ioc4_driver_data *idd)
+{
+ unsigned long tmp_addr1;
+ struct ioc4_serial __iomem *serial;
+ struct ioc4_soft *soft;
+ struct ioc4_control *control;
+ int ret = 0;
+
+
+ DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, idd->idd_pdev,
+ idd->idd_pci_id));
+
+ /* PCI-RT does not bring out serial connections.
+ * Do not attach to this particular IOC4.
+ */
+ if (idd->idd_variant == IOC4_VARIANT_PCI_RT)
+ return 0;
+
+ /* request serial registers */
+ tmp_addr1 = idd->idd_bar0 + IOC4_SERIAL_OFFSET;
+
+ if (!request_mem_region(tmp_addr1, sizeof(struct ioc4_serial),
+ "sioc4_uart")) {
+ printk(KERN_WARNING
+ "ioc4 (%p): unable to get request region for "
+ "uart space\n", (void *)idd->idd_pdev);
+ ret = -ENODEV;
+ goto out1;
+ }
+ serial = ioremap(tmp_addr1, sizeof(struct ioc4_serial));
+ if (!serial) {
+ printk(KERN_WARNING
+ "ioc4 (%p) : unable to remap ioc4 serial register\n",
+ (void *)idd->idd_pdev);
+ ret = -ENODEV;
+ goto out2;
+ }
+ DPRINT_CONFIG(("%s : mem 0x%p, serial 0x%p\n",
+ __func__, (void *)idd->idd_misc_regs,
+ (void *)serial));
+
+ /* Get memory for the new card */
+ control = kzalloc(sizeof(struct ioc4_control), GFP_KERNEL);
+
+ if (!control) {
+ printk(KERN_WARNING "ioc4_attach_one"
+ ": unable to get memory for the IOC4\n");
+ ret = -ENOMEM;
+ goto out2;
+ }
+ idd->idd_serial_data = control;
+
+ /* Allocate the soft structure */
+ soft = kzalloc(sizeof(struct ioc4_soft), GFP_KERNEL);
+ if (!soft) {
+ printk(KERN_WARNING
+ "ioc4 (%p): unable to get memory for the soft struct\n",
+ (void *)idd->idd_pdev);
+ ret = -ENOMEM;
+ goto out3;
+ }
+
+ spin_lock_init(&soft->is_ir_lock);
+ soft->is_ioc4_misc_addr = idd->idd_misc_regs;
+ soft->is_ioc4_serial_addr = serial;
+
+ /* Init the IOC4 */
+ writel(0xf << IOC4_SIO_CR_CMD_PULSE_SHIFT,
+ &idd->idd_misc_regs->sio_cr.raw);
+
+ /* Enable serial port mode select generic PIO pins as outputs */
+ writel(IOC4_GPCR_UART0_MODESEL | IOC4_GPCR_UART1_MODESEL
+ | IOC4_GPCR_UART2_MODESEL | IOC4_GPCR_UART3_MODESEL,
+ &idd->idd_misc_regs->gpcr_s.raw);
+
+ /* Clear and disable all serial interrupts */
+ write_ireg(soft, ~0, IOC4_W_IEC, IOC4_SIO_INTR_TYPE);
+ writel(~0, &idd->idd_misc_regs->sio_ir.raw);
+ write_ireg(soft, IOC4_OTHER_IR_SER_MEMERR, IOC4_W_IEC,
+ IOC4_OTHER_INTR_TYPE);
+ writel(IOC4_OTHER_IR_SER_MEMERR, &idd->idd_misc_regs->other_ir.raw);
+ control->ic_soft = soft;
+
+ /* Hook up interrupt handler */
+ if (!request_irq(idd->idd_pdev->irq, ioc4_intr, IRQF_SHARED,
+ "sgi-ioc4serial", soft)) {
+ control->ic_irq = idd->idd_pdev->irq;
+ } else {
+ printk(KERN_WARNING
+ "%s : request_irq fails for IRQ 0x%x\n ",
+ __func__, idd->idd_pdev->irq);
+ }
+ ret = ioc4_attach_local(idd);
+ if (ret)
+ goto out4;
+
+ /* register port with the serial core - 1 rs232, 1 rs422 */
+
+ if ((ret = ioc4_serial_core_attach(idd->idd_pdev, PROTO_RS232)))
+ goto out4;
+
+ if ((ret = ioc4_serial_core_attach(idd->idd_pdev, PROTO_RS422)))
+ goto out5;
+
+ Num_of_ioc4_cards++;
+
+ return ret;
+
+ /* error exits that give back resources */
+out5:
+ ioc4_serial_remove_one(idd);
+out4:
+ kfree(soft);
+out3:
+ kfree(control);
+out2:
+ if (serial)
+ iounmap(serial);
+ release_mem_region(tmp_addr1, sizeof(struct ioc4_serial));
+out1:
+
+ return ret;
+}
+
+
+static struct ioc4_submodule ioc4_serial_submodule = {
+ .is_name = "IOC4_serial",
+ .is_owner = THIS_MODULE,
+ .is_probe = ioc4_serial_attach_one,
+ .is_remove = ioc4_serial_remove_one,
+};
+
+/**
+ * ioc4_serial_init - module init
+ */
+static int __init ioc4_serial_init(void)
+{
+ int ret;
+
+ /* register with serial core */
+ if ((ret = uart_register_driver(&ioc4_uart_rs232)) < 0) {
+ printk(KERN_WARNING
+ "%s: Couldn't register rs232 IOC4 serial driver\n",
+ __func__);
+ goto out;
+ }
+ if ((ret = uart_register_driver(&ioc4_uart_rs422)) < 0) {
+ printk(KERN_WARNING
+ "%s: Couldn't register rs422 IOC4 serial driver\n",
+ __func__);
+ goto out_uart_rs232;
+ }
+
+ /* register with IOC4 main module */
+ ret = ioc4_register_submodule(&ioc4_serial_submodule);
+ if (ret)
+ goto out_uart_rs422;
+ return 0;
+
+out_uart_rs422:
+ uart_unregister_driver(&ioc4_uart_rs422);
+out_uart_rs232:
+ uart_unregister_driver(&ioc4_uart_rs232);
+out:
+ return ret;
+}
+
+static void __exit ioc4_serial_exit(void)
+{
+ ioc4_unregister_submodule(&ioc4_serial_submodule);
+ uart_unregister_driver(&ioc4_uart_rs232);
+ uart_unregister_driver(&ioc4_uart_rs422);
+}
+
+late_initcall(ioc4_serial_init); /* Call only after tty init is done */
+module_exit(ioc4_serial_exit);
+
+MODULE_AUTHOR("Pat Gefre - Silicon Graphics Inc. (SGI) <pfg@sgi.com>");
+MODULE_DESCRIPTION("Serial PCI driver module for SGI IOC4 Base-IO Card");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
new file mode 100644
index 00000000..7b1cda59
--- /dev/null
+++ b/drivers/tty/serial/ip22zilog.c
@@ -0,0 +1,1221 @@
+/*
+ * Driver for Zilog serial chips found on SGI workstations and
+ * servers. This driver could actually be made more generic.
+ *
+ * This is based on the drivers/serial/sunzilog.c code as of 2.6.0-test7 and the
+ * old drivers/sgi/char/sgiserial.c code which itself is based on the original
+ * drivers/sbus/char/zs.c code. A lot of code has been simply moved over
+ * directly from there but much has been rewritten. Credits therefore go out
+ * to David S. Miller, Eddie C. Dost, Pete Zaitcev, Ted Ts'o and Alex Buell
+ * for their work there.
+ *
+ * Copyright (C) 2002 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2002 David S. Miller (davem@redhat.com)
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/circ_buf.h>
+#include <linux/serial.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/sgialib.h>
+#include <asm/sgi/ioc.h>
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/ip22.h>
+
+#if defined(CONFIG_SERIAL_IP22_ZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+
+#include "ip22zilog.h"
+
+/*
+ * On IP22 we need to delay after register accesses but we do not need to
+ * flush writes.
+ */
+#define ZSDELAY() udelay(5)
+#define ZSDELAY_LONG() udelay(20)
+#define ZS_WSYNC(channel) do { } while (0)
+
+#define NUM_IP22ZILOG 1
+#define NUM_CHANNELS (NUM_IP22ZILOG * 2)
+
+#define ZS_CLOCK 3672000 /* Zilog input clock rate. */
+#define ZS_CLOCK_DIVISOR 16 /* Divisor this driver uses. */
+
+/*
+ * We wrap our port structure around the generic uart_port.
+ */
+struct uart_ip22zilog_port {
+ struct uart_port port;
+
+ /* IRQ servicing chain. */
+ struct uart_ip22zilog_port *next;
+
+ /* Current values of Zilog write registers. */
+ unsigned char curregs[NUM_ZSREGS];
+
+ unsigned int flags;
+#define IP22ZILOG_FLAG_IS_CONS 0x00000004
+#define IP22ZILOG_FLAG_IS_KGDB 0x00000008
+#define IP22ZILOG_FLAG_MODEM_STATUS 0x00000010
+#define IP22ZILOG_FLAG_IS_CHANNEL_A 0x00000020
+#define IP22ZILOG_FLAG_REGS_HELD 0x00000040
+#define IP22ZILOG_FLAG_TX_STOPPED 0x00000080
+#define IP22ZILOG_FLAG_TX_ACTIVE 0x00000100
+#define IP22ZILOG_FLAG_RESET_DONE 0x00000200
+
+ unsigned int tty_break;
+
+ unsigned char parity_mask;
+ unsigned char prev_status;
+};
+
+#define ZILOG_CHANNEL_FROM_PORT(PORT) ((struct zilog_channel *)((PORT)->membase))
+#define UART_ZILOG(PORT) ((struct uart_ip22zilog_port *)(PORT))
+#define IP22ZILOG_GET_CURR_REG(PORT, REGNUM) \
+ (UART_ZILOG(PORT)->curregs[REGNUM])
+#define IP22ZILOG_SET_CURR_REG(PORT, REGNUM, REGVAL) \
+ ((UART_ZILOG(PORT)->curregs[REGNUM]) = (REGVAL))
+#define ZS_IS_CONS(UP) ((UP)->flags & IP22ZILOG_FLAG_IS_CONS)
+#define ZS_IS_KGDB(UP) ((UP)->flags & IP22ZILOG_FLAG_IS_KGDB)
+#define ZS_WANTS_MODEM_STATUS(UP) ((UP)->flags & IP22ZILOG_FLAG_MODEM_STATUS)
+#define ZS_IS_CHANNEL_A(UP) ((UP)->flags & IP22ZILOG_FLAG_IS_CHANNEL_A)
+#define ZS_REGS_HELD(UP) ((UP)->flags & IP22ZILOG_FLAG_REGS_HELD)
+#define ZS_TX_STOPPED(UP) ((UP)->flags & IP22ZILOG_FLAG_TX_STOPPED)
+#define ZS_TX_ACTIVE(UP) ((UP)->flags & IP22ZILOG_FLAG_TX_ACTIVE)
+
+/* Reading and writing Zilog8530 registers. The delays are to make this
+ * driver work on the IP22 which needs a settling delay after each chip
+ * register access, other machines handle this in hardware via auxiliary
+ * flip-flops which implement the settle time we do in software.
+ *
+ * The port lock must be held and local IRQs must be disabled
+ * when {read,write}_zsreg is invoked.
+ */
+static unsigned char read_zsreg(struct zilog_channel *channel,
+ unsigned char reg)
+{
+ unsigned char retval;
+
+ writeb(reg, &channel->control);
+ ZSDELAY();
+ retval = readb(&channel->control);
+ ZSDELAY();
+
+ return retval;
+}
+
+static void write_zsreg(struct zilog_channel *channel,
+ unsigned char reg, unsigned char value)
+{
+ writeb(reg, &channel->control);
+ ZSDELAY();
+ writeb(value, &channel->control);
+ ZSDELAY();
+}
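+
+/* Configuration changes elsewhere in the driver are made by editing the
+ * software copy in up->curregs[] first, and are then pushed out either as a
+ * whole set via __load_zsregs() (through ip22zilog_maybe_update_regs()) or,
+ * for bits not subject to the "transmitter active" rule, with a direct
+ * write_zsreg() of the single register.
+ */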
+
+static void ip22zilog_clear_fifo(struct zilog_channel *channel)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ unsigned char regval;
+
+ regval = readb(&channel->control);
+ ZSDELAY();
+ if (regval & Rx_CH_AV)
+ break;
+
+ regval = read_zsreg(channel, R1);
+ readb(&channel->data);
+ ZSDELAY();
+
+ if (regval & (PAR_ERR | Rx_OVR | CRC_ERR)) {
+ writeb(ERR_RES, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+ }
+ }
+}
+
+/* This function must only be called when the TX is not busy. The UART
+ * port lock must be held and local interrupts disabled.
+ */
+static void __load_zsregs(struct zilog_channel *channel, unsigned char *regs)
+{
+ int i;
+
+ /* Let pending transmits finish. */
+ for (i = 0; i < 1000; i++) {
+ unsigned char stat = read_zsreg(channel, R1);
+ if (stat & ALL_SNT)
+ break;
+ udelay(100);
+ }
+
+ writeb(ERR_RES, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ ip22zilog_clear_fifo(channel);
+
+ /* Disable all interrupts. */
+ write_zsreg(channel, R1,
+ regs[R1] & ~(RxINT_MASK | TxINT_ENAB | EXT_INT_ENAB));
+
+ /* Set parity, sync config, stop bits, and clock divisor. */
+ write_zsreg(channel, R4, regs[R4]);
+
+ /* Set misc. TX/RX control bits. */
+ write_zsreg(channel, R10, regs[R10]);
+
+ /* Set TX/RX controls sans the enable bits. */
+ write_zsreg(channel, R3, regs[R3] & ~RxENAB);
+ write_zsreg(channel, R5, regs[R5] & ~TxENAB);
+
+ /* Synchronous mode config. */
+ write_zsreg(channel, R6, regs[R6]);
+ write_zsreg(channel, R7, regs[R7]);
+
+ /* Don't mess with the interrupt vector (R2, unused by us) and
+	 * master interrupt control (R9).  We make sure this is set up
+ * properly at probe time then never touch it again.
+ */
+
+ /* Disable baud generator. */
+ write_zsreg(channel, R14, regs[R14] & ~BRENAB);
+
+ /* Clock mode control. */
+ write_zsreg(channel, R11, regs[R11]);
+
+ /* Lower and upper byte of baud rate generator divisor. */
+ write_zsreg(channel, R12, regs[R12]);
+ write_zsreg(channel, R13, regs[R13]);
+
+ /* Now rewrite R14, with BRENAB (if set). */
+ write_zsreg(channel, R14, regs[R14]);
+
+ /* External status interrupt control. */
+ write_zsreg(channel, R15, regs[R15]);
+
+ /* Reset external status interrupts. */
+ write_zsreg(channel, R0, RES_EXT_INT);
+ write_zsreg(channel, R0, RES_EXT_INT);
+
+ /* Rewrite R3/R5, this time without enables masked. */
+ write_zsreg(channel, R3, regs[R3]);
+ write_zsreg(channel, R5, regs[R5]);
+
+ /* Rewrite R1, this time without IRQ enabled masked. */
+ write_zsreg(channel, R1, regs[R1]);
+}
+
+/* Reprogram the Zilog channel HW registers with the copies found in the
+ * software state struct. If the transmitter is busy, we defer this update
+ * until the next TX complete interrupt. Else, we do it right now.
+ *
+ * The UART port lock must be held and local interrupts disabled.
+ */
+static void ip22zilog_maybe_update_regs(struct uart_ip22zilog_port *up,
+ struct zilog_channel *channel)
+{
+ if (!ZS_REGS_HELD(up)) {
+ if (ZS_TX_ACTIVE(up)) {
+ up->flags |= IP22ZILOG_FLAG_REGS_HELD;
+ } else {
+ __load_zsregs(channel, up->curregs);
+ }
+ }
+}
+
+#define Rx_BRK 0x0100 /* BREAK event software flag. */
+#define Rx_SYS 0x0200 /* SysRq event software flag. */
+
+static struct tty_struct *ip22zilog_receive_chars(struct uart_ip22zilog_port *up,
+ struct zilog_channel *channel)
+{
+ struct tty_struct *tty;
+ unsigned char ch, flag;
+ unsigned int r1;
+
+ tty = NULL;
+ if (up->port.state != NULL &&
+ up->port.state->port.tty != NULL)
+ tty = up->port.state->port.tty;
+
+ for (;;) {
+ ch = readb(&channel->control);
+ ZSDELAY();
+ if (!(ch & Rx_CH_AV))
+ break;
+
+ r1 = read_zsreg(channel, R1);
+ if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) {
+ writeb(ERR_RES, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+ }
+
+ ch = readb(&channel->data);
+ ZSDELAY();
+
+ ch &= up->parity_mask;
+
+ /* Handle the null char got when BREAK is removed. */
+ if (!ch)
+ r1 |= up->tty_break;
+
+ /* A real serial line, record the character and status. */
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+ if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR | Rx_SYS | Rx_BRK)) {
+ up->tty_break = 0;
+
+ if (r1 & (Rx_SYS | Rx_BRK)) {
+ up->port.icount.brk++;
+ if (r1 & Rx_SYS)
+ continue;
+ r1 &= ~(PAR_ERR | CRC_ERR);
+ }
+ else if (r1 & PAR_ERR)
+ up->port.icount.parity++;
+ else if (r1 & CRC_ERR)
+ up->port.icount.frame++;
+ if (r1 & Rx_OVR)
+ up->port.icount.overrun++;
+ r1 &= up->port.read_status_mask;
+ if (r1 & Rx_BRK)
+ flag = TTY_BREAK;
+ else if (r1 & PAR_ERR)
+ flag = TTY_PARITY;
+ else if (r1 & CRC_ERR)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(&up->port, ch))
+ continue;
+
+ if (tty)
+ uart_insert_char(&up->port, r1, Rx_OVR, ch, flag);
+ }
+ return tty;
+}
+
+static void ip22zilog_status_handle(struct uart_ip22zilog_port *up,
+ struct zilog_channel *channel)
+{
+ unsigned char status;
+
+ status = readb(&channel->control);
+ ZSDELAY();
+
+ writeb(RES_EXT_INT, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ if (up->curregs[R15] & BRKIE) {
+ if ((status & BRK_ABRT) && !(up->prev_status & BRK_ABRT)) {
+ if (uart_handle_break(&up->port))
+ up->tty_break = Rx_SYS;
+ else
+ up->tty_break = Rx_BRK;
+ }
+ }
+
+ if (ZS_WANTS_MODEM_STATUS(up)) {
+ if (status & SYNC)
+ up->port.icount.dsr++;
+
+ /* The Zilog just gives us an interrupt when DCD/CTS/etc. change.
+		 * But it does not tell us which bit has changed, so we have
+		 * to keep track of that ourselves.
+ */
+ if ((status ^ up->prev_status) ^ DCD)
+ uart_handle_dcd_change(&up->port,
+ (status & DCD));
+ if ((status ^ up->prev_status) ^ CTS)
+ uart_handle_cts_change(&up->port,
+ (status & CTS));
+
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+ }
+
+ up->prev_status = status;
+}
+
+static void ip22zilog_transmit_chars(struct uart_ip22zilog_port *up,
+ struct zilog_channel *channel)
+{
+ struct circ_buf *xmit;
+
+ if (ZS_IS_CONS(up)) {
+ unsigned char status = readb(&channel->control);
+ ZSDELAY();
+
+ /* TX still busy? Just wait for the next TX done interrupt.
+ *
+ * It can occur because of how we do serial console writes. It would
+ * be nice to transmit console writes just like we normally would for
+		 * a TTY line (i.e. buffered and TX interrupt driven).  That is not
+ * easy because console writes cannot sleep. One solution might be
+ * to poll on enough port->xmit space becoming free. -DaveM
+ */
+ if (!(status & Tx_BUF_EMP))
+ return;
+ }
+
+ up->flags &= ~IP22ZILOG_FLAG_TX_ACTIVE;
+
+ if (ZS_REGS_HELD(up)) {
+ __load_zsregs(channel, up->curregs);
+ up->flags &= ~IP22ZILOG_FLAG_REGS_HELD;
+ }
+
+ if (ZS_TX_STOPPED(up)) {
+ up->flags &= ~IP22ZILOG_FLAG_TX_STOPPED;
+ goto ack_tx_int;
+ }
+
+ if (up->port.x_char) {
+ up->flags |= IP22ZILOG_FLAG_TX_ACTIVE;
+ writeb(up->port.x_char, &channel->data);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ return;
+ }
+
+ if (up->port.state == NULL)
+ goto ack_tx_int;
+ xmit = &up->port.state->xmit;
+ if (uart_circ_empty(xmit))
+ goto ack_tx_int;
+ if (uart_tx_stopped(&up->port))
+ goto ack_tx_int;
+
+ up->flags |= IP22ZILOG_FLAG_TX_ACTIVE;
+ writeb(xmit->buf[xmit->tail], &channel->data);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ return;
+
+ack_tx_int:
+ writeb(RES_Tx_P, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+}
+
+static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
+{
+ struct uart_ip22zilog_port *up = dev_id;
+
+ while (up) {
+ struct zilog_channel *channel
+ = ZILOG_CHANNEL_FROM_PORT(&up->port);
+ struct tty_struct *tty;
+ unsigned char r3;
+
+ spin_lock(&up->port.lock);
+ r3 = read_zsreg(channel, R3);
+
+ /* Channel A */
+ tty = NULL;
+ if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
+ writeb(RES_H_IUS, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ if (r3 & CHARxIP)
+ tty = ip22zilog_receive_chars(up, channel);
+ if (r3 & CHAEXT)
+ ip22zilog_status_handle(up, channel);
+ if (r3 & CHATxIP)
+ ip22zilog_transmit_chars(up, channel);
+ }
+ spin_unlock(&up->port.lock);
+
+ if (tty)
+ tty_flip_buffer_push(tty);
+
+ /* Channel B */
+ up = up->next;
+ channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
+
+ spin_lock(&up->port.lock);
+ tty = NULL;
+ if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
+ writeb(RES_H_IUS, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ if (r3 & CHBRxIP)
+ tty = ip22zilog_receive_chars(up, channel);
+ if (r3 & CHBEXT)
+ ip22zilog_status_handle(up, channel);
+ if (r3 & CHBTxIP)
+ ip22zilog_transmit_chars(up, channel);
+ }
+ spin_unlock(&up->port.lock);
+
+ if (tty)
+ tty_flip_buffer_push(tty);
+
+ up = up->next;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* A convenient way to quickly get R0 status.  Locking is the caller's
+ * responsibility: ip22zilog_tx_empty() takes the port lock around this call,
+ * while ip22zilog_get_mctrl() is entered with it already held.
+ */
+static __inline__ unsigned char ip22zilog_read_channel_status(struct uart_port *port)
+{
+ struct zilog_channel *channel;
+ unsigned char status;
+
+ channel = ZILOG_CHANNEL_FROM_PORT(port);
+ status = readb(&channel->control);
+ ZSDELAY();
+
+ return status;
+}
+
+/* The port lock is not held. */
+static unsigned int ip22zilog_tx_empty(struct uart_port *port)
+{
+ unsigned long flags;
+ unsigned char status;
+ unsigned int ret;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ status = ip22zilog_read_channel_status(port);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (status & Tx_BUF_EMP)
+ ret = TIOCSER_TEMT;
+ else
+ ret = 0;
+
+ return ret;
+}
+
+/* The port lock is held and interrupts are disabled. */
+static unsigned int ip22zilog_get_mctrl(struct uart_port *port)
+{
+ unsigned char status;
+ unsigned int ret;
+
+ status = ip22zilog_read_channel_status(port);
+
+ ret = 0;
+ if (status & DCD)
+ ret |= TIOCM_CAR;
+ if (status & SYNC)
+ ret |= TIOCM_DSR;
+ if (status & CTS)
+ ret |= TIOCM_CTS;
+
+ return ret;
+}
+
+/* The port lock is held and interrupts are disabled. */
+static void ip22zilog_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port;
+ struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
+ unsigned char set_bits, clear_bits;
+
+ set_bits = clear_bits = 0;
+
+ if (mctrl & TIOCM_RTS)
+ set_bits |= RTS;
+ else
+ clear_bits |= RTS;
+ if (mctrl & TIOCM_DTR)
+ set_bits |= DTR;
+ else
+ clear_bits |= DTR;
+
+ /* NOTE: Not subject to 'transmitter active' rule. */
+ up->curregs[R5] |= set_bits;
+ up->curregs[R5] &= ~clear_bits;
+ write_zsreg(channel, R5, up->curregs[R5]);
+}
+
+/* The port lock is held and interrupts are disabled. */
+static void ip22zilog_stop_tx(struct uart_port *port)
+{
+ struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port;
+
+ up->flags |= IP22ZILOG_FLAG_TX_STOPPED;
+}
+
+/* The port lock is held and interrupts are disabled. */
+static void ip22zilog_start_tx(struct uart_port *port)
+{
+ struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port;
+ struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
+ unsigned char status;
+
+ up->flags |= IP22ZILOG_FLAG_TX_ACTIVE;
+ up->flags &= ~IP22ZILOG_FLAG_TX_STOPPED;
+
+ status = readb(&channel->control);
+ ZSDELAY();
+
+ /* TX busy? Just wait for the TX done interrupt. */
+ if (!(status & Tx_BUF_EMP))
+ return;
+
+ /* Send the first character to jump-start the TX done
+ * IRQ sending engine.
+ */
+ if (port->x_char) {
+ writeb(port->x_char, &channel->data);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ port->icount.tx++;
+ port->x_char = 0;
+ } else {
+ struct circ_buf *xmit = &port->state->xmit;
+
+ writeb(xmit->buf[xmit->tail], &channel->data);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+ }
+}
+
+/* The port lock is held and interrupts are disabled. */
+static void ip22zilog_stop_rx(struct uart_port *port)
+{
+ struct uart_ip22zilog_port *up = UART_ZILOG(port);
+ struct zilog_channel *channel;
+
+ if (ZS_IS_CONS(up))
+ return;
+
+ channel = ZILOG_CHANNEL_FROM_PORT(port);
+
+ /* Disable all RX interrupts. */
+ up->curregs[R1] &= ~RxINT_MASK;
+ ip22zilog_maybe_update_regs(up, channel);
+}
+
+/* The port lock is held. */
+static void ip22zilog_enable_ms(struct uart_port *port)
+{
+ struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port;
+ struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
+ unsigned char new_reg;
+
+ new_reg = up->curregs[R15] | (DCDIE | SYNCIE | CTSIE);
+ if (new_reg != up->curregs[R15]) {
+ up->curregs[R15] = new_reg;
+
+ /* NOTE: Not subject to 'transmitter active' rule. */
+ write_zsreg(channel, R15, up->curregs[R15]);
+ }
+}
+
+/* The port lock is not held. */
+static void ip22zilog_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port;
+ struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
+ unsigned char set_bits, clear_bits, new_reg;
+ unsigned long flags;
+
+ set_bits = clear_bits = 0;
+
+ if (break_state)
+ set_bits |= SND_BRK;
+ else
+ clear_bits |= SND_BRK;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
+ if (new_reg != up->curregs[R5]) {
+ up->curregs[R5] = new_reg;
+
+ /* NOTE: Not subject to 'transmitter active' rule. */
+ write_zsreg(channel, R5, up->curregs[R5]);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void __ip22zilog_reset(struct uart_ip22zilog_port *up)
+{
+ struct zilog_channel *channel;
+ int i;
+
+ if (up->flags & IP22ZILOG_FLAG_RESET_DONE)
+ return;
+
+ /* Let pending transmits finish. */
+ channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
+ for (i = 0; i < 1000; i++) {
+ unsigned char stat = read_zsreg(channel, R1);
+ if (stat & ALL_SNT)
+ break;
+ udelay(100);
+ }
+
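+	/* The hardware reset is issued through channel A's register set;
+	 * ip22zilog_prepare() lays out the port table so that channel A's
+	 * entry immediately follows channel B's, which is what the up++
+	 * below relies on.
+	 */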
+ if (!ZS_IS_CHANNEL_A(up)) {
+ up++;
+ channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
+ }
+ write_zsreg(channel, R9, FHWRES);
+ ZSDELAY_LONG();
+ (void) read_zsreg(channel, R0);
+
+ up->flags |= IP22ZILOG_FLAG_RESET_DONE;
+ up->next->flags |= IP22ZILOG_FLAG_RESET_DONE;
+}
+
+static void __ip22zilog_startup(struct uart_ip22zilog_port *up)
+{
+ struct zilog_channel *channel;
+
+ channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
+
+ __ip22zilog_reset(up);
+
+ __load_zsregs(channel, up->curregs);
+ /* set master interrupt enable */
+ write_zsreg(channel, R9, up->curregs[R9]);
+ up->prev_status = readb(&channel->control);
+
+ /* Enable receiver and transmitter. */
+ up->curregs[R3] |= RxENAB;
+ up->curregs[R5] |= TxENAB;
+
+ up->curregs[R1] |= EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB;
+ ip22zilog_maybe_update_regs(up, channel);
+}
+
+static int ip22zilog_startup(struct uart_port *port)
+{
+ struct uart_ip22zilog_port *up = UART_ZILOG(port);
+ unsigned long flags;
+
+ if (ZS_IS_CONS(up))
+ return 0;
+
+ spin_lock_irqsave(&port->lock, flags);
+ __ip22zilog_startup(up);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return 0;
+}
+
+/*
+ * The test for ZS_IS_CONS is explained by the following e-mail:
+ *****
+ * From: Russell King <rmk@arm.linux.org.uk>
+ * Date: Sun, 8 Dec 2002 10:18:38 +0000
+ *
+ * On Sun, Dec 08, 2002 at 02:43:36AM -0500, Pete Zaitcev wrote:
+ * > I boot my 2.5 boxes using "console=ttyS0,9600" argument,
+ * > and I noticed that something is not right with reference
+ * > counting in this case. It seems that when the console
+ * > is open by kernel initially, this is not accounted
+ * > as an open, and uart_startup is not called.
+ *
+ * That is correct. We are unable to call uart_startup when the serial
+ * console is initialised because it may need to allocate memory (as
+ * request_irq does) and the memory allocators may not have been
+ * initialised.
+ *
+ * 1. initialise the port into a state where it can send characters in the
+ * console write method.
+ *
+ * 2. don't do the actual hardware shutdown in your shutdown() method (but
+ * do the normal software shutdown - ie, free irqs etc)
+ *****
+ */
+static void ip22zilog_shutdown(struct uart_port *port)
+{
+ struct uart_ip22zilog_port *up = UART_ZILOG(port);
+ struct zilog_channel *channel;
+ unsigned long flags;
+
+ if (ZS_IS_CONS(up))
+ return;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ channel = ZILOG_CHANNEL_FROM_PORT(port);
+
+ /* Disable receiver and transmitter. */
+ up->curregs[R3] &= ~RxENAB;
+ up->curregs[R5] &= ~TxENAB;
+
+ /* Disable all interrupts and BRK assertion. */
+ up->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
+ up->curregs[R5] &= ~SND_BRK;
+ ip22zilog_maybe_update_regs(up, channel);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* Shared by TTY driver and serial console setup. The port lock is held
+ * and local interrupts are disabled.
+ */
+static void
+ip22zilog_convert_to_zs(struct uart_ip22zilog_port *up, unsigned int cflag,
+ unsigned int iflag, int brg)
+{
+
+ up->curregs[R10] = NRZ;
+ up->curregs[R11] = TCBR | RCBR;
+
+ /* Program BAUD and clock source. */
+ up->curregs[R4] &= ~XCLK_MASK;
+ up->curregs[R4] |= X16CLK;
+ up->curregs[R12] = brg & 0xff;
+ up->curregs[R13] = (brg >> 8) & 0xff;
+ up->curregs[R14] = BRENAB;
+
+ /* Character size, stop bits, and parity. */
+ up->curregs[3] &= ~RxN_MASK;
+ up->curregs[5] &= ~TxN_MASK;
+ switch (cflag & CSIZE) {
+ case CS5:
+ up->curregs[3] |= Rx5;
+ up->curregs[5] |= Tx5;
+ up->parity_mask = 0x1f;
+ break;
+ case CS6:
+ up->curregs[3] |= Rx6;
+ up->curregs[5] |= Tx6;
+ up->parity_mask = 0x3f;
+ break;
+ case CS7:
+ up->curregs[3] |= Rx7;
+ up->curregs[5] |= Tx7;
+ up->parity_mask = 0x7f;
+ break;
+ case CS8:
+ default:
+ up->curregs[3] |= Rx8;
+ up->curregs[5] |= Tx8;
+ up->parity_mask = 0xff;
+ break;
+	}
+ up->curregs[4] &= ~0x0c;
+ if (cflag & CSTOPB)
+ up->curregs[4] |= SB2;
+ else
+ up->curregs[4] |= SB1;
+ if (cflag & PARENB)
+ up->curregs[4] |= PAR_ENAB;
+ else
+ up->curregs[4] &= ~PAR_ENAB;
+ if (!(cflag & PARODD))
+ up->curregs[4] |= PAR_EVEN;
+ else
+ up->curregs[4] &= ~PAR_EVEN;
+
+ up->port.read_status_mask = Rx_OVR;
+ if (iflag & INPCK)
+ up->port.read_status_mask |= CRC_ERR | PAR_ERR;
+ if (iflag & (BRKINT | PARMRK))
+ up->port.read_status_mask |= BRK_ABRT;
+
+ up->port.ignore_status_mask = 0;
+ if (iflag & IGNPAR)
+ up->port.ignore_status_mask |= CRC_ERR | PAR_ERR;
+ if (iflag & IGNBRK) {
+ up->port.ignore_status_mask |= BRK_ABRT;
+ if (iflag & IGNPAR)
+ up->port.ignore_status_mask |= Rx_OVR;
+ }
+
+ if ((cflag & CREAD) == 0)
+ up->port.ignore_status_mask = 0xff;
+}
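+
+/* As a worked example for the code above: in the common 9600 8N1 case
+ * (brg = 10 as computed by the caller), the CSIZE switch ORs Rx8 into R3 and
+ * Tx8 into R5 with parity_mask = 0xff, and R4 ends up with X16CLK | SB1 set,
+ * PAR_ENAB clear and PAR_EVEN set, matching the per-channel defaults
+ * programmed in ip22zilog_prepare().
+ */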
+
+/* The port lock is not held. */
+static void
+ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_ip22zilog_port *up = (struct uart_ip22zilog_port *) port;
+ unsigned long flags;
+ int baud, brg;
+
+ baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
+
+ ip22zilog_convert_to_zs(up, termios->c_cflag, termios->c_iflag, brg);
+
+ if (UART_ENABLE_MS(&up->port, termios->c_cflag))
+ up->flags |= IP22ZILOG_FLAG_MODEM_STATUS;
+ else
+ up->flags &= ~IP22ZILOG_FLAG_MODEM_STATUS;
+
+ ip22zilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port));
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static const char *ip22zilog_type(struct uart_port *port)
+{
+ return "IP22-Zilog";
+}
+
+/* We do not request/release mappings of the registers here, this
+ * happens at early serial probe time.
+ */
+static void ip22zilog_release_port(struct uart_port *port)
+{
+}
+
+static int ip22zilog_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+/* These do not need to do anything interesting either. */
+static void ip22zilog_config_port(struct uart_port *port, int flags)
+{
+}
+
+/* We do not support letting the user mess with the divisor, IRQ, etc. */
+static int ip22zilog_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ return -EINVAL;
+}
+
+static struct uart_ops ip22zilog_pops = {
+ .tx_empty = ip22zilog_tx_empty,
+ .set_mctrl = ip22zilog_set_mctrl,
+ .get_mctrl = ip22zilog_get_mctrl,
+ .stop_tx = ip22zilog_stop_tx,
+ .start_tx = ip22zilog_start_tx,
+ .stop_rx = ip22zilog_stop_rx,
+ .enable_ms = ip22zilog_enable_ms,
+ .break_ctl = ip22zilog_break_ctl,
+ .startup = ip22zilog_startup,
+ .shutdown = ip22zilog_shutdown,
+ .set_termios = ip22zilog_set_termios,
+ .type = ip22zilog_type,
+ .release_port = ip22zilog_release_port,
+ .request_port = ip22zilog_request_port,
+ .config_port = ip22zilog_config_port,
+ .verify_port = ip22zilog_verify_port,
+};
+
+static struct uart_ip22zilog_port *ip22zilog_port_table;
+static struct zilog_layout **ip22zilog_chip_regs;
+
+static struct uart_ip22zilog_port *ip22zilog_irq_chain;
+static int zilog_irq = -1;
+
+static void * __init alloc_one_table(unsigned long size)
+{
+ return kzalloc(size, GFP_KERNEL);
+}
+
+static void __init ip22zilog_alloc_tables(void)
+{
+ ip22zilog_port_table = (struct uart_ip22zilog_port *)
+ alloc_one_table(NUM_CHANNELS * sizeof(struct uart_ip22zilog_port));
+ ip22zilog_chip_regs = (struct zilog_layout **)
+ alloc_one_table(NUM_IP22ZILOG * sizeof(struct zilog_layout *));
+
+ if (ip22zilog_port_table == NULL || ip22zilog_chip_regs == NULL) {
+ panic("IP22-Zilog: Cannot allocate IP22-Zilog tables.");
+ }
+}
+
+/* Get the address of the registers for IP22-Zilog instance CHIP. */
+static struct zilog_layout * __init get_zs(int chip)
+{
+ unsigned long base;
+
+ if (chip < 0 || chip >= NUM_IP22ZILOG) {
+ panic("IP22-Zilog: Illegal chip number %d in get_zs.", chip);
+ }
+
+ /* Not probe-able, hard code it. */
+ base = (unsigned long) &sgioc->uart;
+
+ zilog_irq = SGI_SERIAL_IRQ;
+ request_mem_region(base, 8, "IP22-Zilog");
+
+ return (struct zilog_layout *) base;
+}
+
+#define ZS_PUT_CHAR_MAX_DELAY 2000 /* 10 ms */
+
+#ifdef CONFIG_SERIAL_IP22_ZILOG_CONSOLE
+static void ip22zilog_put_char(struct uart_port *port, int ch)
+{
+ struct zilog_channel *channel = ZILOG_CHANNEL_FROM_PORT(port);
+ int loops = ZS_PUT_CHAR_MAX_DELAY;
+
+ /* This is a timed polling loop so do not switch the explicit
+ * udelay with ZSDELAY as that is a NOP on some platforms. -DaveM
+ */
+ do {
+ unsigned char val = readb(&channel->control);
+ if (val & Tx_BUF_EMP) {
+ ZSDELAY();
+ break;
+ }
+ udelay(5);
+ } while (--loops);
+
+ writeb(ch, &channel->data);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+}
+
+static void
+ip22zilog_console_write(struct console *con, const char *s, unsigned int count)
+{
+ struct uart_ip22zilog_port *up = &ip22zilog_port_table[con->index];
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ uart_console_write(&up->port, s, count, ip22zilog_put_char);
+ udelay(2);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static int __init ip22zilog_console_setup(struct console *con, char *options)
+{
+ struct uart_ip22zilog_port *up = &ip22zilog_port_table[con->index];
+ unsigned long flags;
+ int baud = 9600, bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ up->flags |= IP22ZILOG_FLAG_IS_CONS;
+
+ printk(KERN_INFO "Console: ttyS%d (IP22-Zilog)\n", con->index);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ up->curregs[R15] |= BRKIE;
+
+ __ip22zilog_startup(up);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ return uart_set_options(&up->port, con, baud, parity, bits, flow);
+}
+
+static struct uart_driver ip22zilog_reg;
+
+static struct console ip22zilog_console = {
+ .name = "ttyS",
+ .write = ip22zilog_console_write,
+ .device = uart_console_device,
+ .setup = ip22zilog_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &ip22zilog_reg,
+};
+#endif /* CONFIG_SERIAL_IP22_ZILOG_CONSOLE */
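+
+/* With the console above registered, booting with something like
+ * "console=ttyS0,9600" (the case mentioned in the mail quoted earlier) routes
+ * early kernel output through ip22zilog_console_write(); anything after the
+ * comma is handed to uart_parse_options(), which accepts the usual
+ * "9600n8"-style baud/parity/bits/flow string.
+ */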
+
+static struct uart_driver ip22zilog_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "serial",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .nr = NUM_CHANNELS,
+#ifdef CONFIG_SERIAL_IP22_ZILOG_CONSOLE
+ .cons = &ip22zilog_console,
+#endif
+};
+
+static void __init ip22zilog_prepare(void)
+{
+ struct uart_ip22zilog_port *up;
+ struct zilog_layout *rp;
+ int channel, chip;
+
+ /*
+ * Temporary fix.
+ */
+ for (channel = 0; channel < NUM_CHANNELS; channel++)
+ spin_lock_init(&ip22zilog_port_table[channel].port.lock);
+
+ ip22zilog_irq_chain = &ip22zilog_port_table[NUM_CHANNELS - 1];
+ up = &ip22zilog_port_table[0];
+ for (channel = NUM_CHANNELS - 1 ; channel > 0; channel--)
+ up[channel].next = &up[channel - 1];
+ up[channel].next = NULL;
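+	/* The chain therefore starts at the highest-numbered entry (a channel
+	 * A port) and links downwards, so ip22zilog_interrupt() visits each
+	 * chip's channel A first and its channel B immediately after, as its
+	 * loop expects.
+	 */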
+
+ for (chip = 0; chip < NUM_IP22ZILOG; chip++) {
+ if (!ip22zilog_chip_regs[chip]) {
+ ip22zilog_chip_regs[chip] = rp = get_zs(chip);
+
+ up[(chip * 2) + 0].port.membase = (char *) &rp->channelB;
+ up[(chip * 2) + 1].port.membase = (char *) &rp->channelA;
+
+ /* In theory mapbase is the physical address ... */
+ up[(chip * 2) + 0].port.mapbase =
+ (unsigned long) ioremap((unsigned long) &rp->channelB, 8);
+ up[(chip * 2) + 1].port.mapbase =
+ (unsigned long) ioremap((unsigned long) &rp->channelA, 8);
+ }
+
+ /* Channel A */
+ up[(chip * 2) + 0].port.iotype = UPIO_MEM;
+ up[(chip * 2) + 0].port.irq = zilog_irq;
+ up[(chip * 2) + 0].port.uartclk = ZS_CLOCK;
+ up[(chip * 2) + 0].port.fifosize = 1;
+ up[(chip * 2) + 0].port.ops = &ip22zilog_pops;
+ up[(chip * 2) + 0].port.type = PORT_IP22ZILOG;
+ up[(chip * 2) + 0].port.flags = 0;
+ up[(chip * 2) + 0].port.line = (chip * 2) + 0;
+ up[(chip * 2) + 0].flags = 0;
+
+ /* Channel B */
+ up[(chip * 2) + 1].port.iotype = UPIO_MEM;
+ up[(chip * 2) + 1].port.irq = zilog_irq;
+ up[(chip * 2) + 1].port.uartclk = ZS_CLOCK;
+ up[(chip * 2) + 1].port.fifosize = 1;
+ up[(chip * 2) + 1].port.ops = &ip22zilog_pops;
+ up[(chip * 2) + 1].port.type = PORT_IP22ZILOG;
+ up[(chip * 2) + 1].port.line = (chip * 2) + 1;
+ up[(chip * 2) + 1].flags |= IP22ZILOG_FLAG_IS_CHANNEL_A;
+ }
+
+ for (channel = 0; channel < NUM_CHANNELS; channel++) {
+ struct uart_ip22zilog_port *up = &ip22zilog_port_table[channel];
+ int brg;
+
+ /* Normal serial TTY. */
+ up->parity_mask = 0xff;
+ up->curregs[R1] = EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB;
+ up->curregs[R4] = PAR_EVEN | X16CLK | SB1;
+ up->curregs[R3] = RxENAB | Rx8;
+ up->curregs[R5] = TxENAB | Tx8;
+ up->curregs[R9] = NV | MIE;
+ up->curregs[R10] = NRZ;
+ up->curregs[R11] = TCBR | RCBR;
+ brg = BPS_TO_BRG(9600, ZS_CLOCK / ZS_CLOCK_DIVISOR);
+ up->curregs[R12] = (brg & 0xff);
+ up->curregs[R13] = (brg >> 8) & 0xff;
+ up->curregs[R14] = BRENAB;
+ }
+}
+
+static int __init ip22zilog_ports_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Serial: IP22 Zilog driver (%d chips).\n", NUM_IP22ZILOG);
+
+ ip22zilog_prepare();
+
+ if (request_irq(zilog_irq, ip22zilog_interrupt, 0,
+ "IP22-Zilog", ip22zilog_irq_chain)) {
+ panic("IP22-Zilog: Unable to register zs interrupt handler.\n");
+ }
+
+ ret = uart_register_driver(&ip22zilog_reg);
+ if (ret == 0) {
+ int i;
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ struct uart_ip22zilog_port *up = &ip22zilog_port_table[i];
+
+ uart_add_one_port(&ip22zilog_reg, &up->port);
+ }
+ }
+
+ return ret;
+}
+
+static int __init ip22zilog_init(void)
+{
+ /* IP22 Zilog setup is hard coded, no probing to do. */
+ ip22zilog_alloc_tables();
+ ip22zilog_ports_init();
+
+ return 0;
+}
+
+static void __exit ip22zilog_exit(void)
+{
+ int i;
+ struct uart_ip22zilog_port *up;
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ up = &ip22zilog_port_table[i];
+
+ uart_remove_one_port(&ip22zilog_reg, &up->port);
+ }
+
+ /* Free IO mem */
+ up = &ip22zilog_port_table[0];
+ for (i = 0; i < NUM_IP22ZILOG; i++) {
+ if (up[(i * 2) + 0].port.mapbase) {
+ iounmap((void*)up[(i * 2) + 0].port.mapbase);
+ up[(i * 2) + 0].port.mapbase = 0;
+ }
+ if (up[(i * 2) + 1].port.mapbase) {
+ iounmap((void*)up[(i * 2) + 1].port.mapbase);
+ up[(i * 2) + 1].port.mapbase = 0;
+ }
+ }
+
+ uart_unregister_driver(&ip22zilog_reg);
+}
+
+module_init(ip22zilog_init);
+module_exit(ip22zilog_exit);
+
+/* David wrote it but I'm to blame for the bugs ... */
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
+MODULE_DESCRIPTION("SGI Zilog serial port driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/ip22zilog.h b/drivers/tty/serial/ip22zilog.h
new file mode 100644
index 00000000..a59a9a83
--- /dev/null
+++ b/drivers/tty/serial/ip22zilog.h
@@ -0,0 +1,281 @@
+#ifndef _IP22_ZILOG_H
+#define _IP22_ZILOG_H
+
+#include <asm/byteorder.h>
+
+struct zilog_channel {
+#ifdef __BIG_ENDIAN
+ volatile unsigned char unused0[3];
+ volatile unsigned char control;
+ volatile unsigned char unused1[3];
+ volatile unsigned char data;
+#else /* __LITTLE_ENDIAN */
+ volatile unsigned char control;
+ volatile unsigned char unused0[3];
+ volatile unsigned char data;
+ volatile unsigned char unused1[3];
+#endif
+};
+
+struct zilog_layout {
+ struct zilog_channel channelB;
+ struct zilog_channel channelA;
+};
+
+#define NUM_ZSREGS 16
+
+/* Conversion routines to/from brg time constants from/to bits
+ * per second.
+ */
+#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
+#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
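+
+/* Worked example: ip22zilog.c feeds these macros ZS_CLOCK / ZS_CLOCK_DIVISOR
+ * = 3672000 / 16 = 229500 Hz, so BPS_TO_BRG(9600, 229500) yields
+ * ((229500 + 9600) / 19200) - 2 = 10, and BRG_TO_BPS(10, 229500) gives back
+ * 9562 bps, about 0.4% below the nominal 9600.
+ */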
+
+/* The Zilog register set */
+
+#define FLAG 0x7e
+
+/* Write Register 0 */
+#define R0 0 /* Register selects */
+#define R1 1
+#define R2 2
+#define R3 3
+#define R4 4
+#define R5 5
+#define R6 6
+#define R7 7
+#define R8 8
+#define R9 9
+#define R10 10
+#define R11 11
+#define R12 12
+#define R13 13
+#define R14 14
+#define R15 15
+
+#define NULLCODE 0 /* Null Code */
+#define POINT_HIGH 0x8 /* Select upper half of registers */
+#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */
+#define SEND_ABORT 0x18 /* HDLC Abort */
+#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */
+#define RES_Tx_P 0x28 /* Reset TxINT Pending */
+#define ERR_RES 0x30 /* Error Reset */
+#define RES_H_IUS 0x38 /* Reset highest IUS */
+
+#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */
+#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */
+#define RES_EOM_L 0xC0 /* Reset EOM latch */
+
+/* Write Register 1 */
+
+#define EXT_INT_ENAB 0x1 /* Ext Int Enable */
+#define TxINT_ENAB 0x2 /* Tx Int Enable */
+#define PAR_SPEC 0x4 /* Parity is special condition */
+
+#define RxINT_DISAB 0 /* Rx Int Disable */
+#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */
+#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */
+#define INT_ERR_Rx 0x18 /* Int on error only */
+#define RxINT_MASK 0x18
+
+#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */
+#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */
+#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */
+
+/* Write Register #2 (Interrupt Vector) */
+
+/* Write Register 3 */
+
+#define RxENAB 0x1 /* Rx Enable */
+#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */
+#define ADD_SM 0x4 /* Address Search Mode (SDLC) */
+#define RxCRC_ENAB 0x8 /* Rx CRC Enable */
+#define ENT_HM 0x10 /* Enter Hunt Mode */
+#define AUTO_ENAB 0x20 /* Auto Enables */
+#define Rx5 0x0 /* Rx 5 Bits/Character */
+#define Rx7 0x40 /* Rx 7 Bits/Character */
+#define Rx6 0x80 /* Rx 6 Bits/Character */
+#define Rx8 0xc0 /* Rx 8 Bits/Character */
+#define RxN_MASK 0xc0
+
+/* Write Register 4 */
+
+#define PAR_ENAB 0x1 /* Parity Enable */
+#define PAR_EVEN 0x2 /* Parity Even/Odd* */
+
+#define SYNC_ENAB 0 /* Sync Modes Enable */
+#define SB1 0x4 /* 1 stop bit/char */
+#define SB15 0x8 /* 1.5 stop bits/char */
+#define SB2 0xc /* 2 stop bits/char */
+
+#define MONSYNC 0 /* 8 Bit Sync character */
+#define BISYNC 0x10 /* 16 bit sync character */
+#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */
+#define EXTSYNC 0x30 /* External Sync Mode */
+
+#define X1CLK 0x0 /* x1 clock mode */
+#define X16CLK 0x40 /* x16 clock mode */
+#define X32CLK 0x80 /* x32 clock mode */
+#define X64CLK 0xC0 /* x64 clock mode */
+#define XCLK_MASK 0xC0
+
+/* Write Register 5 */
+
+#define TxCRC_ENAB 0x1 /* Tx CRC Enable */
+#define RTS 0x2 /* RTS */
+#define SDLC_CRC 0x4 /* SDLC/CRC-16 */
+#define TxENAB 0x8 /* Tx Enable */
+#define SND_BRK 0x10 /* Send Break */
+#define Tx5 0x0 /* Tx 5 bits (or less)/character */
+#define Tx7 0x20 /* Tx 7 bits/character */
+#define Tx6 0x40 /* Tx 6 bits/character */
+#define Tx8 0x60 /* Tx 8 bits/character */
+#define TxN_MASK 0x60
+#define DTR 0x80 /* DTR */
+
+/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
+
+/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
+
+/* Write Register 8 (transmit buffer) */
+
+/* Write Register 9 (Master interrupt control) */
+#define VIS 1 /* Vector Includes Status */
+#define NV 2 /* No Vector */
+#define DLC 4 /* Disable Lower Chain */
+#define MIE 8 /* Master Interrupt Enable */
+#define STATHI 0x10 /* Status high */
+#define NORESET 0 /* No reset on write to R9 */
+#define CHRB 0x40 /* Reset channel B */
+#define CHRA 0x80 /* Reset channel A */
+#define FHWRES 0xc0 /* Force hardware reset */
+
+/* Write Register 10 (misc control bits) */
+#define BIT6 1 /* 6 bit/8bit sync */
+#define LOOPMODE 2 /* SDLC Loop mode */
+#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */
+#define MARKIDLE 8 /* Mark/flag on idle */
+#define GAOP 0x10 /* Go active on poll */
+#define NRZ 0 /* NRZ mode */
+#define NRZI 0x20 /* NRZI mode */
+#define FM1 0x40 /* FM1 (transition = 1) */
+#define FM0 0x60 /* FM0 (transition = 0) */
+#define CRCPS 0x80 /* CRC Preset I/O */
+
+/* Write Register 11 (Clock Mode control) */
+#define TRxCXT 0 /* TRxC = Xtal output */
+#define TRxCTC 1 /* TRxC = Transmit clock */
+#define TRxCBR 2 /* TRxC = BR Generator Output */
+#define TRxCDP 3 /* TRxC = DPLL output */
+#define TRxCOI 4 /* TRxC O/I */
+#define TCRTxCP 0 /* Transmit clock = RTxC pin */
+#define TCTRxCP 8 /* Transmit clock = TRxC pin */
+#define TCBR 0x10 /* Transmit clock = BR Generator output */
+#define TCDPLL 0x18 /* Transmit clock = DPLL output */
+#define RCRTxCP 0 /* Receive clock = RTxC pin */
+#define RCTRxCP 0x20 /* Receive clock = TRxC pin */
+#define RCBR 0x40 /* Receive clock = BR Generator output */
+#define RCDPLL 0x60 /* Receive clock = DPLL output */
+#define RTxCX 0x80 /* RTxC Xtal/No Xtal */
+
+/* Write Register 12 (lower byte of baud rate generator time constant) */
+
+/* Write Register 13 (upper byte of baud rate generator time constant) */
+
+/* Write Register 14 (Misc control bits) */
+#define BRENAB 1 /* Baud rate generator enable */
+#define BRSRC 2 /* Baud rate generator source */
+#define DTRREQ 4 /* DTR/Request function */
+#define AUTOECHO 8 /* Auto Echo */
+#define LOOPBAK 0x10 /* Local loopback */
+#define SEARCH 0x20 /* Enter search mode */
+#define RMC 0x40 /* Reset missing clock */
+#define DISDPLL 0x60 /* Disable DPLL */
+#define SSBR 0x80 /* Set DPLL source = BR generator */
+#define SSRTxC 0xa0 /* Set DPLL source = RTxC */
+#define SFMM 0xc0 /* Set FM mode */
+#define SNRZI 0xe0 /* Set NRZI mode */
+
+/* Write Register 15 (external/status interrupt control) */
+#define ZCIE 2 /* Zero count IE */
+#define DCDIE 8 /* DCD IE */
+#define SYNCIE 0x10 /* Sync/hunt IE */
+#define CTSIE 0x20 /* CTS IE */
+#define TxUIE 0x40 /* Tx Underrun/EOM IE */
+#define BRKIE 0x80 /* Break/Abort IE */
+
+
+/* Read Register 0 */
+#define Rx_CH_AV 0x1 /* Rx Character Available */
+#define ZCOUNT 0x2 /* Zero count */
+#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */
+#define DCD 0x8 /* DCD */
+#define SYNC 0x10 /* Sync/hunt */
+#define CTS 0x20 /* CTS */
+#define TxEOM 0x40 /* Tx underrun */
+#define BRK_ABRT 0x80 /* Break/Abort */
+
+/* Read Register 1 */
+#define ALL_SNT 0x1 /* All sent */
+/* Residue Data for 8 Rx bits/char programmed */
+#define RES3 0x8 /* 0/3 */
+#define RES4 0x4 /* 0/4 */
+#define RES5 0xc /* 0/5 */
+#define RES6 0x2 /* 0/6 */
+#define RES7 0xa /* 0/7 */
+#define RES8 0x6 /* 0/8 */
+#define RES18 0xe /* 1/8 */
+#define RES28 0x0 /* 2/8 */
+/* Special Rx Condition Interrupts */
+#define PAR_ERR 0x10 /* Parity error */
+#define Rx_OVR 0x20 /* Rx Overrun Error */
+#define CRC_ERR 0x40 /* CRC/Framing Error */
+#define END_FR 0x80 /* End of Frame (SDLC) */
+
+/* Read Register 2 (channel b only) - Interrupt vector */
+#define CHB_Tx_EMPTY 0x00
+#define CHB_EXT_STAT 0x02
+#define CHB_Rx_AVAIL 0x04
+#define CHB_SPECIAL 0x06
+#define CHA_Tx_EMPTY 0x08
+#define CHA_EXT_STAT 0x0a
+#define CHA_Rx_AVAIL 0x0c
+#define CHA_SPECIAL 0x0e
+#define STATUS_MASK 0x0e
+
+/* Read Register 3 (interrupt pending register) ch a only */
+#define CHBEXT 0x1 /* Channel B Ext/Stat IP */
+#define CHBTxIP 0x2 /* Channel B Tx IP */
+#define CHBRxIP 0x4 /* Channel B Rx IP */
+#define CHAEXT 0x8 /* Channel A Ext/Stat IP */
+#define CHATxIP 0x10 /* Channel A Tx IP */
+#define CHARxIP 0x20 /* Channel A Rx IP */
+
+/* Read Register 8 (receive data register) */
+
+/* Read Register 10 (misc status bits) */
+#define ONLOOP 2 /* On loop */
+#define LOOPSEND 0x10 /* Loop sending */
+#define CLK2MIS 0x40 /* Two clocks missing */
+#define CLK1MIS 0x80 /* One clock missing */
+
+/* Read Register 12 (lower byte of baud rate generator constant) */
+
+/* Read Register 13 (upper byte of baud rate generator constant) */
+
+/* Read Register 15 (value of WR 15) */
+
+/* Misc macros */
+#define ZS_CLEARERR(channel) do { writeb(ERR_RES, &channel->control); \
+ udelay(5); } while(0)
+
+#define ZS_CLEARSTAT(channel) do { writeb(RES_EXT_INT, &channel->control); \
+ udelay(5); } while(0)
+
+#define ZS_CLEARFIFO(channel) do { readb(&channel->data); \
+ udelay(2); \
+ readb(&channel->data); \
+ udelay(2); \
+ readb(&channel->data); \
+ udelay(2); } while(0)
+
+#endif /* _IP22_ZILOG_H */
diff --git a/drivers/tty/serial/jsm/Makefile b/drivers/tty/serial/jsm/Makefile
new file mode 100644
index 00000000..e46b6e0f
--- /dev/null
+++ b/drivers/tty/serial/jsm/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for Jasmine adapter
+#
+
+obj-$(CONFIG_SERIAL_JSM) += jsm.o
+
+jsm-objs := jsm_driver.o jsm_neo.o jsm_tty.o
+
diff --git a/drivers/tty/serial/jsm/jsm.h b/drivers/tty/serial/jsm/jsm.h
new file mode 100644
index 00000000..5b837e74
--- /dev/null
+++ b/drivers/tty/serial/jsm/jsm.h
@@ -0,0 +1,381 @@
+/************************************************************************
+ * Copyright 2003 Digi International (www.digi.com)
+ *
+ * Copyright (C) 2004 IBM Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 * Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * Contact Information:
+ * Scott H Kilau <Scott_Kilau@digi.com>
+ * Wendy Xiong <wendyx@us.ibm.com>
+ *
+ ***********************************************************************/
+
+#ifndef __JSM_DRIVER_H
+#define __JSM_DRIVER_H
+
+#include <linux/kernel.h>
+#include <linux/types.h> /* To pick up the various Linux types */
+#include <linux/tty.h>
+#include <linux/serial_core.h>
+#include <linux/device.h>
+
+/*
+ * Debugging levels can be set using the jsm_debug module parameter.
+ * They can also be compiled out completely.
+ */
+enum {
+ DBG_INIT = 0x01,
+ DBG_BASIC = 0x02,
+ DBG_CORE = 0x04,
+ DBG_OPEN = 0x08,
+ DBG_CLOSE = 0x10,
+ DBG_READ = 0x20,
+ DBG_WRITE = 0x40,
+ DBG_IOCTL = 0x80,
+ DBG_PROC = 0x100,
+ DBG_PARAM = 0x200,
+ DBG_PSCAN = 0x400,
+ DBG_EVENT = 0x800,
+ DBG_DRAIN = 0x1000,
+ DBG_MSIGS = 0x2000,
+ DBG_MGMT = 0x4000,
+ DBG_INTR = 0x8000,
+ DBG_CARR = 0x10000,
+};
+
+#define jsm_printk(nlevel, klevel, pdev, fmt, args...) \
+ if ((DBG_##nlevel & jsm_debug)) \
+ dev_printk(KERN_##klevel, pdev->dev, fmt, ## args)
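+/* Usage sketch (illustrative): loading the module with
+ * "modprobe jsm jsm_debug=0x03" enables DBG_INIT | DBG_BASIC, so a call
+ * such as jsm_printk(INIT, INFO, &brd->pci_dev, "found board\n") expands
+ * to dev_printk(KERN_INFO, ...) against that PCI device; levels not set
+ * in jsm_debug simply fail the runtime test and print nothing.
+ */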
+
+#define MAXLINES 256
+#define MAXPORTS 8
+#define MAX_STOPS_SENT 5
+
+/* Board type definitions */
+
+#define T_NEO 0000
+#define T_CLASSIC 0001
+#define T_PCIBUS 0400
+
+/* Board State Definitions */
+
+#define BD_RUNNING 0x0
+#define BD_REASON 0x7f
+#define BD_NOTFOUND 0x1
+#define BD_NOIOPORT 0x2
+#define BD_NOMEM 0x3
+#define BD_NOBIOS 0x4
+#define BD_NOFEP 0x5
+#define BD_FAILED 0x6
+#define BD_ALLOCATED 0x7
+#define BD_TRIBOOT 0x8
+#define BD_BADKME 0x80
+
+
+/* 4 extra for alignment play space */
+#define WRITEBUFLEN ((4096) + 4)
+#define MYFLIPLEN N_TTY_BUF_SIZE
+
+#define JSM_VERSION "jsm: 1.2-1-INKERNEL"
+#define JSM_PARTNUM "40002438_A-INKERNEL"
+
+struct jsm_board;
+struct jsm_channel;
+
+/************************************************************************
+ * Per board operations structure *
+ ************************************************************************/
+struct board_ops {
+ irq_handler_t intr;
+ void (*uart_init) (struct jsm_channel *ch);
+ void (*uart_off) (struct jsm_channel *ch);
+ void (*param) (struct jsm_channel *ch);
+ void (*assert_modem_signals) (struct jsm_channel *ch);
+ void (*flush_uart_write) (struct jsm_channel *ch);
+ void (*flush_uart_read) (struct jsm_channel *ch);
+ void (*disable_receiver) (struct jsm_channel *ch);
+ void (*enable_receiver) (struct jsm_channel *ch);
+ void (*send_break) (struct jsm_channel *ch);
+ void (*clear_break) (struct jsm_channel *ch, int);
+ void (*send_start_character) (struct jsm_channel *ch);
+ void (*send_stop_character) (struct jsm_channel *ch);
+ void (*copy_data_from_queue_to_uart) (struct jsm_channel *ch);
+ u32 (*get_uart_bytes_left) (struct jsm_channel *ch);
+ void (*send_immediate_char) (struct jsm_channel *ch, unsigned char);
+};
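+/* The Neo-specific implementation of these hooks is exported from
+ * jsm_neo.c as jsm_neo_ops and attached to a board in jsm_probe_one()
+ * via brd->bd_ops = &jsm_neo_ops, so the core driver can drive the UART
+ * hardware through this table.
+ */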
+
+
+/*
+ * Per-board information
+ */
+struct jsm_board
+{
+ int boardnum; /* Board number: 0-32 */
+
+ int type; /* Type of board */
+ u8 rev; /* PCI revision ID */
+ struct pci_dev *pci_dev;
+ u32 maxports; /* MAX ports this board can handle */
+
+ spinlock_t bd_intr_lock; /* Used to protect the poller tasklet and
+ * the interrupt routine from each other.
+ */
+
+ u32 nasync; /* Number of ports on card */
+
+ u32 irq; /* Interrupt request number */
+
+ u64 membase; /* Start of base memory of the card */
+ u64 membase_end; /* End of base memory of the card */
+
+ u8 __iomem *re_map_membase;/* Remapped memory of the card */
+
+ u64 iobase; /* Start of io base of the card */
+ u64 iobase_end; /* End of io base of the card */
+
+ u32 bd_uart_offset; /* Space between each UART */
+
+ struct jsm_channel *channels[MAXPORTS]; /* array of pointers to our channels. */
+ char *flipbuf; /* Our flip buffer, alloced if board is found */
+
+ u32 bd_dividend; /* Board/UARTs specific dividend */
+
+ struct board_ops *bd_ops;
+
+ struct list_head jsm_board_entry;
+};
+
+/************************************************************************
+ * Device flag definitions for ch_flags.
+ ************************************************************************/
+#define CH_PRON 0x0001 /* Printer on string */
+#define CH_STOP 0x0002 /* Output is stopped */
+#define CH_STOPI 0x0004 /* Input is stopped */
+#define CH_CD 0x0008 /* Carrier is present */
+#define CH_FCAR 0x0010 /* Carrier forced on */
+#define CH_HANGUP 0x0020 /* Hangup received */
+
+#define CH_RECEIVER_OFF 0x0040 /* Receiver is off */
+#define CH_OPENING 0x0080 /* Port in fragile open state */
+#define CH_CLOSING 0x0100 /* Port in fragile close state */
+#define CH_FIFO_ENABLED 0x0200 /* Port has FIFOs enabled */
+#define CH_TX_FIFO_EMPTY 0x0400 /* TX Fifo is completely empty */
+#define CH_TX_FIFO_LWM 0x0800 /* TX Fifo is below Low Water */
+#define CH_BREAK_SENDING 0x1000 /* Break is being sent */
+#define CH_LOOPBACK 0x2000 /* Channel is in loopback mode */
+#define CH_FLIPBUF_IN_USE 0x4000 /* Channel's flipbuf is in use */
+#define CH_BAUD0 0x08000 /* Used for checking B0 transitions */
+
+/* Our Read/Error/Write queue sizes */
+#define RQUEUEMASK 0x1FFF /* 8 K - 1 */
+#define EQUEUEMASK 0x1FFF /* 8 K - 1 */
+#define RQUEUESIZE (RQUEUEMASK + 1)
+#define EQUEUESIZE RQUEUESIZE
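+/* The read and error queues are power-of-two ring buffers, so the head
+ * and tail indices wrap with a mask rather than a modulo, e.g.
+ * head = (head + n) & RQUEUEMASK in neo_copy_data_from_uart_to_queue().
+ */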
+
+
+/************************************************************************
+ * Channel information structure.
+ ************************************************************************/
+struct jsm_channel {
+ struct uart_port uart_port;
+ struct jsm_board *ch_bd; /* Board structure pointer */
+
+ spinlock_t ch_lock; /* provide for serialization */
+ wait_queue_head_t ch_flags_wait;
+
+ u32 ch_portnum; /* Port number, 0 offset. */
+ u32 ch_open_count; /* open count */
+ u32 ch_flags; /* Channel flags */
+
+ u64 ch_close_delay; /* How long we should drop RTS/DTR for */
+
+ tcflag_t ch_c_iflag; /* channel iflags */
+ tcflag_t ch_c_cflag; /* channel cflags */
+ tcflag_t ch_c_oflag; /* channel oflags */
+ tcflag_t ch_c_lflag; /* channel lflags */
+ u8 ch_stopc; /* Stop character */
+ u8 ch_startc; /* Start character */
+
+ u8 ch_mostat; /* FEP output modem status */
+ u8 ch_mistat; /* FEP input modem status */
+
+ struct neo_uart_struct __iomem *ch_neo_uart; /* Pointer to the "mapped" UART struct */
+ u8 ch_cached_lsr; /* Cached value of the LSR register */
+
+ u8 *ch_rqueue; /* Our read queue buffer - malloc'ed */
+ u16 ch_r_head; /* Head location of the read queue */
+ u16 ch_r_tail; /* Tail location of the read queue */
+
+ u8 *ch_equeue; /* Our error queue buffer - malloc'ed */
+ u16 ch_e_head; /* Head location of the error queue */
+ u16 ch_e_tail; /* Tail location of the error queue */
+
+ u64 ch_rxcount; /* total of data received so far */
+ u64 ch_txcount; /* total of data transmitted so far */
+
+ u8 ch_r_tlevel; /* Receive Trigger level */
+ u8 ch_t_tlevel; /* Transmit Trigger level */
+
+ u8 ch_r_watermark; /* Receive Watermark */
+
+
+ u32 ch_stops_sent; /* How many times I have sent a stop character
+ * to try to stop the other guy sending.
+ */
+ u64 ch_err_parity; /* Count of parity errors on channel */
+ u64 ch_err_frame; /* Count of framing errors on channel */
+ u64 ch_err_break; /* Count of breaks on channel */
+ u64 ch_err_overrun; /* Count of overruns on channel */
+
+ u64 ch_xon_sends; /* Count of xons transmitted */
+ u64 ch_xoff_sends; /* Count of xoffs transmitted */
+};
+
+
+/************************************************************************
+ * Per channel/port NEO UART structure *
+ ************************************************************************
+ * Base Structure Entries Usage Meanings to Host *
+ * *
+ * W = read write R = read only *
+ * U = Unused. *
+ ************************************************************************/
+
+struct neo_uart_struct {
+ u8 txrx; /* WR RHR/THR - Holding Reg */
+ u8 ier; /* WR IER - Interrupt Enable Reg */
+ u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg */
+ u8 lcr; /* WR LCR - Line Control Reg */
+ u8 mcr; /* WR MCR - Modem Control Reg */
+ u8 lsr; /* WR LSR - Line Status Reg */
+ u8 msr; /* WR MSR - Modem Status Reg */
+ u8 spr; /* WR SPR - Scratch Pad Reg */
+ u8 fctr; /* WR FCTR - Feature Control Reg */
+ u8 efr; /* WR EFR - Enhanced Function Reg */
+ u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */
+ u8 rfifo; /* WR RXCNT/RXTRG - Receive FIFO Reg */
+ u8 xoffchar1; /* WR XOFF 1 - XOff Character 1 Reg */
+ u8 xoffchar2; /* WR XOFF 2 - XOff Character 2 Reg */
+ u8 xonchar1; /* WR XON 1 - Xon Character 1 Reg */
+ u8 xonchar2; /* WR XON 2 - XOn Character 2 Reg */
+
+ u8 reserved1[0x2ff - 0x200]; /* U Reserved by Exar */
+ u8 txrxburst[64]; /* RW 64 bytes of RX/TX FIFO Data */
+ u8 reserved2[0x37f - 0x340]; /* U Reserved by Exar */
+ u8 rxburst_with_errors[64]; /* R 64 bytes of RX FIFO Data + LSR */
+};
+
+/* Where to read the extended interrupt register (32bits instead of 8bits) */
+#define UART_17158_POLL_ADDR_OFFSET 0x80
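+/* The interrupt handler reads this extended poll register with a single
+ * 32-bit access, e.g. readl(brd->re_map_membase + UART_17158_POLL_ADDR_OFFSET),
+ * to learn which port interrupted and why (see neo_intr()).
+ */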
+
+/*
+ * These are the redefinitions for the FCTR on the XR17C158, since
+ * Exar made them different than their earlier design. (XR16C854)
+ */
+
+/* These are only applicable when table D is selected */
+#define UART_17158_FCTR_RTS_NODELAY 0x00
+#define UART_17158_FCTR_RTS_4DELAY 0x01
+#define UART_17158_FCTR_RTS_6DELAY 0x02
+#define UART_17158_FCTR_RTS_8DELAY 0x03
+#define UART_17158_FCTR_RTS_12DELAY 0x12
+#define UART_17158_FCTR_RTS_16DELAY 0x05
+#define UART_17158_FCTR_RTS_20DELAY 0x13
+#define UART_17158_FCTR_RTS_24DELAY 0x06
+#define UART_17158_FCTR_RTS_28DELAY 0x14
+#define UART_17158_FCTR_RTS_32DELAY 0x07
+#define UART_17158_FCTR_RTS_36DELAY 0x16
+#define UART_17158_FCTR_RTS_40DELAY 0x08
+#define UART_17158_FCTR_RTS_44DELAY 0x09
+#define UART_17158_FCTR_RTS_48DELAY 0x10
+#define UART_17158_FCTR_RTS_52DELAY 0x11
+
+#define UART_17158_FCTR_RTS_IRDA 0x10
+#define UART_17158_FCTR_RS485 0x20
+#define UART_17158_FCTR_TRGA 0x00
+#define UART_17158_FCTR_TRGB 0x40
+#define UART_17158_FCTR_TRGC 0x80
+#define UART_17158_FCTR_TRGD 0xC0
+
+/* 17158 trigger table selects.. */
+#define UART_17158_FCTR_BIT6 0x40
+#define UART_17158_FCTR_BIT7 0x80
+
+/* 17158 TX/RX memmapped buffer offsets */
+#define UART_17158_RX_FIFOSIZE 64
+#define UART_17158_TX_FIFOSIZE 64
+
+/* 17158 Extended IIR's */
+#define UART_17158_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */
+#define UART_17158_IIR_XONXOFF 0x10 /* Received an XON/XOFF char */
+#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20 /* CTS/DSR or RTS/DTR state change */
+#define UART_17158_IIR_FIFO_ENABLED 0xC0 /* 16550 FIFOs are Enabled */
+
+/*
+ * These are the extended interrupts that get sent
+ * back to us from the UART's 32bit interrupt register
+ */
+#define UART_17158_RX_LINE_STATUS 0x1 /* RX Ready */
+#define UART_17158_RXRDY_TIMEOUT 0x2 /* RX Ready Timeout */
+#define UART_17158_TXRDY 0x3 /* TX Ready */
+#define UART_17158_MSR 0x4 /* Modem State Change */
+#define UART_17158_TX_AND_FIFO_CLR 0x40 /* Transmitter Holding Reg Empty */
+#define UART_17158_RX_FIFO_DATA_ERROR 0x80 /* UART detected an RX FIFO Data error */
+
+/*
+ * These are the EXTENDED definitions for the 17C158's Interrupt
+ * Enable Register.
+ */
+#define UART_17158_EFR_ECB 0x10 /* Enhanced control bit */
+#define UART_17158_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
+#define UART_17158_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
+#define UART_17158_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
+#define UART_17158_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow Control Enable */
+
+#define UART_17158_XOFF_DETECT 0x1 /* Indicates whether chip saw an incoming XOFF char */
+#define UART_17158_XON_DETECT 0x2 /* Indicates whether chip saw an incoming XON char */
+
+#define UART_17158_IER_RSVD1 0x10 /* Reserved by Exar */
+#define UART_17158_IER_XOFF 0x20 /* Xoff Interrupt Enable */
+#define UART_17158_IER_RTSDTR 0x40 /* Output Interrupt Enable */
+#define UART_17158_IER_CTSDSR 0x80 /* Input Interrupt Enable */
+
+#define PCI_DEVICE_NEO_2DB9_PCI_NAME "Neo 2 - DB9 Universal PCI"
+#define PCI_DEVICE_NEO_2DB9PRI_PCI_NAME "Neo 2 - DB9 Universal PCI - Powered Ring Indicator"
+#define PCI_DEVICE_NEO_2RJ45_PCI_NAME "Neo 2 - RJ45 Universal PCI"
+#define PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME "Neo 2 - RJ45 Universal PCI - Powered Ring Indicator"
+#define PCIE_DEVICE_NEO_IBM_PCI_NAME "Neo 4 - PCI Express - IBM"
+
+/*
+ * Our Global Variables.
+ */
+extern struct uart_driver jsm_uart_driver;
+extern struct board_ops jsm_neo_ops;
+extern int jsm_debug;
+
+/*************************************************************************
+ *
+ * Prototypes for non-static functions used in more than one module
+ *
+ *************************************************************************/
+int jsm_tty_init(struct jsm_board *);
+int jsm_uart_port_init(struct jsm_board *);
+int jsm_remove_uart_port(struct jsm_board *);
+void jsm_input(struct jsm_channel *ch);
+void jsm_check_queue_flow_control(struct jsm_channel *ch);
+
+#endif
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
new file mode 100644
index 00000000..6c12d94e
--- /dev/null
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -0,0 +1,297 @@
+/************************************************************************
+ * Copyright 2003 Digi International (www.digi.com)
+ *
+ * Copyright (C) 2004 IBM Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 * Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * Contact Information:
+ * Scott H Kilau <Scott_Kilau@digi.com>
+ * Wendy Xiong <wendyx@us.ibm.com>
+ *
+ *
+ ***********************************************************************/
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "jsm.h"
+
+MODULE_AUTHOR("Digi International, http://www.digi.com");
+MODULE_DESCRIPTION("Driver for the Digi International "
+ "Neo PCI based product line");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("jsm");
+
+#define JSM_DRIVER_NAME "jsm"
+#define NR_PORTS 32
+#define JSM_MINOR_START 0
+
+struct uart_driver jsm_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = JSM_DRIVER_NAME,
+ .dev_name = "ttyn",
+ .major = 0,
+ .minor = JSM_MINOR_START,
+ .nr = NR_PORTS,
+};
+
+static pci_ers_result_t jsm_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+static pci_ers_result_t jsm_io_slot_reset(struct pci_dev *pdev);
+static void jsm_io_resume(struct pci_dev *pdev);
+
+static struct pci_error_handlers jsm_err_handler = {
+ .error_detected = jsm_io_error_detected,
+ .slot_reset = jsm_io_slot_reset,
+ .resume = jsm_io_resume,
+};
+
+int jsm_debug;
+module_param(jsm_debug, int, 0);
+MODULE_PARM_DESC(jsm_debug, "Driver debugging level");
+
+static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int rc = 0;
+ struct jsm_board *brd;
+ static int adapter_count = 0;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Device enable FAILED\n");
+ goto out;
+ }
+
+ rc = pci_request_regions(pdev, "jsm");
+ if (rc) {
+ dev_err(&pdev->dev, "pci_request_region FAILED\n");
+ goto out_disable_device;
+ }
+
+ brd = kzalloc(sizeof(struct jsm_board), GFP_KERNEL);
+ if (!brd) {
+ dev_err(&pdev->dev,
+ "memory allocation for board structure failed\n");
+ rc = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ /* store the info for the board we've found */
+ brd->boardnum = adapter_count++;
+ brd->pci_dev = pdev;
+ if (pdev->device == PCIE_DEVICE_ID_NEO_4_IBM)
+ brd->maxports = 4;
+ else if (pdev->device == PCI_DEVICE_ID_DIGI_NEO_8)
+ brd->maxports = 8;
+ else
+ brd->maxports = 2;
+
+ spin_lock_init(&brd->bd_intr_lock);
+
+ /* store which revision we have */
+ brd->rev = pdev->revision;
+
+ brd->irq = pdev->irq;
+
+ jsm_printk(INIT, INFO, &brd->pci_dev,
+ "jsm_found_board - NEO adapter\n");
+
+ /* get the PCI Base Address Registers */
+ brd->membase = pci_resource_start(pdev, 0);
+ brd->membase_end = pci_resource_end(pdev, 0);
+
+ if (brd->membase & 1)
+ brd->membase &= ~3;
+ else
+ brd->membase &= ~15;
+
+ /* Assign the board_ops struct */
+ brd->bd_ops = &jsm_neo_ops;
+
+ brd->bd_uart_offset = 0x200;
+ brd->bd_dividend = 921600;
+
+ brd->re_map_membase = ioremap(brd->membase, pci_resource_len(pdev, 0));
+ if (!brd->re_map_membase) {
+ dev_err(&pdev->dev,
+ "card has no PCI Memory resources, "
+ "failing board.\n");
+ rc = -ENOMEM;
+ goto out_kfree_brd;
+ }
+
+ rc = request_irq(brd->irq, brd->bd_ops->intr,
+ IRQF_SHARED, "JSM", brd);
+ if (rc) {
+ printk(KERN_WARNING "Failed to hook IRQ %d\n", brd->irq);
+ goto out_iounmap;
+ }
+
+ rc = jsm_tty_init(brd);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Can't init tty devices (%d)\n", rc);
+ rc = -ENXIO;
+ goto out_free_irq;
+ }
+
+ rc = jsm_uart_port_init(brd);
+ if (rc < 0) {
+ /* XXX: leaking all resources from jsm_tty_init here! */
+ dev_err(&pdev->dev, "Can't init uart port (%d)\n", rc);
+ rc = -ENXIO;
+ goto out_free_irq;
+ }
+
+ /* Log the information about the board */
+ dev_info(&pdev->dev, "board %d: Digi Neo (rev %d), irq %d\n",
+ adapter_count, brd->rev, brd->irq);
+
+ /*
+ * allocate flip buffer for board.
+ *
+ * Okay to malloc with GFP_KERNEL, we are not in interrupt
+ * context, and there are no locks held.
+ */
+ brd->flipbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
+ if (!brd->flipbuf) {
+ /* XXX: leaking all resources from jsm_tty_init and
+ jsm_uart_port_init here! */
+ dev_err(&pdev->dev, "memory allocation for flipbuf failed\n");
+ rc = -ENOMEM;
+ goto out_free_uart;
+ }
+
+ pci_set_drvdata(pdev, brd);
+ pci_save_state(pdev);
+
+ return 0;
+ out_free_uart:
+ jsm_remove_uart_port(brd);
+ out_free_irq:
+ jsm_remove_uart_port(brd);
+ free_irq(brd->irq, brd);
+ out_iounmap:
+ iounmap(brd->re_map_membase);
+ out_kfree_brd:
+ kfree(brd);
+ out_release_regions:
+ pci_release_regions(pdev);
+ out_disable_device:
+ pci_disable_device(pdev);
+ out:
+ return rc;
+}
+
+static void __devexit jsm_remove_one(struct pci_dev *pdev)
+{
+ struct jsm_board *brd = pci_get_drvdata(pdev);
+ int i = 0;
+
+ jsm_remove_uart_port(brd);
+
+ free_irq(brd->irq, brd);
+ iounmap(brd->re_map_membase);
+
+ /* Free all allocated channels structs */
+ for (i = 0; i < brd->maxports; i++) {
+ if (brd->channels[i]) {
+ kfree(brd->channels[i]->ch_rqueue);
+ kfree(brd->channels[i]->ch_equeue);
+ kfree(brd->channels[i]);
+ }
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ kfree(brd->flipbuf);
+ kfree(brd);
+}
+
+static struct pci_device_id jsm_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2DB9), 0, 0, 0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2DB9PRI), 0, 0, 1 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45), 0, 0, 2 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45PRI), 0, 0, 3 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_4_IBM), 0, 0, 4 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_NEO_8), 0, 0, 5 },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, jsm_pci_tbl);
+
+static struct pci_driver jsm_driver = {
+ .name = "jsm",
+ .id_table = jsm_pci_tbl,
+ .probe = jsm_probe_one,
+ .remove = __devexit_p(jsm_remove_one),
+ .err_handler = &jsm_err_handler,
+};
+
+static pci_ers_result_t jsm_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct jsm_board *brd = pci_get_drvdata(pdev);
+
+ jsm_remove_uart_port(brd);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t jsm_io_slot_reset(struct pci_dev *pdev)
+{
+ int rc;
+
+ rc = pci_enable_device(pdev);
+
+ if (rc)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_set_master(pdev);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void jsm_io_resume(struct pci_dev *pdev)
+{
+ struct jsm_board *brd = pci_get_drvdata(pdev);
+
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ jsm_uart_port_init(brd);
+}
+
+static int __init jsm_init_module(void)
+{
+ int rc;
+
+ rc = uart_register_driver(&jsm_uart_driver);
+ if (!rc) {
+ rc = pci_register_driver(&jsm_driver);
+ if (rc)
+ uart_unregister_driver(&jsm_uart_driver);
+ }
+ return rc;
+}
+
+static void __exit jsm_exit_module(void)
+{
+ pci_unregister_driver(&jsm_driver);
+ uart_unregister_driver(&jsm_uart_driver);
+}
+
+module_init(jsm_init_module);
+module_exit(jsm_exit_module);
diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
new file mode 100644
index 00000000..bd6e8469
--- /dev/null
+++ b/drivers/tty/serial/jsm/jsm_neo.c
@@ -0,0 +1,1413 @@
+/************************************************************************
+ * Copyright 2003 Digi International (www.digi.com)
+ *
+ * Copyright (C) 2004 IBM Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 * Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * Contact Information:
+ * Scott H Kilau <Scott_Kilau@digi.com>
+ * Wendy Xiong <wendyx@us.ibm.com>
+ *
+ ***********************************************************************/
+#include <linux/delay.h> /* For udelay */
+#include <linux/serial_reg.h> /* For the various UART offsets */
+#include <linux/tty.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+#include "jsm.h" /* Driver main header file */
+
+static u32 jsm_offset_table[8] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
+
+/*
+ * This function is used to ensure that all outstanding
+ * PCI writes have been completed, by doing a PCI read against
+ * a non-destructive, read-only location on the Neo card.
+ *
+ * In this case, we are reading the DVID (Read-only Device Identification)
+ * value of the Neo card.
+ */
+static inline void neo_pci_posting_flush(struct jsm_board *bd)
+{
+ readb(bd->re_map_membase + 0x8D);
+}
+
+static void neo_set_cts_flow_control(struct jsm_channel *ch)
+{
+ u8 ier, efr;
+ ier = readb(&ch->ch_neo_uart->ier);
+ efr = readb(&ch->ch_neo_uart->efr);
+
+ jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting CTSFLOW\n");
+
+ /* Turn on auto CTS flow control */
+ ier |= (UART_17158_IER_CTSDSR);
+ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_CTSDSR);
+
+ /* Turn off auto Xon flow control */
+ efr &= ~(UART_17158_EFR_IXON);
+
+ /* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ /* Turn on table D, with 8 char hi/low watermarks */
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr);
+
+ /* Feed the UART our trigger levels */
+ writeb(8, &ch->ch_neo_uart->tfifo);
+ ch->ch_t_tlevel = 8;
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+}
+
+static void neo_set_rts_flow_control(struct jsm_channel *ch)
+{
+ u8 ier, efr;
+ ier = readb(&ch->ch_neo_uart->ier);
+ efr = readb(&ch->ch_neo_uart->efr);
+
+ jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting RTSFLOW\n");
+
+ /* Turn on auto RTS flow control */
+ ier |= (UART_17158_IER_RTSDTR);
+ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_RTSDTR);
+
+ /* Turn off auto Xoff flow control */
+ ier &= ~(UART_17158_IER_XOFF);
+ efr &= ~(UART_17158_EFR_IXOFF);
+
+ /* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr);
+ ch->ch_r_watermark = 4;
+
+ writeb(56, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 56;
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ /*
+ * From the Neo UART spec sheet:
+ * The auto RTS/DTR function must be started by asserting
+ * RTS/DTR# output pin (MCR bit-0 or 1) to logic 1 after
+ * it is enabled.
+ */
+ ch->ch_mostat |= (UART_MCR_RTS);
+}
+
+
+static void neo_set_ixon_flow_control(struct jsm_channel *ch)
+{
+ u8 ier, efr;
+ ier = readb(&ch->ch_neo_uart->ier);
+ efr = readb(&ch->ch_neo_uart->efr);
+
+ jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting IXON FLOW\n");
+
+ /* Turn off auto CTS flow control */
+ ier &= ~(UART_17158_IER_CTSDSR);
+ efr &= ~(UART_17158_EFR_CTSDSR);
+
+ /* Turn on auto Xon flow control */
+ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON);
+
+ /* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ ch->ch_r_watermark = 4;
+
+ writeb(32, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 32;
+
+ /* Tell UART what start/stop chars it should be looking for */
+ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
+ writeb(0, &ch->ch_neo_uart->xonchar2);
+
+ writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
+ writeb(0, &ch->ch_neo_uart->xoffchar2);
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+}
+
+static void neo_set_ixoff_flow_control(struct jsm_channel *ch)
+{
+ u8 ier, efr;
+ ier = readb(&ch->ch_neo_uart->ier);
+ efr = readb(&ch->ch_neo_uart->efr);
+
+ jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting IXOFF FLOW\n");
+
+ /* Turn off auto RTS flow control */
+ ier &= ~(UART_17158_IER_RTSDTR);
+ efr &= ~(UART_17158_EFR_RTSDTR);
+
+ /* Turn on auto Xoff flow control */
+ ier |= (UART_17158_IER_XOFF);
+ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
+
+ /* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ /* Turn on table D, with 8 char hi/low watermarks */
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+
+ writeb(8, &ch->ch_neo_uart->tfifo);
+ ch->ch_t_tlevel = 8;
+
+ /* Tell UART what start/stop chars it should be looking for */
+ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
+ writeb(0, &ch->ch_neo_uart->xonchar2);
+
+ writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
+ writeb(0, &ch->ch_neo_uart->xoffchar2);
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+}
+
+static void neo_set_no_input_flow_control(struct jsm_channel *ch)
+{
+ u8 ier, efr;
+ ier = readb(&ch->ch_neo_uart->ier);
+ efr = readb(&ch->ch_neo_uart->efr);
+
+ jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Unsetting Input FLOW\n");
+
+ /* Turn off auto RTS flow control */
+ ier &= ~(UART_17158_IER_RTSDTR);
+ efr &= ~(UART_17158_EFR_RTSDTR);
+
+ /* Turn off auto Xoff flow control */
+ ier &= ~(UART_17158_IER_XOFF);
+ if (ch->ch_c_iflag & IXON)
+ efr &= ~(UART_17158_EFR_IXOFF);
+ else
+ efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
+
+ /* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ /* Turn on table D, with 8 char hi/low watermarks */
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+
+ ch->ch_r_watermark = 0;
+
+ writeb(16, &ch->ch_neo_uart->tfifo);
+ ch->ch_t_tlevel = 16;
+
+ writeb(16, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 16;
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+}
+
+static void neo_set_no_output_flow_control(struct jsm_channel *ch)
+{
+ u8 ier, efr;
+ ier = readb(&ch->ch_neo_uart->ier);
+ efr = readb(&ch->ch_neo_uart->efr);
+
+ jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Unsetting Output FLOW\n");
+
+ /* Turn off auto CTS flow control */
+ ier &= ~(UART_17158_IER_CTSDSR);
+ efr &= ~(UART_17158_EFR_CTSDSR);
+
+ /* Turn off auto Xon flow control */
+ if (ch->ch_c_iflag & IXOFF)
+ efr &= ~(UART_17158_EFR_IXON);
+ else
+ efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON);
+
+ /* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ /* Turn on table D, with 8 char hi/low watermarks */
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+
+ ch->ch_r_watermark = 0;
+
+ writeb(16, &ch->ch_neo_uart->tfifo);
+ ch->ch_t_tlevel = 16;
+
+ writeb(16, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 16;
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+}
+
+static inline void neo_set_new_start_stop_chars(struct jsm_channel *ch)
+{
+
+ /* if hardware flow control is set, then skip this whole thing */
+ if (ch->ch_c_cflag & CRTSCTS)
+ return;
+
+ jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "start\n");
+
+ /* Tell UART what start/stop chars it should be looking for */
+ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
+ writeb(0, &ch->ch_neo_uart->xonchar2);
+
+ writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
+ writeb(0, &ch->ch_neo_uart->xoffchar2);
+}
+
+static void neo_copy_data_from_uart_to_queue(struct jsm_channel *ch)
+{
+ int qleft = 0;
+ u8 linestatus = 0;
+ u8 error_mask = 0;
+ int n = 0;
+ int total = 0;
+ u16 head;
+ u16 tail;
+
+ if (!ch)
+ return;
+
+ /* cache head and tail of queue */
+ head = ch->ch_r_head & RQUEUEMASK;
+ tail = ch->ch_r_tail & RQUEUEMASK;
+
+ /* Get our cached LSR */
+ linestatus = ch->ch_cached_lsr;
+ ch->ch_cached_lsr = 0;
+
+ /* Store how much space we have left in the queue */
+ if ((qleft = tail - head - 1) < 0)
+ qleft += RQUEUEMASK + 1;
+
+ /*
+ * If the UART is not in FIFO mode, force the FIFO copy to
+ * NOT be run, by setting total to 0.
+ *
+ * On the other hand, if the UART IS in FIFO mode, then ask
+ * the UART to give us an approximation of data it has RX'ed.
+ */
+ if (!(ch->ch_flags & CH_FIFO_ENABLED))
+ total = 0;
+ else {
+ total = readb(&ch->ch_neo_uart->rfifo);
+
+ /*
+ * EXAR chip bug - RX FIFO COUNT - Fudge factor.
+ *
+ * This resolves a problem/bug with the Exar chip that sometimes
+ * returns a bogus value in the rfifo register.
+ * The count can be anywhere from 0-3 bytes "off".
+ * Bizarre, but true.
+ */
+ total -= 3;
+ }
+
+ /*
+ * Finally, bound the copy to make sure we don't overflow
+ * our own queue...
+ * The byte-by-byte copy loop below this one will
+ * deal with the queue overflow possibility.
+ */
+ total = min(total, qleft);
+
+ while (total > 0) {
+ /*
+ * Grab the linestatus register, we need to check
+ * to see if there are any errors in the FIFO.
+ */
+ linestatus = readb(&ch->ch_neo_uart->lsr);
+
+ /*
+ * Break out if there is a FIFO error somewhere.
+ * This will allow us to go byte by byte down below,
+ * finding the exact location of the error.
+ */
+ if (linestatus & UART_17158_RX_FIFO_DATA_ERROR)
+ break;
+
+ /* Make sure we don't go over the end of our queue */
+ n = min(((u32) total), (RQUEUESIZE - (u32) head));
+
+ /*
+ * Cut down n even further if needed, this is to fix
+ * a problem with memcpy_fromio() with the Neo on the
+ * IBM pSeries platform.
+ * 15 bytes max appears to be the magic number.
+ */
+ n = min((u32) n, (u32) 12);
+
+ /*
+ * Since we are grabbing the linestatus register, which
+ * will reset some bits after our read, we need to ensure
+ * we don't miss any TX FIFO empty events.
+ */
+ if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR))
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+
+ linestatus = 0;
+
+ /* Copy data from uart to the queue */
+ memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n);
+ /*
+ * Since RX_FIFO_DATA_ERROR was 0, we are guaranteed
+ * that all the data currently in the FIFO is free of
+ * breaks and parity/framing/overrun errors.
+ */
+ memset(ch->ch_equeue + head, 0, n);
+
+ /* Add to and flip head if needed */
+ head = (head + n) & RQUEUEMASK;
+ total -= n;
+ qleft -= n;
+ ch->ch_rxcount += n;
+ }
+
+ /*
+ * Create a mask to determine whether we should
+ * insert the character (if any) into our queue.
+ */
+ if (ch->ch_c_iflag & IGNBRK)
+ error_mask |= UART_LSR_BI;
+
+ /*
+ * Now cleanup any leftover bytes still in the UART.
+ * Also deal with any possible queue overflow here as well.
+ */
+ while (1) {
+
+ /*
+ * It's possible we have a linestatus from the loop above
+ * this, so we "OR" on any extra bits.
+ */
+ linestatus |= readb(&ch->ch_neo_uart->lsr);
+
+ /*
+ * If the chip tells us there is no more data pending to
+ * be read, we can then leave.
+ * But before we do, cache the linestatus, just in case.
+ */
+ if (!(linestatus & UART_LSR_DR)) {
+ ch->ch_cached_lsr = linestatus;
+ break;
+ }
+
+ /* No need to store this bit */
+ linestatus &= ~UART_LSR_DR;
+
+ /*
+ * Since we are grabbing the linestatus register, which
+ * will reset some bits after our read, we need to ensure
+ * we don't miss any TX FIFO empty events.
+ */
+ if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) {
+ linestatus &= ~(UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ }
+
+ /*
+ * Discard character if we are ignoring the error mask.
+ */
+ if (linestatus & error_mask) {
+ u8 discard;
+ linestatus = 0;
+ memcpy_fromio(&discard, &ch->ch_neo_uart->txrxburst, 1);
+ continue;
+ }
+
+ /*
+ * If our queue is full, we have no choice but to drop some data.
+ * The assumption is that HWFLOW or SWFLOW should have stopped
+ * things way way before we got to this point.
+ *
+ * I decided that I wanted to ditch the oldest data first;
+ * I hope that's okay with everyone? Yes? Good.
+ */
+ while (qleft < 1) {
+ jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
+ "Queue full, dropping DATA:%x LSR:%x\n",
+ ch->ch_rqueue[tail], ch->ch_equeue[tail]);
+
+ ch->ch_r_tail = tail = (tail + 1) & RQUEUEMASK;
+ ch->ch_err_overrun++;
+ qleft++;
+ }
+
+ memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1);
+ ch->ch_equeue[head] = (u8) linestatus;
+
+ jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
+ "DATA/LSR pair: %x %x\n", ch->ch_rqueue[head], ch->ch_equeue[head]);
+
+ /* Ditch any remaining linestatus value. */
+ linestatus = 0;
+
+ /* Add to and flip head if needed */
+ head = (head + 1) & RQUEUEMASK;
+
+ qleft--;
+ ch->ch_rxcount++;
+ }
+
+ /*
+ * Write new final heads to channel structure.
+ */
+ ch->ch_r_head = head & RQUEUEMASK;
+ ch->ch_e_head = head & EQUEUEMASK;
+ jsm_input(ch);
+}
+
+static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch)
+{
+ u16 head;
+ u16 tail;
+ int n;
+ int s;
+ int qlen;
+ u32 len_written = 0;
+ struct circ_buf *circ;
+
+ if (!ch)
+ return;
+
+ circ = &ch->uart_port.state->xmit;
+
+ /* No data to write to the UART */
+ if (uart_circ_empty(circ))
+ return;
+
+ /* If port is "stopped", don't send any data to the UART */
+ if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_BREAK_SENDING))
+ return;
+ /*
+ * If FIFOs are disabled. Send data directly to txrx register
+ */
+ if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
+ u8 lsrbits = readb(&ch->ch_neo_uart->lsr);
+
+ ch->ch_cached_lsr |= lsrbits;
+ if (ch->ch_cached_lsr & UART_LSR_THRE) {
+ ch->ch_cached_lsr &= ~(UART_LSR_THRE);
+
+ writeb(circ->buf[circ->tail], &ch->ch_neo_uart->txrx);
+ jsm_printk(WRITE, INFO, &ch->ch_bd->pci_dev,
+ "Tx data: %x\n", circ->buf[circ->head]);
+ circ->tail = (circ->tail + 1) & (UART_XMIT_SIZE - 1);
+ ch->ch_txcount++;
+ }
+ return;
+ }
+
+ /*
+ * We have to do it this way, because of the EXAR TXFIFO count bug.
+ */
+ if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM)))
+ return;
+
+ n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel;
+
+ /* cache head and tail of queue */
+ head = circ->head & (UART_XMIT_SIZE - 1);
+ tail = circ->tail & (UART_XMIT_SIZE - 1);
+ qlen = uart_circ_chars_pending(circ);
+
+ /* Find minimum of the FIFO space, versus queue length */
+ n = min(n, qlen);
+
+ while (n > 0) {
+
+ s = ((head >= tail) ? head : UART_XMIT_SIZE) - tail;
+ s = min(s, n);
+
+ if (s <= 0)
+ break;
+
+ memcpy_toio(&ch->ch_neo_uart->txrxburst, circ->buf + tail, s);
+ /* Add and flip queue if needed */
+ tail = (tail + s) & (UART_XMIT_SIZE - 1);
+ n -= s;
+ ch->ch_txcount += s;
+ len_written += s;
+ }
+
+ /* Update the final tail */
+ circ->tail = tail & (UART_XMIT_SIZE - 1);
+
+ if (len_written >= ch->ch_t_tlevel)
+ ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+
+ if (uart_circ_empty(circ))
+ uart_write_wakeup(&ch->uart_port);
+}
+
+static void neo_parse_modem(struct jsm_channel *ch, u8 signals)
+{
+ u8 msignals = signals;
+
+ jsm_printk(MSIGS, INFO, &ch->ch_bd->pci_dev,
+ "neo_parse_modem: port: %d msignals: %x\n", ch->ch_portnum, msignals);
+
+ /* Scrub off lower bits. They signify deltas, which I don't care about */
+ /* Keep DDCD and DDSR though */
+ msignals &= 0xf8;
+
+ if (msignals & UART_MSR_DDCD)
+ uart_handle_dcd_change(&ch->uart_port, msignals & UART_MSR_DCD);
+ if (msignals & UART_MSR_DDSR)
+ uart_handle_cts_change(&ch->uart_port, msignals & UART_MSR_CTS);
+ if (msignals & UART_MSR_DCD)
+ ch->ch_mistat |= UART_MSR_DCD;
+ else
+ ch->ch_mistat &= ~UART_MSR_DCD;
+
+ if (msignals & UART_MSR_DSR)
+ ch->ch_mistat |= UART_MSR_DSR;
+ else
+ ch->ch_mistat &= ~UART_MSR_DSR;
+
+ if (msignals & UART_MSR_RI)
+ ch->ch_mistat |= UART_MSR_RI;
+ else
+ ch->ch_mistat &= ~UART_MSR_RI;
+
+ if (msignals & UART_MSR_CTS)
+ ch->ch_mistat |= UART_MSR_CTS;
+ else
+ ch->ch_mistat &= ~UART_MSR_CTS;
+
+ jsm_printk(MSIGS, INFO, &ch->ch_bd->pci_dev,
+ "Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n",
+ ch->ch_portnum,
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_CTS),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DSR),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_RI),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DCD));
+}
+
+/* Make the UART raise any of the output signals we want up */
+static void neo_assert_modem_signals(struct jsm_channel *ch)
+{
+ if (!ch)
+ return;
+
+ writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr);
+
+ /* flush write operation */
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+/*
+ * Flush the WRITE FIFO on the Neo.
+ *
+ * NOTE: Channel lock MUST be held before calling this function!
+ */
+static void neo_flush_uart_write(struct jsm_channel *ch)
+{
+ u8 tmp = 0;
+ int i = 0;
+
+ if (!ch)
+ return;
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr);
+
+ for (i = 0; i < 10; i++) {
+
+ /* Check to see if the UART feels it completely flushed the FIFO. */
+ tmp = readb(&ch->ch_neo_uart->isr_fcr);
+ if (tmp & 4) {
+ jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev,
+ "Still flushing TX UART... i: %d\n", i);
+ udelay(10);
+ }
+ else
+ break;
+ }
+
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+}
+
+
+/*
+ * Flush the READ FIFO on the Neo.
+ *
+ * NOTE: Channel lock MUST be held before calling this function!
+ */
+static void neo_flush_uart_read(struct jsm_channel *ch)
+{
+ u8 tmp = 0;
+ int i = 0;
+
+ if (!ch)
+ return;
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_neo_uart->isr_fcr);
+
+ for (i = 0; i < 10; i++) {
+
+ /* Check to see if the UART feels it completely flushed the FIFO. */
+ tmp = readb(&ch->ch_neo_uart->isr_fcr);
+ if (tmp & 2) {
+ jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev,
+ "Still flushing RX UART... i: %d\n", i);
+ udelay(10);
+ }
+ else
+ break;
+ }
+}
+
+/*
+ * No locks are assumed to be held when calling this function.
+ */
+static void neo_clear_break(struct jsm_channel *ch, int force)
+{
+ unsigned long lock_flags;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+ /* Turn break off, and unset some variables */
+ if (ch->ch_flags & CH_BREAK_SENDING) {
+ u8 temp = readb(&ch->ch_neo_uart->lcr);
+ writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr);
+
+ ch->ch_flags &= ~(CH_BREAK_SENDING);
+ jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev,
+ "clear break Finishing UART_LCR_SBC! finished: %lx\n", jiffies);
+
+ /* flush write operation */
+ neo_pci_posting_flush(ch->ch_bd);
+ }
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+}
+
+/*
+ * Parse the ISR register.
+ */
+static inline void neo_parse_isr(struct jsm_board *brd, u32 port)
+{
+ struct jsm_channel *ch;
+ u8 isr;
+ u8 cause;
+ unsigned long lock_flags;
+
+ if (!brd)
+ return;
+
+ if (port > brd->maxports)
+ return;
+
+ ch = brd->channels[port];
+ if (!ch)
+ return;
+
+ /* Here we try to figure out what caused the interrupt to happen */
+ while (1) {
+
+ isr = readb(&ch->ch_neo_uart->isr_fcr);
+
+ /* Bail if no pending interrupt */
+ if (isr & UART_IIR_NO_INT)
+ break;
+
+ /*
+ * Yank off the upper 2 bits, which just show that the FIFOs are enabled.
+ */
+ isr &= ~(UART_17158_IIR_FIFO_ENABLED);
+
+ jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev,
+ "%s:%d isr: %x\n", __FILE__, __LINE__, isr);
+
+ if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) {
+ /* Read data from uart -> queue */
+ neo_copy_data_from_uart_to_queue(ch);
+
+ /* Call our tty layer to enforce queue flow control if needed. */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ jsm_check_queue_flow_control(ch);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ }
+
+ if (isr & UART_IIR_THRI) {
+ /* Transfer data (if any) from Write Queue -> UART. */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ neo_copy_data_from_queue_to_uart(ch);
+ }
+
+ if (isr & UART_17158_IIR_XONXOFF) {
+ cause = readb(&ch->ch_neo_uart->xoffchar1);
+
+ jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev,
+ "Port %d. Got ISR_XONXOFF: cause:%x\n", port, cause);
+
+ /*
+ * Since the UART detected either an XON or
+ * XOFF match, we need to figure out which
+ * one it was, so we can suspend or resume data flow.
+ */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ if (cause == UART_17158_XON_DETECT) {
+ /* Is output stopped right now, if so, resume it */
+ if (brd->channels[port]->ch_flags & CH_STOP) {
+ ch->ch_flags &= ~(CH_STOP);
+ }
+ jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev,
+ "Port %d. XON detected in incoming data\n", port);
+ }
+ else if (cause == UART_17158_XOFF_DETECT) {
+ if (!(brd->channels[port]->ch_flags & CH_STOP)) {
+ ch->ch_flags |= CH_STOP;
+ jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev,
+ "Setting CH_STOP\n");
+ }
+ jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev,
+ "Port: %d. XOFF detected in incoming data\n", port);
+ }
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ }
+
+ if (isr & UART_17158_IIR_HWFLOW_STATE_CHANGE) {
+ /*
+ * If we get here, this means the hardware is doing auto flow control.
+ * Check to see whether RTS/DTR or CTS/DSR caused this interrupt.
+ */
+ cause = readb(&ch->ch_neo_uart->mcr);
+
+ /* Which pin is doing auto flow? RTS or DTR? */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ if ((cause & 0x4) == 0) {
+ if (cause & UART_MCR_RTS)
+ ch->ch_mostat |= UART_MCR_RTS;
+ else
+ ch->ch_mostat &= ~(UART_MCR_RTS);
+ } else {
+ if (cause & UART_MCR_DTR)
+ ch->ch_mostat |= UART_MCR_DTR;
+ else
+ ch->ch_mostat &= ~(UART_MCR_DTR);
+ }
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ }
+
+ /* Parse any modem signal changes */
+ jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev,
+ "MOD_STAT: sending to parse_modem_sigs\n");
+ neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
+ }
+}
+
+static inline void neo_parse_lsr(struct jsm_board *brd, u32 port)
+{
+ struct jsm_channel *ch;
+ int linestatus;
+ unsigned long lock_flags;
+
+ if (!brd)
+ return;
+
+ if (port > brd->maxports)
+ return;
+
+ ch = brd->channels[port];
+ if (!ch)
+ return;
+
+ linestatus = readb(&ch->ch_neo_uart->lsr);
+
+ jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev,
+ "%s:%d port: %d linestatus: %x\n", __FILE__, __LINE__, port, linestatus);
+
+ ch->ch_cached_lsr |= linestatus;
+
+ if (ch->ch_cached_lsr & UART_LSR_DR) {
+ /* Read data from uart -> queue */
+ neo_copy_data_from_uart_to_queue(ch);
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ jsm_check_queue_flow_control(ch);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ }
+
+ /*
+ * This is a special flag. It indicates that at least 1
+ * RX error (parity, framing, or break) has happened.
+ * Mark this in our struct, which will tell me that I have
+ * to do the special RX+LSR read for this FIFO load.
+ */
+ if (linestatus & UART_17158_RX_FIFO_DATA_ERROR)
+ jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev,
+ "%s:%d Port: %d Got an RX error, need to parse LSR\n",
+ __FILE__, __LINE__, port);
+
+ /*
+ * The next 3 tests should *NOT* happen, as the above test
+ * should encapsulate all 3... At least, that's what Exar says.
+ */
+
+ if (linestatus & UART_LSR_PE) {
+ ch->ch_err_parity++;
+ jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev,
+ "%s:%d Port: %d. PAR ERR!\n", __FILE__, __LINE__, port);
+ }
+
+ if (linestatus & UART_LSR_FE) {
+ ch->ch_err_frame++;
+ jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev,
+ "%s:%d Port: %d. FRM ERR!\n", __FILE__, __LINE__, port);
+ }
+
+ if (linestatus & UART_LSR_BI) {
+ ch->ch_err_break++;
+ jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev,
+ "%s:%d Port: %d. BRK INTR!\n", __FILE__, __LINE__, port);
+ }
+
+ if (linestatus & UART_LSR_OE) {
+ /*
+ * Rx overruns. Exar says that an overrun will NOT corrupt
+ * the FIFO. It will just replace the holding register
+ * with this new data byte. So basically just ignore this.
+ * Probably we should eventually have an overrun stat in our driver...
+ */
+ ch->ch_err_overrun++;
+ jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev,
+ "%s:%d Port: %d. Rx Overrun!\n", __FILE__, __LINE__, port);
+ }
+
+ if (linestatus & UART_LSR_THRE) {
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ /* Transfer data (if any) from Write Queue -> UART. */
+ neo_copy_data_from_queue_to_uart(ch);
+ }
+ else if (linestatus & UART_17158_TX_AND_FIFO_CLR) {
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+ /* Transfer data (if any) from Write Queue -> UART. */
+ neo_copy_data_from_queue_to_uart(ch);
+ }
+}
+
+/*
+ * neo_param()
+ * Send any/all changes to the line to the UART.
+ */
+static void neo_param(struct jsm_channel *ch)
+{
+ u8 lcr = 0;
+ u8 uart_lcr, ier;
+ u32 baud;
+ int quot;
+ struct jsm_board *bd;
+
+ bd = ch->ch_bd;
+ if (!bd)
+ return;
+
+ /*
+ * If the baud rate is zero, flush the queues and drop RTS and DTR.
+ */
+ if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+ ch->ch_r_head = ch->ch_r_tail = 0;
+ ch->ch_e_head = ch->ch_e_tail = 0;
+
+ neo_flush_uart_write(ch);
+ neo_flush_uart_read(ch);
+
+ ch->ch_flags |= (CH_BAUD0);
+ ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR);
+ neo_assert_modem_signals(ch);
+ return;
+
+ } else {
+ int i;
+ unsigned int cflag;
+ static struct {
+ unsigned int rate;
+ unsigned int cflag;
+ } baud_rates[] = {
+ { 921600, B921600 },
+ { 460800, B460800 },
+ { 230400, B230400 },
+ { 115200, B115200 },
+ { 57600, B57600 },
+ { 38400, B38400 },
+ { 19200, B19200 },
+ { 9600, B9600 },
+ { 4800, B4800 },
+ { 2400, B2400 },
+ { 1200, B1200 },
+ { 600, B600 },
+ { 300, B300 },
+ { 200, B200 },
+ { 150, B150 },
+ { 134, B134 },
+ { 110, B110 },
+ { 75, B75 },
+ { 50, B50 },
+ };
+
+ cflag = C_BAUD(ch->uart_port.state->port.tty);
+ baud = 9600;
+ for (i = 0; i < ARRAY_SIZE(baud_rates); i++) {
+ if (baud_rates[i].cflag == cflag) {
+ baud = baud_rates[i].rate;
+ break;
+ }
+ }
+
+ if (ch->ch_flags & CH_BAUD0)
+ ch->ch_flags &= ~(CH_BAUD0);
+ }
+
+ if (ch->ch_c_cflag & PARENB)
+ lcr |= UART_LCR_PARITY;
+
+ if (!(ch->ch_c_cflag & PARODD))
+ lcr |= UART_LCR_EPAR;
+
+ /*
+ * Not all platforms support mark/space parity,
+ * so this will hide behind an ifdef.
+ */
+#ifdef CMSPAR
+ if (ch->ch_c_cflag & CMSPAR)
+ lcr |= UART_LCR_SPAR;
+#endif
+
+ if (ch->ch_c_cflag & CSTOPB)
+ lcr |= UART_LCR_STOP;
+
+ switch (ch->ch_c_cflag & CSIZE) {
+ case CS5:
+ lcr |= UART_LCR_WLEN5;
+ break;
+ case CS6:
+ lcr |= UART_LCR_WLEN6;
+ break;
+ case CS7:
+ lcr |= UART_LCR_WLEN7;
+ break;
+ case CS8:
+ default:
+ lcr |= UART_LCR_WLEN8;
+ break;
+ }
+
+ ier = readb(&ch->ch_neo_uart->ier);
+ uart_lcr = readb(&ch->ch_neo_uart->lcr);
+
+ if (baud == 0)
+ baud = 9600;
+
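+	/*
+	 * Divisor latch value: the board's dividend (presumably derived from
+	 * the UART input clock) divided by the requested baud rate.
+	 */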
+ quot = ch->ch_bd->bd_dividend / baud;
+
+ if (quot != 0) {
+ writeb(UART_LCR_DLAB, &ch->ch_neo_uart->lcr);
+ writeb((quot & 0xff), &ch->ch_neo_uart->txrx);
+ writeb((quot >> 8), &ch->ch_neo_uart->ier);
+ writeb(lcr, &ch->ch_neo_uart->lcr);
+ }
+
+ if (uart_lcr != lcr)
+ writeb(lcr, &ch->ch_neo_uart->lcr);
+
+ if (ch->ch_c_cflag & CREAD)
+ ier |= (UART_IER_RDI | UART_IER_RLSI);
+
+ ier |= (UART_IER_THRI | UART_IER_MSI);
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ /* Set new start/stop chars */
+ neo_set_new_start_stop_chars(ch);
+
+ if (ch->ch_c_cflag & CRTSCTS)
+ neo_set_cts_flow_control(ch);
+ else if (ch->ch_c_iflag & IXON) {
+ /* If start/stop is set to disable, then we should disable flow control */
+ if ((ch->ch_startc == __DISABLED_CHAR) || (ch->ch_stopc == __DISABLED_CHAR))
+ neo_set_no_output_flow_control(ch);
+ else
+ neo_set_ixon_flow_control(ch);
+ }
+ else
+ neo_set_no_output_flow_control(ch);
+
+ if (ch->ch_c_cflag & CRTSCTS)
+ neo_set_rts_flow_control(ch);
+ else if (ch->ch_c_iflag & IXOFF) {
+ /* If start/stop is set to disable, then we should disable flow control */
+ if ((ch->ch_startc == __DISABLED_CHAR) || (ch->ch_stopc == __DISABLED_CHAR))
+ neo_set_no_input_flow_control(ch);
+ else
+ neo_set_ixoff_flow_control(ch);
+ }
+ else
+ neo_set_no_input_flow_control(ch);
+ /*
+ * Adjust the RX FIFO Trigger level if baud is less than 9600.
+ * Not exactly elegant, but this is needed because of the Exar chip's
+ * delay on firing off the RX FIFO interrupt on slower baud rates.
+ */
+ if (baud < 9600) {
+ writeb(1, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 1;
+ }
+
+ neo_assert_modem_signals(ch);
+
+ /* Get current status of the modem signals now */
+ neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
+ return;
+}
+
+/*
+ * jsm_neo_intr()
+ *
+ * Neo specific interrupt handler.
+ */
+static irqreturn_t neo_intr(int irq, void *voidbrd)
+{
+ struct jsm_board *brd = voidbrd;
+ struct jsm_channel *ch;
+ int port = 0;
+ int type = 0;
+ int current_port;
+ u32 tmp;
+ u32 uart_poll;
+ unsigned long lock_flags;
+ unsigned long lock_flags2;
+ int outofloop_count = 0;
+
+ /* Lock out the slow poller from running on this board. */
+ spin_lock_irqsave(&brd->bd_intr_lock, lock_flags);
+
+ /*
+	 * Read in "extended" IRQ information from the 32-bit Neo register.
+	 * Bits 0-7: which port triggered the interrupt.
+	 * Bits 8-31: each group of 3 bits indicates what type of interrupt occurred.
+ */
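+	/*
+	 * For example, the type bits for port N live at bits (8 + N*3) through
+	 * (10 + N*3), which is what the "type = tmp >> (8 + (port * 3))"
+	 * extraction in the loop below relies on.
+	 */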
+ uart_poll = readl(brd->re_map_membase + UART_17158_POLL_ADDR_OFFSET);
+
+ jsm_printk(INTR, INFO, &brd->pci_dev,
+ "%s:%d uart_poll: %x\n", __FILE__, __LINE__, uart_poll);
+
+ if (!uart_poll) {
+ jsm_printk(INTR, INFO, &brd->pci_dev,
+			"Kernel interrupted us, but no interrupts were pending...\n");
+ spin_unlock_irqrestore(&brd->bd_intr_lock, lock_flags);
+ return IRQ_NONE;
+ }
+
+ /* At this point, we have at least SOMETHING to service, dig further... */
+
+ current_port = 0;
+
+ /* Loop on each port */
+ while (((uart_poll & 0xff) != 0) && (outofloop_count < 0xff)){
+
+ tmp = uart_poll;
+ outofloop_count++;
+
+ /* Check current port to see if it has interrupt pending */
+ if ((tmp & jsm_offset_table[current_port]) != 0) {
+ port = current_port;
+ type = tmp >> (8 + (port * 3));
+ type &= 0x7;
+ } else {
+ current_port++;
+ continue;
+ }
+
+ jsm_printk(INTR, INFO, &brd->pci_dev,
+ "%s:%d port: %x type: %x\n", __FILE__, __LINE__, port, type);
+
+ /* Remove this port + type from uart_poll */
+ uart_poll &= ~(jsm_offset_table[port]);
+
+ if (!type) {
+ /* If no type, just ignore it, and move onto next port */
+ jsm_printk(INTR, ERR, &brd->pci_dev,
+ "Interrupt with no type! port: %d\n", port);
+ continue;
+ }
+
+ /* Switch on type of interrupt we have */
+ switch (type) {
+
+ case UART_17158_RXRDY_TIMEOUT:
+ /*
+ * RXRDY Time-out is cleared by reading data in the
+ * RX FIFO until it falls below the trigger level.
+ */
+
+ /* Verify the port is in range. */
+			if (port >= brd->nasync)
+ continue;
+
+ ch = brd->channels[port];
+ neo_copy_data_from_uart_to_queue(ch);
+
+ /* Call our tty layer to enforce queue flow control if needed. */
+ spin_lock_irqsave(&ch->ch_lock, lock_flags2);
+ jsm_check_queue_flow_control(ch);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
+
+ continue;
+
+ case UART_17158_RX_LINE_STATUS:
+ /*
+ * RXRDY and RX LINE Status (logic OR of LSR[4:1])
+ */
+ neo_parse_lsr(brd, port);
+ continue;
+
+ case UART_17158_TXRDY:
+ /*
+ * TXRDY interrupt clears after reading ISR register for the UART channel.
+ */
+
+			/*
+			 * Yes, this is odd...
+			 * Why check every possible interrupt type when we
+			 * already know it is TXRDY? Because, for some reason,
+			 * even though we were triggered for TXRDY, the type is
+			 * occasionally wrong: instead of TX we would sometimes
+			 * see things like RXRDY as well. Weird.
+			 */
+ neo_parse_isr(brd, port);
+ continue;
+
+ case UART_17158_MSR:
+ /*
+ * MSR or flow control was seen.
+ */
+ neo_parse_isr(brd, port);
+ continue;
+
+ default:
+ /*
+ * The UART triggered us with a bogus interrupt type.
+			 * It appears the Exar chip, when really bogged down, will throw
+			 * these once in a while.
+			 * It's harmless; just ignore it and move on.
+ */
+ jsm_printk(INTR, ERR, &brd->pci_dev,
+ "%s:%d Unknown Interrupt type: %x\n", __FILE__, __LINE__, type);
+ continue;
+ }
+ }
+
+ spin_unlock_irqrestore(&brd->bd_intr_lock, lock_flags);
+
+ jsm_printk(INTR, INFO, &brd->pci_dev, "finish.\n");
+ return IRQ_HANDLED;
+}
+
+/*
+ * Neo specific way of turning off the receiver.
+ * Used as a way to enforce queue flow control when in
+ * hardware flow control mode.
+ */
+static void neo_disable_receiver(struct jsm_channel *ch)
+{
+ u8 tmp = readb(&ch->ch_neo_uart->ier);
+ tmp &= ~(UART_IER_RDI);
+ writeb(tmp, &ch->ch_neo_uart->ier);
+
+ /* flush write operation */
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+/*
+ * Neo specific way of turning on the receiver.
+ * Used as a way to un-enforce queue flow control when in
+ * hardware flow control mode.
+ */
+static void neo_enable_receiver(struct jsm_channel *ch)
+{
+ u8 tmp = readb(&ch->ch_neo_uart->ier);
+ tmp |= (UART_IER_RDI);
+ writeb(tmp, &ch->ch_neo_uart->ier);
+
+ /* flush write operation */
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+static void neo_send_start_character(struct jsm_channel *ch)
+{
+ if (!ch)
+ return;
+
+ if (ch->ch_startc != __DISABLED_CHAR) {
+ ch->ch_xon_sends++;
+ writeb(ch->ch_startc, &ch->ch_neo_uart->txrx);
+
+ /* flush write operation */
+ neo_pci_posting_flush(ch->ch_bd);
+ }
+}
+
+static void neo_send_stop_character(struct jsm_channel *ch)
+{
+ if (!ch)
+ return;
+
+ if (ch->ch_stopc != __DISABLED_CHAR) {
+ ch->ch_xoff_sends++;
+ writeb(ch->ch_stopc, &ch->ch_neo_uart->txrx);
+
+ /* flush write operation */
+ neo_pci_posting_flush(ch->ch_bd);
+ }
+}
+
+/*
+ * neo_uart_init
+ */
+static void neo_uart_init(struct jsm_channel *ch)
+{
+ writeb(0, &ch->ch_neo_uart->ier);
+ writeb(0, &ch->ch_neo_uart->efr);
+ writeb(UART_EFR_ECB, &ch->ch_neo_uart->efr);
+
+ /* Clear out UART and FIFO */
+ readb(&ch->ch_neo_uart->txrx);
+ writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr);
+ readb(&ch->ch_neo_uart->lsr);
+ readb(&ch->ch_neo_uart->msr);
+
+ ch->ch_flags |= CH_FIFO_ENABLED;
+
+ /* Assert any signals we want up */
+ writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr);
+}
+
+/*
+ * Make the UART completely turn off.
+ */
+static void neo_uart_off(struct jsm_channel *ch)
+{
+ /* Turn off UART enhanced bits */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Stop all interrupts from occurring. */
+ writeb(0, &ch->ch_neo_uart->ier);
+}
+
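+/*
+ * Report whether the transmitter still holds data. Note that this returns a
+ * coarse 0 (empty) / 1 (busy) indication rather than an exact byte count.
+ */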
+static u32 neo_get_uart_bytes_left(struct jsm_channel *ch)
+{
+ u8 left = 0;
+ u8 lsr = readb(&ch->ch_neo_uart->lsr);
+
+ /* We must cache the LSR as some of the bits get reset once read... */
+ ch->ch_cached_lsr |= lsr;
+
+ /* Determine whether the Transmitter is empty or not */
+ if (!(lsr & UART_LSR_TEMT))
+ left = 1;
+ else {
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ left = 0;
+ }
+
+ return left;
+}
+
+/* Channel lock MUST be held by the calling function! */
+static void neo_send_break(struct jsm_channel *ch)
+{
+ /*
+ * Set the time we should stop sending the break.
+ * If we are already sending a break, toss away the existing
+ * time to stop, and use this new value instead.
+ */
+
+ /* Tell the UART to start sending the break */
+ if (!(ch->ch_flags & CH_BREAK_SENDING)) {
+ u8 temp = readb(&ch->ch_neo_uart->lcr);
+ writeb((temp | UART_LCR_SBC), &ch->ch_neo_uart->lcr);
+ ch->ch_flags |= (CH_BREAK_SENDING);
+
+ /* flush write operation */
+ neo_pci_posting_flush(ch->ch_bd);
+ }
+}
+
+/*
+ * neo_send_immediate_char.
+ *
+ * Sends a specific character as soon as possible to the UART,
+ * jumping over any bytes that might be in the write queue.
+ *
+ * The channel lock MUST be held by the calling function.
+ */
+static void neo_send_immediate_char(struct jsm_channel *ch, unsigned char c)
+{
+ if (!ch)
+ return;
+
+ writeb(c, &ch->ch_neo_uart->txrx);
+
+ /* flush write operation */
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+struct board_ops jsm_neo_ops = {
+ .intr = neo_intr,
+ .uart_init = neo_uart_init,
+ .uart_off = neo_uart_off,
+ .param = neo_param,
+ .assert_modem_signals = neo_assert_modem_signals,
+ .flush_uart_write = neo_flush_uart_write,
+ .flush_uart_read = neo_flush_uart_read,
+ .disable_receiver = neo_disable_receiver,
+ .enable_receiver = neo_enable_receiver,
+ .send_break = neo_send_break,
+ .clear_break = neo_clear_break,
+ .send_start_character = neo_send_start_character,
+ .send_stop_character = neo_send_stop_character,
+ .copy_data_from_queue_to_uart = neo_copy_data_from_queue_to_uart,
+ .get_uart_bytes_left = neo_get_uart_bytes_left,
+ .send_immediate_char = neo_send_immediate_char
+};
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
new file mode 100644
index 00000000..434bd881
--- /dev/null
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -0,0 +1,842 @@
+/************************************************************************
+ * Copyright 2003 Digi International (www.digi.com)
+ *
+ * Copyright (C) 2004 IBM Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * Contact Information:
+ * Scott H Kilau <Scott_Kilau@digi.com>
+ * Ananda Venkatarman <mansarov@us.ibm.com>
+ * Modifications:
+ * 01/19/06: changed jsm_input routine to use the dynamically allocated
+ * tty_buffer changes. Contributors: Scott Kilau and Ananda V.
+ ***********************************************************************/
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_reg.h>
+#include <linux/delay.h> /* For udelay */
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "jsm.h"
+
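+/* Tracks which uart line numbers have already been claimed, across all boards. */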
+static DECLARE_BITMAP(linemap, MAXLINES);
+
+static void jsm_carrier(struct jsm_channel *ch);
+
+static inline int jsm_get_mstat(struct jsm_channel *ch)
+{
+ unsigned char mstat;
+ unsigned result;
+
+ jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "start\n");
+
+ mstat = (ch->ch_mostat | ch->ch_mistat);
+
+ result = 0;
+
+ if (mstat & UART_MCR_DTR)
+ result |= TIOCM_DTR;
+ if (mstat & UART_MCR_RTS)
+ result |= TIOCM_RTS;
+ if (mstat & UART_MSR_CTS)
+ result |= TIOCM_CTS;
+ if (mstat & UART_MSR_DSR)
+ result |= TIOCM_DSR;
+ if (mstat & UART_MSR_RI)
+ result |= TIOCM_RI;
+ if (mstat & UART_MSR_DCD)
+ result |= TIOCM_CD;
+
+ jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "finish\n");
+ return result;
+}
+
+static unsigned int jsm_tty_tx_empty(struct uart_port *port)
+{
+ return TIOCSER_TEMT;
+}
+
+/*
+ * Return modem signals to ld.
+ */
+static unsigned int jsm_tty_get_mctrl(struct uart_port *port)
+{
+ int result;
+ struct jsm_channel *channel = (struct jsm_channel *)port;
+
+ jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "start\n");
+
+ result = jsm_get_mstat(channel);
+
+ if (result < 0)
+ return -ENXIO;
+
+ jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "finish\n");
+
+ return result;
+}
+
+/*
+ * jsm_set_modem_info()
+ *
+ * Set modem signals, called by ld.
+ */
+static void jsm_tty_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct jsm_channel *channel = (struct jsm_channel *)port;
+
+ jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "start\n");
+
+ if (mctrl & TIOCM_RTS)
+ channel->ch_mostat |= UART_MCR_RTS;
+ else
+ channel->ch_mostat &= ~UART_MCR_RTS;
+
+ if (mctrl & TIOCM_DTR)
+ channel->ch_mostat |= UART_MCR_DTR;
+ else
+ channel->ch_mostat &= ~UART_MCR_DTR;
+
+ channel->ch_bd->bd_ops->assert_modem_signals(channel);
+
+ jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "finish\n");
+ udelay(10);
+}
+
+/*
+ * jsm_tty_write()
+ *
+ * Take data from the user or kernel and send it out to the FEP.
+ * In here exists all the Transparent Print magic as well.
+ */
+static void jsm_tty_write(struct uart_port *port)
+{
+ struct jsm_channel *channel;
+ channel = container_of(port, struct jsm_channel, uart_port);
+ channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel);
+}
+
+static void jsm_tty_start_tx(struct uart_port *port)
+{
+ struct jsm_channel *channel = (struct jsm_channel *)port;
+
+ jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "start\n");
+
+ channel->ch_flags &= ~(CH_STOP);
+ jsm_tty_write(port);
+
+ jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "finish\n");
+}
+
+static void jsm_tty_stop_tx(struct uart_port *port)
+{
+ struct jsm_channel *channel = (struct jsm_channel *)port;
+
+ jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "start\n");
+
+ channel->ch_flags |= (CH_STOP);
+
+ jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "finish\n");
+}
+
+static void jsm_tty_send_xchar(struct uart_port *port, char ch)
+{
+ unsigned long lock_flags;
+ struct jsm_channel *channel = (struct jsm_channel *)port;
+ struct ktermios *termios;
+
+ spin_lock_irqsave(&port->lock, lock_flags);
+ termios = port->state->port.tty->termios;
+ if (ch == termios->c_cc[VSTART])
+ channel->ch_bd->bd_ops->send_start_character(channel);
+
+ if (ch == termios->c_cc[VSTOP])
+ channel->ch_bd->bd_ops->send_stop_character(channel);
+ spin_unlock_irqrestore(&port->lock, lock_flags);
+}
+
+static void jsm_tty_stop_rx(struct uart_port *port)
+{
+ struct jsm_channel *channel = (struct jsm_channel *)port;
+
+ channel->ch_bd->bd_ops->disable_receiver(channel);
+}
+
+static void jsm_tty_enable_ms(struct uart_port *port)
+{
+ /* Nothing needed */
+}
+
+static void jsm_tty_break(struct uart_port *port, int break_state)
+{
+ unsigned long lock_flags;
+ struct jsm_channel *channel = (struct jsm_channel *)port;
+
+ spin_lock_irqsave(&port->lock, lock_flags);
+ if (break_state == -1)
+ channel->ch_bd->bd_ops->send_break(channel);
+ else
+ channel->ch_bd->bd_ops->clear_break(channel, 0);
+
+ spin_unlock_irqrestore(&port->lock, lock_flags);
+}
+
+static int jsm_tty_open(struct uart_port *port)
+{
+ struct jsm_board *brd;
+ struct jsm_channel *channel = (struct jsm_channel *)port;
+ struct ktermios *termios;
+
+ /* Get board pointer from our array of majors we have allocated */
+ brd = channel->ch_bd;
+
+ /*
+ * Allocate channel buffers for read/write/error.
+ * Set flag, so we don't get trounced on.
+ */
+ channel->ch_flags |= (CH_OPENING);
+
+ /* Drop locks, as malloc with GFP_KERNEL can sleep */
+
+ if (!channel->ch_rqueue) {
+ channel->ch_rqueue = kzalloc(RQUEUESIZE, GFP_KERNEL);
+ if (!channel->ch_rqueue) {
+ jsm_printk(INIT, ERR, &channel->ch_bd->pci_dev,
+ "unable to allocate read queue buf");
+ return -ENOMEM;
+ }
+ }
+ if (!channel->ch_equeue) {
+ channel->ch_equeue = kzalloc(EQUEUESIZE, GFP_KERNEL);
+ if (!channel->ch_equeue) {
+ jsm_printk(INIT, ERR, &channel->ch_bd->pci_dev,
+ "unable to allocate error queue buf");
+ return -ENOMEM;
+ }
+ }
+
+ channel->ch_flags &= ~(CH_OPENING);
+ /*
+ * Initialize if neither terminal is open.
+ */
+ jsm_printk(OPEN, INFO, &channel->ch_bd->pci_dev,
+ "jsm_open: initializing channel in open...\n");
+
+ /*
+ * Flush input queues.
+ */
+ channel->ch_r_head = channel->ch_r_tail = 0;
+ channel->ch_e_head = channel->ch_e_tail = 0;
+
+ brd->bd_ops->flush_uart_write(channel);
+ brd->bd_ops->flush_uart_read(channel);
+
+ channel->ch_flags = 0;
+ channel->ch_cached_lsr = 0;
+ channel->ch_stops_sent = 0;
+
+ termios = port->state->port.tty->termios;
+ channel->ch_c_cflag = termios->c_cflag;
+ channel->ch_c_iflag = termios->c_iflag;
+ channel->ch_c_oflag = termios->c_oflag;
+ channel->ch_c_lflag = termios->c_lflag;
+ channel->ch_startc = termios->c_cc[VSTART];
+ channel->ch_stopc = termios->c_cc[VSTOP];
+
+ /* Tell UART to init itself */
+ brd->bd_ops->uart_init(channel);
+
+ /*
+ * Run param in case we changed anything
+ */
+ brd->bd_ops->param(channel);
+
+ jsm_carrier(channel);
+
+ channel->ch_open_count++;
+
+ jsm_printk(OPEN, INFO, &channel->ch_bd->pci_dev, "finish\n");
+ return 0;
+}
+
+static void jsm_tty_close(struct uart_port *port)
+{
+ struct jsm_board *bd;
+ struct ktermios *ts;
+ struct jsm_channel *channel = (struct jsm_channel *)port;
+
+ jsm_printk(CLOSE, INFO, &channel->ch_bd->pci_dev, "start\n");
+
+ bd = channel->ch_bd;
+ ts = port->state->port.tty->termios;
+
+ channel->ch_flags &= ~(CH_STOPI);
+
+ channel->ch_open_count--;
+
+ /*
+ * If we have HUPCL set, lower DTR and RTS
+ */
+ if (channel->ch_c_cflag & HUPCL) {
+ jsm_printk(CLOSE, INFO, &channel->ch_bd->pci_dev,
+ "Close. HUPCL set, dropping DTR/RTS\n");
+
+ /* Drop RTS/DTR */
+ channel->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
+ bd->bd_ops->assert_modem_signals(channel);
+ }
+
+ /* Turn off UART interrupts for this port */
+ channel->ch_bd->bd_ops->uart_off(channel);
+
+ jsm_printk(CLOSE, INFO, &channel->ch_bd->pci_dev, "finish\n");
+}
+
+static void jsm_tty_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old_termios)
+{
+ unsigned long lock_flags;
+ struct jsm_channel *channel = (struct jsm_channel *)port;
+
+ spin_lock_irqsave(&port->lock, lock_flags);
+ channel->ch_c_cflag = termios->c_cflag;
+ channel->ch_c_iflag = termios->c_iflag;
+ channel->ch_c_oflag = termios->c_oflag;
+ channel->ch_c_lflag = termios->c_lflag;
+ channel->ch_startc = termios->c_cc[VSTART];
+ channel->ch_stopc = termios->c_cc[VSTOP];
+
+ channel->ch_bd->bd_ops->param(channel);
+ jsm_carrier(channel);
+ spin_unlock_irqrestore(&port->lock, lock_flags);
+}
+
+static const char *jsm_tty_type(struct uart_port *port)
+{
+ return "jsm";
+}
+
+static void jsm_tty_release_port(struct uart_port *port)
+{
+}
+
+static int jsm_tty_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void jsm_config_port(struct uart_port *port, int flags)
+{
+ port->type = PORT_JSM;
+}
+
+static struct uart_ops jsm_ops = {
+ .tx_empty = jsm_tty_tx_empty,
+ .set_mctrl = jsm_tty_set_mctrl,
+ .get_mctrl = jsm_tty_get_mctrl,
+ .stop_tx = jsm_tty_stop_tx,
+ .start_tx = jsm_tty_start_tx,
+ .send_xchar = jsm_tty_send_xchar,
+ .stop_rx = jsm_tty_stop_rx,
+ .enable_ms = jsm_tty_enable_ms,
+ .break_ctl = jsm_tty_break,
+ .startup = jsm_tty_open,
+ .shutdown = jsm_tty_close,
+ .set_termios = jsm_tty_set_termios,
+ .type = jsm_tty_type,
+ .release_port = jsm_tty_release_port,
+ .request_port = jsm_tty_request_port,
+ .config_port = jsm_config_port,
+};
+
+/*
+ * jsm_tty_init()
+ *
+ * Init the tty subsystem. Called once per board after board has been
+ * downloaded and init'ed.
+ */
+int __devinit jsm_tty_init(struct jsm_board *brd)
+{
+ int i;
+ void __iomem *vaddr;
+ struct jsm_channel *ch;
+
+ if (!brd)
+ return -ENXIO;
+
+ jsm_printk(INIT, INFO, &brd->pci_dev, "start\n");
+
+ /*
+ * Initialize board structure elements.
+ */
+
+ brd->nasync = brd->maxports;
+
+ /*
+ * Allocate channel memory that might not have been allocated
+ * when the driver was first loaded.
+ */
+ for (i = 0; i < brd->nasync; i++) {
+ if (!brd->channels[i]) {
+
+			/*
+			 * Okay to malloc with GFP_KERNEL, we are not in
+			 * interrupt context, and there are no locks held.
+			 */
+ brd->channels[i] = kzalloc(sizeof(struct jsm_channel), GFP_KERNEL);
+ if (!brd->channels[i]) {
+ jsm_printk(CORE, ERR, &brd->pci_dev,
+ "%s:%d Unable to allocate memory for channel struct\n",
+ __FILE__, __LINE__);
+ }
+ }
+ }
+
+ ch = brd->channels[0];
+ vaddr = brd->re_map_membase;
+
+ /* Set up channel variables */
+ for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
+
+ if (!brd->channels[i])
+ continue;
+
+ spin_lock_init(&ch->ch_lock);
+
+ if (brd->bd_uart_offset == 0x200)
+ ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i);
+
+ ch->ch_bd = brd;
+ ch->ch_portnum = i;
+
+ /* .25 second delay */
+ ch->ch_close_delay = 250;
+
+ init_waitqueue_head(&ch->ch_flags_wait);
+ }
+
+ jsm_printk(INIT, INFO, &brd->pci_dev, "finish\n");
+ return 0;
+}
+
+int jsm_uart_port_init(struct jsm_board *brd)
+{
+ int i, rc;
+ unsigned int line;
+ struct jsm_channel *ch;
+
+ if (!brd)
+ return -ENXIO;
+
+ jsm_printk(INIT, INFO, &brd->pci_dev, "start\n");
+
+ /*
+ * Initialize board structure elements.
+ */
+
+ brd->nasync = brd->maxports;
+
+ /* Set up channel variables */
+ for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
+
+ if (!brd->channels[i])
+ continue;
+
+ brd->channels[i]->uart_port.irq = brd->irq;
+ brd->channels[i]->uart_port.uartclk = 14745600;
+ brd->channels[i]->uart_port.type = PORT_JSM;
+ brd->channels[i]->uart_port.iotype = UPIO_MEM;
+ brd->channels[i]->uart_port.membase = brd->re_map_membase;
+ brd->channels[i]->uart_port.fifosize = 16;
+ brd->channels[i]->uart_port.ops = &jsm_ops;
+ line = find_first_zero_bit(linemap, MAXLINES);
+ if (line >= MAXLINES) {
+			printk(KERN_INFO "jsm: linemap is full, adding device failed\n");
+ continue;
+ } else
+ set_bit(line, linemap);
+ brd->channels[i]->uart_port.line = line;
+		rc = uart_add_one_port(&jsm_uart_driver,
+				&brd->channels[i]->uart_port);
+		if (rc) {
+			printk(KERN_INFO "jsm: Port %d failed. Aborting...\n", i);
+			return rc;
+		} else
+			printk(KERN_INFO "jsm: Port %d added\n", i);
+ }
+
+ jsm_printk(INIT, INFO, &brd->pci_dev, "finish\n");
+ return 0;
+}
+
+int jsm_remove_uart_port(struct jsm_board *brd)
+{
+ int i;
+ struct jsm_channel *ch;
+
+ if (!brd)
+ return -ENXIO;
+
+ jsm_printk(INIT, INFO, &brd->pci_dev, "start\n");
+
+ /*
+ * Initialize board structure elements.
+ */
+
+ brd->nasync = brd->maxports;
+
+ /* Set up channel variables */
+ for (i = 0; i < brd->nasync; i++) {
+
+ if (!brd->channels[i])
+ continue;
+
+ ch = brd->channels[i];
+
+ clear_bit(ch->uart_port.line, linemap);
+ uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
+ }
+
+ jsm_printk(INIT, INFO, &brd->pci_dev, "finish\n");
+ return 0;
+}
+
+void jsm_input(struct jsm_channel *ch)
+{
+ struct jsm_board *bd;
+ struct tty_struct *tp;
+ u32 rmask;
+ u16 head;
+ u16 tail;
+ int data_len;
+ unsigned long lock_flags;
+ int len = 0;
+ int n = 0;
+ int s = 0;
+ int i = 0;
+
+	if (!ch)
+		return;
+
+	jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start\n");
+
+ tp = ch->uart_port.state->port.tty;
+
+ bd = ch->ch_bd;
+	if (!bd)
+ return;
+
+ spin_lock_irqsave(&ch->ch_lock, lock_flags);
+
+	/*
+	 * Figure the number of characters in the buffer.
+	 * Exit immediately if none.
+	 */
+
+ rmask = RQUEUEMASK;
+
+ head = ch->ch_r_head & rmask;
+ tail = ch->ch_r_tail & rmask;
+
+ data_len = (head - tail) & rmask;
+ if (data_len == 0) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ return;
+ }
+
+ jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start\n");
+
+	/*
+	 * If the device is not open, or CREAD is off, flush
+	 * input data and return immediately.
+	 */
+ if (!tp ||
+ !(tp->termios->c_cflag & CREAD) ) {
+
+ jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
+ "input. dropping %d bytes on port %d...\n", data_len, ch->ch_portnum);
+ ch->ch_r_head = tail;
+
+ /* Force queue flow control to be released, if needed */
+ jsm_check_queue_flow_control(ch);
+
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /*
+ * If we are throttled, simply don't read any data.
+ */
+ if (ch->ch_flags & CH_STOPI) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
+ "Port %d throttled, not reading any data. head: %x tail: %x\n",
+ ch->ch_portnum, head, tail);
+ return;
+ }
+
+ jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start 2\n");
+
+ if (data_len <= 0) {
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+ jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "jsm_input 1\n");
+ return;
+ }
+
+ len = tty_buffer_request_room(tp, data_len);
+ n = len;
+
+	/*
+	 * n now contains the maximum amount of data we can copy,
+	 * bounded either by the flip buffer size or the amount
+	 * of data the card actually has pending...
+	 */
+ while (n) {
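+		/*
+		 * s is the longest contiguous run we can copy before the read
+		 * queue wraps at RQUEUESIZE (or just up to head when head has
+		 * not wrapped past tail).
+		 */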
+ s = ((head >= tail) ? head : RQUEUESIZE) - tail;
+ s = min(s, n);
+
+ if (s <= 0)
+ break;
+
+ /*
+ * If conditions are such that ld needs to see all
+ * UART errors, we will have to walk each character
+ * and error byte and send them to the buffer one at
+ * a time.
+ */
+
+ if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
+ for (i = 0; i < s; i++) {
+ /*
+ * Give the Linux ld the flags in the
+ * format it likes.
+ */
+ if (*(ch->ch_equeue +tail +i) & UART_LSR_BI)
+ tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_BREAK);
+ else if (*(ch->ch_equeue +tail +i) & UART_LSR_PE)
+ tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_PARITY);
+ else if (*(ch->ch_equeue +tail +i) & UART_LSR_FE)
+ tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_FRAME);
+ else
+ tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_NORMAL);
+ }
+ } else {
+			tty_insert_flip_string(tp, ch->ch_rqueue + tail, s);
+ }
+ tail += s;
+ n -= s;
+ /* Flip queue if needed */
+ tail &= rmask;
+ }
+
+ ch->ch_r_tail = tail & rmask;
+ ch->ch_e_tail = tail & rmask;
+ jsm_check_queue_flow_control(ch);
+ spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
+
+	/* Tell the tty layer it's okay to "eat" the data now */
+ tty_flip_buffer_push(tp);
+
+ jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "finish\n");
+}
+
+static void jsm_carrier(struct jsm_channel *ch)
+{
+ struct jsm_board *bd;
+
+ int virt_carrier = 0;
+ int phys_carrier = 0;
+
+	if (!ch)
+		return;
+
+	jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev, "start\n");
+
+ bd = ch->ch_bd;
+
+ if (!bd)
+ return;
+
+ if (ch->ch_mistat & UART_MSR_DCD) {
+ jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
+ "mistat: %x D_CD: %x\n", ch->ch_mistat, ch->ch_mistat & UART_MSR_DCD);
+ phys_carrier = 1;
+ }
+
+ if (ch->ch_c_cflag & CLOCAL)
+ virt_carrier = 1;
+
+ jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
+ "DCD: physical: %d virt: %d\n", phys_carrier, virt_carrier);
+
+ /*
+ * Test for a VIRTUAL carrier transition to HIGH.
+ */
+ if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
+
+ /*
+ * When carrier rises, wake any threads waiting
+ * for carrier in the open routine.
+ */
+
+ jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
+ "carrier: virt DCD rose\n");
+
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Test for a PHYSICAL carrier transition to HIGH.
+ */
+ if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
+
+ /*
+ * When carrier rises, wake any threads waiting
+ * for carrier in the open routine.
+ */
+
+ jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
+ "carrier: physical DCD rose\n");
+
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Test for a PHYSICAL transition to low, so long as we aren't
+ * currently ignoring physical transitions (which is what "virtual
+ * carrier" indicates).
+ *
+ * The transition of the virtual carrier to low really doesn't
+ * matter... it really only means "ignore carrier state", not
+ * "make pretend that carrier is there".
+ */
+ if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0)
+ && (phys_carrier == 0)) {
+ /*
+ * When carrier drops:
+ *
+ * Drop carrier on all open units.
+ *
+ * Flush queues, waking up any task waiting in the
+ * line discipline.
+ *
+ * Send a hangup to the control terminal.
+ *
+ * Enable all select calls.
+ */
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Make sure that our cached values reflect the current reality.
+ */
+ if (virt_carrier == 1)
+ ch->ch_flags |= CH_FCAR;
+ else
+ ch->ch_flags &= ~CH_FCAR;
+
+ if (phys_carrier == 1)
+ ch->ch_flags |= CH_CD;
+ else
+ ch->ch_flags &= ~CH_CD;
+}
+
+
+void jsm_check_queue_flow_control(struct jsm_channel *ch)
+{
+ struct board_ops *bd_ops = ch->ch_bd->bd_ops;
+ int qleft;
+
+ /* Store how much space we have left in the queue */
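+	/*
+	 * (tail - head - 1) goes negative once head has wrapped past tail;
+	 * adding the queue size (RQUEUEMASK + 1) recovers the true free count.
+	 */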
+ if ((qleft = ch->ch_r_tail - ch->ch_r_head - 1) < 0)
+ qleft += RQUEUEMASK + 1;
+
+ /*
+ * Check to see if we should enforce flow control on our queue because
+	 * the ld (or user) isn't reading data out of our queue fast enough.
+ *
+ * NOTE: This is done based on what the current flow control of the
+ * port is set for.
+ *
+ * 1) HWFLOW (RTS) - Turn off the UART's Receive interrupt.
+ * This will cause the UART's FIFO to back up, and force
+ * the RTS signal to be dropped.
+ * 2) SWFLOW (IXOFF) - Keep trying to send a stop character to
+ * the other side, in hopes it will stop sending data to us.
+ * 3) NONE - Nothing we can do. We will simply drop any extra data
+ * that gets sent into us when the queue fills up.
+ */
+ if (qleft < 256) {
+ /* HWFLOW */
+ if (ch->ch_c_cflag & CRTSCTS) {
+			if (!(ch->ch_flags & CH_RECEIVER_OFF)) {
+ bd_ops->disable_receiver(ch);
+ ch->ch_flags |= (CH_RECEIVER_OFF);
+ jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
+ "Internal queue hit hilevel mark (%d)! Turning off interrupts.\n",
+ qleft);
+ }
+ }
+ /* SWFLOW */
+ else if (ch->ch_c_iflag & IXOFF) {
+ if (ch->ch_stops_sent <= MAX_STOPS_SENT) {
+ bd_ops->send_stop_character(ch);
+ ch->ch_stops_sent++;
+ jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
+ "Sending stop char! Times sent: %x\n", ch->ch_stops_sent);
+ }
+ }
+ }
+
+ /*
+	 * Check to see if we should stop enforcing flow control because the
+	 * ld (or user) finally read enough data out of our queue.
+ *
+ * NOTE: This is done based on what the current flow control of the
+ * port is set for.
+ *
+ * 1) HWFLOW (RTS) - Turn back on the UART's Receive interrupt.
+ * This will cause the UART's FIFO to raise RTS back up,
+ * which will allow the other side to start sending data again.
+ * 2) SWFLOW (IXOFF) - Send a start character to
+ * the other side, so it will start sending data to us again.
+ * 3) NONE - Do nothing. Since we didn't do anything to turn off the
+ * other side, we don't need to do anything now.
+ */
+ if (qleft > (RQUEUESIZE / 2)) {
+ /* HWFLOW */
+ if (ch->ch_c_cflag & CRTSCTS) {
+ if (ch->ch_flags & CH_RECEIVER_OFF) {
+ bd_ops->enable_receiver(ch);
+ ch->ch_flags &= ~(CH_RECEIVER_OFF);
+ jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
+ "Internal queue hit lowlevel mark (%d)! Turning on interrupts.\n",
+ qleft);
+ }
+ }
+ /* SWFLOW */
+ else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) {
+ ch->ch_stops_sent = 0;
+ bd_ops->send_start_character(ch);
+ jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "Sending start char!\n");
+ }
+ }
+}
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
new file mode 100644
index 00000000..87e7e6c8
--- /dev/null
+++ b/drivers/tty/serial/kgdboc.c
@@ -0,0 +1,328 @@
+/*
+ * Based on the same principle as kgdboe using the NETPOLL api, this
+ * driver uses a console polling api to implement a gdb serial interface
+ * which is multiplexed on a console port.
+ *
+ * Maintainer: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * 2007-2008 (c) Jason Wessel - Wind River Systems, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
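+
+/*
+ * Typical usage (as suggested by the option parsing below): boot with
+ * kgdboc=ttyS0,115200 to attach the debugger to a serial console; an
+ * optional "kms," prefix enables kernel mode setting hooks, and a "kbd"
+ * entry (with CONFIG_KDB_KEYBOARD) polls the keyboard as well.
+ */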
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/vt_kern.h>
+#include <linux/input.h>
+
+#define MAX_CONFIG_LEN 40
+
+static struct kgdb_io kgdboc_io_ops;
+
+/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
+static int configured = -1;
+
+static char config[MAX_CONFIG_LEN];
+static struct kparam_string kps = {
+ .string = config,
+ .maxlen = MAX_CONFIG_LEN,
+};
+
+static int kgdboc_use_kms; /* 1 if we use kernel mode switching */
+static struct tty_driver *kgdb_tty_driver;
+static int kgdb_tty_line;
+
+#ifdef CONFIG_KDB_KEYBOARD
+static int kgdboc_reset_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ input_reset_device(dev);
+
+	/* Return an error - we do not want to bind, just to reset */
+ return -ENODEV;
+}
+
+static void kgdboc_reset_disconnect(struct input_handle *handle)
+{
+ /* We do not expect anyone to actually bind to us */
+ BUG();
+}
+
+static const struct input_device_id kgdboc_reset_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ },
+ { }
+};
+
+static struct input_handler kgdboc_reset_handler = {
+ .connect = kgdboc_reset_connect,
+ .disconnect = kgdboc_reset_disconnect,
+ .name = "kgdboc_reset",
+ .id_table = kgdboc_reset_ids,
+};
+
+static DEFINE_MUTEX(kgdboc_reset_mutex);
+
+static void kgdboc_restore_input_helper(struct work_struct *dummy)
+{
+	/*
+	 * We need to take a mutex to prevent several instances of
+	 * this work running on different CPUs from trying to register
+	 * an already-registered handler again.
+	 */
+ mutex_lock(&kgdboc_reset_mutex);
+
+ if (input_register_handler(&kgdboc_reset_handler) == 0)
+ input_unregister_handler(&kgdboc_reset_handler);
+
+ mutex_unlock(&kgdboc_reset_mutex);
+}
+
+static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper);
+
+static void kgdboc_restore_input(void)
+{
+ if (likely(system_state == SYSTEM_RUNNING))
+ schedule_work(&kgdboc_restore_input_work);
+}
+
+static int kgdboc_register_kbd(char **cptr)
+{
+ if (strncmp(*cptr, "kbd", 3) == 0) {
+ if (kdb_poll_idx < KDB_POLL_FUNC_MAX) {
+ kdb_poll_funcs[kdb_poll_idx] = kdb_get_kbd_char;
+ kdb_poll_idx++;
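+			/*
+			 * "kbd,<device>": skip the "kbd," prefix so the caller
+			 * can still look up the serial device that follows. A
+			 * bare "kbd" (return 1 below) means keyboard-only
+			 * debugging with no polled serial driver.
+			 */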
+ if (cptr[0][3] == ',')
+ *cptr += 4;
+ else
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void kgdboc_unregister_kbd(void)
+{
+ int i;
+
+ for (i = 0; i < kdb_poll_idx; i++) {
+ if (kdb_poll_funcs[i] == kdb_get_kbd_char) {
+ kdb_poll_idx--;
+ kdb_poll_funcs[i] = kdb_poll_funcs[kdb_poll_idx];
+ kdb_poll_funcs[kdb_poll_idx] = NULL;
+ i--;
+ }
+ }
+ flush_work_sync(&kgdboc_restore_input_work);
+}
+#else /* ! CONFIG_KDB_KEYBOARD */
+#define kgdboc_register_kbd(x) 0
+#define kgdboc_unregister_kbd()
+#define kgdboc_restore_input()
+#endif /* ! CONFIG_KDB_KEYBOARD */
+
+static int kgdboc_option_setup(char *opt)
+{
+ if (strlen(opt) >= MAX_CONFIG_LEN) {
+ printk(KERN_ERR "kgdboc: config string too long\n");
+ return -ENOSPC;
+ }
+ strcpy(config, opt);
+
+ return 0;
+}
+
+__setup("kgdboc=", kgdboc_option_setup);
+
+static void cleanup_kgdboc(void)
+{
+ kgdboc_unregister_kbd();
+ if (configured == 1)
+ kgdb_unregister_io_module(&kgdboc_io_ops);
+}
+
+static int configure_kgdboc(void)
+{
+ struct tty_driver *p;
+ int tty_line = 0;
+ int err;
+ char *cptr = config;
+ struct console *cons;
+
+ err = kgdboc_option_setup(config);
+ if (err || !strlen(config) || isspace(config[0]))
+ goto noconfig;
+
+ err = -ENODEV;
+ kgdboc_io_ops.is_console = 0;
+ kgdb_tty_driver = NULL;
+
+ kgdboc_use_kms = 0;
+ if (strncmp(cptr, "kms,", 4) == 0) {
+ cptr += 4;
+ kgdboc_use_kms = 1;
+ }
+
+ if (kgdboc_register_kbd(&cptr))
+ goto do_register;
+
+ p = tty_find_polling_driver(cptr, &tty_line);
+ if (!p)
+ goto noconfig;
+
+ cons = console_drivers;
+ while (cons) {
+ int idx;
+ if (cons->device && cons->device(cons, &idx) == p &&
+ idx == tty_line) {
+ kgdboc_io_ops.is_console = 1;
+ break;
+ }
+ cons = cons->next;
+ }
+
+ kgdb_tty_driver = p;
+ kgdb_tty_line = tty_line;
+
+do_register:
+ err = kgdb_register_io_module(&kgdboc_io_ops);
+ if (err)
+ goto noconfig;
+
+ configured = 1;
+
+ return 0;
+
+noconfig:
+ config[0] = 0;
+ configured = 0;
+ cleanup_kgdboc();
+
+ return err;
+}
+
+static int __init init_kgdboc(void)
+{
+ /* Already configured? */
+ if (configured == 1)
+ return 0;
+
+ return configure_kgdboc();
+}
+
+static int kgdboc_get_char(void)
+{
+ if (!kgdb_tty_driver)
+ return -1;
+ return kgdb_tty_driver->ops->poll_get_char(kgdb_tty_driver,
+ kgdb_tty_line);
+}
+
+static void kgdboc_put_char(u8 chr)
+{
+ if (!kgdb_tty_driver)
+ return;
+ kgdb_tty_driver->ops->poll_put_char(kgdb_tty_driver,
+ kgdb_tty_line, chr);
+}
+
+static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
+{
+ int len = strlen(kmessage);
+
+ if (len >= MAX_CONFIG_LEN) {
+ printk(KERN_ERR "kgdboc: config string too long\n");
+ return -ENOSPC;
+ }
+
+ /* Only copy in the string if the init function has not run yet */
+ if (configured < 0) {
+ strcpy(config, kmessage);
+ return 0;
+ }
+
+ if (kgdb_connected) {
+ printk(KERN_ERR
+ "kgdboc: Cannot reconfigure while KGDB is connected.\n");
+
+ return -EBUSY;
+ }
+
+ strcpy(config, kmessage);
+ /* Chop out \n char as a result of echo */
+ if (config[len - 1] == '\n')
+ config[len - 1] = '\0';
+
+ if (configured == 1)
+ cleanup_kgdboc();
+
+ /* Go and configure with the new params. */
+ return configure_kgdboc();
+}
+
+static int dbg_restore_graphics;
+
+static void kgdboc_pre_exp_handler(void)
+{
+ if (!dbg_restore_graphics && kgdboc_use_kms) {
+ dbg_restore_graphics = 1;
+ con_debug_enter(vc_cons[fg_console].d);
+ }
+ /* Increment the module count when the debugger is active */
+ if (!kgdb_connected)
+ try_module_get(THIS_MODULE);
+}
+
+static void kgdboc_post_exp_handler(void)
+{
+ /* decrement the module count when the debugger detaches */
+ if (!kgdb_connected)
+ module_put(THIS_MODULE);
+ if (kgdboc_use_kms && dbg_restore_graphics) {
+ dbg_restore_graphics = 0;
+ con_debug_leave();
+ }
+ kgdboc_restore_input();
+}
+
+static struct kgdb_io kgdboc_io_ops = {
+ .name = "kgdboc",
+ .read_char = kgdboc_get_char,
+ .write_char = kgdboc_put_char,
+ .pre_exception = kgdboc_pre_exp_handler,
+ .post_exception = kgdboc_post_exp_handler,
+};
+
+#ifdef CONFIG_KGDB_SERIAL_CONSOLE
+/* This is only available if kgdboc is built in, for early debugging */
+static int __init kgdboc_early_init(char *opt)
+{
+ /* save the first character of the config string because the
+ * init routine can destroy it.
+ */
+ char save_ch;
+
+ kgdboc_option_setup(opt);
+ save_ch = config[0];
+ init_kgdboc();
+ config[0] = save_ch;
+ return 0;
+}
+
+early_param("ekgdboc", kgdboc_early_init);
+#endif /* CONFIG_KGDB_SERIAL_CONSOLE */
+
+module_init(init_kgdboc);
+module_exit(cleanup_kgdboc);
+module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644);
+MODULE_PARM_DESC(kgdboc, "<serial_device>[,baud]");
+MODULE_DESCRIPTION("KGDB Console TTY Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
new file mode 100644
index 00000000..58cf279e
--- /dev/null
+++ b/drivers/tty/serial/lantiq.c
@@ -0,0 +1,756 @@
+/*
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (C) 2004 Infineon IFAP DC COM CPE
+ * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2007 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 Thomas Langer, <thomas.langer@lantiq.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+
+#include <lantiq_soc.h>
+
+#define PORT_LTQ_ASC 111
+#define MAXPORTS 2
+#define UART_DUMMY_UER_RX 1
+#define DRVNAME "ltq_asc"
+#ifdef __BIG_ENDIAN
+#define LTQ_ASC_TBUF (0x0020 + 3)
+#define LTQ_ASC_RBUF (0x0024 + 3)
+#else
+#define LTQ_ASC_TBUF 0x0020
+#define LTQ_ASC_RBUF 0x0024
+#endif
+#define LTQ_ASC_FSTAT 0x0048
+#define LTQ_ASC_WHBSTATE 0x0018
+#define LTQ_ASC_STATE 0x0014
+#define LTQ_ASC_IRNCR 0x00F8
+#define LTQ_ASC_CLC 0x0000
+#define LTQ_ASC_ID 0x0008
+#define LTQ_ASC_PISEL 0x0004
+#define LTQ_ASC_TXFCON 0x0044
+#define LTQ_ASC_RXFCON 0x0040
+#define LTQ_ASC_CON 0x0010
+#define LTQ_ASC_BG 0x0050
+#define LTQ_ASC_IRNREN 0x00F4
+
+#define ASC_IRNREN_TX 0x1
+#define ASC_IRNREN_RX 0x2
+#define ASC_IRNREN_ERR 0x4
+#define ASC_IRNREN_TX_BUF 0x8
+#define ASC_IRNCR_TIR 0x1
+#define ASC_IRNCR_RIR 0x2
+#define ASC_IRNCR_EIR 0x4
+
+#define ASCOPT_CSIZE 0x3
+#define TXFIFO_FL 1
+#define RXFIFO_FL 1
+#define ASCCLC_DISS 0x2
+#define ASCCLC_RMCMASK 0x0000FF00
+#define ASCCLC_RMCOFFSET 8
+#define ASCCON_M_8ASYNC 0x0
+#define ASCCON_M_7ASYNC 0x2
+#define ASCCON_ODD 0x00000020
+#define ASCCON_STP 0x00000080
+#define ASCCON_BRS 0x00000100
+#define ASCCON_FDE 0x00000200
+#define ASCCON_R 0x00008000
+#define ASCCON_FEN 0x00020000
+#define ASCCON_ROEN 0x00080000
+#define ASCCON_TOEN 0x00100000
+#define ASCSTATE_PE 0x00010000
+#define ASCSTATE_FE 0x00020000
+#define ASCSTATE_ROE 0x00080000
+#define ASCSTATE_ANY (ASCSTATE_ROE|ASCSTATE_PE|ASCSTATE_FE)
+#define ASCWHBSTATE_CLRREN 0x00000001
+#define ASCWHBSTATE_SETREN 0x00000002
+#define ASCWHBSTATE_CLRPE 0x00000004
+#define ASCWHBSTATE_CLRFE 0x00000008
+#define ASCWHBSTATE_CLRROE 0x00000020
+#define ASCTXFCON_TXFEN 0x0001
+#define ASCTXFCON_TXFFLU 0x0002
+#define ASCTXFCON_TXFITLMASK 0x3F00
+#define ASCTXFCON_TXFITLOFF 8
+#define ASCRXFCON_RXFEN 0x0001
+#define ASCRXFCON_RXFFLU 0x0002
+#define ASCRXFCON_RXFITLMASK 0x3F00
+#define ASCRXFCON_RXFITLOFF 8
+#define ASCFSTAT_RXFFLMASK 0x003F
+#define ASCFSTAT_TXFFLMASK 0x3F00
+#define ASCFSTAT_TXFREEMASK 0x3F000000
+#define ASCFSTAT_TXFREEOFF 24
+
+static void lqasc_tx_chars(struct uart_port *port);
+static struct ltq_uart_port *lqasc_port[MAXPORTS];
+static struct uart_driver lqasc_reg;
+static DEFINE_SPINLOCK(ltq_asc_lock);
+
+struct ltq_uart_port {
+ struct uart_port port;
+ struct clk *clk;
+ unsigned int tx_irq;
+ unsigned int rx_irq;
+ unsigned int err_irq;
+};
+
+static inline struct ltq_uart_port *
+to_ltq_uart_port(struct uart_port *port)
+{
+ return container_of(port, struct ltq_uart_port, port);
+}
+
+static void
+lqasc_stop_tx(struct uart_port *port)
+{
+ return;
+}
+
+static void
+lqasc_start_tx(struct uart_port *port)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ltq_asc_lock, flags);
+ lqasc_tx_chars(port);
+ spin_unlock_irqrestore(&ltq_asc_lock, flags);
+ return;
+}
+
+static void
+lqasc_stop_rx(struct uart_port *port)
+{
+ ltq_w32(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE);
+}
+
+static void
+lqasc_enable_ms(struct uart_port *port)
+{
+}
+
+static int
+lqasc_rx_chars(struct uart_port *port)
+{
+ struct tty_struct *tty = tty_port_tty_get(&port->state->port);
+ unsigned int ch = 0, rsr = 0, fifocnt;
+
+ if (!tty) {
+ dev_dbg(port->dev, "%s:tty is busy now", __func__);
+ return -EBUSY;
+ }
+ fifocnt =
+ ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK;
+ while (fifocnt--) {
+ u8 flag = TTY_NORMAL;
+ ch = ltq_r8(port->membase + LTQ_ASC_RBUF);
+ rsr = (ltq_r32(port->membase + LTQ_ASC_STATE)
+ & ASCSTATE_ANY) | UART_DUMMY_UER_RX;
+ tty_flip_buffer_push(tty);
+ port->icount.rx++;
+
+ /*
+ * Note that the error handling code is
+ * out of the main execution path
+ */
+ if (rsr & ASCSTATE_ANY) {
+ if (rsr & ASCSTATE_PE) {
+ port->icount.parity++;
+ ltq_w32_mask(0, ASCWHBSTATE_CLRPE,
+ port->membase + LTQ_ASC_WHBSTATE);
+ } else if (rsr & ASCSTATE_FE) {
+ port->icount.frame++;
+ ltq_w32_mask(0, ASCWHBSTATE_CLRFE,
+ port->membase + LTQ_ASC_WHBSTATE);
+ }
+ if (rsr & ASCSTATE_ROE) {
+ port->icount.overrun++;
+ ltq_w32_mask(0, ASCWHBSTATE_CLRROE,
+ port->membase + LTQ_ASC_WHBSTATE);
+ }
+
+ rsr &= port->read_status_mask;
+
+ if (rsr & ASCSTATE_PE)
+ flag = TTY_PARITY;
+ else if (rsr & ASCSTATE_FE)
+ flag = TTY_FRAME;
+ }
+
+ if ((rsr & port->ignore_status_mask) == 0)
+ tty_insert_flip_char(tty, ch, flag);
+
+ if (rsr & ASCSTATE_ROE)
+ /*
+ * Overrun is special, since it's reported
+ * immediately, and doesn't affect the current
+ * character
+ */
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+ if (ch != 0)
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+ return 0;
+}
+
+static void
+lqasc_tx_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ if (uart_tx_stopped(port)) {
+ lqasc_stop_tx(port);
+ return;
+ }
+
+ while (((ltq_r32(port->membase + LTQ_ASC_FSTAT) &
+ ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) {
+ if (port->x_char) {
+ ltq_w8(port->x_char, port->membase + LTQ_ASC_TBUF);
+ port->icount.tx++;
+ port->x_char = 0;
+ continue;
+ }
+
+ if (uart_circ_empty(xmit))
+ break;
+
+ ltq_w8(port->state->xmit.buf[port->state->xmit.tail],
+ port->membase + LTQ_ASC_TBUF);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+static irqreturn_t
+lqasc_tx_int(int irq, void *_port)
+{
+ unsigned long flags;
+ struct uart_port *port = (struct uart_port *)_port;
+ spin_lock_irqsave(&ltq_asc_lock, flags);
+ ltq_w32(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR);
+ spin_unlock_irqrestore(&ltq_asc_lock, flags);
+ lqasc_start_tx(port);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+lqasc_err_int(int irq, void *_port)
+{
+ unsigned long flags;
+ struct uart_port *port = (struct uart_port *)_port;
+ spin_lock_irqsave(&ltq_asc_lock, flags);
+ /* clear any pending interrupts */
+ ltq_w32_mask(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE |
+ ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE);
+ spin_unlock_irqrestore(&ltq_asc_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+lqasc_rx_int(int irq, void *_port)
+{
+ unsigned long flags;
+ struct uart_port *port = (struct uart_port *)_port;
+ spin_lock_irqsave(&ltq_asc_lock, flags);
+ ltq_w32(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR);
+ lqasc_rx_chars(port);
+ spin_unlock_irqrestore(&ltq_asc_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static unsigned int
+lqasc_tx_empty(struct uart_port *port)
+{
+ int status;
+ status = ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK;
+ return status ? 0 : TIOCSER_TEMT;
+}
+
+static unsigned int
+lqasc_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CTS | TIOCM_CAR | TIOCM_DSR;
+}
+
+static void
+lqasc_set_mctrl(struct uart_port *port, u_int mctrl)
+{
+}
+
+static void
+lqasc_break_ctl(struct uart_port *port, int break_state)
+{
+}
+
+static int
+lqasc_startup(struct uart_port *port)
+{
+ struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
+ int retval;
+
+ port->uartclk = clk_get_rate(ltq_port->clk);
+
+ ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
+ port->membase + LTQ_ASC_CLC);
+
+ ltq_w32(0, port->membase + LTQ_ASC_PISEL);
+ ltq_w32(
+ ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) |
+ ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU,
+ port->membase + LTQ_ASC_TXFCON);
+ ltq_w32(
+ ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK)
+ | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU,
+ port->membase + LTQ_ASC_RXFCON);
+ /* make sure other settings are written to hardware before
+ * setting enable bits
+ */
+ wmb();
+ ltq_w32_mask(0, ASCCON_M_8ASYNC | ASCCON_FEN | ASCCON_TOEN |
+ ASCCON_ROEN, port->membase + LTQ_ASC_CON);
+
+ retval = request_irq(ltq_port->tx_irq, lqasc_tx_int,
+ IRQF_DISABLED, "asc_tx", port);
+ if (retval) {
+ pr_err("failed to request lqasc_tx_int\n");
+ return retval;
+ }
+
+ retval = request_irq(ltq_port->rx_irq, lqasc_rx_int,
+ IRQF_DISABLED, "asc_rx", port);
+ if (retval) {
+ pr_err("failed to request lqasc_rx_int\n");
+ goto err1;
+ }
+
+ retval = request_irq(ltq_port->err_irq, lqasc_err_int,
+ IRQF_DISABLED, "asc_err", port);
+ if (retval) {
+ pr_err("failed to request lqasc_err_int\n");
+ goto err2;
+ }
+
+ ltq_w32(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX,
+ port->membase + LTQ_ASC_IRNREN);
+ return 0;
+
+err2:
+ free_irq(ltq_port->rx_irq, port);
+err1:
+ free_irq(ltq_port->tx_irq, port);
+ return retval;
+}
+
+static void
+lqasc_shutdown(struct uart_port *port)
+{
+ struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
+ free_irq(ltq_port->tx_irq, port);
+ free_irq(ltq_port->rx_irq, port);
+ free_irq(ltq_port->err_irq, port);
+
+ ltq_w32(0, port->membase + LTQ_ASC_CON);
+ ltq_w32_mask(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU,
+ port->membase + LTQ_ASC_RXFCON);
+ ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
+ port->membase + LTQ_ASC_TXFCON);
+}
+
+static void
+lqasc_set_termios(struct uart_port *port,
+ struct ktermios *new, struct ktermios *old)
+{
+ unsigned int cflag;
+ unsigned int iflag;
+ unsigned int divisor;
+ unsigned int baud;
+ unsigned int con = 0;
+ unsigned long flags;
+
+ cflag = new->c_cflag;
+ iflag = new->c_iflag;
+
+ switch (cflag & CSIZE) {
+ case CS7:
+ con = ASCCON_M_7ASYNC;
+ break;
+
+ case CS5:
+ case CS6:
+ default:
+		new->c_cflag &= ~CSIZE;
+ new->c_cflag |= CS8;
+ con = ASCCON_M_8ASYNC;
+ break;
+ }
+
+ cflag &= ~CMSPAR; /* Mark/Space parity is not supported */
+
+ if (cflag & CSTOPB)
+ con |= ASCCON_STP;
+
+ if (cflag & PARENB) {
+ if (!(cflag & PARODD))
+ con &= ~ASCCON_ODD;
+ else
+ con |= ASCCON_ODD;
+ }
+
+ port->read_status_mask = ASCSTATE_ROE;
+ if (iflag & INPCK)
+ port->read_status_mask |= ASCSTATE_FE | ASCSTATE_PE;
+
+ port->ignore_status_mask = 0;
+ if (iflag & IGNPAR)
+ port->ignore_status_mask |= ASCSTATE_FE | ASCSTATE_PE;
+
+ if (iflag & IGNBRK) {
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (iflag & IGNPAR)
+ port->ignore_status_mask |= ASCSTATE_ROE;
+ }
+
+ if ((cflag & CREAD) == 0)
+ port->ignore_status_mask |= UART_DUMMY_UER_RX;
+
+ /* set error signals - framing, parity and overrun, enable receiver */
+ con |= ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN;
+
+ spin_lock_irqsave(&ltq_asc_lock, flags);
+
+ /* set up CON */
+ ltq_w32_mask(0, con, port->membase + LTQ_ASC_CON);
+
+ /* Set baud rate - take a divider of 2 into account */
+ baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
+ divisor = uart_get_divisor(port, baud);
+ divisor = divisor / 2 - 1;
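+	/*
+	 * uart_get_divisor() yields roughly uartclk / (16 * baud); halving it
+	 * matches the fixed /2 prescaler selected below (ASCCON_BRS cleared),
+	 * and the extra -1 is presumably because the baud-rate generator
+	 * counts from zero.
+	 */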
+
+ /* disable the baudrate generator */
+ ltq_w32_mask(ASCCON_R, 0, port->membase + LTQ_ASC_CON);
+
+ /* make sure the fractional divider is off */
+ ltq_w32_mask(ASCCON_FDE, 0, port->membase + LTQ_ASC_CON);
+
+ /* set up to use divisor of 2 */
+ ltq_w32_mask(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON);
+
+ /* now we can write the new baudrate into the register */
+ ltq_w32(divisor, port->membase + LTQ_ASC_BG);
+
+ /* turn the baudrate generator back on */
+ ltq_w32_mask(0, ASCCON_R, port->membase + LTQ_ASC_CON);
+
+ /* enable rx */
+ ltq_w32(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE);
+
+ spin_unlock_irqrestore(&ltq_asc_lock, flags);
+
+ /* Don't rewrite B0 */
+ if (tty_termios_baud_rate(new))
+ tty_termios_encode_baud_rate(new, baud, baud);
+}
+
+static const char*
+lqasc_type(struct uart_port *port)
+{
+ if (port->type == PORT_LTQ_ASC)
+ return DRVNAME;
+ else
+ return NULL;
+}
+
+static void
+lqasc_release_port(struct uart_port *port)
+{
+ if (port->flags & UPF_IOREMAP) {
+ iounmap(port->membase);
+ port->membase = NULL;
+ }
+}
+
+static int
+lqasc_request_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *res;
+ int size;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot obtain I/O memory region");
+ return -ENODEV;
+ }
+ size = resource_size(res);
+
+ res = devm_request_mem_region(&pdev->dev, res->start,
+ size, dev_name(&pdev->dev));
+ if (!res) {
+ dev_err(&pdev->dev, "cannot request I/O memory region");
+ return -EBUSY;
+ }
+
+ if (port->flags & UPF_IOREMAP) {
+ port->membase = devm_ioremap_nocache(&pdev->dev,
+ port->mapbase, size);
+ if (port->membase == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void
+lqasc_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_LTQ_ASC;
+ lqasc_request_port(port);
+ }
+}
+
+static int
+lqasc_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ int ret = 0;
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_LTQ_ASC)
+ ret = -EINVAL;
+ if (ser->irq < 0 || ser->irq >= NR_IRQS)
+ ret = -EINVAL;
+ if (ser->baud_base < 9600)
+ ret = -EINVAL;
+ return ret;
+}
+
+static struct uart_ops lqasc_pops = {
+ .tx_empty = lqasc_tx_empty,
+ .set_mctrl = lqasc_set_mctrl,
+ .get_mctrl = lqasc_get_mctrl,
+ .stop_tx = lqasc_stop_tx,
+ .start_tx = lqasc_start_tx,
+ .stop_rx = lqasc_stop_rx,
+ .enable_ms = lqasc_enable_ms,
+ .break_ctl = lqasc_break_ctl,
+ .startup = lqasc_startup,
+ .shutdown = lqasc_shutdown,
+ .set_termios = lqasc_set_termios,
+ .type = lqasc_type,
+ .release_port = lqasc_release_port,
+ .request_port = lqasc_request_port,
+ .config_port = lqasc_config_port,
+ .verify_port = lqasc_verify_port,
+};
+
+static void
+lqasc_console_putchar(struct uart_port *port, int ch)
+{
+ int fifofree;
+
+ if (!port->membase)
+ return;
+
+ do {
+ fifofree = (ltq_r32(port->membase + LTQ_ASC_FSTAT)
+ & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF;
+ } while (fifofree == 0);
+ ltq_w8(ch, port->membase + LTQ_ASC_TBUF);
+}
+
+
+static void
+lqasc_console_write(struct console *co, const char *s, u_int count)
+{
+ struct ltq_uart_port *ltq_port;
+ struct uart_port *port;
+ unsigned long flags;
+
+ if (co->index >= MAXPORTS)
+ return;
+
+ ltq_port = lqasc_port[co->index];
+ if (!ltq_port)
+ return;
+
+ port = &ltq_port->port;
+
+ spin_lock_irqsave(&ltq_asc_lock, flags);
+ uart_console_write(port, s, count, lqasc_console_putchar);
+ spin_unlock_irqrestore(&ltq_asc_lock, flags);
+}
+
+static int __init
+lqasc_console_setup(struct console *co, char *options)
+{
+ struct ltq_uart_port *ltq_port;
+ struct uart_port *port;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index >= MAXPORTS)
+ return -ENODEV;
+
+ ltq_port = lqasc_port[co->index];
+ if (!ltq_port)
+ return -ENODEV;
+
+ port = &ltq_port->port;
+
+ port->uartclk = clk_get_rate(ltq_port->clk);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct console lqasc_console = {
+ .name = "ttyLTQ",
+ .write = lqasc_console_write,
+ .device = uart_console_device,
+ .setup = lqasc_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &lqasc_reg,
+};
+
+static int __init
+lqasc_console_init(void)
+{
+ register_console(&lqasc_console);
+ return 0;
+}
+console_initcall(lqasc_console_init);
+
+static struct uart_driver lqasc_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = DRVNAME,
+ .dev_name = "ttyLTQ",
+ .major = 0,
+ .minor = 0,
+ .nr = MAXPORTS,
+ .cons = &lqasc_console,
+};
+
+static int __init
+lqasc_probe(struct platform_device *pdev)
+{
+ struct ltq_uart_port *ltq_port;
+ struct uart_port *port;
+ struct resource *mmres, *irqres;
+ int tx_irq, rx_irq, err_irq;
+ struct clk *clk;
+ int ret;
+
+ mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!mmres || !irqres)
+ return -ENODEV;
+
+ if (pdev->id >= MAXPORTS)
+ return -EBUSY;
+
+ if (lqasc_port[pdev->id] != NULL)
+ return -EBUSY;
+
+ clk = clk_get(&pdev->dev, "fpi");
+ if (IS_ERR(clk)) {
+ pr_err("failed to get fpi clk\n");
+ return -ENOENT;
+ }
+
+ tx_irq = platform_get_irq_byname(pdev, "tx");
+ rx_irq = platform_get_irq_byname(pdev, "rx");
+ err_irq = platform_get_irq_byname(pdev, "err");
+ if ((tx_irq < 0) || (rx_irq < 0) || (err_irq < 0))
+ return -ENODEV;
+
+ ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL);
+ if (!ltq_port)
+ return -ENOMEM;
+
+ port = &ltq_port->port;
+
+ port->iotype = UPIO_MEM;
+ port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
+ port->ops = &lqasc_pops;
+ port->fifosize = 16;
+ port->type = PORT_LTQ_ASC;
+ port->line = pdev->id;
+ port->dev = &pdev->dev;
+
+ port->irq = tx_irq; /* unused, just to be backward-compatible */
+ port->mapbase = mmres->start;
+
+ ltq_port->clk = clk;
+
+ ltq_port->tx_irq = tx_irq;
+ ltq_port->rx_irq = rx_irq;
+ ltq_port->err_irq = err_irq;
+
+ lqasc_port[pdev->id] = ltq_port;
+ platform_set_drvdata(pdev, ltq_port);
+
+ ret = uart_add_one_port(&lqasc_reg, port);
+
+ return ret;
+}
+
+static struct platform_driver lqasc_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init
+init_lqasc(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&lqasc_reg);
+ if (ret != 0)
+ return ret;
+
+ ret = platform_driver_probe(&lqasc_driver, lqasc_probe);
+ if (ret != 0)
+ uart_unregister_driver(&lqasc_reg);
+
+ return ret;
+}
+
+module_init(init_lqasc);
+
+MODULE_DESCRIPTION("Lantiq serial port driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
new file mode 100644
index 00000000..84db7321
--- /dev/null
+++ b/drivers/tty/serial/m32r_sio.c
@@ -0,0 +1,1193 @@
+/*
+ * m32r_sio.c
+ *
+ * Driver for M32R serial ports
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ * Based on drivers/serial/8250.c.
+ *
+ * Copyright (C) 2001 Russell King.
+ * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/*
+ * A note about mapbase / membase
+ *
+ * mapbase is the physical address of the IO port. Currently, we don't
+ * support this very well, and it may well be dropped from this driver
+ * in future. As such, mapbase should be NULL.
+ *
+ * membase is an 'ioremapped' cookie. This is compatible with the old
+ * serial.c driver, and is currently the preferred form.
+ */
+
+#if defined(CONFIG_SERIAL_M32R_SIO_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/serial.h>
+#include <linux/serialP.h>
+#include <linux/delay.h>
+
+#include <asm/m32r.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#define PORT_M32R_BASE PORT_M32R_SIO
+#define PORT_INDEX(x) (x - PORT_M32R_BASE + 1)
+#define BAUD_RATE 115200
+
+#include <linux/serial_core.h>
+#include "m32r_sio.h"
+#include "m32r_sio_reg.h"
+
+/*
+ * Debugging.
+ */
+#if 0
+#define DEBUG_AUTOCONF(fmt...) printk(fmt)
+#else
+#define DEBUG_AUTOCONF(fmt...) do { } while (0)
+#endif
+
+#if 0
+#define DEBUG_INTR(fmt...) printk(fmt)
+#else
+#define DEBUG_INTR(fmt...) do { } while (0)
+#endif
+
+#define PASS_LIMIT 256
+
+/*
+ * We default to IRQ0 for the "no irq" hack. Some
+ * machine types want others as well - they're free
+ * to redefine this in their header file.
+ */
+#define is_real_interrupt(irq) ((irq) != 0)
+
+#define BASE_BAUD 115200
+
+/* Standard COM flags */
+#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)
+
+/*
+ * SERIAL_PORT_DFNS tells us about built-in ports that have no
+ * standard enumeration mechanism. Platforms that can find all
+ * serial ports via mechanisms like ACPI or PCI need not supply it.
+ */
+#if defined(CONFIG_PLAT_USRV)
+
+#define SERIAL_PORT_DFNS \
+ /* UART CLK PORT IRQ FLAGS */ \
+ { 0, BASE_BAUD, 0x3F8, PLD_IRQ_UART0, STD_COM_FLAGS }, /* ttyS0 */ \
+ { 0, BASE_BAUD, 0x2F8, PLD_IRQ_UART1, STD_COM_FLAGS }, /* ttyS1 */
+
+#else /* !CONFIG_PLAT_USRV */
+
+#if defined(CONFIG_SERIAL_M32R_PLDSIO)
+#define SERIAL_PORT_DFNS \
+ { 0, BASE_BAUD, ((unsigned long)PLD_ESIO0CR), PLD_IRQ_SIO0_RCV, \
+ STD_COM_FLAGS }, /* ttyS0 */
+#else
+#define SERIAL_PORT_DFNS \
+ { 0, BASE_BAUD, M32R_SIO_OFFSET, M32R_IRQ_SIO0_R, \
+ STD_COM_FLAGS }, /* ttyS0 */
+#endif
+
+#endif /* !CONFIG_PLAT_USRV */
+
+static struct old_serial_port old_serial_port[] = {
+ SERIAL_PORT_DFNS
+};
+
+#define UART_NR ARRAY_SIZE(old_serial_port)
+
+struct uart_sio_port {
+ struct uart_port port;
+ struct timer_list timer; /* "no irq" timer */
+ struct list_head list; /* ports on this IRQ */
+ unsigned short rev;
+ unsigned char acr;
+ unsigned char ier;
+ unsigned char lcr;
+ unsigned char mcr_mask; /* mask of user bits */
+ unsigned char mcr_force; /* mask of forced bits */
+ unsigned char lsr_break_flag;
+
+ /*
+ * We provide a per-port pm hook.
+ */
+ void (*pm)(struct uart_port *port,
+ unsigned int state, unsigned int old);
+};
+
+struct irq_info {
+ spinlock_t lock;
+ struct list_head *head;
+};
+
+static struct irq_info irq_lists[NR_IRQS];
+
+/*
+ * Here we define the default xmit fifo size used for each type of UART.
+ */
+static const struct serial_uart_config uart_config[] = {
+ [PORT_UNKNOWN] = {
+ .name = "unknown",
+ .dfl_xmit_fifo_size = 1,
+ .flags = 0,
+ },
+ [PORT_INDEX(PORT_M32R_SIO)] = {
+ .name = "M32RSIO",
+ .dfl_xmit_fifo_size = 1,
+ .flags = 0,
+ },
+};
+
+#ifdef CONFIG_SERIAL_M32R_PLDSIO
+
+#define __sio_in(x) inw((unsigned long)(x))
+#define __sio_out(v,x) outw((v),(unsigned long)(x))
+
+static inline void sio_set_baud_rate(unsigned long baud)
+{
+ unsigned short sbaud;
+ sbaud = (boot_cpu_data.bus_clock / (baud * 4))-1;
+ __sio_out(sbaud, PLD_ESIO0BAUR);
+}
+
+static void sio_reset(void)
+{
+ unsigned short tmp;
+
+ tmp = __sio_in(PLD_ESIO0RXB);
+ tmp = __sio_in(PLD_ESIO0RXB);
+ tmp = __sio_in(PLD_ESIO0CR);
+ sio_set_baud_rate(BAUD_RATE);
+ __sio_out(0x0300, PLD_ESIO0CR);
+ __sio_out(0x0003, PLD_ESIO0CR);
+}
+
+static void sio_init(void)
+{
+ unsigned short tmp;
+
+ tmp = __sio_in(PLD_ESIO0RXB);
+ tmp = __sio_in(PLD_ESIO0RXB);
+ tmp = __sio_in(PLD_ESIO0CR);
+ __sio_out(0x0300, PLD_ESIO0CR);
+ __sio_out(0x0003, PLD_ESIO0CR);
+}
+
+static void sio_error(int *status)
+{
+ printk("SIO0 error[%04x]\n", *status);
+ do {
+ sio_init();
+ } while ((*status = __sio_in(PLD_ESIO0CR)) != 3);
+}
+
+#else /* not CONFIG_SERIAL_M32R_PLDSIO */
+
+#define __sio_in(x) inl(x)
+#define __sio_out(v,x) outl((v),(x))
+
+static inline void sio_set_baud_rate(unsigned long baud)
+{
+ unsigned long i, j;
+
+ i = boot_cpu_data.bus_clock / (baud * 16);
+ j = (boot_cpu_data.bus_clock - (i * baud * 16)) / baud;
+ i -= 1;
+ j = (j + 1) >> 1;
+
+ __sio_out(i, M32R_SIO0_BAUR_PORTL);
+ __sio_out(j, M32R_SIO0_RBAUR_PORTL);
+}
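+
+/*
+ * Worked example (illustrative only; the bus clock value is assumed):
+ * with boot_cpu_data.bus_clock = 50,000,000 and baud = 115200,
+ *   i = 50000000 / (115200 * 16) = 27
+ *   j = (50000000 - 27 * 115200 * 16) / 115200 = 2
+ * so BAUR is programmed with i - 1 = 26 and RBAUR with (j + 1) >> 1 = 1.
+ */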
+
+static void sio_reset(void)
+{
+ __sio_out(0x00000300, M32R_SIO0_CR_PORTL); /* init status */
+ __sio_out(0x00000800, M32R_SIO0_MOD1_PORTL); /* 8bit */
+ __sio_out(0x00000080, M32R_SIO0_MOD0_PORTL); /* 1stop non */
+ sio_set_baud_rate(BAUD_RATE);
+ __sio_out(0x00000000, M32R_SIO0_TRCR_PORTL);
+ __sio_out(0x00000003, M32R_SIO0_CR_PORTL); /* RXCEN */
+}
+
+static void sio_init(void)
+{
+ unsigned int tmp;
+
+ tmp = __sio_in(M32R_SIO0_RXB_PORTL);
+ tmp = __sio_in(M32R_SIO0_RXB_PORTL);
+ tmp = __sio_in(M32R_SIO0_STS_PORTL);
+ __sio_out(0x00000003, M32R_SIO0_CR_PORTL);
+}
+
+static void sio_error(int *status)
+{
+ printk("SIO0 error[%04x]\n", *status);
+ do {
+ sio_init();
+ } while ((*status = __sio_in(M32R_SIO0_CR_PORTL)) != 3);
+}
+
+#endif /* CONFIG_SERIAL_M32R_PLDSIO */
+
+static unsigned int sio_in(struct uart_sio_port *up, int offset)
+{
+ return __sio_in(up->port.iobase + offset);
+}
+
+static void sio_out(struct uart_sio_port *up, int offset, int value)
+{
+ __sio_out(value, up->port.iobase + offset);
+}
+
+static unsigned int serial_in(struct uart_sio_port *up, int offset)
+{
+ if (!offset)
+ return 0;
+
+ return __sio_in(offset);
+}
+
+static void serial_out(struct uart_sio_port *up, int offset, int value)
+{
+ if (!offset)
+ return;
+
+ __sio_out(value, offset);
+}
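+
+/*
+ * Note on the two accessor families above: sio_in()/sio_out() add the
+ * SIOxxx register offset to the port's iobase, whereas serial_in()/
+ * serial_out() treat the UART_xxx macros from m32r_sio_reg.h as absolute
+ * port addresses and silently ignore macros that are defined as 0.
+ */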
+
+static void m32r_sio_stop_tx(struct uart_port *port)
+{
+ struct uart_sio_port *up = (struct uart_sio_port *)port;
+
+ if (up->ier & UART_IER_THRI) {
+ up->ier &= ~UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+static void m32r_sio_start_tx(struct uart_port *port)
+{
+#ifdef CONFIG_SERIAL_M32R_PLDSIO
+ struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct circ_buf *xmit = &up->port.state->xmit;
+
+ if (!(up->ier & UART_IER_THRI)) {
+ up->ier |= UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ }
+ while ((serial_in(up, UART_LSR) & UART_EMPTY) != UART_EMPTY);
+#else
+ struct uart_sio_port *up = (struct uart_sio_port *)port;
+
+ if (!(up->ier & UART_IER_THRI)) {
+ up->ier |= UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+#endif
+}
+
+static void m32r_sio_stop_rx(struct uart_port *port)
+{
+ struct uart_sio_port *up = (struct uart_sio_port *)port;
+
+ up->ier &= ~UART_IER_RLSI;
+ up->port.read_status_mask &= ~UART_LSR_DR;
+ serial_out(up, UART_IER, up->ier);
+}
+
+static void m32r_sio_enable_ms(struct uart_port *port)
+{
+ struct uart_sio_port *up = (struct uart_sio_port *)port;
+
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
+}
+
+static void receive_chars(struct uart_sio_port *up, int *status)
+{
+ struct tty_struct *tty = up->port.state->port.tty;
+ unsigned char ch;
+ unsigned char flag;
+ int max_count = 256;
+
+ do {
+ ch = sio_in(up, SIORXB);
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+
+ if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
+ UART_LSR_FE | UART_LSR_OE))) {
+ /*
+ * For statistics only
+ */
+ if (*status & UART_LSR_BI) {
+ *status &= ~(UART_LSR_FE | UART_LSR_PE);
+ up->port.icount.brk++;
+ /*
+ * We do the SysRQ and SAK checking
+ * here because otherwise the break
+ * may get masked by ignore_status_mask
+ * or read_status_mask.
+ */
+ if (uart_handle_break(&up->port))
+ goto ignore_char;
+ } else if (*status & UART_LSR_PE)
+ up->port.icount.parity++;
+ else if (*status & UART_LSR_FE)
+ up->port.icount.frame++;
+ if (*status & UART_LSR_OE)
+ up->port.icount.overrun++;
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ *status &= up->port.read_status_mask;
+
+ if (up->port.line == up->port.cons->index) {
+ /* Recover the break flag from console xmit */
+ *status |= up->lsr_break_flag;
+ up->lsr_break_flag = 0;
+ }
+
+ if (*status & UART_LSR_BI) {
+ DEBUG_INTR("handling break....");
+ flag = TTY_BREAK;
+ } else if (*status & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (*status & UART_LSR_FE)
+ flag = TTY_FRAME;
+ }
+ if (uart_handle_sysrq_char(&up->port, ch))
+ goto ignore_char;
+ if ((*status & up->port.ignore_status_mask) == 0)
+ tty_insert_flip_char(tty, ch, flag);
+
+ if (*status & UART_LSR_OE) {
+ /*
+ * Overrun is special, since it's reported
+ * immediately, and doesn't affect the current
+ * character.
+ */
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+ ignore_char:
+ *status = serial_in(up, UART_LSR);
+ } while ((*status & UART_LSR_DR) && (max_count-- > 0));
+ tty_flip_buffer_push(tty);
+}
+
+static void transmit_chars(struct uart_sio_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int count;
+
+ if (up->port.x_char) {
+#ifndef CONFIG_SERIAL_M32R_PLDSIO /* XXX */
+ serial_out(up, UART_TX, up->port.x_char);
+#endif
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+ m32r_sio_stop_tx(&up->port);
+ return;
+ }
+
+ count = up->port.fifosize;
+ do {
+ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ while (!(serial_in(up, UART_LSR) & UART_LSR_THRE));
+
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ DEBUG_INTR("THRE...");
+
+ if (uart_circ_empty(xmit))
+ m32r_sio_stop_tx(&up->port);
+}
+
+/*
+ * This handles the interrupt from one port.
+ */
+static inline void m32r_sio_handle_port(struct uart_sio_port *up,
+ unsigned int status)
+{
+ DEBUG_INTR("status = %x...", status);
+
+ if (status & 0x04)
+ receive_chars(up, &status);
+ if (status & 0x01)
+ transmit_chars(up);
+}
+
+/*
+ * This is the serial driver's interrupt routine.
+ *
+ * Arjan thinks the old way was overly complex, so it got simplified.
+ * Alan disagrees, saying that we need the complexity to handle the weird
+ * nature of ISA shared interrupts. (This is a special exception.)
+ *
+ * In order to handle ISA shared interrupts properly, we need to check
+ * that all ports have been serviced, and therefore the ISA interrupt
+ * line has been de-asserted.
+ *
+ * This means we need to loop through all ports, checking that they
+ * don't have an interrupt pending.
+ */
+static irqreturn_t m32r_sio_interrupt(int irq, void *dev_id)
+{
+ struct irq_info *i = dev_id;
+ struct list_head *l, *end = NULL;
+ int pass_counter = 0;
+
+ DEBUG_INTR("m32r_sio_interrupt(%d)...", irq);
+
+#ifdef CONFIG_SERIAL_M32R_PLDSIO
+// if (irq == PLD_IRQ_SIO0_SND)
+// irq = PLD_IRQ_SIO0_RCV;
+#else
+ if (irq == M32R_IRQ_SIO0_S)
+ irq = M32R_IRQ_SIO0_R;
+#endif
+
+ spin_lock(&i->lock);
+
+ l = i->head;
+ do {
+ struct uart_sio_port *up;
+ unsigned int sts;
+
+ up = list_entry(l, struct uart_sio_port, list);
+
+ sts = sio_in(up, SIOSTS);
+ if (sts & 0x5) {
+ spin_lock(&up->port.lock);
+ m32r_sio_handle_port(up, sts);
+ spin_unlock(&up->port.lock);
+
+ end = NULL;
+ } else if (end == NULL)
+ end = l;
+
+ l = l->next;
+
+ if (l == i->head && pass_counter++ > PASS_LIMIT) {
+ if (sts & 0xe0)
+ sio_error(&sts);
+ break;
+ }
+ } while (l != end);
+
+ spin_unlock(&i->lock);
+
+ DEBUG_INTR("end.\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * To support ISA shared interrupts, we need to have one interrupt
+ * handler that ensures that the IRQ line has been deasserted
+ * before returning. Failing to do this will result in the IRQ
+ * line being stuck active, and, since ISA irqs are edge triggered,
+ * no more IRQs will be seen.
+ */
+static void serial_do_unlink(struct irq_info *i, struct uart_sio_port *up)
+{
+ spin_lock_irq(&i->lock);
+
+ if (!list_empty(i->head)) {
+ if (i->head == &up->list)
+ i->head = i->head->next;
+ list_del(&up->list);
+ } else {
+ BUG_ON(i->head != &up->list);
+ i->head = NULL;
+ }
+
+ spin_unlock_irq(&i->lock);
+}
+
+static int serial_link_irq_chain(struct uart_sio_port *up)
+{
+ struct irq_info *i = irq_lists + up->port.irq;
+ int ret, irq_flags = 0;
+
+ spin_lock_irq(&i->lock);
+
+ if (i->head) {
+ list_add(&up->list, i->head);
+ spin_unlock_irq(&i->lock);
+
+ ret = 0;
+ } else {
+ INIT_LIST_HEAD(&up->list);
+ i->head = &up->list;
+ spin_unlock_irq(&i->lock);
+
+ ret = request_irq(up->port.irq, m32r_sio_interrupt,
+ irq_flags, "SIO0-RX", i);
+ ret |= request_irq(up->port.irq + 1, m32r_sio_interrupt,
+ irq_flags, "SIO0-TX", i);
+ if (ret < 0)
+ serial_do_unlink(i, up);
+ }
+
+ return ret;
+}
+
+static void serial_unlink_irq_chain(struct uart_sio_port *up)
+{
+ struct irq_info *i = irq_lists + up->port.irq;
+
+ BUG_ON(i->head == NULL);
+
+ if (list_empty(i->head)) {
+ free_irq(up->port.irq, i);
+ free_irq(up->port.irq + 1, i);
+ }
+
+ serial_do_unlink(i, up);
+}
+
+/*
+ * This function is used to handle ports that do not have an interrupt.
+ */
+static void m32r_sio_timeout(unsigned long data)
+{
+ struct uart_sio_port *up = (struct uart_sio_port *)data;
+ unsigned int timeout;
+ unsigned int sts;
+
+ sts = sio_in(up, SIOSTS);
+ if (sts & 0x5) {
+ spin_lock(&up->port.lock);
+ m32r_sio_handle_port(up, sts);
+ spin_unlock(&up->port.lock);
+ }
+
+ timeout = up->port.timeout;
+ timeout = timeout > 6 ? (timeout / 2 - 2) : 1;
+ mod_timer(&up->timer, jiffies + timeout);
+}
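+
+/*
+ * Illustrative only: if up->port.timeout works out to 30 jiffies, the
+ * expression above re-arms the timer every 30 / 2 - 2 = 13 jiffies;
+ * timeouts of 6 jiffies or less fall back to polling every jiffy.
+ */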
+
+static unsigned int m32r_sio_tx_empty(struct uart_port *port)
+{
+ struct uart_sio_port *up = (struct uart_sio_port *)port;
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return ret;
+}
+
+static unsigned int m32r_sio_get_mctrl(struct uart_port *port)
+{
+ return 0;
+}
+
+static void m32r_sio_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+
+}
+
+static void m32r_sio_break_ctl(struct uart_port *port, int break_state)
+{
+
+}
+
+static int m32r_sio_startup(struct uart_port *port)
+{
+ struct uart_sio_port *up = (struct uart_sio_port *)port;
+ int retval;
+
+ sio_init();
+
+ /*
+ * If the "interrupt" for this port doesn't correspond with any
+ * hardware interrupt, we use a timer-based system. The original
+ * driver used to do this with IRQ0.
+ */
+ if (!is_real_interrupt(up->port.irq)) {
+ unsigned int timeout = up->port.timeout;
+
+ timeout = timeout > 6 ? (timeout / 2 - 2) : 1;
+
+ up->timer.data = (unsigned long)up;
+ mod_timer(&up->timer, jiffies + timeout);
+ } else {
+ retval = serial_link_irq_chain(up);
+ if (retval)
+ return retval;
+ }
+
+ /*
+ * Finally, enable interrupts. Note: Modem status interrupts
+ * are set via set_termios(), which will be occurring imminently
+ * anyway, so we don't enable them here.
+ * - M32R_SIO: 0x0c
+ * - M32R_PLDSIO: 0x04
+ */
+ up->ier = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI;
+ sio_out(up, SIOTRCR, up->ier);
+
+ /*
+ * And clear the interrupt registers again for luck.
+ */
+ sio_reset();
+
+ return 0;
+}
+
+static void m32r_sio_shutdown(struct uart_port *port)
+{
+ struct uart_sio_port *up = (struct uart_sio_port *)port;
+
+ /*
+ * Disable interrupts from this port
+ */
+ up->ier = 0;
+ sio_out(up, SIOTRCR, 0);
+
+ /*
+ * Disable break condition and FIFOs
+ */
+
+ sio_init();
+
+ if (!is_real_interrupt(up->port.irq))
+ del_timer_sync(&up->timer);
+ else
+ serial_unlink_irq_chain(up);
+}
+
+static unsigned int m32r_sio_get_divisor(struct uart_port *port,
+ unsigned int baud)
+{
+ return uart_get_divisor(port, baud);
+}
+
+static void m32r_sio_set_termios(struct uart_port *port,
+ struct ktermios *termios, struct ktermios *old)
+{
+ struct uart_sio_port *up = (struct uart_sio_port *)port;
+ unsigned char cval = 0;
+ unsigned long flags;
+ unsigned int baud, quot;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ cval = UART_LCR_WLEN5;
+ break;
+ case CS6:
+ cval = UART_LCR_WLEN6;
+ break;
+ case CS7:
+ cval = UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ cval = UART_LCR_WLEN8;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ cval |= UART_LCR_STOP;
+ if (termios->c_cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(termios->c_cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+#ifdef CMSPAR
+ if (termios->c_cflag & CMSPAR)
+ cval |= UART_LCR_SPAR;
+#endif
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+#ifdef CONFIG_SERIAL_M32R_PLDSIO
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/4);
+#else
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+#endif
+ quot = m32r_sio_get_divisor(port, baud);
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ sio_set_baud_rate(baud);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ up->port.read_status_mask |= UART_LSR_BI;
+
+ /*
+ * Characters to ignore
+ */
+ up->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (termios->c_iflag & IGNBRK) {
+ up->port.ignore_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_OE;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= UART_LSR_DR;
+
+ /*
+ * CTS flow control flag and modem status interrupts
+ */
+ up->ier &= ~UART_IER_MSI;
+ if (UART_ENABLE_MS(&up->port, termios->c_cflag))
+ up->ier |= UART_IER_MSI;
+
+ serial_out(up, UART_IER, up->ier);
+
+ up->lcr = cval; /* Save LCR */
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static void m32r_sio_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct uart_sio_port *up = (struct uart_sio_port *)port;
+
+ if (up->pm)
+ up->pm(port, state, oldstate);
+}
+
+/*
+ * Resource handling. This is complicated by the fact that resources
+ * depend on the port type. Maybe we should be claiming the standard
+ * 8250 ports, and then trying to get other resources as necessary?
+ */
+static int
+m32r_sio_request_std_resource(struct uart_sio_port *up, struct resource **res)
+{
+ unsigned int size = 8 << up->port.regshift;
+#ifndef CONFIG_SERIAL_M32R_PLDSIO
+ unsigned long start;
+#endif
+ int ret = 0;
+
+ switch (up->port.iotype) {
+ case UPIO_MEM:
+ if (up->port.mapbase) {
+#ifdef CONFIG_SERIAL_M32R_PLDSIO
+ *res = request_mem_region(up->port.mapbase, size, "serial");
+#else
+ start = up->port.mapbase;
+ *res = request_mem_region(start, size, "serial");
+#endif
+ if (!*res)
+ ret = -EBUSY;
+ }
+ break;
+
+ case UPIO_PORT:
+ *res = request_region(up->port.iobase, size, "serial");
+ if (!*res)
+ ret = -EBUSY;
+ break;
+ }
+ return ret;
+}
+
+static void m32r_sio_release_port(struct uart_port *port)
+{
+ struct uart_sio_port *up = (struct uart_sio_port *)port;
+ unsigned long start, offset = 0, size = 0;
+
+ size <<= up->port.regshift;
+
+ switch (up->port.iotype) {
+ case UPIO_MEM:
+ if (up->port.mapbase) {
+ /*
+ * Unmap the area.
+ */
+ iounmap(up->port.membase);
+ up->port.membase = NULL;
+
+ start = up->port.mapbase;
+
+ if (size)
+ release_mem_region(start + offset, size);
+ release_mem_region(start, 8 << up->port.regshift);
+ }
+ break;
+
+ case UPIO_PORT:
+ start = up->port.iobase;
+
+ if (size)
+ release_region(start + offset, size);
+ release_region(start + offset, 8 << up->port.regshift);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int m32r_sio_request_port(struct uart_port *port)
+{
+ struct uart_sio_port *up = (struct uart_sio_port *)port;
+ struct resource *res = NULL;
+ int ret = 0;
+
+ ret = m32r_sio_request_std_resource(up, &res);
+
+ /*
+ * If we have a mapbase, then request that as well.
+ */
+ if (ret == 0 && up->port.flags & UPF_IOREMAP) {
+ int size = res->end - res->start + 1;
+
+ up->port.membase = ioremap(up->port.mapbase, size);
+ if (!up->port.membase)
+ ret = -ENOMEM;
+ }
+
+ if (ret < 0) {
+ if (res)
+ release_resource(res);
+ }
+
+ return ret;
+}
+
+static void m32r_sio_config_port(struct uart_port *port, int unused)
+{
+ struct uart_sio_port *up = (struct uart_sio_port *)port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ up->port.type = PORT_INDEX(PORT_M32R_SIO);
+ up->port.fifosize = uart_config[up->port.type].dfl_xmit_fifo_size;
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static int
+m32r_sio_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ if (ser->irq >= nr_irqs || ser->irq < 0 ||
+ ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
+ ser->type >= ARRAY_SIZE(uart_config))
+ return -EINVAL;
+ return 0;
+}
+
+static const char *
+m32r_sio_type(struct uart_port *port)
+{
+ int type = port->type;
+
+ if (type >= ARRAY_SIZE(uart_config))
+ type = 0;
+ return uart_config[type].name;
+}
+
+static struct uart_ops m32r_sio_pops = {
+ .tx_empty = m32r_sio_tx_empty,
+ .set_mctrl = m32r_sio_set_mctrl,
+ .get_mctrl = m32r_sio_get_mctrl,
+ .stop_tx = m32r_sio_stop_tx,
+ .start_tx = m32r_sio_start_tx,
+ .stop_rx = m32r_sio_stop_rx,
+ .enable_ms = m32r_sio_enable_ms,
+ .break_ctl = m32r_sio_break_ctl,
+ .startup = m32r_sio_startup,
+ .shutdown = m32r_sio_shutdown,
+ .set_termios = m32r_sio_set_termios,
+ .pm = m32r_sio_pm,
+ .type = m32r_sio_type,
+ .release_port = m32r_sio_release_port,
+ .request_port = m32r_sio_request_port,
+ .config_port = m32r_sio_config_port,
+ .verify_port = m32r_sio_verify_port,
+};
+
+static struct uart_sio_port m32r_sio_ports[UART_NR];
+
+static void __init m32r_sio_init_ports(void)
+{
+ struct uart_sio_port *up;
+ static int first = 1;
+ int i;
+
+ if (!first)
+ return;
+ first = 0;
+
+ for (i = 0, up = m32r_sio_ports; i < ARRAY_SIZE(old_serial_port);
+ i++, up++) {
+ up->port.iobase = old_serial_port[i].port;
+ up->port.irq = irq_canonicalize(old_serial_port[i].irq);
+ up->port.uartclk = old_serial_port[i].baud_base * 16;
+ up->port.flags = old_serial_port[i].flags;
+ up->port.membase = old_serial_port[i].iomem_base;
+ up->port.iotype = old_serial_port[i].io_type;
+ up->port.regshift = old_serial_port[i].iomem_reg_shift;
+ up->port.ops = &m32r_sio_pops;
+ }
+}
+
+static void __init m32r_sio_register_ports(struct uart_driver *drv)
+{
+ int i;
+
+ m32r_sio_init_ports();
+
+ for (i = 0; i < UART_NR; i++) {
+ struct uart_sio_port *up = &m32r_sio_ports[i];
+
+ up->port.line = i;
+ up->port.ops = &m32r_sio_pops;
+ init_timer(&up->timer);
+ up->timer.function = m32r_sio_timeout;
+
+ /*
+ * ALPHA_KLUDGE_MCR needs to be killed.
+ */
+ up->mcr_mask = ~ALPHA_KLUDGE_MCR;
+ up->mcr_force = ALPHA_KLUDGE_MCR;
+
+ uart_add_one_port(drv, &up->port);
+ }
+}
+
+#ifdef CONFIG_SERIAL_M32R_SIO_CONSOLE
+
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static inline void wait_for_xmitr(struct uart_sio_port *up)
+{
+ unsigned int status, tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ do {
+ status = sio_in(up, SIOSTS);
+
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ } while ((status & UART_EMPTY) != UART_EMPTY);
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ tmout = 1000000;
+ while (--tmout)
+ udelay(1);
+ }
+}
+
+static void m32r_sio_console_putchar(struct uart_port *port, int ch)
+{
+ struct uart_sio_port *up = (struct uart_sio_port *)port;
+
+ wait_for_xmitr(up);
+ sio_out(up, SIOTXB, ch);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+static void m32r_sio_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_sio_port *up = &m32r_sio_ports[co->index];
+ unsigned int ier;
+
+ /*
+ * First save the IER, then disable the interrupts
+ */
+ ier = sio_in(up, SIOTRCR);
+ sio_out(up, SIOTRCR, 0);
+
+ uart_console_write(&up->port, s, count, m32r_sio_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up);
+ sio_out(up, SIOTRCR, ier);
+}
+
+static int __init m32r_sio_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, fall back to port 0.
+ */
+ if (co->index >= UART_NR)
+ co->index = 0;
+ port = &m32r_sio_ports[co->index].port;
+
+ /*
+ * Temporary fix.
+ */
+ spin_lock_init(&port->lock);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver m32r_sio_reg;
+static struct console m32r_sio_console = {
+ .name = "ttyS",
+ .write = m32r_sio_console_write,
+ .device = uart_console_device,
+ .setup = m32r_sio_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &m32r_sio_reg,
+};
+
+static int __init m32r_sio_console_init(void)
+{
+ sio_reset();
+ sio_init();
+ m32r_sio_init_ports();
+ register_console(&m32r_sio_console);
+ return 0;
+}
+console_initcall(m32r_sio_console_init);
+
+#define M32R_SIO_CONSOLE &m32r_sio_console
+#else
+#define M32R_SIO_CONSOLE NULL
+#endif
+
+static struct uart_driver m32r_sio_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "sio",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .nr = UART_NR,
+ .cons = M32R_SIO_CONSOLE,
+};
+
+/**
+ * m32r_sio_suspend_port - suspend one serial port
+ * @line: serial line number
+ *
+ * Suspend one serial port.
+ */
+void m32r_sio_suspend_port(int line)
+{
+ uart_suspend_port(&m32r_sio_reg, &m32r_sio_ports[line].port);
+}
+
+/**
+ * m32r_sio_resume_port - resume one serial port
+ * @line: serial line number
+ *
+ * Resume one serial port.
+ */
+void m32r_sio_resume_port(int line)
+{
+ uart_resume_port(&m32r_sio_reg, &m32r_sio_ports[line].port);
+}
+
+static int __init m32r_sio_init(void)
+{
+ int ret, i;
+
+ printk(KERN_INFO "Serial: M32R SIO driver\n");
+
+ for (i = 0; i < nr_irqs; i++)
+ spin_lock_init(&irq_lists[i].lock);
+
+ ret = uart_register_driver(&m32r_sio_reg);
+ if (ret >= 0)
+ m32r_sio_register_ports(&m32r_sio_reg);
+
+ return ret;
+}
+
+static void __exit m32r_sio_exit(void)
+{
+ int i;
+
+ for (i = 0; i < UART_NR; i++)
+ uart_remove_one_port(&m32r_sio_reg, &m32r_sio_ports[i].port);
+
+ uart_unregister_driver(&m32r_sio_reg);
+}
+
+module_init(m32r_sio_init);
+module_exit(m32r_sio_exit);
+
+EXPORT_SYMBOL(m32r_sio_suspend_port);
+EXPORT_SYMBOL(m32r_sio_resume_port);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Generic M32R SIO serial driver");
diff --git a/drivers/tty/serial/m32r_sio.h b/drivers/tty/serial/m32r_sio.h
new file mode 100644
index 00000000..e9b7e117
--- /dev/null
+++ b/drivers/tty/serial/m32r_sio.h
@@ -0,0 +1,48 @@
+/*
+ * m32r_sio.h
+ *
+ * Driver for M32R serial ports
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ * Based on drivers/serial/8250.h.
+ *
+ * Copyright (C) 2001 Russell King.
+ * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+
+struct m32r_sio_probe {
+ struct module *owner;
+ int (*pci_init_one)(struct pci_dev *dev);
+ void (*pci_remove_one)(struct pci_dev *dev);
+ void (*pnp_init)(void);
+};
+
+int m32r_sio_register_probe(struct m32r_sio_probe *probe);
+void m32r_sio_unregister_probe(struct m32r_sio_probe *probe);
+void m32r_sio_get_irq_map(unsigned int *map);
+void m32r_sio_suspend_port(int line);
+void m32r_sio_resume_port(int line);
+
+struct old_serial_port {
+ unsigned int uart;
+ unsigned int baud_base;
+ unsigned int port;
+ unsigned int irq;
+ unsigned int flags;
+ unsigned char io_type;
+ unsigned char __iomem *iomem_base;
+ unsigned short iomem_reg_shift;
+};
+
+#define _INLINE_ inline
+
+#define PROBE_RSA (1 << 0)
+#define PROBE_ANY (~0)
+
+#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
diff --git a/drivers/tty/serial/m32r_sio_reg.h b/drivers/tty/serial/m32r_sio_reg.h
new file mode 100644
index 00000000..46714737
--- /dev/null
+++ b/drivers/tty/serial/m32r_sio_reg.h
@@ -0,0 +1,152 @@
+/*
+ * m32r_sio_reg.h
+ *
+ * Copyright (C) 1992, 1994 by Theodore Ts'o.
+ * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
+ *
+ * Redistribution of this file is permitted under the terms of the GNU
+ * Public License (GPL)
+ *
+ * These are the UART port assignments, expressed as offsets from the base
+ * register. These assignments should hold for any serial port based on
+ * a 8250, 16450, or 16550(A).
+ */
+
+#ifndef _M32R_SIO_REG_H
+#define _M32R_SIO_REG_H
+
+
+#ifdef CONFIG_SERIAL_M32R_PLDSIO
+
+#define SIOCR 0x000
+#define SIOMOD0 0x002
+#define SIOMOD1 0x004
+#define SIOSTS 0x006
+#define SIOTRCR 0x008
+#define SIOBAUR 0x00a
+// #define SIORBAUR 0x018
+#define SIOTXB 0x00c
+#define SIORXB 0x00e
+
+#define UART_RX ((unsigned long) PLD_ESIO0RXB)
+ /* In: Receive buffer (DLAB=0) */
+#define UART_TX ((unsigned long) PLD_ESIO0TXB)
+ /* Out: Transmit buffer (DLAB=0) */
+#define UART_DLL 0 /* Out: Divisor Latch Low (DLAB=1) */
+#define UART_TRG 0 /* (LCR=BF) FCTR bit 7 selects Rx or Tx
+ * In: Fifo count
+ * Out: Fifo custom trigger levels
+ * XR16C85x only */
+
+#define UART_DLM 0 /* Out: Divisor Latch High (DLAB=1) */
+#define UART_IER ((unsigned long) PLD_ESIO0INTCR)
+ /* Out: Interrupt Enable Register */
+#define UART_FCTR 0 /* (LCR=BF) Feature Control Register
+ * XR16C85x only */
+
+#define UART_IIR 0 /* In: Interrupt ID Register */
+#define UART_FCR 0 /* Out: FIFO Control Register */
+#define UART_EFR 0 /* I/O: Extended Features Register */
+ /* (DLAB=1, 16C660 only) */
+
+#define UART_LCR 0 /* Out: Line Control Register */
+#define UART_MCR 0 /* Out: Modem Control Register */
+#define UART_LSR ((unsigned long) PLD_ESIO0STS)
+ /* In: Line Status Register */
+#define UART_MSR 0 /* In: Modem Status Register */
+#define UART_SCR 0 /* I/O: Scratch Register */
+#define UART_EMSR 0 /* (LCR=BF) Extended Mode Select Register
+ * FCTR bit 6 selects SCR or EMSR
+ * XR16c85x only */
+
+#else /* not CONFIG_SERIAL_M32R_PLDSIO */
+
+#define SIOCR 0x000
+#define SIOMOD0 0x004
+#define SIOMOD1 0x008
+#define SIOSTS 0x00c
+#define SIOTRCR 0x010
+#define SIOBAUR 0x014
+#define SIORBAUR 0x018
+#define SIOTXB 0x01c
+#define SIORXB 0x020
+
+#define UART_RX M32R_SIO0_RXB_PORTL /* In: Receive buffer (DLAB=0) */
+#define UART_TX M32R_SIO0_TXB_PORTL /* Out: Transmit buffer (DLAB=0) */
+#define UART_DLL 0 /* Out: Divisor Latch Low (DLAB=1) */
+#define UART_TRG 0 /* (LCR=BF) FCTR bit 7 selects Rx or Tx
+ * In: Fifo count
+ * Out: Fifo custom trigger levels
+ * XR16C85x only */
+
+#define UART_DLM 0 /* Out: Divisor Latch High (DLAB=1) */
+#define UART_IER M32R_SIO0_TRCR_PORTL /* Out: Interrupt Enable Register */
+#define UART_FCTR 0 /* (LCR=BF) Feature Control Register
+ * XR16C85x only */
+
+#define UART_IIR 0 /* In: Interrupt ID Register */
+#define UART_FCR 0 /* Out: FIFO Control Register */
+#define UART_EFR 0 /* I/O: Extended Features Register */
+ /* (DLAB=1, 16C660 only) */
+
+#define UART_LCR 0 /* Out: Line Control Register */
+#define UART_MCR 0 /* Out: Modem Control Register */
+#define UART_LSR M32R_SIO0_STS_PORTL /* In: Line Status Register */
+#define UART_MSR 0 /* In: Modem Status Register */
+#define UART_SCR 0 /* I/O: Scratch Register */
+#define UART_EMSR 0 /* (LCR=BF) Extended Mode Select Register
+ * FCTR bit 6 selects SCR or EMSR
+ * XR16c85x only */
+
+#endif /* CONFIG_SERIAL_M32R_PLDSIO */
+
+#define UART_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+/*
+ * These are the definitions for the Line Control Register
+ *
+ * Note: if the word length is 5 bits (UART_LCR_WLEN5), then setting
+ * UART_LCR_STOP will select 1.5 stop bits, not 2 stop bits.
+ */
+#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */
+#define UART_LCR_SBC 0x40 /* Set break control */
+#define UART_LCR_SPAR 0x20 /* Stick parity (?) */
+#define UART_LCR_EPAR 0x10 /* Even parity select */
+#define UART_LCR_PARITY 0x08 /* Parity Enable */
+#define UART_LCR_STOP 0x04 /* Stop bits: 0=1 stop bit, 1= 2 stop bits */
+#define UART_LCR_WLEN5 0x00 /* Wordlength: 5 bits */
+#define UART_LCR_WLEN6 0x01 /* Wordlength: 6 bits */
+#define UART_LCR_WLEN7 0x02 /* Wordlength: 7 bits */
+#define UART_LCR_WLEN8 0x03 /* Wordlength: 8 bits */
+
+/*
+ * These are the definitions for the Line Status Register
+ */
+#define UART_LSR_TEMT 0x02 /* Transmitter empty */
+#define UART_LSR_THRE 0x01 /* Transmit-hold-register empty */
+#define UART_LSR_BI 0x00 /* Break interrupt indicator */
+#define UART_LSR_FE 0x80 /* Frame error indicator */
+#define UART_LSR_PE 0x40 /* Parity error indicator */
+#define UART_LSR_OE 0x20 /* Overrun error indicator */
+#define UART_LSR_DR 0x04 /* Receiver data ready */
+
+/*
+ * These are the definitions for the Interrupt Identification Register
+ */
+#define UART_IIR_NO_INT 0x01 /* No interrupts pending */
+#define UART_IIR_ID 0x06 /* Mask for the interrupt ID */
+
+#define UART_IIR_MSI 0x00 /* Modem status interrupt */
+#define UART_IIR_THRI 0x02 /* Transmitter holding register empty */
+#define UART_IIR_RDI 0x04 /* Receiver data interrupt */
+#define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */
+
+/*
+ * These are the definitions for the Interrupt Enable Register
+ */
+#define UART_IER_MSI 0x00 /* Enable Modem status interrupt */
+#define UART_IER_RLSI 0x08 /* Enable receiver line status interrupt */
+#define UART_IER_THRI 0x03 /* Enable Transmitter holding register int. */
+#define UART_IER_RDI 0x04 /* Enable receiver data interrupt */
+
+#endif /* _M32R_SIO_REG_H */
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
new file mode 100644
index 00000000..7b951ada
--- /dev/null
+++ b/drivers/tty/serial/max3100.c
@@ -0,0 +1,926 @@
+/*
+ *
+ * Copyright (C) 2008 Christian Pellegrin <chripell@evolware.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ *
+ * Notes: the MAX3100 doesn't provide an interrupt on CTS so we have
+ * to use polling for flow control. TX empty IRQ is unusable, since
+ * writing conf clears the FIFO buffer and we cannot have this interrupt
+ * always asking us for attention.
+ *
+ * Example platform data:
+
+ static struct plat_max3100 max3100_plat_data = {
+ .loopback = 0,
+ .crystal = 0,
+ .poll_time = 100,
+ };
+
+ static struct spi_board_info spi_board_info[] = {
+ {
+ .modalias = "max3100",
+ .platform_data = &max3100_plat_data,
+ .irq = IRQ_EINT12,
+ .max_speed_hz = 5*1000*1000,
+ .chip_select = 0,
+ },
+ };
+
+ * The initial minor number is 209 in the low-density serial port range:
+ * mknod /dev/ttyMAX0 c 204 209
+ */
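+
+/*
+ * For completeness (illustrative, not part of this driver): board code
+ * would normally hand a table like the one above to the SPI core at
+ * init time, e.g.
+ *
+ *	spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
+ *
+ * IRQ_EINT12 and the 5 MHz SPI clock in the example are board-specific
+ * assumptions.
+ */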
+
+#define MAX3100_MAJOR 204
+#define MAX3100_MINOR 209
+/* 4 MAX3100s should be enough for everyone */
+#define MAX_MAX3100 4
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/spi/spi.h>
+#include <linux/freezer.h>
+
+#include <linux/serial_max3100.h>
+
+#define MAX3100_C (1<<14)
+#define MAX3100_D (0<<14)
+#define MAX3100_W (1<<15)
+#define MAX3100_RX (0<<15)
+
+#define MAX3100_WC (MAX3100_W | MAX3100_C)
+#define MAX3100_RC (MAX3100_RX | MAX3100_C)
+#define MAX3100_WD (MAX3100_W | MAX3100_D)
+#define MAX3100_RD (MAX3100_RX | MAX3100_D)
+#define MAX3100_CMD (3 << 14)
+
+#define MAX3100_T (1<<14)
+#define MAX3100_R (1<<15)
+
+#define MAX3100_FEN (1<<13)
+#define MAX3100_SHDN (1<<12)
+#define MAX3100_TM (1<<11)
+#define MAX3100_RM (1<<10)
+#define MAX3100_PM (1<<9)
+#define MAX3100_RAM (1<<8)
+#define MAX3100_IR (1<<7)
+#define MAX3100_ST (1<<6)
+#define MAX3100_PE (1<<5)
+#define MAX3100_L (1<<4)
+#define MAX3100_BAUD (0xf)
+
+#define MAX3100_TE (1<<10)
+#define MAX3100_RAFE (1<<10)
+#define MAX3100_RTS (1<<9)
+#define MAX3100_CTS (1<<9)
+#define MAX3100_PT (1<<8)
+#define MAX3100_DATA (0xff)
+
+#define MAX3100_RT (MAX3100_R | MAX3100_T)
+#define MAX3100_RTC (MAX3100_RT | MAX3100_CTS | MAX3100_RAFE)
+
+/* the following simulate a status reg for ignore_status_mask */
+#define MAX3100_STATUS_PE 1
+#define MAX3100_STATUS_FE 2
+#define MAX3100_STATUS_OE 4
+
+struct max3100_port {
+ struct uart_port port;
+ struct spi_device *spi;
+
+ int cts; /* last CTS received for flow ctrl */
+ int tx_empty; /* last TX empty bit */
+
+ spinlock_t conf_lock; /* shared data */
+ int conf_commit; /* need to make changes */
+ int conf; /* configuration for the MAX3100
+ * (bits 0-7, bits 8-11 are irqs) */
+ int rts_commit; /* need to change rts */
+ int rts; /* rts status */
+ int baud; /* current baud rate */
+
+ int parity; /* keeps track if we should send parity */
+#define MAX3100_PARITY_ON 1
+#define MAX3100_PARITY_ODD 2
+#define MAX3100_7BIT 4
+ int rx_enabled; /* if we should rx chars */
+
+ int irq; /* irq assigned to the max3100 */
+
+ int minor; /* minor number */
+ int crystal; /* 1 if 3.6864 MHz crystal, 0 for 1.8432 MHz */
+ int loopback; /* 1 if we are in loopback mode */
+
+ /* for handling irqs: need workqueue since we do spi_sync */
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+ /* set to 1 to make the workhandler exit as soon as possible */
+ int force_end_work;
+ /* need to know we are suspending to avoid deadlock on workqueue */
+ int suspending;
+
+ /* hook for suspending MAX3100 via dedicated pin */
+ void (*max3100_hw_suspend) (int suspend);
+
+ /* poll time (in ms) for ctrl lines */
+ int poll_time;
+ /* and its timer */
+ struct timer_list timer;
+};
+
+static struct max3100_port *max3100s[MAX_MAX3100]; /* the chips */
+static DEFINE_MUTEX(max3100s_lock); /* race on probe */
+
+static int max3100_do_parity(struct max3100_port *s, u16 c)
+{
+ int parity;
+
+ if (s->parity & MAX3100_PARITY_ODD)
+ parity = 1;
+ else
+ parity = 0;
+
+ if (s->parity & MAX3100_7BIT)
+ c &= 0x7f;
+ else
+ c &= 0xff;
+
+ parity = parity ^ (hweight8(c) & 1);
+ return parity;
+}
+
+static int max3100_check_parity(struct max3100_port *s, u16 c)
+{
+ return max3100_do_parity(s, c) == ((c >> 8) & 1);
+}
+
+static void max3100_calc_parity(struct max3100_port *s, u16 *c)
+{
+ if (s->parity & MAX3100_7BIT)
+ *c &= 0x7f;
+ else
+ *c &= 0xff;
+
+ if (s->parity & MAX3100_PARITY_ON)
+ *c |= max3100_do_parity(s, *c) << 8;
+}
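+
+/*
+ * Worked example (illustrative): for c = 0x41 ('A'), hweight8(0x41) = 2,
+ * so with even parity (MAX3100_PARITY_ON set, MAX3100_PARITY_ODD clear)
+ * the computed parity bit is 0 and bit 8 stays clear, while with odd
+ * parity the bit is 1 and max3100_calc_parity() produces 0x141.
+ */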
+
+static void max3100_work(struct work_struct *w);
+
+static void max3100_dowork(struct max3100_port *s)
+{
+ if (!s->force_end_work && !work_pending(&s->work) &&
+ !freezing(current) && !s->suspending)
+ queue_work(s->workqueue, &s->work);
+}
+
+static void max3100_timeout(unsigned long data)
+{
+ struct max3100_port *s = (struct max3100_port *)data;
+
+ if (s->port.state) {
+ max3100_dowork(s);
+ mod_timer(&s->timer, jiffies + s->poll_time);
+ }
+}
+
+static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx)
+{
+ struct spi_message message;
+ u16 etx, erx;
+ int status;
+ struct spi_transfer tran = {
+ .tx_buf = &etx,
+ .rx_buf = &erx,
+ .len = 2,
+ };
+
+ etx = cpu_to_be16(tx);
+ spi_message_init(&message);
+ spi_message_add_tail(&tran, &message);
+ status = spi_sync(s->spi, &message);
+ if (status) {
+ dev_warn(&s->spi->dev, "error while calling spi_sync\n");
+ return -EIO;
+ }
+ *rx = be16_to_cpu(erx);
+ s->tx_empty = (*rx & MAX3100_T) > 0;
+ dev_dbg(&s->spi->dev, "%04x - %04x\n", tx, *rx);
+ return 0;
+}
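+
+/*
+ * Illustrative encoding, derived from the bit definitions above: a
+ * write-configuration word that enables the RX interrupt at 9600 baud
+ * with the 1.8432 MHz crystal is
+ *   MAX3100_WC | MAX3100_RM | 10 = 0x8000 | 0x4000 | 0x0400 | 0x000a = 0xc40a
+ * which is the kind of word max3100_work() passes to max3100_sr().
+ */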
+
+static int max3100_handlerx(struct max3100_port *s, u16 rx)
+{
+ unsigned int ch, flg, status = 0;
+ int ret = 0, cts;
+
+ if (rx & MAX3100_R && s->rx_enabled) {
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+ ch = rx & (s->parity & MAX3100_7BIT ? 0x7f : 0xff);
+ if (rx & MAX3100_RAFE) {
+ s->port.icount.frame++;
+ flg = TTY_FRAME;
+ status |= MAX3100_STATUS_FE;
+ } else {
+ if (s->parity & MAX3100_PARITY_ON) {
+ if (max3100_check_parity(s, rx)) {
+ s->port.icount.rx++;
+ flg = TTY_NORMAL;
+ } else {
+ s->port.icount.parity++;
+ flg = TTY_PARITY;
+ status |= MAX3100_STATUS_PE;
+ }
+ } else {
+ s->port.icount.rx++;
+ flg = TTY_NORMAL;
+ }
+ }
+ uart_insert_char(&s->port, status, MAX3100_STATUS_OE, ch, flg);
+ ret = 1;
+ }
+
+ cts = (rx & MAX3100_CTS) > 0;
+ if (s->cts != cts) {
+ s->cts = cts;
+ uart_handle_cts_change(&s->port, cts ? TIOCM_CTS : 0);
+ }
+
+ return ret;
+}
+
+static void max3100_work(struct work_struct *w)
+{
+ struct max3100_port *s = container_of(w, struct max3100_port, work);
+ int rxchars;
+ u16 tx, rx;
+ int conf, cconf, rts, crts;
+ struct circ_buf *xmit = &s->port.state->xmit;
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ rxchars = 0;
+ do {
+ spin_lock(&s->conf_lock);
+ conf = s->conf;
+ cconf = s->conf_commit;
+ s->conf_commit = 0;
+ rts = s->rts;
+ crts = s->rts_commit;
+ s->rts_commit = 0;
+ spin_unlock(&s->conf_lock);
+ if (cconf)
+ max3100_sr(s, MAX3100_WC | conf, &rx);
+ if (crts) {
+ max3100_sr(s, MAX3100_WD | MAX3100_TE |
+ (s->rts ? MAX3100_RTS : 0), &rx);
+ rxchars += max3100_handlerx(s, rx);
+ }
+
+ max3100_sr(s, MAX3100_RD, &rx);
+ rxchars += max3100_handlerx(s, rx);
+
+ if (rx & MAX3100_T) {
+ tx = 0xffff;
+ if (s->port.x_char) {
+ tx = s->port.x_char;
+ s->port.icount.tx++;
+ s->port.x_char = 0;
+ } else if (!uart_circ_empty(xmit) &&
+ !uart_tx_stopped(&s->port)) {
+ tx = xmit->buf[xmit->tail];
+ xmit->tail = (xmit->tail + 1) &
+ (UART_XMIT_SIZE - 1);
+ s->port.icount.tx++;
+ }
+ if (tx != 0xffff) {
+ max3100_calc_parity(s, &tx);
+ tx |= MAX3100_WD | (s->rts ? MAX3100_RTS : 0);
+ max3100_sr(s, tx, &rx);
+ rxchars += max3100_handlerx(s, rx);
+ }
+ }
+
+ if (rxchars > 16 && s->port.state->port.tty != NULL) {
+ tty_flip_buffer_push(s->port.state->port.tty);
+ rxchars = 0;
+ }
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&s->port);
+
+ } while (!s->force_end_work &&
+ !freezing(current) &&
+ ((rx & MAX3100_R) ||
+ (!uart_circ_empty(xmit) &&
+ !uart_tx_stopped(&s->port))));
+
+ if (rxchars > 0 && s->port.state->port.tty != NULL)
+ tty_flip_buffer_push(s->port.state->port.tty);
+}
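+
+/*
+ * Summary of the loop above: each pass flushes at most one pending
+ * configuration write, one RTS update and one data word, then re-reads
+ * the RX register; it exits only once the chip stops reporting received
+ * data and the transmit ring is empty, or the port is being torn down
+ * or the system is freezing.
+ */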
+
+static irqreturn_t max3100_irq(int irqno, void *dev_id)
+{
+ struct max3100_port *s = dev_id;
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ max3100_dowork(s);
+ return IRQ_HANDLED;
+}
+
+static void max3100_enable_ms(struct uart_port *port)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+
+ if (s->poll_time > 0)
+ mod_timer(&s->timer, jiffies);
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+}
+
+static void max3100_start_tx(struct uart_port *port)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ max3100_dowork(s);
+}
+
+static void max3100_stop_rx(struct uart_port *port)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ s->rx_enabled = 0;
+ spin_lock(&s->conf_lock);
+ s->conf &= ~MAX3100_RM;
+ s->conf_commit = 1;
+ spin_unlock(&s->conf_lock);
+ max3100_dowork(s);
+}
+
+static unsigned int max3100_tx_empty(struct uart_port *port)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ /* may not be truly up-to-date */
+ max3100_dowork(s);
+ return s->tx_empty;
+}
+
+static unsigned int max3100_get_mctrl(struct uart_port *port)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ /* may not be truly up-to-date */
+ max3100_dowork(s);
+ /* always assert DCD and DSR since these lines are not wired */
+ return (s->cts ? TIOCM_CTS : 0) | TIOCM_DSR | TIOCM_CAR;
+}
+
+static void max3100_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+ int rts;
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ rts = (mctrl & TIOCM_RTS) > 0;
+
+ spin_lock(&s->conf_lock);
+ if (s->rts != rts) {
+ s->rts = rts;
+ s->rts_commit = 1;
+ max3100_dowork(s);
+ }
+ spin_unlock(&s->conf_lock);
+}
+
+static void
+max3100_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+ int baud = 0;
+ unsigned cflag;
+ u32 param_new, param_mask, parity = 0;
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ cflag = termios->c_cflag;
+ param_new = 0;
+ param_mask = 0;
+
+ baud = tty_termios_baud_rate(termios);
+ param_new = s->conf & MAX3100_BAUD;
+ switch (baud) {
+ case 300:
+ if (s->crystal)
+ baud = s->baud;
+ else
+ param_new = 15;
+ break;
+ case 600:
+ param_new = 14 + s->crystal;
+ break;
+ case 1200:
+ param_new = 13 + s->crystal;
+ break;
+ case 2400:
+ param_new = 12 + s->crystal;
+ break;
+ case 4800:
+ param_new = 11 + s->crystal;
+ break;
+ case 9600:
+ param_new = 10 + s->crystal;
+ break;
+ case 19200:
+ param_new = 9 + s->crystal;
+ break;
+ case 38400:
+ param_new = 8 + s->crystal;
+ break;
+ case 57600:
+ param_new = 1 + s->crystal;
+ break;
+ case 115200:
+ param_new = 0 + s->crystal;
+ break;
+ case 230400:
+ if (s->crystal)
+ param_new = 0;
+ else
+ baud = s->baud;
+ break;
+ default:
+ baud = s->baud;
+ }
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ s->baud = baud;
+ param_mask |= MAX3100_BAUD;
+
+ if ((cflag & CSIZE) == CS8) {
+ param_new &= ~MAX3100_L;
+ parity &= ~MAX3100_7BIT;
+ } else {
+ param_new |= MAX3100_L;
+ parity |= MAX3100_7BIT;
+ cflag = (cflag & ~CSIZE) | CS7;
+ }
+ param_mask |= MAX3100_L;
+
+ if (cflag & CSTOPB)
+ param_new |= MAX3100_ST;
+ else
+ param_new &= ~MAX3100_ST;
+ param_mask |= MAX3100_ST;
+
+ if (cflag & PARENB) {
+ param_new |= MAX3100_PE;
+ parity |= MAX3100_PARITY_ON;
+ } else {
+ param_new &= ~MAX3100_PE;
+ parity &= ~MAX3100_PARITY_ON;
+ }
+ param_mask |= MAX3100_PE;
+
+ if (cflag & PARODD)
+ parity |= MAX3100_PARITY_ODD;
+ else
+ parity &= ~MAX3100_PARITY_ODD;
+
+ /* mask termios capabilities we don't support */
+ cflag &= ~CMSPAR;
+ termios->c_cflag = cflag;
+
+ s->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ s->port.ignore_status_mask |=
+ MAX3100_STATUS_PE | MAX3100_STATUS_FE |
+ MAX3100_STATUS_OE;
+
+ /* we are sending chars from a workqueue, so enable low_latency */
+ s->port.state->port.tty->low_latency = 1;
+
+ if (s->poll_time > 0)
+ del_timer_sync(&s->timer);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_lock(&s->conf_lock);
+ s->conf = (s->conf & ~param_mask) | (param_new & param_mask);
+ s->conf_commit = 1;
+ s->parity = parity;
+ spin_unlock(&s->conf_lock);
+ max3100_dowork(s);
+
+ if (UART_ENABLE_MS(&s->port, termios->c_cflag))
+ max3100_enable_ms(&s->port);
+}
+
+static void max3100_shutdown(struct uart_port *port)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ if (s->suspending)
+ return;
+
+ s->force_end_work = 1;
+
+ if (s->poll_time > 0)
+ del_timer_sync(&s->timer);
+
+ if (s->workqueue) {
+ flush_workqueue(s->workqueue);
+ destroy_workqueue(s->workqueue);
+ s->workqueue = NULL;
+ }
+ if (s->irq)
+ free_irq(s->irq, s);
+
+ /* set shutdown mode to save power */
+ if (s->max3100_hw_suspend)
+ s->max3100_hw_suspend(1);
+ else {
+ u16 tx, rx;
+
+ tx = MAX3100_WC | MAX3100_SHDN;
+ max3100_sr(s, tx, &rx);
+ }
+}
+
+static int max3100_startup(struct uart_port *port)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+ char b[12];
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ s->conf = MAX3100_RM;
+ s->baud = s->crystal ? 230400 : 115200;
+ s->rx_enabled = 1;
+
+ if (s->suspending)
+ return 0;
+
+ s->force_end_work = 0;
+ s->parity = 0;
+ s->rts = 0;
+
+ sprintf(b, "max3100-%d", s->minor);
+ s->workqueue = create_freezable_workqueue(b);
+ if (!s->workqueue) {
+ dev_warn(&s->spi->dev, "cannot create workqueue\n");
+ return -EBUSY;
+ }
+ INIT_WORK(&s->work, max3100_work);
+
+ if (request_irq(s->irq, max3100_irq,
+ IRQF_TRIGGER_FALLING, "max3100", s) < 0) {
+ dev_warn(&s->spi->dev, "cannot allocate irq %d\n", s->irq);
+ s->irq = 0;
+ destroy_workqueue(s->workqueue);
+ s->workqueue = NULL;
+ return -EBUSY;
+ }
+
+ if (s->loopback) {
+ u16 tx, rx;
+ tx = 0x4001;
+ max3100_sr(s, tx, &rx);
+ }
+
+ if (s->max3100_hw_suspend)
+ s->max3100_hw_suspend(0);
+ s->conf_commit = 1;
+ max3100_dowork(s);
+ /* wait for clock to settle */
+ msleep(50);
+
+ max3100_enable_ms(&s->port);
+
+ return 0;
+}
+
+static const char *max3100_type(struct uart_port *port)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ return s->port.type == PORT_MAX3100 ? "MAX3100" : NULL;
+}
+
+static void max3100_release_port(struct uart_port *port)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+}
+
+static void max3100_config_port(struct uart_port *port, int flags)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ if (flags & UART_CONFIG_TYPE)
+ s->port.type = PORT_MAX3100;
+}
+
+static int max3100_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+ int ret = -EINVAL;
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ if (ser->type == PORT_UNKNOWN || ser->type == PORT_MAX3100)
+ ret = 0;
+ return ret;
+}
+
+static void max3100_stop_tx(struct uart_port *port)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+}
+
+static int max3100_request_port(struct uart_port *port)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+ return 0;
+}
+
+static void max3100_break_ctl(struct uart_port *port, int break_state)
+{
+ struct max3100_port *s = container_of(port,
+ struct max3100_port,
+ port);
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+}
+
+static struct uart_ops max3100_ops = {
+ .tx_empty = max3100_tx_empty,
+ .set_mctrl = max3100_set_mctrl,
+ .get_mctrl = max3100_get_mctrl,
+ .stop_tx = max3100_stop_tx,
+ .start_tx = max3100_start_tx,
+ .stop_rx = max3100_stop_rx,
+ .enable_ms = max3100_enable_ms,
+ .break_ctl = max3100_break_ctl,
+ .startup = max3100_startup,
+ .shutdown = max3100_shutdown,
+ .set_termios = max3100_set_termios,
+ .type = max3100_type,
+ .release_port = max3100_release_port,
+ .request_port = max3100_request_port,
+ .config_port = max3100_config_port,
+ .verify_port = max3100_verify_port,
+};
+
+static struct uart_driver max3100_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyMAX",
+ .dev_name = "ttyMAX",
+ .major = MAX3100_MAJOR,
+ .minor = MAX3100_MINOR,
+ .nr = MAX_MAX3100,
+};
+static int uart_driver_registered;
+
+static int __devinit max3100_probe(struct spi_device *spi)
+{
+ int i, retval;
+ struct plat_max3100 *pdata;
+ u16 tx, rx;
+
+ mutex_lock(&max3100s_lock);
+
+ if (!uart_driver_registered) {
+ uart_driver_registered = 1;
+ retval = uart_register_driver(&max3100_uart_driver);
+ if (retval) {
+ printk(KERN_ERR "Couldn't register max3100 uart driver\n");
+ mutex_unlock(&max3100s_lock);
+ return retval;
+ }
+ }
+
+ for (i = 0; i < MAX_MAX3100; i++)
+ if (!max3100s[i])
+ break;
+ if (i == MAX_MAX3100) {
+ dev_warn(&spi->dev, "too many MAX3100 chips\n");
+ mutex_unlock(&max3100s_lock);
+ return -ENOMEM;
+ }
+
+ max3100s[i] = kzalloc(sizeof(struct max3100_port), GFP_KERNEL);
+ if (!max3100s[i]) {
+ dev_warn(&spi->dev,
+ "kmalloc for max3100 structure %d failed!\n", i);
+ mutex_unlock(&max3100s_lock);
+ return -ENOMEM;
+ }
+ max3100s[i]->spi = spi;
+ max3100s[i]->irq = spi->irq;
+ spin_lock_init(&max3100s[i]->conf_lock);
+ dev_set_drvdata(&spi->dev, max3100s[i]);
+ pdata = spi->dev.platform_data;
+ max3100s[i]->crystal = pdata->crystal;
+ max3100s[i]->loopback = pdata->loopback;
+ max3100s[i]->poll_time = pdata->poll_time * HZ / 1000;
+ if (pdata->poll_time > 0 && max3100s[i]->poll_time == 0)
+ max3100s[i]->poll_time = 1;
+ max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend;
+ max3100s[i]->minor = i;
+ init_timer(&max3100s[i]->timer);
+ max3100s[i]->timer.function = max3100_timeout;
+ max3100s[i]->timer.data = (unsigned long) max3100s[i];
+
+ dev_dbg(&spi->dev, "%s: adding port %d\n", __func__, i);
+ max3100s[i]->port.irq = max3100s[i]->irq;
+ max3100s[i]->port.uartclk = max3100s[i]->crystal ? 3686400 : 1843200;
+ max3100s[i]->port.fifosize = 16;
+ max3100s[i]->port.ops = &max3100_ops;
+ max3100s[i]->port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
+ max3100s[i]->port.line = i;
+ max3100s[i]->port.type = PORT_MAX3100;
+ max3100s[i]->port.dev = &spi->dev;
+ retval = uart_add_one_port(&max3100_uart_driver, &max3100s[i]->port);
+ if (retval < 0)
+ dev_warn(&spi->dev,
+ "uart_add_one_port failed for line %d with error %d\n",
+ i, retval);
+
+ /* set shutdown mode to save power. Will be woken up on open */
+ if (max3100s[i]->max3100_hw_suspend)
+ max3100s[i]->max3100_hw_suspend(1);
+ else {
+ tx = MAX3100_WC | MAX3100_SHDN;
+ max3100_sr(max3100s[i], tx, &rx);
+ }
+ mutex_unlock(&max3100s_lock);
+ return 0;
+}
+
+static int __devexit max3100_remove(struct spi_device *spi)
+{
+ struct max3100_port *s = dev_get_drvdata(&spi->dev);
+ int i;
+
+ mutex_lock(&max3100s_lock);
+
+ /* find out the index for the chip we are removing */
+ for (i = 0; i < MAX_MAX3100; i++)
+ if (max3100s[i] == s)
+ break;
+
+ dev_dbg(&spi->dev, "%s: removing port %d\n", __func__, i);
+ uart_remove_one_port(&max3100_uart_driver, &max3100s[i]->port);
+ kfree(max3100s[i]);
+ max3100s[i] = NULL;
+
+ /* check if this is the last chip we have */
+ for (i = 0; i < MAX_MAX3100; i++)
+ if (max3100s[i]) {
+ mutex_unlock(&max3100s_lock);
+ return 0;
+ }
+ pr_debug("removing max3100 driver\n");
+ uart_unregister_driver(&max3100_uart_driver);
+
+ mutex_unlock(&max3100s_lock);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int max3100_suspend(struct spi_device *spi, pm_message_t state)
+{
+ struct max3100_port *s = dev_get_drvdata(&spi->dev);
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ disable_irq(s->irq);
+
+ s->suspending = 1;
+ uart_suspend_port(&max3100_uart_driver, &s->port);
+
+ if (s->max3100_hw_suspend)
+ s->max3100_hw_suspend(1);
+ else {
+ /* no HW suspend, so do SW one */
+ u16 tx, rx;
+
+ tx = MAX3100_WC | MAX3100_SHDN;
+ max3100_sr(s, tx, &rx);
+ }
+ return 0;
+}
+
+static int max3100_resume(struct spi_device *spi)
+{
+ struct max3100_port *s = dev_get_drvdata(&spi->dev);
+
+ dev_dbg(&s->spi->dev, "%s\n", __func__);
+
+ if (s->max3100_hw_suspend)
+ s->max3100_hw_suspend(0);
+ uart_resume_port(&max3100_uart_driver, &s->port);
+ s->suspending = 0;
+
+ enable_irq(s->irq);
+
+ s->conf_commit = 1;
+ if (s->workqueue)
+ max3100_dowork(s);
+
+ return 0;
+}
+
+#else
+#define max3100_suspend NULL
+#define max3100_resume NULL
+#endif
+
+static struct spi_driver max3100_driver = {
+ .driver = {
+ .name = "max3100",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+
+ .probe = max3100_probe,
+ .remove = __devexit_p(max3100_remove),
+ .suspend = max3100_suspend,
+ .resume = max3100_resume,
+};
+
+static int __init max3100_init(void)
+{
+ return spi_register_driver(&max3100_driver);
+}
+module_init(max3100_init);
+
+static void __exit max3100_exit(void)
+{
+ spi_unregister_driver(&max3100_driver);
+}
+module_exit(max3100_exit);
+
+MODULE_DESCRIPTION("MAX3100 driver");
+MODULE_AUTHOR("Christian Pellegrin <chripell@evolware.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:max3100");
diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
new file mode 100644
index 00000000..d73aadd7
--- /dev/null
+++ b/drivers/tty/serial/max3107-aava.c
@@ -0,0 +1,344 @@
+/*
+ * max3107-aava.c - spi uart protocol driver for Maxim 3107 on AAVA platforms
+ * Based on max3100.c
+ * by Christian Pellegrin <chripell@evolware.org>
+ * and max3110.c
+ * by Feng Tang <feng.tang@intel.com>
+ *
+ * Copyright (C) Aavamobile 2009
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/spi/spi.h>
+#include <linux/freezer.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/sfi.h>
+#include <asm/mrst.h>
+#include "max3107.h"
+
+/* GPIO direction to input function */
+static int max3107_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
+{
+ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
+ u16 buf[1]; /* Buffer for SPI transfer */
+
+ if (offset >= MAX3107_GPIO_COUNT) {
+ dev_err(&s->spi->dev, "Invalid GPIO\n");
+ return -EINVAL;
+ }
+
+ /* Read current GPIO configuration register */
+ buf[0] = MAX3107_GPIOCFG_REG;
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
+ dev_err(&s->spi->dev, "SPI transfer GPIO read failed\n");
+ return -EIO;
+ }
+ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
+
+ /* Set GPIO to input */
+ buf[0] &= ~(0x0001 << offset);
+
+ /* Write new GPIO configuration register value */
+ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG);
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, NULL, 2)) {
+ dev_err(&s->spi->dev, "SPI transfer GPIO write failed\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+/* GPIO direction to output function */
+static int max3107_gpio_direction_out(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
+ u16 buf[2]; /* Buffer for SPI transfers */
+
+ if (offset >= MAX3107_GPIO_COUNT) {
+ dev_err(&s->spi->dev, "Invalid GPIO\n");
+ return -EINVAL;
+ }
+
+ /* Read current GPIO configuration and data registers */
+ buf[0] = MAX3107_GPIOCFG_REG;
+ buf[1] = MAX3107_GPIODATA_REG;
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) {
+ dev_err(&s->spi->dev, "SPI transfer gpio failed\n");
+ return -EIO;
+ }
+ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
+ buf[1] &= MAX3107_SPI_RX_DATA_MASK;
+
+ /* Set GPIO to output */
+ buf[0] |= (0x0001 << offset);
+ /* Set value */
+ if (value)
+ buf[1] |= (0x0001 << offset);
+ else
+ buf[1] &= ~(0x0001 << offset);
+
+ /* Write new GPIO configuration and data register values */
+ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG);
+ buf[1] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG);
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, NULL, 4)) {
+ dev_err(&s->spi->dev,
+ "SPI transfer for GPIO conf data w failed\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+/* GPIO value query function */
+static int max3107_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
+ u16 buf[1]; /* Buffer for SPI transfer */
+
+ if (offset >= MAX3107_GPIO_COUNT) {
+ dev_err(&s->spi->dev, "Invalid GPIO\n");
+ return -EINVAL;
+ }
+
+ /* Read current GPIO data register */
+ buf[0] = MAX3107_GPIODATA_REG;
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
+ dev_err(&s->spi->dev, "SPI transfer GPIO data r failed\n");
+ return -EIO;
+ }
+ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
+
+ /* Return value */
+ return buf[0] & (0x0001 << offset);
+}
+
+/* GPIO value set function */
+static void max3107_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
+ u16 buf[2]; /* Buffer for SPI transfers */
+
+ if (offset >= MAX3107_GPIO_COUNT) {
+ dev_err(&s->spi->dev, "Invalid GPIO\n");
+ return;
+ }
+
+ /* Read current GPIO data and configuration registers */
+ buf[0] = MAX3107_GPIODATA_REG;
+ buf[1] = MAX3107_GPIOCFG_REG;
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) {
+ dev_err(&s->spi->dev,
+ "SPI transfer for GPIO data and config read failed\n");
+ return;
+ }
+ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
+ buf[1] &= MAX3107_SPI_RX_DATA_MASK;
+
+ if (!(buf[1] & (0x0001 << offset))) {
+ /* Configured as input, can't set value */
+ dev_warn(&s->spi->dev,
+ "Trying to set value for input GPIO\n");
+ return;
+ }
+
+ /* Set value */
+ if (value)
+ buf[0] |= (0x0001 << offset);
+ else
+ buf[0] &= ~(0x0001 << offset);
+
+ /* Write new GPIO data register value */
+ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG);
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, NULL, 2))
+ dev_err(&s->spi->dev, "SPI transfer GPIO data w failed\n");
+}
+
+/* GPIO chip data */
+static struct gpio_chip max3107_gpio_chip = {
+ .owner = THIS_MODULE,
+ .direction_input = max3107_gpio_direction_in,
+ .direction_output = max3107_gpio_direction_out,
+ .get = max3107_gpio_get,
+ .set = max3107_gpio_set,
+ .can_sleep = 1,
+ .base = MAX3107_GPIO_BASE,
+ .ngpio = MAX3107_GPIO_COUNT,
+};
+
+/**
+ * max3107_aava_reset - reset on AAVA systems
+ * @spi: The SPI device we are probing
+ *
+ * Reset the device ready for probing.
+ */
+
+static int max3107_aava_reset(struct spi_device *spi)
+{
+ /* Reset the chip */
+ if (gpio_request(MAX3107_RESET_GPIO, "max3107")) {
+ pr_err("Requesting RESET GPIO failed\n");
+ return -EIO;
+ }
+ if (gpio_direction_output(MAX3107_RESET_GPIO, 0)) {
+ pr_err("Setting RESET GPIO to 0 failed\n");
+ gpio_free(MAX3107_RESET_GPIO);
+ return -EIO;
+ }
+ msleep(MAX3107_RESET_DELAY);
+ if (gpio_direction_output(MAX3107_RESET_GPIO, 1)) {
+ pr_err("Setting RESET GPIO to 1 failed\n");
+ gpio_free(MAX3107_RESET_GPIO);
+ return -EIO;
+ }
+ gpio_free(MAX3107_RESET_GPIO);
+ msleep(MAX3107_WAKEUP_DELAY);
+ return 0;
+}
+
+static int max3107_aava_configure(struct max3107_port *s)
+{
+ int retval;
+
+ /* Initialize GPIO chip data */
+ s->chip = max3107_gpio_chip;
+ s->chip.label = s->spi->modalias;
+ s->chip.dev = &s->spi->dev;
+
+ /* Add GPIO chip */
+ retval = gpiochip_add(&s->chip);
+ if (retval) {
+ dev_err(&s->spi->dev, "Adding GPIO chip failed\n");
+ return retval;
+ }
+
+ /* Temporary fix for EV2 boot problems, set modem reset to 0 */
+ max3107_gpio_direction_out(&s->chip, 3, 0);
+ return 0;
+}
+
+#if 0
+/* This will get enabled once we have the board stuff merged for this
+ specific case */
+
+static const struct baud_table brg13_ext[] = {
+ { 300, MAX3107_BRG13_B300 },
+ { 600, MAX3107_BRG13_B600 },
+ { 1200, MAX3107_BRG13_B1200 },
+ { 2400, MAX3107_BRG13_B2400 },
+ { 4800, MAX3107_BRG13_B4800 },
+ { 9600, MAX3107_BRG13_B9600 },
+ { 19200, MAX3107_BRG13_B19200 },
+ { 57600, MAX3107_BRG13_B57600 },
+ { 115200, MAX3107_BRG13_B115200 },
+ { 230400, MAX3107_BRG13_B230400 },
+ { 460800, MAX3107_BRG13_B460800 },
+ { 921600, MAX3107_BRG13_B921600 },
+ { 0, 0 }
+};
+
+static void max3107_aava_init(struct max3107_port *s)
+{
+ /* Override for the AAVA SC specific configuration */
+ if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
+ if (get_koski_build_id() <= KOSKI_EV2)
+ if (s->ext_clk) {
+ s->brg_cfg = MAX3107_BRG13_B9600;
+ s->baud_tbl = (struct baud_table *)brg13_ext;
+ }
+ }
+}
+#endif
+
+static int __devexit max3107_aava_remove(struct spi_device *spi)
+{
+ struct max3107_port *s = dev_get_drvdata(&spi->dev);
+
+ /* Remove GPIO chip */
+ if (gpiochip_remove(&s->chip))
+ dev_warn(&spi->dev, "Removing GPIO chip failed\n");
+
+ /* Then do the default remove */
+ return max3107_remove(spi);
+}
+
+/* Platform data */
+static struct max3107_plat aava_plat_data = {
+ .loopback = 0,
+ .ext_clk = 1,
+/* .init = max3107_aava_init, */
+ .configure = max3107_aava_configure,
+ .hw_suspend = max3107_hw_susp,
+ .polled_mode = 0,
+ .poll_time = 0,
+};
+
+
+static int __devinit max3107_probe_aava(struct spi_device *spi)
+{
+ int err = max3107_aava_reset(spi);
+ if (err < 0)
+ return err;
+ return max3107_probe(spi, &aava_plat_data);
+}
+
+/* Spi driver data */
+static struct spi_driver max3107_driver = {
+ .driver = {
+ .name = "aava-max3107",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = max3107_probe_aava,
+ .remove = __devexit_p(max3107_aava_remove),
+ .suspend = max3107_suspend,
+ .resume = max3107_resume,
+};
+
+/* Driver init function */
+static int __init max3107_init(void)
+{
+ return spi_register_driver(&max3107_driver);
+}
+
+/* Driver exit function */
+static void __exit max3107_exit(void)
+{
+ spi_unregister_driver(&max3107_driver);
+}
+
+module_init(max3107_init);
+module_exit(max3107_exit);
+
+MODULE_DESCRIPTION("MAX3107 driver");
+MODULE_AUTHOR("Aavamobile");
+MODULE_ALIAS("spi:aava-max3107");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
new file mode 100644
index 00000000..a8164601
--- /dev/null
+++ b/drivers/tty/serial/max3107.c
@@ -0,0 +1,1213 @@
+/*
+ * max3107.c - spi uart protocol driver for Maxim 3107
+ * Based on max3100.c
+ * by Christian Pellegrin <chripell@evolware.org>
+ * and max3110.c
+ * by Feng Tang <feng.tang@intel.com>
+ *
+ * Copyright (C) Aavamobile 2009
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/freezer.h>
+#include "max3107.h"
+
+static const struct baud_table brg26_ext[] = {
+ { 300, MAX3107_BRG26_B300 },
+ { 600, MAX3107_BRG26_B600 },
+ { 1200, MAX3107_BRG26_B1200 },
+ { 2400, MAX3107_BRG26_B2400 },
+ { 4800, MAX3107_BRG26_B4800 },
+ { 9600, MAX3107_BRG26_B9600 },
+ { 19200, MAX3107_BRG26_B19200 },
+ { 57600, MAX3107_BRG26_B57600 },
+ { 115200, MAX3107_BRG26_B115200 },
+ { 230400, MAX3107_BRG26_B230400 },
+ { 460800, MAX3107_BRG26_B460800 },
+ { 921600, MAX3107_BRG26_B921600 },
+ { 0, 0 }
+};
+
+static const struct baud_table brg13_int[] = {
+ { 300, MAX3107_BRG13_IB300 },
+ { 600, MAX3107_BRG13_IB600 },
+ { 1200, MAX3107_BRG13_IB1200 },
+ { 2400, MAX3107_BRG13_IB2400 },
+ { 4800, MAX3107_BRG13_IB4800 },
+ { 9600, MAX3107_BRG13_IB9600 },
+ { 19200, MAX3107_BRG13_IB19200 },
+ { 57600, MAX3107_BRG13_IB57600 },
+ { 115200, MAX3107_BRG13_IB115200 },
+ { 230400, MAX3107_BRG13_IB230400 },
+ { 460800, MAX3107_BRG13_IB460800 },
+ { 921600, MAX3107_BRG13_IB921600 },
+ { 0, 0 }
+};
+
+static u32 get_new_brg(int baud, struct max3107_port *s)
+{
+ int i;
+ const struct baud_table *baud_tbl = s->baud_tbl;
+
+ for (i = 0; i < 13; i++) {
+ if (baud == baud_tbl[i].baud)
+ return baud_tbl[i].new_brg;
+ }
+
+ return 0;
+}
+
+/* Perform SPI transfer for write/read of device register(s) */
+int max3107_rw(struct max3107_port *s, u8 *tx, u8 *rx, int len)
+{
+ struct spi_message spi_msg;
+ struct spi_transfer spi_xfer;
+
+ /* Initialize SPI message */
+ spi_message_init(&spi_msg);
+
+ /* Initialize SPI transfer */
+ memset(&spi_xfer, 0, sizeof spi_xfer);
+ spi_xfer.len = len;
+ spi_xfer.tx_buf = tx;
+ spi_xfer.rx_buf = rx;
+ spi_xfer.speed_hz = MAX3107_SPI_SPEED;
+
+ /* Add SPI transfer to SPI message */
+ spi_message_add_tail(&spi_xfer, &spi_msg);
+
+#ifdef DBG_TRACE_SPI_DATA
+ {
+ int i;
+ pr_info("tx len %d:\n", spi_xfer.len);
+ for (i = 0 ; i < spi_xfer.len && i < 32 ; i++)
+ pr_info(" %x", ((u8 *)spi_xfer.tx_buf)[i]);
+ pr_info("\n");
+ }
+#endif
+
+ /* Perform synchronous SPI transfer */
+ if (spi_sync(s->spi, &spi_msg)) {
+ dev_err(&s->spi->dev, "spi_sync failure\n");
+ return -EIO;
+ }
+
+#ifdef DBG_TRACE_SPI_DATA
+ if (spi_xfer.rx_buf) {
+ int i;
+ pr_info("rx len %d:\n", spi_xfer.len);
+ for (i = 0 ; i < spi_xfer.len && i < 32 ; i++)
+ pr_info(" %x", ((u8 *)spi_xfer.rx_buf)[i]);
+ pr_info("\n");
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max3107_rw);
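+
+/*
+ * Typical call pattern, as used throughout this driver: fill buf[] with
+ * 16-bit command words, then call
+ *   max3107_rw(s, (u8 *)buf, (u8 *)buf, nwords * 2)  for a write/read-back
+ *   max3107_rw(s, (u8 *)buf, NULL, nwords * 2)       for a write-only transfer
+ * where nwords stands for the number of 16-bit words; the length argument
+ * is always in bytes, i.e. two per word.
+ */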
+
+/* Puts received data to circular buffer */
+static void put_data_to_circ_buf(struct max3107_port *s, unsigned char *data,
+ int len)
+{
+ struct uart_port *port = &s->port;
+ struct tty_struct *tty;
+
+ if (!port->state)
+ return;
+
+ tty = port->state->port.tty;
+ if (!tty)
+ return;
+
+ /* Insert received data */
+ tty_insert_flip_string(tty, data, len);
+ /* Update RX counter */
+ port->icount.rx += len;
+}
+
+/* Handle data receiving */
+static void max3107_handlerx(struct max3107_port *s, u16 rxlvl)
+{
+ int i;
+ int j;
+ int len; /* SPI transfer buffer length */
+ u16 *buf;
+ u8 *valid_str;
+
+ if (!s->rx_enabled)
+ /* RX is disabled */
+ return;
+
+ if (rxlvl == 0) {
+ /* RX fifo is empty */
+ return;
+ } else if (rxlvl >= MAX3107_RX_FIFO_SIZE) {
+ dev_warn(&s->spi->dev, "Possible RX FIFO overrun %d\n", rxlvl);
+ /* Ensure sanity of RX level */
+ rxlvl = MAX3107_RX_FIFO_SIZE;
+ }
+ if (!s->rxbuf || !s->rxstr) {
+ dev_warn(&s->spi->dev, "Rx buffer/str isn't ready\n");
+ return;
+ }
+ buf = s->rxbuf;
+ valid_str = s->rxstr;
+ while (rxlvl) {
+ pr_debug("rxlvl %d\n", rxlvl);
+ /* Clear buffer */
+ memset(buf, 0, sizeof(u16) * (MAX3107_RX_FIFO_SIZE + 2));
+ len = 0;
+ if (s->irqen_reg & MAX3107_IRQ_RXFIFO_BIT) {
+ /* First disable RX FIFO interrupt */
+ pr_debug("Disabling RX INT\n");
+ buf[0] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
+ s->irqen_reg &= ~MAX3107_IRQ_RXFIFO_BIT;
+ buf[0] |= s->irqen_reg;
+ len++;
+ }
+ /* Just increase the length by amount of words in FIFO since
+ * buffer was zeroed and SPI transfer of 0x0000 means reading
+ * from RX FIFO
+ */
+ len += rxlvl;
+ /* Append RX level query */
+ buf[len] = MAX3107_RXFIFOLVL_REG;
+ len++;
+
+ /* Perform the SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, len * 2)) {
+ dev_err(&s->spi->dev, "SPI transfer for RX h failed\n");
+ return;
+ }
+
+ /* Skip RX FIFO interrupt disabling word if it was added */
+ j = ((len - 1) - rxlvl);
+ /* Read received words */
+ for (i = 0; i < rxlvl; i++, j++)
+ valid_str[i] = (u8)buf[j];
+ put_data_to_circ_buf(s, valid_str, rxlvl);
+ /* Get new RX level */
+ rxlvl = (buf[len - 1] & MAX3107_SPI_RX_DATA_MASK);
+ }
+
+ if (s->rx_enabled) {
+ /* RX still enabled, re-enable RX FIFO interrupt */
+ pr_debug("Enabling RX INT\n");
+ buf[0] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
+ s->irqen_reg |= MAX3107_IRQ_RXFIFO_BIT;
+ buf[0] |= s->irqen_reg;
+ if (max3107_rw(s, (u8 *)buf, NULL, 2))
+ dev_err(&s->spi->dev, "RX FIFO INT enabling failed\n");
+ }
+
+ /* Push the received data to receivers */
+ if (s->port.state->port.tty)
+ tty_flip_buffer_push(s->port.state->port.tty);
+}
+
+
+/* Handle data sending */
+static void max3107_handletx(struct max3107_port *s)
+{
+ struct circ_buf *xmit = &s->port.state->xmit;
+ int i;
+ unsigned long flags;
+ int len; /* SPI transfer buffer length */
+ u16 *buf;
+
+ if (!s->tx_fifo_empty)
+ /* Don't send more data before previous data is sent */
+ return;
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&s->port))
+ /* No data to send or TX is stopped */
+ return;
+
+ if (!s->txbuf) {
+ dev_warn(&s->spi->dev, "Txbuf isn't ready\n");
+ return;
+ }
+ buf = s->txbuf;
+ /* Get length of data pending in circular buffer */
+ len = uart_circ_chars_pending(xmit);
+ if (len) {
+ /* Limit to size of TX FIFO */
+ if (len > MAX3107_TX_FIFO_SIZE)
+ len = MAX3107_TX_FIFO_SIZE;
+
+ pr_debug("txlen %d\n", len);
+
+ /* Update TX counter */
+ s->port.icount.tx += len;
+
+ /* TX FIFO will no longer be empty */
+ s->tx_fifo_empty = 0;
+
+ i = 0;
+ if (s->irqen_reg & MAX3107_IRQ_TXEMPTY_BIT) {
+ /* First disable TX empty interrupt */
+ pr_debug("Disabling TE INT\n");
+ buf[i] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
+ s->irqen_reg &= ~MAX3107_IRQ_TXEMPTY_BIT;
+ buf[i] |= s->irqen_reg;
+ i++;
+ len++;
+ }
+ /* Add data to send */
+ spin_lock_irqsave(&s->port.lock, flags);
+ for ( ; i < len ; i++) {
+ buf[i] = (MAX3107_WRITE_BIT | MAX3107_THR_REG);
+ buf[i] |= ((u16)xmit->buf[xmit->tail] &
+ MAX3107_SPI_TX_DATA_MASK);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ }
+ spin_unlock_irqrestore(&s->port.lock, flags);
+ if (!(s->irqen_reg & MAX3107_IRQ_TXEMPTY_BIT)) {
+ /* Enable TX empty interrupt */
+ pr_debug("Enabling TE INT\n");
+ buf[i] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
+ s->irqen_reg |= MAX3107_IRQ_TXEMPTY_BIT;
+ buf[i] |= s->irqen_reg;
+ i++;
+ len++;
+ }
+ if (!s->tx_enabled) {
+ /* Enable TX */
+ pr_debug("Enable TX\n");
+ buf[i] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
+ spin_lock_irqsave(&s->data_lock, flags);
+ s->mode1_reg &= ~MAX3107_MODE1_TXDIS_BIT;
+ buf[i] |= s->mode1_reg;
+ spin_unlock_irqrestore(&s->data_lock, flags);
+ s->tx_enabled = 1;
+ i++;
+ len++;
+ }
+
+ /* Perform the SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, NULL, len*2)) {
+ dev_err(&s->spi->dev,
+ "SPI transfer TX handling failed\n");
+ return;
+ }
+ }
+
+ /* Indicate wake up if circular buffer is getting low on data */
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&s->port);
+
+}
+
+/* Handle interrupts
+ * Also reads and returns current RX FIFO level
+ */
+static u16 handle_interrupt(struct max3107_port *s)
+{
+ u16 buf[4]; /* Buffer for SPI transfers */
+ u8 irq_status;
+ u16 rx_level;
+ unsigned long flags;
+
+ /* Read IRQ status register */
+ buf[0] = MAX3107_IRQSTS_REG;
+ /* Read status IRQ status register */
+ buf[1] = MAX3107_STS_IRQSTS_REG;
+ /* Read LSR IRQ status register */
+ buf[2] = MAX3107_LSR_IRQSTS_REG;
+ /* Query RX level */
+ buf[3] = MAX3107_RXFIFOLVL_REG;
+
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 8)) {
+ dev_err(&s->spi->dev,
+ "SPI transfer for INTR handling failed\n");
+ return 0;
+ }
+
+ irq_status = (u8)buf[0];
+ pr_debug("IRQSTS %x\n", irq_status);
+ rx_level = (buf[3] & MAX3107_SPI_RX_DATA_MASK);
+
+ if (irq_status & MAX3107_IRQ_LSR_BIT) {
+ /* LSR interrupt */
+ if (buf[2] & MAX3107_LSR_RXTO_BIT)
+ /* RX timeout interrupt,
+ * handled by normal RX handling
+ */
+ pr_debug("RX TO INT\n");
+ }
+
+ if (irq_status & MAX3107_IRQ_TXEMPTY_BIT) {
+ /* Tx empty interrupt,
+ * disable TX and set tx_fifo_empty flag
+ */
+ pr_debug("TE INT, disabling TX\n");
+ buf[0] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
+ spin_lock_irqsave(&s->data_lock, flags);
+ s->mode1_reg |= MAX3107_MODE1_TXDIS_BIT;
+ buf[0] |= s->mode1_reg;
+ spin_unlock_irqrestore(&s->data_lock, flags);
+ if (max3107_rw(s, (u8 *)buf, NULL, 2))
+ dev_err(&s->spi->dev, "SPI transfer TX dis failed\n");
+ s->tx_enabled = 0;
+ s->tx_fifo_empty = 1;
+ }
+
+ if (irq_status & MAX3107_IRQ_RXFIFO_BIT)
+ /* RX FIFO interrupt,
+ * handled by normal RX handling
+ */
+ pr_debug("RFIFO INT\n");
+
+ /* Return RX level */
+ return rx_level;
+}
+
+/* Trigger work thread */
+static void max3107_dowork(struct max3107_port *s)
+{
+ if (!work_pending(&s->work) && !freezing(current) && !s->suspended)
+ queue_work(s->workqueue, &s->work);
+ else
+ dev_warn(&s->spi->dev, "interrup isn't serviced normally!\n");
+}
+
+/* Work thread */
+static void max3107_work(struct work_struct *w)
+{
+ struct max3107_port *s = container_of(w, struct max3107_port, work);
+ u16 rxlvl = 0;
+ int len; /* SPI transfer buffer length */
+ u16 buf[5]; /* Buffer for SPI transfers */
+ unsigned long flags;
+
+ /* Start by reading current RX FIFO level */
+ buf[0] = MAX3107_RXFIFOLVL_REG;
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
+ dev_err(&s->spi->dev, "SPI transfer RX lev failed\n");
+ rxlvl = 0;
+ } else {
+ rxlvl = (buf[0] & MAX3107_SPI_RX_DATA_MASK);
+ }
+
+ do {
+ pr_debug("rxlvl %d\n", rxlvl);
+
+ /* Handle RX */
+ max3107_handlerx(s, rxlvl);
+ rxlvl = 0;
+
+ if (s->handle_irq) {
+ /* Handle pending interrupts
+ * We also get new RX FIFO level since new data may
+ * have been received while pushing received data to
+ * receivers
+ */
+ s->handle_irq = 0;
+ rxlvl = handle_interrupt(s);
+ }
+
+ /* Handle TX */
+ max3107_handletx(s);
+
+ /* Handle configuration changes */
+ len = 0;
+ spin_lock_irqsave(&s->data_lock, flags);
+ if (s->mode1_commit) {
+ pr_debug("mode1_commit\n");
+ buf[len] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
+ buf[len++] |= s->mode1_reg;
+ s->mode1_commit = 0;
+ }
+ if (s->lcr_commit) {
+ pr_debug("lcr_commit\n");
+ buf[len] = (MAX3107_WRITE_BIT | MAX3107_LCR_REG);
+ buf[len++] |= s->lcr_reg;
+ s->lcr_commit = 0;
+ }
+ if (s->brg_commit) {
+ pr_debug("brg_commit\n");
+ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVMSB_REG);
+ buf[len++] |= ((s->brg_cfg >> 16) &
+ MAX3107_SPI_TX_DATA_MASK);
+ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVLSB_REG);
+ buf[len++] |= ((s->brg_cfg >> 8) &
+ MAX3107_SPI_TX_DATA_MASK);
+ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGCFG_REG);
+ buf[len++] |= ((s->brg_cfg) & 0xff);
+ s->brg_commit = 0;
+ }
+ spin_unlock_irqrestore(&s->data_lock, flags);
+
+ if (len > 0) {
+ if (max3107_rw(s, (u8 *)buf, NULL, len * 2))
+ dev_err(&s->spi->dev,
+ "SPI transfer config failed\n");
+ }
+
+ /* Reloop if interrupt handling indicated data in RX FIFO */
+ } while (rxlvl);
+
+}
+
+/* Set sleep mode */
+static void max3107_set_sleep(struct max3107_port *s, int mode)
+{
+ u16 buf[1]; /* Buffer for SPI transfer */
+ unsigned long flags;
+ pr_debug("enter, mode %d\n", mode);
+
+ buf[0] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
+ spin_lock_irqsave(&s->data_lock, flags);
+ switch (mode) {
+ case MAX3107_DISABLE_FORCED_SLEEP:
+ s->mode1_reg &= ~MAX3107_MODE1_FORCESLEEP_BIT;
+ break;
+ case MAX3107_ENABLE_FORCED_SLEEP:
+ s->mode1_reg |= MAX3107_MODE1_FORCESLEEP_BIT;
+ break;
+ case MAX3107_DISABLE_AUTOSLEEP:
+ s->mode1_reg &= ~MAX3107_MODE1_AUTOSLEEP_BIT;
+ break;
+ case MAX3107_ENABLE_AUTOSLEEP:
+ s->mode1_reg |= MAX3107_MODE1_AUTOSLEEP_BIT;
+ break;
+ default:
+ spin_unlock_irqrestore(&s->data_lock, flags);
+ dev_warn(&s->spi->dev, "invalid sleep mode\n");
+ return;
+ }
+ buf[0] |= s->mode1_reg;
+ spin_unlock_irqrestore(&s->data_lock, flags);
+
+ if (max3107_rw(s, (u8 *)buf, NULL, 2))
+ dev_err(&s->spi->dev, "SPI transfer sleep mode failed\n");
+
+ if (mode == MAX3107_DISABLE_AUTOSLEEP ||
+ mode == MAX3107_DISABLE_FORCED_SLEEP)
+ msleep(MAX3107_WAKEUP_DELAY);
+}
+
+/* Perform full register initialization */
+static void max3107_register_init(struct max3107_port *s)
+{
+ u16 buf[11]; /* Buffer for SPI transfers */
+
+ /* 1. Configure baud rate, 9600 as default */
+ s->baud = 9600;
+ /* the below is default*/
+ if (s->ext_clk) {
+ s->brg_cfg = MAX3107_BRG26_B9600;
+ s->baud_tbl = (struct baud_table *)brg26_ext;
+ } else {
+ s->brg_cfg = MAX3107_BRG13_IB9600;
+ s->baud_tbl = (struct baud_table *)brg13_int;
+ }
+
+ if (s->pdata->init)
+ s->pdata->init(s);
+
+ buf[0] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVMSB_REG)
+ | ((s->brg_cfg >> 16) & MAX3107_SPI_TX_DATA_MASK);
+ buf[1] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVLSB_REG)
+ | ((s->brg_cfg >> 8) & MAX3107_SPI_TX_DATA_MASK);
+ buf[2] = (MAX3107_WRITE_BIT | MAX3107_BRGCFG_REG)
+ | ((s->brg_cfg) & 0xff);
+
+ /* 2. Configure LCR register, 8N1 mode by default */
+ s->lcr_reg = MAX3107_LCR_WORD_LEN_8;
+ buf[3] = (MAX3107_WRITE_BIT | MAX3107_LCR_REG)
+ | s->lcr_reg;
+
+ /* 3. Configure MODE 1 register */
+ s->mode1_reg = 0;
+ /* Enable IRQ pin */
+ s->mode1_reg |= MAX3107_MODE1_IRQSEL_BIT;
+ /* Disable TX */
+ s->mode1_reg |= MAX3107_MODE1_TXDIS_BIT;
+ s->tx_enabled = 0;
+ /* RX is enabled */
+ s->rx_enabled = 1;
+ buf[4] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG)
+ | s->mode1_reg;
+
+ /* 4. Configure MODE 2 register */
+ buf[5] = (MAX3107_WRITE_BIT | MAX3107_MODE2_REG);
+ if (s->loopback) {
+ /* Enable loopback */
+ buf[5] |= MAX3107_MODE2_LOOPBACK_BIT;
+ }
+ /* Reset FIFOs */
+ buf[5] |= MAX3107_MODE2_FIFORST_BIT;
+ s->tx_fifo_empty = 1;
+
+ /* 5. Configure FIFO trigger level register */
+ buf[6] = (MAX3107_WRITE_BIT | MAX3107_FIFOTRIGLVL_REG);
+ /* RX FIFO trigger for 16 words, TX FIFO trigger not used */
+ buf[6] |= (MAX3107_FIFOTRIGLVL_RX(16) | MAX3107_FIFOTRIGLVL_TX(0));
+
+ /* 6. Configure flow control levels */
+ buf[7] = (MAX3107_WRITE_BIT | MAX3107_FLOWLVL_REG);
+ /* Flow control halt level 96, resume level 48 */
+ buf[7] |= (MAX3107_FLOWLVL_RES(48) | MAX3107_FLOWLVL_HALT(96));
+
+ /* 7. Configure flow control */
+ buf[8] = (MAX3107_WRITE_BIT | MAX3107_FLOWCTRL_REG);
+ /* Enable auto CTS and auto RTS flow control */
+ buf[8] |= (MAX3107_FLOWCTRL_AUTOCTS_BIT | MAX3107_FLOWCTRL_AUTORTS_BIT);
+
+ /* 8. Configure RX timeout register */
+ buf[9] = (MAX3107_WRITE_BIT | MAX3107_RXTO_REG);
+ /* Timeout after 48 character intervals */
+ buf[9] |= 0x0030;
+
+ /* 9. Configure LSR interrupt enable register */
+ buf[10] = (MAX3107_WRITE_BIT | MAX3107_LSR_IRQEN_REG);
+ /* Enable RX timeout interrupt */
+ buf[10] |= MAX3107_LSR_RXTO_BIT;
+
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, NULL, 22))
+ dev_err(&s->spi->dev, "SPI transfer for init failed\n");
+
+ /* 10. Clear IRQ status register by reading it */
+ buf[0] = MAX3107_IRQSTS_REG;
+
+ /* 11. Configure interrupt enable register */
+ /* Enable LSR interrupt */
+ s->irqen_reg = MAX3107_IRQ_LSR_BIT;
+ /* Enable RX FIFO interrupt */
+ s->irqen_reg |= MAX3107_IRQ_RXFIFO_BIT;
+ buf[1] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG)
+ | s->irqen_reg;
+
+ /* 12. Clear FIFO reset that was set in step 6 */
+ buf[2] = (MAX3107_WRITE_BIT | MAX3107_MODE2_REG);
+ if (s->loopback) {
+ /* Keep loopback enabled */
+ buf[2] |= MAX3107_MODE2_LOOPBACK_BIT;
+ }
+
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 6))
+ dev_err(&s->spi->dev, "SPI transfer for init failed\n");
+
+}
+
+/* IRQ handler */
+static irqreturn_t max3107_irq(int irqno, void *dev_id)
+{
+ struct max3107_port *s = dev_id;
+
+ if (irqno != s->spi->irq) {
+ /* Unexpected IRQ */
+ return IRQ_NONE;
+ }
+
+ /* Indicate irq */
+ s->handle_irq = 1;
+
+ /* Trigger work thread */
+ max3107_dowork(s);
+
+ return IRQ_HANDLED;
+}
+
+/* HW suspension function
+ *
+ * Currently autosleep is used to decrease current consumption; an
+ * alternative approach would be to put the chip into reset mode when the
+ * UART is not in use, but that would disturb the GPIO states
+ *
+ */
+void max3107_hw_susp(struct max3107_port *s, int suspend)
+{
+ pr_debug("enter, suspend %d\n", suspend);
+
+ if (suspend) {
+ /* Suspend requested,
+ * enable autosleep to decrease current consumption
+ */
+ s->suspended = 1;
+ max3107_set_sleep(s, MAX3107_ENABLE_AUTOSLEEP);
+ } else {
+ /* Resume requested,
+ * disable autosleep
+ */
+ s->suspended = 0;
+ max3107_set_sleep(s, MAX3107_DISABLE_AUTOSLEEP);
+ }
+}
+EXPORT_SYMBOL_GPL(max3107_hw_susp);
+
+/* Modem status IRQ enabling */
+static void max3107_enable_ms(struct uart_port *port)
+{
+ /* Modem status not supported */
+}
+
+/* Data send function */
+static void max3107_start_tx(struct uart_port *port)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+
+ /* Trigger work thread for sending data */
+ max3107_dowork(s);
+}
+
+/* Function for checking that there are no pending transfers */
+static unsigned int max3107_tx_empty(struct uart_port *port)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+
+ pr_debug("returning %d\n",
+ (s->tx_fifo_empty && uart_circ_empty(&s->port.state->xmit)));
+ return s->tx_fifo_empty && uart_circ_empty(&s->port.state->xmit);
+}
+
+/* Function for stopping RX */
+static void max3107_stop_rx(struct uart_port *port)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+ unsigned long flags;
+
+ /* Set RX disabled in MODE 1 register */
+ spin_lock_irqsave(&s->data_lock, flags);
+ s->mode1_reg |= MAX3107_MODE1_RXDIS_BIT;
+ s->mode1_commit = 1;
+ spin_unlock_irqrestore(&s->data_lock, flags);
+ /* Set RX disabled */
+ s->rx_enabled = 0;
+ /* Trigger work thread for doing the actual configuration change */
+ max3107_dowork(s);
+}
+
+/* Function for returning control pin states */
+static unsigned int max3107_get_mctrl(struct uart_port *port)
+{
+ /* DCD and DSR are not wired and CTS/RTS is handled automatically
+ * so just indicate DSR and CAR asserted
+ */
+ return TIOCM_DSR | TIOCM_CAR;
+}
+
+/* Function for setting control pin states */
+static void max3107_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ /* DCD and DSR are not wired and CTS/RTS is handled automatically
+ * so do nothing
+ */
+}
+
+/* Function for configuring UART parameters */
+static void max3107_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+ struct tty_struct *tty;
+ int baud;
+ u16 new_lcr = 0;
+ u32 new_brg = 0;
+ unsigned long flags;
+
+ if (!port->state)
+ return;
+
+ tty = port->state->port.tty;
+ if (!tty)
+ return;
+
+ /* Get new LCR register values */
+ /* Word size */
+ if ((termios->c_cflag & CSIZE) == CS7)
+ new_lcr |= MAX3107_LCR_WORD_LEN_7;
+ else
+ new_lcr |= MAX3107_LCR_WORD_LEN_8;
+
+ /* Parity */
+ if (termios->c_cflag & PARENB) {
+ new_lcr |= MAX3107_LCR_PARITY_BIT;
+ if (!(termios->c_cflag & PARODD))
+ new_lcr |= MAX3107_LCR_EVENPARITY_BIT;
+ }
+
+ /* Stop bits */
+ if (termios->c_cflag & CSTOPB) {
+ /* 2 stop bits */
+ new_lcr |= MAX3107_LCR_STOPLEN_BIT;
+ }
+
+ /* Mask termios capabilities we don't support */
+ termios->c_cflag &= ~CMSPAR;
+
+ /* Set status ignore mask */
+ s->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ s->port.ignore_status_mask |= MAX3107_ALL_ERRORS;
+
+ /* Set low latency to immediately handle pushed data */
+ s->port.state->port.tty->low_latency = 1;
+
+ /* Get new baud rate generator configuration */
+ baud = tty_get_baud_rate(tty);
+
+ spin_lock_irqsave(&s->data_lock, flags);
+ new_brg = get_new_brg(baud, s);
+ /* if we can't find the correct config, use the previous one */
+ if (!new_brg) {
+ baud = s->baud;
+ new_brg = s->brg_cfg;
+ }
+ spin_unlock_irqrestore(&s->data_lock, flags);
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ s->baud = baud;
+
+ /* Update timeout according to new baud rate */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_lock_irqsave(&s->data_lock, flags);
+ if (s->lcr_reg != new_lcr) {
+ s->lcr_reg = new_lcr;
+ s->lcr_commit = 1;
+ }
+ if (s->brg_cfg != new_brg) {
+ s->brg_cfg = new_brg;
+ s->brg_commit = 1;
+ }
+ spin_unlock_irqrestore(&s->data_lock, flags);
+
+ /* Trigger work thread for doing the actual configuration change */
+ max3107_dowork(s);
+}
+
+/* Port shutdown function */
+static void max3107_shutdown(struct uart_port *port)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+
+ if (s->suspended && s->pdata->hw_suspend)
+ s->pdata->hw_suspend(s, 0);
+
+ /* Free the interrupt */
+ free_irq(s->spi->irq, s);
+
+ if (s->workqueue) {
+ /* Flush and destroy work queue */
+ flush_workqueue(s->workqueue);
+ destroy_workqueue(s->workqueue);
+ s->workqueue = NULL;
+ }
+
+ /* Suspend HW */
+ if (s->pdata->hw_suspend)
+ s->pdata->hw_suspend(s, 1);
+}
+
+/* Port startup function */
+static int max3107_startup(struct uart_port *port)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+
+ /* Initialize work queue */
+ s->workqueue = create_freezable_workqueue("max3107");
+ if (!s->workqueue) {
+ dev_err(&s->spi->dev, "Workqueue creation failed\n");
+ return -EBUSY;
+ }
+ INIT_WORK(&s->work, max3107_work);
+
+ /* Setup IRQ */
+ if (request_irq(s->spi->irq, max3107_irq, IRQF_TRIGGER_FALLING,
+ "max3107", s)) {
+ dev_err(&s->spi->dev, "IRQ reguest failed\n");
+ destroy_workqueue(s->workqueue);
+ s->workqueue = NULL;
+ return -EBUSY;
+ }
+
+ /* Resume HW */
+ if (s->pdata->hw_suspend)
+ s->pdata->hw_suspend(s, 0);
+
+ /* Init registers */
+ max3107_register_init(s);
+
+ return 0;
+}
+
+/* Port type function */
+static const char *max3107_type(struct uart_port *port)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+ return s->spi->modalias;
+}
+
+/* Port release function */
+static void max3107_release_port(struct uart_port *port)
+{
+ /* Do nothing */
+}
+
+/* Port request function */
+static int max3107_request_port(struct uart_port *port)
+{
+ /* Do nothing */
+ return 0;
+}
+
+/* Port config function */
+static void max3107_config_port(struct uart_port *port, int flags)
+{
+ struct max3107_port *s = container_of(port, struct max3107_port, port);
+ s->port.type = PORT_MAX3107;
+}
+
+/* Port verify function */
+static int max3107_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (ser->type == PORT_UNKNOWN || ser->type == PORT_MAX3107)
+ return 0;
+
+ return -EINVAL;
+}
+
+/* Port stop TX function */
+static void max3107_stop_tx(struct uart_port *port)
+{
+ /* Do nothing */
+}
+
+/* Port break control function */
+static void max3107_break_ctl(struct uart_port *port, int break_state)
+{
+ /* We don't support break control, do nothing */
+}
+
+
+/* Port functions */
+static struct uart_ops max3107_ops = {
+ .tx_empty = max3107_tx_empty,
+ .set_mctrl = max3107_set_mctrl,
+ .get_mctrl = max3107_get_mctrl,
+ .stop_tx = max3107_stop_tx,
+ .start_tx = max3107_start_tx,
+ .stop_rx = max3107_stop_rx,
+ .enable_ms = max3107_enable_ms,
+ .break_ctl = max3107_break_ctl,
+ .startup = max3107_startup,
+ .shutdown = max3107_shutdown,
+ .set_termios = max3107_set_termios,
+ .type = max3107_type,
+ .release_port = max3107_release_port,
+ .request_port = max3107_request_port,
+ .config_port = max3107_config_port,
+ .verify_port = max3107_verify_port,
+};
+
+/* UART driver data */
+static struct uart_driver max3107_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyMAX",
+ .dev_name = "ttyMAX",
+ .nr = 1,
+};
+
+static int driver_registered;
+
+/* 'Generic' platform data */
+static struct max3107_plat generic_plat_data = {
+ .loopback = 0,
+ .ext_clk = 1,
+ .hw_suspend = max3107_hw_susp,
+ .polled_mode = 0,
+ .poll_time = 0,
+};
+
+
+/*******************************************************************/
+
+/**
+ * max3107_probe - SPI bus probe entry point
+ * @spi: the spi device
+ *
+ * SPI wants us to probe this device and if appropriate claim it.
+ * Perform any platform specific requirements and then initialise
+ * the device.
+ */
+
+int max3107_probe(struct spi_device *spi, struct max3107_plat *pdata)
+{
+ struct max3107_port *s;
+ u16 buf[2]; /* Buffer for SPI transfers */
+ int retval;
+
+ pr_info("enter max3107 probe\n");
+
+ /* Allocate port structure */
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s) {
+ pr_err("Allocating port structure failed\n");
+ return -ENOMEM;
+ }
+
+ s->pdata = pdata;
+
+ /* SPI Rx buffer
+ * +2 for RX FIFO interrupt
+ * disabling and RX level query
+ */
+ s->rxbuf = kzalloc(sizeof(u16) * (MAX3107_RX_FIFO_SIZE+2), GFP_KERNEL);
+ if (!s->rxbuf) {
+ pr_err("Allocating RX buffer failed\n");
+ retval = -ENOMEM;
+ goto err_free4;
+ }
+ s->rxstr = kzalloc(sizeof(u8) * MAX3107_RX_FIFO_SIZE, GFP_KERNEL);
+ if (!s->rxstr) {
+ pr_err("Allocating RX buffer failed\n");
+ retval = -ENOMEM;
+ goto err_free3;
+ }
+ /* SPI Tx buffer
+ * +3 for TX FIFO empty interrupt
+ * disabling and enabling and for
+ * TX enabling
+ */
+ s->txbuf = kzalloc(sizeof(u16) * (MAX3107_TX_FIFO_SIZE + 3), GFP_KERNEL);
+ if (!s->txbuf) {
+ pr_err("Allocating TX buffer failed\n");
+ retval = -ENOMEM;
+ goto err_free2;
+ }
+ /* Initialize shared data lock */
+ spin_lock_init(&s->data_lock);
+
+ /* SPI initializations */
+ dev_set_drvdata(&spi->dev, s);
+ spi->mode = SPI_MODE_0;
+ spi->dev.platform_data = pdata;
+ spi->bits_per_word = 16;
+ s->ext_clk = pdata->ext_clk;
+ s->loopback = pdata->loopback;
+ spi_setup(spi);
+ s->spi = spi;
+
+ /* Check REV ID to ensure we are talking to what we expect */
+ buf[0] = MAX3107_REVID_REG;
+ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
+ dev_err(&s->spi->dev, "SPI transfer for REVID read failed\n");
+ retval = -EIO;
+ goto err_free1;
+ }
+ if ((buf[0] & MAX3107_SPI_RX_DATA_MASK) != MAX3107_REVID1 &&
+ (buf[0] & MAX3107_SPI_RX_DATA_MASK) != MAX3107_REVID2) {
+ dev_err(&s->spi->dev, "REVID %x does not match\n",
+ (buf[0] & MAX3107_SPI_RX_DATA_MASK));
+ retval = -ENODEV;
+ goto err_free1;
+ }
+
+ /* Disable all interrupts */
+ buf[0] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG | 0x0000);
+ buf[0] |= 0x0000;
+
+ /* Configure clock source */
+ buf[1] = (MAX3107_WRITE_BIT | MAX3107_CLKSRC_REG);
+ if (s->ext_clk) {
+ /* External clock */
+ buf[1] |= MAX3107_CLKSRC_EXTCLK_BIT;
+ }
+
+ /* PLL bypass ON */
+ buf[1] |= MAX3107_CLKSRC_PLLBYP_BIT;
+
+ /* Perform SPI transfer */
+ if (max3107_rw(s, (u8 *)buf, NULL, 4)) {
+ dev_err(&s->spi->dev, "SPI transfer for init failed\n");
+ retval = -EIO;
+ goto err_free1;
+ }
+
+ /* Register UART driver */
+ if (!driver_registered) {
+ retval = uart_register_driver(&max3107_uart_driver);
+ if (retval) {
+ dev_err(&s->spi->dev, "Registering UART driver failed\n");
+ goto err_free1;
+ }
+ driver_registered = 1;
+ }
+
+ /* Initialize UART port data */
+ s->port.fifosize = 128;
+ s->port.ops = &max3107_ops;
+ s->port.line = 0;
+ s->port.dev = &spi->dev;
+ s->port.uartclk = 9600;
+ s->port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
+ s->port.irq = s->spi->irq;
+ s->port.type = PORT_MAX3107;
+
+ /* Add UART port */
+ retval = uart_add_one_port(&max3107_uart_driver, &s->port);
+ if (retval < 0) {
+ dev_err(&s->spi->dev, "Adding UART port failed\n");
+ goto err_free1;
+ }
+
+ if (pdata->configure) {
+ retval = pdata->configure(s);
+ if (retval < 0)
+ goto err_free1;
+ }
+
+ /* Go to suspend mode */
+ if (pdata->hw_suspend)
+ pdata->hw_suspend(s, 1);
+
+ return 0;
+
+err_free1:
+ kfree(s->txbuf);
+err_free2:
+ kfree(s->rxstr);
+err_free3:
+ kfree(s->rxbuf);
+err_free4:
+ kfree(s);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(max3107_probe);
+
+/* Driver remove function */
+int max3107_remove(struct spi_device *spi)
+{
+ struct max3107_port *s = dev_get_drvdata(&spi->dev);
+
+ pr_info("enter max3107 remove\n");
+
+ /* Remove port */
+ if (uart_remove_one_port(&max3107_uart_driver, &s->port))
+ dev_warn(&s->spi->dev, "Removing UART port failed\n");
+
+
+ /* Free TxRx buffer */
+ kfree(s->rxbuf);
+ kfree(s->rxstr);
+ kfree(s->txbuf);
+
+ /* Free port structure */
+ kfree(s);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max3107_remove);
+
+/* Driver suspend function */
+int max3107_suspend(struct spi_device *spi, pm_message_t state)
+{
+#ifdef CONFIG_PM
+ struct max3107_port *s = dev_get_drvdata(&spi->dev);
+
+ pr_debug("enter suspend\n");
+
+ /* Suspend UART port */
+ uart_suspend_port(&max3107_uart_driver, &s->port);
+
+ /* Go to suspend mode */
+ if (s->pdata->hw_suspend)
+ s->pdata->hw_suspend(s, 1);
+#endif /* CONFIG_PM */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max3107_suspend);
+
+/* Driver resume function */
+int max3107_resume(struct spi_device *spi)
+{
+#ifdef CONFIG_PM
+ struct max3107_port *s = dev_get_drvdata(&spi->dev);
+
+ pr_debug("enter resume\n");
+
+ /* Resume from suspend */
+ if (s->pdata->hw_suspend)
+ s->pdata->hw_suspend(s, 0);
+
+ /* Resume UART port */
+ uart_resume_port(&max3107_uart_driver, &s->port);
+#endif /* CONFIG_PM */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max3107_resume);
+
+static int max3107_probe_generic(struct spi_device *spi)
+{
+ return max3107_probe(spi, &generic_plat_data);
+}
+
+/* Spi driver data */
+static struct spi_driver max3107_driver = {
+ .driver = {
+ .name = "max3107",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = max3107_probe_generic,
+ .remove = __devexit_p(max3107_remove),
+ .suspend = max3107_suspend,
+ .resume = max3107_resume,
+};
+
+/* Driver init function */
+static int __init max3107_init(void)
+{
+ pr_info("enter max3107 init\n");
+ return spi_register_driver(&max3107_driver);
+}
+
+/* Driver exit function */
+static void __exit max3107_exit(void)
+{
+ pr_info("enter max3107 exit\n");
+ /* Unregister UART driver */
+ if (driver_registered)
+ uart_unregister_driver(&max3107_uart_driver);
+ spi_unregister_driver(&max3107_driver);
+}
+
+module_init(max3107_init);
+module_exit(max3107_exit);
+
+MODULE_DESCRIPTION("MAX3107 driver");
+MODULE_AUTHOR("Aavamobile");
+MODULE_ALIAS("spi:max3107");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/max3107.h b/drivers/tty/serial/max3107.h
new file mode 100644
index 00000000..8415fc72
--- /dev/null
+++ b/drivers/tty/serial/max3107.h
@@ -0,0 +1,441 @@
+/*
+ * max3107.h - spi uart protocol driver header for Maxim 3107
+ *
+ * Copyright (C) Aavamobile 2009
+ * Based on serial_max3100.h by Christian Pellegrin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MAX3107_H
+#define _MAX3107_H
+
+/* Serial error status definitions */
+#define MAX3107_PARITY_ERROR 1
+#define MAX3107_FRAME_ERROR 2
+#define MAX3107_OVERRUN_ERROR 4
+#define MAX3107_ALL_ERRORS (MAX3107_PARITY_ERROR | \
+ MAX3107_FRAME_ERROR | \
+ MAX3107_OVERRUN_ERROR)
+
+/* GPIO definitions */
+#define MAX3107_GPIO_BASE 88
+#define MAX3107_GPIO_COUNT 4
+
+
+/* GPIO connected to chip's reset pin */
+#define MAX3107_RESET_GPIO 87
+
+
+/* Chip reset delay */
+#define MAX3107_RESET_DELAY 10
+
+/* Chip wakeup delay */
+#define MAX3107_WAKEUP_DELAY 50
+
+
+/* Sleep mode definitions */
+#define MAX3107_DISABLE_FORCED_SLEEP 0
+#define MAX3107_ENABLE_FORCED_SLEEP 1
+#define MAX3107_DISABLE_AUTOSLEEP 2
+#define MAX3107_ENABLE_AUTOSLEEP 3
+
+
+/* Definitions for register access with SPI transfers
+ *
+ * SPI transfer format:
+ *
+ * Master to slave bits xzzzzzzzyyyyyyyy
+ * Slave to master bits aaaaaaaabbbbbbbb
+ *
+ * where:
+ * x = 0 for reads, 1 for writes
+ * z = register address
+ * y = new register value if write, 0 if read
+ * a = unspecified
+ * b = register value if read, unspecified if write
+ */
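+
+/*
+ * Worked example of the format above, using the definitions that follow
+ * (illustrative only):
+ *   write 0x03 to LCR: MAX3107_WRITE_BIT | MAX3107_LCR_REG | 0x03
+ *                      = 0x8000 | 0x0b00 | 0x0003 = 0x8b03
+ *   read LCR:          send MAX3107_LCR_REG (0x0b00); the register value
+ *                      comes back in the low byte of the reply
+ *                      (reply & MAX3107_SPI_RX_DATA_MASK)
+ */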
+
+/* SPI speed */
+#define MAX3107_SPI_SPEED (3125000 * 2)
+
+/* Write bit */
+#define MAX3107_WRITE_BIT (1 << 15)
+
+/* SPI RX data mask */
+#define MAX3107_SPI_RX_DATA_MASK (0x00ff)
+
+/* SPI TX data mask */
+#define MAX3107_SPI_TX_DATA_MASK (0x00ff)
+
+/* Register access masks */
+#define MAX3107_RHR_REG (0x0000) /* RX FIFO */
+#define MAX3107_THR_REG (0x0000) /* TX FIFO */
+#define MAX3107_IRQEN_REG (0x0100) /* IRQ enable */
+#define MAX3107_IRQSTS_REG (0x0200) /* IRQ status */
+#define MAX3107_LSR_IRQEN_REG (0x0300) /* LSR IRQ enable */
+#define MAX3107_LSR_IRQSTS_REG (0x0400) /* LSR IRQ status */
+#define MAX3107_SPCHR_IRQEN_REG (0x0500) /* Special char IRQ enable */
+#define MAX3107_SPCHR_IRQSTS_REG (0x0600) /* Special char IRQ status */
+#define MAX3107_STS_IRQEN_REG (0x0700) /* Status IRQ enable */
+#define MAX3107_STS_IRQSTS_REG (0x0800) /* Status IRQ status */
+#define MAX3107_MODE1_REG (0x0900) /* MODE1 */
+#define MAX3107_MODE2_REG (0x0a00) /* MODE2 */
+#define MAX3107_LCR_REG (0x0b00) /* LCR */
+#define MAX3107_RXTO_REG (0x0c00) /* RX timeout */
+#define MAX3107_HDPIXDELAY_REG (0x0d00) /* Auto transceiver delays */
+#define MAX3107_IRDA_REG (0x0e00) /* IRDA settings */
+#define MAX3107_FLOWLVL_REG (0x0f00) /* Flow control levels */
+#define MAX3107_FIFOTRIGLVL_REG (0x1000) /* FIFO IRQ trigger levels */
+#define MAX3107_TXFIFOLVL_REG (0x1100) /* TX FIFO level */
+#define MAX3107_RXFIFOLVL_REG (0x1200) /* RX FIFO level */
+#define MAX3107_FLOWCTRL_REG (0x1300) /* Flow control */
+#define MAX3107_XON1_REG (0x1400) /* XON1 character */
+#define MAX3107_XON2_REG (0x1500) /* XON2 character */
+#define MAX3107_XOFF1_REG (0x1600) /* XOFF1 character */
+#define MAX3107_XOFF2_REG (0x1700) /* XOFF2 character */
+#define MAX3107_GPIOCFG_REG (0x1800) /* GPIO config */
+#define MAX3107_GPIODATA_REG (0x1900) /* GPIO data */
+#define MAX3107_PLLCFG_REG (0x1a00) /* PLL config */
+#define MAX3107_BRGCFG_REG (0x1b00) /* Baud rate generator conf */
+#define MAX3107_BRGDIVLSB_REG (0x1c00) /* Baud rate divisor LSB */
+#define MAX3107_BRGDIVMSB_REG (0x1d00) /* Baud rate divisor MSB */
+#define MAX3107_CLKSRC_REG (0x1e00) /* Clock source */
+#define MAX3107_REVID_REG (0x1f00) /* Revision identification */
+
+/* IRQ register bits */
+#define MAX3107_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */
+#define MAX3107_IRQ_SPCHR_BIT (1 << 1) /* Special char interrupt */
+#define MAX3107_IRQ_STS_BIT (1 << 2) /* Status interrupt */
+#define MAX3107_IRQ_RXFIFO_BIT (1 << 3) /* RX FIFO interrupt */
+#define MAX3107_IRQ_TXFIFO_BIT (1 << 4) /* TX FIFO interrupt */
+#define MAX3107_IRQ_TXEMPTY_BIT (1 << 5) /* TX FIFO empty interrupt */
+#define MAX3107_IRQ_RXEMPTY_BIT (1 << 6) /* RX FIFO empty interrupt */
+#define MAX3107_IRQ_CTS_BIT (1 << 7) /* CTS interrupt */
+
+/* LSR register bits */
+#define MAX3107_LSR_RXTO_BIT (1 << 0) /* RX timeout */
+#define MAX3107_LSR_RXOVR_BIT (1 << 1) /* RX overrun */
+#define MAX3107_LSR_RXPAR_BIT (1 << 2) /* RX parity error */
+#define MAX3107_LSR_FRERR_BIT (1 << 3) /* Frame error */
+#define MAX3107_LSR_RXBRK_BIT (1 << 4) /* RX break */
+#define MAX3107_LSR_RXNOISE_BIT (1 << 5) /* RX noise */
+#define MAX3107_LSR_UNDEF6_BIT (1 << 6) /* Undefined/not used */
+#define MAX3107_LSR_CTS_BIT (1 << 7) /* CTS pin state */
+
+/* Special character register bits */
+#define MAX3107_SPCHR_XON1_BIT (1 << 0) /* XON1 character */
+#define MAX3107_SPCHR_XON2_BIT (1 << 1) /* XON2 character */
+#define MAX3107_SPCHR_XOFF1_BIT (1 << 2) /* XOFF1 character */
+#define MAX3107_SPCHR_XOFF2_BIT (1 << 3) /* XOFF2 character */
+#define MAX3107_SPCHR_BREAK_BIT (1 << 4) /* RX break */
+#define MAX3107_SPCHR_MULTIDROP_BIT (1 << 5) /* 9-bit multidrop addr char */
+#define MAX3107_SPCHR_UNDEF6_BIT (1 << 6) /* Undefined/not used */
+#define MAX3107_SPCHR_UNDEF7_BIT (1 << 7) /* Undefined/not used */
+
+/* Status register bits */
+#define MAX3107_STS_GPIO0_BIT (1 << 0) /* GPIO 0 interrupt */
+#define MAX3107_STS_GPIO1_BIT (1 << 1) /* GPIO 1 interrupt */
+#define MAX3107_STS_GPIO2_BIT (1 << 2) /* GPIO 2 interrupt */
+#define MAX3107_STS_GPIO3_BIT (1 << 3) /* GPIO 3 interrupt */
+#define MAX3107_STS_UNDEF4_BIT (1 << 4) /* Undefined/not used */
+#define MAX3107_STS_CLKREADY_BIT (1 << 5) /* Clock ready */
+#define MAX3107_STS_SLEEP_BIT (1 << 6) /* Sleep interrupt */
+#define MAX3107_STS_UNDEF7_BIT (1 << 7) /* Undefined/not used */
+
+/* MODE1 register bits */
+#define MAX3107_MODE1_RXDIS_BIT (1 << 0) /* RX disable */
+#define MAX3107_MODE1_TXDIS_BIT (1 << 1) /* TX disable */
+#define MAX3107_MODE1_TXHIZ_BIT (1 << 2) /* TX pin three-state */
+#define MAX3107_MODE1_RTSHIZ_BIT (1 << 3) /* RTS pin three-state */
+#define MAX3107_MODE1_TRNSCVCTRL_BIT (1 << 4) /* Transceiver ctrl enable */
+#define MAX3107_MODE1_FORCESLEEP_BIT (1 << 5) /* Force sleep mode */
+#define MAX3107_MODE1_AUTOSLEEP_BIT (1 << 6) /* Auto sleep enable */
+#define MAX3107_MODE1_IRQSEL_BIT (1 << 7) /* IRQ pin enable */
+
+/* MODE2 register bits */
+#define MAX3107_MODE2_RST_BIT (1 << 0) /* Chip reset */
+#define MAX3107_MODE2_FIFORST_BIT (1 << 1) /* FIFO reset */
+#define MAX3107_MODE2_RXTRIGINV_BIT (1 << 2) /* RX FIFO INT invert */
+#define MAX3107_MODE2_RXEMPTINV_BIT (1 << 3) /* RX FIFO empty INT invert */
+#define MAX3107_MODE2_SPCHR_BIT (1 << 4) /* Special chr detect enable */
+#define MAX3107_MODE2_LOOPBACK_BIT (1 << 5) /* Internal loopback enable */
+#define MAX3107_MODE2_MULTIDROP_BIT (1 << 6) /* 9-bit multidrop enable */
+#define MAX3107_MODE2_ECHOSUPR_BIT (1 << 7) /* ECHO suppression enable */
+
+/* LCR register bits */
+#define MAX3107_LCR_LENGTH0_BIT (1 << 0) /* Word length bit 0 */
+#define MAX3107_LCR_LENGTH1_BIT (1 << 1) /* Word length bit 1
+ *
+ * Word length bits table:
+ * 00 -> 5 bit words
+ * 01 -> 6 bit words
+ * 10 -> 7 bit words
+ * 11 -> 8 bit words
+ */
+#define MAX3107_LCR_STOPLEN_BIT (1 << 2) /* STOP length bit
+ *
+ * STOP length bit table:
+ * 0 -> 1 stop bit
+ * 1 -> 1-1.5 stop bits if
+ * word length is 5,
+ * 2 stop bits otherwise
+ */
+#define MAX3107_LCR_PARITY_BIT (1 << 3) /* Parity bit enable */
+#define MAX3107_LCR_EVENPARITY_BIT (1 << 4) /* Even parity bit enable */
+#define MAX3107_LCR_FORCEPARITY_BIT (1 << 5) /* 9-bit multidrop parity */
+#define MAX3107_LCR_TXBREAK_BIT (1 << 6) /* TX break enable */
+#define MAX3107_LCR_RTS_BIT (1 << 7) /* RTS pin control */
+#define MAX3107_LCR_WORD_LEN_5 (0x0000)
+#define MAX3107_LCR_WORD_LEN_6 (0x0001)
+#define MAX3107_LCR_WORD_LEN_7 (0x0002)
+#define MAX3107_LCR_WORD_LEN_8 (0x0003)
+
+
+/* IRDA register bits */
+#define MAX3107_IRDA_IRDAEN_BIT (1 << 0) /* IRDA mode enable */
+#define MAX3107_IRDA_SIR_BIT (1 << 1) /* SIR mode enable */
+#define MAX3107_IRDA_SHORTIR_BIT (1 << 2) /* Short SIR mode enable */
+#define MAX3107_IRDA_MIR_BIT (1 << 3) /* MIR mode enable */
+#define MAX3107_IRDA_RXINV_BIT (1 << 4) /* RX logic inversion enable */
+#define MAX3107_IRDA_TXINV_BIT (1 << 5) /* TX logic inversion enable */
+#define MAX3107_IRDA_UNDEF6_BIT (1 << 6) /* Undefined/not used */
+#define MAX3107_IRDA_UNDEF7_BIT (1 << 7) /* Undefined/not used */
+
+/* Flow control trigger level register masks */
+#define MAX3107_FLOWLVL_HALT_MASK (0x000f) /* Flow control halt level */
+#define MAX3107_FLOWLVL_RES_MASK (0x00f0) /* Flow control resume level */
+#define MAX3107_FLOWLVL_HALT(words) ((words/8) & 0x000f)
+#define MAX3107_FLOWLVL_RES(words) (((words/8) & 0x000f) << 4)
+
+/* FIFO interrupt trigger level register masks */
+#define MAX3107_FIFOTRIGLVL_TX_MASK (0x000f) /* TX FIFO trigger level */
+#define MAX3107_FIFOTRIGLVL_RX_MASK (0x00f0) /* RX FIFO trigger level */
+#define MAX3107_FIFOTRIGLVL_TX(words) ((words/8) & 0x000f)
+#define MAX3107_FIFOTRIGLVL_RX(words) (((words/8) & 0x000f) << 4)
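+
+/*
+ * Worked example (illustrative): MAX3107_FLOWLVL_HALT(96) encodes 96/8 = 0x0c
+ * in the low nibble and MAX3107_FLOWLVL_RES(48) encodes 48/8 = 0x06 in bits
+ * 7..4, so max3107_register_init() programs 0x6c as the FLOWLVL payload for
+ * "halt at 96 words, resume at 48".  MAX3107_FIFOTRIGLVL_RX(16) likewise
+ * encodes an RX FIFO trigger of 16 words as 0x20.
+ */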
+
+/* Flow control register bits */
+#define MAX3107_FLOWCTRL_AUTORTS_BIT (1 << 0) /* Auto RTS flow ctrl enable */
+#define MAX3107_FLOWCTRL_AUTOCTS_BIT (1 << 1) /* Auto CTS flow ctrl enable */
+#define MAX3107_FLOWCTRL_GPIADDR_BIT (1 << 2) /* Enable use of GPIO inputs
+ * in conjunction with XOFF2
+ * for definition of the
+ * special character */
+#define MAX3107_FLOWCTRL_SWFLOWEN_BIT (1 << 3) /* Auto SW flow ctrl enable */
+#define MAX3107_FLOWCTRL_SWFLOW0_BIT (1 << 4) /* SWFLOW bit 0 */
+#define MAX3107_FLOWCTRL_SWFLOW1_BIT (1 << 5) /* SWFLOW bit 1
+ *
+ * SWFLOW bits 1 & 0 table:
+ * 00 -> no transmitter flow
+ * control
+ * 01 -> receiver compares
+ * XON2 and XOFF2
+ * and controls
+ * transmitter
+ * 10 -> receiver compares
+ * XON1 and XOFF1
+ * and controls
+ * transmitter
+ * 11 -> receiver compares
+ * XON1, XON2, XOFF1 and
+ * XOFF2 and controls
+ * transmitter
+ */
+#define MAX3107_FLOWCTRL_SWFLOW2_BIT (1 << 6) /* SWFLOW bit 2 */
+#define MAX3107_FLOWCTRL_SWFLOW3_BIT (1 << 7) /* SWFLOW bit 3
+ *
+ * SWFLOW bits 3 & 2 table:
+ * 00 -> no received flow
+ * control
+ * 01 -> transmitter generates
+ * XON2 and XOFF2
+ * 10 -> transmitter generates
+ * XON1 and XOFF1
+ * 11 -> transmitter generates
+ * XON1, XON2, XOFF1 and
+ * XOFF2
+ */
+
+/* GPIO configuration register bits */
+#define MAX3107_GPIOCFG_GP0OUT_BIT (1 << 0) /* GPIO 0 output enable */
+#define MAX3107_GPIOCFG_GP1OUT_BIT (1 << 1) /* GPIO 1 output enable */
+#define MAX3107_GPIOCFG_GP2OUT_BIT (1 << 2) /* GPIO 2 output enable */
+#define MAX3107_GPIOCFG_GP3OUT_BIT (1 << 3) /* GPIO 3 output enable */
+#define MAX3107_GPIOCFG_GP0OD_BIT (1 << 4) /* GPIO 0 open-drain enable */
+#define MAX3107_GPIOCFG_GP1OD_BIT (1 << 5) /* GPIO 1 open-drain enable */
+#define MAX3107_GPIOCFG_GP2OD_BIT (1 << 6) /* GPIO 2 open-drain enable */
+#define MAX3107_GPIOCFG_GP3OD_BIT (1 << 7) /* GPIO 3 open-drain enable */
+
+/* GPIO DATA register bits */
+#define MAX3107_GPIODATA_GP0OUT_BIT (1 << 0) /* GPIO 0 output value */
+#define MAX3107_GPIODATA_GP1OUT_BIT (1 << 1) /* GPIO 1 output value */
+#define MAX3107_GPIODATA_GP2OUT_BIT (1 << 2) /* GPIO 2 output value */
+#define MAX3107_GPIODATA_GP3OUT_BIT (1 << 3) /* GPIO 3 output value */
+#define MAX3107_GPIODATA_GP0IN_BIT (1 << 4) /* GPIO 0 input value */
+#define MAX3107_GPIODATA_GP1IN_BIT (1 << 5) /* GPIO 1 input value */
+#define MAX3107_GPIODATA_GP2IN_BIT (1 << 6) /* GPIO 2 input value */
+#define MAX3107_GPIODATA_GP3IN_BIT (1 << 7) /* GPIO 3 input value */
+
+/* PLL configuration register masks */
+#define MAX3107_PLLCFG_PREDIV_MASK (0x003f) /* PLL predivision value */
+#define MAX3107_PLLCFG_PLLFACTOR_MASK (0x00c0) /* PLL multiplication factor */
+
+/* Baud rate generator configuration register masks and bits */
+#define MAX3107_BRGCFG_FRACT_MASK (0x000f) /* Fractional portion of
+ * Baud rate generator divisor
+ */
+#define MAX3107_BRGCFG_2XMODE_BIT (1 << 4) /* Double baud rate */
+#define MAX3107_BRGCFG_4XMODE_BIT (1 << 5) /* Quadruple baud rate */
+#define MAX3107_BRGCFG_UNDEF6_BIT (1 << 6) /* Undefined/not used */
+#define MAX3107_BRGCFG_UNDEF7_BIT (1 << 7) /* Undefined/not used */
+
+/* Clock source register bits */
+#define MAX3107_CLKSRC_INTOSC_BIT (1 << 0) /* Internal osc enable */
+#define MAX3107_CLKSRC_CRYST_BIT (1 << 1) /* Crystal osc enable */
+#define MAX3107_CLKSRC_PLL_BIT (1 << 2) /* PLL enable */
+#define MAX3107_CLKSRC_PLLBYP_BIT (1 << 3) /* PLL bypass */
+#define MAX3107_CLKSRC_EXTCLK_BIT (1 << 4) /* External clock enable */
+#define MAX3107_CLKSRC_UNDEF5_BIT (1 << 5) /* Undefined/not used */
+#define MAX3107_CLKSRC_UNDEF6_BIT (1 << 6) /* Undefined/not used */
+#define MAX3107_CLKSRC_CLK2RTS_BIT (1 << 7) /* Baud clk to RTS pin */
+
+
+/* HW definitions */
+#define MAX3107_RX_FIFO_SIZE 128
+#define MAX3107_TX_FIFO_SIZE 128
+#define MAX3107_REVID1 0x00a0
+#define MAX3107_REVID2 0x00a1
+
+
+/* Baud rate generator configuration values for external clock 13MHz */
+#define MAX3107_BRG13_B300 (0x0A9400 | 0x05)
+#define MAX3107_BRG13_B600 (0x054A00 | 0x03)
+#define MAX3107_BRG13_B1200 (0x02A500 | 0x01)
+#define MAX3107_BRG13_B2400 (0x015200 | 0x09)
+#define MAX3107_BRG13_B4800 (0x00A900 | 0x04)
+#define MAX3107_BRG13_B9600 (0x005400 | 0x0A)
+#define MAX3107_BRG13_B19200 (0x002A00 | 0x05)
+#define MAX3107_BRG13_B38400 (0x001500 | 0x03)
+#define MAX3107_BRG13_B57600 (0x000E00 | 0x02)
+#define MAX3107_BRG13_B115200 (0x000700 | 0x01)
+#define MAX3107_BRG13_B230400 (0x000300 | 0x08)
+#define MAX3107_BRG13_B460800 (0x000100 | 0x0c)
+#define MAX3107_BRG13_B921600 (0x000100 | 0x1c)
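+
+/*
+ * A plausible reading of the encoding above (an assumption, not stated in
+ * this header): bits [23:8] hold the integer part of 13MHz / (16 * baud)
+ * and bits [3:0] hold the fractional part in sixteenths. For 9600 baud,
+ * 13000000 / (16 * 9600) = 84.64, giving integer 0x54 and fraction
+ * 0.64 * 16 ~= 10 = 0xA, i.e. (0x005400 | 0x0A) as defined above.
+ */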
+
+/* Baud rate generator configuration values for external clock 26MHz */
+#define MAX3107_BRG26_B300 (0x152800 | 0x0A)
+#define MAX3107_BRG26_B600 (0x0A9400 | 0x05)
+#define MAX3107_BRG26_B1200 (0x054A00 | 0x03)
+#define MAX3107_BRG26_B2400 (0x02A500 | 0x01)
+#define MAX3107_BRG26_B4800 (0x015200 | 0x09)
+#define MAX3107_BRG26_B9600 (0x00A900 | 0x04)
+#define MAX3107_BRG26_B19200 (0x005400 | 0x0A)
+#define MAX3107_BRG26_B38400 (0x002A00 | 0x05)
+#define MAX3107_BRG26_B57600 (0x001C00 | 0x03)
+#define MAX3107_BRG26_B115200 (0x000E00 | 0x02)
+#define MAX3107_BRG26_B230400 (0x000700 | 0x01)
+#define MAX3107_BRG26_B460800 (0x000300 | 0x08)
+#define MAX3107_BRG26_B921600 (0x000100 | 0x0C)
+
+/* Baud rate generator configuration values for internal clock */
+#define MAX3107_BRG13_IB300 (0x008000 | 0x00)
+#define MAX3107_BRG13_IB600 (0x004000 | 0x00)
+#define MAX3107_BRG13_IB1200 (0x002000 | 0x00)
+#define MAX3107_BRG13_IB2400 (0x001000 | 0x00)
+#define MAX3107_BRG13_IB4800 (0x000800 | 0x00)
+#define MAX3107_BRG13_IB9600 (0x000400 | 0x00)
+#define MAX3107_BRG13_IB19200 (0x000200 | 0x00)
+#define MAX3107_BRG13_IB38400 (0x000100 | 0x00)
+#define MAX3107_BRG13_IB57600 (0x000000 | 0x0B)
+#define MAX3107_BRG13_IB115200 (0x000000 | 0x05)
+#define MAX3107_BRG13_IB230400 (0x000000 | 0x03)
+#define MAX3107_BRG13_IB460800 (0x000000 | 0x00)
+#define MAX3107_BRG13_IB921600 (0x000000 | 0x00)
+
+
+struct baud_table {
+ int baud;
+ u32 new_brg;
+};
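+
+/*
+ * Illustrative only: the driver's .c file would presumably build lookup
+ * tables of this type from the BRG values above, along these lines
+ * (the table name is hypothetical):
+ *
+ *	static struct baud_table brg13_ext_clk_tbl[] = {
+ *		{ 300,    MAX3107_BRG13_B300 },
+ *		{ 9600,   MAX3107_BRG13_B9600 },
+ *		{ 115200, MAX3107_BRG13_B115200 },
+ *		{ 0,      0 }		(terminator)
+ *	};
+ */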
+
+struct max3107_port {
+ /* UART port structure */
+ struct uart_port port;
+
+ /* SPI device structure */
+ struct spi_device *spi;
+
+#if defined(CONFIG_GPIOLIB)
+ /* GPIO chip structure */
+ struct gpio_chip chip;
+#endif
+
+ /* Workqueue that does all the magic */
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+
+ /* Lock for shared data */
+ spinlock_t data_lock;
+
+ /* Device configuration */
+ int ext_clk; /* 1 if external clock used */
+ int loopback; /* Current loopback mode state */
+ int baud; /* Current baud rate */
+
+ /* State flags */
+ int suspended; /* Indicates suspend mode */
+ int tx_fifo_empty; /* Flag for TX FIFO state */
+ int rx_enabled; /* Flag for receiver state */
+ int tx_enabled; /* Flag for transmitter state */
+
+ u16 irqen_reg; /* Current IRQ enable register value */
+ /* Shared data */
+ u16 mode1_reg; /* Current mode1 register value*/
+ int mode1_commit; /* Flag for setting new mode1 register value */
+ u16 lcr_reg; /* Current LCR register value */
+ int lcr_commit; /* Flag for setting new LCR register value */
+ u32 brg_cfg; /* Current Baud rate generator config */
+ int brg_commit; /* Flag for setting new baud rate generator
+ * config
+ */
+ struct baud_table *baud_tbl;
+ int handle_irq; /* Indicates that IRQ should be handled */
+
+ /* RX buffer and RX string buffer */
+ u16 *rxbuf;
+ u8 *rxstr;
+ /* TX buffer */
+ u16 *txbuf;
+
+ struct max3107_plat *pdata; /* Platform data */
+};
+
+/* Platform data structure */
+struct max3107_plat {
+ /* Loopback mode enable */
+ int loopback;
+ /* External clock enable */
+ int ext_clk;
+ /* Called during the register initialisation */
+ void (*init)(struct max3107_port *s);
+ /* Called when the port is found and configured */
+ int (*configure)(struct max3107_port *s);
+ /* HW suspend function */
+ void (*hw_suspend) (struct max3107_port *s, int suspend);
+ /* Polling mode enable */
+ int polled_mode;
+ /* Polling period if polling mode enabled */
+ int poll_time;
+};
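+
+/*
+ * Illustrative sketch of board-provided platform data (all names below are
+ * hypothetical and not defined by this header):
+ *
+ *	static struct max3107_plat my_board_max3107_pdata = {
+ *		.loopback	= 0,
+ *		.ext_clk	= 1,	use the external clock input
+ *		.polled_mode	= 0,
+ *		.poll_time	= 0,
+ *	};
+ *
+ * which would then be passed to max3107_probe() from the SPI probe hook.
+ */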
+
+extern int max3107_rw(struct max3107_port *s, u8 *tx, u8 *rx, int len);
+extern void max3107_hw_susp(struct max3107_port *s, int suspend);
+extern int max3107_probe(struct spi_device *spi, struct max3107_plat *pdata);
+extern int max3107_remove(struct spi_device *spi);
+extern int max3107_suspend(struct spi_device *spi, pm_message_t state);
+extern int max3107_resume(struct spi_device *spi);
+
+#endif /* _LINUX_SERIAL_MAX3107_H */
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
new file mode 100644
index 00000000..3394b7cc
--- /dev/null
+++ b/drivers/tty/serial/mcf.c
@@ -0,0 +1,662 @@
+/****************************************************************************/
+
+/*
+ * mcf.c -- Freescale ColdFire UART driver
+ *
+ * (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfuart.h>
+#include <asm/nettel.h>
+
+/****************************************************************************/
+
+/*
+ * Some boards implement the DTR/DCD lines using GPIO lines, most
+ * don't. Dummy out the access macros for those that don't. Those
+ * that do should define these macros somewhere in their board
+ * specific include files; an illustrative sketch follows the macros below.
+ */
+#if !defined(mcf_getppdcd)
+#define mcf_getppdcd(p) (1)
+#endif
+#if !defined(mcf_getppdtr)
+#define mcf_getppdtr(p) (1)
+#endif
+#if !defined(mcf_setppdtr)
+#define mcf_setppdtr(p, v) do { } while (0)
+#endif
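+
+/*
+ * Illustrative only: a board header that routes DCD/DTR through GPIO lines
+ * might provide definitions along these lines, where my_board_gpio_get()
+ * and my_board_gpio_set() are hypothetical helpers, not part of this driver:
+ *
+ *	#define mcf_getppdcd(p)		(my_board_gpio_get(MY_DCD_GPIO(p)))
+ *	#define mcf_getppdtr(p)		(my_board_gpio_get(MY_DTR_GPIO(p)))
+ *	#define mcf_setppdtr(p, v)	my_board_gpio_set(MY_DTR_GPIO(p), (v))
+ */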
+
+/****************************************************************************/
+
+/*
+ * Local per-uart structure.
+ */
+struct mcf_uart {
+ struct uart_port port;
+ unsigned int sigs; /* Local copy of line sigs */
+ unsigned char imr; /* Local IMR mirror */
+};
+
+/****************************************************************************/
+
+static unsigned int mcf_tx_empty(struct uart_port *port)
+{
+ return (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXEMPTY) ?
+ TIOCSER_TEMT : 0;
+}
+
+/****************************************************************************/
+
+static unsigned int mcf_get_mctrl(struct uart_port *port)
+{
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+ unsigned int sigs;
+
+ sigs = (readb(port->membase + MCFUART_UIPR) & MCFUART_UIPR_CTS) ?
+ 0 : TIOCM_CTS;
+ sigs |= (pp->sigs & TIOCM_RTS);
+ sigs |= (mcf_getppdcd(port->line) ? TIOCM_CD : 0);
+ sigs |= (mcf_getppdtr(port->line) ? TIOCM_DTR : 0);
+
+ return sigs;
+}
+
+/****************************************************************************/
+
+static void mcf_set_mctrl(struct uart_port *port, unsigned int sigs)
+{
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+
+ pp->sigs = sigs;
+ mcf_setppdtr(port->line, (sigs & TIOCM_DTR));
+ if (sigs & TIOCM_RTS)
+ writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP1);
+ else
+ writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP0);
+}
+
+/****************************************************************************/
+
+static void mcf_start_tx(struct uart_port *port)
+{
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+
+ pp->imr |= MCFUART_UIR_TXREADY;
+ writeb(pp->imr, port->membase + MCFUART_UIMR);
+}
+
+/****************************************************************************/
+
+static void mcf_stop_tx(struct uart_port *port)
+{
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+
+ pp->imr &= ~MCFUART_UIR_TXREADY;
+ writeb(pp->imr, port->membase + MCFUART_UIMR);
+}
+
+/****************************************************************************/
+
+static void mcf_stop_rx(struct uart_port *port)
+{
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+
+ pp->imr &= ~MCFUART_UIR_RXREADY;
+ writeb(pp->imr, port->membase + MCFUART_UIMR);
+}
+
+/****************************************************************************/
+
+static void mcf_break_ctl(struct uart_port *port, int break_state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (break_state == -1)
+ writeb(MCFUART_UCR_CMDBREAKSTART, port->membase + MCFUART_UCR);
+ else
+ writeb(MCFUART_UCR_CMDBREAKSTOP, port->membase + MCFUART_UCR);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/****************************************************************************/
+
+static void mcf_enable_ms(struct uart_port *port)
+{
+}
+
+/****************************************************************************/
+
+static int mcf_startup(struct uart_port *port)
+{
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Reset UART, get it into known state... */
+ writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
+ writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
+
+ /* Enable the UART transmitter and receiver */
+ writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
+ port->membase + MCFUART_UCR);
+
+ /* Enable RX interrupts now */
+ pp->imr = MCFUART_UIR_RXREADY;
+ writeb(pp->imr, port->membase + MCFUART_UIMR);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+/****************************************************************************/
+
+static void mcf_shutdown(struct uart_port *port)
+{
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Disable all interrupts now */
+ pp->imr = 0;
+ writeb(pp->imr, port->membase + MCFUART_UIMR);
+
+ /* Disable UART transmitter and receiver */
+ writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
+ writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/****************************************************************************/
+
+static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned int baud, baudclk;
+#if defined(CONFIG_M5272)
+ unsigned int baudfr;
+#endif
+ unsigned char mr1, mr2;
+
+ baud = uart_get_baud_rate(port, termios, old, 0, 230400);
+#if defined(CONFIG_M5272)
+ baudclk = (MCF_BUSCLK / baud) / 32;
+ baudfr = (((MCF_BUSCLK / baud) + 1) / 2) % 16;
+#else
+ baudclk = ((MCF_BUSCLK / baud) + 16) / 32;
+#endif
+
+ mr1 = MCFUART_MR1_RXIRQRDY | MCFUART_MR1_RXERRCHAR;
+ mr2 = 0;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5: mr1 |= MCFUART_MR1_CS5; break;
+ case CS6: mr1 |= MCFUART_MR1_CS6; break;
+ case CS7: mr1 |= MCFUART_MR1_CS7; break;
+ case CS8:
+ default: mr1 |= MCFUART_MR1_CS8; break;
+ }
+
+ if (termios->c_cflag & PARENB) {
+ if (termios->c_cflag & CMSPAR) {
+ if (termios->c_cflag & PARODD)
+ mr1 |= MCFUART_MR1_PARITYMARK;
+ else
+ mr1 |= MCFUART_MR1_PARITYSPACE;
+ } else {
+ if (termios->c_cflag & PARODD)
+ mr1 |= MCFUART_MR1_PARITYODD;
+ else
+ mr1 |= MCFUART_MR1_PARITYEVEN;
+ }
+ } else {
+ mr1 |= MCFUART_MR1_PARITYNONE;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ mr2 |= MCFUART_MR2_STOP2;
+ else
+ mr2 |= MCFUART_MR2_STOP1;
+
+ if (termios->c_cflag & CRTSCTS) {
+ mr1 |= MCFUART_MR1_RXRTS;
+ mr2 |= MCFUART_MR2_TXCTS;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
+ writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
+ writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR);
+ writeb(mr1, port->membase + MCFUART_UMR);
+ writeb(mr2, port->membase + MCFUART_UMR);
+ writeb((baudclk & 0xff00) >> 8, port->membase + MCFUART_UBG1);
+ writeb((baudclk & 0xff), port->membase + MCFUART_UBG2);
+#if defined(CONFIG_M5272)
+ writeb((baudfr & 0x0f), port->membase + MCFUART_UFPD);
+#endif
+ writeb(MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER,
+ port->membase + MCFUART_UCSR);
+ writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
+ port->membase + MCFUART_UCR);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/****************************************************************************/
+
+static void mcf_rx_chars(struct mcf_uart *pp)
+{
+ struct uart_port *port = &pp->port;
+ unsigned char status, ch, flag;
+
+ while ((status = readb(port->membase + MCFUART_USR)) & MCFUART_USR_RXREADY) {
+ ch = readb(port->membase + MCFUART_URB);
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (status & MCFUART_USR_RXERR) {
+ writeb(MCFUART_UCR_CMDRESETERR,
+ port->membase + MCFUART_UCR);
+
+ if (status & MCFUART_USR_RXBREAK) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (status & MCFUART_USR_RXPARITY) {
+ port->icount.parity++;
+ } else if (status & MCFUART_USR_RXOVERRUN) {
+ port->icount.overrun++;
+ } else if (status & MCFUART_USR_RXFRAMING) {
+ port->icount.frame++;
+ }
+
+ status &= port->read_status_mask;
+
+ if (status & MCFUART_USR_RXBREAK)
+ flag = TTY_BREAK;
+ else if (status & MCFUART_USR_RXPARITY)
+ flag = TTY_PARITY;
+ else if (status & MCFUART_USR_RXFRAMING)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+ uart_insert_char(port, status, MCFUART_USR_RXOVERRUN, ch, flag);
+ }
+
+ tty_flip_buffer_push(port->state->port.tty);
+}
+
+/****************************************************************************/
+
+static void mcf_tx_chars(struct mcf_uart *pp)
+{
+ struct uart_port *port = &pp->port;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (port->x_char) {
+ /* Send special char - probably flow control */
+ writeb(port->x_char, port->membase + MCFUART_UTB);
+ port->x_char = 0;
+ port->icount.tx++;
+ return;
+ }
+
+ while (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY) {
+ if (xmit->head == xmit->tail)
+ break;
+ writeb(xmit->buf[xmit->tail], port->membase + MCFUART_UTB);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (xmit->head == xmit->tail) {
+ pp->imr &= ~MCFUART_UIR_TXREADY;
+ writeb(pp->imr, port->membase + MCFUART_UIMR);
+ }
+}
+
+/****************************************************************************/
+
+static irqreturn_t mcf_interrupt(int irq, void *data)
+{
+ struct uart_port *port = data;
+ struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
+ unsigned int isr;
+ irqreturn_t ret = IRQ_NONE;
+
+ isr = readb(port->membase + MCFUART_UISR) & pp->imr;
+
+ spin_lock(&port->lock);
+ if (isr & MCFUART_UIR_RXREADY) {
+ mcf_rx_chars(pp);
+ ret = IRQ_HANDLED;
+ }
+ if (isr & MCFUART_UIR_TXREADY) {
+ mcf_tx_chars(pp);
+ ret = IRQ_HANDLED;
+ }
+ spin_unlock(&port->lock);
+
+ return ret;
+}
+
+/****************************************************************************/
+
+static void mcf_config_port(struct uart_port *port, int flags)
+{
+ port->type = PORT_MCF;
+ port->fifosize = MCFUART_TXFIFOSIZE;
+
+ /* Clear mask, so no surprise interrupts. */
+ writeb(0, port->membase + MCFUART_UIMR);
+
+ if (request_irq(port->irq, mcf_interrupt, IRQF_DISABLED, "UART", port))
+ printk(KERN_ERR "MCF: unable to attach ColdFire UART %d "
+ "interrupt vector=%d\n", port->line, port->irq);
+}
+
+/****************************************************************************/
+
+static const char *mcf_type(struct uart_port *port)
+{
+ return (port->type == PORT_MCF) ? "ColdFire UART" : NULL;
+}
+
+/****************************************************************************/
+
+static int mcf_request_port(struct uart_port *port)
+{
+ /* UARTs always present */
+ return 0;
+}
+
+/****************************************************************************/
+
+static void mcf_release_port(struct uart_port *port)
+{
+ /* Nothing to release... */
+}
+
+/****************************************************************************/
+
+static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ if ((ser->type != PORT_UNKNOWN) && (ser->type != PORT_MCF))
+ return -EINVAL;
+ return 0;
+}
+
+/****************************************************************************/
+
+/*
+ * Define the basic serial functions we support.
+ */
+static const struct uart_ops mcf_uart_ops = {
+ .tx_empty = mcf_tx_empty,
+ .get_mctrl = mcf_get_mctrl,
+ .set_mctrl = mcf_set_mctrl,
+ .start_tx = mcf_start_tx,
+ .stop_tx = mcf_stop_tx,
+ .stop_rx = mcf_stop_rx,
+ .enable_ms = mcf_enable_ms,
+ .break_ctl = mcf_break_ctl,
+ .startup = mcf_startup,
+ .shutdown = mcf_shutdown,
+ .set_termios = mcf_set_termios,
+ .type = mcf_type,
+ .request_port = mcf_request_port,
+ .release_port = mcf_release_port,
+ .config_port = mcf_config_port,
+ .verify_port = mcf_verify_port,
+};
+
+static struct mcf_uart mcf_ports[4];
+
+#define MCF_MAXPORTS ARRAY_SIZE(mcf_ports)
+
+/****************************************************************************/
+#if defined(CONFIG_SERIAL_MCF_CONSOLE)
+/****************************************************************************/
+
+int __init early_mcf_setup(struct mcf_platform_uart *platp)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; ((i < MCF_MAXPORTS) && (platp[i].mapbase)); i++) {
+ port = &mcf_ports[i].port;
+
+ port->line = i;
+ port->type = PORT_MCF;
+ port->mapbase = platp[i].mapbase;
+ port->membase = (platp[i].membase) ? platp[i].membase :
+ (unsigned char __iomem *) port->mapbase;
+ port->iotype = SERIAL_IO_MEM;
+ port->irq = platp[i].irq;
+ port->uartclk = MCF_BUSCLK;
+ port->flags = ASYNC_BOOT_AUTOCONF;
+ port->ops = &mcf_uart_ops;
+ }
+
+ return 0;
+}
+
+/****************************************************************************/
+
+static void mcf_console_putc(struct console *co, const char c)
+{
+ struct uart_port *port = &(mcf_ports + co->index)->port;
+ int i;
+
+ for (i = 0; (i < 0x10000); i++) {
+ if (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY)
+ break;
+ }
+ writeb(c, port->membase + MCFUART_UTB);
+ for (i = 0; (i < 0x10000); i++) {
+ if (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY)
+ break;
+ }
+}
+
+/****************************************************************************/
+
+static void mcf_console_write(struct console *co, const char *s, unsigned int count)
+{
+ for (; (count); count--, s++) {
+ mcf_console_putc(co, *s);
+ if (*s == '\n')
+ mcf_console_putc(co, '\r');
+ }
+}
+
+/****************************************************************************/
+
+static int __init mcf_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = CONFIG_SERIAL_MCF_BAUDRATE;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if ((co->index < 0) || (co->index >= MCF_MAXPORTS))
+ co->index = 0;
+ port = &mcf_ports[co->index].port;
+ if (port->membase == 0)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+/****************************************************************************/
+
+static struct uart_driver mcf_driver;
+
+static struct console mcf_console = {
+ .name = "ttyS",
+ .write = mcf_console_write,
+ .device = uart_console_device,
+ .setup = mcf_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &mcf_driver,
+};
+
+static int __init mcf_console_init(void)
+{
+ register_console(&mcf_console);
+ return 0;
+}
+
+console_initcall(mcf_console_init);
+
+#define MCF_CONSOLE &mcf_console
+
+/****************************************************************************/
+#else
+/****************************************************************************/
+
+#define MCF_CONSOLE NULL
+
+/****************************************************************************/
+#endif /* CONFIG_SERIAL_MCF_CONSOLE */
+/****************************************************************************/
+
+/*
+ * Define the mcf UART driver structure.
+ */
+static struct uart_driver mcf_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "mcf",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .nr = MCF_MAXPORTS,
+ .cons = MCF_CONSOLE,
+};
+
+/****************************************************************************/
+
+static int __devinit mcf_probe(struct platform_device *pdev)
+{
+ struct mcf_platform_uart *platp = pdev->dev.platform_data;
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; ((i < MCF_MAXPORTS) && (platp[i].mapbase)); i++) {
+ port = &mcf_ports[i].port;
+
+ port->line = i;
+ port->type = PORT_MCF;
+ port->mapbase = platp[i].mapbase;
+ port->membase = (platp[i].membase) ? platp[i].membase :
+ (unsigned char __iomem *) platp[i].mapbase;
+ port->iotype = SERIAL_IO_MEM;
+ port->irq = platp[i].irq;
+ port->uartclk = MCF_BUSCLK;
+ port->ops = &mcf_uart_ops;
+ port->flags = ASYNC_BOOT_AUTOCONF;
+
+ uart_add_one_port(&mcf_driver, port);
+ }
+
+ return 0;
+}
+
+/****************************************************************************/
+
+static int __devexit mcf_remove(struct platform_device *pdev)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; (i < MCF_MAXPORTS); i++) {
+ port = &mcf_ports[i].port;
+ if (port)
+ uart_remove_one_port(&mcf_driver, port);
+ }
+
+ return 0;
+}
+
+/****************************************************************************/
+
+static struct platform_driver mcf_platform_driver = {
+ .probe = mcf_probe,
+ .remove = __devexit_p(mcf_remove),
+ .driver = {
+ .name = "mcfuart",
+ .owner = THIS_MODULE,
+ },
+};
+
+/****************************************************************************/
+
+static int __init mcf_init(void)
+{
+ int rc;
+
+ printk("ColdFire internal UART serial driver\n");
+
+ rc = uart_register_driver(&mcf_driver);
+ if (rc)
+ return rc;
+ rc = platform_driver_register(&mcf_platform_driver);
+ if (rc)
+ return rc;
+ return 0;
+}
+
+/****************************************************************************/
+
+static void __exit mcf_exit(void)
+{
+ platform_driver_unregister(&mcf_platform_driver);
+ uart_unregister_driver(&mcf_driver);
+}
+
+/****************************************************************************/
+
+module_init(mcf_init);
+module_exit(mcf_exit);
+
+MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
+MODULE_DESCRIPTION("Freescale ColdFire UART driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mcfuart");
+
+/****************************************************************************/
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
new file mode 100644
index 00000000..cab52f4a
--- /dev/null
+++ b/drivers/tty/serial/mfd.c
@@ -0,0 +1,1470 @@
+/*
+ * mfd.c: driver for High Speed UART device of Intel Medfield platform
+ *
+ * Refer to pxa.c, 8250.c and some other drivers in drivers/serial/
+ *
+ * (C) Copyright 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+/* Notes:
+ * 1. DMA channel allocation: channels 0/1 are assigned to port 0,
+ * channels 2/3 to port 1 and channels 4/5 to port 2. Even numbered
+ * channels are used for TX, odd numbered channels for RX
+ *
+ * 2. The RI/DSR/DCD/DTR signals are not pinned out; DCD & DSR are always
+ * asserted, and DDCD/DDSR are only triggered when the HW is reset
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/slab.h>
+#include <linux/serial_reg.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial_mfd.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+
+#define HSU_DMA_BUF_SIZE 2048
+
+#define chan_readl(chan, offset) readl(chan->reg + offset)
+#define chan_writel(chan, offset, val) writel(val, chan->reg + offset)
+
+#define mfd_readl(obj, offset) readl(obj->reg + offset)
+#define mfd_writel(obj, offset, val) writel(val, obj->reg + offset)
+
+static int hsu_dma_enable;
+module_param(hsu_dma_enable, int, 0);
+MODULE_PARM_DESC(hsu_dma_enable,
+ "It is a bitmap to set working mode, if bit[x] is 1, then port[x] will work in DMA mode, otherwise in PIO mode.");
+
+struct hsu_dma_buffer {
+ u8 *buf;
+ dma_addr_t dma_addr;
+ u32 dma_size;
+ u32 ofs;
+};
+
+struct hsu_dma_chan {
+ u32 id;
+ enum dma_data_direction dirt;
+ struct uart_hsu_port *uport;
+ void __iomem *reg;
+};
+
+struct uart_hsu_port {
+ struct uart_port port;
+ unsigned char ier;
+ unsigned char lcr;
+ unsigned char mcr;
+ unsigned int lsr_break_flag;
+ char name[12];
+ int index;
+ struct device *dev;
+
+ struct hsu_dma_chan *txc;
+ struct hsu_dma_chan *rxc;
+ struct hsu_dma_buffer txbuf;
+ struct hsu_dma_buffer rxbuf;
+ int use_dma; /* flag for DMA/PIO */
+ int running;
+ int dma_tx_on;
+};
+
+/* Top level data structure of HSU */
+struct hsu_port {
+ void __iomem *reg;
+ unsigned long paddr;
+ unsigned long iolen;
+ u32 irq;
+
+ struct uart_hsu_port port[3];
+ struct hsu_dma_chan chans[10];
+
+ struct dentry *debugfs;
+};
+
+static inline unsigned int serial_in(struct uart_hsu_port *up, int offset)
+{
+ unsigned int val;
+
+ if (offset > UART_MSR) {
+ offset <<= 2;
+ val = readl(up->port.membase + offset);
+ } else
+ val = (unsigned int)readb(up->port.membase + offset);
+
+ return val;
+}
+
+static inline void serial_out(struct uart_hsu_port *up, int offset, int value)
+{
+ if (offset > UART_MSR) {
+ offset <<= 2;
+ writel(value, up->port.membase + offset);
+ } else {
+ unsigned char val = value & 0xff;
+ writeb(val, up->port.membase + offset);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define HSU_REGS_BUFSIZE 1024
+
+static int hsu_show_regs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t port_show_regs(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct uart_hsu_port *up = file->private_data;
+ char *buf;
+ u32 len = 0;
+ ssize_t ret;
+
+ buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "MFD HSU port[%d] regs:\n", up->index);
+
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "=================================\n");
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "IER: \t\t0x%08x\n", serial_in(up, UART_IER));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "IIR: \t\t0x%08x\n", serial_in(up, UART_IIR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "LCR: \t\t0x%08x\n", serial_in(up, UART_LCR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "MCR: \t\t0x%08x\n", serial_in(up, UART_MCR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "LSR: \t\t0x%08x\n", serial_in(up, UART_LSR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "MSR: \t\t0x%08x\n", serial_in(up, UART_MSR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "FOR: \t\t0x%08x\n", serial_in(up, UART_FOR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "PS: \t\t0x%08x\n", serial_in(up, UART_PS));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "MUL: \t\t0x%08x\n", serial_in(up, UART_MUL));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "DIV: \t\t0x%08x\n", serial_in(up, UART_DIV));
+
+ if (len > HSU_REGS_BUFSIZE)
+ len = HSU_REGS_BUFSIZE;
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t dma_show_regs(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hsu_dma_chan *chan = file->private_data;
+ char *buf;
+ u32 len = 0;
+ ssize_t ret;
+
+ buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "MFD HSU DMA channel [%d] regs:\n", chan->id);
+
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "=================================\n");
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "CR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_CR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "DCR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_DCR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "BSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_BSR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "MOTSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_MOTSR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0SAR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0TSR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D1SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1SAR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D1TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1TSR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D2SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2SAR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D2TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2TSR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D3SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3SAR));
+ len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
+ "D3TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3TSR));
+
+ if (len > HSU_REGS_BUFSIZE)
+ len = HSU_REGS_BUFSIZE;
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations port_regs_ops = {
+ .owner = THIS_MODULE,
+ .open = hsu_show_regs_open,
+ .read = port_show_regs,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations dma_regs_ops = {
+ .owner = THIS_MODULE,
+ .open = hsu_show_regs_open,
+ .read = dma_show_regs,
+ .llseek = default_llseek,
+};
+
+static int hsu_debugfs_init(struct hsu_port *hsu)
+{
+ int i;
+ char name[32];
+
+ hsu->debugfs = debugfs_create_dir("hsu", NULL);
+ if (!hsu->debugfs)
+ return -ENOMEM;
+
+ for (i = 0; i < 3; i++) {
+ snprintf(name, sizeof(name), "port_%d_regs", i);
+ debugfs_create_file(name, S_IFREG | S_IRUGO,
+ hsu->debugfs, (void *)(&hsu->port[i]), &port_regs_ops);
+ }
+
+ for (i = 0; i < 6; i++) {
+ snprintf(name, sizeof(name), "dma_chan_%d_regs", i);
+ debugfs_create_file(name, S_IFREG | S_IRUGO,
+ hsu->debugfs, (void *)&hsu->chans[i], &dma_regs_ops);
+ }
+
+ return 0;
+}
+
+static void hsu_debugfs_remove(struct hsu_port *hsu)
+{
+ if (hsu->debugfs)
+ debugfs_remove_recursive(hsu->debugfs);
+}
+
+#else
+static inline int hsu_debugfs_init(struct hsu_port *hsu)
+{
+ return 0;
+}
+
+static inline void hsu_debugfs_remove(struct hsu_port *hsu)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static void serial_hsu_enable_ms(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
+}
+
+void hsu_dma_tx(struct uart_hsu_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ struct hsu_dma_buffer *dbuf = &up->txbuf;
+ int count;
+
+ /* test_and_set_bit may be better, but this is always called with the port lock held */
+ if (up->dma_tx_on)
+ return;
+
+ /* Update the circ buf info */
+ xmit->tail += dbuf->ofs;
+ xmit->tail &= UART_XMIT_SIZE - 1;
+
+ up->port.icount.tx += dbuf->ofs;
+ dbuf->ofs = 0;
+
+ /* Disable the channel */
+ chan_writel(up->txc, HSU_CH_CR, 0x0);
+
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
+ dma_sync_single_for_device(up->port.dev,
+ dbuf->dma_addr,
+ dbuf->dma_size,
+ DMA_TO_DEVICE);
+
+ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ dbuf->ofs = count;
+
+ /* Reprogram the channel */
+ chan_writel(up->txc, HSU_CH_D0SAR, dbuf->dma_addr + xmit->tail);
+ chan_writel(up->txc, HSU_CH_D0TSR, count);
+
+ /* Reenable the channel */
+ chan_writel(up->txc, HSU_CH_DCR, 0x1
+ | (0x1 << 8)
+ | (0x1 << 16)
+ | (0x1 << 24));
+ up->dma_tx_on = 1;
+ chan_writel(up->txc, HSU_CH_CR, 0x1);
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+}
+
+/* The buffer is already cache coherent */
+void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc, struct hsu_dma_buffer *dbuf)
+{
+ dbuf->ofs = 0;
+
+ chan_writel(rxc, HSU_CH_BSR, 32);
+ chan_writel(rxc, HSU_CH_MOTSR, 4);
+
+ chan_writel(rxc, HSU_CH_D0SAR, dbuf->dma_addr);
+ chan_writel(rxc, HSU_CH_D0TSR, dbuf->dma_size);
+ chan_writel(rxc, HSU_CH_DCR, 0x1 | (0x1 << 8)
+ | (0x1 << 16)
+ | (0x1 << 24) /* timeout bit, see HSU Errata 1 */
+ );
+ chan_writel(rxc, HSU_CH_CR, 0x3);
+}
+
+/* Protected by spin_lock_irqsave(port->lock) */
+static void serial_hsu_start_tx(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+
+ if (up->use_dma) {
+ hsu_dma_tx(up);
+ } else if (!(up->ier & UART_IER_THRI)) {
+ up->ier |= UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+static void serial_hsu_stop_tx(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ struct hsu_dma_chan *txc = up->txc;
+
+ if (up->use_dma)
+ chan_writel(txc, HSU_CH_CR, 0x0);
+ else if (up->ier & UART_IER_THRI) {
+ up->ier &= ~UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+/* This is always called with the port spinlock held, so
+ * modifying the timeout timer is safe here */
+void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
+{
+ struct hsu_dma_buffer *dbuf = &up->rxbuf;
+ struct hsu_dma_chan *chan = up->rxc;
+ struct uart_port *port = &up->port;
+ struct tty_struct *tty = port->state->port.tty;
+ int count;
+
+ if (!tty)
+ return;
+
+ /*
+ * First we need to know how much has already been transferred;
+ * then, if this is a timeout DMA irq, read the trailing bytes
+ * out, push them up to the tty layer and re-enable the
+ * channel
+ */
+
+ /* Timeout IRQ; we need to wait some time, see Errata 2 */
+ if (int_sts & 0xf00)
+ udelay(2);
+
+ /* Stop the channel */
+ chan_writel(chan, HSU_CH_CR, 0x0);
+
+ count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
+ if (!count) {
+ /* Restart the channel before we leave */
+ chan_writel(chan, HSU_CH_CR, 0x3);
+ return;
+ }
+
+ dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
+ dbuf->dma_size, DMA_FROM_DEVICE);
+
+ /*
+ * Head will only wrap around when we recycle
+ * the DMA buffer, and when that happens, we
+ * explicitly set tail to 0. So head will
+ * always be greater than tail.
+ */
+ tty_insert_flip_string(tty, dbuf->buf, count);
+ port->icount.rx += count;
+
+ dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
+ dbuf->dma_size, DMA_FROM_DEVICE);
+
+ /* Reprogram the channel */
+ chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
+ chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
+ chan_writel(chan, HSU_CH_DCR, 0x1
+ | (0x1 << 8)
+ | (0x1 << 16)
+ | (0x1 << 24) /* timeout bit, see HSU Errata 1 */
+ );
+ tty_flip_buffer_push(tty);
+
+ chan_writel(chan, HSU_CH_CR, 0x3);
+
+}
+
+static void serial_hsu_stop_rx(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ struct hsu_dma_chan *chan = up->rxc;
+
+ if (up->use_dma)
+ chan_writel(chan, HSU_CH_CR, 0x2);
+ else {
+ up->ier &= ~UART_IER_RLSI;
+ up->port.read_status_mask &= ~UART_LSR_DR;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+static inline void receive_chars(struct uart_hsu_port *up, int *status)
+{
+ struct tty_struct *tty = up->port.state->port.tty;
+ unsigned int ch, flag;
+ unsigned int max_count = 256;
+
+ if (!tty)
+ return;
+
+ do {
+ ch = serial_in(up, UART_RX);
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+
+ if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
+ UART_LSR_FE | UART_LSR_OE))) {
+
+ dev_warn(up->dev, "We really rush into ERR/BI case, "
+ "status = 0x%02x\n", *status);
+ /* For statistics only */
+ if (*status & UART_LSR_BI) {
+ *status &= ~(UART_LSR_FE | UART_LSR_PE);
+ up->port.icount.brk++;
+ /*
+ * We do the SysRQ and SAK checking
+ * here because otherwise the break
+ * may get masked by ignore_status_mask
+ * or read_status_mask.
+ */
+ if (uart_handle_break(&up->port))
+ goto ignore_char;
+ } else if (*status & UART_LSR_PE)
+ up->port.icount.parity++;
+ else if (*status & UART_LSR_FE)
+ up->port.icount.frame++;
+ if (*status & UART_LSR_OE)
+ up->port.icount.overrun++;
+
+ /* Mask off conditions which should be ignored. */
+ *status &= up->port.read_status_mask;
+
+#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
+ if (up->port.cons &&
+ up->port.cons->index == up->port.line) {
+ /* Recover the break flag from console xmit */
+ *status |= up->lsr_break_flag;
+ up->lsr_break_flag = 0;
+ }
+#endif
+ if (*status & UART_LSR_BI) {
+ flag = TTY_BREAK;
+ } else if (*status & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (*status & UART_LSR_FE)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(&up->port, ch))
+ goto ignore_char;
+
+ uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
+ ignore_char:
+ *status = serial_in(up, UART_LSR);
+ } while ((*status & UART_LSR_DR) && max_count--);
+ tty_flip_buffer_push(tty);
+}
+
+static void transmit_chars(struct uart_hsu_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int count;
+
+ if (up->port.x_char) {
+ serial_out(up, UART_TX, up->port.x_char);
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+ serial_hsu_stop_tx(&up->port);
+ return;
+ }
+
+ /* The IRQ is for TX FIFO half-empty */
+ count = up->port.fifosize / 2;
+
+ do {
+ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ if (uart_circ_empty(xmit))
+ serial_hsu_stop_tx(&up->port);
+}
+
+static inline void check_modem_status(struct uart_hsu_port *up)
+{
+ int status;
+
+ status = serial_in(up, UART_MSR);
+
+ if ((status & UART_MSR_ANY_DELTA) == 0)
+ return;
+
+ if (status & UART_MSR_TERI)
+ up->port.icount.rng++;
+ if (status & UART_MSR_DDSR)
+ up->port.icount.dsr++;
+ /* We may only get DDCD at HW init and reset */
+ if (status & UART_MSR_DDCD)
+ uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
+ /* Will start/stop_tx accordingly */
+ if (status & UART_MSR_DCTS)
+ uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
+
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+}
+
+/*
+ * This handles the interrupt from one port.
+ */
+static irqreturn_t port_irq(int irq, void *dev_id)
+{
+ struct uart_hsu_port *up = dev_id;
+ unsigned int iir, lsr;
+ unsigned long flags;
+
+ if (unlikely(!up->running))
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (up->use_dma) {
+ lsr = serial_in(up, UART_LSR);
+ if (unlikely(lsr & (UART_LSR_BI | UART_LSR_PE |
+ UART_LSR_FE | UART_LSR_OE)))
+ dev_warn(up->dev,
+ "Got lsr irq while using DMA, lsr = 0x%2x\n",
+ lsr);
+ check_modem_status(up);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ iir = serial_in(up, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return IRQ_NONE;
+ }
+
+ lsr = serial_in(up, UART_LSR);
+ if (lsr & UART_LSR_DR)
+ receive_chars(up, &lsr);
+ check_modem_status(up);
+
+ /* lsr will be renewed during the receive_chars */
+ if (lsr & UART_LSR_THRE)
+ transmit_chars(up);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return IRQ_HANDLED;
+}
+
+static inline void dma_chan_irq(struct hsu_dma_chan *chan)
+{
+ struct uart_hsu_port *up = chan->uport;
+ unsigned long flags;
+ u32 int_sts;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ if (!up->use_dma || !up->running)
+ goto exit;
+
+ /*
+ * Whatever the situation, we need to read the IRQ status to clear it.
+ * There is a bug, see Errata 5, HSD 2900918
+ */
+ int_sts = chan_readl(chan, HSU_CH_SR);
+
+ /* Rx channel */
+ if (chan->dirt == DMA_FROM_DEVICE)
+ hsu_dma_rx(up, int_sts);
+
+ /* Tx channel */
+ if (chan->dirt == DMA_TO_DEVICE) {
+ chan_writel(chan, HSU_CH_CR, 0x0);
+ up->dma_tx_on = 0;
+ hsu_dma_tx(up);
+ }
+
+exit:
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return;
+}
+
+static irqreturn_t dma_irq(int irq, void *dev_id)
+{
+ struct hsu_port *hsu = dev_id;
+ u32 int_sts, i;
+
+ int_sts = mfd_readl(hsu, HSU_GBL_DMAISR);
+
+ /* Currently only 6 channels may be used */
+ for (i = 0; i < 6; i++) {
+ if (int_sts & 0x1)
+ dma_chan_irq(&hsu->chans[i]);
+ int_sts >>= 1;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int serial_hsu_tx_empty(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return ret;
+}
+
+static unsigned int serial_hsu_get_mctrl(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ unsigned char status;
+ unsigned int ret;
+
+ status = serial_in(up, UART_MSR);
+
+ ret = 0;
+ if (status & UART_MSR_DCD)
+ ret |= TIOCM_CAR;
+ if (status & UART_MSR_RI)
+ ret |= TIOCM_RNG;
+ if (status & UART_MSR_DSR)
+ ret |= TIOCM_DSR;
+ if (status & UART_MSR_CTS)
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+static void serial_hsu_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ unsigned char mcr = 0;
+
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_OUT1)
+ mcr |= UART_MCR_OUT1;
+ if (mctrl & TIOCM_OUT2)
+ mcr |= UART_MCR_OUT2;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+ mcr |= up->mcr;
+
+ serial_out(up, UART_MCR, mcr);
+}
+
+static void serial_hsu_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+ else
+ up->lcr &= ~UART_LCR_SBC;
+ serial_out(up, UART_LCR, up->lcr);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+/*
+ * What we do specially here:
+ * 1. choose the 64B FIFO mode
+ * 2. start DMA or PIO depending on the configuration
+ * 3. only allocate DMA memory when it is actually needed
+ */
+static int serial_hsu_startup(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ unsigned long flags;
+
+ /*
+ * Clear the FIFO buffers and disable them.
+ * (they will be reenabled in set_termios())
+ */
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ serial_out(up, UART_FCR, 0);
+
+ /* Clear the interrupt registers. */
+ (void) serial_in(up, UART_LSR);
+ (void) serial_in(up, UART_RX);
+ (void) serial_in(up, UART_IIR);
+ (void) serial_in(up, UART_MSR);
+
+ /* Now, initialize the UART, default is 8n1 */
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ up->port.mctrl |= TIOCM_OUT2;
+ serial_hsu_set_mctrl(&up->port, up->port.mctrl);
+
+ /*
+ * Finally, enable interrupts. Note: Modem status interrupts
+ * are set via set_termios(), which will be occurring imminently
+ * anyway, so we don't enable them here.
+ */
+ if (!up->use_dma)
+ up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE;
+ else
+ up->ier = 0;
+ serial_out(up, UART_IER, up->ier);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /* DMA init */
+ if (up->use_dma) {
+ struct hsu_dma_buffer *dbuf;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ up->dma_tx_on = 0;
+
+ /* First allocate the RX buffer */
+ dbuf = &up->rxbuf;
+ dbuf->buf = kzalloc(HSU_DMA_BUF_SIZE, GFP_KERNEL);
+ if (!dbuf->buf) {
+ up->use_dma = 0;
+ goto exit;
+ }
+ dbuf->dma_addr = dma_map_single(port->dev,
+ dbuf->buf,
+ HSU_DMA_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ dbuf->dma_size = HSU_DMA_BUF_SIZE;
+
+ /* Start the RX channel right now */
+ hsu_dma_start_rx_chan(up->rxc, dbuf);
+
+ /* Next init the TX DMA */
+ dbuf = &up->txbuf;
+ dbuf->buf = xmit->buf;
+ dbuf->dma_addr = dma_map_single(port->dev,
+ dbuf->buf,
+ UART_XMIT_SIZE,
+ DMA_TO_DEVICE);
+ dbuf->dma_size = UART_XMIT_SIZE;
+
+ /* These channel settings should not need to be changed anywhere else */
+ chan_writel(up->txc, HSU_CH_BSR, 32);
+ chan_writel(up->txc, HSU_CH_MOTSR, 4);
+ dbuf->ofs = 0;
+ }
+
+exit:
+ /* And clear the interrupt registers again for luck. */
+ (void) serial_in(up, UART_LSR);
+ (void) serial_in(up, UART_RX);
+ (void) serial_in(up, UART_IIR);
+ (void) serial_in(up, UART_MSR);
+
+ up->running = 1;
+ return 0;
+}
+
+static void serial_hsu_shutdown(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ unsigned long flags;
+
+ /* Disable interrupts from this port */
+ up->ier = 0;
+ serial_out(up, UART_IER, 0);
+ up->running = 0;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ up->port.mctrl &= ~TIOCM_OUT2;
+ serial_hsu_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /* Disable break condition and FIFOs */
+ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR |
+ UART_FCR_CLEAR_XMIT);
+ serial_out(up, UART_FCR, 0);
+}
+
+static void
+serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned char cval, fcr = 0;
+ unsigned long flags;
+ unsigned int baud, quot;
+ u32 ps, mul;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ cval = UART_LCR_WLEN5;
+ break;
+ case CS6:
+ cval = UART_LCR_WLEN6;
+ break;
+ case CS7:
+ cval = UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ cval = UART_LCR_WLEN8;
+ break;
+ }
+
+ /* CMSPAR isn't supported by this driver */
+ if (tty)
+ tty->termios->c_cflag &= ~CMSPAR;
+
+ if (termios->c_cflag & CSTOPB)
+ cval |= UART_LCR_STOP;
+ if (termios->c_cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(termios->c_cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+
+ /*
+ * The base clock is 50MHz, and the baud rate comes from:
+ *	baud = 50M * MUL / (DIV * PS * DLAB)
+ *
+ * For the basic low baud rates we can derive the divisor directly
+ * from the scalar 2764800, e.g. 115200 = 2764800 / 24. The higher
+ * baud rates are handled case by case, mainly by adjusting the
+ * MUL/PS registers, while the DIV register is kept at its default
+ * value 0x3d09 to keep things simple
+ */
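+ /*
+ * Worked example of the formula above: with the default MUL=0x3600
+ * (13824), PS=0x10 (16) and DIV=0x3d09 (15625),
+ * 50000000 * 13824 / (15625 * 16) = 2764800, so a divisor (DLAB)
+ * of 24 gives 2764800 / 24 = 115200 bps.
+ */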
+ baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+
+ quot = 1;
+ ps = 0x10;
+ mul = 0x3600;
+ switch (baud) {
+ case 3500000:
+ mul = 0x3345;
+ ps = 0xC;
+ break;
+ case 1843200:
+ mul = 0x2400;
+ break;
+ case 3000000:
+ case 2500000:
+ case 2000000:
+ case 1500000:
+ case 1000000:
+ case 500000:
+ /* mul/ps/quot = 0x9C4/0x10/0x1 will make a 500000 bps */
+ mul = baud / 500000 * 0x9C4;
+ break;
+ default:
+ /* Use uart_get_divisor to get quot for other baud rates */
+ quot = 0;
+ }
+
+ if (!quot)
+ quot = uart_get_divisor(port, baud);
+
+ if ((up->port.uartclk / quot) < (2400 * 16))
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_1B;
+ else if ((up->port.uartclk / quot) < (230400 * 16))
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_16B;
+ else
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_32B;
+
+ fcr |= UART_FCR_HSU_64B_FIFO;
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /* Update the per-port timeout */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ up->port.read_status_mask |= UART_LSR_BI;
+
+ /* Characters to ignore */
+ up->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (termios->c_iflag & IGNBRK) {
+ up->port.ignore_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_OE;
+ }
+
+ /* Ignore all characters if CREAD is not set */
+ if ((termios->c_cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= UART_LSR_DR;
+
+ /*
+ * CTS flow control flag and modem status interrupts, disable
+ * MSI by default
+ */
+ up->ier &= ~UART_IER_MSI;
+ if (UART_ENABLE_MS(&up->port, termios->c_cflag))
+ up->ier |= UART_IER_MSI;
+
+ serial_out(up, UART_IER, up->ier);
+
+ if (termios->c_cflag & CRTSCTS)
+ up->mcr |= UART_MCR_AFE | UART_MCR_RTS;
+ else
+ up->mcr &= ~UART_MCR_AFE;
+
+ serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
+ serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
+ serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
+ serial_out(up, UART_LCR, cval); /* reset DLAB */
+ serial_out(up, UART_MUL, mul); /* set MUL */
+ serial_out(up, UART_PS, ps); /* set PS */
+ up->lcr = cval; /* Save LCR */
+ serial_hsu_set_mctrl(&up->port, up->port.mctrl);
+ serial_out(up, UART_FCR, fcr);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static void
+serial_hsu_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+}
+
+static void serial_hsu_release_port(struct uart_port *port)
+{
+}
+
+static int serial_hsu_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void serial_hsu_config_port(struct uart_port *port, int flags)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ up->port.type = PORT_MFD;
+}
+
+static int
+serial_hsu_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ /* We don't want the core code to modify any port params */
+ return -EINVAL;
+}
+
+static const char *
+serial_hsu_type(struct uart_port *port)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+ return up->name;
+}
+
+/* Mainly for uart console use */
+static struct uart_hsu_port *serial_hsu_ports[3];
+static struct uart_driver serial_hsu_reg;
+
+#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+/* Wait for transmitter & holding register to empty */
+static inline void wait_for_xmitr(struct uart_hsu_port *up)
+{
+ unsigned int status, tmout = 1000;
+
+ /* Wait up to 1ms for the character to be sent. */
+ do {
+ status = serial_in(up, UART_LSR);
+
+ if (status & UART_LSR_BI)
+ up->lsr_break_flag = UART_LSR_BI;
+
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ } while (!(status & BOTH_EMPTY));
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ tmout = 1000000;
+ while (--tmout &&
+ ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
+ udelay(1);
+ }
+}
+
+static void serial_hsu_console_putchar(struct uart_port *port, int ch)
+{
+ struct uart_hsu_port *up =
+ container_of(port, struct uart_hsu_port, port);
+
+ wait_for_xmitr(up);
+ serial_out(up, UART_TX, ch);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+static void
+serial_hsu_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_hsu_port *up = serial_hsu_ports[co->index];
+ unsigned long flags;
+ unsigned int ier;
+ int locked = 1;
+
+ local_irq_save(flags);
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress) {
+ locked = spin_trylock(&up->port.lock);
+ } else
+ spin_lock(&up->port.lock);
+
+ /* First save the IER then disable the interrupts */
+ ier = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, 0);
+
+ uart_console_write(&up->port, s, count, serial_hsu_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up);
+ serial_out(up, UART_IER, ier);
+
+ if (locked)
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
+}
+
+static struct console serial_hsu_console;
+
+static int __init
+serial_hsu_console_setup(struct console *co, char *options)
+{
+ struct uart_hsu_port *up;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+
+ if (co->index == -1 || co->index >= serial_hsu_reg.nr)
+ co->index = 0;
+ up = serial_hsu_ports[co->index];
+ if (!up)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ ret = uart_set_options(&up->port, co, baud, parity, bits, flow);
+
+ return ret;
+}
+
+static struct console serial_hsu_console = {
+ .name = "ttyMFD",
+ .write = serial_hsu_console_write,
+ .device = uart_console_device,
+ .setup = serial_hsu_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = 2,
+ .data = &serial_hsu_reg,
+};
+#endif
+
+struct uart_ops serial_hsu_pops = {
+ .tx_empty = serial_hsu_tx_empty,
+ .set_mctrl = serial_hsu_set_mctrl,
+ .get_mctrl = serial_hsu_get_mctrl,
+ .stop_tx = serial_hsu_stop_tx,
+ .start_tx = serial_hsu_start_tx,
+ .stop_rx = serial_hsu_stop_rx,
+ .enable_ms = serial_hsu_enable_ms,
+ .break_ctl = serial_hsu_break_ctl,
+ .startup = serial_hsu_startup,
+ .shutdown = serial_hsu_shutdown,
+ .set_termios = serial_hsu_set_termios,
+ .pm = serial_hsu_pm,
+ .type = serial_hsu_type,
+ .release_port = serial_hsu_release_port,
+ .request_port = serial_hsu_request_port,
+ .config_port = serial_hsu_config_port,
+ .verify_port = serial_hsu_verify_port,
+};
+
+static struct uart_driver serial_hsu_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "MFD serial",
+ .dev_name = "ttyMFD",
+ .major = TTY_MAJOR,
+ .minor = 128,
+ .nr = 3,
+};
+
+#ifdef CONFIG_PM
+static int serial_hsu_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ void *priv = pci_get_drvdata(pdev);
+ struct uart_hsu_port *up;
+
+ /* Make sure this is not the internal dma controller */
+ if (priv && (pdev->device != 0x081E)) {
+ up = priv;
+ uart_suspend_port(&serial_hsu_reg, &up->port);
+ }
+
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int serial_hsu_resume(struct pci_dev *pdev)
+{
+ void *priv = pci_get_drvdata(pdev);
+ struct uart_hsu_port *up;
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "HSU: can't re-enable device, try to continue\n");
+
+ if (priv && (pdev->device != 0x081E)) {
+ up = priv;
+ uart_resume_port(&serial_hsu_reg, &up->port);
+ }
+ return 0;
+}
+#else
+#define serial_hsu_suspend NULL
+#define serial_hsu_resume NULL
+#endif
+
+/* Temporary global pointer until we settle on using one or four PCI devices */
+static struct hsu_port *phsu;
+
+static int serial_hsu_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct uart_hsu_port *uport;
+ int index, ret;
+
+ printk(KERN_INFO "HSU: found PCI Serial controller (ID: %04x:%04x)\n",
+ pdev->vendor, pdev->device);
+
+ switch (pdev->device) {
+ case 0x081B:
+ index = 0;
+ break;
+ case 0x081C:
+ index = 1;
+ break;
+ case 0x081D:
+ index = 2;
+ break;
+ case 0x081E:
+ /* internal DMA controller */
+ index = 3;
+ break;
+ default:
+ dev_err(&pdev->dev, "HSU: out of index!");
+ return -ENODEV;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ if (index == 3) {
+ /* DMA controller */
+ ret = request_irq(pdev->irq, dma_irq, 0, "hsu_dma", phsu);
+ if (ret) {
+ dev_err(&pdev->dev, "can not get IRQ\n");
+ goto err_disable;
+ }
+ pci_set_drvdata(pdev, phsu);
+ } else {
+ /* UART port 0~2 */
+ uport = &phsu->port[index];
+ uport->port.irq = pdev->irq;
+ uport->port.dev = &pdev->dev;
+ uport->dev = &pdev->dev;
+
+ ret = request_irq(pdev->irq, port_irq, 0, uport->name, uport);
+ if (ret) {
+ dev_err(&pdev->dev, "can not get IRQ\n");
+ goto err_disable;
+ }
+ uart_add_one_port(&serial_hsu_reg, &uport->port);
+
+#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
+ if (index == 2) {
+ register_console(&serial_hsu_console);
+ uport->port.cons = &serial_hsu_console;
+ }
+#endif
+ pci_set_drvdata(pdev, uport);
+ }
+
+ return 0;
+
+err_disable:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void hsu_global_init(void)
+{
+ struct hsu_port *hsu;
+ struct uart_hsu_port *uport;
+ struct hsu_dma_chan *dchan;
+ int i, ret;
+
+ hsu = kzalloc(sizeof(struct hsu_port), GFP_KERNEL);
+ if (!hsu)
+ return;
+
+ /* Get basic io resource and map it */
+ hsu->paddr = 0xffa28000;
+ hsu->iolen = 0x1000;
+
+ if (!(request_mem_region(hsu->paddr, hsu->iolen, "HSU global")))
+ pr_warning("HSU: error in request mem region\n");
+
+ hsu->reg = ioremap_nocache((unsigned long)hsu->paddr, hsu->iolen);
+ if (!hsu->reg) {
+ pr_err("HSU: error in ioremap\n");
+ ret = -ENOMEM;
+ goto err_free_region;
+ }
+
+ /* Initialise the 3 UART ports */
+ uport = hsu->port;
+ for (i = 0; i < 3; i++) {
+ uport->port.type = PORT_MFD;
+ uport->port.iotype = UPIO_MEM;
+ uport->port.mapbase = (resource_size_t)hsu->paddr
+ + HSU_PORT_REG_OFFSET
+ + i * HSU_PORT_REG_LENGTH;
+ uport->port.membase = hsu->reg + HSU_PORT_REG_OFFSET
+ + i * HSU_PORT_REG_LENGTH;
+
+ sprintf(uport->name, "hsu_port%d", i);
+ uport->port.fifosize = 64;
+ uport->port.ops = &serial_hsu_pops;
+ uport->port.line = i;
+ uport->port.flags = UPF_IOREMAP;
+ /* set the scalable maximum supported rate to 2764800 (115200 * 24) bps */
+ uport->port.uartclk = 115200 * 24 * 16;
+
+ uport->running = 0;
+ uport->txc = &hsu->chans[i * 2];
+ uport->rxc = &hsu->chans[i * 2 + 1];
+
+ serial_hsu_ports[i] = uport;
+ uport->index = i;
+
+ if (hsu_dma_enable & (1<<i))
+ uport->use_dma = 1;
+ else
+ uport->use_dma = 0;
+
+ uport++;
+ }
+
+ /* Initialise 6 dma channels */
+ dchan = hsu->chans;
+ for (i = 0; i < 6; i++) {
+ dchan->id = i;
+ dchan->dirt = (i & 0x1) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ dchan->uport = &hsu->port[i/2];
+ dchan->reg = hsu->reg + HSU_DMA_CHANS_REG_OFFSET +
+ i * HSU_DMA_CHANS_REG_LENGTH;
+
+ dchan++;
+ }
+
+ phsu = hsu;
+ hsu_debugfs_init(hsu);
+ return;
+
+err_free_region:
+ release_mem_region(hsu->paddr, hsu->iolen);
+ kfree(hsu);
+ return;
+}
+
+static void serial_hsu_remove(struct pci_dev *pdev)
+{
+ void *priv = pci_get_drvdata(pdev);
+ struct uart_hsu_port *up;
+
+ if (!priv)
+ return;
+
+ /* For port 0/1/2, priv is the address of uart_hsu_port */
+ if (pdev->device != 0x081E) {
+ up = priv;
+ uart_remove_one_port(&serial_hsu_reg, &up->port);
+ }
+
+ pci_set_drvdata(pdev, NULL);
+ free_irq(pdev->irq, priv);
+ pci_disable_device(pdev);
+}
+
+/* First 3 are UART ports, and the 4th is the DMA */
+static const struct pci_device_id pci_ids[] __devinitdata = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081E) },
+ {},
+};
+
+static struct pci_driver hsu_pci_driver = {
+ .name = "HSU serial",
+ .id_table = pci_ids,
+ .probe = serial_hsu_probe,
+ .remove = __devexit_p(serial_hsu_remove),
+ .suspend = serial_hsu_suspend,
+ .resume = serial_hsu_resume,
+};
+
+static int __init hsu_pci_init(void)
+{
+ int ret;
+
+ hsu_global_init();
+
+ ret = uart_register_driver(&serial_hsu_reg);
+ if (ret)
+ return ret;
+
+ return pci_register_driver(&hsu_pci_driver);
+}
+
+static void __exit hsu_pci_exit(void)
+{
+ pci_unregister_driver(&hsu_pci_driver);
+ uart_unregister_driver(&serial_hsu_reg);
+
+ hsu_debugfs_remove(phsu);
+
+ kfree(phsu);
+}
+
+module_init(hsu_pci_init);
+module_exit(hsu_pci_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:medfield-hsu");
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
new file mode 100644
index 00000000..a0bcd8a3
--- /dev/null
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -0,0 +1,1524 @@
+/*
+ * Driver for the PSC of the Freescale MPC52xx PSCs configured as UARTs.
+ *
+ * FIXME According to the user manual, the status bits in the status register
+ * are only updated when the peripherals access the FIFO, not when the
+ * CPU accesses them. Since we use these bits to know when to stop writing
+ * and reading, they may not be updated in time and a race condition may
+ * exist. I haven't been able to prove this, but if any problem arises it
+ * might be worth checking. The TX/RX FIFO status registers should be used
+ * in addition.
+ * Update: Actually, they do seem to be updated ... at least the bits we use.
+ *
+ *
+ * Maintainer : Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Some of the code has been inspired/copied from the 2.4 code written
+ * by Dale Farnsworth <dfarnsworth@mvista.com>.
+ *
+ * Copyright (C) 2008 Freescale Semiconductor Inc.
+ * John Rigby <jrigby@gmail.com>
+ * Added support for MPC5121
+ * Copyright (C) 2006 Secret Lab Technologies Ltd.
+ * Grant Likely <grant.likely@secretlab.ca>
+ * Copyright (C) 2004-2006 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#undef DEBUG
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+
+#include <asm/mpc52xx.h>
+#include <asm/mpc52xx_psc.h>
+
+#if defined(CONFIG_SERIAL_MPC52xx_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+
+
+/* We've been assigned a range on the "Low-density serial ports" major */
+#define SERIAL_PSC_MAJOR 204
+#define SERIAL_PSC_MINOR 148
+
+
+#define ISR_PASS_LIMIT 256 /* Max number of iterations in the interrupt handler */
+
+
+static struct uart_port mpc52xx_uart_ports[MPC52xx_PSC_MAXNUM];
+ /* Rem: - We use the read_status_mask as a shadow of
+ * psc->mpc52xx_psc_imr
+ * - It's important that this array is all zero at start, as we
+ * use it to know whether it's initialized or not! If it isn't
+ * guaranteed to be cleared, a memset(..., 0, ...) should be
+ * added to the console_init
+ */
+
+/* lookup table for matching device nodes to index numbers */
+static struct device_node *mpc52xx_uart_nodes[MPC52xx_PSC_MAXNUM];
+
+static void mpc52xx_uart_of_enumerate(void);
+
+
+#define PSC(port) ((struct mpc52xx_psc __iomem *)((port)->membase))
+
+
+/* Forward declarations of the interrupt handling routines */
+static irqreturn_t mpc52xx_uart_int(int irq, void *dev_id);
+static irqreturn_t mpc5xxx_uart_process_int(struct uart_port *port);
+
+
+/* Simple macro to test whether a port is a console or not. This one is
+ * taken from serial_core.c and should maybe be moved to serial_core.h? */
+#ifdef CONFIG_SERIAL_CORE_CONSOLE
+#define uart_console(port) \
+ ((port)->cons && (port)->cons->index == (port)->line)
+#else
+#define uart_console(port) (0)
+#endif
+
+/* ======================================================================== */
+/* PSC fifo operations for isolating differences between 52xx and 512x */
+/* ======================================================================== */
+
+struct psc_ops {
+ void (*fifo_init)(struct uart_port *port);
+ int (*raw_rx_rdy)(struct uart_port *port);
+ int (*raw_tx_rdy)(struct uart_port *port);
+ int (*rx_rdy)(struct uart_port *port);
+ int (*tx_rdy)(struct uart_port *port);
+ int (*tx_empty)(struct uart_port *port);
+ void (*stop_rx)(struct uart_port *port);
+ void (*start_tx)(struct uart_port *port);
+ void (*stop_tx)(struct uart_port *port);
+ void (*rx_clr_irq)(struct uart_port *port);
+ void (*tx_clr_irq)(struct uart_port *port);
+ void (*write_char)(struct uart_port *port, unsigned char c);
+ unsigned char (*read_char)(struct uart_port *port);
+ void (*cw_disable_ints)(struct uart_port *port);
+ void (*cw_restore_ints)(struct uart_port *port);
+ unsigned int (*set_baudrate)(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old);
+ int (*clock)(struct uart_port *port, int enable);
+ int (*fifoc_init)(void);
+ void (*fifoc_uninit)(void);
+ void (*get_irq)(struct uart_port *, struct device_node *);
+ irqreturn_t (*handle_irq)(struct uart_port *port);
+};
+
+/* setting the prescaler and divisor reg is common for all chips */
+static inline void mpc52xx_set_divisor(struct mpc52xx_psc __iomem *psc,
+ u16 prescaler, unsigned int divisor)
+{
+ /* select prescaler */
+ out_be16(&psc->mpc52xx_psc_clock_select, prescaler);
+ out_8(&psc->ctur, divisor >> 8);
+ out_8(&psc->ctlr, divisor & 0xff);
+}
+
+#ifdef CONFIG_PPC_MPC52xx
+#define FIFO_52xx(port) ((struct mpc52xx_psc_fifo __iomem *)(PSC(port)+1))
+static void mpc52xx_psc_fifo_init(struct uart_port *port)
+{
+ struct mpc52xx_psc __iomem *psc = PSC(port);
+ struct mpc52xx_psc_fifo __iomem *fifo = FIFO_52xx(port);
+
+ out_8(&fifo->rfcntl, 0x00);
+ out_be16(&fifo->rfalarm, 0x1ff);
+ out_8(&fifo->tfcntl, 0x07);
+ out_be16(&fifo->tfalarm, 0x80);
+
+ port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY;
+ out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static int mpc52xx_psc_raw_rx_rdy(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_status)
+ & MPC52xx_PSC_SR_RXRDY;
+}
+
+static int mpc52xx_psc_raw_tx_rdy(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_status)
+ & MPC52xx_PSC_SR_TXRDY;
+}
+
+
+static int mpc52xx_psc_rx_rdy(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_isr)
+ & port->read_status_mask
+ & MPC52xx_PSC_IMR_RXRDY;
+}
+
+static int mpc52xx_psc_tx_rdy(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_isr)
+ & port->read_status_mask
+ & MPC52xx_PSC_IMR_TXRDY;
+}
+
+static int mpc52xx_psc_tx_empty(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_status)
+ & MPC52xx_PSC_SR_TXEMP;
+}
+
+static void mpc52xx_psc_start_tx(struct uart_port *port)
+{
+ port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
+ out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static void mpc52xx_psc_stop_tx(struct uart_port *port)
+{
+ port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY;
+ out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static void mpc52xx_psc_stop_rx(struct uart_port *port)
+{
+ port->read_status_mask &= ~MPC52xx_PSC_IMR_RXRDY;
+ out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static void mpc52xx_psc_rx_clr_irq(struct uart_port *port)
+{
+}
+
+static void mpc52xx_psc_tx_clr_irq(struct uart_port *port)
+{
+}
+
+static void mpc52xx_psc_write_char(struct uart_port *port, unsigned char c)
+{
+ out_8(&PSC(port)->mpc52xx_psc_buffer_8, c);
+}
+
+static unsigned char mpc52xx_psc_read_char(struct uart_port *port)
+{
+ return in_8(&PSC(port)->mpc52xx_psc_buffer_8);
+}
+
+static void mpc52xx_psc_cw_disable_ints(struct uart_port *port)
+{
+ out_be16(&PSC(port)->mpc52xx_psc_imr, 0);
+}
+
+static void mpc52xx_psc_cw_restore_ints(struct uart_port *port)
+{
+ out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static unsigned int mpc5200_psc_set_baudrate(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
+{
+ unsigned int baud;
+ unsigned int divisor;
+
+ /* The 5200 has a fixed /32 prescaler, uartclk contains the ipb freq */
+ baud = uart_get_baud_rate(port, new, old,
+ port->uartclk / (32 * 0xffff) + 1,
+ port->uartclk / 32);
+ divisor = (port->uartclk + 16 * baud) / (32 * baud);
+
+ /* enable the /32 prescaler and set the divisor */
+ mpc52xx_set_divisor(PSC(port), 0xdd00, divisor);
+ return baud;
+}
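+
+/*
+ * Worked example for mpc5200_psc_set_baudrate() above (illustrative only,
+ * not from the original source; the 33 MHz IPB clock is an assumed value):
+ * for a requested rate of 115200 baud,
+ * divisor = (33000000 + 16 * 115200) / (32 * 115200) = 9,
+ * giving an actual rate of 33000000 / (32 * 9) ~= 114583 baud,
+ * i.e. about 0.5% below the requested rate.
+ */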
+
+static unsigned int mpc5200b_psc_set_baudrate(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
+{
+ unsigned int baud;
+ unsigned int divisor;
+ u16 prescaler;
+
+ /* The 5200B has a selectable /4 or /32 prescaler, uartclk contains the
+ * ipb freq */
+ baud = uart_get_baud_rate(port, new, old,
+ port->uartclk / (32 * 0xffff) + 1,
+ port->uartclk / 4);
+ divisor = (port->uartclk + 2 * baud) / (4 * baud);
+
+ /* select the proper prescaler and set the divisor */
+ if (divisor > 0xffff) {
+ divisor = (divisor + 4) / 8;
+ prescaler = 0xdd00; /* /32 */
+ } else
+ prescaler = 0xff00; /* /4 */
+ mpc52xx_set_divisor(PSC(port), prescaler, divisor);
+ return baud;
+}
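+
+/*
+ * Worked example for mpc5200b_psc_set_baudrate() above (illustrative only,
+ * not from the original source; the 33 MHz IPB clock is an assumed value):
+ * at 115200 baud the /4 path gives divisor = 33230400 / 460800 = 72, which
+ * fits in 16 bits, so the /4 prescaler is kept. The /4 divisor only
+ * overflows 0xffff for rates below roughly 33000000 / (4 * 0xffff) ~= 126
+ * baud, so nearly all practical rates use the finer-grained /4 prescaler.
+ */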
+
+static void mpc52xx_psc_get_irq(struct uart_port *port, struct device_node *np)
+{
+ port->irqflags = IRQF_DISABLED;
+ port->irq = irq_of_parse_and_map(np, 0);
+}
+
+/* 52xx specific interrupt handler. The caller holds the port lock */
+static irqreturn_t mpc52xx_psc_handle_irq(struct uart_port *port)
+{
+ return mpc5xxx_uart_process_int(port);
+}
+
+static struct psc_ops mpc52xx_psc_ops = {
+ .fifo_init = mpc52xx_psc_fifo_init,
+ .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
+ .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy,
+ .rx_rdy = mpc52xx_psc_rx_rdy,
+ .tx_rdy = mpc52xx_psc_tx_rdy,
+ .tx_empty = mpc52xx_psc_tx_empty,
+ .stop_rx = mpc52xx_psc_stop_rx,
+ .start_tx = mpc52xx_psc_start_tx,
+ .stop_tx = mpc52xx_psc_stop_tx,
+ .rx_clr_irq = mpc52xx_psc_rx_clr_irq,
+ .tx_clr_irq = mpc52xx_psc_tx_clr_irq,
+ .write_char = mpc52xx_psc_write_char,
+ .read_char = mpc52xx_psc_read_char,
+ .cw_disable_ints = mpc52xx_psc_cw_disable_ints,
+ .cw_restore_ints = mpc52xx_psc_cw_restore_ints,
+ .set_baudrate = mpc5200_psc_set_baudrate,
+ .get_irq = mpc52xx_psc_get_irq,
+ .handle_irq = mpc52xx_psc_handle_irq,
+};
+
+static struct psc_ops mpc5200b_psc_ops = {
+ .fifo_init = mpc52xx_psc_fifo_init,
+ .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
+ .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy,
+ .rx_rdy = mpc52xx_psc_rx_rdy,
+ .tx_rdy = mpc52xx_psc_tx_rdy,
+ .tx_empty = mpc52xx_psc_tx_empty,
+ .stop_rx = mpc52xx_psc_stop_rx,
+ .start_tx = mpc52xx_psc_start_tx,
+ .stop_tx = mpc52xx_psc_stop_tx,
+ .rx_clr_irq = mpc52xx_psc_rx_clr_irq,
+ .tx_clr_irq = mpc52xx_psc_tx_clr_irq,
+ .write_char = mpc52xx_psc_write_char,
+ .read_char = mpc52xx_psc_read_char,
+ .cw_disable_ints = mpc52xx_psc_cw_disable_ints,
+ .cw_restore_ints = mpc52xx_psc_cw_restore_ints,
+ .set_baudrate = mpc5200b_psc_set_baudrate,
+ .get_irq = mpc52xx_psc_get_irq,
+ .handle_irq = mpc52xx_psc_handle_irq,
+};
+
+#endif /* CONFIG_PPC_MPC52xx */
+
+#ifdef CONFIG_PPC_MPC512x
+#define FIFO_512x(port) ((struct mpc512x_psc_fifo __iomem *)(PSC(port)+1))
+
+/* PSC FIFO Controller for mpc512x */
+struct psc_fifoc {
+ u32 fifoc_cmd;
+ u32 fifoc_int;
+ u32 fifoc_dma;
+ u32 fifoc_axe;
+ u32 fifoc_debug;
+};
+
+static struct psc_fifoc __iomem *psc_fifoc;
+static unsigned int psc_fifoc_irq;
+
+static void mpc512x_psc_fifo_init(struct uart_port *port)
+{
+ /* /32 prescaler */
+ out_be16(&PSC(port)->mpc52xx_psc_clock_select, 0xdd00);
+
+ out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_RESET_SLICE);
+ out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
+ out_be32(&FIFO_512x(port)->txalarm, 1);
+ out_be32(&FIFO_512x(port)->tximr, 0);
+
+ out_be32(&FIFO_512x(port)->rxcmd, MPC512x_PSC_FIFO_RESET_SLICE);
+ out_be32(&FIFO_512x(port)->rxcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
+ out_be32(&FIFO_512x(port)->rxalarm, 1);
+ out_be32(&FIFO_512x(port)->rximr, 0);
+
+ out_be32(&FIFO_512x(port)->tximr, MPC512x_PSC_FIFO_ALARM);
+ out_be32(&FIFO_512x(port)->rximr, MPC512x_PSC_FIFO_ALARM);
+}
+
+static int mpc512x_psc_raw_rx_rdy(struct uart_port *port)
+{
+ return !(in_be32(&FIFO_512x(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
+}
+
+static int mpc512x_psc_raw_tx_rdy(struct uart_port *port)
+{
+ return !(in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_FULL);
+}
+
+static int mpc512x_psc_rx_rdy(struct uart_port *port)
+{
+ return in_be32(&FIFO_512x(port)->rxsr)
+ & in_be32(&FIFO_512x(port)->rximr)
+ & MPC512x_PSC_FIFO_ALARM;
+}
+
+static int mpc512x_psc_tx_rdy(struct uart_port *port)
+{
+ return in_be32(&FIFO_512x(port)->txsr)
+ & in_be32(&FIFO_512x(port)->tximr)
+ & MPC512x_PSC_FIFO_ALARM;
+}
+
+static int mpc512x_psc_tx_empty(struct uart_port *port)
+{
+ return in_be32(&FIFO_512x(port)->txsr)
+ & MPC512x_PSC_FIFO_EMPTY;
+}
+
+static void mpc512x_psc_stop_rx(struct uart_port *port)
+{
+ unsigned long rx_fifo_imr;
+
+ rx_fifo_imr = in_be32(&FIFO_512x(port)->rximr);
+ rx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM;
+ out_be32(&FIFO_512x(port)->rximr, rx_fifo_imr);
+}
+
+static void mpc512x_psc_start_tx(struct uart_port *port)
+{
+ unsigned long tx_fifo_imr;
+
+ tx_fifo_imr = in_be32(&FIFO_512x(port)->tximr);
+ tx_fifo_imr |= MPC512x_PSC_FIFO_ALARM;
+ out_be32(&FIFO_512x(port)->tximr, tx_fifo_imr);
+}
+
+static void mpc512x_psc_stop_tx(struct uart_port *port)
+{
+ unsigned long tx_fifo_imr;
+
+ tx_fifo_imr = in_be32(&FIFO_512x(port)->tximr);
+ tx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM;
+ out_be32(&FIFO_512x(port)->tximr, tx_fifo_imr);
+}
+
+static void mpc512x_psc_rx_clr_irq(struct uart_port *port)
+{
+ out_be32(&FIFO_512x(port)->rxisr, in_be32(&FIFO_512x(port)->rxisr));
+}
+
+static void mpc512x_psc_tx_clr_irq(struct uart_port *port)
+{
+ out_be32(&FIFO_512x(port)->txisr, in_be32(&FIFO_512x(port)->txisr));
+}
+
+static void mpc512x_psc_write_char(struct uart_port *port, unsigned char c)
+{
+ out_8(&FIFO_512x(port)->txdata_8, c);
+}
+
+static unsigned char mpc512x_psc_read_char(struct uart_port *port)
+{
+ return in_8(&FIFO_512x(port)->rxdata_8);
+}
+
+static void mpc512x_psc_cw_disable_ints(struct uart_port *port)
+{
+ port->read_status_mask =
+ in_be32(&FIFO_512x(port)->tximr) << 16 |
+ in_be32(&FIFO_512x(port)->rximr);
+ out_be32(&FIFO_512x(port)->tximr, 0);
+ out_be32(&FIFO_512x(port)->rximr, 0);
+}
+
+static void mpc512x_psc_cw_restore_ints(struct uart_port *port)
+{
+ out_be32(&FIFO_512x(port)->tximr,
+ (port->read_status_mask >> 16) & 0x7f);
+ out_be32(&FIFO_512x(port)->rximr, port->read_status_mask & 0x7f);
+}
+
+static unsigned int mpc512x_psc_set_baudrate(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
+{
+ unsigned int baud;
+ unsigned int divisor;
+
+ /*
+ * The "MPC5121e Microcontroller Reference Manual, Rev. 3" says on
+ * pg. 30-10 that the chip supports a /32 and a /10 prescaler.
+ * Furthermore, it states that "After reset, the prescaler by 10
+ * for the UART mode is selected", but the reset register value is
+ * 0x0000 which means a /32 prescaler. This is wrong.
+ *
+ * In reality using /32 prescaler doesn't work, as it is not supported!
+ * Use /16 or /10 prescaler, see "MPC5121e Hardware Design Guide",
+ * Chapter 4.1 PSC in UART Mode.
+ * Calculate with a /16 prescaler here.
+ */
+
+ /* uartclk contains the ips freq */
+ baud = uart_get_baud_rate(port, new, old,
+ port->uartclk / (16 * 0xffff) + 1,
+ port->uartclk / 16);
+ divisor = (port->uartclk + 8 * baud) / (16 * baud);
+
+ /* enable the /16 prescaler and set the divisor */
+ mpc52xx_set_divisor(PSC(port), 0xdd00, divisor);
+ return baud;
+}
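+
+/*
+ * Worked example for mpc512x_psc_set_baudrate() above (illustrative only,
+ * not from the original source; the 66 MHz IPS clock is an assumed value):
+ * for a requested rate of 115200 baud,
+ * divisor = (66000000 + 8 * 115200) / (16 * 115200) = 36,
+ * giving an actual rate of 66000000 / (16 * 36) ~= 114583 baud.
+ */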
+
+/* Init PSC FIFO Controller */
+static int __init mpc512x_psc_fifoc_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL,
+ "fsl,mpc5121-psc-fifo");
+ if (!np) {
+ pr_err("%s: Can't find FIFOC node\n", __func__);
+ return -ENODEV;
+ }
+
+ psc_fifoc = of_iomap(np, 0);
+ if (!psc_fifoc) {
+ pr_err("%s: Can't map FIFOC\n", __func__);
+ of_node_put(np);
+ return -ENODEV;
+ }
+
+ psc_fifoc_irq = irq_of_parse_and_map(np, 0);
+ of_node_put(np);
+ if (psc_fifoc_irq == NO_IRQ) {
+ pr_err("%s: Can't get FIFOC irq\n", __func__);
+ iounmap(psc_fifoc);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit mpc512x_psc_fifoc_uninit(void)
+{
+ iounmap(psc_fifoc);
+}
+
+/* 512x specific interrupt handler. The caller holds the port lock */
+static irqreturn_t mpc512x_psc_handle_irq(struct uart_port *port)
+{
+ unsigned long fifoc_int;
+ int psc_num;
+
+ /* Read pending PSC FIFOC interrupts */
+ fifoc_int = in_be32(&psc_fifoc->fifoc_int);
+
+ /* Check if it is an interrupt for this port */
+ psc_num = (port->mapbase & 0xf00) >> 8;
+ if (test_bit(psc_num, &fifoc_int) ||
+ test_bit(psc_num + 16, &fifoc_int))
+ return mpc5xxx_uart_process_int(port);
+
+ return IRQ_NONE;
+}
+
+static int mpc512x_psc_clock(struct uart_port *port, int enable)
+{
+ struct clk *psc_clk;
+ int psc_num;
+ char clk_name[10];
+
+ if (uart_console(port))
+ return 0;
+
+ psc_num = (port->mapbase & 0xf00) >> 8;
+ snprintf(clk_name, sizeof(clk_name), "psc%d_clk", psc_num);
+ psc_clk = clk_get(port->dev, clk_name);
+ if (IS_ERR(psc_clk)) {
+ dev_err(port->dev, "Failed to get PSC clock entry!\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(port->dev, "%s %sable\n", clk_name, enable ? "en" : "dis");
+
+ if (enable)
+ clk_enable(psc_clk);
+ else
+ clk_disable(psc_clk);
+
+ return 0;
+}
+
+static void mpc512x_psc_get_irq(struct uart_port *port, struct device_node *np)
+{
+ port->irqflags = IRQF_SHARED;
+ port->irq = psc_fifoc_irq;
+}
+
+static struct psc_ops mpc512x_psc_ops = {
+ .fifo_init = mpc512x_psc_fifo_init,
+ .raw_rx_rdy = mpc512x_psc_raw_rx_rdy,
+ .raw_tx_rdy = mpc512x_psc_raw_tx_rdy,
+ .rx_rdy = mpc512x_psc_rx_rdy,
+ .tx_rdy = mpc512x_psc_tx_rdy,
+ .tx_empty = mpc512x_psc_tx_empty,
+ .stop_rx = mpc512x_psc_stop_rx,
+ .start_tx = mpc512x_psc_start_tx,
+ .stop_tx = mpc512x_psc_stop_tx,
+ .rx_clr_irq = mpc512x_psc_rx_clr_irq,
+ .tx_clr_irq = mpc512x_psc_tx_clr_irq,
+ .write_char = mpc512x_psc_write_char,
+ .read_char = mpc512x_psc_read_char,
+ .cw_disable_ints = mpc512x_psc_cw_disable_ints,
+ .cw_restore_ints = mpc512x_psc_cw_restore_ints,
+ .set_baudrate = mpc512x_psc_set_baudrate,
+ .clock = mpc512x_psc_clock,
+ .fifoc_init = mpc512x_psc_fifoc_init,
+ .fifoc_uninit = mpc512x_psc_fifoc_uninit,
+ .get_irq = mpc512x_psc_get_irq,
+ .handle_irq = mpc512x_psc_handle_irq,
+};
+#endif
+
+static struct psc_ops *psc_ops;
+
+/* ======================================================================== */
+/* UART operations */
+/* ======================================================================== */
+
+static unsigned int
+mpc52xx_uart_tx_empty(struct uart_port *port)
+{
+ return psc_ops->tx_empty(port) ? TIOCSER_TEMT : 0;
+}
+
+static void
+mpc52xx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ if (mctrl & TIOCM_RTS)
+ out_8(&PSC(port)->op1, MPC52xx_PSC_OP_RTS);
+ else
+ out_8(&PSC(port)->op0, MPC52xx_PSC_OP_RTS);
+}
+
+static unsigned int
+mpc52xx_uart_get_mctrl(struct uart_port *port)
+{
+ unsigned int ret = TIOCM_DSR;
+ u8 status = in_8(&PSC(port)->mpc52xx_psc_ipcr);
+
+ if (!(status & MPC52xx_PSC_CTS))
+ ret |= TIOCM_CTS;
+ if (!(status & MPC52xx_PSC_DCD))
+ ret |= TIOCM_CAR;
+
+ return ret;
+}
+
+static void
+mpc52xx_uart_stop_tx(struct uart_port *port)
+{
+ /* port->lock taken by caller */
+ psc_ops->stop_tx(port);
+}
+
+static void
+mpc52xx_uart_start_tx(struct uart_port *port)
+{
+ /* port->lock taken by caller */
+ psc_ops->start_tx(port);
+}
+
+static void
+mpc52xx_uart_send_xchar(struct uart_port *port, char ch)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&port->lock, flags);
+
+ port->x_char = ch;
+ if (ch) {
+ /* Make sure tx interrupts are on */
+ /* Truly necessary ??? They should be anyway */
+ psc_ops->start_tx(port);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void
+mpc52xx_uart_stop_rx(struct uart_port *port)
+{
+ /* port->lock taken by caller */
+ psc_ops->stop_rx(port);
+}
+
+static void
+mpc52xx_uart_enable_ms(struct uart_port *port)
+{
+ struct mpc52xx_psc __iomem *psc = PSC(port);
+
+ /* clear D_*-bits by reading them */
+ in_8(&psc->mpc52xx_psc_ipcr);
+ /* enable CTS and DCD as IPC interrupts */
+ out_8(&psc->mpc52xx_psc_acr, MPC52xx_PSC_IEC_CTS | MPC52xx_PSC_IEC_DCD);
+
+ port->read_status_mask |= MPC52xx_PSC_IMR_IPC;
+ out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static void
+mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (ctl == -1)
+ out_8(&PSC(port)->command, MPC52xx_PSC_START_BRK);
+ else
+ out_8(&PSC(port)->command, MPC52xx_PSC_STOP_BRK);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int
+mpc52xx_uart_startup(struct uart_port *port)
+{
+ struct mpc52xx_psc __iomem *psc = PSC(port);
+ int ret;
+
+ if (psc_ops->clock) {
+ ret = psc_ops->clock(port, 1);
+ if (ret)
+ return ret;
+ }
+
+ /* Request IRQ */
+ ret = request_irq(port->irq, mpc52xx_uart_int,
+ port->irqflags, "mpc52xx_psc_uart", port);
+ if (ret)
+ return ret;
+
+ /* Reset/activate the port, clear and enable interrupts */
+ out_8(&psc->command, MPC52xx_PSC_RST_RX);
+ out_8(&psc->command, MPC52xx_PSC_RST_TX);
+
+ out_be32(&psc->sicr, 0); /* UART mode DCD ignored */
+
+ psc_ops->fifo_init(port);
+
+ out_8(&psc->command, MPC52xx_PSC_TX_ENABLE);
+ out_8(&psc->command, MPC52xx_PSC_RX_ENABLE);
+
+ return 0;
+}
+
+static void
+mpc52xx_uart_shutdown(struct uart_port *port)
+{
+ struct mpc52xx_psc __iomem *psc = PSC(port);
+
+ /* Shut down the port. Leave TX active if on a console port */
+ out_8(&psc->command, MPC52xx_PSC_RST_RX);
+ if (!uart_console(port))
+ out_8(&psc->command, MPC52xx_PSC_RST_TX);
+
+ port->read_status_mask = 0;
+ out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+
+ if (psc_ops->clock)
+ psc_ops->clock(port, 0);
+
+ /* Release interrupt */
+ free_irq(port->irq, port);
+}
+
+static void
+mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
+ struct ktermios *old)
+{
+ struct mpc52xx_psc __iomem *psc = PSC(port);
+ unsigned long flags;
+ unsigned char mr1, mr2;
+ unsigned int j;
+ unsigned int baud;
+
+ /* Prepare what we're gonna write */
+ mr1 = 0;
+
+ switch (new->c_cflag & CSIZE) {
+ case CS5: mr1 |= MPC52xx_PSC_MODE_5_BITS;
+ break;
+ case CS6: mr1 |= MPC52xx_PSC_MODE_6_BITS;
+ break;
+ case CS7: mr1 |= MPC52xx_PSC_MODE_7_BITS;
+ break;
+ case CS8:
+ default: mr1 |= MPC52xx_PSC_MODE_8_BITS;
+ }
+
+ if (new->c_cflag & PARENB) {
+ mr1 |= (new->c_cflag & PARODD) ?
+ MPC52xx_PSC_MODE_PARODD : MPC52xx_PSC_MODE_PAREVEN;
+ } else
+ mr1 |= MPC52xx_PSC_MODE_PARNONE;
+
+
+ mr2 = 0;
+
+ if (new->c_cflag & CSTOPB)
+ mr2 |= MPC52xx_PSC_MODE_TWO_STOP;
+ else
+ mr2 |= ((new->c_cflag & CSIZE) == CS5) ?
+ MPC52xx_PSC_MODE_ONE_STOP_5_BITS :
+ MPC52xx_PSC_MODE_ONE_STOP;
+
+ if (new->c_cflag & CRTSCTS) {
+ mr1 |= MPC52xx_PSC_MODE_RXRTS;
+ mr2 |= MPC52xx_PSC_MODE_TXCTS;
+ }
+
+ /* Get the lock */
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Do our best to flush TX & RX, so we don't lose anything */
+ /* But we don't wait indefinitely ! */
+ j = 5000000; /* Maximum wait */
+ /* FIXME Can't receive chars here since set_termios might be called at
+ * early boot for the console; not everything is ready to receive at
+ * that point, and trying would just make the kernel oops */
+ /* while (j-- && mpc52xx_uart_int_rx_chars(port)); */
+ while (!mpc52xx_uart_tx_empty(port) && --j)
+ udelay(1);
+
+ if (!j)
+ printk(KERN_ERR "mpc52xx_uart.c: "
+ "Unable to flush RX & TX fifos in-time in set_termios."
+ "Some chars may have been lost.\n");
+
+ /* Reset the TX & RX */
+ out_8(&psc->command, MPC52xx_PSC_RST_RX);
+ out_8(&psc->command, MPC52xx_PSC_RST_TX);
+
+ /* Send new mode settings */
+ out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
+ out_8(&psc->mode, mr1);
+ out_8(&psc->mode, mr2);
+ baud = psc_ops->set_baudrate(port, new, old);
+
+ /* Update the per-port timeout */
+ uart_update_timeout(port, new->c_cflag, baud);
+
+ if (UART_ENABLE_MS(port, new->c_cflag))
+ mpc52xx_uart_enable_ms(port);
+
+ /* Reenable TX & RX */
+ out_8(&psc->command, MPC52xx_PSC_TX_ENABLE);
+ out_8(&psc->command, MPC52xx_PSC_RX_ENABLE);
+
+ /* We're all set, release the lock */
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *
+mpc52xx_uart_type(struct uart_port *port)
+{
+ /*
+ * We keep using PORT_MPC52xx for historical reasons although it also
+ * applies to MPC512x; we print "MPC5xxx" so as not to irritate users
+ */
+ return port->type == PORT_MPC52xx ? "MPC5xxx PSC" : NULL;
+}
+
+static void
+mpc52xx_uart_release_port(struct uart_port *port)
+{
+ /* remapped by us ? */
+ if (port->flags & UPF_IOREMAP) {
+ iounmap(port->membase);
+ port->membase = NULL;
+ }
+
+ release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc));
+}
+
+static int
+mpc52xx_uart_request_port(struct uart_port *port)
+{
+ int err;
+
+ if (port->flags & UPF_IOREMAP) /* Need to remap ? */
+ port->membase = ioremap(port->mapbase,
+ sizeof(struct mpc52xx_psc));
+
+ if (!port->membase)
+ return -EINVAL;
+
+ err = request_mem_region(port->mapbase, sizeof(struct mpc52xx_psc),
+ "mpc52xx_psc_uart") != NULL ? 0 : -EBUSY;
+
+ if (err && (port->flags & UPF_IOREMAP)) {
+ iounmap(port->membase);
+ port->membase = NULL;
+ }
+
+ return err;
+}
+
+static void
+mpc52xx_uart_config_port(struct uart_port *port, int flags)
+{
+ if ((flags & UART_CONFIG_TYPE)
+ && (mpc52xx_uart_request_port(port) == 0))
+ port->type = PORT_MPC52xx;
+}
+
+static int
+mpc52xx_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPC52xx)
+ return -EINVAL;
+
+ if ((ser->irq != port->irq) ||
+ (ser->io_type != UPIO_MEM) ||
+ (ser->baud_base != port->uartclk) ||
+ (ser->iomem_base != (void *)port->mapbase) ||
+ (ser->hub6 != 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+
+static struct uart_ops mpc52xx_uart_ops = {
+ .tx_empty = mpc52xx_uart_tx_empty,
+ .set_mctrl = mpc52xx_uart_set_mctrl,
+ .get_mctrl = mpc52xx_uart_get_mctrl,
+ .stop_tx = mpc52xx_uart_stop_tx,
+ .start_tx = mpc52xx_uart_start_tx,
+ .send_xchar = mpc52xx_uart_send_xchar,
+ .stop_rx = mpc52xx_uart_stop_rx,
+ .enable_ms = mpc52xx_uart_enable_ms,
+ .break_ctl = mpc52xx_uart_break_ctl,
+ .startup = mpc52xx_uart_startup,
+ .shutdown = mpc52xx_uart_shutdown,
+ .set_termios = mpc52xx_uart_set_termios,
+/* .pm = mpc52xx_uart_pm, Not supported yet */
+/* .set_wake = mpc52xx_uart_set_wake, Not supported yet */
+ .type = mpc52xx_uart_type,
+ .release_port = mpc52xx_uart_release_port,
+ .request_port = mpc52xx_uart_request_port,
+ .config_port = mpc52xx_uart_config_port,
+ .verify_port = mpc52xx_uart_verify_port
+};
+
+
+/* ======================================================================== */
+/* Interrupt handling */
+/* ======================================================================== */
+
+static inline int
+mpc52xx_uart_int_rx_chars(struct uart_port *port)
+{
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned char ch, flag;
+ unsigned short status;
+
+ /* While we can read, do so ! */
+ while (psc_ops->raw_rx_rdy(port)) {
+ /* Get the char */
+ ch = psc_ops->read_char(port);
+
+ /* Handle sysrq char */
+#ifdef SUPPORT_SYSRQ
+ if (uart_handle_sysrq_char(port, ch)) {
+ port->sysrq = 0;
+ continue;
+ }
+#endif
+
+ /* Store it */
+
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ status = in_be16(&PSC(port)->mpc52xx_psc_status);
+
+ if (status & (MPC52xx_PSC_SR_PE |
+ MPC52xx_PSC_SR_FE |
+ MPC52xx_PSC_SR_RB)) {
+
+ if (status & MPC52xx_PSC_SR_RB) {
+ flag = TTY_BREAK;
+ uart_handle_break(port);
+ port->icount.brk++;
+ } else if (status & MPC52xx_PSC_SR_PE) {
+ flag = TTY_PARITY;
+ port->icount.parity++;
+ }
+ else if (status & MPC52xx_PSC_SR_FE) {
+ flag = TTY_FRAME;
+ port->icount.frame++;
+ }
+
+ /* Clear error condition */
+ out_8(&PSC(port)->command, MPC52xx_PSC_RST_ERR_STAT);
+
+ }
+ tty_insert_flip_char(tty, ch, flag);
+ if (status & MPC52xx_PSC_SR_OE) {
+ /*
+ * Overrun is special, since it's
+ * reported immediately, and doesn't
+ * affect the current character
+ */
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ port->icount.overrun++;
+ }
+ }
+
+ spin_unlock(&port->lock);
+ tty_flip_buffer_push(tty);
+ spin_lock(&port->lock);
+
+ return psc_ops->raw_rx_rdy(port);
+}
+
+static inline int
+mpc52xx_uart_int_tx_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+
+ /* Process out of band chars */
+ if (port->x_char) {
+ psc_ops->write_char(port, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ return 1;
+ }
+
+ /* Nothing to do ? */
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ mpc52xx_uart_stop_tx(port);
+ return 0;
+ }
+
+ /* Send chars */
+ while (psc_ops->raw_tx_rdy(port)) {
+ psc_ops->write_char(port, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ }
+
+ /* Wake up */
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ /* Maybe we're done after all */
+ if (uart_circ_empty(xmit)) {
+ mpc52xx_uart_stop_tx(port);
+ return 0;
+ }
+
+ return 1;
+}
+
+static irqreturn_t
+mpc5xxx_uart_process_int(struct uart_port *port)
+{
+ unsigned long pass = ISR_PASS_LIMIT;
+ unsigned int keepgoing;
+ u8 status;
+
+ /* While we have stuff to do, we continue */
+ do {
+ /* If we don't find anything to do, we stop */
+ keepgoing = 0;
+
+ psc_ops->rx_clr_irq(port);
+ if (psc_ops->rx_rdy(port))
+ keepgoing |= mpc52xx_uart_int_rx_chars(port);
+
+ psc_ops->tx_clr_irq(port);
+ if (psc_ops->tx_rdy(port))
+ keepgoing |= mpc52xx_uart_int_tx_chars(port);
+
+ status = in_8(&PSC(port)->mpc52xx_psc_ipcr);
+ if (status & MPC52xx_PSC_D_DCD)
+ uart_handle_dcd_change(port, !(status & MPC52xx_PSC_DCD));
+
+ if (status & MPC52xx_PSC_D_CTS)
+ uart_handle_cts_change(port, !(status & MPC52xx_PSC_CTS));
+
+ /* Limit the number of iterations */
+ if (!(--pass))
+ keepgoing = 0;
+
+ } while (keepgoing);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+mpc52xx_uart_int(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ irqreturn_t ret;
+
+ spin_lock(&port->lock);
+
+ ret = psc_ops->handle_irq(port);
+
+ spin_unlock(&port->lock);
+
+ return ret;
+}
+
+/* ======================================================================== */
+/* Console ( if applicable ) */
+/* ======================================================================== */
+
+#ifdef CONFIG_SERIAL_MPC52xx_CONSOLE
+
+static void __init
+mpc52xx_console_get_options(struct uart_port *port,
+ int *baud, int *parity, int *bits, int *flow)
+{
+ struct mpc52xx_psc __iomem *psc = PSC(port);
+ unsigned char mr1;
+
+ pr_debug("mpc52xx_console_get_options(port=%p)\n", port);
+
+ /* Read the mode registers */
+ out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
+ mr1 = in_8(&psc->mode);
+
+ /* CT{U,L}R are write-only ! */
+ *baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
+
+ /* Parse them */
+ switch (mr1 & MPC52xx_PSC_MODE_BITS_MASK) {
+ case MPC52xx_PSC_MODE_5_BITS:
+ *bits = 5;
+ break;
+ case MPC52xx_PSC_MODE_6_BITS:
+ *bits = 6;
+ break;
+ case MPC52xx_PSC_MODE_7_BITS:
+ *bits = 7;
+ break;
+ case MPC52xx_PSC_MODE_8_BITS:
+ default:
+ *bits = 8;
+ }
+
+ if (mr1 & MPC52xx_PSC_MODE_PARNONE)
+ *parity = 'n';
+ else
+ *parity = mr1 & MPC52xx_PSC_MODE_PARODD ? 'o' : 'e';
+}
+
+static void
+mpc52xx_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_port *port = &mpc52xx_uart_ports[co->index];
+ unsigned int i, j;
+
+ /* Disable interrupts */
+ psc_ops->cw_disable_ints(port);
+
+ /* Wait for the TX buffer to be empty */
+ j = 5000000; /* Maximum wait */
+ while (!mpc52xx_uart_tx_empty(port) && --j)
+ udelay(1);
+
+ /* Write all the chars */
+ for (i = 0; i < count; i++, s++) {
+ /* Line return handling */
+ if (*s == '\n')
+ psc_ops->write_char(port, '\r');
+
+ /* Send the char */
+ psc_ops->write_char(port, *s);
+
+ /* Wait for the TX buffer to be empty */
+ j = 20000; /* Maximum wait */
+ while (!mpc52xx_uart_tx_empty(port) && --j)
+ udelay(1);
+ }
+
+ /* Restore interrupt state */
+ psc_ops->cw_restore_ints(port);
+}
+
+
+static int __init
+mpc52xx_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port = &mpc52xx_uart_ports[co->index];
+ struct device_node *np = mpc52xx_uart_nodes[co->index];
+ unsigned int uartclk;
+ struct resource res;
+ int ret;
+
+ int baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n",
+ co, co->index, options);
+
+ if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) {
+ pr_debug("PSC%x out of range\n", co->index);
+ return -EINVAL;
+ }
+
+ if (!np) {
+ pr_debug("PSC%x not found in device tree\n", co->index);
+ return -EINVAL;
+ }
+
+ pr_debug("Console on ttyPSC%x is %s\n",
+ co->index, mpc52xx_uart_nodes[co->index]->full_name);
+
+ /* Fetch register locations */
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ pr_debug("Could not get resources for PSC%x\n", co->index);
+ return ret;
+ }
+
+ uartclk = mpc5xxx_get_bus_frequency(np);
+ if (uartclk == 0) {
+ pr_debug("Could not find uart clock frequency!\n");
+ return -EINVAL;
+ }
+
+ /* Basic port init. Needed since we use some uart_??? func before
+ * real init for early access */
+ spin_lock_init(&port->lock);
+ port->uartclk = uartclk;
+ port->ops = &mpc52xx_uart_ops;
+ port->mapbase = res.start;
+ port->membase = ioremap(res.start, sizeof(struct mpc52xx_psc));
+ port->irq = irq_of_parse_and_map(np, 0);
+
+ if (port->membase == NULL)
+ return -EINVAL;
+
+ pr_debug("mpc52xx-psc uart at %p, mapped to %p, irq=%x, freq=%i\n",
+ (void *)port->mapbase, port->membase,
+ port->irq, port->uartclk);
+
+ /* Set up the port parameters according to options */
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ mpc52xx_console_get_options(port, &baud, &parity, &bits, &flow);
+
+ pr_debug("Setting console parameters: %i %i%c1 flow=%c\n",
+ baud, bits, parity, flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+
+static struct uart_driver mpc52xx_uart_driver;
+
+static struct console mpc52xx_console = {
+ .name = "ttyPSC",
+ .write = mpc52xx_console_write,
+ .device = uart_console_device,
+ .setup = mpc52xx_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1, /* Specified on the cmdline (e.g. console=ttyPSC0) */
+ .data = &mpc52xx_uart_driver,
+};
+
+
+static int __init
+mpc52xx_console_init(void)
+{
+ mpc52xx_uart_of_enumerate();
+ register_console(&mpc52xx_console);
+ return 0;
+}
+
+console_initcall(mpc52xx_console_init);
+
+#define MPC52xx_PSC_CONSOLE &mpc52xx_console
+#else
+#define MPC52xx_PSC_CONSOLE NULL
+#endif
+
+
+/* ======================================================================== */
+/* UART Driver */
+/* ======================================================================== */
+
+static struct uart_driver mpc52xx_uart_driver = {
+ .driver_name = "mpc52xx_psc_uart",
+ .dev_name = "ttyPSC",
+ .major = SERIAL_PSC_MAJOR,
+ .minor = SERIAL_PSC_MINOR,
+ .nr = MPC52xx_PSC_MAXNUM,
+ .cons = MPC52xx_PSC_CONSOLE,
+};
+
+/* ======================================================================== */
+/* OF Platform Driver */
+/* ======================================================================== */
+
+static struct of_device_id mpc52xx_uart_of_match[] = {
+#ifdef CONFIG_PPC_MPC52xx
+ { .compatible = "fsl,mpc5200b-psc-uart", .data = &mpc5200b_psc_ops, },
+ { .compatible = "fsl,mpc5200-psc-uart", .data = &mpc52xx_psc_ops, },
+ /* binding used by old lite5200 device trees: */
+ { .compatible = "mpc5200-psc-uart", .data = &mpc52xx_psc_ops, },
+ /* binding used by efika: */
+ { .compatible = "mpc5200-serial", .data = &mpc52xx_psc_ops, },
+#endif
+#ifdef CONFIG_PPC_MPC512x
+ { .compatible = "fsl,mpc5121-psc-uart", .data = &mpc512x_psc_ops, },
+#endif
+ {},
+};
+
+static int __devinit mpc52xx_uart_of_probe(struct platform_device *op)
+{
+ int idx = -1;
+ unsigned int uartclk;
+ struct uart_port *port = NULL;
+ struct resource res;
+ int ret;
+
+ /* Check validity & presence */
+ for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++)
+ if (mpc52xx_uart_nodes[idx] == op->dev.of_node)
+ break;
+ if (idx >= MPC52xx_PSC_MAXNUM)
+ return -EINVAL;
+ pr_debug("Found %s assigned to ttyPSC%x\n",
+ mpc52xx_uart_nodes[idx]->full_name, idx);
+
+ /* Set the uart clock to the input clock of the PSC; the different
+ * prescalers are taken into account in the set_baudrate() methods
+ * of the respective chips */
+ uartclk = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ if (uartclk == 0) {
+ dev_dbg(&op->dev, "Could not find uart clock frequency!\n");
+ return -EINVAL;
+ }
+
+ /* Init the port structure */
+ port = &mpc52xx_uart_ports[idx];
+
+ spin_lock_init(&port->lock);
+ port->uartclk = uartclk;
+ port->fifosize = 512;
+ port->iotype = UPIO_MEM;
+ port->flags = UPF_BOOT_AUTOCONF |
+ (uart_console(port) ? 0 : UPF_IOREMAP);
+ port->line = idx;
+ port->ops = &mpc52xx_uart_ops;
+ port->dev = &op->dev;
+
+ /* Search for IRQ and mapbase */
+ ret = of_address_to_resource(op->dev.of_node, 0, &res);
+ if (ret)
+ return ret;
+
+ port->mapbase = res.start;
+ if (!port->mapbase) {
+ dev_dbg(&op->dev, "Could not allocate resources for PSC\n");
+ return -EINVAL;
+ }
+
+ psc_ops->get_irq(port, op->dev.of_node);
+ if (port->irq == NO_IRQ) {
+ dev_dbg(&op->dev, "Could not get irq\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(&op->dev, "mpc52xx-psc uart at %p, irq=%x, freq=%i\n",
+ (void *)port->mapbase, port->irq, port->uartclk);
+
+ /* Add the port to the uart sub-system */
+ ret = uart_add_one_port(&mpc52xx_uart_driver, port);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(&op->dev, (void *)port);
+ return 0;
+}
+
+static int
+mpc52xx_uart_of_remove(struct platform_device *op)
+{
+ struct uart_port *port = dev_get_drvdata(&op->dev);
+ dev_set_drvdata(&op->dev, NULL);
+
+ if (port)
+ uart_remove_one_port(&mpc52xx_uart_driver, port);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int
+mpc52xx_uart_of_suspend(struct platform_device *op, pm_message_t state)
+{
+ struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev);
+
+ if (port)
+ uart_suspend_port(&mpc52xx_uart_driver, port);
+
+ return 0;
+}
+
+static int
+mpc52xx_uart_of_resume(struct platform_device *op)
+{
+ struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev);
+
+ if (port)
+ uart_resume_port(&mpc52xx_uart_driver, port);
+
+ return 0;
+}
+#endif
+
+static void
+mpc52xx_uart_of_assign(struct device_node *np)
+{
+ int i;
+
+ /* Find the first free PSC number */
+ for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) {
+ if (mpc52xx_uart_nodes[i] == NULL) {
+ of_node_get(np);
+ mpc52xx_uart_nodes[i] = np;
+ return;
+ }
+ }
+}
+
+static void
+mpc52xx_uart_of_enumerate(void)
+{
+ static int enum_done;
+ struct device_node *np;
+ const struct of_device_id *match;
+ int i;
+
+ if (enum_done)
+ return;
+
+ /* Assign index to each PSC in device tree */
+ for_each_matching_node(np, mpc52xx_uart_of_match) {
+ match = of_match_node(mpc52xx_uart_of_match, np);
+ psc_ops = match->data;
+ mpc52xx_uart_of_assign(np);
+ }
+
+ enum_done = 1;
+
+ for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) {
+ if (mpc52xx_uart_nodes[i])
+ pr_debug("%s assigned to ttyPSC%x\n",
+ mpc52xx_uart_nodes[i]->full_name, i);
+ }
+}
+
+MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match);
+
+static struct platform_driver mpc52xx_uart_of_driver = {
+ .probe = mpc52xx_uart_of_probe,
+ .remove = mpc52xx_uart_of_remove,
+#ifdef CONFIG_PM
+ .suspend = mpc52xx_uart_of_suspend,
+ .resume = mpc52xx_uart_of_resume,
+#endif
+ .driver = {
+ .name = "mpc52xx-psc-uart",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_uart_of_match,
+ },
+};
+
+
+/* ======================================================================== */
+/* Module */
+/* ======================================================================== */
+
+static int __init
+mpc52xx_uart_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Serial: MPC52xx PSC UART driver\n");
+
+ ret = uart_register_driver(&mpc52xx_uart_driver);
+ if (ret) {
+ printk(KERN_ERR "%s: uart_register_driver failed (%i)\n",
+ __FILE__, ret);
+ return ret;
+ }
+
+ mpc52xx_uart_of_enumerate();
+
+ /*
+ * Map and initialise the PSC FIFO Controller if on MPC512x.
+ */
+ if (psc_ops && psc_ops->fifoc_init) {
+ ret = psc_ops->fifoc_init();
+ if (ret)
+ return ret;
+ }
+
+ ret = platform_driver_register(&mpc52xx_uart_of_driver);
+ if (ret) {
+ printk(KERN_ERR "%s: platform_driver_register failed (%i)\n",
+ __FILE__, ret);
+ uart_unregister_driver(&mpc52xx_uart_driver);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit
+mpc52xx_uart_exit(void)
+{
+ if (psc_ops->fifoc_uninit)
+ psc_ops->fifoc_uninit();
+
+ platform_driver_unregister(&mpc52xx_uart_of_driver);
+ uart_unregister_driver(&mpc52xx_uart_driver);
+}
+
+
+module_init(mpc52xx_uart_init);
+module_exit(mpc52xx_uart_exit);
+
+MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
+MODULE_DESCRIPTION("Freescale MPC52xx PSC UART");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
new file mode 100644
index 00000000..6a9c6605
--- /dev/null
+++ b/drivers/tty/serial/mpsc.c
@@ -0,0 +1,2159 @@
+/*
+ * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
+ * GT64260, MV64340, MV64360, GT96100, ... ).
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * Based on an old MPSC driver that was in the linuxppc tree. It appears to
+ * have been created by Chris Zankel (formerly of MontaVista) but there
+ * is no proper Copyright so I'm not sure. Apparently, parts were also
+ * taken from PPCBoot (now U-Boot). Also based on drivers/serial/8250.c
+ * by Russell King.
+ *
+ * 2004 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+/*
+ * The MPSC interface is much like a typical network controller's interface.
+ * That is, you set up separate rings of descriptors for transmitting and
+ * receiving data. There is also a pool of buffers (one buffer per
+ * descriptor) that incoming data is DMA'd into or outgoing data is DMA'd
+ * out of.
+ *
+ * The MPSC requires two other controllers to be able to work. The Baud Rate
+ * Generator (BRG) provides a clock at programmable frequencies which determines
+ * the baud rate. The Serial DMA Controller (SDMA) takes incoming data from the
+ * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the
+ * MPSC. It is actually the SDMA interrupt that the driver uses to keep the
+ * transmit and receive "engines" going (i.e., indicate data has been
+ * transmitted or received).
+ *
+ * NOTES:
+ *
+ * 1) Some chips have an erratum where several regs cannot be
+ * read. To work around that, we keep a local copy of those regs in
+ * 'mpsc_port_info'.
+ *
+ * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr
+ * accesses system mem with coherency enabled. For that reason, the driver
+ * assumes that coherency for that ctlr has been disabled. This means
+ * that when in a cache coherent system, the driver has to manually manage
+ * the data cache on the areas that it touches because the dma_* macros are
+ * basically no-ops.
+ *
+ * 3) There is an erratum (on PPC) where you can't use the instruction to do
+ * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places
+ * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed.
+ *
+ * 4) AFAICT, hardware flow control isn't supported by the controller --MAG.
+ */
+
+
+#if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mv643xx.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#define MPSC_NUM_CTLRS 2
+
+/*
+ * Descriptors and buffers must be cache line aligned.
+ * Buffer lengths must be a multiple of the cache line size.
+ * Number of Tx & Rx descriptors must be powers of 2.
+ */
+#define MPSC_RXR_ENTRIES 32
+#define MPSC_RXRE_SIZE dma_get_cache_alignment()
+#define MPSC_RXR_SIZE (MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE)
+#define MPSC_RXBE_SIZE dma_get_cache_alignment()
+#define MPSC_RXB_SIZE (MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE)
+
+#define MPSC_TXR_ENTRIES 32
+#define MPSC_TXRE_SIZE dma_get_cache_alignment()
+#define MPSC_TXR_SIZE (MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE)
+#define MPSC_TXBE_SIZE dma_get_cache_alignment()
+#define MPSC_TXB_SIZE (MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE)
+
+#define MPSC_DMA_ALLOC_SIZE (MPSC_RXR_SIZE + MPSC_RXB_SIZE + MPSC_TXR_SIZE \
+ + MPSC_TXB_SIZE + dma_get_cache_alignment() /* for alignment */)
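+
+/*
+ * Size check (illustrative only, not from the original source; a 32-byte
+ * cache line is an assumed value): each ring or buffer region is
+ * 32 entries * 32 bytes = 1024 bytes, so MPSC_DMA_ALLOC_SIZE works out to
+ * 4 * 1024 + 32 = 4128 bytes per port.
+ */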
+
+/* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */
+struct mpsc_rx_desc {
+ u16 bufsize;
+ u16 bytecnt;
+ u32 cmdstat;
+ u32 link;
+ u32 buf_ptr;
+} __attribute((packed));
+
+struct mpsc_tx_desc {
+ u16 bytecnt;
+ u16 shadow;
+ u32 cmdstat;
+ u32 link;
+ u32 buf_ptr;
+} __attribute((packed));
+
+/*
+ * Some regs that have the erratum that you can't read them are shared
+ * between the two MPSC controllers. This struct contains those shared regs.
+ */
+struct mpsc_shared_regs {
+ phys_addr_t mpsc_routing_base_p;
+ phys_addr_t sdma_intr_base_p;
+
+ void __iomem *mpsc_routing_base;
+ void __iomem *sdma_intr_base;
+
+ u32 MPSC_MRR_m;
+ u32 MPSC_RCRR_m;
+ u32 MPSC_TCRR_m;
+ u32 SDMA_INTR_CAUSE_m;
+ u32 SDMA_INTR_MASK_m;
+};
+
+/* The main driver data structure */
+struct mpsc_port_info {
+ struct uart_port port; /* Overlay uart_port structure */
+
+ /* Internal driver state for this ctlr */
+ u8 ready;
+ u8 rcv_data;
+ tcflag_t c_iflag; /* save termios->c_iflag */
+ tcflag_t c_cflag; /* save termios->c_cflag */
+
+ /* Info passed in from platform */
+ u8 mirror_regs; /* Need to mirror regs? */
+ u8 cache_mgmt; /* Need manual cache mgmt? */
+ u8 brg_can_tune; /* BRG has baud tuning? */
+ u32 brg_clk_src;
+ u16 mpsc_max_idle;
+ int default_baud;
+ int default_bits;
+ int default_parity;
+ int default_flow;
+
+ /* Physical addresses of various blocks of registers (from platform) */
+ phys_addr_t mpsc_base_p;
+ phys_addr_t sdma_base_p;
+ phys_addr_t brg_base_p;
+
+ /* Virtual addresses of various blocks of registers (from platform) */
+ void __iomem *mpsc_base;
+ void __iomem *sdma_base;
+ void __iomem *brg_base;
+
+ /* Descriptor ring and buffer allocations */
+ void *dma_region;
+ dma_addr_t dma_region_p;
+
+ dma_addr_t rxr; /* Rx descriptor ring */
+ dma_addr_t rxr_p; /* Phys addr of rxr */
+ u8 *rxb; /* Rx Ring I/O buf */
+ u8 *rxb_p; /* Phys addr of rxb */
+ u32 rxr_posn; /* First desc w/ Rx data */
+
+ dma_addr_t txr; /* Tx descriptor ring */
+ dma_addr_t txr_p; /* Phys addr of txr */
+ u8 *txb; /* Tx Ring I/O buf */
+ u8 *txb_p; /* Phys addr of txb */
+ int txr_head; /* Where new data goes */
+ int txr_tail; /* Where sent data comes off */
+ spinlock_t tx_lock; /* transmit lock */
+
+ /* Mirrored values of regs we can't read (if 'mirror_regs' set) */
+ u32 MPSC_MPCR_m;
+ u32 MPSC_CHR_1_m;
+ u32 MPSC_CHR_2_m;
+ u32 MPSC_CHR_10_m;
+ u32 BRG_BCR_m;
+ struct mpsc_shared_regs *shared_regs;
+};
+
+/* Hooks to platform-specific code */
+int mpsc_platform_register_driver(void);
+void mpsc_platform_unregister_driver(void);
+
+/* Hooks back in to mpsc common to be called by platform-specific code */
+struct mpsc_port_info *mpsc_device_probe(int index);
+struct mpsc_port_info *mpsc_device_remove(int index);
+
+/* Main MPSC Configuration Register Offsets */
+#define MPSC_MMCRL 0x0000
+#define MPSC_MMCRH 0x0004
+#define MPSC_MPCR 0x0008
+#define MPSC_CHR_1 0x000c
+#define MPSC_CHR_2 0x0010
+#define MPSC_CHR_3 0x0014
+#define MPSC_CHR_4 0x0018
+#define MPSC_CHR_5 0x001c
+#define MPSC_CHR_6 0x0020
+#define MPSC_CHR_7 0x0024
+#define MPSC_CHR_8 0x0028
+#define MPSC_CHR_9 0x002c
+#define MPSC_CHR_10 0x0030
+#define MPSC_CHR_11 0x0034
+
+#define MPSC_MPCR_FRZ (1 << 9)
+#define MPSC_MPCR_CL_5 0
+#define MPSC_MPCR_CL_6 1
+#define MPSC_MPCR_CL_7 2
+#define MPSC_MPCR_CL_8 3
+#define MPSC_MPCR_SBL_1 0
+#define MPSC_MPCR_SBL_2 1
+
+#define MPSC_CHR_2_TEV (1<<1)
+#define MPSC_CHR_2_TA (1<<7)
+#define MPSC_CHR_2_TTCS (1<<9)
+#define MPSC_CHR_2_REV (1<<17)
+#define MPSC_CHR_2_RA (1<<23)
+#define MPSC_CHR_2_CRD (1<<25)
+#define MPSC_CHR_2_EH (1<<31)
+#define MPSC_CHR_2_PAR_ODD 0
+#define MPSC_CHR_2_PAR_SPACE 1
+#define MPSC_CHR_2_PAR_EVEN 2
+#define MPSC_CHR_2_PAR_MARK 3
+
+/* MPSC Signal Routing */
+#define MPSC_MRR 0x0000
+#define MPSC_RCRR 0x0004
+#define MPSC_TCRR 0x0008
+
+/* Serial DMA Controller Interface Registers */
+#define SDMA_SDC 0x0000
+#define SDMA_SDCM 0x0008
+#define SDMA_RX_DESC 0x0800
+#define SDMA_RX_BUF_PTR 0x0808
+#define SDMA_SCRDP 0x0810
+#define SDMA_TX_DESC 0x0c00
+#define SDMA_SCTDP 0x0c10
+#define SDMA_SFTDP 0x0c14
+
+#define SDMA_DESC_CMDSTAT_PE (1<<0)
+#define SDMA_DESC_CMDSTAT_CDL (1<<1)
+#define SDMA_DESC_CMDSTAT_FR (1<<3)
+#define SDMA_DESC_CMDSTAT_OR (1<<6)
+#define SDMA_DESC_CMDSTAT_BR (1<<9)
+#define SDMA_DESC_CMDSTAT_MI (1<<10)
+#define SDMA_DESC_CMDSTAT_A (1<<11)
+#define SDMA_DESC_CMDSTAT_AM (1<<12)
+#define SDMA_DESC_CMDSTAT_CT (1<<13)
+#define SDMA_DESC_CMDSTAT_C (1<<14)
+#define SDMA_DESC_CMDSTAT_ES (1<<15)
+#define SDMA_DESC_CMDSTAT_L (1<<16)
+#define SDMA_DESC_CMDSTAT_F (1<<17)
+#define SDMA_DESC_CMDSTAT_P (1<<18)
+#define SDMA_DESC_CMDSTAT_EI (1<<23)
+#define SDMA_DESC_CMDSTAT_O (1<<31)
+
+#define SDMA_DESC_DFLT (SDMA_DESC_CMDSTAT_O \
+ | SDMA_DESC_CMDSTAT_EI)
+
+#define SDMA_SDC_RFT (1<<0)
+#define SDMA_SDC_SFM (1<<1)
+#define SDMA_SDC_BLMR (1<<6)
+#define SDMA_SDC_BLMT (1<<7)
+#define SDMA_SDC_POVR (1<<8)
+#define SDMA_SDC_RIFB (1<<9)
+
+#define SDMA_SDCM_ERD (1<<7)
+#define SDMA_SDCM_AR (1<<15)
+#define SDMA_SDCM_STD (1<<16)
+#define SDMA_SDCM_TXD (1<<23)
+#define SDMA_SDCM_AT (1<<31)
+
+#define SDMA_0_CAUSE_RXBUF (1<<0)
+#define SDMA_0_CAUSE_RXERR (1<<1)
+#define SDMA_0_CAUSE_TXBUF (1<<2)
+#define SDMA_0_CAUSE_TXEND (1<<3)
+#define SDMA_1_CAUSE_RXBUF (1<<8)
+#define SDMA_1_CAUSE_RXERR (1<<9)
+#define SDMA_1_CAUSE_TXBUF (1<<10)
+#define SDMA_1_CAUSE_TXEND (1<<11)
+
+#define SDMA_CAUSE_RX_MASK (SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR \
+ | SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
+#define SDMA_CAUSE_TX_MASK (SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND \
+ | SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)
+
+/* SDMA Interrupt registers */
+#define SDMA_INTR_CAUSE 0x0000
+#define SDMA_INTR_MASK 0x0080
+
+/* Baud Rate Generator Interface Registers */
+#define BRG_BCR 0x0000
+#define BRG_BTR 0x0004
+
+/*
+ * Define how this driver is known to the outside (we've been assigned a
+ * range on the "Low-density serial ports" major).
+ */
+#define MPSC_MAJOR 204
+#define MPSC_MINOR_START 44
+#define MPSC_DRIVER_NAME "MPSC"
+#define MPSC_DEV_NAME "ttyMM"
+#define MPSC_VERSION "1.00"
+
+static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS];
+static struct mpsc_shared_regs mpsc_shared_regs;
+static struct uart_driver mpsc_reg;
+
+static void mpsc_start_rx(struct mpsc_port_info *pi);
+static void mpsc_free_ring_mem(struct mpsc_port_info *pi);
+static void mpsc_release_port(struct uart_port *port);
+/*
+ ******************************************************************************
+ *
+ * Baud Rate Generator Routines (BRG)
+ *
+ ******************************************************************************
+ */
+static void mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src)
+{
+ u32 v;
+
+ v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
+ v = (v & ~(0xf << 18)) | ((clk_src & 0xf) << 18);
+
+ if (pi->brg_can_tune)
+ v &= ~(1 << 25);
+
+ if (pi->mirror_regs)
+ pi->BRG_BCR_m = v;
+ writel(v, pi->brg_base + BRG_BCR);
+
+ writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000,
+ pi->brg_base + BRG_BTR);
+}
+
+static void mpsc_brg_enable(struct mpsc_port_info *pi)
+{
+ u32 v;
+
+ v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
+ v |= (1 << 16);
+
+ if (pi->mirror_regs)
+ pi->BRG_BCR_m = v;
+ writel(v, pi->brg_base + BRG_BCR);
+}
+
+static void mpsc_brg_disable(struct mpsc_port_info *pi)
+{
+ u32 v;
+
+ v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
+ v &= ~(1 << 16);
+
+ if (pi->mirror_regs)
+ pi->BRG_BCR_m = v;
+ writel(v, pi->brg_base + BRG_BCR);
+}
+
+/*
+ * To set the baud, we adjust the CDV field in the BRG_BCR reg.
+ * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
+ * However, the input clock is divided by 16 in the MPSC b/c of how
+ * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
+ * calculation by 16 to account for that. So the real calculation
+ * that accounts for the way the mpsc is set up is:
+ * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
+ */
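+/*
+ * Worked example (illustrative only, not from the original source; the
+ * 100 MHz BRG input clock is an assumed value): at 115200 baud,
+ * CDV = 100000000 / (115200 << 5) - 1 = 27 - 1 = 26, and the resulting
+ * rate is 100000000 / ((26 + 1) * 2 * 16) ~= 115741 baud (about 0.5% high).
+ */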
+static void mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud)
+{
+ u32 cdv = (pi->port.uartclk / (baud << 5)) - 1;
+ u32 v;
+
+ mpsc_brg_disable(pi);
+ v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
+ v = (v & 0xffff0000) | (cdv & 0xffff);
+
+ if (pi->mirror_regs)
+ pi->BRG_BCR_m = v;
+ writel(v, pi->brg_base + BRG_BCR);
+ mpsc_brg_enable(pi);
+}
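+
+/*
+ * Illustrative example (not part of the driver): assuming a BRG input
+ * clock (uartclk) of 25 MHz and a requested baud of 9600, the formula
+ * above gives CDV = (25000000 / (9600 << 5)) - 1 = 80, which is what
+ * mpsc_set_baudrate() writes into the low 16 bits of BRG_BCR.
+ */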
+
+/*
+ ******************************************************************************
+ *
+ * Serial DMA Routines (SDMA)
+ *
+ ******************************************************************************
+ */
+
+static void mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size)
+{
+ u32 v;
+
+ pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
+ pi->port.line, burst_size);
+
+ burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */
+
+ if (burst_size < 2)
+ v = 0x0; /* 1 64-bit word */
+ else if (burst_size < 4)
+ v = 0x1; /* 2 64-bit words */
+ else if (burst_size < 8)
+ v = 0x2; /* 4 64-bit words */
+ else
+ v = 0x3; /* 8 64-bit words */
+
+ writel((readl(pi->sdma_base + SDMA_SDC) & (0x3 << 12)) | (v << 12),
+ pi->sdma_base + SDMA_SDC);
+}
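+
+/*
+ * Illustrative example (not part of the driver): mpsc_init_hw() (below)
+ * passes dma_get_cache_alignment() as the burst size via mpsc_sdma_init().
+ * On a system with 32-byte cachelines, burst_size >> 3 = 4, so the
+ * burst-limit field at bits [13:12] of SDMA_SDC is programmed to 0x2
+ * (4 64-bit words per burst).
+ */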
+
+static void mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size)
+{
+ pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line,
+ burst_size);
+
+ writel((readl(pi->sdma_base + SDMA_SDC) & 0x3ff) | 0x03f,
+ pi->sdma_base + SDMA_SDC);
+ mpsc_sdma_burstsize(pi, burst_size);
+}
+
+static u32 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask)
+{
+ u32 old, v;
+
+ pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask);
+
+ old = v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
+ readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
+
+ mask &= 0xf;
+ if (pi->port.line)
+ mask <<= 8;
+ v &= ~mask;
+
+ if (pi->mirror_regs)
+ pi->shared_regs->SDMA_INTR_MASK_m = v;
+ writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
+
+ if (pi->port.line)
+ old >>= 8;
+ return old & 0xf;
+}
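+
+/*
+ * Illustrative note (not part of the driver): both ports share one cause
+ * and one mask register, with port 0 using bits [3:0] and port 1 using
+ * bits [11:8] (see the SDMA_x_CAUSE_* definitions above).  Masking all
+ * interrupts on port 1 therefore clears 0xf00, and the previous 4-bit
+ * mask for that port is returned to the caller.
+ */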
+
+static void mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask)
+{
+ u32 v;
+
+ pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line,mask);
+
+ v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m
+ : readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
+
+ mask &= 0xf;
+ if (pi->port.line)
+ mask <<= 8;
+ v |= mask;
+
+ if (pi->mirror_regs)
+ pi->shared_regs->SDMA_INTR_MASK_m = v;
+ writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
+}
+
+static void mpsc_sdma_intr_ack(struct mpsc_port_info *pi)
+{
+ pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line);
+
+ if (pi->mirror_regs)
+ pi->shared_regs->SDMA_INTR_CAUSE_m = 0;
+ writeb(0x00, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE
+ + pi->port.line);
+}
+
+static void mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi,
+ struct mpsc_rx_desc *rxre_p)
+{
+ pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
+ pi->port.line, (u32)rxre_p);
+
+ writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP);
+}
+
+static void mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi,
+ struct mpsc_tx_desc *txre_p)
+{
+ writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP);
+ writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP);
+}
+
+static void mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val)
+{
+ u32 v;
+
+ v = readl(pi->sdma_base + SDMA_SDCM);
+ if (val)
+ v |= val;
+ else
+ v = 0;
+ wmb();
+ writel(v, pi->sdma_base + SDMA_SDCM);
+ wmb();
+}
+
+static uint mpsc_sdma_tx_active(struct mpsc_port_info *pi)
+{
+ return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD;
+}
+
+static void mpsc_sdma_start_tx(struct mpsc_port_info *pi)
+{
+ struct mpsc_tx_desc *txre, *txre_p;
+
+ /* If tx isn't running & there's a desc ready to go, start it */
+ if (!mpsc_sdma_tx_active(pi)) {
+ txre = (struct mpsc_tx_desc *)(pi->txr
+ + (pi->txr_tail * MPSC_TXRE_SIZE));
+ dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
+ DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ invalidate_dcache_range((ulong)txre,
+ (ulong)txre + MPSC_TXRE_SIZE);
+#endif
+
+ if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) {
+ txre_p = (struct mpsc_tx_desc *)
+ (pi->txr_p + (pi->txr_tail * MPSC_TXRE_SIZE));
+
+ mpsc_sdma_set_tx_ring(pi, txre_p);
+ mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD);
+ }
+ }
+}
+
+static void mpsc_sdma_stop(struct mpsc_port_info *pi)
+{
+ pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line);
+
+ /* Abort any SDMA transfers */
+ mpsc_sdma_cmd(pi, 0);
+ mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT);
+
+ /* Clear the SDMA current and first TX and RX pointers */
+ mpsc_sdma_set_tx_ring(pi, NULL);
+ mpsc_sdma_set_rx_ring(pi, NULL);
+
+ /* Disable interrupts */
+ mpsc_sdma_intr_mask(pi, 0xf);
+ mpsc_sdma_intr_ack(pi);
+}
+
+/*
+ ******************************************************************************
+ *
+ * Multi-Protocol Serial Controller Routines (MPSC)
+ *
+ ******************************************************************************
+ */
+
+static void mpsc_hw_init(struct mpsc_port_info *pi)
+{
+ u32 v;
+
+ pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line);
+
+ /* Set up clock routing */
+ if (pi->mirror_regs) {
+ v = pi->shared_regs->MPSC_MRR_m;
+ v &= ~0x1c7;
+ pi->shared_regs->MPSC_MRR_m = v;
+ writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);
+
+ v = pi->shared_regs->MPSC_RCRR_m;
+ v = (v & ~0xf0f) | 0x100;
+ pi->shared_regs->MPSC_RCRR_m = v;
+ writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
+
+ v = pi->shared_regs->MPSC_TCRR_m;
+ v = (v & ~0xf0f) | 0x100;
+ pi->shared_regs->MPSC_TCRR_m = v;
+ writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
+ } else {
+ v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR);
+ v &= ~0x1c7;
+ writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);
+
+ v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
+ v = (v & ~0xf0f) | 0x100;
+ writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
+
+ v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
+ v = (v & ~0xf0f) | 0x100;
+ writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
+ }
+
+ /* Put MPSC in UART mode & enable Tx/Rx engines */
+ writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL);
+
+ /* No preamble, 16x divider, low-latency, */
+ writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);
+ mpsc_set_baudrate(pi, pi->default_baud);
+
+ if (pi->mirror_regs) {
+ pi->MPSC_CHR_1_m = 0;
+ pi->MPSC_CHR_2_m = 0;
+ }
+ writel(0, pi->mpsc_base + MPSC_CHR_1);
+ writel(0, pi->mpsc_base + MPSC_CHR_2);
+ writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3);
+ writel(0, pi->mpsc_base + MPSC_CHR_4);
+ writel(0, pi->mpsc_base + MPSC_CHR_5);
+ writel(0, pi->mpsc_base + MPSC_CHR_6);
+ writel(0, pi->mpsc_base + MPSC_CHR_7);
+ writel(0, pi->mpsc_base + MPSC_CHR_8);
+ writel(0, pi->mpsc_base + MPSC_CHR_9);
+ writel(0, pi->mpsc_base + MPSC_CHR_10);
+}
+
+static void mpsc_enter_hunt(struct mpsc_port_info *pi)
+{
+ pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line);
+
+ if (pi->mirror_regs) {
+ writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH,
+ pi->mpsc_base + MPSC_CHR_2);
+ /* Erratum prevents reading CHR_2 so just delay for a while */
+ udelay(100);
+ } else {
+ writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH,
+ pi->mpsc_base + MPSC_CHR_2);
+
+ while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH)
+ udelay(10);
+ }
+}
+
+static void mpsc_freeze(struct mpsc_port_info *pi)
+{
+ u32 v;
+
+ pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line);
+
+ v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
+ readl(pi->mpsc_base + MPSC_MPCR);
+ v |= MPSC_MPCR_FRZ;
+
+ if (pi->mirror_regs)
+ pi->MPSC_MPCR_m = v;
+ writel(v, pi->mpsc_base + MPSC_MPCR);
+}
+
+static void mpsc_unfreeze(struct mpsc_port_info *pi)
+{
+ u32 v;
+
+ v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
+ readl(pi->mpsc_base + MPSC_MPCR);
+ v &= ~MPSC_MPCR_FRZ;
+
+ if (pi->mirror_regs)
+ pi->MPSC_MPCR_m = v;
+ writel(v, pi->mpsc_base + MPSC_MPCR);
+
+ pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line);
+}
+
+static void mpsc_set_char_length(struct mpsc_port_info *pi, u32 len)
+{
+ u32 v;
+
+ pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line,len);
+
+ v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
+ readl(pi->mpsc_base + MPSC_MPCR);
+ v = (v & ~(0x3 << 12)) | ((len & 0x3) << 12);
+
+ if (pi->mirror_regs)
+ pi->MPSC_MPCR_m = v;
+ writel(v, pi->mpsc_base + MPSC_MPCR);
+}
+
+static void mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len)
+{
+ u32 v;
+
+ pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
+ pi->port.line, len);
+
+ v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
+ readl(pi->mpsc_base + MPSC_MPCR);
+
+ v = (v & ~(1 << 14)) | ((len & 0x1) << 14);
+
+ if (pi->mirror_regs)
+ pi->MPSC_MPCR_m = v;
+ writel(v, pi->mpsc_base + MPSC_MPCR);
+}
+
+static void mpsc_set_parity(struct mpsc_port_info *pi, u32 p)
+{
+ u32 v;
+
+ pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p);
+
+ v = (pi->mirror_regs) ? pi->MPSC_CHR_2_m :
+ readl(pi->mpsc_base + MPSC_CHR_2);
+
+ p &= 0x3;
+ v = (v & ~0xc000c) | (p << 18) | (p << 2);
+
+ if (pi->mirror_regs)
+ pi->MPSC_CHR_2_m = v;
+ writel(v, pi->mpsc_base + MPSC_CHR_2);
+}
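+
+/*
+ * Illustrative note (not part of the driver): mpsc_set_parity() writes the
+ * 2-bit parity mode into both the Rx field (bits [3:2]) and the Tx field
+ * (bits [19:18]) of MPSC_CHR_2, which is why the 0xc000c mask is cleared
+ * first.
+ */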
+
+/*
+ ******************************************************************************
+ *
+ * Driver Init Routines
+ *
+ ******************************************************************************
+ */
+
+static void mpsc_init_hw(struct mpsc_port_info *pi)
+{
+ pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line);
+
+ mpsc_brg_init(pi, pi->brg_clk_src);
+ mpsc_brg_enable(pi);
+ mpsc_sdma_init(pi, dma_get_cache_alignment()); /* burst a cacheline */
+ mpsc_sdma_stop(pi);
+ mpsc_hw_init(pi);
+}
+
+static int mpsc_alloc_ring_mem(struct mpsc_port_info *pi)
+{
+ int rc = 0;
+
+ pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
+ pi->port.line);
+
+ if (!pi->dma_region) {
+ if (!dma_supported(pi->port.dev, 0xffffffff)) {
+ printk(KERN_ERR "MPSC: Inadequate DMA support\n");
+ rc = -ENXIO;
+ } else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev,
+ MPSC_DMA_ALLOC_SIZE,
+ &pi->dma_region_p, GFP_KERNEL))
+ == NULL) {
+ printk(KERN_ERR "MPSC: Can't alloc Desc region\n");
+ rc = -ENOMEM;
+ }
+ }
+
+ return rc;
+}
+
+static void mpsc_free_ring_mem(struct mpsc_port_info *pi)
+{
+ pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);
+
+ if (pi->dma_region) {
+ dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
+ pi->dma_region, pi->dma_region_p);
+ pi->dma_region = NULL;
+ pi->dma_region_p = (dma_addr_t)NULL;
+ }
+}
+
+static void mpsc_init_rings(struct mpsc_port_info *pi)
+{
+ struct mpsc_rx_desc *rxre;
+ struct mpsc_tx_desc *txre;
+ dma_addr_t dp, dp_p;
+ u8 *bp, *bp_p;
+ int i;
+
+ pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line);
+
+ BUG_ON(pi->dma_region == NULL);
+
+ memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE);
+
+ /*
+ * Descriptors & buffers are multiples of cacheline size and must be
+ * cacheline aligned.
+ */
+ dp = ALIGN((u32)pi->dma_region, dma_get_cache_alignment());
+ dp_p = ALIGN((u32)pi->dma_region_p, dma_get_cache_alignment());
+
+ /*
+ * Partition dma region into rx ring descriptor, rx buffers,
+ * tx ring descriptors, and tx buffers.
+ */
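+ /*
+ * Resulting layout (illustrative, derived from the code below):
+ *   [rx descriptor ring: MPSC_RXR_SIZE]
+ *   [rx buffers:         MPSC_RXB_SIZE]
+ *   [tx descriptor ring: MPSC_TXR_SIZE]
+ *   [tx buffers:         MPSC_TXB_SIZE]
+ */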
+ pi->rxr = dp;
+ pi->rxr_p = dp_p;
+ dp += MPSC_RXR_SIZE;
+ dp_p += MPSC_RXR_SIZE;
+
+ pi->rxb = (u8 *)dp;
+ pi->rxb_p = (u8 *)dp_p;
+ dp += MPSC_RXB_SIZE;
+ dp_p += MPSC_RXB_SIZE;
+
+ pi->rxr_posn = 0;
+
+ pi->txr = dp;
+ pi->txr_p = dp_p;
+ dp += MPSC_TXR_SIZE;
+ dp_p += MPSC_TXR_SIZE;
+
+ pi->txb = (u8 *)dp;
+ pi->txb_p = (u8 *)dp_p;
+
+ pi->txr_head = 0;
+ pi->txr_tail = 0;
+
+ /* Init rx ring descriptors */
+ dp = pi->rxr;
+ dp_p = pi->rxr_p;
+ bp = pi->rxb;
+ bp_p = pi->rxb_p;
+
+ for (i = 0; i < MPSC_RXR_ENTRIES; i++) {
+ rxre = (struct mpsc_rx_desc *)dp;
+
+ rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE);
+ rxre->bytecnt = cpu_to_be16(0);
+ rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
+ | SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
+ | SDMA_DESC_CMDSTAT_L);
+ rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE);
+ rxre->buf_ptr = cpu_to_be32(bp_p);
+
+ dp += MPSC_RXRE_SIZE;
+ dp_p += MPSC_RXRE_SIZE;
+ bp += MPSC_RXBE_SIZE;
+ bp_p += MPSC_RXBE_SIZE;
+ }
+ rxre->link = cpu_to_be32(pi->rxr_p); /* Wrap last back to first */
+
+ /* Init tx ring descriptors */
+ dp = pi->txr;
+ dp_p = pi->txr_p;
+ bp = pi->txb;
+ bp_p = pi->txb_p;
+
+ for (i = 0; i < MPSC_TXR_ENTRIES; i++) {
+ txre = (struct mpsc_tx_desc *)dp;
+
+ txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE);
+ txre->buf_ptr = cpu_to_be32(bp_p);
+
+ dp += MPSC_TXRE_SIZE;
+ dp_p += MPSC_TXRE_SIZE;
+ bp += MPSC_TXBE_SIZE;
+ bp_p += MPSC_TXBE_SIZE;
+ }
+ txre->link = cpu_to_be32(pi->txr_p); /* Wrap last back to first */
+
+ dma_cache_sync(pi->port.dev, (void *)pi->dma_region,
+ MPSC_DMA_ALLOC_SIZE, DMA_BIDIRECTIONAL);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ flush_dcache_range((ulong)pi->dma_region,
+ (ulong)pi->dma_region
+ + MPSC_DMA_ALLOC_SIZE);
+#endif
+
+ return;
+}
+
+static void mpsc_uninit_rings(struct mpsc_port_info *pi)
+{
+ pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n",pi->port.line);
+
+ BUG_ON(pi->dma_region == NULL);
+
+ pi->rxr = 0;
+ pi->rxr_p = 0;
+ pi->rxb = NULL;
+ pi->rxb_p = NULL;
+ pi->rxr_posn = 0;
+
+ pi->txr = 0;
+ pi->txr_p = 0;
+ pi->txb = NULL;
+ pi->txb_p = NULL;
+ pi->txr_head = 0;
+ pi->txr_tail = 0;
+}
+
+static int mpsc_make_ready(struct mpsc_port_info *pi)
+{
+ int rc;
+
+ pr_debug("mpsc_make_ready[%d]: Making cltr ready\n", pi->port.line);
+
+ if (!pi->ready) {
+ mpsc_init_hw(pi);
+ if ((rc = mpsc_alloc_ring_mem(pi)))
+ return rc;
+ mpsc_init_rings(pi);
+ pi->ready = 1;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int serial_polled;
+#endif
+
+/*
+ ******************************************************************************
+ *
+ * Interrupt Handling Routines
+ *
+ ******************************************************************************
+ */
+
+static int mpsc_rx_intr(struct mpsc_port_info *pi)
+{
+ struct mpsc_rx_desc *rxre;
+ struct tty_struct *tty = pi->port.state->port.tty;
+ u32 cmdstat, bytes_in, i;
+ int rc = 0;
+ u8 *bp;
+ char flag = TTY_NORMAL;
+
+ pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);
+
+ rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));
+
+ dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
+ DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ invalidate_dcache_range((ulong)rxre,
+ (ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+
+ /*
+ * Loop through Rx descriptors handling ones that have been completed.
+ */
+ while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
+ & SDMA_DESC_CMDSTAT_O)) {
+ bytes_in = be16_to_cpu(rxre->bytecnt);
+#ifdef CONFIG_CONSOLE_POLL
+ if (unlikely(serial_polled)) {
+ serial_polled = 0;
+ return 0;
+ }
+#endif
+ /* Following use of tty struct directly is deprecated */
+ if (unlikely(tty_buffer_request_room(tty, bytes_in)
+ < bytes_in)) {
+ if (tty->low_latency)
+ tty_flip_buffer_push(tty);
+ /*
+ * If this failed then we will throw away the bytes
+ * but must do so to clear interrupts.
+ */
+ }
+
+ bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
+ dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE,
+ DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ invalidate_dcache_range((ulong)bp,
+ (ulong)bp + MPSC_RXBE_SIZE);
+#endif
+
+ /*
+ * Other than for parity error, the manual provides little
+ * info on what data will be in a frame flagged by any of
+ * these errors. For parity error, it is the last byte in
+ * the buffer that had the error. As for the rest, I guess
+ * we'll assume there is no data in the buffer.
+ * If there is...it gets lost.
+ */
+ if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
+ | SDMA_DESC_CMDSTAT_FR
+ | SDMA_DESC_CMDSTAT_OR))) {
+
+ pi->port.icount.rx++;
+
+ if (cmdstat & SDMA_DESC_CMDSTAT_BR) { /* Break */
+ pi->port.icount.brk++;
+
+ if (uart_handle_break(&pi->port))
+ goto next_frame;
+ } else if (cmdstat & SDMA_DESC_CMDSTAT_FR) {
+ pi->port.icount.frame++;
+ } else if (cmdstat & SDMA_DESC_CMDSTAT_OR) {
+ pi->port.icount.overrun++;
+ }
+
+ cmdstat &= pi->port.read_status_mask;
+
+ if (cmdstat & SDMA_DESC_CMDSTAT_BR)
+ flag = TTY_BREAK;
+ else if (cmdstat & SDMA_DESC_CMDSTAT_FR)
+ flag = TTY_FRAME;
+ else if (cmdstat & SDMA_DESC_CMDSTAT_OR)
+ flag = TTY_OVERRUN;
+ else if (cmdstat & SDMA_DESC_CMDSTAT_PE)
+ flag = TTY_PARITY;
+ }
+
+ if (uart_handle_sysrq_char(&pi->port, *bp)) {
+ bp++;
+ bytes_in--;
+#ifdef CONFIG_CONSOLE_POLL
+ if (unlikely(serial_polled)) {
+ serial_polled = 0;
+ return 0;
+ }
+#endif
+ goto next_frame;
+ }
+
+ if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR
+ | SDMA_DESC_CMDSTAT_FR
+ | SDMA_DESC_CMDSTAT_OR)))
+ && !(cmdstat & pi->port.ignore_status_mask)) {
+ tty_insert_flip_char(tty, *bp, flag);
+ } else {
+ for (i=0; i<bytes_in; i++)
+ tty_insert_flip_char(tty, *bp++, TTY_NORMAL);
+
+ pi->port.icount.rx += bytes_in;
+ }
+
+next_frame:
+ rxre->bytecnt = cpu_to_be16(0);
+ wmb();
+ rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O
+ | SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F
+ | SDMA_DESC_CMDSTAT_L);
+ wmb();
+ dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
+ DMA_BIDIRECTIONAL);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ flush_dcache_range((ulong)rxre,
+ (ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+
+ /* Advance to next descriptor */
+ pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
+ rxre = (struct mpsc_rx_desc *)
+ (pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE));
+ dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
+ DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ invalidate_dcache_range((ulong)rxre,
+ (ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+ rc = 1;
+ }
+
+ /* Restart rx engine, if it's stopped */
+ if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
+ mpsc_start_rx(pi);
+
+ tty_flip_buffer_push(tty);
+ return rc;
+}
+
+static void mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
+{
+ struct mpsc_tx_desc *txre;
+
+ txre = (struct mpsc_tx_desc *)(pi->txr
+ + (pi->txr_head * MPSC_TXRE_SIZE));
+
+ txre->bytecnt = cpu_to_be16(count);
+ txre->shadow = txre->bytecnt;
+ wmb(); /* ensure cmdstat is last field updated */
+ txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F
+ | SDMA_DESC_CMDSTAT_L
+ | ((intr) ? SDMA_DESC_CMDSTAT_EI : 0));
+ wmb();
+ dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
+ DMA_BIDIRECTIONAL);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ flush_dcache_range((ulong)txre,
+ (ulong)txre + MPSC_TXRE_SIZE);
+#endif
+}
+
+static void mpsc_copy_tx_data(struct mpsc_port_info *pi)
+{
+ struct circ_buf *xmit = &pi->port.state->xmit;
+ u8 *bp;
+ u32 i;
+
+ /* Make sure the desc ring isn't full */
+ while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES)
+ < (MPSC_TXR_ENTRIES - 1)) {
+ if (pi->port.x_char) {
+ /*
+ * Ideally, we should use the TCS field in
+ * CHR_1 to put the x_char out immediately but
+ * errata prevents us from being able to read
+ * CHR_2 to know that it's safe to write to
+ * CHR_1. Instead, just put it in-band with
+ * all the other Tx data.
+ */
+ bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
+ *bp = pi->port.x_char;
+ pi->port.x_char = 0;
+ i = 1;
+ } else if (!uart_circ_empty(xmit)
+ && !uart_tx_stopped(&pi->port)) {
+ i = min((u32)MPSC_TXBE_SIZE,
+ (u32)uart_circ_chars_pending(xmit));
+ i = min(i, (u32)CIRC_CNT_TO_END(xmit->head, xmit->tail,
+ UART_XMIT_SIZE));
+ bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
+ memcpy(bp, &xmit->buf[xmit->tail], i);
+ xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&pi->port);
+ } else { /* All tx data copied into ring bufs */
+ return;
+ }
+
+ dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
+ DMA_BIDIRECTIONAL);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ flush_dcache_range((ulong)bp,
+ (ulong)bp + MPSC_TXBE_SIZE);
+#endif
+ mpsc_setup_tx_desc(pi, i, 1);
+
+ /* Advance to next descriptor */
+ pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
+ }
+}
+
+static int mpsc_tx_intr(struct mpsc_port_info *pi)
+{
+ struct mpsc_tx_desc *txre;
+ int rc = 0;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&pi->tx_lock, iflags);
+
+ if (!mpsc_sdma_tx_active(pi)) {
+ txre = (struct mpsc_tx_desc *)(pi->txr
+ + (pi->txr_tail * MPSC_TXRE_SIZE));
+
+ dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
+ DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ invalidate_dcache_range((ulong)txre,
+ (ulong)txre + MPSC_TXRE_SIZE);
+#endif
+
+ while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
+ rc = 1;
+ pi->port.icount.tx += be16_to_cpu(txre->bytecnt);
+ pi->txr_tail = (pi->txr_tail+1) & (MPSC_TXR_ENTRIES-1);
+
+ /* If no more data to tx, fall out of loop */
+ if (pi->txr_head == pi->txr_tail)
+ break;
+
+ txre = (struct mpsc_tx_desc *)(pi->txr
+ + (pi->txr_tail * MPSC_TXRE_SIZE));
+ dma_cache_sync(pi->port.dev, (void *)txre,
+ MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ invalidate_dcache_range((ulong)txre,
+ (ulong)txre + MPSC_TXRE_SIZE);
+#endif
+ }
+
+ mpsc_copy_tx_data(pi);
+ mpsc_sdma_start_tx(pi); /* start next desc if ready */
+ }
+
+ spin_unlock_irqrestore(&pi->tx_lock, iflags);
+ return rc;
+}
+
+/*
+ * This is the driver's interrupt handler. To avoid a race, we first clear
+ * the interrupt, then handle any completed Rx/Tx descriptors. When done
+ * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
+ */
+static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
+{
+ struct mpsc_port_info *pi = dev_id;
+ ulong iflags;
+ int rc = IRQ_NONE;
+
+ pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n",pi->port.line);
+
+ spin_lock_irqsave(&pi->port.lock, iflags);
+ mpsc_sdma_intr_ack(pi);
+ if (mpsc_rx_intr(pi))
+ rc = IRQ_HANDLED;
+ if (mpsc_tx_intr(pi))
+ rc = IRQ_HANDLED;
+ spin_unlock_irqrestore(&pi->port.lock, iflags);
+
+ pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
+ return rc;
+}
+
+/*
+ ******************************************************************************
+ *
+ * serial_core.c Interface routines
+ *
+ ******************************************************************************
+ */
+static uint mpsc_tx_empty(struct uart_port *port)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ ulong iflags;
+ uint rc;
+
+ spin_lock_irqsave(&pi->port.lock, iflags);
+ rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
+ spin_unlock_irqrestore(&pi->port.lock, iflags);
+
+ return rc;
+}
+
+static void mpsc_set_mctrl(struct uart_port *port, uint mctrl)
+{
+ /* Have no way to set modem control lines AFAICT */
+}
+
+static uint mpsc_get_mctrl(struct uart_port *port)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ u32 mflags, status;
+
+ status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m
+ : readl(pi->mpsc_base + MPSC_CHR_10);
+
+ mflags = 0;
+ if (status & 0x1)
+ mflags |= TIOCM_CTS;
+ if (status & 0x2)
+ mflags |= TIOCM_CAR;
+
+ return mflags | TIOCM_DSR; /* No way to tell if DSR asserted */
+}
+
+static void mpsc_stop_tx(struct uart_port *port)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+
+ pr_debug("mpsc_stop_tx[%d]\n", port->line);
+
+ mpsc_freeze(pi);
+}
+
+static void mpsc_start_tx(struct uart_port *port)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&pi->tx_lock, iflags);
+
+ mpsc_unfreeze(pi);
+ mpsc_copy_tx_data(pi);
+ mpsc_sdma_start_tx(pi);
+
+ spin_unlock_irqrestore(&pi->tx_lock, iflags);
+
+ pr_debug("mpsc_start_tx[%d]\n", port->line);
+}
+
+static void mpsc_start_rx(struct mpsc_port_info *pi)
+{
+ pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);
+
+ if (pi->rcv_data) {
+ mpsc_enter_hunt(pi);
+ mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
+ }
+}
+
+static void mpsc_stop_rx(struct uart_port *port)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+
+ pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);
+
+ if (pi->mirror_regs) {
+ writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_RA,
+ pi->mpsc_base + MPSC_CHR_2);
+ /* Erratum prevents reading CHR_2 so just delay for a while */
+ udelay(100);
+ } else {
+ writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_RA,
+ pi->mpsc_base + MPSC_CHR_2);
+
+ while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_RA)
+ udelay(10);
+ }
+
+ mpsc_sdma_cmd(pi, SDMA_SDCM_AR);
+}
+
+static void mpsc_enable_ms(struct uart_port *port)
+{
+}
+
+static void mpsc_break_ctl(struct uart_port *port, int ctl)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ ulong flags;
+ u32 v;
+
+ v = ctl ? 0x00ff0000 : 0;
+
+ spin_lock_irqsave(&pi->port.lock, flags);
+ if (pi->mirror_regs)
+ pi->MPSC_CHR_1_m = v;
+ writel(v, pi->mpsc_base + MPSC_CHR_1);
+ spin_unlock_irqrestore(&pi->port.lock, flags);
+}
+
+static int mpsc_startup(struct uart_port *port)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ u32 flag = 0;
+ int rc;
+
+ pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
+ port->line, pi->port.irq);
+
+ if ((rc = mpsc_make_ready(pi)) == 0) {
+ /* Setup IRQ handler */
+ mpsc_sdma_intr_ack(pi);
+
+ /* If irq's are shared, need to set flag */
+ if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq)
+ flag = IRQF_SHARED;
+
+ if (request_irq(pi->port.irq, mpsc_sdma_intr, flag,
+ "mpsc-sdma", pi))
+ printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
+ pi->port.irq);
+
+ mpsc_sdma_intr_unmask(pi, 0xf);
+ mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p
+ + (pi->rxr_posn * MPSC_RXRE_SIZE)));
+ }
+
+ return rc;
+}
+
+static void mpsc_shutdown(struct uart_port *port)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+
+ pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);
+
+ mpsc_sdma_stop(pi);
+ free_irq(pi->port.irq, pi);
+}
+
+static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ u32 baud;
+ ulong flags;
+ u32 chr_bits, stop_bits, par;
+
+ pi->c_iflag = termios->c_iflag;
+ pi->c_cflag = termios->c_cflag;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ chr_bits = MPSC_MPCR_CL_5;
+ break;
+ case CS6:
+ chr_bits = MPSC_MPCR_CL_6;
+ break;
+ case CS7:
+ chr_bits = MPSC_MPCR_CL_7;
+ break;
+ case CS8:
+ default:
+ chr_bits = MPSC_MPCR_CL_8;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ stop_bits = MPSC_MPCR_SBL_2;
+ else
+ stop_bits = MPSC_MPCR_SBL_1;
+
+ par = MPSC_CHR_2_PAR_EVEN;
+ if (termios->c_cflag & PARENB)
+ if (termios->c_cflag & PARODD)
+ par = MPSC_CHR_2_PAR_ODD;
+#ifdef CMSPAR
+ if (termios->c_cflag & CMSPAR) {
+ if (termios->c_cflag & PARODD)
+ par = MPSC_CHR_2_PAR_MARK;
+ else
+ par = MPSC_CHR_2_PAR_SPACE;
+ }
+#endif
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);
+
+ spin_lock_irqsave(&pi->port.lock, flags);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ mpsc_set_char_length(pi, chr_bits);
+ mpsc_set_stop_bit_length(pi, stop_bits);
+ mpsc_set_parity(pi, par);
+ mpsc_set_baudrate(pi, baud);
+
+ /* Characters/events to read */
+ pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;
+
+ if (termios->c_iflag & INPCK)
+ pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
+ | SDMA_DESC_CMDSTAT_FR;
+
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;
+
+ /* Characters/events to ignore */
+ pi->port.ignore_status_mask = 0;
+
+ if (termios->c_iflag & IGNPAR)
+ pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE
+ | SDMA_DESC_CMDSTAT_FR;
+
+ if (termios->c_iflag & IGNBRK) {
+ pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;
+
+ if (termios->c_iflag & IGNPAR)
+ pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
+ }
+
+ if ((termios->c_cflag & CREAD)) {
+ if (!pi->rcv_data) {
+ pi->rcv_data = 1;
+ mpsc_start_rx(pi);
+ }
+ } else if (pi->rcv_data) {
+ mpsc_stop_rx(port);
+ pi->rcv_data = 0;
+ }
+
+ spin_unlock_irqrestore(&pi->port.lock, flags);
+}
+
+static const char *mpsc_type(struct uart_port *port)
+{
+ pr_debug("mpsc_type[%d]: port type: %s\n", port->line,MPSC_DRIVER_NAME);
+ return MPSC_DRIVER_NAME;
+}
+
+static int mpsc_request_port(struct uart_port *port)
+{
+ /* Should make chip/platform specific call */
+ return 0;
+}
+
+static void mpsc_release_port(struct uart_port *port)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+
+ if (pi->ready) {
+ mpsc_uninit_rings(pi);
+ mpsc_free_ring_mem(pi);
+ pi->ready = 0;
+ }
+}
+
+static void mpsc_config_port(struct uart_port *port, int flags)
+{
+}
+
+static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ int rc = 0;
+
+ pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);
+
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC)
+ rc = -EINVAL;
+ else if (pi->port.irq != ser->irq)
+ rc = -EINVAL;
+ else if (ser->io_type != SERIAL_IO_MEM)
+ rc = -EINVAL;
+ else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */
+ rc = -EINVAL;
+ else if ((void *)pi->port.mapbase != ser->iomem_base)
+ rc = -EINVAL;
+ else if (pi->port.iobase != ser->port)
+ rc = -EINVAL;
+ else if (ser->hub6 != 0)
+ rc = -EINVAL;
+
+ return rc;
+}
+#ifdef CONFIG_CONSOLE_POLL
+/* Serial polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+static char poll_buf[2048];
+static int poll_ptr;
+static int poll_cnt;
+static void mpsc_put_poll_char(struct uart_port *port,
+ unsigned char c);
+
+static int mpsc_get_poll_char(struct uart_port *port)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_rx_desc *rxre;
+ u32 cmdstat, bytes_in, i;
+ u8 *bp;
+
+ if (!serial_polled)
+ serial_polled = 1;
+
+ pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);
+
+ if (poll_cnt) {
+ poll_cnt--;
+ return poll_buf[poll_ptr++];
+ }
+ poll_ptr = 0;
+ poll_cnt = 0;
+
+ while (poll_cnt == 0) {
+ rxre = (struct mpsc_rx_desc *)(pi->rxr +
+ (pi->rxr_posn*MPSC_RXRE_SIZE));
+ dma_cache_sync(pi->port.dev, (void *)rxre,
+ MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ invalidate_dcache_range((ulong)rxre,
+ (ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+ /*
+ * Loop through Rx descriptors handling ones that have
+ * been completed.
+ */
+ while (poll_cnt == 0 &&
+ !((cmdstat = be32_to_cpu(rxre->cmdstat)) &
+ SDMA_DESC_CMDSTAT_O)){
+ bytes_in = be16_to_cpu(rxre->bytecnt);
+ bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
+ dma_cache_sync(pi->port.dev, (void *) bp,
+ MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ invalidate_dcache_range((ulong)bp,
+ (ulong)bp + MPSC_RXBE_SIZE);
+#endif
+ if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
+ SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) &&
+ !(cmdstat & pi->port.ignore_status_mask)) {
+ poll_buf[poll_cnt] = *bp;
+ poll_cnt++;
+ } else {
+ for (i = 0; i < bytes_in; i++) {
+ poll_buf[poll_cnt] = *bp++;
+ poll_cnt++;
+ }
+ pi->port.icount.rx += bytes_in;
+ }
+ rxre->bytecnt = cpu_to_be16(0);
+ wmb();
+ rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
+ SDMA_DESC_CMDSTAT_EI |
+ SDMA_DESC_CMDSTAT_F |
+ SDMA_DESC_CMDSTAT_L);
+ wmb();
+ dma_cache_sync(pi->port.dev, (void *)rxre,
+ MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ flush_dcache_range((ulong)rxre,
+ (ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+
+ /* Advance to next descriptor */
+ pi->rxr_posn = (pi->rxr_posn + 1) &
+ (MPSC_RXR_ENTRIES - 1);
+ rxre = (struct mpsc_rx_desc *)(pi->rxr +
+ (pi->rxr_posn * MPSC_RXRE_SIZE));
+ dma_cache_sync(pi->port.dev, (void *)rxre,
+ MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ invalidate_dcache_range((ulong)rxre,
+ (ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+ }
+
+ /* Restart rx engine, if it's stopped */
+ if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
+ mpsc_start_rx(pi);
+ }
+ if (poll_cnt) {
+ poll_cnt--;
+ return poll_buf[poll_ptr++];
+ }
+
+ return 0;
+}
+
+
+static void mpsc_put_poll_char(struct uart_port *port,
+ unsigned char c)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ u32 data;
+
+ data = readl(pi->mpsc_base + MPSC_MPCR);
+ writeb(c, pi->mpsc_base + MPSC_CHR_1);
+ mb();
+ data = readl(pi->mpsc_base + MPSC_CHR_2);
+ data |= MPSC_CHR_2_TTCS;
+ writel(data, pi->mpsc_base + MPSC_CHR_2);
+ mb();
+
+ while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS);
+}
+#endif
+
+static struct uart_ops mpsc_pops = {
+ .tx_empty = mpsc_tx_empty,
+ .set_mctrl = mpsc_set_mctrl,
+ .get_mctrl = mpsc_get_mctrl,
+ .stop_tx = mpsc_stop_tx,
+ .start_tx = mpsc_start_tx,
+ .stop_rx = mpsc_stop_rx,
+ .enable_ms = mpsc_enable_ms,
+ .break_ctl = mpsc_break_ctl,
+ .startup = mpsc_startup,
+ .shutdown = mpsc_shutdown,
+ .set_termios = mpsc_set_termios,
+ .type = mpsc_type,
+ .release_port = mpsc_release_port,
+ .request_port = mpsc_request_port,
+ .config_port = mpsc_config_port,
+ .verify_port = mpsc_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = mpsc_get_poll_char,
+ .poll_put_char = mpsc_put_poll_char,
+#endif
+};
+
+/*
+ ******************************************************************************
+ *
+ * Console Interface Routines
+ *
+ ******************************************************************************
+ */
+
+#ifdef CONFIG_SERIAL_MPSC_CONSOLE
+static void mpsc_console_write(struct console *co, const char *s, uint count)
+{
+ struct mpsc_port_info *pi = &mpsc_ports[co->index];
+ u8 *bp, *dp, add_cr = 0;
+ int i;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&pi->tx_lock, iflags);
+
+ while (pi->txr_head != pi->txr_tail) {
+ while (mpsc_sdma_tx_active(pi))
+ udelay(100);
+ mpsc_sdma_intr_ack(pi);
+ mpsc_tx_intr(pi);
+ }
+
+ while (mpsc_sdma_tx_active(pi))
+ udelay(100);
+
+ while (count > 0) {
+ bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
+
+ for (i = 0; i < MPSC_TXBE_SIZE; i++) {
+ if (count == 0)
+ break;
+
+ if (add_cr) {
+ *(dp++) = '\r';
+ add_cr = 0;
+ } else {
+ *(dp++) = *s;
+
+ if (*(s++) == '\n') { /* add '\r' after '\n' */
+ add_cr = 1;
+ count++;
+ }
+ }
+
+ count--;
+ }
+
+ dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
+ DMA_BIDIRECTIONAL);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ flush_dcache_range((ulong)bp,
+ (ulong)bp + MPSC_TXBE_SIZE);
+#endif
+ mpsc_setup_tx_desc(pi, i, 0);
+ pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
+ mpsc_sdma_start_tx(pi);
+
+ while (mpsc_sdma_tx_active(pi))
+ udelay(100);
+
+ pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1);
+ }
+
+ spin_unlock_irqrestore(&pi->tx_lock, iflags);
+}
+
+static int __init mpsc_console_setup(struct console *co, char *options)
+{
+ struct mpsc_port_info *pi;
+ int baud, bits, parity, flow;
+
+ pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options);
+
+ if (co->index >= MPSC_NUM_CTLRS)
+ co->index = 0;
+
+ pi = &mpsc_ports[co->index];
+
+ baud = pi->default_baud;
+ bits = pi->default_bits;
+ parity = pi->default_parity;
+ flow = pi->default_flow;
+
+ if (!pi->port.ops)
+ return -ENODEV;
+
+ spin_lock_init(&pi->port.lock); /* Temporary fix--copied from 8250.c */
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&pi->port, co, baud, parity, bits, flow);
+}
+
+static struct console mpsc_console = {
+ .name = MPSC_DEV_NAME,
+ .write = mpsc_console_write,
+ .device = uart_console_device,
+ .setup = mpsc_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &mpsc_reg,
+};
+
+static int __init mpsc_late_console_init(void)
+{
+ pr_debug("mpsc_late_console_init: Enter\n");
+
+ if (!(mpsc_console.flags & CON_ENABLED))
+ register_console(&mpsc_console);
+ return 0;
+}
+
+late_initcall(mpsc_late_console_init);
+
+#define MPSC_CONSOLE &mpsc_console
+#else
+#define MPSC_CONSOLE NULL
+#endif
+/*
+ ******************************************************************************
+ *
+ * Dummy Platform Driver to extract & map shared register regions
+ *
+ ******************************************************************************
+ */
+static void mpsc_resource_err(char *s)
+{
+ printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s);
+}
+
+static int mpsc_shared_map_regs(struct platform_device *pd)
+{
+ struct resource *r;
+
+ if ((r = platform_get_resource(pd, IORESOURCE_MEM,
+ MPSC_ROUTING_BASE_ORDER))
+ && request_mem_region(r->start,
+ MPSC_ROUTING_REG_BLOCK_SIZE,
+ "mpsc_routing_regs")) {
+ mpsc_shared_regs.mpsc_routing_base = ioremap(r->start,
+ MPSC_ROUTING_REG_BLOCK_SIZE);
+ mpsc_shared_regs.mpsc_routing_base_p = r->start;
+ } else {
+ mpsc_resource_err("MPSC routing base");
+ return -ENOMEM;
+ }
+
+ if ((r = platform_get_resource(pd, IORESOURCE_MEM,
+ MPSC_SDMA_INTR_BASE_ORDER))
+ && request_mem_region(r->start,
+ MPSC_SDMA_INTR_REG_BLOCK_SIZE,
+ "sdma_intr_regs")) {
+ mpsc_shared_regs.sdma_intr_base = ioremap(r->start,
+ MPSC_SDMA_INTR_REG_BLOCK_SIZE);
+ mpsc_shared_regs.sdma_intr_base_p = r->start;
+ } else {
+ iounmap(mpsc_shared_regs.mpsc_routing_base);
+ release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
+ MPSC_ROUTING_REG_BLOCK_SIZE);
+ mpsc_resource_err("SDMA intr base");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mpsc_shared_unmap_regs(void)
+{
+ if (mpsc_shared_regs.mpsc_routing_base) {
+ iounmap(mpsc_shared_regs.mpsc_routing_base);
+ release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
+ MPSC_ROUTING_REG_BLOCK_SIZE);
+ }
+ if (mpsc_shared_regs.sdma_intr_base) {
+ iounmap(mpsc_shared_regs.sdma_intr_base);
+ release_mem_region(mpsc_shared_regs.sdma_intr_base_p,
+ MPSC_SDMA_INTR_REG_BLOCK_SIZE);
+ }
+
+ mpsc_shared_regs.mpsc_routing_base = NULL;
+ mpsc_shared_regs.sdma_intr_base = NULL;
+
+ mpsc_shared_regs.mpsc_routing_base_p = 0;
+ mpsc_shared_regs.sdma_intr_base_p = 0;
+}
+
+static int mpsc_shared_drv_probe(struct platform_device *dev)
+{
+ struct mpsc_shared_pdata *pdata;
+ int rc = -ENODEV;
+
+ if (dev->id == 0) {
+ if (!(rc = mpsc_shared_map_regs(dev))) {
+ pdata = (struct mpsc_shared_pdata *)
+ dev->dev.platform_data;
+
+ mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
+ mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val;
+ mpsc_shared_regs.MPSC_TCRR_m= pdata->tcrr_val;
+ mpsc_shared_regs.SDMA_INTR_CAUSE_m =
+ pdata->intr_cause_val;
+ mpsc_shared_regs.SDMA_INTR_MASK_m =
+ pdata->intr_mask_val;
+
+ rc = 0;
+ }
+ }
+
+ return rc;
+}
+
+static int mpsc_shared_drv_remove(struct platform_device *dev)
+{
+ int rc = -ENODEV;
+
+ if (dev->id == 0) {
+ mpsc_shared_unmap_regs();
+ mpsc_shared_regs.MPSC_MRR_m = 0;
+ mpsc_shared_regs.MPSC_RCRR_m = 0;
+ mpsc_shared_regs.MPSC_TCRR_m = 0;
+ mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
+ mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static struct platform_driver mpsc_shared_driver = {
+ .probe = mpsc_shared_drv_probe,
+ .remove = mpsc_shared_drv_remove,
+ .driver = {
+ .name = MPSC_SHARED_NAME,
+ },
+};
+
+/*
+ ******************************************************************************
+ *
+ * Driver Interface Routines
+ *
+ ******************************************************************************
+ */
+static struct uart_driver mpsc_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = MPSC_DRIVER_NAME,
+ .dev_name = MPSC_DEV_NAME,
+ .major = MPSC_MAJOR,
+ .minor = MPSC_MINOR_START,
+ .nr = MPSC_NUM_CTLRS,
+ .cons = MPSC_CONSOLE,
+};
+
+static int mpsc_drv_map_regs(struct mpsc_port_info *pi,
+ struct platform_device *pd)
+{
+ struct resource *r;
+
+ if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER))
+ && request_mem_region(r->start, MPSC_REG_BLOCK_SIZE,
+ "mpsc_regs")) {
+ pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE);
+ pi->mpsc_base_p = r->start;
+ } else {
+ mpsc_resource_err("MPSC base");
+ goto err;
+ }
+
+ if ((r = platform_get_resource(pd, IORESOURCE_MEM,
+ MPSC_SDMA_BASE_ORDER))
+ && request_mem_region(r->start,
+ MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) {
+ pi->sdma_base = ioremap(r->start,MPSC_SDMA_REG_BLOCK_SIZE);
+ pi->sdma_base_p = r->start;
+ } else {
+ mpsc_resource_err("SDMA base");
+ if (pi->mpsc_base) {
+ iounmap(pi->mpsc_base);
+ pi->mpsc_base = NULL;
+ }
+ goto err;
+ }
+
+ if ((r = platform_get_resource(pd,IORESOURCE_MEM,MPSC_BRG_BASE_ORDER))
+ && request_mem_region(r->start,
+ MPSC_BRG_REG_BLOCK_SIZE, "brg_regs")) {
+ pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE);
+ pi->brg_base_p = r->start;
+ } else {
+ mpsc_resource_err("BRG base");
+ if (pi->mpsc_base) {
+ iounmap(pi->mpsc_base);
+ pi->mpsc_base = NULL;
+ }
+ if (pi->sdma_base) {
+ iounmap(pi->sdma_base);
+ pi->sdma_base = NULL;
+ }
+ goto err;
+ }
+ return 0;
+
+err:
+ return -ENOMEM;
+}
+
+static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi)
+{
+ if (pi->mpsc_base) {
+ iounmap(pi->mpsc_base);
+ release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
+ }
+ if (pi->sdma_base) {
+ iounmap(pi->sdma_base);
+ release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE);
+ }
+ if (pi->brg_base) {
+ iounmap(pi->brg_base);
+ release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE);
+ }
+
+ pi->mpsc_base = NULL;
+ pi->sdma_base = NULL;
+ pi->brg_base = NULL;
+
+ pi->mpsc_base_p = 0;
+ pi->sdma_base_p = 0;
+ pi->brg_base_p = 0;
+}
+
+static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
+ struct platform_device *pd, int num)
+{
+ struct mpsc_pdata *pdata;
+
+ pdata = (struct mpsc_pdata *)pd->dev.platform_data;
+
+ pi->port.uartclk = pdata->brg_clk_freq;
+ pi->port.iotype = UPIO_MEM;
+ pi->port.line = num;
+ pi->port.type = PORT_MPSC;
+ pi->port.fifosize = MPSC_TXBE_SIZE;
+ pi->port.membase = pi->mpsc_base;
+ pi->port.mapbase = (ulong)pi->mpsc_base;
+ pi->port.ops = &mpsc_pops;
+
+ pi->mirror_regs = pdata->mirror_regs;
+ pi->cache_mgmt = pdata->cache_mgmt;
+ pi->brg_can_tune = pdata->brg_can_tune;
+ pi->brg_clk_src = pdata->brg_clk_src;
+ pi->mpsc_max_idle = pdata->max_idle;
+ pi->default_baud = pdata->default_baud;
+ pi->default_bits = pdata->default_bits;
+ pi->default_parity = pdata->default_parity;
+ pi->default_flow = pdata->default_flow;
+
+ /* Initial values of mirrored regs */
+ pi->MPSC_CHR_1_m = pdata->chr_1_val;
+ pi->MPSC_CHR_2_m = pdata->chr_2_val;
+ pi->MPSC_CHR_10_m = pdata->chr_10_val;
+ pi->MPSC_MPCR_m = pdata->mpcr_val;
+ pi->BRG_BCR_m = pdata->bcr_val;
+
+ pi->shared_regs = &mpsc_shared_regs;
+
+ pi->port.irq = platform_get_irq(pd, 0);
+}
+
+static int mpsc_drv_probe(struct platform_device *dev)
+{
+ struct mpsc_port_info *pi;
+ int rc = -ENODEV;
+
+ pr_debug("mpsc_drv_probe: Adding MPSC %d\n", dev->id);
+
+ if (dev->id < MPSC_NUM_CTLRS) {
+ pi = &mpsc_ports[dev->id];
+
+ if (!(rc = mpsc_drv_map_regs(pi, dev))) {
+ mpsc_drv_get_platform_data(pi, dev, dev->id);
+ pi->port.dev = &dev->dev;
+
+ if (!(rc = mpsc_make_ready(pi))) {
+ spin_lock_init(&pi->tx_lock);
+ if (!(rc = uart_add_one_port(&mpsc_reg,
+ &pi->port))) {
+ rc = 0;
+ } else {
+ mpsc_release_port((struct uart_port *)
+ pi);
+ mpsc_drv_unmap_regs(pi);
+ }
+ } else {
+ mpsc_drv_unmap_regs(pi);
+ }
+ }
+ }
+
+ return rc;
+}
+
+static int mpsc_drv_remove(struct platform_device *dev)
+{
+ pr_debug("mpsc_drv_exit: Removing MPSC %d\n", dev->id);
+
+ if (dev->id < MPSC_NUM_CTLRS) {
+ uart_remove_one_port(&mpsc_reg, &mpsc_ports[dev->id].port);
+ mpsc_release_port((struct uart_port *)
+ &mpsc_ports[dev->id].port);
+ mpsc_drv_unmap_regs(&mpsc_ports[dev->id]);
+ return 0;
+ } else {
+ return -ENODEV;
+ }
+}
+
+static struct platform_driver mpsc_driver = {
+ .probe = mpsc_drv_probe,
+ .remove = mpsc_drv_remove,
+ .driver = {
+ .name = MPSC_CTLR_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mpsc_drv_init(void)
+{
+ int rc;
+
+ printk(KERN_INFO "Serial: MPSC driver\n");
+
+ memset(mpsc_ports, 0, sizeof(mpsc_ports));
+ memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
+
+ if (!(rc = uart_register_driver(&mpsc_reg))) {
+ if (!(rc = platform_driver_register(&mpsc_shared_driver))) {
+ if ((rc = platform_driver_register(&mpsc_driver))) {
+ platform_driver_unregister(&mpsc_shared_driver);
+ uart_unregister_driver(&mpsc_reg);
+ }
+ } else {
+ uart_unregister_driver(&mpsc_reg);
+ }
+ }
+
+ return rc;
+}
+
+static void __exit mpsc_drv_exit(void)
+{
+ platform_driver_unregister(&mpsc_driver);
+ platform_driver_unregister(&mpsc_shared_driver);
+ uart_unregister_driver(&mpsc_reg);
+ memset(mpsc_ports, 0, sizeof(mpsc_ports));
+ memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
+}
+
+module_init(mpsc_drv_init);
+module_exit(mpsc_drv_exit);
+
+MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
+MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver");
+MODULE_VERSION(MPSC_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(MPSC_MAJOR);
+MODULE_ALIAS("platform:" MPSC_CTLR_NAME);
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
new file mode 100644
index 00000000..23bc743f
--- /dev/null
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -0,0 +1,920 @@
+/*
+ * mrst_max3110.c - spi uart protocol driver for Maxim 3110
+ *
+ * Copyright (c) 2008-2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * Note:
+ * 1. From Max3110 spec, the Rx FIFO has 8 words, while the Tx FIFO only has
+ * 1 word. If the SPI master controller doesn't support sclk frequency
+ * changes, the characters need to be sent out one by one with some delay
+ *
+ * 2. Currently only the RX-available interrupt is used; there is no need to
+ * wait for the TXE interrupt for a low-speed UART device
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+
+#include <linux/kthread.h>
+#include <linux/spi/spi.h>
+
+#include "mrst_max3110.h"
+
+#define PR_FMT "mrst_max3110: "
+
+#define UART_TX_NEEDED 1
+#define CON_TX_NEEDED 2
+#define BIT_IRQ_PENDING 3
+
+struct uart_max3110 {
+ struct uart_port port;
+ struct spi_device *spi;
+ char name[SPI_NAME_SIZE];
+
+ wait_queue_head_t wq;
+ struct task_struct *main_thread;
+ struct task_struct *read_thread;
+ struct mutex thread_mutex;
+
+ u32 baud;
+ u16 cur_conf;
+ u8 clock;
+ u8 parity, word_7bits;
+ u16 irq;
+
+ unsigned long uart_flags;
+
+ /* console related */
+ struct circ_buf con_xmit;
+};
+
+/* global data structure, may need to be removed */
+static struct uart_max3110 *pmax;
+
+static void receive_chars(struct uart_max3110 *max,
+ unsigned char *str, int len);
+static int max3110_read_multi(struct uart_max3110 *max, u8 *buf);
+static void max3110_con_receive(struct uart_max3110 *max);
+
+static int max3110_write_then_read(struct uart_max3110 *max,
+ const void *txbuf, void *rxbuf, unsigned len, int always_fast)
+{
+ struct spi_device *spi = max->spi;
+ struct spi_message message;
+ struct spi_transfer x;
+ int ret;
+
+ spi_message_init(&message);
+ memset(&x, 0, sizeof x);
+ x.len = len;
+ x.tx_buf = txbuf;
+ x.rx_buf = rxbuf;
+ spi_message_add_tail(&x, &message);
+
+ if (always_fast)
+ x.speed_hz = spi->max_speed_hz;
+ else if (max->baud)
+ x.speed_hz = max->baud;
+
+ /* Do the i/o */
+ ret = spi_sync(spi, &message);
+ return ret;
+}
+
+/* Write a 16b word to the device */
+static int max3110_out(struct uart_max3110 *max, const u16 out)
+{
+ void *buf;
+ u16 *obuf, *ibuf;
+ u8 ch;
+ int ret;
+
+ buf = kzalloc(8, GFP_KERNEL | GFP_DMA);
+ if (!buf)
+ return -ENOMEM;
+
+ obuf = buf;
+ ibuf = buf + 4;
+ *obuf = out;
+ ret = max3110_write_then_read(max, obuf, ibuf, 2, 1);
+ if (ret) {
+ pr_warning(PR_FMT "%s(): get err msg %d when sending 0x%x\n",
+ __func__, ret, out);
+ goto exit;
+ }
+
+ /* If some valid data is read back */
+ if (*ibuf & MAX3110_READ_DATA_AVAILABLE) {
+ ch = *ibuf & 0xff;
+ receive_chars(max, &ch, 1);
+ }
+
+exit:
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * This is usually used to read data from the SPIC RX FIFO, which doesn't
+ * need any delay, unlike flushing characters out.
+ *
+ * Returns how many valid bytes were read back
+ */
+static int max3110_read_multi(struct uart_max3110 *max, u8 *rxbuf)
+{
+ void *buf;
+ u16 *obuf, *ibuf;
+ u8 *pbuf, valid_str[M3110_RX_FIFO_DEPTH];
+ int i, j, blen;
+
+ blen = M3110_RX_FIFO_DEPTH * sizeof(u16);
+ buf = kzalloc(blen * 2, GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ pr_warning(PR_FMT "%s(): fail to alloc dma buffer\n", __func__);
+ return 0;
+ }
+
+ /* tx/rx always have the same length */
+ obuf = buf;
+ ibuf = buf + blen;
+
+ if (max3110_write_then_read(max, obuf, ibuf, blen, 1)) {
+ kfree(buf);
+ return 0;
+ }
+
+ /* If the caller doesn't provide a buffer, handle the received chars here */
+ pbuf = rxbuf ? rxbuf : valid_str;
+
+ for (i = 0, j = 0; i < M3110_RX_FIFO_DEPTH; i++) {
+ if (ibuf[i] & MAX3110_READ_DATA_AVAILABLE)
+ pbuf[j++] = ibuf[i] & 0xff;
+ }
+
+ if (j && (pbuf == valid_str))
+ receive_chars(max, valid_str, j);
+
+ kfree(buf);
+ return j;
+}
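+
+/*
+ * Illustrative note (not part of the driver): each 16-bit word read back
+ * over SPI carries a MAX3110_READ_DATA_AVAILABLE flag (defined in
+ * mrst_max3110.h) plus the received character in its low 8 bits, so a
+ * full transfer of M3110_RX_FIFO_DEPTH words can yield anywhere from 0 to
+ * M3110_RX_FIFO_DEPTH valid bytes.
+ */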
+
+static void serial_m3110_con_putchar(struct uart_port *port, int ch)
+{
+ struct uart_max3110 *max =
+ container_of(port, struct uart_max3110, port);
+ struct circ_buf *xmit = &max->con_xmit;
+
+ if (uart_circ_chars_free(xmit)) {
+ xmit->buf[xmit->head] = (char)ch;
+ xmit->head = (xmit->head + 1) & (PAGE_SIZE - 1);
+ }
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+static void serial_m3110_con_write(struct console *co,
+ const char *s, unsigned int count)
+{
+ if (!pmax)
+ return;
+
+ uart_console_write(&pmax->port, s, count, serial_m3110_con_putchar);
+
+ if (!test_and_set_bit(CON_TX_NEEDED, &pmax->uart_flags))
+ wake_up_process(pmax->main_thread);
+}
+
+static int __init
+serial_m3110_con_setup(struct console *co, char *options)
+{
+ struct uart_max3110 *max = pmax;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ pr_info(PR_FMT "setting up console\n");
+
+ if (co->index == -1)
+ co->index = 0;
+
+ if (!max) {
+ pr_err(PR_FMT "pmax is NULL, return");
+ return -ENODEV;
+ }
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&max->port, co, baud, parity, bits, flow);
+}
+
+static struct tty_driver *serial_m3110_con_device(struct console *co,
+ int *index)
+{
+ struct uart_driver *p = co->data;
+ *index = co->index;
+ return p->tty_driver;
+}
+
+static struct uart_driver serial_m3110_reg;
+static struct console serial_m3110_console = {
+ .name = "ttyS",
+ .write = serial_m3110_con_write,
+ .device = serial_m3110_con_device,
+ .setup = serial_m3110_con_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &serial_m3110_reg,
+};
+
+static unsigned int serial_m3110_tx_empty(struct uart_port *port)
+{
+ return 1;
+}
+
+static void serial_m3110_stop_tx(struct uart_port *port)
+{
+ return;
+}
+
+/* stop_rx will be called in spin_lock env */
+static void serial_m3110_stop_rx(struct uart_port *port)
+{
+ return;
+}
+
+#define WORDS_PER_XFER 128
+static void send_circ_buf(struct uart_max3110 *max,
+ struct circ_buf *xmit)
+{
+ void *buf;
+ u16 *obuf, *ibuf;
+ u8 valid_str[WORDS_PER_XFER];
+ int i, j, len, blen, dma_size, left, ret = 0;
+
+
+ dma_size = WORDS_PER_XFER * sizeof(u16) * 2;
+ buf = kzalloc(dma_size, GFP_KERNEL | GFP_DMA);
+ if (!buf)
+ return;
+ obuf = buf;
+ ibuf = buf + dma_size/2;
+
+ while (!uart_circ_empty(xmit)) {
+ left = uart_circ_chars_pending(xmit);
+ while (left) {
+ len = min(left, WORDS_PER_XFER);
+ blen = len * sizeof(u16);
+ memset(ibuf, 0, blen);
+
+ for (i = 0; i < len; i++) {
+ obuf[i] = (u8)xmit->buf[xmit->tail] | WD_TAG;
+ xmit->tail = (xmit->tail + 1) &
+ (UART_XMIT_SIZE - 1);
+ }
+
+ /* Failing to send a msg to the console is not very critical */
+ ret = max3110_write_then_read(max, obuf, ibuf, blen, 0);
+ if (ret)
+ pr_warning(PR_FMT "%s(): get err msg %d\n",
+ __func__, ret);
+
+ for (i = 0, j = 0; i < len; i++) {
+ if (ibuf[i] & MAX3110_READ_DATA_AVAILABLE)
+ valid_str[j++] = ibuf[i] & 0xff;
+ }
+
+ if (j)
+ receive_chars(max, valid_str, j);
+
+ max->port.icount.tx += len;
+ left -= len;
+ }
+ }
+
+ kfree(buf);
+}
+
+static void transmit_char(struct uart_max3110 *max)
+{
+ struct uart_port *port = &max->port;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+ return;
+
+ send_circ_buf(max, xmit);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ serial_m3110_stop_tx(port);
+}
+
+/*
+ * This will be called by uart_write() and tty_write(); it must not
+ * go to sleep
+ */
+static void serial_m3110_start_tx(struct uart_port *port)
+{
+ struct uart_max3110 *max =
+ container_of(port, struct uart_max3110, port);
+
+ if (!test_and_set_bit(UART_TX_NEEDED, &max->uart_flags))
+ wake_up_process(max->main_thread);
+}
+
+static void receive_chars(struct uart_max3110 *max, unsigned char *str, int len)
+{
+ struct uart_port *port = &max->port;
+ struct tty_struct *tty;
+ int usable;
+
+ /* If uart is not opened, just return */
+ if (!port->state)
+ return;
+
+ tty = port->state->port.tty;
+ if (!tty)
+ return;
+
+ while (len) {
+ usable = tty_buffer_request_room(tty, len);
+ if (usable) {
+ tty_insert_flip_string(tty, str, usable);
+ str += usable;
+ port->icount.rx += usable;
+ }
+ len -= usable;
+ }
+ tty_flip_buffer_push(tty);
+}
+
+/*
+ * This routine is used by the read thread and by RX IRQ handling.  It
+ * first does one round of buffer reads (8 words); if there is some valid
+ * RX data, it tries up to 5 more rounds until all data is read out.
+ *
+ * Stack space is used as the data buffer to save some system load, and
+ * 504 bytes (the 512-byte buffer less one more 8-byte round) was chosen
+ * as the threshold for a bulk push to the upper tty layer when receiving
+ * bulk data; a much bigger buffer may cause a stack overflow
+ */
+static void max3110_con_receive(struct uart_max3110 *max)
+{
+ int loop = 1, num, total = 0;
+ u8 recv_buf[512], *pbuf;
+
+ pbuf = recv_buf;
+ do {
+ num = max3110_read_multi(max, pbuf);
+
+ if (num) {
+ loop = 5;
+ pbuf += num;
+ total += num;
+
+ if (total >= 504) {
+ receive_chars(max, recv_buf, total);
+ pbuf = recv_buf;
+ total = 0;
+ }
+ }
+ } while (--loop);
+
+ if (total)
+ receive_chars(max, recv_buf, total);
+}
+
+static int max3110_main_thread(void *_max)
+{
+ struct uart_max3110 *max = _max;
+ wait_queue_head_t *wq = &max->wq;
+ int ret = 0;
+ struct circ_buf *xmit = &max->con_xmit;
+
+ pr_info(PR_FMT "start main thread\n");
+
+ do {
+ wait_event_interruptible(*wq, max->uart_flags || kthread_should_stop());
+
+ mutex_lock(&max->thread_mutex);
+
+ if (test_and_clear_bit(BIT_IRQ_PENDING, &max->uart_flags))
+ max3110_con_receive(max);
+
+ /* first handle console output */
+ if (test_and_clear_bit(CON_TX_NEEDED, &max->uart_flags))
+ send_circ_buf(max, xmit);
+
+ /* handle uart output */
+ if (test_and_clear_bit(UART_TX_NEEDED, &max->uart_flags))
+ transmit_char(max);
+
+ mutex_unlock(&max->thread_mutex);
+
+ } while (!kthread_should_stop());
+
+ return ret;
+}
+
+static irqreturn_t serial_m3110_irq(int irq, void *dev_id)
+{
+ struct uart_max3110 *max = dev_id;
+
+ /* max3110's irq is falling-edge triggered, not level triggered,
+ * so there is no need to disable the irq */
+ if (!test_and_set_bit(BIT_IRQ_PENDING, &max->uart_flags))
+ wake_up_process(max->main_thread);
+
+ return IRQ_HANDLED;
+}
+
+/* If the RX IRQ is not used, a thread is needed to poll for received data */
+static int max3110_read_thread(void *_max)
+{
+ struct uart_max3110 *max = _max;
+
+ pr_info(PR_FMT "start read thread\n");
+ do {
+ /*
+ * If the mutex cannot be acquired, the main thread is
+ * running and will also perform the RX work.
+ */
+ if (mutex_trylock(&max->thread_mutex)) {
+ max3110_con_receive(max);
+ mutex_unlock(&max->thread_mutex);
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 20);
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static int serial_m3110_startup(struct uart_port *port)
+{
+ struct uart_max3110 *max =
+ container_of(port, struct uart_max3110, port);
+ u16 config = 0;
+ int ret = 0;
+
+ if (port->line != 0) {
+ pr_err(PR_FMT "uart port startup failed\n");
+ return -ENODEV;
+ }
+
+ /* Disable all IRQs and configure the port for 115200, 8n1 */
+ config = WC_TAG | WC_FIFO_ENABLE
+ | WC_1_STOPBITS
+ | WC_8BIT_WORD
+ | WC_BAUD_DR2;
+
+ /* As a thread handles tx/rx, low latency must be set */
+ port->state->port.tty->low_latency = 1;
+
+ if (max->irq) {
+ max->read_thread = NULL;
+ ret = request_irq(max->irq, serial_m3110_irq,
+ IRQF_TRIGGER_FALLING, "max3110", max);
+ if (ret) {
+ max->irq = 0;
+ pr_err(PR_FMT "unable to allocate IRQ, polling\n");
+ } else {
+ /* Enable RX IRQ only */
+ config |= WC_RXA_IRQ_ENABLE;
+ }
+ }
+
+ if (max->irq == 0) {
+ /* If IRQ is disabled, start a read thread for input data */
+ max->read_thread =
+ kthread_run(max3110_read_thread, max, "max3110_read");
+ if (IS_ERR(max->read_thread)) {
+ ret = PTR_ERR(max->read_thread);
+ max->read_thread = NULL;
+ pr_err(PR_FMT "Can't create read thread!\n");
+ return ret;
+ }
+ }
+
+ ret = max3110_out(max, config);
+ if (ret) {
+ if (max->irq)
+ free_irq(max->irq, max);
+ if (max->read_thread)
+ kthread_stop(max->read_thread);
+ max->read_thread = NULL;
+ return ret;
+ }
+
+ max->cur_conf = config;
+ return 0;
+}
+
+static void serial_m3110_shutdown(struct uart_port *port)
+{
+ struct uart_max3110 *max =
+ container_of(port, struct uart_max3110, port);
+ u16 config;
+
+ if (max->read_thread) {
+ kthread_stop(max->read_thread);
+ max->read_thread = NULL;
+ }
+
+ if (max->irq)
+ free_irq(max->irq, max);
+
+ /* Disable interrupts from this port */
+ config = WC_TAG | WC_SW_SHDI;
+ max3110_out(max, config);
+}
+
+static void serial_m3110_release_port(struct uart_port *port)
+{
+}
+
+static int serial_m3110_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void serial_m3110_config_port(struct uart_port *port, int flags)
+{
+ port->type = PORT_MAX3100;
+}
+
+static int
+serial_m3110_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ /* we don't want the core code to modify any port params */
+ return -EINVAL;
+}
+
+
+static const char *serial_m3110_type(struct uart_port *port)
+{
+ struct uart_max3110 *max =
+ container_of(port, struct uart_max3110, port);
+ return max->name;
+}
+
+static void
+serial_m3110_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_max3110 *max =
+ container_of(port, struct uart_max3110, port);
+ unsigned char cval;
+ unsigned int baud, parity = 0;
+ int clk_div = -1;
+ u16 new_conf = max->cur_conf;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS7:
+ cval = UART_LCR_WLEN7;
+ new_conf |= WC_7BIT_WORD;
+ break;
+ default:
+ /* We only support CS7 & CS8 */
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
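+ /* fall through and treat the request as CS8 */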
+ case CS8:
+ cval = UART_LCR_WLEN8;
+ new_conf |= WC_8BIT_WORD;
+ break;
+ }
+
+ baud = uart_get_baud_rate(port, termios, old, 0, 230400);
+
+ /* First calc the div for 1.8MHZ clock case */
+ switch (baud) {
+ case 300:
+ clk_div = WC_BAUD_DR384;
+ break;
+ case 600:
+ clk_div = WC_BAUD_DR192;
+ break;
+ case 1200:
+ clk_div = WC_BAUD_DR96;
+ break;
+ case 2400:
+ clk_div = WC_BAUD_DR48;
+ break;
+ case 4800:
+ clk_div = WC_BAUD_DR24;
+ break;
+ case 9600:
+ clk_div = WC_BAUD_DR12;
+ break;
+ case 19200:
+ clk_div = WC_BAUD_DR6;
+ break;
+ case 38400:
+ clk_div = WC_BAUD_DR3;
+ break;
+ case 57600:
+ clk_div = WC_BAUD_DR2;
+ break;
+ case 115200:
+ clk_div = WC_BAUD_DR1;
+ break;
+ case 230400:
+ if (max->clock & MAX3110_HIGH_CLK)
+ break;
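+ /* fall through: 230400 is only supported with the high-speed clock */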
+ default:
+ /* Pick the previous baud rate */
+ baud = max->baud;
+ clk_div = max->cur_conf & WC_BAUD_DIV_MASK;
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ }
+
+ if (max->clock & MAX3110_HIGH_CLK) {
+ clk_div += 1;
+ /* High clk version max3110 doesn't support B300 */
+ if (baud == 300) {
+ baud = 600;
+ clk_div = WC_BAUD_DR384;
+ }
+ if (baud == 230400)
+ clk_div = WC_BAUD_DR1;
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ }
+
+ new_conf = (new_conf & ~WC_BAUD_DIV_MASK) | clk_div;
+
+ if (unlikely(termios->c_cflag & CMSPAR))
+ termios->c_cflag &= ~CMSPAR;
+
+ if (termios->c_cflag & CSTOPB)
+ new_conf |= WC_2_STOPBITS;
+ else
+ new_conf &= ~WC_2_STOPBITS;
+
+ if (termios->c_cflag & PARENB) {
+ new_conf |= WC_PARITY_ENABLE;
+ parity |= UART_LCR_PARITY;
+ } else
+ new_conf &= ~WC_PARITY_ENABLE;
+
+ if (!(termios->c_cflag & PARODD))
+ parity |= UART_LCR_EPAR;
+ max->parity = parity;
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ new_conf |= WC_TAG;
+ if (new_conf != max->cur_conf) {
+ if (!max3110_out(max, new_conf)) {
+ max->cur_conf = new_conf;
+ max->baud = baud;
+ }
+ }
+}
+
+/* Don't handle hw handshaking */
+static unsigned int serial_m3110_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+}
+
+static void serial_m3110_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static void serial_m3110_break_ctl(struct uart_port *port, int break_state)
+{
+}
+
+static void serial_m3110_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+}
+
+static void serial_m3110_enable_ms(struct uart_port *port)
+{
+}
+
+struct uart_ops serial_m3110_ops = {
+ .tx_empty = serial_m3110_tx_empty,
+ .set_mctrl = serial_m3110_set_mctrl,
+ .get_mctrl = serial_m3110_get_mctrl,
+ .stop_tx = serial_m3110_stop_tx,
+ .start_tx = serial_m3110_start_tx,
+ .stop_rx = serial_m3110_stop_rx,
+ .enable_ms = serial_m3110_enable_ms,
+ .break_ctl = serial_m3110_break_ctl,
+ .startup = serial_m3110_startup,
+ .shutdown = serial_m3110_shutdown,
+ .set_termios = serial_m3110_set_termios,
+ .pm = serial_m3110_pm,
+ .type = serial_m3110_type,
+ .release_port = serial_m3110_release_port,
+ .request_port = serial_m3110_request_port,
+ .config_port = serial_m3110_config_port,
+ .verify_port = serial_m3110_verify_port,
+};
+
+static struct uart_driver serial_m3110_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "MRST serial",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .nr = 1,
+ .cons = &serial_m3110_console,
+};
+
+#ifdef CONFIG_PM
+static int serial_m3110_suspend(struct spi_device *spi, pm_message_t state)
+{
+ struct uart_max3110 *max = spi_get_drvdata(spi);
+
+ disable_irq(max->irq);
+ uart_suspend_port(&serial_m3110_reg, &max->port);
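+ /* WC_SW_SHDI puts the MAX3110 into software shutdown while suspended */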
+ max3110_out(max, max->cur_conf | WC_SW_SHDI);
+ return 0;
+}
+
+static int serial_m3110_resume(struct spi_device *spi)
+{
+ struct uart_max3110 *max = spi_get_drvdata(spi);
+
+ max3110_out(max, max->cur_conf);
+ uart_resume_port(&serial_m3110_reg, &max->port);
+ enable_irq(max->irq);
+ return 0;
+}
+#else
+#define serial_m3110_suspend NULL
+#define serial_m3110_resume NULL
+#endif
+
+static int __devinit serial_m3110_probe(struct spi_device *spi)
+{
+ struct uart_max3110 *max;
+ void *buffer;
+ u16 res;
+ int ret = 0;
+
+ max = kzalloc(sizeof(*max), GFP_KERNEL);
+ if (!max)
+ return -ENOMEM;
+
+ /* Set spi info */
+ spi->bits_per_word = 16;
+ max->clock = MAX3110_HIGH_CLK;
+
+ spi_setup(spi);
+
+ max->port.type = PORT_MAX3100;
+ max->port.fifosize = 2; /* Only have 16b buffer */
+ max->port.ops = &serial_m3110_ops;
+ max->port.line = 0;
+ max->port.dev = &spi->dev;
+ max->port.uartclk = 115200;
+
+ max->spi = spi;
+ strcpy(max->name, spi->modalias);
+ max->irq = (u16)spi->irq;
+
+ mutex_init(&max->thread_mutex);
+
+ max->word_7bits = 0;
+ max->parity = 0;
+ max->baud = 0;
+
+ max->cur_conf = 0;
+ max->uart_flags = 0;
+
+ /* Check if reading configuration register returns something sane */
+
+ res = RC_TAG;
+ ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
+ if (ret < 0 || res == 0 || res == 0xffff) {
+ dev_dbg(&spi->dev, "MAX3111 deemed not present (conf reg %04x)",
+ res);
+ ret = -ENODEV;
+ goto err_get_page;
+ }
+
+ buffer = (void *)__get_free_page(GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto err_get_page;
+ }
+ max->con_xmit.buf = buffer;
+ max->con_xmit.head = 0;
+ max->con_xmit.tail = 0;
+
+ init_waitqueue_head(&max->wq);
+
+ max->main_thread = kthread_run(max3110_main_thread,
+ max, "max3110_main");
+ if (IS_ERR(max->main_thread)) {
+ ret = PTR_ERR(max->main_thread);
+ goto err_kthread;
+ }
+
+ spi_set_drvdata(spi, max);
+ pmax = max;
+
+ /* Give membase a pseudo value to pass serial_core's check */
+ max->port.membase = (void *)0xff110000;
+ uart_add_one_port(&serial_m3110_reg, &max->port);
+
+ return 0;
+
+err_kthread:
+ free_page((unsigned long)buffer);
+err_get_page:
+ kfree(max);
+ return ret;
+}
+
+static int __devexit serial_m3110_remove(struct spi_device *dev)
+{
+ struct uart_max3110 *max = spi_get_drvdata(dev);
+
+ if (!max)
+ return 0;
+
+ uart_remove_one_port(&serial_m3110_reg, &max->port);
+
+ free_page((unsigned long)max->con_xmit.buf);
+
+ if (max->main_thread)
+ kthread_stop(max->main_thread);
+
+ kfree(max);
+ return 0;
+}
+
+static struct spi_driver uart_max3110_driver = {
+ .driver = {
+ .name = "spi_max3111",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = serial_m3110_probe,
+ .remove = __devexit_p(serial_m3110_remove),
+ .suspend = serial_m3110_suspend,
+ .resume = serial_m3110_resume,
+};
+
+static int __init serial_m3110_init(void)
+{
+ int ret = 0;
+
+ ret = uart_register_driver(&serial_m3110_reg);
+ if (ret)
+ return ret;
+
+ ret = spi_register_driver(&uart_max3110_driver);
+ if (ret)
+ uart_unregister_driver(&serial_m3110_reg);
+
+ return ret;
+}
+
+static void __exit serial_m3110_exit(void)
+{
+ spi_unregister_driver(&uart_max3110_driver);
+ uart_unregister_driver(&serial_m3110_reg);
+}
+
+module_init(serial_m3110_init);
+module_exit(serial_m3110_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:max3110-uart");
diff --git a/drivers/tty/serial/mrst_max3110.h b/drivers/tty/serial/mrst_max3110.h
new file mode 100644
index 00000000..c37ea48c
--- /dev/null
+++ b/drivers/tty/serial/mrst_max3110.h
@@ -0,0 +1,60 @@
+#ifndef _MRST_MAX3110_H
+#define _MRST_MAX3110_H
+
+#define MAX3110_HIGH_CLK 0x1 /* 3.6864 MHZ */
+#define MAX3110_LOW_CLK 0x0 /* 1.8432 MHZ */
+
+/* status bits for all 4 MAX3110 operating modes */
+#define MAX3110_READ_DATA_AVAILABLE (1 << 15)
+#define MAX3110_WRITE_BUF_EMPTY (1 << 14)
+
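+/* The top two bits of each 16-bit SPI word encode the transfer type
+ * (write/read configuration, write/read data) */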
+#define WC_TAG (3 << 14)
+#define RC_TAG (1 << 14)
+#define WD_TAG (2 << 14)
+#define RD_TAG (0 << 14)
+
+/* bits def for write configuration */
+#define WC_FIFO_ENABLE_MASK (1 << 13)
+#define WC_FIFO_ENABLE (0 << 13)
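+/* Note: the FIFOs are enabled by leaving bit 13 clear */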
+
+#define WC_SW_SHDI (1 << 12)
+
+#define WC_IRQ_MASK (0xF << 8)
+#define WC_TXE_IRQ_ENABLE (1 << 11) /* TX empty irq */
+#define WC_RXA_IRQ_ENABLE (1 << 10) /* RX available irq */
+#define WC_PAR_HIGH_IRQ_ENABLE (1 << 9)
+#define WC_REC_ACT_IRQ_ENABLE (1 << 8)
+
+#define WC_IRDA_ENABLE (1 << 7)
+
+#define WC_STOPBITS_MASK (1 << 6)
+#define WC_2_STOPBITS (1 << 6)
+#define WC_1_STOPBITS (0 << 6)
+
+#define WC_PARITY_ENABLE_MASK (1 << 5)
+#define WC_PARITY_ENABLE (1 << 5)
+
+#define WC_WORDLEN_MASK (1 << 4)
+#define WC_7BIT_WORD (1 << 4)
+#define WC_8BIT_WORD (0 << 4)
+
+#define WC_BAUD_DIV_MASK (0xF)
+#define WC_BAUD_DR1 (0x0)
+#define WC_BAUD_DR2 (0x1)
+#define WC_BAUD_DR4 (0x2)
+#define WC_BAUD_DR8 (0x3)
+#define WC_BAUD_DR16 (0x4)
+#define WC_BAUD_DR32 (0x5)
+#define WC_BAUD_DR64 (0x6)
+#define WC_BAUD_DR128 (0x7)
+#define WC_BAUD_DR3 (0x8)
+#define WC_BAUD_DR6 (0x9)
+#define WC_BAUD_DR12 (0xA)
+#define WC_BAUD_DR24 (0xB)
+#define WC_BAUD_DR48 (0xC)
+#define WC_BAUD_DR96 (0xD)
+#define WC_BAUD_DR192 (0xE)
+#define WC_BAUD_DR384 (0xF)
+
+#define M3110_RX_FIFO_DEPTH 8
+#endif
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
new file mode 100644
index 00000000..e6ba8387
--- /dev/null
+++ b/drivers/tty/serial/msm_serial.c
@@ -0,0 +1,966 @@
+/*
+ * Driver for msm7k serial device and console
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Robert Love <rlove@google.com>
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if defined(CONFIG_SERIAL_MSM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+# define SUPPORT_SYSRQ
+#endif
+
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#include "msm_serial.h"
+
+struct msm_port {
+ struct uart_port uart;
+ char name[16];
+ struct clk *clk;
+ struct clk *pclk;
+ unsigned int imr;
+ unsigned int *gsbi_base;
+ int is_uartdm;
+ unsigned int old_snap_state;
+};
+
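+/* If the transmitter is not already empty, busy-wait until the requested ISR bits are set */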
+static inline void wait_for_xmitr(struct uart_port *port, int bits)
+{
+ if (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY))
+ while ((msm_read(port, UART_ISR) & bits) != bits)
+ cpu_relax();
+}
+
+static void msm_stop_tx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr &= ~UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_start_tx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr |= UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_stop_rx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_enable_ms(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr |= UART_IMR_DELTA_CTS;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void handle_rx_dm(struct uart_port *port, unsigned int misr)
+{
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int sr;
+ int count = 0;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ port->icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ }
+
+ if (misr & UART_IMR_RXSTALE) {
+ count = msm_read(port, UARTDM_RX_TOTAL_SNAP) -
+ msm_port->old_snap_state;
+ msm_port->old_snap_state = 0;
+ } else {
+ count = 4 * (msm_read(port, UART_RFWR));
+ msm_port->old_snap_state += count;
+ }
+
+ /* TODO: Precise error reporting */
+
+ port->icount.rx += count;
+
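+ /* Each read of the UARTDM RX FIFO returns one 32-bit word holding up to four characters */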
+ while (count > 0) {
+ unsigned int c;
+
+ sr = msm_read(port, UART_SR);
+ if ((sr & UART_SR_RX_READY) == 0) {
+ msm_port->old_snap_state -= count;
+ break;
+ }
+ c = msm_read(port, UARTDM_RF);
+ if (sr & UART_SR_RX_BREAK) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (sr & UART_SR_PAR_FRAME_ERR)
+ port->icount.frame++;
+
+ /* TODO: handle sysrq */
+ tty_insert_flip_string(tty, (char *) &c,
+ (count > 4) ? 4 : count);
+ count -= 4;
+ }
+
+ tty_flip_buffer_push(tty);
+ if (misr & (UART_IMR_RXSTALE))
+ msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, 0xFFFFFF, UARTDM_DMRX);
+ msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+}
+
+static void handle_rx(struct uart_port *port)
+{
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int sr;
+
+ /*
+ * Handle overrun. My understanding of the hardware is that overrun
+ * is not tied to the RX buffer, so we handle the case out of band.
+ */
+ if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ port->icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ }
+
+ /* and now the main RX loop */
+ while ((sr = msm_read(port, UART_SR)) & UART_SR_RX_READY) {
+ unsigned int c;
+ char flag = TTY_NORMAL;
+
+ c = msm_read(port, UART_RF);
+
+ if (sr & UART_SR_RX_BREAK) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (sr & UART_SR_PAR_FRAME_ERR) {
+ port->icount.frame++;
+ } else {
+ port->icount.rx++;
+ }
+
+ /* Mask conditions we're ignoring. */
+ sr &= port->read_status_mask;
+
+ if (sr & UART_SR_RX_BREAK) {
+ flag = TTY_BREAK;
+ } else if (sr & UART_SR_PAR_FRAME_ERR) {
+ flag = TTY_FRAME;
+ }
+
+ if (!uart_handle_sysrq_char(port, c))
+ tty_insert_flip_char(tty, c, flag);
+ }
+
+ tty_flip_buffer_push(tty);
+}
+
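+/* Program UARTDM with the number of characters (one) to expect for the next TX FIFO write */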
+static void reset_dm_count(struct uart_port *port)
+{
+ wait_for_xmitr(port, UART_ISR_TX_READY);
+ msm_write(port, 1, UARTDM_NCF_TX);
+}
+
+static void handle_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ if (port->x_char) {
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
+
+ msm_write(port, port->x_char,
+ msm_port->is_uartdm ? UARTDM_TF : UART_TF);
+ port->icount.tx++;
+ port->x_char = 0;
+ }
+
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
+
+ while (msm_read(port, UART_SR) & UART_SR_TX_READY) {
+ if (uart_circ_empty(xmit)) {
+ /* disable tx interrupts */
+ msm_port->imr &= ~UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+ break;
+ }
+ msm_write(port, xmit->buf[xmit->tail],
+ msm_port->is_uartdm ? UARTDM_TF : UART_TF);
+
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+static void handle_delta_cts(struct uart_port *port)
+{
+ msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ port->icount.cts++;
+ wake_up_interruptible(&port->state->port.delta_msr_wait);
+}
+
+static irqreturn_t msm_irq(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int misr;
+
+ spin_lock(&port->lock);
+ misr = msm_read(port, UART_MISR);
+ msm_write(port, 0, UART_IMR); /* disable interrupt */
+
+ if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
+ if (msm_port->is_uartdm)
+ handle_rx_dm(port, misr);
+ else
+ handle_rx(port);
+ }
+ if (misr & UART_IMR_TXLEV)
+ handle_tx(port);
+ if (misr & UART_IMR_DELTA_CTS)
+ handle_delta_cts(port);
+
+ msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
+ spin_unlock(&port->lock);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int msm_tx_empty(struct uart_port *port)
+{
+ return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int msm_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS;
+}
+
+
+static void msm_reset(struct uart_port *port)
+{
+ /* reset everything */
+ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+}
+
+void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ unsigned int mr;
+ mr = msm_read(port, UART_MR1);
+
+ if (!(mctrl & TIOCM_RTS)) {
+ mr &= ~UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, UART_MR1);
+ msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
+ } else {
+ mr |= UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, UART_MR1);
+ }
+}
+
+static void msm_break_ctl(struct uart_port *port, int break_ctl)
+{
+ if (break_ctl)
+ msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
+ else
+ msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
+}
+
+static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
+{
+ unsigned int baud_code, rxstale, watermark;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ switch (baud) {
+ case 300:
+ baud_code = UART_CSR_300;
+ rxstale = 1;
+ break;
+ case 600:
+ baud_code = UART_CSR_600;
+ rxstale = 1;
+ break;
+ case 1200:
+ baud_code = UART_CSR_1200;
+ rxstale = 1;
+ break;
+ case 2400:
+ baud_code = UART_CSR_2400;
+ rxstale = 1;
+ break;
+ case 4800:
+ baud_code = UART_CSR_4800;
+ rxstale = 1;
+ break;
+ case 9600:
+ baud_code = UART_CSR_9600;
+ rxstale = 2;
+ break;
+ case 14400:
+ baud_code = UART_CSR_14400;
+ rxstale = 3;
+ break;
+ case 19200:
+ baud_code = UART_CSR_19200;
+ rxstale = 4;
+ break;
+ case 28800:
+ baud_code = UART_CSR_28800;
+ rxstale = 6;
+ break;
+ case 38400:
+ baud_code = UART_CSR_38400;
+ rxstale = 8;
+ break;
+ case 57600:
+ baud_code = UART_CSR_57600;
+ rxstale = 16;
+ break;
+ case 115200:
+ default:
+ baud_code = UART_CSR_115200;
+ baud = 115200;
+ rxstale = 31;
+ break;
+ }
+
+ if (msm_port->is_uartdm)
+ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
+
+ msm_write(port, baud_code, UART_CSR);
+
+ /* RX stale watermark */
+ watermark = UART_IPR_STALE_LSB & rxstale;
+ watermark |= UART_IPR_RXSTALE_LAST;
+ watermark |= UART_IPR_STALE_TIMEOUT_MSB & (rxstale << 2);
+ msm_write(port, watermark, UART_IPR);
+
+ /* set RX watermark */
+ watermark = (port->fifosize * 3) / 4;
+ msm_write(port, watermark, UART_RFWR);
+
+ /* set TX watermark */
+ msm_write(port, 10, UART_TFWR);
+
+ if (msm_port->is_uartdm) {
+ msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, 0xFFFFFF, UARTDM_DMRX);
+ msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ }
+
+ return baud;
+}
+
+
+static void msm_init_clock(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ clk_enable(msm_port->clk);
+ if (!IS_ERR(msm_port->pclk))
+ clk_enable(msm_port->pclk);
+ msm_serial_set_mnd_regs(port);
+}
+
+static int msm_startup(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int data, rfr_level;
+ int ret;
+
+ snprintf(msm_port->name, sizeof(msm_port->name),
+ "msm_serial%d", port->line);
+
+ ret = request_irq(port->irq, msm_irq, IRQF_TRIGGER_HIGH,
+ msm_port->name, port);
+ if (unlikely(ret))
+ return ret;
+
+ msm_init_clock(port);
+
+ if (likely(port->fifosize > 12))
+ rfr_level = port->fifosize - 12;
+ else
+ rfr_level = port->fifosize;
+
+ /* set automatic RFR level */
+ data = msm_read(port, UART_MR1);
+ data &= ~UART_MR1_AUTO_RFR_LEVEL1;
+ data &= ~UART_MR1_AUTO_RFR_LEVEL0;
+ data |= UART_MR1_AUTO_RFR_LEVEL1 & (rfr_level << 2);
+ data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
+ msm_write(port, data, UART_MR1);
+
+ /* make sure that RXSTALE count is non-zero */
+ data = msm_read(port, UART_IPR);
+ if (unlikely(!data)) {
+ data |= UART_IPR_RXSTALE_LAST;
+ data |= UART_IPR_STALE_LSB;
+ msm_write(port, data, UART_IPR);
+ }
+
+ data = 0;
+ if (!port->cons || (port->cons && !(port->cons->flags & CON_ENABLED))) {
+ msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
+ msm_reset(port);
+ data = UART_CR_TX_ENABLE;
+ }
+
+ data |= UART_CR_RX_ENABLE;
+ msm_write(port, data, UART_CR); /* enable TX & RX */
+
+ /* Make sure IPR is not 0 to start with */
+ if (msm_port->is_uartdm)
+ msm_write(port, UART_IPR_STALE_LSB, UART_IPR);
+
+ /* turn on RX and CTS interrupts */
+ msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
+ UART_IMR_CURRENT_CTS;
+
+ if (msm_port->is_uartdm) {
+ msm_write(port, 0xFFFFFF, UARTDM_DMRX);
+ msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+ }
+
+ msm_write(port, msm_port->imr, UART_IMR);
+ return 0;
+}
+
+static void msm_shutdown(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr = 0;
+ msm_write(port, 0, UART_IMR); /* disable interrupts */
+
+ clk_disable(msm_port->clk);
+
+ free_irq(port->irq, port);
+}
+
+static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned int baud, mr;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* calculate and set baud rate */
+ baud = uart_get_baud_rate(port, termios, old, 300, 115200);
+ baud = msm_set_baud_rate(port, baud);
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ /* calculate parity */
+ mr = msm_read(port, UART_MR2);
+ mr &= ~UART_MR2_PARITY_MODE;
+ if (termios->c_cflag & PARENB) {
+ if (termios->c_cflag & PARODD)
+ mr |= UART_MR2_PARITY_MODE_ODD;
+ else if (termios->c_cflag & CMSPAR)
+ mr |= UART_MR2_PARITY_MODE_SPACE;
+ else
+ mr |= UART_MR2_PARITY_MODE_EVEN;
+ }
+
+ /* calculate bits per char */
+ mr &= ~UART_MR2_BITS_PER_CHAR;
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ mr |= UART_MR2_BITS_PER_CHAR_5;
+ break;
+ case CS6:
+ mr |= UART_MR2_BITS_PER_CHAR_6;
+ break;
+ case CS7:
+ mr |= UART_MR2_BITS_PER_CHAR_7;
+ break;
+ case CS8:
+ default:
+ mr |= UART_MR2_BITS_PER_CHAR_8;
+ break;
+ }
+
+ /* calculate stop bits */
+ mr &= ~(UART_MR2_STOP_BIT_LEN_ONE | UART_MR2_STOP_BIT_LEN_TWO);
+ if (termios->c_cflag & CSTOPB)
+ mr |= UART_MR2_STOP_BIT_LEN_TWO;
+ else
+ mr |= UART_MR2_STOP_BIT_LEN_ONE;
+
+ /* set parity, bits per char, and stop bit */
+ msm_write(port, mr, UART_MR2);
+
+ /* calculate and set hardware flow control */
+ mr = msm_read(port, UART_MR1);
+ mr &= ~(UART_MR1_CTS_CTL | UART_MR1_RX_RDY_CTL);
+ if (termios->c_cflag & CRTSCTS) {
+ mr |= UART_MR1_CTS_CTL;
+ mr |= UART_MR1_RX_RDY_CTL;
+ }
+ msm_write(port, mr, UART_MR1);
+
+ /* Configure which status bits to check, based on termios flags. */
+ port->read_status_mask = 0;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= UART_SR_RX_BREAK;
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *msm_type(struct uart_port *port)
+{
+ return "MSM";
+}
+
+static void msm_release_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ struct resource *uart_resource;
+ struct resource *gsbi_resource;
+ resource_size_t size;
+
+ uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!uart_resource))
+ return;
+ size = resource_size(uart_resource);
+
+ release_mem_region(port->mapbase, size);
+ iounmap(port->membase);
+ port->membase = NULL;
+
+ if (msm_port->gsbi_base) {
+ iowrite32(GSBI_PROTOCOL_IDLE, msm_port->gsbi_base +
+ GSBI_CONTROL);
+
+ gsbi_resource = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "gsbi_resource");
+
+ if (unlikely(!gsbi_resource))
+ return;
+
+ size = resource_size(gsbi_resource);
+ release_mem_region(gsbi_resource->start, size);
+ iounmap(msm_port->gsbi_base);
+ msm_port->gsbi_base = NULL;
+ }
+}
+
+static int msm_request_port(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *uart_resource;
+ struct resource *gsbi_resource;
+ resource_size_t size;
+ int ret;
+
+ uart_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "uart_resource");
+ if (unlikely(!uart_resource))
+ return -ENXIO;
+
+ size = resource_size(uart_resource);
+
+ if (!request_mem_region(port->mapbase, size, "msm_serial"))
+ return -EBUSY;
+
+ port->membase = ioremap(port->mapbase, size);
+ if (!port->membase) {
+ ret = -EBUSY;
+ goto fail_release_port;
+ }
+
+ gsbi_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "gsbi_resource");
+ /* Is this a GSBI-based port? */
+ if (gsbi_resource) {
+ size = resource_size(gsbi_resource);
+
+ if (!request_mem_region(gsbi_resource->start, size,
+ "msm_serial")) {
+ ret = -EBUSY;
+ goto fail_release_port;
+ }
+
+ msm_port->gsbi_base = ioremap(gsbi_resource->start, size);
+ if (!msm_port->gsbi_base) {
+ ret = -EBUSY;
+ goto fail_release_gsbi;
+ }
+ }
+
+ return 0;
+
+fail_release_gsbi:
+ release_mem_region(gsbi_resource->start, size);
+fail_release_port:
+ release_mem_region(port->mapbase, size);
+ return ret;
+}
+
+static void msm_config_port(struct uart_port *port, int flags)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ int ret;
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_MSM;
+ ret = msm_request_port(port);
+ if (ret)
+ return;
+ }
+
+ if (msm_port->is_uartdm)
+ iowrite32(GSBI_PROTOCOL_UART, msm_port->gsbi_base +
+ GSBI_CONTROL);
+}
+
+static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_MSM))
+ return -EINVAL;
+ if (unlikely(port->irq != ser->irq))
+ return -EINVAL;
+ return 0;
+}
+
+static void msm_power(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
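+ /* Serial core PM states: 0 = fully powered, 3 = powered off */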
+ switch (state) {
+ case 0:
+ clk_enable(msm_port->clk);
+ if (!IS_ERR(msm_port->pclk))
+ clk_enable(msm_port->pclk);
+ break;
+ case 3:
+ clk_disable(msm_port->clk);
+ if (!IS_ERR(msm_port->pclk))
+ clk_disable(msm_port->pclk);
+ break;
+ default:
+ printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state);
+ }
+}
+
+static struct uart_ops msm_uart_pops = {
+ .tx_empty = msm_tx_empty,
+ .set_mctrl = msm_set_mctrl,
+ .get_mctrl = msm_get_mctrl,
+ .stop_tx = msm_stop_tx,
+ .start_tx = msm_start_tx,
+ .stop_rx = msm_stop_rx,
+ .enable_ms = msm_enable_ms,
+ .break_ctl = msm_break_ctl,
+ .startup = msm_startup,
+ .shutdown = msm_shutdown,
+ .set_termios = msm_set_termios,
+ .type = msm_type,
+ .release_port = msm_release_port,
+ .request_port = msm_request_port,
+ .config_port = msm_config_port,
+ .verify_port = msm_verify_port,
+ .pm = msm_power,
+};
+
+static struct msm_port msm_uart_ports[] = {
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 64,
+ .line = 0,
+ },
+ },
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 64,
+ .line = 1,
+ },
+ },
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 64,
+ .line = 2,
+ },
+ },
+};
+
+#define UART_NR ARRAY_SIZE(msm_uart_ports)
+
+static inline struct uart_port *get_port_from_line(unsigned int line)
+{
+ return &msm_uart_ports[line].uart;
+}
+
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+
+static void msm_console_putchar(struct uart_port *port, int c)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ if (msm_port->is_uartdm)
+ reset_dm_count(port);
+
+ while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ ;
+ msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
+}
+
+static void msm_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port;
+ struct msm_port *msm_port;
+
+ BUG_ON(co->index < 0 || co->index >= UART_NR);
+
+ port = get_port_from_line(co->index);
+ msm_port = UART_TO_MSM(port);
+
+ spin_lock(&port->lock);
+ uart_console_write(port, s, count, msm_console_putchar);
+ spin_unlock(&port->lock);
+}
+
+static int __init msm_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ struct msm_port *msm_port;
+ int baud, flow, bits, parity;
+
+ if (unlikely(co->index >= UART_NR || co->index < 0))
+ return -ENXIO;
+
+ port = get_port_from_line(co->index);
+ msm_port = UART_TO_MSM(port);
+
+ if (unlikely(!port->membase))
+ return -ENXIO;
+
+ port->cons = co;
+
+ msm_init_clock(port);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ bits = 8;
+ parity = 'n';
+ flow = 'n';
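+ /* Only the baud rate from the console options is honoured; the port is always set up as 8N1 */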
+ msm_write(port, UART_MR2_BITS_PER_CHAR_8 | UART_MR2_STOP_BIT_LEN_ONE,
+ UART_MR2); /* 8N1 */
+
+ if (baud < 300 || baud > 115200)
+ baud = 115200;
+ msm_set_baud_rate(port, baud);
+
+ msm_reset(port);
+
+ if (msm_port->is_uartdm) {
+ msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR);
+ msm_write(port, UART_CR_TX_ENABLE, UART_CR);
+ }
+
+ printk(KERN_INFO "msm_serial: console setup on port #%d\n", port->line);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver msm_uart_driver;
+
+static struct console msm_console = {
+ .name = "ttyMSM",
+ .write = msm_console_write,
+ .device = uart_console_device,
+ .setup = msm_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &msm_uart_driver,
+};
+
+#define MSM_CONSOLE (&msm_console)
+
+#else
+#define MSM_CONSOLE NULL
+#endif
+
+static struct uart_driver msm_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "msm_serial",
+ .dev_name = "ttyMSM",
+ .nr = UART_NR,
+ .cons = MSM_CONSOLE,
+};
+
+static int __init msm_serial_probe(struct platform_device *pdev)
+{
+ struct msm_port *msm_port;
+ struct resource *resource;
+ struct uart_port *port;
+ int irq;
+
+ if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
+ return -ENXIO;
+
+ printk(KERN_INFO "msm_serial: detected port #%d\n", pdev->id);
+
+ port = get_port_from_line(pdev->id);
+ port->dev = &pdev->dev;
+ msm_port = UART_TO_MSM(port);
+
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsbi_resource"))
+ msm_port->is_uartdm = 1;
+ else
+ msm_port->is_uartdm = 0;
+
+ if (msm_port->is_uartdm) {
+ msm_port->clk = clk_get(&pdev->dev, "gsbi_uart_clk");
+ msm_port->pclk = clk_get(&pdev->dev, "gsbi_pclk");
+ } else {
+ msm_port->clk = clk_get(&pdev->dev, "uart_clk");
+ msm_port->pclk = ERR_PTR(-ENOENT);
+ }
+
+ if (unlikely(IS_ERR(msm_port->clk) || (IS_ERR(msm_port->pclk) &&
+ msm_port->is_uartdm)))
+ return PTR_ERR(msm_port->clk);
+
+ if (msm_port->is_uartdm)
+ clk_set_rate(msm_port->clk, 7372800);
+
+ port->uartclk = clk_get_rate(msm_port->clk);
+ printk(KERN_INFO "uartclk = %d\n", port->uartclk);
+
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "uart_resource");
+ if (unlikely(!resource))
+ return -ENXIO;
+ port->mapbase = resource->start;
+
+ irq = platform_get_irq(pdev, 0);
+ if (unlikely(irq < 0))
+ return -ENXIO;
+ port->irq = irq;
+
+ platform_set_drvdata(pdev, port);
+
+ return uart_add_one_port(&msm_uart_driver, port);
+}
+
+static int __devexit msm_serial_remove(struct platform_device *pdev)
+{
+ struct msm_port *msm_port = platform_get_drvdata(pdev);
+
+ clk_put(msm_port->clk);
+
+ return 0;
+}
+
+static struct platform_driver msm_platform_driver = {
+ .remove = msm_serial_remove,
+ .driver = {
+ .name = "msm_serial",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msm_serial_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&msm_uart_driver);
+ if (unlikely(ret))
+ return ret;
+
+ ret = platform_driver_probe(&msm_platform_driver, msm_serial_probe);
+ if (unlikely(ret))
+ uart_unregister_driver(&msm_uart_driver);
+
+ printk(KERN_INFO "msm_serial: driver initialized\n");
+
+ return ret;
+}
+
+static void __exit msm_serial_exit(void)
+{
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+ unregister_console(&msm_console);
+#endif
+ platform_driver_unregister(&msm_platform_driver);
+ uart_unregister_driver(&msm_uart_driver);
+}
+
+module_init(msm_serial_init);
+module_exit(msm_serial_exit);
+
+MODULE_AUTHOR("Robert Love <rlove@google.com>");
+MODULE_DESCRIPTION("Driver for msm7x serial device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
new file mode 100644
index 00000000..e4acef5d
--- /dev/null
+++ b/drivers/tty/serial/msm_serial.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Robert Love <rlove@google.com>
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRIVERS_SERIAL_MSM_SERIAL_H
+#define __DRIVERS_SERIAL_MSM_SERIAL_H
+
+#define UART_MR1 0x0000
+
+#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
+#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
+#define UART_MR1_RX_RDY_CTL (1 << 7)
+#define UART_MR1_CTS_CTL (1 << 6)
+
+#define UART_MR2 0x0004
+#define UART_MR2_ERROR_MODE (1 << 6)
+#define UART_MR2_BITS_PER_CHAR 0x30
+#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
+#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
+#define UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
+#define UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
+#define UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
+#define UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
+#define UART_MR2_PARITY_MODE_NONE 0x0
+#define UART_MR2_PARITY_MODE_ODD 0x1
+#define UART_MR2_PARITY_MODE_EVEN 0x2
+#define UART_MR2_PARITY_MODE_SPACE 0x3
+#define UART_MR2_PARITY_MODE 0x3
+
+#define UART_CSR 0x0008
+#define UART_CSR_115200 0xFF
+#define UART_CSR_57600 0xEE
+#define UART_CSR_38400 0xDD
+#define UART_CSR_28800 0xCC
+#define UART_CSR_19200 0xBB
+#define UART_CSR_14400 0xAA
+#define UART_CSR_9600 0x99
+#define UART_CSR_4800 0x77
+#define UART_CSR_2400 0x55
+#define UART_CSR_1200 0x44
+#define UART_CSR_600 0x33
+#define UART_CSR_300 0x22
+
+#define UART_TF 0x000C
+#define UARTDM_TF 0x0070
+
+#define UART_CR 0x0010
+#define UART_CR_CMD_NULL (0 << 4)
+#define UART_CR_CMD_RESET_RX (1 << 4)
+#define UART_CR_CMD_RESET_TX (2 << 4)
+#define UART_CR_CMD_RESET_ERR (3 << 4)
+#define UART_CR_CMD_RESET_BREAK_INT (4 << 4)
+#define UART_CR_CMD_START_BREAK (5 << 4)
+#define UART_CR_CMD_STOP_BREAK (6 << 4)
+#define UART_CR_CMD_RESET_CTS (7 << 4)
+#define UART_CR_CMD_RESET_STALE_INT (8 << 4)
+#define UART_CR_CMD_PACKET_MODE (9 << 4)
+#define UART_CR_CMD_MODE_RESET (12 << 4)
+#define UART_CR_CMD_SET_RFR (13 << 4)
+#define UART_CR_CMD_RESET_RFR (14 << 4)
+#define UART_CR_CMD_PROTECTION_EN (16 << 4)
+#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
+#define UART_CR_TX_DISABLE (1 << 3)
+#define UART_CR_TX_ENABLE (1 << 2)
+#define UART_CR_RX_DISABLE (1 << 1)
+#define UART_CR_RX_ENABLE (1 << 0)
+
+#define UART_IMR 0x0014
+#define UART_IMR_TXLEV (1 << 0)
+#define UART_IMR_RXSTALE (1 << 3)
+#define UART_IMR_RXLEV (1 << 4)
+#define UART_IMR_DELTA_CTS (1 << 5)
+#define UART_IMR_CURRENT_CTS (1 << 6)
+
+#define UART_IPR_RXSTALE_LAST 0x20
+#define UART_IPR_STALE_LSB 0x1F
+#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
+
+#define UART_IPR 0x0018
+#define UART_TFWR 0x001C
+#define UART_RFWR 0x0020
+#define UART_HCR 0x0024
+
+#define UART_MREG 0x0028
+#define UART_NREG 0x002C
+#define UART_DREG 0x0030
+#define UART_MNDREG 0x0034
+#define UART_IRDA 0x0038
+#define UART_MISR_MODE 0x0040
+#define UART_MISR_RESET 0x0044
+#define UART_MISR_EXPORT 0x0048
+#define UART_MISR_VAL 0x004C
+#define UART_TEST_CTRL 0x0050
+
+#define UART_SR 0x0008
+#define UART_SR_HUNT_CHAR (1 << 7)
+#define UART_SR_RX_BREAK (1 << 6)
+#define UART_SR_PAR_FRAME_ERR (1 << 5)
+#define UART_SR_OVERRUN (1 << 4)
+#define UART_SR_TX_EMPTY (1 << 3)
+#define UART_SR_TX_READY (1 << 2)
+#define UART_SR_RX_FULL (1 << 1)
+#define UART_SR_RX_READY (1 << 0)
+
+#define UART_RF 0x000C
+#define UARTDM_RF 0x0070
+#define UART_MISR 0x0010
+#define UART_ISR 0x0014
+#define UART_ISR_TX_READY (1 << 7)
+
+#define GSBI_CONTROL 0x0
+#define GSBI_PROTOCOL_CODE 0x30
+#define GSBI_PROTOCOL_UART 0x40
+#define GSBI_PROTOCOL_IDLE 0x0
+
+#define UARTDM_DMRX 0x34
+#define UARTDM_NCF_TX 0x40
+#define UARTDM_RX_TOTAL_SNAP 0x38
+
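+/* uart is the first member of struct msm_port, so this cast behaves like container_of() */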
+#define UART_TO_MSM(uart_port) ((struct msm_port *) uart_port)
+
+static inline
+void msm_write(struct uart_port *port, unsigned int val, unsigned int off)
+{
+ __raw_writel(val, port->membase + off);
+}
+
+static inline
+unsigned int msm_read(struct uart_port *port, unsigned int off)
+{
+ return __raw_readl(port->membase + off);
+}
+
+/*
+ * Setup the MND registers to use the TCXO clock.
+ */
+static inline void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
+{
+ msm_write(port, 0x06, UART_MREG);
+ msm_write(port, 0xF1, UART_NREG);
+ msm_write(port, 0x0F, UART_DREG);
+ msm_write(port, 0x1A, UART_MNDREG);
+}
+
+/*
+ * Setup the MND registers to use the TCXO clock divided by 4.
+ */
+static inline void msm_serial_set_mnd_regs_tcxoby4(struct uart_port *port)
+{
+ msm_write(port, 0x18, UART_MREG);
+ msm_write(port, 0xF6, UART_NREG);
+ msm_write(port, 0x0F, UART_DREG);
+ msm_write(port, 0x0A, UART_MNDREG);
+}
+
+static inline
+void msm_serial_set_mnd_regs_from_uartclk(struct uart_port *port)
+{
+ if (port->uartclk == 19200000)
+ msm_serial_set_mnd_regs_tcxo(port);
+ else
+ msm_serial_set_mnd_regs_tcxoby4(port);
+}
+
+/*
+ * TROUT has a specific defect that makes it report its uartclk
+ * as 19.2MHz (TCXO) when it is actually 4.8MHz (TCXO/4). This special
+ * cases TROUT to use the right clock.
+ */
+#ifdef CONFIG_MACH_TROUT
+#define msm_serial_set_mnd_regs msm_serial_set_mnd_regs_tcxoby4
+#else
+#define msm_serial_set_mnd_regs msm_serial_set_mnd_regs_from_uartclk
+#endif
+
+#endif /* __DRIVERS_SERIAL_MSM_SERIAL_H */
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
new file mode 100644
index 00000000..624701f8
--- /dev/null
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -0,0 +1,1880 @@
+/*
+ * MSM 7k/8k High speed uart driver
+ *
+ * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2008 Google Inc.
+ * Modified: Nick Pelly <npelly@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * Has optional support for uart power management independent of linux
+ * suspend/resume:
+ *
+ * RX wakeup.
+ * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
+ * UART RX pin). This should only be used if there is not a wakeup
+ * GPIO on the UART CTS, and the first RX byte is known (for example, with the
+ * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
+ * always be lost. RTS will be asserted even while the UART is off in this mode
+ * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
+ */
+
+#include <linux/module.h>
+
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <linux/atomic.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+
+#include <mach/hardware.h>
+#include <mach/dma.h>
+#include <linux/platform_data/msm_serial_hs.h>
+
+/* HSUART Registers */
+#define UARTDM_MR1_ADDR 0x0
+#define UARTDM_MR2_ADDR 0x4
+
+/* Data Mover result codes */
+#define RSLT_FIFO_CNTR_BMSK (0xE << 28)
+#define RSLT_VLD BIT(1)
+
+/* write only register */
+#define UARTDM_CSR_ADDR 0x8
+#define UARTDM_CSR_115200 0xFF
+#define UARTDM_CSR_57600 0xEE
+#define UARTDM_CSR_38400 0xDD
+#define UARTDM_CSR_28800 0xCC
+#define UARTDM_CSR_19200 0xBB
+#define UARTDM_CSR_14400 0xAA
+#define UARTDM_CSR_9600 0x99
+#define UARTDM_CSR_7200 0x88
+#define UARTDM_CSR_4800 0x77
+#define UARTDM_CSR_3600 0x66
+#define UARTDM_CSR_2400 0x55
+#define UARTDM_CSR_1200 0x44
+#define UARTDM_CSR_600 0x33
+#define UARTDM_CSR_300 0x22
+#define UARTDM_CSR_150 0x11
+#define UARTDM_CSR_75 0x00
+
+/* write only register */
+#define UARTDM_TF_ADDR 0x70
+#define UARTDM_TF2_ADDR 0x74
+#define UARTDM_TF3_ADDR 0x78
+#define UARTDM_TF4_ADDR 0x7C
+
+/* write only register */
+#define UARTDM_CR_ADDR 0x10
+#define UARTDM_IMR_ADDR 0x14
+
+#define UARTDM_IPR_ADDR 0x18
+#define UARTDM_TFWR_ADDR 0x1c
+#define UARTDM_RFWR_ADDR 0x20
+#define UARTDM_HCR_ADDR 0x24
+#define UARTDM_DMRX_ADDR 0x34
+#define UARTDM_IRDA_ADDR 0x38
+#define UARTDM_DMEN_ADDR 0x3c
+
+/* UART_DM_NO_CHARS_FOR_TX */
+#define UARTDM_NCF_TX_ADDR 0x40
+
+#define UARTDM_BADR_ADDR 0x44
+
+#define UARTDM_SIM_CFG_ADDR 0x80
+/* Read Only register */
+#define UARTDM_SR_ADDR 0x8
+
+/* Read Only register */
+#define UARTDM_RF_ADDR 0x70
+#define UARTDM_RF2_ADDR 0x74
+#define UARTDM_RF3_ADDR 0x78
+#define UARTDM_RF4_ADDR 0x7C
+
+/* Read Only register */
+#define UARTDM_MISR_ADDR 0x10
+
+/* Read Only register */
+#define UARTDM_ISR_ADDR 0x14
+#define UARTDM_RX_TOTAL_SNAP_ADDR 0x38
+
+#define UARTDM_RXFS_ADDR 0x50
+
+/* Register field Mask Mapping */
+#define UARTDM_SR_PAR_FRAME_BMSK BIT(5)
+#define UARTDM_SR_OVERRUN_BMSK BIT(4)
+#define UARTDM_SR_TXEMT_BMSK BIT(3)
+#define UARTDM_SR_TXRDY_BMSK BIT(2)
+#define UARTDM_SR_RXRDY_BMSK BIT(0)
+
+#define UARTDM_CR_TX_DISABLE_BMSK BIT(3)
+#define UARTDM_CR_RX_DISABLE_BMSK BIT(1)
+#define UARTDM_CR_TX_EN_BMSK BIT(2)
+#define UARTDM_CR_RX_EN_BMSK BIT(0)
+
+/* UARTDM_CR channel_command bit values (register field is bits 8:4) */
+#define RESET_RX 0x10
+#define RESET_TX 0x20
+#define RESET_ERROR_STATUS 0x30
+#define RESET_BREAK_INT 0x40
+#define START_BREAK 0x50
+#define STOP_BREAK 0x60
+#define RESET_CTS 0x70
+#define RESET_STALE_INT 0x80
+#define RFR_LOW 0xD0
+#define RFR_HIGH 0xE0
+#define CR_PROTECTION_EN 0x100
+#define STALE_EVENT_ENABLE 0x500
+#define STALE_EVENT_DISABLE 0x600
+#define FORCE_STALE_EVENT 0x400
+#define CLEAR_TX_READY 0x300
+#define RESET_TX_ERROR 0x800
+#define RESET_TX_DONE 0x810
+
+#define UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK 0xffffff00
+#define UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK 0x3f
+#define UARTDM_MR1_CTS_CTL_BMSK 0x40
+#define UARTDM_MR1_RX_RDY_CTL_BMSK 0x80
+
+#define UARTDM_MR2_ERROR_MODE_BMSK 0x40
+#define UARTDM_MR2_BITS_PER_CHAR_BMSK 0x30
+
+/* bits per character configuration */
+#define FIVE_BPC (0 << 4)
+#define SIX_BPC (1 << 4)
+#define SEVEN_BPC (2 << 4)
+#define EIGHT_BPC (3 << 4)
+
+#define UARTDM_MR2_STOP_BIT_LEN_BMSK 0xc
+#define STOP_BIT_ONE (1 << 2)
+#define STOP_BIT_TWO (3 << 2)
+
+#define UARTDM_MR2_PARITY_MODE_BMSK 0x3
+
+/* Parity configuration */
+#define NO_PARITY 0x0
+#define EVEN_PARITY 0x1
+#define ODD_PARITY 0x2
+#define SPACE_PARITY 0x3
+
+#define UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK 0xffffff80
+#define UARTDM_IPR_STALE_LSB_BMSK 0x1f
+
+/* These can be used for both ISR and IMR register */
+#define UARTDM_ISR_TX_READY_BMSK BIT(7)
+#define UARTDM_ISR_CURRENT_CTS_BMSK BIT(6)
+#define UARTDM_ISR_DELTA_CTS_BMSK BIT(5)
+#define UARTDM_ISR_RXLEV_BMSK BIT(4)
+#define UARTDM_ISR_RXSTALE_BMSK BIT(3)
+#define UARTDM_ISR_RXBREAK_BMSK BIT(2)
+#define UARTDM_ISR_RXHUNT_BMSK BIT(1)
+#define UARTDM_ISR_TXLEV_BMSK BIT(0)
+
+/* Field definitions for UART_DM_DMEN*/
+#define UARTDM_TX_DM_EN_BMSK 0x1
+#define UARTDM_RX_DM_EN_BMSK 0x2
+
+#define UART_FIFOSIZE 64
+#define UARTCLK 7372800
+
+/* Rx DMA request states */
+enum flush_reason {
+ FLUSH_NONE,
+ FLUSH_DATA_READY,
+ FLUSH_DATA_INVALID, /* values after this indicate invalid data */
+ FLUSH_IGNORE = FLUSH_DATA_INVALID,
+ FLUSH_STOP,
+ FLUSH_SHUTDOWN,
+};
+
+/* UART clock states */
+enum msm_hs_clk_states_e {
+ MSM_HS_CLK_PORT_OFF, /* port not in use */
+ MSM_HS_CLK_OFF, /* clock disabled */
+ MSM_HS_CLK_REQUEST_OFF, /* disable after TX and RX flushed */
+ MSM_HS_CLK_ON, /* clock enabled */
+};
+
+/* Track the forced RXSTALE flush during clock off sequence.
+ * These states are only valid during MSM_HS_CLK_REQUEST_OFF */
+enum msm_hs_clk_req_off_state_e {
+ CLK_REQ_OFF_START,
+ CLK_REQ_OFF_RXSTALE_ISSUED,
+ CLK_REQ_OFF_FLUSH_ISSUED,
+ CLK_REQ_OFF_RXSTALE_FLUSHED,
+};
+
+/**
+ * struct msm_hs_tx
+ * @tx_ready_int_en: ok to dma more tx?
+ * @dma_in_flight: tx dma in progress
+ * @xfer: top level DMA command pointer structure
+ * @command_ptr: third level command struct pointer
+ * @command_ptr_ptr: second level command list struct pointer
+ * @mapped_cmd_ptr: DMA view of third level command struct
+ * @mapped_cmd_ptr_ptr: DMA view of second level command list struct
+ * @tx_count: number of bytes to transfer in DMA transfer
+ * @dma_base: DMA view of UART xmit buffer
+ *
+ * This structure describes a single Tx DMA transaction. MSM DMA
+ * commands have two levels of indirection. The top level command
+ * ptr points to a list of command ptr which in turn points to a
+ * single DMA 'command'. In our case each Tx transaction consists
+ * of a single second level pointer pointing to a 'box type' command.
+ */
+struct msm_hs_tx {
+ unsigned int tx_ready_int_en;
+ unsigned int dma_in_flight;
+ struct msm_dmov_cmd xfer;
+ dmov_box *command_ptr;
+ u32 *command_ptr_ptr;
+ dma_addr_t mapped_cmd_ptr;
+ dma_addr_t mapped_cmd_ptr_ptr;
+ int tx_count;
+ dma_addr_t dma_base;
+};
+
+/**
+ * struct msm_hs_rx
+ * @flush: Rx DMA request state
+ * @xfer: top level DMA command pointer structure
+ * @cmdptr_dmaaddr: DMA view of second level command structure
+ * @command_ptr: third level DMA command pointer structure
+ * @command_ptr_ptr: second level DMA command list pointer
+ * @mapped_cmd_ptr: DMA view of the third level command structure
+ * @wait: wait for DMA completion before shutdown
+ * @buffer: destination buffer for RX DMA
+ * @rbuffer: DMA view of buffer
+ * @pool: dma pool out of which coherent rx buffer is allocated
+ * @tty_work: private work-queue for tty flip buffer push task
+ *
+ * This structure describes a single Rx DMA transaction. Rx DMA
+ * transactions use box mode DMA commands.
+ */
+struct msm_hs_rx {
+ enum flush_reason flush;
+ struct msm_dmov_cmd xfer;
+ dma_addr_t cmdptr_dmaaddr;
+ dmov_box *command_ptr;
+ u32 *command_ptr_ptr;
+ dma_addr_t mapped_cmd_ptr;
+ wait_queue_head_t wait;
+ dma_addr_t rbuffer;
+ unsigned char *buffer;
+ struct dma_pool *pool;
+ struct work_struct tty_work;
+};
+
+/**
+ * struct msm_hs_rx_wakeup
+ * @irq: IRQ line to be configured as interrupt source on Rx activity
+ * @ignore: boolean value. 1 = ignore the wakeup interrupt
+ * @rx_to_inject: extra character to be inserted to Rx tty on wakeup
+ * @inject_rx: 1 = insert rx_to_inject. 0 = do not insert extra character
+ *
+ * This is an optional structure required for UART Rx GPIO IRQ based
+ * wakeup from low power state. UART wakeup can be triggered by RX activity
+ * (using a wakeup GPIO on the UART RX pin). This should only be used if
+ * there is not a wakeup GPIO on the UART CTS, and the first RX byte is
+ * known (e.g., with the Bluetooth Texas Instruments HCILL protocol),
+ * since the first RX byte will always be lost. RTS will be asserted even
+ * while the UART is clocked off in this mode of operation.
+ */
+struct msm_hs_rx_wakeup {
+ int irq; /* < 0 indicates low power wakeup disabled */
+ unsigned char ignore;
+ unsigned char inject_rx;
+ char rx_to_inject;
+};
+
+/**
+ * struct msm_hs_port
+ * @uport: embedded uart port structure
+ * @imr_reg: shadow value of UARTDM_IMR
+ * @clk: uart input clock handle
+ * @tx: Tx transaction related data structure
+ * @rx: Rx transaction related data structure
+ * @dma_tx_channel: Tx DMA command channel
+ * @dma_rx_channel: Rx DMA command channel
+ * @dma_tx_crci: Tx channel rate control interface number
+ * @dma_rx_crci: Rx channel rate control interface number
+ * @clk_off_timer: Timer to poll DMA event completion before clock off
+ * @clk_off_delay: clk_off_timer poll interval
+ * @clk_state: overall clock state
+ * @clk_req_off_state: post flush clock states
+ * @rx_wakeup: optional rx_wakeup feature related data
+ * @exit_lpm_cb: optional callback to exit low power mode
+ *
+ * Low level serial port structure.
+ */
+struct msm_hs_port {
+ struct uart_port uport;
+ unsigned long imr_reg;
+ struct clk *clk;
+ struct msm_hs_tx tx;
+ struct msm_hs_rx rx;
+
+ int dma_tx_channel;
+ int dma_rx_channel;
+ int dma_tx_crci;
+ int dma_rx_crci;
+
+ struct hrtimer clk_off_timer;
+ ktime_t clk_off_delay;
+ enum msm_hs_clk_states_e clk_state;
+ enum msm_hs_clk_req_off_state_e clk_req_off_state;
+
+ struct msm_hs_rx_wakeup rx_wakeup;
+ void (*exit_lpm_cb)(struct uart_port *);
+};
+
+#define MSM_UARTDM_BURST_SIZE 16 /* DM burst size (in bytes) */
+#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
+#define UARTDM_RX_BUF_SIZE 512
+
+#define UARTDM_NR 2
+
+static struct msm_hs_port q_uart_port[UARTDM_NR];
+static struct platform_driver msm_serial_hs_platform_driver;
+static struct uart_driver msm_hs_driver;
+static struct uart_ops msm_hs_ops;
+static struct workqueue_struct *msm_hs_workqueue;
+
+#define UARTDM_TO_MSM(uart_port) \
+ container_of((uart_port), struct msm_hs_port, uport)
+
+static unsigned int use_low_power_rx_wakeup(struct msm_hs_port
+ *msm_uport)
+{
+ return (msm_uport->rx_wakeup.irq >= 0);
+}
+
+static unsigned int msm_hs_read(struct uart_port *uport,
+ unsigned int offset)
+{
+ return ioread32(uport->membase + offset);
+}
+
+static void msm_hs_write(struct uart_port *uport, unsigned int offset,
+ unsigned int value)
+{
+ iowrite32(value, uport->membase + offset);
+}
+
+static void msm_hs_release_port(struct uart_port *port)
+{
+ iounmap(port->membase);
+}
+
+static int msm_hs_request_port(struct uart_port *port)
+{
+ port->membase = ioremap(port->mapbase, PAGE_SIZE);
+ if (unlikely(!port->membase))
+ return -ENOMEM;
+
+ /* configure the CR Protection to Enable */
+ msm_hs_write(port, UARTDM_CR_ADDR, CR_PROTECTION_EN);
+ return 0;
+}
+
+static int __devexit msm_hs_remove(struct platform_device *pdev)
+{
+
+ struct msm_hs_port *msm_uport;
+ struct device *dev;
+
+ if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
+ printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ msm_uport = &q_uart_port[pdev->id];
+ dev = msm_uport->uport.dev;
+
+ dma_unmap_single(dev, msm_uport->rx.mapped_cmd_ptr, sizeof(dmov_box),
+ DMA_TO_DEVICE);
+ dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
+ msm_uport->rx.rbuffer);
+ dma_pool_destroy(msm_uport->rx.pool);
+
+ dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32 *),
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32 *),
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box),
+ DMA_TO_DEVICE);
+
+ uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
+ clk_put(msm_uport->clk);
+
+ /* Free the tx resources */
+ kfree(msm_uport->tx.command_ptr);
+ kfree(msm_uport->tx.command_ptr_ptr);
+
+ /* Free the rx resources */
+ kfree(msm_uport->rx.command_ptr);
+ kfree(msm_uport->rx.command_ptr_ptr);
+
+ iounmap(msm_uport->uport.membase);
+
+ return 0;
+}
+
+static int msm_hs_init_clk_locked(struct uart_port *uport)
+{
+ int ret;
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ ret = clk_enable(msm_uport->clk);
+ if (ret) {
+ printk(KERN_ERR "Error could not turn on UART clk\n");
+ return ret;
+ }
+
+ /* Set up the MREG/NREG/DREG/MNDREG */
+ ret = clk_set_rate(msm_uport->clk, uport->uartclk);
+ if (ret) {
+ printk(KERN_WARNING "Error setting clock rate on UART\n");
+ clk_disable(msm_uport->clk);
+ return ret;
+ }
+
+ msm_uport->clk_state = MSM_HS_CLK_ON;
+ return 0;
+}
+
+/* Enable and Disable clocks (Used for power management) */
+static void msm_hs_pm(struct uart_port *uport, unsigned int state,
+ unsigned int oldstate)
+{
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ if (use_low_power_rx_wakeup(msm_uport) ||
+ msm_uport->exit_lpm_cb)
+ return; /* Ignore Linux PM states; use the msm_hs_request_clock API */
+
+ switch (state) {
+ case 0:
+ clk_enable(msm_uport->clk);
+ break;
+ case 3:
+ clk_disable(msm_uport->clk);
+ break;
+ default:
+ dev_err(uport->dev, "msm_serial: Unknown PM state %d\n",
+ state);
+ }
+}
+
+/*
+ * Program the UARTDM_CSR register with the correct bit rate.
+ *
+ * Interrupts should be disabled before we are called, as we modify
+ * both the baud rate and the receive stale interrupt level, which
+ * depends on the bit rate. The goal is to signal stale roughly 8 ms
+ * after the last character:
+ *	roundup(((bit rate * .008) / 10) + 1)
+ */
+static void msm_hs_set_bps_locked(struct uart_port *uport,
+ unsigned int bps)
+{
+ unsigned long rxstale;
+ unsigned long data;
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ switch (bps) {
+ case 300:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_75);
+ rxstale = 1;
+ break;
+ case 600:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_150);
+ rxstale = 1;
+ break;
+ case 1200:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_300);
+ rxstale = 1;
+ break;
+ case 2400:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_600);
+ rxstale = 1;
+ break;
+ case 4800:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_1200);
+ rxstale = 1;
+ break;
+ case 9600:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_2400);
+ rxstale = 2;
+ break;
+ case 14400:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_3600);
+ rxstale = 3;
+ break;
+ case 19200:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_4800);
+ rxstale = 4;
+ break;
+ case 28800:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_7200);
+ rxstale = 6;
+ break;
+ case 38400:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_9600);
+ rxstale = 8;
+ break;
+ case 57600:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_14400);
+ rxstale = 16;
+ break;
+ case 76800:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_19200);
+ rxstale = 16;
+ break;
+ case 115200:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_28800);
+ rxstale = 31;
+ break;
+ case 230400:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_57600);
+ rxstale = 31;
+ break;
+ case 460800:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_115200);
+ rxstale = 31;
+ break;
+ case 4000000:
+ case 3686400:
+ case 3200000:
+ case 3500000:
+ case 3000000:
+ case 2500000:
+ case 1500000:
+ case 1152000:
+ case 1000000:
+ case 921600:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_115200);
+ rxstale = 31;
+ break;
+ default:
+ msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_2400);
+ /* default to 9600 */
+ bps = 9600;
+ rxstale = 2;
+ break;
+ }
+ if (bps > 460800)
+ uport->uartclk = bps * 16;
+ else
+ uport->uartclk = UARTCLK;
+
+ if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
+ printk(KERN_WARNING "Error setting clock rate on UART\n");
+ return;
+ }
+
+ data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
+ data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
+
+ msm_hs_write(uport, UARTDM_IPR_ADDR, data);
+}
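+
+/*
+ * Illustrative note on the clock selection above (example value, not driver
+ * code): for rates above 460800 the controller clock is asked for 16x the
+ * baud rate, so a 3.2 Mbps request calls clk_set_rate() with
+ * 3200000 * 16 = 51200000 Hz; every slower rate keeps the fixed UARTCLK and
+ * relies on the CSR divider written in the switch statement.
+ */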
+
+/*
+ * termios : new ktermios
+ * oldtermios: old ktermios previous setting
+ *
+ * Configure the serial port
+ */
+static void msm_hs_set_termios(struct uart_port *uport,
+ struct ktermios *termios,
+ struct ktermios *oldtermios)
+{
+ unsigned int bps;
+ unsigned long data;
+ unsigned long flags;
+ unsigned int c_cflag = termios->c_cflag;
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ spin_lock_irqsave(&uport->lock, flags);
+ clk_enable(msm_uport->clk);
+
+ /* 300 is the minimum baud rate supported by the driver */
+ bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);
+
+ /* Temporarily remap 200 baud to 3.2 Mbps */
+ if (bps == 200)
+ bps = 3200000;
+
+ msm_hs_set_bps_locked(uport, bps);
+
+ data = msm_hs_read(uport, UARTDM_MR2_ADDR);
+ data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
+ /* set parity */
+ if (PARENB == (c_cflag & PARENB)) {
+ if (PARODD == (c_cflag & PARODD))
+ data |= ODD_PARITY;
+ else if (CMSPAR == (c_cflag & CMSPAR))
+ data |= SPACE_PARITY;
+ else
+ data |= EVEN_PARITY;
+ }
+
+ /* Set bits per char */
+ data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;
+
+ switch (c_cflag & CSIZE) {
+ case CS5:
+ data |= FIVE_BPC;
+ break;
+ case CS6:
+ data |= SIX_BPC;
+ break;
+ case CS7:
+ data |= SEVEN_BPC;
+ break;
+ default:
+ data |= EIGHT_BPC;
+ break;
+ }
+ /* stop bits */
+ if (c_cflag & CSTOPB) {
+ data |= STOP_BIT_TWO;
+ } else {
+ /* otherwise 1 stop bit */
+ data |= STOP_BIT_ONE;
+ }
+ data |= UARTDM_MR2_ERROR_MODE_BMSK;
+ /* write parity/bits per char/stop bit configuration */
+ msm_hs_write(uport, UARTDM_MR2_ADDR, data);
+
+ /* Configure HW flow control */
+ data = msm_hs_read(uport, UARTDM_MR1_ADDR);
+
+ data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);
+
+ if (c_cflag & CRTSCTS) {
+ data |= UARTDM_MR1_CTS_CTL_BMSK;
+ data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+ }
+
+ msm_hs_write(uport, UARTDM_MR1_ADDR, data);
+
+ uport->ignore_status_mask = termios->c_iflag & INPCK;
+ uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
+ uport->read_status_mask = (termios->c_cflag & CREAD);
+
+ msm_hs_write(uport, UARTDM_IMR_ADDR, 0);
+
+ /* Set Transmit software time out */
+ uart_update_timeout(uport, c_cflag, bps);
+
+ msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
+ msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
+
+ if (msm_uport->rx.flush == FLUSH_NONE) {
+ msm_uport->rx.flush = FLUSH_IGNORE;
+ msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1);
+ }
+
+ msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+
+ clk_disable(msm_uport->clk);
+ spin_unlock_irqrestore(&uport->lock, flags);
+}
+
+/*
+ * Standard API: report whether the transmitter is empty, i.e. any
+ * character in the transmit shift register has been sent.
+ */
+static unsigned int msm_hs_tx_empty(struct uart_port *uport)
+{
+ unsigned int data;
+ unsigned int ret = 0;
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ clk_enable(msm_uport->clk);
+
+ data = msm_hs_read(uport, UARTDM_SR_ADDR);
+ if (data & UARTDM_SR_TXEMT_BMSK)
+ ret = TIOCSER_TEMT;
+
+ clk_disable(msm_uport->clk);
+
+ return ret;
+}
+
+/*
+ * Standard API, Stop transmitter.
+ * Any character in the transmit shift register is still sent, as is
+ * the current data mover transfer.
+ */
+static void msm_hs_stop_tx_locked(struct uart_port *uport)
+{
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ msm_uport->tx.tx_ready_int_en = 0;
+}
+
+/*
+ * Standard API, Stop receiver as soon as possible.
+ *
+ * This function immediately terminates the operation of the channel
+ * receiver; any incoming characters are lost. None of the receiver
+ * status bits are affected by this command, and characters already in
+ * the receive FIFO remain there.
+ */
+static void msm_hs_stop_rx_locked(struct uart_port *uport)
+{
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ unsigned int data;
+
+ clk_enable(msm_uport->clk);
+
+ /* disable dlink */
+ data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
+ data &= ~UARTDM_RX_DM_EN_BMSK;
+ msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
+
+ /* Disable the receiver */
+ if (msm_uport->rx.flush == FLUSH_NONE)
+ msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1);
+
+ if (msm_uport->rx.flush != FLUSH_SHUTDOWN)
+ msm_uport->rx.flush = FLUSH_STOP;
+
+ clk_disable(msm_uport->clk);
+}
+
+/* Transmit the next chunk of data */
+static void msm_hs_submit_tx_locked(struct uart_port *uport)
+{
+ int left;
+ int tx_count;
+ dma_addr_t src_addr;
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ struct msm_hs_tx *tx = &msm_uport->tx;
+ struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
+
+ if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
+ msm_hs_stop_tx_locked(uport);
+ return;
+ }
+
+ tx->dma_in_flight = 1;
+
+ tx_count = uart_circ_chars_pending(tx_buf);
+
+ if (UARTDM_TX_BUF_SIZE < tx_count)
+ tx_count = UARTDM_TX_BUF_SIZE;
+
+ left = UART_XMIT_SIZE - tx_buf->tail;
+
+ if (tx_count > left)
+ tx_count = left;
+
+ src_addr = tx->dma_base + tx_buf->tail;
+ dma_sync_single_for_device(uport->dev, src_addr, tx_count,
+ DMA_TO_DEVICE);
+
+ tx->command_ptr->num_rows = (((tx_count + 15) >> 4) << 16) |
+ ((tx_count + 15) >> 4);
+ tx->command_ptr->src_row_addr = src_addr;
+
+ dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr,
+ sizeof(dmov_box), DMA_TO_DEVICE);
+
+ *tx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(tx->mapped_cmd_ptr);
+
+ dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr,
+ sizeof(u32 *), DMA_TO_DEVICE);
+
+ /* Save tx_count to use in Callback */
+ tx->tx_count = tx_count;
+ msm_hs_write(uport, UARTDM_NCF_TX_ADDR, tx_count);
+
+ /* Disable the tx_ready interrupt */
+ msm_uport->imr_reg &= ~UARTDM_ISR_TX_READY_BMSK;
+ msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+ msm_dmov_enqueue_cmd(msm_uport->dma_tx_channel, &tx->xfer);
+}
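+
+/*
+ * Worked example of the box-command packing above (illustrative only): the
+ * data mover moves 16-byte rows, so for tx_count = 100 the driver rounds up
+ * to (100 + 15) >> 4 = 7 rows and writes the same count into both halves of
+ * num_rows, i.e. num_rows = (7 << 16) | 7 = 0x00070007.
+ */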
+
+/* Start to receive the next chunk of data */
+static void msm_hs_start_rx_locked(struct uart_port *uport)
+{
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
+ msm_hs_write(uport, UARTDM_DMRX_ADDR, UARTDM_RX_BUF_SIZE);
+ msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_ENABLE);
+ msm_uport->imr_reg |= UARTDM_ISR_RXLEV_BMSK;
+ msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+
+ msm_uport->rx.flush = FLUSH_NONE;
+ msm_dmov_enqueue_cmd(msm_uport->dma_rx_channel, &msm_uport->rx.xfer);
+
+ /* might have finished RX and be ready to clock off */
+ hrtimer_start(&msm_uport->clk_off_timer, msm_uport->clk_off_delay,
+ HRTIMER_MODE_REL);
+}
+
+/* Enable the transmitter Interrupt */
+static void msm_hs_start_tx_locked(struct uart_port *uport)
+{
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ clk_enable(msm_uport->clk);
+
+ if (msm_uport->exit_lpm_cb)
+ msm_uport->exit_lpm_cb(uport);
+
+ if (msm_uport->tx.tx_ready_int_en == 0) {
+ msm_uport->tx.tx_ready_int_en = 1;
+ msm_hs_submit_tx_locked(uport);
+ }
+
+ clk_disable(msm_uport->clk);
+}
+
+/*
+ * This routine is called when we are done with a DMA transfer
+ *
+ * This routine is registered with Data mover when we set
+ * up a Data Mover transfer. It is called from Data mover ISR
+ * when the DMA transfer is done.
+ */
+static void msm_hs_dmov_tx_callback(struct msm_dmov_cmd *cmd_ptr,
+ unsigned int result,
+ struct msm_dmov_errdata *err)
+{
+ unsigned long flags;
+ struct msm_hs_port *msm_uport;
+
+ /* DMA did not finish properly */
+ WARN_ON((((result & RSLT_FIFO_CNTR_BMSK) >> 28) == 1) &&
+ !(result & RSLT_VLD));
+
+ msm_uport = container_of(cmd_ptr, struct msm_hs_port, tx.xfer);
+
+ spin_lock_irqsave(&msm_uport->uport.lock, flags);
+ clk_enable(msm_uport->clk);
+
+ msm_uport->imr_reg |= UARTDM_ISR_TX_READY_BMSK;
+ msm_hs_write(&msm_uport->uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+
+ clk_disable(msm_uport->clk);
+ spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+}
+
+/*
+ * This routine is called when we are done with a DMA transfer, or when
+ * a flush has been sent to the data mover driver.
+ *
+ * This routine is registered with Data mover when we set up a Data Mover
+ * transfer. It is called from Data mover ISR when the DMA transfer is done.
+ */
+static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
+ unsigned int result,
+ struct msm_dmov_errdata *err)
+{
+ int retval;
+ int rx_count;
+ unsigned long status;
+ unsigned int error_f = 0;
+ unsigned long flags;
+ unsigned int flush;
+ struct tty_struct *tty;
+ struct uart_port *uport;
+ struct msm_hs_port *msm_uport;
+
+ msm_uport = container_of(cmd_ptr, struct msm_hs_port, rx.xfer);
+ uport = &msm_uport->uport;
+
+ spin_lock_irqsave(&uport->lock, flags);
+ clk_enable(msm_uport->clk);
+
+ tty = uport->state->port.tty;
+
+ msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
+
+ status = msm_hs_read(uport, UARTDM_SR_ADDR);
+
+ /* an overrun error is not tied to a particular character in the FIFO */
+ if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
+ (uport->read_status_mask & CREAD))) {
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ uport->icount.buf_overrun++;
+ error_f = 1;
+ }
+
+ if (!(uport->ignore_status_mask & INPCK))
+ status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);
+
+ if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
+ /* Can not tell difference between parity & frame error */
+ uport->icount.parity++;
+ error_f = 1;
+ if (uport->ignore_status_mask & IGNPAR)
+ tty_insert_flip_char(tty, 0, TTY_PARITY);
+ }
+
+ if (error_f)
+ msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
+
+ if (msm_uport->clk_req_off_state == CLK_REQ_OFF_FLUSH_ISSUED)
+ msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_FLUSHED;
+
+ flush = msm_uport->rx.flush;
+ if (flush == FLUSH_IGNORE)
+ msm_hs_start_rx_locked(uport);
+ if (flush == FLUSH_STOP)
+ msm_uport->rx.flush = FLUSH_SHUTDOWN;
+ if (flush >= FLUSH_DATA_INVALID)
+ goto out;
+
+ rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);
+
+ if (0 != (uport->read_status_mask & CREAD)) {
+ retval = tty_insert_flip_string(tty, msm_uport->rx.buffer,
+ rx_count);
+ BUG_ON(retval != rx_count);
+ }
+
+ msm_hs_start_rx_locked(uport);
+
+out:
+ clk_disable(msm_uport->clk);
+
+ spin_unlock_irqrestore(&uport->lock, flags);
+
+ if (flush < FLUSH_DATA_INVALID)
+ queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work);
+}
+
+static void msm_hs_tty_flip_buffer_work(struct work_struct *work)
+{
+ struct msm_hs_port *msm_uport =
+ container_of(work, struct msm_hs_port, rx.tty_work);
+ struct tty_struct *tty = msm_uport->uport.state->port.tty;
+
+ tty_flip_buffer_push(tty);
+}
+
+/*
+ * Standard API, Current states of modem control inputs
+ *
+ * Since CTS can be handled entirely by HARDWARE we always
+ * indicate clear to send and count on the TX FIFO to block when
+ * it fills up.
+ *
+ * - TIOCM_DCD
+ * - TIOCM_CTS
+ * - TIOCM_DSR
+ * - TIOCM_RI
+ * DCD and DSR are unsupported and always reported high; RI is reported low.
+ */
+static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
+{
+ return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+}
+
+/*
+ * True enables UART auto RFR, which indicates we are ready for data if the RX
+ * buffer is not full. False disables auto RFR, and deasserts RFR to indicate
+ * we are not ready for data. Must be called with UART clock on.
+ */
+static void set_rfr_locked(struct uart_port *uport, int auto_rfr)
+{
+ unsigned int data;
+
+ data = msm_hs_read(uport, UARTDM_MR1_ADDR);
+
+ if (auto_rfr) {
+ /* enable auto ready-for-receiving */
+ data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+ msm_hs_write(uport, UARTDM_MR1_ADDR, data);
+ } else {
+ /* disable auto ready-for-receiving */
+ data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
+ msm_hs_write(uport, UARTDM_MR1_ADDR, data);
+ /* RFR is active low, set high */
+ msm_hs_write(uport, UARTDM_CR_ADDR, RFR_HIGH);
+ }
+}
+
+/*
+ * Standard API, used to set or clear RFR
+ */
+static void msm_hs_set_mctrl_locked(struct uart_port *uport,
+ unsigned int mctrl)
+{
+ unsigned int auto_rfr;
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ clk_enable(msm_uport->clk);
+
+ auto_rfr = TIOCM_RTS & mctrl ? 1 : 0;
+ set_rfr_locked(uport, auto_rfr);
+
+ clk_disable(msm_uport->clk);
+}
+
+/* Standard API, Enable modem status (CTS) interrupt */
+static void msm_hs_enable_ms_locked(struct uart_port *uport)
+{
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ clk_enable(msm_uport->clk);
+
+ /* Enable DELTA_CTS Interrupt */
+ msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
+ msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+
+ clk_disable(msm_uport->clk);
+
+}
+
+/*
+ * Standard API, Break Signal
+ *
+ * Control the transmission of a break signal: ctl == 0 terminates the
+ * break signal, ctl != 0 starts it.
+ */
+static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
+{
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ clk_enable(msm_uport->clk);
+ msm_hs_write(uport, UARTDM_CR_ADDR, ctl ? START_BREAK : STOP_BREAK);
+ clk_disable(msm_uport->clk);
+}
+
+static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&uport->lock, flags);
+ if (cfg_flags & UART_CONFIG_TYPE) {
+ uport->type = PORT_MSM;
+ msm_hs_request_port(uport);
+ }
+ spin_unlock_irqrestore(&uport->lock, flags);
+}
+
+/* Handle CTS changes (Called from interrupt handler) */
+static void msm_hs_handle_delta_cts(struct uart_port *uport)
+{
+ unsigned long flags;
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ spin_lock_irqsave(&uport->lock, flags);
+ clk_enable(msm_uport->clk);
+
+ /* clear interrupt */
+ msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
+ uport->icount.cts++;
+
+ clk_disable(msm_uport->clk);
+ spin_unlock_irqrestore(&uport->lock, flags);
+
+ /* clear the IOCTL TIOCMIWAIT if called */
+ wake_up_interruptible(&uport->state->port.delta_msr_wait);
+}
+
+/* Check whether the TX path is flushed, and if so clock off.
+ * Returns:
+ *   0  did not clock off, caller should retry (still sending the final byte)
+ *  -1  did not clock off, do not retry
+ *   1  clocked off
+ */
+static int msm_hs_check_clock_off_locked(struct uart_port *uport)
+{
+ unsigned long sr_status;
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ struct circ_buf *tx_buf = &uport->state->xmit;
+
+ /* Cancel if tx tty buffer is not empty, dma is in flight,
+ * or tx fifo is not empty, or rx fifo is not empty */
+ if (msm_uport->clk_state != MSM_HS_CLK_REQUEST_OFF ||
+ !uart_circ_empty(tx_buf) || msm_uport->tx.dma_in_flight ||
+ (msm_uport->imr_reg & UARTDM_ISR_TXLEV_BMSK) ||
+ !(msm_uport->imr_reg & UARTDM_ISR_RXLEV_BMSK)) {
+ return -1;
+ }
+
+ /* Make sure the uart is finished with the last byte */
+ sr_status = msm_hs_read(uport, UARTDM_SR_ADDR);
+ if (!(sr_status & UARTDM_SR_TXEMT_BMSK))
+ return 0; /* retry */
+
+ /* Make sure forced RXSTALE flush complete */
+ switch (msm_uport->clk_req_off_state) {
+ case CLK_REQ_OFF_START:
+ msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED;
+ msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT);
+ return 0; /* RXSTALE flush not complete - retry */
+ case CLK_REQ_OFF_RXSTALE_ISSUED:
+ case CLK_REQ_OFF_FLUSH_ISSUED:
+ return 0; /* RXSTALE flush not complete - retry */
+ case CLK_REQ_OFF_RXSTALE_FLUSHED:
+ break; /* continue */
+ }
+
+ if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
+ if (msm_uport->rx.flush == FLUSH_NONE)
+ msm_hs_stop_rx_locked(uport);
+ return 0; /* come back later to really clock off */
+ }
+
+ /* we really want to clock off */
+ clk_disable(msm_uport->clk);
+ msm_uport->clk_state = MSM_HS_CLK_OFF;
+
+ if (use_low_power_rx_wakeup(msm_uport)) {
+ msm_uport->rx_wakeup.ignore = 1;
+ enable_irq(msm_uport->rx_wakeup.irq);
+ }
+ return 1;
+}
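+
+/*
+ * Summary of the clock-off handshake used above:
+ * msm_hs_request_clock_off_locked() sets clk_req_off_state to
+ * CLK_REQ_OFF_START; this function then forces a stale event and advances to
+ * CLK_REQ_OFF_RXSTALE_ISSUED; the stale interrupt in msm_hs_isr() advances to
+ * CLK_REQ_OFF_FLUSH_ISSUED; and the RX data mover callback finally marks
+ * CLK_REQ_OFF_RXSTALE_FLUSHED, after which the clock can actually be gated.
+ */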
+
+static enum hrtimer_restart msm_hs_clk_off_retry(struct hrtimer *timer)
+{
+ unsigned long flags;
+ int ret = HRTIMER_NORESTART;
+ struct msm_hs_port *msm_uport = container_of(timer, struct msm_hs_port,
+ clk_off_timer);
+ struct uart_port *uport = &msm_uport->uport;
+
+ spin_lock_irqsave(&uport->lock, flags);
+
+ if (!msm_hs_check_clock_off_locked(uport)) {
+ hrtimer_forward_now(timer, msm_uport->clk_off_delay);
+ ret = HRTIMER_RESTART;
+ }
+
+ spin_unlock_irqrestore(&uport->lock, flags);
+
+ return ret;
+}
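+
+/*
+ * The retry timer above re-arms itself with msm_uport->clk_off_delay, which
+ * msm_hs_probe() initialises to 1 ms, so the "still busy" cases reported by
+ * msm_hs_check_clock_off_locked() are simply polled once per millisecond
+ * until the port is quiescent.
+ */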
+
+static irqreturn_t msm_hs_isr(int irq, void *dev)
+{
+ unsigned long flags;
+ unsigned long isr_status;
+ struct msm_hs_port *msm_uport = dev;
+ struct uart_port *uport = &msm_uport->uport;
+ struct circ_buf *tx_buf = &uport->state->xmit;
+ struct msm_hs_tx *tx = &msm_uport->tx;
+ struct msm_hs_rx *rx = &msm_uport->rx;
+
+ spin_lock_irqsave(&uport->lock, flags);
+
+ isr_status = msm_hs_read(uport, UARTDM_MISR_ADDR);
+
+ /* Uart RX starting */
+ if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
+ msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
+ msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+ }
+ /* Stale rx interrupt */
+ if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
+ msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
+ msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
+
+ if (msm_uport->clk_req_off_state == CLK_REQ_OFF_RXSTALE_ISSUED)
+ msm_uport->clk_req_off_state =
+ CLK_REQ_OFF_FLUSH_ISSUED;
+ if (rx->flush == FLUSH_NONE) {
+ rx->flush = FLUSH_DATA_READY;
+ msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1);
+ }
+ }
+ /* tx ready interrupt */
+ if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
+ /* Clear TX Ready */
+ msm_hs_write(uport, UARTDM_CR_ADDR, CLEAR_TX_READY);
+
+ if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
+ msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
+ msm_hs_write(uport, UARTDM_IMR_ADDR,
+ msm_uport->imr_reg);
+ }
+
+ /* Complete DMA TX transactions and submit new transactions */
+ tx_buf->tail = (tx_buf->tail + tx->tx_count) & ~UART_XMIT_SIZE;
+
+ tx->dma_in_flight = 0;
+
+ uport->icount.tx += tx->tx_count;
+ if (tx->tx_ready_int_en)
+ msm_hs_submit_tx_locked(uport);
+
+ if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
+ uart_write_wakeup(uport);
+ }
+ if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
+ /* TX FIFO is empty */
+ msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
+ msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+ if (!msm_hs_check_clock_off_locked(uport))
+ hrtimer_start(&msm_uport->clk_off_timer,
+ msm_uport->clk_off_delay,
+ HRTIMER_MODE_REL);
+ }
+
+ /* Change in CTS interrupt */
+ if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
+ msm_hs_handle_delta_cts(uport);
+
+ spin_unlock_irqrestore(&uport->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+void msm_hs_request_clock_off_locked(struct uart_port *uport)
+{
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ if (msm_uport->clk_state == MSM_HS_CLK_ON) {
+ msm_uport->clk_state = MSM_HS_CLK_REQUEST_OFF;
+ msm_uport->clk_req_off_state = CLK_REQ_OFF_START;
+ if (!use_low_power_rx_wakeup(msm_uport))
+ set_rfr_locked(uport, 0);
+ msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
+ msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+ }
+}
+
+/**
+ * msm_hs_request_clock_off - request to (i.e. asynchronously) turn off uart
+ * clock once pending TX is flushed and Rx DMA command is terminated.
+ * @uport: uart_port structure for the device instance.
+ *
+ * This function puts the device into a partially active low power mode. It
+ * waits for all pending tx transactions to complete, flushes the ongoing Rx
+ * DMA command and terminates the UART side Rx transaction, puts the UART HW
+ * into non-DMA mode and then clocks off the device. A client calls this when
+ * no UART data is expected. msm_hs_request_clock_on() must be called before
+ * any further UART data can be sent or received.
+ */
+void msm_hs_request_clock_off(struct uart_port *uport)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&uport->lock, flags);
+ msm_hs_request_clock_off_locked(uport);
+ spin_unlock_irqrestore(&uport->lock, flags);
+}
+
+void msm_hs_request_clock_on_locked(struct uart_port *uport)
+{
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ unsigned int data;
+
+ switch (msm_uport->clk_state) {
+ case MSM_HS_CLK_OFF:
+ clk_enable(msm_uport->clk);
+ disable_irq_nosync(msm_uport->rx_wakeup.irq);
+ /* fall-through */
+ case MSM_HS_CLK_REQUEST_OFF:
+ if (msm_uport->rx.flush == FLUSH_STOP ||
+ msm_uport->rx.flush == FLUSH_SHUTDOWN) {
+ msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
+ data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
+ data |= UARTDM_RX_DM_EN_BMSK;
+ msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
+ }
+ hrtimer_try_to_cancel(&msm_uport->clk_off_timer);
+ if (msm_uport->rx.flush == FLUSH_SHUTDOWN)
+ msm_hs_start_rx_locked(uport);
+ if (!use_low_power_rx_wakeup(msm_uport))
+ set_rfr_locked(uport, 1);
+ if (msm_uport->rx.flush == FLUSH_STOP)
+ msm_uport->rx.flush = FLUSH_IGNORE;
+ msm_uport->clk_state = MSM_HS_CLK_ON;
+ break;
+ case MSM_HS_CLK_ON:
+ break;
+ case MSM_HS_CLK_PORT_OFF:
+ break;
+ }
+}
+
+/**
+ * msm_hs_request_clock_on - Switch the device from partially active low
+ * power mode to fully active (i.e. clock on) mode.
+ * @uport: uart_port structure for the device.
+ *
+ * This function switches on the input clock, puts UART HW into DMA mode
+ * and enqueues an Rx DMA command if the device was in partially active
+ * mode. It has no effect if called with the device in inactive state.
+ */
+void msm_hs_request_clock_on(struct uart_port *uport)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&uport->lock, flags);
+ msm_hs_request_clock_on_locked(uport);
+ spin_unlock_irqrestore(&uport->lock, flags);
+}
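+
+/*
+ * Hypothetical client usage (sketch only; the client code is made up for
+ * illustration):
+ *
+ *	msm_hs_request_clock_on(uport);     - call before UART data is expected
+ *	... exchange data over the port ...
+ *	msm_hs_request_clock_off(uport);    - call once no more data is expected
+ *
+ * as described in the kernel-doc comments for these two functions.
+ */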
+
+static irqreturn_t msm_hs_rx_wakeup_isr(int irq, void *dev)
+{
+ unsigned int wakeup = 0;
+ unsigned long flags;
+ struct msm_hs_port *msm_uport = dev;
+ struct uart_port *uport = &msm_uport->uport;
+ struct tty_struct *tty = NULL;
+
+ spin_lock_irqsave(&uport->lock, flags);
+ if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
+ /* ignore the first irq - it is a pending irq that occurred
+ * before enable_irq() */
+ if (msm_uport->rx_wakeup.ignore)
+ msm_uport->rx_wakeup.ignore = 0;
+ else
+ wakeup = 1;
+ }
+
+ if (wakeup) {
+ /* the uart was clocked off during an rx, wake up and
+ * optionally inject char into tty rx */
+ msm_hs_request_clock_on_locked(uport);
+ if (msm_uport->rx_wakeup.inject_rx) {
+ tty = uport->state->port.tty;
+ tty_insert_flip_char(tty,
+ msm_uport->rx_wakeup.rx_to_inject,
+ TTY_NORMAL);
+ queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work);
+ }
+ }
+
+ spin_unlock_irqrestore(&uport->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static const char *msm_hs_type(struct uart_port *port)
+{
+ return (port->type == PORT_MSM) ? "MSM_HS_UART" : NULL;
+}
+
+/* Called when port is opened */
+static int msm_hs_startup(struct uart_port *uport)
+{
+ int ret;
+ int rfr_level;
+ unsigned long flags;
+ unsigned int data;
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ struct circ_buf *tx_buf = &uport->state->xmit;
+ struct msm_hs_tx *tx = &msm_uport->tx;
+ struct msm_hs_rx *rx = &msm_uport->rx;
+
+ rfr_level = uport->fifosize;
+ if (rfr_level > 16)
+ rfr_level -= 16;
+
+ tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
+ DMA_TO_DEVICE);
+
+ /* do not let tty layer execute RX in global workqueue, use a
+ * dedicated workqueue managed by this driver */
+ uport->state->port.tty->low_latency = 1;
+
+ /* turn on uart clk */
+ ret = msm_hs_init_clk_locked(uport);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "Turning on uartclk failed!\n");
+ goto err_msm_hs_init_clk;
+ }
+
+ /* Set auto RFR Level */
+ data = msm_hs_read(uport, UARTDM_MR1_ADDR);
+ data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
+ data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
+ data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
+ data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
+ msm_hs_write(uport, UARTDM_MR1_ADDR, data);
+
+ /* Make sure RXSTALE count is non-zero */
+ data = msm_hs_read(uport, UARTDM_IPR_ADDR);
+ if (!data) {
+ data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
+ msm_hs_write(uport, UARTDM_IPR_ADDR, data);
+ }
+
+ /* Enable Data Mover Mode */
+ data = UARTDM_TX_DM_EN_BMSK | UARTDM_RX_DM_EN_BMSK;
+ msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
+
+ /* Reset TX */
+ msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
+ msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
+ msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
+ msm_hs_write(uport, UARTDM_CR_ADDR, RESET_BREAK_INT);
+ msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
+ msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
+ msm_hs_write(uport, UARTDM_CR_ADDR, RFR_LOW);
+ /* Turn on Uart Receiver */
+ msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_EN_BMSK);
+
+ /* Turn on Uart Transmitter */
+ msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_EN_BMSK);
+
+ /* Initialize the tx */
+ tx->tx_ready_int_en = 0;
+ tx->dma_in_flight = 0;
+
+ tx->xfer.complete_func = msm_hs_dmov_tx_callback;
+ tx->xfer.execute_func = NULL;
+
+ tx->command_ptr->cmd = CMD_LC |
+ CMD_DST_CRCI(msm_uport->dma_tx_crci) | CMD_MODE_BOX;
+
+ tx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
+ | (MSM_UARTDM_BURST_SIZE);
+
+ tx->command_ptr->row_offset = (MSM_UARTDM_BURST_SIZE << 16);
+
+ tx->command_ptr->dst_row_addr =
+ msm_uport->uport.mapbase + UARTDM_TF_ADDR;
+
+
+ /* Turn on Uart Receive */
+ rx->xfer.complete_func = msm_hs_dmov_rx_callback;
+ rx->xfer.execute_func = NULL;
+
+ rx->command_ptr->cmd = CMD_LC |
+ CMD_SRC_CRCI(msm_uport->dma_rx_crci) | CMD_MODE_BOX;
+
+ rx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
+ | (MSM_UARTDM_BURST_SIZE);
+ rx->command_ptr->row_offset = MSM_UARTDM_BURST_SIZE;
+ rx->command_ptr->src_row_addr = uport->mapbase + UARTDM_RF_ADDR;
+
+
+ msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK;
+ /* Enable reading the current CTS, no harm even if CTS is ignored */
+ msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
+
+ msm_hs_write(uport, UARTDM_TFWR_ADDR, 0); /* TXLEV on empty TX fifo */
+
+
+ ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
+ "msm_hs_uart", msm_uport);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "Request msm_hs_uart IRQ failed!\n");
+ goto err_request_irq;
+ }
+ if (use_low_power_rx_wakeup(msm_uport)) {
+ ret = request_irq(msm_uport->rx_wakeup.irq,
+ msm_hs_rx_wakeup_isr,
+ IRQF_TRIGGER_FALLING,
+ "msm_hs_rx_wakeup", msm_uport);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "Request msm_hs_rx_wakeup IRQ failed!\n");
+ free_irq(uport->irq, msm_uport);
+ goto err_request_irq;
+ }
+ disable_irq(msm_uport->rx_wakeup.irq);
+ }
+
+ spin_lock_irqsave(&uport->lock, flags);
+
+ msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
+ msm_hs_start_rx_locked(uport);
+
+ spin_unlock_irqrestore(&uport->lock, flags);
+ ret = pm_runtime_set_active(uport->dev);
+ if (ret)
+ dev_err(uport->dev, "set active error:%d\n", ret);
+ pm_runtime_enable(uport->dev);
+
+ return 0;
+
+err_request_irq:
+err_msm_hs_init_clk:
+ dma_unmap_single(uport->dev, tx->dma_base,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+ return ret;
+}
+
+/* Initialize tx and rx data structures */
+static int __devinit uartdm_init_port(struct uart_port *uport)
+{
+ int ret = 0;
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ struct msm_hs_tx *tx = &msm_uport->tx;
+ struct msm_hs_rx *rx = &msm_uport->rx;
+
+ /* Allocate the command pointer. Needs to be 64 bit aligned */
+ tx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
+ if (!tx->command_ptr)
+ return -ENOMEM;
+
+ tx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
+ if (!tx->command_ptr_ptr) {
+ ret = -ENOMEM;
+ goto err_tx_command_ptr_ptr;
+ }
+
+ tx->mapped_cmd_ptr = dma_map_single(uport->dev, tx->command_ptr,
+ sizeof(dmov_box), DMA_TO_DEVICE);
+ tx->mapped_cmd_ptr_ptr = dma_map_single(uport->dev,
+ tx->command_ptr_ptr,
+ sizeof(u32 *), DMA_TO_DEVICE);
+ tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr);
+
+ init_waitqueue_head(&rx->wait);
+
+ rx->pool = dma_pool_create("rx_buffer_pool", uport->dev,
+ UARTDM_RX_BUF_SIZE, 16, 0);
+ if (!rx->pool) {
+ pr_err("%s(): cannot allocate rx_buffer_pool", __func__);
+ ret = -ENOMEM;
+ goto err_dma_pool_create;
+ }
+
+ rx->buffer = dma_pool_alloc(rx->pool, GFP_KERNEL, &rx->rbuffer);
+ if (!rx->buffer) {
+ pr_err("%s(): cannot allocate rx->buffer", __func__);
+ ret = -ENOMEM;
+ goto err_dma_pool_alloc;
+ }
+
+ /* Allocate the command pointer. Needs to be 64 bit aligned */
+ rx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
+ if (!rx->command_ptr) {
+ pr_err("%s(): cannot allocate rx->command_ptr", __func__);
+ ret = -ENOMEM;
+ goto err_rx_command_ptr;
+ }
+
+ rx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
+ if (!rx->command_ptr_ptr) {
+ pr_err("%s(): cannot allocate rx->command_ptr_ptr", __func__);
+ ret = -ENOMEM;
+ goto err_rx_command_ptr_ptr;
+ }
+
+ rx->command_ptr->num_rows = ((UARTDM_RX_BUF_SIZE >> 4) << 16) |
+ (UARTDM_RX_BUF_SIZE >> 4);
+
+ rx->command_ptr->dst_row_addr = rx->rbuffer;
+
+ rx->mapped_cmd_ptr = dma_map_single(uport->dev, rx->command_ptr,
+ sizeof(dmov_box), DMA_TO_DEVICE);
+
+ *rx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(rx->mapped_cmd_ptr);
+
+ rx->cmdptr_dmaaddr = dma_map_single(uport->dev, rx->command_ptr_ptr,
+ sizeof(u32 *), DMA_TO_DEVICE);
+ rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr);
+
+ INIT_WORK(&rx->tty_work, msm_hs_tty_flip_buffer_work);
+
+ return ret;
+
+err_rx_command_ptr_ptr:
+ kfree(rx->command_ptr);
+err_rx_command_ptr:
+ dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
+ msm_uport->rx.rbuffer);
+err_dma_pool_alloc:
+ dma_pool_destroy(msm_uport->rx.pool);
+err_dma_pool_create:
+ dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr_ptr,
+ sizeof(u32 *), DMA_TO_DEVICE);
+ dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr,
+ sizeof(dmov_box), DMA_TO_DEVICE);
+ kfree(msm_uport->tx.command_ptr_ptr);
+err_tx_command_ptr_ptr:
+ kfree(msm_uport->tx.command_ptr);
+ return ret;
+}
+
+static int __devinit msm_hs_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct uart_port *uport;
+ struct msm_hs_port *msm_uport;
+ struct resource *resource;
+ const struct msm_serial_hs_platform_data *pdata =
+ pdev->dev.platform_data;
+
+ if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
+ printk(KERN_ERR "Invalid platform device ID = %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ msm_uport = &q_uart_port[pdev->id];
+ uport = &msm_uport->uport;
+
+ uport->dev = &pdev->dev;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource))
+ return -ENXIO;
+
+ uport->mapbase = resource->start;
+ uport->irq = platform_get_irq(pdev, 0);
+ if (unlikely(uport->irq < 0))
+ return -ENXIO;
+
+ if (unlikely(irq_set_irq_wake(uport->irq, 1)))
+ return -ENXIO;
+
+ if (pdata == NULL || pdata->rx_wakeup_irq < 0)
+ msm_uport->rx_wakeup.irq = -1;
+ else {
+ msm_uport->rx_wakeup.irq = pdata->rx_wakeup_irq;
+ msm_uport->rx_wakeup.ignore = 1;
+ msm_uport->rx_wakeup.inject_rx = pdata->inject_rx_on_wakeup;
+ msm_uport->rx_wakeup.rx_to_inject = pdata->rx_to_inject;
+
+ if (unlikely(msm_uport->rx_wakeup.irq < 0))
+ return -ENXIO;
+
+ if (unlikely(irq_set_irq_wake(msm_uport->rx_wakeup.irq, 1)))
+ return -ENXIO;
+ }
+
+ if (pdata == NULL)
+ msm_uport->exit_lpm_cb = NULL;
+ else
+ msm_uport->exit_lpm_cb = pdata->exit_lpm_cb;
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ "uartdm_channels");
+ if (unlikely(!resource))
+ return -ENXIO;
+
+ msm_uport->dma_tx_channel = resource->start;
+ msm_uport->dma_rx_channel = resource->end;
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ "uartdm_crci");
+ if (unlikely(!resource))
+ return -ENXIO;
+
+ msm_uport->dma_tx_crci = resource->start;
+ msm_uport->dma_rx_crci = resource->end;
+
+ uport->iotype = UPIO_MEM;
+ uport->fifosize = UART_FIFOSIZE;
+ uport->ops = &msm_hs_ops;
+ uport->flags = UPF_BOOT_AUTOCONF;
+ uport->uartclk = UARTCLK;
+ msm_uport->imr_reg = 0x0;
+ msm_uport->clk = clk_get(&pdev->dev, "uartdm_clk");
+ if (IS_ERR(msm_uport->clk))
+ return PTR_ERR(msm_uport->clk);
+
+ ret = uartdm_init_port(uport);
+ if (unlikely(ret))
+ return ret;
+
+ msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
+ hrtimer_init(&msm_uport->clk_off_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ msm_uport->clk_off_timer.function = msm_hs_clk_off_retry;
+ msm_uport->clk_off_delay = ktime_set(0, 1000000); /* 1ms */
+
+ uport->line = pdev->id;
+ return uart_add_one_port(&msm_hs_driver, uport);
+}
+
+static int __init msm_serial_hs_init(void)
+{
+ int ret, i;
+
+ /* Init all UARTS as non-configured */
+ for (i = 0; i < UARTDM_NR; i++)
+ q_uart_port[i].uport.type = PORT_UNKNOWN;
+
+ msm_hs_workqueue = create_singlethread_workqueue("msm_serial_hs");
+ if (unlikely(!msm_hs_workqueue))
+ return -ENOMEM;
+
+ ret = uart_register_driver(&msm_hs_driver);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "%s failed to load\n", __func__);
+ goto err_uart_register_driver;
+ }
+
+ ret = platform_driver_register(&msm_serial_hs_platform_driver);
+ if (ret) {
+ printk(KERN_ERR "%s failed to load\n", __func__);
+ goto err_platform_driver_register;
+ }
+
+ return ret;
+
+err_platform_driver_register:
+ uart_unregister_driver(&msm_hs_driver);
+err_uart_register_driver:
+ destroy_workqueue(msm_hs_workqueue);
+ return ret;
+}
+module_init(msm_serial_hs_init);
+
+/*
+ * Called by the upper layer when port is closed.
+ * - Disables the port
+ * - Unhook the ISR
+ */
+static void msm_hs_shutdown(struct uart_port *uport)
+{
+ unsigned long flags;
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ BUG_ON(msm_uport->rx.flush < FLUSH_STOP);
+
+ spin_lock_irqsave(&uport->lock, flags);
+ clk_enable(msm_uport->clk);
+
+ /* Disable the transmitter */
+ msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_DISABLE_BMSK);
+ /* Disable the receiver */
+ msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_DISABLE_BMSK);
+
+ pm_runtime_disable(uport->dev);
+ pm_runtime_set_suspended(uport->dev);
+
+ /* Free the interrupt */
+ free_irq(uport->irq, msm_uport);
+ if (use_low_power_rx_wakeup(msm_uport))
+ free_irq(msm_uport->rx_wakeup.irq, msm_uport);
+
+ msm_uport->imr_reg = 0;
+ msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
+
+ wait_event(msm_uport->rx.wait, msm_uport->rx.flush == FLUSH_SHUTDOWN);
+
+ clk_disable(msm_uport->clk); /* to balance local clk_enable() */
+ if (msm_uport->clk_state != MSM_HS_CLK_OFF)
+ clk_disable(msm_uport->clk); /* to balance clk_state */
+ msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
+
+ dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ spin_unlock_irqrestore(&uport->lock, flags);
+
+ if (cancel_work_sync(&msm_uport->rx.tty_work))
+ msm_hs_tty_flip_buffer_work(&msm_uport->rx.tty_work);
+}
+
+static void __exit msm_serial_hs_exit(void)
+{
+ flush_workqueue(msm_hs_workqueue);
+ destroy_workqueue(msm_hs_workqueue);
+ platform_driver_unregister(&msm_serial_hs_platform_driver);
+ uart_unregister_driver(&msm_hs_driver);
+}
+module_exit(msm_serial_hs_exit);
+
+#ifdef CONFIG_PM_RUNTIME
+static int msm_hs_runtime_idle(struct device *dev)
+{
+ /*
+ * returning success from idle causes runtime suspend to be
+ * called
+ */
+ return 0;
+}
+
+static int msm_hs_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct
+ platform_device, dev);
+ struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
+
+ msm_hs_request_clock_on(&msm_uport->uport);
+ return 0;
+}
+
+static int msm_hs_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct
+ platform_device, dev);
+ struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
+
+ msm_hs_request_clock_off(&msm_uport->uport);
+ return 0;
+}
+#else
+#define msm_hs_runtime_idle NULL
+#define msm_hs_runtime_resume NULL
+#define msm_hs_runtime_suspend NULL
+#endif
+
+static const struct dev_pm_ops msm_hs_dev_pm_ops = {
+ .runtime_suspend = msm_hs_runtime_suspend,
+ .runtime_resume = msm_hs_runtime_resume,
+ .runtime_idle = msm_hs_runtime_idle,
+};
+
+static struct platform_driver msm_serial_hs_platform_driver = {
+ .probe = msm_hs_probe,
+ .remove = __devexit_p(msm_hs_remove),
+ .driver = {
+ .name = "msm_serial_hs",
+ .owner = THIS_MODULE,
+ .pm = &msm_hs_dev_pm_ops,
+ },
+};
+
+static struct uart_driver msm_hs_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "msm_serial_hs",
+ .dev_name = "ttyHS",
+ .nr = UARTDM_NR,
+ .cons = 0,
+};
+
+static struct uart_ops msm_hs_ops = {
+ .tx_empty = msm_hs_tx_empty,
+ .set_mctrl = msm_hs_set_mctrl_locked,
+ .get_mctrl = msm_hs_get_mctrl_locked,
+ .stop_tx = msm_hs_stop_tx_locked,
+ .start_tx = msm_hs_start_tx_locked,
+ .stop_rx = msm_hs_stop_rx_locked,
+ .enable_ms = msm_hs_enable_ms_locked,
+ .break_ctl = msm_hs_break_ctl,
+ .startup = msm_hs_startup,
+ .shutdown = msm_hs_shutdown,
+ .set_termios = msm_hs_set_termios,
+ .pm = msm_hs_pm,
+ .type = msm_hs_type,
+ .config_port = msm_hs_config_port,
+ .release_port = msm_hs_release_port,
+ .request_port = msm_hs_request_port,
+};
+
+MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
+MODULE_VERSION("1.2");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/msm_smd_tty.c b/drivers/tty/serial/msm_smd_tty.c
new file mode 100644
index 00000000..4f41dcdc
--- /dev/null
+++ b/drivers/tty/serial/msm_smd_tty.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <mach/msm_smd.h>
+
+#define MAX_SMD_TTYS 32
+
+struct smd_tty_info {
+ struct tty_port port;
+ smd_channel_t *ch;
+};
+
+struct smd_tty_channel_desc {
+ int id;
+ const char *name;
+};
+
+static struct smd_tty_info smd_tty[MAX_SMD_TTYS];
+
+static const struct smd_tty_channel_desc smd_default_tty_channels[] = {
+ { .id = 0, .name = "SMD_DS" },
+ { .id = 27, .name = "SMD_GPSNMEA" },
+};
+
+static const struct smd_tty_channel_desc *smd_tty_channels =
+ smd_default_tty_channels;
+static int smd_tty_channels_len = ARRAY_SIZE(smd_default_tty_channels);
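+
+/*
+ * Extending this table is how additional SMD streams get tty nodes. For
+ * example (hypothetical entry; channel id and name are made up):
+ *
+ *	{ .id = 7, .name = "SMD_DATA1" },
+ *
+ * would expose SMD channel 7 as /dev/smd7 once registered by smd_tty_init().
+ */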
+
+static void smd_tty_notify(void *priv, unsigned event)
+{
+ unsigned char *ptr;
+ int avail;
+ struct smd_tty_info *info = priv;
+ struct tty_struct *tty;
+
+ if (event != SMD_EVENT_DATA)
+ return;
+
+ tty = tty_port_tty_get(&info->port);
+ if (!tty)
+ return;
+
+ for (;;) {
+ if (test_bit(TTY_THROTTLED, &tty->flags))
+ break;
+ avail = smd_read_avail(info->ch);
+ if (avail == 0)
+ break;
+
+ avail = tty_prepare_flip_string(tty, &ptr, avail);
+
+ if (smd_read(info->ch, ptr, avail) != avail) {
+ /* shouldn't be possible since we're in interrupt
+ ** context here and nobody else could 'steal' our
+ ** characters.
+ */
+ pr_err("OOPS - smd_tty_buffer mismatch?!");
+ }
+
+ tty_flip_buffer_push(tty);
+ }
+
+ /* XXX only when writable and necessary */
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+}
+
+static int smd_tty_port_activate(struct tty_port *tport, struct tty_struct *tty)
+{
+ int i, res = 0;
+ int n = tty->index;
+ const char *name = NULL;
+ struct smd_tty_info *info = smd_tty + n;
+
+ for (i = 0; i < smd_tty_channels_len; i++) {
+ if (smd_tty_channels[i].id == n) {
+ name = smd_tty_channels[i].name;
+ break;
+ }
+ }
+ if (!name)
+ return -ENODEV;
+
+ if (info->ch)
+ smd_kick(info->ch);
+ else
+ res = smd_open(name, &info->ch, info, smd_tty_notify);
+
+ if (!res)
+ tty->driver_data = info;
+
+ return res;
+}
+
+static void smd_tty_port_shutdown(struct tty_port *tport)
+{
+ struct smd_tty_info *info;
+ struct tty_struct *tty = tty_port_tty_get(tport);
+
+ info = tty->driver_data;
+ if (info->ch) {
+ smd_close(info->ch);
+ info->ch = 0;
+ }
+
+ tty->driver_data = 0;
+ tty_kref_put(tty);
+}
+
+static int smd_tty_open(struct tty_struct *tty, struct file *f)
+{
+ struct smd_tty_info *info = smd_tty + tty->index;
+
+ return tty_port_open(&info->port, tty, f);
+}
+
+static void smd_tty_close(struct tty_struct *tty, struct file *f)
+{
+ struct smd_tty_info *info = tty->driver_data;
+
+ tty_port_close(&info->port, tty, f);
+}
+
+static int smd_tty_write(struct tty_struct *tty,
+ const unsigned char *buf, int len)
+{
+ struct smd_tty_info *info = tty->driver_data;
+ int avail;
+
+ /* if we're writing to a packet channel we will
+ ** never be able to write more data than there
+ ** is currently space for
+ */
+ avail = smd_write_avail(info->ch);
+ if (len > avail)
+ len = avail;
+
+ return smd_write(info->ch, buf, len);
+}
+
+static int smd_tty_write_room(struct tty_struct *tty)
+{
+ struct smd_tty_info *info = tty->driver_data;
+ return smd_write_avail(info->ch);
+}
+
+static int smd_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct smd_tty_info *info = tty->driver_data;
+ return smd_read_avail(info->ch);
+}
+
+static void smd_tty_unthrottle(struct tty_struct *tty)
+{
+ struct smd_tty_info *info = tty->driver_data;
+ smd_kick(info->ch);
+}
+
+static const struct tty_port_operations smd_tty_port_ops = {
+ .shutdown = smd_tty_port_shutdown,
+ .activate = smd_tty_port_activate,
+};
+
+static const struct tty_operations smd_tty_ops = {
+ .open = smd_tty_open,
+ .close = smd_tty_close,
+ .write = smd_tty_write,
+ .write_room = smd_tty_write_room,
+ .chars_in_buffer = smd_tty_chars_in_buffer,
+ .unthrottle = smd_tty_unthrottle,
+};
+
+static struct tty_driver *smd_tty_driver;
+
+static int __init smd_tty_init(void)
+{
+ int ret, i;
+
+ smd_tty_driver = alloc_tty_driver(MAX_SMD_TTYS);
+ if (smd_tty_driver == 0)
+ return -ENOMEM;
+
+ smd_tty_driver->owner = THIS_MODULE;
+ smd_tty_driver->driver_name = "smd_tty_driver";
+ smd_tty_driver->name = "smd";
+ smd_tty_driver->major = 0;
+ smd_tty_driver->minor_start = 0;
+ smd_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ smd_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ smd_tty_driver->init_termios = tty_std_termios;
+ smd_tty_driver->init_termios.c_iflag = 0;
+ smd_tty_driver->init_termios.c_oflag = 0;
+ smd_tty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
+ smd_tty_driver->init_termios.c_lflag = 0;
+ smd_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS |
+ TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(smd_tty_driver, &smd_tty_ops);
+
+ ret = tty_register_driver(smd_tty_driver);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < smd_tty_channels_len; i++) {
+ tty_port_init(&smd_tty[smd_tty_channels[i].id].port);
+ smd_tty[smd_tty_channels[i].id].port.ops = &smd_tty_port_ops;
+ tty_register_device(smd_tty_driver, smd_tty_channels[i].id, 0);
+ }
+
+ return 0;
+}
+
+module_init(smd_tty_init);
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
new file mode 100644
index 00000000..9711e06a
--- /dev/null
+++ b/drivers/tty/serial/mux.c
@@ -0,0 +1,633 @@
+/*
+** mux.c:
+** serial driver for the Mux console found in some PA-RISC servers.
+**
+** (c) Copyright 2002 Ryan Bradetich
+** (c) Copyright 2002 Hewlett-Packard Company
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This Driver currently only supports the console (port 0) on the MUX.
+** Additional work will be needed on this driver to enable the full
+** functionality of the MUX.
+**
+*/
+
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/delay.h> /* for udelay */
+#include <linux/device.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/parisc-device.h>
+
+#ifdef CONFIG_MAGIC_SYSRQ
+#include <linux/sysrq.h>
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+
+#define MUX_OFFSET 0x800
+#define MUX_LINE_OFFSET 0x80
+
+#define MUX_FIFO_SIZE 255
+#define MUX_POLL_DELAY (30 * HZ / 1000)
+
+#define IO_DATA_REG_OFFSET 0x3c
+#define IO_DCOUNT_REG_OFFSET 0x40
+
+#define MUX_EOFIFO(status) ((status & 0xF000) == 0xF000)
+#define MUX_STATUS(status) ((status & 0xF000) == 0x8000)
+#define MUX_BREAK(status) ((status & 0xF000) == 0x2000)
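+
+/*
+ * Illustrative decode using the masks above (example value chosen for
+ * illustration): a word read as 0x2041 matches MUX_BREAK (bits 15:12 == 0x2),
+ * is neither MUX_STATUS nor MUX_EOFIFO, and carries the character 0x41 ('A')
+ * in its low byte, which is what mux_read() hands to the tty layer.
+ */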
+
+#define MUX_NR 256
+static unsigned int port_cnt __read_mostly;
+struct mux_port {
+ struct uart_port port;
+ int enabled;
+};
+static struct mux_port mux_ports[MUX_NR];
+
+static struct uart_driver mux_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyB",
+ .dev_name = "ttyB",
+ .major = MUX_MAJOR,
+ .minor = 0,
+ .nr = MUX_NR,
+};
+
+static struct timer_list mux_timer;
+
+#define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + IO_DATA_REG_OFFSET)
+#define UART_GET_FIFO_CNT(p) __raw_readl((p)->membase + IO_DCOUNT_REG_OFFSET)
+
+/**
+ * get_mux_port_count - Get the number of available ports on the Mux.
+ * @dev: The parisc device.
+ *
+ * This function is used to determine the number of ports the Mux
+ * supports. The IODC data reports the number of ports the Mux
+ * can support, but there are cases where not all the Mux ports
+ * are connected. This function can override the IODC and
+ * return the true port count.
+ */
+static int __init get_mux_port_count(struct parisc_device *dev)
+{
+ int status;
+ u8 iodc_data[32];
+ unsigned long bytecnt;
+
+ /* If this is the built-in Mux for the K-Class (Eole CAP/MUX),
+ * we only need to allocate resources for 1 port since the
+ * other 7 ports are not connected.
+ */
+ if(dev->id.hversion == 0x15)
+ return 1;
+
+ status = pdc_iodc_read(&bytecnt, dev->hpa.start, 0, iodc_data, 32);
+ BUG_ON(status != PDC_OK);
+
+ /* Return the number of ports specified in the iodc data. */
+ return ((((iodc_data)[4] & 0xf0) >> 4) * 8) + 8;
+}
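+
+/*
+ * Illustrative example of the IODC calculation above (the byte value is made
+ * up): if iodc_data[4] were 0x10, the high nibble is 1 and the function would
+ * report (1 * 8) + 8 = 16 ports; the builtin K-class mux short-circuits this
+ * and always reports a single usable port.
+ */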
+
+/**
+ * mux_tx_empty - Check if the transmitter fifo is empty.
+ * @port: Ptr to the uart_port.
+ *
+ * This function tests whether the transmitter fifo for the port
+ * described by 'port' is empty. If it is empty, this function
+ * should return TIOCSER_TEMT, otherwise return 0.
+ */
+static unsigned int mux_tx_empty(struct uart_port *port)
+{
+ return UART_GET_FIFO_CNT(port) ? 0 : TIOCSER_TEMT;
+}
+
+/**
+ * mux_set_mctrl - Set the current state of the modem control inputs.
+ * @port: Ptr to the uart_port.
+ * @mctrl: Modem control bits.
+ *
+ * The Serial MUX does not support CTS, DCD or DSR so this function
+ * is ignored.
+ */
+static void mux_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+/**
+ * mux_get_mctrl - Returns the current state of modem control inputs.
+ * @port: Ptr to the uart_port.
+ *
+ * The Serial MUX does not support CTS, DCD or DSR so these lines are
+ * treated as permanently active.
+ */
+static unsigned int mux_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+}
+
+/**
+ * mux_stop_tx - Stop transmitting characters.
+ * @port: Ptr to the uart_port.
+ *
+ * The Serial MUX does not support this function.
+ */
+static void mux_stop_tx(struct uart_port *port)
+{
+}
+
+/**
+ * mux_start_tx - Start transmitting characters.
+ * @port: Ptr to the uart_port.
+ *
+ * The Serial Mux does not support this function.
+ */
+static void mux_start_tx(struct uart_port *port)
+{
+}
+
+/**
+ * mux_stop_rx - Stop receiving characters.
+ * @port: Ptr to the uart_port.
+ *
+ * The Serial Mux does not support this function.
+ */
+static void mux_stop_rx(struct uart_port *port)
+{
+}
+
+/**
+ * mux_enable_ms - Enable modem status interrupts.
+ * @port: Ptr to the uart_port.
+ *
+ * The Serial Mux does not support this function.
+ */
+static void mux_enable_ms(struct uart_port *port)
+{
+}
+
+/**
+ * mux_break_ctl - Control the transmission of a break signal.
+ * @port: Ptr to the uart_port.
+ * @break_state: Raise/Lower the break signal.
+ *
+ * The Serial Mux does not support this function.
+ */
+static void mux_break_ctl(struct uart_port *port, int break_state)
+{
+}
+
+/**
+ * mux_write - Write chars to the mux fifo.
+ * @port: Ptr to the uart_port.
+ *
+ * This function writes all the data from the uart buffer to
+ * the mux fifo.
+ */
+static void mux_write(struct uart_port *port)
+{
+ int count;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if(port->x_char) {
+ UART_PUT_CHAR(port, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if(uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ mux_stop_tx(port);
+ return;
+ }
+
+ count = (port->fifosize) - UART_GET_FIFO_CNT(port);
+ do {
+ UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if(uart_circ_empty(xmit))
+ break;
+
+ } while(--count > 0);
+
+ while(UART_GET_FIFO_CNT(port))
+ udelay(1);
+
+ if(uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ mux_stop_tx(port);
+}
+
+/**
+ * mux_read - Read chars from the mux fifo.
+ * @port: Ptr to the uart_port.
+ *
+ * This reads all available data from the mux's fifo and pushes
+ * the data to the tty layer.
+ */
+static void mux_read(struct uart_port *port)
+{
+ int data;
+ struct tty_struct *tty = port->state->port.tty;
+ __u32 start_count = port->icount.rx;
+
+ while(1) {
+ data = __raw_readl(port->membase + IO_DATA_REG_OFFSET);
+
+ if (MUX_STATUS(data))
+ continue;
+
+ if (MUX_EOFIFO(data))
+ break;
+
+ port->icount.rx++;
+
+ if (MUX_BREAK(data)) {
+ port->icount.brk++;
+ if(uart_handle_break(port))
+ continue;
+ }
+
+ if (uart_handle_sysrq_char(port, data & 0xffu))
+ continue;
+
+ tty_insert_flip_char(tty, data & 0xFF, TTY_NORMAL);
+ }
+
+ if (start_count != port->icount.rx) {
+ tty_flip_buffer_push(tty);
+ }
+}
+
+/**
+ * mux_startup - Initialize the port.
+ * @port: Ptr to the uart_port.
+ *
+ * Grab any resources needed for this port and start the
+ * mux timer.
+ */
+static int mux_startup(struct uart_port *port)
+{
+ mux_ports[port->line].enabled = 1;
+ return 0;
+}
+
+/**
+ * mux_shutdown - Disable the port.
+ * @port: Ptr to the uart_port.
+ *
+ * Release any resources needed for the port.
+ */
+static void mux_shutdown(struct uart_port *port)
+{
+ mux_ports[port->line].enabled = 0;
+}
+
+/**
+ * mux_set_termios - Change port parameters.
+ * @port: Ptr to the uart_port.
+ * @termios: new termios settings.
+ * @old: old termios settings.
+ *
+ * The Serial Mux does not support this function.
+ */
+static void
+mux_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+}
+
+/**
+ * mux_type - Describe the port.
+ * @port: Ptr to the uart_port.
+ *
+ * Return a pointer to a string constant describing the
+ * specified port.
+ */
+static const char *mux_type(struct uart_port *port)
+{
+ return "Mux";
+}
+
+/**
+ * mux_release_port - Release memory and IO regions.
+ * @port: Ptr to the uart_port.
+ *
+ * Release any memory and IO region resources currently in use by
+ * the port.
+ */
+static void mux_release_port(struct uart_port *port)
+{
+}
+
+/**
+ * mux_request_port - Request memory and IO regions.
+ * @port: Ptr to the uart_port.
+ *
+ * Request any memory and IO region resources required by the port.
+ * If any fail, no resources should be registered when this function
+ * returns, and it should return -EBUSY on failure.
+ */
+static int mux_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+/**
+ * mux_config_port - Perform port autoconfiguration.
+ * @port: Ptr to the uart_port.
+ * @type: Bitmask of required configurations.
+ *
+ * Perform any autoconfiguration steps for the port. This function is
+ * called if the UPF_BOOT_AUTOCONF flag is specified for the port.
+ * [Note: This is required for now because of a bug in the Serial core.
+ * rmk has already submitted a patch to linus, should be available for
+ * 2.5.47.]
+ */
+static void mux_config_port(struct uart_port *port, int type)
+{
+ port->type = PORT_MUX;
+}
+
+/**
+ * mux_verify_port - Verify the port information.
+ * @port: Ptr to the uart_port.
+ * @ser: Ptr to the serial information.
+ *
+ * Verify the new serial port information contained within serinfo is
+ * suitable for this port type.
+ */
+static int mux_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ if(port->membase == NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * mux_poll - Mux poll function.
+ * @unused: Unused variable
+ *
+ * This function periodically polls the Serial MUX to check for new data.
+ */
+static void mux_poll(unsigned long unused)
+{
+ int i;
+
+ for(i = 0; i < port_cnt; ++i) {
+ if(!mux_ports[i].enabled)
+ continue;
+
+ mux_read(&mux_ports[i].port);
+ mux_write(&mux_ports[i].port);
+ }
+
+ mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
+}
+
+
+#ifdef CONFIG_SERIAL_MUX_CONSOLE
+static void mux_console_write(struct console *co, const char *s, unsigned count)
+{
+ /* Wait until the FIFO drains. */
+ while(UART_GET_FIFO_CNT(&mux_ports[0].port))
+ udelay(1);
+
+ while(count--) {
+ if(*s == '\n') {
+ UART_PUT_CHAR(&mux_ports[0].port, '\r');
+ }
+ UART_PUT_CHAR(&mux_ports[0].port, *s++);
+ }
+
+}
+
+static int mux_console_setup(struct console *co, char *options)
+{
+ return 0;
+}
+
+struct tty_driver *mux_console_device(struct console *co, int *index)
+{
+ *index = co->index;
+ return mux_driver.tty_driver;
+}
+
+static struct console mux_console = {
+ .name = "ttyB",
+ .write = mux_console_write,
+ .device = mux_console_device,
+ .setup = mux_console_setup,
+ .flags = CON_ENABLED | CON_PRINTBUFFER,
+ .index = 0,
+};
+
+#define MUX_CONSOLE &mux_console
+#else
+#define MUX_CONSOLE NULL
+#endif
+
+static struct uart_ops mux_pops = {
+ .tx_empty = mux_tx_empty,
+ .set_mctrl = mux_set_mctrl,
+ .get_mctrl = mux_get_mctrl,
+ .stop_tx = mux_stop_tx,
+ .start_tx = mux_start_tx,
+ .stop_rx = mux_stop_rx,
+ .enable_ms = mux_enable_ms,
+ .break_ctl = mux_break_ctl,
+ .startup = mux_startup,
+ .shutdown = mux_shutdown,
+ .set_termios = mux_set_termios,
+ .type = mux_type,
+ .release_port = mux_release_port,
+ .request_port = mux_request_port,
+ .config_port = mux_config_port,
+ .verify_port = mux_verify_port,
+};
+
+/**
+ * mux_probe - Determine if the Serial Mux should claim this device.
+ * @dev: The parisc device.
+ *
+ * Determine if the Serial Mux should claim this chip (return 0)
+ * or not (return 1).
+ */
+static int __init mux_probe(struct parisc_device *dev)
+{
+ int i, status;
+
+ int port_count = get_mux_port_count(dev);
+ printk(KERN_INFO "Serial mux driver (%d ports) Revision: 0.6\n", port_count);
+
+ dev_set_drvdata(&dev->dev, (void *)(long)port_count);
+ request_mem_region(dev->hpa.start + MUX_OFFSET,
+ port_count * MUX_LINE_OFFSET, "Mux");
+
+ if(!port_cnt) {
+ mux_driver.cons = MUX_CONSOLE;
+
+ status = uart_register_driver(&mux_driver);
+ if(status) {
+ printk(KERN_ERR "Serial mux: Unable to register driver.\n");
+ return 1;
+ }
+ }
+
+ for(i = 0; i < port_count; ++i, ++port_cnt) {
+ struct uart_port *port = &mux_ports[port_cnt].port;
+ port->iobase = 0;
+ port->mapbase = dev->hpa.start + MUX_OFFSET +
+ (i * MUX_LINE_OFFSET);
+ port->membase = ioremap_nocache(port->mapbase, MUX_LINE_OFFSET);
+ port->iotype = UPIO_MEM;
+ port->type = PORT_MUX;
+ port->irq = NO_IRQ;
+ port->uartclk = 0;
+ port->fifosize = MUX_FIFO_SIZE;
+ port->ops = &mux_pops;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->line = port_cnt;
+
+ /* The port->timeout needs to match what is present in
+ * uart_wait_until_sent in serial_core.c. Otherwise
+ * the time spent in msleep_interruptible will be very
+ * long, causing the appearance of a console hang.
+ */
+ port->timeout = HZ / 50;
+ spin_lock_init(&port->lock);
+
+ status = uart_add_one_port(&mux_driver, port);
+ BUG_ON(status);
+ }
+
+ return 0;
+}
+
+static int __devexit mux_remove(struct parisc_device *dev)
+{
+ int i, j;
+ int port_count = (long)dev_get_drvdata(&dev->dev);
+
+ /* Find Port 0 for this card in the mux_ports list. */
+ for(i = 0; i < port_cnt; ++i) {
+ if(mux_ports[i].port.mapbase == dev->hpa.start + MUX_OFFSET)
+ break;
+ }
+ BUG_ON(i + port_count > port_cnt);
+
+ /* Release the resources associated with each port on the device. */
+ for(j = 0; j < port_count; ++j, ++i) {
+ struct uart_port *port = &mux_ports[i].port;
+
+ uart_remove_one_port(&mux_driver, port);
+ if(port->membase)
+ iounmap(port->membase);
+ }
+
+ release_mem_region(dev->hpa.start + MUX_OFFSET, port_count * MUX_LINE_OFFSET);
+ return 0;
+}
+
+/* Hack. This idea was taken from 8250_gsc.c to get the serial port
+ * detection to happen in the proper order. The idea is we always
+ * want the builtin mux to be detected before add-in mux cards, so we
+ * specifically probe for the builtin mux cards first.
+ *
+ * This table only contains the parisc_device_id of known builtin mux
+ * devices. All other mux cards will be detected by the generic mux_tbl.
+ */
+static struct parisc_device_id builtin_mux_tbl[] = {
+ { HPHW_A_DIRECT, HVERSION_REV_ANY_ID, 0x15, 0x0000D }, /* All K-class */
+ { HPHW_A_DIRECT, HVERSION_REV_ANY_ID, 0x44, 0x0000D }, /* E35, E45, and E55 */
+ { 0, }
+};
+
+static struct parisc_device_id mux_tbl[] = {
+ { HPHW_A_DIRECT, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0000D },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(parisc, builtin_mux_tbl);
+MODULE_DEVICE_TABLE(parisc, mux_tbl);
+
+static struct parisc_driver builtin_serial_mux_driver = {
+ .name = "builtin_serial_mux",
+ .id_table = builtin_mux_tbl,
+ .probe = mux_probe,
+ .remove = __devexit_p(mux_remove),
+};
+
+static struct parisc_driver serial_mux_driver = {
+ .name = "serial_mux",
+ .id_table = mux_tbl,
+ .probe = mux_probe,
+ .remove = __devexit_p(mux_remove),
+};
+
+/**
+ * mux_init - Serial MUX initialization procedure.
+ *
+ * Register the Serial MUX driver.
+ */
+static int __init mux_init(void)
+{
+ register_parisc_driver(&builtin_serial_mux_driver);
+ register_parisc_driver(&serial_mux_driver);
+
+ if(port_cnt > 0) {
+ /* Start the Mux timer */
+ init_timer(&mux_timer);
+ mux_timer.function = mux_poll;
+ mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
+
+#ifdef CONFIG_SERIAL_MUX_CONSOLE
+ register_console(&mux_console);
+#endif
+ }
+
+ return 0;
+}
+
+/**
+ * mux_exit - Serial MUX cleanup procedure.
+ *
+ * Unregister the Serial MUX driver from the tty layer.
+ */
+static void __exit mux_exit(void)
+{
+ /* Delete the Mux timer. */
+ if(port_cnt > 0) {
+ del_timer(&mux_timer);
+#ifdef CONFIG_SERIAL_MUX_CONSOLE
+ unregister_console(&mux_console);
+#endif
+ }
+
+ unregister_parisc_driver(&builtin_serial_mux_driver);
+ unregister_parisc_driver(&serial_mux_driver);
+ uart_unregister_driver(&mux_driver);
+}
+
+module_init(mux_init);
+module_exit(mux_exit);
+
+MODULE_AUTHOR("Ryan Bradetich");
+MODULE_DESCRIPTION("Serial MUX driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(MUX_MAJOR);
diff --git a/drivers/tty/serial/mxc_uart_early.c b/drivers/tty/serial/mxc_uart_early.c
new file mode 100644
index 00000000..ffa3660b
--- /dev/null
+++ b/drivers/tty/serial/mxc_uart_early.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*!
+ * @file drivers/serial/mxc_uart_early.c
+ *
+ * @brief Driver for the Freescale Semiconductor MXC serial ports based on
+ * drivers/char/8250_early.c,
+ * Copyright 2004 Hewlett-Packard Development Company,
+ * L.P. by Bjorn Helgaas.
+ *
+ * Early serial console for MXC UARTS.
+ *
+ * This is for use before the serial driver has initialized, in
+ * particular, before the UARTs have been discovered and named.
+ * Instead of specifying the console device as, e.g., "ttymxc0",
+ * we locate the device directly by its MMIO or I/O port address.
+ *
+ * The user can specify the device directly, e.g.,
+ * console=mxcuart,0x43f90000,115200n8
+ * or platform code can call early_uart_console_init() to set
+ * the early UART device.
+ *
+ * After the normal serial driver starts, we try to locate the
+ * matching ttymxc device and start a console there.
+ */
+
+/*
+ * Include Files
+ */
+
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/clk.h>
+#include <mach/mxc_uart.h>
+
+struct mxc_early_uart_device {
+ struct uart_port port;
+ char options[16]; /* e.g., 115200n8 */
+ unsigned int baud;
+ struct clk *clk;
+};
+static struct mxc_early_uart_device mxc_early_device __initdata;
+
+/*
+ * Write out a character once the UART is ready
+ */
+static void __init mxcuart_console_write_char(struct uart_port *port, int ch)
+{
+ unsigned int status;
+
+ do {
+ status = readl(port->membase + MXC_UARTUSR2);
+ } while ((status & MXC_UARTUSR2_TXFE) == 0);
+ writel(ch, port->membase + MXC_UARTUTXD);
+}
+
+/*!
+ * This function is called to write the console messages through the UART port.
+ *
+ * @param co the console structure
+ * @param s the log message to be written to the UART
+ * @param count length of the message
+ */
+void __init early_mxcuart_console_write(struct console *co, const char *s,
+ u_int count)
+{
+ struct uart_port *port = &mxc_early_device.port;
+ unsigned int status, oldcr1, oldcr2, oldcr3, cr2, cr3;
+
+ /*
+ * First save the control registers and then disable the interrupts
+ */
+ oldcr1 = readl(port->membase + MXC_UARTUCR1);
+ oldcr2 = readl(port->membase + MXC_UARTUCR2);
+ oldcr3 = readl(port->membase + MXC_UARTUCR3);
+ cr2 =
+ oldcr2 & ~(MXC_UARTUCR2_ATEN | MXC_UARTUCR2_RTSEN |
+ MXC_UARTUCR2_ESCI);
+ cr3 =
+ oldcr3 & ~(MXC_UARTUCR3_DCD | MXC_UARTUCR3_RI |
+ MXC_UARTUCR3_DTRDEN);
+ writel(MXC_UARTUCR1_UARTEN, port->membase + MXC_UARTUCR1);
+ writel(cr2, port->membase + MXC_UARTUCR2);
+ writel(cr3, port->membase + MXC_UARTUCR3);
+
+ /* Transmit string */
+ uart_console_write(port, s, count, mxcuart_console_write_char);
+
+ /*
+ * Finally, wait for the transmitter to become empty
+ */
+ do {
+ status = readl(port->membase + MXC_UARTUSR2);
+ } while (!(status & MXC_UARTUSR2_TXDC));
+
+ /*
+ * Restore the control registers
+ */
+ writel(oldcr1, port->membase + MXC_UARTUCR1);
+ writel(oldcr2, port->membase + MXC_UARTUCR2);
+ writel(oldcr3, port->membase + MXC_UARTUCR3);
+}
+
+static unsigned int __init probe_baud(struct uart_port *port)
+{
+ /* FIXME Return Default Baud Rate */
+ return 115200;
+}
+
+static int __init mxc_early_uart_setup(struct console *console, char *options)
+{
+ struct mxc_early_uart_device *device = &mxc_early_device;
+ struct uart_port *port = &device->port;
+ int length;
+
+ if (device->port.membase || device->port.iobase)
+ return -ENODEV;
+
+ /* Enable Early MXC UART Clock */
+ clk_enable(device->clk);
+
+ port->uartclk = 5600000;
+ port->iotype = UPIO_MEM;
+ port->membase = ioremap(port->mapbase, SZ_4K);
+
+ if (options) {
+ device->baud = simple_strtoul(options, NULL, 0);
+ length = min(strlen(options), sizeof(device->options));
+ strncpy(device->options, options, length);
+ } else {
+ device->baud = probe_baud(port);
+ snprintf(device->options, sizeof(device->options), "%u",
+ device->baud);
+ }
+ printk(KERN_INFO
+ "MXC_Early serial console at MMIO 0x%x (options '%s')\n",
+ port->mapbase, device->options);
+ return 0;
+}
+
+static struct console mxc_early_uart_console __initdata = {
+ .name = "ttymxc",
+ .write = early_mxcuart_console_write,
+ .setup = mxc_early_uart_setup,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1,
+};
+
+int __init mxc_early_serial_console_init(unsigned long base, struct clk *clk)
+{
+ mxc_early_device.clk = clk;
+ mxc_early_device.port.mapbase = base;
+
+ register_console(&mxc_early_uart_console);
+ return 0;
+}
+
+int __init mxc_early_uart_console_disable(void)
+{
+ struct mxc_early_uart_device *device = &mxc_early_device;
+ struct uart_port *port = &device->port;
+
+ if (mxc_early_uart_console.index >= 0) {
+ unregister_console(&mxc_early_uart_console);
+ iounmap(port->membase);
+ clk_disable(device->clk);
+ clk_put(device->clk);
+ }
+ return 0;
+}
+late_initcall_sync(mxc_early_uart_console_disable);
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
new file mode 100644
index 00000000..5b3d063a
--- /dev/null
+++ b/drivers/tty/serial/mxs-auart.c
@@ -0,0 +1,800 @@
+/*
+ * Freescale STMP37XX/STMP378X Application UART driver
+ *
+ * Author: dmitry pervushin <dimka@embeddedalley.com>
+ *
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+
+#define MXS_AUART_PORTS 5
+
+#define AUART_CTRL0 0x00000000
+#define AUART_CTRL0_SET 0x00000004
+#define AUART_CTRL0_CLR 0x00000008
+#define AUART_CTRL0_TOG 0x0000000c
+#define AUART_CTRL1 0x00000010
+#define AUART_CTRL1_SET 0x00000014
+#define AUART_CTRL1_CLR 0x00000018
+#define AUART_CTRL1_TOG 0x0000001c
+#define AUART_CTRL2 0x00000020
+#define AUART_CTRL2_SET 0x00000024
+#define AUART_CTRL2_CLR 0x00000028
+#define AUART_CTRL2_TOG 0x0000002c
+#define AUART_LINECTRL 0x00000030
+#define AUART_LINECTRL_SET 0x00000034
+#define AUART_LINECTRL_CLR 0x00000038
+#define AUART_LINECTRL_TOG 0x0000003c
+#define AUART_LINECTRL2 0x00000040
+#define AUART_LINECTRL2_SET 0x00000044
+#define AUART_LINECTRL2_CLR 0x00000048
+#define AUART_LINECTRL2_TOG 0x0000004c
+#define AUART_INTR 0x00000050
+#define AUART_INTR_SET 0x00000054
+#define AUART_INTR_CLR 0x00000058
+#define AUART_INTR_TOG 0x0000005c
+#define AUART_DATA 0x00000060
+#define AUART_STAT 0x00000070
+#define AUART_DEBUG 0x00000080
+#define AUART_VERSION 0x00000090
+#define AUART_AUTOBAUD 0x000000a0
+
+#define AUART_CTRL0_SFTRST (1 << 31)
+#define AUART_CTRL0_CLKGATE (1 << 30)
+
+#define AUART_CTRL2_CTSEN (1 << 15)
+#define AUART_CTRL2_RTS (1 << 11)
+#define AUART_CTRL2_RXE (1 << 9)
+#define AUART_CTRL2_TXE (1 << 8)
+#define AUART_CTRL2_UARTEN (1 << 0)
+
+#define AUART_LINECTRL_BAUD_DIVINT_SHIFT 16
+#define AUART_LINECTRL_BAUD_DIVINT_MASK 0xffff0000
+#define AUART_LINECTRL_BAUD_DIVINT(v) (((v) & 0xffff) << 16)
+#define AUART_LINECTRL_BAUD_DIVFRAC_SHIFT 8
+#define AUART_LINECTRL_BAUD_DIVFRAC_MASK 0x00003f00
+#define AUART_LINECTRL_BAUD_DIVFRAC(v) (((v) & 0x3f) << 8)
+#define AUART_LINECTRL_WLEN_MASK 0x00000060
+#define AUART_LINECTRL_WLEN(v) (((v) & 0x3) << 5)
+#define AUART_LINECTRL_FEN (1 << 4)
+#define AUART_LINECTRL_STP2 (1 << 3)
+#define AUART_LINECTRL_EPS (1 << 2)
+#define AUART_LINECTRL_PEN (1 << 1)
+#define AUART_LINECTRL_BRK (1 << 0)
+
+#define AUART_INTR_RTIEN (1 << 22)
+#define AUART_INTR_TXIEN (1 << 21)
+#define AUART_INTR_RXIEN (1 << 20)
+#define AUART_INTR_CTSMIEN (1 << 17)
+#define AUART_INTR_RTIS (1 << 6)
+#define AUART_INTR_TXIS (1 << 5)
+#define AUART_INTR_RXIS (1 << 4)
+#define AUART_INTR_CTSMIS (1 << 1)
+
+#define AUART_STAT_BUSY (1 << 29)
+#define AUART_STAT_CTS (1 << 28)
+#define AUART_STAT_TXFE (1 << 27)
+#define AUART_STAT_TXFF (1 << 25)
+#define AUART_STAT_RXFE (1 << 24)
+#define AUART_STAT_OERR (1 << 19)
+#define AUART_STAT_BERR (1 << 18)
+#define AUART_STAT_PERR (1 << 17)
+#define AUART_STAT_FERR (1 << 16)
+
+static struct uart_driver auart_driver;
+
+struct mxs_auart_port {
+ struct uart_port port;
+
+ unsigned int flags;
+ unsigned int ctrl;
+
+ unsigned int irq;
+
+ struct clk *clk;
+ struct device *dev;
+};
+
+static void mxs_auart_stop_tx(struct uart_port *u);
+
+#define to_auart_port(u) container_of(u, struct mxs_auart_port, port)
+
+static inline void mxs_auart_tx_chars(struct mxs_auart_port *s)
+{
+ struct circ_buf *xmit = &s->port.state->xmit;
+
+ while (!(readl(s->port.membase + AUART_STAT) &
+ AUART_STAT_TXFF)) {
+ if (s->port.x_char) {
+ s->port.icount.tx++;
+ writel(s->port.x_char,
+ s->port.membase + AUART_DATA);
+ s->port.x_char = 0;
+ continue;
+ }
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)) {
+ s->port.icount.tx++;
+ writel(xmit->buf[xmit->tail],
+ s->port.membase + AUART_DATA);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&s->port);
+ } else
+ break;
+ }
+ if (uart_circ_empty(&(s->port.state->xmit)))
+ writel(AUART_INTR_TXIEN,
+ s->port.membase + AUART_INTR_CLR);
+ else
+ writel(AUART_INTR_TXIEN,
+ s->port.membase + AUART_INTR_SET);
+
+ if (uart_tx_stopped(&s->port))
+ mxs_auart_stop_tx(&s->port);
+}
+
+static void mxs_auart_rx_char(struct mxs_auart_port *s)
+{
+ int flag;
+ u32 stat;
+ u8 c;
+
+ c = readl(s->port.membase + AUART_DATA);
+ stat = readl(s->port.membase + AUART_STAT);
+
+ flag = TTY_NORMAL;
+ s->port.icount.rx++;
+
+ if (stat & AUART_STAT_BERR) {
+ s->port.icount.brk++;
+ if (uart_handle_break(&s->port))
+ goto out;
+ } else if (stat & AUART_STAT_PERR) {
+ s->port.icount.parity++;
+ } else if (stat & AUART_STAT_FERR) {
+ s->port.icount.frame++;
+ }
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ stat &= s->port.read_status_mask;
+
+ if (stat & AUART_STAT_BERR) {
+ flag = TTY_BREAK;
+ } else if (stat & AUART_STAT_PERR)
+ flag = TTY_PARITY;
+ else if (stat & AUART_STAT_FERR)
+ flag = TTY_FRAME;
+
+ if (stat & AUART_STAT_OERR)
+ s->port.icount.overrun++;
+
+ if (uart_handle_sysrq_char(&s->port, c))
+ goto out;
+
+ uart_insert_char(&s->port, stat, AUART_STAT_OERR, c, flag);
+out:
+ writel(stat, s->port.membase + AUART_STAT);
+}
+
+static void mxs_auart_rx_chars(struct mxs_auart_port *s)
+{
+ struct tty_struct *tty = s->port.state->port.tty;
+ u32 stat = 0;
+
+ for (;;) {
+ stat = readl(s->port.membase + AUART_STAT);
+ if (stat & AUART_STAT_RXFE)
+ break;
+ mxs_auart_rx_char(s);
+ }
+
+ writel(stat, s->port.membase + AUART_STAT);
+ tty_flip_buffer_push(tty);
+}
+
+static int mxs_auart_request_port(struct uart_port *u)
+{
+ return 0;
+}
+
+static int mxs_auart_verify_port(struct uart_port *u,
+ struct serial_struct *ser)
+{
+ if (u->type != PORT_UNKNOWN && u->type != PORT_IMX)
+ return -EINVAL;
+ return 0;
+}
+
+static void mxs_auart_config_port(struct uart_port *u, int flags)
+{
+}
+
+static const char *mxs_auart_type(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ return dev_name(s->dev);
+}
+
+static void mxs_auart_release_port(struct uart_port *u)
+{
+}
+
+static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ u32 ctrl = readl(u->membase + AUART_CTRL2);
+
+ ctrl &= ~AUART_CTRL2_RTS;
+ if (mctrl & TIOCM_RTS)
+ ctrl |= AUART_CTRL2_RTS;
+ s->ctrl = mctrl;
+ writel(ctrl, u->membase + AUART_CTRL2);
+}
+
+static u32 mxs_auart_get_mctrl(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+ u32 stat = readl(u->membase + AUART_STAT);
+ int ctrl2 = readl(u->membase + AUART_CTRL2);
+ u32 mctrl = s->ctrl;
+
+ mctrl &= ~TIOCM_CTS;
+ if (stat & AUART_STAT_CTS)
+ mctrl |= TIOCM_CTS;
+
+ if (ctrl2 & AUART_CTRL2_RTS)
+ mctrl |= TIOCM_RTS;
+
+ return mctrl;
+}
+
+static void mxs_auart_settermios(struct uart_port *u,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ u32 bm, ctrl, ctrl2, div;
+ unsigned int cflag, baud;
+
+ cflag = termios->c_cflag;
+
+ ctrl = AUART_LINECTRL_FEN;
+ ctrl2 = readl(u->membase + AUART_CTRL2);
+
+ /* byte size */
+ switch (cflag & CSIZE) {
+ case CS5:
+ bm = 0;
+ break;
+ case CS6:
+ bm = 1;
+ break;
+ case CS7:
+ bm = 2;
+ break;
+ case CS8:
+ bm = 3;
+ break;
+ default:
+ return;
+ }
+
+ ctrl |= AUART_LINECTRL_WLEN(bm);
+
+ /* parity */
+ if (cflag & PARENB) {
+ ctrl |= AUART_LINECTRL_PEN;
+ if ((cflag & PARODD) == 0)
+ ctrl |= AUART_LINECTRL_EPS;
+ }
+
+ u->read_status_mask = 0;
+
+ if (termios->c_iflag & INPCK)
+ u->read_status_mask |= AUART_STAT_PERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ u->read_status_mask |= AUART_STAT_BERR;
+
+ /*
+ * Characters to ignore
+ */
+ u->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ u->ignore_status_mask |= AUART_STAT_PERR;
+ if (termios->c_iflag & IGNBRK) {
+ u->ignore_status_mask |= AUART_STAT_BERR;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ u->ignore_status_mask |= AUART_STAT_OERR;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if (cflag & CREAD)
+ ctrl2 |= AUART_CTRL2_RXE;
+ else
+ ctrl2 &= ~AUART_CTRL2_RXE;
+
+ /* figure out the stop bits requested */
+ if (cflag & CSTOPB)
+ ctrl |= AUART_LINECTRL_STP2;
+
+ /* figure out the hardware flow control settings */
+ if (cflag & CRTSCTS)
+ ctrl2 |= AUART_CTRL2_CTSEN;
+ else
+ ctrl2 &= ~AUART_CTRL2_CTSEN;
+
+ /* set baud rate */
+ baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk);
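+ /*
+ * The divisor is kept as a fixed-point value with six fractional
+ * bits: the low six bits of div program DIVFRAC, the rest DIVINT.
+ */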
+ div = u->uartclk * 32 / baud;
+ ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
+ ctrl |= AUART_LINECTRL_BAUD_DIVINT(div >> 6);
+
+ writel(ctrl, u->membase + AUART_LINECTRL);
+ writel(ctrl2, u->membase + AUART_CTRL2);
+
+ uart_update_timeout(u, termios->c_cflag, baud);
+}
+
+static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+{
+ u32 istatus, istat;
+ struct mxs_auart_port *s = context;
+ u32 stat = readl(s->port.membase + AUART_STAT);
+
+ istatus = istat = readl(s->port.membase + AUART_INTR);
+
+ if (istat & AUART_INTR_CTSMIS) {
+ uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS);
+ writel(AUART_INTR_CTSMIS,
+ s->port.membase + AUART_INTR_CLR);
+ istat &= ~AUART_INTR_CTSMIS;
+ }
+
+ if (istat & (AUART_INTR_RTIS | AUART_INTR_RXIS)) {
+ mxs_auart_rx_chars(s);
+ istat &= ~(AUART_INTR_RTIS | AUART_INTR_RXIS);
+ }
+
+ if (istat & AUART_INTR_TXIS) {
+ mxs_auart_tx_chars(s);
+ istat &= ~AUART_INTR_TXIS;
+ }
+
+ writel(istatus & (AUART_INTR_RTIS
+ | AUART_INTR_TXIS
+ | AUART_INTR_RXIS
+ | AUART_INTR_CTSMIS),
+ s->port.membase + AUART_INTR_CLR);
+
+ return IRQ_HANDLED;
+}
+
+static void mxs_auart_reset(struct uart_port *u)
+{
+ int i;
+ unsigned int reg;
+
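+ /*
+ * Take the block out of soft reset and wait (up to roughly 30ms)
+ * for SFTRST to clear before ungating the clock.
+ */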
+ writel(AUART_CTRL0_SFTRST, u->membase + AUART_CTRL0_CLR);
+
+ for (i = 0; i < 10000; i++) {
+ reg = readl(u->membase + AUART_CTRL0);
+ if (!(reg & AUART_CTRL0_SFTRST))
+ break;
+ udelay(3);
+ }
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+}
+
+static int mxs_auart_startup(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ clk_enable(s->clk);
+
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+
+ writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_SET);
+
+ writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
+ u->membase + AUART_INTR);
+
+ /*
+ * Enable the FIFO so all four bytes of a DMA word are written to
+ * the output (otherwise only the LSB is written, i.e. 1 in 4 bytes).
+ */
+ writel(AUART_LINECTRL_FEN, u->membase + AUART_LINECTRL_SET);
+
+ return 0;
+}
+
+static void mxs_auart_shutdown(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_CLR);
+
+ writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_SET);
+
+ writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
+ u->membase + AUART_INTR_CLR);
+
+ clk_disable(s->clk);
+}
+
+static unsigned int mxs_auart_tx_empty(struct uart_port *u)
+{
+ if (readl(u->membase + AUART_STAT) & AUART_STAT_TXFE)
+ return TIOCSER_TEMT;
+ else
+ return 0;
+}
+
+static void mxs_auart_start_tx(struct uart_port *u)
+{
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ /* enable transmitter */
+ writel(AUART_CTRL2_TXE, u->membase + AUART_CTRL2_SET);
+
+ mxs_auart_tx_chars(s);
+}
+
+static void mxs_auart_stop_tx(struct uart_port *u)
+{
+ writel(AUART_CTRL2_TXE, u->membase + AUART_CTRL2_CLR);
+}
+
+static void mxs_auart_stop_rx(struct uart_port *u)
+{
+ writel(AUART_CTRL2_RXE, u->membase + AUART_CTRL2_CLR);
+}
+
+static void mxs_auart_break_ctl(struct uart_port *u, int ctl)
+{
+ if (ctl)
+ writel(AUART_LINECTRL_BRK,
+ u->membase + AUART_LINECTRL_SET);
+ else
+ writel(AUART_LINECTRL_BRK,
+ u->membase + AUART_LINECTRL_CLR);
+}
+
+static void mxs_auart_enable_ms(struct uart_port *port)
+{
+ /* just empty */
+}
+
+static struct uart_ops mxs_auart_ops = {
+ .tx_empty = mxs_auart_tx_empty,
+ .start_tx = mxs_auart_start_tx,
+ .stop_tx = mxs_auart_stop_tx,
+ .stop_rx = mxs_auart_stop_rx,
+ .enable_ms = mxs_auart_enable_ms,
+ .break_ctl = mxs_auart_break_ctl,
+ .set_mctrl = mxs_auart_set_mctrl,
+ .get_mctrl = mxs_auart_get_mctrl,
+ .startup = mxs_auart_startup,
+ .shutdown = mxs_auart_shutdown,
+ .set_termios = mxs_auart_settermios,
+ .type = mxs_auart_type,
+ .release_port = mxs_auart_release_port,
+ .request_port = mxs_auart_request_port,
+ .config_port = mxs_auart_config_port,
+ .verify_port = mxs_auart_verify_port,
+};
+
+static struct mxs_auart_port *auart_port[MXS_AUART_PORTS];
+
+#ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE
+static void mxs_auart_console_putchar(struct uart_port *port, int ch)
+{
+ unsigned int to = 1000;
+
+ while (readl(port->membase + AUART_STAT) & AUART_STAT_TXFF) {
+ if (!to--)
+ break;
+ udelay(1);
+ }
+
+ writel(ch, port->membase + AUART_DATA);
+}
+
+static void
+auart_console_write(struct console *co, const char *str, unsigned int count)
+{
+ struct mxs_auart_port *s;
+ struct uart_port *port;
+ unsigned int old_ctrl0, old_ctrl2;
+ unsigned int to = 1000;
+
+ if (co->index >= MXS_AUART_PORTS || co->index < 0)
+ return;
+
+ s = auart_port[co->index];
+ port = &s->port;
+
+ clk_enable(s->clk);
+
+ /* Save the control registers, then make sure the uart and transmitter are enabled */
+ old_ctrl2 = readl(port->membase + AUART_CTRL2);
+ old_ctrl0 = readl(port->membase + AUART_CTRL0);
+
+ writel(AUART_CTRL0_CLKGATE,
+ port->membase + AUART_CTRL0_CLR);
+ writel(AUART_CTRL2_UARTEN | AUART_CTRL2_TXE,
+ port->membase + AUART_CTRL2_SET);
+
+ uart_console_write(port, str, count, mxs_auart_console_putchar);
+
+ /*
+ * Finally, wait for the transmitter to become empty
+ * and restore the control registers
+ */
+ while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) {
+ if (!to--)
+ break;
+ udelay(1);
+ }
+
+ writel(old_ctrl0, port->membase + AUART_CTRL0);
+ writel(old_ctrl2, port->membase + AUART_CTRL2);
+
+ clk_disable(s->clk);
+}
+
+static void __init
+auart_console_get_options(struct uart_port *port, int *baud,
+ int *parity, int *bits)
+{
+ unsigned int lcr_h, quot;
+
+ if (!(readl(port->membase + AUART_CTRL2) & AUART_CTRL2_UARTEN))
+ return;
+
+ lcr_h = readl(port->membase + AUART_LINECTRL);
+
+ *parity = 'n';
+ if (lcr_h & AUART_LINECTRL_PEN) {
+ if (lcr_h & AUART_LINECTRL_EPS)
+ *parity = 'e';
+ else
+ *parity = 'o';
+ }
+
+ if ((lcr_h & AUART_LINECTRL_WLEN_MASK) == AUART_LINECTRL_WLEN(2))
+ *bits = 7;
+ else
+ *bits = 8;
+
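+ /*
+ * Rebuild the programmed divisor from the DIVINT and DIVFRAC
+ * fields and derive the configured baud rate from uartclk.
+ */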
+ quot = ((readl(port->membase + AUART_LINECTRL)
+ & AUART_LINECTRL_BAUD_DIVINT_MASK))
+ >> (AUART_LINECTRL_BAUD_DIVINT_SHIFT - 6);
+ quot |= ((readl(port->membase + AUART_LINECTRL)
+ & AUART_LINECTRL_BAUD_DIVFRAC_MASK))
+ >> AUART_LINECTRL_BAUD_DIVFRAC_SHIFT;
+ if (quot == 0)
+ quot = 1;
+
+ *baud = (port->uartclk << 2) / quot;
+}
+
+static int __init
+auart_console_setup(struct console *co, char *options)
+{
+ struct mxs_auart_port *s;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index == -1 || co->index >= ARRAY_SIZE(auart_port))
+ co->index = 0;
+ s = auart_port[co->index];
+ if (!s)
+ return -ENODEV;
+
+ clk_enable(s->clk);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ auart_console_get_options(&s->port, &baud, &parity, &bits);
+
+ ret = uart_set_options(&s->port, co, baud, parity, bits, flow);
+
+ clk_disable(s->clk);
+
+ return ret;
+}
+
+static struct console auart_console = {
+ .name = "ttyAPP",
+ .write = auart_console_write,
+ .device = uart_console_device,
+ .setup = auart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &auart_driver,
+};
+#endif
+
+static struct uart_driver auart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyAPP",
+ .dev_name = "ttyAPP",
+ .major = 0,
+ .minor = 0,
+ .nr = MXS_AUART_PORTS,
+#ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE
+ .cons = &auart_console,
+#endif
+};
+
+static int __devinit mxs_auart_probe(struct platform_device *pdev)
+{
+ struct mxs_auart_port *s;
+ u32 version;
+ int ret = 0;
+ struct resource *r;
+
+ s = kzalloc(sizeof(struct mxs_auart_port), GFP_KERNEL);
+ if (!s) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ s->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(s->clk)) {
+ ret = PTR_ERR(s->clk);
+ goto out_free;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ ret = -ENXIO;
+ goto out_free_clk;
+ }
+
+ s->port.mapbase = r->start;
+ s->port.membase = ioremap(r->start, resource_size(r));
+ s->port.ops = &mxs_auart_ops;
+ s->port.iotype = UPIO_MEM;
+ s->port.line = pdev->id < 0 ? 0 : pdev->id;
+ s->port.fifosize = 16;
+ s->port.uartclk = clk_get_rate(s->clk);
+ s->port.type = PORT_IMX;
+ s->port.dev = s->dev = get_device(&pdev->dev);
+
+ s->flags = 0;
+ s->ctrl = 0;
+
+ s->irq = platform_get_irq(pdev, 0);
+ s->port.irq = s->irq;
+ ret = request_irq(s->irq, mxs_auart_irq_handle, 0, dev_name(&pdev->dev), s);
+ if (ret)
+ goto out_free_clk;
+
+ platform_set_drvdata(pdev, s);
+
+ auart_port[pdev->id] = s;
+
+ mxs_auart_reset(&s->port);
+
+ ret = uart_add_one_port(&auart_driver, &s->port);
+ if (ret)
+ goto out_free_irq;
+
+ version = readl(s->port.membase + AUART_VERSION);
+ dev_info(&pdev->dev, "Found APPUART %d.%d.%d\n",
+ (version >> 24) & 0xff,
+ (version >> 16) & 0xff, version & 0xffff);
+
+ return 0;
+
+out_free_irq:
+ auart_port[pdev->id] = NULL;
+ free_irq(s->irq, s);
+out_free_clk:
+ clk_put(s->clk);
+out_free:
+ kfree(s);
+out:
+ return ret;
+}
+
+static int __devexit mxs_auart_remove(struct platform_device *pdev)
+{
+ struct mxs_auart_port *s = platform_get_drvdata(pdev);
+
+ uart_remove_one_port(&auart_driver, &s->port);
+
+ auart_port[pdev->id] = NULL;
+
+ clk_put(s->clk);
+ free_irq(s->irq, s);
+ kfree(s);
+
+ return 0;
+}
+
+static struct platform_driver mxs_auart_driver = {
+ .probe = mxs_auart_probe,
+ .remove = __devexit_p(mxs_auart_remove),
+ .driver = {
+ .name = "mxs-auart",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mxs_auart_init(void)
+{
+ int r;
+
+ r = uart_register_driver(&auart_driver);
+ if (r)
+ goto out;
+
+ r = platform_driver_register(&mxs_auart_driver);
+ if (r)
+ goto out_err;
+
+ return 0;
+out_err:
+ uart_unregister_driver(&auart_driver);
+out:
+ return r;
+}
+
+static void __exit mxs_auart_exit(void)
+{
+ platform_driver_unregister(&mxs_auart_driver);
+ uart_unregister_driver(&auart_driver);
+}
+
+module_init(mxs_auart_init);
+module_exit(mxs_auart_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Freescale MXS application uart driver");
diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
new file mode 100644
index 00000000..d40da78e
--- /dev/null
+++ b/drivers/tty/serial/netx-serial.c
@@ -0,0 +1,748 @@
+/*
+ * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#if defined(CONFIG_SERIAL_NETX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <mach/hardware.h>
+#include <mach/netx-regs.h>
+
+/* We've been assigned a range on the "Low-density serial ports" major */
+#define SERIAL_NX_MAJOR 204
+#define MINOR_START 170
+
+enum uart_regs {
+ UART_DR = 0x00,
+ UART_SR = 0x04,
+ UART_LINE_CR = 0x08,
+ UART_BAUDDIV_MSB = 0x0c,
+ UART_BAUDDIV_LSB = 0x10,
+ UART_CR = 0x14,
+ UART_FR = 0x18,
+ UART_IIR = 0x1c,
+ UART_ILPR = 0x20,
+ UART_RTS_CR = 0x24,
+ UART_RTS_LEAD = 0x28,
+ UART_RTS_TRAIL = 0x2c,
+ UART_DRV_ENABLE = 0x30,
+ UART_BRM_CR = 0x34,
+ UART_RXFIFO_IRQLEVEL = 0x38,
+ UART_TXFIFO_IRQLEVEL = 0x3c,
+};
+
+#define SR_FE (1<<0)
+#define SR_PE (1<<1)
+#define SR_BE (1<<2)
+#define SR_OE (1<<3)
+
+#define LINE_CR_BRK (1<<0)
+#define LINE_CR_PEN (1<<1)
+#define LINE_CR_EPS (1<<2)
+#define LINE_CR_STP2 (1<<3)
+#define LINE_CR_FEN (1<<4)
+#define LINE_CR_5BIT (0<<5)
+#define LINE_CR_6BIT (1<<5)
+#define LINE_CR_7BIT (2<<5)
+#define LINE_CR_8BIT (3<<5)
+#define LINE_CR_BITS_MASK (3<<5)
+
+#define CR_UART_EN (1<<0)
+#define CR_SIREN (1<<1)
+#define CR_SIRLP (1<<2)
+#define CR_MSIE (1<<3)
+#define CR_RIE (1<<4)
+#define CR_TIE (1<<5)
+#define CR_RTIE (1<<6)
+#define CR_LBE (1<<7)
+
+#define FR_CTS (1<<0)
+#define FR_DSR (1<<1)
+#define FR_DCD (1<<2)
+#define FR_BUSY (1<<3)
+#define FR_RXFE (1<<4)
+#define FR_TXFF (1<<5)
+#define FR_RXFF (1<<6)
+#define FR_TXFE (1<<7)
+
+#define IIR_MIS (1<<0)
+#define IIR_RIS (1<<1)
+#define IIR_TIS (1<<2)
+#define IIR_RTIS (1<<3)
+#define IIR_MASK 0xf
+
+#define RTS_CR_AUTO (1<<0)
+#define RTS_CR_RTS (1<<1)
+#define RTS_CR_COUNT (1<<2)
+#define RTS_CR_MOD2 (1<<3)
+#define RTS_CR_RTS_POL (1<<4)
+#define RTS_CR_CTS_CTR (1<<5)
+#define RTS_CR_CTS_POL (1<<6)
+#define RTS_CR_STICK (1<<7)
+
+#define UART_PORT_SIZE 0x40
+#define DRIVER_NAME "netx-uart"
+
+struct netx_port {
+ struct uart_port port;
+};
+
+static void netx_stop_tx(struct uart_port *port)
+{
+ unsigned int val;
+ val = readl(port->membase + UART_CR);
+ writel(val & ~CR_TIE, port->membase + UART_CR);
+}
+
+static void netx_stop_rx(struct uart_port *port)
+{
+ unsigned int val;
+ val = readl(port->membase + UART_CR);
+ writel(val & ~CR_RIE, port->membase + UART_CR);
+}
+
+static void netx_enable_ms(struct uart_port *port)
+{
+ unsigned int val;
+ val = readl(port->membase + UART_CR);
+ writel(val | CR_MSIE, port->membase + UART_CR);
+}
+
+static inline void netx_transmit_buffer(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (port->x_char) {
+ writel(port->x_char, port->membase + UART_DR);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if (uart_tx_stopped(port) || uart_circ_empty(xmit)) {
+ netx_stop_tx(port);
+ return;
+ }
+
+ do {
+ /* send xmit->buf[xmit->tail]
+ * out the port here */
+ writel(xmit->buf[xmit->tail], port->membase + UART_DR);
+ xmit->tail = (xmit->tail + 1) &
+ (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (!(readl(port->membase + UART_FR) & FR_TXFF));
+
+ if (uart_circ_empty(xmit))
+ netx_stop_tx(port);
+}
+
+static void netx_start_tx(struct uart_port *port)
+{
+ writel(readl(port->membase + UART_CR) | CR_TIE,
+ port->membase + UART_CR);
+
+ if (!(readl(port->membase + UART_FR) & FR_TXFF))
+ netx_transmit_buffer(port);
+}
+
+static unsigned int netx_tx_empty(struct uart_port *port)
+{
+ return readl(port->membase + UART_FR) & FR_BUSY ? 0 : TIOCSER_TEMT;
+}
+
+static void netx_txint(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ netx_stop_tx(port);
+ return;
+ }
+
+ netx_transmit_buffer(port);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+static void netx_rxint(struct uart_port *port)
+{
+ unsigned char rx, flg, status;
+ struct tty_struct *tty = port->state->port.tty;
+
+ while (!(readl(port->membase + UART_FR) & FR_RXFE)) {
+ rx = readl(port->membase + UART_DR);
+ flg = TTY_NORMAL;
+ port->icount.rx++;
+ status = readl(port->membase + UART_SR);
+ if (status & SR_BE) {
+ writel(0, port->membase + UART_SR);
+ if (uart_handle_break(port))
+ continue;
+ }
+
+ if (unlikely(status & (SR_FE | SR_PE | SR_OE))) {
+
+ if (status & SR_PE)
+ port->icount.parity++;
+ else if (status & SR_FE)
+ port->icount.frame++;
+ if (status & SR_OE)
+ port->icount.overrun++;
+
+ status &= port->read_status_mask;
+
+ if (status & SR_BE)
+ flg = TTY_BREAK;
+ else if (status & SR_PE)
+ flg = TTY_PARITY;
+ else if (status & SR_FE)
+ flg = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(port, rx))
+ continue;
+
+ uart_insert_char(port, status, SR_OE, rx, flg);
+ }
+
+ tty_flip_buffer_push(tty);
+ return;
+}
+
+static irqreturn_t netx_int(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ unsigned long flags;
+ unsigned char status;
+
+ spin_lock_irqsave(&port->lock,flags);
+
+ status = readl(port->membase + UART_IIR) & IIR_MASK;
+ while (status) {
+ if (status & IIR_RIS)
+ netx_rxint(port);
+ if (status & IIR_TIS)
+ netx_txint(port);
+ if (status & IIR_MIS) {
+ if (readl(port->membase + UART_FR) & FR_CTS)
+ uart_handle_cts_change(port, 1);
+ else
+ uart_handle_cts_change(port, 0);
+ }
+ writel(0, port->membase + UART_IIR);
+ status = readl(port->membase + UART_IIR) & IIR_MASK;
+ }
+
+ spin_unlock_irqrestore(&port->lock,flags);
+ return IRQ_HANDLED;
+}
+
+static unsigned int netx_get_mctrl(struct uart_port *port)
+{
+ unsigned int ret = TIOCM_DSR | TIOCM_CAR;
+
+ if (readl(port->membase + UART_FR) & FR_CTS)
+ ret |= TIOCM_CTS;
+
+ return ret;
+}
+
+static void netx_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ unsigned int val;
+
+ /* FIXME: Locking needed ? */
+ if (mctrl & TIOCM_RTS) {
+ val = readl(port->membase + UART_RTS_CR);
+ writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR);
+ }
+}
+
+static void netx_break_ctl(struct uart_port *port, int break_state)
+{
+ unsigned int line_cr;
+ spin_lock_irq(&port->lock);
+
+ line_cr = readl(port->membase + UART_LINE_CR);
+ if (break_state != 0)
+ line_cr |= LINE_CR_BRK;
+ else
+ line_cr &= ~LINE_CR_BRK;
+ writel(line_cr, port->membase + UART_LINE_CR);
+
+ spin_unlock_irq(&port->lock);
+}
+
+static int netx_startup(struct uart_port *port)
+{
+ int ret;
+
+ ret = request_irq(port->irq, netx_int, 0,
+ DRIVER_NAME, port);
+ if (ret) {
+ dev_err(port->dev, "unable to grab irq%d\n",port->irq);
+ goto exit;
+ }
+
+ writel(readl(port->membase + UART_LINE_CR) | LINE_CR_FEN,
+ port->membase + UART_LINE_CR);
+
+ writel(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE | CR_UART_EN,
+ port->membase + UART_CR);
+
+exit:
+ return ret;
+}
+
+static void netx_shutdown(struct uart_port *port)
+{
+ writel(0, port->membase + UART_CR) ;
+
+ free_irq(port->irq, port);
+}
+
+static void
+netx_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int baud, quot;
+ unsigned char old_cr;
+ unsigned char line_cr = LINE_CR_FEN;
+ unsigned char rts_cr = 0;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ line_cr |= LINE_CR_5BIT;
+ break;
+ case CS6:
+ line_cr |= LINE_CR_6BIT;
+ break;
+ case CS7:
+ line_cr |= LINE_CR_7BIT;
+ break;
+ case CS8:
+ line_cr |= LINE_CR_8BIT;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ line_cr |= LINE_CR_STP2;
+
+ if (termios->c_cflag & PARENB) {
+ line_cr |= LINE_CR_PEN;
+ if (!(termios->c_cflag & PARODD))
+ line_cr |= LINE_CR_EPS;
+ }
+
+ if (termios->c_cflag & CRTSCTS)
+ rts_cr = RTS_CR_AUTO | RTS_CR_CTS_CTR | RTS_CR_RTS_POL;
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
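+ /*
+ * The value programmed into UART_BAUDDIV_MSB/LSB works out to
+ * baud * 4096 * 256 / 100000000; the arithmetic is done in steps,
+ * presumably to keep the intermediate results within 32 bits.
+ */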
+ quot = baud * 4096;
+ quot /= 1000;
+ quot *= 256;
+ quot /= 100000;
+
+ spin_lock_irq(&port->lock);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ old_cr = readl(port->membase + UART_CR);
+
+ /* disable interrupts */
+ writel(old_cr & ~(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE),
+ port->membase + UART_CR);
+
+ /* drain transmitter */
+ while (readl(port->membase + UART_FR) & FR_BUSY);
+
+ /* disable UART */
+ writel(old_cr & ~CR_UART_EN, port->membase + UART_CR);
+
+ /* modem status interrupts */
+ old_cr &= ~CR_MSIE;
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+ old_cr |= CR_MSIE;
+
+ writel((quot>>8) & 0xff, port->membase + UART_BAUDDIV_MSB);
+ writel(quot & 0xff, port->membase + UART_BAUDDIV_LSB);
+ writel(line_cr, port->membase + UART_LINE_CR);
+
+ writel(rts_cr, port->membase + UART_RTS_CR);
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= SR_PE;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= SR_BE;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= SR_PE;
+ }
+
+ port->read_status_mask = 0;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= SR_BE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= SR_PE | SR_FE;
+
+ writel(old_cr, port->membase + UART_CR);
+
+ spin_unlock_irq(&port->lock);
+}
+
+static const char *netx_type(struct uart_port *port)
+{
+ return port->type == PORT_NETX ? "NETX" : NULL;
+}
+
+static void netx_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase, UART_PORT_SIZE);
+}
+
+static int netx_request_port(struct uart_port *port)
+{
+ return request_mem_region(port->mapbase, UART_PORT_SIZE,
+ DRIVER_NAME) != NULL ? 0 : -EBUSY;
+}
+
+static void netx_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE && netx_request_port(port) == 0)
+ port->type = PORT_NETX;
+}
+
+static int
+netx_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ int ret = 0;
+
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_NETX)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static struct uart_ops netx_pops = {
+ .tx_empty = netx_tx_empty,
+ .set_mctrl = netx_set_mctrl,
+ .get_mctrl = netx_get_mctrl,
+ .stop_tx = netx_stop_tx,
+ .start_tx = netx_start_tx,
+ .stop_rx = netx_stop_rx,
+ .enable_ms = netx_enable_ms,
+ .break_ctl = netx_break_ctl,
+ .startup = netx_startup,
+ .shutdown = netx_shutdown,
+ .set_termios = netx_set_termios,
+ .type = netx_type,
+ .release_port = netx_release_port,
+ .request_port = netx_request_port,
+ .config_port = netx_config_port,
+ .verify_port = netx_verify_port,
+};
+
+static struct netx_port netx_ports[] = {
+ {
+ .port = {
+ .type = PORT_NETX,
+ .iotype = UPIO_MEM,
+ .membase = (char __iomem *)io_p2v(NETX_PA_UART0),
+ .mapbase = NETX_PA_UART0,
+ .irq = NETX_IRQ_UART0,
+ .uartclk = 100000000,
+ .fifosize = 16,
+ .flags = UPF_BOOT_AUTOCONF,
+ .ops = &netx_pops,
+ .line = 0,
+ },
+ }, {
+ .port = {
+ .type = PORT_NETX,
+ .iotype = UPIO_MEM,
+ .membase = (char __iomem *)io_p2v(NETX_PA_UART1),
+ .mapbase = NETX_PA_UART1,
+ .irq = NETX_IRQ_UART1,
+ .uartclk = 100000000,
+ .fifosize = 16,
+ .flags = UPF_BOOT_AUTOCONF,
+ .ops = &netx_pops,
+ .line = 1,
+ },
+ }, {
+ .port = {
+ .type = PORT_NETX,
+ .iotype = UPIO_MEM,
+ .membase = (char __iomem *)io_p2v(NETX_PA_UART2),
+ .mapbase = NETX_PA_UART2,
+ .irq = NETX_IRQ_UART2,
+ .uartclk = 100000000,
+ .fifosize = 16,
+ .flags = UPF_BOOT_AUTOCONF,
+ .ops = &netx_pops,
+ .line = 2,
+ },
+ }
+};
+
+#ifdef CONFIG_SERIAL_NETX_CONSOLE
+
+static void netx_console_putchar(struct uart_port *port, int ch)
+{
+ while (readl(port->membase + UART_FR) & FR_BUSY);
+ writel(ch, port->membase + UART_DR);
+}
+
+static void
+netx_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_port *port = &netx_ports[co->index].port;
+ unsigned char cr_save;
+
+ cr_save = readl(port->membase + UART_CR);
+ writel(cr_save | CR_UART_EN, port->membase + UART_CR);
+
+ uart_console_write(port, s, count, netx_console_putchar);
+
+ while (readl(port->membase + UART_FR) & FR_BUSY);
+ writel(cr_save, port->membase + UART_CR);
+}
+
+static void __init
+netx_console_get_options(struct uart_port *port, int *baud,
+ int *parity, int *bits, int *flow)
+{
+ unsigned char line_cr;
+
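+ /*
+ * Invert the divisor programming done in netx_set_termios:
+ * baud = divisor * 100000000 / (4096 * 256), again in steps.
+ */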
+ *baud = (readl(port->membase + UART_BAUDDIV_MSB) << 8) |
+ readl(port->membase + UART_BAUDDIV_LSB);
+ *baud *= 1000;
+ *baud /= 4096;
+ *baud *= 1000;
+ *baud /= 256;
+ *baud *= 100;
+
+ line_cr = readl(port->membase + UART_LINE_CR);
+ *parity = 'n';
+ if (line_cr & LINE_CR_PEN) {
+ if (line_cr & LINE_CR_EPS)
+ *parity = 'e';
+ else
+ *parity = 'o';
+ }
+
+ switch (line_cr & LINE_CR_BITS_MASK) {
+ case LINE_CR_8BIT:
+ *bits = 8;
+ break;
+ case LINE_CR_7BIT:
+ *bits = 7;
+ break;
+ case LINE_CR_6BIT:
+ *bits = 6;
+ break;
+ case LINE_CR_5BIT:
+ *bits = 5;
+ break;
+ }
+
+ if (readl(port->membase + UART_RTS_CR) & RTS_CR_AUTO)
+ *flow = 'r';
+}
+
+static int __init
+netx_console_setup(struct console *co, char *options)
+{
+ struct netx_port *sport;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index == -1 || co->index >= ARRAY_SIZE(netx_ports))
+ co->index = 0;
+ sport = &netx_ports[co->index];
+
+ if (options) {
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ } else {
+ /* if the UART is enabled, assume it has been correctly set up
+ * by the bootloader and get the options
+ */
+ if (readl(sport->port.membase + UART_CR) & CR_UART_EN) {
+ netx_console_get_options(&sport->port, &baud,
+ &parity, &bits, &flow);
+ }
+
+ }
+
+ return uart_set_options(&sport->port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver netx_reg;
+static struct console netx_console = {
+ .name = "ttyNX",
+ .write = netx_console_write,
+ .device = uart_console_device,
+ .setup = netx_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &netx_reg,
+};
+
+static int __init netx_console_init(void)
+{
+ register_console(&netx_console);
+ return 0;
+}
+console_initcall(netx_console_init);
+
+#define NETX_CONSOLE &netx_console
+#else
+#define NETX_CONSOLE NULL
+#endif
+
+static struct uart_driver netx_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = DRIVER_NAME,
+ .dev_name = "ttyNX",
+ .major = SERIAL_NX_MAJOR,
+ .minor = MINOR_START,
+ .nr = ARRAY_SIZE(netx_ports),
+ .cons = NETX_CONSOLE,
+};
+
+static int serial_netx_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct netx_port *sport = platform_get_drvdata(pdev);
+
+ if (sport)
+ uart_suspend_port(&netx_reg, &sport->port);
+
+ return 0;
+}
+
+static int serial_netx_resume(struct platform_device *pdev)
+{
+ struct netx_port *sport = platform_get_drvdata(pdev);
+
+ if (sport)
+ uart_resume_port(&netx_reg, &sport->port);
+
+ return 0;
+}
+
+static int serial_netx_probe(struct platform_device *pdev)
+{
+ struct uart_port *port = &netx_ports[pdev->id].port;
+
+ dev_info(&pdev->dev, "initialising\n");
+
+ port->dev = &pdev->dev;
+
+ writel(1, port->membase + UART_RXFIFO_IRQLEVEL);
+ uart_add_one_port(&netx_reg, &netx_ports[pdev->id].port);
+ platform_set_drvdata(pdev, &netx_ports[pdev->id]);
+
+ return 0;
+}
+
+static int serial_netx_remove(struct platform_device *pdev)
+{
+ struct netx_port *sport = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (sport)
+ uart_remove_one_port(&netx_reg, &sport->port);
+
+ return 0;
+}
+
+static struct platform_driver serial_netx_driver = {
+ .probe = serial_netx_probe,
+ .remove = serial_netx_remove,
+
+ .suspend = serial_netx_suspend,
+ .resume = serial_netx_resume,
+
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init netx_serial_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Serial: NetX driver\n");
+
+ ret = uart_register_driver(&netx_reg);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&serial_netx_driver);
+ if (ret != 0)
+ uart_unregister_driver(&netx_reg);
+
+ return 0;
+}
+
+static void __exit netx_serial_exit(void)
+{
+ platform_driver_unregister(&serial_netx_driver);
+ uart_unregister_driver(&netx_reg);
+}
+
+module_init(netx_serial_init);
+module_exit(netx_serial_exit);
+
+MODULE_AUTHOR("Sascha Hauer");
+MODULE_DESCRIPTION("NetX serial port driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/tty/serial/nwpserial.c b/drivers/tty/serial/nwpserial.c
new file mode 100644
index 00000000..de173671
--- /dev/null
+++ b/drivers/tty/serial/nwpserial.c
@@ -0,0 +1,477 @@
+/*
+ * Serial Port driver for a NWP uart device
+ *
+ * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/serial.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/irqreturn.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/nwpserial.h>
+#include <asm/prom.h>
+#include <asm/dcr.h>
+
+#define NWPSERIAL_NR 2
+
+#define NWPSERIAL_STATUS_RXVALID 0x1
+#define NWPSERIAL_STATUS_TXFULL 0x2
+
+struct nwpserial_port {
+ struct uart_port port;
+ dcr_host_t dcr_host;
+ unsigned int ier;
+ unsigned int mcr;
+};
+
+static DEFINE_MUTEX(nwpserial_mutex);
+static struct nwpserial_port nwpserial_ports[NWPSERIAL_NR];
+
+static void wait_for_bits(struct nwpserial_port *up, int bits)
+{
+ unsigned int status, tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ do {
+ status = dcr_read(up->dcr_host, UART_LSR);
+
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ } while ((status & bits) != bits);
+}
+
+#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
+static void nwpserial_console_putchar(struct uart_port *port, int c)
+{
+ struct nwpserial_port *up;
+ up = container_of(port, struct nwpserial_port, port);
+ /* check if tx buffer is full */
+ wait_for_bits(up, UART_LSR_THRE);
+ dcr_write(up->dcr_host, UART_TX, c);
+ up->port.icount.tx++;
+}
+
+static void
+nwpserial_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct nwpserial_port *up = &nwpserial_ports[co->index];
+ unsigned long flags;
+ int locked = 1;
+
+ if (oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /* save and disable interrupt */
+ up->ier = dcr_read(up->dcr_host, UART_IER);
+ dcr_write(up->dcr_host, UART_IER, up->ier & ~UART_IER_RDI);
+
+ uart_console_write(&up->port, s, count, nwpserial_console_putchar);
+
+ /* wait for transmitter to become empty */
+ while ((dcr_read(up->dcr_host, UART_LSR) & UART_LSR_THRE) == 0)
+ cpu_relax();
+
+ /* restore interrupt state */
+ dcr_write(up->dcr_host, UART_IER, up->ier);
+
+ if (locked)
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static struct uart_driver nwpserial_reg;
+static struct console nwpserial_console = {
+ .name = "ttySQ",
+ .write = nwpserial_console_write,
+ .device = uart_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &nwpserial_reg,
+};
+#define NWPSERIAL_CONSOLE (&nwpserial_console)
+#else
+#define NWPSERIAL_CONSOLE NULL
+#endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */
+
+/**************************************************************************/
+
+static int nwpserial_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void nwpserial_release_port(struct uart_port *port)
+{
+ /* N/A */
+}
+
+static void nwpserial_config_port(struct uart_port *port, int flags)
+{
+ port->type = PORT_NWPSERIAL;
+}
+
+static irqreturn_t nwpserial_interrupt(int irq, void *dev_id)
+{
+ struct nwpserial_port *up = dev_id;
+ struct tty_struct *tty = up->port.state->port.tty;
+ irqreturn_t ret;
+ unsigned int iir;
+ unsigned char ch;
+
+ spin_lock(&up->port.lock);
+
+ /* check if the uart was the interrupt source. */
+ iir = dcr_read(up->dcr_host, UART_IIR);
+ if (!iir) {
+ ret = IRQ_NONE;
+ goto out;
+ }
+
+ do {
+ up->port.icount.rx++;
+ ch = dcr_read(up->dcr_host, UART_RX);
+ if (up->port.ignore_status_mask != NWPSERIAL_STATUS_RXVALID)
+ tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ } while (dcr_read(up->dcr_host, UART_LSR) & UART_LSR_DR);
+
+ tty_flip_buffer_push(tty);
+ ret = IRQ_HANDLED;
+
+ /* clear interrupt */
+ dcr_write(up->dcr_host, UART_IIR, 1);
+out:
+ spin_unlock(&up->port.lock);
+ return ret;
+}
+
+static int nwpserial_startup(struct uart_port *port)
+{
+ struct nwpserial_port *up;
+ int err;
+
+ up = container_of(port, struct nwpserial_port, port);
+
+ /* disable flow control by default */
+ up->mcr = dcr_read(up->dcr_host, UART_MCR) & ~UART_MCR_AFE;
+ dcr_write(up->dcr_host, UART_MCR, up->mcr);
+
+ /* register interrupt handler */
+ err = request_irq(up->port.irq, nwpserial_interrupt,
+ IRQF_SHARED, "nwpserial", up);
+ if (err)
+ return err;
+
+ /* enable interrupts */
+ up->ier = UART_IER_RDI;
+ dcr_write(up->dcr_host, UART_IER, up->ier);
+
+ /* enable receiving */
+ up->port.ignore_status_mask &= ~NWPSERIAL_STATUS_RXVALID;
+
+ return 0;
+}
+
+static void nwpserial_shutdown(struct uart_port *port)
+{
+ struct nwpserial_port *up;
+ up = container_of(port, struct nwpserial_port, port);
+
+ /* disable receiving */
+ up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID;
+
+ /* disable interrupts from this port */
+ up->ier = 0;
+ dcr_write(up->dcr_host, UART_IER, up->ier);
+
+ /* free irq */
+ free_irq(up->port.irq, port);
+}
+
+static int nwpserial_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ return -EINVAL;
+}
+
+static const char *nwpserial_type(struct uart_port *port)
+{
+ return port->type == PORT_NWPSERIAL ? "nwpserial" : NULL;
+}
+
+static void nwpserial_set_termios(struct uart_port *port,
+ struct ktermios *termios, struct ktermios *old)
+{
+ struct nwpserial_port *up;
+ up = container_of(port, struct nwpserial_port, port);
+
+ up->port.read_status_mask = NWPSERIAL_STATUS_RXVALID
+ | NWPSERIAL_STATUS_TXFULL;
+
+ up->port.ignore_status_mask = 0;
+ /* ignore all characters if CREAD is not set */
+ if ((termios->c_cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID;
+
+ /* Copy back the old hardware settings */
+ if (old)
+ tty_termios_copy_hw(termios, old);
+}
+
+static void nwpserial_break_ctl(struct uart_port *port, int ctl)
+{
+ /* N/A */
+}
+
+static void nwpserial_enable_ms(struct uart_port *port)
+{
+ /* N/A */
+}
+
+static void nwpserial_stop_rx(struct uart_port *port)
+{
+ struct nwpserial_port *up;
+ up = container_of(port, struct nwpserial_port, port);
+ /* don't forward any more data (like !CREAD) */
+ up->port.ignore_status_mask = NWPSERIAL_STATUS_RXVALID;
+}
+
+static void nwpserial_putchar(struct nwpserial_port *up, unsigned char c)
+{
+ /* check if tx buffer is full */
+ wait_for_bits(up, UART_LSR_THRE);
+ dcr_write(up->dcr_host, UART_TX, c);
+ up->port.icount.tx++;
+}
+
+static void nwpserial_start_tx(struct uart_port *port)
+{
+ struct nwpserial_port *up;
+ struct circ_buf *xmit;
+ up = container_of(port, struct nwpserial_port, port);
+ xmit = &up->port.state->xmit;
+
+ if (port->x_char) {
+ nwpserial_putchar(up, up->port.x_char);
+ port->x_char = 0;
+ }
+
+ while (!(uart_circ_empty(xmit) || uart_tx_stopped(&up->port))) {
+ nwpserial_putchar(up, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
+ }
+}
+
+static unsigned int nwpserial_get_mctrl(struct uart_port *port)
+{
+ return 0;
+}
+
+static void nwpserial_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ /* N/A */
+}
+
+static void nwpserial_stop_tx(struct uart_port *port)
+{
+ /* N/A */
+}
+
+static unsigned int nwpserial_tx_empty(struct uart_port *port)
+{
+ struct nwpserial_port *up;
+ unsigned long flags;
+ int ret;
+ up = container_of(port, struct nwpserial_port, port);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ ret = dcr_read(up->dcr_host, UART_LSR);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return ret & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+}
+
+static struct uart_ops nwpserial_pops = {
+ .tx_empty = nwpserial_tx_empty,
+ .set_mctrl = nwpserial_set_mctrl,
+ .get_mctrl = nwpserial_get_mctrl,
+ .stop_tx = nwpserial_stop_tx,
+ .start_tx = nwpserial_start_tx,
+ .stop_rx = nwpserial_stop_rx,
+ .enable_ms = nwpserial_enable_ms,
+ .break_ctl = nwpserial_break_ctl,
+ .startup = nwpserial_startup,
+ .shutdown = nwpserial_shutdown,
+ .set_termios = nwpserial_set_termios,
+ .type = nwpserial_type,
+ .release_port = nwpserial_release_port,
+ .request_port = nwpserial_request_port,
+ .config_port = nwpserial_config_port,
+ .verify_port = nwpserial_verify_port,
+};
+
+static struct uart_driver nwpserial_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "nwpserial",
+ .dev_name = "ttySQ",
+ .major = TTY_MAJOR,
+ .minor = 68,
+ .nr = NWPSERIAL_NR,
+ .cons = NWPSERIAL_CONSOLE,
+};
+
+int nwpserial_register_port(struct uart_port *port)
+{
+ struct nwpserial_port *up = NULL;
+ int ret = -1;
+ int i;
+ static int first = 1;
+ int dcr_len;
+ int dcr_base;
+ struct device_node *dn;
+
+ mutex_lock(&nwpserial_mutex);
+
+ dn = port->dev->of_node;
+ if (dn == NULL)
+ goto out;
+
+ /* get dcr base. */
+ dcr_base = dcr_resource_start(dn, 0);
+
+ /* find matching entry */
+ for (i = 0; i < NWPSERIAL_NR; i++)
+ if (nwpserial_ports[i].port.iobase == dcr_base) {
+ up = &nwpserial_ports[i];
+ break;
+ }
+
+ /* we didn't find a matching entry, so search for a free port */
+ if (up == NULL)
+ for (i = 0; i < NWPSERIAL_NR; i++)
+ if (nwpserial_ports[i].port.type == PORT_UNKNOWN &&
+ nwpserial_ports[i].port.iobase == 0) {
+ up = &nwpserial_ports[i];
+ break;
+ }
+
+ if (up == NULL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (first)
+ uart_register_driver(&nwpserial_reg);
+ first = 0;
+
+ up->port.membase = port->membase;
+ up->port.irq = port->irq;
+ up->port.uartclk = port->uartclk;
+ up->port.fifosize = port->fifosize;
+ up->port.regshift = port->regshift;
+ up->port.iotype = port->iotype;
+ up->port.flags = port->flags;
+ up->port.mapbase = port->mapbase;
+ up->port.private_data = port->private_data;
+
+ if (port->dev)
+ up->port.dev = port->dev;
+
+ if (up->port.iobase != dcr_base) {
+ up->port.ops = &nwpserial_pops;
+ up->port.fifosize = 16;
+
+ spin_lock_init(&up->port.lock);
+
+ up->port.iobase = dcr_base;
+ dcr_len = dcr_resource_len(dn, 0);
+
+ up->dcr_host = dcr_map(dn, dcr_base, dcr_len);
+ if (!DCR_MAP_OK(up->dcr_host)) {
+			printk(KERN_ERR "Cannot map DCR resources for NWPSERIAL\n");
+ goto out;
+ }
+ }
+
+ ret = uart_add_one_port(&nwpserial_reg, &up->port);
+ if (ret == 0)
+ ret = up->port.line;
+
+out:
+ mutex_unlock(&nwpserial_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(nwpserial_register_port);
+
+void nwpserial_unregister_port(int line)
+{
+ struct nwpserial_port *up = &nwpserial_ports[line];
+ mutex_lock(&nwpserial_mutex);
+ uart_remove_one_port(&nwpserial_reg, &up->port);
+
+ up->port.type = PORT_UNKNOWN;
+
+ mutex_unlock(&nwpserial_mutex);
+}
+EXPORT_SYMBOL(nwpserial_unregister_port);
+
+#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
+static int __init nwpserial_console_init(void)
+{
+ struct nwpserial_port *up = NULL;
+ struct device_node *dn;
+ const char *name;
+ int dcr_base;
+ int dcr_len;
+ int i;
+
+ /* search for a free port */
+ for (i = 0; i < NWPSERIAL_NR; i++)
+ if (nwpserial_ports[i].port.type == PORT_UNKNOWN) {
+ up = &nwpserial_ports[i];
+ break;
+ }
+
+ if (up == NULL)
+ return -1;
+
+ name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+ if (name == NULL)
+ return -1;
+
+ dn = of_find_node_by_path(name);
+ if (!dn)
+ return -1;
+
+ spin_lock_init(&up->port.lock);
+ up->port.ops = &nwpserial_pops;
+ up->port.type = PORT_NWPSERIAL;
+ up->port.fifosize = 16;
+
+ dcr_base = dcr_resource_start(dn, 0);
+ dcr_len = dcr_resource_len(dn, 0);
+ up->port.iobase = dcr_base;
+
+ up->dcr_host = dcr_map(dn, dcr_base, dcr_len);
+ if (!DCR_MAP_OK(up->dcr_host)) {
+ printk("Cannot map DCR resources for SERIAL");
+ return -1;
+ }
+ register_console(&nwpserial_console);
+ return 0;
+}
+console_initcall(nwpserial_console_init);
+#endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
new file mode 100644
index 00000000..c911b241
--- /dev/null
+++ b/drivers/tty/serial/of_serial.c
@@ -0,0 +1,206 @@
+/*
+ * Serial Port driver for Open Firmware platform devices
+ *
+ * Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/nwpserial.h>
+
+struct of_serial_info {
+ int type;
+ int line;
+};
+
+/*
+ * Fill a struct uart_port for a given device node
+ */
+static int __devinit of_platform_serial_setup(struct platform_device *ofdev,
+ int type, struct uart_port *port)
+{
+ struct resource resource;
+ struct device_node *np = ofdev->dev.of_node;
+ const __be32 *clk, *spd;
+ const __be32 *prop;
+ int ret, prop_size;
+
+ memset(port, 0, sizeof *port);
+ spd = of_get_property(np, "current-speed", NULL);
+ clk = of_get_property(np, "clock-frequency", NULL);
+ if (!clk) {
+ dev_warn(&ofdev->dev, "no clock-frequency property set\n");
+ return -ENODEV;
+ }
+
+ ret = of_address_to_resource(np, 0, &resource);
+ if (ret) {
+ dev_warn(&ofdev->dev, "invalid address\n");
+ return ret;
+ }
+
+ spin_lock_init(&port->lock);
+ port->mapbase = resource.start;
+
+ /* Check for shifted address mapping */
+ prop = of_get_property(np, "reg-offset", &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ port->mapbase += be32_to_cpup(prop);
+
+ /* Check for registers offset within the devices address range */
+ prop = of_get_property(np, "reg-shift", &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ port->regshift = be32_to_cpup(prop);
+
+ port->irq = irq_of_parse_and_map(np, 0);
+ port->iotype = UPIO_MEM;
+ port->type = type;
+ port->uartclk = be32_to_cpup(clk);
+ port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP
+ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
+ port->dev = &ofdev->dev;
+ /* If current-speed was set, then try not to change it. */
+ if (spd)
+ port->custom_divisor = be32_to_cpup(clk) / (16 * (be32_to_cpup(spd)));
+
+ return 0;
+}
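+
+/*
+ * Illustrative example (values are hypothetical, not from this patch): with
+ * clock-frequency = <1843200> and current-speed = <115200> in the device
+ * tree, the code above sets custom_divisor = 1843200 / (16 * 115200) = 1,
+ * so the firmware-configured line speed is preserved.
+ */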
+
+/*
+ * Try to register a serial port
+ */
+static struct of_device_id of_platform_serial_table[];
+static int __devinit of_platform_serial_probe(struct platform_device *ofdev)
+{
+ const struct of_device_id *match;
+ struct of_serial_info *info;
+ struct uart_port port;
+ int port_type;
+ int ret;
+
+ match = of_match_device(of_platform_serial_table, &ofdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL))
+ return -EBUSY;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL)
+ return -ENOMEM;
+
+ port_type = (unsigned long)match->data;
+ ret = of_platform_serial_setup(ofdev, port_type, &port);
+ if (ret)
+ goto out;
+
+ switch (port_type) {
+#ifdef CONFIG_SERIAL_8250
+ case PORT_8250 ... PORT_MAX_8250:
+ ret = serial8250_register_port(&port);
+ break;
+#endif
+#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
+ case PORT_NWPSERIAL:
+ ret = nwpserial_register_port(&port);
+ break;
+#endif
+ default:
+ /* need to add code for these */
+ case PORT_UNKNOWN:
+ dev_info(&ofdev->dev, "Unknown serial port found, ignored\n");
+ ret = -ENODEV;
+ break;
+ }
+ if (ret < 0)
+ goto out;
+
+ info->type = port_type;
+ info->line = ret;
+ dev_set_drvdata(&ofdev->dev, info);
+ return 0;
+out:
+ kfree(info);
+ irq_dispose_mapping(port.irq);
+ return ret;
+}
+
+/*
+ * Release a line
+ */
+static int of_platform_serial_remove(struct platform_device *ofdev)
+{
+ struct of_serial_info *info = dev_get_drvdata(&ofdev->dev);
+ switch (info->type) {
+#ifdef CONFIG_SERIAL_8250
+ case PORT_8250 ... PORT_MAX_8250:
+ serial8250_unregister_port(info->line);
+ break;
+#endif
+#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
+ case PORT_NWPSERIAL:
+ nwpserial_unregister_port(info->line);
+ break;
+#endif
+ default:
+ /* need to add code for these */
+ break;
+ }
+ kfree(info);
+ return 0;
+}
+
+/*
+ * A few common types, add more as needed.
+ */
+static struct of_device_id __devinitdata of_platform_serial_table[] = {
+ { .compatible = "ns8250", .data = (void *)PORT_8250, },
+ { .compatible = "ns16450", .data = (void *)PORT_16450, },
+ { .compatible = "ns16550a", .data = (void *)PORT_16550A, },
+ { .compatible = "ns16550", .data = (void *)PORT_16550, },
+ { .compatible = "ns16750", .data = (void *)PORT_16750, },
+ { .compatible = "ns16850", .data = (void *)PORT_16850, },
+#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
+ { .compatible = "ibm,qpace-nwp-serial",
+ .data = (void *)PORT_NWPSERIAL, },
+#endif
+ { .type = "serial", .data = (void *)PORT_UNKNOWN, },
+ { /* end of list */ },
+};
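+
+/*
+ * Illustrative Open Firmware node (hypothetical, for reference only) that
+ * the table above would match and of_platform_serial_setup() would parse:
+ *
+ *	serial@4500 {
+ *		compatible = "ns16550a";
+ *		reg = <0x4500 0x100>;
+ *		interrupts = <42 2>;
+ *		clock-frequency = <1843200>;
+ *		current-speed = <115200>;
+ *	};
+ */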
+
+static struct platform_driver of_platform_serial_driver = {
+ .driver = {
+ .name = "of_serial",
+ .owner = THIS_MODULE,
+ .of_match_table = of_platform_serial_table,
+ },
+ .probe = of_platform_serial_probe,
+ .remove = of_platform_serial_remove,
+};
+
+static int __init of_platform_serial_init(void)
+{
+ return platform_driver_register(&of_platform_serial_driver);
+}
+module_init(of_platform_serial_init);
+
+static void __exit of_platform_serial_exit(void)
+{
+ return platform_driver_unregister(&of_platform_serial_driver);
+};
+module_exit(of_platform_serial_exit);
+
+MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Serial Port driver for Open Firmware platform devices");
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
new file mode 100644
index 00000000..6d3ec14b
--- /dev/null
+++ b/drivers/tty/serial/omap-serial.c
@@ -0,0 +1,1361 @@
+/*
+ * Driver for OMAP-UART controller.
+ * Based on drivers/serial/8250.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Authors:
+ * Govindraj R <govindraj.raja@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Note: This driver is kept separate from the 8250 driver because we cannot
+ * overload the 8250 driver with OMAP platform-specific configuration.
+ * Keeping it separate makes it easier to implement features such as DMA,
+ * hardware flow control and software flow control as required for the
+ * OMAP platform.
+ */
+
+#if defined(CONFIG_SERIAL_OMAP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/serial_reg.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/serial_core.h>
+#include <linux/irq.h>
+
+#include <plat/dma.h>
+#include <plat/dmtimer.h>
+#include <plat/omap-serial.h>
+
+static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
+
+/* Forward declaration of functions */
+static void uart_tx_dma_callback(int lch, u16 ch_status, void *data);
+static void serial_omap_rx_timeout(unsigned long uart_no);
+static int serial_omap_start_rxdma(struct uart_omap_port *up);
+
+static inline unsigned int serial_in(struct uart_omap_port *up, int offset)
+{
+ offset <<= up->port.regshift;
+ return readw(up->port.membase + offset);
+}
+
+static inline void serial_out(struct uart_omap_port *up, int offset, int value)
+{
+ offset <<= up->port.regshift;
+ writew(value, up->port.membase + offset);
+}
+
+static inline void serial_omap_clear_fifos(struct uart_omap_port *up)
+{
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ serial_out(up, UART_FCR, 0);
+}
+
+/*
+ * serial_omap_get_divisor - calculate divisor value
+ * @port: uart port info
+ * @baud: baudrate for which divisor needs to be calculated.
+ *
+ * We use our own function to calculate the divisor so as to support
+ * 13x mode; 3 Mbps uses a different divisor.
+ * Reference: OMAP TRM Chapter 17,
+ * Table 17-1. UART Mode Baud Rates, Divisor Values, and Error Rates,
+ * which lists oversampling versus divisor value:
+ * baud rates 460,800 to 3,686,400 all use divisor 13,
+ * except 3,000,000 which uses divisor 16.
+ */
+static unsigned int
+serial_omap_get_divisor(struct uart_port *port, unsigned int baud)
+{
+ unsigned int divisor;
+
+ if (baud > OMAP_MODE13X_SPEED && baud != 3000000)
+ divisor = 13;
+ else
+ divisor = 16;
+ return port->uartclk/(baud * divisor);
+}
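+
+/*
+ * Worked example (illustrative only, assuming a 48 MHz functional clock):
+ *  115200 baud -> 16x oversampling -> divisor = 48000000 / (115200 * 16) = 26
+ * 3686400 baud -> 13x oversampling -> divisor = 48000000 / (3686400 * 13) = 1
+ */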
+
+static void serial_omap_stop_rxdma(struct uart_omap_port *up)
+{
+ if (up->uart_dma.rx_dma_used) {
+ del_timer(&up->uart_dma.rx_timer);
+ omap_stop_dma(up->uart_dma.rx_dma_channel);
+ omap_free_dma(up->uart_dma.rx_dma_channel);
+ up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ up->uart_dma.rx_dma_used = false;
+ }
+}
+
+static void serial_omap_enable_ms(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->pdev->id);
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
+}
+
+static void serial_omap_stop_tx(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ if (up->use_dma &&
+ up->uart_dma.tx_dma_channel != OMAP_UART_DMA_CH_FREE) {
+ /*
+ * Check if dma is still active. If yes do nothing,
+ * return. Else stop dma
+ */
+ if (omap_get_dma_active_status(up->uart_dma.tx_dma_channel))
+ return;
+ omap_stop_dma(up->uart_dma.tx_dma_channel);
+ omap_free_dma(up->uart_dma.tx_dma_channel);
+ up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ }
+
+ if (up->ier & UART_IER_THRI) {
+ up->ier &= ~UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+static void serial_omap_stop_rx(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ if (up->use_dma)
+ serial_omap_stop_rxdma(up);
+ up->ier &= ~UART_IER_RLSI;
+ up->port.read_status_mask &= ~UART_LSR_DR;
+ serial_out(up, UART_IER, up->ier);
+}
+
+static inline void receive_chars(struct uart_omap_port *up, int *status)
+{
+ struct tty_struct *tty = up->port.state->port.tty;
+ unsigned int flag;
+	unsigned char ch = 0, lsr = *status;
+ int max_count = 256;
+
+ do {
+ if (likely(lsr & UART_LSR_DR))
+ ch = serial_in(up, UART_RX);
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+
+ if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
+ /*
+ * For statistics only
+ */
+ if (lsr & UART_LSR_BI) {
+ lsr &= ~(UART_LSR_FE | UART_LSR_PE);
+ up->port.icount.brk++;
+ /*
+ * We do the SysRQ and SAK checking
+ * here because otherwise the break
+ * may get masked by ignore_status_mask
+ * or read_status_mask.
+ */
+ if (uart_handle_break(&up->port))
+ goto ignore_char;
+ } else if (lsr & UART_LSR_PE) {
+ up->port.icount.parity++;
+ } else if (lsr & UART_LSR_FE) {
+ up->port.icount.frame++;
+ }
+
+ if (lsr & UART_LSR_OE)
+ up->port.icount.overrun++;
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ lsr &= up->port.read_status_mask;
+
+#ifdef CONFIG_SERIAL_OMAP_CONSOLE
+ if (up->port.line == up->port.cons->index) {
+ /* Recover the break flag from console xmit */
+ lsr |= up->lsr_break_flag;
+ }
+#endif
+ if (lsr & UART_LSR_BI)
+ flag = TTY_BREAK;
+ else if (lsr & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (lsr & UART_LSR_FE)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(&up->port, ch))
+ goto ignore_char;
+ uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag);
+ignore_char:
+ lsr = serial_in(up, UART_LSR);
+ } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
+ spin_unlock(&up->port.lock);
+ tty_flip_buffer_push(tty);
+ spin_lock(&up->port.lock);
+}
+
+static void transmit_chars(struct uart_omap_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int count;
+
+ if (up->port.x_char) {
+ serial_out(up, UART_TX, up->port.x_char);
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+ serial_omap_stop_tx(&up->port);
+ return;
+ }
+ count = up->port.fifosize / 4;
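+	/*
+	 * With the 64-byte FIFO configured in serial_omap_probe(), the loop
+	 * below writes at most 16 characters per TX interrupt.
+	 */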
+ do {
+ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ if (uart_circ_empty(xmit))
+ serial_omap_stop_tx(&up->port);
+}
+
+static inline void serial_omap_enable_ier_thri(struct uart_omap_port *up)
+{
+ if (!(up->ier & UART_IER_THRI)) {
+ up->ier |= UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+static void serial_omap_start_tx(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ struct circ_buf *xmit;
+ unsigned int start;
+ int ret = 0;
+
+ if (!up->use_dma) {
+ serial_omap_enable_ier_thri(up);
+ return;
+ }
+
+ if (up->uart_dma.tx_dma_used)
+ return;
+
+ xmit = &up->port.state->xmit;
+
+ if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) {
+ ret = omap_request_dma(up->uart_dma.uart_dma_tx,
+ "UART Tx DMA",
+ (void *)uart_tx_dma_callback, up,
+ &(up->uart_dma.tx_dma_channel));
+
+ if (ret < 0) {
+ serial_omap_enable_ier_thri(up);
+ return;
+ }
+ }
+ spin_lock(&(up->uart_dma.tx_lock));
+ up->uart_dma.tx_dma_used = true;
+ spin_unlock(&(up->uart_dma.tx_lock));
+
+ start = up->uart_dma.tx_buf_dma_phys +
+ (xmit->tail & (UART_XMIT_SIZE - 1));
+
+ up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
+	/*
+	 * It is a circular buffer; check whether the data wraps around the
+	 * end of the buffer. If it does, it has to be transferred in two
+	 * separate DMA transfers.
+	 */
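+	/*
+	 * Illustrative example (hypothetical numbers): with UART_XMIT_SIZE
+	 * = 4096, tail = 4000 and 200 bytes pending, only the 96 bytes up to
+	 * the end of the buffer are sent now; the remaining 104 bytes are
+	 * sent by serial_omap_continue_tx() from the DMA completion callback.
+	 */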
+ if (start + up->uart_dma.tx_buf_size >=
+ up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
+ up->uart_dma.tx_buf_size =
+ (up->uart_dma.tx_buf_dma_phys +
+ UART_XMIT_SIZE) - start;
+
+ omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ up->uart_dma.uart_base, 0, 0);
+ omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
+ OMAP_DMA_AMODE_POST_INC, start, 0, 0);
+ omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
+ OMAP_DMA_DATA_TYPE_S8,
+ up->uart_dma.tx_buf_size, 1,
+ OMAP_DMA_SYNC_ELEMENT,
+ up->uart_dma.uart_dma_tx, 0);
+ /* FIXME: Cache maintenance needed here? */
+ omap_start_dma(up->uart_dma.tx_dma_channel);
+}
+
+static unsigned int check_modem_status(struct uart_omap_port *up)
+{
+ unsigned int status;
+
+ status = serial_in(up, UART_MSR);
+ status |= up->msr_saved_flags;
+ up->msr_saved_flags = 0;
+ if ((status & UART_MSR_ANY_DELTA) == 0)
+ return status;
+
+ if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
+ up->port.state != NULL) {
+ if (status & UART_MSR_TERI)
+ up->port.icount.rng++;
+ if (status & UART_MSR_DDSR)
+ up->port.icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ uart_handle_dcd_change
+ (&up->port, status & UART_MSR_DCD);
+ if (status & UART_MSR_DCTS)
+ uart_handle_cts_change
+ (&up->port, status & UART_MSR_CTS);
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+ }
+
+ return status;
+}
+
+/**
+ * serial_omap_irq() - This handles the interrupt from one port
+ * @irq: uart port irq number
+ * @dev_id: uart port info
+ */
+static inline irqreturn_t serial_omap_irq(int irq, void *dev_id)
+{
+ struct uart_omap_port *up = dev_id;
+ unsigned int iir, lsr;
+ unsigned long flags;
+
+ iir = serial_in(up, UART_IIR);
+ if (iir & UART_IIR_NO_INT)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ lsr = serial_in(up, UART_LSR);
+ if (iir & UART_IIR_RLSI) {
+ if (!up->use_dma) {
+ if (lsr & UART_LSR_DR)
+ receive_chars(up, &lsr);
+ } else {
+ up->ier &= ~(UART_IER_RDI | UART_IER_RLSI);
+ serial_out(up, UART_IER, up->ier);
+ if ((serial_omap_start_rxdma(up) != 0) &&
+ (lsr & UART_LSR_DR))
+ receive_chars(up, &lsr);
+ }
+ }
+
+ check_modem_status(up);
+ if ((lsr & UART_LSR_THRE) && (iir & UART_IIR_THRI))
+ transmit_chars(up);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ up->port_activity = jiffies;
+ return IRQ_HANDLED;
+}
+
+static unsigned int serial_omap_tx_empty(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned long flags = 0;
+ unsigned int ret = 0;
+
+ dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->pdev->id);
+ spin_lock_irqsave(&up->port.lock, flags);
+ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return ret;
+}
+
+static unsigned int serial_omap_get_mctrl(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned char status;
+ unsigned int ret = 0;
+
+ status = check_modem_status(up);
+ dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->pdev->id);
+
+ if (status & UART_MSR_DCD)
+ ret |= TIOCM_CAR;
+ if (status & UART_MSR_RI)
+ ret |= TIOCM_RNG;
+ if (status & UART_MSR_DSR)
+ ret |= TIOCM_DSR;
+ if (status & UART_MSR_CTS)
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned char mcr = 0;
+
+ dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->pdev->id);
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_OUT1)
+ mcr |= UART_MCR_OUT1;
+ if (mctrl & TIOCM_OUT2)
+ mcr |= UART_MCR_OUT2;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+ mcr |= up->mcr;
+ serial_out(up, UART_MCR, mcr);
+}
+
+static void serial_omap_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned long flags = 0;
+
+ dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->pdev->id);
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+ else
+ up->lcr &= ~UART_LCR_SBC;
+ serial_out(up, UART_LCR, up->lcr);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static int serial_omap_startup(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned long flags = 0;
+ int retval;
+
+ /*
+ * Allocate the IRQ
+ */
+ retval = request_irq(up->port.irq, serial_omap_irq, up->port.irqflags,
+ up->name, up);
+ if (retval)
+ return retval;
+
+ dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->pdev->id);
+
+ /*
+ * Clear the FIFO buffers and disable them.
+ * (they will be reenabled in set_termios())
+ */
+ serial_omap_clear_fifos(up);
+ /* For Hardware flow control */
+ serial_out(up, UART_MCR, UART_MCR_RTS);
+
+ /*
+ * Clear the interrupt registers.
+ */
+ (void) serial_in(up, UART_LSR);
+ if (serial_in(up, UART_LSR) & UART_LSR_DR)
+ (void) serial_in(up, UART_RX);
+ (void) serial_in(up, UART_IIR);
+ (void) serial_in(up, UART_MSR);
+
+ /*
+ * Now, initialize the UART
+ */
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
+ spin_lock_irqsave(&up->port.lock, flags);
+ /*
+ * Most PC uarts need OUT2 raised to enable interrupts.
+ */
+ up->port.mctrl |= TIOCM_OUT2;
+ serial_omap_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ up->msr_saved_flags = 0;
+ if (up->use_dma) {
+ free_page((unsigned long)up->port.state->xmit.buf);
+ up->port.state->xmit.buf = dma_alloc_coherent(NULL,
+ UART_XMIT_SIZE,
+ (dma_addr_t *)&(up->uart_dma.tx_buf_dma_phys),
+ 0);
+ init_timer(&(up->uart_dma.rx_timer));
+ up->uart_dma.rx_timer.function = serial_omap_rx_timeout;
+ up->uart_dma.rx_timer.data = up->pdev->id;
+		/* Currently the buffer size is 4 KB; it can be increased if needed. */
+ up->uart_dma.rx_buf = dma_alloc_coherent(NULL,
+ up->uart_dma.rx_buf_size,
+ (dma_addr_t *)&(up->uart_dma.rx_buf_dma_phys), 0);
+ }
+ /*
+ * Finally, enable interrupts. Note: Modem status interrupts
+ * are set via set_termios(), which will be occurring imminently
+ * anyway, so we don't enable them here.
+ */
+ up->ier = UART_IER_RLSI | UART_IER_RDI;
+ serial_out(up, UART_IER, up->ier);
+
+ /* Enable module level wake up */
+ serial_out(up, UART_OMAP_WER, OMAP_UART_WER_MOD_WKUP);
+
+ up->port_activity = jiffies;
+ return 0;
+}
+
+static void serial_omap_shutdown(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned long flags = 0;
+
+ dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->pdev->id);
+ /*
+ * Disable interrupts from this port
+ */
+ up->ier = 0;
+ serial_out(up, UART_IER, 0);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ up->port.mctrl &= ~TIOCM_OUT2;
+ serial_omap_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /*
+ * Disable break condition and FIFOs
+ */
+ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
+ serial_omap_clear_fifos(up);
+
+ /*
+ * Read data port to reset things, and then free the irq
+ */
+ if (serial_in(up, UART_LSR) & UART_LSR_DR)
+ (void) serial_in(up, UART_RX);
+ if (up->use_dma) {
+ dma_free_coherent(up->port.dev,
+ UART_XMIT_SIZE, up->port.state->xmit.buf,
+ up->uart_dma.tx_buf_dma_phys);
+ up->port.state->xmit.buf = NULL;
+ serial_omap_stop_rx(port);
+ dma_free_coherent(up->port.dev,
+ up->uart_dma.rx_buf_size, up->uart_dma.rx_buf,
+ up->uart_dma.rx_buf_dma_phys);
+ up->uart_dma.rx_buf = NULL;
+ }
+ free_irq(up->port.irq, up);
+}
+
+static inline void
+serial_omap_configure_xonxoff
+ (struct uart_omap_port *up, struct ktermios *termios)
+{
+ unsigned char efr = 0;
+
+ up->lcr = serial_in(up, UART_LCR);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ up->efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, up->efr & ~UART_EFR_ECB);
+
+ serial_out(up, UART_XON1, termios->c_cc[VSTART]);
+ serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]);
+
+ /* clear SW control mode bits */
+ efr = up->efr;
+ efr &= OMAP_UART_SW_CLR;
+
+ /*
+ * IXON Flag:
+ * Enable XON/XOFF flow control on output.
+ * Transmit XON1, XOFF1
+ */
+ if (termios->c_iflag & IXON)
+ efr |= OMAP_UART_SW_TX;
+
+ /*
+ * IXOFF Flag:
+ * Enable XON/XOFF flow control on input.
+ * Receiver compares XON1, XOFF1.
+ */
+ if (termios->c_iflag & IXOFF)
+ efr |= OMAP_UART_SW_RX;
+
+ serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+
+ up->mcr = serial_in(up, UART_MCR);
+
+ /*
+ * IXANY Flag:
+ * Enable any character to restart output.
+ * Operation resumes after receiving any
+ * character after recognition of the XOFF character
+ */
+ if (termios->c_iflag & IXANY)
+ up->mcr |= UART_MCR_XONANY;
+
+ serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
+ /* Enable special char function UARTi.EFR_REG[5] and
+ * load the new software flow control mode IXON or IXOFF
+ * and restore the UARTi.EFR_REG[4] ENHANCED_EN value.
+ */
+ serial_out(up, UART_EFR, efr | UART_EFR_SCD);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+
+ serial_out(up, UART_MCR, up->mcr & ~UART_MCR_TCRTLR);
+ serial_out(up, UART_LCR, up->lcr);
+}
+
+static void
+serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned char cval = 0;
+ unsigned char efr = 0;
+ unsigned long flags = 0;
+ unsigned int baud, quot;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ cval = UART_LCR_WLEN5;
+ break;
+ case CS6:
+ cval = UART_LCR_WLEN6;
+ break;
+ case CS7:
+ cval = UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ cval = UART_LCR_WLEN8;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ cval |= UART_LCR_STOP;
+ if (termios->c_cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(termios->c_cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
+ quot = serial_omap_get_divisor(port, baud);
+
+ up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
+ UART_FCR_ENABLE_FIFO;
+ if (up->use_dma)
+ up->fcr |= UART_FCR_DMA_SELECT;
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ up->port.read_status_mask |= UART_LSR_BI;
+
+ /*
+ * Characters to ignore
+ */
+ up->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (termios->c_iflag & IGNBRK) {
+ up->port.ignore_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_OE;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= UART_LSR_DR;
+
+ /*
+ * Modem status interrupts
+ */
+ up->ier &= ~UART_IER_MSI;
+ if (UART_ENABLE_MS(&up->port, termios->c_cflag))
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
+ serial_out(up, UART_LCR, cval); /* reset DLAB */
+
+ /* FIFOs and DMA Settings */
+
+	/*
+	 * The FCR can only be changed when the baud clock is not running,
+	 * i.e. when DLL_REG and DLH_REG are set to 0.
+	 */
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ serial_out(up, UART_DLL, 0);
+ serial_out(up, UART_DLM, 0);
+ serial_out(up, UART_LCR, 0);
+
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ up->efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
+
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ up->mcr = serial_in(up, UART_MCR);
+ serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
+ /* FIFO ENABLE, DMA MODE */
+ serial_out(up, UART_FCR, up->fcr);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ if (up->use_dma) {
+ serial_out(up, UART_TI752_TLR, 0);
+ serial_out(up, UART_OMAP_SCR,
+ (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8));
+ }
+
+ serial_out(up, UART_EFR, up->efr);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ serial_out(up, UART_MCR, up->mcr);
+
+ /* Protocol, Baud Rate, and Interrupt Settings */
+
+ serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ up->efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
+
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_IER, 0);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
+ serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
+
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_IER, up->ier);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ serial_out(up, UART_EFR, up->efr);
+ serial_out(up, UART_LCR, cval);
+
+ if (baud > 230400 && baud != 3000000)
+ serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_13X_MODE);
+ else
+ serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE);
+
+ /* Hardware Flow Control Configuration */
+
+ if (termios->c_cflag & CRTSCTS) {
+ efr |= (UART_EFR_CTS | UART_EFR_RTS);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+
+ up->mcr = serial_in(up, UART_MCR);
+ serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
+
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ up->efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
+
+ serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
+ serial_out(up, UART_EFR, efr); /* Enable AUTORTS and AUTOCTS */
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ serial_out(up, UART_MCR, up->mcr | UART_MCR_RTS);
+ serial_out(up, UART_LCR, cval);
+ }
+
+ serial_omap_set_mctrl(&up->port, up->port.mctrl);
+ /* Software Flow Control Configuration */
+ serial_omap_configure_xonxoff(up, termios);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
+}
+
+static void
+serial_omap_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned char efr;
+
+ dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->pdev->id);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ efr = serial_in(up, UART_EFR);
+ serial_out(up, UART_EFR, efr | UART_EFR_ECB);
+ serial_out(up, UART_LCR, 0);
+
+ serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(up, UART_EFR, efr);
+ serial_out(up, UART_LCR, 0);
+}
+
+static void serial_omap_release_port(struct uart_port *port)
+{
+ dev_dbg(port->dev, "serial_omap_release_port+\n");
+}
+
+static int serial_omap_request_port(struct uart_port *port)
+{
+ dev_dbg(port->dev, "serial_omap_request_port+\n");
+ return 0;
+}
+
+static void serial_omap_config_port(struct uart_port *port, int flags)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ dev_dbg(up->port.dev, "serial_omap_config_port+%d\n",
+ up->pdev->id);
+ up->port.type = PORT_OMAP;
+}
+
+static int
+serial_omap_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ /* we don't want the core code to modify any port params */
+ dev_dbg(port->dev, "serial_omap_verify_port+\n");
+ return -EINVAL;
+}
+
+static const char *
+serial_omap_type(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->pdev->id);
+ return up->name;
+}
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+static inline void wait_for_xmitr(struct uart_omap_port *up)
+{
+ unsigned int status, tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ do {
+ status = serial_in(up, UART_LSR);
+
+ if (status & UART_LSR_BI)
+ up->lsr_break_flag = UART_LSR_BI;
+
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ tmout = 1000000;
+ for (tmout = 1000000; tmout; tmout--) {
+ unsigned int msr = serial_in(up, UART_MSR);
+
+ up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
+ if (msr & UART_MSR_CTS)
+ break;
+
+ udelay(1);
+ }
+ }
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+
+static void serial_omap_poll_put_char(struct uart_port *port, unsigned char ch)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ wait_for_xmitr(up);
+ serial_out(up, UART_TX, ch);
+}
+
+static int serial_omap_poll_get_char(struct uart_port *port)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+ unsigned int status = serial_in(up, UART_LSR);
+
+ if (!(status & UART_LSR_DR))
+ return NO_POLL_CHAR;
+
+ return serial_in(up, UART_RX);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
+#ifdef CONFIG_SERIAL_OMAP_CONSOLE
+
+static struct uart_omap_port *serial_omap_console_ports[4];
+
+static struct uart_driver serial_omap_reg;
+
+static void serial_omap_console_putchar(struct uart_port *port, int ch)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ wait_for_xmitr(up);
+ serial_out(up, UART_TX, ch);
+}
+
+static void
+serial_omap_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_omap_port *up = serial_omap_console_ports[co->index];
+ unsigned long flags;
+ unsigned int ier;
+ int locked = 1;
+
+ local_irq_save(flags);
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock(&up->port.lock);
+ else
+ spin_lock(&up->port.lock);
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, 0);
+
+ uart_console_write(&up->port, s, count, serial_omap_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up);
+ serial_out(up, UART_IER, ier);
+ /*
+ * The receive handling will happen properly because the
+ * receive ready bit will still be set; it is not cleared
+ * on read. However, modem control will not, we must
+ * call it if we have saved something in the saved flags
+ * while processing with interrupts off.
+ */
+ if (up->msr_saved_flags)
+ check_modem_status(up);
+
+ if (locked)
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
+}
+
+static int __init
+serial_omap_console_setup(struct console *co, char *options)
+{
+ struct uart_omap_port *up;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (serial_omap_console_ports[co->index] == NULL)
+ return -ENODEV;
+ up = serial_omap_console_ports[co->index];
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&up->port, co, baud, parity, bits, flow);
+}
+
+static struct console serial_omap_console = {
+ .name = OMAP_SERIAL_NAME,
+ .write = serial_omap_console_write,
+ .device = uart_console_device,
+ .setup = serial_omap_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &serial_omap_reg,
+};
+
+static void serial_omap_add_console_port(struct uart_omap_port *up)
+{
+ serial_omap_console_ports[up->pdev->id] = up;
+}
+
+#define OMAP_CONSOLE (&serial_omap_console)
+
+#else
+
+#define OMAP_CONSOLE NULL
+
+static inline void serial_omap_add_console_port(struct uart_omap_port *up)
+{}
+
+#endif
+
+static struct uart_ops serial_omap_pops = {
+ .tx_empty = serial_omap_tx_empty,
+ .set_mctrl = serial_omap_set_mctrl,
+ .get_mctrl = serial_omap_get_mctrl,
+ .stop_tx = serial_omap_stop_tx,
+ .start_tx = serial_omap_start_tx,
+ .stop_rx = serial_omap_stop_rx,
+ .enable_ms = serial_omap_enable_ms,
+ .break_ctl = serial_omap_break_ctl,
+ .startup = serial_omap_startup,
+ .shutdown = serial_omap_shutdown,
+ .set_termios = serial_omap_set_termios,
+ .pm = serial_omap_pm,
+ .type = serial_omap_type,
+ .release_port = serial_omap_release_port,
+ .request_port = serial_omap_request_port,
+ .config_port = serial_omap_config_port,
+ .verify_port = serial_omap_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_put_char = serial_omap_poll_put_char,
+ .poll_get_char = serial_omap_poll_get_char,
+#endif
+};
+
+static struct uart_driver serial_omap_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "OMAP-SERIAL",
+ .dev_name = OMAP_SERIAL_NAME,
+ .nr = OMAP_MAX_HSUART_PORTS,
+ .cons = OMAP_CONSOLE,
+};
+
+static int
+serial_omap_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct uart_omap_port *up = platform_get_drvdata(pdev);
+
+ if (up)
+ uart_suspend_port(&serial_omap_reg, &up->port);
+ return 0;
+}
+
+static int serial_omap_resume(struct platform_device *dev)
+{
+ struct uart_omap_port *up = platform_get_drvdata(dev);
+
+ if (up)
+ uart_resume_port(&serial_omap_reg, &up->port);
+ return 0;
+}
+
+static void serial_omap_rx_timeout(unsigned long uart_no)
+{
+ struct uart_omap_port *up = ui[uart_no];
+ unsigned int curr_dma_pos, curr_transmitted_size;
+ int ret = 0;
+
+ curr_dma_pos = omap_get_dma_dst_pos(up->uart_dma.rx_dma_channel);
+ if ((curr_dma_pos == up->uart_dma.prev_rx_dma_pos) ||
+ (curr_dma_pos == 0)) {
+ if (jiffies_to_msecs(jiffies - up->port_activity) <
+ RX_TIMEOUT) {
+ mod_timer(&up->uart_dma.rx_timer, jiffies +
+ usecs_to_jiffies(up->uart_dma.rx_timeout));
+ } else {
+ serial_omap_stop_rxdma(up);
+ up->ier |= (UART_IER_RDI | UART_IER_RLSI);
+ serial_out(up, UART_IER, up->ier);
+ }
+ return;
+ }
+
+ curr_transmitted_size = curr_dma_pos -
+ up->uart_dma.prev_rx_dma_pos;
+ up->port.icount.rx += curr_transmitted_size;
+ tty_insert_flip_string(up->port.state->port.tty,
+ up->uart_dma.rx_buf +
+ (up->uart_dma.prev_rx_dma_pos -
+ up->uart_dma.rx_buf_dma_phys),
+ curr_transmitted_size);
+ tty_flip_buffer_push(up->port.state->port.tty);
+ up->uart_dma.prev_rx_dma_pos = curr_dma_pos;
+ if (up->uart_dma.rx_buf_size +
+ up->uart_dma.rx_buf_dma_phys == curr_dma_pos) {
+ ret = serial_omap_start_rxdma(up);
+ if (ret < 0) {
+ serial_omap_stop_rxdma(up);
+ up->ier |= (UART_IER_RDI | UART_IER_RLSI);
+ serial_out(up, UART_IER, up->ier);
+ }
+ } else {
+ mod_timer(&up->uart_dma.rx_timer, jiffies +
+ usecs_to_jiffies(up->uart_dma.rx_timeout));
+ }
+ up->port_activity = jiffies;
+}
+
+static void uart_rx_dma_callback(int lch, u16 ch_status, void *data)
+{
+ return;
+}
+
+static int serial_omap_start_rxdma(struct uart_omap_port *up)
+{
+ int ret = 0;
+
+ if (up->uart_dma.rx_dma_channel == -1) {
+ ret = omap_request_dma(up->uart_dma.uart_dma_rx,
+ "UART Rx DMA",
+ (void *)uart_rx_dma_callback, up,
+ &(up->uart_dma.rx_dma_channel));
+ if (ret < 0)
+ return ret;
+
+ omap_set_dma_src_params(up->uart_dma.rx_dma_channel, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ up->uart_dma.uart_base, 0, 0);
+ omap_set_dma_dest_params(up->uart_dma.rx_dma_channel, 0,
+ OMAP_DMA_AMODE_POST_INC,
+ up->uart_dma.rx_buf_dma_phys, 0, 0);
+ omap_set_dma_transfer_params(up->uart_dma.rx_dma_channel,
+ OMAP_DMA_DATA_TYPE_S8,
+ up->uart_dma.rx_buf_size, 1,
+ OMAP_DMA_SYNC_ELEMENT,
+ up->uart_dma.uart_dma_rx, 0);
+ }
+ up->uart_dma.prev_rx_dma_pos = up->uart_dma.rx_buf_dma_phys;
+ /* FIXME: Cache maintenance needed here? */
+ omap_start_dma(up->uart_dma.rx_dma_channel);
+ mod_timer(&up->uart_dma.rx_timer, jiffies +
+ usecs_to_jiffies(up->uart_dma.rx_timeout));
+ up->uart_dma.rx_dma_used = true;
+ return ret;
+}
+
+static void serial_omap_continue_tx(struct uart_omap_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ unsigned int start = up->uart_dma.tx_buf_dma_phys
+ + (xmit->tail & (UART_XMIT_SIZE - 1));
+
+ if (uart_circ_empty(xmit))
+ return;
+
+ up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
+	/*
+	 * It is a circular buffer; check whether the data wraps around the
+	 * end of the buffer. If it does, it has to be transferred in two
+	 * separate DMA transfers.
+	 */
+ if (start + up->uart_dma.tx_buf_size >=
+ up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
+ up->uart_dma.tx_buf_size =
+ (up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE) - start;
+ omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ up->uart_dma.uart_base, 0, 0);
+ omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
+ OMAP_DMA_AMODE_POST_INC, start, 0, 0);
+ omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
+ OMAP_DMA_DATA_TYPE_S8,
+ up->uart_dma.tx_buf_size, 1,
+ OMAP_DMA_SYNC_ELEMENT,
+ up->uart_dma.uart_dma_tx, 0);
+ /* FIXME: Cache maintenance needed here? */
+ omap_start_dma(up->uart_dma.tx_dma_channel);
+}
+
+static void uart_tx_dma_callback(int lch, u16 ch_status, void *data)
+{
+ struct uart_omap_port *up = (struct uart_omap_port *)data;
+ struct circ_buf *xmit = &up->port.state->xmit;
+
+ xmit->tail = (xmit->tail + up->uart_dma.tx_buf_size) & \
+ (UART_XMIT_SIZE - 1);
+ up->port.icount.tx += up->uart_dma.tx_buf_size;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ if (uart_circ_empty(xmit)) {
+ spin_lock(&(up->uart_dma.tx_lock));
+ serial_omap_stop_tx(&up->port);
+ up->uart_dma.tx_dma_used = false;
+ spin_unlock(&(up->uart_dma.tx_lock));
+ } else {
+ omap_stop_dma(up->uart_dma.tx_dma_channel);
+ serial_omap_continue_tx(up);
+ }
+ up->port_activity = jiffies;
+ return;
+}
+
+static int serial_omap_probe(struct platform_device *pdev)
+{
+ struct uart_omap_port *up;
+ struct resource *mem, *irq, *dma_tx, *dma_rx;
+ struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
+ int ret = -ENOSPC;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ return -ENODEV;
+ }
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(mem->start, (mem->end - mem->start) + 1,
+ pdev->dev.driver->name)) {
+ dev_err(&pdev->dev, "memory region already claimed\n");
+ return -EBUSY;
+ }
+
+ dma_rx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
+ if (!dma_rx) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dma_tx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
+ if (!dma_tx) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ up = kzalloc(sizeof(*up), GFP_KERNEL);
+ if (up == NULL) {
+ ret = -ENOMEM;
+ goto do_release_region;
+ }
+ sprintf(up->name, "OMAP UART%d", pdev->id);
+ up->pdev = pdev;
+ up->port.dev = &pdev->dev;
+ up->port.type = PORT_OMAP;
+ up->port.iotype = UPIO_MEM;
+ up->port.irq = irq->start;
+
+ up->port.regshift = 2;
+ up->port.fifosize = 64;
+ up->port.ops = &serial_omap_pops;
+ up->port.line = pdev->id;
+
+ up->port.membase = omap_up_info->membase;
+ up->port.mapbase = omap_up_info->mapbase;
+ up->port.flags = omap_up_info->flags;
+ up->port.irqflags = omap_up_info->irqflags;
+ up->port.uartclk = omap_up_info->uartclk;
+ up->uart_dma.uart_base = mem->start;
+
+ if (omap_up_info->dma_enabled) {
+ up->uart_dma.uart_dma_tx = dma_tx->start;
+ up->uart_dma.uart_dma_rx = dma_rx->start;
+ up->use_dma = 1;
+ up->uart_dma.rx_buf_size = 4096;
+ up->uart_dma.rx_timeout = 2;
+ spin_lock_init(&(up->uart_dma.tx_lock));
+ spin_lock_init(&(up->uart_dma.rx_lock));
+ up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ }
+
+ ui[pdev->id] = up;
+ serial_omap_add_console_port(up);
+
+ ret = uart_add_one_port(&serial_omap_reg, &up->port);
+ if (ret != 0)
+ goto do_release_region;
+
+ platform_set_drvdata(pdev, up);
+ return 0;
+err:
+ dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
+ pdev->id, __func__, ret);
+do_release_region:
+ release_mem_region(mem->start, (mem->end - mem->start) + 1);
+ return ret;
+}
+
+static int serial_omap_remove(struct platform_device *dev)
+{
+ struct uart_omap_port *up = platform_get_drvdata(dev);
+
+ platform_set_drvdata(dev, NULL);
+ if (up) {
+ uart_remove_one_port(&serial_omap_reg, &up->port);
+ kfree(up);
+ }
+ return 0;
+}
+
+static struct platform_driver serial_omap_driver = {
+ .probe = serial_omap_probe,
+ .remove = serial_omap_remove,
+
+ .suspend = serial_omap_suspend,
+ .resume = serial_omap_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init serial_omap_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&serial_omap_reg);
+ if (ret != 0)
+ return ret;
+ ret = platform_driver_register(&serial_omap_driver);
+ if (ret != 0)
+ uart_unregister_driver(&serial_omap_reg);
+ return ret;
+}
+
+static void __exit serial_omap_exit(void)
+{
+ platform_driver_unregister(&serial_omap_driver);
+ uart_unregister_driver(&serial_omap_reg);
+}
+
+module_init(serial_omap_init);
+module_exit(serial_omap_exit);
+
+MODULE_DESCRIPTION("OMAP High Speed UART driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
new file mode 100644
index 00000000..101eda9f
--- /dev/null
+++ b/drivers/tty/serial/pch_uart.c
@@ -0,0 +1,1624 @@
+/*
+ *Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ *
+ *This program is free software; you can redistribute it and/or modify
+ *it under the terms of the GNU General Public License as published by
+ *the Free Software Foundation; version 2 of the License.
+ *
+ *This program is distributed in the hope that it will be useful,
+ *but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *GNU General Public License for more details.
+ *
+ *You should have received a copy of the GNU General Public License
+ *along with this program; if not, write to the Free Software
+ *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/serial_reg.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/serial_core.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/dmi.h>
+
+#include <linux/dmaengine.h>
+#include <linux/pch_dma.h>
+
+enum {
+ PCH_UART_HANDLED_RX_INT_SHIFT,
+ PCH_UART_HANDLED_TX_INT_SHIFT,
+ PCH_UART_HANDLED_RX_ERR_INT_SHIFT,
+ PCH_UART_HANDLED_RX_TRG_INT_SHIFT,
+ PCH_UART_HANDLED_MS_INT_SHIFT,
+};
+
+enum {
+ PCH_UART_8LINE,
+ PCH_UART_2LINE,
+};
+
+#define PCH_UART_DRIVER_DEVICE "ttyPCH"
+
+/*
+ * Set the maximum number of UART ports:
+ * Intel EG20T PCH: 4 ports
+ * OKI SEMICONDUCTOR ML7213 IOH: 3 ports
+ */
+#define PCH_UART_NR 4
+
+#define PCH_UART_HANDLED_RX_INT (1<<((PCH_UART_HANDLED_RX_INT_SHIFT)<<1))
+#define PCH_UART_HANDLED_TX_INT (1<<((PCH_UART_HANDLED_TX_INT_SHIFT)<<1))
+#define PCH_UART_HANDLED_RX_ERR_INT (1<<((\
+ PCH_UART_HANDLED_RX_ERR_INT_SHIFT)<<1))
+#define PCH_UART_HANDLED_RX_TRG_INT (1<<((\
+ PCH_UART_HANDLED_RX_TRG_INT_SHIFT)<<1))
+#define PCH_UART_HANDLED_MS_INT (1<<((PCH_UART_HANDLED_MS_INT_SHIFT)<<1))
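+
+/*
+ * Note on the encoding above: each handled-interrupt flag occupies every
+ * other bit, i.e. (1 << (shift << 1)), so RX_INT is bit 0, TX_INT bit 2,
+ * RX_ERR_INT bit 4, RX_TRG_INT bit 6 and MS_INT bit 8.
+ */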
+
+#define PCH_UART_RBR 0x00
+#define PCH_UART_THR 0x00
+
+#define PCH_UART_IER_MASK (PCH_UART_IER_ERBFI|PCH_UART_IER_ETBEI|\
+ PCH_UART_IER_ELSI|PCH_UART_IER_EDSSI)
+#define PCH_UART_IER_ERBFI 0x00000001
+#define PCH_UART_IER_ETBEI 0x00000002
+#define PCH_UART_IER_ELSI 0x00000004
+#define PCH_UART_IER_EDSSI 0x00000008
+
+#define PCH_UART_IIR_IP 0x00000001
+#define PCH_UART_IIR_IID 0x00000006
+#define PCH_UART_IIR_MSI 0x00000000
+#define PCH_UART_IIR_TRI 0x00000002
+#define PCH_UART_IIR_RRI 0x00000004
+#define PCH_UART_IIR_REI 0x00000006
+#define PCH_UART_IIR_TOI 0x00000008
+#define PCH_UART_IIR_FIFO256 0x00000020
+#define PCH_UART_IIR_FIFO64 PCH_UART_IIR_FIFO256
+#define PCH_UART_IIR_FE 0x000000C0
+
+#define PCH_UART_FCR_FIFOE 0x00000001
+#define PCH_UART_FCR_RFR 0x00000002
+#define PCH_UART_FCR_TFR 0x00000004
+#define PCH_UART_FCR_DMS 0x00000008
+#define PCH_UART_FCR_FIFO256 0x00000020
+#define PCH_UART_FCR_RFTL 0x000000C0
+
+#define PCH_UART_FCR_RFTL1 0x00000000
+#define PCH_UART_FCR_RFTL64 0x00000040
+#define PCH_UART_FCR_RFTL128 0x00000080
+#define PCH_UART_FCR_RFTL224 0x000000C0
+#define PCH_UART_FCR_RFTL16 PCH_UART_FCR_RFTL64
+#define PCH_UART_FCR_RFTL32 PCH_UART_FCR_RFTL128
+#define PCH_UART_FCR_RFTL56 PCH_UART_FCR_RFTL224
+#define PCH_UART_FCR_RFTL4 PCH_UART_FCR_RFTL64
+#define PCH_UART_FCR_RFTL8 PCH_UART_FCR_RFTL128
+#define PCH_UART_FCR_RFTL14 PCH_UART_FCR_RFTL224
+#define PCH_UART_FCR_RFTL_SHIFT 6
+
+#define PCH_UART_LCR_WLS 0x00000003
+#define PCH_UART_LCR_STB 0x00000004
+#define PCH_UART_LCR_PEN 0x00000008
+#define PCH_UART_LCR_EPS 0x00000010
+#define PCH_UART_LCR_SP 0x00000020
+#define PCH_UART_LCR_SB 0x00000040
+#define PCH_UART_LCR_DLAB 0x00000080
+#define PCH_UART_LCR_NP 0x00000000
+#define PCH_UART_LCR_OP PCH_UART_LCR_PEN
+#define PCH_UART_LCR_EP (PCH_UART_LCR_PEN | PCH_UART_LCR_EPS)
+#define PCH_UART_LCR_1P (PCH_UART_LCR_PEN | PCH_UART_LCR_SP)
+#define PCH_UART_LCR_0P (PCH_UART_LCR_PEN | PCH_UART_LCR_EPS |\
+ PCH_UART_LCR_SP)
+
+#define PCH_UART_LCR_5BIT 0x00000000
+#define PCH_UART_LCR_6BIT 0x00000001
+#define PCH_UART_LCR_7BIT 0x00000002
+#define PCH_UART_LCR_8BIT 0x00000003
+
+#define PCH_UART_MCR_DTR 0x00000001
+#define PCH_UART_MCR_RTS 0x00000002
+#define PCH_UART_MCR_OUT 0x0000000C
+#define PCH_UART_MCR_LOOP 0x00000010
+#define PCH_UART_MCR_AFE 0x00000020
+
+#define PCH_UART_LSR_DR 0x00000001
+#define PCH_UART_LSR_ERR (1<<7)
+
+#define PCH_UART_MSR_DCTS 0x00000001
+#define PCH_UART_MSR_DDSR 0x00000002
+#define PCH_UART_MSR_TERI 0x00000004
+#define PCH_UART_MSR_DDCD 0x00000008
+#define PCH_UART_MSR_CTS 0x00000010
+#define PCH_UART_MSR_DSR 0x00000020
+#define PCH_UART_MSR_RI 0x00000040
+#define PCH_UART_MSR_DCD 0x00000080
+#define PCH_UART_MSR_DELTA (PCH_UART_MSR_DCTS | PCH_UART_MSR_DDSR |\
+ PCH_UART_MSR_TERI | PCH_UART_MSR_DDCD)
+
+#define PCH_UART_DLL 0x00
+#define PCH_UART_DLM 0x01
+
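+/* Rounds to the nearest integer, e.g. DIV_ROUND(10, 4) = (10 + 2) / 4 = 3 */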
+#define DIV_ROUND(a, b) (((a) + ((b)/2)) / (b))
+
+#define PCH_UART_IID_RLS (PCH_UART_IIR_REI)
+#define PCH_UART_IID_RDR (PCH_UART_IIR_RRI)
+#define PCH_UART_IID_RDR_TO (PCH_UART_IIR_RRI | PCH_UART_IIR_TOI)
+#define PCH_UART_IID_THRE (PCH_UART_IIR_TRI)
+#define PCH_UART_IID_MS (PCH_UART_IIR_MSI)
+
+#define PCH_UART_HAL_PARITY_NONE (PCH_UART_LCR_NP)
+#define PCH_UART_HAL_PARITY_ODD (PCH_UART_LCR_OP)
+#define PCH_UART_HAL_PARITY_EVEN (PCH_UART_LCR_EP)
+#define PCH_UART_HAL_PARITY_FIX1 (PCH_UART_LCR_1P)
+#define PCH_UART_HAL_PARITY_FIX0 (PCH_UART_LCR_0P)
+#define PCH_UART_HAL_5BIT (PCH_UART_LCR_5BIT)
+#define PCH_UART_HAL_6BIT (PCH_UART_LCR_6BIT)
+#define PCH_UART_HAL_7BIT (PCH_UART_LCR_7BIT)
+#define PCH_UART_HAL_8BIT (PCH_UART_LCR_8BIT)
+#define PCH_UART_HAL_STB1 0
+#define PCH_UART_HAL_STB2 (PCH_UART_LCR_STB)
+
+#define PCH_UART_HAL_CLR_TX_FIFO (PCH_UART_FCR_TFR)
+#define PCH_UART_HAL_CLR_RX_FIFO (PCH_UART_FCR_RFR)
+#define PCH_UART_HAL_CLR_ALL_FIFO (PCH_UART_HAL_CLR_TX_FIFO | \
+ PCH_UART_HAL_CLR_RX_FIFO)
+
+#define PCH_UART_HAL_DMA_MODE0 0
+#define PCH_UART_HAL_FIFO_DIS 0
+#define PCH_UART_HAL_FIFO16 (PCH_UART_FCR_FIFOE)
+#define PCH_UART_HAL_FIFO256 (PCH_UART_FCR_FIFOE | \
+ PCH_UART_FCR_FIFO256)
+#define PCH_UART_HAL_FIFO64 (PCH_UART_HAL_FIFO256)
+#define PCH_UART_HAL_TRIGGER1 (PCH_UART_FCR_RFTL1)
+#define PCH_UART_HAL_TRIGGER64 (PCH_UART_FCR_RFTL64)
+#define PCH_UART_HAL_TRIGGER128 (PCH_UART_FCR_RFTL128)
+#define PCH_UART_HAL_TRIGGER224 (PCH_UART_FCR_RFTL224)
+#define PCH_UART_HAL_TRIGGER16 (PCH_UART_FCR_RFTL16)
+#define PCH_UART_HAL_TRIGGER32 (PCH_UART_FCR_RFTL32)
+#define PCH_UART_HAL_TRIGGER56 (PCH_UART_FCR_RFTL56)
+#define PCH_UART_HAL_TRIGGER4 (PCH_UART_FCR_RFTL4)
+#define PCH_UART_HAL_TRIGGER8 (PCH_UART_FCR_RFTL8)
+#define PCH_UART_HAL_TRIGGER14 (PCH_UART_FCR_RFTL14)
+#define PCH_UART_HAL_TRIGGER_L (PCH_UART_FCR_RFTL64)
+#define PCH_UART_HAL_TRIGGER_M (PCH_UART_FCR_RFTL128)
+#define PCH_UART_HAL_TRIGGER_H (PCH_UART_FCR_RFTL224)
+
+#define PCH_UART_HAL_RX_INT (PCH_UART_IER_ERBFI)
+#define PCH_UART_HAL_TX_INT (PCH_UART_IER_ETBEI)
+#define PCH_UART_HAL_RX_ERR_INT (PCH_UART_IER_ELSI)
+#define PCH_UART_HAL_MS_INT (PCH_UART_IER_EDSSI)
+#define PCH_UART_HAL_ALL_INT (PCH_UART_IER_MASK)
+
+#define PCH_UART_HAL_DTR (PCH_UART_MCR_DTR)
+#define PCH_UART_HAL_RTS (PCH_UART_MCR_RTS)
+#define PCH_UART_HAL_OUT (PCH_UART_MCR_OUT)
+#define PCH_UART_HAL_LOOP (PCH_UART_MCR_LOOP)
+#define PCH_UART_HAL_AFE (PCH_UART_MCR_AFE)
+
+#define PCI_VENDOR_ID_ROHM 0x10DB
+
+struct pch_uart_buffer {
+ unsigned char *buf;
+ int size;
+};
+
+struct eg20t_port {
+ struct uart_port port;
+ int port_type;
+ void __iomem *membase;
+ resource_size_t mapbase;
+ unsigned int iobase;
+ struct pci_dev *pdev;
+ int fifo_size;
+ int base_baud;
+ int start_tx;
+ int start_rx;
+ int tx_empty;
+ int int_dis_flag;
+ int trigger;
+ int trigger_level;
+ struct pch_uart_buffer rxbuf;
+ unsigned int dmsr;
+ unsigned int fcr;
+ unsigned int mcr;
+ unsigned int use_dma;
+ unsigned int use_dma_flag;
+ struct dma_async_tx_descriptor *desc_tx;
+ struct dma_async_tx_descriptor *desc_rx;
+ struct pch_dma_slave param_tx;
+ struct pch_dma_slave param_rx;
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+ struct scatterlist *sg_tx_p;
+ int nent;
+ struct scatterlist sg_rx;
+ int tx_dma_use;
+ void *rx_buf_virt;
+ dma_addr_t rx_buf_dma;
+};
+
+/**
+ * struct pch_uart_driver_data - private data structure for UART-DMA
+ * @port_type: UART port type (PCH_UART_8LINE or PCH_UART_2LINE)
+ * @line_no: UART port line number (0, 1, 2...)
+ */
+struct pch_uart_driver_data {
+ int port_type;
+ int line_no;
+};
+
+enum pch_uart_num_t {
+ pch_et20t_uart0 = 0,
+ pch_et20t_uart1,
+ pch_et20t_uart2,
+ pch_et20t_uart3,
+ pch_ml7213_uart0,
+ pch_ml7213_uart1,
+ pch_ml7213_uart2,
+ pch_ml7223_uart0,
+ pch_ml7223_uart1,
+ pch_ml7831_uart0,
+ pch_ml7831_uart1,
+};
+
+static struct pch_uart_driver_data drv_dat[] = {
+ [pch_et20t_uart0] = {PCH_UART_8LINE, 0},
+ [pch_et20t_uart1] = {PCH_UART_2LINE, 1},
+ [pch_et20t_uart2] = {PCH_UART_2LINE, 2},
+ [pch_et20t_uart3] = {PCH_UART_2LINE, 3},
+ [pch_ml7213_uart0] = {PCH_UART_8LINE, 0},
+ [pch_ml7213_uart1] = {PCH_UART_2LINE, 1},
+ [pch_ml7213_uart2] = {PCH_UART_2LINE, 2},
+ [pch_ml7223_uart0] = {PCH_UART_8LINE, 0},
+ [pch_ml7223_uart1] = {PCH_UART_2LINE, 1},
+ [pch_ml7831_uart0] = {PCH_UART_8LINE, 0},
+ [pch_ml7831_uart1] = {PCH_UART_2LINE, 1},
+};
+
+static unsigned int default_baud = 9600;
+static const int trigger_level_256[4] = { 1, 64, 128, 224 };
+static const int trigger_level_64[4] = { 1, 16, 32, 56 };
+static const int trigger_level_16[4] = { 1, 4, 8, 14 };
+static const int trigger_level_1[4] = { 1, 1, 1, 1 };
+
+static void pch_uart_hal_request(struct pci_dev *pdev, int fifosize,
+ int base_baud)
+{
+ struct eg20t_port *priv = pci_get_drvdata(pdev);
+
+ priv->trigger_level = 1;
+ priv->fcr = 0;
+}
+
+static unsigned int get_msr(struct eg20t_port *priv, void __iomem *base)
+{
+ unsigned int msr = ioread8(base + UART_MSR);
+ priv->dmsr |= msr & PCH_UART_MSR_DELTA;
+
+ return msr;
+}
+
+static void pch_uart_hal_enable_interrupt(struct eg20t_port *priv,
+ unsigned int flag)
+{
+ u8 ier = ioread8(priv->membase + UART_IER);
+ ier |= flag & PCH_UART_IER_MASK;
+ iowrite8(ier, priv->membase + UART_IER);
+}
+
+static void pch_uart_hal_disable_interrupt(struct eg20t_port *priv,
+ unsigned int flag)
+{
+ u8 ier = ioread8(priv->membase + UART_IER);
+ ier &= ~(flag & PCH_UART_IER_MASK);
+ iowrite8(ier, priv->membase + UART_IER);
+}
+
+static int pch_uart_hal_set_line(struct eg20t_port *priv, int baud,
+ unsigned int parity, unsigned int bits,
+ unsigned int stb)
+{
+ unsigned int dll, dlm, lcr;
+ int div;
+
+ div = DIV_ROUND(priv->base_baud / 16, baud);
+ if (div < 0 || USHRT_MAX <= div) {
+ dev_err(priv->port.dev, "Invalid Baud(div=0x%x)\n", div);
+ return -EINVAL;
+ }
+
+ dll = (unsigned int)div & 0x00FFU;
+ dlm = ((unsigned int)div >> 8) & 0x00FFU;
+
+ if (parity & ~(PCH_UART_LCR_PEN | PCH_UART_LCR_EPS | PCH_UART_LCR_SP)) {
+ dev_err(priv->port.dev, "Invalid parity(0x%x)\n", parity);
+ return -EINVAL;
+ }
+
+ if (bits & ~PCH_UART_LCR_WLS) {
+ dev_err(priv->port.dev, "Invalid bits(0x%x)\n", bits);
+ return -EINVAL;
+ }
+
+ if (stb & ~PCH_UART_LCR_STB) {
+ dev_err(priv->port.dev, "Invalid STB(0x%x)\n", stb);
+ return -EINVAL;
+ }
+
+ lcr = parity;
+ lcr |= bits;
+ lcr |= stb;
+
+ dev_dbg(priv->port.dev, "%s:baud = %d, div = %04x, lcr = %02x (%lu)\n",
+ __func__, baud, div, lcr, jiffies);
+ iowrite8(PCH_UART_LCR_DLAB, priv->membase + UART_LCR);
+ iowrite8(dll, priv->membase + PCH_UART_DLL);
+ iowrite8(dlm, priv->membase + PCH_UART_DLM);
+ iowrite8(lcr, priv->membase + UART_LCR);
+
+ return 0;
+}
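+
+/*
+ * Illustrative example (hypothetical base clock): with base_baud = 1843200
+ * and baud = 115200, div = DIV_ROUND(1843200 / 16, 115200) = 1, giving
+ * DLL = 0x01 and DLM = 0x00.
+ */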
+
+static int pch_uart_hal_fifo_reset(struct eg20t_port *priv,
+ unsigned int flag)
+{
+ if (flag & ~(PCH_UART_FCR_TFR | PCH_UART_FCR_RFR)) {
+ dev_err(priv->port.dev, "%s:Invalid flag(0x%x)\n",
+ __func__, flag);
+ return -EINVAL;
+ }
+
+ iowrite8(PCH_UART_FCR_FIFOE | priv->fcr, priv->membase + UART_FCR);
+ iowrite8(PCH_UART_FCR_FIFOE | priv->fcr | flag,
+ priv->membase + UART_FCR);
+ iowrite8(priv->fcr, priv->membase + UART_FCR);
+
+ return 0;
+}
+
+static int pch_uart_hal_set_fifo(struct eg20t_port *priv,
+ unsigned int dmamode,
+ unsigned int fifo_size, unsigned int trigger)
+{
+ u8 fcr;
+
+ if (dmamode & ~PCH_UART_FCR_DMS) {
+ dev_err(priv->port.dev, "%s:Invalid DMA Mode(0x%x)\n",
+ __func__, dmamode);
+ return -EINVAL;
+ }
+
+ if (fifo_size & ~(PCH_UART_FCR_FIFOE | PCH_UART_FCR_FIFO256)) {
+ dev_err(priv->port.dev, "%s:Invalid FIFO SIZE(0x%x)\n",
+ __func__, fifo_size);
+ return -EINVAL;
+ }
+
+ if (trigger & ~PCH_UART_FCR_RFTL) {
+ dev_err(priv->port.dev, "%s:Invalid TRIGGER(0x%x)\n",
+ __func__, trigger);
+ return -EINVAL;
+ }
+
+ switch (priv->fifo_size) {
+ case 256:
+ priv->trigger_level =
+ trigger_level_256[trigger >> PCH_UART_FCR_RFTL_SHIFT];
+ break;
+ case 64:
+ priv->trigger_level =
+ trigger_level_64[trigger >> PCH_UART_FCR_RFTL_SHIFT];
+ break;
+ case 16:
+ priv->trigger_level =
+ trigger_level_16[trigger >> PCH_UART_FCR_RFTL_SHIFT];
+ break;
+ default:
+ priv->trigger_level =
+ trigger_level_1[trigger >> PCH_UART_FCR_RFTL_SHIFT];
+ break;
+ }
+ fcr =
+ dmamode | fifo_size | trigger | PCH_UART_FCR_RFR | PCH_UART_FCR_TFR;
+ iowrite8(PCH_UART_FCR_FIFOE, priv->membase + UART_FCR);
+ iowrite8(PCH_UART_FCR_FIFOE | PCH_UART_FCR_RFR | PCH_UART_FCR_TFR,
+ priv->membase + UART_FCR);
+ iowrite8(fcr, priv->membase + UART_FCR);
+ priv->fcr = fcr;
+
+ return 0;
+}
+
+static u8 pch_uart_hal_get_modem(struct eg20t_port *priv)
+{
+ priv->dmsr = 0;
+ return get_msr(priv, priv->membase);
+}
+
+static void pch_uart_hal_write(struct eg20t_port *priv,
+ const unsigned char *buf, int tx_size)
+{
+ int i;
+ unsigned int thr;
+
+ for (i = 0; i < tx_size;) {
+ thr = buf[i++];
+ iowrite8(thr, priv->membase + PCH_UART_THR);
+ }
+}
+
+static int pch_uart_hal_read(struct eg20t_port *priv, unsigned char *buf,
+ int rx_size)
+{
+ int i;
+ u8 rbr, lsr;
+
+ lsr = ioread8(priv->membase + UART_LSR);
+ for (i = 0, lsr = ioread8(priv->membase + UART_LSR);
+ i < rx_size && lsr & UART_LSR_DR;
+ lsr = ioread8(priv->membase + UART_LSR)) {
+ rbr = ioread8(priv->membase + PCH_UART_RBR);
+ buf[i++] = rbr;
+ }
+ return i;
+}
+
+static unsigned int pch_uart_hal_get_iid(struct eg20t_port *priv)
+{
+ unsigned int iir;
+ int ret;
+
+ iir = ioread8(priv->membase + UART_IIR);
+ ret = (iir & (PCH_UART_IIR_IID | PCH_UART_IIR_TOI | PCH_UART_IIR_IP));
+ return ret;
+}
+
+static u8 pch_uart_hal_get_line_status(struct eg20t_port *priv)
+{
+ return ioread8(priv->membase + UART_LSR);
+}
+
+static void pch_uart_hal_set_break(struct eg20t_port *priv, int on)
+{
+ unsigned int lcr;
+
+ lcr = ioread8(priv->membase + UART_LCR);
+ if (on)
+ lcr |= PCH_UART_LCR_SB;
+ else
+ lcr &= ~PCH_UART_LCR_SB;
+
+ iowrite8(lcr, priv->membase + UART_LCR);
+}
+
+static int push_rx(struct eg20t_port *priv, const unsigned char *buf,
+ int size)
+{
+ struct uart_port *port;
+ struct tty_struct *tty;
+
+ port = &priv->port;
+ tty = tty_port_tty_get(&port->state->port);
+ if (!tty) {
+ dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
+ return -EBUSY;
+ }
+
+ tty_insert_flip_string(tty, buf, size);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+
+ return 0;
+}
+
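+/* Send a pending XON/XOFF (x_char) ahead of normal data; returns 1 if a
+ character was copied into 'buf', 0 otherwise. */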
+static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
+{
+ int ret;
+ struct uart_port *port = &priv->port;
+
+ if (port->x_char) {
+ dev_dbg(priv->port.dev, "%s:X character send %02x (%lu)\n",
+ __func__, port->x_char, jiffies);
+ buf[0] = port->x_char;
+ port->x_char = 0;
+ ret = 1;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int dma_push_rx(struct eg20t_port *priv, int size)
+{
+ struct tty_struct *tty;
+ int room;
+ struct uart_port *port = &priv->port;
+
+ tty = tty_port_tty_get(&port->state->port);
+ if (!tty) {
+ dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
+ return 0;
+ }
+
+ room = tty_buffer_request_room(tty, size);
+
+ if (room < size)
+ dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
+ size - room);
+ if (!room) {
+ tty_kref_put(tty);
+ return room;
+ }
+
+ tty_insert_flip_string(tty, sg_virt(&priv->sg_rx), size);
+
+ port->icount.rx += room;
+ tty_kref_put(tty);
+
+ return room;
+}
+
+static void pch_free_dma(struct uart_port *port)
+{
+ struct eg20t_port *priv;
+ priv = container_of(port, struct eg20t_port, port);
+
+ if (priv->chan_tx) {
+ dma_release_channel(priv->chan_tx);
+ priv->chan_tx = NULL;
+ }
+ if (priv->chan_rx) {
+ dma_release_channel(priv->chan_rx);
+ priv->chan_rx = NULL;
+ }
+ if (sg_dma_address(&priv->sg_rx))
+ dma_free_coherent(port->dev, port->fifosize,
+ sg_virt(&priv->sg_rx),
+ sg_dma_address(&priv->sg_rx));
+
+ return;
+}
+
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ struct pch_dma_slave *param = slave;
+
+ if ((chan->chan_id == param->chan_id) && (param->dma_dev ==
+ chan->device->dev)) {
+ chan->private = param;
+ return true;
+ } else {
+ return false;
+ }
+}
+
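+/* Request the Tx and Rx DMA channels from the on-chip DMA controller
+ (device 0xa, function 0 on the same PCI bus). Tx uses the even channel
+ number for this line, Rx the following odd one; a coherent Rx buffer of
+ one FIFO size is also allocated. */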
+static void pch_request_dma(struct uart_port *port)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ struct pci_dev *dma_dev;
+ struct pch_dma_slave *param;
+ struct eg20t_port *priv =
+ container_of(port, struct eg20t_port, port);
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Get the DMA controller's device information */
+ dma_dev = pci_get_bus_and_slot(priv->pdev->bus->number,
+ PCI_DEVFN(0xa, 0));
+ /* Set Tx DMA */
+ param = &priv->param_tx;
+ param->dma_dev = &dma_dev->dev;
+ param->chan_id = priv->port.line * 2; /* Tx = 0, 2, 4, ... */
+
+ param->tx_reg = port->mapbase + UART_TX;
+ chan = dma_request_channel(mask, filter, param);
+ if (!chan) {
+ dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Tx)\n",
+ __func__);
+ return;
+ }
+ priv->chan_tx = chan;
+
+ /* Set Rx DMA */
+ param = &priv->param_rx;
+ param->dma_dev = &dma_dev->dev;
+ param->chan_id = priv->port.line * 2 + 1; /* Rx = Tx + 1 */
+
+ param->rx_reg = port->mapbase + UART_RX;
+ chan = dma_request_channel(mask, filter, param);
+ if (!chan) {
+ dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n",
+ __func__);
+ dma_release_channel(priv->chan_tx);
+ priv->chan_tx = NULL;
+ return;
+ }
+
+ /* Get Consistent memory for DMA */
+ priv->rx_buf_virt = dma_alloc_coherent(port->dev, port->fifosize,
+ &priv->rx_buf_dma, GFP_KERNEL);
+ priv->chan_rx = chan;
+}
+
+static void pch_dma_rx_complete(void *arg)
+{
+ struct eg20t_port *priv = arg;
+ struct uart_port *port = &priv->port;
+ struct tty_struct *tty = tty_port_tty_get(&port->state->port);
+ int count;
+
+ if (!tty) {
+ dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
+ return;
+ }
+
+ dma_sync_sg_for_cpu(port->dev, &priv->sg_rx, 1, DMA_FROM_DEVICE);
+ count = dma_push_rx(priv, priv->trigger_level);
+ if (count)
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+ async_tx_ack(priv->desc_rx);
+ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
+}
+
+static void pch_dma_tx_complete(void *arg)
+{
+ struct eg20t_port *priv = arg;
+ struct uart_port *port = &priv->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct scatterlist *sg = priv->sg_tx_p;
+ int i;
+
+ for (i = 0; i < priv->nent; i++, sg++) {
+ xmit->tail += sg_dma_len(sg);
+ port->icount.tx += sg_dma_len(sg);
+ }
+ xmit->tail &= UART_XMIT_SIZE - 1;
+ async_tx_ack(priv->desc_tx);
+ dma_unmap_sg(port->dev, priv->sg_tx_p, priv->nent, DMA_TO_DEVICE);
+ priv->tx_dma_use = 0;
+ priv->nent = 0;
+ kfree(priv->sg_tx_p);
+ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT);
+}
+
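+/* Move up to 'size' bytes from the transmit circular buffer into the Tx
+ FIFO and return the number of bytes actually written. */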
+static int pop_tx(struct eg20t_port *priv, int size)
+{
+ int count = 0;
+ struct uart_port *port = &priv->port;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (uart_tx_stopped(port) || uart_circ_empty(xmit) || count >= size)
+ goto pop_tx_end;
+
+ do {
+ int cnt_to_end =
+ CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ int sz = min(size - count, cnt_to_end);
+ pch_uart_hal_write(priv, &xmit->buf[xmit->tail], sz);
+ xmit->tail = (xmit->tail + sz) & (UART_XMIT_SIZE - 1);
+ count += sz;
+ } while (!uart_circ_empty(xmit) && count < size);
+
+pop_tx_end:
+ dev_dbg(priv->port.dev, "%d characters. Remained %d characters.(%lu)\n",
+ count, size - count, jiffies);
+
+ return count;
+}
+
+static int handle_rx_to(struct eg20t_port *priv)
+{
+ struct pch_uart_buffer *buf;
+ int rx_size;
+ int ret;
+ if (!priv->start_rx) {
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
+ return 0;
+ }
+ buf = &priv->rxbuf;
+ do {
+ rx_size = pch_uart_hal_read(priv, buf->buf, buf->size);
+ ret = push_rx(priv, buf->buf, rx_size);
+ if (ret)
+ return 0;
+ } while (rx_size == buf->size);
+
+ return PCH_UART_HANDLED_RX_INT;
+}
+
+static int handle_rx(struct eg20t_port *priv)
+{
+ return handle_rx_to(priv);
+}
+
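+/* Start a DMA receive: map the coherent Rx buffer with a one-entry
+ scatterlist and submit a slave-DMA transfer of trigger_level bytes;
+ pch_dma_rx_complete() pushes the data to the tty when it finishes. */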
+static int dma_handle_rx(struct eg20t_port *priv)
+{
+ struct uart_port *port = &priv->port;
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sg;
+
+ sg = &priv->sg_rx;
+
+ sg_init_table(&priv->sg_rx, 1); /* Initialize SG table */
+
+ sg_dma_len(sg) = priv->trigger_level;
+
+ sg_set_page(&priv->sg_rx, virt_to_page(priv->rx_buf_virt),
+ sg_dma_len(sg), (unsigned long)priv->rx_buf_virt &
+ ~PAGE_MASK);
+
+ sg_dma_address(sg) = priv->rx_buf_dma;
+
+ desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx,
+ sg, 1, DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!desc)
+ return 0;
+
+ priv->desc_rx = desc;
+ desc->callback = pch_dma_rx_complete;
+ desc->callback_param = priv;
+ desc->tx_submit(desc);
+ dma_async_issue_pending(priv->chan_rx);
+
+ return PCH_UART_HANDLED_RX_INT;
+}
+
+static unsigned int handle_tx(struct eg20t_port *priv)
+{
+ struct uart_port *port = &priv->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ int fifo_size;
+ int tx_size;
+ int size;
+ int tx_empty;
+
+ if (!priv->start_tx) {
+ dev_info(priv->port.dev, "%s:Tx isn't started. (%lu)\n",
+ __func__, jiffies);
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT);
+ priv->tx_empty = 1;
+ return 0;
+ }
+
+ fifo_size = max(priv->fifo_size, 1);
+ tx_empty = 1;
+ if (pop_tx_x(priv, xmit->buf)) {
+ pch_uart_hal_write(priv, xmit->buf, 1);
+ port->icount.tx++;
+ tx_empty = 0;
+ fifo_size--;
+ }
+ size = min(xmit->head - xmit->tail, fifo_size);
+ if (size < 0)
+ size = fifo_size;
+
+ tx_size = pop_tx(priv, size);
+ if (tx_size > 0) {
+ port->icount.tx += tx_size;
+ tx_empty = 0;
+ }
+
+ priv->tx_empty = tx_empty;
+
+ if (tx_empty) {
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT);
+ uart_write_wakeup(port);
+ }
+
+ return PCH_UART_HANDLED_TX_INT;
+}
+
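+/* Transmit via DMA: the contiguous part of the circular buffer is split
+ into FIFO-sized scatterlist entries (the last entry carries the
+ remainder), mapped and submitted as a single slave-DMA transaction that
+ completes in pch_dma_tx_complete(). */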
+static unsigned int dma_handle_tx(struct eg20t_port *priv)
+{
+ struct uart_port *port = &priv->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct scatterlist *sg;
+ int nent;
+ int fifo_size;
+ int tx_empty;
+ struct dma_async_tx_descriptor *desc;
+ int num;
+ int i;
+ int bytes;
+ int size;
+ int rem;
+
+ if (!priv->start_tx) {
+ dev_info(priv->port.dev, "%s:Tx isn't started. (%lu)\n",
+ __func__, jiffies);
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT);
+ priv->tx_empty = 1;
+ return 0;
+ }
+
+ if (priv->tx_dma_use) {
+ dev_dbg(priv->port.dev, "%s:Tx is not completed. (%lu)\n",
+ __func__, jiffies);
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT);
+ priv->tx_empty = 1;
+ return 0;
+ }
+
+ fifo_size = max(priv->fifo_size, 1);
+ tx_empty = 1;
+ if (pop_tx_x(priv, xmit->buf)) {
+ pch_uart_hal_write(priv, xmit->buf, 1);
+ port->icount.tx++;
+ tx_empty = 0;
+ fifo_size--;
+ }
+
+ bytes = min((int)CIRC_CNT(xmit->head, xmit->tail,
+ UART_XMIT_SIZE), CIRC_CNT_TO_END(xmit->head,
+ xmit->tail, UART_XMIT_SIZE));
+ if (!bytes) {
+ dev_dbg(priv->port.dev, "%s 0 bytes return\n", __func__);
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT);
+ uart_write_wakeup(port);
+ return 0;
+ }
+
+ if (bytes > fifo_size) {
+ num = bytes / fifo_size + 1;
+ size = fifo_size;
+ rem = bytes % fifo_size;
+ } else {
+ num = 1;
+ size = bytes;
+ rem = bytes;
+ }
+
+ dev_dbg(priv->port.dev, "%s num=%d size=%d rem=%d\n",
+ __func__, num, size, rem);
+
+ priv->tx_dma_use = 1;
+
+ priv->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
+ if (!priv->sg_tx_p) {
+ dev_err(priv->port.dev, "%s:kzalloc Failed\n", __func__);
+ priv->tx_dma_use = 0;
+ return 0;
+ }
+
+ sg_init_table(priv->sg_tx_p, num); /* Initialize SG table */
+ sg = priv->sg_tx_p;
+
+ for (i = 0; i < num; i++, sg++) {
+ if (i == (num - 1))
+ sg_set_page(sg, virt_to_page(xmit->buf),
+ rem, fifo_size * i);
+ else
+ sg_set_page(sg, virt_to_page(xmit->buf),
+ size, fifo_size * i);
+ }
+
+ sg = priv->sg_tx_p;
+ nent = dma_map_sg(port->dev, sg, num, DMA_TO_DEVICE);
+ if (!nent) {
+ dev_err(priv->port.dev, "%s:dma_map_sg Failed\n", __func__);
+ return 0;
+ }
+ priv->nent = nent;
+
+ for (i = 0; i < nent; i++, sg++) {
+ sg->offset = (xmit->tail & (UART_XMIT_SIZE - 1)) +
+ fifo_size * i;
+ sg_dma_address(sg) = (sg_dma_address(sg) &
+ ~(UART_XMIT_SIZE - 1)) + sg->offset;
+ if (i == (nent - 1))
+ sg_dma_len(sg) = rem;
+ else
+ sg_dma_len(sg) = size;
+ }
+
+ desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx,
+ priv->sg_tx_p, nent, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(priv->port.dev, "%s:device_prep_slave_sg Failed\n",
+ __func__);
+ return 0;
+ }
+ dma_sync_sg_for_device(port->dev, priv->sg_tx_p, nent, DMA_TO_DEVICE);
+ priv->desc_tx = desc;
+ desc->callback = pch_dma_tx_complete;
+ desc->callback_param = priv;
+
+ desc->tx_submit(desc);
+
+ dma_async_issue_pending(priv->chan_tx);
+
+ return PCH_UART_HANDLED_TX_INT;
+}
+
+static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
+{
+ u8 fcr = ioread8(priv->membase + UART_FCR);
+
+ /* Reset FIFO */
+ fcr |= UART_FCR_CLEAR_RCVR;
+ iowrite8(fcr, priv->membase + UART_FCR);
+
+ if (lsr & PCH_UART_LSR_ERR)
+ dev_err(&priv->pdev->dev, "Error data in FIFO\n");
+
+ if (lsr & UART_LSR_FE)
+ dev_err(&priv->pdev->dev, "Framing Error\n");
+
+ if (lsr & UART_LSR_PE)
+ dev_err(&priv->pdev->dev, "Parity Error\n");
+
+ if (lsr & UART_LSR_OE)
+ dev_err(&priv->pdev->dev, "Overrun Error\n");
+}
+
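+/* Top-level interrupt handler: read the interrupt ID register repeatedly
+ and dispatch to the line-status, Rx, Rx-timeout, Tx and modem-status
+ handlers (DMA or PIO variants) until no interrupt is pending. */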
+static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
+{
+ struct eg20t_port *priv = dev_id;
+ unsigned int handled;
+ u8 lsr;
+ int ret = 0;
+ unsigned int iid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->port.lock, flags);
+ handled = 0;
+ while ((iid = pch_uart_hal_get_iid(priv)) > 1) {
+ switch (iid) {
+ case PCH_UART_IID_RLS: /* Receiver Line Status */
+ lsr = pch_uart_hal_get_line_status(priv);
+ if (lsr & (PCH_UART_LSR_ERR | UART_LSR_FE |
+ UART_LSR_PE | UART_LSR_OE)) {
+ pch_uart_err_ir(priv, lsr);
+ ret = PCH_UART_HANDLED_RX_ERR_INT;
+ }
+ break;
+ case PCH_UART_IID_RDR: /* Received Data Ready */
+ if (priv->use_dma) {
+ pch_uart_hal_disable_interrupt(priv,
+ PCH_UART_HAL_RX_INT);
+ ret = dma_handle_rx(priv);
+ if (!ret)
+ pch_uart_hal_enable_interrupt(priv,
+ PCH_UART_HAL_RX_INT);
+ } else {
+ ret = handle_rx(priv);
+ }
+ break;
+ case PCH_UART_IID_RDR_TO: /* Received Data Ready
+ (FIFO Timeout) */
+ ret = handle_rx_to(priv);
+ break;
+ case PCH_UART_IID_THRE: /* Transmitter Holding Register
+ Empty */
+ if (priv->use_dma)
+ ret = dma_handle_tx(priv);
+ else
+ ret = handle_tx(priv);
+ break;
+ case PCH_UART_IID_MS: /* Modem Status */
+ ret = PCH_UART_HANDLED_MS_INT;
+ break;
+ default: /* Never jump to this label */
+ dev_err(priv->port.dev, "%s:iid=%d (%lu)\n", __func__,
+ iid, jiffies);
+ ret = -1;
+ break;
+ }
+ handled |= (unsigned int)ret;
+ }
+ if (handled == 0 && iid <= 1) {
+ if (priv->int_dis_flag)
+ priv->int_dis_flag = 0;
+ }
+
+ spin_unlock_irqrestore(&priv->port.lock, flags);
+ return IRQ_RETVAL(handled);
+}
+
+/* This function tests whether the transmitter fifo and shifter for the port
+ described by 'port' is empty. */
+static unsigned int pch_uart_tx_empty(struct uart_port *port)
+{
+ struct eg20t_port *priv;
+ int ret;
+ priv = container_of(port, struct eg20t_port, port);
+ if (priv->tx_empty)
+ ret = TIOCSER_TEMT;
+ else
+ ret = 0;
+
+ return ret;
+}
+
+/* Returns the current state of modem control inputs. */
+static unsigned int pch_uart_get_mctrl(struct uart_port *port)
+{
+ struct eg20t_port *priv;
+ u8 modem;
+ unsigned int ret = 0;
+
+ priv = container_of(port, struct eg20t_port, port);
+ modem = pch_uart_hal_get_modem(priv);
+
+ if (modem & UART_MSR_DCD)
+ ret |= TIOCM_CAR;
+
+ if (modem & UART_MSR_RI)
+ ret |= TIOCM_RNG;
+
+ if (modem & UART_MSR_DSR)
+ ret |= TIOCM_DSR;
+
+ if (modem & UART_MSR_CTS)
+ ret |= TIOCM_CTS;
+
+ return ret;
+}
+
+static void pch_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ u32 mcr = 0;
+ struct eg20t_port *priv = container_of(port, struct eg20t_port, port);
+
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+ if (priv->mcr & UART_MCR_AFE)
+ mcr |= UART_MCR_AFE;
+
+ if (mctrl)
+ iowrite8(mcr, priv->membase + UART_MCR);
+}
+
+static void pch_uart_stop_tx(struct uart_port *port)
+{
+ struct eg20t_port *priv;
+ priv = container_of(port, struct eg20t_port, port);
+ priv->start_tx = 0;
+ priv->tx_dma_use = 0;
+}
+
+static void pch_uart_start_tx(struct uart_port *port)
+{
+ struct eg20t_port *priv;
+
+ priv = container_of(port, struct eg20t_port, port);
+
+ if (priv->use_dma) {
+ if (priv->tx_dma_use) {
+ dev_dbg(priv->port.dev, "%s : Tx DMA is NOT empty.\n",
+ __func__);
+ return;
+ }
+ }
+
+ priv->start_tx = 1;
+ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT);
+}
+
+static void pch_uart_stop_rx(struct uart_port *port)
+{
+ struct eg20t_port *priv;
+ priv = container_of(port, struct eg20t_port, port);
+ priv->start_rx = 0;
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
+ priv->int_dis_flag = 1;
+}
+
+/* Enable the modem status interrupts. */
+static void pch_uart_enable_ms(struct uart_port *port)
+{
+ struct eg20t_port *priv;
+ priv = container_of(port, struct eg20t_port, port);
+ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_MS_INT);
+}
+
+/* Control the transmission of a break signal. */
+static void pch_uart_break_ctl(struct uart_port *port, int ctl)
+{
+ struct eg20t_port *priv;
+ unsigned long flags;
+
+ priv = container_of(port, struct eg20t_port, port);
+ spin_lock_irqsave(&port->lock, flags);
+ pch_uart_hal_set_break(priv, ctl);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* Grab any interrupt resources and initialise any low level driver state. */
+static int pch_uart_startup(struct uart_port *port)
+{
+ struct eg20t_port *priv;
+ int ret;
+ int fifo_size;
+ int trigger_level;
+
+ priv = container_of(port, struct eg20t_port, port);
+ priv->tx_empty = 1;
+
+ if (port->uartclk)
+ priv->base_baud = port->uartclk;
+ else
+ port->uartclk = priv->base_baud;
+
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_ALL_INT);
+ ret = pch_uart_hal_set_line(priv, default_baud,
+ PCH_UART_HAL_PARITY_NONE, PCH_UART_HAL_8BIT,
+ PCH_UART_HAL_STB1);
+ if (ret)
+ return ret;
+
+ switch (priv->fifo_size) {
+ case 256:
+ fifo_size = PCH_UART_HAL_FIFO256;
+ break;
+ case 64:
+ fifo_size = PCH_UART_HAL_FIFO64;
+ break;
+ case 16:
+ fifo_size = PCH_UART_HAL_FIFO16;
+ break;
+ case 1:
+ default:
+ fifo_size = PCH_UART_HAL_FIFO_DIS;
+ break;
+ }
+
+ switch (priv->trigger) {
+ case PCH_UART_HAL_TRIGGER1:
+ trigger_level = 1;
+ break;
+ case PCH_UART_HAL_TRIGGER_L:
+ trigger_level = priv->fifo_size / 4;
+ break;
+ case PCH_UART_HAL_TRIGGER_M:
+ trigger_level = priv->fifo_size / 2;
+ break;
+ case PCH_UART_HAL_TRIGGER_H:
+ default:
+ trigger_level = priv->fifo_size - (priv->fifo_size / 8);
+ break;
+ }
+
+ priv->trigger_level = trigger_level;
+ ret = pch_uart_hal_set_fifo(priv, PCH_UART_HAL_DMA_MODE0,
+ fifo_size, priv->trigger);
+ if (ret < 0)
+ return ret;
+
+ ret = request_irq(priv->port.irq, pch_uart_interrupt, IRQF_SHARED,
+ KBUILD_MODNAME, priv);
+ if (ret < 0)
+ return ret;
+
+ if (priv->use_dma)
+ pch_request_dma(port);
+
+ priv->start_rx = 1;
+ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
+ uart_update_timeout(port, CS8, default_baud);
+
+ return 0;
+}
+
+static void pch_uart_shutdown(struct uart_port *port)
+{
+ struct eg20t_port *priv;
+ int ret;
+
+ priv = container_of(port, struct eg20t_port, port);
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_ALL_INT);
+ pch_uart_hal_fifo_reset(priv, PCH_UART_HAL_CLR_ALL_FIFO);
+ ret = pch_uart_hal_set_fifo(priv, PCH_UART_HAL_DMA_MODE0,
+ PCH_UART_HAL_FIFO_DIS, PCH_UART_HAL_TRIGGER1);
+ if (ret)
+ dev_err(priv->port.dev,
+ "pch_uart_hal_set_fifo Failed(ret=%d)\n", ret);
+
+ pch_free_dma(port);
+
+ free_irq(priv->port.irq, priv);
+}
+
+/* Change the port parameters, including word length, parity, and stop
+ bits. Update read_status_mask and ignore_status_mask to indicate
+ the types of events we are interested in receiving. */
+static void pch_uart_set_termios(struct uart_port *port,
+ struct ktermios *termios, struct ktermios *old)
+{
+ int baud;
+ int rtn;
+ unsigned int parity, bits, stb;
+ struct eg20t_port *priv;
+ unsigned long flags;
+
+ priv = container_of(port, struct eg20t_port, port);
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ bits = PCH_UART_HAL_5BIT;
+ break;
+ case CS6:
+ bits = PCH_UART_HAL_6BIT;
+ break;
+ case CS7:
+ bits = PCH_UART_HAL_7BIT;
+ break;
+ default: /* CS8 */
+ bits = PCH_UART_HAL_8BIT;
+ break;
+ }
+ if (termios->c_cflag & CSTOPB)
+ stb = PCH_UART_HAL_STB2;
+ else
+ stb = PCH_UART_HAL_STB1;
+
+ if (termios->c_cflag & PARENB) {
+ if (!(termios->c_cflag & PARODD))
+ parity = PCH_UART_HAL_PARITY_ODD;
+ else
+ parity = PCH_UART_HAL_PARITY_EVEN;
+
+ } else {
+ parity = PCH_UART_HAL_PARITY_NONE;
+ }
+
+ /* Only UART0 has auto hardware flow function */
+ if ((termios->c_cflag & CRTSCTS) && (priv->fifo_size == 256))
+ priv->mcr |= UART_MCR_AFE;
+ else
+ priv->mcr &= ~UART_MCR_AFE;
+
+ termios->c_cflag &= ~CMSPAR; /* Mark/Space parity is not supported */
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+ rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
+ if (rtn)
+ goto out;
+
+ pch_uart_set_mctrl(&priv->port, priv->port.mctrl);
+ /* Don't rewrite B0 */
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+out:
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *pch_uart_type(struct uart_port *port)
+{
+ return KBUILD_MODNAME;
+}
+
+static void pch_uart_release_port(struct uart_port *port)
+{
+ struct eg20t_port *priv;
+
+ priv = container_of(port, struct eg20t_port, port);
+ pci_iounmap(priv->pdev, priv->membase);
+ pci_release_regions(priv->pdev);
+}
+
+static int pch_uart_request_port(struct uart_port *port)
+{
+ struct eg20t_port *priv;
+ int ret;
+ void __iomem *membase;
+
+ priv = container_of(port, struct eg20t_port, port);
+ ret = pci_request_regions(priv->pdev, KBUILD_MODNAME);
+ if (ret < 0)
+ return -EBUSY;
+
+ membase = pci_iomap(priv->pdev, 1, 0);
+ if (!membase) {
+ pci_release_regions(priv->pdev);
+ return -EBUSY;
+ }
+ priv->membase = port->membase = membase;
+
+ return 0;
+}
+
+static void pch_uart_config_port(struct uart_port *port, int type)
+{
+ struct eg20t_port *priv;
+
+ priv = container_of(port, struct eg20t_port, port);
+ if (type & UART_CONFIG_TYPE) {
+ port->type = priv->port_type;
+ pch_uart_request_port(port);
+ }
+}
+
+static int pch_uart_verify_port(struct uart_port *port,
+ struct serial_struct *serinfo)
+{
+ struct eg20t_port *priv;
+
+ priv = container_of(port, struct eg20t_port, port);
+ if (serinfo->flags & UPF_LOW_LATENCY) {
+ dev_info(priv->port.dev,
+ "PCH UART : Use PIO Mode (without DMA)\n");
+ priv->use_dma = 0;
+ serinfo->flags &= ~UPF_LOW_LATENCY;
+ } else {
+#ifndef CONFIG_PCH_DMA
+ dev_err(priv->port.dev, "%s : PCH DMA is not Loaded.\n",
+ __func__);
+ return -EOPNOTSUPP;
+#endif
+ priv->use_dma_flag = 1;
+ dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n");
+ if (!priv->use_dma)
+ pch_request_dma(port);
+ priv->use_dma = 1;
+ }
+
+ return 0;
+}
+
+static struct uart_ops pch_uart_ops = {
+ .tx_empty = pch_uart_tx_empty,
+ .set_mctrl = pch_uart_set_mctrl,
+ .get_mctrl = pch_uart_get_mctrl,
+ .stop_tx = pch_uart_stop_tx,
+ .start_tx = pch_uart_start_tx,
+ .stop_rx = pch_uart_stop_rx,
+ .enable_ms = pch_uart_enable_ms,
+ .break_ctl = pch_uart_break_ctl,
+ .startup = pch_uart_startup,
+ .shutdown = pch_uart_shutdown,
+ .set_termios = pch_uart_set_termios,
+/* .pm = pch_uart_pm, Not supported yet */
+/* .set_wake = pch_uart_set_wake, Not supported yet */
+ .type = pch_uart_type,
+ .release_port = pch_uart_release_port,
+ .request_port = pch_uart_request_port,
+ .config_port = pch_uart_config_port,
+ .verify_port = pch_uart_verify_port
+};
+
+static struct uart_driver pch_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = KBUILD_MODNAME,
+ .dev_name = PCH_UART_DRIVER_DEVICE,
+ .major = 0,
+ .minor = 0,
+ .nr = PCH_UART_NR,
+};
+
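+/* Set up one port: choose the FIFO size from the port type associated
+ with the PCI id, allocate a one-page Rx buffer, fill in the uart_port
+ fields and register the port with the serial core. */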
+static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct eg20t_port *priv;
+ int ret;
+ unsigned int iobase;
+ unsigned int mapbase;
+ unsigned char *rxbuf;
+ int fifosize, base_baud;
+ int port_type;
+ struct pch_uart_driver_data *board;
+ const char *board_name;
+
+ board = &drv_dat[id->driver_data];
+ port_type = board->port_type;
+
+ priv = kzalloc(sizeof(struct eg20t_port), GFP_KERNEL);
+ if (priv == NULL)
+ goto init_port_alloc_err;
+
+ rxbuf = (unsigned char *)__get_free_page(GFP_KERNEL);
+ if (!rxbuf)
+ goto init_port_free_txbuf;
+
+ base_baud = 1843200; /* 1.8432MHz */
+
+ /* quirk for CM-iTC board */
+ board_name = dmi_get_system_info(DMI_BOARD_NAME);
+ if (board_name && strstr(board_name, "CM-iTC"))
+ base_baud = 192000000; /* 192.0MHz */
+
+ switch (port_type) {
+ case PORT_UNKNOWN:
+ fifosize = 256; /* EG20T/ML7213: UART0 */
+ break;
+ case PORT_8250:
+ fifosize = 64; /* EG20T:UART1~3 ML7213: UART1~2*/
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid Port Type(=%d)\n", port_type);
+ goto init_port_hal_free;
+ }
+
+ iobase = pci_resource_start(pdev, 0);
+ mapbase = pci_resource_start(pdev, 1);
+ priv->mapbase = mapbase;
+ priv->iobase = iobase;
+ priv->pdev = pdev;
+ priv->tx_empty = 1;
+ priv->rxbuf.buf = rxbuf;
+ priv->rxbuf.size = PAGE_SIZE;
+
+ priv->fifo_size = fifosize;
+ priv->base_baud = base_baud;
+ priv->port_type = PORT_MAX_8250 + port_type + 1;
+ priv->port.dev = &pdev->dev;
+ priv->port.iobase = iobase;
+ priv->port.membase = NULL;
+ priv->port.mapbase = mapbase;
+ priv->port.irq = pdev->irq;
+ priv->port.iotype = UPIO_PORT;
+ priv->port.ops = &pch_uart_ops;
+ priv->port.flags = UPF_BOOT_AUTOCONF;
+ priv->port.fifosize = fifosize;
+ priv->port.line = board->line_no;
+ priv->trigger = PCH_UART_HAL_TRIGGER_M;
+
+ spin_lock_init(&priv->port.lock);
+
+ pci_set_drvdata(pdev, priv);
+ pch_uart_hal_request(pdev, fifosize, base_baud);
+
+ ret = uart_add_one_port(&pch_uart_driver, &priv->port);
+ if (ret < 0)
+ goto init_port_hal_free;
+
+ return priv;
+
+init_port_hal_free:
+ free_page((unsigned long)rxbuf);
+init_port_free_txbuf:
+ kfree(priv);
+init_port_alloc_err:
+
+ return NULL;
+}
+
+static void pch_uart_exit_port(struct eg20t_port *priv)
+{
+ uart_remove_one_port(&pch_uart_driver, &priv->port);
+ pci_set_drvdata(priv->pdev, NULL);
+ free_page((unsigned long)priv->rxbuf.buf);
+}
+
+static void pch_uart_pci_remove(struct pci_dev *pdev)
+{
+ struct eg20t_port *priv;
+
+ priv = (struct eg20t_port *)pci_get_drvdata(pdev);
+ pch_uart_exit_port(priv);
+ pci_disable_device(pdev);
+ kfree(priv);
+ return;
+}
+#ifdef CONFIG_PM
+static int pch_uart_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct eg20t_port *priv = pci_get_drvdata(pdev);
+
+ uart_suspend_port(&pch_uart_driver, &priv->port);
+
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int pch_uart_pci_resume(struct pci_dev *pdev)
+{
+ struct eg20t_port *priv = pci_get_drvdata(pdev);
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s-pci_enable_device failed(ret=%d) ", __func__, ret);
+ return ret;
+ }
+
+ uart_resume_port(&pch_uart_driver, &priv->port);
+
+ return 0;
+}
+#else
+#define pch_uart_pci_suspend NULL
+#define pch_uart_pci_resume NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(pch_uart_pci_id) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8811),
+ .driver_data = pch_et20t_uart0},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8812),
+ .driver_data = pch_et20t_uart1},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8813),
+ .driver_data = pch_et20t_uart2},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8814),
+ .driver_data = pch_et20t_uart3},
+ {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8027),
+ .driver_data = pch_ml7213_uart0},
+ {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8028),
+ .driver_data = pch_ml7213_uart1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8029),
+ .driver_data = pch_ml7213_uart2},
+ {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800C),
+ .driver_data = pch_ml7223_uart0},
+ {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D),
+ .driver_data = pch_ml7223_uart1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8811),
+ .driver_data = pch_ml7831_uart0},
+ {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8812),
+ .driver_data = pch_ml7831_uart1},
+ {0,},
+};
+
+static int __devinit pch_uart_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int ret;
+ struct eg20t_port *priv;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0)
+ goto probe_error;
+
+ priv = pch_uart_init_port(pdev, id);
+ if (!priv) {
+ ret = -EBUSY;
+ goto probe_disable_device;
+ }
+ pci_set_drvdata(pdev, priv);
+
+ return ret;
+
+probe_disable_device:
+ pci_disable_device(pdev);
+probe_error:
+ return ret;
+}
+
+static struct pci_driver pch_uart_pci_driver = {
+ .name = "pch_uart",
+ .id_table = pch_uart_pci_id,
+ .probe = pch_uart_pci_probe,
+ .remove = __devexit_p(pch_uart_pci_remove),
+ .suspend = pch_uart_pci_suspend,
+ .resume = pch_uart_pci_resume,
+};
+
+static int __init pch_uart_module_init(void)
+{
+ int ret;
+
+ /* register as UART driver */
+ ret = uart_register_driver(&pch_uart_driver);
+ if (ret < 0)
+ return ret;
+
+ /* register as PCI driver */
+ ret = pci_register_driver(&pch_uart_pci_driver);
+ if (ret < 0)
+ uart_unregister_driver(&pch_uart_driver);
+
+ return ret;
+}
+module_init(pch_uart_module_init);
+
+static void __exit pch_uart_module_exit(void)
+{
+ pci_unregister_driver(&pch_uart_pci_driver);
+ uart_unregister_driver(&pch_uart_driver);
+}
+module_exit(pch_uart_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel EG20T PCH UART PCI Driver");
+module_param(default_baud, uint, S_IRUGO);
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
new file mode 100644
index 00000000..5acd24a2
--- /dev/null
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -0,0 +1,2206 @@
+/*
+ * Driver for PowerMac Z85c30 based ESCC cell found in the
+ * "macio" ASICs of various PowerMac models
+ *
+ * Copyright (C) 2003 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * Derived from drivers/macintosh/macserial.c by Paul Mackerras
+ * and drivers/serial/sunzilog.c by David S. Miller
+ *
+ * Hrm... actually, I ripped most of sunzilog (Thanks David !) and
+ * adapted special tweaks needed for us. I don't think it's worth
+ * merging back those though. The DMA code still has to get in
+ * and once done, I expect that driver to remain fairly stable in
+ * the long term, unless we change the driver model again...
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * 2004-08-06 Harald Welte <laforge@gnumonks.org>
+ * - Enable BREAK interrupt
+ * - Add support for sysreq
+ *
+ * TODO: - Add DMA support
+ * - Defer port shutdown to a few seconds after close
+ * - maybe put something right into uap->clk_divisor
+ */
+
+#undef DEBUG
+#undef DEBUG_HARD
+#undef USE_CTRL_O_SYSRQ
+
+#include <linux/module.h>
+#include <linux/tty.h>
+
+#include <linux/tty_flip.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/bitops.h>
+#include <linux/sysrq.h>
+#include <linux/mutex.h>
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#ifdef CONFIG_PPC_PMAC
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/dbdma.h>
+#include <asm/macio.h>
+#else
+#include <linux/platform_device.h>
+#define of_machine_is_compatible(x) (0)
+#endif
+
+#if defined (CONFIG_SERIAL_PMACZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+
+#include "pmac_zilog.h"
+
+/* Not yet implemented */
+#undef HAS_DBDMA
+
+static char version[] __initdata = "pmac_zilog: 0.6 (Benjamin Herrenschmidt <benh@kernel.crashing.org>)";
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("Driver for the Mac and PowerMac serial ports.");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_SERIAL_PMACZILOG_TTYS
+#define PMACZILOG_MAJOR TTY_MAJOR
+#define PMACZILOG_MINOR 64
+#define PMACZILOG_NAME "ttyS"
+#else
+#define PMACZILOG_MAJOR 204
+#define PMACZILOG_MINOR 192
+#define PMACZILOG_NAME "ttyPZ"
+#endif
+
+
+/*
+ * For the sake of early serial console, we can do a pre-probe
+ * (optional) of the ports at rather early boot time.
+ */
+static struct uart_pmac_port pmz_ports[MAX_ZS_PORTS];
+static int pmz_ports_count;
+static DEFINE_MUTEX(pmz_irq_mutex);
+
+static struct uart_driver pmz_uart_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = PMACZILOG_NAME,
+ .dev_name = PMACZILOG_NAME,
+ .major = PMACZILOG_MAJOR,
+ .minor = PMACZILOG_MINOR,
+};
+
+
+/*
+ * Load all registers to reprogram the port
+ * This function must only be called when the TX is not busy. The UART
+ * port lock must be held and local interrupts disabled.
+ */
+static void pmz_load_zsregs(struct uart_pmac_port *uap, u8 *regs)
+{
+ int i;
+
+ if (ZS_IS_ASLEEP(uap))
+ return;
+
+ /* Let pending transmits finish. */
+ for (i = 0; i < 1000; i++) {
+ unsigned char stat = read_zsreg(uap, R1);
+ if (stat & ALL_SNT)
+ break;
+ udelay(100);
+ }
+
+ ZS_CLEARERR(uap);
+ zssync(uap);
+ ZS_CLEARFIFO(uap);
+ zssync(uap);
+ ZS_CLEARERR(uap);
+
+ /* Disable all interrupts. */
+ write_zsreg(uap, R1,
+ regs[R1] & ~(RxINT_MASK | TxINT_ENAB | EXT_INT_ENAB));
+
+ /* Set parity, sync config, stop bits, and clock divisor. */
+ write_zsreg(uap, R4, regs[R4]);
+
+ /* Set misc. TX/RX control bits. */
+ write_zsreg(uap, R10, regs[R10]);
+
+ /* Set TX/RX controls sans the enable bits. */
+ write_zsreg(uap, R3, regs[R3] & ~RxENABLE);
+ write_zsreg(uap, R5, regs[R5] & ~TxENABLE);
+
+ /* now set R7 "prime" on ESCC */
+ write_zsreg(uap, R15, regs[R15] | EN85C30);
+ write_zsreg(uap, R7, regs[R7P]);
+
+ /* make sure we use R7 "non-prime" on ESCC */
+ write_zsreg(uap, R15, regs[R15] & ~EN85C30);
+
+ /* Synchronous mode config. */
+ write_zsreg(uap, R6, regs[R6]);
+ write_zsreg(uap, R7, regs[R7]);
+
+ /* Disable baud generator. */
+ write_zsreg(uap, R14, regs[R14] & ~BRENAB);
+
+ /* Clock mode control. */
+ write_zsreg(uap, R11, regs[R11]);
+
+ /* Lower and upper byte of baud rate generator divisor. */
+ write_zsreg(uap, R12, regs[R12]);
+ write_zsreg(uap, R13, regs[R13]);
+
+ /* Now rewrite R14, with BRENAB (if set). */
+ write_zsreg(uap, R14, regs[R14]);
+
+ /* Reset external status interrupts. */
+ write_zsreg(uap, R0, RES_EXT_INT);
+ write_zsreg(uap, R0, RES_EXT_INT);
+
+ /* Rewrite R3/R5, this time without enables masked. */
+ write_zsreg(uap, R3, regs[R3]);
+ write_zsreg(uap, R5, regs[R5]);
+
+ /* Rewrite R1, this time without IRQ enabled masked. */
+ write_zsreg(uap, R1, regs[R1]);
+
+ /* Enable interrupts */
+ write_zsreg(uap, R9, regs[R9]);
+}
+
+/*
+ * Like sunzilog, we avoid disrupting a pending Tx.
+ * Reprogram the Zilog channel HW registers with the copies found in the
+ * software state struct. If the transmitter is busy, we defer this update
+ * until the next TX complete interrupt. Else, we do it right now.
+ *
+ * The UART port lock must be held and local interrupts disabled.
+ */
+static void pmz_maybe_update_regs(struct uart_pmac_port *uap)
+{
+ if (!ZS_REGS_HELD(uap)) {
+ if (ZS_TX_ACTIVE(uap)) {
+ uap->flags |= PMACZILOG_FLAG_REGS_HELD;
+ } else {
+ pmz_debug("pmz: maybe_update_regs: updating\n");
+ pmz_load_zsregs(uap, uap->curregs);
+ }
+ }
+}
+
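+/*
+ * Drain the Rx FIFO, translating break/parity/framing/overrun conditions
+ * into tty flags. Returns the tty whose flip buffer needs to be pushed,
+ * or NULL if nothing was queued. The port lock is held.
+ */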
+static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap)
+{
+ struct tty_struct *tty = NULL;
+ unsigned char ch, r1, drop, error, flag;
+ int loops = 0;
+
+ /* The interrupt can be enabled when the port isn't open, typically
+ * that happens when one port is open and the other closed (stale
+ * interrupt) or when one port is used as a console.
+ */
+ if (!ZS_IS_OPEN(uap)) {
+ pmz_debug("pmz: draining input\n");
+ /* Port is closed, drain input data */
+ for (;;) {
+ if ((++loops) > 1000)
+ goto flood;
+ (void)read_zsreg(uap, R1);
+ write_zsreg(uap, R0, ERR_RES);
+ (void)read_zsdata(uap);
+ ch = read_zsreg(uap, R0);
+ if (!(ch & Rx_CH_AV))
+ break;
+ }
+ return NULL;
+ }
+
+ /* Sanity check, make sure the old bug is no longer happening */
+ if (uap->port.state == NULL || uap->port.state->port.tty == NULL) {
+ WARN_ON(1);
+ (void)read_zsdata(uap);
+ return NULL;
+ }
+ tty = uap->port.state->port.tty;
+
+ while (1) {
+ error = 0;
+ drop = 0;
+
+ r1 = read_zsreg(uap, R1);
+ ch = read_zsdata(uap);
+
+ if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) {
+ write_zsreg(uap, R0, ERR_RES);
+ zssync(uap);
+ }
+
+ ch &= uap->parity_mask;
+ if (ch == 0 && uap->flags & PMACZILOG_FLAG_BREAK) {
+ uap->flags &= ~PMACZILOG_FLAG_BREAK;
+ }
+
+#if defined(CONFIG_MAGIC_SYSRQ) && defined(CONFIG_SERIAL_CORE_CONSOLE)
+#ifdef USE_CTRL_O_SYSRQ
+ /* Handle the SysRq ^O Hack */
+ if (ch == '\x0f') {
+ uap->port.sysrq = jiffies + HZ*5;
+ goto next_char;
+ }
+#endif /* USE_CTRL_O_SYSRQ */
+ if (uap->port.sysrq) {
+ int swallow;
+ spin_unlock(&uap->port.lock);
+ swallow = uart_handle_sysrq_char(&uap->port, ch);
+ spin_lock(&uap->port.lock);
+ if (swallow)
+ goto next_char;
+ }
+#endif /* CONFIG_MAGIC_SYSRQ && CONFIG_SERIAL_CORE_CONSOLE */
+
+ /* A real serial line, record the character and status. */
+ if (drop)
+ goto next_char;
+
+ flag = TTY_NORMAL;
+ uap->port.icount.rx++;
+
+ if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR | BRK_ABRT)) {
+ error = 1;
+ if (r1 & BRK_ABRT) {
+ pmz_debug("pmz: got break !\n");
+ r1 &= ~(PAR_ERR | CRC_ERR);
+ uap->port.icount.brk++;
+ if (uart_handle_break(&uap->port))
+ goto next_char;
+ }
+ else if (r1 & PAR_ERR)
+ uap->port.icount.parity++;
+ else if (r1 & CRC_ERR)
+ uap->port.icount.frame++;
+ if (r1 & Rx_OVR)
+ uap->port.icount.overrun++;
+ r1 &= uap->port.read_status_mask;
+ if (r1 & BRK_ABRT)
+ flag = TTY_BREAK;
+ else if (r1 & PAR_ERR)
+ flag = TTY_PARITY;
+ else if (r1 & CRC_ERR)
+ flag = TTY_FRAME;
+ }
+
+ if (uap->port.ignore_status_mask == 0xff ||
+ (r1 & uap->port.ignore_status_mask) == 0) {
+ tty_insert_flip_char(tty, ch, flag);
+ }
+ if (r1 & Rx_OVR)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ next_char:
+ /* We can get stuck in an infinite loop getting char 0 when the
+ * line is in a wrong HW state; we break out of that here.
+ * When that happens, I disable the receive side of the driver.
+ * Note that what I've been experiencing is a real irq loop where
+ * I'm getting flooded regardless of the actual port speed.
+ * Something strange is going on with the HW.
+ */
+ if ((++loops) > 1000)
+ goto flood;
+ ch = read_zsreg(uap, R0);
+ if (!(ch & Rx_CH_AV))
+ break;
+ }
+
+ return tty;
+ flood:
+ uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
+ write_zsreg(uap, R1, uap->curregs[R1]);
+ zssync(uap);
+ pmz_error("pmz: rx irq flood !\n");
+ return tty;
+}
+
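+/*
+ * Handle an external/status interrupt: latch the new R0 status, report
+ * DCD and CTS transitions to the serial core and remember break
+ * conditions for the receive path.
+ */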
+static void pmz_status_handle(struct uart_pmac_port *uap)
+{
+ unsigned char status;
+
+ status = read_zsreg(uap, R0);
+ write_zsreg(uap, R0, RES_EXT_INT);
+ zssync(uap);
+
+ if (ZS_IS_OPEN(uap) && ZS_WANTS_MODEM_STATUS(uap)) {
+ if (status & SYNC_HUNT)
+ uap->port.icount.dsr++;
+
+ /* The Zilog just gives us an interrupt when DCD/CTS/etc. change.
+ * But it does not tell us which bit has changed, so we have to keep
+ * track of this ourselves.
+ * The CTS input is inverted for some reason. -- paulus
+ */
+ if ((status ^ uap->prev_status) & DCD)
+ uart_handle_dcd_change(&uap->port,
+ (status & DCD));
+ if ((status ^ uap->prev_status) & CTS)
+ uart_handle_cts_change(&uap->port,
+ !(status & CTS));
+
+ wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
+ }
+
+ if (status & BRK_ABRT)
+ uap->flags |= PMACZILOG_FLAG_BREAK;
+
+ uap->prev_status = status;
+}
+
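+/*
+ * Handle a Tx interrupt: apply any deferred register update, then feed
+ * the next character (x_char first) to the transmitter, or ack the Tx
+ * interrupt when there is nothing left to send.
+ */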
+static void pmz_transmit_chars(struct uart_pmac_port *uap)
+{
+ struct circ_buf *xmit;
+
+ if (ZS_IS_ASLEEP(uap))
+ return;
+ if (ZS_IS_CONS(uap)) {
+ unsigned char status = read_zsreg(uap, R0);
+
+ /* TX still busy? Just wait for the next TX done interrupt.
+ *
+ * It can occur because of how we do serial console writes. It would
+ * be nice to transmit console writes just like we normally would for
+ * a TTY line. (ie. buffered and TX interrupt driven). That is not
+ * easy because console writes cannot sleep. One solution might be
+ * to poll on enough port->xmit space becoming free. -DaveM
+ */
+ if (!(status & Tx_BUF_EMP))
+ return;
+ }
+
+ uap->flags &= ~PMACZILOG_FLAG_TX_ACTIVE;
+
+ if (ZS_REGS_HELD(uap)) {
+ pmz_load_zsregs(uap, uap->curregs);
+ uap->flags &= ~PMACZILOG_FLAG_REGS_HELD;
+ }
+
+ if (ZS_TX_STOPPED(uap)) {
+ uap->flags &= ~PMACZILOG_FLAG_TX_STOPPED;
+ goto ack_tx_int;
+ }
+
+ /* Under some circumstances, we see interrupts reported for
+ * a closed channel. The interrupt mask in R1 is clear, but
+ * R3 still signals the interrupts and we see them when taking
+ * an interrupt for the other channel (this could be a qemu
+ * bug but since the ESCC doc doesn't specify precisely whether
+ * R3 interrupt status bits are masked by R1 interrupt enable
+ * bits, better safe than sorry). --BenH.
+ */
+ if (!ZS_IS_OPEN(uap))
+ goto ack_tx_int;
+
+ if (uap->port.x_char) {
+ uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
+ write_zsdata(uap, uap->port.x_char);
+ zssync(uap);
+ uap->port.icount.tx++;
+ uap->port.x_char = 0;
+ return;
+ }
+
+ if (uap->port.state == NULL)
+ goto ack_tx_int;
+ xmit = &uap->port.state->xmit;
+ if (uart_circ_empty(xmit)) {
+ uart_write_wakeup(&uap->port);
+ goto ack_tx_int;
+ }
+ if (uart_tx_stopped(&uap->port))
+ goto ack_tx_int;
+
+ uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
+ write_zsdata(uap, xmit->buf[xmit->tail]);
+ zssync(uap);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ uap->port.icount.tx++;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&uap->port);
+
+ return;
+
+ack_tx_int:
+ write_zsreg(uap, R0, RES_Tx_P);
+ zssync(uap);
+}
+
+/* Hrm... we register that twice, fixme later.... */
+static irqreturn_t pmz_interrupt(int irq, void *dev_id)
+{
+ struct uart_pmac_port *uap = dev_id;
+ struct uart_pmac_port *uap_a;
+ struct uart_pmac_port *uap_b;
+ int rc = IRQ_NONE;
+ struct tty_struct *tty;
+ u8 r3;
+
+ uap_a = pmz_get_port_A(uap);
+ uap_b = uap_a->mate;
+
+ spin_lock(&uap_a->port.lock);
+ r3 = read_zsreg(uap_a, R3);
+
+#ifdef DEBUG_HARD
+ pmz_debug("irq, r3: %x\n", r3);
+#endif
+ /* Channel A */
+ tty = NULL;
+ if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
+ write_zsreg(uap_a, R0, RES_H_IUS);
+ zssync(uap_a);
+ if (r3 & CHAEXT)
+ pmz_status_handle(uap_a);
+ if (r3 & CHARxIP)
+ tty = pmz_receive_chars(uap_a);
+ if (r3 & CHATxIP)
+ pmz_transmit_chars(uap_a);
+ rc = IRQ_HANDLED;
+ }
+ spin_unlock(&uap_a->port.lock);
+ if (tty != NULL)
+ tty_flip_buffer_push(tty);
+
+ if (uap_b->node == NULL)
+ goto out;
+
+ spin_lock(&uap_b->port.lock);
+ tty = NULL;
+ if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
+ write_zsreg(uap_b, R0, RES_H_IUS);
+ zssync(uap_b);
+ if (r3 & CHBEXT)
+ pmz_status_handle(uap_b);
+ if (r3 & CHBRxIP)
+ tty = pmz_receive_chars(uap_b);
+ if (r3 & CHBTxIP)
+ pmz_transmit_chars(uap_b);
+ rc = IRQ_HANDLED;
+ }
+ spin_unlock(&uap_b->port.lock);
+ if (tty != NULL)
+ tty_flip_buffer_push(tty);
+
+ out:
+#ifdef DEBUG_HARD
+ pmz_debug("irq done.\n");
+#endif
+ return rc;
+}
+
+/*
+ * Peek the status register, lock not held by caller
+ */
+static inline u8 pmz_peek_status(struct uart_pmac_port *uap)
+{
+ unsigned long flags;
+ u8 status;
+
+ spin_lock_irqsave(&uap->port.lock, flags);
+ status = read_zsreg(uap, R0);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+
+ return status;
+}
+
+/*
+ * Check if transmitter is empty
+ * The port lock is not held.
+ */
+static unsigned int pmz_tx_empty(struct uart_port *port)
+{
+ struct uart_pmac_port *uap = to_pmz(port);
+ unsigned char status;
+
+ if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
+ return TIOCSER_TEMT;
+
+ status = pmz_peek_status(to_pmz(port));
+ if (status & Tx_BUF_EMP)
+ return TIOCSER_TEMT;
+ return 0;
+}
+
+/*
+ * Set Modem Control (RTS & DTR) bits
+ * The port lock is held and interrupts are disabled.
+ * Note: Shall we really filter out RTS on external ports or
+ * should that be dealt with at a higher level only?
+ */
+static void pmz_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_pmac_port *uap = to_pmz(port);
+ unsigned char set_bits, clear_bits;
+
+ /* Do nothing for irda for now... */
+ if (ZS_IS_IRDA(uap))
+ return;
+ /* We get called during boot with a port not up yet */
+ if (ZS_IS_ASLEEP(uap) ||
+ !(ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)))
+ return;
+
+ set_bits = clear_bits = 0;
+
+ if (ZS_IS_INTMODEM(uap)) {
+ if (mctrl & TIOCM_RTS)
+ set_bits |= RTS;
+ else
+ clear_bits |= RTS;
+ }
+ if (mctrl & TIOCM_DTR)
+ set_bits |= DTR;
+ else
+ clear_bits |= DTR;
+
+ /* NOTE: Not subject to 'transmitter active' rule. */
+ uap->curregs[R5] |= set_bits;
+ uap->curregs[R5] &= ~clear_bits;
+ if (ZS_IS_ASLEEP(uap))
+ return;
+ write_zsreg(uap, R5, uap->curregs[R5]);
+ pmz_debug("pmz_set_mctrl: set bits: %x, clear bits: %x -> %x\n",
+ set_bits, clear_bits, uap->curregs[R5]);
+ zssync(uap);
+}
+
+/*
+ * Get Modem Control bits (only the input ones, the core will
+ * or that with a cached value of the control ones)
+ * The port lock is held and interrupts are disabled.
+ */
+static unsigned int pmz_get_mctrl(struct uart_port *port)
+{
+ struct uart_pmac_port *uap = to_pmz(port);
+ unsigned char status;
+ unsigned int ret;
+
+ if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
+ return 0;
+
+ status = read_zsreg(uap, R0);
+
+ ret = 0;
+ if (status & DCD)
+ ret |= TIOCM_CAR;
+ if (status & SYNC_HUNT)
+ ret |= TIOCM_DSR;
+ if (!(status & CTS))
+ ret |= TIOCM_CTS;
+
+ return ret;
+}
+
+/*
+ * Stop TX side. Dealt like sunzilog at next Tx interrupt,
+ * though for DMA, we will have to do a bit more.
+ * The port lock is held and interrupts are disabled.
+ */
+static void pmz_stop_tx(struct uart_port *port)
+{
+ to_pmz(port)->flags |= PMACZILOG_FLAG_TX_STOPPED;
+}
+
+/*
+ * Kick the Tx side.
+ * The port lock is held and interrupts are disabled.
+ */
+static void pmz_start_tx(struct uart_port *port)
+{
+ struct uart_pmac_port *uap = to_pmz(port);
+ unsigned char status;
+
+ pmz_debug("pmz: start_tx()\n");
+
+ uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
+ uap->flags &= ~PMACZILOG_FLAG_TX_STOPPED;
+
+ if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
+ return;
+
+ status = read_zsreg(uap, R0);
+
+ /* TX busy? Just wait for the TX done interrupt. */
+ if (!(status & Tx_BUF_EMP))
+ return;
+
+ /* Send the first character to jump-start the TX done
+ * IRQ sending engine.
+ */
+ if (port->x_char) {
+ write_zsdata(uap, port->x_char);
+ zssync(uap);
+ port->icount.tx++;
+ port->x_char = 0;
+ } else {
+ struct circ_buf *xmit = &port->state->xmit;
+
+ write_zsdata(uap, xmit->buf[xmit->tail]);
+ zssync(uap);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&uap->port);
+ }
+ pmz_debug("pmz: start_tx() done.\n");
+}
+
+/*
+ * Stop the Rx side: basically, disable the emission of Rx interrupts
+ * on the port. We don't disable the Rx side of the chip proper, though.
+ * The port lock is held.
+ */
+static void pmz_stop_rx(struct uart_port *port)
+{
+ struct uart_pmac_port *uap = to_pmz(port);
+
+ if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
+ return;
+
+ pmz_debug("pmz: stop_rx()()\n");
+
+ /* Disable all RX interrupts. */
+ uap->curregs[R1] &= ~RxINT_MASK;
+ pmz_maybe_update_regs(uap);
+
+ pmz_debug("pmz: stop_rx() done.\n");
+}
+
+/*
+ * Enable modem status change interrupts
+ * The port lock is held.
+ */
+static void pmz_enable_ms(struct uart_port *port)
+{
+ struct uart_pmac_port *uap = to_pmz(port);
+ unsigned char new_reg;
+
+ if (ZS_IS_IRDA(uap) || uap->node == NULL)
+ return;
+ new_reg = uap->curregs[R15] | (DCDIE | SYNCIE | CTSIE);
+ if (new_reg != uap->curregs[R15]) {
+ uap->curregs[R15] = new_reg;
+
+ if (ZS_IS_ASLEEP(uap))
+ return;
+ /* NOTE: Not subject to 'transmitter active' rule. */
+ write_zsreg(uap, R15, uap->curregs[R15]);
+ }
+}
+
+/*
+ * Control break state emission
+ * The port lock is not held.
+ */
+static void pmz_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_pmac_port *uap = to_pmz(port);
+ unsigned char set_bits, clear_bits, new_reg;
+ unsigned long flags;
+
+ if (uap->node == NULL)
+ return;
+ set_bits = clear_bits = 0;
+
+ if (break_state)
+ set_bits |= SND_BRK;
+ else
+ clear_bits |= SND_BRK;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ new_reg = (uap->curregs[R5] | set_bits) & ~clear_bits;
+ if (new_reg != uap->curregs[R5]) {
+ uap->curregs[R5] = new_reg;
+
+ /* NOTE: Not subject to 'transmitter active' rule. */
+ if (ZS_IS_ASLEEP(uap)) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ return;
+ }
+ write_zsreg(uap, R5, uap->curregs[R5]);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+#ifdef CONFIG_PPC_PMAC
+
+/*
+ * Turn power on or off to the SCC and associated stuff
+ * (port drivers, modem, IR port, etc.)
+ * Returns the number of milliseconds we should wait before
+ * trying to use the port.
+ */
+static int pmz_set_scc_power(struct uart_pmac_port *uap, int state)
+{
+ int delay = 0;
+ int rc;
+
+ if (state) {
+ rc = pmac_call_feature(
+ PMAC_FTR_SCC_ENABLE, uap->node, uap->port_type, 1);
+ pmz_debug("port power on result: %d\n", rc);
+ if (ZS_IS_INTMODEM(uap)) {
+ rc = pmac_call_feature(
+ PMAC_FTR_MODEM_ENABLE, uap->node, 0, 1);
+ delay = 2500; /* wait for 2.5s before using */
+ pmz_debug("modem power result: %d\n", rc);
+ }
+ } else {
+ /* TODO: Make that depend on a timer, don't power down
+ * immediately
+ */
+ if (ZS_IS_INTMODEM(uap)) {
+ rc = pmac_call_feature(
+ PMAC_FTR_MODEM_ENABLE, uap->node, 0, 0);
+ pmz_debug("port power off result: %d\n", rc);
+ }
+ pmac_call_feature(PMAC_FTR_SCC_ENABLE, uap->node, uap->port_type, 0);
+ }
+ return delay;
+}
+
+#else
+
+static int pmz_set_scc_power(struct uart_pmac_port *uap, int state)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_PPC_PMAC */
+
+/*
+ * FixZeroBug....Works around a bug in the SCC receiving channel.
+ * Inspired from Darwin code, 15 Sept. 2000 -DanM
+ *
+ * The following sequence prevents a problem that is seen with O'Hare ASICs
+ * (most versions -- also with some Heathrow and Hydra ASICs) where a zero
+ * at the input to the receiver becomes 'stuck' and locks up the receiver.
+ * This problem can occur as a result of a zero bit at the receiver input
+ * coincident with any of the following events:
+ *
+ * The SCC is initialized (hardware or software).
+ * A framing error is detected.
+ * The clocking option changes from synchronous or X1 asynchronous
+ * clocking to X16, X32, or X64 asynchronous clocking.
+ * The decoding mode is changed among NRZ, NRZI, FM0, or FM1.
+ *
+ * This workaround attempts to recover from the lockup condition by placing
+ * the SCC in synchronous loopback mode with a fast clock before programming
+ * any of the asynchronous modes.
+ */
+static void pmz_fix_zero_bug_scc(struct uart_pmac_port *uap)
+{
+ write_zsreg(uap, 9, ZS_IS_CHANNEL_A(uap) ? CHRA : CHRB);
+ zssync(uap);
+ udelay(10);
+ write_zsreg(uap, 9, (ZS_IS_CHANNEL_A(uap) ? CHRA : CHRB) | NV);
+ zssync(uap);
+
+ write_zsreg(uap, 4, X1CLK | MONSYNC);
+ write_zsreg(uap, 3, Rx8);
+ write_zsreg(uap, 5, Tx8 | RTS);
+ write_zsreg(uap, 9, NV); /* Didn't we already do this? */
+ write_zsreg(uap, 11, RCBR | TCBR);
+ write_zsreg(uap, 12, 0);
+ write_zsreg(uap, 13, 0);
+ write_zsreg(uap, 14, (LOOPBAK | BRSRC));
+ write_zsreg(uap, 14, (LOOPBAK | BRSRC | BRENAB));
+ write_zsreg(uap, 3, Rx8 | RxENABLE);
+ write_zsreg(uap, 0, RES_EXT_INT);
+ write_zsreg(uap, 0, RES_EXT_INT);
+ write_zsreg(uap, 0, RES_EXT_INT); /* to kill some time */
+
+ /* The channel should be OK now, but it is probably receiving
+ * loopback garbage.
+ * Switch to asynchronous mode, disable the receiver,
+ * and discard everything in the receive buffer.
+ */
+ write_zsreg(uap, 9, NV);
+ write_zsreg(uap, 4, X16CLK | SB_MASK);
+ write_zsreg(uap, 3, Rx8);
+
+ while (read_zsreg(uap, 0) & Rx_CH_AV) {
+ (void)read_zsreg(uap, 8);
+ write_zsreg(uap, 0, RES_EXT_INT);
+ write_zsreg(uap, 0, ERR_RES);
+ }
+}
+
+/*
+ * Real startup routine, powers up the hardware and sets up
+ * the SCC. Returns a delay in ms where you need to wait before
+ * actually using the port, this is typically the internal modem
+ * powerup delay. This routine expect the lock to be taken.
+ */
+static int __pmz_startup(struct uart_pmac_port *uap)
+{
+ int pwr_delay = 0;
+
+ memset(&uap->curregs, 0, sizeof(uap->curregs));
+
+ /* Power up the SCC & underlying hardware (modem/irda) */
+ pwr_delay = pmz_set_scc_power(uap, 1);
+
+ /* Nice buggy HW ... */
+ pmz_fix_zero_bug_scc(uap);
+
+ /* Reset the channel */
+ uap->curregs[R9] = 0;
+ write_zsreg(uap, 9, ZS_IS_CHANNEL_A(uap) ? CHRA : CHRB);
+ zssync(uap);
+ udelay(10);
+ write_zsreg(uap, 9, 0);
+ zssync(uap);
+
+ /* Clear the interrupt registers */
+ write_zsreg(uap, R1, 0);
+ write_zsreg(uap, R0, ERR_RES);
+ write_zsreg(uap, R0, ERR_RES);
+ write_zsreg(uap, R0, RES_H_IUS);
+ write_zsreg(uap, R0, RES_H_IUS);
+
+ /* Setup some valid baud rate */
+ uap->curregs[R4] = X16CLK | SB1;
+ uap->curregs[R3] = Rx8;
+ uap->curregs[R5] = Tx8 | RTS;
+ if (!ZS_IS_IRDA(uap))
+ uap->curregs[R5] |= DTR;
+ uap->curregs[R12] = 0;
+ uap->curregs[R13] = 0;
+ uap->curregs[R14] = BRENAB;
+
+ /* Clear handshaking, enable BREAK interrupts */
+ uap->curregs[R15] = BRKIE;
+
+ /* Master interrupt enable */
+ uap->curregs[R9] |= NV | MIE;
+
+ pmz_load_zsregs(uap, uap->curregs);
+
+ /* Enable receiver and transmitter. */
+ write_zsreg(uap, R3, uap->curregs[R3] |= RxENABLE);
+ write_zsreg(uap, R5, uap->curregs[R5] |= TxENABLE);
+
+ /* Remember status for DCD/CTS changes */
+ uap->prev_status = read_zsreg(uap, R0);
+
+ return pwr_delay;
+}
+
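+/*
+ * IrDA reset: pulse DTR (assert it for about 110 ms, then drop it and
+ * wait a further 10 ms) before using the port.
+ */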
+static void pmz_irda_reset(struct uart_pmac_port *uap)
+{
+ uap->curregs[R5] |= DTR;
+ write_zsreg(uap, R5, uap->curregs[R5]);
+ zssync(uap);
+ mdelay(110);
+ uap->curregs[R5] &= ~DTR;
+ write_zsreg(uap, R5, uap->curregs[R5]);
+ zssync(uap);
+ mdelay(10);
+}
+
+/*
+ * This is the "normal" startup routine, using the above one
+ * wrapped with the lock and doing a schedule delay
+ */
+static int pmz_startup(struct uart_port *port)
+{
+ struct uart_pmac_port *uap = to_pmz(port);
+ unsigned long flags;
+ int pwr_delay = 0;
+
+ pmz_debug("pmz: startup()\n");
+
+ if (ZS_IS_ASLEEP(uap))
+ return -EAGAIN;
+ if (uap->node == NULL)
+ return -ENODEV;
+
+ mutex_lock(&pmz_irq_mutex);
+
+ uap->flags |= PMACZILOG_FLAG_IS_OPEN;
+
+ /* A console is never powered down. Else, power up and
+ * initialize the chip
+ */
+ if (!ZS_IS_CONS(uap)) {
+ spin_lock_irqsave(&port->lock, flags);
+ pwr_delay = __pmz_startup(uap);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ pmz_get_port_A(uap)->flags |= PMACZILOG_FLAG_IS_IRQ_ON;
+ if (request_irq(uap->port.irq, pmz_interrupt, IRQF_SHARED,
+ "SCC", uap)) {
+ pmz_error("Unable to register zs interrupt handler.\n");
+ pmz_set_scc_power(uap, 0);
+ mutex_unlock(&pmz_irq_mutex);
+ return -ENXIO;
+ }
+
+ mutex_unlock(&pmz_irq_mutex);
+
+ /* Right now, we deal with delay by blocking here, I'll be
+ * smarter later on
+ */
+ if (pwr_delay != 0) {
+ pmz_debug("pmz: delaying %d ms\n", pwr_delay);
+ msleep(pwr_delay);
+ }
+
+ /* IrDA reset is done now */
+ if (ZS_IS_IRDA(uap))
+ pmz_irda_reset(uap);
+
+ /* Enable interrupts emission from the chip */
+ spin_lock_irqsave(&port->lock, flags);
+ uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
+ if (!ZS_IS_EXTCLK(uap))
+ uap->curregs[R1] |= EXT_INT_ENAB;
+ write_zsreg(uap, R1, uap->curregs[R1]);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ pmz_debug("pmz: startup() done.\n");
+
+ return 0;
+}
+
+static void pmz_shutdown(struct uart_port *port)
+{
+ struct uart_pmac_port *uap = to_pmz(port);
+ unsigned long flags;
+
+ pmz_debug("pmz: shutdown()\n");
+
+ if (uap->node == NULL)
+ return;
+
+ mutex_lock(&pmz_irq_mutex);
+
+ /* Release interrupt handler */
+ free_irq(uap->port.irq, uap);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ uap->flags &= ~PMACZILOG_FLAG_IS_OPEN;
+
+ if (!ZS_IS_OPEN(uap->mate))
+ pmz_get_port_A(uap)->flags &= ~PMACZILOG_FLAG_IS_IRQ_ON;
+
+ /* Disable interrupts */
+ if (!ZS_IS_ASLEEP(uap)) {
+ uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
+ write_zsreg(uap, R1, uap->curregs[R1]);
+ zssync(uap);
+ }
+
+ if (ZS_IS_CONS(uap) || ZS_IS_ASLEEP(uap)) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ mutex_unlock(&pmz_irq_mutex);
+ return;
+ }
+
+ /* Disable receiver and transmitter. */
+ uap->curregs[R3] &= ~RxENABLE;
+ uap->curregs[R5] &= ~TxENABLE;
+
+ /* Disable all interrupts and BRK assertion. */
+ uap->curregs[R5] &= ~SND_BRK;
+ pmz_maybe_update_regs(uap);
+
+ /* Shut the chip down */
+ pmz_set_scc_power(uap, 0);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ mutex_unlock(&pmz_irq_mutex);
+
+ pmz_debug("pmz: shutdown() done.\n");
+}
+
+/* Shared by TTY driver and serial console setup. The port lock is held
+ * and local interrupts are disabled.
+ */
+static void pmz_convert_to_zs(struct uart_pmac_port *uap, unsigned int cflag,
+ unsigned int iflag, unsigned long baud)
+{
+ int brg;
+
+ /* Switch to external clocking for IrDA high clock rates. This
+ * code could be re-used for MIDI interfaces with different
+ * multipliers.
+ */
+ if (baud >= 115200 && ZS_IS_IRDA(uap)) {
+ uap->curregs[R4] = X1CLK;
+ uap->curregs[R11] = RCTRxCP | TCTRxCP;
+ uap->curregs[R14] = 0; /* BRG off */
+ uap->curregs[R12] = 0;
+ uap->curregs[R13] = 0;
+ uap->flags |= PMACZILOG_FLAG_IS_EXTCLK;
+ } else {
+ switch (baud) {
+ case ZS_CLOCK/16: /* 230400 */
+ uap->curregs[R4] = X16CLK;
+ uap->curregs[R11] = 0;
+ uap->curregs[R14] = 0;
+ break;
+ case ZS_CLOCK/32: /* 115200 */
+ uap->curregs[R4] = X32CLK;
+ uap->curregs[R11] = 0;
+ uap->curregs[R14] = 0;
+ break;
+ default:
+ uap->curregs[R4] = X16CLK;
+ uap->curregs[R11] = TCBR | RCBR;
+ brg = BPS_TO_BRG(baud, ZS_CLOCK / 16);
+ uap->curregs[R12] = (brg & 255);
+ uap->curregs[R13] = ((brg >> 8) & 255);
+ uap->curregs[R14] = BRENAB;
+ }
+ uap->flags &= ~PMACZILOG_FLAG_IS_EXTCLK;
+ }
+
+ /* Character size, stop bits, and parity. */
+ uap->curregs[3] &= ~RxN_MASK;
+ uap->curregs[5] &= ~TxN_MASK;
+
+ switch (cflag & CSIZE) {
+ case CS5:
+ uap->curregs[3] |= Rx5;
+ uap->curregs[5] |= Tx5;
+ uap->parity_mask = 0x1f;
+ break;
+ case CS6:
+ uap->curregs[3] |= Rx6;
+ uap->curregs[5] |= Tx6;
+ uap->parity_mask = 0x3f;
+ break;
+ case CS7:
+ uap->curregs[3] |= Rx7;
+ uap->curregs[5] |= Tx7;
+ uap->parity_mask = 0x7f;
+ break;
+ case CS8:
+ default:
+ uap->curregs[3] |= Rx8;
+ uap->curregs[5] |= Tx8;
+ uap->parity_mask = 0xff;
+ break;
+ }
+ uap->curregs[4] &= ~(SB_MASK);
+ if (cflag & CSTOPB)
+ uap->curregs[4] |= SB2;
+ else
+ uap->curregs[4] |= SB1;
+ if (cflag & PARENB)
+ uap->curregs[4] |= PAR_ENAB;
+ else
+ uap->curregs[4] &= ~PAR_ENAB;
+ if (!(cflag & PARODD))
+ uap->curregs[4] |= PAR_EVEN;
+ else
+ uap->curregs[4] &= ~PAR_EVEN;
+
+ uap->port.read_status_mask = Rx_OVR;
+ if (iflag & INPCK)
+ uap->port.read_status_mask |= CRC_ERR | PAR_ERR;
+ if (iflag & (BRKINT | PARMRK))
+ uap->port.read_status_mask |= BRK_ABRT;
+
+ uap->port.ignore_status_mask = 0;
+ if (iflag & IGNPAR)
+ uap->port.ignore_status_mask |= CRC_ERR | PAR_ERR;
+ if (iflag & IGNBRK) {
+ uap->port.ignore_status_mask |= BRK_ABRT;
+ if (iflag & IGNPAR)
+ uap->port.ignore_status_mask |= Rx_OVR;
+ }
+
+ if ((cflag & CREAD) == 0)
+ uap->port.ignore_status_mask = 0xff;
+}
+
+
+/*
+ * Set the IrDA codec on the iMac to the specified baud rate.
+ */
+static void pmz_irda_setup(struct uart_pmac_port *uap, unsigned long *baud)
+{
+ u8 cmdbyte;
+ int t, version;
+
+ switch (*baud) {
+ /* SIR modes */
+ case 2400:
+ cmdbyte = 0x53;
+ break;
+ case 4800:
+ cmdbyte = 0x52;
+ break;
+ case 9600:
+ cmdbyte = 0x51;
+ break;
+ case 19200:
+ cmdbyte = 0x50;
+ break;
+ case 38400:
+ cmdbyte = 0x4f;
+ break;
+ case 57600:
+ cmdbyte = 0x4e;
+ break;
+ case 115200:
+ cmdbyte = 0x4d;
+ break;
+ /* The FIR modes aren't really supported at this point. How
+ * do we select the speed? Via the FCR on KeyLargo?
+ */
+ case 1152000:
+ cmdbyte = 0;
+ break;
+ case 4000000:
+ cmdbyte = 0;
+ break;
+ default: /* 9600 */
+ cmdbyte = 0x51;
+ *baud = 9600;
+ break;
+ }
+
+ /* Wait for transmitter to drain */
+ t = 10000;
+ while ((read_zsreg(uap, R0) & Tx_BUF_EMP) == 0
+ || (read_zsreg(uap, R1) & ALL_SNT) == 0) {
+ if (--t <= 0) {
+ pmz_error("transmitter didn't drain\n");
+ return;
+ }
+ udelay(10);
+ }
+
+ /* Drain the receiver too */
+ t = 100;
+ (void)read_zsdata(uap);
+ (void)read_zsdata(uap);
+ (void)read_zsdata(uap);
+ mdelay(10);
+ while (read_zsreg(uap, R0) & Rx_CH_AV) {
+ read_zsdata(uap);
+ mdelay(10);
+ if (--t <= 0) {
+ pmz_error("receiver didn't drain\n");
+ return;
+ }
+ }
+
+ /* Switch to command mode */
+ uap->curregs[R5] |= DTR;
+ write_zsreg(uap, R5, uap->curregs[R5]);
+ zssync(uap);
+ mdelay(1);
+
+ /* Switch SCC to 19200 */
+ pmz_convert_to_zs(uap, CS8, 0, 19200);
+ pmz_load_zsregs(uap, uap->curregs);
+ mdelay(1);
+
+ /* Write get_version command byte */
+ write_zsdata(uap, 1);
+ t = 5000;
+ while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0) {
+ if (--t <= 0) {
+ pmz_error("irda_setup timed out on get_version byte\n");
+ goto out;
+ }
+ udelay(10);
+ }
+ version = read_zsdata(uap);
+
+ if (version < 4) {
+ pmz_info("IrDA: dongle version %d not supported\n", version);
+ goto out;
+ }
+
+ /* Send speed mode */
+ write_zsdata(uap, cmdbyte);
+ t = 5000;
+ while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0) {
+ if (--t <= 0) {
+ pmz_error("irda_setup timed out on speed mode byte\n");
+ goto out;
+ }
+ udelay(10);
+ }
+ t = read_zsdata(uap);
+ if (t != cmdbyte)
+ pmz_error("irda_setup speed mode byte = %x (%x)\n", t, cmdbyte);
+
+ pmz_info("IrDA setup for %ld bps, dongle version: %d\n",
+ *baud, version);
+
+ (void)read_zsdata(uap);
+ (void)read_zsdata(uap);
+ (void)read_zsdata(uap);
+
+ out:
+ /* Switch back to data mode */
+ uap->curregs[R5] &= ~DTR;
+ write_zsreg(uap, R5, uap->curregs[R5]);
+ zssync(uap);
+
+ (void)read_zsdata(uap);
+ (void)read_zsdata(uap);
+ (void)read_zsdata(uap);
+}
+
+
+static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_pmac_port *uap = to_pmz(port);
+ unsigned long baud;
+
+ pmz_debug("pmz: set_termios()\n");
+
+ if (ZS_IS_ASLEEP(uap))
+ return;
+
+ memcpy(&uap->termios_cache, termios, sizeof(struct ktermios));
+
+ /* XXX Check which revs of machines actually allow 1 and 4Mb speeds
+ * on the IR dongle. Note that the IRTTY driver currently doesn't know
+ * about the FIR mode and high speed modes. So these are unused. For
+ * implementing proper support for these, we should probably add some
+ * DMA as well, at least on the Rx side, which isn't a simple thing
+ * at this point.
+ */
+ if (ZS_IS_IRDA(uap)) {
+ /* Calc baud rate */
+ baud = uart_get_baud_rate(port, termios, old, 1200, 4000000);
+ pmz_debug("pmz: switch IRDA to %ld bauds\n", baud);
+ /* Set the IrDA codec to the right rate */
+ pmz_irda_setup(uap, &baud);
+ /* Set final baud rate */
+ pmz_convert_to_zs(uap, termios->c_cflag, termios->c_iflag, baud);
+ pmz_load_zsregs(uap, uap->curregs);
+ zssync(uap);
+ } else {
+ baud = uart_get_baud_rate(port, termios, old, 1200, 230400);
+ pmz_convert_to_zs(uap, termios->c_cflag, termios->c_iflag, baud);
+ /* Make sure modem status interrupts are correctly configured */
+ if (UART_ENABLE_MS(&uap->port, termios->c_cflag)) {
+ uap->curregs[R15] |= DCDIE | SYNCIE | CTSIE;
+ uap->flags |= PMACZILOG_FLAG_MODEM_STATUS;
+ } else {
+ uap->curregs[R15] &= ~(DCDIE | SYNCIE | CTSIE);
+ uap->flags &= ~PMACZILOG_FLAG_MODEM_STATUS;
+ }
+
+ /* Load registers to the chip */
+ pmz_maybe_update_regs(uap);
+ }
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ pmz_debug("pmz: set_termios() done.\n");
+}
+
+/* The port lock is not held. */
+static void pmz_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_pmac_port *uap = to_pmz(port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Disable IRQs on the port */
+ uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
+ write_zsreg(uap, R1, uap->curregs[R1]);
+
+ /* Setup new port configuration */
+ __pmz_set_termios(port, termios, old);
+
+ /* Re-enable IRQs on the port */
+ if (ZS_IS_OPEN(uap)) {
+ uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
+ if (!ZS_IS_EXTCLK(uap))
+ uap->curregs[R1] |= EXT_INT_ENAB;
+ write_zsreg(uap, R1, uap->curregs[R1]);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *pmz_type(struct uart_port *port)
+{
+ struct uart_pmac_port *uap = to_pmz(port);
+
+ if (ZS_IS_IRDA(uap))
+ return "Z85c30 ESCC - Infrared port";
+ else if (ZS_IS_INTMODEM(uap))
+ return "Z85c30 ESCC - Internal modem";
+ return "Z85c30 ESCC - Serial port";
+}
+
+/* We do not request/release mappings of the registers here; this
+ * happens at early serial probe time.
+ */
+static void pmz_release_port(struct uart_port *port)
+{
+}
+
+static int pmz_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+/* These do not need to do anything interesting either. */
+static void pmz_config_port(struct uart_port *port, int flags)
+{
+}
+
+/* We do not support letting the user mess with the divisor, IRQ, etc. */
+static int pmz_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ return -EINVAL;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+
+static int pmz_poll_get_char(struct uart_port *port)
+{
+ struct uart_pmac_port *uap = (struct uart_pmac_port *)port;
+
+ while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0)
+ udelay(5);
+ return read_zsdata(uap);
+}
+
+static void pmz_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ struct uart_pmac_port *uap = (struct uart_pmac_port *)port;
+
+ /* Wait for the transmit buffer to empty. */
+ while ((read_zsreg(uap, R0) & Tx_BUF_EMP) == 0)
+ udelay(5);
+ write_zsdata(uap, c);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
+static struct uart_ops pmz_pops = {
+ .tx_empty = pmz_tx_empty,
+ .set_mctrl = pmz_set_mctrl,
+ .get_mctrl = pmz_get_mctrl,
+ .stop_tx = pmz_stop_tx,
+ .start_tx = pmz_start_tx,
+ .stop_rx = pmz_stop_rx,
+ .enable_ms = pmz_enable_ms,
+ .break_ctl = pmz_break_ctl,
+ .startup = pmz_startup,
+ .shutdown = pmz_shutdown,
+ .set_termios = pmz_set_termios,
+ .type = pmz_type,
+ .release_port = pmz_release_port,
+ .request_port = pmz_request_port,
+ .config_port = pmz_config_port,
+ .verify_port = pmz_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = pmz_poll_get_char,
+ .poll_put_char = pmz_poll_put_char,
+#endif
+};
+
+#ifdef CONFIG_PPC_PMAC
+
+/*
+ * Set up one port structure after probing; HW is down at this point.
+ * Unlike sunzilog, we don't need to pre-init the spinlock as we don't
+ * register our console before uart_add_one_port() is called.
+ */
+static int __init pmz_init_port(struct uart_pmac_port *uap)
+{
+ struct device_node *np = uap->node;
+ const char *conn;
+ const struct slot_names_prop {
+ int count;
+ char name[1];
+ } *slots;
+ int len;
+ struct resource r_ports, r_rxdma, r_txdma;
+
+ /*
+ * Request & map chip registers
+ */
+ if (of_address_to_resource(np, 0, &r_ports))
+ return -ENODEV;
+ uap->port.mapbase = r_ports.start;
+ uap->port.membase = ioremap(uap->port.mapbase, 0x1000);
+
+ uap->control_reg = uap->port.membase;
+ uap->data_reg = uap->control_reg + 0x10;
+
+ /*
+ * Request & map DBDMA registers
+ */
+#ifdef HAS_DBDMA
+ if (of_address_to_resource(np, 1, &r_txdma) == 0 &&
+ of_address_to_resource(np, 2, &r_rxdma) == 0)
+ uap->flags |= PMACZILOG_FLAG_HAS_DMA;
+#else
+ memset(&r_txdma, 0, sizeof(struct resource));
+ memset(&r_rxdma, 0, sizeof(struct resource));
+#endif
+ if (ZS_HAS_DMA(uap)) {
+ uap->tx_dma_regs = ioremap(r_txdma.start, 0x100);
+ if (uap->tx_dma_regs == NULL) {
+ uap->flags &= ~PMACZILOG_FLAG_HAS_DMA;
+ goto no_dma;
+ }
+ uap->rx_dma_regs = ioremap(r_rxdma.start, 0x100);
+ if (uap->rx_dma_regs == NULL) {
+ iounmap(uap->tx_dma_regs);
+ uap->tx_dma_regs = NULL;
+ uap->flags &= ~PMACZILOG_FLAG_HAS_DMA;
+ goto no_dma;
+ }
+ uap->tx_dma_irq = irq_of_parse_and_map(np, 1);
+ uap->rx_dma_irq = irq_of_parse_and_map(np, 2);
+ }
+no_dma:
+
+ /*
+ * Detect port type
+ */
+ if (of_device_is_compatible(np, "cobalt"))
+ uap->flags |= PMACZILOG_FLAG_IS_INTMODEM;
+ conn = of_get_property(np, "AAPL,connector", &len);
+ if (conn && (strcmp(conn, "infrared") == 0))
+ uap->flags |= PMACZILOG_FLAG_IS_IRDA;
+ uap->port_type = PMAC_SCC_ASYNC;
+ /* 1999 Powerbook G3 has slot-names property instead */
+ slots = of_get_property(np, "slot-names", &len);
+ if (slots && slots->count > 0) {
+ if (strcmp(slots->name, "IrDA") == 0)
+ uap->flags |= PMACZILOG_FLAG_IS_IRDA;
+ else if (strcmp(slots->name, "Modem") == 0)
+ uap->flags |= PMACZILOG_FLAG_IS_INTMODEM;
+ }
+ if (ZS_IS_IRDA(uap))
+ uap->port_type = PMAC_SCC_IRDA;
+ if (ZS_IS_INTMODEM(uap)) {
+ struct device_node* i2c_modem =
+ of_find_node_by_name(NULL, "i2c-modem");
+ if (i2c_modem) {
+ const char* mid =
+ of_get_property(i2c_modem, "modem-id", NULL);
+ if (mid) switch(*mid) {
+ case 0x04 :
+ case 0x05 :
+ case 0x07 :
+ case 0x08 :
+ case 0x0b :
+ case 0x0c :
+ uap->port_type = PMAC_SCC_I2S1;
+ }
+ printk(KERN_INFO "pmac_zilog: i2c-modem detected, id: %d\n",
+ mid ? (*mid) : 0);
+ of_node_put(i2c_modem);
+ } else {
+ printk(KERN_INFO "pmac_zilog: serial modem detected\n");
+ }
+ }
+
+ /*
+ * Init remaining bits of "port" structure
+ */
+ uap->port.iotype = UPIO_MEM;
+ uap->port.irq = irq_of_parse_and_map(np, 0);
+ uap->port.uartclk = ZS_CLOCK;
+ uap->port.fifosize = 1;
+ uap->port.ops = &pmz_pops;
+ uap->port.type = PORT_PMAC_ZILOG;
+ uap->port.flags = 0;
+
+ /*
+ * Fixup for the port on Gatwick for which the device-tree has
+ * missing interrupts. Normally, the macio_dev would contain
+ * fixed up interrupt info, but we use the device-tree directly
+ * here due to early probing so we need the fixup too.
+ */
+ if (uap->port.irq == NO_IRQ &&
+ np->parent && np->parent->parent &&
+ of_device_is_compatible(np->parent->parent, "gatwick")) {
+ /* IRQs on gatwick are offset by 64 */
+ uap->port.irq = irq_create_mapping(NULL, 64 + 15);
+ uap->tx_dma_irq = irq_create_mapping(NULL, 64 + 4);
+ uap->rx_dma_irq = irq_create_mapping(NULL, 64 + 5);
+ }
+
+ /* Set up some valid baud rate information in the register
+ * shadows so we don't write crap there before the baud rate
+ * is first initialized.
+ */
+ pmz_convert_to_zs(uap, CS8, 0, 9600);
+
+ return 0;
+}
+
+/*
+ * Get rid of a port on module removal
+ */
+static void pmz_dispose_port(struct uart_pmac_port *uap)
+{
+ struct device_node *np;
+
+ np = uap->node;
+ iounmap(uap->rx_dma_regs);
+ iounmap(uap->tx_dma_regs);
+ iounmap(uap->control_reg);
+ uap->node = NULL;
+ of_node_put(np);
+ memset(uap, 0, sizeof(struct uart_pmac_port));
+}
+
+/*
+ * Called upon match with an escc node in the device-tree.
+ */
+static int pmz_attach(struct macio_dev *mdev, const struct of_device_id *match)
+{
+ int i;
+
+ /* Iterate the pmz_ports array to find a matching entry
+ */
+ for (i = 0; i < MAX_ZS_PORTS; i++)
+ if (pmz_ports[i].node == mdev->ofdev.dev.of_node) {
+ struct uart_pmac_port *uap = &pmz_ports[i];
+
+ uap->dev = mdev;
+ dev_set_drvdata(&mdev->ofdev.dev, uap);
+ if (macio_request_resources(uap->dev, "pmac_zilog"))
+ printk(KERN_WARNING "%s: Failed to request resource"
+ ", port still active\n",
+ uap->node->name);
+ else
+ uap->flags |= PMACZILOG_FLAG_RSRC_REQUESTED;
+ return 0;
+ }
+ return -ENODEV;
+}
+
+/*
+ * This one should not be called; macio isn't really a hot-swap device,
+ * and we don't expect one of these serial ports to go away...
+ */
+static int pmz_detach(struct macio_dev *mdev)
+{
+ struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
+
+ if (!uap)
+ return -ENODEV;
+
+ if (uap->flags & PMACZILOG_FLAG_RSRC_REQUESTED) {
+ macio_release_resources(uap->dev);
+ uap->flags &= ~PMACZILOG_FLAG_RSRC_REQUESTED;
+ }
+ dev_set_drvdata(&mdev->ofdev.dev, NULL);
+ uap->dev = NULL;
+
+ return 0;
+}
+
+
+static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
+{
+ struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
+ struct uart_state *state;
+ unsigned long flags;
+
+ if (uap == NULL) {
+ printk("HRM... pmz_suspend with NULL uap\n");
+ return 0;
+ }
+
+ if (pm_state.event == mdev->ofdev.dev.power.power_state.event)
+ return 0;
+
+ pmz_debug("suspend, switching to state %d\n", pm_state.event);
+
+ state = pmz_uart_reg.state + uap->port.line;
+
+ mutex_lock(&pmz_irq_mutex);
+ mutex_lock(&state->port.mutex);
+
+ spin_lock_irqsave(&uap->port.lock, flags);
+
+ if (ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)) {
+ /* Disable receiver and transmitter. */
+ uap->curregs[R3] &= ~RxENABLE;
+ uap->curregs[R5] &= ~TxENABLE;
+
+ /* Disable all interrupts and BRK assertion. */
+ uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
+ uap->curregs[R5] &= ~SND_BRK;
+ pmz_load_zsregs(uap, uap->curregs);
+ uap->flags |= PMACZILOG_FLAG_IS_ASLEEP;
+ mb();
+ }
+
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+
+ if (ZS_IS_OPEN(uap) || ZS_IS_OPEN(uap->mate))
+ if (ZS_IS_ASLEEP(uap->mate) && ZS_IS_IRQ_ON(pmz_get_port_A(uap))) {
+ pmz_get_port_A(uap)->flags &= ~PMACZILOG_FLAG_IS_IRQ_ON;
+ disable_irq(uap->port.irq);
+ }
+
+ if (ZS_IS_CONS(uap))
+ uap->port.cons->flags &= ~CON_ENABLED;
+
+ /* Shut the chip down */
+ pmz_set_scc_power(uap, 0);
+
+ mutex_unlock(&state->port.mutex);
+ mutex_unlock(&pmz_irq_mutex);
+
+ pmz_debug("suspend, switching complete\n");
+
+ mdev->ofdev.dev.power.power_state = pm_state;
+
+ return 0;
+}
+
+
+static int pmz_resume(struct macio_dev *mdev)
+{
+ struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
+ struct uart_state *state;
+ unsigned long flags;
+ int pwr_delay = 0;
+
+ if (uap == NULL)
+ return 0;
+
+ if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON)
+ return 0;
+
+ pmz_debug("resume, switching to state 0\n");
+
+ state = pmz_uart_reg.state + uap->port.line;
+
+ mutex_lock(&pmz_irq_mutex);
+ mutex_lock(&state->port.mutex);
+
+ spin_lock_irqsave(&uap->port.lock, flags);
+ if (!ZS_IS_OPEN(uap) && !ZS_IS_CONS(uap)) {
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+ goto bail;
+ }
+ pwr_delay = __pmz_startup(uap);
+
+ /* Take care of config that may have changed while asleep */
+ __pmz_set_termios(&uap->port, &uap->termios_cache, NULL);
+
+ if (ZS_IS_OPEN(uap)) {
+ /* Enable interrupts */
+ uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
+ if (!ZS_IS_EXTCLK(uap))
+ uap->curregs[R1] |= EXT_INT_ENAB;
+ write_zsreg(uap, R1, uap->curregs[R1]);
+ }
+
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+
+ if (ZS_IS_CONS(uap))
+ uap->port.cons->flags |= CON_ENABLED;
+
+ /* Re-enable IRQ on the controller */
+ if (ZS_IS_OPEN(uap) && !ZS_IS_IRQ_ON(pmz_get_port_A(uap))) {
+ pmz_get_port_A(uap)->flags |= PMACZILOG_FLAG_IS_IRQ_ON;
+ enable_irq(uap->port.irq);
+ }
+
+ bail:
+ mutex_unlock(&state->port.mutex);
+ mutex_unlock(&pmz_irq_mutex);
+
+ /* Right now we deal with the delay by blocking here; this
+ * could be made smarter later on
+ */
+ if (pwr_delay != 0) {
+ pmz_debug("pmz: delaying %d ms\n", pwr_delay);
+ msleep(pwr_delay);
+ }
+
+ pmz_debug("resume, switching complete\n");
+
+ mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
+
+ return 0;
+}
+
+/*
+ * Probe all ports in the system and build the ports array. We register
+ * with the serial layer at this point; the macio-type probing is only
+ * used later to "attach" to the sysfs tree so we get power management
+ * events.
+ */
+static int __init pmz_probe(void)
+{
+ struct device_node *node_p, *node_a, *node_b, *np;
+ int count = 0;
+ int rc;
+
+ /*
+ * Find all escc chips in the system
+ */
+ node_p = of_find_node_by_name(NULL, "escc");
+ while (node_p) {
+ /*
+ * First get channel A/B node pointers
+ *
+ * TODO: Add routines with proper locking to do that...
+ */
+ node_a = node_b = NULL;
+ for (np = NULL; (np = of_get_next_child(node_p, np)) != NULL;) {
+ if (strncmp(np->name, "ch-a", 4) == 0)
+ node_a = of_node_get(np);
+ else if (strncmp(np->name, "ch-b", 4) == 0)
+ node_b = of_node_get(np);
+ }
+ if (!node_a && !node_b) {
+ of_node_put(node_a);
+ of_node_put(node_b);
+ printk(KERN_ERR "pmac_zilog: missing node %c for escc %s\n",
+ (!node_a) ? 'a' : 'b', node_p->full_name);
+ goto next;
+ }
+
+ /*
+ * Fill basic fields in the port structures
+ */
+ pmz_ports[count].mate = &pmz_ports[count+1];
+ pmz_ports[count+1].mate = &pmz_ports[count];
+ pmz_ports[count].flags = PMACZILOG_FLAG_IS_CHANNEL_A;
+ pmz_ports[count].node = node_a;
+ pmz_ports[count+1].node = node_b;
+ pmz_ports[count].port.line = count;
+ pmz_ports[count+1].port.line = count+1;
+
+ /*
+ * Setup the ports for real
+ */
+ rc = pmz_init_port(&pmz_ports[count]);
+ if (rc == 0 && node_b != NULL)
+ rc = pmz_init_port(&pmz_ports[count+1]);
+ if (rc != 0) {
+ of_node_put(node_a);
+ of_node_put(node_b);
+ memset(&pmz_ports[count], 0, sizeof(struct uart_pmac_port));
+ memset(&pmz_ports[count+1], 0, sizeof(struct uart_pmac_port));
+ goto next;
+ }
+ count += 2;
+next:
+ node_p = of_find_node_by_name(node_p, "escc");
+ }
+ pmz_ports_count = count;
+
+ return 0;
+}
+
+#else
+
+extern struct platform_device scc_a_pdev, scc_b_pdev;
+
+static int __init pmz_init_port(struct uart_pmac_port *uap)
+{
+ struct resource *r_ports;
+ int irq;
+
+ r_ports = platform_get_resource(uap->node, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(uap->node, 0);
+ if (!r_ports || !irq)
+ return -ENODEV;
+
+ uap->port.mapbase = r_ports->start;
+ uap->port.membase = (unsigned char __iomem *) r_ports->start;
+ uap->port.iotype = UPIO_MEM;
+ uap->port.irq = irq;
+ uap->port.uartclk = ZS_CLOCK;
+ uap->port.fifosize = 1;
+ uap->port.ops = &pmz_pops;
+ uap->port.type = PORT_PMAC_ZILOG;
+ uap->port.flags = 0;
+
+ uap->control_reg = uap->port.membase;
+ uap->data_reg = uap->control_reg + 4;
+ uap->port_type = 0;
+
+ pmz_convert_to_zs(uap, CS8, 0, 9600);
+
+ return 0;
+}
+
+static int __init pmz_probe(void)
+{
+ int err;
+
+ pmz_ports_count = 0;
+
+ pmz_ports[0].mate = &pmz_ports[1];
+ pmz_ports[0].port.line = 0;
+ pmz_ports[0].flags = PMACZILOG_FLAG_IS_CHANNEL_A;
+ pmz_ports[0].node = &scc_a_pdev;
+ err = pmz_init_port(&pmz_ports[0]);
+ if (err)
+ return err;
+ pmz_ports_count++;
+
+ pmz_ports[1].mate = &pmz_ports[0];
+ pmz_ports[1].port.line = 1;
+ pmz_ports[1].flags = 0;
+ pmz_ports[1].node = &scc_b_pdev;
+ err = pmz_init_port(&pmz_ports[1]);
+ if (err)
+ return err;
+ pmz_ports_count++;
+
+ return 0;
+}
+
+static void pmz_dispose_port(struct uart_pmac_port *uap)
+{
+ memset(uap, 0, sizeof(struct uart_pmac_port));
+}
+
+static int __init pmz_attach(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < pmz_ports_count; i++)
+ if (pmz_ports[i].node == pdev)
+ return 0;
+ return -ENODEV;
+}
+
+static int __exit pmz_detach(struct platform_device *pdev)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_PPC_PMAC */
+
+#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
+
+static void pmz_console_write(struct console *con, const char *s, unsigned int count);
+static int __init pmz_console_setup(struct console *co, char *options);
+
+static struct console pmz_console = {
+ .name = PMACZILOG_NAME,
+ .write = pmz_console_write,
+ .device = uart_console_device,
+ .setup = pmz_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &pmz_uart_reg,
+};
+
+#define PMACZILOG_CONSOLE &pmz_console
+#else /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
+#define PMACZILOG_CONSOLE (NULL)
+#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
+
+/*
+ * Register the driver, console driver and ports with the serial
+ * core
+ */
+static int __init pmz_register(void)
+{
+ int i, rc;
+
+ pmz_uart_reg.nr = pmz_ports_count;
+ pmz_uart_reg.cons = PMACZILOG_CONSOLE;
+
+ /*
+ * Register this driver with the serial core
+ */
+ rc = uart_register_driver(&pmz_uart_reg);
+ if (rc)
+ return rc;
+
+ /*
+ * Register each port with the serial core
+ */
+ for (i = 0; i < pmz_ports_count; i++) {
+ struct uart_pmac_port *uport = &pmz_ports[i];
+ /* NULL node may happen on wallstreet */
+ if (uport->node != NULL)
+ rc = uart_add_one_port(&pmz_uart_reg, &uport->port);
+ if (rc)
+ goto err_out;
+ }
+
+ return 0;
+err_out:
+ while (i-- > 0) {
+ struct uart_pmac_port *uport = &pmz_ports[i];
+ uart_remove_one_port(&pmz_uart_reg, &uport->port);
+ }
+ uart_unregister_driver(&pmz_uart_reg);
+ return rc;
+}
+
+#ifdef CONFIG_PPC_PMAC
+
+static struct of_device_id pmz_match[] =
+{
+ {
+ .name = "ch-a",
+ },
+ {
+ .name = "ch-b",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE (of, pmz_match);
+
+static struct macio_driver pmz_driver = {
+ .driver = {
+ .name = "pmac_zilog",
+ .owner = THIS_MODULE,
+ .of_match_table = pmz_match,
+ },
+ .probe = pmz_attach,
+ .remove = pmz_detach,
+ .suspend = pmz_suspend,
+ .resume = pmz_resume,
+};
+
+#else
+
+static struct platform_driver pmz_driver = {
+ .remove = __exit_p(pmz_detach),
+ .driver = {
+ .name = "scc",
+ .owner = THIS_MODULE,
+ },
+};
+
+#endif /* !CONFIG_PPC_PMAC */
+
+static int __init init_pmz(void)
+{
+ int rc, i;
+ printk(KERN_INFO "%s\n", version);
+
+ /*
+ * First, we need to do a direct OF-based probe pass. We do
+ * that because we want the serial console up before the
+ * macio stuff calls us back, and because it makes it easier
+ * to pass the proper number of channels to
+ * uart_register_driver().
+ */
+ if (pmz_ports_count == 0)
+ pmz_probe();
+
+ /*
+ * Bail early if no port found
+ */
+ if (pmz_ports_count == 0)
+ return -ENODEV;
+
+ /*
+ * Now we register with the serial layer
+ */
+ rc = pmz_register();
+ if (rc) {
+ printk(KERN_ERR
+ "pmac_zilog: Error registering serial device, disabling pmac_zilog.\n"
+ "pmac_zilog: Did another serial driver already claim the minors?\n");
+ /* effectively "pmz_unprobe()" */
+ for (i=0; i < pmz_ports_count; i++)
+ pmz_dispose_port(&pmz_ports[i]);
+ return rc;
+ }
+
+ /*
+ * Then we register the macio driver itself
+ */
+#ifdef CONFIG_PPC_PMAC
+ return macio_register_driver(&pmz_driver);
+#else
+ return platform_driver_probe(&pmz_driver, pmz_attach);
+#endif
+}
+
+static void __exit exit_pmz(void)
+{
+ int i;
+
+#ifdef CONFIG_PPC_PMAC
+ /* Get rid of macio-driver (detach from macio) */
+ macio_unregister_driver(&pmz_driver);
+#else
+ platform_driver_unregister(&pmz_driver);
+#endif
+
+ for (i = 0; i < pmz_ports_count; i++) {
+ struct uart_pmac_port *uport = &pmz_ports[i];
+ if (uport->node != NULL) {
+ uart_remove_one_port(&pmz_uart_reg, &uport->port);
+ pmz_dispose_port(uport);
+ }
+ }
+ /* Unregister UART driver */
+ uart_unregister_driver(&pmz_uart_reg);
+}
+
+#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
+
+static void pmz_console_putchar(struct uart_port *port, int ch)
+{
+ struct uart_pmac_port *uap = (struct uart_pmac_port *)port;
+
+ /* Wait for the transmit buffer to empty. */
+ while ((read_zsreg(uap, R0) & Tx_BUF_EMP) == 0)
+ udelay(5);
+ write_zsdata(uap, ch);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ */
+static void pmz_console_write(struct console *con, const char *s, unsigned int count)
+{
+ struct uart_pmac_port *uap = &pmz_ports[con->index];
+ unsigned long flags;
+
+ if (ZS_IS_ASLEEP(uap))
+ return;
+ spin_lock_irqsave(&uap->port.lock, flags);
+
+ /* Turn off interrupts and enable the transmitter. */
+ write_zsreg(uap, R1, uap->curregs[1] & ~TxINT_ENAB);
+ write_zsreg(uap, R5, uap->curregs[5] | TxENABLE | RTS | DTR);
+
+ uart_console_write(&uap->port, s, count, pmz_console_putchar);
+
+ /* Restore the values in the registers. */
+ write_zsreg(uap, R1, uap->curregs[1]);
+ /* Don't disable the transmitter. */
+
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+}
+
+/*
+ * Setup the serial console
+ */
+static int __init pmz_console_setup(struct console *co, char *options)
+{
+ struct uart_pmac_port *uap;
+ struct uart_port *port;
+ int baud = 38400;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ unsigned long pwr_delay;
+
+ /*
+ * XServes default to 57600 bps
+ */
+ if (of_machine_is_compatible("RackMac1,1")
+ || of_machine_is_compatible("RackMac1,2")
+ || of_machine_is_compatible("MacRISC4"))
+ baud = 57600;
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index >= pmz_ports_count)
+ co->index = 0;
+ uap = &pmz_ports[co->index];
+ if (uap->node == NULL)
+ return -ENODEV;
+ port = &uap->port;
+
+ /*
+ * Mark port as being a console
+ */
+ uap->flags |= PMACZILOG_FLAG_IS_CONS;
+
+ /*
+ * Temporary fix for the uart layer, which hasn't set up the spinlock yet
+ */
+ spin_lock_init(&port->lock);
+
+ /*
+ * Enable the hardware
+ */
+ pwr_delay = __pmz_startup(uap);
+ if (pwr_delay)
+ mdelay(pwr_delay);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static int __init pmz_console_init(void)
+{
+ /* Probe ports */
+ pmz_probe();
+
+ /* TODO: Autoprobe console based on OF */
+ /* pmz_console.index = i; */
+ register_console(&pmz_console);
+
+ return 0;
+
+}
+console_initcall(pmz_console_init);
+#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
+
+module_init(init_pmz);
+module_exit(exit_pmz);
diff --git a/drivers/tty/serial/pmac_zilog.h b/drivers/tty/serial/pmac_zilog.h
new file mode 100644
index 00000000..cbc34fbb
--- /dev/null
+++ b/drivers/tty/serial/pmac_zilog.h
@@ -0,0 +1,396 @@
+#ifndef __PMAC_ZILOG_H__
+#define __PMAC_ZILOG_H__
+
+#ifdef CONFIG_PPC_PMAC
+#define pmz_debug(fmt, arg...) dev_dbg(&uap->dev->ofdev.dev, fmt, ## arg)
+#define pmz_error(fmt, arg...) dev_err(&uap->dev->ofdev.dev, fmt, ## arg)
+#define pmz_info(fmt, arg...) dev_info(&uap->dev->ofdev.dev, fmt, ## arg)
+#else
+#define pmz_debug(fmt, arg...) dev_dbg(&uap->node->dev, fmt, ## arg)
+#define pmz_error(fmt, arg...) dev_err(&uap->node->dev, fmt, ## arg)
+#define pmz_info(fmt, arg...) dev_info(&uap->node->dev, fmt, ## arg)
+#endif
+
+/*
+ * At most 2 ESCCs with 2 ports each
+ */
+#define MAX_ZS_PORTS 4
+
+/*
+ * We wrap our port structure around the generic uart_port.
+ */
+#define NUM_ZSREGS 17
+
+struct uart_pmac_port {
+ struct uart_port port;
+ struct uart_pmac_port *mate;
+
+#ifdef CONFIG_PPC_PMAC
+ /* macio_dev for the escc holding this port (may be NULL on an
+ * early-initialized port)
+ */
+ struct macio_dev *dev;
+ /* device node for this port; this points to one of the 2 children
+ * of the "escc" node (i.e. ch-a or ch-b)
+ */
+ struct device_node *node;
+#else
+ struct platform_device *node;
+#endif
+
+ /* Port type as obtained from device tree (IRDA, modem, ...) */
+ int port_type;
+ u8 curregs[NUM_ZSREGS];
+
+ unsigned int flags;
+#define PMACZILOG_FLAG_IS_CONS 0x00000001
+#define PMACZILOG_FLAG_IS_KGDB 0x00000002
+#define PMACZILOG_FLAG_MODEM_STATUS 0x00000004
+#define PMACZILOG_FLAG_IS_CHANNEL_A 0x00000008
+#define PMACZILOG_FLAG_REGS_HELD 0x00000010
+#define PMACZILOG_FLAG_TX_STOPPED 0x00000020
+#define PMACZILOG_FLAG_TX_ACTIVE 0x00000040
+#define PMACZILOG_FLAG_ENABLED 0x00000080
+#define PMACZILOG_FLAG_IS_IRDA 0x00000100
+#define PMACZILOG_FLAG_IS_INTMODEM 0x00000200
+#define PMACZILOG_FLAG_HAS_DMA 0x00000400
+#define PMACZILOG_FLAG_RSRC_REQUESTED 0x00000800
+#define PMACZILOG_FLAG_IS_ASLEEP 0x00001000
+#define PMACZILOG_FLAG_IS_OPEN 0x00002000
+#define PMACZILOG_FLAG_IS_IRQ_ON 0x00004000
+#define PMACZILOG_FLAG_IS_EXTCLK 0x00008000
+#define PMACZILOG_FLAG_BREAK 0x00010000
+
+ unsigned char parity_mask;
+ unsigned char prev_status;
+
+ volatile u8 __iomem *control_reg;
+ volatile u8 __iomem *data_reg;
+
+#ifdef CONFIG_PPC_PMAC
+ unsigned int tx_dma_irq;
+ unsigned int rx_dma_irq;
+ volatile struct dbdma_regs __iomem *tx_dma_regs;
+ volatile struct dbdma_regs __iomem *rx_dma_regs;
+#endif
+
+ struct ktermios termios_cache;
+};
+
+#define to_pmz(p) ((struct uart_pmac_port *)(p))
+
+static inline struct uart_pmac_port *pmz_get_port_A(struct uart_pmac_port *uap)
+{
+ if (uap->flags & PMACZILOG_FLAG_IS_CHANNEL_A)
+ return uap;
+ return uap->mate;
+}
+
+/*
+ * Register accessors. Note that we don't need to enforce a recovery
+ * delay on PCI PowerMac hardware; it's dealt with in HW by the MacIO
+ * chip, though if we try to use this driver on older machines we might
+ * have to add it back.
+ */
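+/* The SCC uses an indexed access scheme for registers other than R0: the
+ * register number is first written to the control port, and the following
+ * access to the control port then reads or writes that register's value
+ * (R0 itself is accessible directly).
+ */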
+static inline u8 read_zsreg(struct uart_pmac_port *port, u8 reg)
+{
+ if (reg != 0)
+ writeb(reg, port->control_reg);
+ return readb(port->control_reg);
+}
+
+static inline void write_zsreg(struct uart_pmac_port *port, u8 reg, u8 value)
+{
+ if (reg != 0)
+ writeb(reg, port->control_reg);
+ writeb(value, port->control_reg);
+}
+
+static inline u8 read_zsdata(struct uart_pmac_port *port)
+{
+ return readb(port->data_reg);
+}
+
+static inline void write_zsdata(struct uart_pmac_port *port, u8 data)
+{
+ writeb(data, port->data_reg);
+}
+
+static inline void zssync(struct uart_pmac_port *port)
+{
+ (void)readb(port->control_reg);
+}
+
+/* Conversion routines to/from brg time constants from/to bits
+ * per second.
+ */
+#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
+#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
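+/* For example, with the RTxC clock divided by 16 (freq = ZS_CLOCK / 16 =
+ * 230400) and bps = 9600, BPS_TO_BRG(9600, 230400) =
+ * ((230400 + 9600) / (2 * 9600)) - 2 = 10, and
+ * BRG_TO_BPS(10, 230400) = 230400 / 2 / (10 + 2) = 9600, so a time
+ * constant of 10 is loaded into R12 (low byte) with 0 in R13 (high byte).
+ */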
+
+#define ZS_CLOCK 3686400 /* Z8530 RTxC input clock rate */
+
+/* The Zilog register set */
+
+#define FLAG 0x7e
+
+/* Write Register 0 */
+#define R0 0 /* Register selects */
+#define R1 1
+#define R2 2
+#define R3 3
+#define R4 4
+#define R5 5
+#define R6 6
+#define R7 7
+#define R8 8
+#define R9 9
+#define R10 10
+#define R11 11
+#define R12 12
+#define R13 13
+#define R14 14
+#define R15 15
+#define R7P 16
+
+#define NULLCODE 0 /* Null Code */
+#define POINT_HIGH 0x8 /* Select upper half of registers */
+#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */
+#define SEND_ABORT 0x18 /* HDLC Abort */
+#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */
+#define RES_Tx_P 0x28 /* Reset TxINT Pending */
+#define ERR_RES 0x30 /* Error Reset */
+#define RES_H_IUS 0x38 /* Reset highest IUS */
+
+#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */
+#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */
+#define RES_EOM_L 0xC0 /* Reset EOM latch */
+
+/* Write Register 1 */
+
+#define EXT_INT_ENAB 0x1 /* Ext Int Enable */
+#define TxINT_ENAB 0x2 /* Tx Int Enable */
+#define PAR_SPEC 0x4 /* Parity is special condition */
+
+#define RxINT_DISAB 0 /* Rx Int Disable */
+#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */
+#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */
+#define INT_ERR_Rx 0x18 /* Int on error only */
+#define RxINT_MASK 0x18
+
+#define WT_RDY_RT 0x20 /* W/Req reflects recv if 1, xmit if 0 */
+#define WT_FN_RDYFN 0x40 /* W/Req pin is DMA request if 1, wait if 0 */
+#define WT_RDY_ENAB 0x80 /* Enable W/Req pin */
+
+/* Write Register #2 (Interrupt Vector) */
+
+/* Write Register 3 */
+
+#define RxENABLE 0x1 /* Rx Enable */
+#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */
+#define ADD_SM 0x4 /* Address Search Mode (SDLC) */
+#define RxCRC_ENAB 0x8 /* Rx CRC Enable */
+#define ENT_HM 0x10 /* Enter Hunt Mode */
+#define AUTO_ENAB 0x20 /* Auto Enables */
+#define Rx5 0x0 /* Rx 5 Bits/Character */
+#define Rx7 0x40 /* Rx 7 Bits/Character */
+#define Rx6 0x80 /* Rx 6 Bits/Character */
+#define Rx8 0xc0 /* Rx 8 Bits/Character */
+#define RxN_MASK 0xc0
+
+/* Write Register 4 */
+
+#define PAR_ENAB 0x1 /* Parity Enable */
+#define PAR_EVEN 0x2 /* Parity Even/Odd* */
+
+#define SYNC_ENAB 0 /* Sync Modes Enable */
+#define SB1 0x4 /* 1 stop bit/char */
+#define SB15 0x8 /* 1.5 stop bits/char */
+#define SB2 0xc /* 2 stop bits/char */
+#define SB_MASK 0xc
+
+#define MONSYNC 0 /* 8 Bit Sync character */
+#define BISYNC 0x10 /* 16 bit sync character */
+#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */
+#define EXTSYNC 0x30 /* External Sync Mode */
+
+#define X1CLK 0x0 /* x1 clock mode */
+#define X16CLK 0x40 /* x16 clock mode */
+#define X32CLK 0x80 /* x32 clock mode */
+#define X64CLK 0xC0 /* x64 clock mode */
+#define XCLK_MASK 0xC0
+
+/* Write Register 5 */
+
+#define TxCRC_ENAB 0x1 /* Tx CRC Enable */
+#define RTS 0x2 /* RTS */
+#define SDLC_CRC 0x4 /* SDLC/CRC-16 */
+#define TxENABLE 0x8 /* Tx Enable */
+#define SND_BRK 0x10 /* Send Break */
+#define Tx5 0x0 /* Tx 5 bits (or less)/character */
+#define Tx7 0x20 /* Tx 7 bits/character */
+#define Tx6 0x40 /* Tx 6 bits/character */
+#define Tx8 0x60 /* Tx 8 bits/character */
+#define TxN_MASK 0x60
+#define DTR 0x80 /* DTR */
+
+/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
+
+/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
+
+/* Write Register 7' (Some enhanced feature control) */
+#define ENEXREAD 0x40 /* Enable read of some write registers */
+
+/* Write Register 8 (transmit buffer) */
+
+/* Write Register 9 (Master interrupt control) */
+#define VIS 1 /* Vector Includes Status */
+#define NV 2 /* No Vector */
+#define DLC 4 /* Disable Lower Chain */
+#define MIE 8 /* Master Interrupt Enable */
+#define STATHI 0x10 /* Status high */
+#define NORESET 0 /* No reset on write to R9 */
+#define CHRB 0x40 /* Reset channel B */
+#define CHRA 0x80 /* Reset channel A */
+#define FHWRES 0xc0 /* Force hardware reset */
+
+/* Write Register 10 (misc control bits) */
+#define BIT6 1 /* 6 bit/8bit sync */
+#define LOOPMODE 2 /* SDLC Loop mode */
+#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */
+#define MARKIDLE 8 /* Mark/flag on idle */
+#define GAOP 0x10 /* Go active on poll */
+#define NRZ 0 /* NRZ mode */
+#define NRZI 0x20 /* NRZI mode */
+#define FM1 0x40 /* FM1 (transition = 1) */
+#define FM0 0x60 /* FM0 (transition = 0) */
+#define CRCPS 0x80 /* CRC Preset I/O */
+
+/* Write Register 11 (Clock Mode control) */
+#define TRxCXT 0 /* TRxC = Xtal output */
+#define TRxCTC 1 /* TRxC = Transmit clock */
+#define TRxCBR 2 /* TRxC = BR Generator Output */
+#define TRxCDP 3 /* TRxC = DPLL output */
+#define TRxCOI 4 /* TRxC O/I */
+#define TCRTxCP 0 /* Transmit clock = RTxC pin */
+#define TCTRxCP 8 /* Transmit clock = TRxC pin */
+#define TCBR 0x10 /* Transmit clock = BR Generator output */
+#define TCDPLL 0x18 /* Transmit clock = DPLL output */
+#define RCRTxCP 0 /* Receive clock = RTxC pin */
+#define RCTRxCP 0x20 /* Receive clock = TRxC pin */
+#define RCBR 0x40 /* Receive clock = BR Generator output */
+#define RCDPLL 0x60 /* Receive clock = DPLL output */
+#define RTxCX 0x80 /* RTxC Xtal/No Xtal */
+
+/* Write Register 12 (lower byte of baud rate generator time constant) */
+
+/* Write Register 13 (upper byte of baud rate generator time constant) */
+
+/* Write Register 14 (Misc control bits) */
+#define BRENAB 1 /* Baud rate generator enable */
+#define BRSRC 2 /* Baud rate generator source */
+#define DTRREQ 4 /* DTR/Request function */
+#define AUTOECHO 8 /* Auto Echo */
+#define LOOPBAK 0x10 /* Local loopback */
+#define SEARCH 0x20 /* Enter search mode */
+#define RMC 0x40 /* Reset missing clock */
+#define DISDPLL 0x60 /* Disable DPLL */
+#define SSBR 0x80 /* Set DPLL source = BR generator */
+#define SSRTxC 0xa0 /* Set DPLL source = RTxC */
+#define SFMM 0xc0 /* Set FM mode */
+#define SNRZI 0xe0 /* Set NRZI mode */
+
+/* Write Register 15 (external/status interrupt control) */
+#define EN85C30 1 /* Enable some 85c30-enhanced registers */
+#define ZCIE 2 /* Zero count IE */
+#define ENSTFIFO 4 /* Enable status FIFO (SDLC) */
+#define DCDIE 8 /* DCD IE */
+#define SYNCIE 0x10 /* Sync/hunt IE */
+#define CTSIE 0x20 /* CTS IE */
+#define TxUIE 0x40 /* Tx Underrun/EOM IE */
+#define BRKIE 0x80 /* Break/Abort IE */
+
+
+/* Read Register 0 */
+#define Rx_CH_AV 0x1 /* Rx Character Available */
+#define ZCOUNT 0x2 /* Zero count */
+#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */
+#define DCD 0x8 /* DCD */
+#define SYNC_HUNT 0x10 /* Sync/hunt */
+#define CTS 0x20 /* CTS */
+#define TxEOM 0x40 /* Tx underrun */
+#define BRK_ABRT 0x80 /* Break/Abort */
+
+/* Read Register 1 */
+#define ALL_SNT 0x1 /* All sent */
+/* Residue Data for 8 Rx bits/char programmed */
+#define RES3 0x8 /* 0/3 */
+#define RES4 0x4 /* 0/4 */
+#define RES5 0xc /* 0/5 */
+#define RES6 0x2 /* 0/6 */
+#define RES7 0xa /* 0/7 */
+#define RES8 0x6 /* 0/8 */
+#define RES18 0xe /* 1/8 */
+#define RES28 0x0 /* 2/8 */
+/* Special Rx Condition Interrupts */
+#define PAR_ERR 0x10 /* Parity error */
+#define Rx_OVR 0x20 /* Rx Overrun Error */
+#define CRC_ERR 0x40 /* CRC/Framing Error */
+#define END_FR 0x80 /* End of Frame (SDLC) */
+
+/* Read Register 2 (channel b only) - Interrupt vector */
+#define CHB_Tx_EMPTY 0x00
+#define CHB_EXT_STAT 0x02
+#define CHB_Rx_AVAIL 0x04
+#define CHB_SPECIAL 0x06
+#define CHA_Tx_EMPTY 0x08
+#define CHA_EXT_STAT 0x0a
+#define CHA_Rx_AVAIL 0x0c
+#define CHA_SPECIAL 0x0e
+#define STATUS_MASK 0x06
+
+/* Read Register 3 (interrupt pending register) ch a only */
+#define CHBEXT 0x1 /* Channel B Ext/Stat IP */
+#define CHBTxIP 0x2 /* Channel B Tx IP */
+#define CHBRxIP 0x4 /* Channel B Rx IP */
+#define CHAEXT 0x8 /* Channel A Ext/Stat IP */
+#define CHATxIP 0x10 /* Channel A Tx IP */
+#define CHARxIP 0x20 /* Channel A Rx IP */
+
+/* Read Register 8 (receive data register) */
+
+/* Read Register 10 (misc status bits) */
+#define ONLOOP 2 /* On loop */
+#define LOOPSEND 0x10 /* Loop sending */
+#define CLK2MIS 0x40 /* Two clocks missing */
+#define CLK1MIS 0x80 /* One clock missing */
+
+/* Read Register 12 (lower byte of baud rate generator constant) */
+
+/* Read Register 13 (upper byte of baud rate generator constant) */
+
+/* Read Register 15 (value of WR 15) */
+
+/* Misc macros */
+#define ZS_CLEARERR(port) (write_zsreg(port, 0, ERR_RES))
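+/* Drain up to three characters buffered in the receive FIFO by reading the
+ * data register (the classic Z8530 Rx FIFO is three characters deep).
+ */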
+#define ZS_CLEARFIFO(port) do { volatile unsigned char garbage; \
+ garbage = read_zsdata(port); \
+ garbage = read_zsdata(port); \
+ garbage = read_zsdata(port); \
+ } while(0)
+
+#define ZS_IS_CONS(UP) ((UP)->flags & PMACZILOG_FLAG_IS_CONS)
+#define ZS_IS_KGDB(UP) ((UP)->flags & PMACZILOG_FLAG_IS_KGDB)
+#define ZS_IS_CHANNEL_A(UP) ((UP)->flags & PMACZILOG_FLAG_IS_CHANNEL_A)
+#define ZS_REGS_HELD(UP) ((UP)->flags & PMACZILOG_FLAG_REGS_HELD)
+#define ZS_TX_STOPPED(UP) ((UP)->flags & PMACZILOG_FLAG_TX_STOPPED)
+#define ZS_TX_ACTIVE(UP) ((UP)->flags & PMACZILOG_FLAG_TX_ACTIVE)
+#define ZS_WANTS_MODEM_STATUS(UP) ((UP)->flags & PMACZILOG_FLAG_MODEM_STATUS)
+#define ZS_IS_IRDA(UP) ((UP)->flags & PMACZILOG_FLAG_IS_IRDA)
+#define ZS_IS_INTMODEM(UP) ((UP)->flags & PMACZILOG_FLAG_IS_INTMODEM)
+#define ZS_HAS_DMA(UP) ((UP)->flags & PMACZILOG_FLAG_HAS_DMA)
+#define ZS_IS_ASLEEP(UP) ((UP)->flags & PMACZILOG_FLAG_IS_ASLEEP)
+#define ZS_IS_OPEN(UP) ((UP)->flags & PMACZILOG_FLAG_IS_OPEN)
+#define ZS_IS_IRQ_ON(UP) ((UP)->flags & PMACZILOG_FLAG_IS_IRQ_ON)
+#define ZS_IS_EXTCLK(UP) ((UP)->flags & PMACZILOG_FLAG_IS_EXTCLK)
+
+#endif /* __PMAC_ZILOG_H__ */
diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
new file mode 100644
index 00000000..0aa75a97
--- /dev/null
+++ b/drivers/tty/serial/pnx8xxx_uart.c
@@ -0,0 +1,854 @@
+/*
+ * UART driver for PNX8XXX SoCs
+ *
+ * Author: Per Hallsmark per.hallsmark@mvista.com
+ * Ported to 2.6 kernel by EmbeddedAlley
+ * Reworked by Vitaly Wool <vitalywool@gmail.com>
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of
+ * any kind, whether express or implied.
+ *
+ */
+
+#if defined(CONFIG_SERIAL_PNX8XXX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/serial_pnx8xxx.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+/* We'll be using StrongARM sa1100 serial port major/minor */
+#define SERIAL_PNX8XXX_MAJOR 204
+#define MINOR_START 5
+
+#define NR_PORTS 2
+
+#define PNX8XXX_ISR_PASS_LIMIT 256
+
+/*
+ * Convert from ignore_status_mask or read_status_mask to FIFO
+ * and interrupt status bits
+ */
+#define SM_TO_FIFO(x) ((x) >> 10)
+#define SM_TO_ISTAT(x) ((x) & 0x000001ff)
+#define FIFO_TO_SM(x) ((x) << 10)
+#define ISTAT_TO_SM(x) ((x) & 0x000001ff)
+
+/*
+ * This is the size of our serial port register set.
+ */
+#define UART_PORT_SIZE 0x1000
+
+/*
+ * This determines how often we check the modem status signals
+ * for any change. They generally aren't connected to an IRQ
+ * so we have to poll them. We also check immediately before
+ * filling the TX fifo in case CTS has been dropped.
+ */
+#define MCTRL_TIMEOUT (250*HZ/1000)
+
+extern struct pnx8xxx_port pnx8xxx_ports[];
+
+static inline int serial_in(struct pnx8xxx_port *sport, int offset)
+{
+ return (__raw_readl(sport->port.membase + offset));
+}
+
+static inline void serial_out(struct pnx8xxx_port *sport, int offset, int value)
+{
+ __raw_writel(value, sport->port.membase + offset);
+}
+
+/*
+ * Handle any change of modem status signal since we were last called.
+ */
+static void pnx8xxx_mctrl_check(struct pnx8xxx_port *sport)
+{
+ unsigned int status, changed;
+
+ status = sport->port.ops->get_mctrl(&sport->port);
+ changed = status ^ sport->old_status;
+
+ if (changed == 0)
+ return;
+
+ sport->old_status = status;
+
+ if (changed & TIOCM_RI)
+ sport->port.icount.rng++;
+ if (changed & TIOCM_DSR)
+ sport->port.icount.dsr++;
+ if (changed & TIOCM_CAR)
+ uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
+ if (changed & TIOCM_CTS)
+ uart_handle_cts_change(&sport->port, status & TIOCM_CTS);
+
+ wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
+}
+
+/*
+ * This is our per-port timeout handler, for checking the
+ * modem status signals.
+ */
+static void pnx8xxx_timeout(unsigned long data)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)data;
+ unsigned long flags;
+
+ if (sport->port.state) {
+ spin_lock_irqsave(&sport->port.lock, flags);
+ pnx8xxx_mctrl_check(sport);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
+ }
+}
+
+/*
+ * interrupts disabled on entry
+ */
+static void pnx8xxx_stop_tx(struct uart_port *port)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ u32 ien;
+
+ /* Disable TX intr */
+ ien = serial_in(sport, PNX8XXX_IEN);
+ serial_out(sport, PNX8XXX_IEN, ien & ~PNX8XXX_UART_INT_ALLTX);
+
+ /* Clear all pending TX intr */
+ serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLTX);
+}
+
+/*
+ * interrupts may not be disabled on entry
+ */
+static void pnx8xxx_start_tx(struct uart_port *port)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ u32 ien;
+
+ /* Clear all pending TX intr */
+ serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLTX);
+
+ /* Enable TX intr */
+ ien = serial_in(sport, PNX8XXX_IEN);
+ serial_out(sport, PNX8XXX_IEN, ien | PNX8XXX_UART_INT_ALLTX);
+}
+
+/*
+ * Interrupts enabled
+ */
+static void pnx8xxx_stop_rx(struct uart_port *port)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ u32 ien;
+
+ /* Disable RX intr */
+ ien = serial_in(sport, PNX8XXX_IEN);
+ serial_out(sport, PNX8XXX_IEN, ien & ~PNX8XXX_UART_INT_ALLRX);
+
+ /* Clear all pending RX intr */
+ serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLRX);
+}
+
+/*
+ * Set the modem control timer to fire immediately.
+ */
+static void pnx8xxx_enable_ms(struct uart_port *port)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+
+ mod_timer(&sport->timer, jiffies);
+}
+
+static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport)
+{
+ struct tty_struct *tty = sport->port.state->port.tty;
+ unsigned int status, ch, flg;
+
+ status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
+ ISTAT_TO_SM(serial_in(sport, PNX8XXX_ISTAT));
+ while (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFIFO)) {
+ ch = serial_in(sport, PNX8XXX_FIFO) & 0xff;
+
+ sport->port.icount.rx++;
+
+ flg = TTY_NORMAL;
+
+ /*
+ * note that the error handling code is
+ * out of the main execution path
+ */
+ if (status & (FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE |
+ PNX8XXX_UART_FIFO_RXPAR |
+ PNX8XXX_UART_FIFO_RXBRK) |
+ ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN))) {
+ if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXBRK)) {
+ status &= ~(FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
+ FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR));
+ sport->port.icount.brk++;
+ if (uart_handle_break(&sport->port))
+ goto ignore_char;
+ } else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR))
+ sport->port.icount.parity++;
+ else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE))
+ sport->port.icount.frame++;
+ if (status & ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN))
+ sport->port.icount.overrun++;
+
+ status &= sport->port.read_status_mask;
+
+ if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR))
+ flg = TTY_PARITY;
+ else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE))
+ flg = TTY_FRAME;
+
+#ifdef SUPPORT_SYSRQ
+ sport->port.sysrq = 0;
+#endif
+ }
+
+ if (uart_handle_sysrq_char(&sport->port, ch))
+ goto ignore_char;
+
+ uart_insert_char(&sport->port, status,
+ ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN), ch, flg);
+
+ ignore_char:
+ serial_out(sport, PNX8XXX_LCR, serial_in(sport, PNX8XXX_LCR) |
+ PNX8XXX_UART_LCR_RX_NEXT);
+ status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
+ ISTAT_TO_SM(serial_in(sport, PNX8XXX_ISTAT));
+ }
+ tty_flip_buffer_push(tty);
+}
+
+static void pnx8xxx_tx_chars(struct pnx8xxx_port *sport)
+{
+ struct circ_buf *xmit = &sport->port.state->xmit;
+
+ if (sport->port.x_char) {
+ serial_out(sport, PNX8XXX_FIFO, sport->port.x_char);
+ sport->port.icount.tx++;
+ sport->port.x_char = 0;
+ return;
+ }
+
+ /*
+ * Check the modem control lines before
+ * transmitting anything.
+ */
+ pnx8xxx_mctrl_check(sport);
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
+ pnx8xxx_stop_tx(&sport->port);
+ return;
+ }
+
+ /*
+ * TX while bytes available
+ */
+ while (((serial_in(sport, PNX8XXX_FIFO) &
+ PNX8XXX_UART_FIFO_TXFIFO) >> 16) < 16) {
+ serial_out(sport, PNX8XXX_FIFO, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ sport->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+
+ if (uart_circ_empty(xmit))
+ pnx8xxx_stop_tx(&sport->port);
+}
+
+static irqreturn_t pnx8xxx_int(int irq, void *dev_id)
+{
+ struct pnx8xxx_port *sport = dev_id;
+ unsigned int status;
+
+ spin_lock(&sport->port.lock);
+ /* Get the interrupts */
+ status = serial_in(sport, PNX8XXX_ISTAT) & serial_in(sport, PNX8XXX_IEN);
+
+ /* Byte or break signal received */
+ if (status & (PNX8XXX_UART_INT_RX | PNX8XXX_UART_INT_BREAK))
+ pnx8xxx_rx_chars(sport);
+
+ /* TX holding register empty - transmit a byte */
+ if (status & PNX8XXX_UART_INT_TX)
+ pnx8xxx_tx_chars(sport);
+
+ /* Clear the ISTAT register */
+ serial_out(sport, PNX8XXX_ICLR, status);
+
+ spin_unlock(&sport->port.lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Return TIOCSER_TEMT when transmitter is not busy.
+ */
+static unsigned int pnx8xxx_tx_empty(struct uart_port *port)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+
+ return serial_in(sport, PNX8XXX_FIFO) & PNX8XXX_UART_FIFO_TXFIFO_STA ? 0 : TIOCSER_TEMT;
+}
+
+static unsigned int pnx8xxx_get_mctrl(struct uart_port *port)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ unsigned int mctrl = TIOCM_DSR;
+ unsigned int msr;
+
+ /* REVISIT */
+
+ msr = serial_in(sport, PNX8XXX_MCR);
+
+ mctrl |= msr & PNX8XXX_UART_MCR_CTS ? TIOCM_CTS : 0;
+ mctrl |= msr & PNX8XXX_UART_MCR_DCD ? TIOCM_CAR : 0;
+
+ return mctrl;
+}
+
+static void pnx8xxx_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+#if 0 /* FIXME */
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ unsigned int msr;
+#endif
+}
+
+/*
+ * Interrupts always disabled.
+ */
+static void pnx8xxx_break_ctl(struct uart_port *port, int break_state)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ unsigned long flags;
+ unsigned int lcr;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ lcr = serial_in(sport, PNX8XXX_LCR);
+ if (break_state == -1)
+ lcr |= PNX8XXX_UART_LCR_TXBREAK;
+ else
+ lcr &= ~PNX8XXX_UART_LCR_TXBREAK;
+ serial_out(sport, PNX8XXX_LCR, lcr);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static int pnx8xxx_startup(struct uart_port *port)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ int retval;
+
+ /*
+ * Allocate the IRQ
+ */
+ retval = request_irq(sport->port.irq, pnx8xxx_int, 0,
+ "pnx8xxx-uart", sport);
+ if (retval)
+ return retval;
+
+ /*
+ * Finally, clear and enable interrupts
+ */
+
+ serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLRX |
+ PNX8XXX_UART_INT_ALLTX);
+
+ serial_out(sport, PNX8XXX_IEN, serial_in(sport, PNX8XXX_IEN) |
+ PNX8XXX_UART_INT_ALLRX |
+ PNX8XXX_UART_INT_ALLTX);
+
+ /*
+ * Enable modem status interrupts
+ */
+ spin_lock_irq(&sport->port.lock);
+ pnx8xxx_enable_ms(&sport->port);
+ spin_unlock_irq(&sport->port.lock);
+
+ return 0;
+}
+
+static void pnx8xxx_shutdown(struct uart_port *port)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ int lcr;
+
+ /*
+ * Stop our timer.
+ */
+ del_timer_sync(&sport->timer);
+
+ /*
+ * Disable all interrupts
+ */
+ serial_out(sport, PNX8XXX_IEN, 0);
+
+ /*
+ * Reset the Tx and Rx FIFOS, disable the break condition
+ */
+ lcr = serial_in(sport, PNX8XXX_LCR);
+ lcr &= ~PNX8XXX_UART_LCR_TXBREAK;
+ lcr |= PNX8XXX_UART_LCR_TX_RST | PNX8XXX_UART_LCR_RX_RST;
+ serial_out(sport, PNX8XXX_LCR, lcr);
+
+ /*
+ * Clear all interrupts
+ */
+ serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_ALLRX |
+ PNX8XXX_UART_INT_ALLTX);
+
+ /*
+ * Free the interrupt
+ */
+ free_irq(sport->port.irq, sport);
+}
+
+static void
+pnx8xxx_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ unsigned long flags;
+ unsigned int lcr_fcr, old_ien, baud, quot;
+ unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
+
+ /*
+ * We only support CS7 and CS8.
+ */
+ while ((termios->c_cflag & CSIZE) != CS7 &&
+ (termios->c_cflag & CSIZE) != CS8) {
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= old_csize;
+ old_csize = CS8;
+ }
+
+ if ((termios->c_cflag & CSIZE) == CS8)
+ lcr_fcr = PNX8XXX_UART_LCR_8BIT;
+ else
+ lcr_fcr = 0;
+
+ if (termios->c_cflag & CSTOPB)
+ lcr_fcr |= PNX8XXX_UART_LCR_2STOPB;
+ if (termios->c_cflag & PARENB) {
+ lcr_fcr |= PNX8XXX_UART_LCR_PAREN;
+ if (!(termios->c_cflag & PARODD))
+ lcr_fcr |= PNX8XXX_UART_LCR_PAREVN;
+ }
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+ quot = uart_get_divisor(port, baud);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ sport->port.read_status_mask = ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN) |
+ ISTAT_TO_SM(PNX8XXX_UART_INT_EMPTY) |
+ ISTAT_TO_SM(PNX8XXX_UART_INT_RX);
+ if (termios->c_iflag & INPCK)
+ sport->port.read_status_mask |=
+ FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
+ FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR);
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ sport->port.read_status_mask |=
+ ISTAT_TO_SM(PNX8XXX_UART_INT_BREAK);
+
+ /*
+ * Characters to ignore
+ */
+ sport->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ sport->port.ignore_status_mask |=
+ FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
+ FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR);
+ if (termios->c_iflag & IGNBRK) {
+ sport->port.ignore_status_mask |=
+ ISTAT_TO_SM(PNX8XXX_UART_INT_BREAK);
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ sport->port.ignore_status_mask |=
+ ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN);
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ sport->port.ignore_status_mask |=
+ ISTAT_TO_SM(PNX8XXX_UART_INT_RX);
+
+ del_timer_sync(&sport->timer);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ /*
+ * disable interrupts and drain transmitter
+ */
+ old_ien = serial_in(sport, PNX8XXX_IEN);
+ serial_out(sport, PNX8XXX_IEN, old_ien & ~(PNX8XXX_UART_INT_ALLTX |
+ PNX8XXX_UART_INT_ALLRX));
+
+ while (serial_in(sport, PNX8XXX_FIFO) & PNX8XXX_UART_FIFO_TXFIFO_STA)
+ barrier();
+
+ /* then, disable everything */
+ serial_out(sport, PNX8XXX_IEN, 0);
+
+ /* Reset the Rx and Tx FIFOs too */
+ lcr_fcr |= PNX8XXX_UART_LCR_TX_RST;
+ lcr_fcr |= PNX8XXX_UART_LCR_RX_RST;
+
+ /* set the parity, stop bits and data size */
+ serial_out(sport, PNX8XXX_LCR, lcr_fcr);
+
+ /* set the baud rate */
+ quot -= 1;
+ serial_out(sport, PNX8XXX_BAUD, quot);
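+ /*
+ * The value written is (quot - 1); since uart_get_divisor() returns
+ * uartclk / (16 * baud), the hardware presumably adds one back to
+ * form the actual divisor.
+ */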
+
+ serial_out(sport, PNX8XXX_ICLR, -1);
+
+ serial_out(sport, PNX8XXX_IEN, old_ien);
+
+ if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
+ pnx8xxx_enable_ms(&sport->port);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static const char *pnx8xxx_type(struct uart_port *port)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+
+ return sport->port.type == PORT_PNX8XXX ? "PNX8XXX" : NULL;
+}
+
+/*
+ * Release the memory region(s) being used by 'port'.
+ */
+static void pnx8xxx_release_port(struct uart_port *port)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+
+ release_mem_region(sport->port.mapbase, UART_PORT_SIZE);
+}
+
+/*
+ * Request the memory region(s) being used by 'port'.
+ */
+static int pnx8xxx_request_port(struct uart_port *port)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ return request_mem_region(sport->port.mapbase, UART_PORT_SIZE,
+ "pnx8xxx-uart") != NULL ? 0 : -EBUSY;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void pnx8xxx_config_port(struct uart_port *port, int flags)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+
+ if (flags & UART_CONFIG_TYPE &&
+ pnx8xxx_request_port(&sport->port) == 0)
+ sport->port.type = PORT_PNX8XXX;
+}
+
+/*
+ * Verify the new serial_struct (for TIOCSSERIAL).
+ * The only changes we allow are to the flags and type, and
+ * even then only between PORT_PNX8XXX and PORT_UNKNOWN
+ */
+static int
+pnx8xxx_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ int ret = 0;
+
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_PNX8XXX)
+ ret = -EINVAL;
+ if (sport->port.irq != ser->irq)
+ ret = -EINVAL;
+ if (ser->io_type != SERIAL_IO_MEM)
+ ret = -EINVAL;
+ if (sport->port.uartclk / 16 != ser->baud_base)
+ ret = -EINVAL;
+ if ((void *)sport->port.mapbase != ser->iomem_base)
+ ret = -EINVAL;
+ if (sport->port.iobase != ser->port)
+ ret = -EINVAL;
+ if (ser->hub6 != 0)
+ ret = -EINVAL;
+ return ret;
+}
+
+static struct uart_ops pnx8xxx_pops = {
+ .tx_empty = pnx8xxx_tx_empty,
+ .set_mctrl = pnx8xxx_set_mctrl,
+ .get_mctrl = pnx8xxx_get_mctrl,
+ .stop_tx = pnx8xxx_stop_tx,
+ .start_tx = pnx8xxx_start_tx,
+ .stop_rx = pnx8xxx_stop_rx,
+ .enable_ms = pnx8xxx_enable_ms,
+ .break_ctl = pnx8xxx_break_ctl,
+ .startup = pnx8xxx_startup,
+ .shutdown = pnx8xxx_shutdown,
+ .set_termios = pnx8xxx_set_termios,
+ .type = pnx8xxx_type,
+ .release_port = pnx8xxx_release_port,
+ .request_port = pnx8xxx_request_port,
+ .config_port = pnx8xxx_config_port,
+ .verify_port = pnx8xxx_verify_port,
+};
+
+
+/*
+ * Setup the PNX8XXX serial ports.
+ *
+ * Note also that we support "console=ttySx" where "x" is either 0 or 1.
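+ *
+ * A typical boot argument would look like "console=ttyS0,38400n8";
+ * when no options are given, pnx8xxx_console_setup() falls back to
+ * 38400 baud, 8 data bits, no parity.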
+ */
+static void __init pnx8xxx_init_ports(void)
+{
+ static int first = 1;
+ int i;
+
+ if (!first)
+ return;
+ first = 0;
+
+ for (i = 0; i < NR_PORTS; i++) {
+ init_timer(&pnx8xxx_ports[i].timer);
+ pnx8xxx_ports[i].timer.function = pnx8xxx_timeout;
+ pnx8xxx_ports[i].timer.data = (unsigned long)&pnx8xxx_ports[i];
+ pnx8xxx_ports[i].port.ops = &pnx8xxx_pops;
+ }
+}
+
+#ifdef CONFIG_SERIAL_PNX8XXX_CONSOLE
+
+static void pnx8xxx_console_putchar(struct uart_port *port, int ch)
+{
+ struct pnx8xxx_port *sport = (struct pnx8xxx_port *)port;
+ int status;
+
+ do {
+ /* Wait for UART_TX register to empty */
+ status = serial_in(sport, PNX8XXX_FIFO);
+ } while (status & PNX8XXX_UART_FIFO_TXFIFO);
+ serial_out(sport, PNX8XXX_FIFO, ch);
+}
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+pnx8xxx_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct pnx8xxx_port *sport = &pnx8xxx_ports[co->index];
+ unsigned int old_ien, status;
+
+ /*
+ * First, save IEN and then disable interrupts
+ */
+ old_ien = serial_in(sport, PNX8XXX_IEN);
+ serial_out(sport, PNX8XXX_IEN, old_ien & ~(PNX8XXX_UART_INT_ALLTX |
+ PNX8XXX_UART_INT_ALLRX));
+
+ uart_console_write(&sport->port, s, count, pnx8xxx_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore IEN
+ */
+ do {
+ /* Wait for UART_TX register to empty */
+ status = serial_in(sport, PNX8XXX_FIFO);
+ } while (status & PNX8XXX_UART_FIFO_TXFIFO);
+
+ /* Clear TX and EMPTY interrupt */
+ serial_out(sport, PNX8XXX_ICLR, PNX8XXX_UART_INT_TX |
+ PNX8XXX_UART_INT_EMPTY);
+
+ serial_out(sport, PNX8XXX_IEN, old_ien);
+}
+
+static int __init
+pnx8xxx_console_setup(struct console *co, char *options)
+{
+ struct pnx8xxx_port *sport;
+ int baud = 38400;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, fall back to the first port.
+ */
+ if (co->index == -1 || co->index >= NR_PORTS)
+ co->index = 0;
+ sport = &pnx8xxx_ports[co->index];
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&sport->port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver pnx8xxx_reg;
+static struct console pnx8xxx_console = {
+ .name = "ttyS",
+ .write = pnx8xxx_console_write,
+ .device = uart_console_device,
+ .setup = pnx8xxx_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &pnx8xxx_reg,
+};
+
+static int __init pnx8xxx_rs_console_init(void)
+{
+ pnx8xxx_init_ports();
+ register_console(&pnx8xxx_console);
+ return 0;
+}
+console_initcall(pnx8xxx_rs_console_init);
+
+#define PNX8XXX_CONSOLE &pnx8xxx_console
+#else
+#define PNX8XXX_CONSOLE NULL
+#endif
+
+static struct uart_driver pnx8xxx_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttyS",
+ .dev_name = "ttyS",
+ .major = SERIAL_PNX8XXX_MAJOR,
+ .minor = MINOR_START,
+ .nr = NR_PORTS,
+ .cons = PNX8XXX_CONSOLE,
+};
+
+static int pnx8xxx_serial_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct pnx8xxx_port *sport = platform_get_drvdata(pdev);
+
+ return uart_suspend_port(&pnx8xxx_reg, &sport->port);
+}
+
+static int pnx8xxx_serial_resume(struct platform_device *pdev)
+{
+ struct pnx8xxx_port *sport = platform_get_drvdata(pdev);
+
+ return uart_resume_port(&pnx8xxx_reg, &sport->port);
+}
+
+static int pnx8xxx_serial_probe(struct platform_device *pdev)
+{
+ struct resource *res = pdev->resource;
+ int i, j;
+
+ for (i = 0; i < pdev->num_resources; i++, res++) {
+ if (!(res->flags & IORESOURCE_MEM))
+ continue;
+
+ for (j = 0; j < NR_PORTS; j++) {
+ if (pnx8xxx_ports[j].port.mapbase != res->start)
+ continue;
+
+ pnx8xxx_ports[j].port.dev = &pdev->dev;
+ uart_add_one_port(&pnx8xxx_reg, &pnx8xxx_ports[j].port);
+ platform_set_drvdata(pdev, &pnx8xxx_ports[j]);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int pnx8xxx_serial_remove(struct platform_device *pdev)
+{
+ struct pnx8xxx_port *sport = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (sport)
+ uart_remove_one_port(&pnx8xxx_reg, &sport->port);
+
+ return 0;
+}
+
+static struct platform_driver pnx8xxx_serial_driver = {
+ .driver = {
+ .name = "pnx8xxx-uart",
+ .owner = THIS_MODULE,
+ },
+ .probe = pnx8xxx_serial_probe,
+ .remove = pnx8xxx_serial_remove,
+ .suspend = pnx8xxx_serial_suspend,
+ .resume = pnx8xxx_serial_resume,
+};
+
+static int __init pnx8xxx_serial_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Serial: PNX8XXX driver\n");
+
+ pnx8xxx_init_ports();
+
+ ret = uart_register_driver(&pnx8xxx_reg);
+ if (ret == 0) {
+ ret = platform_driver_register(&pnx8xxx_serial_driver);
+ if (ret)
+ uart_unregister_driver(&pnx8xxx_reg);
+ }
+ return ret;
+}
+
+static void __exit pnx8xxx_serial_exit(void)
+{
+ platform_driver_unregister(&pnx8xxx_serial_driver);
+ uart_unregister_driver(&pnx8xxx_reg);
+}
+
+module_init(pnx8xxx_serial_init);
+module_exit(pnx8xxx_serial_exit);
+
+MODULE_AUTHOR("Embedded Alley Solutions, Inc.");
+MODULE_DESCRIPTION("PNX8XXX SoCs serial port driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_PNX8XXX_MAJOR);
+MODULE_ALIAS("platform:pnx8xxx-uart");
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
new file mode 100644
index 00000000..81243a62
--- /dev/null
+++ b/drivers/tty/serial/pxa.c
@@ -0,0 +1,897 @@
+/*
+ * Based on drivers/serial/8250.c by Russell King.
+ *
+ * Author: Nicolas Pitre
+ * Created: Feb 20, 2003
+ * Copyright: (C) 2003 Monta Vista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Note 1: This driver is made separate from the already too overloaded
+ * 8250.c because it needs some quirks of its own and that'll make it
+ * easier to add DMA support.
+ *
+ * Note 2: I'm too sick of device allocation policies for serial ports.
+ * If someone else wants to request an "official" allocation of major/minor
+ * for this driver please be my guest. And don't forget that new hardware
+ * to come from Intel might have more than 3 or 4 of those UARTs. Let's
+ * hope for a better port registration and dynamic device allocation scheme
+ * with the serial core maintainer's satisfaction to appear soon.
+ */
+
+
+#if defined(CONFIG_SERIAL_PXA_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/serial_reg.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+struct uart_pxa_port {
+ struct uart_port port;
+ unsigned char ier;
+ unsigned char lcr;
+ unsigned char mcr;
+ unsigned int lsr_break_flag;
+ struct clk *clk;
+ char *name;
+};
+
+static inline unsigned int serial_in(struct uart_pxa_port *up, int offset)
+{
+ offset <<= 2;
+ return readl(up->port.membase + offset);
+}
+
+static inline void serial_out(struct uart_pxa_port *up, int offset, int value)
+{
+ offset <<= 2;
+ writel(value, up->port.membase + offset);
+}
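+
+/*
+ * The PXA UART registers follow the standard 8250 layout but sit on
+ * 32-bit boundaries, hence the "offset <<= 2" above and the use of
+ * readl()/writel() rather than byte accessors.
+ */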
+
+static void serial_pxa_enable_ms(struct uart_port *port)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
+}
+
+static void serial_pxa_stop_tx(struct uart_port *port)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+
+ if (up->ier & UART_IER_THRI) {
+ up->ier &= ~UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+static void serial_pxa_stop_rx(struct uart_port *port)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+
+ up->ier &= ~UART_IER_RLSI;
+ up->port.read_status_mask &= ~UART_LSR_DR;
+ serial_out(up, UART_IER, up->ier);
+}
+
+static inline void receive_chars(struct uart_pxa_port *up, int *status)
+{
+ struct tty_struct *tty = up->port.state->port.tty;
+ unsigned int ch, flag;
+ int max_count = 256;
+
+ do {
+ /* work around Errata #20 according to
+ * Intel(R) PXA27x Processor Family
+ * Specification Update (May 2005)
+ *
+ * Step 2
+ * Disable the Receiver Time Out Interrupt via IER[RTOIE]
+ */
+ up->ier &= ~UART_IER_RTOIE;
+ serial_out(up, UART_IER, up->ier);
+
+ ch = serial_in(up, UART_RX);
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+
+ if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
+ UART_LSR_FE | UART_LSR_OE))) {
+ /*
+ * For statistics only
+ */
+ if (*status & UART_LSR_BI) {
+ *status &= ~(UART_LSR_FE | UART_LSR_PE);
+ up->port.icount.brk++;
+ /*
+ * We do the SysRQ and SAK checking
+ * here because otherwise the break
+ * may get masked by ignore_status_mask
+ * or read_status_mask.
+ */
+ if (uart_handle_break(&up->port))
+ goto ignore_char;
+ } else if (*status & UART_LSR_PE)
+ up->port.icount.parity++;
+ else if (*status & UART_LSR_FE)
+ up->port.icount.frame++;
+ if (*status & UART_LSR_OE)
+ up->port.icount.overrun++;
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ *status &= up->port.read_status_mask;
+
+#ifdef CONFIG_SERIAL_PXA_CONSOLE
+ if (up->port.line == up->port.cons->index) {
+ /* Recover the break flag from console xmit */
+ *status |= up->lsr_break_flag;
+ up->lsr_break_flag = 0;
+ }
+#endif
+ if (*status & UART_LSR_BI) {
+ flag = TTY_BREAK;
+ } else if (*status & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (*status & UART_LSR_FE)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(&up->port, ch))
+ goto ignore_char;
+
+ uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
+
+ ignore_char:
+ *status = serial_in(up, UART_LSR);
+ } while ((*status & UART_LSR_DR) && (max_count-- > 0));
+ tty_flip_buffer_push(tty);
+
+ /* work around Errata #20 according to
+ * Intel(R) PXA27x Processor Family
+ * Specification Update (May 2005)
+ *
+ * Step 6:
+ * No more data in FIFO: Re-enable RTO interrupt via IER[RTOIE]
+ */
+ up->ier |= UART_IER_RTOIE;
+ serial_out(up, UART_IER, up->ier);
+}
+
+static void transmit_chars(struct uart_pxa_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int count;
+
+ if (up->port.x_char) {
+ serial_out(up, UART_TX, up->port.x_char);
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+ serial_pxa_stop_tx(&up->port);
+ return;
+ }
+
+ count = up->port.fifosize / 2;
+ do {
+ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+
+ if (uart_circ_empty(xmit))
+ serial_pxa_stop_tx(&up->port);
+}
+
+static void serial_pxa_start_tx(struct uart_port *port)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+
+ if (!(up->ier & UART_IER_THRI)) {
+ up->ier |= UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+}
+
+static inline void check_modem_status(struct uart_pxa_port *up)
+{
+ int status;
+
+ status = serial_in(up, UART_MSR);
+
+ if ((status & UART_MSR_ANY_DELTA) == 0)
+ return;
+
+ if (status & UART_MSR_TERI)
+ up->port.icount.rng++;
+ if (status & UART_MSR_DDSR)
+ up->port.icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
+ if (status & UART_MSR_DCTS)
+ uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
+
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+}
+
+/*
+ * This handles the interrupt from one port.
+ */
+static inline irqreturn_t serial_pxa_irq(int irq, void *dev_id)
+{
+ struct uart_pxa_port *up = dev_id;
+ unsigned int iir, lsr;
+
+ iir = serial_in(up, UART_IIR);
+ if (iir & UART_IIR_NO_INT)
+ return IRQ_NONE;
+ lsr = serial_in(up, UART_LSR);
+ if (lsr & UART_LSR_DR)
+ receive_chars(up, &lsr);
+ check_modem_status(up);
+ if (lsr & UART_LSR_THRE)
+ transmit_chars(up);
+ return IRQ_HANDLED;
+}
+
+static unsigned int serial_pxa_tx_empty(struct uart_port *port)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return ret;
+}
+
+static unsigned int serial_pxa_get_mctrl(struct uart_port *port)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+ unsigned char status;
+ unsigned int ret;
+
+ status = serial_in(up, UART_MSR);
+
+ ret = 0;
+ if (status & UART_MSR_DCD)
+ ret |= TIOCM_CAR;
+ if (status & UART_MSR_RI)
+ ret |= TIOCM_RNG;
+ if (status & UART_MSR_DSR)
+ ret |= TIOCM_DSR;
+ if (status & UART_MSR_CTS)
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+static void serial_pxa_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+ unsigned char mcr = 0;
+
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_OUT1)
+ mcr |= UART_MCR_OUT1;
+ if (mctrl & TIOCM_OUT2)
+ mcr |= UART_MCR_OUT2;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+ mcr |= up->mcr;
+
+ serial_out(up, UART_MCR, mcr);
+}
+
+static void serial_pxa_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+ else
+ up->lcr &= ~UART_LCR_SBC;
+ serial_out(up, UART_LCR, up->lcr);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+#if 0
+static void serial_pxa_dma_init(struct pxa_uart *up)
+{
+ up->rxdma =
+ pxa_request_dma(up->name, DMA_PRIO_LOW, pxa_receive_dma, up);
+ if (up->rxdma < 0)
+ goto out;
+ up->txdma =
+ pxa_request_dma(up->name, DMA_PRIO_LOW, pxa_transmit_dma, up);
+ if (up->txdma < 0)
+ goto err_rxdma;
+ up->dmadesc = kmalloc(4 * sizeof(pxa_dma_desc), GFP_KERNEL);
+ if (!up->dmadesc)
+ goto err_alloc;
+
+ /* ... */
+err_alloc:
+ pxa_free_dma(up->txdma);
+err_rxdma:
+ pxa_free_dma(up->rxdma);
+out:
+ return;
+}
+#endif
+
+static int serial_pxa_startup(struct uart_port *port)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+ unsigned long flags;
+ int retval;
+
+ if (port->line == 3) /* HWUART */
+ up->mcr |= UART_MCR_AFE;
+ else
+ up->mcr = 0;
+
+ up->port.uartclk = clk_get_rate(up->clk);
+
+ /*
+ * Allocate the IRQ
+ */
+ retval = request_irq(up->port.irq, serial_pxa_irq, 0, up->name, up);
+ if (retval)
+ return retval;
+
+ /*
+ * Clear the FIFO buffers and disable them.
+ * (they will be reenabled in set_termios())
+ */
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ serial_out(up, UART_FCR, 0);
+
+ /*
+ * Clear the interrupt registers.
+ */
+ (void) serial_in(up, UART_LSR);
+ (void) serial_in(up, UART_RX);
+ (void) serial_in(up, UART_IIR);
+ (void) serial_in(up, UART_MSR);
+
+ /*
+ * Now, initialize the UART
+ */
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ up->port.mctrl |= TIOCM_OUT2;
+ serial_pxa_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /*
+ * Finally, enable interrupts. Note: Modem status interrupts
+ * are set via set_termios(), which will be occurring imminently
+ * anyway, so we don't enable them here.
+ */
+ up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE | UART_IER_UUE;
+ serial_out(up, UART_IER, up->ier);
+
+ /*
+ * And clear the interrupt registers again for luck.
+ */
+ (void) serial_in(up, UART_LSR);
+ (void) serial_in(up, UART_RX);
+ (void) serial_in(up, UART_IIR);
+ (void) serial_in(up, UART_MSR);
+
+ return 0;
+}
+
+static void serial_pxa_shutdown(struct uart_port *port)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+ unsigned long flags;
+
+ free_irq(up->port.irq, up);
+
+ /*
+ * Disable interrupts from this port
+ */
+ up->ier = 0;
+ serial_out(up, UART_IER, 0);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ up->port.mctrl &= ~TIOCM_OUT2;
+ serial_pxa_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /*
+ * Disable break condition and FIFOs
+ */
+ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR |
+ UART_FCR_CLEAR_XMIT);
+ serial_out(up, UART_FCR, 0);
+}
+
+static void
+serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+ unsigned char cval, fcr = 0;
+ unsigned long flags;
+ unsigned int baud, quot;
+ unsigned int dll;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ cval = UART_LCR_WLEN5;
+ break;
+ case CS6:
+ cval = UART_LCR_WLEN6;
+ break;
+ case CS7:
+ cval = UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ cval = UART_LCR_WLEN8;
+ break;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ cval |= UART_LCR_STOP;
+ if (termios->c_cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(termios->c_cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+ quot = uart_get_divisor(port, baud);
+
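+ /*
+ * uartclk / quot works out to roughly 16 * baud, so rates below
+ * 2400 baud get a 1-byte receive trigger, rates below 230400 baud
+ * an 8-byte trigger, and anything faster the 32-byte trigger,
+ * trading interrupt rate against latency.
+ */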
+ if ((up->port.uartclk / quot) < (2400 * 16))
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR1;
+ else if ((up->port.uartclk / quot) < (230400 * 16))
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR8;
+ else
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR32;
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * Ensure the port will be enabled.
+ * This is required especially for serial console.
+ */
+ up->ier |= UART_IER_UUE;
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ up->port.read_status_mask |= UART_LSR_BI;
+
+ /*
+ * Characters to ignore
+ */
+ up->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (termios->c_iflag & IGNBRK) {
+ up->port.ignore_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_OE;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= UART_LSR_DR;
+
+ /*
+ * CTS flow control flag and modem status interrupts
+ */
+ up->ier &= ~UART_IER_MSI;
+ if (UART_ENABLE_MS(&up->port, termios->c_cflag))
+ up->ier |= UART_IER_MSI;
+
+ serial_out(up, UART_IER, up->ier);
+
+ if (termios->c_cflag & CRTSCTS)
+ up->mcr |= UART_MCR_AFE;
+ else
+ up->mcr &= ~UART_MCR_AFE;
+
+ serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
+ serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
+
+ /*
+ * work around Errata #75 according to Intel(R) PXA27x Processor Family
+ * Specification Update (Nov 2005)
+ */
+ dll = serial_in(up, UART_DLL);
+ WARN_ON(dll != (quot & 0xff));
+
+ serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
+ serial_out(up, UART_LCR, cval); /* reset DLAB */
+ up->lcr = cval; /* Save LCR */
+ serial_pxa_set_mctrl(&up->port, up->port.mctrl);
+ serial_out(up, UART_FCR, fcr);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
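+/*
+ * Serial core power management hook: state 0 means the port is being
+ * powered up, so the functional clock is enabled; any other state
+ * gates the clock off again.
+ */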
+static void
+serial_pxa_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+
+ if (!state)
+ clk_enable(up->clk);
+ else
+ clk_disable(up->clk);
+}
+
+static void serial_pxa_release_port(struct uart_port *port)
+{
+}
+
+static int serial_pxa_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void serial_pxa_config_port(struct uart_port *port, int flags)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+ up->port.type = PORT_PXA;
+}
+
+static int
+serial_pxa_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ /* we don't want the core code to modify any port params */
+ return -EINVAL;
+}
+
+static const char *
+serial_pxa_type(struct uart_port *port)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+ return up->name;
+}
+
+static struct uart_pxa_port *serial_pxa_ports[4];
+static struct uart_driver serial_pxa_reg;
+
+#ifdef CONFIG_SERIAL_PXA_CONSOLE
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static inline void wait_for_xmitr(struct uart_pxa_port *up)
+{
+ unsigned int status, tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ do {
+ status = serial_in(up, UART_LSR);
+
+ if (status & UART_LSR_BI)
+ up->lsr_break_flag = UART_LSR_BI;
+
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ tmout = 1000000;
+ while (--tmout &&
+ ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
+ udelay(1);
+ }
+}
+
+static void serial_pxa_console_putchar(struct uart_port *port, int ch)
+{
+ struct uart_pxa_port *up = (struct uart_pxa_port *)port;
+
+ wait_for_xmitr(up);
+ serial_out(up, UART_TX, ch);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+static void
+serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_pxa_port *up = serial_pxa_ports[co->index];
+ unsigned int ier;
+
+ clk_enable(up->clk);
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, UART_IER_UUE);
+
+ uart_console_write(&up->port, s, count, serial_pxa_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up);
+ serial_out(up, UART_IER, ier);
+
+ clk_disable(up->clk);
+}
+
+static int __init
+serial_pxa_console_setup(struct console *co, char *options)
+{
+ struct uart_pxa_port *up;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index == -1 || co->index >= serial_pxa_reg.nr)
+ co->index = 0;
+ up = serial_pxa_ports[co->index];
+ if (!up)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&up->port, co, baud, parity, bits, flow);
+}
+
+static struct console serial_pxa_console = {
+ .name = "ttyS",
+ .write = serial_pxa_console_write,
+ .device = uart_console_device,
+ .setup = serial_pxa_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &serial_pxa_reg,
+};
+
+#define PXA_CONSOLE &serial_pxa_console
+#else
+#define PXA_CONSOLE NULL
+#endif
+
+struct uart_ops serial_pxa_pops = {
+ .tx_empty = serial_pxa_tx_empty,
+ .set_mctrl = serial_pxa_set_mctrl,
+ .get_mctrl = serial_pxa_get_mctrl,
+ .stop_tx = serial_pxa_stop_tx,
+ .start_tx = serial_pxa_start_tx,
+ .stop_rx = serial_pxa_stop_rx,
+ .enable_ms = serial_pxa_enable_ms,
+ .break_ctl = serial_pxa_break_ctl,
+ .startup = serial_pxa_startup,
+ .shutdown = serial_pxa_shutdown,
+ .set_termios = serial_pxa_set_termios,
+ .pm = serial_pxa_pm,
+ .type = serial_pxa_type,
+ .release_port = serial_pxa_release_port,
+ .request_port = serial_pxa_request_port,
+ .config_port = serial_pxa_config_port,
+ .verify_port = serial_pxa_verify_port,
+};
+
+static struct uart_driver serial_pxa_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "PXA serial",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .nr = 4,
+ .cons = PXA_CONSOLE,
+};
+
+#ifdef CONFIG_PM
+static int serial_pxa_suspend(struct device *dev)
+{
+ struct uart_pxa_port *sport = dev_get_drvdata(dev);
+
+ if (sport)
+ uart_suspend_port(&serial_pxa_reg, &sport->port);
+
+ return 0;
+}
+
+static int serial_pxa_resume(struct device *dev)
+{
+ struct uart_pxa_port *sport = dev_get_drvdata(dev);
+
+ if (sport)
+ uart_resume_port(&serial_pxa_reg, &sport->port);
+
+ return 0;
+}
+
+static const struct dev_pm_ops serial_pxa_pm_ops = {
+ .suspend = serial_pxa_suspend,
+ .resume = serial_pxa_resume,
+};
+#endif
+
+static int serial_pxa_probe(struct platform_device *dev)
+{
+ struct uart_pxa_port *sport;
+ struct resource *mmres, *irqres;
+ int ret;
+
+ mmres = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ irqres = platform_get_resource(dev, IORESOURCE_IRQ, 0);
+ if (!mmres || !irqres)
+ return -ENODEV;
+
+ sport = kzalloc(sizeof(struct uart_pxa_port), GFP_KERNEL);
+ if (!sport)
+ return -ENOMEM;
+
+ sport->clk = clk_get(&dev->dev, NULL);
+ if (IS_ERR(sport->clk)) {
+ ret = PTR_ERR(sport->clk);
+ goto err_free;
+ }
+
+ sport->port.type = PORT_PXA;
+ sport->port.iotype = UPIO_MEM;
+ sport->port.mapbase = mmres->start;
+ sport->port.irq = irqres->start;
+ sport->port.fifosize = 64;
+ sport->port.ops = &serial_pxa_pops;
+ sport->port.line = dev->id;
+ sport->port.dev = &dev->dev;
+ sport->port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
+ sport->port.uartclk = clk_get_rate(sport->clk);
+
+ switch (dev->id) {
+ case 0: sport->name = "FFUART"; break;
+ case 1: sport->name = "BTUART"; break;
+ case 2: sport->name = "STUART"; break;
+ case 3: sport->name = "HWUART"; break;
+ default:
+ sport->name = "???";
+ break;
+ }
+
+ sport->port.membase = ioremap(mmres->start, mmres->end - mmres->start + 1);
+ if (!sport->port.membase) {
+ ret = -ENOMEM;
+ goto err_clk;
+ }
+
+ serial_pxa_ports[dev->id] = sport;
+
+ uart_add_one_port(&serial_pxa_reg, &sport->port);
+ platform_set_drvdata(dev, sport);
+
+ return 0;
+
+ err_clk:
+ clk_put(sport->clk);
+ err_free:
+ kfree(sport);
+ return ret;
+}
+
+static int serial_pxa_remove(struct platform_device *dev)
+{
+ struct uart_pxa_port *sport = platform_get_drvdata(dev);
+
+ platform_set_drvdata(dev, NULL);
+
+ uart_remove_one_port(&serial_pxa_reg, &sport->port);
+ clk_put(sport->clk);
+ kfree(sport);
+
+ return 0;
+}
+
+static struct platform_driver serial_pxa_driver = {
+ .probe = serial_pxa_probe,
+ .remove = serial_pxa_remove,
+
+ .driver = {
+ .name = "pxa2xx-uart",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &serial_pxa_pm_ops,
+#endif
+ },
+};
+
+int __init serial_pxa_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&serial_pxa_reg);
+ if (ret != 0)
+ return ret;
+
+ ret = platform_driver_register(&serial_pxa_driver);
+ if (ret != 0)
+ uart_unregister_driver(&serial_pxa_reg);
+
+ return ret;
+}
+
+void __exit serial_pxa_exit(void)
+{
+ platform_driver_unregister(&serial_pxa_driver);
+ uart_unregister_driver(&serial_pxa_reg);
+}
+
+module_init(serial_pxa_init);
+module_exit(serial_pxa_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-uart");
diff --git a/drivers/tty/serial/s3c2400.c b/drivers/tty/serial/s3c2400.c
new file mode 100644
index 00000000..d13051b3
--- /dev/null
+++ b/drivers/tty/serial/s3c2400.c
@@ -0,0 +1,105 @@
+/*
+ * Driver for Samsung SoC onboard UARTs.
+ *
+ * Ben Dooks, Copyright (c) 2003-2005 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include <asm/irq.h>
+
+#include <mach/hardware.h>
+
+#include <plat/regs-serial.h>
+#include <mach/regs-gpio.h>
+
+#include "samsung.h"
+
+static int s3c2400_serial_getsource(struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clk)
+{
+ clk->divisor = 1;
+ clk->name = "pclk";
+
+ return 0;
+}
+
+static int s3c2400_serial_setsource(struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clk)
+{
+ return 0;
+}
+
+static int s3c2400_serial_resetport(struct uart_port *port,
+ struct s3c2410_uartcfg *cfg)
+{
+ dbg("s3c2400_serial_resetport: port=%p (%08lx), cfg=%p\n",
+ port, port->mapbase, cfg);
+
+ wr_regl(port, S3C2410_UCON, cfg->ucon);
+ wr_regl(port, S3C2410_ULCON, cfg->ulcon);
+
+ /* reset both fifos */
+
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon);
+
+ return 0;
+}
+
+static struct s3c24xx_uart_info s3c2400_uart_inf = {
+ .name = "Samsung S3C2400 UART",
+ .type = PORT_S3C2400,
+ .fifosize = 16,
+ .rx_fifomask = S3C2410_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2410_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2410_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2410_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT,
+ .get_clksrc = s3c2400_serial_getsource,
+ .set_clksrc = s3c2400_serial_setsource,
+ .reset_port = s3c2400_serial_resetport,
+};
+
+static int s3c2400_serial_probe(struct platform_device *dev)
+{
+ return s3c24xx_serial_probe(dev, &s3c2400_uart_inf);
+}
+
+static struct platform_driver s3c2400_serial_driver = {
+ .probe = s3c2400_serial_probe,
+ .remove = __devexit_p(s3c24xx_serial_remove),
+ .driver = {
+ .name = "s3c2400-uart",
+ .owner = THIS_MODULE,
+ },
+};
+
+s3c24xx_console_init(&s3c2400_serial_driver, &s3c2400_uart_inf);
+
+static inline int s3c2400_serial_init(void)
+{
+ return s3c24xx_serial_init(&s3c2400_serial_driver, &s3c2400_uart_inf);
+}
+
+static inline void s3c2400_serial_exit(void)
+{
+ platform_driver_unregister(&s3c2400_serial_driver);
+}
+
+module_init(s3c2400_serial_init);
+module_exit(s3c2400_serial_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("Samsung S3C2400 SoC Serial port driver");
+MODULE_ALIAS("platform:s3c2400-uart");
diff --git a/drivers/tty/serial/s3c2410.c b/drivers/tty/serial/s3c2410.c
new file mode 100644
index 00000000..bffe6ff9
--- /dev/null
+++ b/drivers/tty/serial/s3c2410.c
@@ -0,0 +1,117 @@
+/*
+ * Driver for Samsung S3C2410 SoC onboard UARTs.
+ *
+ * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+
+#include <asm/irq.h>
+#include <mach/hardware.h>
+
+#include <plat/regs-serial.h>
+#include <mach/regs-gpio.h>
+
+#include "samsung.h"
+
+static int s3c2410_serial_setsource(struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clk)
+{
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+
+ if (strcmp(clk->name, "uclk") == 0)
+ ucon |= S3C2410_UCON_UCLK;
+ else
+ ucon &= ~S3C2410_UCON_UCLK;
+
+ wr_regl(port, S3C2410_UCON, ucon);
+ return 0;
+}
+
+static int s3c2410_serial_getsource(struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clk)
+{
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+
+ clk->divisor = 1;
+ clk->name = (ucon & S3C2410_UCON_UCLK) ? "uclk" : "pclk";
+
+ return 0;
+}
+
+static int s3c2410_serial_resetport(struct uart_port *port,
+ struct s3c2410_uartcfg *cfg)
+{
+ dbg("s3c2410_serial_resetport: port=%p (%08lx), cfg=%p\n",
+ port, port->mapbase, cfg);
+
+ wr_regl(port, S3C2410_UCON, cfg->ucon);
+ wr_regl(port, S3C2410_ULCON, cfg->ulcon);
+
+ /* reset both fifos */
+
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon);
+
+ return 0;
+}
+
+static struct s3c24xx_uart_info s3c2410_uart_inf = {
+ .name = "Samsung S3C2410 UART",
+ .type = PORT_S3C2410,
+ .fifosize = 16,
+ .rx_fifomask = S3C2410_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2410_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2410_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2410_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT,
+ .get_clksrc = s3c2410_serial_getsource,
+ .set_clksrc = s3c2410_serial_setsource,
+ .reset_port = s3c2410_serial_resetport,
+};
+
+static int s3c2410_serial_probe(struct platform_device *dev)
+{
+ return s3c24xx_serial_probe(dev, &s3c2410_uart_inf);
+}
+
+static struct platform_driver s3c2410_serial_driver = {
+ .probe = s3c2410_serial_probe,
+ .remove = __devexit_p(s3c24xx_serial_remove),
+ .driver = {
+ .name = "s3c2410-uart",
+ .owner = THIS_MODULE,
+ },
+};
+
+s3c24xx_console_init(&s3c2410_serial_driver, &s3c2410_uart_inf);
+
+static int __init s3c2410_serial_init(void)
+{
+ return s3c24xx_serial_init(&s3c2410_serial_driver, &s3c2410_uart_inf);
+}
+
+static void __exit s3c2410_serial_exit(void)
+{
+ platform_driver_unregister(&s3c2410_serial_driver);
+}
+
+module_init(s3c2410_serial_init);
+module_exit(s3c2410_serial_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("Samsung S3C2410 SoC Serial port driver");
+MODULE_ALIAS("platform:s3c2410-uart");
diff --git a/drivers/tty/serial/s3c2412.c b/drivers/tty/serial/s3c2412.c
new file mode 100644
index 00000000..7e2b9504
--- /dev/null
+++ b/drivers/tty/serial/s3c2412.c
@@ -0,0 +1,151 @@
+/*
+ * Driver for Samsung S3C2412 and S3C2413 SoC onboard UARTs.
+ *
+ * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+
+#include <asm/irq.h>
+#include <mach/hardware.h>
+
+#include <plat/regs-serial.h>
+#include <mach/regs-gpio.h>
+
+#include "samsung.h"
+
+static int s3c2412_serial_setsource(struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clk)
+{
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+
+ ucon &= ~S3C2412_UCON_CLKMASK;
+
+ if (strcmp(clk->name, "uclk") == 0)
+ ucon |= S3C2440_UCON_UCLK;
+ else if (strcmp(clk->name, "pclk") == 0)
+ ucon |= S3C2440_UCON_PCLK;
+ else if (strcmp(clk->name, "usysclk") == 0)
+ ucon |= S3C2412_UCON_USYSCLK;
+ else {
+ printk(KERN_ERR "unknown clock source %s\n", clk->name);
+ return -EINVAL;
+ }
+
+ wr_regl(port, S3C2410_UCON, ucon);
+ return 0;
+}
+
+
+static int s3c2412_serial_getsource(struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clk)
+{
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+
+ switch (ucon & S3C2412_UCON_CLKMASK) {
+ case S3C2412_UCON_UCLK:
+ clk->divisor = 1;
+ clk->name = "uclk";
+ break;
+
+ case S3C2412_UCON_PCLK:
+ case S3C2412_UCON_PCLK2:
+ clk->divisor = 1;
+ clk->name = "pclk";
+ break;
+
+ case S3C2412_UCON_USYSCLK:
+ clk->divisor = 1;
+ clk->name = "usysclk";
+ break;
+ }
+
+ return 0;
+}
+
+static int s3c2412_serial_resetport(struct uart_port *port,
+ struct s3c2410_uartcfg *cfg)
+{
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+
+ dbg("%s: port=%p (%08lx), cfg=%p\n",
+ __func__, port, port->mapbase, cfg);
+
+ /* ensure we don't change the clock settings... */
+
+ ucon &= S3C2412_UCON_CLKMASK;
+
+ wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
+ wr_regl(port, S3C2410_ULCON, cfg->ulcon);
+
+ /* reset both fifos */
+
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon);
+
+ return 0;
+}
+
+static struct s3c24xx_uart_info s3c2412_uart_inf = {
+ .name = "Samsung S3C2412 UART",
+ .type = PORT_S3C2412,
+ .fifosize = 64,
+ .has_divslot = 1,
+ .rx_fifomask = S3C2440_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2440_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2440_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2440_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
+ .get_clksrc = s3c2412_serial_getsource,
+ .set_clksrc = s3c2412_serial_setsource,
+ .reset_port = s3c2412_serial_resetport,
+};
+
+/* device management */
+
+static int s3c2412_serial_probe(struct platform_device *dev)
+{
+ dbg("s3c2440_serial_probe: dev=%p\n", dev);
+ return s3c24xx_serial_probe(dev, &s3c2412_uart_inf);
+}
+
+static struct platform_driver s3c2412_serial_driver = {
+ .probe = s3c2412_serial_probe,
+ .remove = __devexit_p(s3c24xx_serial_remove),
+ .driver = {
+ .name = "s3c2412-uart",
+ .owner = THIS_MODULE,
+ },
+};
+
+s3c24xx_console_init(&s3c2412_serial_driver, &s3c2412_uart_inf);
+
+static inline int s3c2412_serial_init(void)
+{
+ return s3c24xx_serial_init(&s3c2412_serial_driver, &s3c2412_uart_inf);
+}
+
+static inline void s3c2412_serial_exit(void)
+{
+ platform_driver_unregister(&s3c2412_serial_driver);
+}
+
+module_init(s3c2412_serial_init);
+module_exit(s3c2412_serial_exit);
+
+MODULE_DESCRIPTION("Samsung S3C2412,S3C2413 SoC Serial port driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:s3c2412-uart");
diff --git a/drivers/tty/serial/s3c2440.c b/drivers/tty/serial/s3c2440.c
new file mode 100644
index 00000000..9e10d415
--- /dev/null
+++ b/drivers/tty/serial/s3c2440.c
@@ -0,0 +1,180 @@
+/*
+ * Driver for Samsung S3C2440 and S3C2442 SoC onboard UARTs.
+ *
+ * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+
+#include <asm/irq.h>
+#include <mach/hardware.h>
+
+#include <plat/regs-serial.h>
+#include <mach/regs-gpio.h>
+
+#include "samsung.h"
+
+
+static int s3c2440_serial_setsource(struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clk)
+{
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+
+ /* todo - proper fclk<>nonfclk switch. */
+
+ ucon &= ~S3C2440_UCON_CLKMASK;
+
+ if (strcmp(clk->name, "uclk") == 0)
+ ucon |= S3C2440_UCON_UCLK;
+ else if (strcmp(clk->name, "pclk") == 0)
+ ucon |= S3C2440_UCON_PCLK;
+ else if (strcmp(clk->name, "fclk") == 0)
+ ucon |= S3C2440_UCON_FCLK;
+ else {
+ printk(KERN_ERR "unknown clock source %s\n", clk->name);
+ return -EINVAL;
+ }
+
+ wr_regl(port, S3C2410_UCON, ucon);
+ return 0;
+}
+
+
+static int s3c2440_serial_getsource(struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clk)
+{
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+ unsigned long ucon0, ucon1, ucon2;
+
+ switch (ucon & S3C2440_UCON_CLKMASK) {
+ case S3C2440_UCON_UCLK:
+ clk->divisor = 1;
+ clk->name = "uclk";
+ break;
+
+ case S3C2440_UCON_PCLK:
+ case S3C2440_UCON_PCLK2:
+ clk->divisor = 1;
+ clk->name = "pclk";
+ break;
+
+ case S3C2440_UCON_FCLK:
+ /* the fun of calculating the uart divisors on
+ * the s3c2440 */
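+ /*
+ * The divider comes from whichever UART's UCON has a non-zero
+ * FCLK-divide field, converted to an absolute divisor by adding a
+ * per-register offset (6, 21 or 36); 9 is used as the fallback
+ * when none of the fields is programmed.
+ */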
+
+ ucon0 = __raw_readl(S3C24XX_VA_UART0 + S3C2410_UCON);
+ ucon1 = __raw_readl(S3C24XX_VA_UART1 + S3C2410_UCON);
+ ucon2 = __raw_readl(S3C24XX_VA_UART2 + S3C2410_UCON);
+
+ printk("ucons: %08lx, %08lx, %08lx\n", ucon0, ucon1, ucon2);
+
+ ucon0 &= S3C2440_UCON0_DIVMASK;
+ ucon1 &= S3C2440_UCON1_DIVMASK;
+ ucon2 &= S3C2440_UCON2_DIVMASK;
+
+ if (ucon0 != 0) {
+ clk->divisor = ucon0 >> S3C2440_UCON_DIVSHIFT;
+ clk->divisor += 6;
+ } else if (ucon1 != 0) {
+ clk->divisor = ucon1 >> S3C2440_UCON_DIVSHIFT;
+ clk->divisor += 21;
+ } else if (ucon2 != 0) {
+ clk->divisor = ucon2 >> S3C2440_UCON_DIVSHIFT;
+ clk->divisor += 36;
+ } else {
+ /* manual claims 44, seems to be 9 */
+ clk->divisor = 9;
+ }
+
+ clk->name = "fclk";
+ break;
+ }
+
+ return 0;
+}
+
+static int s3c2440_serial_resetport(struct uart_port *port,
+ struct s3c2410_uartcfg *cfg)
+{
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+
+ dbg("s3c2440_serial_resetport: port=%p (%08lx), cfg=%p\n",
+ port, port->mapbase, cfg);
+
+ /* ensure we don't change the clock settings... */
+
+ ucon &= (S3C2440_UCON0_DIVMASK | (3<<10));
+
+ wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
+ wr_regl(port, S3C2410_ULCON, cfg->ulcon);
+
+ /* reset both fifos */
+
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon);
+
+ return 0;
+}
+
+static struct s3c24xx_uart_info s3c2440_uart_inf = {
+ .name = "Samsung S3C2440 UART",
+ .type = PORT_S3C2440,
+ .fifosize = 64,
+ .rx_fifomask = S3C2440_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2440_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2440_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2440_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
+ .get_clksrc = s3c2440_serial_getsource,
+ .set_clksrc = s3c2440_serial_setsource,
+ .reset_port = s3c2440_serial_resetport,
+};
+
+/* device management */
+
+static int s3c2440_serial_probe(struct platform_device *dev)
+{
+ dbg("s3c2440_serial_probe: dev=%p\n", dev);
+ return s3c24xx_serial_probe(dev, &s3c2440_uart_inf);
+}
+
+static struct platform_driver s3c2440_serial_driver = {
+ .probe = s3c2440_serial_probe,
+ .remove = __devexit_p(s3c24xx_serial_remove),
+ .driver = {
+ .name = "s3c2440-uart",
+ .owner = THIS_MODULE,
+ },
+};
+
+s3c24xx_console_init(&s3c2440_serial_driver, &s3c2440_uart_inf);
+
+static int __init s3c2440_serial_init(void)
+{
+ return s3c24xx_serial_init(&s3c2440_serial_driver, &s3c2440_uart_inf);
+}
+
+static void __exit s3c2440_serial_exit(void)
+{
+ platform_driver_unregister(&s3c2440_serial_driver);
+}
+
+module_init(s3c2440_serial_init);
+module_exit(s3c2440_serial_exit);
+
+MODULE_DESCRIPTION("Samsung S3C2440,S3C2442 SoC Serial port driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:s3c2440-uart");
diff --git a/drivers/tty/serial/s3c24a0.c b/drivers/tty/serial/s3c24a0.c
new file mode 100644
index 00000000..914eff22
--- /dev/null
+++ b/drivers/tty/serial/s3c24a0.c
@@ -0,0 +1,117 @@
+/*
+ * Driver for Samsung S3C24A0 SoC onboard UARTs.
+ *
+ * Based on drivers/serial/s3c2410.c
+ *
+ * Author: Sandeep Patil <sandeep.patil@azingo.com>
+ *
+ * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+
+#include <mach/hardware.h>
+
+#include <plat/regs-serial.h>
+#include <mach/regs-gpio.h>
+
+#include "samsung.h"
+
+static int s3c24a0_serial_setsource(struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clk)
+{
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+
+ if (strcmp(clk->name, "uclk") == 0)
+ ucon |= S3C2410_UCON_UCLK;
+ else
+ ucon &= ~S3C2410_UCON_UCLK;
+
+ wr_regl(port, S3C2410_UCON, ucon);
+ return 0;
+}
+
+static int s3c24a0_serial_getsource(struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clk)
+{
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+
+ clk->divisor = 1;
+ clk->name = (ucon & S3C2410_UCON_UCLK) ? "uclk" : "pclk";
+
+ return 0;
+}
+
+static int s3c24a0_serial_resetport(struct uart_port *port,
+ struct s3c2410_uartcfg *cfg)
+{
+ dbg("s3c24a0_serial_resetport: port=%p (%08lx), cfg=%p\n",
+ port, port->mapbase, cfg);
+
+ wr_regl(port, S3C2410_UCON, cfg->ucon);
+ wr_regl(port, S3C2410_ULCON, cfg->ulcon);
+
+ /* reset both fifos */
+
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon);
+
+ return 0;
+}
+
+static struct s3c24xx_uart_info s3c24a0_uart_inf = {
+ .name = "Samsung S3C24A0 UART",
+ .type = PORT_S3C2410,
+ .fifosize = 16,
+ .rx_fifomask = S3C24A0_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C24A0_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C24A0_UFSTAT_RXFULL,
+ .tx_fifofull = S3C24A0_UFSTAT_TXFULL,
+ .tx_fifomask = S3C24A0_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C24A0_UFSTAT_TXSHIFT,
+ .get_clksrc = s3c24a0_serial_getsource,
+ .set_clksrc = s3c24a0_serial_setsource,
+ .reset_port = s3c24a0_serial_resetport,
+};
+
+static int s3c24a0_serial_probe(struct platform_device *dev)
+{
+ return s3c24xx_serial_probe(dev, &s3c24a0_uart_inf);
+}
+
+static struct platform_driver s3c24a0_serial_driver = {
+ .probe = s3c24a0_serial_probe,
+ .remove = __devexit_p(s3c24xx_serial_remove),
+ .driver = {
+ .name = "s3c24a0-uart",
+ .owner = THIS_MODULE,
+ },
+};
+
+s3c24xx_console_init(&s3c24a0_serial_driver, &s3c24a0_uart_inf);
+
+static int __init s3c24a0_serial_init(void)
+{
+ return s3c24xx_serial_init(&s3c24a0_serial_driver, &s3c24a0_uart_inf);
+}
+
+static void __exit s3c24a0_serial_exit(void)
+{
+ platform_driver_unregister(&s3c24a0_serial_driver);
+}
+
+module_init(s3c24a0_serial_init);
+module_exit(s3c24a0_serial_exit);
+
diff --git a/drivers/tty/serial/s3c6400.c b/drivers/tty/serial/s3c6400.c
new file mode 100644
index 00000000..ded26c42
--- /dev/null
+++ b/drivers/tty/serial/s3c6400.c
@@ -0,0 +1,151 @@
+/*
+ * Driver for Samsung S3C6400 and S3C6410 SoC onboard UARTs.
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+
+#include <asm/irq.h>
+#include <mach/hardware.h>
+
+#include <plat/regs-serial.h>
+
+#include "samsung.h"
+
+static int s3c6400_serial_setsource(struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clk)
+{
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+
+ if (strcmp(clk->name, "uclk0") == 0) {
+ ucon &= ~S3C6400_UCON_CLKMASK;
+ ucon |= S3C6400_UCON_UCLK0;
+ } else if (strcmp(clk->name, "uclk1") == 0)
+ ucon |= S3C6400_UCON_UCLK1;
+ else if (strcmp(clk->name, "pclk") == 0) {
+ /* See notes about transitioning from UCLK to PCLK */
+ ucon &= ~S3C6400_UCON_UCLK0;
+ } else {
+ printk(KERN_ERR "unknown clock source %s\n", clk->name);
+ return -EINVAL;
+ }
+
+ wr_regl(port, S3C2410_UCON, ucon);
+ return 0;
+}
+
+
+static int s3c6400_serial_getsource(struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clk)
+{
+ u32 ucon = rd_regl(port, S3C2410_UCON);
+
+ clk->divisor = 1;
+
+ switch (ucon & S3C6400_UCON_CLKMASK) {
+ case S3C6400_UCON_UCLK0:
+ clk->name = "uclk0";
+ break;
+
+ case S3C6400_UCON_UCLK1:
+ clk->name = "uclk1";
+ break;
+
+ case S3C6400_UCON_PCLK:
+ case S3C6400_UCON_PCLK2:
+ clk->name = "pclk";
+ break;
+ }
+
+ return 0;
+}
+
+static int s3c6400_serial_resetport(struct uart_port *port,
+ struct s3c2410_uartcfg *cfg)
+{
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+
+ dbg("s3c6400_serial_resetport: port=%p (%08lx), cfg=%p\n",
+ port, port->mapbase, cfg);
+
+ /* ensure we don't change the clock settings... */
+
+ ucon &= S3C6400_UCON_CLKMASK;
+
+ wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
+ wr_regl(port, S3C2410_ULCON, cfg->ulcon);
+
+ /* reset both fifos */
+
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon);
+
+ return 0;
+}
+
+static struct s3c24xx_uart_info s3c6400_uart_inf = {
+ .name = "Samsung S3C6400 UART",
+ .type = PORT_S3C6400,
+ .fifosize = 64,
+ .has_divslot = 1,
+ .rx_fifomask = S3C2440_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2440_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2440_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2440_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
+ .get_clksrc = s3c6400_serial_getsource,
+ .set_clksrc = s3c6400_serial_setsource,
+ .reset_port = s3c6400_serial_resetport,
+};
+
+/* device management */
+
+static int s3c6400_serial_probe(struct platform_device *dev)
+{
+ dbg("s3c6400_serial_probe: dev=%p\n", dev);
+ return s3c24xx_serial_probe(dev, &s3c6400_uart_inf);
+}
+
+static struct platform_driver s3c6400_serial_driver = {
+ .probe = s3c6400_serial_probe,
+ .remove = __devexit_p(s3c24xx_serial_remove),
+ .driver = {
+ .name = "s3c6400-uart",
+ .owner = THIS_MODULE,
+ },
+};
+
+s3c24xx_console_init(&s3c6400_serial_driver, &s3c6400_uart_inf);
+
+static int __init s3c6400_serial_init(void)
+{
+ return s3c24xx_serial_init(&s3c6400_serial_driver, &s3c6400_uart_inf);
+}
+
+static void __exit s3c6400_serial_exit(void)
+{
+ platform_driver_unregister(&s3c6400_serial_driver);
+}
+
+module_init(s3c6400_serial_init);
+module_exit(s3c6400_serial_exit);
+
+MODULE_DESCRIPTION("Samsung S3C6400,S3C6410 SoC Serial port driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:s3c6400-uart");
diff --git a/drivers/tty/serial/s5pv210.c b/drivers/tty/serial/s5pv210.c
new file mode 100644
index 00000000..dd194dc8
--- /dev/null
+++ b/drivers/tty/serial/s5pv210.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Based on drivers/serial/s3c6400.c
+ *
+ * Driver for Samsung S5PV210 SoC UARTs.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+
+#include <asm/irq.h>
+#include <mach/hardware.h>
+#include <plat/regs-serial.h>
+#include "samsung.h"
+
+static int s5pv210_serial_setsource(struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clk)
+{
+ struct s3c2410_uartcfg *cfg = port->dev->platform_data;
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+
+ if (cfg->flags & NO_NEED_CHECK_CLKSRC)
+ return 0;
+
+ if (strcmp(clk->name, "pclk") == 0)
+ ucon &= ~S5PV210_UCON_CLKMASK;
+ else if (strcmp(clk->name, "uclk1") == 0)
+ ucon |= S5PV210_UCON_CLKMASK;
+ else {
+ printk(KERN_ERR "unknown clock source %s\n", clk->name);
+ return -EINVAL;
+ }
+
+ wr_regl(port, S3C2410_UCON, ucon);
+ return 0;
+}
+
+
+static int s5pv210_serial_getsource(struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clk)
+{
+ struct s3c2410_uartcfg *cfg = port->dev->platform_data;
+ u32 ucon = rd_regl(port, S3C2410_UCON);
+
+ clk->divisor = 1;
+
+ if (cfg->flags & NO_NEED_CHECK_CLKSRC)
+ return 0;
+
+ switch (ucon & S5PV210_UCON_CLKMASK) {
+ case S5PV210_UCON_PCLK:
+ clk->name = "pclk";
+ break;
+ case S5PV210_UCON_UCLK:
+ clk->name = "uclk1";
+ break;
+ }
+
+ return 0;
+}
+
+static int s5pv210_serial_resetport(struct uart_port *port,
+ struct s3c2410_uartcfg *cfg)
+{
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+
+ ucon &= S5PV210_UCON_CLKMASK;
+ wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
+ wr_regl(port, S3C2410_ULCON, cfg->ulcon);
+
+ /* reset both fifos */
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon);
+
+ return 0;
+}
+
+#define S5PV210_UART_DEFAULT_INFO(fifo_size) \
+ .name = "Samsung S5PV210 UART0", \
+ .type = PORT_S3C6400, \
+ .fifosize = fifo_size, \
+ .has_divslot = 1, \
+ .rx_fifomask = S5PV210_UFSTAT_RXMASK, \
+ .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \
+ .rx_fifofull = S5PV210_UFSTAT_RXFULL, \
+ .tx_fifofull = S5PV210_UFSTAT_TXFULL, \
+ .tx_fifomask = S5PV210_UFSTAT_TXMASK, \
+ .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, \
+ .get_clksrc = s5pv210_serial_getsource, \
+ .set_clksrc = s5pv210_serial_setsource, \
+ .reset_port = s5pv210_serial_resetport
+
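+/*
+ * S5PV210_UART_DEFAULT_INFO() carries the initialisers shared by all
+ * ports; only the FIFO depth differs, so each s3c24xx_uart_info below
+ * simply passes its size (256, 64 or 16 bytes).
+ */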
+static struct s3c24xx_uart_info s5p_port_fifo256 = {
+ S5PV210_UART_DEFAULT_INFO(256),
+};
+
+static struct s3c24xx_uart_info s5p_port_fifo64 = {
+ S5PV210_UART_DEFAULT_INFO(64),
+};
+
+static struct s3c24xx_uart_info s5p_port_fifo16 = {
+ S5PV210_UART_DEFAULT_INFO(16),
+};
+
+static struct s3c24xx_uart_info *s5p_uart_inf[] = {
+ [0] = &s5p_port_fifo256,
+ [1] = &s5p_port_fifo64,
+ [2] = &s5p_port_fifo16,
+ [3] = &s5p_port_fifo16,
+};
+
+/* device management */
+static int s5p_serial_probe(struct platform_device *pdev)
+{
+ return s3c24xx_serial_probe(pdev, s5p_uart_inf[pdev->id]);
+}
+
+static struct platform_driver s5p_serial_driver = {
+ .probe = s5p_serial_probe,
+ .remove = __devexit_p(s3c24xx_serial_remove),
+ .driver = {
+ .name = "s5pv210-uart",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init s5pv210_serial_console_init(void)
+{
+ return s3c24xx_serial_initconsole(&s5p_serial_driver, s5p_uart_inf);
+}
+
+console_initcall(s5pv210_serial_console_init);
+
+static int __init s5p_serial_init(void)
+{
+ return s3c24xx_serial_init(&s5p_serial_driver, *s5p_uart_inf);
+}
+
+static void __exit s5p_serial_exit(void)
+{
+ platform_driver_unregister(&s5p_serial_driver);
+}
+
+module_init(s5p_serial_init);
+module_exit(s5p_serial_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s5pv210-uart");
+MODULE_DESCRIPTION("Samsung S5PV210 UART Driver support");
+MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>");
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
new file mode 100644
index 00000000..ef7a21a6
--- /dev/null
+++ b/drivers/tty/serial/sa1100.c
@@ -0,0 +1,916 @@
+/*
+ * Driver for SA11x0 serial ports
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#if defined(CONFIG_SERIAL_SA1100_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+#include <mach/hardware.h>
+#include <asm/mach/serial_sa1100.h>
+
+/* We've been assigned a range on the "Low-density serial ports" major */
+#define SERIAL_SA1100_MAJOR 204
+#define MINOR_START 5
+
+#define NR_PORTS 3
+
+#define SA1100_ISR_PASS_LIMIT 256
+
+/*
+ * Convert from ignore_status_mask or read_status_mask to UTSR[01]
+ */
+#define SM_TO_UTSR0(x) ((x) & 0xff)
+#define SM_TO_UTSR1(x) ((x) >> 8)
+#define UTSR0_TO_SM(x) ((x))
+#define UTSR1_TO_SM(x) ((x) << 8)
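+
+/*
+ * Illustrative note (not part of the original driver): the combined
+ * status mask keeps UTSR0 bits in bits 7..0 and UTSR1 bits in bits
+ * 15..8, so e.g. UTSR1_TO_SM(UTSR1_ROR) places the receiver-overrun
+ * flag in the high byte of read_status_mask.
+ */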
+
+#define UART_GET_UTCR0(sport) __raw_readl((sport)->port.membase + UTCR0)
+#define UART_GET_UTCR1(sport) __raw_readl((sport)->port.membase + UTCR1)
+#define UART_GET_UTCR2(sport) __raw_readl((sport)->port.membase + UTCR2)
+#define UART_GET_UTCR3(sport) __raw_readl((sport)->port.membase + UTCR3)
+#define UART_GET_UTSR0(sport) __raw_readl((sport)->port.membase + UTSR0)
+#define UART_GET_UTSR1(sport) __raw_readl((sport)->port.membase + UTSR1)
+#define UART_GET_CHAR(sport) __raw_readl((sport)->port.membase + UTDR)
+
+#define UART_PUT_UTCR0(sport,v) __raw_writel((v),(sport)->port.membase + UTCR0)
+#define UART_PUT_UTCR1(sport,v) __raw_writel((v),(sport)->port.membase + UTCR1)
+#define UART_PUT_UTCR2(sport,v) __raw_writel((v),(sport)->port.membase + UTCR2)
+#define UART_PUT_UTCR3(sport,v) __raw_writel((v),(sport)->port.membase + UTCR3)
+#define UART_PUT_UTSR0(sport,v) __raw_writel((v),(sport)->port.membase + UTSR0)
+#define UART_PUT_UTSR1(sport,v) __raw_writel((v),(sport)->port.membase + UTSR1)
+#define UART_PUT_CHAR(sport,v) __raw_writel((v),(sport)->port.membase + UTDR)
+
+/*
+ * This is the size of our serial port register set.
+ */
+#define UART_PORT_SIZE 0x24
+
+/*
+ * This determines how often we check the modem status signals
+ * for any change. They generally aren't connected to an IRQ
+ * so we have to poll them. We also check immediately before
+ * filling the TX fifo in case CTS has been dropped.
+ */
+#define MCTRL_TIMEOUT (250*HZ/1000)
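+
+/*
+ * Illustrative note (not part of the original driver): 250*HZ/1000 is
+ * simply 250ms expressed in jiffies, e.g. 25 jiffies when HZ=100.
+ */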
+
+struct sa1100_port {
+ struct uart_port port;
+ struct timer_list timer;
+ unsigned int old_status;
+};
+
+/*
+ * Handle any change of modem status signal since we were last called.
+ */
+static void sa1100_mctrl_check(struct sa1100_port *sport)
+{
+ unsigned int status, changed;
+
+ status = sport->port.ops->get_mctrl(&sport->port);
+ changed = status ^ sport->old_status;
+
+ if (changed == 0)
+ return;
+
+ sport->old_status = status;
+
+ if (changed & TIOCM_RI)
+ sport->port.icount.rng++;
+ if (changed & TIOCM_DSR)
+ sport->port.icount.dsr++;
+ if (changed & TIOCM_CAR)
+ uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
+ if (changed & TIOCM_CTS)
+ uart_handle_cts_change(&sport->port, status & TIOCM_CTS);
+
+ wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
+}
+
+/*
+ * This is our per-port timeout handler, for checking the
+ * modem status signals.
+ */
+static void sa1100_timeout(unsigned long data)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)data;
+ unsigned long flags;
+
+ if (sport->port.state) {
+ spin_lock_irqsave(&sport->port.lock, flags);
+ sa1100_mctrl_check(sport);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
+ }
+}
+
+/*
+ * interrupts disabled on entry
+ */
+static void sa1100_stop_tx(struct uart_port *port)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+ u32 utcr3;
+
+ utcr3 = UART_GET_UTCR3(sport);
+ UART_PUT_UTCR3(sport, utcr3 & ~UTCR3_TIE);
+ sport->port.read_status_mask &= ~UTSR0_TO_SM(UTSR0_TFS);
+}
+
+/*
+ * port locked and interrupts disabled
+ */
+static void sa1100_start_tx(struct uart_port *port)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+ u32 utcr3;
+
+ utcr3 = UART_GET_UTCR3(sport);
+ sport->port.read_status_mask |= UTSR0_TO_SM(UTSR0_TFS);
+ UART_PUT_UTCR3(sport, utcr3 | UTCR3_TIE);
+}
+
+/*
+ * Interrupts enabled
+ */
+static void sa1100_stop_rx(struct uart_port *port)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+ u32 utcr3;
+
+ utcr3 = UART_GET_UTCR3(sport);
+ UART_PUT_UTCR3(sport, utcr3 & ~UTCR3_RIE);
+}
+
+/*
+ * Set the modem control timer to fire immediately.
+ */
+static void sa1100_enable_ms(struct uart_port *port)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+
+ mod_timer(&sport->timer, jiffies);
+}
+
+static void
+sa1100_rx_chars(struct sa1100_port *sport)
+{
+ struct tty_struct *tty = sport->port.state->port.tty;
+ unsigned int status, ch, flg;
+
+ status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
+ UTSR0_TO_SM(UART_GET_UTSR0(sport));
+ while (status & UTSR1_TO_SM(UTSR1_RNE)) {
+ ch = UART_GET_CHAR(sport);
+
+ sport->port.icount.rx++;
+
+ flg = TTY_NORMAL;
+
+ /*
+ * note that the error handling code is
+ * out of the main execution path
+ */
+ if (status & UTSR1_TO_SM(UTSR1_PRE | UTSR1_FRE | UTSR1_ROR)) {
+ if (status & UTSR1_TO_SM(UTSR1_PRE))
+ sport->port.icount.parity++;
+ else if (status & UTSR1_TO_SM(UTSR1_FRE))
+ sport->port.icount.frame++;
+ if (status & UTSR1_TO_SM(UTSR1_ROR))
+ sport->port.icount.overrun++;
+
+ status &= sport->port.read_status_mask;
+
+ if (status & UTSR1_TO_SM(UTSR1_PRE))
+ flg = TTY_PARITY;
+ else if (status & UTSR1_TO_SM(UTSR1_FRE))
+ flg = TTY_FRAME;
+
+#ifdef SUPPORT_SYSRQ
+ sport->port.sysrq = 0;
+#endif
+ }
+
+ if (uart_handle_sysrq_char(&sport->port, ch))
+ goto ignore_char;
+
+ uart_insert_char(&sport->port, status, UTSR1_TO_SM(UTSR1_ROR), ch, flg);
+
+ ignore_char:
+ status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
+ UTSR0_TO_SM(UART_GET_UTSR0(sport));
+ }
+ tty_flip_buffer_push(tty);
+}
+
+static void sa1100_tx_chars(struct sa1100_port *sport)
+{
+ struct circ_buf *xmit = &sport->port.state->xmit;
+
+ if (sport->port.x_char) {
+ UART_PUT_CHAR(sport, sport->port.x_char);
+ sport->port.icount.tx++;
+ sport->port.x_char = 0;
+ return;
+ }
+
+ /*
+ * Check the modem control lines before
+ * transmitting anything.
+ */
+ sa1100_mctrl_check(sport);
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
+ sa1100_stop_tx(&sport->port);
+ return;
+ }
+
+ /*
+ * Tried using FIFO (not checking TNF) for fifo fill:
+ * still had the '4 bytes repeated' problem.
+ */
+ while (UART_GET_UTSR1(sport) & UTSR1_TNF) {
+ UART_PUT_CHAR(sport, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ sport->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+
+ if (uart_circ_empty(xmit))
+ sa1100_stop_tx(&sport->port);
+}
+
+static irqreturn_t sa1100_int(int irq, void *dev_id)
+{
+ struct sa1100_port *sport = dev_id;
+ unsigned int status, pass_counter = 0;
+
+ spin_lock(&sport->port.lock);
+ status = UART_GET_UTSR0(sport);
+ status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS;
+ do {
+ if (status & (UTSR0_RFS | UTSR0_RID)) {
+ /* Clear the receiver idle bit, if set */
+ if (status & UTSR0_RID)
+ UART_PUT_UTSR0(sport, UTSR0_RID);
+ sa1100_rx_chars(sport);
+ }
+
+ /* Clear the relevant break bits */
+ if (status & (UTSR0_RBB | UTSR0_REB))
+ UART_PUT_UTSR0(sport, status & (UTSR0_RBB | UTSR0_REB));
+
+ if (status & UTSR0_RBB)
+ sport->port.icount.brk++;
+
+ if (status & UTSR0_REB)
+ uart_handle_break(&sport->port);
+
+ if (status & UTSR0_TFS)
+ sa1100_tx_chars(sport);
+ if (pass_counter++ > SA1100_ISR_PASS_LIMIT)
+ break;
+ status = UART_GET_UTSR0(sport);
+ status &= SM_TO_UTSR0(sport->port.read_status_mask) |
+ ~UTSR0_TFS;
+ } while (status & (UTSR0_TFS | UTSR0_RFS | UTSR0_RID));
+ spin_unlock(&sport->port.lock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Return TIOCSER_TEMT when transmitter is not busy.
+ */
+static unsigned int sa1100_tx_empty(struct uart_port *port)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+
+ return UART_GET_UTSR1(sport) & UTSR1_TBY ? 0 : TIOCSER_TEMT;
+}
+
+static unsigned int sa1100_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+}
+
+static void sa1100_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+/*
+ * Interrupts always disabled.
+ */
+static void sa1100_break_ctl(struct uart_port *port, int break_state)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+ unsigned long flags;
+ unsigned int utcr3;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ utcr3 = UART_GET_UTCR3(sport);
+ if (break_state == -1)
+ utcr3 |= UTCR3_BRK;
+ else
+ utcr3 &= ~UTCR3_BRK;
+ UART_PUT_UTCR3(sport, utcr3);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static int sa1100_startup(struct uart_port *port)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+ int retval;
+
+ /*
+ * Allocate the IRQ
+ */
+ retval = request_irq(sport->port.irq, sa1100_int, 0,
+ "sa11x0-uart", sport);
+ if (retval)
+ return retval;
+
+ /*
+ * Finally, clear and enable interrupts
+ */
+ UART_PUT_UTSR0(sport, -1);
+ UART_PUT_UTCR3(sport, UTCR3_RXE | UTCR3_TXE | UTCR3_RIE);
+
+ /*
+ * Enable modem status interrupts
+ */
+ spin_lock_irq(&sport->port.lock);
+ sa1100_enable_ms(&sport->port);
+ spin_unlock_irq(&sport->port.lock);
+
+ return 0;
+}
+
+static void sa1100_shutdown(struct uart_port *port)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+
+ /*
+ * Stop our timer.
+ */
+ del_timer_sync(&sport->timer);
+
+ /*
+ * Free the interrupt
+ */
+ free_irq(sport->port.irq, sport);
+
+ /*
+ * Disable all interrupts, port and break condition.
+ */
+ UART_PUT_UTCR3(sport, 0);
+}
+
+static void
+sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+ unsigned long flags;
+ unsigned int utcr0, old_utcr3, baud, quot;
+ unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
+
+ /*
+ * We only support CS7 and CS8.
+ */
+ while ((termios->c_cflag & CSIZE) != CS7 &&
+ (termios->c_cflag & CSIZE) != CS8) {
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= old_csize;
+ old_csize = CS8;
+ }
+
+ if ((termios->c_cflag & CSIZE) == CS8)
+ utcr0 = UTCR0_DSS;
+ else
+ utcr0 = 0;
+
+ if (termios->c_cflag & CSTOPB)
+ utcr0 |= UTCR0_SBS;
+ if (termios->c_cflag & PARENB) {
+ utcr0 |= UTCR0_PE;
+ if (!(termios->c_cflag & PARODD))
+ utcr0 |= UTCR0_OES;
+ }
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+ quot = uart_get_divisor(port, baud);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
+ sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR);
+ if (termios->c_iflag & INPCK)
+ sport->port.read_status_mask |=
+ UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE);
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ sport->port.read_status_mask |=
+ UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB);
+
+ /*
+ * Characters to ignore
+ */
+ sport->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ sport->port.ignore_status_mask |=
+ UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE);
+ if (termios->c_iflag & IGNBRK) {
+ sport->port.ignore_status_mask |=
+ UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB);
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ sport->port.ignore_status_mask |=
+ UTSR1_TO_SM(UTSR1_ROR);
+ }
+
+ del_timer_sync(&sport->timer);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ /*
+ * disable interrupts and drain transmitter
+ */
+ old_utcr3 = UART_GET_UTCR3(sport);
+ UART_PUT_UTCR3(sport, old_utcr3 & ~(UTCR3_RIE | UTCR3_TIE));
+
+ while (UART_GET_UTSR1(sport) & UTSR1_TBY)
+ barrier();
+
+ /* then, disable everything */
+ UART_PUT_UTCR3(sport, 0);
+
+ /* set the parity, stop bits and data size */
+ UART_PUT_UTCR0(sport, utcr0);
+
+ /* set the baud rate */
+ quot -= 1;
+ UART_PUT_UTCR1(sport, ((quot & 0xf00) >> 8));
+ UART_PUT_UTCR2(sport, (quot & 0xff));
+
+ UART_PUT_UTSR0(sport, -1);
+
+ UART_PUT_UTCR3(sport, old_utcr3);
+
+ if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
+ sa1100_enable_ms(&sport->port);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
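+
+/*
+ * Illustrative note (not part of the original driver): with the
+ * 3.6864MHz uartclk set in sa1100_init_ports() below, asking for
+ * 9600 baud gives quot = 3686400 / (16 * 9600) = 24; after the
+ * "quot -= 1" above, UTCR1 is written with 0x00 and UTCR2 with 0x17.
+ */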
+
+static const char *sa1100_type(struct uart_port *port)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+
+ return sport->port.type == PORT_SA1100 ? "SA1100" : NULL;
+}
+
+/*
+ * Release the memory region(s) being used by 'port'.
+ */
+static void sa1100_release_port(struct uart_port *port)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+
+ release_mem_region(sport->port.mapbase, UART_PORT_SIZE);
+}
+
+/*
+ * Request the memory region(s) being used by 'port'.
+ */
+static int sa1100_request_port(struct uart_port *port)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+
+ return request_mem_region(sport->port.mapbase, UART_PORT_SIZE,
+ "sa11x0-uart") != NULL ? 0 : -EBUSY;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void sa1100_config_port(struct uart_port *port, int flags)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+
+ if (flags & UART_CONFIG_TYPE &&
+ sa1100_request_port(&sport->port) == 0)
+ sport->port.type = PORT_SA1100;
+}
+
+/*
+ * Verify the new serial_struct (for TIOCSSERIAL).
+ * The only change we allow are to the flags and type, and
+ * even then only between PORT_SA1100 and PORT_UNKNOWN
+ */
+static int
+sa1100_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+ int ret = 0;
+
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_SA1100)
+ ret = -EINVAL;
+ if (sport->port.irq != ser->irq)
+ ret = -EINVAL;
+ if (ser->io_type != SERIAL_IO_MEM)
+ ret = -EINVAL;
+ if (sport->port.uartclk / 16 != ser->baud_base)
+ ret = -EINVAL;
+ if ((void *)sport->port.mapbase != ser->iomem_base)
+ ret = -EINVAL;
+ if (sport->port.iobase != ser->port)
+ ret = -EINVAL;
+ if (ser->hub6 != 0)
+ ret = -EINVAL;
+ return ret;
+}
+
+static struct uart_ops sa1100_pops = {
+ .tx_empty = sa1100_tx_empty,
+ .set_mctrl = sa1100_set_mctrl,
+ .get_mctrl = sa1100_get_mctrl,
+ .stop_tx = sa1100_stop_tx,
+ .start_tx = sa1100_start_tx,
+ .stop_rx = sa1100_stop_rx,
+ .enable_ms = sa1100_enable_ms,
+ .break_ctl = sa1100_break_ctl,
+ .startup = sa1100_startup,
+ .shutdown = sa1100_shutdown,
+ .set_termios = sa1100_set_termios,
+ .type = sa1100_type,
+ .release_port = sa1100_release_port,
+ .request_port = sa1100_request_port,
+ .config_port = sa1100_config_port,
+ .verify_port = sa1100_verify_port,
+};
+
+static struct sa1100_port sa1100_ports[NR_PORTS];
+
+/*
+ * Setup the SA1100 serial ports. Note that we don't include the IrDA
+ * port here since we have our own SIR/FIR driver (see drivers/net/irda)
+ *
+ * Note also that we support "console=ttySAx" where "x" is either 0 or 1.
+ * Which serial port this ends up being depends on the machine you're
+ * running this kernel on. I'm not convinced that this is a good idea,
+ * but that's the way it traditionally works.
+ *
+ * Note that NanoEngine UART3 becomes UART2, and UART2 is no longer
+ * used here.
+ */
+static void __init sa1100_init_ports(void)
+{
+ static int first = 1;
+ int i;
+
+ if (!first)
+ return;
+ first = 0;
+
+ for (i = 0; i < NR_PORTS; i++) {
+ sa1100_ports[i].port.uartclk = 3686400;
+ sa1100_ports[i].port.ops = &sa1100_pops;
+ sa1100_ports[i].port.fifosize = 8;
+ sa1100_ports[i].port.line = i;
+ sa1100_ports[i].port.iotype = UPIO_MEM;
+ init_timer(&sa1100_ports[i].timer);
+ sa1100_ports[i].timer.function = sa1100_timeout;
+ sa1100_ports[i].timer.data = (unsigned long)&sa1100_ports[i];
+ }
+
+ /*
+ * make transmit lines outputs, so that when the port
+ * is closed, the output is in the MARK state.
+ */
+ PPDR |= PPC_TXD1 | PPC_TXD3;
+ PPSR |= PPC_TXD1 | PPC_TXD3;
+}
+
+void __devinit sa1100_register_uart_fns(struct sa1100_port_fns *fns)
+{
+ if (fns->get_mctrl)
+ sa1100_pops.get_mctrl = fns->get_mctrl;
+ if (fns->set_mctrl)
+ sa1100_pops.set_mctrl = fns->set_mctrl;
+
+ sa1100_pops.pm = fns->pm;
+ sa1100_pops.set_wake = fns->set_wake;
+}
+
+void __init sa1100_register_uart(int idx, int port)
+{
+ if (idx >= NR_PORTS) {
+ printk(KERN_ERR "%s: bad index number %d\n", __func__, idx);
+ return;
+ }
+
+ switch (port) {
+ case 1:
+ sa1100_ports[idx].port.membase = (void __iomem *)&Ser1UTCR0;
+ sa1100_ports[idx].port.mapbase = _Ser1UTCR0;
+ sa1100_ports[idx].port.irq = IRQ_Ser1UART;
+ sa1100_ports[idx].port.flags = UPF_BOOT_AUTOCONF;
+ break;
+
+ case 2:
+ sa1100_ports[idx].port.membase = (void __iomem *)&Ser2UTCR0;
+ sa1100_ports[idx].port.mapbase = _Ser2UTCR0;
+ sa1100_ports[idx].port.irq = IRQ_Ser2ICP;
+ sa1100_ports[idx].port.flags = UPF_BOOT_AUTOCONF;
+ break;
+
+ case 3:
+ sa1100_ports[idx].port.membase = (void __iomem *)&Ser3UTCR0;
+ sa1100_ports[idx].port.mapbase = _Ser3UTCR0;
+ sa1100_ports[idx].port.irq = IRQ_Ser3UART;
+ sa1100_ports[idx].port.flags = UPF_BOOT_AUTOCONF;
+ break;
+
+ default:
+ printk(KERN_ERR "%s: bad port number %d\n", __func__, port);
+ }
+}
+
+
+#ifdef CONFIG_SERIAL_SA1100_CONSOLE
+static void sa1100_console_putchar(struct uart_port *port, int ch)
+{
+ struct sa1100_port *sport = (struct sa1100_port *)port;
+
+ while (!(UART_GET_UTSR1(sport) & UTSR1_TNF))
+ barrier();
+ UART_PUT_CHAR(sport, ch);
+}
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+sa1100_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct sa1100_port *sport = &sa1100_ports[co->index];
+ unsigned int old_utcr3, status;
+
+ /*
+ * First, save UTCR3 and then disable interrupts
+ */
+ old_utcr3 = UART_GET_UTCR3(sport);
+ UART_PUT_UTCR3(sport, (old_utcr3 & ~(UTCR3_RIE | UTCR3_TIE)) |
+ UTCR3_TXE);
+
+ uart_console_write(&sport->port, s, count, sa1100_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore UTCR3
+ */
+ do {
+ status = UART_GET_UTSR1(sport);
+ } while (status & UTSR1_TBY);
+ UART_PUT_UTCR3(sport, old_utcr3);
+}
+
+/*
+ * If the port was already initialised (eg, by a boot loader),
+ * try to determine the current setup.
+ */
+static void __init
+sa1100_console_get_options(struct sa1100_port *sport, int *baud,
+ int *parity, int *bits)
+{
+ unsigned int utcr3;
+
+ utcr3 = UART_GET_UTCR3(sport) & (UTCR3_RXE | UTCR3_TXE);
+ if (utcr3 == (UTCR3_RXE | UTCR3_TXE)) {
+ /* ok, the port was enabled */
+ unsigned int utcr0, quot;
+
+ utcr0 = UART_GET_UTCR0(sport);
+
+ *parity = 'n';
+ if (utcr0 & UTCR0_PE) {
+ if (utcr0 & UTCR0_OES)
+ *parity = 'e';
+ else
+ *parity = 'o';
+ }
+
+ if (utcr0 & UTCR0_DSS)
+ *bits = 8;
+ else
+ *bits = 7;
+
+ quot = UART_GET_UTCR2(sport) | UART_GET_UTCR1(sport) << 8;
+ quot &= 0xfff;
+ *baud = sport->port.uartclk / (16 * (quot + 1));
+ }
+}
+
+static int __init
+sa1100_console_setup(struct console *co, char *options)
+{
+ struct sa1100_port *sport;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index == -1 || co->index >= NR_PORTS)
+ co->index = 0;
+ sport = &sa1100_ports[co->index];
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ sa1100_console_get_options(sport, &baud, &parity, &bits);
+
+ return uart_set_options(&sport->port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver sa1100_reg;
+static struct console sa1100_console = {
+ .name = "ttySA",
+ .write = sa1100_console_write,
+ .device = uart_console_device,
+ .setup = sa1100_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sa1100_reg,
+};
+
+static int __init sa1100_rs_console_init(void)
+{
+ sa1100_init_ports();
+ register_console(&sa1100_console);
+ return 0;
+}
+console_initcall(sa1100_rs_console_init);
+
+#define SA1100_CONSOLE &sa1100_console
+#else
+#define SA1100_CONSOLE NULL
+#endif
+
+static struct uart_driver sa1100_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "ttySA",
+ .dev_name = "ttySA",
+ .major = SERIAL_SA1100_MAJOR,
+ .minor = MINOR_START,
+ .nr = NR_PORTS,
+ .cons = SA1100_CONSOLE,
+};
+
+static int sa1100_serial_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct sa1100_port *sport = platform_get_drvdata(dev);
+
+ if (sport)
+ uart_suspend_port(&sa1100_reg, &sport->port);
+
+ return 0;
+}
+
+static int sa1100_serial_resume(struct platform_device *dev)
+{
+ struct sa1100_port *sport = platform_get_drvdata(dev);
+
+ if (sport)
+ uart_resume_port(&sa1100_reg, &sport->port);
+
+ return 0;
+}
+
+static int sa1100_serial_probe(struct platform_device *dev)
+{
+ struct resource *res = dev->resource;
+ int i;
+
+ for (i = 0; i < dev->num_resources; i++, res++)
+ if (res->flags & IORESOURCE_MEM)
+ break;
+
+ if (i < dev->num_resources) {
+ for (i = 0; i < NR_PORTS; i++) {
+ if (sa1100_ports[i].port.mapbase != res->start)
+ continue;
+
+ sa1100_ports[i].port.dev = &dev->dev;
+ uart_add_one_port(&sa1100_reg, &sa1100_ports[i].port);
+ platform_set_drvdata(dev, &sa1100_ports[i]);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int sa1100_serial_remove(struct platform_device *pdev)
+{
+ struct sa1100_port *sport = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (sport)
+ uart_remove_one_port(&sa1100_reg, &sport->port);
+
+ return 0;
+}
+
+static struct platform_driver sa11x0_serial_driver = {
+ .probe = sa1100_serial_probe,
+ .remove = sa1100_serial_remove,
+ .suspend = sa1100_serial_suspend,
+ .resume = sa1100_serial_resume,
+ .driver = {
+ .name = "sa11x0-uart",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sa1100_serial_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Serial: SA11x0 driver\n");
+
+ sa1100_init_ports();
+
+ ret = uart_register_driver(&sa1100_reg);
+ if (ret == 0) {
+ ret = platform_driver_register(&sa11x0_serial_driver);
+ if (ret)
+ uart_unregister_driver(&sa1100_reg);
+ }
+ return ret;
+}
+
+static void __exit sa1100_serial_exit(void)
+{
+ platform_driver_unregister(&sa11x0_serial_driver);
+ uart_unregister_driver(&sa1100_reg);
+}
+
+module_init(sa1100_serial_init);
+module_exit(sa1100_serial_exit);
+
+MODULE_AUTHOR("Deep Blue Solutions Ltd");
+MODULE_DESCRIPTION("SA1100 generic serial port driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_SA1100_MAJOR);
+MODULE_ALIAS("platform:sa11x0-uart");
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
new file mode 100644
index 00000000..f66f6482
--- /dev/null
+++ b/drivers/tty/serial/samsung.c
@@ -0,0 +1,1486 @@
+/*
+ * Driver core for Samsung SoC onboard UARTs.
+ *
+ * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* Note on 2410 error handling
+ *
+ * The s3c2410 manual has a love/hate affair with the contents of the
+ * UERSTAT register in the UART blocks, and keeps marking some of the
+ * error bits as reserved. Having checked with the s3c2410x01,
+ * it copes with BREAKs properly, so I am happy to ignore the RESERVED
+ * feature from the later versions of the manual.
+ *
+ * If it becomes apparent that later versions of the 2410 remove these
+ * bits, then action will have to be taken to differentiate the versions
+ * and change the policy on BREAK
+ *
+ * BJD, 04-Nov-2004
+*/
+
+#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+
+#include <asm/irq.h>
+
+#include <mach/hardware.h>
+#include <mach/map.h>
+
+#include <plat/regs-serial.h>
+
+#include "samsung.h"
+
+/* UART name and device definitions */
+
+#define S3C24XX_SERIAL_NAME "ttySAC"
+#define S3C24XX_SERIAL_MAJOR 204
+#define S3C24XX_SERIAL_MINOR 64
+
+/* macros to change one thing to another */
+
+#define tx_enabled(port) ((port)->unused[0])
+#define rx_enabled(port) ((port)->unused[1])
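+
+/*
+ * Illustrative note (not part of the original driver): the two otherwise
+ * unused bytes in struct uart_port are borrowed here as per-port software
+ * flags recording whether the tx/rx interrupts are currently enabled.
+ */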
+
+/* flag to ignore all characters coming in */
+#define RXSTAT_DUMMY_READ (0x10000000)
+
+static inline struct s3c24xx_uart_port *to_ourport(struct uart_port *port)
+{
+ return container_of(port, struct s3c24xx_uart_port, port);
+}
+
+/* translate a port to the device name */
+
+static inline const char *s3c24xx_serial_portname(struct uart_port *port)
+{
+ return to_platform_device(port->dev)->name;
+}
+
+static int s3c24xx_serial_txempty_nofifo(struct uart_port *port)
+{
+ return (rd_regl(port, S3C2410_UTRSTAT) & S3C2410_UTRSTAT_TXE);
+}
+
+static void s3c24xx_serial_rx_enable(struct uart_port *port)
+{
+ unsigned long flags;
+ unsigned int ucon, ufcon;
+ int count = 10000;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ while (--count && !s3c24xx_serial_txempty_nofifo(port))
+ udelay(100);
+
+ ufcon = rd_regl(port, S3C2410_UFCON);
+ ufcon |= S3C2410_UFCON_RESETRX;
+ wr_regl(port, S3C2410_UFCON, ufcon);
+
+ ucon = rd_regl(port, S3C2410_UCON);
+ ucon |= S3C2410_UCON_RXIRQMODE;
+ wr_regl(port, S3C2410_UCON, ucon);
+
+ rx_enabled(port) = 1;
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void s3c24xx_serial_rx_disable(struct uart_port *port)
+{
+ unsigned long flags;
+ unsigned int ucon;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ ucon = rd_regl(port, S3C2410_UCON);
+ ucon &= ~S3C2410_UCON_RXIRQMODE;
+ wr_regl(port, S3C2410_UCON, ucon);
+
+ rx_enabled(port) = 0;
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void s3c24xx_serial_stop_tx(struct uart_port *port)
+{
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
+
+ if (tx_enabled(port)) {
+ disable_irq_nosync(ourport->tx_irq);
+ tx_enabled(port) = 0;
+ if (port->flags & UPF_CONS_FLOW)
+ s3c24xx_serial_rx_enable(port);
+ }
+}
+
+static void s3c24xx_serial_start_tx(struct uart_port *port)
+{
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
+
+ if (!tx_enabled(port)) {
+ if (port->flags & UPF_CONS_FLOW)
+ s3c24xx_serial_rx_disable(port);
+
+ enable_irq(ourport->tx_irq);
+ tx_enabled(port) = 1;
+ }
+}
+
+
+static void s3c24xx_serial_stop_rx(struct uart_port *port)
+{
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
+
+ if (rx_enabled(port)) {
+ dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
+ disable_irq_nosync(ourport->rx_irq);
+ rx_enabled(port) = 0;
+ }
+}
+
+static void s3c24xx_serial_enable_ms(struct uart_port *port)
+{
+}
+
+static inline struct s3c24xx_uart_info *s3c24xx_port_to_info(struct uart_port *port)
+{
+ return to_ourport(port)->info;
+}
+
+static inline struct s3c2410_uartcfg *s3c24xx_port_to_cfg(struct uart_port *port)
+{
+ if (port->dev == NULL)
+ return NULL;
+
+ return (struct s3c2410_uartcfg *)port->dev->platform_data;
+}
+
+static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport,
+ unsigned long ufstat)
+{
+ struct s3c24xx_uart_info *info = ourport->info;
+
+ if (ufstat & info->rx_fifofull)
+ return info->fifosize;
+
+ return (ufstat & info->rx_fifomask) >> info->rx_fifoshift;
+}
+
+
+/* ? - where has parity gone?? */
+#define S3C2410_UERSTAT_PARITY (0x1000)
+
+static irqreturn_t
+s3c24xx_serial_rx_chars(int irq, void *dev_id)
+{
+ struct s3c24xx_uart_port *ourport = dev_id;
+ struct uart_port *port = &ourport->port;
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int ufcon, ch, flag, ufstat, uerstat;
+ int max_count = 64;
+
+ while (max_count-- > 0) {
+ ufcon = rd_regl(port, S3C2410_UFCON);
+ ufstat = rd_regl(port, S3C2410_UFSTAT);
+
+ if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0)
+ break;
+
+ uerstat = rd_regl(port, S3C2410_UERSTAT);
+ ch = rd_regb(port, S3C2410_URXH);
+
+ if (port->flags & UPF_CONS_FLOW) {
+ int txe = s3c24xx_serial_txempty_nofifo(port);
+
+ if (rx_enabled(port)) {
+ if (!txe) {
+ rx_enabled(port) = 0;
+ continue;
+ }
+ } else {
+ if (txe) {
+ ufcon |= S3C2410_UFCON_RESETRX;
+ wr_regl(port, S3C2410_UFCON, ufcon);
+ rx_enabled(port) = 1;
+ goto out;
+ }
+ continue;
+ }
+ }
+
+ /* insert the character into the buffer */
+
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (unlikely(uerstat & S3C2410_UERSTAT_ANY)) {
+ dbg("rxerr: port ch=0x%02x, rxs=0x%08x\n",
+ ch, uerstat);
+
+ /* check for break */
+ if (uerstat & S3C2410_UERSTAT_BREAK) {
+ dbg("break!\n");
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ goto ignore_char;
+ }
+
+ if (uerstat & S3C2410_UERSTAT_FRAME)
+ port->icount.frame++;
+ if (uerstat & S3C2410_UERSTAT_OVERRUN)
+ port->icount.overrun++;
+
+ uerstat &= port->read_status_mask;
+
+ if (uerstat & S3C2410_UERSTAT_BREAK)
+ flag = TTY_BREAK;
+ else if (uerstat & S3C2410_UERSTAT_PARITY)
+ flag = TTY_PARITY;
+ else if (uerstat & (S3C2410_UERSTAT_FRAME |
+ S3C2410_UERSTAT_OVERRUN))
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(port, ch))
+ goto ignore_char;
+
+ uart_insert_char(port, uerstat, S3C2410_UERSTAT_OVERRUN,
+ ch, flag);
+
+ ignore_char:
+ continue;
+ }
+ tty_flip_buffer_push(tty);
+
+ out:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
+{
+ struct s3c24xx_uart_port *ourport = id;
+ struct uart_port *port = &ourport->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ int count = 256;
+
+ if (port->x_char) {
+ wr_regb(port, S3C2410_UTXH, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ goto out;
+ }
+
+ /* if there isn't anything more to transmit, or the uart is now
+ * stopped, disable the uart and exit
+ */
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ s3c24xx_serial_stop_tx(port);
+ goto out;
+ }
+
+ /* try and drain the buffer... */
+
+ while (!uart_circ_empty(xmit) && count-- > 0) {
+ if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
+ break;
+
+ wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ s3c24xx_serial_stop_tx(port);
+
+ out:
+ return IRQ_HANDLED;
+}
+
+static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
+{
+ struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+ unsigned long ufstat = rd_regl(port, S3C2410_UFSTAT);
+ unsigned long ufcon = rd_regl(port, S3C2410_UFCON);
+
+ if (ufcon & S3C2410_UFCON_FIFOMODE) {
+ if ((ufstat & info->tx_fifomask) != 0 ||
+ (ufstat & info->tx_fifofull))
+ return 0;
+
+ return 1;
+ }
+
+ return s3c24xx_serial_txempty_nofifo(port);
+}
+
+/* no modem control lines */
+static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
+{
+ unsigned int umstat = rd_regb(port, S3C2410_UMSTAT);
+
+ if (umstat & S3C2410_UMSTAT_CTS)
+ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+ else
+ return TIOCM_CAR | TIOCM_DSR;
+}
+
+static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ /* todo - possibly remove AFC and do manual CTS */
+}
+
+static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
+{
+ unsigned long flags;
+ unsigned int ucon;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ ucon = rd_regl(port, S3C2410_UCON);
+
+ if (break_state)
+ ucon |= S3C2410_UCON_SBREAK;
+ else
+ ucon &= ~S3C2410_UCON_SBREAK;
+
+ wr_regl(port, S3C2410_UCON, ucon);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void s3c24xx_serial_shutdown(struct uart_port *port)
+{
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
+
+ if (ourport->tx_claimed) {
+ free_irq(ourport->tx_irq, ourport);
+ tx_enabled(port) = 0;
+ ourport->tx_claimed = 0;
+ }
+
+ if (ourport->rx_claimed) {
+ free_irq(ourport->rx_irq, ourport);
+ ourport->rx_claimed = 0;
+ rx_enabled(port) = 0;
+ }
+}
+
+
+static int s3c24xx_serial_startup(struct uart_port *port)
+{
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
+ int ret;
+
+ dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
+ port->mapbase, port->membase);
+
+ rx_enabled(port) = 1;
+
+ ret = request_irq(ourport->rx_irq, s3c24xx_serial_rx_chars, 0,
+ s3c24xx_serial_portname(port), ourport);
+
+ if (ret != 0) {
+ printk(KERN_ERR "cannot get irq %d\n", ourport->rx_irq);
+ return ret;
+ }
+
+ ourport->rx_claimed = 1;
+
+ dbg("requesting tx irq...\n");
+
+ tx_enabled(port) = 1;
+
+ ret = request_irq(ourport->tx_irq, s3c24xx_serial_tx_chars, 0,
+ s3c24xx_serial_portname(port), ourport);
+
+ if (ret) {
+ printk(KERN_ERR "cannot get irq %d\n", ourport->tx_irq);
+ goto err;
+ }
+
+ ourport->tx_claimed = 1;
+
+ dbg("s3c24xx_serial_startup ok\n");
+
+ /* the port reset code should have done the correct
+ * register setup for the port controls */
+
+ return ret;
+
+ err:
+ s3c24xx_serial_shutdown(port);
+ return ret;
+}
+
+/* power management control */
+
+static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
+ unsigned int old)
+{
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
+
+ ourport->pm_level = level;
+
+ switch (level) {
+ case 3:
+ if (!IS_ERR(ourport->baudclk) && ourport->baudclk != NULL)
+ clk_disable(ourport->baudclk);
+
+ clk_disable(ourport->clk);
+ break;
+
+ case 0:
+ clk_enable(ourport->clk);
+
+ if (!IS_ERR(ourport->baudclk) && ourport->baudclk != NULL)
+ clk_enable(ourport->baudclk);
+
+ break;
+ default:
+ printk(KERN_ERR "s3c24xx_serial: unknown pm %d\n", level);
+ }
+}
+
+/* baud rate calculation
+ *
+ * The UARTs on the S3C2410/S3C2440 can take their clocks from a number
+ * of different sources, including the peripheral clock ("pclk") and an
+ * external clock ("uclk"). The S3C2440 also adds the core clock ("fclk")
+ * with a programmable extra divisor.
+ *
+ * The following code goes through the clock sources, and calculates the
+ * baud clocks (and the resultant actual baud rates) and then tries to
+ * pick the closest one and select that.
+ *
+*/
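+
+/*
+ * Illustrative note (not part of the original driver): on a port without
+ * UDIVSLOT, a hypothetical 66MHz clock and a request for 115200 baud give
+ * quot = (66000000 + 8*115200) / (16*115200) = 36, an actual rate of
+ * 66000000 / (36*16) = 114583, and 35 is finally written to UBRDIV.
+ */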
+
+
+#define MAX_CLKS (8)
+
+static struct s3c24xx_uart_clksrc tmp_clksrc = {
+ .name = "pclk",
+ .min_baud = 0,
+ .max_baud = 0,
+ .divisor = 1,
+};
+
+static inline int
+s3c24xx_serial_getsource(struct uart_port *port, struct s3c24xx_uart_clksrc *c)
+{
+ struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+
+ return (info->get_clksrc)(port, c);
+}
+
+static inline int
+s3c24xx_serial_setsource(struct uart_port *port, struct s3c24xx_uart_clksrc *c)
+{
+ struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+
+ return (info->set_clksrc)(port, c);
+}
+
+struct baud_calc {
+ struct s3c24xx_uart_clksrc *clksrc;
+ unsigned int calc;
+ unsigned int divslot;
+ unsigned int quot;
+ struct clk *src;
+};
+
+static int s3c24xx_serial_calcbaud(struct baud_calc *calc,
+ struct uart_port *port,
+ struct s3c24xx_uart_clksrc *clksrc,
+ unsigned int baud)
+{
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
+ unsigned long rate;
+
+ calc->src = clk_get(port->dev, clksrc->name);
+ if (calc->src == NULL || IS_ERR(calc->src))
+ return 0;
+
+ rate = clk_get_rate(calc->src);
+ rate /= clksrc->divisor;
+
+ calc->clksrc = clksrc;
+
+ if (ourport->info->has_divslot) {
+ unsigned long div = rate / baud;
+
+ /* The UDIVSLOT register on the newer UARTs allows us to
+ * get a divisor adjustment of 1/16th on the baud clock.
+ *
+ * We don't keep the UDIVSLOT value (the 16ths we calculated
+ * by not multiplying the baud by 16) as it is easy enough
+ * to recalculate.
+ */
+
+ calc->quot = div / 16;
+ calc->calc = rate / div;
+ } else {
+ calc->quot = (rate + (8 * baud)) / (16 * baud);
+ calc->calc = (rate / (calc->quot * 16));
+ }
+
+ calc->quot--;
+ return 1;
+}
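+
+/*
+ * Illustrative sketch (not part of the original driver) of the divslot
+ * path above: with a hypothetical rate of 66000000 and baud = 115200,
+ * div = 572, so quot = 572 / 16 = 35 and the trailing "calc->quot--"
+ * leaves 34 for UBRDIV; the remaining div & 15 = 12 sixteenths are
+ * recovered from baudclk_rate in s3c24xx_serial_set_termios() to pick
+ * the UDIVSLOT pattern.
+ */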
+
+static unsigned int s3c24xx_serial_getclk(struct uart_port *port,
+ struct s3c24xx_uart_clksrc **clksrc,
+ struct clk **clk,
+ unsigned int baud)
+{
+ struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
+ struct s3c24xx_uart_clksrc *clkp;
+ struct baud_calc res[MAX_CLKS];
+ struct baud_calc *resptr, *best, *sptr;
+ int i;
+
+ clkp = cfg->clocks;
+ best = NULL;
+
+ if (cfg->clocks_size < 2) {
+ if (cfg->clocks_size == 0)
+ clkp = &tmp_clksrc;
+
+ /* check to see if we're sourcing fclk, and if so we're
+ * going to have to update the clock source
+ */
+
+ if (strcmp(clkp->name, "fclk") == 0) {
+ struct s3c24xx_uart_clksrc src;
+
+ s3c24xx_serial_getsource(port, &src);
+
+ /* check that the port is already using fclk, and if
+ * not, then re-select fclk
+ */
+
+ if (strcmp(src.name, clkp->name) == 0) {
+ s3c24xx_serial_setsource(port, clkp);
+ s3c24xx_serial_getsource(port, &src);
+ }
+
+ clkp->divisor = src.divisor;
+ }
+
+ s3c24xx_serial_calcbaud(res, port, clkp, baud);
+ best = res;
+ resptr = best + 1;
+ } else {
+ resptr = res;
+
+ for (i = 0; i < cfg->clocks_size; i++, clkp++) {
+ if (s3c24xx_serial_calcbaud(resptr, port, clkp, baud))
+ resptr++;
+ }
+ }
+
+ /* ok, we now need to select the best clock we found */
+
+ if (!best) {
+ unsigned int deviation = (1<<30)|((1<<30)-1);
+ int calc_deviation;
+
+ for (sptr = res; sptr < resptr; sptr++) {
+ calc_deviation = baud - sptr->calc;
+ if (calc_deviation < 0)
+ calc_deviation = -calc_deviation;
+
+ if (calc_deviation < deviation) {
+ best = sptr;
+ deviation = calc_deviation;
+ }
+ }
+ }
+
+ /* store results to pass back */
+
+ *clksrc = best->clksrc;
+ *clk = best->src;
+
+ return best->quot;
+}
+
+/* udivslot_table[]
+ *
+ * This table takes the fractional value of the baud divisor and gives
+ * the recommended setting for the UDIVSLOT register.
+ */
+static u16 udivslot_table[16] = {
+ [0] = 0x0000,
+ [1] = 0x0080,
+ [2] = 0x0808,
+ [3] = 0x0888,
+ [4] = 0x2222,
+ [5] = 0x4924,
+ [6] = 0x4A52,
+ [7] = 0x54AA,
+ [8] = 0x5555,
+ [9] = 0xD555,
+ [10] = 0xD5D5,
+ [11] = 0xDDD5,
+ [12] = 0xDDDD,
+ [13] = 0xDFDD,
+ [14] = 0xDFDF,
+ [15] = 0xFFDF,
+};
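+
+/*
+ * Illustrative note (not part of the original driver): entry [n] of the
+ * table has exactly n bits set, spread as evenly as possible over the
+ * 16-bit word (e.g. [4] = 0x2222, [8] = 0x5555), so the fractional
+ * sixteenths of the divisor are distributed across the 16 divisor slots
+ * rather than bunched together.
+ */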
+
+static void s3c24xx_serial_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
+ struct s3c24xx_uart_clksrc *clksrc = NULL;
+ struct clk *clk = NULL;
+ unsigned long flags;
+ unsigned int baud, quot;
+ unsigned int ulcon;
+ unsigned int umcon;
+ unsigned int udivslot = 0;
+
+ /*
+ * We don't support modem control lines.
+ */
+ termios->c_cflag &= ~(HUPCL | CMSPAR);
+ termios->c_cflag |= CLOCAL;
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+
+ baud = uart_get_baud_rate(port, termios, old, 0, 115200*8);
+
+ if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
+ quot = port->custom_divisor;
+ else
+ quot = s3c24xx_serial_getclk(port, &clksrc, &clk, baud);
+
+ /* check to see if we need to change clock source */
+
+ if (ourport->clksrc != clksrc || ourport->baudclk != clk) {
+ dbg("selecting clock %p\n", clk);
+ s3c24xx_serial_setsource(port, clksrc);
+
+ if (ourport->baudclk != NULL && !IS_ERR(ourport->baudclk)) {
+ clk_disable(ourport->baudclk);
+ ourport->baudclk = NULL;
+ }
+
+ clk_enable(clk);
+
+ ourport->clksrc = clksrc;
+ ourport->baudclk = clk;
+ ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
+ }
+
+ if (ourport->info->has_divslot) {
+ unsigned int div = ourport->baudclk_rate / baud;
+
+ if (cfg->has_fracval) {
+ udivslot = (div & 15);
+ dbg("fracval = %04x\n", udivslot);
+ } else {
+ udivslot = udivslot_table[div & 15];
+ dbg("udivslot = %04x (div %d)\n", udivslot, div & 15);
+ }
+ }
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ dbg("config: 5bits/char\n");
+ ulcon = S3C2410_LCON_CS5;
+ break;
+ case CS6:
+ dbg("config: 6bits/char\n");
+ ulcon = S3C2410_LCON_CS6;
+ break;
+ case CS7:
+ dbg("config: 7bits/char\n");
+ ulcon = S3C2410_LCON_CS7;
+ break;
+ case CS8:
+ default:
+ dbg("config: 8bits/char\n");
+ ulcon = S3C2410_LCON_CS8;
+ break;
+ }
+
+ /* preserve original lcon IR settings */
+ ulcon |= (cfg->ulcon & S3C2410_LCON_IRM);
+
+ if (termios->c_cflag & CSTOPB)
+ ulcon |= S3C2410_LCON_STOPB;
+
+ umcon = (termios->c_cflag & CRTSCTS) ? S3C2410_UMCOM_AFC : 0;
+
+ if (termios->c_cflag & PARENB) {
+ if (termios->c_cflag & PARODD)
+ ulcon |= S3C2410_LCON_PODD;
+ else
+ ulcon |= S3C2410_LCON_PEVEN;
+ } else {
+ ulcon |= S3C2410_LCON_PNONE;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ dbg("setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
+ ulcon, quot, udivslot);
+
+ wr_regl(port, S3C2410_ULCON, ulcon);
+ wr_regl(port, S3C2410_UBRDIV, quot);
+ wr_regl(port, S3C2410_UMCON, umcon);
+
+ if (ourport->info->has_divslot)
+ wr_regl(port, S3C2443_DIVSLOT, udivslot);
+
+ dbg("uart: ulcon = 0x%08x, ucon = 0x%08x, ufcon = 0x%08x\n",
+ rd_regl(port, S3C2410_ULCON),
+ rd_regl(port, S3C2410_UCON),
+ rd_regl(port, S3C2410_UFCON));
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ /*
+ * Which character status flags are we interested in?
+ */
+ port->read_status_mask = S3C2410_UERSTAT_OVERRUN;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= S3C2410_UERSTAT_FRAME | S3C2410_UERSTAT_PARITY;
+
+ /*
+ * Which character status flags should we ignore?
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= S3C2410_UERSTAT_OVERRUN;
+ if (termios->c_iflag & IGNBRK && termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= S3C2410_UERSTAT_FRAME;
+
+ /*
+ * Ignore all characters if CREAD is not set.
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= RXSTAT_DUMMY_READ;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *s3c24xx_serial_type(struct uart_port *port)
+{
+ switch (port->type) {
+ case PORT_S3C2410:
+ return "S3C2410";
+ case PORT_S3C2440:
+ return "S3C2440";
+ case PORT_S3C2412:
+ return "S3C2412";
+ case PORT_S3C6400:
+ return "S3C6400/10";
+ default:
+ return NULL;
+ }
+}
+
+#define MAP_SIZE (0x100)
+
+static void s3c24xx_serial_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase, MAP_SIZE);
+}
+
+static int s3c24xx_serial_request_port(struct uart_port *port)
+{
+ const char *name = s3c24xx_serial_portname(port);
+ return request_mem_region(port->mapbase, MAP_SIZE, name) ? 0 : -EBUSY;
+}
+
+static void s3c24xx_serial_config_port(struct uart_port *port, int flags)
+{
+ struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+
+ if (flags & UART_CONFIG_TYPE &&
+ s3c24xx_serial_request_port(port) == 0)
+ port->type = info->type;
+}
+
+/*
+ * verify the new serial_struct (for TIOCSSERIAL).
+ */
+static int
+s3c24xx_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+
+ if (ser->type != PORT_UNKNOWN && ser->type != info->type)
+ return -EINVAL;
+
+ return 0;
+}
+
+
+#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
+
+static struct console s3c24xx_serial_console;
+
+#define S3C24XX_SERIAL_CONSOLE &s3c24xx_serial_console
+#else
+#define S3C24XX_SERIAL_CONSOLE NULL
+#endif
+
+static struct uart_ops s3c24xx_serial_ops = {
+ .pm = s3c24xx_serial_pm,
+ .tx_empty = s3c24xx_serial_tx_empty,
+ .get_mctrl = s3c24xx_serial_get_mctrl,
+ .set_mctrl = s3c24xx_serial_set_mctrl,
+ .stop_tx = s3c24xx_serial_stop_tx,
+ .start_tx = s3c24xx_serial_start_tx,
+ .stop_rx = s3c24xx_serial_stop_rx,
+ .enable_ms = s3c24xx_serial_enable_ms,
+ .break_ctl = s3c24xx_serial_break_ctl,
+ .startup = s3c24xx_serial_startup,
+ .shutdown = s3c24xx_serial_shutdown,
+ .set_termios = s3c24xx_serial_set_termios,
+ .type = s3c24xx_serial_type,
+ .release_port = s3c24xx_serial_release_port,
+ .request_port = s3c24xx_serial_request_port,
+ .config_port = s3c24xx_serial_config_port,
+ .verify_port = s3c24xx_serial_verify_port,
+};
+
+
+static struct uart_driver s3c24xx_uart_drv = {
+ .owner = THIS_MODULE,
+ .driver_name = "s3c2410_serial",
+ .nr = CONFIG_SERIAL_SAMSUNG_UARTS,
+ .cons = S3C24XX_SERIAL_CONSOLE,
+ .dev_name = S3C24XX_SERIAL_NAME,
+ .major = S3C24XX_SERIAL_MAJOR,
+ .minor = S3C24XX_SERIAL_MINOR,
+};
+
+static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS] = {
+ [0] = {
+ .port = {
+ .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[0].port.lock),
+ .iotype = UPIO_MEM,
+ .irq = IRQ_S3CUART_RX0,
+ .uartclk = 0,
+ .fifosize = 16,
+ .ops = &s3c24xx_serial_ops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 0,
+ }
+ },
+ [1] = {
+ .port = {
+ .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[1].port.lock),
+ .iotype = UPIO_MEM,
+ .irq = IRQ_S3CUART_RX1,
+ .uartclk = 0,
+ .fifosize = 16,
+ .ops = &s3c24xx_serial_ops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 1,
+ }
+ },
+#if CONFIG_SERIAL_SAMSUNG_UARTS > 2
+
+ [2] = {
+ .port = {
+ .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[2].port.lock),
+ .iotype = UPIO_MEM,
+ .irq = IRQ_S3CUART_RX2,
+ .uartclk = 0,
+ .fifosize = 16,
+ .ops = &s3c24xx_serial_ops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 2,
+ }
+ },
+#endif
+#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
+ [3] = {
+ .port = {
+ .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[3].port.lock),
+ .iotype = UPIO_MEM,
+ .irq = IRQ_S3CUART_RX3,
+ .uartclk = 0,
+ .fifosize = 16,
+ .ops = &s3c24xx_serial_ops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 3,
+ }
+ }
+#endif
+};
+
+/* s3c24xx_serial_resetport
+ *
+ * wrapper to call the specific reset for this port (reset the fifos
+ * and the settings)
+*/
+
+static inline int s3c24xx_serial_resetport(struct uart_port *port,
+ struct s3c2410_uartcfg *cfg)
+{
+ struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+
+ return (info->reset_port)(port, cfg);
+}
+
+
+#ifdef CONFIG_CPU_FREQ
+
+static int s3c24xx_serial_cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct s3c24xx_uart_port *port;
+ struct uart_port *uport;
+
+ port = container_of(nb, struct s3c24xx_uart_port, freq_transition);
+ uport = &port->port;
+
+ /* check to see if port is enabled */
+
+ if (port->pm_level != 0)
+ return 0;
+
+ /* try to work out if the baud rate is changing; we can detect
+ * a change in rate, but we have no support for detecting a
+ * disturbance in the clock rate over the change.
+ */
+
+ if (IS_ERR(port->clk))
+ goto exit;
+
+ if (port->baudclk_rate == clk_get_rate(port->clk))
+ goto exit;
+
+ if (val == CPUFREQ_PRECHANGE) {
+ /* we should really shut the port down whilst the
+ * frequency change is in progress. */
+
+ } else if (val == CPUFREQ_POSTCHANGE) {
+ struct ktermios *termios;
+ struct tty_struct *tty;
+
+ if (uport->state == NULL)
+ goto exit;
+
+ tty = uport->state->port.tty;
+
+ if (tty == NULL)
+ goto exit;
+
+ termios = tty->termios;
+
+ if (termios == NULL) {
+ printk(KERN_WARNING "%s: no termios?\n", __func__);
+ goto exit;
+ }
+
+ s3c24xx_serial_set_termios(uport, termios, NULL);
+ }
+
+ exit:
+ return 0;
+}
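+
+/*
+ * Illustrative note (not part of the original driver): on
+ * CPUFREQ_POSTCHANGE the notifier above simply replays the current
+ * termios through s3c24xx_serial_set_termios(), which recalculates
+ * UBRDIV/UDIVSLOT against the new clock rate; the PRECHANGE branch is
+ * left empty (the comment there notes the port should really be shut
+ * down for the duration of the change).
+ */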
+
+static inline int s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port)
+{
+ port->freq_transition.notifier_call = s3c24xx_serial_cpufreq_transition;
+
+ return cpufreq_register_notifier(&port->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port)
+{
+ cpufreq_unregister_notifier(&port->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+#else
+static inline int s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port)
+{
+ return 0;
+}
+
+static inline void s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port)
+{
+}
+#endif
+
+/* s3c24xx_serial_init_port
+ *
+ * initialise a single serial port from the platform device given
+ */
+
+static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
+ struct s3c24xx_uart_info *info,
+ struct platform_device *platdev)
+{
+ struct uart_port *port = &ourport->port;
+ struct s3c2410_uartcfg *cfg;
+ struct resource *res;
+ int ret;
+
+ dbg("s3c24xx_serial_init_port: port=%p, platdev=%p\n", port, platdev);
+
+ if (platdev == NULL)
+ return -ENODEV;
+
+ cfg = s3c24xx_dev_to_cfg(&platdev->dev);
+
+ if (port->mapbase != 0)
+ return 0;
+
+ if (cfg->hwport > CONFIG_SERIAL_SAMSUNG_UARTS) {
+ printk(KERN_ERR "%s: port %d bigger than %d\n", __func__,
+ cfg->hwport, CONFIG_SERIAL_SAMSUNG_UARTS);
+ return -ERANGE;
+ }
+
+ /* setup info for port */
+ port->dev = &platdev->dev;
+ ourport->info = info;
+
+ /* copy the info in from provided structure */
+ ourport->port.fifosize = info->fifosize;
+
+ dbg("s3c24xx_serial_init_port: %p (hw %d)...\n", port, cfg->hwport);
+
+ port->uartclk = 1;
+
+ if (cfg->uart_flags & UPF_CONS_FLOW) {
+ dbg("s3c24xx_serial_init_port: enabling flow control\n");
+ port->flags |= UPF_CONS_FLOW;
+ }
+
+ /* sort out the physical and virtual addresses for each UART */
+
+ res = platform_get_resource(platdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ printk(KERN_ERR "failed to find memory resource for uart\n");
+ return -EINVAL;
+ }
+
+ dbg("resource %p (%lx..%lx)\n", res, res->start, res->end);
+
+ port->mapbase = res->start;
+ port->membase = S3C_VA_UART + (res->start & 0xfffff);
+ ret = platform_get_irq(platdev, 0);
+ if (ret < 0)
+ port->irq = 0;
+ else {
+ port->irq = ret;
+ ourport->rx_irq = ret;
+ ourport->tx_irq = ret + 1;
+ }
+
+ ret = platform_get_irq(platdev, 1);
+ if (ret > 0)
+ ourport->tx_irq = ret;
+
+ ourport->clk = clk_get(&platdev->dev, "uart");
+
+ dbg("port: map=%08x, mem=%08x, irq=%d (%d,%d), clock=%ld\n",
+ port->mapbase, port->membase, port->irq,
+ ourport->rx_irq, ourport->tx_irq, port->uartclk);
+
+ /* reset the fifos (and setup the uart) */
+ s3c24xx_serial_resetport(port, cfg);
+ return 0;
+}
+
+static ssize_t s3c24xx_serial_show_clksrc(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct uart_port *port = s3c24xx_dev_to_port(dev);
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
+
+ return snprintf(buf, PAGE_SIZE, "* %s\n", ourport->clksrc->name);
+}
+
+static DEVICE_ATTR(clock_source, S_IRUGO, s3c24xx_serial_show_clksrc, NULL);
+
+/* Device driver serial port probe */
+
+static int probe_index;
+
+int s3c24xx_serial_probe(struct platform_device *dev,
+ struct s3c24xx_uart_info *info)
+{
+ struct s3c24xx_uart_port *ourport;
+ int ret;
+
+ dbg("s3c24xx_serial_probe(%p, %p) %d\n", dev, info, probe_index);
+
+ ourport = &s3c24xx_serial_ports[probe_index];
+ probe_index++;
+
+ dbg("%s: initialising port %p...\n", __func__, ourport);
+
+ ret = s3c24xx_serial_init_port(ourport, info, dev);
+ if (ret < 0)
+ goto probe_err;
+
+ dbg("%s: adding port\n", __func__);
+ uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
+ platform_set_drvdata(dev, &ourport->port);
+
+ ret = device_create_file(&dev->dev, &dev_attr_clock_source);
+ if (ret < 0)
+ printk(KERN_ERR "%s: failed to add clksrc attr.\n", __func__);
+
+ ret = s3c24xx_serial_cpufreq_register(ourport);
+ if (ret < 0)
+ dev_err(&dev->dev, "failed to add cpufreq notifier\n");
+
+ return 0;
+
+ probe_err:
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(s3c24xx_serial_probe);
+
+int __devexit s3c24xx_serial_remove(struct platform_device *dev)
+{
+ struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
+
+ if (port) {
+ s3c24xx_serial_cpufreq_deregister(to_ourport(port));
+ device_remove_file(&dev->dev, &dev_attr_clock_source);
+ uart_remove_one_port(&s3c24xx_uart_drv, port);
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(s3c24xx_serial_remove);
+
+/* UART power management code */
+
+#ifdef CONFIG_PM
+
+static int s3c24xx_serial_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
+
+ if (port)
+ uart_suspend_port(&s3c24xx_uart_drv, port);
+
+ return 0;
+}
+
+static int s3c24xx_serial_resume(struct platform_device *dev)
+{
+ struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
+
+ if (port) {
+ clk_enable(ourport->clk);
+ s3c24xx_serial_resetport(port, s3c24xx_port_to_cfg(port));
+ clk_disable(ourport->clk);
+
+ uart_resume_port(&s3c24xx_uart_drv, port);
+ }
+
+ return 0;
+}
+#endif
+
+int s3c24xx_serial_init(struct platform_driver *drv,
+ struct s3c24xx_uart_info *info)
+{
+ dbg("s3c24xx_serial_init(%p,%p)\n", drv, info);
+
+#ifdef CONFIG_PM
+ drv->suspend = s3c24xx_serial_suspend;
+ drv->resume = s3c24xx_serial_resume;
+#endif
+
+ return platform_driver_register(drv);
+}
+
+EXPORT_SYMBOL_GPL(s3c24xx_serial_init);
+
+/* module initialisation code */
+
+static int __init s3c24xx_serial_modinit(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&s3c24xx_uart_drv);
+ if (ret < 0) {
+ printk(KERN_ERR "failed to register UART driver\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void __exit s3c24xx_serial_modexit(void)
+{
+ uart_unregister_driver(&s3c24xx_uart_drv);
+}
+
+module_init(s3c24xx_serial_modinit);
+module_exit(s3c24xx_serial_modexit);
+
+/* Console code */
+
+#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
+
+static struct uart_port *cons_uart;
+
+static int
+s3c24xx_serial_console_txrdy(struct uart_port *port, unsigned int ufcon)
+{
+ struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+ unsigned long ufstat, utrstat;
+
+ if (ufcon & S3C2410_UFCON_FIFOMODE) {
+ /* fifo mode - check amount of data in fifo registers... */
+
+ ufstat = rd_regl(port, S3C2410_UFSTAT);
+ return (ufstat & info->tx_fifofull) ? 0 : 1;
+ }
+
+ /* in non-fifo mode, fall back to the tx buffer-empty flag */
+
+ utrstat = rd_regl(port, S3C2410_UTRSTAT);
+ return (utrstat & S3C2410_UTRSTAT_TXE) ? 1 : 0;
+}
+
+static void
+s3c24xx_serial_console_putchar(struct uart_port *port, int ch)
+{
+ unsigned int ufcon = rd_regl(cons_uart, S3C2410_UFCON);
+ while (!s3c24xx_serial_console_txrdy(port, ufcon))
+ barrier();
+ wr_regb(cons_uart, S3C2410_UTXH, ch);
+}
+
+static void
+s3c24xx_serial_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
+}
+
+static void __init
+s3c24xx_serial_get_options(struct uart_port *port, int *baud,
+ int *parity, int *bits)
+{
+ struct s3c24xx_uart_clksrc clksrc;
+ struct clk *clk;
+ unsigned int ulcon;
+ unsigned int ucon;
+ unsigned int ubrdiv;
+ unsigned long rate;
+
+ ulcon = rd_regl(port, S3C2410_ULCON);
+ ucon = rd_regl(port, S3C2410_UCON);
+ ubrdiv = rd_regl(port, S3C2410_UBRDIV);
+
+ dbg("s3c24xx_serial_get_options: port=%p\n"
+ "registers: ulcon=%08x, ucon=%08x, ubdriv=%08x\n",
+ port, ulcon, ucon, ubrdiv);
+
+ if ((ucon & 0xf) != 0) {
+ /* consider the serial port configured if the tx/rx mode is set */
+
+ switch (ulcon & S3C2410_LCON_CSMASK) {
+ case S3C2410_LCON_CS5:
+ *bits = 5;
+ break;
+ case S3C2410_LCON_CS6:
+ *bits = 6;
+ break;
+ case S3C2410_LCON_CS7:
+ *bits = 7;
+ break;
+ default:
+ case S3C2410_LCON_CS8:
+ *bits = 8;
+ break;
+ }
+
+ switch (ulcon & S3C2410_LCON_PMASK) {
+ case S3C2410_LCON_PEVEN:
+ *parity = 'e';
+ break;
+
+ case S3C2410_LCON_PODD:
+ *parity = 'o';
+ break;
+
+ case S3C2410_LCON_PNONE:
+ default:
+ *parity = 'n';
+ }
+
+ /* now calculate the baud rate */
+
+ s3c24xx_serial_getsource(port, &clksrc);
+
+ clk = clk_get(port->dev, clksrc.name);
+ if (!IS_ERR(clk) && clk != NULL)
+ rate = clk_get_rate(clk) / clksrc.divisor;
+ else
+ rate = 1;
+
+
+ *baud = rate / (16 * (ubrdiv + 1));
+ dbg("calculated baud %d\n", *baud);
+ }
+
+}
+
+/* s3c24xx_serial_init_ports
+ *
+ * initialise the serial ports from the machine provided initialisation
+ * data.
+*/
+
+static int s3c24xx_serial_init_ports(struct s3c24xx_uart_info **info)
+{
+ struct s3c24xx_uart_port *ptr = s3c24xx_serial_ports;
+ struct platform_device **platdev_ptr;
+ int i;
+
+ dbg("s3c24xx_serial_init_ports: initialising ports...\n");
+
+ platdev_ptr = s3c24xx_uart_devs;
+
+ for (i = 0; i < CONFIG_SERIAL_SAMSUNG_UARTS; i++, ptr++, platdev_ptr++) {
+ s3c24xx_serial_init_port(ptr, info[i], *platdev_ptr);
+ }
+
+ return 0;
+}
+
+static int __init
+s3c24xx_serial_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ dbg("s3c24xx_serial_console_setup: co=%p (%d), %s\n",
+ co, co->index, options);
+
+ /* is this a valid port */
+
+ if (co->index == -1 || co->index >= CONFIG_SERIAL_SAMSUNG_UARTS)
+ co->index = 0;
+
+ port = &s3c24xx_serial_ports[co->index].port;
+
+ /* is the port configured? */
+
+ if (port->mapbase == 0x0) {
+ co->index = 0;
+ port = &s3c24xx_serial_ports[co->index].port;
+ }
+
+ cons_uart = port;
+
+ dbg("s3c24xx_serial_console_setup: port=%p (%d)\n", port, co->index);
+
+ /*
+ * If console options were supplied on the command line, use them;
+ * otherwise probe the current hardware settings of the port.
+ */
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ s3c24xx_serial_get_options(port, &baud, &parity, &bits);
+
+ dbg("s3c24xx_serial_console_setup: baud %d\n", baud);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+/* s3c24xx_serial_initconsole
+ *
+ * initialise the console from one of the uart drivers
+*/
+
+static struct console s3c24xx_serial_console = {
+ .name = S3C24XX_SERIAL_NAME,
+ .device = uart_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .write = s3c24xx_serial_console_write,
+ .setup = s3c24xx_serial_console_setup
+};
+
+int s3c24xx_serial_initconsole(struct platform_driver *drv,
+ struct s3c24xx_uart_info **info)
+
+{
+ struct platform_device *dev = s3c24xx_uart_devs[0];
+
+ dbg("s3c24xx_serial_initconsole\n");
+
+ /* select driver based on the cpu */
+
+ if (dev == NULL) {
+ printk(KERN_ERR "s3c24xx: no devices for console init\n");
+ return 0;
+ }
+
+ if (strcmp(dev->name, drv->driver.name) != 0)
+ return 0;
+
+ s3c24xx_serial_console.data = &s3c24xx_uart_drv;
+ s3c24xx_serial_init_ports(info);
+
+ register_console(&s3c24xx_serial_console);
+ return 0;
+}
+
+#endif /* CONFIG_SERIAL_SAMSUNG_CONSOLE */
+
+MODULE_DESCRIPTION("Samsung SoC Serial port driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
new file mode 100644
index 00000000..5b098cd7
--- /dev/null
+++ b/drivers/tty/serial/samsung.h
@@ -0,0 +1,119 @@
+/*
+ * Driver for Samsung SoC onboard UARTs.
+ *
+ * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+struct s3c24xx_uart_info {
+ char *name;
+ unsigned int type;
+ unsigned int fifosize;
+ unsigned long rx_fifomask;
+ unsigned long rx_fifoshift;
+ unsigned long rx_fifofull;
+ unsigned long tx_fifomask;
+ unsigned long tx_fifoshift;
+ unsigned long tx_fifofull;
+
+ /* uart port features */
+
+ unsigned int has_divslot:1;
+
+ /* clock source control */
+
+ int (*get_clksrc)(struct uart_port *, struct s3c24xx_uart_clksrc *clk);
+ int (*set_clksrc)(struct uart_port *, struct s3c24xx_uart_clksrc *clk);
+
+ /* uart controls */
+ int (*reset_port)(struct uart_port *, struct s3c2410_uartcfg *);
+};
+
+struct s3c24xx_uart_port {
+ unsigned char rx_claimed;
+ unsigned char tx_claimed;
+ unsigned int pm_level;
+ unsigned long baudclk_rate;
+
+ unsigned int rx_irq;
+ unsigned int tx_irq;
+
+ struct s3c24xx_uart_info *info;
+ struct s3c24xx_uart_clksrc *clksrc;
+ struct clk *clk;
+ struct clk *baudclk;
+ struct uart_port port;
+
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
+#endif
+};
+
+/* conversion functions */
+
+#define s3c24xx_dev_to_port(__dev) (struct uart_port *)dev_get_drvdata(__dev)
+#define s3c24xx_dev_to_cfg(__dev) (struct s3c2410_uartcfg *)((__dev)->platform_data)
+
+/* register access controls */
+
+#define portaddr(port, reg) ((port)->membase + (reg))
+
+#define rd_regb(port, reg) (__raw_readb(portaddr(port, reg)))
+#define rd_regl(port, reg) (__raw_readl(portaddr(port, reg)))
+
+#define wr_regb(port, reg, val) __raw_writeb(val, portaddr(port, reg))
+#define wr_regl(port, reg, val) __raw_writel(val, portaddr(port, reg))
+
+extern int s3c24xx_serial_probe(struct platform_device *dev,
+ struct s3c24xx_uart_info *uart);
+
+extern int __devexit s3c24xx_serial_remove(struct platform_device *dev);
+
+extern int s3c24xx_serial_initconsole(struct platform_driver *drv,
+ struct s3c24xx_uart_info **uart);
+
+extern int s3c24xx_serial_init(struct platform_driver *drv,
+ struct s3c24xx_uart_info *info);
+
+#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
+
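+/* s3c24xx_console_init
+ *
+ * expands to a console_initcall() that fills a per-port array with the
+ * given uart_info and passes it to s3c24xx_serial_initconsole().
+*/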
+#define s3c24xx_console_init(__drv, __inf) \
+static int __init s3c_serial_console_init(void) \
+{ \
+ struct s3c24xx_uart_info *uinfo[CONFIG_SERIAL_SAMSUNG_UARTS]; \
+ int i; \
+ \
+ for (i = 0; i < CONFIG_SERIAL_SAMSUNG_UARTS; i++) \
+ uinfo[i] = __inf; \
+ return s3c24xx_serial_initconsole(__drv, uinfo); \
+} \
+ \
+console_initcall(s3c_serial_console_init)
+
+#else
+#define s3c24xx_console_init(drv, inf) extern void no_console(void)
+#endif
+
+#ifdef CONFIG_SERIAL_SAMSUNG_DEBUG
+
+extern void printascii(const char *);
+
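+/* debug helper: formats into a fixed 256-byte stack buffer via vsprintf
+ * (no bounds checking) and emits the result through the low-level
+ * printascii() routine.
+ */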
+static void dbg(const char *fmt, ...)
+{
+ va_list va;
+ char buff[256];
+
+ va_start(va, fmt);
+ vsprintf(buff, fmt, va);
+ va_end(va);
+
+ printascii(buff);
+}
+
+#else
+#define dbg(x...) do { } while (0)
+#endif
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
new file mode 100644
index 00000000..ea2340b8
--- /dev/null
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -0,0 +1,974 @@
+/*
+ * Support for the asynchronous serial interface (DUART) included
+ * in the BCM1250 and derived System-On-a-Chip (SOC) devices.
+ *
+ * Copyright (c) 2007 Maciej W. Rozycki
+ *
+ * Derived from drivers/char/sb1250_duart.c for which the following
+ * copyright applies:
+ *
+ * Copyright (c) 2000, 2001, 2002, 2003, 2004 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * References:
+ *
+ * "BCM1250/BCM1125/BCM1125H User Manual", Broadcom Corporation
+ */
+
+#if defined(CONFIG_SERIAL_SB1250_DUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/compiler.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/types.h>
+
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/war.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_uart.h>
+#include <asm/sibyte/swarm.h>
+
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+
+#define SBD_CHANREGS(line) A_BCM1480_DUART_CHANREG((line), 0)
+#define SBD_CTRLREGS(line) A_BCM1480_DUART_CTRLREG((line), 0)
+#define SBD_INT(line) (K_BCM1480_INT_UART_0 + (line))
+
+#define DUART_CHANREG_SPACING BCM1480_DUART_CHANREG_SPACING
+
+#define R_DUART_IMRREG(line) R_BCM1480_DUART_IMRREG(line)
+#define R_DUART_INCHREG(line) R_BCM1480_DUART_INCHREG(line)
+#define R_DUART_ISRREG(line) R_BCM1480_DUART_ISRREG(line)
+
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+
+#define SBD_CHANREGS(line) A_DUART_CHANREG((line), 0)
+#define SBD_CTRLREGS(line) A_DUART_CTRLREG(0)
+#define SBD_INT(line) (K_INT_UART_0 + (line))
+
+#else
+#error invalid SB1250 UART configuration
+
+#endif
+
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_DESCRIPTION("BCM1xxx on-chip DUART serial driver");
+MODULE_LICENSE("GPL");
+
+
+#define DUART_MAX_CHIP 2
+#define DUART_MAX_SIDE 2
+
+/*
+ * Per-port state.
+ */
+struct sbd_port {
+ struct sbd_duart *duart;
+ struct uart_port port;
+ unsigned char __iomem *memctrl;
+ int tx_stopped;
+ int initialised;
+};
+
+/*
+ * Per-DUART state for the shared register space.
+ */
+struct sbd_duart {
+ struct sbd_port sport[2];
+ unsigned long mapctrl;
+ atomic_t map_guard;
+};
+
+#define to_sport(uport) container_of(uport, struct sbd_port, port)
+
+static struct sbd_duart sbd_duarts[DUART_MAX_CHIP];
+
+
+/*
+ * Reading and writing SB1250 DUART registers.
+ *
+ * There are three register spaces: two per-channel ones and
+ * a shared one. We have to define accessors appropriately.
+ * All registers are 64-bit and all but the Baud Rate Clock
+ * registers only define 8 least significant bits. There is
+ * also a workaround to take into account. Raw accessors use
+ * the full register width, but cooked ones truncate it
+ * intentionally so that the rest of the driver does not care.
+ */
+static u64 __read_sbdchn(struct sbd_port *sport, int reg)
+{
+ void __iomem *csr = sport->port.membase + reg;
+
+ return __raw_readq(csr);
+}
+
+static u64 __read_sbdshr(struct sbd_port *sport, int reg)
+{
+ void __iomem *csr = sport->memctrl + reg;
+
+ return __raw_readq(csr);
+}
+
+static void __write_sbdchn(struct sbd_port *sport, int reg, u64 value)
+{
+ void __iomem *csr = sport->port.membase + reg;
+
+ __raw_writeq(value, csr);
+}
+
+static void __write_sbdshr(struct sbd_port *sport, int reg, u64 value)
+{
+ void __iomem *csr = sport->memctrl + reg;
+
+ __raw_writeq(value, csr);
+}
+
+/*
+ * In bug 1956, we get glitches that can mess up uart registers. This
+ * "read-mode-reg after any register access" is an accepted workaround.
+ */
+static void __war_sbd1956(struct sbd_port *sport)
+{
+ __read_sbdchn(sport, R_DUART_MODE_REG_1);
+ __read_sbdchn(sport, R_DUART_MODE_REG_2);
+}
+
+static unsigned char read_sbdchn(struct sbd_port *sport, int reg)
+{
+ unsigned char retval;
+
+ retval = __read_sbdchn(sport, reg);
+ if (SIBYTE_1956_WAR)
+ __war_sbd1956(sport);
+ return retval;
+}
+
+static unsigned char read_sbdshr(struct sbd_port *sport, int reg)
+{
+ unsigned char retval;
+
+ retval = __read_sbdshr(sport, reg);
+ if (SIBYTE_1956_WAR)
+ __war_sbd1956(sport);
+ return retval;
+}
+
+static void write_sbdchn(struct sbd_port *sport, int reg, unsigned int value)
+{
+ __write_sbdchn(sport, reg, value);
+ if (SIBYTE_1956_WAR)
+ __war_sbd1956(sport);
+}
+
+static void write_sbdshr(struct sbd_port *sport, int reg, unsigned int value)
+{
+ __write_sbdshr(sport, reg, value);
+ if (SIBYTE_1956_WAR)
+ __war_sbd1956(sport);
+}
+
+
+static int sbd_receive_ready(struct sbd_port *sport)
+{
+ return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_RX_RDY;
+}
+
+static int sbd_receive_drain(struct sbd_port *sport)
+{
+ int loops = 10000;
+
+ while (sbd_receive_ready(sport) && --loops)
+ read_sbdchn(sport, R_DUART_RX_HOLD);
+ return loops;
+}
+
+static int __maybe_unused sbd_transmit_ready(struct sbd_port *sport)
+{
+ return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_RDY;
+}
+
+static int __maybe_unused sbd_transmit_drain(struct sbd_port *sport)
+{
+ int loops = 10000;
+
+ while (!sbd_transmit_ready(sport) && --loops)
+ udelay(2);
+ return loops;
+}
+
+static int sbd_transmit_empty(struct sbd_port *sport)
+{
+ return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_EMT;
+}
+
+static int sbd_line_drain(struct sbd_port *sport)
+{
+ int loops = 10000;
+
+ while (!sbd_transmit_empty(sport) && --loops)
+ udelay(2);
+ return loops;
+}
+
+
+static unsigned int sbd_tx_empty(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ return sbd_transmit_empty(sport) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int sbd_get_mctrl(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int mctrl, status;
+
+ status = read_sbdshr(sport, R_DUART_IN_PORT);
+ status >>= (uport->line) % 2;
+ mctrl = (!(status & M_DUART_IN_PIN0_VAL) ? TIOCM_CTS : 0) |
+ (!(status & M_DUART_IN_PIN4_VAL) ? TIOCM_CAR : 0) |
+ (!(status & M_DUART_RIN0_PIN) ? TIOCM_RNG : 0) |
+ (!(status & M_DUART_IN_PIN2_VAL) ? TIOCM_DSR : 0);
+ return mctrl;
+}
+
+static void sbd_set_mctrl(struct uart_port *uport, unsigned int mctrl)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int clr = 0, set = 0, mode2;
+
+ if (mctrl & TIOCM_DTR)
+ set |= M_DUART_SET_OPR2;
+ else
+ clr |= M_DUART_CLR_OPR2;
+ if (mctrl & TIOCM_RTS)
+ set |= M_DUART_SET_OPR0;
+ else
+ clr |= M_DUART_CLR_OPR0;
+ clr <<= (uport->line) % 2;
+ set <<= (uport->line) % 2;
+
+ mode2 = read_sbdchn(sport, R_DUART_MODE_REG_2);
+ mode2 &= ~M_DUART_CHAN_MODE;
+ if (mctrl & TIOCM_LOOP)
+ mode2 |= V_DUART_CHAN_MODE_LCL_LOOP;
+ else
+ mode2 |= V_DUART_CHAN_MODE_NORMAL;
+
+ write_sbdshr(sport, R_DUART_CLEAR_OPR, clr);
+ write_sbdshr(sport, R_DUART_SET_OPR, set);
+ write_sbdchn(sport, R_DUART_MODE_REG_2, mode2);
+}
+
+static void sbd_stop_tx(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
+ sport->tx_stopped = 1;
+};
+
+static void sbd_start_tx(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int mask;
+
+ /* Enable tx interrupts. */
+ mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
+ mask |= M_DUART_IMR_TX;
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
+
+ /* Go!, go!, go!... */
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
+ sport->tx_stopped = 0;
+};
+
+static void sbd_stop_rx(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0);
+};
+
+static void sbd_enable_ms(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ write_sbdchn(sport, R_DUART_AUXCTL_X,
+ M_DUART_CIN_CHNG_ENA | M_DUART_CTS_CHNG_ENA);
+}
+
+static void sbd_break_ctl(struct uart_port *uport, int break_state)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ if (break_state == -1)
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_START_BREAK);
+ else
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_STOP_BREAK);
+}
+
+
+static void sbd_receive_chars(struct sbd_port *sport)
+{
+ struct uart_port *uport = &sport->port;
+ struct uart_icount *icount;
+ unsigned int status, ch, flag;
+ int count;
+
+ for (count = 16; count; count--) {
+ status = read_sbdchn(sport, R_DUART_STATUS);
+ if (!(status & M_DUART_RX_RDY))
+ break;
+
+ ch = read_sbdchn(sport, R_DUART_RX_HOLD);
+
+ flag = TTY_NORMAL;
+
+ icount = &uport->icount;
+ icount->rx++;
+
+ if (unlikely(status &
+ (M_DUART_RCVD_BRK | M_DUART_FRM_ERR |
+ M_DUART_PARITY_ERR | M_DUART_OVRUN_ERR))) {
+ if (status & M_DUART_RCVD_BRK) {
+ icount->brk++;
+ if (uart_handle_break(uport))
+ continue;
+ } else if (status & M_DUART_FRM_ERR)
+ icount->frame++;
+ else if (status & M_DUART_PARITY_ERR)
+ icount->parity++;
+ if (status & M_DUART_OVRUN_ERR)
+ icount->overrun++;
+
+ status &= uport->read_status_mask;
+ if (status & M_DUART_RCVD_BRK)
+ flag = TTY_BREAK;
+ else if (status & M_DUART_FRM_ERR)
+ flag = TTY_FRAME;
+ else if (status & M_DUART_PARITY_ERR)
+ flag = TTY_PARITY;
+ }
+
+ if (uart_handle_sysrq_char(uport, ch))
+ continue;
+
+ uart_insert_char(uport, status, M_DUART_OVRUN_ERR, ch, flag);
+ }
+
+ tty_flip_buffer_push(uport->state->port.tty);
+}
+
+static void sbd_transmit_chars(struct sbd_port *sport)
+{
+ struct uart_port *uport = &sport->port;
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ unsigned int mask;
+ int stop_tx;
+
+ /* XON/XOFF chars. */
+ if (sport->port.x_char) {
+ write_sbdchn(sport, R_DUART_TX_HOLD, sport->port.x_char);
+ sport->port.icount.tx++;
+ sport->port.x_char = 0;
+ return;
+ }
+
+ /* If nothing to do or stopped or hardware stopped. */
+ stop_tx = (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port));
+
+ /* Send char. */
+ if (!stop_tx) {
+ write_sbdchn(sport, R_DUART_TX_HOLD, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ sport->port.icount.tx++;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+ }
+
+ /* Are we done? */
+ if (stop_tx || uart_circ_empty(xmit)) {
+ /* Disable tx interrupts. */
+ mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
+ mask &= ~M_DUART_IMR_TX;
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
+ }
+}
+
+static void sbd_status_handle(struct sbd_port *sport)
+{
+ struct uart_port *uport = &sport->port;
+ unsigned int delta;
+
+ delta = read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2));
+ delta >>= (uport->line) % 2;
+
+ if (delta & (M_DUART_IN_PIN0_VAL << S_DUART_IN_PIN_CHNG))
+ uart_handle_cts_change(uport, !(delta & M_DUART_IN_PIN0_VAL));
+
+ if (delta & (M_DUART_IN_PIN2_VAL << S_DUART_IN_PIN_CHNG))
+ uport->icount.dsr++;
+
+ if (delta & ((M_DUART_IN_PIN2_VAL | M_DUART_IN_PIN0_VAL) <<
+ S_DUART_IN_PIN_CHNG))
+ wake_up_interruptible(&uport->state->port.delta_msr_wait);
+}
+
+static irqreturn_t sbd_interrupt(int irq, void *dev_id)
+{
+ struct sbd_port *sport = dev_id;
+ struct uart_port *uport = &sport->port;
+ irqreturn_t status = IRQ_NONE;
+ unsigned int intstat;
+ int count;
+
+ for (count = 16; count; count--) {
+ intstat = read_sbdshr(sport,
+ R_DUART_ISRREG((uport->line) % 2));
+ intstat &= read_sbdshr(sport,
+ R_DUART_IMRREG((uport->line) % 2));
+ intstat &= M_DUART_ISR_ALL;
+ if (!intstat)
+ break;
+
+ if (intstat & M_DUART_ISR_RX)
+ sbd_receive_chars(sport);
+ if (intstat & M_DUART_ISR_IN)
+ sbd_status_handle(sport);
+ if (intstat & M_DUART_ISR_TX)
+ sbd_transmit_chars(sport);
+
+ status = IRQ_HANDLED;
+ }
+
+ return status;
+}
+
+
+static int sbd_startup(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int mode1;
+ int ret;
+
+ ret = request_irq(sport->port.irq, sbd_interrupt,
+ IRQF_SHARED, "sb1250-duart", sport);
+ if (ret)
+ return ret;
+
+ /* Clear the receive FIFO. */
+ sbd_receive_drain(sport);
+
+ /* Clear the interrupt registers. */
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_BREAK_INT);
+ read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2));
+
+ /* Set rx/tx interrupt to FIFO available. */
+ mode1 = read_sbdchn(sport, R_DUART_MODE_REG_1);
+ mode1 &= ~(M_DUART_RX_IRQ_SEL_RXFULL | M_DUART_TX_IRQ_SEL_TXEMPT);
+ write_sbdchn(sport, R_DUART_MODE_REG_1, mode1);
+
+ /* Disable tx, enable rx. */
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_EN);
+ sport->tx_stopped = 1;
+
+ /* Enable interrupts. */
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
+ M_DUART_IMR_IN | M_DUART_IMR_RX);
+
+ return 0;
+}
+
+static void sbd_shutdown(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS);
+ sport->tx_stopped = 1;
+ free_irq(sport->port.irq, sport);
+}
+
+
+static void sbd_init_port(struct sbd_port *sport)
+{
+ struct uart_port *uport = &sport->port;
+
+ if (sport->initialised)
+ return;
+
+ /* There is no DUART reset feature, so just set some sane defaults. */
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_TX);
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_RX);
+ write_sbdchn(sport, R_DUART_MODE_REG_1, V_DUART_BITS_PER_CHAR_8);
+ write_sbdchn(sport, R_DUART_MODE_REG_2, 0);
+ write_sbdchn(sport, R_DUART_FULL_CTL,
+ V_DUART_INT_TIME(0) | V_DUART_SIG_FULL(15));
+ write_sbdchn(sport, R_DUART_OPCR_X, 0);
+ write_sbdchn(sport, R_DUART_AUXCTL_X, 0);
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0);
+
+ sport->initialised = 1;
+}
+
+static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
+ struct ktermios *old_termios)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int mode1 = 0, mode2 = 0, aux = 0;
+ unsigned int mode1mask = 0, mode2mask = 0, auxmask = 0;
+ unsigned int oldmode1, oldmode2, oldaux;
+ unsigned int baud, brg;
+ unsigned int command;
+
+ mode1mask |= ~(M_DUART_PARITY_MODE | M_DUART_PARITY_TYPE_ODD |
+ M_DUART_BITS_PER_CHAR);
+ mode2mask |= ~M_DUART_STOP_BIT_LEN_2;
+ auxmask |= ~M_DUART_CTS_CHNG_ENA;
+
+ /* Byte size. */
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ case CS6:
+ /* Unsupported, leave unchanged. */
+ mode1mask |= M_DUART_PARITY_MODE;
+ break;
+ case CS7:
+ mode1 |= V_DUART_BITS_PER_CHAR_7;
+ break;
+ case CS8:
+ default:
+ mode1 |= V_DUART_BITS_PER_CHAR_8;
+ break;
+ }
+
+ /* Parity and stop bits. */
+ if (termios->c_cflag & CSTOPB)
+ mode2 |= M_DUART_STOP_BIT_LEN_2;
+ else
+ mode2 |= M_DUART_STOP_BIT_LEN_1;
+ if (termios->c_cflag & PARENB)
+ mode1 |= V_DUART_PARITY_MODE_ADD;
+ else
+ mode1 |= V_DUART_PARITY_MODE_NONE;
+ if (termios->c_cflag & PARODD)
+ mode1 |= M_DUART_PARITY_TYPE_ODD;
+ else
+ mode1 |= M_DUART_PARITY_TYPE_EVEN;
+
+ baud = uart_get_baud_rate(uport, termios, old_termios, 1200, 5000000);
+ brg = V_DUART_BAUD_RATE(baud);
+ /* The actual lower bound is 1221bps, so compensate. */
+ if (brg > M_DUART_CLK_COUNTER)
+ brg = M_DUART_CLK_COUNTER;
+
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
+ uport->read_status_mask = M_DUART_OVRUN_ERR;
+ if (termios->c_iflag & INPCK)
+ uport->read_status_mask |= M_DUART_FRM_ERR |
+ M_DUART_PARITY_ERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ uport->read_status_mask |= M_DUART_RCVD_BRK;
+
+ uport->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ uport->ignore_status_mask |= M_DUART_FRM_ERR |
+ M_DUART_PARITY_ERR;
+ if (termios->c_iflag & IGNBRK) {
+ uport->ignore_status_mask |= M_DUART_RCVD_BRK;
+ if (termios->c_iflag & IGNPAR)
+ uport->ignore_status_mask |= M_DUART_OVRUN_ERR;
+ }
+
+ if (termios->c_cflag & CREAD)
+ command = M_DUART_RX_EN;
+ else
+ command = M_DUART_RX_DIS;
+
+ if (termios->c_cflag & CRTSCTS)
+ aux |= M_DUART_CTS_CHNG_ENA;
+ else
+ aux &= ~M_DUART_CTS_CHNG_ENA;
+
+ spin_lock(&uport->lock);
+
+ if (sport->tx_stopped)
+ command |= M_DUART_TX_DIS;
+ else
+ command |= M_DUART_TX_EN;
+
+ oldmode1 = read_sbdchn(sport, R_DUART_MODE_REG_1) & mode1mask;
+ oldmode2 = read_sbdchn(sport, R_DUART_MODE_REG_2) & mode2mask;
+ oldaux = read_sbdchn(sport, R_DUART_AUXCTL_X) & auxmask;
+
+ if (!sport->tx_stopped)
+ sbd_line_drain(sport);
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS);
+
+ write_sbdchn(sport, R_DUART_MODE_REG_1, mode1 | oldmode1);
+ write_sbdchn(sport, R_DUART_MODE_REG_2, mode2 | oldmode2);
+ write_sbdchn(sport, R_DUART_CLK_SEL, brg);
+ write_sbdchn(sport, R_DUART_AUXCTL_X, aux | oldaux);
+
+ write_sbdchn(sport, R_DUART_CMD, command);
+
+ spin_unlock(&uport->lock);
+}
+
+
+static const char *sbd_type(struct uart_port *uport)
+{
+ return "SB1250 DUART";
+}
+
+static void sbd_release_port(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+ struct sbd_duart *duart = sport->duart;
+ int map_guard;
+
+ iounmap(sport->memctrl);
+ sport->memctrl = NULL;
+ iounmap(uport->membase);
+ uport->membase = NULL;
+
+ map_guard = atomic_add_return(-1, &duart->map_guard);
+ if (!map_guard)
+ release_mem_region(duart->mapctrl, DUART_CHANREG_SPACING);
+ release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
+}
+
+static int sbd_map_port(struct uart_port *uport)
+{
+ const char *err = KERN_ERR "sbd: Cannot map MMIO\n";
+ struct sbd_port *sport = to_sport(uport);
+ struct sbd_duart *duart = sport->duart;
+
+ if (!uport->membase)
+ uport->membase = ioremap_nocache(uport->mapbase,
+ DUART_CHANREG_SPACING);
+ if (!uport->membase) {
+ printk(err);
+ return -ENOMEM;
+ }
+
+ if (!sport->memctrl)
+ sport->memctrl = ioremap_nocache(duart->mapctrl,
+ DUART_CHANREG_SPACING);
+ if (!sport->memctrl) {
+ printk(err);
+ iounmap(uport->membase);
+ uport->membase = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
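+/*
+ * Both channels of a chip share one control-register region, so map_guard
+ * reference-counts it: only the first requester reserves the region and
+ * only the last releaser gives it back.
+ */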
+static int sbd_request_port(struct uart_port *uport)
+{
+ const char *err = KERN_ERR "sbd: Unable to reserve MMIO resource\n";
+ struct sbd_duart *duart = to_sport(uport)->duart;
+ int map_guard;
+ int ret = 0;
+
+ if (!request_mem_region(uport->mapbase, DUART_CHANREG_SPACING,
+ "sb1250-duart")) {
+ printk(err);
+ return -EBUSY;
+ }
+ map_guard = atomic_add_return(1, &duart->map_guard);
+ if (map_guard == 1) {
+ if (!request_mem_region(duart->mapctrl, DUART_CHANREG_SPACING,
+ "sb1250-duart")) {
+ atomic_add(-1, &duart->map_guard);
+ printk(err);
+ ret = -EBUSY;
+ }
+ }
+ if (!ret) {
+ ret = sbd_map_port(uport);
+ if (ret) {
+ map_guard = atomic_add_return(-1, &duart->map_guard);
+ if (!map_guard)
+ release_mem_region(duart->mapctrl,
+ DUART_CHANREG_SPACING);
+ }
+ }
+ if (ret) {
+ release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
+ return ret;
+ }
+ return 0;
+}
+
+static void sbd_config_port(struct uart_port *uport, int flags)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ if (flags & UART_CONFIG_TYPE) {
+ if (sbd_request_port(uport))
+ return;
+
+ uport->type = PORT_SB1250_DUART;
+
+ sbd_init_port(sport);
+ }
+}
+
+static int sbd_verify_port(struct uart_port *uport, struct serial_struct *ser)
+{
+ int ret = 0;
+
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_SB1250_DUART)
+ ret = -EINVAL;
+ if (ser->irq != uport->irq)
+ ret = -EINVAL;
+ if (ser->baud_base != uport->uartclk / 16)
+ ret = -EINVAL;
+ return ret;
+}
+
+
+static const struct uart_ops sbd_ops = {
+ .tx_empty = sbd_tx_empty,
+ .set_mctrl = sbd_set_mctrl,
+ .get_mctrl = sbd_get_mctrl,
+ .stop_tx = sbd_stop_tx,
+ .start_tx = sbd_start_tx,
+ .stop_rx = sbd_stop_rx,
+ .enable_ms = sbd_enable_ms,
+ .break_ctl = sbd_break_ctl,
+ .startup = sbd_startup,
+ .shutdown = sbd_shutdown,
+ .set_termios = sbd_set_termios,
+ .type = sbd_type,
+ .release_port = sbd_release_port,
+ .request_port = sbd_request_port,
+ .config_port = sbd_config_port,
+ .verify_port = sbd_verify_port,
+};
+
+/* Initialize SB1250 DUART port structures. */
+static void __init sbd_probe_duarts(void)
+{
+ static int probed;
+ int chip, side;
+ int max_lines, line;
+
+ if (probed)
+ return;
+
+ /* Set the number of available units based on the SOC type. */
+ switch (soc_type) {
+ case K_SYS_SOC_TYPE_BCM1x55:
+ case K_SYS_SOC_TYPE_BCM1x80:
+ max_lines = 4;
+ break;
+ default:
+ /* Assume at least two serial ports at the normal address. */
+ max_lines = 2;
+ break;
+ }
+
+ probed = 1;
+
+ for (chip = 0, line = 0; chip < DUART_MAX_CHIP && line < max_lines;
+ chip++) {
+ sbd_duarts[chip].mapctrl = SBD_CTRLREGS(line);
+
+ for (side = 0; side < DUART_MAX_SIDE && line < max_lines;
+ side++, line++) {
+ struct sbd_port *sport = &sbd_duarts[chip].sport[side];
+ struct uart_port *uport = &sport->port;
+
+ sport->duart = &sbd_duarts[chip];
+
+ uport->irq = SBD_INT(line);
+ uport->uartclk = 100000000 / 20 * 16;
+ uport->fifosize = 16;
+ uport->iotype = UPIO_MEM;
+ uport->flags = UPF_BOOT_AUTOCONF;
+ uport->ops = &sbd_ops;
+ uport->line = line;
+ uport->mapbase = SBD_CHANREGS(line);
+ }
+ }
+}
+
+
+#ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE
+/*
+ * Serial console stuff. Very basic, polling driver for doing serial
+ * console output. The console_lock is held by the caller, so we
+ * shouldn't be interrupted for more console activity.
+ */
+static void sbd_console_putchar(struct uart_port *uport, int ch)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ sbd_transmit_drain(sport);
+ write_sbdchn(sport, R_DUART_TX_HOLD, ch);
+}
+
+static void sbd_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ int chip = co->index / DUART_MAX_SIDE;
+ int side = co->index % DUART_MAX_SIDE;
+ struct sbd_port *sport = &sbd_duarts[chip].sport[side];
+ struct uart_port *uport = &sport->port;
+ unsigned long flags;
+ unsigned int mask;
+
+ /* Disable transmit interrupts and enable the transmitter. */
+ spin_lock_irqsave(&uport->lock, flags);
+ mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
+ mask & ~M_DUART_IMR_TX);
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
+ spin_unlock_irqrestore(&uport->lock, flags);
+
+ uart_console_write(&sport->port, s, count, sbd_console_putchar);
+
+ /* Restore transmit interrupts and the transmitter enable. */
+ spin_lock_irqsave(&uport->lock, flags);
+ sbd_line_drain(sport);
+ if (sport->tx_stopped)
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
+ spin_unlock_irqrestore(&uport->lock, flags);
+}
+
+static int __init sbd_console_setup(struct console *co, char *options)
+{
+ int chip = co->index / DUART_MAX_SIDE;
+ int side = co->index % DUART_MAX_SIDE;
+ struct sbd_port *sport = &sbd_duarts[chip].sport[side];
+ struct uart_port *uport = &sport->port;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+
+ if (!sport->duart)
+ return -ENXIO;
+
+ ret = sbd_map_port(uport);
+ if (ret)
+ return ret;
+
+ sbd_init_port(sport);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ return uart_set_options(uport, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver sbd_reg;
+static struct console sbd_console = {
+ .name = "duart",
+ .write = sbd_console_write,
+ .device = uart_console_device,
+ .setup = sbd_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sbd_reg
+};
+
+static int __init sbd_serial_console_init(void)
+{
+ sbd_probe_duarts();
+ register_console(&sbd_console);
+
+ return 0;
+}
+
+console_initcall(sbd_serial_console_init);
+
+#define SERIAL_SB1250_DUART_CONSOLE &sbd_console
+#else
+#define SERIAL_SB1250_DUART_CONSOLE NULL
+#endif /* CONFIG_SERIAL_SB1250_DUART_CONSOLE */
+
+
+static struct uart_driver sbd_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "sb1250_duart",
+ .dev_name = "duart",
+ .major = TTY_MAJOR,
+ .minor = SB1250_DUART_MINOR_BASE,
+ .nr = DUART_MAX_CHIP * DUART_MAX_SIDE,
+ .cons = SERIAL_SB1250_DUART_CONSOLE,
+};
+
+/* Set up the driver and register it. */
+static int __init sbd_init(void)
+{
+ int i, ret;
+
+ sbd_probe_duarts();
+
+ ret = uart_register_driver(&sbd_reg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DUART_MAX_CHIP * DUART_MAX_SIDE; i++) {
+ struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE];
+ struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE];
+ struct uart_port *uport = &sport->port;
+
+ if (sport->duart)
+ uart_add_one_port(&sbd_reg, uport);
+ }
+
+ return 0;
+}
+
+/* Unload the driver. Unregister stuff, get ready to go away. */
+static void __exit sbd_exit(void)
+{
+ int i;
+
+ for (i = DUART_MAX_CHIP * DUART_MAX_SIDE - 1; i >= 0; i--) {
+ struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE];
+ struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE];
+ struct uart_port *uport = &sport->port;
+
+ if (sport->duart)
+ uart_remove_one_port(&sbd_reg, uport);
+ }
+
+ uart_unregister_driver(&sbd_reg);
+}
+
+module_init(sbd_init);
+module_exit(sbd_exit);
diff --git a/drivers/tty/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
new file mode 100644
index 00000000..75038ad2
--- /dev/null
+++ b/drivers/tty/serial/sc26xx.c
@@ -0,0 +1,757 @@
+/*
+ * sc26xx.c: Serial driver for Philips SC2681/SC2692 devices.
+ *
+ * Copyright (C) 2006,2007 Thomas Bogendörfer (tsbogend@alpha.franken.de)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/major.h>
+#include <linux/circ_buf.h>
+#include <linux/serial.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+
+#if defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+
+#define SC26XX_MAJOR 204
+#define SC26XX_MINOR_START 205
+#define SC26XX_NR 2
+
+struct uart_sc26xx_port {
+ struct uart_port port[2];
+ u8 dsr_mask[2];
+ u8 cts_mask[2];
+ u8 dcd_mask[2];
+ u8 ri_mask[2];
+ u8 dtr_mask[2];
+ u8 rts_mask[2];
+ u8 imr;
+};
+
+/* register common to both ports */
+#define RD_ISR 0x14
+#define RD_IPR 0x34
+
+#define WR_ACR 0x10
+#define WR_IMR 0x14
+#define WR_OPCR 0x34
+#define WR_OPR_SET 0x38
+#define WR_OPR_CLR 0x3C
+
+/* access common register */
+#define READ_SC(p, r) readb((p)->membase + RD_##r)
+#define WRITE_SC(p, r, v) writeb((v), (p)->membase + WR_##r)
+
+/* register per port */
+#define RD_PORT_MRx 0x00
+#define RD_PORT_SR 0x04
+#define RD_PORT_RHR 0x0c
+
+#define WR_PORT_MRx 0x00
+#define WR_PORT_CSR 0x04
+#define WR_PORT_CR 0x08
+#define WR_PORT_THR 0x0c
+
+/* SR bits */
+#define SR_BREAK (1 << 7)
+#define SR_FRAME (1 << 6)
+#define SR_PARITY (1 << 5)
+#define SR_OVERRUN (1 << 4)
+#define SR_TXRDY (1 << 2)
+#define SR_RXRDY (1 << 0)
+
+#define CR_RES_MR (1 << 4)
+#define CR_RES_RX (2 << 4)
+#define CR_RES_TX (3 << 4)
+#define CR_STRT_BRK (6 << 4)
+#define CR_STOP_BRK (7 << 4)
+#define CR_DIS_TX (1 << 3)
+#define CR_ENA_TX (1 << 2)
+#define CR_DIS_RX (1 << 1)
+#define CR_ENA_RX (1 << 0)
+
+/* ISR bits */
+#define ISR_RXRDYB (1 << 5)
+#define ISR_TXRDYB (1 << 4)
+#define ISR_RXRDYA (1 << 1)
+#define ISR_TXRDYA (1 << 0)
+
+/* IMR bits */
+#define IMR_RXRDY (1 << 1)
+#define IMR_TXRDY (1 << 0)
+
+/* access port register */
+static inline u8 read_sc_port(struct uart_port *p, u8 reg)
+{
+ return readb(p->membase + p->line * 0x20 + reg);
+}
+
+static inline void write_sc_port(struct uart_port *p, u8 reg, u8 val)
+{
+ writeb(val, p->membase + p->line * 0x20 + reg);
+}
+
+#define READ_SC_PORT(p, r) read_sc_port(p, RD_PORT_##r)
+#define WRITE_SC_PORT(p, r, v) write_sc_port(p, WR_PORT_##r, v)
+
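+/*
+ * The two uart_ports sit back to back inside struct uart_sc26xx_port, so
+ * stepping back by port->line recovers port[0] and, via container_of(),
+ * the shared per-chip state.
+ */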
+static void sc26xx_enable_irq(struct uart_port *port, int mask)
+{
+ struct uart_sc26xx_port *up;
+ int line = port->line;
+
+ port -= line;
+ up = container_of(port, struct uart_sc26xx_port, port[0]);
+
+ up->imr |= mask << (line * 4);
+ WRITE_SC(port, IMR, up->imr);
+}
+
+static void sc26xx_disable_irq(struct uart_port *port, int mask)
+{
+ struct uart_sc26xx_port *up;
+ int line = port->line;
+
+ port -= line;
+ up = container_of(port, struct uart_sc26xx_port, port[0]);
+
+ up->imr &= ~(mask << (line * 4));
+ WRITE_SC(port, IMR, up->imr);
+}
+
+static struct tty_struct *receive_chars(struct uart_port *port)
+{
+ struct tty_struct *tty = NULL;
+ int limit = 10000;
+ unsigned char ch;
+ char flag;
+ u8 status;
+
+ if (port->state != NULL) /* Unopened serial console */
+ tty = port->state->port.tty;
+
+ while (limit-- > 0) {
+ status = READ_SC_PORT(port, SR);
+ if (!(status & SR_RXRDY))
+ break;
+ ch = READ_SC_PORT(port, RHR);
+
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (unlikely(status & (SR_BREAK | SR_FRAME |
+ SR_PARITY | SR_OVERRUN))) {
+ if (status & SR_BREAK) {
+ status &= ~(SR_PARITY | SR_FRAME);
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (status & SR_PARITY)
+ port->icount.parity++;
+ else if (status & SR_FRAME)
+ port->icount.frame++;
+ if (status & SR_OVERRUN)
+ port->icount.overrun++;
+
+ status &= port->read_status_mask;
+ if (status & SR_BREAK)
+ flag = TTY_BREAK;
+ else if (status & SR_PARITY)
+ flag = TTY_PARITY;
+ else if (status & SR_FRAME)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+
+ if (status & port->ignore_status_mask)
+ continue;
+
+ tty_insert_flip_char(tty, ch, flag);
+ }
+ return tty;
+}
+
+static void transmit_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit;
+
+ if (!port->state)
+ return;
+
+ xmit = &port->state->xmit;
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ sc26xx_disable_irq(port, IMR_TXRDY);
+ return;
+ }
+ while (!uart_circ_empty(xmit)) {
+ if (!(READ_SC_PORT(port, SR) & SR_TXRDY))
+ break;
+
+ WRITE_SC_PORT(port, THR, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+static irqreturn_t sc26xx_interrupt(int irq, void *dev_id)
+{
+ struct uart_sc26xx_port *up = dev_id;
+ struct tty_struct *tty;
+ unsigned long flags;
+ u8 isr;
+
+ spin_lock_irqsave(&up->port[0].lock, flags);
+
+ tty = NULL;
+ isr = READ_SC(&up->port[0], ISR);
+ if (isr & ISR_TXRDYA)
+ transmit_chars(&up->port[0]);
+ if (isr & ISR_RXRDYA)
+ tty = receive_chars(&up->port[0]);
+
+ spin_unlock(&up->port[0].lock);
+
+ if (tty)
+ tty_flip_buffer_push(tty);
+
+ spin_lock(&up->port[1].lock);
+
+ tty = NULL;
+ if (isr & ISR_TXRDYB)
+ transmit_chars(&up->port[1]);
+ if (isr & ISR_RXRDYB)
+ tty = receive_chars(&up->port[1]);
+
+ spin_unlock_irqrestore(&up->port[1].lock, flags);
+
+ if (tty)
+ tty_flip_buffer_push(tty);
+
+ return IRQ_HANDLED;
+}
+
+/* port->lock is not held. */
+static unsigned int sc26xx_tx_empty(struct uart_port *port)
+{
+ return (READ_SC_PORT(port, SR) & SR_TXRDY) ? TIOCSER_TEMT : 0;
+}
+
+/* port->lock held by caller. */
+static void sc26xx_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_sc26xx_port *up;
+ int line = port->line;
+
+ port -= line;
+ up = container_of(port, struct uart_sc26xx_port, port[0]);
+
+ if (up->dtr_mask[line]) {
+ if (mctrl & TIOCM_DTR)
+ WRITE_SC(port, OPR_SET, up->dtr_mask[line]);
+ else
+ WRITE_SC(port, OPR_CLR, up->dtr_mask[line]);
+ }
+ if (up->rts_mask[line]) {
+ if (mctrl & TIOCM_RTS)
+ WRITE_SC(port, OPR_SET, up->rts_mask[line]);
+ else
+ WRITE_SC(port, OPR_CLR, up->rts_mask[line]);
+ }
+}
+
+/* port->lock is held by caller and interrupts are disabled. */
+static unsigned int sc26xx_get_mctrl(struct uart_port *port)
+{
+ struct uart_sc26xx_port *up;
+ int line = port->line;
+ unsigned int mctrl = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR;
+ u8 ipr;
+
+ port -= line;
+ up = container_of(port, struct uart_sc26xx_port, port[0]);
+ ipr = READ_SC(port, IPR) ^ 0xff;
+
+ if (up->dsr_mask[line]) {
+ mctrl &= ~TIOCM_DSR;
+ mctrl |= ipr & up->dsr_mask[line] ? TIOCM_DSR : 0;
+ }
+ if (up->cts_mask[line]) {
+ mctrl &= ~TIOCM_CTS;
+ mctrl |= ipr & up->cts_mask[line] ? TIOCM_CTS : 0;
+ }
+ if (up->dcd_mask[line]) {
+ mctrl &= ~TIOCM_CAR;
+ mctrl |= ipr & up->dcd_mask[line] ? TIOCM_CAR : 0;
+ }
+ if (up->ri_mask[line]) {
+ mctrl &= ~TIOCM_RNG;
+ mctrl |= ipr & up->ri_mask[line] ? TIOCM_RNG : 0;
+ }
+ return mctrl;
+}
+
+/* port->lock held by caller. */
+static void sc26xx_stop_tx(struct uart_port *port)
+{
+ return;
+}
+
+/* port->lock held by caller. */
+static void sc26xx_start_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+
+ while (!uart_circ_empty(xmit)) {
+ if (!(READ_SC_PORT(port, SR) & SR_TXRDY)) {
+ sc26xx_enable_irq(port, IMR_TXRDY);
+ break;
+ }
+ WRITE_SC_PORT(port, THR, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+}
+
+/* port->lock held by caller. */
+static void sc26xx_stop_rx(struct uart_port *port)
+{
+}
+
+/* port->lock held by caller. */
+static void sc26xx_enable_ms(struct uart_port *port)
+{
+}
+
+/* port->lock is not held. */
+static void sc26xx_break_ctl(struct uart_port *port, int break_state)
+{
+ if (break_state == -1)
+ WRITE_SC_PORT(port, CR, CR_STRT_BRK);
+ else
+ WRITE_SC_PORT(port, CR, CR_STOP_BRK);
+}
+
+/* port->lock is not held. */
+static int sc26xx_startup(struct uart_port *port)
+{
+ sc26xx_disable_irq(port, IMR_TXRDY | IMR_RXRDY);
+ WRITE_SC(port, OPCR, 0);
+
+ /* reset tx and rx */
+ WRITE_SC_PORT(port, CR, CR_RES_RX);
+ WRITE_SC_PORT(port, CR, CR_RES_TX);
+
+ /* start rx/tx */
+ WRITE_SC_PORT(port, CR, CR_ENA_TX | CR_ENA_RX);
+
+ /* enable irqs */
+ sc26xx_enable_irq(port, IMR_RXRDY);
+ return 0;
+}
+
+/* port->lock is not held. */
+static void sc26xx_shutdown(struct uart_port *port)
+{
+ /* disable interrupts */
+ sc26xx_disable_irq(port, IMR_TXRDY | IMR_RXRDY);
+
+ /* stop tx/rx */
+ WRITE_SC_PORT(port, CR, CR_DIS_TX | CR_DIS_RX);
+}
+
+/* port->lock is not held. */
+static void sc26xx_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ unsigned int quot = uart_get_divisor(port, baud);
+ unsigned int iflag, cflag;
+ unsigned long flags;
+ u8 mr1, mr2, csr;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ while ((READ_SC_PORT(port, SR) & ((1 << 3) | (1 << 2))) != 0xc)
+ udelay(2);
+
+ WRITE_SC_PORT(port, CR, CR_DIS_TX | CR_DIS_RX);
+
+ iflag = termios->c_iflag;
+ cflag = termios->c_cflag;
+
+ port->read_status_mask = SR_OVERRUN;
+ if (iflag & INPCK)
+ port->read_status_mask |= SR_PARITY | SR_FRAME;
+ if (iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= SR_BREAK;
+
+ port->ignore_status_mask = 0;
+ if (iflag & IGNBRK)
+ port->ignore_status_mask |= SR_BREAK;
+ if ((cflag & CREAD) == 0)
+ port->ignore_status_mask |= SR_BREAK | SR_FRAME |
+ SR_PARITY | SR_OVERRUN;
+
+ switch (cflag & CSIZE) {
+ case CS5:
+ mr1 = 0x00;
+ break;
+ case CS6:
+ mr1 = 0x01;
+ break;
+ case CS7:
+ mr1 = 0x02;
+ break;
+ default:
+ case CS8:
+ mr1 = 0x03;
+ break;
+ }
+ mr2 = 0x07;
+ if (cflag & CSTOPB)
+ mr2 = 0x0f;
+ if (cflag & PARENB) {
+ if (cflag & PARODD)
+ mr1 |= (1 << 2);
+ } else
+ mr1 |= (2 << 3);
+
+ switch (baud) {
+ case 50:
+ csr = 0x00;
+ break;
+ case 110:
+ csr = 0x11;
+ break;
+ case 134:
+ csr = 0x22;
+ break;
+ case 200:
+ csr = 0x33;
+ break;
+ case 300:
+ csr = 0x44;
+ break;
+ case 600:
+ csr = 0x55;
+ break;
+ case 1200:
+ csr = 0x66;
+ break;
+ case 2400:
+ csr = 0x88;
+ break;
+ case 4800:
+ csr = 0x99;
+ break;
+ default:
+ case 9600:
+ csr = 0xbb;
+ break;
+ case 19200:
+ csr = 0xcc;
+ break;
+ }
+
+ WRITE_SC_PORT(port, CR, CR_RES_MR);
+ WRITE_SC_PORT(port, MRx, mr1);
+ WRITE_SC_PORT(port, MRx, mr2);
+
+ WRITE_SC(port, ACR, 0x80);
+ WRITE_SC_PORT(port, CSR, csr);
+
+ /* reset tx and rx */
+ WRITE_SC_PORT(port, CR, CR_RES_RX);
+ WRITE_SC_PORT(port, CR, CR_RES_TX);
+
+ WRITE_SC_PORT(port, CR, CR_ENA_TX | CR_ENA_RX);
+ while ((READ_SC_PORT(port, SR) & ((1 << 3) | (1 << 2))) != 0xc)
+ udelay(2);
+
+ /* XXX */
+ uart_update_timeout(port, cflag,
+ (port->uartclk / (16 * quot)));
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *sc26xx_type(struct uart_port *port)
+{
+ return "SC26XX";
+}
+
+static void sc26xx_release_port(struct uart_port *port)
+{
+}
+
+static int sc26xx_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void sc26xx_config_port(struct uart_port *port, int flags)
+{
+}
+
+static int sc26xx_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ return -EINVAL;
+}
+
+static struct uart_ops sc26xx_ops = {
+ .tx_empty = sc26xx_tx_empty,
+ .set_mctrl = sc26xx_set_mctrl,
+ .get_mctrl = sc26xx_get_mctrl,
+ .stop_tx = sc26xx_stop_tx,
+ .start_tx = sc26xx_start_tx,
+ .stop_rx = sc26xx_stop_rx,
+ .enable_ms = sc26xx_enable_ms,
+ .break_ctl = sc26xx_break_ctl,
+ .startup = sc26xx_startup,
+ .shutdown = sc26xx_shutdown,
+ .set_termios = sc26xx_set_termios,
+ .type = sc26xx_type,
+ .release_port = sc26xx_release_port,
+ .request_port = sc26xx_request_port,
+ .config_port = sc26xx_config_port,
+ .verify_port = sc26xx_verify_port,
+};
+
+static struct uart_port *sc26xx_port;
+
+#ifdef CONFIG_SERIAL_SC26XX_CONSOLE
+static void sc26xx_console_putchar(struct uart_port *port, char c)
+{
+ unsigned long flags;
+ int limit = 1000000;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ while (limit-- > 0) {
+ if (READ_SC_PORT(port, SR) & SR_TXRDY) {
+ WRITE_SC_PORT(port, THR, c);
+ break;
+ }
+ udelay(2);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void sc26xx_console_write(struct console *con, const char *s, unsigned n)
+{
+ struct uart_port *port = sc26xx_port;
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (*s == '\n')
+ sc26xx_console_putchar(port, '\r');
+ sc26xx_console_putchar(port, *s++);
+ }
+}
+
+static int __init sc26xx_console_setup(struct console *con, char *options)
+{
+ struct uart_port *port = sc26xx_port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (port->type != PORT_SC26XX)
+ return -1;
+
+ printk(KERN_INFO "Console: ttySC%d (SC26XX)\n", con->index);
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, con, baud, parity, bits, flow);
+}
+
+static struct uart_driver sc26xx_reg;
+static struct console sc26xx_console = {
+ .name = "ttySC",
+ .write = sc26xx_console_write,
+ .device = uart_console_device,
+ .setup = sc26xx_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sc26xx_reg,
+};
+#define SC26XX_CONSOLE &sc26xx_console
+#else
+#define SC26XX_CONSOLE NULL
+#endif
+
+static struct uart_driver sc26xx_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "SC26xx",
+ .dev_name = "ttySC",
+ .major = SC26XX_MAJOR,
+ .minor = SC26XX_MINOR_START,
+ .nr = SC26XX_NR,
+ .cons = SC26XX_CONSOLE,
+};
+
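+/* Each 4-bit field of the platform data holds a signal's pin number plus
+ * one (0 means the signal is not wired); convert it to an OPR/IPR bit mask.
+ */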
+static u8 sc26xx_flags2mask(unsigned int flags, unsigned int bitpos)
+{
+ unsigned int bit = (flags >> bitpos) & 15;
+
+ return bit ? (1 << (bit - 1)) : 0;
+}
+
+static void __devinit sc26xx_init_masks(struct uart_sc26xx_port *up,
+ int line, unsigned int data)
+{
+ up->dtr_mask[line] = sc26xx_flags2mask(data, 0);
+ up->rts_mask[line] = sc26xx_flags2mask(data, 4);
+ up->dsr_mask[line] = sc26xx_flags2mask(data, 8);
+ up->cts_mask[line] = sc26xx_flags2mask(data, 12);
+ up->dcd_mask[line] = sc26xx_flags2mask(data, 16);
+ up->ri_mask[line] = sc26xx_flags2mask(data, 20);
+}
+
+static int __devinit sc26xx_probe(struct platform_device *dev)
+{
+ struct resource *res;
+ struct uart_sc26xx_port *up;
+ unsigned int *sc26xx_data = dev->dev.platform_data;
+ int err;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ up = kzalloc(sizeof *up, GFP_KERNEL);
+ if (unlikely(!up))
+ return -ENOMEM;
+
+ up->port[0].line = 0;
+ up->port[0].ops = &sc26xx_ops;
+ up->port[0].type = PORT_SC26XX;
+ up->port[0].uartclk = (29491200 / 16); /* arbitrary */
+
+ up->port[0].mapbase = res->start;
+ up->port[0].membase = ioremap_nocache(up->port[0].mapbase, 0x40);
+ up->port[0].iotype = UPIO_MEM;
+ up->port[0].irq = platform_get_irq(dev, 0);
+
+ up->port[0].dev = &dev->dev;
+
+ sc26xx_init_masks(up, 0, sc26xx_data[0]);
+
+ sc26xx_port = &up->port[0];
+
+ up->port[1].line = 1;
+ up->port[1].ops = &sc26xx_ops;
+ up->port[1].type = PORT_SC26XX;
+ up->port[1].uartclk = (29491200 / 16); /* arbitrary */
+
+ up->port[1].mapbase = up->port[0].mapbase;
+ up->port[1].membase = up->port[0].membase;
+ up->port[1].iotype = UPIO_MEM;
+ up->port[1].irq = up->port[0].irq;
+
+ up->port[1].dev = &dev->dev;
+
+ sc26xx_init_masks(up, 1, sc26xx_data[1]);
+
+ err = uart_register_driver(&sc26xx_reg);
+ if (err)
+ goto out_free_port;
+
+ sc26xx_reg.tty_driver->name_base = sc26xx_reg.minor;
+
+ err = uart_add_one_port(&sc26xx_reg, &up->port[0]);
+ if (err)
+ goto out_unregister_driver;
+
+ err = uart_add_one_port(&sc26xx_reg, &up->port[1]);
+ if (err)
+ goto out_remove_port0;
+
+ err = request_irq(up->port[0].irq, sc26xx_interrupt, 0, "sc26xx", up);
+ if (err)
+ goto out_remove_ports;
+
+ dev_set_drvdata(&dev->dev, up);
+ return 0;
+
+out_remove_ports:
+ uart_remove_one_port(&sc26xx_reg, &up->port[1]);
+out_remove_port0:
+ uart_remove_one_port(&sc26xx_reg, &up->port[0]);
+
+out_unregister_driver:
+ uart_unregister_driver(&sc26xx_reg);
+
+out_free_port:
+ kfree(up);
+ sc26xx_port = NULL;
+ return err;
+}
+
+
+static int __exit sc26xx_driver_remove(struct platform_device *dev)
+{
+ struct uart_sc26xx_port *up = dev_get_drvdata(&dev->dev);
+
+ free_irq(up->port[0].irq, up);
+
+ uart_remove_one_port(&sc26xx_reg, &up->port[0]);
+ uart_remove_one_port(&sc26xx_reg, &up->port[1]);
+
+ uart_unregister_driver(&sc26xx_reg);
+
+ kfree(up);
+ sc26xx_port = NULL;
+
+ dev_set_drvdata(&dev->dev, NULL);
+ return 0;
+}
+
+static struct platform_driver sc26xx_driver = {
+ .probe = sc26xx_probe,
+ .remove = __devexit_p(sc26xx_driver_remove),
+ .driver = {
+ .name = "SC26xx",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sc26xx_init(void)
+{
+ return platform_driver_register(&sc26xx_driver);
+}
+
+static void __exit sc26xx_exit(void)
+{
+ platform_driver_unregister(&sc26xx_driver);
+}
+
+module_init(sc26xx_init);
+module_exit(sc26xx_exit);
+
+
+MODULE_AUTHOR("Thomas Bogendörfer");
+MODULE_DESCRIPTION("SC681/SC2692 serial driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:SC26xx");
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
new file mode 100644
index 00000000..2a1db8c6
--- /dev/null
+++ b/drivers/tty/serial/serial_core.c
@@ -0,0 +1,2510 @@
+/*
+ * Driver core for serial ports
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * Copyright 1999 ARM Limited
+ * Copyright (C) 2000-2001 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/device.h>
+#include <linux/serial.h> /* for serial_state and serial_icounter_struct */
+#include <linux/serial_core.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+/*
+ * This is used to lock changes in serial line configuration.
+ */
+static DEFINE_MUTEX(port_mutex);
+
+/*
+ * lockdep: port->lock is initialized in two places, but we
+ * want only one lock-class:
+ */
+static struct lock_class_key port_lock_key;
+
+#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
+
+#ifdef CONFIG_SERIAL_CORE_CONSOLE
+#define uart_console(port) ((port)->cons && (port)->cons->index == (port)->line)
+#else
+#define uart_console(port) (0)
+#endif
+
+static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
+ struct ktermios *old_termios);
+static void __uart_wait_until_sent(struct uart_port *port, int timeout);
+static void uart_change_pm(struct uart_state *state, int pm_state);
+
+/*
+ * This routine is used by the interrupt handler to schedule processing in
+ * the software interrupt portion of the driver.
+ */
+void uart_write_wakeup(struct uart_port *port)
+{
+ struct uart_state *state = port->state;
+ /*
+ * This means you called this function _after_ the port was
+ * closed. No cookie for you.
+ */
+ BUG_ON(!state);
+ tasklet_schedule(&state->tlet);
+}
+
+static void uart_stop(struct tty_struct *tty)
+{
+ struct uart_state *state = tty->driver_data;
+ struct uart_port *port = state->uart_port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->ops->stop_tx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void __uart_start(struct tty_struct *tty)
+{
+ struct uart_state *state = tty->driver_data;
+ struct uart_port *port = state->uart_port;
+
+ if (port->ops->wake_peer)
+ port->ops->wake_peer(port);
+
+ if (!uart_circ_empty(&state->xmit) && state->xmit.buf &&
+ !tty->stopped && !tty->hw_stopped)
+ port->ops->start_tx(port);
+}
+
+static void uart_start(struct tty_struct *tty)
+{
+ struct uart_state *state = tty->driver_data;
+ struct uart_port *port = state->uart_port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ __uart_start(tty);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void uart_tasklet_action(unsigned long data)
+{
+ struct uart_state *state = (struct uart_state *)data;
+ tty_wakeup(state->port.tty);
+}
+
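+/*
+ * Update the modem control lines under the port lock, calling into the
+ * driver only when the requested state actually changes.
+ */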
+static inline void
+uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
+{
+ unsigned long flags;
+ unsigned int old;
+
+ spin_lock_irqsave(&port->lock, flags);
+ old = port->mctrl;
+ port->mctrl = (old & ~clear) | set;
+ if (old != port->mctrl)
+ port->ops->set_mctrl(port, port->mctrl);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+#define uart_set_mctrl(port, set) uart_update_mctrl(port, set, 0)
+#define uart_clear_mctrl(port, clear) uart_update_mctrl(port, 0, clear)
+
+/*
+ * Startup the port. This will be called once per open. All calls
+ * will be serialised by the per-port mutex.
+ */
+static int uart_startup(struct tty_struct *tty, struct uart_state *state, int init_hw)
+{
+ struct uart_port *uport = state->uart_port;
+ struct tty_port *port = &state->port;
+ unsigned long page;
+ int retval = 0;
+
+ if (port->flags & ASYNC_INITIALIZED)
+ return 0;
+
+ /*
+ * Set the TTY IO error marker - we will only clear this
+ * once we have successfully opened the port. Also set
+ * up the tty->alt_speed kludge
+ */
+ set_bit(TTY_IO_ERROR, &tty->flags);
+
+ if (uport->type == PORT_UNKNOWN)
+ return 0;
+
+ /*
+ * Initialise and allocate the transmit and temporary
+ * buffer.
+ */
+ if (!state->xmit.buf) {
+ /* This is protected by the per port mutex */
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ state->xmit.buf = (unsigned char *) page;
+ uart_circ_clear(&state->xmit);
+ }
+
+ retval = uport->ops->startup(uport);
+ if (retval == 0) {
+ if (uart_console(uport) && uport->cons->cflag) {
+ tty->termios->c_cflag = uport->cons->cflag;
+ uport->cons->cflag = 0;
+ }
+ /*
+ * Initialise the hardware port settings.
+ */
+ uart_change_speed(tty, state, NULL);
+
+ if (init_hw) {
+ /*
+ * Setup the RTS and DTR signals once the
+ * port is open and ready to respond.
+ */
+ if (tty->termios->c_cflag & CBAUD)
+ uart_set_mctrl(uport, TIOCM_RTS | TIOCM_DTR);
+ }
+
+ if (port->flags & ASYNC_CTS_FLOW) {
+ spin_lock_irq(&uport->lock);
+ if (!(uport->ops->get_mctrl(uport) & TIOCM_CTS))
+ tty->hw_stopped = 1;
+ spin_unlock_irq(&uport->lock);
+ }
+
+ set_bit(ASYNCB_INITIALIZED, &port->flags);
+
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+ }
+
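+ /*
+ * A privileged opener is allowed through even if the low-level
+ * startup failed.
+ */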
+ if (retval && capable(CAP_SYS_ADMIN))
+ retval = 0;
+
+ return retval;
+}
+
+/*
+ * This routine will shutdown a serial port; interrupts are disabled, and
+ * DTR is dropped if the hangup on close termio flag is on. Calls to
+ * uart_shutdown are serialised by the per-port semaphore.
+ */
+static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
+{
+ struct uart_port *uport = state->uart_port;
+ struct tty_port *port = &state->port;
+
+ /*
+ * Set the TTY IO error marker
+ */
+ if (tty)
+ set_bit(TTY_IO_ERROR, &tty->flags);
+
+ if (test_and_clear_bit(ASYNCB_INITIALIZED, &port->flags)) {
+ /*
+ * Turn off DTR and RTS early.
+ */
+ if (!tty || (tty->termios->c_cflag & HUPCL))
+ uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
+
+ /*
+ * clear delta_msr_wait queue to avoid mem leaks: we may free
+ * the irq here so the queue might never be woken up. Note
+ * that we won't end up waiting on delta_msr_wait again since
+ * any outstanding file descriptors should be pointing at
+ * hung_up_tty_fops now.
+ */
+ wake_up_interruptible(&port->delta_msr_wait);
+
+ /*
+ * Free the IRQ and disable the port.
+ */
+ uport->ops->shutdown(uport);
+
+ /*
+ * Ensure that the IRQ handler isn't running on another CPU.
+ */
+ synchronize_irq(uport->irq);
+ }
+
+ /*
+ * kill off our tasklet
+ */
+ tasklet_kill(&state->tlet);
+
+ /*
+ * Free the transmit buffer page.
+ */
+ if (state->xmit.buf) {
+ free_page((unsigned long)state->xmit.buf);
+ state->xmit.buf = NULL;
+ }
+}
+
+/**
+ * uart_update_timeout - update per-port FIFO timeout.
+ * @port: uart_port structure describing the port
+ * @cflag: termios cflag value
+ * @baud: speed of the port
+ *
+ * Set the port FIFO timeout value. The @cflag value should
+ * reflect the actual hardware settings.
+ */
+void
+uart_update_timeout(struct uart_port *port, unsigned int cflag,
+ unsigned int baud)
+{
+ unsigned int bits;
+
+ /* byte size and parity; the counts below include one start and one stop bit */
+ switch (cflag & CSIZE) {
+ case CS5:
+ bits = 7;
+ break;
+ case CS6:
+ bits = 8;
+ break;
+ case CS7:
+ bits = 9;
+ break;
+ default:
+ bits = 10;
+ break; /* CS8 */
+ }
+
+ if (cflag & CSTOPB)
+ bits++;
+ if (cflag & PARENB)
+ bits++;
+
+ /*
+ * The total number of bits to be transmitted in the fifo.
+ */
+ bits = bits * port->fifosize;
+
+ /*
+ * Figure the timeout to send the above number of bits.
+ * Add .02 seconds of slop
+ */
+ port->timeout = (HZ * bits) / baud + HZ/50;
+}
+
+EXPORT_SYMBOL(uart_update_timeout);
+
+/**
+ * uart_get_baud_rate - return baud rate for a particular port
+ * @port: uart_port structure describing the port in question.
+ * @termios: desired termios settings.
+ * @old: old termios (or NULL)
+ * @min: minimum acceptable baud rate
+ * @max: maximum acceptable baud rate
+ *
+ * Decode the termios structure into a numeric baud rate,
+ * taking account of the magic 38400 baud rate (with spd_*
+ * flags), and mapping the %B0 rate to 9600 baud.
+ *
+ * If the new baud rate is invalid, try the old termios setting.
+ * If it's still invalid, we try 9600 baud.
+ *
+ * Update the @termios structure to reflect the baud rate
+ * we're actually going to be using. Don't do this for the case
+ * where B0 is requested ("hang up").
+ */
+unsigned int
+uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old, unsigned int min, unsigned int max)
+{
+ unsigned int try, baud, altbaud = 38400;
+ int hung_up = 0;
+ upf_t flags = port->flags & UPF_SPD_MASK;
+
+ if (flags == UPF_SPD_HI)
+ altbaud = 57600;
+ else if (flags == UPF_SPD_VHI)
+ altbaud = 115200;
+ else if (flags == UPF_SPD_SHI)
+ altbaud = 230400;
+ else if (flags == UPF_SPD_WARP)
+ altbaud = 460800;
+
+ for (try = 0; try < 2; try++) {
+ baud = tty_termios_baud_rate(termios);
+
+ /*
+ * The spd_hi, spd_vhi, spd_shi, spd_warp kludge...
+ * Die! Die! Die!
+ */
+ if (baud == 38400)
+ baud = altbaud;
+
+ /*
+ * Special case: B0 rate.
+ */
+ if (baud == 0) {
+ hung_up = 1;
+ baud = 9600;
+ }
+
+ if (baud >= min && baud <= max)
+ return baud;
+
+ /*
+ * Oops, the requested baud rate is out of range. Try
+ * again with the old baud rate if possible.
+ */
+ termios->c_cflag &= ~CBAUD;
+ if (old) {
+ baud = tty_termios_baud_rate(old);
+ if (!hung_up)
+ tty_termios_encode_baud_rate(termios,
+ baud, baud);
+ old = NULL;
+ continue;
+ }
+
+ /*
+ * As a last resort, if the range cannot be met then clip to
+ * the nearest chip supported rate.
+ */
+ if (!hung_up) {
+ if (baud <= min)
+ tty_termios_encode_baud_rate(termios,
+ min + 1, min + 1);
+ else
+ tty_termios_encode_baud_rate(termios,
+ max - 1, max - 1);
+ }
+ }
+ /* Should never happen */
+ WARN_ON(1);
+ return 0;
+}
+
+EXPORT_SYMBOL(uart_get_baud_rate);
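+
+/*
+ * For example, a request for B38400 on a port flagged UPF_SPD_VHI is
+ * mapped to 115200 baud, while a B0 request is treated as 9600 baud
+ * with hung_up set so the termios encoding is left untouched.
+ */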
+
+/**
+ * uart_get_divisor - return uart clock divisor
+ * @port: uart_port structure describing the port.
+ * @baud: desired baud rate
+ *
+ * Calculate the uart clock divisor for the port.
+ */
+unsigned int
+uart_get_divisor(struct uart_port *port, unsigned int baud)
+{
+ unsigned int quot;
+
+ /*
+ * Old custom speed handling.
+ */
+ if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
+ quot = port->custom_divisor;
+ else
+ quot = (port->uartclk + (8 * baud)) / (16 * baud);
+
+ return quot;
+}
+
+EXPORT_SYMBOL(uart_get_divisor);
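+
+/*
+ * Worked example: with the classic 1.8432MHz UART clock and 9600 baud,
+ * quot = (1843200 + 8 * 9600) / (16 * 9600) = 12, the standard 16550
+ * divisor.  The 8 * baud term rounds the quotient to the nearest
+ * integer rather than truncating it.
+ */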
+
+/* FIXME: Consistent locking policy */
+static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
+ struct ktermios *old_termios)
+{
+ struct tty_port *port = &state->port;
+ struct uart_port *uport = state->uart_port;
+ struct ktermios *termios;
+
+ /*
+ * If we have no tty, termios, or the port does not exist,
+ * then we can't set the parameters for this port.
+ */
+ if (!tty || !tty->termios || uport->type == PORT_UNKNOWN)
+ return;
+
+ termios = tty->termios;
+
+ /*
+ * Set flags based on termios cflag
+ */
+ if (termios->c_cflag & CRTSCTS)
+ set_bit(ASYNCB_CTS_FLOW, &port->flags);
+ else
+ clear_bit(ASYNCB_CTS_FLOW, &port->flags);
+
+ if (termios->c_cflag & CLOCAL)
+ clear_bit(ASYNCB_CHECK_CD, &port->flags);
+ else
+ set_bit(ASYNCB_CHECK_CD, &port->flags);
+
+ uport->ops->set_termios(uport, termios, old_termios);
+}
+
+static inline int __uart_put_char(struct uart_port *port,
+ struct circ_buf *circ, unsigned char c)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (!circ->buf)
+ return 0;
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (uart_circ_chars_free(circ) != 0) {
+ circ->buf[circ->head] = c;
+ circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ return ret;
+}
+
+static int uart_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ struct uart_state *state = tty->driver_data;
+
+ return __uart_put_char(state->uart_port, &state->xmit, ch);
+}
+
+static void uart_flush_chars(struct tty_struct *tty)
+{
+ uart_start(tty);
+}
+
+static int uart_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ struct uart_state *state = tty->driver_data;
+ struct uart_port *port;
+ struct circ_buf *circ;
+ unsigned long flags;
+ int c, ret = 0;
+
+ /*
+ * This means you called this function _after_ the port was
+ * closed. No cookie for you.
+ */
+ if (!state) {
+ WARN_ON(1);
+ return -EL3HLT;
+ }
+
+ port = state->uart_port;
+ circ = &state->xmit;
+
+ if (!circ->buf)
+ return 0;
+
+ spin_lock_irqsave(&port->lock, flags);
+ while (1) {
+ c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
+ if (count < c)
+ c = count;
+ if (c <= 0)
+ break;
+ memcpy(circ->buf + circ->head, buf, c);
+ circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
+ buf += c;
+ count -= c;
+ ret += c;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ uart_start(tty);
+ return ret;
+}
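+
+/*
+ * The circular buffer indices are wrapped with "& (UART_XMIT_SIZE - 1)",
+ * which only works because UART_XMIT_SIZE is a power of two (normally
+ * one page, 4096 bytes).  For example, with head at 4094 and a 4-byte
+ * copy, the new head becomes (4094 + 4) & 4095 = 2.
+ */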
+
+static int uart_write_room(struct tty_struct *tty)
+{
+ struct uart_state *state = tty->driver_data;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&state->uart_port->lock, flags);
+ ret = uart_circ_chars_free(&state->xmit);
+ spin_unlock_irqrestore(&state->uart_port->lock, flags);
+ return ret;
+}
+
+static int uart_chars_in_buffer(struct tty_struct *tty)
+{
+ struct uart_state *state = tty->driver_data;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&state->uart_port->lock, flags);
+ ret = uart_circ_chars_pending(&state->xmit);
+ spin_unlock_irqrestore(&state->uart_port->lock, flags);
+ return ret;
+}
+
+static void uart_flush_buffer(struct tty_struct *tty)
+{
+ struct uart_state *state = tty->driver_data;
+ struct uart_port *port;
+ unsigned long flags;
+
+ /*
+ * This means you called this function _after_ the port was
+ * closed. No cookie for you.
+ */
+ if (!state) {
+ WARN_ON(1);
+ return;
+ }
+
+ port = state->uart_port;
+ pr_debug("uart_flush_buffer(%d) called\n", tty->index);
+
+ spin_lock_irqsave(&port->lock, flags);
+ uart_circ_clear(&state->xmit);
+ if (port->ops->flush_buffer)
+ port->ops->flush_buffer(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+ tty_wakeup(tty);
+}
+
+/*
+ * This function is used to send a high-priority XON/XOFF character to
+ * the device
+ */
+static void uart_send_xchar(struct tty_struct *tty, char ch)
+{
+ struct uart_state *state = tty->driver_data;
+ struct uart_port *port = state->uart_port;
+ unsigned long flags;
+
+ if (port->ops->send_xchar)
+ port->ops->send_xchar(port, ch);
+ else {
+ port->x_char = ch;
+ if (ch) {
+ spin_lock_irqsave(&port->lock, flags);
+ port->ops->start_tx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+ }
+}
+
+static void uart_throttle(struct tty_struct *tty)
+{
+ struct uart_state *state = tty->driver_data;
+
+ if (I_IXOFF(tty))
+ uart_send_xchar(tty, STOP_CHAR(tty));
+
+ if (tty->termios->c_cflag & CRTSCTS)
+ uart_clear_mctrl(state->uart_port, TIOCM_RTS);
+}
+
+static void uart_unthrottle(struct tty_struct *tty)
+{
+ struct uart_state *state = tty->driver_data;
+ struct uart_port *port = state->uart_port;
+
+ if (I_IXOFF(tty)) {
+ if (port->x_char)
+ port->x_char = 0;
+ else
+ uart_send_xchar(tty, START_CHAR(tty));
+ }
+
+ if (tty->termios->c_cflag & CRTSCTS)
+ uart_set_mctrl(port, TIOCM_RTS);
+}
+
+static int uart_get_info(struct uart_state *state,
+ struct serial_struct __user *retinfo)
+{
+ struct uart_port *uport = state->uart_port;
+ struct tty_port *port = &state->port;
+ struct serial_struct tmp;
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ /* Ensure the state we copy is consistent and no hardware changes
+ occur as we go */
+ mutex_lock(&port->mutex);
+
+ tmp.type = uport->type;
+ tmp.line = uport->line;
+ tmp.port = uport->iobase;
+ if (HIGH_BITS_OFFSET)
+ tmp.port_high = (long) uport->iobase >> HIGH_BITS_OFFSET;
+ tmp.irq = uport->irq;
+ tmp.flags = uport->flags;
+ tmp.xmit_fifo_size = uport->fifosize;
+ tmp.baud_base = uport->uartclk / 16;
+ tmp.close_delay = port->close_delay / 10;
+ tmp.closing_wait = port->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ ASYNC_CLOSING_WAIT_NONE :
+ port->closing_wait / 10;
+ tmp.custom_divisor = uport->custom_divisor;
+ tmp.hub6 = uport->hub6;
+ tmp.io_type = uport->iotype;
+ tmp.iomem_reg_shift = uport->regshift;
+ tmp.iomem_base = (void *)(unsigned long)uport->mapbase;
+
+ mutex_unlock(&port->mutex);
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+ return 0;
+}
+
+static int uart_set_info(struct tty_struct *tty, struct uart_state *state,
+ struct serial_struct __user *newinfo)
+{
+ struct serial_struct new_serial;
+ struct uart_port *uport = state->uart_port;
+ struct tty_port *port = &state->port;
+ unsigned long new_port;
+ unsigned int change_irq, change_port, closing_wait;
+ unsigned int old_custom_divisor, close_delay;
+ upf_t old_flags, new_flags;
+ int retval = 0;
+
+ if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
+ return -EFAULT;
+
+ new_port = new_serial.port;
+ if (HIGH_BITS_OFFSET)
+ new_port += (unsigned long) new_serial.port_high << HIGH_BITS_OFFSET;
+
+ new_serial.irq = irq_canonicalize(new_serial.irq);
+ close_delay = new_serial.close_delay * 10;
+ closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
+
+ /*
+ * The port mutex protects port->count and is also used
+ * to prevent opens. Holding it also acts as the port
+ * configuration lock, making sure that a module
+ * insertion/removal doesn't change anything under us.
+ */
+ mutex_lock(&port->mutex);
+
+ change_irq = !(uport->flags & UPF_FIXED_PORT)
+ && new_serial.irq != uport->irq;
+
+ /*
+ * Since changing the 'type' of the port changes its resource
+ * allocations, we should treat type changes the same as
+ * IO port changes.
+ */
+ change_port = !(uport->flags & UPF_FIXED_PORT)
+ && (new_port != uport->iobase ||
+ (unsigned long)new_serial.iomem_base != uport->mapbase ||
+ new_serial.hub6 != uport->hub6 ||
+ new_serial.io_type != uport->iotype ||
+ new_serial.iomem_reg_shift != uport->regshift ||
+ new_serial.type != uport->type);
+
+ old_flags = uport->flags;
+ new_flags = new_serial.flags;
+ old_custom_divisor = uport->custom_divisor;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ retval = -EPERM;
+ if (change_irq || change_port ||
+ (new_serial.baud_base != uport->uartclk / 16) ||
+ (close_delay != port->close_delay) ||
+ (closing_wait != port->closing_wait) ||
+ (new_serial.xmit_fifo_size &&
+ new_serial.xmit_fifo_size != uport->fifosize) ||
+ (((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0))
+ goto exit;
+ uport->flags = ((uport->flags & ~UPF_USR_MASK) |
+ (new_flags & UPF_USR_MASK));
+ uport->custom_divisor = new_serial.custom_divisor;
+ goto check_and_exit;
+ }
+
+ /*
+ * Ask the low level driver to verify the settings.
+ */
+ if (uport->ops->verify_port)
+ retval = uport->ops->verify_port(uport, &new_serial);
+
+ if ((new_serial.irq >= nr_irqs) || (new_serial.irq < 0) ||
+ (new_serial.baud_base < 9600))
+ retval = -EINVAL;
+
+ if (retval)
+ goto exit;
+
+ if (change_port || change_irq) {
+ retval = -EBUSY;
+
+ /*
+ * Make sure that we are the sole user of this port.
+ */
+ if (tty_port_users(port) > 1)
+ goto exit;
+
+ /*
+ * We need to shutdown the serial port at the old
+ * port/type/irq combination.
+ */
+ uart_shutdown(tty, state);
+ }
+
+ if (change_port) {
+ unsigned long old_iobase, old_mapbase;
+ unsigned int old_type, old_iotype, old_hub6, old_shift;
+
+ old_iobase = uport->iobase;
+ old_mapbase = uport->mapbase;
+ old_type = uport->type;
+ old_hub6 = uport->hub6;
+ old_iotype = uport->iotype;
+ old_shift = uport->regshift;
+
+ /*
+ * Free and release old regions
+ */
+ if (old_type != PORT_UNKNOWN)
+ uport->ops->release_port(uport);
+
+ uport->iobase = new_port;
+ uport->type = new_serial.type;
+ uport->hub6 = new_serial.hub6;
+ uport->iotype = new_serial.io_type;
+ uport->regshift = new_serial.iomem_reg_shift;
+ uport->mapbase = (unsigned long)new_serial.iomem_base;
+
+ /*
+ * Claim and map the new regions
+ */
+ if (uport->type != PORT_UNKNOWN) {
+ retval = uport->ops->request_port(uport);
+ } else {
+ /* Always success - Jean II */
+ retval = 0;
+ }
+
+ /*
+ * If we fail to request resources for the
+ * new port, try to restore the old settings.
+ */
+ if (retval && old_type != PORT_UNKNOWN) {
+ uport->iobase = old_iobase;
+ uport->type = old_type;
+ uport->hub6 = old_hub6;
+ uport->iotype = old_iotype;
+ uport->regshift = old_shift;
+ uport->mapbase = old_mapbase;
+ retval = uport->ops->request_port(uport);
+ /*
+ * If we failed to restore the old settings,
+ * we fail like this.
+ */
+ if (retval)
+ uport->type = PORT_UNKNOWN;
+
+ /*
+ * We failed anyway.
+ */
+ retval = -EBUSY;
+ /* Added to return the correct error -Ram Gupta */
+ goto exit;
+ }
+ }
+
+ if (change_irq)
+ uport->irq = new_serial.irq;
+ if (!(uport->flags & UPF_FIXED_PORT))
+ uport->uartclk = new_serial.baud_base * 16;
+ uport->flags = (uport->flags & ~UPF_CHANGE_MASK) |
+ (new_flags & UPF_CHANGE_MASK);
+ uport->custom_divisor = new_serial.custom_divisor;
+ port->close_delay = close_delay;
+ port->closing_wait = closing_wait;
+ if (new_serial.xmit_fifo_size)
+ uport->fifosize = new_serial.xmit_fifo_size;
+ if (port->tty)
+ port->tty->low_latency =
+ (uport->flags & UPF_LOW_LATENCY) ? 1 : 0;
+
+ check_and_exit:
+ retval = 0;
+ if (uport->type == PORT_UNKNOWN)
+ goto exit;
+ if (port->flags & ASYNC_INITIALIZED) {
+ if (((old_flags ^ uport->flags) & UPF_SPD_MASK) ||
+ old_custom_divisor != uport->custom_divisor) {
+ /*
+ * If they're setting up a custom divisor or speed,
+ * instead of clearing it, then bitch about it. No
+ * need to rate-limit; it's CAP_SYS_ADMIN only.
+ */
+ if (uport->flags & UPF_SPD_MASK) {
+ char buf[64];
+ printk(KERN_NOTICE
+ "%s sets custom speed on %s. This "
+ "is deprecated.\n", current->comm,
+ tty_name(port->tty, buf));
+ }
+ uart_change_speed(tty, state, NULL);
+ }
+ } else
+ retval = uart_startup(tty, state, 1);
+ exit:
+ mutex_unlock(&port->mutex);
+ return retval;
+}
+
+/**
+ * uart_get_lsr_info - get line status register info
+ * @tty: tty associated with the UART
+ * @state: UART being queried
+ * @value: returned modem value
+ *
+ * Note: uart_ioctl protects us against hangups.
+ */
+static int uart_get_lsr_info(struct tty_struct *tty,
+ struct uart_state *state, unsigned int __user *value)
+{
+ struct uart_port *uport = state->uart_port;
+ unsigned int result;
+
+ result = uport->ops->tx_empty(uport);
+
+ /*
+ * If we're about to load something into the transmit
+ * register, we'll pretend the transmitter isn't empty to
+ * avoid a race condition (depending on when the transmit
+ * interrupt happens).
+ */
+ if (uport->x_char ||
+ ((uart_circ_chars_pending(&state->xmit) > 0) &&
+ !tty->stopped && !tty->hw_stopped))
+ result &= ~TIOCSER_TEMT;
+
+ return put_user(result, value);
+}
+
+static int uart_tiocmget(struct tty_struct *tty)
+{
+ struct uart_state *state = tty->driver_data;
+ struct tty_port *port = &state->port;
+ struct uart_port *uport = state->uart_port;
+ int result = -EIO;
+
+ mutex_lock(&port->mutex);
+ if (!(tty->flags & (1 << TTY_IO_ERROR))) {
+ result = uport->mctrl;
+ spin_lock_irq(&uport->lock);
+ result |= uport->ops->get_mctrl(uport);
+ spin_unlock_irq(&uport->lock);
+ }
+ mutex_unlock(&port->mutex);
+
+ return result;
+}
+
+static int
+uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
+{
+ struct uart_state *state = tty->driver_data;
+ struct uart_port *uport = state->uart_port;
+ struct tty_port *port = &state->port;
+ int ret = -EIO;
+
+ mutex_lock(&port->mutex);
+ if (!(tty->flags & (1 << TTY_IO_ERROR))) {
+ uart_update_mctrl(uport, set, clear);
+ ret = 0;
+ }
+ mutex_unlock(&port->mutex);
+ return ret;
+}
+
+static int uart_break_ctl(struct tty_struct *tty, int break_state)
+{
+ struct uart_state *state = tty->driver_data;
+ struct tty_port *port = &state->port;
+ struct uart_port *uport = state->uart_port;
+
+ mutex_lock(&port->mutex);
+
+ if (uport->type != PORT_UNKNOWN)
+ uport->ops->break_ctl(uport, break_state);
+
+ mutex_unlock(&port->mutex);
+ return 0;
+}
+
+static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state)
+{
+ struct uart_port *uport = state->uart_port;
+ struct tty_port *port = &state->port;
+ int flags, ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+ * Take the per-port mutex. This prevents count from
+ * changing, and hence any extra opens of the port while
+ * we're auto-configuring.
+ */
+ if (mutex_lock_interruptible(&port->mutex))
+ return -ERESTARTSYS;
+
+ ret = -EBUSY;
+ if (tty_port_users(port) == 1) {
+ uart_shutdown(tty, state);
+
+ /*
+ * If we already have a port type configured,
+ * we must release its resources.
+ */
+ if (uport->type != PORT_UNKNOWN)
+ uport->ops->release_port(uport);
+
+ flags = UART_CONFIG_TYPE;
+ if (uport->flags & UPF_AUTO_IRQ)
+ flags |= UART_CONFIG_IRQ;
+
+ /*
+ * This will claim the port's resources if
+ * a port is found.
+ */
+ uport->ops->config_port(uport, flags);
+
+ ret = uart_startup(tty, state, 1);
+ }
+ mutex_unlock(&port->mutex);
+ return ret;
+}
+
+/*
+ * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
+ * - mask passed in arg for lines of interest
+ * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
+ * Caller should use TIOCGICOUNT to see which one it was
+ *
+ * FIXME: This wants extracting into a common all driver implementation
+ * of TIOCMWAIT using tty_port.
+ */
+static int
+uart_wait_modem_status(struct uart_state *state, unsigned long arg)
+{
+ struct uart_port *uport = state->uart_port;
+ struct tty_port *port = &state->port;
+ DECLARE_WAITQUEUE(wait, current);
+ struct uart_icount cprev, cnow;
+ int ret;
+
+ /*
+ * note the counters on entry
+ */
+ spin_lock_irq(&uport->lock);
+ memcpy(&cprev, &uport->icount, sizeof(struct uart_icount));
+
+ /*
+ * Force modem status interrupts on
+ */
+ uport->ops->enable_ms(uport);
+ spin_unlock_irq(&uport->lock);
+
+ add_wait_queue(&port->delta_msr_wait, &wait);
+ for (;;) {
+ spin_lock_irq(&uport->lock);
+ memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
+ spin_unlock_irq(&uport->lock);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
+ ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
+ ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
+ ret = 0;
+ break;
+ }
+
+ schedule();
+
+ /* see if a signal did it */
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ cprev = cnow;
+ }
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&port->delta_msr_wait, &wait);
+
+ return ret;
+}
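+
+/*
+ * A minimal user space sketch of this interface: assuming fd is an
+ * open serial device,
+ *
+ * ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS);
+ *
+ * blocks until carrier detect or CTS changes state (or a signal
+ * arrives), after which TIOCGICOUNT shows which counter moved.
+ */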
+
+/*
+ * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
+ * Return: write counters to the user passed counter struct
+ * NB: both 1->0 and 0->1 transitions are counted except for
+ * RI where only 0->1 is counted.
+ */
+static int uart_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+{
+ struct uart_state *state = tty->driver_data;
+ struct uart_icount cnow;
+ struct uart_port *uport = state->uart_port;
+
+ spin_lock_irq(&uport->lock);
+ memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
+ spin_unlock_irq(&uport->lock);
+
+ icount->cts = cnow.cts;
+ icount->dsr = cnow.dsr;
+ icount->rng = cnow.rng;
+ icount->dcd = cnow.dcd;
+ icount->rx = cnow.rx;
+ icount->tx = cnow.tx;
+ icount->frame = cnow.frame;
+ icount->overrun = cnow.overrun;
+ icount->parity = cnow.parity;
+ icount->brk = cnow.brk;
+ icount->buf_overrun = cnow.buf_overrun;
+
+ return 0;
+}
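+
+/*
+ * From user space the counters are read with something like
+ *
+ * struct serial_icounter_struct ic;
+ * ioctl(fd, TIOCGICOUNT, &ic);
+ *
+ * where fd is an open serial device; ic.dcd, ic.cts and friends then
+ * hold the totals copied above.
+ */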
+
+/*
+ * Called via sys_ioctl. We can use spin_lock_irq() here.
+ */
+static int
+uart_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct uart_state *state = tty->driver_data;
+ struct tty_port *port = &state->port;
+ void __user *uarg = (void __user *)arg;
+ int ret = -ENOIOCTLCMD;
+
+ /*
+ * These ioctls don't rely on the hardware to be present.
+ */
+ switch (cmd) {
+ case TIOCGSERIAL:
+ ret = uart_get_info(state, uarg);
+ break;
+
+ case TIOCSSERIAL:
+ ret = uart_set_info(tty, state, uarg);
+ break;
+
+ case TIOCSERCONFIG:
+ ret = uart_do_autoconfig(tty, state);
+ break;
+
+ case TIOCSERGWILD: /* obsolete */
+ case TIOCSERSWILD: /* obsolete */
+ ret = 0;
+ break;
+ }
+
+ if (ret != -ENOIOCTLCMD)
+ goto out;
+
+ if (tty->flags & (1 << TTY_IO_ERROR)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * The following should only be used when hardware is present.
+ */
+ switch (cmd) {
+ case TIOCMIWAIT:
+ ret = uart_wait_modem_status(state, arg);
+ break;
+ }
+
+ if (ret != -ENOIOCTLCMD)
+ goto out;
+
+ mutex_lock(&port->mutex);
+
+ if (tty->flags & (1 << TTY_IO_ERROR)) {
+ ret = -EIO;
+ goto out_up;
+ }
+
+ /*
+ * All these rely on hardware being present and need to be
+ * protected against the tty being hung up.
+ */
+ switch (cmd) {
+ case TIOCSERGETLSR: /* Get line status register */
+ ret = uart_get_lsr_info(tty, state, uarg);
+ break;
+
+ default: {
+ struct uart_port *uport = state->uart_port;
+ if (uport->ops->ioctl)
+ ret = uport->ops->ioctl(uport, cmd, arg);
+ break;
+ }
+ }
+out_up:
+ mutex_unlock(&port->mutex);
+out:
+ return ret;
+}
+
+static void uart_set_ldisc(struct tty_struct *tty)
+{
+ struct uart_state *state = tty->driver_data;
+ struct uart_port *uport = state->uart_port;
+
+ if (uport->ops->set_ldisc)
+ uport->ops->set_ldisc(uport, tty->termios->c_line);
+}
+
+static void uart_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios)
+{
+ struct uart_state *state = tty->driver_data;
+ unsigned long flags;
+ unsigned int cflag = tty->termios->c_cflag;
+
+ /*
+ * These are the bits that are used to setup various
+ * flags in the low level driver. We can ignore the Bfoo
+ * bits in c_cflag; c_[io]speed will always be set
+ * appropriately by set_termios() in tty_ioctl.c
+ */
+#define RELEVANT_IFLAG(iflag) ((iflag) & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
+ if ((cflag ^ old_termios->c_cflag) == 0 &&
+ tty->termios->c_ospeed == old_termios->c_ospeed &&
+ tty->termios->c_ispeed == old_termios->c_ispeed &&
+ RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0) {
+ return;
+ }
+
+ uart_change_speed(tty, state, old_termios);
+
+ /* Handle transition to B0 status */
+ if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD))
+ uart_clear_mctrl(state->uart_port, TIOCM_RTS | TIOCM_DTR);
+ /* Handle transition away from B0 status */
+ else if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
+ unsigned int mask = TIOCM_DTR;
+ if (!(cflag & CRTSCTS) ||
+ !test_bit(TTY_THROTTLED, &tty->flags))
+ mask |= TIOCM_RTS;
+ uart_set_mctrl(state->uart_port, mask);
+ }
+
+ /* Handle turning off CRTSCTS */
+ if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) {
+ spin_lock_irqsave(&state->uart_port->lock, flags);
+ tty->hw_stopped = 0;
+ __uart_start(tty);
+ spin_unlock_irqrestore(&state->uart_port->lock, flags);
+ }
+ /* Handle turning on CRTSCTS */
+ else if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
+ spin_lock_irqsave(&state->uart_port->lock, flags);
+ if (!(state->uart_port->ops->get_mctrl(state->uart_port) & TIOCM_CTS)) {
+ tty->hw_stopped = 1;
+ state->uart_port->ops->stop_tx(state->uart_port);
+ }
+ spin_unlock_irqrestore(&state->uart_port->lock, flags);
+ }
+}
+
+/*
+ * In 2.4.5, calls to this will be serialized via the BKL in
+ * linux/drivers/char/tty_io.c:tty_release()
+ * linux/drivers/char/tty_io.c:do_tty_hangup()
+ */
+static void uart_close(struct tty_struct *tty, struct file *filp)
+{
+ struct uart_state *state = tty->driver_data;
+ struct tty_port *port;
+ struct uart_port *uport;
+ unsigned long flags;
+
+ BUG_ON(!tty_locked());
+
+ if (!state)
+ return;
+
+ uport = state->uart_port;
+ port = &state->port;
+
+ pr_debug("uart_close(%d) called\n", uport->line);
+
+ mutex_lock(&port->mutex);
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (tty_hung_up_p(filp)) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ goto done;
+ }
+
+ if ((tty->count == 1) && (port->count != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. port->count should always
+ * be one in these conditions. If it's greater than
+ * one, we've got real problems, since it means the
+ * serial port won't be shutdown.
+ */
+ printk(KERN_ERR "uart_close: bad serial port count; tty->count is 1, "
+ "port->count is %d\n", port->count);
+ port->count = 1;
+ }
+ if (--port->count < 0) {
+ printk(KERN_ERR "uart_close: bad serial port count for %s: %d\n",
+ tty->name, port->count);
+ port->count = 0;
+ }
+ if (port->count) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ goto done;
+ }
+
+ /*
+ * Now we wait for the transmit buffer to clear; and we notify
+ * the line discipline to only process XON/XOFF characters by
+ * setting tty->closing.
+ */
+ tty->closing = 1;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
+ /*
+ * hack: open-coded tty_wait_until_sent to avoid
+ * recursive tty_lock
+ */
+ long timeout = msecs_to_jiffies(port->closing_wait);
+ if (wait_event_interruptible_timeout(tty->write_wait,
+ !tty_chars_in_buffer(tty), timeout) >= 0)
+ __uart_wait_until_sent(uport, timeout);
+ }
+
+ /*
+ * At this point, we stop accepting input. To do this, we
+ * disable the receive line status interrupts.
+ */
+ if (port->flags & ASYNC_INITIALIZED) {
+ unsigned long flags;
+ spin_lock_irqsave(&uport->lock, flags);
+ uport->ops->stop_rx(uport);
+ spin_unlock_irqrestore(&uport->lock, flags);
+ /*
+ * Before we drop DTR, make sure the UART transmitter
+ * has completely drained; this is especially
+ * important if there is a transmit FIFO!
+ */
+ __uart_wait_until_sent(uport, uport->timeout);
+ }
+
+ uart_shutdown(tty, state);
+ uart_flush_buffer(tty);
+
+ tty_ldisc_flush(tty);
+
+ tty_port_tty_set(port, NULL);
+ spin_lock_irqsave(&port->lock, flags);
+ tty->closing = 0;
+
+ if (port->blocked_open) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ if (port->close_delay)
+ msleep_interruptible(port->close_delay);
+ spin_lock_irqsave(&port->lock, flags);
+ } else if (!uart_console(uport)) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ uart_change_pm(state, 3);
+ spin_lock_irqsave(&port->lock, flags);
+ }
+
+ /*
+ * Wake up anyone trying to open this port.
+ */
+ clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
+ spin_unlock_irqrestore(&port->lock, flags);
+ wake_up_interruptible(&port->open_wait);
+
+done:
+ mutex_unlock(&port->mutex);
+}
+
+static void __uart_wait_until_sent(struct uart_port *port, int timeout)
+{
+ unsigned long char_time, expire;
+
+ if (port->type == PORT_UNKNOWN || port->fifosize == 0)
+ return;
+
+ /*
+ * Set the check interval to be 1/5 of the estimated time to
+ * send a single character, and make it at least 1. The check
+ * interval should also be less than the timeout.
+ *
+ * Note: we have to use pretty tight timings here to satisfy
+ * the NIST-PCTS.
+ */
+ char_time = (port->timeout - HZ/50) / port->fifosize;
+ char_time = char_time / 5;
+ if (char_time == 0)
+ char_time = 1;
+ if (timeout && timeout < char_time)
+ char_time = timeout;
+
+ /*
+ * If the transmitter hasn't cleared in twice the approximate
+ * amount of time to send the entire FIFO, it probably won't
+ * ever clear. This assumes the UART isn't doing flow
+ * control, which is currently the case. Hence, if it ever
+ * takes longer than port->timeout, this is probably due to a
+ * UART bug of some kind. So, we clamp the timeout parameter at
+ * 2*port->timeout.
+ */
+ if (timeout == 0 || timeout > 2 * port->timeout)
+ timeout = 2 * port->timeout;
+
+ expire = jiffies + timeout;
+
+ pr_debug("uart_wait_until_sent(%d), jiffies=%lu, expire=%lu...\n",
+ port->line, jiffies, expire);
+
+ /*
+ * Check whether the transmitter is empty every 'char_time'.
+ * 'timeout' / 'expire' give us the maximum amount of time
+ * we wait.
+ */
+ while (!port->ops->tx_empty(port)) {
+ msleep_interruptible(jiffies_to_msecs(char_time));
+ if (signal_pending(current))
+ break;
+ if (time_after(jiffies, expire))
+ break;
+ }
+}
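+
+/*
+ * Worked example, assuming HZ=1000: a 16-byte FIFO at 9600 baud with
+ * 8N1 framing has port->timeout = 16 + 20 = 36 jiffies, so char_time
+ * works out as (36 - 20) / 16 / 5 = 0 and is clamped to 1 jiffy; the
+ * transmitter is then polled every millisecond, with the overall wait
+ * clamped to 2 * 36 = 72 jiffies.
+ */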
+
+static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct uart_state *state = tty->driver_data;
+ struct uart_port *port = state->uart_port;
+
+ tty_lock();
+ __uart_wait_until_sent(port, timeout);
+ tty_unlock();
+}
+
+/*
+ * This is called with the BKL held in
+ * linux/drivers/char/tty_io.c:do_tty_hangup()
+ * We're called from the eventd thread, so we can sleep for
+ * a _short_ time only.
+ */
+static void uart_hangup(struct tty_struct *tty)
+{
+ struct uart_state *state = tty->driver_data;
+ struct tty_port *port = &state->port;
+ unsigned long flags;
+
+ BUG_ON(!tty_locked());
+ pr_debug("uart_hangup(%d)\n", state->uart_port->line);
+
+ mutex_lock(&port->mutex);
+ if (port->flags & ASYNC_NORMAL_ACTIVE) {
+ uart_flush_buffer(tty);
+ uart_shutdown(tty, state);
+ spin_lock_irqsave(&port->lock, flags);
+ port->count = 0;
+ clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
+ spin_unlock_irqrestore(&port->lock, flags);
+ tty_port_tty_set(port, NULL);
+ wake_up_interruptible(&port->open_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
+ }
+ mutex_unlock(&port->mutex);
+}
+
+static int uart_carrier_raised(struct tty_port *port)
+{
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport = state->uart_port;
+ int mctrl;
+ spin_lock_irq(&uport->lock);
+ uport->ops->enable_ms(uport);
+ mctrl = uport->ops->get_mctrl(uport);
+ spin_unlock_irq(&uport->lock);
+ if (mctrl & TIOCM_CAR)
+ return 1;
+ return 0;
+}
+
+static void uart_dtr_rts(struct tty_port *port, int onoff)
+{
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport = state->uart_port;
+
+ if (onoff)
+ uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
+ else
+ uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
+}
+
+static struct uart_state *uart_get(struct uart_driver *drv, int line)
+{
+ struct uart_state *state;
+ struct tty_port *port;
+ int ret = 0;
+
+ state = drv->state + line;
+ port = &state->port;
+ if (mutex_lock_interruptible(&port->mutex)) {
+ ret = -ERESTARTSYS;
+ goto err;
+ }
+
+ port->count++;
+ if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
+ ret = -ENXIO;
+ goto err_unlock;
+ }
+ return state;
+
+ err_unlock:
+ port->count--;
+ mutex_unlock(&port->mutex);
+ err:
+ return ERR_PTR(ret);
+}
+
+/*
+ * calls to uart_open are serialised by the BKL in
+ * fs/char_dev.c:chrdev_open()
+ * Note that if this fails, then uart_close() _will_ be called.
+ *
+ * In time, we want to scrap the "opening nonpresent ports"
+ * behaviour and implement an alternative way for setserial
+ * to set base addresses/ports/types. This will allow us to
+ * get rid of a certain amount of extra tests.
+ */
+static int uart_open(struct tty_struct *tty, struct file *filp)
+{
+ struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state;
+ struct uart_state *state;
+ struct tty_port *port;
+ int retval, line = tty->index;
+
+ BUG_ON(!tty_locked());
+ pr_debug("uart_open(%d) called\n", line);
+
+ /*
+ * We take the per-port mutex inside uart_get to guarantee that we won't
+ * be re-entered while allocating the state structure, or while we
+ * request any IRQs that the driver may need. This also has the nice
+ * side-effect that it delays the action of uart_hangup, so we can
+ * guarantee that state->port.tty will always contain something
+ * reasonable.
+ */
+ state = uart_get(drv, line);
+ if (IS_ERR(state)) {
+ retval = PTR_ERR(state);
+ goto fail;
+ }
+ port = &state->port;
+
+ /*
+ * Once we set tty->driver_data here, we are guaranteed that
+ * uart_close() will decrement the driver module use count.
+ * Any failures from here onwards should not touch the count.
+ */
+ tty->driver_data = state;
+ state->uart_port->state = state;
+ tty->low_latency = (state->uart_port->flags & UPF_LOW_LATENCY) ? 1 : 0;
+ tty->alt_speed = 0;
+ tty_port_tty_set(port, tty);
+
+ /*
+ * If the port is in the middle of closing, bail out now.
+ */
+ if (tty_hung_up_p(filp)) {
+ retval = -EAGAIN;
+ port->count--;
+ mutex_unlock(&port->mutex);
+ goto fail;
+ }
+
+ /*
+ * Make sure the device is in D0 state.
+ */
+ if (port->count == 1)
+ uart_change_pm(state, 0);
+
+ /*
+ * Start up the serial port.
+ */
+ retval = uart_startup(tty, state, 0);
+
+ /*
+ * If we succeeded, wait until the port is ready.
+ */
+ mutex_unlock(&port->mutex);
+ if (retval == 0)
+ retval = tty_port_block_til_ready(port, tty, filp);
+
+fail:
+ return retval;
+}
+
+static const char *uart_type(struct uart_port *port)
+{
+ const char *str = NULL;
+
+ if (port->ops->type)
+ str = port->ops->type(port);
+
+ if (!str)
+ str = "unknown";
+
+ return str;
+}
+
+#ifdef CONFIG_PROC_FS
+
+static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
+{
+ struct uart_state *state = drv->state + i;
+ struct tty_port *port = &state->port;
+ int pm_state;
+ struct uart_port *uport = state->uart_port;
+ char stat_buf[32];
+ unsigned int status;
+ int mmio;
+
+ if (!uport)
+ return;
+
+ mmio = uport->iotype >= UPIO_MEM;
+ seq_printf(m, "%d: uart:%s %s%08llX irq:%d",
+ uport->line, uart_type(uport),
+ mmio ? "mmio:0x" : "port:",
+ mmio ? (unsigned long long)uport->mapbase
+ : (unsigned long long)uport->iobase,
+ uport->irq);
+
+ if (uport->type == PORT_UNKNOWN) {
+ seq_putc(m, '\n');
+ return;
+ }
+
+ if (capable(CAP_SYS_ADMIN)) {
+ mutex_lock(&port->mutex);
+ pm_state = state->pm_state;
+ if (pm_state)
+ uart_change_pm(state, 0);
+ spin_lock_irq(&uport->lock);
+ status = uport->ops->get_mctrl(uport);
+ spin_unlock_irq(&uport->lock);
+ if (pm_state)
+ uart_change_pm(state, pm_state);
+ mutex_unlock(&port->mutex);
+
+ seq_printf(m, " tx:%d rx:%d",
+ uport->icount.tx, uport->icount.rx);
+ if (uport->icount.frame)
+ seq_printf(m, " fe:%d",
+ uport->icount.frame);
+ if (uport->icount.parity)
+ seq_printf(m, " pe:%d",
+ uport->icount.parity);
+ if (uport->icount.brk)
+ seq_printf(m, " brk:%d",
+ uport->icount.brk);
+ if (uport->icount.overrun)
+ seq_printf(m, " oe:%d",
+ uport->icount.overrun);
+
+#define INFOBIT(bit, str) \
+ if (uport->mctrl & (bit)) \
+ strncat(stat_buf, (str), sizeof(stat_buf) - \
+ strlen(stat_buf) - 2)
+#define STATBIT(bit, str) \
+ if (status & (bit)) \
+ strncat(stat_buf, (str), sizeof(stat_buf) - \
+ strlen(stat_buf) - 2)
+
+ stat_buf[0] = '\0';
+ stat_buf[1] = '\0';
+ INFOBIT(TIOCM_RTS, "|RTS");
+ STATBIT(TIOCM_CTS, "|CTS");
+ INFOBIT(TIOCM_DTR, "|DTR");
+ STATBIT(TIOCM_DSR, "|DSR");
+ STATBIT(TIOCM_CAR, "|CD");
+ STATBIT(TIOCM_RNG, "|RI");
+ if (stat_buf[0])
+ stat_buf[0] = ' ';
+
+ seq_puts(m, stat_buf);
+ }
+ seq_putc(m, '\n');
+#undef STATBIT
+#undef INFOBIT
+}
+
+static int uart_proc_show(struct seq_file *m, void *v)
+{
+ struct tty_driver *ttydrv = m->private;
+ struct uart_driver *drv = ttydrv->driver_state;
+ int i;
+
+ seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n",
+ "", "", "");
+ for (i = 0; i < drv->nr; i++)
+ uart_line_info(m, drv, i);
+ return 0;
+}
+
+static int uart_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uart_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations uart_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = uart_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
+/**
+ * uart_console_write - write a console message to a serial port
+ * @port: the port to write the message
+ * @s: array of characters
+ * @count: number of characters in string to write
+ * @putchar: function to write a character to the port
+ */
+void uart_console_write(struct uart_port *port, const char *s,
+ unsigned int count,
+ void (*putchar)(struct uart_port *, int))
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++, s++) {
+ if (*s == '\n')
+ putchar(port, '\r');
+ putchar(port, *s);
+ }
+}
+EXPORT_SYMBOL_GPL(uart_console_write);
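+
+/*
+ * For example, writing the three characters "ok\n" through this helper
+ * emits 'o', 'k', '\r', '\n' on the wire: every LF is expanded to CRLF
+ * so console output lines up on a raw terminal.
+ */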
+
+/*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+struct uart_port * __init
+uart_get_console(struct uart_port *ports, int nr, struct console *co)
+{
+ int idx = co->index;
+
+ if (idx < 0 || idx >= nr || (ports[idx].iobase == 0 &&
+ ports[idx].membase == NULL))
+ for (idx = 0; idx < nr; idx++)
+ if (ports[idx].iobase != 0 ||
+ ports[idx].membase != NULL)
+ break;
+
+ co->index = idx;
+
+ return ports + idx;
+}
+
+/**
+ * uart_parse_options - parse serial port baud/parity/bits/flow control.
+ * @options: pointer to option string
+ * @baud: pointer to an 'int' variable for the baud rate.
+ * @parity: pointer to an 'int' variable for the parity.
+ * @bits: pointer to an 'int' variable for the number of data bits.
+ * @flow: pointer to an 'int' variable for the flow control character.
+ *
+ * uart_parse_options decodes a string containing the serial console
+ * options. The format of the string is <baud><parity><bits><flow>,
+ * eg: 115200n8r
+ */
+void
+uart_parse_options(char *options, int *baud, int *parity, int *bits, int *flow)
+{
+ char *s = options;
+
+ *baud = simple_strtoul(s, NULL, 10);
+ while (*s >= '0' && *s <= '9')
+ s++;
+ if (*s)
+ *parity = *s++;
+ if (*s)
+ *bits = *s++ - '0';
+ if (*s)
+ *flow = *s;
+}
+EXPORT_SYMBOL_GPL(uart_parse_options);
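+
+/*
+ * Worked example: parsing the string "115200n8r" leaves *baud == 115200,
+ * *parity == 'n', *bits == 8 and *flow == 'r'; fields missing from the
+ * string simply leave the caller's defaults untouched.
+ */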
+
+struct baud_rates {
+ unsigned int rate;
+ unsigned int cflag;
+};
+
+static const struct baud_rates baud_rates[] = {
+ { 921600, B921600 },
+ { 460800, B460800 },
+ { 230400, B230400 },
+ { 115200, B115200 },
+ { 57600, B57600 },
+ { 38400, B38400 },
+ { 19200, B19200 },
+ { 9600, B9600 },
+ { 4800, B4800 },
+ { 2400, B2400 },
+ { 1200, B1200 },
+ { 0, B38400 }
+};
+
+/**
+ * uart_set_options - setup the serial console parameters
+ * @port: pointer to the serial port's uart_port structure
+ * @co: console pointer
+ * @baud: baud rate
+ * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even)
+ * @bits: number of data bits
+ * @flow: flow control character - 'r' (rts)
+ */
+int
+uart_set_options(struct uart_port *port, struct console *co,
+ int baud, int parity, int bits, int flow)
+{
+ struct ktermios termios;
+ static struct ktermios dummy;
+ int i;
+
+ /*
+ * Ensure that the serial console lock is initialised
+ * early.
+ */
+ spin_lock_init(&port->lock);
+ lockdep_set_class(&port->lock, &port_lock_key);
+
+ memset(&termios, 0, sizeof(struct ktermios));
+
+ termios.c_cflag = CREAD | HUPCL | CLOCAL;
+
+ /*
+ * Construct a cflag setting.
+ */
+ for (i = 0; baud_rates[i].rate; i++)
+ if (baud_rates[i].rate <= baud)
+ break;
+
+ termios.c_cflag |= baud_rates[i].cflag;
+
+ if (bits == 7)
+ termios.c_cflag |= CS7;
+ else
+ termios.c_cflag |= CS8;
+
+ switch (parity) {
+ case 'o': case 'O':
+ termios.c_cflag |= PARODD;
+ /*fall through*/
+ case 'e': case 'E':
+ termios.c_cflag |= PARENB;
+ break;
+ }
+
+ if (flow == 'r')
+ termios.c_cflag |= CRTSCTS;
+
+ /*
+ * Some UARTs on the other end don't cope with the "no flow
+ * control" case, so assert DTR in the host UART to keep them
+ * happy.
+ */
+ port->mctrl |= TIOCM_DTR;
+
+ port->ops->set_termios(port, &termios, &dummy);
+ /*
+ * Allow the setting of the UART parameters with a NULL console
+ * too:
+ */
+ if (co)
+ co->cflag = termios.c_cflag;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uart_set_options);
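+
+/*
+ * A typical console setup() callback uses the two helpers above roughly
+ * like this (sketch only; "port", "co" and "options" come from the
+ * caller):
+ *
+ * int baud = 9600, bits = 8, parity = 'n', flow = 'n';
+ *
+ * if (options)
+ *         uart_parse_options(options, &baud, &parity, &bits, &flow);
+ * return uart_set_options(port, co, baud, parity, bits, flow);
+ */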
+#endif /* CONFIG_SERIAL_CORE_CONSOLE || CONFIG_CONSOLE_POLL */
+
+static void uart_change_pm(struct uart_state *state, int pm_state)
+{
+ struct uart_port *port = state->uart_port;
+
+ if (state->pm_state != pm_state) {
+ if (port->ops->pm)
+ port->ops->pm(port, pm_state, state->pm_state);
+ state->pm_state = pm_state;
+ }
+}
+
+struct uart_match {
+ struct uart_port *port;
+ struct uart_driver *driver;
+};
+
+static int serial_match_port(struct device *dev, void *data)
+{
+ struct uart_match *match = data;
+ struct tty_driver *tty_drv = match->driver->tty_driver;
+ dev_t devt = MKDEV(tty_drv->major, tty_drv->minor_start) +
+ match->port->line;
+
+ return dev->devt == devt; /* Actually, only one tty per port */
+}
+
+int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
+{
+ struct uart_state *state = drv->state + uport->line;
+ struct tty_port *port = &state->port;
+ struct device *tty_dev;
+ struct uart_match match = {uport, drv};
+
+ mutex_lock(&port->mutex);
+
+ tty_dev = device_find_child(uport->dev, &match, serial_match_port);
+ if (device_may_wakeup(tty_dev)) {
+ if (!enable_irq_wake(uport->irq))
+ uport->irq_wake = 1;
+ put_device(tty_dev);
+ mutex_unlock(&port->mutex);
+ return 0;
+ }
+ if (console_suspend_enabled || !uart_console(uport))
+ uport->suspended = 1;
+
+ if (port->flags & ASYNC_INITIALIZED) {
+ const struct uart_ops *ops = uport->ops;
+ int tries;
+
+ if (console_suspend_enabled || !uart_console(uport)) {
+ set_bit(ASYNCB_SUSPENDED, &port->flags);
+ clear_bit(ASYNCB_INITIALIZED, &port->flags);
+
+ spin_lock_irq(&uport->lock);
+ ops->stop_tx(uport);
+ ops->set_mctrl(uport, 0);
+ ops->stop_rx(uport);
+ spin_unlock_irq(&uport->lock);
+ }
+
+ /*
+ * Wait for the transmitter to empty.
+ */
+ for (tries = 3; !ops->tx_empty(uport) && tries; tries--)
+ msleep(10);
+ if (!tries)
+ printk(KERN_ERR "%s%s%s%d: Unable to drain "
+ "transmitter\n",
+ uport->dev ? dev_name(uport->dev) : "",
+ uport->dev ? ": " : "",
+ drv->dev_name,
+ drv->tty_driver->name_base + uport->line);
+
+ if (console_suspend_enabled || !uart_console(uport))
+ ops->shutdown(uport);
+ }
+
+ /*
+ * Disable the console device before suspending.
+ */
+ if (console_suspend_enabled && uart_console(uport))
+ console_stop(uport->cons);
+
+ if (console_suspend_enabled || !uart_console(uport))
+ uart_change_pm(state, 3);
+
+ mutex_unlock(&port->mutex);
+
+ return 0;
+}
+
+int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
+{
+ struct uart_state *state = drv->state + uport->line;
+ struct tty_port *port = &state->port;
+ struct device *tty_dev;
+ struct uart_match match = {uport, drv};
+ struct ktermios termios;
+
+ mutex_lock(&port->mutex);
+
+ tty_dev = device_find_child(uport->dev, &match, serial_match_port);
+ if (!uport->suspended && device_may_wakeup(tty_dev)) {
+ if (uport->irq_wake) {
+ disable_irq_wake(uport->irq);
+ uport->irq_wake = 0;
+ }
+ mutex_unlock(&port->mutex);
+ return 0;
+ }
+ uport->suspended = 0;
+
+ /*
+ * Re-enable the console device after suspending.
+ */
+ if (uart_console(uport)) {
+ /*
+ * First try to use the console cflag setting.
+ */
+ memset(&termios, 0, sizeof(struct ktermios));
+ termios.c_cflag = uport->cons->cflag;
+
+ /*
+ * If that's unset, use the tty termios setting.
+ */
+ if (port->tty && port->tty->termios && termios.c_cflag == 0)
+ termios = *(port->tty->termios);
+
+ if (console_suspend_enabled)
+ uart_change_pm(state, 0);
+ uport->ops->set_termios(uport, &termios, NULL);
+ if (console_suspend_enabled)
+ console_start(uport->cons);
+ }
+
+ if (port->flags & ASYNC_SUSPENDED) {
+ const struct uart_ops *ops = uport->ops;
+ int ret;
+
+ uart_change_pm(state, 0);
+ spin_lock_irq(&uport->lock);
+ ops->set_mctrl(uport, 0);
+ spin_unlock_irq(&uport->lock);
+ if (console_suspend_enabled || !uart_console(uport)) {
+ /* Protected by port mutex for now */
+ struct tty_struct *tty = port->tty;
+ ret = ops->startup(uport);
+ if (ret == 0) {
+ if (tty)
+ uart_change_speed(tty, state, NULL);
+ spin_lock_irq(&uport->lock);
+ ops->set_mctrl(uport, uport->mctrl);
+ ops->start_tx(uport);
+ spin_unlock_irq(&uport->lock);
+ set_bit(ASYNCB_INITIALIZED, &port->flags);
+ } else {
+ /*
+ * Failed to resume - maybe hardware went away?
+ * Clear the "initialized" flag so we won't try
+ * to call the low level drivers shutdown method.
+ */
+ uart_shutdown(tty, state);
+ }
+ }
+
+ clear_bit(ASYNCB_SUSPENDED, &port->flags);
+ }
+
+ mutex_unlock(&port->mutex);
+
+ return 0;
+}
+
+static inline void
+uart_report_port(struct uart_driver *drv, struct uart_port *port)
+{
+ char address[64];
+
+ switch (port->iotype) {
+ case UPIO_PORT:
+ snprintf(address, sizeof(address), "I/O 0x%lx", port->iobase);
+ break;
+ case UPIO_HUB6:
+ snprintf(address, sizeof(address),
+ "I/O 0x%lx offset 0x%x", port->iobase, port->hub6);
+ break;
+ case UPIO_MEM:
+ case UPIO_MEM32:
+ case UPIO_AU:
+ case UPIO_TSI:
+ case UPIO_DWAPB:
+ case UPIO_DWAPB32:
+ snprintf(address, sizeof(address),
+ "MMIO 0x%llx", (unsigned long long)port->mapbase);
+ break;
+ default:
+ strlcpy(address, "*unknown*", sizeof(address));
+ break;
+ }
+
+ printk(KERN_INFO "%s%s%s%d at %s (irq = %d) is a %s\n",
+ port->dev ? dev_name(port->dev) : "",
+ port->dev ? ": " : "",
+ drv->dev_name,
+ drv->tty_driver->name_base + port->line,
+ address, port->irq, uart_type(port));
+}
+
+static void
+uart_configure_port(struct uart_driver *drv, struct uart_state *state,
+ struct uart_port *port)
+{
+ unsigned int flags;
+
+ /*
+ * If there isn't a port here, don't do anything further.
+ */
+ if (!port->iobase && !port->mapbase && !port->membase)
+ return;
+
+ /*
+ * Now do the auto configuration stuff. Note that config_port
+ * is expected to claim the resources and map the port for us.
+ */
+ flags = 0;
+ if (port->flags & UPF_AUTO_IRQ)
+ flags |= UART_CONFIG_IRQ;
+ if (port->flags & UPF_BOOT_AUTOCONF) {
+ if (!(port->flags & UPF_FIXED_TYPE)) {
+ port->type = PORT_UNKNOWN;
+ flags |= UART_CONFIG_TYPE;
+ }
+ port->ops->config_port(port, flags);
+ }
+
+ if (port->type != PORT_UNKNOWN) {
+ unsigned long flags;
+
+ uart_report_port(drv, port);
+
+ /* Power up port for set_mctrl() */
+ uart_change_pm(state, 0);
+
+ /*
+ * Ensure that the modem control lines are de-activated,
+ * but keep the DTR setting that was established in
+ * uart_set_options(). We probably don't need a spinlock
+ * around this, but take one anyway to be safe.
+ */
+ spin_lock_irqsave(&port->lock, flags);
+ port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /*
+ * If this driver supports console, and it hasn't been
+ * successfully registered yet, try to re-register it.
+ * It may be that the port was not available.
+ */
+ if (port->cons && !(port->cons->flags & CON_ENABLED))
+ register_console(port->cons);
+
+ /*
+ * Power down all ports by default, except the
+ * console if we have one.
+ */
+ if (!uart_console(port))
+ uart_change_pm(state, 3);
+ }
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+
+static int uart_poll_init(struct tty_driver *driver, int line, char *options)
+{
+ struct uart_driver *drv = driver->driver_state;
+ struct uart_state *state = drv->state + line;
+ struct uart_port *port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (!state || !state->uart_port)
+ return -1;
+
+ port = state->uart_port;
+ if (!(port->ops->poll_get_char && port->ops->poll_put_char))
+ return -1;
+
+ if (options) {
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ return uart_set_options(port, NULL, baud, parity, bits, flow);
+ }
+
+ return 0;
+}
+
+static int uart_poll_get_char(struct tty_driver *driver, int line)
+{
+ struct uart_driver *drv = driver->driver_state;
+ struct uart_state *state = drv->state + line;
+ struct uart_port *port;
+
+ if (!state || !state->uart_port)
+ return -1;
+
+ port = state->uart_port;
+ return port->ops->poll_get_char(port);
+}
+
+static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
+{
+ struct uart_driver *drv = driver->driver_state;
+ struct uart_state *state = drv->state + line;
+ struct uart_port *port;
+
+ if (!state || !state->uart_port)
+ return;
+
+ port = state->uart_port;
+ port->ops->poll_put_char(port, ch);
+}
+#endif
+
+static const struct tty_operations uart_ops = {
+ .open = uart_open,
+ .close = uart_close,
+ .write = uart_write,
+ .put_char = uart_put_char,
+ .flush_chars = uart_flush_chars,
+ .write_room = uart_write_room,
+ .chars_in_buffer= uart_chars_in_buffer,
+ .flush_buffer = uart_flush_buffer,
+ .ioctl = uart_ioctl,
+ .throttle = uart_throttle,
+ .unthrottle = uart_unthrottle,
+ .send_xchar = uart_send_xchar,
+ .set_termios = uart_set_termios,
+ .set_ldisc = uart_set_ldisc,
+ .stop = uart_stop,
+ .start = uart_start,
+ .hangup = uart_hangup,
+ .break_ctl = uart_break_ctl,
+ .wait_until_sent= uart_wait_until_sent,
+#ifdef CONFIG_PROC_FS
+ .proc_fops = &uart_proc_fops,
+#endif
+ .tiocmget = uart_tiocmget,
+ .tiocmset = uart_tiocmset,
+ .get_icount = uart_get_icount,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_init = uart_poll_init,
+ .poll_get_char = uart_poll_get_char,
+ .poll_put_char = uart_poll_put_char,
+#endif
+};
+
+static const struct tty_port_operations uart_port_ops = {
+ .carrier_raised = uart_carrier_raised,
+ .dtr_rts = uart_dtr_rts,
+};
+
+/**
+ * uart_register_driver - register a driver with the uart core layer
+ * @drv: low level driver structure
+ *
+ * Register a uart driver with the core driver. We in turn register
+ * with the tty layer, and initialise the core driver per-port state.
+ *
+ * We have a proc file in /proc/tty/driver which is named after the
+ * normal driver.
+ *
+ * drv->port should be NULL, and the per-port structures should be
+ * registered using uart_add_one_port after this call has succeeded.
+ */
+int uart_register_driver(struct uart_driver *drv)
+{
+ struct tty_driver *normal;
+ int i, retval;
+
+ BUG_ON(drv->state);
+
+ /*
+ * Maybe we should be using a slab cache for this, especially if
+ * we have a large number of ports to handle.
+ */
+ drv->state = kzalloc(sizeof(struct uart_state) * drv->nr, GFP_KERNEL);
+ if (!drv->state)
+ goto out;
+
+ normal = alloc_tty_driver(drv->nr);
+ if (!normal)
+ goto out_kfree;
+
+ drv->tty_driver = normal;
+
+ normal->owner = drv->owner;
+ normal->driver_name = drv->driver_name;
+ normal->name = drv->dev_name;
+ normal->major = drv->major;
+ normal->minor_start = drv->minor;
+ normal->type = TTY_DRIVER_TYPE_SERIAL;
+ normal->subtype = SERIAL_TYPE_NORMAL;
+ normal->init_termios = tty_std_termios;
+ normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ normal->init_termios.c_ispeed = normal->init_termios.c_ospeed = 9600;
+ normal->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ normal->driver_state = drv;
+ tty_set_operations(normal, &uart_ops);
+
+ /*
+ * Initialise the UART state(s).
+ */
+ for (i = 0; i < drv->nr; i++) {
+ struct uart_state *state = drv->state + i;
+ struct tty_port *port = &state->port;
+
+ tty_port_init(port);
+ port->ops = &uart_port_ops;
+ port->close_delay = 500; /* .5 seconds */
+ port->closing_wait = 30000; /* 30 seconds */
+ tasklet_init(&state->tlet, uart_tasklet_action,
+ (unsigned long)state);
+ }
+
+ retval = tty_register_driver(normal);
+ if (retval >= 0)
+ return retval;
+
+ put_tty_driver(normal);
+out_kfree:
+ kfree(drv->state);
+out:
+ return -ENOMEM;
+}
+
+/**
+ * uart_unregister_driver - remove a driver from the uart core layer
+ * @drv: low level driver structure
+ *
+ * Remove all references to a driver from the core driver. The low
+ * level driver must have removed all its ports via
+ * uart_remove_one_port() if it registered them with
+ * uart_add_one_port() (i.e. drv->port == NULL).
+ */
+void uart_unregister_driver(struct uart_driver *drv)
+{
+ struct tty_driver *p = drv->tty_driver;
+ tty_unregister_driver(p);
+ put_tty_driver(p);
+ kfree(drv->state);
+ drv->state = NULL;
+ drv->tty_driver = NULL;
+}
+
+struct tty_driver *uart_console_device(struct console *co, int *index)
+{
+ struct uart_driver *p = co->data;
+ *index = co->index;
+ return p->tty_driver;
+}
+
+/**
+ * uart_add_one_port - attach a driver-defined port structure
+ * @drv: pointer to the uart low level driver structure for this port
+ * @uport: uart port structure to use for this port.
+ *
+ * This allows the driver to register its own uart_port structure
+ * with the core driver. The main purpose is to allow the low
+ * level uart drivers to expand uart_port, rather than having yet
+ * more levels of structures.
+ */
+int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
+{
+ struct uart_state *state;
+ struct tty_port *port;
+ int ret = 0;
+ struct device *tty_dev;
+
+ BUG_ON(in_interrupt());
+
+ if (uport->line >= drv->nr)
+ return -EINVAL;
+
+ state = drv->state + uport->line;
+ port = &state->port;
+
+ mutex_lock(&port_mutex);
+ mutex_lock(&port->mutex);
+ if (state->uart_port) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ state->uart_port = uport;
+ state->pm_state = -1;
+
+ uport->cons = drv->cons;
+ uport->state = state;
+
+ /*
+ * If this port is a console, then the spinlock is already
+ * initialised.
+ */
+ if (!(uart_console(uport) && (uport->cons->flags & CON_ENABLED))) {
+ spin_lock_init(&uport->lock);
+ lockdep_set_class(&uport->lock, &port_lock_key);
+ }
+
+ uart_configure_port(drv, state, uport);
+
+ /*
+ * Register the port whether it's detected or not. This allows
+ * setserial to be used to alter this port's parameters.
+ */
+ tty_dev = tty_register_device(drv->tty_driver, uport->line, uport->dev);
+ if (likely(!IS_ERR(tty_dev))) {
+ device_set_wakeup_capable(tty_dev, 1);
+ } else {
+ printk(KERN_ERR "Cannot register tty device on line %d\n",
+ uport->line);
+ }
+
+ /*
+ * Ensure UPF_DEAD is not set.
+ */
+ uport->flags &= ~UPF_DEAD;
+
+ out:
+ mutex_unlock(&port->mutex);
+ mutex_unlock(&port_mutex);
+
+ return ret;
+}
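+
+/*
+ * A low level driver typically uses this call together with
+ * uart_register_driver() roughly as follows (sketch only; the names and
+ * field values below are placeholders chosen by the driver):
+ *
+ * static struct uart_driver my_uart_driver = {
+ *         .owner       = THIS_MODULE,
+ *         .driver_name = "my_uart",
+ *         .dev_name    = "ttyMY",
+ *         .nr          = 4,
+ * };
+ *
+ * uart_register_driver(&my_uart_driver) runs once at module init, then
+ * uart_add_one_port(&my_uart_driver, &my_port) is called for each port
+ * that is discovered; teardown mirrors this with uart_remove_one_port()
+ * and uart_unregister_driver().
+ */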
+
+/**
+ * uart_remove_one_port - detach a driver defined port structure
+ * @drv: pointer to the uart low level driver structure for this port
+ * @uport: uart port structure for this port
+ *
+ * This unhooks (and hangs up) the specified port structure from the
+ * core driver. No further calls will be made to the low-level code
+ * for this port.
+ */
+int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
+{
+ struct uart_state *state = drv->state + uport->line;
+ struct tty_port *port = &state->port;
+
+ BUG_ON(in_interrupt());
+
+ if (state->uart_port != uport)
+ printk(KERN_ALERT "Removing wrong port: %p != %p\n",
+ state->uart_port, uport);
+
+ mutex_lock(&port_mutex);
+
+ /*
+ * Mark the port "dead" - this prevents any opens from
+ * succeeding while we shut down the port.
+ */
+ mutex_lock(&port->mutex);
+ uport->flags |= UPF_DEAD;
+ mutex_unlock(&port->mutex);
+
+ /*
+ * Remove the devices from the tty layer
+ */
+ tty_unregister_device(drv->tty_driver, uport->line);
+
+ if (port->tty)
+ tty_vhangup(port->tty);
+
+ /*
+ * Free the port IO and memory resources, if any.
+ */
+ if (uport->type != PORT_UNKNOWN)
+ uport->ops->release_port(uport);
+
+ /*
+ * Indicate that there isn't a port here anymore.
+ */
+ uport->type = PORT_UNKNOWN;
+
+ /*
+ * Kill the tasklet, and free resources.
+ */
+ tasklet_kill(&state->tlet);
+
+ state->uart_port = NULL;
+ mutex_unlock(&port_mutex);
+
+ return 0;
+}
+
+/*
+ * Are the two ports equivalent?
+ */
+int uart_match_port(struct uart_port *port1, struct uart_port *port2)
+{
+ if (port1->iotype != port2->iotype)
+ return 0;
+
+ switch (port1->iotype) {
+ case UPIO_PORT:
+ return (port1->iobase == port2->iobase);
+ case UPIO_HUB6:
+ return (port1->iobase == port2->iobase) &&
+ (port1->hub6 == port2->hub6);
+ case UPIO_MEM:
+ case UPIO_MEM32:
+ case UPIO_AU:
+ case UPIO_TSI:
+ case UPIO_DWAPB:
+ case UPIO_DWAPB32:
+ return (port1->mapbase == port2->mapbase);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(uart_match_port);
+
+EXPORT_SYMBOL(uart_write_wakeup);
+EXPORT_SYMBOL(uart_register_driver);
+EXPORT_SYMBOL(uart_unregister_driver);
+EXPORT_SYMBOL(uart_suspend_port);
+EXPORT_SYMBOL(uart_resume_port);
+EXPORT_SYMBOL(uart_add_one_port);
+EXPORT_SYMBOL(uart_remove_one_port);
+
+MODULE_DESCRIPTION("Serial driver core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
new file mode 100644
index 00000000..eef736ff
--- /dev/null
+++ b/drivers/tty/serial/serial_cs.c
@@ -0,0 +1,870 @@
+/*======================================================================
+
+ A driver for PCMCIA serial devices
+
+ serial_cs.c 1.134 2002/05/04 05:48:53
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in which
+ case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/serial_core.h>
+#include <linux/delay.h>
+#include <linux/major.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+
+#include "8250.h"
+
+
+/*====================================================================*/
+
+/* Parameters that can be set with 'insmod' */
+
+/* Enable the speaker? */
+static int do_sound = 1;
+/* Skip strict UART tests? */
+static int buggy_uart;
+
+module_param(do_sound, int, 0444);
+module_param(buggy_uart, int, 0444);
+
+/*====================================================================*/
+
+/* Table of multi-port card ID's */
+
+struct serial_quirk {
+ unsigned int manfid;
+ unsigned int prodid;
+ int multi; /* 1 = multifunction, > 1 = # ports */
+ void (*config)(struct pcmcia_device *);
+ void (*setup)(struct pcmcia_device *, struct uart_port *);
+ void (*wakeup)(struct pcmcia_device *);
+ int (*post)(struct pcmcia_device *);
+};
+
+struct serial_info {
+ struct pcmcia_device *p_dev;
+ int ndev;
+ int multi;
+ int slave;
+ int manfid;
+ int prodid;
+ int c950ctrl;
+ int line[4];
+ const struct serial_quirk *quirk;
+};
+
+struct serial_cfg_mem {
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[256];
+};
+
+/*
+ * vers_1 5.0, "Brain Boxes", "2-Port RS232 card", "r6"
+ * manfid 0x0160, 0x0104
+ * This card appears to have a 14.7456MHz clock.
+ */
+/* Generic Modem: MD55x (GPRS/EDGE) cards have an
+ * Elan VPU16551 UART with a 14.7456MHz oscillator
+ * manfid 0x015D, 0x4C45
+ */
+static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_port *port)
+{
+ port->uartclk = 14745600;
+}
+
+static int quirk_post_ibm(struct pcmcia_device *link)
+{
+ u8 val;
+ int ret;
+
+ ret = pcmcia_read_config_byte(link, 0x800, &val);
+ if (ret)
+ goto failed;
+
+ ret = pcmcia_write_config_byte(link, 0x800, val | 1);
+ if (ret)
+ goto failed;
+ return 0;
+
+ failed:
+ return -ENODEV;
+}
+
+/*
+ * Nokia cards are not really multiport cards. Shouldn't this
+ * be handled by setting the quirk entry's .multi to 0 or 1?
+ */
+static void quirk_config_nokia(struct pcmcia_device *link)
+{
+ struct serial_info *info = link->priv;
+
+ if (info->multi > 1)
+ info->multi = 1;
+}
+
+static void quirk_wakeup_oxsemi(struct pcmcia_device *link)
+{
+ struct serial_info *info = link->priv;
+
+ if (info->c950ctrl)
+ outb(12, info->c950ctrl + 1);
+}
+
+/* request_region? The oxsemi branch does not call request_region either... */
+/*
+ * This sequence is needed to properly initialize an MC45 attached to an
+ * OXCF950. I tried decreasing these msleep()s, but the device only worked
+ * reliably (survived 1000 stop/start operations) with these timeouts or longer.
+ */
+static void quirk_wakeup_possio_gcc(struct pcmcia_device *link)
+{
+ struct serial_info *info = link->priv;
+ unsigned int ctrl = info->c950ctrl;
+
+ outb(0xA, ctrl + 1);
+ msleep(100);
+ outb(0xE, ctrl + 1);
+ msleep(300);
+ outb(0xC, ctrl + 1);
+ msleep(100);
+ outb(0xE, ctrl + 1);
+ msleep(200);
+ outb(0xF, ctrl + 1);
+ msleep(100);
+ outb(0xE, ctrl + 1);
+ msleep(100);
+ outb(0xC, ctrl + 1);
+}
+
+/*
+ * Socket Dual IO: this enables IRQs for the second port
+ */
+static void quirk_config_socket(struct pcmcia_device *link)
+{
+ struct serial_info *info = link->priv;
+
+ if (info->multi)
+ link->config_flags |= CONF_ENABLE_ESR;
+}
+
+static const struct serial_quirk quirks[] = {
+ {
+ .manfid = 0x0160,
+ .prodid = 0x0104,
+ .multi = -1,
+ .setup = quirk_setup_brainboxes_0104,
+ }, {
+ .manfid = 0x015D,
+ .prodid = 0x4C45,
+ .multi = -1,
+ .setup = quirk_setup_brainboxes_0104,
+ }, {
+ .manfid = MANFID_IBM,
+ .prodid = ~0,
+ .multi = -1,
+ .post = quirk_post_ibm,
+ }, {
+ .manfid = MANFID_INTEL,
+ .prodid = PRODID_INTEL_DUAL_RS232,
+ .multi = 2,
+ }, {
+ .manfid = MANFID_NATINST,
+ .prodid = PRODID_NATINST_QUAD_RS232,
+ .multi = 4,
+ }, {
+ .manfid = MANFID_NOKIA,
+ .prodid = ~0,
+ .multi = -1,
+ .config = quirk_config_nokia,
+ }, {
+ .manfid = MANFID_OMEGA,
+ .prodid = PRODID_OMEGA_QSP_100,
+ .multi = 4,
+ }, {
+ .manfid = MANFID_OXSEMI,
+ .prodid = ~0,
+ .multi = -1,
+ .wakeup = quirk_wakeup_oxsemi,
+ }, {
+ .manfid = MANFID_POSSIO,
+ .prodid = PRODID_POSSIO_GCC,
+ .multi = -1,
+ .wakeup = quirk_wakeup_possio_gcc,
+ }, {
+ .manfid = MANFID_QUATECH,
+ .prodid = PRODID_QUATECH_DUAL_RS232,
+ .multi = 2,
+ }, {
+ .manfid = MANFID_QUATECH,
+ .prodid = PRODID_QUATECH_DUAL_RS232_D1,
+ .multi = 2,
+ }, {
+ .manfid = MANFID_QUATECH,
+ .prodid = PRODID_QUATECH_DUAL_RS232_G,
+ .multi = 2,
+ }, {
+ .manfid = MANFID_QUATECH,
+ .prodid = PRODID_QUATECH_QUAD_RS232,
+ .multi = 4,
+ }, {
+ .manfid = MANFID_SOCKET,
+ .prodid = PRODID_SOCKET_DUAL_RS232,
+ .multi = 2,
+ .config = quirk_config_socket,
+ }, {
+ .manfid = MANFID_SOCKET,
+ .prodid = ~0,
+ .multi = -1,
+ .config = quirk_config_socket,
+ }
+};
+
+
+static int serial_config(struct pcmcia_device *link);
+
+
+static void serial_remove(struct pcmcia_device *link)
+{
+ struct serial_info *info = link->priv;
+ int i;
+
+ dev_dbg(&link->dev, "serial_release\n");
+
+ /*
+ * Unregister the ports and, unless we are the slave function of a
+ * multifunction card, shut the PCMCIA device down.
+ */
+ for (i = 0; i < info->ndev; i++)
+ serial8250_unregister_port(info->line[i]);
+
+ if (!info->slave)
+ pcmcia_disable_device(link);
+}
+
+static int serial_suspend(struct pcmcia_device *link)
+{
+ struct serial_info *info = link->priv;
+ int i;
+
+ for (i = 0; i < info->ndev; i++)
+ serial8250_suspend_port(info->line[i]);
+
+ return 0;
+}
+
+static int serial_resume(struct pcmcia_device *link)
+{
+ struct serial_info *info = link->priv;
+ int i;
+
+ for (i = 0; i < info->ndev; i++)
+ serial8250_resume_port(info->line[i]);
+
+ if (info->quirk && info->quirk->wakeup)
+ info->quirk->wakeup(link);
+
+ return 0;
+}
+
+static int serial_probe(struct pcmcia_device *link)
+{
+ struct serial_info *info;
+
+ dev_dbg(&link->dev, "serial_attach()\n");
+
+ /* Create new serial device */
+ info = kzalloc(sizeof (*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ info->p_dev = link;
+ link->priv = info;
+
+ link->config_flags |= CONF_ENABLE_IRQ;
+ if (do_sound)
+ link->config_flags |= CONF_ENABLE_SPKR;
+
+ return serial_config(link);
+}
+
+static void serial_detach(struct pcmcia_device *link)
+{
+ struct serial_info *info = link->priv;
+
+ dev_dbg(&link->dev, "serial_detach\n");
+
+ /*
+ * Ensure that the ports have been released.
+ */
+ serial_remove(link);
+
+ /* free bits */
+ kfree(info);
+}
+
+/*====================================================================*/
+
+static int setup_serial(struct pcmcia_device *handle, struct serial_info *info,
+ unsigned int iobase, int irq)
+{
+ struct uart_port port;
+ int line;
+
+ memset(&port, 0, sizeof (struct uart_port));
+ port.iobase = iobase;
+ port.irq = irq;
+ port.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ;
+ port.uartclk = 1843200;
+ port.dev = &handle->dev;
+ if (buggy_uart)
+ port.flags |= UPF_BUGGY_UART;
+
+ if (info->quirk && info->quirk->setup)
+ info->quirk->setup(handle, &port);
+
+ line = serial8250_register_port(&port);
+ if (line < 0) {
+ printk(KERN_NOTICE "serial_cs: serial8250_register_port() at "
+ "0x%04lx, irq %d failed\n", (u_long)iobase, irq);
+ return -EINVAL;
+ }
+
+ info->line[info->ndev] = line;
+ info->ndev++;
+
+ return 0;
+}
+
+/*====================================================================*/
+
+static int pfc_config(struct pcmcia_device *p_dev)
+{
+ unsigned int port = 0;
+ struct serial_info *info = p_dev->priv;
+
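+ /*
+ * On function-config (modem + LAN) cards the serial function usually
+ * sits in the second, 8-byte IO window; Ositech cards instead map it
+ * at offset 0x28 of a single 0x40-byte window. Either way the port is
+ * marked as a slave, so serial_remove() leaves the shared device enabled.
+ */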
+ if ((p_dev->resource[1]->end != 0) &&
+ (resource_size(p_dev->resource[1]) == 8)) {
+ port = p_dev->resource[1]->start;
+ info->slave = 1;
+ } else if ((info->manfid == MANFID_OSITECH) &&
+ (resource_size(p_dev->resource[0]) == 0x40)) {
+ port = p_dev->resource[0]->start + 0x28;
+ info->slave = 1;
+ }
+ if (info->slave)
+ return setup_serial(p_dev, info, port, p_dev->irq);
+
+ dev_warn(&p_dev->dev, "no usable port range found, giving up\n");
+ return -ENODEV;
+}
+
+static int simple_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ static const int size_table[2] = { 8, 16 };
+ int *try = priv_data;
+
+ if (p_dev->resource[0]->start == 0)
+ return -ENODEV;
+
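+ /*
+ * 'try' is a two-bit cookie from simple_config(): bit 0 clear means
+ * force full 16-line IO decoding (no aliases), bit 1 selects the
+ * expected window size (8 or 16 ports) from size_table[].
+ */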
+ if ((*try & 0x1) == 0)
+ p_dev->io_lines = 16;
+
+ if (p_dev->resource[0]->end != size_table[(*try >> 1)])
+ return -ENODEV;
+
+ p_dev->resource[0]->end = 8;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+
+ return pcmcia_request_io(p_dev);
+}
+
+static int simple_config_check_notpicky(struct pcmcia_device *p_dev,
+ void *priv_data)
+{
+ static const unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
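+ /* Legacy COM1-COM4 base addresses; the final 0 lets the resource
+ * code pick any free window with only 3 IO lines decoded. */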
+ int j;
+
+ if (p_dev->io_lines > 3)
+ return -ENODEV;
+
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->resource[0]->end = 8;
+
+ for (j = 0; j < 5; j++) {
+ p_dev->resource[0]->start = base[j];
+ p_dev->io_lines = base[j] ? 16 : 3;
+ if (!pcmcia_request_io(p_dev))
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static int simple_config(struct pcmcia_device *link)
+{
+ struct serial_info *info = link->priv;
+ int i = -ENODEV, try;
+
+ /* First pass: look for a config entry that looks normal.
+ * Four tries: an 8-port and then a 16-port window, each tried
+ * first without IO aliases and then with them */
+ link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_SET_IO;
+ for (try = 0; try < 4; try++)
+ if (!pcmcia_loop_config(link, simple_config_check, &try))
+ goto found_port;
+
+ /* Second pass: try to find an entry that isn't picky about
+ its base address, then try to grab any standard serial port
+ address, and finally try to get any free port. */
+ if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL))
+ goto found_port;
+
+ dev_warn(&link->dev, "no usable port range found, giving up\n");
+ return -1;
+
+found_port:
+ if (info->multi && (info->manfid == MANFID_3COM))
+ link->config_index &= ~(0x08);
+
+ /*
+ * Apply any configuration quirks.
+ */
+ if (info->quirk && info->quirk->config)
+ info->quirk->config(link);
+
+ i = pcmcia_enable_device(link);
+ if (i != 0)
+ return -1;
+ return setup_serial(link, info, link->resource[0]->start, link->irq);
+}
+
+static int multi_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ int *multi = priv_data;
+
+ if (p_dev->resource[1]->end)
+ return -EINVAL;
+
+ /* The quad port cards have bad CIS's, so just look for a
+ window larger than 8 ports and assume it will be right */
+ if (p_dev->resource[0]->end <= 8)
+ return -EINVAL;
+
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->resource[0]->end = *multi * 8;
+
+ if (pcmcia_request_io(p_dev))
+ return -ENODEV;
+ return 0;
+}
+
+static int multi_config_check_notpicky(struct pcmcia_device *p_dev,
+ void *priv_data)
+{
+ int *base2 = priv_data;
+
+ if (!p_dev->resource[0]->end || !p_dev->resource[1]->end)
+ return -ENODEV;
+
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 8;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+
+ if (pcmcia_request_io(p_dev))
+ return -ENODEV;
+
+ *base2 = p_dev->resource[0]->start + 8;
+ return 0;
+}
+
+static int multi_config(struct pcmcia_device *link)
+{
+ struct serial_info *info = link->priv;
+ int i, base2 = 0;
+
+ link->config_flags |= CONF_AUTO_SET_IO;
+ /* First, look for a generic full-sized window */
+ if (!pcmcia_loop_config(link, multi_config_check, &info->multi))
+ base2 = link->resource[0]->start + 8;
+ else {
+ /* If that didn't work, look for two windows */
+ info->multi = 2;
+ if (pcmcia_loop_config(link, multi_config_check_notpicky,
+ &base2)) {
+ dev_warn(&link->dev, "no usable port range "
+ "found, giving up\n");
+ return -ENODEV;
+ }
+ }
+
+ if (!link->irq)
+ dev_warn(&link->dev, "no usable IRQ found, continuing...\n");
+
+ /*
+ * Apply any configuration quirks.
+ */
+ if (info->quirk && info->quirk->config)
+ info->quirk->config(link);
+
+ i = pcmcia_enable_device(link);
+ if (i != 0)
+ return -ENODEV;
+
+ /* The Oxford Semiconductor OXCF950 cards are in fact single-port:
+ * 8 registers are for the UART, the others are extra registers.
+ * The Siemens MC45 PCMCIA card (Possio's GCC) is OXCF950 based too.
+ */
+ if (info->manfid == MANFID_OXSEMI || (info->manfid == MANFID_POSSIO &&
+ info->prodid == PRODID_POSSIO_GCC)) {
+ int err;
+
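+  /*
+  * For config index 1 or 3 the UART registers live in the second
+  * IO window while the first window carries the 950 control
+  * registers; otherwise the roles are reversed. Either way the
+  * control register base ends up in info->c950ctrl.
+  */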
+ if (link->config_index == 1 ||
+ link->config_index == 3) {
+ err = setup_serial(link, info, base2,
+ link->irq);
+ base2 = link->resource[0]->start;
+ } else {
+ err = setup_serial(link, info, link->resource[0]->start,
+ link->irq);
+ }
+ info->c950ctrl = base2;
+
+ /*
+ * FIXME: We really should wake up the port prior to
+ * handing it over to the serial layer.
+ */
+ if (info->quirk && info->quirk->wakeup)
+ info->quirk->wakeup(link);
+
+ return 0;
+ }
+
+ setup_serial(link, info, link->resource[0]->start, link->irq);
+ for (i = 0; i < info->multi - 1; i++)
+ setup_serial(link, info, base2 + (8 * i),
+ link->irq);
+ return 0;
+}
+
+static int serial_check_for_multi(struct pcmcia_device *p_dev, void *priv_data)
+{
+ struct serial_info *info = p_dev->priv;
+
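+ /*
+ * Heuristic: a single IO window whose size is a multiple of 8 holds
+ * one UART per 8 ports; a pair of 8-port windows means a dual card.
+ */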
+ if (!p_dev->resource[0]->end)
+ return -EINVAL;
+
+ if ((!p_dev->resource[1]->end) && (p_dev->resource[0]->end % 8 == 0))
+ info->multi = p_dev->resource[0]->end >> 3;
+
+ if ((p_dev->resource[1]->end) && (p_dev->resource[0]->end == 8)
+ && (p_dev->resource[1]->end == 8))
+ info->multi = 2;
+
+ return 0; /* break */
+}
+
+
+static int serial_config(struct pcmcia_device *link)
+{
+ struct serial_info *info = link->priv;
+ int i;
+
+ dev_dbg(&link->dev, "serial_config\n");
+
+ /* Is this a compliant multifunction card? */
+ info->multi = (link->socket->functions > 1);
+
+ /* Is this a multiport card? */
+ info->manfid = link->manf_id;
+ info->prodid = link->card_id;
+
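+ /*
+ * Look the card up in the quirk table; ~0 in a quirk's manfid or
+ * prodid acts as a wildcard, and the first matching entry wins.
+ */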
+ for (i = 0; i < ARRAY_SIZE(quirks); i++)
+ if ((quirks[i].manfid == ~0 ||
+ quirks[i].manfid == info->manfid) &&
+ (quirks[i].prodid == ~0 ||
+ quirks[i].prodid == info->prodid)) {
+ info->quirk = &quirks[i];
+ break;
+ }
+
+ /* Another check for dual-serial cards: look for either serial or
+ multifunction cards that ask for appropriate IO port ranges */
+ if ((info->multi == 0) &&
+ (link->has_func_id) &&
+ (link->socket->pcmcia_pfc == 0) &&
+ ((link->func_id == CISTPL_FUNCID_MULTI) ||
+ (link->func_id == CISTPL_FUNCID_SERIAL)))
+ pcmcia_loop_config(link, serial_check_for_multi, info);
+
+ /*
+ * Apply any multi-port quirk.
+ */
+ if (info->quirk && info->quirk->multi != -1)
+ info->multi = info->quirk->multi;
+
+ dev_info(&link->dev,
+ "trying to set up [0x%04x:0x%04x] (pfc: %d, multi: %d, quirk: %p)\n",
+ link->manf_id, link->card_id,
+ link->socket->pcmcia_pfc, info->multi, info->quirk);
+ if (link->socket->pcmcia_pfc)
+ i = pfc_config(link);
+ else if (info->multi > 1)
+ i = multi_config(link);
+ else
+ i = simple_config(link);
+
+ if (i || info->ndev == 0)
+ goto failed;
+
+ /*
+ * Apply any post-init quirk. FIXME: This should really happen
+ * before we register the port, since it might already be in use.
+ */
+ if (info->quirk && info->quirk->post)
+ if (info->quirk->post(link))
+ goto failed;
+
+ return 0;
+
+failed:
+ dev_warn(&link->dev, "failed to initialize\n");
+ serial_remove(link);
+ return -ENODEV;
+}
+
+static const struct pcmcia_device_id serial_ids[] = {
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0057, 0x0021),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0089, 0x110a),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0104, 0x000a),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0105, 0x0d0a),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0105, 0x0e0a),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0105, 0xea15),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0109, 0x0501),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0138, 0x110a),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0140, 0x000a),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0143, 0x3341),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0143, 0xc0ab),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x016c, 0x0081),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x021b, 0x0101),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x08a1, 0xc0ab),
+ PCMCIA_PFC_DEVICE_PROD_ID123(1, "MEGAHERTZ", "CC/XJEM3288", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x04cd2988, 0x46a52d63),
+ PCMCIA_PFC_DEVICE_PROD_ID123(1, "MEGAHERTZ", "CC/XJEM3336", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x0143b773, 0x46a52d63),
+ PCMCIA_PFC_DEVICE_PROD_ID123(1, "MEGAHERTZ", "EM1144T", "PCMCIA MODEM", 0xf510db04, 0x856d66c8, 0xbd6c43ef),
+ PCMCIA_PFC_DEVICE_PROD_ID123(1, "MEGAHERTZ", "XJEM1144/CCEM1144", "PCMCIA MODEM", 0xf510db04, 0x52d21e1e, 0xbd6c43ef),
+ PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "CEM28", 0x2e3ee845, 0x0ea978ea),
+ PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "CEM33", 0x2e3ee845, 0x80609023),
+ PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "CEM56", 0x2e3ee845, 0xa650c32a),
+ PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29),
+ PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "MEGAHERTZ", "XJEM1144/CCEM1144", 0xf510db04, 0x52d21e1e),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "MICRO RESEARCH", "COMBO-L/M-336", 0xb2ced065, 0x3ced0555),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "Ositech", "Trumpcard:Jack of Diamonds Modem+Ethernet", 0xc2f80cd, 0x656947b9),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "Ositech", "Trumpcard:Jack of Hearts Modem+Ethernet", 0xc2f80cd, 0xdc9ba5ed),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
+ PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
+ PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
+ PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0104, 0x0070),
+ PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x016c, 0x0020),
+ PCMCIA_MFC_DEVICE_PROD_ID123(1, "APEX DATA", "MULTICARD", "ETHERNET-MODEM", 0x11c2da09, 0x7289dc5d, 0xaad95e1f),
+ PCMCIA_MFC_DEVICE_PROD_ID12(1, "IBM", "Home and Away 28.8 PC Card ", 0xb569a6e5, 0x5bd4ff2c),
+ PCMCIA_MFC_DEVICE_PROD_ID12(1, "IBM", "Home and Away Credit Card Adapter", 0xb569a6e5, 0x4bdf15c3),
+ PCMCIA_MFC_DEVICE_PROD_ID12(1, "IBM", "w95 Home and Away Credit Card ", 0xb569a6e5, 0xae911c15),
+ PCMCIA_MFC_DEVICE_PROD_ID1(1, "Motorola MARQUIS", 0xf03e4e77),
+ PCMCIA_MFC_DEVICE_PROD_ID2(1, "FAX/Modem/Ethernet Combo Card ", 0x1ed59302),
+ PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0301),
+ PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x0276),
+ PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0039),
+ PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0006),
+ PCMCIA_DEVICE_MANF_CARD(0x0105, 0x0101), /* TDK DF2814 */
+ PCMCIA_DEVICE_MANF_CARD(0x0105, 0x100a), /* Xircom CM-56G */
+ PCMCIA_DEVICE_MANF_CARD(0x0105, 0x3e0a), /* TDK DF5660 */
+ PCMCIA_DEVICE_MANF_CARD(0x0105, 0x410a),
+ PCMCIA_DEVICE_MANF_CARD(0x0107, 0x0002), /* USRobotics 14,400 */
+ PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0d50),
+ PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0d51),
+ PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0d52),
+ PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0d53),
+ PCMCIA_DEVICE_MANF_CARD(0x010b, 0xd180),
+ PCMCIA_DEVICE_MANF_CARD(0x0115, 0x3330), /* USRobotics/SUN 14,400 */
+ PCMCIA_DEVICE_MANF_CARD(0x0124, 0x0100), /* Nokia DTP-2 ver II */
+ PCMCIA_DEVICE_MANF_CARD(0x0134, 0x5600), /* LASAT COMMUNICATIONS A/S */
+ PCMCIA_DEVICE_MANF_CARD(0x0137, 0x000e),
+ PCMCIA_DEVICE_MANF_CARD(0x0137, 0x001b),
+ PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0025),
+ PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0045),
+ PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0052),
+ PCMCIA_DEVICE_MANF_CARD(0x016c, 0x0006), /* Psion 56K+Fax */
+ PCMCIA_DEVICE_MANF_CARD(0x0200, 0x0001), /* MultiMobile */
+ PCMCIA_DEVICE_PROD_ID134("ADV", "TECH", "COMpad-32/85", 0x67459937, 0x916d02ba, 0x8fbe92ae),
+ PCMCIA_DEVICE_PROD_ID124("GATEWAY2000", "CC3144", "PCMCIA MODEM", 0x506bccae, 0xcb3685f1, 0xbd6c43ef),
+ PCMCIA_DEVICE_PROD_ID14("MEGAHERTZ", "PCMCIA MODEM", 0xf510db04, 0xbd6c43ef),
+ PCMCIA_DEVICE_PROD_ID124("TOSHIBA", "T144PF", "PCMCIA MODEM", 0xb4585a1a, 0x7271409c, 0xbd6c43ef),
+ PCMCIA_DEVICE_PROD_ID123("FUJITSU", "FC14F ", "MBH10213", 0x6ee5a3d8, 0x30ead12b, 0xb00f05a0),
+ PCMCIA_DEVICE_PROD_ID123("Novatel Wireless", "Merlin UMTS Modem", "U630", 0x32607776, 0xd9e73b13, 0xe87332e),
+ PCMCIA_DEVICE_PROD_ID13("MEGAHERTZ", "V.34 PCMCIA MODEM", 0xf510db04, 0xbb2cce4a),
+ PCMCIA_DEVICE_PROD_ID12("Brain Boxes", "Bluetooth PC Card", 0xee138382, 0xd4ce9b02),
+ PCMCIA_DEVICE_PROD_ID12("CIRRUS LOGIC", "FAX MODEM", 0xe625f451, 0xcecd6dfa),
+ PCMCIA_DEVICE_PROD_ID12("COMPAQ", "PCMCIA 28800 FAX/DATA MODEM", 0xa3a3062c, 0x8cbd7c76),
+ PCMCIA_DEVICE_PROD_ID12("COMPAQ", "PCMCIA 33600 FAX/DATA MODEM", 0xa3a3062c, 0x5a00ce95),
+ PCMCIA_DEVICE_PROD_ID12("Computerboards, Inc.", "PCM-COM422", 0xd0b78f51, 0x7e2d49ed),
+ PCMCIA_DEVICE_PROD_ID12("Dr. Neuhaus", "FURY CARD 14K4", 0x76942813, 0x8b96ce65),
+ PCMCIA_DEVICE_PROD_ID12("IBM", "ISDN/56K/GSM", 0xb569a6e5, 0xfee5297b),
+ PCMCIA_DEVICE_PROD_ID12("Intelligent", "ANGIA FAX/MODEM", 0xb496e65e, 0xf31602a6),
+ PCMCIA_DEVICE_PROD_ID12("Intel", "MODEM 2400+", 0x816cc815, 0x412729fb),
+ PCMCIA_DEVICE_PROD_ID12("Intertex", "IX34-PCMCIA", 0xf8a097e3, 0x97880447),
+ PCMCIA_DEVICE_PROD_ID12("IOTech Inc ", "PCMCIA Dual RS-232 Serial Port Card", 0x3bd2d898, 0x92abc92f),
+ PCMCIA_DEVICE_PROD_ID12("MACRONIX", "FAX/MODEM", 0x668388b3, 0x3f9bdf2f),
+ PCMCIA_DEVICE_PROD_ID12("Multi-Tech", "MT1432LT", 0x5f73be51, 0x0b3e2383),
+ PCMCIA_DEVICE_PROD_ID12("Multi-Tech", "MT2834LT", 0x5f73be51, 0x4cd7c09e),
+ PCMCIA_DEVICE_PROD_ID12("OEM ", "C288MX ", 0xb572d360, 0xd2385b7a),
+ PCMCIA_DEVICE_PROD_ID12("Option International", "V34bis GSM/PSTN Data/Fax Modem", 0x9d7cd6f5, 0x5cb8bf41),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA ", "C336MX ", 0x99bcafe9, 0xaa25bcab),
+ PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "PCMCIA Dual RS-232 Serial Port Card", 0xc4420b35, 0x92abc92f),
+ PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "Dual RS-232 Serial Port PC Card", 0xc4420b35, 0x031a380d),
+ PCMCIA_DEVICE_PROD_ID12("Telia", "SurfinBird 560P/A+", 0xe2cdd5e, 0xc9314b38),
+ PCMCIA_DEVICE_PROD_ID1("Smart Serial Port", 0x2d8ce292),
+ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "PCMCIA", "EN2218-LAN/MODEM", 0x281f1c5d, 0x570f348e, "cis/PCMLM28.cis"),
+ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "PCMCIA", "UE2218-LAN/MODEM", 0x281f1c5d, 0x6fdcacee, "cis/PCMLM28.cis"),
+ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
+ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
+ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
+ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "TOSHIBA", "Modem/LAN Card", 0xb4585a1a, 0x53f922f8, "cis/PCMLM28.cis"),
+ PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
+ PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "cis/DP83903.cis"),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */
+ PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC860", 0xd85f6206, 0x698f93db, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC860 3G Network Adapter R1 */
+ PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC710/AC750", 0xd85f6206, 0x761b11e0, "cis/SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */
+ PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */
+ PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */
+ PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "cis/COMpad2.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"),
+ PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "cis/GLOBETROTTER.cis"),
+ PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b),
+ PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83),
+ PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232 1.00.",0x19ca78af,0x69fb7490),
+ PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232",0x19ca78af,0xb6bc0235),
+ PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.","SERIAL CARD: CF232",0x63f2e0bd,0xb9e175d3),
+ PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.","SERIAL CARD: CF232-5",0x63f2e0bd,0xfce33442),
+ PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF232",0x3beb8cf2,0x171e7190),
+ PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF232-5",0x3beb8cf2,0x20da4262),
+ PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF428",0x3beb8cf2,0xea5dd57d),
+ PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF500",0x3beb8cf2,0xd77255fa),
+ PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: IC232",0x3beb8cf2,0x6a709903),
+ PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: SL232",0x3beb8cf2,0x18430676),
+ PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: XL232",0x3beb8cf2,0x6f933767),
+ PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: CF332",0x3beb8cf2,0x16dc1ba7),
+ PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL332",0x3beb8cf2,0x19816c41),
+ PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL385",0x3beb8cf2,0x64112029),
+ PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
+ PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
+ PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: CF332",0x3beb8cf2,0x16dc1ba7),
+ PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL332",0x3beb8cf2,0x19816c41),
+ PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL385",0x3beb8cf2,0x64112029),
+ PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
+ PCMCIA_MFC_DEVICE_PROD_ID12(2,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
+ PCMCIA_MFC_DEVICE_PROD_ID12(3,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
+ PCMCIA_DEVICE_MANF_CARD(0x0279, 0x950b),
+ /* too generic */
+ /* PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0160, 0x0002), */
+ /* PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0160, 0x0002), */
+ PCMCIA_DEVICE_FUNC_ID(2),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, serial_ids);
+
+MODULE_FIRMWARE("cis/PCMLM28.cis");
+MODULE_FIRMWARE("cis/DP83903.cis");
+MODULE_FIRMWARE("cis/3CCFEM556.cis");
+MODULE_FIRMWARE("cis/3CXEM556.cis");
+MODULE_FIRMWARE("cis/SW_8xx_SER.cis");
+MODULE_FIRMWARE("cis/SW_7xx_SER.cis");
+MODULE_FIRMWARE("cis/SW_555_SER.cis");
+MODULE_FIRMWARE("cis/MT5634ZLX.cis");
+MODULE_FIRMWARE("cis/COMpad2.cis");
+MODULE_FIRMWARE("cis/COMpad4.cis");
+MODULE_FIRMWARE("cis/RS-COM-2P.cis");
+
+static struct pcmcia_driver serial_cs_driver = {
+ .owner = THIS_MODULE,
+ .name = "serial_cs",
+ .probe = serial_probe,
+ .remove = serial_detach,
+ .id_table = serial_ids,
+ .suspend = serial_suspend,
+ .resume = serial_resume,
+};
+
+static int __init init_serial_cs(void)
+{
+ return pcmcia_register_driver(&serial_cs_driver);
+}
+
+static void __exit exit_serial_cs(void)
+{
+ pcmcia_unregister_driver(&serial_cs_driver);
+}
+
+module_init(init_serial_cs);
+module_exit(exit_serial_cs);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
new file mode 100644
index 00000000..2430319f
--- /dev/null
+++ b/drivers/tty/serial/serial_ks8695.c
@@ -0,0 +1,703 @@
+/*
+ * Driver for KS8695 serial ports
+ *
+ * Based on drivers/serial/serial_amba.c, by Kam Lee.
+ *
+ * Copyright 2002-2005 Micrel Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/device.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mach/irq.h>
+
+#include <mach/regs-uart.h>
+#include <mach/regs-irq.h>
+
+#if defined(CONFIG_SERIAL_KS8695_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+
+
+#define SERIAL_KS8695_MAJOR 204
+#define SERIAL_KS8695_MINOR 16
+#define SERIAL_KS8695_DEVNAME "ttyAM"
+
+#define SERIAL_KS8695_NR 1
+
+/*
+ * Access macros for the KS8695 UART
+ */
+#define UART_GET_CHAR(p) (__raw_readl((p)->membase + KS8695_URRB) & 0xFF)
+#define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + KS8695_URTH)
+#define UART_GET_FCR(p) __raw_readl((p)->membase + KS8695_URFC)
+#define UART_PUT_FCR(p, c) __raw_writel((c), (p)->membase + KS8695_URFC)
+#define UART_GET_MSR(p) __raw_readl((p)->membase + KS8695_URMS)
+#define UART_GET_LSR(p) __raw_readl((p)->membase + KS8695_URLS)
+#define UART_GET_LCR(p) __raw_readl((p)->membase + KS8695_URLC)
+#define UART_PUT_LCR(p, c) __raw_writel((c), (p)->membase + KS8695_URLC)
+#define UART_GET_MCR(p) __raw_readl((p)->membase + KS8695_URMC)
+#define UART_PUT_MCR(p, c) __raw_writel((c), (p)->membase + KS8695_URMC)
+#define UART_GET_BRDR(p) __raw_readl((p)->membase + KS8695_URBD)
+#define UART_PUT_BRDR(p, c) __raw_writel((c), (p)->membase + KS8695_URBD)
+
+#define KS8695_CLR_TX_INT() __raw_writel(1 << KS8695_IRQ_UART_TX, KS8695_IRQ_VA + KS8695_INTST)
+
+#define UART_DUMMY_LSR_RX 0x100
+#define UART_PORT_SIZE (KS8695_USR - KS8695_URRB + 4)
+
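+/*
+ * Rather than masking interrupt sources in the UART itself, this driver
+ * enables and disables the separate TX, RX and modem-status interrupt
+ * lines and tracks their current state in bits 0-2 of port->unused[0].
+ */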
+static inline int tx_enabled(struct uart_port *port)
+{
+ return port->unused[0] & 1;
+}
+
+static inline int rx_enabled(struct uart_port *port)
+{
+ return port->unused[0] & 2;
+}
+
+static inline int ms_enabled(struct uart_port *port)
+{
+ return port->unused[0] & 4;
+}
+
+static inline void ms_enable(struct uart_port *port, int enabled)
+{
+ if (enabled)
+ port->unused[0] |= 4;
+ else
+ port->unused[0] &= ~4;
+}
+
+static inline void rx_enable(struct uart_port *port, int enabled)
+{
+ if (enabled)
+ port->unused[0] |= 2;
+ else
+ port->unused[0] &= ~2;
+}
+
+static inline void tx_enable(struct uart_port *port, int enabled)
+{
+ if (enabled)
+ port->unused[0] |= 1;
+ else
+ port->unused[0] &= ~1;
+}
+
+
+#ifdef SUPPORT_SYSRQ
+static struct console ks8695_console;
+#endif
+
+static void ks8695uart_stop_tx(struct uart_port *port)
+{
+ if (tx_enabled(port)) {
+ /* Use disable_irq_nosync() rather than disable_irq() to avoid a
+ * self-imposed deadlock: we must not wait for the irq handler to
+ * finish, since this ks8695uart_stop_tx() is called from interrupt
+ * context.
+ */
+ disable_irq_nosync(KS8695_IRQ_UART_TX);
+ tx_enable(port, 0);
+ }
+}
+
+static void ks8695uart_start_tx(struct uart_port *port)
+{
+ if (!tx_enabled(port)) {
+ enable_irq(KS8695_IRQ_UART_TX);
+ tx_enable(port, 1);
+ }
+}
+
+static void ks8695uart_stop_rx(struct uart_port *port)
+{
+ if (rx_enabled(port)) {
+ disable_irq(KS8695_IRQ_UART_RX);
+ rx_enable(port, 0);
+ }
+}
+
+static void ks8695uart_enable_ms(struct uart_port *port)
+{
+ if (!ms_enabled(port)) {
+ enable_irq(KS8695_IRQ_UART_MODEM_STATUS);
+ ms_enable(port, 1);
+ }
+}
+
+static void ks8695uart_disable_ms(struct uart_port *port)
+{
+ if (ms_enabled(port)) {
+ disable_irq(KS8695_IRQ_UART_MODEM_STATUS);
+ ms_enable(port, 0);
+ }
+}
+
+static irqreturn_t ks8695uart_rx_chars(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned int status, ch, lsr, flg, max_count = 256;
+
+ status = UART_GET_LSR(port); /* clears pending LSR interrupts */
+ while ((status & URLS_URDR) && max_count--) {
+ ch = UART_GET_CHAR(port);
+ flg = TTY_NORMAL;
+
+ port->icount.rx++;
+
+ /*
+ * Note that the error handling code is
+ * out of the main execution path
+ */
+ lsr = UART_GET_LSR(port) | UART_DUMMY_LSR_RX;
+ if (unlikely(lsr & (URLS_URBI | URLS_URPE | URLS_URFE | URLS_URROE))) {
+ if (lsr & URLS_URBI) {
+ lsr &= ~(URLS_URFE | URLS_URPE);
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ goto ignore_char;
+ }
+ if (lsr & URLS_URPE)
+ port->icount.parity++;
+ if (lsr & URLS_URFE)
+ port->icount.frame++;
+ if (lsr & URLS_URROE)
+ port->icount.overrun++;
+
+ lsr &= port->read_status_mask;
+
+ if (lsr & URLS_URBI)
+ flg = TTY_BREAK;
+ else if (lsr & URLS_URPE)
+ flg = TTY_PARITY;
+ else if (lsr & URLS_URFE)
+ flg = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(port, ch))
+ goto ignore_char;
+
+ uart_insert_char(port, lsr, URLS_URROE, ch, flg);
+
+ignore_char:
+ status = UART_GET_LSR(port);
+ }
+ tty_flip_buffer_push(tty);
+
+ return IRQ_HANDLED;
+}
+
+
+static irqreturn_t ks8695uart_tx_chars(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned int count;
+
+ if (port->x_char) {
+ KS8695_CLR_TX_INT();
+ UART_PUT_CHAR(port, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ return IRQ_HANDLED;
+ }
+
+ if (uart_tx_stopped(port) || uart_circ_empty(xmit)) {
+ ks8695uart_stop_tx(port);
+ return IRQ_HANDLED;
+ }
+
+ count = 16; /* fifo size */
+ while (!uart_circ_empty(xmit) && (count-- > 0)) {
+ KS8695_CLR_TX_INT();
+ UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ ks8695uart_stop_tx(port);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ks8695uart_modem_status(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ unsigned int status;
+
+ /*
+ * clear modem interrupt by reading MSR
+ */
+ status = UART_GET_MSR(port);
+
+ if (status & URMS_URDDCD)
+ uart_handle_dcd_change(port, status & URMS_URDDCD);
+
+ if (status & URMS_URDDST)
+ port->icount.dsr++;
+
+ if (status & URMS_URDCTS)
+ uart_handle_cts_change(port, status & URMS_URDCTS);
+
+ if (status & URMS_URTERI)
+ port->icount.rng++;
+
+ wake_up_interruptible(&port->state->port.delta_msr_wait);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int ks8695uart_tx_empty(struct uart_port *port)
+{
+ return (UART_GET_LSR(port) & URLS_URTE) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int ks8695uart_get_mctrl(struct uart_port *port)
+{
+ unsigned int result = 0;
+ unsigned int status;
+
+ status = UART_GET_MSR(port);
+ if (status & URMS_URDCD)
+ result |= TIOCM_CAR;
+ if (status & URMS_URDSR)
+ result |= TIOCM_DSR;
+ if (status & URMS_URCTS)
+ result |= TIOCM_CTS;
+ if (status & URMS_URRI)
+ result |= TIOCM_RI;
+
+ return result;
+}
+
+static void ks8695uart_set_mctrl(struct uart_port *port, u_int mctrl)
+{
+ unsigned int mcr;
+
+ mcr = UART_GET_MCR(port);
+ if (mctrl & TIOCM_RTS)
+ mcr |= URMC_URRTS;
+ else
+ mcr &= ~URMC_URRTS;
+
+ if (mctrl & TIOCM_DTR)
+ mcr |= URMC_URDTR;
+ else
+ mcr &= ~URMC_URDTR;
+
+ UART_PUT_MCR(port, mcr);
+}
+
+static void ks8695uart_break_ctl(struct uart_port *port, int break_state)
+{
+ unsigned int lcr;
+
+ lcr = UART_GET_LCR(port);
+
+ if (break_state == -1)
+ lcr |= URLC_URSBC;
+ else
+ lcr &= ~URLC_URSBC;
+
+ UART_PUT_LCR(port, lcr);
+}
+
+static int ks8695uart_startup(struct uart_port *port)
+{
+ int retval;
+
+ set_irq_flags(KS8695_IRQ_UART_TX, IRQF_VALID | IRQF_NOAUTOEN);
+ tx_enable(port, 0);
+ rx_enable(port, 1);
+ ms_enable(port, 1);
+
+ /*
+ * Allocate the IRQ
+ */
+ retval = request_irq(KS8695_IRQ_UART_TX, ks8695uart_tx_chars, IRQF_DISABLED, "UART TX", port);
+ if (retval)
+ goto err_tx;
+
+ retval = request_irq(KS8695_IRQ_UART_RX, ks8695uart_rx_chars, IRQF_DISABLED, "UART RX", port);
+ if (retval)
+ goto err_rx;
+
+ retval = request_irq(KS8695_IRQ_UART_LINE_STATUS, ks8695uart_rx_chars, IRQF_DISABLED, "UART LineStatus", port);
+ if (retval)
+ goto err_ls;
+
+ retval = request_irq(KS8695_IRQ_UART_MODEM_STATUS, ks8695uart_modem_status, IRQF_DISABLED, "UART ModemStatus", port);
+ if (retval)
+ goto err_ms;
+
+ return 0;
+
+err_ms:
+ free_irq(KS8695_IRQ_UART_LINE_STATUS, port);
+err_ls:
+ free_irq(KS8695_IRQ_UART_RX, port);
+err_rx:
+ free_irq(KS8695_IRQ_UART_TX, port);
+err_tx:
+ return retval;
+}
+
+static void ks8695uart_shutdown(struct uart_port *port)
+{
+ /*
+ * Free the interrupt
+ */
+ free_irq(KS8695_IRQ_UART_RX, port);
+ free_irq(KS8695_IRQ_UART_TX, port);
+ free_irq(KS8695_IRQ_UART_MODEM_STATUS, port);
+ free_irq(KS8695_IRQ_UART_LINE_STATUS, port);
+
+ /* disable break condition and fifos */
+ UART_PUT_LCR(port, UART_GET_LCR(port) & ~URLC_URSBC);
+ UART_PUT_FCR(port, UART_GET_FCR(port) & ~URFC_URFE);
+}
+
+static void ks8695uart_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old)
+{
+ unsigned int lcr, fcr = 0;
+ unsigned long flags;
+ unsigned int baud, quot;
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+ quot = uart_get_divisor(port, baud);
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ lcr = URCL_5;
+ break;
+ case CS6:
+ lcr = URCL_6;
+ break;
+ case CS7:
+ lcr = URCL_7;
+ break;
+ default:
+ lcr = URCL_8;
+ break;
+ }
+
+ /* stop bits */
+ if (termios->c_cflag & CSTOPB)
+ lcr |= URLC_URSB;
+
+ /* parity */
+ if (termios->c_cflag & PARENB) {
+ if (termios->c_cflag & CMSPAR) { /* Mark or Space parity */
+ if (termios->c_cflag & PARODD)
+ lcr |= URPE_MARK;
+ else
+ lcr |= URPE_SPACE;
+ }
+ else if (termios->c_cflag & PARODD)
+ lcr |= URPE_ODD;
+ else
+ lcr |= URPE_EVEN;
+ }
+
+ if (port->fifosize > 1)
+ fcr = URFC_URFRT_8 | URFC_URTFR | URFC_URRFR | URFC_URFE;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ port->read_status_mask = URLS_URROE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= (URLS_URFE | URLS_URPE);
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= URLS_URBI;
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= (URLS_URFE | URLS_URPE);
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= URLS_URBI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= URLS_URROE;
+ }
+
+ /*
+ * Ignore all characters if CREAD is not set.
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= UART_DUMMY_LSR_RX;
+
+ /* first, disable everything */
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+ ks8695uart_enable_ms(port);
+ else
+ ks8695uart_disable_ms(port);
+
+ /* Set baud rate */
+ UART_PUT_BRDR(port, quot);
+
+ UART_PUT_LCR(port, lcr);
+ UART_PUT_FCR(port, fcr);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *ks8695uart_type(struct uart_port *port)
+{
+ return port->type == PORT_KS8695 ? "KS8695" : NULL;
+}
+
+/*
+ * Release the memory region(s) being used by 'port'
+ */
+static void ks8695uart_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase, UART_PORT_SIZE);
+}
+
+/*
+ * Request the memory region(s) being used by 'port'
+ */
+static int ks8695uart_request_port(struct uart_port *port)
+{
+ return request_mem_region(port->mapbase, UART_PORT_SIZE,
+ "serial_ks8695") != NULL ? 0 : -EBUSY;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void ks8695uart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_KS8695;
+ ks8695uart_request_port(port);
+ }
+}
+
+/*
+ * verify the new serial_struct (for TIOCSSERIAL).
+ */
+static int ks8695uart_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ int ret = 0;
+
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_KS8695)
+ ret = -EINVAL;
+ if (ser->irq != port->irq)
+ ret = -EINVAL;
+ if (ser->baud_base < 9600)
+ ret = -EINVAL;
+ return ret;
+}
+
+static struct uart_ops ks8695uart_pops = {
+ .tx_empty = ks8695uart_tx_empty,
+ .set_mctrl = ks8695uart_set_mctrl,
+ .get_mctrl = ks8695uart_get_mctrl,
+ .stop_tx = ks8695uart_stop_tx,
+ .start_tx = ks8695uart_start_tx,
+ .stop_rx = ks8695uart_stop_rx,
+ .enable_ms = ks8695uart_enable_ms,
+ .break_ctl = ks8695uart_break_ctl,
+ .startup = ks8695uart_startup,
+ .shutdown = ks8695uart_shutdown,
+ .set_termios = ks8695uart_set_termios,
+ .type = ks8695uart_type,
+ .release_port = ks8695uart_release_port,
+ .request_port = ks8695uart_request_port,
+ .config_port = ks8695uart_config_port,
+ .verify_port = ks8695uart_verify_port,
+};
+
+static struct uart_port ks8695uart_ports[SERIAL_KS8695_NR] = {
+ {
+ .membase = (void *) KS8695_UART_VA,
+ .mapbase = KS8695_UART_VA,
+ .iotype = SERIAL_IO_MEM,
+ .irq = KS8695_IRQ_UART_TX,
+ .uartclk = KS8695_CLOCK_RATE * 16,
+ .fifosize = 16,
+ .ops = &ks8695uart_pops,
+ .flags = ASYNC_BOOT_AUTOCONF,
+ .line = 0,
+ }
+};
+
+#ifdef CONFIG_SERIAL_KS8695_CONSOLE
+static void ks8695_console_putchar(struct uart_port *port, int ch)
+{
+ while (!(UART_GET_LSR(port) & URLS_URTHRE))
+ barrier();
+
+ UART_PUT_CHAR(port, ch);
+}
+
+static void ks8695_console_write(struct console *co, const char *s, u_int count)
+{
+ struct uart_port *port = ks8695uart_ports + co->index;
+
+ uart_console_write(port, s, count, ks8695_console_putchar);
+}
+
+static void __init ks8695_console_get_options(struct uart_port *port, int *baud, int *parity, int *bits)
+{
+ unsigned int lcr;
+
+ lcr = UART_GET_LCR(port);
+
+ switch (lcr & URLC_PARITY) {
+ case URPE_ODD:
+ *parity = 'o';
+ break;
+ case URPE_EVEN:
+ *parity = 'e';
+ break;
+ default:
+ *parity = 'n';
+ }
+
+ switch (lcr & URLC_URCL) {
+ case URCL_5:
+ *bits = 5;
+ break;
+ case URCL_6:
+ *bits = 6;
+ break;
+ case URCL_7:
+ *bits = 7;
+ break;
+ default:
+ *bits = 8;
+ }
+
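+ /*
+ * Recover the baud rate from the 12-bit divisor: uartclk / divisor,
+ * divided by the usual 16x oversampling factor, then rounded down
+ * to a multiple of 16 to land on a standard rate.
+ */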
+ *baud = port->uartclk / (UART_GET_BRDR(port) & 0x0FFF);
+ *baud /= 16;
+ *baud &= 0xFFFFFFF0;
+}
+
+static int __init ks8695_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ port = uart_get_console(ks8695uart_ports, SERIAL_KS8695_NR, co);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ ks8695_console_get_options(port, &baud, &parity, &bits);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver ks8695_reg;
+
+static struct console ks8695_console = {
+ .name = SERIAL_KS8695_DEVNAME,
+ .write = ks8695_console_write,
+ .device = uart_console_device,
+ .setup = ks8695_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &ks8695_reg,
+};
+
+static int __init ks8695_console_init(void)
+{
+ add_preferred_console(SERIAL_KS8695_DEVNAME, 0, NULL);
+ register_console(&ks8695_console);
+ return 0;
+}
+
+console_initcall(ks8695_console_init);
+
+#define KS8695_CONSOLE &ks8695_console
+#else
+#define KS8695_CONSOLE NULL
+#endif
+
+static struct uart_driver ks8695_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "serial_ks8695",
+ .dev_name = SERIAL_KS8695_DEVNAME,
+ .major = SERIAL_KS8695_MAJOR,
+ .minor = SERIAL_KS8695_MINOR,
+ .nr = SERIAL_KS8695_NR,
+ .cons = KS8695_CONSOLE,
+};
+
+static int __init ks8695uart_init(void)
+{
+ int i, ret;
+
+ printk(KERN_INFO "Serial: Micrel KS8695 UART driver\n");
+
+ ret = uart_register_driver(&ks8695_reg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < SERIAL_KS8695_NR; i++)
+ uart_add_one_port(&ks8695_reg, &ks8695uart_ports[0]);
+
+ return 0;
+}
+
+static void __exit ks8695uart_exit(void)
+{
+ int i;
+
+ for (i = 0; i < SERIAL_KS8695_NR; i++)
+ uart_remove_one_port(&ks8695_reg, &ks8695uart_ports[0]);
+ uart_unregister_driver(&ks8695_reg);
+}
+
+module_init(ks8695uart_init);
+module_exit(ks8695uart_exit);
+
+MODULE_DESCRIPTION("KS8695 serial port driver");
+MODULE_AUTHOR("Micrel Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
new file mode 100644
index 00000000..8e3fc194
--- /dev/null
+++ b/drivers/tty/serial/serial_txx9.c
@@ -0,0 +1,1342 @@
+/*
+ * Derived from many drivers using generic_serial interface,
+ * especially serial_tx3912.c by Steven J. Hill and r39xx_serial.c
+ * (was in Linux/VR tree) by Jim Pick.
+ *
+ * Copyright (C) 1999 Harald Koerfgen
+ * Copyright (C) 2000 Jim Pick <jim@jimpick.com>
+ * Copyright (C) 2001 Steven J. Hill (sjhill@realitydiluted.com)
+ * Copyright (C) 2000-2002 Toshiba Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial driver for TX3927/TX4927/TX4925/TX4938 internal SIO controller
+ */
+
+#if defined(CONFIG_SERIAL_TXX9_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+
+#include <asm/io.h>
+
+static char *serial_version = "1.11";
+static char *serial_name = "TX39/49 Serial driver";
+
+#define PASS_LIMIT 256
+
+#if !defined(CONFIG_SERIAL_TXX9_STDSERIAL)
+/* "ttyS" is used for standard serial driver */
+#define TXX9_TTY_NAME "ttyTX"
+#define TXX9_TTY_MINOR_START 196
+#define TXX9_TTY_MAJOR 204
+#else
+/* acts like standard serial driver */
+#define TXX9_TTY_NAME "ttyS"
+#define TXX9_TTY_MINOR_START 64
+#define TXX9_TTY_MAJOR TTY_MAJOR
+#endif
+
+/* flag aliases */
+#define UPF_TXX9_HAVE_CTS_LINE UPF_BUGGY_UART
+#define UPF_TXX9_USE_SCLK UPF_MAGIC_MULTIPLIER
+
+#ifdef CONFIG_PCI
+/* support for Toshiba TC86C001 SIO */
+#define ENABLE_SERIAL_TXX9_PCI
+#endif
+
+/*
+ * Number of serial ports
+ */
+#define UART_NR CONFIG_SERIAL_TXX9_NR_UARTS
+
+struct uart_txx9_port {
+ struct uart_port port;
+ /* No additional info for now */
+};
+
+#define TXX9_REGION_SIZE 0x24
+
+/* TXX9 Serial Registers */
+#define TXX9_SILCR 0x00
+#define TXX9_SIDICR 0x04
+#define TXX9_SIDISR 0x08
+#define TXX9_SICISR 0x0c
+#define TXX9_SIFCR 0x10
+#define TXX9_SIFLCR 0x14
+#define TXX9_SIBGR 0x18
+#define TXX9_SITFIFO 0x1c
+#define TXX9_SIRFIFO 0x20
+
+/* SILCR : Line Control */
+#define TXX9_SILCR_SCS_MASK 0x00000060
+#define TXX9_SILCR_SCS_IMCLK 0x00000000
+#define TXX9_SILCR_SCS_IMCLK_BG 0x00000020
+#define TXX9_SILCR_SCS_SCLK 0x00000040
+#define TXX9_SILCR_SCS_SCLK_BG 0x00000060
+#define TXX9_SILCR_UEPS 0x00000010
+#define TXX9_SILCR_UPEN 0x00000008
+#define TXX9_SILCR_USBL_MASK 0x00000004
+#define TXX9_SILCR_USBL_1BIT 0x00000000
+#define TXX9_SILCR_USBL_2BIT 0x00000004
+#define TXX9_SILCR_UMODE_MASK 0x00000003
+#define TXX9_SILCR_UMODE_8BIT 0x00000000
+#define TXX9_SILCR_UMODE_7BIT 0x00000001
+
+/* SIDICR : DMA/Int. Control */
+#define TXX9_SIDICR_TDE 0x00008000
+#define TXX9_SIDICR_RDE 0x00004000
+#define TXX9_SIDICR_TIE 0x00002000
+#define TXX9_SIDICR_RIE 0x00001000
+#define TXX9_SIDICR_SPIE 0x00000800
+#define TXX9_SIDICR_CTSAC 0x00000600
+#define TXX9_SIDICR_STIE_MASK 0x0000003f
+#define TXX9_SIDICR_STIE_OERS 0x00000020
+#define TXX9_SIDICR_STIE_CTSS 0x00000010
+#define TXX9_SIDICR_STIE_RBRKD 0x00000008
+#define TXX9_SIDICR_STIE_TRDY 0x00000004
+#define TXX9_SIDICR_STIE_TXALS 0x00000002
+#define TXX9_SIDICR_STIE_UBRKD 0x00000001
+
+/* SIDISR : DMA/Int. Status */
+#define TXX9_SIDISR_UBRK 0x00008000
+#define TXX9_SIDISR_UVALID 0x00004000
+#define TXX9_SIDISR_UFER 0x00002000
+#define TXX9_SIDISR_UPER 0x00001000
+#define TXX9_SIDISR_UOER 0x00000800
+#define TXX9_SIDISR_ERI 0x00000400
+#define TXX9_SIDISR_TOUT 0x00000200
+#define TXX9_SIDISR_TDIS 0x00000100
+#define TXX9_SIDISR_RDIS 0x00000080
+#define TXX9_SIDISR_STIS 0x00000040
+#define TXX9_SIDISR_RFDN_MASK 0x0000001f
+
+/* SICISR : Change Int. Status */
+#define TXX9_SICISR_OERS 0x00000020
+#define TXX9_SICISR_CTSS 0x00000010
+#define TXX9_SICISR_RBRKD 0x00000008
+#define TXX9_SICISR_TRDY 0x00000004
+#define TXX9_SICISR_TXALS 0x00000002
+#define TXX9_SICISR_UBRKD 0x00000001
+
+/* SIFCR : FIFO Control */
+#define TXX9_SIFCR_SWRST 0x00008000
+#define TXX9_SIFCR_RDIL_MASK 0x00000180
+#define TXX9_SIFCR_RDIL_1 0x00000000
+#define TXX9_SIFCR_RDIL_4 0x00000080
+#define TXX9_SIFCR_RDIL_8 0x00000100
+#define TXX9_SIFCR_RDIL_12 0x00000180
+#define TXX9_SIFCR_RDIL_MAX 0x00000180
+#define TXX9_SIFCR_TDIL_MASK 0x00000018
+#define TXX9_SIFCR_TDIL_1 0x00000000
+#define TXX9_SIFCR_TDIL_4 0x00000001
+#define TXX9_SIFCR_TDIL_8 0x00000010
+#define TXX9_SIFCR_TDIL_MAX 0x00000010
+#define TXX9_SIFCR_TFRST 0x00000004
+#define TXX9_SIFCR_RFRST 0x00000002
+#define TXX9_SIFCR_FRSTE 0x00000001
+#define TXX9_SIO_TX_FIFO 8
+#define TXX9_SIO_RX_FIFO 16
+
+/* SIFLCR : Flow Control */
+#define TXX9_SIFLCR_RCS 0x00001000
+#define TXX9_SIFLCR_TES 0x00000800
+#define TXX9_SIFLCR_RTSSC 0x00000200
+#define TXX9_SIFLCR_RSDE 0x00000100
+#define TXX9_SIFLCR_TSDE 0x00000080
+#define TXX9_SIFLCR_RTSTL_MASK 0x0000001e
+#define TXX9_SIFLCR_RTSTL_MAX 0x0000001e
+#define TXX9_SIFLCR_TBRK 0x00000001
+
+/* SIBGR : Baudrate Control */
+#define TXX9_SIBGR_BCLK_MASK 0x00000300
+#define TXX9_SIBGR_BCLK_T0 0x00000000
+#define TXX9_SIBGR_BCLK_T2 0x00000100
+#define TXX9_SIBGR_BCLK_T4 0x00000200
+#define TXX9_SIBGR_BCLK_T6 0x00000300
+#define TXX9_SIBGR_BRD_MASK 0x000000ff
+
+static inline unsigned int sio_in(struct uart_txx9_port *up, int offset)
+{
+ switch (up->port.iotype) {
+ default:
+ return __raw_readl(up->port.membase + offset);
+ case UPIO_PORT:
+ return inl(up->port.iobase + offset);
+ }
+}
+
+static inline void
+sio_out(struct uart_txx9_port *up, int offset, int value)
+{
+ switch (up->port.iotype) {
+ default:
+ __raw_writel(value, up->port.membase + offset);
+ break;
+ case UPIO_PORT:
+ outl(value, up->port.iobase + offset);
+ break;
+ }
+}
+
+static inline void
+sio_mask(struct uart_txx9_port *up, int offset, unsigned int value)
+{
+ sio_out(up, offset, sio_in(up, offset) & ~value);
+}
+static inline void
+sio_set(struct uart_txx9_port *up, int offset, unsigned int value)
+{
+ sio_out(up, offset, sio_in(up, offset) | value);
+}
+
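+/*
+ * Program the baud rate generator: halve the divisor, then pick the
+ * smallest prescaler (T0/T2/T4/T6, presumably /1, /4, /16 and /64)
+ * that fits it into the 8-bit BRD field, saturating at 0xff.
+ */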
+static inline void
+sio_quot_set(struct uart_txx9_port *up, int quot)
+{
+ quot >>= 1;
+ if (quot < 256)
+ sio_out(up, TXX9_SIBGR, quot | TXX9_SIBGR_BCLK_T0);
+ else if (quot < (256 << 2))
+ sio_out(up, TXX9_SIBGR, (quot >> 2) | TXX9_SIBGR_BCLK_T2);
+ else if (quot < (256 << 4))
+ sio_out(up, TXX9_SIBGR, (quot >> 4) | TXX9_SIBGR_BCLK_T4);
+ else if (quot < (256 << 6))
+ sio_out(up, TXX9_SIBGR, (quot >> 6) | TXX9_SIBGR_BCLK_T6);
+ else
+ sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6);
+}
+
+static struct uart_txx9_port *to_uart_txx9_port(struct uart_port *port)
+{
+ return container_of(port, struct uart_txx9_port, port);
+}
+
+static void serial_txx9_stop_tx(struct uart_port *port)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+ sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
+}
+
+static void serial_txx9_start_tx(struct uart_port *port)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+ sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
+}
+
+static void serial_txx9_stop_rx(struct uart_port *port)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+ up->port.read_status_mask &= ~TXX9_SIDISR_RDIS;
+}
+
+static void serial_txx9_enable_ms(struct uart_port *port)
+{
+ /* TXX9-SIO cannot control DTR... */
+}
+
+static void serial_txx9_initialize(struct uart_port *port)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+ unsigned int tmout = 10000;
+
+ sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST);
+ /* TX4925 BUG WORKAROUND. Accessing SIOC register
+ * immediately after soft reset causes bus error. */
+ mmiowb();
+ udelay(1);
+ while ((sio_in(up, TXX9_SIFCR) & TXX9_SIFCR_SWRST) && --tmout)
+ udelay(1);
+ /* TX Int by FIFO Empty, RX Int by Receiving 1 char. */
+ sio_set(up, TXX9_SIFCR,
+ TXX9_SIFCR_TDIL_MAX | TXX9_SIFCR_RDIL_1);
+ /* initial settings */
+ sio_out(up, TXX9_SILCR,
+ TXX9_SILCR_UMODE_8BIT | TXX9_SILCR_USBL_1BIT |
+ ((up->port.flags & UPF_TXX9_USE_SCLK) ?
+ TXX9_SILCR_SCS_SCLK_BG : TXX9_SILCR_SCS_IMCLK_BG));
+ sio_quot_set(up, uart_get_divisor(port, 9600));
+ sio_out(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSTL_MAX /* 15 */);
+ sio_out(up, TXX9_SIDICR, 0);
+}
+
+static inline void
+receive_chars(struct uart_txx9_port *up, unsigned int *status)
+{
+ struct tty_struct *tty = up->port.state->port.tty;
+ unsigned char ch;
+ unsigned int disr = *status;
+ int max_count = 256;
+ char flag;
+ unsigned int next_ignore_status_mask;
+
+ do {
+ ch = sio_in(up, TXX9_SIRFIFO);
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+
+ /* mask out RFDN_MASK bit added by previous overrun */
+ next_ignore_status_mask =
+ up->port.ignore_status_mask & ~TXX9_SIDISR_RFDN_MASK;
+ if (unlikely(disr & (TXX9_SIDISR_UBRK | TXX9_SIDISR_UPER |
+ TXX9_SIDISR_UFER | TXX9_SIDISR_UOER))) {
+ /*
+ * For statistics only
+ */
+ if (disr & TXX9_SIDISR_UBRK) {
+ disr &= ~(TXX9_SIDISR_UFER | TXX9_SIDISR_UPER);
+ up->port.icount.brk++;
+ /*
+ * We do the SysRQ and SAK checking
+ * here because otherwise the break
+ * may get masked by ignore_status_mask
+ * or read_status_mask.
+ */
+ if (uart_handle_break(&up->port))
+ goto ignore_char;
+ } else if (disr & TXX9_SIDISR_UPER)
+ up->port.icount.parity++;
+ else if (disr & TXX9_SIDISR_UFER)
+ up->port.icount.frame++;
+ if (disr & TXX9_SIDISR_UOER) {
+ up->port.icount.overrun++;
+ /*
+ * The receiver read buffer still holds
+ * the char which caused the overrun.
+ * Ignore the next char by adding RFDN_MASK
+ * to ignore_status_mask temporarily.
+ */
+ next_ignore_status_mask |=
+ TXX9_SIDISR_RFDN_MASK;
+ }
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ disr &= up->port.read_status_mask;
+
+ if (disr & TXX9_SIDISR_UBRK) {
+ flag = TTY_BREAK;
+ } else if (disr & TXX9_SIDISR_UPER)
+ flag = TTY_PARITY;
+ else if (disr & TXX9_SIDISR_UFER)
+ flag = TTY_FRAME;
+ }
+ if (uart_handle_sysrq_char(&up->port, ch))
+ goto ignore_char;
+
+ uart_insert_char(&up->port, disr, TXX9_SIDISR_UOER, ch, flag);
+
+ ignore_char:
+ up->port.ignore_status_mask = next_ignore_status_mask;
+ disr = sio_in(up, TXX9_SIDISR);
+ } while (!(disr & TXX9_SIDISR_UVALID) && (max_count-- > 0));
+ spin_unlock(&up->port.lock);
+ tty_flip_buffer_push(tty);
+ spin_lock(&up->port.lock);
+ *status = disr;
+}
+
+static inline void transmit_chars(struct uart_txx9_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int count;
+
+ if (up->port.x_char) {
+ sio_out(up, TXX9_SITFIFO, up->port.x_char);
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+ serial_txx9_stop_tx(&up->port);
+ return;
+ }
+
+ count = TXX9_SIO_TX_FIFO;
+ do {
+ sio_out(up, TXX9_SITFIFO, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ if (uart_circ_empty(xmit))
+ serial_txx9_stop_tx(&up->port);
+}
+
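+/*
+ * Shared interrupt handler.  Loop while the TX, RX or timeout status
+ * bits are pending, handing off to receive_chars()/transmit_chars(),
+ * and bail out after PASS_LIMIT iterations so a stuck port cannot
+ * livelock the system.
+ */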
+static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id)
+{
+ int pass_counter = 0;
+ struct uart_txx9_port *up = dev_id;
+ unsigned int status;
+
+ while (1) {
+ spin_lock(&up->port.lock);
+ status = sio_in(up, TXX9_SIDISR);
+ if (!(sio_in(up, TXX9_SIDICR) & TXX9_SIDICR_TIE))
+ status &= ~TXX9_SIDISR_TDIS;
+ if (!(status & (TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
+ TXX9_SIDISR_TOUT))) {
+ spin_unlock(&up->port.lock);
+ break;
+ }
+
+ if (status & TXX9_SIDISR_RDIS)
+ receive_chars(up, &status);
+ if (status & TXX9_SIDISR_TDIS)
+ transmit_chars(up);
+ /* Clear TX/RX Int. Status */
+ sio_mask(up, TXX9_SIDISR,
+ TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
+ TXX9_SIDISR_TOUT);
+ spin_unlock(&up->port.lock);
+
+ if (pass_counter++ > PASS_LIMIT)
+ break;
+ }
+
+ return pass_counter ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static unsigned int serial_txx9_tx_empty(struct uart_port *port)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ ret = (sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS) ? TIOCSER_TEMT : 0;
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return ret;
+}
+
+static unsigned int serial_txx9_get_mctrl(struct uart_port *port)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+ unsigned int ret;
+
+ /* no modem control lines */
+ ret = TIOCM_CAR | TIOCM_DSR;
+ ret |= (sio_in(up, TXX9_SIFLCR) & TXX9_SIFLCR_RTSSC) ? 0 : TIOCM_RTS;
+ ret |= (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS) ? 0 : TIOCM_CTS;
+
+ return ret;
+}
+
+static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+
+ if (mctrl & TIOCM_RTS)
+ sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC);
+ else
+ sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC);
+}
+
+static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (break_state == -1)
+ sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
+ else
+ sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct uart_txx9_port *up)
+{
+ unsigned int tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ while (--tmout &&
+ !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS))
+ udelay(1);
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ tmout = 1000000;
+ while (--tmout &&
+ (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS))
+ udelay(1);
+ }
+}
+#endif
+
+#ifdef CONFIG_CONSOLE_POLL
+/*
+ * Console polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+static int serial_txx9_get_poll_char(struct uart_port *port)
+{
+ unsigned int ier;
+ unsigned char c;
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = sio_in(up, TXX9_SIDICR);
+ sio_out(up, TXX9_SIDICR, 0);
+
+ while (sio_in(up, TXX9_SIDISR) & TXX9_SIDISR_UVALID)
+ ;
+
+ c = sio_in(up, TXX9_SIRFIFO);
+
+ /*
+ * Finally, clear RX interrupt status
+ * and restore the IER
+ */
+ sio_mask(up, TXX9_SIDISR, TXX9_SIDISR_RDIS);
+ sio_out(up, TXX9_SIDICR, ier);
+ return c;
+}
+
+
+static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
+{
+ unsigned int ier;
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = sio_in(up, TXX9_SIDICR);
+ sio_out(up, TXX9_SIDICR, 0);
+
+ wait_for_xmitr(up);
+ /*
+ * Send the character out.
+ * If an LF, also send a CR...
+ */
+ sio_out(up, TXX9_SITFIFO, c);
+ if (c == 10) {
+ wait_for_xmitr(up);
+ sio_out(up, TXX9_SITFIFO, 13);
+ }
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up);
+ sio_out(up, TXX9_SIDICR, ier);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
+static int serial_txx9_startup(struct uart_port *port)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+ unsigned long flags;
+ int retval;
+
+ /*
+ * Clear the FIFO buffers and disable them.
+ * (they will be reenabled in set_termios())
+ */
+ sio_set(up, TXX9_SIFCR,
+ TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE);
+ /* clear reset */
+ sio_mask(up, TXX9_SIFCR,
+ TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE);
+ sio_out(up, TXX9_SIDICR, 0);
+
+ /*
+ * Clear the interrupt registers.
+ */
+ sio_out(up, TXX9_SIDISR, 0);
+
+ retval = request_irq(up->port.irq, serial_txx9_interrupt,
+ IRQF_SHARED, "serial_txx9", up);
+ if (retval)
+ return retval;
+
+ /*
+ * Now, initialize the UART
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+ serial_txx9_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /* Enable RX/TX */
+ sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE);
+
+ /*
+ * Finally, enable interrupts.
+ */
+ sio_set(up, TXX9_SIDICR, TXX9_SIDICR_RIE);
+
+ return 0;
+}
+
+static void serial_txx9_shutdown(struct uart_port *port)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+ unsigned long flags;
+
+ /*
+ * Disable interrupts from this port
+ */
+ sio_out(up, TXX9_SIDICR, 0); /* disable all intrs */
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ serial_txx9_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /*
+ * Disable break condition
+ */
+ sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
+
+#ifdef CONFIG_SERIAL_TXX9_CONSOLE
+ if (up->port.cons && up->port.line == up->port.cons->index) {
+ free_irq(up->port.irq, up);
+ return;
+ }
+#endif
+ /* reset FIFOs */
+ sio_set(up, TXX9_SIFCR,
+ TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE);
+ /* clear reset */
+ sio_mask(up, TXX9_SIFCR,
+ TXX9_SIFCR_TFRST | TXX9_SIFCR_RFRST | TXX9_SIFCR_FRSTE);
+
+ /* Disable RX/TX */
+ sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE);
+
+ free_irq(up->port.irq, up);
+}
+
+static void
+serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+ unsigned int cval, fcr = 0;
+ unsigned long flags;
+ unsigned int baud, quot;
+
+ /*
+ * We don't support modem control lines.
+ */
+ termios->c_cflag &= ~(HUPCL | CMSPAR);
+ termios->c_cflag |= CLOCAL;
+
+ cval = sio_in(up, TXX9_SILCR);
+ /* byte size and parity */
+ cval &= ~TXX9_SILCR_UMODE_MASK;
+ switch (termios->c_cflag & CSIZE) {
+ case CS7:
+ cval |= TXX9_SILCR_UMODE_7BIT;
+ break;
+ default:
+ case CS5: /* not supported */
+ case CS6: /* not supported */
+ case CS8:
+ cval |= TXX9_SILCR_UMODE_8BIT;
+ break;
+ }
+
+ cval &= ~TXX9_SILCR_USBL_MASK;
+ if (termios->c_cflag & CSTOPB)
+ cval |= TXX9_SILCR_USBL_2BIT;
+ else
+ cval |= TXX9_SILCR_USBL_1BIT;
+ cval &= ~(TXX9_SILCR_UPEN | TXX9_SILCR_UEPS);
+ if (termios->c_cflag & PARENB)
+ cval |= TXX9_SILCR_UPEN;
+ if (!(termios->c_cflag & PARODD))
+ cval |= TXX9_SILCR_UEPS;
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16/2);
+ quot = uart_get_divisor(port, baud);
+
+ /* Set up FIFOs */
+ /* TX Int by FIFO Empty, RX Int by Receiving 1 char. */
+ fcr = TXX9_SIFCR_TDIL_MAX | TXX9_SIFCR_RDIL_1;
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ up->port.read_status_mask = TXX9_SIDISR_UOER |
+ TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= TXX9_SIDISR_UFER | TXX9_SIDISR_UPER;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ up->port.read_status_mask |= TXX9_SIDISR_UBRK;
+
+ /*
+ * Characters to ignore
+ */
+ up->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= TXX9_SIDISR_UPER | TXX9_SIDISR_UFER;
+ if (termios->c_iflag & IGNBRK) {
+ up->port.ignore_status_mask |= TXX9_SIDISR_UBRK;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ up->port.ignore_status_mask |= TXX9_SIDISR_UOER;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= TXX9_SIDISR_RDIS;
+
+ /* CTS flow control flag */
+ if ((termios->c_cflag & CRTSCTS) &&
+ (up->port.flags & UPF_TXX9_HAVE_CTS_LINE)) {
+ sio_set(up, TXX9_SIFLCR,
+ TXX9_SIFLCR_RCS | TXX9_SIFLCR_TES);
+ } else {
+ sio_mask(up, TXX9_SIFLCR,
+ TXX9_SIFLCR_RCS | TXX9_SIFLCR_TES);
+ }
+
+ sio_out(up, TXX9_SILCR, cval);
+ sio_quot_set(up, quot);
+ sio_out(up, TXX9_SIFCR, fcr);
+
+ serial_txx9_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static void
+serial_txx9_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ /*
+ * If oldstate was -1 this is called from
+ * uart_configure_port(). In this case do not initialize the
+ * port now, because the port was already initialized (for
+ * non-console port) or should not be initialized here (for
+ * console port). If we initialized the port here we would
+ * lose the serial console settings.
+ */
+ if (state == 0 && oldstate != -1)
+ serial_txx9_initialize(port);
+}
+
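+/*
+ * Claim the register region for a port: a memory region (optionally
+ * ioremap()ed when UPF_IOREMAP is set) for memory-mapped ports, or an
+ * I/O port region for UPIO_PORT.
+ */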
+static int serial_txx9_request_resource(struct uart_txx9_port *up)
+{
+ unsigned int size = TXX9_REGION_SIZE;
+ int ret = 0;
+
+ switch (up->port.iotype) {
+ default:
+ if (!up->port.mapbase)
+ break;
+
+ if (!request_mem_region(up->port.mapbase, size, "serial_txx9")) {
+ ret = -EBUSY;
+ break;
+ }
+
+ if (up->port.flags & UPF_IOREMAP) {
+ up->port.membase = ioremap(up->port.mapbase, size);
+ if (!up->port.membase) {
+ release_mem_region(up->port.mapbase, size);
+ ret = -ENOMEM;
+ }
+ }
+ break;
+
+ case UPIO_PORT:
+ if (!request_region(up->port.iobase, size, "serial_txx9"))
+ ret = -EBUSY;
+ break;
+ }
+ return ret;
+}
+
+static void serial_txx9_release_resource(struct uart_txx9_port *up)
+{
+ unsigned int size = TXX9_REGION_SIZE;
+
+ switch (up->port.iotype) {
+ default:
+ if (!up->port.mapbase)
+ break;
+
+ if (up->port.flags & UPF_IOREMAP) {
+ iounmap(up->port.membase);
+ up->port.membase = NULL;
+ }
+
+ release_mem_region(up->port.mapbase, size);
+ break;
+
+ case UPIO_PORT:
+ release_region(up->port.iobase, size);
+ break;
+ }
+}
+
+static void serial_txx9_release_port(struct uart_port *port)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+ serial_txx9_release_resource(up);
+}
+
+static int serial_txx9_request_port(struct uart_port *port)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+ return serial_txx9_request_resource(up);
+}
+
+static void serial_txx9_config_port(struct uart_port *port, int uflags)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+ int ret;
+
+ /*
+ * Find the region that we can probe for. This in turn
+ * tells us whether we can probe for the type of port.
+ */
+ ret = serial_txx9_request_resource(up);
+ if (ret < 0)
+ return;
+ port->type = PORT_TXX9;
+ up->port.fifosize = TXX9_SIO_TX_FIFO;
+
+#ifdef CONFIG_SERIAL_TXX9_CONSOLE
+ if (up->port.line == up->port.cons->index)
+ return;
+#endif
+ serial_txx9_initialize(port);
+}
+
+static const char *
+serial_txx9_type(struct uart_port *port)
+{
+ return "txx9";
+}
+
+static struct uart_ops serial_txx9_pops = {
+ .tx_empty = serial_txx9_tx_empty,
+ .set_mctrl = serial_txx9_set_mctrl,
+ .get_mctrl = serial_txx9_get_mctrl,
+ .stop_tx = serial_txx9_stop_tx,
+ .start_tx = serial_txx9_start_tx,
+ .stop_rx = serial_txx9_stop_rx,
+ .enable_ms = serial_txx9_enable_ms,
+ .break_ctl = serial_txx9_break_ctl,
+ .startup = serial_txx9_startup,
+ .shutdown = serial_txx9_shutdown,
+ .set_termios = serial_txx9_set_termios,
+ .pm = serial_txx9_pm,
+ .type = serial_txx9_type,
+ .release_port = serial_txx9_release_port,
+ .request_port = serial_txx9_request_port,
+ .config_port = serial_txx9_config_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = serial_txx9_get_poll_char,
+ .poll_put_char = serial_txx9_put_poll_char,
+#endif
+};
+
+static struct uart_txx9_port serial_txx9_ports[UART_NR];
+
+static void __init serial_txx9_register_ports(struct uart_driver *drv,
+ struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < UART_NR; i++) {
+ struct uart_txx9_port *up = &serial_txx9_ports[i];
+
+ up->port.line = i;
+ up->port.ops = &serial_txx9_pops;
+ up->port.dev = dev;
+ if (up->port.iobase || up->port.mapbase)
+ uart_add_one_port(drv, &up->port);
+ }
+}
+
+#ifdef CONFIG_SERIAL_TXX9_CONSOLE
+
+static void serial_txx9_console_putchar(struct uart_port *port, int ch)
+{
+ struct uart_txx9_port *up = to_uart_txx9_port(port);
+
+ wait_for_xmitr(up);
+ sio_out(up, TXX9_SITFIFO, ch);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+static void
+serial_txx9_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_txx9_port *up = &serial_txx9_ports[co->index];
+ unsigned int ier, flcr;
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = sio_in(up, TXX9_SIDICR);
+ sio_out(up, TXX9_SIDICR, 0);
+ /*
+ * Disable flow-control if enabled (and unnecessary)
+ */
+ flcr = sio_in(up, TXX9_SIFLCR);
+ if (!(up->port.flags & UPF_CONS_FLOW) && (flcr & TXX9_SIFLCR_TES))
+ sio_out(up, TXX9_SIFLCR, flcr & ~TXX9_SIFLCR_TES);
+
+ uart_console_write(&up->port, s, count, serial_txx9_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up);
+ sio_out(up, TXX9_SIFLCR, flcr);
+ sio_out(up, TXX9_SIDICR, ier);
+}
+
+static int __init serial_txx9_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ struct uart_txx9_port *up;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index >= UART_NR)
+ co->index = 0;
+ up = &serial_txx9_ports[co->index];
+ port = &up->port;
+ if (!port->ops)
+ return -ENODEV;
+
+ serial_txx9_initialize(&up->port);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver serial_txx9_reg;
+static struct console serial_txx9_console = {
+ .name = TXX9_TTY_NAME,
+ .write = serial_txx9_console_write,
+ .device = uart_console_device,
+ .setup = serial_txx9_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &serial_txx9_reg,
+};
+
+static int __init serial_txx9_console_init(void)
+{
+ register_console(&serial_txx9_console);
+ return 0;
+}
+console_initcall(serial_txx9_console_init);
+
+#define SERIAL_TXX9_CONSOLE &serial_txx9_console
+#else
+#define SERIAL_TXX9_CONSOLE NULL
+#endif
+
+static struct uart_driver serial_txx9_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "serial_txx9",
+ .dev_name = TXX9_TTY_NAME,
+ .major = TXX9_TTY_MAJOR,
+ .minor = TXX9_TTY_MINOR_START,
+ .nr = UART_NR,
+ .cons = SERIAL_TXX9_CONSOLE,
+};
+
+int __init early_serial_txx9_setup(struct uart_port *port)
+{
+ if (port->line >= ARRAY_SIZE(serial_txx9_ports))
+ return -ENODEV;
+
+ serial_txx9_ports[port->line].port = *port;
+ serial_txx9_ports[port->line].port.ops = &serial_txx9_pops;
+ serial_txx9_ports[port->line].port.flags |=
+ UPF_BOOT_AUTOCONF | UPF_FIXED_PORT;
+ return 0;
+}
+
+static DEFINE_MUTEX(serial_txx9_mutex);
+
+/**
+ * serial_txx9_register_port - register a serial port
+ * @port: serial port template
+ *
+ * Configure the serial port specified by the request.
+ *
+ * The port is then probed and, if necessary, the IRQ is autodetected.
+ * If this fails an error is returned.
+ *
+ * On success the port is ready to use and the line number is returned.
+ */
+static int __devinit serial_txx9_register_port(struct uart_port *port)
+{
+ int i;
+ struct uart_txx9_port *uart;
+ int ret = -ENOSPC;
+
+ mutex_lock(&serial_txx9_mutex);
+ for (i = 0; i < UART_NR; i++) {
+ uart = &serial_txx9_ports[i];
+ if (uart_match_port(&uart->port, port)) {
+ uart_remove_one_port(&serial_txx9_reg, &uart->port);
+ break;
+ }
+ }
+ if (i == UART_NR) {
+ /* Find unused port */
+ for (i = 0; i < UART_NR; i++) {
+ uart = &serial_txx9_ports[i];
+ if (!(uart->port.iobase || uart->port.mapbase))
+ break;
+ }
+ }
+ if (i < UART_NR) {
+ uart->port.iobase = port->iobase;
+ uart->port.membase = port->membase;
+ uart->port.irq = port->irq;
+ uart->port.uartclk = port->uartclk;
+ uart->port.iotype = port->iotype;
+ uart->port.flags = port->flags
+ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT;
+ uart->port.mapbase = port->mapbase;
+ if (port->dev)
+ uart->port.dev = port->dev;
+ ret = uart_add_one_port(&serial_txx9_reg, &uart->port);
+ if (ret == 0)
+ ret = uart->port.line;
+ }
+ mutex_unlock(&serial_txx9_mutex);
+ return ret;
+}
+
+/**
+ * serial_txx9_unregister_port - remove a txx9 serial port at runtime
+ * @line: serial line number
+ *
+ * Remove one serial port. This may not be called from interrupt
+ * context. We hand the port back to our control.
+ */
+static void __devexit serial_txx9_unregister_port(int line)
+{
+ struct uart_txx9_port *uart = &serial_txx9_ports[line];
+
+ mutex_lock(&serial_txx9_mutex);
+ uart_remove_one_port(&serial_txx9_reg, &uart->port);
+ uart->port.flags = 0;
+ uart->port.type = PORT_UNKNOWN;
+ uart->port.iobase = 0;
+ uart->port.mapbase = 0;
+ uart->port.membase = NULL;
+ uart->port.dev = NULL;
+ mutex_unlock(&serial_txx9_mutex);
+}
+
+/*
+ * Register a set of serial devices attached to a platform device.
+ */
+static int __devinit serial_txx9_probe(struct platform_device *dev)
+{
+ struct uart_port *p = dev->dev.platform_data;
+ struct uart_port port;
+ int ret, i;
+
+ memset(&port, 0, sizeof(struct uart_port));
+ for (i = 0; p && p->uartclk != 0; p++, i++) {
+ port.iobase = p->iobase;
+ port.membase = p->membase;
+ port.irq = p->irq;
+ port.uartclk = p->uartclk;
+ port.iotype = p->iotype;
+ port.flags = p->flags;
+ port.mapbase = p->mapbase;
+ port.dev = &dev->dev;
+ ret = serial_txx9_register_port(&port);
+ if (ret < 0) {
+ dev_err(&dev->dev, "unable to register port at index %d "
+ "(IO%lx MEM%llx IRQ%d): %d\n", i,
+ p->iobase, (unsigned long long)p->mapbase,
+ p->irq, ret);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Remove serial ports registered against a platform device.
+ */
+static int __devexit serial_txx9_remove(struct platform_device *dev)
+{
+ int i;
+
+ for (i = 0; i < UART_NR; i++) {
+ struct uart_txx9_port *up = &serial_txx9_ports[i];
+
+ if (up->port.dev == &dev->dev)
+ serial_txx9_unregister_port(i);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int serial_txx9_suspend(struct platform_device *dev, pm_message_t state)
+{
+ int i;
+
+ for (i = 0; i < UART_NR; i++) {
+ struct uart_txx9_port *up = &serial_txx9_ports[i];
+
+ if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
+ uart_suspend_port(&serial_txx9_reg, &up->port);
+ }
+
+ return 0;
+}
+
+static int serial_txx9_resume(struct platform_device *dev)
+{
+ int i;
+
+ for (i = 0; i < UART_NR; i++) {
+ struct uart_txx9_port *up = &serial_txx9_ports[i];
+
+ if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
+ uart_resume_port(&serial_txx9_reg, &up->port);
+ }
+
+ return 0;
+}
+#endif
+
+static struct platform_driver serial_txx9_plat_driver = {
+ .probe = serial_txx9_probe,
+ .remove = __devexit_p(serial_txx9_remove),
+#ifdef CONFIG_PM
+ .suspend = serial_txx9_suspend,
+ .resume = serial_txx9_resume,
+#endif
+ .driver = {
+ .name = "serial_txx9",
+ .owner = THIS_MODULE,
+ },
+};
+
+#ifdef ENABLE_SERIAL_TXX9_PCI
+/*
+ * Probe one serial board. Unfortunately, there is no rhyme or reason
+ * to the arrangement of serial ports on a PCI card.
+ */
+static int __devinit
+pciserial_txx9_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+ struct uart_port port;
+ int line;
+ int rc;
+
+ rc = pci_enable_device(dev);
+ if (rc)
+ return rc;
+
+ memset(&port, 0, sizeof(port));
+ port.ops = &serial_txx9_pops;
+ port.flags |= UPF_TXX9_HAVE_CTS_LINE;
+ port.uartclk = 66670000;
+ port.irq = dev->irq;
+ port.iotype = UPIO_PORT;
+ port.iobase = pci_resource_start(dev, 1);
+ port.dev = &dev->dev;
+ line = serial_txx9_register_port(&port);
+ if (line < 0) {
+ printk(KERN_WARNING "Couldn't register serial port %s: %d\n", pci_name(dev), line);
+ pci_disable_device(dev);
+ return line;
+ }
+ pci_set_drvdata(dev, &serial_txx9_ports[line]);
+
+ return 0;
+}
+
+static void __devexit pciserial_txx9_remove_one(struct pci_dev *dev)
+{
+ struct uart_txx9_port *up = pci_get_drvdata(dev);
+
+ pci_set_drvdata(dev, NULL);
+
+ if (up) {
+ serial_txx9_unregister_port(up->port.line);
+ pci_disable_device(dev);
+ }
+}
+
+#ifdef CONFIG_PM
+static int pciserial_txx9_suspend_one(struct pci_dev *dev, pm_message_t state)
+{
+ struct uart_txx9_port *up = pci_get_drvdata(dev);
+
+ if (up)
+ uart_suspend_port(&serial_txx9_reg, &up->port);
+ pci_save_state(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
+ return 0;
+}
+
+static int pciserial_txx9_resume_one(struct pci_dev *dev)
+{
+ struct uart_txx9_port *up = pci_get_drvdata(dev);
+
+ pci_set_power_state(dev, PCI_D0);
+ pci_restore_state(dev);
+ if (up)
+ uart_resume_port(&serial_txx9_reg, &up->port);
+ return 0;
+}
+#endif
+
+static const struct pci_device_id serial_txx9_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC) },
+ { 0, }
+};
+
+static struct pci_driver serial_txx9_pci_driver = {
+ .name = "serial_txx9",
+ .probe = pciserial_txx9_init_one,
+ .remove = __devexit_p(pciserial_txx9_remove_one),
+#ifdef CONFIG_PM
+ .suspend = pciserial_txx9_suspend_one,
+ .resume = pciserial_txx9_resume_one,
+#endif
+ .id_table = serial_txx9_pci_tbl,
+};
+
+MODULE_DEVICE_TABLE(pci, serial_txx9_pci_tbl);
+#endif /* ENABLE_SERIAL_TXX9_PCI */
+
+static struct platform_device *serial_txx9_plat_devs;
+
+static int __init serial_txx9_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "%s version %s\n", serial_name, serial_version);
+
+ ret = uart_register_driver(&serial_txx9_reg);
+ if (ret)
+ goto out;
+
+ serial_txx9_plat_devs = platform_device_alloc("serial_txx9", -1);
+ if (!serial_txx9_plat_devs) {
+ ret = -ENOMEM;
+ goto unreg_uart_drv;
+ }
+
+ ret = platform_device_add(serial_txx9_plat_devs);
+ if (ret)
+ goto put_dev;
+
+ serial_txx9_register_ports(&serial_txx9_reg,
+ &serial_txx9_plat_devs->dev);
+
+ ret = platform_driver_register(&serial_txx9_plat_driver);
+ if (ret)
+ goto del_dev;
+
+#ifdef ENABLE_SERIAL_TXX9_PCI
+ ret = pci_register_driver(&serial_txx9_pci_driver);
+#endif
+ if (ret == 0)
+ goto out;
+
+ del_dev:
+ platform_device_del(serial_txx9_plat_devs);
+ put_dev:
+ platform_device_put(serial_txx9_plat_devs);
+ unreg_uart_drv:
+ uart_unregister_driver(&serial_txx9_reg);
+ out:
+ return ret;
+}
+
+static void __exit serial_txx9_exit(void)
+{
+ int i;
+
+#ifdef ENABLE_SERIAL_TXX9_PCI
+ pci_unregister_driver(&serial_txx9_pci_driver);
+#endif
+ platform_driver_unregister(&serial_txx9_plat_driver);
+ platform_device_unregister(serial_txx9_plat_devs);
+ for (i = 0; i < UART_NR; i++) {
+ struct uart_txx9_port *up = &serial_txx9_ports[i];
+ if (up->port.iobase || up->port.mapbase)
+ uart_remove_one_port(&serial_txx9_reg, &up->port);
+ }
+
+ uart_unregister_driver(&serial_txx9_reg);
+}
+
+module_init(serial_txx9_init);
+module_exit(serial_txx9_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TX39/49 serial driver");
+
+MODULE_ALIAS_CHARDEV_MAJOR(TXX9_TTY_MAJOR);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
new file mode 100644
index 00000000..bead17e5
--- /dev/null
+++ b/drivers/tty/serial/sh-sci.c
@@ -0,0 +1,2088 @@
+/*
+ * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO)
+ *
+ * Copyright (C) 2002 - 2011 Paul Mundt
+ * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
+ *
+ * based off of the old drivers/char/sh-sci.c by:
+ *
+ * Copyright (C) 1999, 2000 Niibe Yutaka
+ * Copyright (C) 2000 Sugioka Toshinobu
+ * Modified to support multiple serial ports. Stuart Menefy (May 2000).
+ * Modified to support SecureEdge. David McCullough (2002)
+ * Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
+ * Removed SH7300 support (Jul 2007).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/sysrq.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/platform_device.h>
+#include <linux/serial_sci.h>
+#include <linux/notifier.h>
+#include <linux/pm_runtime.h>
+#include <linux/cpufreq.h>
+#include <linux/clk.h>
+#include <linux/ctype.h>
+#include <linux/err.h>
+#include <linux/dmaengine.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_SUPERH
+#include <asm/sh_bios.h>
+#endif
+
+#ifdef CONFIG_H8300
+#include <asm/gpio.h>
+#endif
+
+#include "sh-sci.h"
+
+struct sci_port {
+ struct uart_port port;
+
+ /* Platform configuration */
+ struct plat_sci_port *cfg;
+
+ /* Port enable callback */
+ void (*enable)(struct uart_port *port);
+
+ /* Port disable callback */
+ void (*disable)(struct uart_port *port);
+
+ /* Break timer */
+ struct timer_list break_timer;
+ int break_flag;
+
+ /* Interface clock */
+ struct clk *iclk;
+ /* Function clock */
+ struct clk *fclk;
+
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ struct dma_async_tx_descriptor *desc_tx;
+ struct dma_async_tx_descriptor *desc_rx[2];
+ dma_cookie_t cookie_tx;
+ dma_cookie_t cookie_rx[2];
+ dma_cookie_t active_rx;
+ struct scatterlist sg_tx;
+ unsigned int sg_len_tx;
+ struct scatterlist sg_rx[2];
+ size_t buf_len_rx;
+ struct sh_dmae_slave param_tx;
+ struct sh_dmae_slave param_rx;
+ struct work_struct work_tx;
+ struct work_struct work_rx;
+ struct timer_list rx_timer;
+ unsigned int rx_timeout;
+#endif
+
+ struct notifier_block freq_transition;
+};
+
+/* Function prototypes */
+static void sci_start_tx(struct uart_port *port);
+static void sci_stop_tx(struct uart_port *port);
+static void sci_start_rx(struct uart_port *port);
+
+#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
+
+static struct sci_port sci_ports[SCI_NPORTS];
+static struct uart_driver sci_uart_driver;
+
+static inline struct sci_port *
+to_sci_port(struct uart_port *uart)
+{
+ return container_of(uart, struct sci_port, port);
+}
+
+#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
+
+#ifdef CONFIG_CONSOLE_POLL
+static int sci_poll_get_char(struct uart_port *port)
+{
+ unsigned short status;
+ int c;
+
+ do {
+ status = sci_in(port, SCxSR);
+ if (status & SCxSR_ERRORS(port)) {
+ sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
+ continue;
+ }
+ break;
+ } while (1);
+
+ if (!(status & SCxSR_RDxF(port)))
+ return NO_POLL_CHAR;
+
+ c = sci_in(port, SCxRDR);
+
+ /* Dummy read */
+ sci_in(port, SCxSR);
+ sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
+
+ return c;
+}
+#endif
+
+static void sci_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ unsigned short status;
+
+ do {
+ status = sci_in(port, SCxSR);
+ } while (!(status & SCxSR_TDxE(port)));
+
+ sci_out(port, SCxTDR, c);
+ sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
+}
+#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
+
+#if defined(__H8300H__) || defined(__H8300S__)
+static void sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ int ch = (port->mapbase - SMR0) >> 3;
+
+ /* set DDR regs */
+ H8300_GPIO_DDR(h8300_sci_pins[ch].port,
+ h8300_sci_pins[ch].rx,
+ H8300_GPIO_INPUT);
+ H8300_GPIO_DDR(h8300_sci_pins[ch].port,
+ h8300_sci_pins[ch].tx,
+ H8300_GPIO_OUTPUT);
+
+ /* tx mark output */
+ H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx;
+}
+#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
+static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ if (port->mapbase == 0xA4400000) {
+ __raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
+ __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
+ } else if (port->mapbase == 0xA4410000)
+ __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
+}
+#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721)
+static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ unsigned short data;
+
+ if (cflag & CRTSCTS) {
+ /* enable RTS/CTS */
+ if (port->mapbase == 0xa4430000) { /* SCIF0 */
+ /* Clear PTCR bit 9-2; enable all scif pins but sck */
+ data = __raw_readw(PORT_PTCR);
+ __raw_writew((data & 0xfc03), PORT_PTCR);
+ } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
+ /* Clear PVCR bit 9-2 */
+ data = __raw_readw(PORT_PVCR);
+ __raw_writew((data & 0xfc03), PORT_PVCR);
+ }
+ } else {
+ if (port->mapbase == 0xa4430000) { /* SCIF0 */
+ /* Clear PTCR bit 5-2; enable only tx and rx */
+ data = __raw_readw(PORT_PTCR);
+ __raw_writew((data & 0xffc3), PORT_PTCR);
+ } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
+ /* Clear PVCR bit 5-2 */
+ data = __raw_readw(PORT_PVCR);
+ __raw_writew((data & 0xffc3), PORT_PVCR);
+ }
+ }
+}
+#elif defined(CONFIG_CPU_SH3)
+/* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */
+static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ unsigned short data;
+
+ /* We need to set SCPCR to enable RTS/CTS */
+ data = __raw_readw(SCPCR);
+ /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/
+ __raw_writew(data & 0x0fcf, SCPCR);
+
+ if (!(cflag & CRTSCTS)) {
+ /* We need to set SCPCR to enable RTS/CTS */
+ data = __raw_readw(SCPCR);
+ /* Clear out SCP7MD1,0, SCP4MD1,0,
+ Set SCP6MD1,0 = {01} (output) */
+ __raw_writew((data & 0x0fcf) | 0x1000, SCPCR);
+
+ data = __raw_readb(SCPDR);
+ /* Set /RTS2 (bit6) = 0 */
+ __raw_writeb(data & 0xbf, SCPDR);
+ }
+}
+#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
+static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ unsigned short data;
+
+ if (port->mapbase == 0xffe00000) {
+ data = __raw_readw(PSCR);
+ data &= ~0x03cf;
+ if (!(cflag & CRTSCTS))
+ data |= 0x0340;
+
+ __raw_writew(data, PSCR);
+ }
+}
+#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7763) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7780) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7785) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786) || \
+ defined(CONFIG_CPU_SUBTYPE_SHX3)
+static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ if (!(cflag & CRTSCTS))
+ __raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */
+}
+#elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A)
+static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ if (!(cflag & CRTSCTS))
+ __raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */
+}
+#else
+static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ /* Nothing to do */
+}
+#endif
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7780) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7785) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786)
+static int scif_txfill(struct uart_port *port)
+{
+ return sci_in(port, SCTFDR) & 0xff;
+}
+
+static int scif_txroom(struct uart_port *port)
+{
+ return SCIF_TXROOM_MAX - scif_txfill(port);
+}
+
+static int scif_rxfill(struct uart_port *port)
+{
+ return sci_in(port, SCRFDR) & 0xff;
+}
+#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
+static int scif_txfill(struct uart_port *port)
+{
+ if (port->mapbase == 0xffe00000 ||
+ port->mapbase == 0xffe08000)
+ /* SCIF0/1*/
+ return sci_in(port, SCTFDR) & 0xff;
+ else
+ /* SCIF2 */
+ return sci_in(port, SCFDR) >> 8;
+}
+
+static int scif_txroom(struct uart_port *port)
+{
+ if (port->mapbase == 0xffe00000 ||
+ port->mapbase == 0xffe08000)
+ /* SCIF0/1*/
+ return SCIF_TXROOM_MAX - scif_txfill(port);
+ else
+ /* SCIF2 */
+ return SCIF2_TXROOM_MAX - scif_txfill(port);
+}
+
+static int scif_rxfill(struct uart_port *port)
+{
+ if ((port->mapbase == 0xffe00000) ||
+ (port->mapbase == 0xffe08000)) {
+ /* SCIF0/1*/
+ return sci_in(port, SCRFDR) & 0xff;
+ } else {
+ /* SCIF2 */
+ return sci_in(port, SCFDR) & SCIF2_RFDC_MASK;
+ }
+}
+#elif defined(CONFIG_ARCH_SH7372)
+static int scif_txfill(struct uart_port *port)
+{
+ if (port->type == PORT_SCIFA)
+ return sci_in(port, SCFDR) >> 8;
+ else
+ return sci_in(port, SCTFDR);
+}
+
+static int scif_txroom(struct uart_port *port)
+{
+ return port->fifosize - scif_txfill(port);
+}
+
+static int scif_rxfill(struct uart_port *port)
+{
+ if (port->type == PORT_SCIFA)
+ return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
+ else
+ return sci_in(port, SCRFDR);
+}
+#else
+static int scif_txfill(struct uart_port *port)
+{
+ return sci_in(port, SCFDR) >> 8;
+}
+
+static int scif_txroom(struct uart_port *port)
+{
+ return SCIF_TXROOM_MAX - scif_txfill(port);
+}
+
+static int scif_rxfill(struct uart_port *port)
+{
+ return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
+}
+#endif
+
+static int sci_txfill(struct uart_port *port)
+{
+ return !(sci_in(port, SCxSR) & SCI_TDRE);
+}
+
+static int sci_txroom(struct uart_port *port)
+{
+ return !sci_txfill(port);
+}
+
+static int sci_rxfill(struct uart_port *port)
+{
+ return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
+}
+
+/* ********************************************************************** *
+ * the interrupt related routines *
+ * ********************************************************************** */
+
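+/*
+ * Feed the transmitter from the circular buffer (after any pending
+ * x_char), sending as many characters as the SCI/SCIF has room for,
+ * then enable or disable the TX-empty interrupt depending on whether
+ * data remains.
+ */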
+static void sci_transmit_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned int stopped = uart_tx_stopped(port);
+ unsigned short status;
+ unsigned short ctrl;
+ int count;
+
+ status = sci_in(port, SCxSR);
+ if (!(status & SCxSR_TDxE(port))) {
+ ctrl = sci_in(port, SCSCR);
+ if (uart_circ_empty(xmit))
+ ctrl &= ~SCSCR_TIE;
+ else
+ ctrl |= SCSCR_TIE;
+ sci_out(port, SCSCR, ctrl);
+ return;
+ }
+
+ if (port->type == PORT_SCI)
+ count = sci_txroom(port);
+ else
+ count = scif_txroom(port);
+
+ do {
+ unsigned char c;
+
+ if (port->x_char) {
+ c = port->x_char;
+ port->x_char = 0;
+ } else if (!uart_circ_empty(xmit) && !stopped) {
+ c = xmit->buf[xmit->tail];
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ } else {
+ break;
+ }
+
+ sci_out(port, SCxTDR, c);
+
+ port->icount.tx++;
+ } while (--count > 0);
+
+ sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+ if (uart_circ_empty(xmit)) {
+ sci_stop_tx(port);
+ } else {
+ ctrl = sci_in(port, SCSCR);
+
+ if (port->type != PORT_SCI) {
+ sci_in(port, SCxSR); /* Dummy read */
+ sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
+ }
+
+ ctrl |= SCSCR_TIE;
+ sci_out(port, SCSCR, ctrl);
+ }
+}
+
+/* On SH3, SCIF may read end-of-break as a space->mark char */
+#define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); })
+
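+/*
+ * Copy received characters into the tty flip buffer.  FIFO-less SCI
+ * ports take one character per pass; SCIF ports drain up to the FIFO
+ * fill level.  On SH3 parts, characters arriving while break_flag is
+ * set are debounced and dropped (see STEPFN above).
+ */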
+static void sci_receive_chars(struct uart_port *port)
+{
+ struct sci_port *sci_port = to_sci_port(port);
+ struct tty_struct *tty = port->state->port.tty;
+ int i, count, copied = 0;
+ unsigned short status;
+ unsigned char flag;
+
+ status = sci_in(port, SCxSR);
+ if (!(status & SCxSR_RDxF(port)))
+ return;
+
+ while (1) {
+ if (port->type == PORT_SCI)
+ count = sci_rxfill(port);
+ else
+ count = scif_rxfill(port);
+
+ /* Don't copy more bytes than there is room for in the buffer */
+ count = tty_buffer_request_room(tty, count);
+
+ /* If for any reason we can't copy more data, we're done! */
+ if (count == 0)
+ break;
+
+ if (port->type == PORT_SCI) {
+ char c = sci_in(port, SCxRDR);
+ if (uart_handle_sysrq_char(port, c) ||
+ sci_port->break_flag)
+ count = 0;
+ else
+ tty_insert_flip_char(tty, c, TTY_NORMAL);
+ } else {
+ for (i = 0; i < count; i++) {
+ char c = sci_in(port, SCxRDR);
+ status = sci_in(port, SCxSR);
+#if defined(CONFIG_CPU_SH3)
+ /* Skip "chars" during break */
+ if (sci_port->break_flag) {
+ if ((c == 0) &&
+ (status & SCxSR_FER(port))) {
+ count--; i--;
+ continue;
+ }
+
+ /* Nonzero => end-of-break */
+ dev_dbg(port->dev, "debounce<%02x>\n", c);
+ sci_port->break_flag = 0;
+
+ if (STEPFN(c)) {
+ count--; i--;
+ continue;
+ }
+ }
+#endif /* CONFIG_CPU_SH3 */
+ if (uart_handle_sysrq_char(port, c)) {
+ count--; i--;
+ continue;
+ }
+
+ /* Store data and status */
+ if (status & SCxSR_FER(port)) {
+ flag = TTY_FRAME;
+ dev_notice(port->dev, "frame error\n");
+ } else if (status & SCxSR_PER(port)) {
+ flag = TTY_PARITY;
+ dev_notice(port->dev, "parity error\n");
+ } else
+ flag = TTY_NORMAL;
+
+ tty_insert_flip_char(tty, c, flag);
+ }
+ }
+
+ sci_in(port, SCxSR); /* dummy read */
+ sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
+
+ copied += count;
+ port->icount.rx += count;
+ }
+
+ if (copied) {
+ /* Tell the rest of the system the news. New characters! */
+ tty_flip_buffer_push(tty);
+ } else {
+ sci_in(port, SCxSR); /* dummy read */
+ sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
+ }
+}
+
+#define SCI_BREAK_JIFFIES (HZ/20)
+
+/*
+ * The sci generates interrupts during the break,
+ * 1 per millisecond or so during the break period, for 9600 baud.
+ * So don't bother disabling interrupts.
+ * But we don't want more than 1 break event.
+ * Use a kernel timer to periodically poll the rx line until
+ * the break is finished.
+ */
+static inline void sci_schedule_break_timer(struct sci_port *port)
+{
+ mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES);
+}
+
+/* Ensure that two consecutive samples find the break over. */
+static void sci_break_timer(unsigned long data)
+{
+ struct sci_port *port = (struct sci_port *)data;
+
+ if (port->enable)
+ port->enable(&port->port);
+
+ if (sci_rxd_in(&port->port) == 0) {
+ port->break_flag = 1;
+ sci_schedule_break_timer(port);
+ } else if (port->break_flag == 1) {
+ /* break is over. */
+ port->break_flag = 2;
+ sci_schedule_break_timer(port);
+ } else
+ port->break_flag = 0;
+
+ if (port->disable)
+ port->disable(&port->port);
+}
+
+static int sci_handle_errors(struct uart_port *port)
+{
+ int copied = 0;
+ unsigned short status = sci_in(port, SCxSR);
+ struct tty_struct *tty = port->state->port.tty;
+
+ if (status & SCxSR_ORER(port)) {
+ /* overrun error */
+ if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
+ copied++;
+
+ dev_notice(port->dev, "overrun error");
+ }
+
+ if (status & SCxSR_FER(port)) {
+ if (sci_rxd_in(port) == 0) {
+ /* Notify of BREAK */
+ struct sci_port *sci_port = to_sci_port(port);
+
+ if (!sci_port->break_flag) {
+ sci_port->break_flag = 1;
+ sci_schedule_break_timer(sci_port);
+
+ /* Do sysrq handling. */
+ if (uart_handle_break(port))
+ return 0;
+
+ dev_dbg(port->dev, "BREAK detected\n");
+
+ if (tty_insert_flip_char(tty, 0, TTY_BREAK))
+ copied++;
+ }
+
+ } else {
+ /* frame error */
+ if (tty_insert_flip_char(tty, 0, TTY_FRAME))
+ copied++;
+
+ dev_notice(port->dev, "frame error\n");
+ }
+ }
+
+ if (status & SCxSR_PER(port)) {
+ /* parity error */
+ if (tty_insert_flip_char(tty, 0, TTY_PARITY))
+ copied++;
+
+ dev_notice(port->dev, "parity error");
+ }
+
+ if (copied)
+ tty_flip_buffer_push(tty);
+
+ return copied;
+}
+
+static int sci_handle_fifo_overrun(struct uart_port *port)
+{
+ struct tty_struct *tty = port->state->port.tty;
+ int copied = 0;
+
+ if (port->type != PORT_SCIF)
+ return 0;
+
+ if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) {
+ sci_out(port, SCLSR, 0);
+
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_flip_buffer_push(tty);
+
+ dev_notice(port->dev, "overrun error\n");
+ copied++;
+ }
+
+ return copied;
+}
+
+static int sci_handle_breaks(struct uart_port *port)
+{
+ int copied = 0;
+ unsigned short status = sci_in(port, SCxSR);
+ struct tty_struct *tty = port->state->port.tty;
+ struct sci_port *s = to_sci_port(port);
+
+ if (uart_handle_break(port))
+ return 0;
+
+ if (!s->break_flag && status & SCxSR_BRK(port)) {
+#if defined(CONFIG_CPU_SH3)
+ /* Debounce break */
+ s->break_flag = 1;
+#endif
+ /* Notify of BREAK */
+ if (tty_insert_flip_char(tty, 0, TTY_BREAK))
+ copied++;
+
+ dev_dbg(port->dev, "BREAK detected\n");
+ }
+
+ if (copied)
+ tty_flip_buffer_push(tty);
+
+ copied += sci_handle_fifo_overrun(port);
+
+ return copied;
+}
+
+static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
+{
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ struct uart_port *port = ptr;
+ struct sci_port *s = to_sci_port(port);
+
+ if (s->chan_rx) {
+ u16 scr = sci_in(port, SCSCR);
+ u16 ssr = sci_in(port, SCxSR);
+
+ /* Disable future Rx interrupts */
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ disable_irq_nosync(irq);
+ scr |= 0x4000;
+ } else {
+ scr &= ~SCSCR_RIE;
+ }
+ sci_out(port, SCSCR, scr);
+ /* Clear current interrupt */
+ sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
+ dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
+ jiffies, s->rx_timeout);
+ mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
+
+ return IRQ_HANDLED;
+ }
+#endif
+
+ /* I think sci_receive_chars has to be called irrespective
+ * of whether I_IXOFF is set; otherwise, how is the interrupt
+ * to be disabled?
+ */
+ sci_receive_chars(ptr);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
+{
+ struct uart_port *port = ptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ sci_transmit_chars(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sci_er_interrupt(int irq, void *ptr)
+{
+ struct uart_port *port = ptr;
+
+ /* Handle errors */
+ if (port->type == PORT_SCI) {
+ if (sci_handle_errors(port)) {
+ /* discard character in rx buffer */
+ sci_in(port, SCxSR);
+ sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
+ }
+ } else {
+ sci_handle_fifo_overrun(port);
+ sci_rx_interrupt(irq, ptr);
+ }
+
+ sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
+
+ /* Kick the transmission */
+ sci_tx_interrupt(irq, ptr);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sci_br_interrupt(int irq, void *ptr)
+{
+ struct uart_port *port = ptr;
+
+ /* Handle BREAKs */
+ sci_handle_breaks(port);
+ sci_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));
+
+ return IRQ_HANDLED;
+}
+
+static inline unsigned long port_rx_irq_mask(struct uart_port *port)
+{
+ /*
+ * Not all ports (such as SCIFA) will support REIE. Rather than
+ * special-casing the port type, we check the port initialization
+ * IRQ enable mask to see whether the IRQ is desired at all. If
+ * it's unset, it's logically inferred that there's no point in
+ * testing for it.
+ */
+ return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
+}
+
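+/*
+ * Multiplexed interrupt handler for ports that route all events
+ * through a single IRQ: inspect SCxSR/SCSCR and dispatch to the TX,
+ * RX, error and break handlers as appropriate.
+ */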
+static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
+{
+ unsigned short ssr_status, scr_status, err_enabled;
+ struct uart_port *port = ptr;
+ struct sci_port *s = to_sci_port(port);
+ irqreturn_t ret = IRQ_NONE;
+
+ ssr_status = sci_in(port, SCxSR);
+ scr_status = sci_in(port, SCSCR);
+ err_enabled = scr_status & port_rx_irq_mask(port);
+
+ /* Tx Interrupt */
+ if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
+ !s->chan_tx)
+ ret = sci_tx_interrupt(irq, ptr);
+
+ /*
+ * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
+ * DR flags
+ */
+ if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
+ (scr_status & SCSCR_RIE))
+ ret = sci_rx_interrupt(irq, ptr);
+
+ /* Error Interrupt */
+ if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
+ ret = sci_er_interrupt(irq, ptr);
+
+ /* Break Interrupt */
+ if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
+ ret = sci_br_interrupt(irq, ptr);
+
+ return ret;
+}
+
+/*
+ * Here we define a transition notifier so that we can update all of our
+ * ports' baud rate when the peripheral clock changes.
+ */
+static int sci_notifier(struct notifier_block *self,
+ unsigned long phase, void *p)
+{
+ struct sci_port *sci_port;
+ unsigned long flags;
+
+ sci_port = container_of(self, struct sci_port, freq_transition);
+
+ if ((phase == CPUFREQ_POSTCHANGE) ||
+ (phase == CPUFREQ_RESUMECHANGE)) {
+ struct uart_port *port = &sci_port->port;
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->uartclk = clk_get_rate(sci_port->iclk);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ return NOTIFY_OK;
+}
+
+static void sci_clk_enable(struct uart_port *port)
+{
+ struct sci_port *sci_port = to_sci_port(port);
+
+ pm_runtime_get_sync(port->dev);
+
+ clk_enable(sci_port->iclk);
+ sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
+ clk_enable(sci_port->fclk);
+}
+
+static void sci_clk_disable(struct uart_port *port)
+{
+ struct sci_port *sci_port = to_sci_port(port);
+
+ clk_disable(sci_port->fclk);
+ clk_disable(sci_port->iclk);
+
+ pm_runtime_put_sync(port->dev);
+}
+
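+/*
+ * Request either the single multiplexed IRQ (when the first two
+ * vectors in the platform data are the same) or the individual
+ * error, RX, TX and break IRQs.
+ */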
+static int sci_request_irq(struct sci_port *port)
+{
+ int i;
+ irqreturn_t (*handlers[4])(int irq, void *ptr) = {
+ sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt,
+ sci_br_interrupt,
+ };
+ const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full",
+ "SCI Transmit Data Empty", "SCI Break" };
+
+ if (port->cfg->irqs[0] == port->cfg->irqs[1]) {
+ if (unlikely(!port->cfg->irqs[0]))
+ return -ENODEV;
+
+ if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt,
+ IRQF_DISABLED, "sci", port)) {
+ dev_err(port->port.dev, "Can't allocate IRQ\n");
+ return -ENODEV;
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+ if (unlikely(!port->cfg->irqs[i]))
+ continue;
+
+ if (request_irq(port->cfg->irqs[i], handlers[i],
+ IRQF_DISABLED, desc[i], port)) {
+ dev_err(port->port.dev, "Can't allocate IRQ\n");
+ return -ENODEV;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void sci_free_irq(struct sci_port *port)
+{
+ int i;
+
+ if (port->cfg->irqs[0] == port->cfg->irqs[1])
+ free_irq(port->cfg->irqs[0], port);
+ else {
+ for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) {
+ if (!port->cfg->irqs[i])
+ continue;
+
+ free_irq(port->cfg->irqs[i], port);
+ }
+ }
+}
+
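+/*
+ * The transmitter is considered empty only when TEND is set and the
+ * TX FIFO has fully drained.
+ */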
+static unsigned int sci_tx_empty(struct uart_port *port)
+{
+ unsigned short status = sci_in(port, SCxSR);
+ unsigned short in_tx_fifo = scif_txfill(port);
+
+ return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
+}
+
+static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ /* This routine is used for setting signals of: DTR, DCD, CTS/RTS */
+ /* We use SCIF's hardware for CTS/RTS, so we don't need any for that. */
+ /* If you have signals for DTR and DCD, please implement here. */
+}
+
+static unsigned int sci_get_mctrl(struct uart_port *port)
+{
+ /* This routine is used for getting signals of: DTR, DCD, DSR, RI,
+ * and CTS/RTS */
+
+ return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR;
+}
+
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+static void sci_dma_tx_complete(void *arg)
+{
+ struct sci_port *s = arg;
+ struct uart_port *port = &s->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned long flags;
+
+ dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ xmit->tail += sg_dma_len(&s->sg_tx);
+ xmit->tail &= UART_XMIT_SIZE - 1;
+
+ port->icount.tx += sg_dma_len(&s->sg_tx);
+
+ async_tx_ack(s->desc_tx);
+ s->desc_tx = NULL;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (!uart_circ_empty(xmit)) {
+ s->cookie_tx = 0;
+ schedule_work(&s->work_tx);
+ } else {
+ s->cookie_tx = -EINVAL;
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ u16 ctrl = sci_in(port, SCSCR);
+ sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
+ }
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* Locking: called with port lock held */
+static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty,
+ size_t count)
+{
+ struct uart_port *port = &s->port;
+ int i, active, room;
+
+ room = tty_buffer_request_room(tty, count);
+
+ if (s->active_rx == s->cookie_rx[0]) {
+ active = 0;
+ } else if (s->active_rx == s->cookie_rx[1]) {
+ active = 1;
+ } else {
+ dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
+ return 0;
+ }
+
+ if (room < count)
+ dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
+ count - room);
+ if (!room)
+ return room;
+
+ for (i = 0; i < room; i++)
+ tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
+ TTY_NORMAL);
+
+ port->icount.rx += room;
+
+ return room;
+}
+
+static void sci_dma_rx_complete(void *arg)
+{
+ struct sci_port *s = arg;
+ struct uart_port *port = &s->port;
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned long flags;
+ int count;
+
+ dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ count = sci_dma_rx_push(s, tty, s->buf_len_rx);
+
+ mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (count)
+ tty_flip_buffer_push(tty);
+
+ schedule_work(&s->work_rx);
+}
+
+static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
+{
+ struct dma_chan *chan = s->chan_rx;
+ struct uart_port *port = &s->port;
+
+ s->chan_rx = NULL;
+ s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
+ dma_release_channel(chan);
+ if (sg_dma_address(&s->sg_rx[0]))
+ dma_free_coherent(port->dev, s->buf_len_rx * 2,
+ sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
+ if (enable_pio)
+ sci_start_rx(port);
+}
+
+static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
+{
+ struct dma_chan *chan = s->chan_tx;
+ struct uart_port *port = &s->port;
+
+ s->chan_tx = NULL;
+ s->cookie_tx = -EINVAL;
+ dma_release_channel(chan);
+ if (enable_pio)
+ sci_start_tx(port);
+}
+
+static void sci_submit_rx(struct sci_port *s)
+{
+ struct dma_chan *chan = s->chan_rx;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct scatterlist *sg = &s->sg_rx[i];
+ struct dma_async_tx_descriptor *desc;
+
+ desc = chan->device->device_prep_slave_sg(chan,
+ sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
+
+ if (desc) {
+ s->desc_rx[i] = desc;
+ desc->callback = sci_dma_rx_complete;
+ desc->callback_param = s;
+ s->cookie_rx[i] = desc->tx_submit(desc);
+ }
+
+ if (!desc || s->cookie_rx[i] < 0) {
+ if (i) {
+ async_tx_ack(s->desc_rx[0]);
+ s->cookie_rx[0] = -EINVAL;
+ }
+ if (desc) {
+ async_tx_ack(desc);
+ s->cookie_rx[i] = -EINVAL;
+ }
+ dev_warn(s->port.dev,
+ "failed to re-start DMA, using PIO\n");
+ sci_rx_dma_release(s, true);
+ return;
+ }
+ dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
+ s->cookie_rx[i], i);
+ }
+
+ s->active_rx = s->cookie_rx[0];
+
+ dma_async_issue_pending(chan);
+}
+
+static void work_fn_rx(struct work_struct *work)
+{
+ struct sci_port *s = container_of(work, struct sci_port, work_rx);
+ struct uart_port *port = &s->port;
+ struct dma_async_tx_descriptor *desc;
+ int new;
+
+ if (s->active_rx == s->cookie_rx[0]) {
+ new = 0;
+ } else if (s->active_rx == s->cookie_rx[1]) {
+ new = 1;
+ } else {
+ dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
+ return;
+ }
+ desc = s->desc_rx[new];
+
+ if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
+ DMA_SUCCESS) {
+ /* Handle incomplete DMA receive */
+ struct tty_struct *tty = port->state->port.tty;
+ struct dma_chan *chan = s->chan_rx;
+ struct sh_desc *sh_desc = container_of(desc, struct sh_desc,
+ async_tx);
+ unsigned long flags;
+ int count;
+
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
+ sh_desc->partial, sh_desc->cookie);
+
+ spin_lock_irqsave(&port->lock, flags);
+ count = sci_dma_rx_push(s, tty, sh_desc->partial);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (count)
+ tty_flip_buffer_push(tty);
+
+ sci_submit_rx(s);
+
+ return;
+ }
+
+ s->cookie_rx[new] = desc->tx_submit(desc);
+ if (s->cookie_rx[new] < 0) {
+ dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
+ sci_rx_dma_release(s, true);
+ return;
+ }
+
+ s->active_rx = s->cookie_rx[!new];
+
+ dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
+ s->cookie_rx[new], new, s->active_rx);
+}
+
+static void work_fn_tx(struct work_struct *work)
+{
+ struct sci_port *s = container_of(work, struct sci_port, work_tx);
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan = s->chan_tx;
+ struct uart_port *port = &s->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct scatterlist *sg = &s->sg_tx;
+
+ /*
+ * DMA is idle now.
+ * Port xmit buffer is already mapped, and it is one page... Just adjust
+ * offsets and lengths. Since it is a circular buffer, we have to
+ * transmit till the end, and then the rest. Take the port lock to get a
+ * consistent xmit buffer state.
+ */
+ spin_lock_irq(&port->lock);
+ sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
+ sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
+ sg->offset;
+ sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
+ CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
+ spin_unlock_irq(&port->lock);
+
+ BUG_ON(!sg_dma_len(sg));
+
+ desc = chan->device->device_prep_slave_sg(chan,
+ sg, s->sg_len_tx, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ /* switch to PIO */
+ sci_tx_dma_release(s, true);
+ return;
+ }
+
+ dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);
+
+ spin_lock_irq(&port->lock);
+ s->desc_tx = desc;
+ desc->callback = sci_dma_tx_complete;
+ desc->callback_param = s;
+ spin_unlock_irq(&port->lock);
+ s->cookie_tx = desc->tx_submit(desc);
+ if (s->cookie_tx < 0) {
+ dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
+ /* switch to PIO */
+ sci_tx_dma_release(s, true);
+ return;
+ }
+
+ dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__,
+ xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
+
+ dma_async_issue_pending(chan);
+}
+#endif
+
+static void sci_start_tx(struct uart_port *port)
+{
+ struct sci_port *s = to_sci_port(port);
+ unsigned short ctrl;
+
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ u16 new, scr = sci_in(port, SCSCR);
+ if (s->chan_tx)
+ new = scr | 0x8000;
+ else
+ new = scr & ~0x8000;
+ if (new != scr)
+ sci_out(port, SCSCR, new);
+ }
+
+ if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
+ s->cookie_tx < 0) {
+ s->cookie_tx = 0;
+ schedule_work(&s->work_tx);
+ }
+#endif
+
+ if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
+ ctrl = sci_in(port, SCSCR);
+ sci_out(port, SCSCR, ctrl | SCSCR_TIE);
+ }
+}
+
+static void sci_stop_tx(struct uart_port *port)
+{
+ unsigned short ctrl;
+
+ /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
+ ctrl = sci_in(port, SCSCR);
+
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
+ ctrl &= ~0x8000;
+
+ ctrl &= ~SCSCR_TIE;
+
+ sci_out(port, SCSCR, ctrl);
+}
+
+static void sci_start_rx(struct uart_port *port)
+{
+ unsigned short ctrl;
+
+ ctrl = sci_in(port, SCSCR) | port_rx_irq_mask(port);
+
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
+ ctrl &= ~0x4000;
+
+ sci_out(port, SCSCR, ctrl);
+}
+
+static void sci_stop_rx(struct uart_port *port)
+{
+ unsigned short ctrl;
+
+ ctrl = sci_in(port, SCSCR);
+
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
+ ctrl &= ~0x4000;
+
+ ctrl &= ~port_rx_irq_mask(port);
+
+ sci_out(port, SCSCR, ctrl);
+}
+
+static void sci_enable_ms(struct uart_port *port)
+{
+ /* Nothing here yet .. */
+}
+
+static void sci_break_ctl(struct uart_port *port, int break_state)
+{
+ /* Nothing here yet .. */
+}
+
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ struct sh_dmae_slave *param = slave;
+
+ dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
+ param->slave_id);
+
+ if (param->dma_dev == chan->device->dev) {
+ chan->private = param;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static void rx_timer_fn(unsigned long arg)
+{
+ struct sci_port *s = (struct sci_port *)arg;
+ struct uart_port *port = &s->port;
+ u16 scr = sci_in(port, SCSCR);
+
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ scr &= ~0x4000;
+ enable_irq(s->cfg->irqs[1]);
+ }
+ sci_out(port, SCSCR, scr | SCSCR_RIE);
+ dev_dbg(port->dev, "DMA Rx timed out\n");
+ schedule_work(&s->work_rx);
+}
+
+static void sci_request_dma(struct uart_port *port)
+{
+ struct sci_port *s = to_sci_port(port);
+ struct sh_dmae_slave *param;
+ struct dma_chan *chan;
+ dma_cap_mask_t mask;
+ int nent;
+
+ dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
+ port->line, s->cfg->dma_dev);
+
+ if (!s->cfg->dma_dev)
+ return;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ param = &s->param_tx;
+
+ /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
+ param->slave_id = s->cfg->dma_slave_tx;
+ param->dma_dev = s->cfg->dma_dev;
+
+ s->cookie_tx = -EINVAL;
+ chan = dma_request_channel(mask, filter, param);
+ dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
+ if (chan) {
+ s->chan_tx = chan;
+ sg_init_table(&s->sg_tx, 1);
+ /* UART circular tx buffer is an aligned page. */
+ BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
+ sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
+ UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK);
+ nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
+ if (!nent)
+ sci_tx_dma_release(s, false);
+ else
+ dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
+ sg_dma_len(&s->sg_tx),
+ port->state->xmit.buf, sg_dma_address(&s->sg_tx));
+
+ s->sg_len_tx = nent;
+
+ INIT_WORK(&s->work_tx, work_fn_tx);
+ }
+
+ param = &s->param_rx;
+
+ /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
+ param->slave_id = s->cfg->dma_slave_rx;
+ param->dma_dev = s->cfg->dma_dev;
+
+ chan = dma_request_channel(mask, filter, param);
+ dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
+ if (chan) {
+ dma_addr_t dma[2];
+ void *buf[2];
+ int i;
+
+ s->chan_rx = chan;
+
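+ /*
+ * Rx appears to use a simple ping-pong scheme: one coherent
+ * allocation holds two halves of buf_len_rx bytes each, and the two
+ * scatterlist entries set up below alternate as DMA targets (see
+ * sci_submit_rx() and work_fn_rx()).
+ */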
+ s->buf_len_rx = 2 * max(16, (int)port->fifosize);
+ buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
+ &dma[0], GFP_KERNEL);
+
+ if (!buf[0]) {
+ dev_warn(port->dev,
+ "failed to allocate dma buffer, using PIO\n");
+ sci_rx_dma_release(s, true);
+ return;
+ }
+
+ buf[1] = buf[0] + s->buf_len_rx;
+ dma[1] = dma[0] + s->buf_len_rx;
+
+ for (i = 0; i < 2; i++) {
+ struct scatterlist *sg = &s->sg_rx[i];
+
+ sg_init_table(sg, 1);
+ sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
+ (int)buf[i] & ~PAGE_MASK);
+ sg_dma_address(sg) = dma[i];
+ }
+
+ INIT_WORK(&s->work_rx, work_fn_rx);
+ setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
+
+ sci_submit_rx(s);
+ }
+}
+
+static void sci_free_dma(struct uart_port *port)
+{
+ struct sci_port *s = to_sci_port(port);
+
+ if (!s->cfg->dma_dev)
+ return;
+
+ if (s->chan_tx)
+ sci_tx_dma_release(s, false);
+ if (s->chan_rx)
+ sci_rx_dma_release(s, false);
+}
+#else
+static inline void sci_request_dma(struct uart_port *port)
+{
+}
+
+static inline void sci_free_dma(struct uart_port *port)
+{
+}
+#endif
+
+static int sci_startup(struct uart_port *port)
+{
+ struct sci_port *s = to_sci_port(port);
+ int ret;
+
+ dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+
+ if (s->enable)
+ s->enable(port);
+
+ ret = sci_request_irq(s);
+ if (unlikely(ret < 0))
+ return ret;
+
+ sci_request_dma(port);
+
+ sci_start_tx(port);
+ sci_start_rx(port);
+
+ return 0;
+}
+
+static void sci_shutdown(struct uart_port *port)
+{
+ struct sci_port *s = to_sci_port(port);
+
+ dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+
+ sci_stop_rx(port);
+ sci_stop_tx(port);
+
+ sci_free_dma(port);
+ sci_free_irq(s);
+
+ if (s->disable)
+ s->disable(port);
+}
+
+static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
+ unsigned long freq)
+{
+ switch (algo_id) {
+ case SCBRR_ALGO_1:
+ return ((freq + 16 * bps) / (16 * bps) - 1);
+ case SCBRR_ALGO_2:
+ return ((freq + 16 * bps) / (32 * bps) - 1);
+ case SCBRR_ALGO_3:
+ return (((freq * 2) + 16 * bps) / (16 * bps) - 1);
+ case SCBRR_ALGO_4:
+ return (((freq * 2) + 16 * bps) / (32 * bps) - 1);
+ case SCBRR_ALGO_5:
+ return (((freq * 1000 / 32) / bps) - 1);
+ }
+
+ /* Warn, but use a safe default */
+ WARN_ON(1);
+
+ return ((freq + 16 * bps) / (32 * bps) - 1);
+}
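+
+/*
+ * Rough worked example (values picked purely for illustration): with
+ * SCBRR_ALGO_2, a 48 MHz peripheral clock and 115200 bps the function
+ * returns (48000000 + 16 * 115200) / (32 * 115200) - 1 = 12; assuming the
+ * usual f / (32 * (N + 1)) relation for this algorithm, the hardware then
+ * runs at roughly 115385 bps, well within tolerance of the requested rate.
+ */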
+
+static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct sci_port *s = to_sci_port(port);
+ unsigned int status, baud, smr_val, max_baud;
+ int t = -1;
+ u16 scfcr = 0;
+
+ /*
+ * earlyprintk comes here early on with port->uartclk set to zero.
+ * the clock framework is not up and running at this point so here
+ * we assume that 115200 is the maximum baud rate. please note that
+ * the baud rate is not programmed during earlyprintk - it is assumed
+ * that the previous boot loader has enabled the required clocks and
+ * set up the baud rate generator hardware for us already.
+ */
+ max_baud = port->uartclk ? port->uartclk / 16 : 115200;
+
+ baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
+ if (likely(baud && port->uartclk))
+ t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk);
+
+ if (s->enable)
+ s->enable(port);
+
+ do {
+ status = sci_in(port, SCxSR);
+ } while (!(status & SCxSR_TEND(port)));
+
+ sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
+
+ if (port->type != PORT_SCI)
+ sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
+
+ smr_val = sci_in(port, SCSMR) & 3;
+
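+ /*
+ * The SMR values or'ed in below match the usual SH serial mode
+ * register layout: 0x40 selects 7-bit characters, 0x20 enables
+ * parity, 0x10 makes it odd (hence 0x30 for PARODD) and 0x08 selects
+ * two stop bits. The names are not spelled out in this driver, so
+ * take this as a reading aid rather than a hardware reference.
+ */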
+ if ((termios->c_cflag & CSIZE) == CS7)
+ smr_val |= 0x40;
+ if (termios->c_cflag & PARENB)
+ smr_val |= 0x20;
+ if (termios->c_cflag & PARODD)
+ smr_val |= 0x30;
+ if (termios->c_cflag & CSTOPB)
+ smr_val |= 0x08;
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ sci_out(port, SCSMR, smr_val);
+
+ dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
+ s->cfg->scscr);
+
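+ /*
+ * If the bit rate value does not fit into the 8-bit SCBRR register,
+ * the code below sets the clock-select field (the low two bits of
+ * SCSMR) to 1 and divides t by four to compensate, which presumably
+ * selects the next baud rate generator prescaler tap.
+ */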
+ if (t > 0) {
+ if (t >= 256) {
+ sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1);
+ t >>= 2;
+ } else
+ sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3);
+
+ sci_out(port, SCBRR, t);
+ udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
+ }
+
+ sci_init_pins(port, termios->c_cflag);
+ sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
+
+ sci_out(port, SCSCR, s->cfg->scscr);
+
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ /*
+ * Calculate delay for 1.5 DMA buffers: see
+ * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
+ * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
+ * calculates 1 jiffy for the data plus 5 jiffies for the "slop(e)."
+ * Below we then calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
+ * sizes), but it has been found experimentally that this is not
+ * enough: the driver too often needlessly runs into a DMA timeout. A
+ * minimum of 20ms seems to work reliably.
+ */
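+ /*
+ * Working through the figures above (for illustration only): with
+ * port->timeout = 6 jiffies, HZ = 250, buf_len_rx = 128 and a 64-byte
+ * FIFO, (6 - 5) * 128 * 3 / 64 / 2 = 3 jiffies = 12ms, which the
+ * check below then raises to the 20ms floor.
+ */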
+ if (s->chan_rx) {
+ s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
+ port->fifosize / 2;
+ dev_dbg(port->dev,
+ "DMA Rx t-out %ums, tty t-out %u jiffies\n",
+ s->rx_timeout * 1000 / HZ, port->timeout);
+ if (s->rx_timeout < msecs_to_jiffies(20))
+ s->rx_timeout = msecs_to_jiffies(20);
+ }
+#endif
+
+ if ((termios->c_cflag & CREAD) != 0)
+ sci_start_rx(port);
+
+ if (s->disable)
+ s->disable(port);
+}
+
+static const char *sci_type(struct uart_port *port)
+{
+ switch (port->type) {
+ case PORT_IRDA:
+ return "irda";
+ case PORT_SCI:
+ return "sci";
+ case PORT_SCIF:
+ return "scif";
+ case PORT_SCIFA:
+ return "scifa";
+ case PORT_SCIFB:
+ return "scifb";
+ }
+
+ return NULL;
+}
+
+static inline unsigned long sci_port_size(struct uart_port *port)
+{
+ /*
+ * Pick an arbitrary size that encapsulates all of the base
+ * registers by default. This can be optimized later, or derived
+ * from platform resource data at such a time that ports begin to
+ * behave more erratically.
+ */
+ return 64;
+}
+
+static int sci_remap_port(struct uart_port *port)
+{
+ unsigned long size = sci_port_size(port);
+
+ /*
+ * Nothing to do if there's already an established membase.
+ */
+ if (port->membase)
+ return 0;
+
+ if (port->flags & UPF_IOREMAP) {
+ port->membase = ioremap_nocache(port->mapbase, size);
+ if (unlikely(!port->membase)) {
+ dev_err(port->dev, "can't remap port#%d\n", port->line);
+ return -ENXIO;
+ }
+ } else {
+ /*
+ * For the simple (and majority of) cases where we don't
+ * need to do any remapping, just cast the cookie
+ * directly.
+ */
+ port->membase = (void __iomem *)port->mapbase;
+ }
+
+ return 0;
+}
+
+static void sci_release_port(struct uart_port *port)
+{
+ if (port->flags & UPF_IOREMAP) {
+ iounmap(port->membase);
+ port->membase = NULL;
+ }
+
+ release_mem_region(port->mapbase, sci_port_size(port));
+}
+
+static int sci_request_port(struct uart_port *port)
+{
+ unsigned long size = sci_port_size(port);
+ struct resource *res;
+ int ret;
+
+ res = request_mem_region(port->mapbase, size, dev_name(port->dev));
+ if (unlikely(res == NULL))
+ return -EBUSY;
+
+ ret = sci_remap_port(port);
+ if (unlikely(ret != 0)) {
+ release_resource(res);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sci_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ struct sci_port *sport = to_sci_port(port);
+
+ port->type = sport->cfg->type;
+ sci_request_port(port);
+ }
+}
+
+static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ struct sci_port *s = to_sci_port(port);
+
+ if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
+ return -EINVAL;
+ if (ser->baud_base < 2400)
+ /* No paper tape reader for Mitch.. */
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct uart_ops sci_uart_ops = {
+ .tx_empty = sci_tx_empty,
+ .set_mctrl = sci_set_mctrl,
+ .get_mctrl = sci_get_mctrl,
+ .start_tx = sci_start_tx,
+ .stop_tx = sci_stop_tx,
+ .stop_rx = sci_stop_rx,
+ .enable_ms = sci_enable_ms,
+ .break_ctl = sci_break_ctl,
+ .startup = sci_startup,
+ .shutdown = sci_shutdown,
+ .set_termios = sci_set_termios,
+ .type = sci_type,
+ .release_port = sci_release_port,
+ .request_port = sci_request_port,
+ .config_port = sci_config_port,
+ .verify_port = sci_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = sci_poll_get_char,
+ .poll_put_char = sci_poll_put_char,
+#endif
+};
+
+static int __devinit sci_init_single(struct platform_device *dev,
+ struct sci_port *sci_port,
+ unsigned int index,
+ struct plat_sci_port *p)
+{
+ struct uart_port *port = &sci_port->port;
+
+ port->ops = &sci_uart_ops;
+ port->iotype = UPIO_MEM;
+ port->line = index;
+
+ switch (p->type) {
+ case PORT_SCIFB:
+ port->fifosize = 256;
+ break;
+ case PORT_SCIFA:
+ port->fifosize = 64;
+ break;
+ case PORT_SCIF:
+ port->fifosize = 16;
+ break;
+ default:
+ port->fifosize = 1;
+ break;
+ }
+
+ if (dev) {
+ sci_port->iclk = clk_get(&dev->dev, "sci_ick");
+ if (IS_ERR(sci_port->iclk)) {
+ sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
+ if (IS_ERR(sci_port->iclk)) {
+ dev_err(&dev->dev, "can't get iclk\n");
+ return PTR_ERR(sci_port->iclk);
+ }
+ }
+
+ /*
+ * The function clock is optional, ignore it if we can't
+ * find it.
+ */
+ sci_port->fclk = clk_get(&dev->dev, "sci_fck");
+ if (IS_ERR(sci_port->fclk))
+ sci_port->fclk = NULL;
+
+ sci_port->enable = sci_clk_enable;
+ sci_port->disable = sci_clk_disable;
+ port->dev = &dev->dev;
+
+ pm_runtime_enable(&dev->dev);
+ }
+
+ sci_port->break_timer.data = (unsigned long)sci_port;
+ sci_port->break_timer.function = sci_break_timer;
+ init_timer(&sci_port->break_timer);
+
+ sci_port->cfg = p;
+
+ port->mapbase = p->mapbase;
+ port->type = p->type;
+ port->flags = p->flags;
+
+ /*
+ * The UART port needs an IRQ value, so we peg this to the TX IRQ
+ * for the multi-IRQ ports, which is where we are primarily
+ * concerned with the shutdown path synchronization.
+ *
+ * For the muxed case there's nothing more to do.
+ */
+ port->irq = p->irqs[SCIx_RXI_IRQ];
+
+ if (p->dma_dev)
+ dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
+ p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);
+
+ return 0;
+}
+
+#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
+static void serial_console_putchar(struct uart_port *port, int ch)
+{
+ sci_poll_put_char(port, ch);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ */
+static void serial_console_write(struct console *co, const char *s,
+ unsigned count)
+{
+ struct sci_port *sci_port = &sci_ports[co->index];
+ struct uart_port *port = &sci_port->port;
+ unsigned short bits;
+
+ if (sci_port->enable)
+ sci_port->enable(port);
+
+ uart_console_write(port, s, count, serial_console_putchar);
+
+ /* wait until fifo is empty and last bit has been transmitted */
+ bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
+ while ((sci_in(port, SCxSR) & bits) != bits)
+ cpu_relax();
+
+ if (sci_port->disable)
+ sci_port->disable(port);
+}
+
+static int __devinit serial_console_setup(struct console *co, char *options)
+{
+ struct sci_port *sci_port;
+ struct uart_port *port;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+
+ /*
+ * Refuse to handle any bogus ports.
+ */
+ if (co->index < 0 || co->index >= SCI_NPORTS)
+ return -ENODEV;
+
+ sci_port = &sci_ports[co->index];
+ port = &sci_port->port;
+
+ /*
+ * Refuse to handle uninitialized ports.
+ */
+ if (!port->ops)
+ return -ENODEV;
+
+ ret = sci_remap_port(port);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (sci_port->enable)
+ sci_port->enable(port);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ ret = uart_set_options(port, co, baud, parity, bits, flow);
+#if defined(__H8300H__) || defined(__H8300S__)
+ /* disable rx interrupt */
+ if (ret == 0)
+ sci_stop_rx(port);
+#endif
+ /* TODO: disable clock */
+ return ret;
+}
+
+static struct console serial_console = {
+ .name = "ttySC",
+ .device = uart_console_device,
+ .write = serial_console_write,
+ .setup = serial_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sci_uart_driver,
+};
+
+static struct console early_serial_console = {
+ .name = "early_ttySC",
+ .write = serial_console_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static char early_serial_buf[32];
+
+static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
+{
+ struct plat_sci_port *cfg = pdev->dev.platform_data;
+
+ if (early_serial_console.data)
+ return -EEXIST;
+
+ early_serial_console.index = pdev->id;
+
+ sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg);
+
+ serial_console_setup(&early_serial_console, early_serial_buf);
+
+ if (!strstr(early_serial_buf, "keep"))
+ early_serial_console.flags |= CON_BOOT;
+
+ register_console(&early_serial_console);
+ return 0;
+}
+
+#define SCI_CONSOLE (&serial_console)
+
+#else
+static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
+{
+ return -EINVAL;
+}
+
+#define SCI_CONSOLE NULL
+
+#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
+
+static char banner[] __initdata =
+ KERN_INFO "SuperH SCI(F) driver initialized\n";
+
+static struct uart_driver sci_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "sci",
+ .dev_name = "ttySC",
+ .major = SCI_MAJOR,
+ .minor = SCI_MINOR_START,
+ .nr = SCI_NPORTS,
+ .cons = SCI_CONSOLE,
+};
+
+static int sci_remove(struct platform_device *dev)
+{
+ struct sci_port *port = platform_get_drvdata(dev);
+
+ cpufreq_unregister_notifier(&port->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ uart_remove_one_port(&sci_uart_driver, &port->port);
+
+ clk_put(port->iclk);
+ clk_put(port->fclk);
+
+ pm_runtime_disable(&dev->dev);
+ return 0;
+}
+
+static int __devinit sci_probe_single(struct platform_device *dev,
+ unsigned int index,
+ struct plat_sci_port *p,
+ struct sci_port *sciport)
+{
+ int ret;
+
+ /* Sanity check */
+ if (unlikely(index >= SCI_NPORTS)) {
+ dev_notice(&dev->dev, "Attempting to register port "
+ "%d when only %d are available.\n",
+ index+1, SCI_NPORTS);
+ dev_notice(&dev->dev, "Consider bumping "
+ "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
+ return 0;
+ }
+
+ ret = sci_init_single(dev, sciport, index, p);
+ if (ret)
+ return ret;
+
+ return uart_add_one_port(&sci_uart_driver, &sciport->port);
+}
+
+static int __devinit sci_probe(struct platform_device *dev)
+{
+ struct plat_sci_port *p = dev->dev.platform_data;
+ struct sci_port *sp = &sci_ports[dev->id];
+ int ret;
+
+ /*
+ * If we've come here via earlyprintk initialization, head off to
+ * the special early probe. We don't have sufficient device state
+ * to make it beyond this yet.
+ */
+ if (is_early_platform_device(dev))
+ return sci_probe_earlyprintk(dev);
+
+ platform_set_drvdata(dev, sp);
+
+ ret = sci_probe_single(dev, dev->id, p, sp);
+ if (ret)
+ goto err_unreg;
+
+ sp->freq_transition.notifier_call = sci_notifier;
+
+ ret = cpufreq_register_notifier(&sp->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (unlikely(ret < 0))
+ goto err_unreg;
+
+#ifdef CONFIG_SH_STANDARD_BIOS
+ sh_bios_gdb_detach();
+#endif
+
+ return 0;
+
+err_unreg:
+ sci_remove(dev);
+ return ret;
+}
+
+static int sci_suspend(struct device *dev)
+{
+ struct sci_port *sport = dev_get_drvdata(dev);
+
+ if (sport)
+ uart_suspend_port(&sci_uart_driver, &sport->port);
+
+ return 0;
+}
+
+static int sci_resume(struct device *dev)
+{
+ struct sci_port *sport = dev_get_drvdata(dev);
+
+ if (sport)
+ uart_resume_port(&sci_uart_driver, &sport->port);
+
+ return 0;
+}
+
+static const struct dev_pm_ops sci_dev_pm_ops = {
+ .suspend = sci_suspend,
+ .resume = sci_resume,
+};
+
+static struct platform_driver sci_driver = {
+ .probe = sci_probe,
+ .remove = sci_remove,
+ .driver = {
+ .name = "sh-sci",
+ .owner = THIS_MODULE,
+ .pm = &sci_dev_pm_ops,
+ },
+};
+
+static int __init sci_init(void)
+{
+ int ret;
+
+ printk(banner);
+
+ ret = uart_register_driver(&sci_uart_driver);
+ if (likely(ret == 0)) {
+ ret = platform_driver_register(&sci_driver);
+ if (unlikely(ret))
+ uart_unregister_driver(&sci_uart_driver);
+ }
+
+ return ret;
+}
+
+static void __exit sci_exit(void)
+{
+ platform_driver_unregister(&sci_driver);
+ uart_unregister_driver(&sci_uart_driver);
+}
+
+#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
+early_platform_init_buffer("earlyprintk", &sci_driver,
+ early_serial_buf, ARRAY_SIZE(early_serial_buf));
+#endif
+module_init(sci_init);
+module_exit(sci_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sh-sci");
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
new file mode 100644
index 00000000..b04d937c
--- /dev/null
+++ b/drivers/tty/serial/sh-sci.h
@@ -0,0 +1,468 @@
+#include <linux/serial_core.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
+#include <asm/regs306x.h>
+#endif
+#if defined(CONFIG_H8S2678)
+#include <asm/regs267x.h>
+#endif
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7708) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+# define SCPCR 0xA4000116 /* 16 bit SCI and SCIF */
+# define SCPDR 0xA4000136 /* 8 bit SCI and SCIF */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
+# define SCIF0 0xA4400000
+# define SCIF2 0xA4410000
+# define SCPCR 0xA4000116
+# define SCPDR 0xA4000136
+#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7721) || \
+ defined(CONFIG_ARCH_SH73A0) || \
+ defined(CONFIG_ARCH_SH7367) || \
+ defined(CONFIG_ARCH_SH7377) || \
+ defined(CONFIG_ARCH_SH7372)
+# define PORT_PTCR 0xA405011EUL
+# define PORT_PVCR 0xA4050122UL
+# define SCIF_ORER 0x0200 /* overrun error bit */
+#elif defined(CONFIG_SH_RTS7751R2D)
+# define SCSPTR1 0xFFE0001C /* 8 bit SCIF */
+# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001 /* overrun error bit */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7091) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751R)
+# define SCSPTR1 0xffe0001c /* 8 bit SCI */
+# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001 /* overrun error bit */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
+# define SCSPTR0 0xfe600024 /* 16 bit SCIF */
+# define SCSPTR1 0xfe610024 /* 16 bit SCIF */
+# define SCSPTR2 0xfe620024 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001 /* overrun error bit */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
+# define SCSPTR0 0xA4400000 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001 /* overrun error bit */
+# define PACR 0xa4050100
+# define PBCR 0xa4050102
+#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
+# define SCSPTR0 0xffe00010 /* 16 bit SCIF */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
+# define PADR 0xA4050120
+# define PSDR 0xA405013e
+# define PWDR 0xA4050166
+# define PSCR 0xA405011E
+# define SCIF_ORER 0x0001 /* overrun error bit */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
+# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */
+# define SCSPTR0 SCPDR0
+# define SCIF_ORER 0x0001 /* overrun error bit */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
+# define SCSPTR0 0xa4050160
+# define SCIF_ORER 0x0001 /* overrun error bit */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
+# define SCIF_ORER 0x0001 /* overrun error bit */
+#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
+# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001 /* overrun error bit */
+#elif defined(CONFIG_H83007) || defined(CONFIG_H83068)
+# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
+#elif defined(CONFIG_H8S2678)
+# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
+# define SCSPTR0 0xfe4b0020
+# define SCIF_ORER 0x0001
+#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
+# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001 /* overrun error bit */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
+# define SCSPTR0 0xff923020 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001 /* overrun error bit */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
+# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001 /* Overrun error bit */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786)
+# define SCSPTR0 0xffea0024 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001 /* Overrun error bit */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7203) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7206) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7263)
+# define SCSPTR0 0xfffe8020 /* 16 bit SCIF */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+# define SCSPTR0 0xf8400020 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001 /* overrun error bit */
+#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
+# define SCSPTR0 0xffc30020 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001 /* Overrun error bit */
+#else
+# error CPU subtype not defined
+#endif
+
+/* SCxSR SCI */
+#define SCI_TDRE 0x80 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
+#define SCI_RDRF 0x40 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
+#define SCI_ORER 0x20 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
+#define SCI_FER 0x10 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
+#define SCI_PER 0x08 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
+#define SCI_TEND 0x04 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
+/* SCI_MPB 0x02 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
+/* SCI_MPBT 0x01 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
+
+#define SCI_ERRORS ( SCI_PER | SCI_FER | SCI_ORER)
+
+/* SCxSR SCIF */
+#define SCIF_ER 0x0080 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
+#define SCIF_TEND 0x0040 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
+#define SCIF_TDFE 0x0020 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
+#define SCIF_BRK 0x0010 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
+#define SCIF_FER 0x0008 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
+#define SCIF_PER 0x0004 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
+#define SCIF_RDF 0x0002 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
+#define SCIF_DR 0x0001 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7721) || \
+ defined(CONFIG_ARCH_SH73A0) || \
+ defined(CONFIG_ARCH_SH7367) || \
+ defined(CONFIG_ARCH_SH7377) || \
+ defined(CONFIG_ARCH_SH7372)
+# define SCIF_ORER 0x0200
+# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK | SCIF_ORER)
+# define SCIF_RFDC_MASK 0x007f
+# define SCIF_TXROOM_MAX 64
+#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
+# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK )
+# define SCIF_RFDC_MASK 0x007f
+# define SCIF_TXROOM_MAX 64
+/* SH7763 SCIF2 support */
+# define SCIF2_RFDC_MASK 0x001f
+# define SCIF2_TXROOM_MAX 16
+#else
+# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK)
+# define SCIF_RFDC_MASK 0x001f
+# define SCIF_TXROOM_MAX 16
+#endif
+
+#ifndef SCIF_ORER
+#define SCIF_ORER 0x0000
+#endif
+
+#define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND)
+#define SCxSR_ERRORS(port) (((port)->type == PORT_SCI) ? SCI_ERRORS : SCIF_ERRORS)
+#define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF)
+#define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE)
+#define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER)
+#define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER)
+#define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK)
+#define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : SCIF_ORER)
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7721) || \
+ defined(CONFIG_ARCH_SH73A0) || \
+ defined(CONFIG_ARCH_SH7367) || \
+ defined(CONFIG_ARCH_SH7377) || \
+ defined(CONFIG_ARCH_SH7372)
+# define SCxSR_RDxF_CLEAR(port) (sci_in(port, SCxSR) & 0xfffc)
+# define SCxSR_ERROR_CLEAR(port) (sci_in(port, SCxSR) & 0xfd73)
+# define SCxSR_TDxE_CLEAR(port) (sci_in(port, SCxSR) & 0xffdf)
+# define SCxSR_BREAK_CLEAR(port) (sci_in(port, SCxSR) & 0xffe3)
+#else
+# define SCxSR_RDxF_CLEAR(port) (((port)->type == PORT_SCI) ? 0xbc : 0x00fc)
+# define SCxSR_ERROR_CLEAR(port) (((port)->type == PORT_SCI) ? 0xc4 : 0x0073)
+# define SCxSR_TDxE_CLEAR(port) (((port)->type == PORT_SCI) ? 0x78 : 0x00df)
+# define SCxSR_BREAK_CLEAR(port) (((port)->type == PORT_SCI) ? 0xc4 : 0x00e3)
+#endif
+
+/* SCFCR */
+#define SCFCR_RFRST 0x0002
+#define SCFCR_TFRST 0x0004
+#define SCFCR_MCE 0x0008
+
+#define SCI_MAJOR 204
+#define SCI_MINOR_START 8
+
+#define SCI_IN(size, offset) \
+ if ((size) == 8) { \
+ return ioread8(port->membase + (offset)); \
+ } else { \
+ return ioread16(port->membase + (offset)); \
+ }
+#define SCI_OUT(size, offset, value) \
+ if ((size) == 8) { \
+ iowrite8(value, port->membase + (offset)); \
+ } else if ((size) == 16) { \
+ iowrite16(value, port->membase + (offset)); \
+ }
+
+#define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\
+ static inline unsigned int sci_##name##_in(struct uart_port *port) \
+ { \
+ if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \
+ SCI_IN(scif_size, scif_offset) \
+ } else { /* PORT_SCI or PORT_SCIFA */ \
+ SCI_IN(sci_size, sci_offset); \
+ } \
+ } \
+ static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
+ { \
+ if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \
+ SCI_OUT(scif_size, scif_offset, value) \
+ } else { /* PORT_SCI or PORT_SCIFA */ \
+ SCI_OUT(sci_size, sci_offset, value); \
+ } \
+ }
+
+#ifdef CONFIG_H8300
+/* h8300 doesn't have SCIF */
+#define CPU_SCIF_FNS(name) \
+ static inline unsigned int sci_##name##_in(struct uart_port *port) \
+ { \
+ return 0; \
+ } \
+ static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
+ { \
+ }
+#else
+#define CPU_SCIF_FNS(name, scif_offset, scif_size) \
+ static inline unsigned int sci_##name##_in(struct uart_port *port) \
+ { \
+ SCI_IN(scif_size, scif_offset); \
+ } \
+ static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
+ { \
+ SCI_OUT(scif_size, scif_offset, value); \
+ }
+#endif
+
+#define CPU_SCI_FNS(name, sci_offset, sci_size) \
+ static inline unsigned int sci_##name##_in(struct uart_port* port) \
+ { \
+ SCI_IN(sci_size, sci_offset); \
+ } \
+ static inline void sci_##name##_out(struct uart_port* port, unsigned int value) \
+ { \
+ SCI_OUT(sci_size, sci_offset, value); \
+ }
+
+#if defined(CONFIG_CPU_SH3) || \
+ defined(CONFIG_ARCH_SH73A0) || \
+ defined(CONFIG_ARCH_SH7367) || \
+ defined(CONFIG_ARCH_SH7377) || \
+ defined(CONFIG_ARCH_SH7372)
+#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
+#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
+ sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
+ h8_sci_offset, h8_sci_size) \
+ CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size)
+#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
+ CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7721) || \
+ defined(CONFIG_ARCH_SH7367)
+#define SCIF_FNS(name, scif_offset, scif_size) \
+ CPU_SCIF_FNS(name, scif_offset, scif_size)
+#elif defined(CONFIG_ARCH_SH7377) || \
+ defined(CONFIG_ARCH_SH7372) || \
+ defined(CONFIG_ARCH_SH73A0)
+#define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) \
+ CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size)
+#define SCIF_FNS(name, scif_offset, scif_size) \
+ CPU_SCIF_FNS(name, scif_offset, scif_size)
+#else
+#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
+ sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
+ h8_sci_offset, h8_sci_size) \
+ CPU_SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh3_scif_offset, sh3_scif_size)
+#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
+ CPU_SCIF_FNS(name, sh3_scif_offset, sh3_scif_size)
+#endif
+#elif defined(__H8300H__) || defined(__H8300S__)
+#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
+ sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
+ h8_sci_offset, h8_sci_size) \
+ CPU_SCI_FNS(name, h8_sci_offset, h8_sci_size)
+#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
+ CPU_SCIF_FNS(name)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
+ defined(CONFIG_CPU_SUBTYPE_SH7724)
+ #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \
+ CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size)
+ #define SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) \
+ CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
+#else
+#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
+ sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
+ h8_sci_offset, h8_sci_size) \
+ CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size)
+#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
+ CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
+#endif
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7720) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7721) || \
+ defined(CONFIG_ARCH_SH7367)
+
+SCIF_FNS(SCSMR, 0x00, 16)
+SCIF_FNS(SCBRR, 0x04, 8)
+SCIF_FNS(SCSCR, 0x08, 16)
+SCIF_FNS(SCxSR, 0x14, 16)
+SCIF_FNS(SCFCR, 0x18, 16)
+SCIF_FNS(SCFDR, 0x1c, 16)
+SCIF_FNS(SCxTDR, 0x20, 8)
+SCIF_FNS(SCxRDR, 0x24, 8)
+SCIF_FNS(SCLSR, 0x00, 0)
+#elif defined(CONFIG_ARCH_SH7377) || \
+ defined(CONFIG_ARCH_SH7372) || \
+ defined(CONFIG_ARCH_SH73A0)
+SCIF_FNS(SCSMR, 0x00, 16)
+SCIF_FNS(SCBRR, 0x04, 8)
+SCIF_FNS(SCSCR, 0x08, 16)
+SCIF_FNS(SCTDSR, 0x0c, 16)
+SCIF_FNS(SCFER, 0x10, 16)
+SCIF_FNS(SCxSR, 0x14, 16)
+SCIF_FNS(SCFCR, 0x18, 16)
+SCIF_FNS(SCFDR, 0x1c, 16)
+SCIF_FNS(SCTFDR, 0x38, 16)
+SCIF_FNS(SCRFDR, 0x3c, 16)
+SCIx_FNS(SCxTDR, 0x20, 8, 0x40, 8)
+SCIx_FNS(SCxRDR, 0x24, 8, 0x60, 8)
+SCIF_FNS(SCLSR, 0x00, 0)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
+ defined(CONFIG_CPU_SUBTYPE_SH7724)
+SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16)
+SCIx_FNS(SCBRR, 0x04, 8, 0x04, 8)
+SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16)
+SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8)
+SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16)
+SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8)
+SCIx_FNS(SCSPTR, 0, 0, 0, 0)
+SCIF_FNS(SCFCR, 0x18, 16)
+SCIF_FNS(SCFDR, 0x1c, 16)
+SCIF_FNS(SCLSR, 0x24, 16)
+#else
+/* reg SCI/SH3 SCI/SH4 SCIF/SH3 SCIF/SH4 SCI/H8*/
+/* name off sz off sz off sz off sz off sz*/
+SCIx_FNS(SCSMR, 0x00, 8, 0x00, 8, 0x00, 8, 0x00, 16, 0x00, 8)
+SCIx_FNS(SCBRR, 0x02, 8, 0x04, 8, 0x02, 8, 0x04, 8, 0x01, 8)
+SCIx_FNS(SCSCR, 0x04, 8, 0x08, 8, 0x04, 8, 0x08, 16, 0x02, 8)
+SCIx_FNS(SCxTDR, 0x06, 8, 0x0c, 8, 0x06, 8, 0x0C, 8, 0x03, 8)
+SCIx_FNS(SCxSR, 0x08, 8, 0x10, 8, 0x08, 16, 0x10, 16, 0x04, 8)
+SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8)
+SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16)
+#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7780) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7785) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786)
+SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16)
+SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
+SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
+SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
+SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
+SCIF_FNS(SCFDR, 0, 0, 0x1C, 16)
+SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
+SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
+SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
+SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
+#else
+SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16)
+#if defined(CONFIG_CPU_SUBTYPE_SH7722)
+SCIF_FNS(SCSPTR, 0, 0, 0, 0)
+#else
+SCIF_FNS(SCSPTR, 0, 0, 0x20, 16)
+#endif
+SCIF_FNS(SCLSR, 0, 0, 0x24, 16)
+#endif
+#endif
+#define sci_in(port, reg) sci_##reg##_in(port)
+#define sci_out(port, reg, value) sci_##reg##_out(port, value)
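+
+/*
+ * For readers unfamiliar with the token pasting above: a call such as
+ * sci_in(port, SCSCR) expands to sci_SCSCR_in(port), one of the
+ * per-register accessors generated by the SCIx_FNS()/SCIF_FNS() tables
+ * earlier in this header, which in turn pick ioread8() or ioread16()
+ * according to the per-CPU register width.
+ */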
+
+/* H8/300 series SCI pins assignment */
+#if defined(__H8300H__) || defined(__H8300S__)
+static const struct __attribute__((packed)) {
+ int port; /* GPIO port no */
+ unsigned short rx,tx; /* GPIO bit no */
+} h8300_sci_pins[] = {
+#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
+ { /* SCI0 */
+ .port = H8300_GPIO_P9,
+ .rx = H8300_GPIO_B2,
+ .tx = H8300_GPIO_B0,
+ },
+ { /* SCI1 */
+ .port = H8300_GPIO_P9,
+ .rx = H8300_GPIO_B3,
+ .tx = H8300_GPIO_B1,
+ },
+ { /* SCI2 */
+ .port = H8300_GPIO_PB,
+ .rx = H8300_GPIO_B7,
+ .tx = H8300_GPIO_B6,
+ }
+#elif defined(CONFIG_H8S2678)
+ { /* SCI0 */
+ .port = H8300_GPIO_P3,
+ .rx = H8300_GPIO_B2,
+ .tx = H8300_GPIO_B0,
+ },
+ { /* SCI1 */
+ .port = H8300_GPIO_P3,
+ .rx = H8300_GPIO_B3,
+ .tx = H8300_GPIO_B1,
+ },
+ { /* SCI2 */
+ .port = H8300_GPIO_P5,
+ .rx = H8300_GPIO_B1,
+ .tx = H8300_GPIO_B0,
+ }
+#endif
+};
+#endif
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7708) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+static inline int sci_rxd_in(struct uart_port *port)
+{
+ if (port->mapbase == 0xfffffe80)
+ return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */
+ return 1;
+}
+#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7091)
+static inline int sci_rxd_in(struct uart_port *port)
+{
+ if (port->mapbase == 0xffe00000)
+ return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */
+ return 1;
+}
+#elif defined(__H8300H__) || defined(__H8300S__)
+static inline int sci_rxd_in(struct uart_port *port)
+{
+ int ch = (port->mapbase - SMR0) >> 3;
+ return (H8300_SCI_DR(ch) & h8300_sci_pins[ch].rx) ? 1 : 0;
+}
+#else /* default case for non-SCI processors */
+static inline int sci_rxd_in(struct uart_port *port)
+{
+ return 1;
+}
+#endif
diff --git a/drivers/tty/serial/sn_console.c b/drivers/tty/serial/sn_console.c
new file mode 100644
index 00000000..377ae74e
--- /dev/null
+++ b/drivers/tty/serial/sn_console.c
@@ -0,0 +1,1085 @@
+/*
+ * C-Brick Serial Port (and console) driver for SGI Altix machines.
+ *
+ * This driver is NOT suitable for talking to the l1-controller for
+ * anything other than 'console activities' --- please use the l1
+ * driver for that.
+ *
+ *
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1500 Crittenden Lane,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/NoticeExplan
+ */
+
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/sysrq.h>
+#include <linux/circ_buf.h>
+#include <linux/serial_reg.h>
+#include <linux/delay.h> /* for mdelay */
+#include <linux/miscdevice.h>
+#include <linux/serial_core.h>
+
+#include <asm/io.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/sn_sal.h>
+
+/* number of characters we can transmit to the SAL console at a time */
+#define SN_SAL_MAX_CHARS 120
+
+/* 64K; when we're asynch, it must be at least printk's LOG_BUF_LEN to
+ * avoid losing chars (always has to be a power of 2) */
+#define SN_SAL_BUFFER_SIZE (64 * (1 << 10))
+
+#define SN_SAL_UART_FIFO_DEPTH 16
+#define SN_SAL_UART_FIFO_SPEED_CPS (9600/10)
+
+/* sn_transmit_chars() calling args */
+#define TRANSMIT_BUFFERED 0
+#define TRANSMIT_RAW 1
+
+/* To use dynamic numbers only and not use the assigned major and minor,
+ * define the following.. */
+ /* #define USE_DYNAMIC_MINOR 1 *//* use dynamic minor number */
+#define USE_DYNAMIC_MINOR 0 /* Don't rely on misc_register dynamic minor */
+
+/* Device name we're using */
+#define DEVICE_NAME "ttySG"
+#define DEVICE_NAME_DYNAMIC "ttySG0" /* need full name for misc_register */
+/* The major/minor we are using, ignored for USE_DYNAMIC_MINOR */
+#define DEVICE_MAJOR 204
+#define DEVICE_MINOR 40
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static char sysrq_serial_str[] = "\eSYS";
+static char *sysrq_serial_ptr = sysrq_serial_str;
+static unsigned long sysrq_requested;
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+/*
+ * Port definition - this kinda drives it all
+ */
+struct sn_cons_port {
+ struct timer_list sc_timer;
+ struct uart_port sc_port;
+ struct sn_sal_ops {
+ int (*sal_puts_raw) (const char *s, int len);
+ int (*sal_puts) (const char *s, int len);
+ int (*sal_getc) (void);
+ int (*sal_input_pending) (void);
+ void (*sal_wakeup_transmit) (struct sn_cons_port *, int);
+ } *sc_ops;
+ unsigned long sc_interrupt_timeout;
+ int sc_is_asynch;
+};
+
+static struct sn_cons_port sal_console_port;
+static int sn_process_input;
+
+/* Only used if USE_DYNAMIC_MINOR is set to 1 */
+static struct miscdevice misc; /* used with misc_register for dynamic */
+
+extern void early_sn_setup(void);
+
+#undef DEBUG
+#ifdef DEBUG
+static int sn_debug_printf(const char *fmt, ...);
+#define DPRINTF(x...) sn_debug_printf(x)
+#else
+#define DPRINTF(x...) do { } while (0)
+#endif
+
+/* Prototypes */
+static int snt_hw_puts_raw(const char *, int);
+static int snt_hw_puts_buffered(const char *, int);
+static int snt_poll_getc(void);
+static int snt_poll_input_pending(void);
+static int snt_intr_getc(void);
+static int snt_intr_input_pending(void);
+static void sn_transmit_chars(struct sn_cons_port *, int);
+
+/* A table for polling:
+ */
+static struct sn_sal_ops poll_ops = {
+ .sal_puts_raw = snt_hw_puts_raw,
+ .sal_puts = snt_hw_puts_raw,
+ .sal_getc = snt_poll_getc,
+ .sal_input_pending = snt_poll_input_pending
+};
+
+/* A table for interrupts enabled */
+static struct sn_sal_ops intr_ops = {
+ .sal_puts_raw = snt_hw_puts_raw,
+ .sal_puts = snt_hw_puts_buffered,
+ .sal_getc = snt_intr_getc,
+ .sal_input_pending = snt_intr_input_pending,
+ .sal_wakeup_transmit = sn_transmit_chars
+};
+
+/* the console does output in two distinctly different ways:
+ * synchronous (raw) and asynchronous (buffered). initially, early_printk
+ * does synchronous output. any data written goes directly to the SAL
+ * to be output (incidentally, it is internally buffered by the SAL)
+ * after interrupts and timers are initialized and available for use,
+ * the console init code switches to asynchronous output. this is
+ * also the earliest opportunity to begin polling for console input.
+ * after console initialization, console output and tty (serial port)
+ * output is buffered and sent to the SAL asynchronously (either by
+ * timer callback or by UART interrupt) */
+
+/* routines for running the console in polling mode */
+
+/**
+ * snt_poll_getc - Get a character from the console in polling mode
+ *
+ */
+static int snt_poll_getc(void)
+{
+ int ch;
+
+ ia64_sn_console_getc(&ch);
+ return ch;
+}
+
+/**
+ * snt_poll_input_pending - Check if any input is waiting - polling mode.
+ *
+ */
+static int snt_poll_input_pending(void)
+{
+ int status, input;
+
+ status = ia64_sn_console_check(&input);
+ return !status && input;
+}
+
+/* routines for an interrupt driven console (normal) */
+
+/**
+ * snt_intr_getc - Get a character from the console, interrupt mode
+ *
+ */
+static int snt_intr_getc(void)
+{
+ return ia64_sn_console_readc();
+}
+
+/**
+ * snt_intr_input_pending - Check if input is pending, interrupt mode
+ *
+ */
+static int snt_intr_input_pending(void)
+{
+ return ia64_sn_console_intr_status() & SAL_CONSOLE_INTR_RECV;
+}
+
+/* these functions are used in both polled and interrupt modes */
+
+/**
+ * snt_hw_puts_raw - Send raw string to the console, polled or interrupt mode
+ * @s: String
+ * @len: Length
+ *
+ */
+static int snt_hw_puts_raw(const char *s, int len)
+{
+ /* this will call the PROM and not return until this is done */
+ return ia64_sn_console_putb(s, len);
+}
+
+/**
+ * snt_hw_puts_buffered - Send string to console, polled or interrupt mode
+ * @s: String
+ * @len: Length
+ *
+ */
+static int snt_hw_puts_buffered(const char *s, int len)
+{
+ /* queue data to the PROM */
+ return ia64_sn_console_xmit_chars((char *)s, len);
+}
+
+/* uart interface structs
+ * These functions are associated with the uart_port that the serial core
+ * infrastructure calls.
+ *
+ * Note: Due to how the console works, many routines are no-ops.
+ */
+
+/**
+ * snp_type - What type of console are we?
+ * @port: Port to operate with (we ignore since we only have one port)
+ *
+ */
+static const char *snp_type(struct uart_port *port)
+{
+ return ("SGI SN L1");
+}
+
+/**
+ * snp_tx_empty - Is the transmitter empty? We pretend we're always empty
+ * @port: Port to operate on (we ignore since we only have one port)
+ *
+ */
+static unsigned int snp_tx_empty(struct uart_port *port)
+{
+ return 1;
+}
+
+/**
+ * snp_stop_tx - stop the transmitter - no-op for us
+ * @port: Port to operate on - we ignore - no-op function
+ *
+ */
+static void snp_stop_tx(struct uart_port *port)
+{
+}
+
+/**
+ * snp_release_port - Free i/o and resources for port - no-op for us
+ * @port: Port to operate on - we ignore - no-op function
+ *
+ */
+static void snp_release_port(struct uart_port *port)
+{
+}
+
+/**
+ * snp_enable_ms - Force modem status interrupts on - no-op for us
+ * @port: Port to operate on - we ignore - no-op function
+ *
+ */
+static void snp_enable_ms(struct uart_port *port)
+{
+}
+
+/**
+ * snp_shutdown - shut down the port - free irq and disable - no-op for us
+ * @port: Port to shut down - we ignore
+ *
+ */
+static void snp_shutdown(struct uart_port *port)
+{
+}
+
+/**
+ * snp_set_mctrl - set control lines (dtr, rts, etc) - no-op for our console
+ * @port: Port to operate on - we ignore
+ * @mctrl: Lines to set/unset - we ignore
+ *
+ */
+static void snp_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+/**
+ * snp_get_mctrl - get control line info, we just return a static value
+ * @port: port to operate on - we only have one port so we ignore this
+ *
+ */
+static unsigned int snp_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_RNG | TIOCM_DSR | TIOCM_CTS;
+}
+
+/**
+ * snp_stop_rx - Stop the receiver - we ignore this
+ * @port: Port to operate on - we ignore
+ *
+ */
+static void snp_stop_rx(struct uart_port *port)
+{
+}
+
+/**
+ * snp_start_tx - Start transmitter
+ * @port: Port to operate on
+ *
+ */
+static void snp_start_tx(struct uart_port *port)
+{
+ if (sal_console_port.sc_ops->sal_wakeup_transmit)
+ sal_console_port.sc_ops->sal_wakeup_transmit(&sal_console_port,
+ TRANSMIT_BUFFERED);
+
+}
+
+/**
+ * snp_break_ctl - handle breaks - ignored by us
+ * @port: Port to operate on
+ * @break_state: Break state
+ *
+ */
+static void snp_break_ctl(struct uart_port *port, int break_state)
+{
+}
+
+/**
+ * snp_startup - Start up the serial port - always return 0 (We're always on)
+ * @port: Port to operate on
+ *
+ */
+static int snp_startup(struct uart_port *port)
+{
+ return 0;
+}
+
+/**
+ * snp_set_termios - set termios stuff - we ignore these
+ * @port: port to operate on
+ * @termios: New settings
+ * @old: Old settings
+ *
+ */
+static void
+snp_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+}
+
+/**
+ * snp_request_port - allocate resources for port - ignored by us
+ * @port: port to operate on
+ *
+ */
+static int snp_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+/**
+ * snp_config_port - allocate resources, set up - we ignore, we're always on
+ * @port: Port to operate on
+ * @flags: flags used for port setup
+ *
+ */
+static void snp_config_port(struct uart_port *port, int flags)
+{
+}
+
+/* Associate the uart functions above - given to serial core */
+
+static struct uart_ops sn_console_ops = {
+ .tx_empty = snp_tx_empty,
+ .set_mctrl = snp_set_mctrl,
+ .get_mctrl = snp_get_mctrl,
+ .stop_tx = snp_stop_tx,
+ .start_tx = snp_start_tx,
+ .stop_rx = snp_stop_rx,
+ .enable_ms = snp_enable_ms,
+ .break_ctl = snp_break_ctl,
+ .startup = snp_startup,
+ .shutdown = snp_shutdown,
+ .set_termios = snp_set_termios,
+ .pm = NULL,
+ .type = snp_type,
+ .release_port = snp_release_port,
+ .request_port = snp_request_port,
+ .config_port = snp_config_port,
+ .verify_port = NULL,
+};
+
+/* End of uart struct functions and defines */
+
+#ifdef DEBUG
+
+/**
+ * sn_debug_printf - close to hardware debugging printf
+ * @fmt: printf format
+ *
+ * This is as "close to the metal" as we can get, used when the driver
+ * itself may be broken.
+ *
+ */
+static int sn_debug_printf(const char *fmt, ...)
+{
+ static char printk_buf[1024];
+ int printed_len;
+ va_list args;
+
+ va_start(args, fmt);
+ printed_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
+
+ if (!sal_console_port.sc_ops) {
+ sal_console_port.sc_ops = &poll_ops;
+ early_sn_setup();
+ }
+ sal_console_port.sc_ops->sal_puts_raw(printk_buf, printed_len);
+
+ va_end(args);
+ return printed_len;
+}
+#endif /* DEBUG */
+
+/*
+ * Interrupt handling routines.
+ */
+
+/**
+ * sn_receive_chars - Grab characters, pass them to tty layer
+ * @port: Port to operate on
+ * @flags: irq flags
+ *
+ * Note: If we're not registered with the serial core infrastructure yet,
+ * we don't try to send characters to it...
+ *
+ */
+static void
+sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
+{
+ int ch;
+ struct tty_struct *tty;
+
+ if (!port) {
+ printk(KERN_ERR "sn_receive_chars - port NULL so can't receive\n");
+ return;
+ }
+
+ if (!port->sc_ops) {
+ printk(KERN_ERR "sn_receive_chars - port->sc_ops NULL so can't receive\n");
+ return;
+ }
+
+ if (port->sc_port.state) {
+ /* The serial_core stuffs are initialized, use them */
+ tty = port->sc_port.state->port.tty;
+ }
+ else {
+ /* Not registered yet - can't pass to tty layer. */
+ tty = NULL;
+ }
+
+ while (port->sc_ops->sal_input_pending()) {
+ ch = port->sc_ops->sal_getc();
+ if (ch < 0) {
+ printk(KERN_ERR "sn_console: An error occurred while "
+ "obtaining data from the console (0x%0x)\n", ch);
+ break;
+ }
+#ifdef CONFIG_MAGIC_SYSRQ
+ if (sysrq_requested) {
+ unsigned long sysrq_timeout = sysrq_requested + HZ*5;
+
+ sysrq_requested = 0;
+ if (ch && time_before(jiffies, sysrq_timeout)) {
+ spin_unlock_irqrestore(&port->sc_port.lock, flags);
+ handle_sysrq(ch);
+ spin_lock_irqsave(&port->sc_port.lock, flags);
+ /* ignore actual sysrq command char */
+ continue;
+ }
+ }
+ if (ch == *sysrq_serial_ptr) {
+ if (!(*++sysrq_serial_ptr)) {
+ sysrq_requested = jiffies;
+ sysrq_serial_ptr = sysrq_serial_str;
+ }
+ /*
+ * ignore the whole sysrq string except for the
+ * leading escape
+ */
+ if (ch != '\e')
+ continue;
+ }
+ else
+ sysrq_serial_ptr = sysrq_serial_str;
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+ /* record the character to pass up to the tty layer */
+ if (tty) {
+ if(tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0)
+ break;
+ }
+ port->sc_port.icount.rx++;
+ }
+
+ if (tty)
+ tty_flip_buffer_push(tty);
+}
+
+/**
+ * sn_transmit_chars - grab characters from serial core, send off
+ * @port: Port to operate on
+ * @raw: Transmit raw or buffered
+ *
+ * Note: If we're early, before we're registered with serial core, the
+ * writes are going through sn_sal_console_write because that's how
+ * register_console has been set up. We currently could have asynch
+ * polls calling this function due to sn_sal_switch_to_asynch but we can
+ * ignore them until we register with the serial core stuffs.
+ *
+ */
+static void sn_transmit_chars(struct sn_cons_port *port, int raw)
+{
+ int xmit_count, tail, head, loops, ii;
+ int result;
+ char *start;
+ struct circ_buf *xmit;
+
+ if (!port)
+ return;
+
+ BUG_ON(!port->sc_is_asynch);
+
+ if (port->sc_port.state) {
+ /* We're initialized, using serial core infrastructure */
+ xmit = &port->sc_port.state->xmit;
+ } else {
+ /* Probably sn_sal_switch_to_asynch has been run but serial core isn't
+ * initialized yet. Just return. Writes are going through
+ * sn_sal_console_write (due to register_console) at this time.
+ */
+ return;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&port->sc_port)) {
+ /* Nothing to do. */
+ ia64_sn_console_intr_disable(SAL_CONSOLE_INTR_XMIT);
+ return;
+ }
+
+ head = xmit->head;
+ tail = xmit->tail;
+ start = &xmit->buf[tail];
+
+ /* twice around gets the tail to the end of the buffer and
+ * then to the head, if needed */
+ loops = (head < tail) ? 2 : 1;
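+ /*
+ * For example (illustrative numbers, and assuming the SAL accepts
+ * everything offered): with head = 10 and tail = 4000 in the
+ * 4096-byte buffer, the first pass sends the 96 bytes up to the end
+ * of the buffer, tail wraps to 0, and the second pass sends the
+ * remaining 10 bytes up to head.
+ */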
+
+ for (ii = 0; ii < loops; ii++) {
+ xmit_count = (head < tail) ?
+ (UART_XMIT_SIZE - tail) : (head - tail);
+
+ if (xmit_count > 0) {
+ if (raw == TRANSMIT_RAW)
+ result =
+ port->sc_ops->sal_puts_raw(start,
+ xmit_count);
+ else
+ result =
+ port->sc_ops->sal_puts(start, xmit_count);
+#ifdef DEBUG
+ if (!result)
+ DPRINTF("`");
+#endif
+ if (result > 0) {
+ xmit_count -= result;
+ port->sc_port.icount.tx += result;
+ tail += result;
+ tail &= UART_XMIT_SIZE - 1;
+ xmit->tail = tail;
+ start = &xmit->buf[tail];
+ }
+ }
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&port->sc_port);
+
+ if (uart_circ_empty(xmit))
+ snp_stop_tx(&port->sc_port); /* no-op for us */
+}
+
+/**
+ * sn_sal_interrupt - Handle console interrupts
+ * @irq: irq #, useful for debug statements
+ * @dev_id: our pointer to our port (sn_cons_port which contains the uart port)
+ *
+ */
+static irqreturn_t sn_sal_interrupt(int irq, void *dev_id)
+{
+ struct sn_cons_port *port = (struct sn_cons_port *)dev_id;
+ unsigned long flags;
+ int status = ia64_sn_console_intr_status();
+
+ if (!port)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&port->sc_port.lock, flags);
+ if (status & SAL_CONSOLE_INTR_RECV) {
+ sn_receive_chars(port, flags);
+ }
+ if (status & SAL_CONSOLE_INTR_XMIT) {
+ sn_transmit_chars(port, TRANSMIT_BUFFERED);
+ }
+ spin_unlock_irqrestore(&port->sc_port.lock, flags);
+ return IRQ_HANDLED;
+}
+
+/**
+ * sn_sal_timer_poll - this function handles polled console mode
+ * @data: A pointer to our sn_cons_port (which contains the uart port)
+ *
+ * data is the pointer that init_timer will store for us. This function is
+ * associated with init_timer to see if there is any console traffic.
+ * Obviously not used in interrupt mode
+ *
+ */
+static void sn_sal_timer_poll(unsigned long data)
+{
+ struct sn_cons_port *port = (struct sn_cons_port *)data;
+ unsigned long flags;
+
+ if (!port)
+ return;
+
+ if (!port->sc_port.irq) {
+ spin_lock_irqsave(&port->sc_port.lock, flags);
+ if (sn_process_input)
+ sn_receive_chars(port, flags);
+ sn_transmit_chars(port, TRANSMIT_RAW);
+ spin_unlock_irqrestore(&port->sc_port.lock, flags);
+ mod_timer(&port->sc_timer,
+ jiffies + port->sc_interrupt_timeout);
+ }
+}
+
+/*
+ * Boot-time initialization code
+ */
+
+/**
+ * sn_sal_switch_to_asynch - Switch to async mode (as opposed to synch)
+ * @port: Our sn_cons_port (which contains the uart port)
+ *
+ * So this is used by sn_sal_serial_console_init (early on, before we're
+ * registered with serial core). It's also used by sn_sal_module_init
+ * right after we've registered with serial core. The latter only happens
+ * if we didn't already come through here via sn_sal_serial_console_init.
+ *
+ */
+static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
+{
+ unsigned long flags;
+
+ if (!port)
+ return;
+
+ DPRINTF("sn_console: about to switch to asynchronous console\n");
+
+ /* without early_printk, we may be invoked late enough to race
+ * with other cpus doing console IO at this point, however
+ * console interrupts will never be enabled */
+ spin_lock_irqsave(&port->sc_port.lock, flags);
+
+ /* early_printk invocation may have done this for us */
+ if (!port->sc_ops)
+ port->sc_ops = &poll_ops;
+
+ /* we can't turn on the console interrupt (as request_irq
+ * calls kmalloc, which isn't set up yet), so we rely on a
+ * timer to poll for input and push data from the console
+ * buffer.
+ */
+ init_timer(&port->sc_timer);
+ port->sc_timer.function = sn_sal_timer_poll;
+ port->sc_timer.data = (unsigned long)port;
+
+ if (IS_RUNNING_ON_SIMULATOR())
+ port->sc_interrupt_timeout = 6;
+ else {
+ /* 960cps / 16 char FIFO = 60HZ
+ * HZ / (SN_SAL_UART_FIFO_SPEED_CPS / SN_SAL_UART_FIFO_DEPTH) */
+ port->sc_interrupt_timeout =
+ HZ * SN_SAL_UART_FIFO_DEPTH / SN_SAL_UART_FIFO_SPEED_CPS;
+ }
+ mod_timer(&port->sc_timer, jiffies + port->sc_interrupt_timeout);
+
+ port->sc_is_asynch = 1;
+ spin_unlock_irqrestore(&port->sc_port.lock, flags);
+}
+
+/**
+ * sn_sal_switch_to_interrupts - Switch to interrupt driven mode
+ * @port: Our sn_cons_port (which contains the uart port)
+ *
+ * In sn_sal_module_init, after we're registered with serial core and
+ * the port is added, this function is called to switch us to interrupt
+ * mode. We were previously in asynch/polling mode (using init_timer).
+ *
+ * We attempt to switch to interrupt mode here by calling
+ * request_irq. If that works out, we enable receive interrupts.
+ */
+static void __init sn_sal_switch_to_interrupts(struct sn_cons_port *port)
+{
+ unsigned long flags;
+
+ if (port) {
+ DPRINTF("sn_console: switching to interrupt driven console\n");
+
+ if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt,
+ IRQF_DISABLED | IRQF_SHARED,
+ "SAL console driver", port) >= 0) {
+ spin_lock_irqsave(&port->sc_port.lock, flags);
+ port->sc_port.irq = SGI_UART_VECTOR;
+ port->sc_ops = &intr_ops;
+
+ /* turn on receive interrupts */
+ ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
+ spin_unlock_irqrestore(&port->sc_port.lock, flags);
+ }
+ else {
+ printk(KERN_INFO
+ "sn_console: console proceeding in polled mode\n");
+ }
+ }
+}
+
+/*
+ * Kernel console definitions
+ */
+
+static void sn_sal_console_write(struct console *, const char *, unsigned);
+static int sn_sal_console_setup(struct console *, char *);
+static struct uart_driver sal_console_uart;
+extern struct tty_driver *uart_console_device(struct console *, int *);
+
+static struct console sal_console = {
+ .name = DEVICE_NAME,
+ .write = sn_sal_console_write,
+ .device = uart_console_device,
+ .setup = sn_sal_console_setup,
+ .index = -1, /* unspecified */
+ .data = &sal_console_uart,
+};
+
+#define SAL_CONSOLE &sal_console
+
+static struct uart_driver sal_console_uart = {
+ .owner = THIS_MODULE,
+ .driver_name = "sn_console",
+ .dev_name = DEVICE_NAME,
+ .major = 0, /* major/minor set at registration time per USE_DYNAMIC_MINOR */
+ .minor = 0,
+ .nr = 1, /* one port */
+ .cons = SAL_CONSOLE,
+};
+
+/**
+ * sn_sal_module_init - When the kernel loads us, get us rolling w/ serial core
+ *
+ * Before this is called, we've been printing kernel messages in a special
+ * early mode not making use of the serial core infrastructure. When our
+ * driver is loaded for real, we register the driver and port with serial
+ * core and try to enable interrupt driven mode.
+ *
+ */
+static int __init sn_sal_module_init(void)
+{
+ int retval;
+
+ if (!ia64_platform_is("sn2"))
+ return 0;
+
+ printk(KERN_INFO "sn_console: Console driver init\n");
+
+ if (USE_DYNAMIC_MINOR == 1) {
+ misc.minor = MISC_DYNAMIC_MINOR;
+ misc.name = DEVICE_NAME_DYNAMIC;
+ retval = misc_register(&misc);
+ if (retval != 0) {
+ printk(KERN_WARNING "Failed to register console "
+ "device using misc_register.\n");
+ return -ENODEV;
+ }
+ sal_console_uart.major = MISC_MAJOR;
+ sal_console_uart.minor = misc.minor;
+ } else {
+ sal_console_uart.major = DEVICE_MAJOR;
+ sal_console_uart.minor = DEVICE_MINOR;
+ }
+
+ /* We register the driver and the port before switching to interrupts
+ * or async above so the proper uart structures are populated */
+
+ if (uart_register_driver(&sal_console_uart) < 0) {
+ printk
+ ("ERROR sn_sal_module_init failed uart_register_driver, line %d\n",
+ __LINE__);
+ return -ENODEV;
+ }
+
+ spin_lock_init(&sal_console_port.sc_port.lock);
+
+ /* Setup the port struct with the minimum needed */
+ sal_console_port.sc_port.membase = (char *)1; /* just needs to be non-zero */
+ sal_console_port.sc_port.type = PORT_16550A;
+ sal_console_port.sc_port.fifosize = SN_SAL_MAX_CHARS;
+ sal_console_port.sc_port.ops = &sn_console_ops;
+ sal_console_port.sc_port.line = 0;
+
+ if (uart_add_one_port(&sal_console_uart, &sal_console_port.sc_port) < 0) {
+ /* error - nothing we can really do here except report it */
+ printk(KERN_ERR "%s: unable to add port\n", __func__);
+ }
+
+ /* when this driver is compiled in, the console initialization
+ * will have already switched us into asynchronous operation
+ * before we get here through the module initcalls */
+ if (!sal_console_port.sc_is_asynch) {
+ sn_sal_switch_to_asynch(&sal_console_port);
+ }
+
+ /* at this point (module_init) we can try to turn on interrupts */
+ if (!IS_RUNNING_ON_SIMULATOR()) {
+ sn_sal_switch_to_interrupts(&sal_console_port);
+ }
+ sn_process_input = 1;
+ return 0;
+}
+
+/**
+ * sn_sal_module_exit - When we're unloaded, remove the driver/port
+ *
+ */
+static void __exit sn_sal_module_exit(void)
+{
+ del_timer_sync(&sal_console_port.sc_timer);
+ uart_remove_one_port(&sal_console_uart, &sal_console_port.sc_port);
+ uart_unregister_driver(&sal_console_uart);
+ misc_deregister(&misc);
+}
+
+module_init(sn_sal_module_init);
+module_exit(sn_sal_module_exit);
+
+/**
+ * puts_raw_fixed - sn_sal_console_write helper for adding \r's as required
+ * @puts_raw: puts function to do the writing
+ * @s: input string
+ * @count: length
+ *
+ * We need a \r ahead of every \n for direct writes through
+ * ia64_sn_console_putb (what sal_puts_raw below actually does).
+ *
+ */
+
+static void puts_raw_fixed(int (*puts_raw) (const char *s, int len),
+ const char *s, int count)
+{
+ const char *s1;
+
+ /* Output '\r' before each '\n' */
+ while ((s1 = memchr(s, '\n', count)) != NULL) {
+ puts_raw(s, s1 - s);
+ puts_raw("\r\n", 2);
+ count -= s1 + 1 - s;
+ s = s1 + 1;
+ }
+ puts_raw(s, count);
+}
+
+/**
+ * sn_sal_console_write - Print statements before serial core available
+ * @co: Console to operate on - we ignore since we have just one
+ * @s: String to send
+ * @count: length
+ *
+ * This is referenced in the console struct. It is used for early
+ * console printing before we register with serial core and for things
+ * such as kdb. The console_lock must be held when we get here.
+ *
+ * This function has some code for trying to print output even if the lock
+ * is held. We try to cover the case where a lock holder could have died.
+ * We don't use this special case code if we're not registered with serial
+ * core yet. After we're registered with serial core, the only time this
+ * function would be used is for high level kernel output like magic sys req,
+ * kdb, and printk's.
+ */
+static void
+sn_sal_console_write(struct console *co, const char *s, unsigned count)
+{
+ unsigned long flags = 0;
+ struct sn_cons_port *port = &sal_console_port;
+ static int stole_lock = 0;
+
+ BUG_ON(!port->sc_is_asynch);
+
+ /* We can't look at the xmit buffer if we're not registered with serial core
+ * yet. So only do the fancy recovery after registering
+ */
+ if (!port->sc_port.state) {
+ /* Not yet registered with serial core - simple case */
+ puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
+ return;
+ }
+
+ /* Somebody really wants this output; it might be an
+ * oops, kdb, panic, etc. Make sure they get it. */
+ if (spin_is_locked(&port->sc_port.lock)) {
+ int lhead = port->sc_port.state->xmit.head;
+ int ltail = port->sc_port.state->xmit.tail;
+ int counter, got_lock = 0;
+
+ /*
+ * We attempt to determine if someone has died with the
+ * lock. We wait ~20 secs (150 iterations of mdelay(125))
+ * after the head and tail ptrs stop moving and assume the
+ * lock holder is not functional, then plow ahead. If the
+ * lock is freed within the timeout period we re-take the
+ * lock and proceed normally. We also remember if we have
+ * plowed ahead so that we don't have to wait out the
+ * timeout period again - the assumption is that we will
+ * time out again.
+ */
+
+ for (counter = 0; counter < 150; mdelay(125), counter++) {
+ if (!spin_is_locked(&port->sc_port.lock)
+ || stole_lock) {
+ if (!stole_lock) {
+ spin_lock_irqsave(&port->sc_port.lock,
+ flags);
+ got_lock = 1;
+ }
+ break;
+ } else {
+ /* still locked */
+ if ((lhead != port->sc_port.state->xmit.head)
+ || (ltail !=
+ port->sc_port.state->xmit.tail)) {
+ lhead =
+ port->sc_port.state->xmit.head;
+ ltail =
+ port->sc_port.state->xmit.tail;
+ counter = 0;
+ }
+ }
+ }
+ /* flush anything in the serial core xmit buffer, raw */
+ sn_transmit_chars(port, 1);
+ if (got_lock) {
+ spin_unlock_irqrestore(&port->sc_port.lock, flags);
+ stole_lock = 0;
+ } else {
+ /* fell thru */
+ stole_lock = 1;
+ }
+ puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
+ } else {
+ stole_lock = 0;
+ spin_lock_irqsave(&port->sc_port.lock, flags);
+ sn_transmit_chars(port, 1);
+ spin_unlock_irqrestore(&port->sc_port.lock, flags);
+
+ puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
+ }
+}
+
+
+/**
+ * sn_sal_console_setup - Set up console for early printing
+ * @co: Console to work with
+ * @options: Options to set
+ *
+ * Altix console doesn't do anything with baud rates, etc, anyway.
+ *
+ * This isn't required since not providing the setup function in the
+ * console struct is ok. However, other patches like KDB plop something
+ * here so providing it is easier.
+ *
+ */
+static int sn_sal_console_setup(struct console *co, char *options)
+{
+ return 0;
+}
+
+/**
+ * sn_sal_console_write_early - simple early output routine
+ * @co: console struct
+ * @s: string to print
+ * @count: number of characters
+ *
+ * Simple function to provide early output, before even
+ * sn_sal_serial_console_init is called. Referenced in the
+ * console struct registered in sn_serial_console_early_setup.
+ *
+ */
+static void __init
+sn_sal_console_write_early(struct console *co, const char *s, unsigned count)
+{
+ puts_raw_fixed(sal_console_port.sc_ops->sal_puts_raw, s, count);
+}
+
+/* Used for very early console printing - again, before
+ * sn_sal_serial_console_init is run */
+static struct console sal_console_early __initdata = {
+ .name = "sn_sal",
+ .write = sn_sal_console_write_early,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+/**
+ * sn_serial_console_early_setup - Sets up early console output support
+ *
+ * Register a console early on... This is for output before even
+ * sn_sal_serial_console_init is called. This function is called from
+ * setup.c. This allows us to do really early polled writes. When
+ * sn_sal_serial_console_init is called, this console is unregistered
+ * and a new one registered.
+ */
+int __init sn_serial_console_early_setup(void)
+{
+ if (!ia64_platform_is("sn2"))
+ return -1;
+
+ sal_console_port.sc_ops = &poll_ops;
+ spin_lock_init(&sal_console_port.sc_port.lock);
+ early_sn_setup(); /* Find SAL entry points */
+ register_console(&sal_console_early);
+
+ return 0;
+}
+
+/**
+ * sn_sal_serial_console_init - Early console output - set up for register
+ *
+ * This function is called when regular console init happens. Because we
+ * support even earlier console output with sn_serial_console_early_setup
+ * (called from setup.c directly), this function unregisters the really
+ * early console.
+ *
+ * Note: Even if setup.c doesn't register sal_console_early, unregistering
+ * it here doesn't hurt anything.
+ *
+ */
+static int __init sn_sal_serial_console_init(void)
+{
+ if (ia64_platform_is("sn2")) {
+ sn_sal_switch_to_asynch(&sal_console_port);
+ DPRINTF("sn_sal_serial_console_init : register console\n");
+ register_console(&sal_console);
+ unregister_console(&sal_console_early);
+ }
+ return 0;
+}
+
+console_initcall(sn_sal_serial_console_init);
diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c
new file mode 100644
index 00000000..6381a028
--- /dev/null
+++ b/drivers/tty/serial/suncore.c
@@ -0,0 +1,247 @@
+/* suncore.c
+ *
+ * Common SUN serial routines. Based entirely
+ * upon drivers/sbus/char/sunserial.c which is:
+ *
+ * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
+ *
+ * Adaptation to new UART layer is:
+ *
+ * Copyright (C) 2002 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/serial_core.h>
+#include <linux/init.h>
+
+#include <asm/prom.h>
+
+#include "suncore.h"
+
+static int sunserial_current_minor = 64;
+
+int sunserial_register_minors(struct uart_driver *drv, int count)
+{
+ int err = 0;
+
+ drv->minor = sunserial_current_minor;
+ drv->nr += count;
+ /* Register the driver on the first call */
+ if (drv->nr == count)
+ err = uart_register_driver(drv);
+ if (err == 0) {
+ sunserial_current_minor += count;
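+		/* Sun serial minors start at 64; offset name_base so tty
+		 * names stay contiguous from ttyS0 across the different
+		 * sun serial drivers. */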
+ drv->tty_driver->name_base = drv->minor - 64;
+ }
+ return err;
+}
+EXPORT_SYMBOL(sunserial_register_minors);
+
+void sunserial_unregister_minors(struct uart_driver *drv, int count)
+{
+ drv->nr -= count;
+ sunserial_current_minor -= count;
+
+ if (drv->nr == 0)
+ uart_unregister_driver(drv);
+}
+EXPORT_SYMBOL(sunserial_unregister_minors);
+
+int sunserial_console_match(struct console *con, struct device_node *dp,
+ struct uart_driver *drv, int line, bool ignore_line)
+{
+ if (!con)
+ return 0;
+
+ drv->cons = con;
+
+ if (of_console_device != dp)
+ return 0;
+
+ if (!ignore_line) {
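+		/* A console options string beginning with 'b' (i.e. the
+		 * second port of the usual ttya/ttyb pair) matches
+		 * odd-numbered lines; anything else matches the
+		 * even-numbered line. */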
+ int off = 0;
+
+ if (of_console_options &&
+ *of_console_options == 'b')
+ off = 1;
+
+ if ((line & 1) != off)
+ return 0;
+ }
+
+ if (!console_set_on_cmdline) {
+ con->index = line;
+ add_preferred_console(con->name, line, NULL);
+ }
+ return 1;
+}
+EXPORT_SYMBOL(sunserial_console_match);
+
+void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
+{
+ const char *mode, *s;
+ char mode_prop[] = "ttyX-mode";
+ int baud, bits, stop, cflag;
+ char parity;
+
+ if (!strcmp(uart_dp->name, "rsc") ||
+ !strcmp(uart_dp->name, "rsc-console") ||
+ !strcmp(uart_dp->name, "rsc-control")) {
+ mode = of_get_property(uart_dp,
+ "ssp-console-modes", NULL);
+ if (!mode)
+ mode = "115200,8,n,1,-";
+ } else if (!strcmp(uart_dp->name, "lom-console")) {
+ mode = "9600,8,n,1,-";
+ } else {
+ struct device_node *dp;
+ char c;
+
+ c = 'a';
+ if (of_console_options)
+ c = *of_console_options;
+
+ mode_prop[3] = c;
+
+ dp = of_find_node_by_path("/options");
+ mode = of_get_property(dp, mode_prop, NULL);
+ if (!mode)
+ mode = "9600,8,n,1,-";
+ }
+
+ cflag = CREAD | HUPCL | CLOCAL;
+
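+	/* The mode property has the form "baud,bits,parity,stop,handshake",
+	 * e.g. the "9600,8,n,1,-" default above selects 9600 baud, 8 data
+	 * bits, no parity and one stop bit. */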
+ s = mode;
+ baud = simple_strtoul(s, NULL, 0);
+ s = strchr(s, ',');
+ bits = simple_strtoul(++s, NULL, 0);
+ s = strchr(s, ',');
+ parity = *(++s);
+ s = strchr(s, ',');
+ stop = simple_strtoul(++s, NULL, 0);
+ s = strchr(s, ',');
+ /* XXX handshake is not handled here. */
+
+ switch (baud) {
+ case 150: cflag |= B150; break;
+ case 300: cflag |= B300; break;
+ case 600: cflag |= B600; break;
+ case 1200: cflag |= B1200; break;
+ case 2400: cflag |= B2400; break;
+ case 4800: cflag |= B4800; break;
+ case 9600: cflag |= B9600; break;
+ case 19200: cflag |= B19200; break;
+ case 38400: cflag |= B38400; break;
+ case 57600: cflag |= B57600; break;
+ case 115200: cflag |= B115200; break;
+ case 230400: cflag |= B230400; break;
+ case 460800: cflag |= B460800; break;
+ default: baud = 9600; cflag |= B9600; break;
+ }
+
+ switch (bits) {
+ case 5: cflag |= CS5; break;
+ case 6: cflag |= CS6; break;
+ case 7: cflag |= CS7; break;
+ case 8: cflag |= CS8; break;
+ default: cflag |= CS8; break;
+ }
+
+ switch (parity) {
+ case 'o': cflag |= (PARENB | PARODD); break;
+ case 'e': cflag |= PARENB; break;
+ case 'n': default: break;
+ }
+
+ switch (stop) {
+ case 2: cflag |= CSTOPB; break;
+ case 1: default: break;
+ }
+
+ con->cflag = cflag;
+}
+
+/* Sun serial MOUSE auto baud rate detection. */
+static struct mouse_baud_cflag {
+ int baud;
+ unsigned int cflag;
+} mouse_baud_table[] = {
+ { 1200, B1200 },
+ { 2400, B2400 },
+ { 4800, B4800 },
+ { 9600, B9600 },
+ { -1, ~0 },
+ { -1, ~0 },
+};
+
+unsigned int suncore_mouse_baud_cflag_next(unsigned int cflag, int *new_baud)
+{
+ int i;
+
+ for (i = 0; mouse_baud_table[i].baud != -1; i++)
+ if (mouse_baud_table[i].cflag == (cflag & CBAUD))
+ break;
+
+ i += 1;
+ if (mouse_baud_table[i].baud == -1)
+ i = 0;
+
+ *new_baud = mouse_baud_table[i].baud;
+ return mouse_baud_table[i].cflag;
+}
+
+EXPORT_SYMBOL(suncore_mouse_baud_cflag_next);
+
+/* Basically, when the baud rate is wrong the mouse spits out
+ * breaks to us.
+ */
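+/* Return convention, as implemented below: 0 means a normal byte (no baud
+ * hunting in progress), 1 means the byte was consumed as part of baud
+ * detection, and 2 means the caller should try the next rate (see
+ * suncore_mouse_baud_cflag_next above).
+ */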
+int suncore_mouse_baud_detection(unsigned char ch, int is_break)
+{
+ static int mouse_got_break = 0;
+ static int ctr = 0;
+
+ if (is_break) {
+ /* Let a few normal bytes go by before we jump the gun
+ * and say we need to try another baud rate.
+ */
+ if (mouse_got_break && ctr < 8)
+ return 1;
+
+ /* Ok, we need to try another baud. */
+ ctr = 0;
+ mouse_got_break = 1;
+ return 2;
+ }
+ if (mouse_got_break) {
+ ctr++;
+ if (ch == 0x87) {
+ /* Correct baud rate determined. */
+ mouse_got_break = 0;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL(suncore_mouse_baud_detection);
+
+static int __init suncore_init(void)
+{
+ return 0;
+}
+
+static void __exit suncore_exit(void)
+{
+}
+
+module_init(suncore_init);
+module_exit(suncore_exit);
+
+MODULE_AUTHOR("Eddie C. Dost, David S. Miller");
+MODULE_DESCRIPTION("Sun serial common layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/suncore.h b/drivers/tty/serial/suncore.h
new file mode 100644
index 00000000..db205793
--- /dev/null
+++ b/drivers/tty/serial/suncore.h
@@ -0,0 +1,33 @@
+/* suncore.h
+ *
+ * Generic SUN serial/kbd/ms layer. Based entirely
+ * upon drivers/sbus/char/sunserial.h which is:
+ *
+ * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
+ *
+ * Port to new UART layer is:
+ *
+ * Copyright (C) 2002 David S. Miller (davem@redhat.com)
+ */
+
+#ifndef _SERIAL_SUN_H
+#define _SERIAL_SUN_H
+
+/* Serial keyboard defines for L1-A processing... */
+#define SUNKBD_RESET 0xff
+#define SUNKBD_L1 0x01
+#define SUNKBD_UP 0x80
+#define SUNKBD_A 0x4d
+
+extern unsigned int suncore_mouse_baud_cflag_next(unsigned int, int *);
+extern int suncore_mouse_baud_detection(unsigned char, int);
+
+extern int sunserial_register_minors(struct uart_driver *, int);
+extern void sunserial_unregister_minors(struct uart_driver *, int);
+
+extern int sunserial_console_match(struct console *, struct device_node *,
+ struct uart_driver *, int, bool);
+extern void sunserial_console_termios(struct console *,
+ struct device_node *);
+
+#endif /* !(_SERIAL_SUN_H) */
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
new file mode 100644
index 00000000..c0b7246d
--- /dev/null
+++ b/drivers/tty/serial/sunhv.c
@@ -0,0 +1,661 @@
+/* sunhv.c: Serial driver for SUN4V hypervisor console.
+ *
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/major.h>
+#include <linux/circ_buf.h>
+#include <linux/serial.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+
+#include <asm/hypervisor.h>
+#include <asm/spitfire.h>
+#include <asm/prom.h>
+#include <asm/irq.h>
+
+#if defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+
+#include "suncore.h"
+
+#define CON_BREAK ((long)-1)
+#define CON_HUP ((long)-2)
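+
+/* Special status values returned by the hypervisor console calls in place
+ * of data: a break condition (CON_BREAK) or a dropped/hung-up connection
+ * (CON_HUP). */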
+
+#define IGNORE_BREAK 0x1
+#define IGNORE_ALL 0x2
+
+static char *con_write_page;
+static char *con_read_page;
+
+static int hung_up = 0;
+
+static void transmit_chars_putchar(struct uart_port *port, struct circ_buf *xmit)
+{
+ while (!uart_circ_empty(xmit)) {
+ long status = sun4v_con_putchar(xmit->buf[xmit->tail]);
+
+ if (status != HV_EOK)
+ break;
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+}
+
+static void transmit_chars_write(struct uart_port *port, struct circ_buf *xmit)
+{
+ while (!uart_circ_empty(xmit)) {
+ unsigned long ra = __pa(xmit->buf + xmit->tail);
+ unsigned long len, status, sent;
+
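+		/* Hand the largest contiguous chunk of the circular buffer
+		 * to the hypervisor in one call; a partial write simply
+		 * advances the tail by however many bytes were accepted. */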
+ len = CIRC_CNT_TO_END(xmit->head, xmit->tail,
+ UART_XMIT_SIZE);
+ status = sun4v_con_write(ra, len, &sent);
+ if (status != HV_EOK)
+ break;
+ xmit->tail = (xmit->tail + sent) & (UART_XMIT_SIZE - 1);
+ port->icount.tx += sent;
+ }
+}
+
+static int receive_chars_getchar(struct uart_port *port, struct tty_struct *tty)
+{
+ int saw_console_brk = 0;
+ int limit = 10000;
+
+ while (limit-- > 0) {
+ long status;
+ long c = sun4v_con_getchar(&status);
+
+ if (status == HV_EWOULDBLOCK)
+ break;
+
+ if (c == CON_BREAK) {
+ if (uart_handle_break(port))
+ continue;
+ saw_console_brk = 1;
+ c = 0;
+ }
+
+ if (c == CON_HUP) {
+ hung_up = 1;
+ uart_handle_dcd_change(port, 0);
+ } else if (hung_up) {
+ hung_up = 0;
+ uart_handle_dcd_change(port, 1);
+ }
+
+ if (tty == NULL) {
+ uart_handle_sysrq_char(port, c);
+ continue;
+ }
+
+ port->icount.rx++;
+
+ if (uart_handle_sysrq_char(port, c))
+ continue;
+
+ tty_insert_flip_char(tty, c, TTY_NORMAL);
+ }
+
+ return saw_console_brk;
+}
+
+static int receive_chars_read(struct uart_port *port, struct tty_struct *tty)
+{
+ int saw_console_brk = 0;
+ int limit = 10000;
+
+ while (limit-- > 0) {
+ unsigned long ra = __pa(con_read_page);
+ unsigned long bytes_read, i;
+ long stat = sun4v_con_read(ra, PAGE_SIZE, &bytes_read);
+
+ if (stat != HV_EOK) {
+ bytes_read = 0;
+
+ if (stat == CON_BREAK) {
+ if (uart_handle_break(port))
+ continue;
+ saw_console_brk = 1;
+ *con_read_page = 0;
+ bytes_read = 1;
+ } else if (stat == CON_HUP) {
+ hung_up = 1;
+ uart_handle_dcd_change(port, 0);
+ continue;
+ } else {
+ /* HV_EWOULDBLOCK, etc. */
+ break;
+ }
+ }
+
+ if (hung_up) {
+ hung_up = 0;
+ uart_handle_dcd_change(port, 1);
+ }
+
+ for (i = 0; i < bytes_read; i++)
+ uart_handle_sysrq_char(port, con_read_page[i]);
+
+ if (tty == NULL)
+ continue;
+
+ port->icount.rx += bytes_read;
+
+ tty_insert_flip_string(tty, con_read_page, bytes_read);
+ }
+
+ return saw_console_brk;
+}
+
+struct sunhv_ops {
+ void (*transmit_chars)(struct uart_port *port, struct circ_buf *xmit);
+ int (*receive_chars)(struct uart_port *port, struct tty_struct *tty);
+};
+
+static struct sunhv_ops bychar_ops = {
+ .transmit_chars = transmit_chars_putchar,
+ .receive_chars = receive_chars_getchar,
+};
+
+static struct sunhv_ops bywrite_ops = {
+ .transmit_chars = transmit_chars_write,
+ .receive_chars = receive_chars_read,
+};
+
+static struct sunhv_ops *sunhv_ops = &bychar_ops;
+
+static struct tty_struct *receive_chars(struct uart_port *port)
+{
+ struct tty_struct *tty = NULL;
+
+ if (port->state != NULL) /* Unopened serial console */
+ tty = port->state->port.tty;
+
+ if (sunhv_ops->receive_chars(port, tty))
+ sun_do_break();
+
+ return tty;
+}
+
+static void transmit_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit;
+
+ if (!port->state)
+ return;
+
+ xmit = &port->state->xmit;
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+ return;
+
+ sunhv_ops->transmit_chars(port, xmit);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+static irqreturn_t sunhv_interrupt(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ struct tty_struct *tty;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ tty = receive_chars(port);
+ transmit_chars(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (tty)
+ tty_flip_buffer_push(tty);
+
+ return IRQ_HANDLED;
+}
+
+/* port->lock is not held. */
+static unsigned int sunhv_tx_empty(struct uart_port *port)
+{
+ /* Transmitter is always empty for us. If the circ buffer
+ * is non-empty or there is an x_char pending, our caller
+ * will do the right thing and ignore what we return here.
+ */
+ return TIOCSER_TEMT;
+}
+
+/* port->lock held by caller. */
+static void sunhv_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ return;
+}
+
+/* port->lock is held by caller and interrupts are disabled. */
+static unsigned int sunhv_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+}
+
+/* port->lock held by caller. */
+static void sunhv_stop_tx(struct uart_port *port)
+{
+ return;
+}
+
+/* port->lock held by caller. */
+static void sunhv_start_tx(struct uart_port *port)
+{
+ transmit_chars(port);
+}
+
+/* port->lock is not held. */
+static void sunhv_send_xchar(struct uart_port *port, char ch)
+{
+ unsigned long flags;
+ int limit = 10000;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ while (limit-- > 0) {
+ long status = sun4v_con_putchar(ch);
+ if (status == HV_EOK)
+ break;
+ udelay(1);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* port->lock held by caller. */
+static void sunhv_stop_rx(struct uart_port *port)
+{
+}
+
+/* port->lock held by caller. */
+static void sunhv_enable_ms(struct uart_port *port)
+{
+}
+
+/* port->lock is not held. */
+static void sunhv_break_ctl(struct uart_port *port, int break_state)
+{
+ if (break_state) {
+ unsigned long flags;
+ int limit = 10000;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ while (limit-- > 0) {
+ long status = sun4v_con_putchar(CON_BREAK);
+ if (status == HV_EOK)
+ break;
+ udelay(1);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+}
+
+/* port->lock is not held. */
+static int sunhv_startup(struct uart_port *port)
+{
+ return 0;
+}
+
+/* port->lock is not held. */
+static void sunhv_shutdown(struct uart_port *port)
+{
+}
+
+/* port->lock is not held. */
+static void sunhv_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ unsigned int quot = uart_get_divisor(port, baud);
+ unsigned int iflag, cflag;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ iflag = termios->c_iflag;
+ cflag = termios->c_cflag;
+
+ port->ignore_status_mask = 0;
+ if (iflag & IGNBRK)
+ port->ignore_status_mask |= IGNORE_BREAK;
+ if ((cflag & CREAD) == 0)
+ port->ignore_status_mask |= IGNORE_ALL;
+
+ /* XXX */
+ uart_update_timeout(port, cflag,
+ (port->uartclk / (16 * quot)));
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *sunhv_type(struct uart_port *port)
+{
+ return "SUN4V HCONS";
+}
+
+static void sunhv_release_port(struct uart_port *port)
+{
+}
+
+static int sunhv_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void sunhv_config_port(struct uart_port *port, int flags)
+{
+}
+
+static int sunhv_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ return -EINVAL;
+}
+
+static struct uart_ops sunhv_pops = {
+ .tx_empty = sunhv_tx_empty,
+ .set_mctrl = sunhv_set_mctrl,
+ .get_mctrl = sunhv_get_mctrl,
+ .stop_tx = sunhv_stop_tx,
+ .start_tx = sunhv_start_tx,
+ .send_xchar = sunhv_send_xchar,
+ .stop_rx = sunhv_stop_rx,
+ .enable_ms = sunhv_enable_ms,
+ .break_ctl = sunhv_break_ctl,
+ .startup = sunhv_startup,
+ .shutdown = sunhv_shutdown,
+ .set_termios = sunhv_set_termios,
+ .type = sunhv_type,
+ .release_port = sunhv_release_port,
+ .request_port = sunhv_request_port,
+ .config_port = sunhv_config_port,
+ .verify_port = sunhv_verify_port,
+};
+
+static struct uart_driver sunhv_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "sunhv",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+};
+
+static struct uart_port *sunhv_port;
+
+/* Copy 's' into the con_write_page, expanding "\n" into
+ * "\r\n" along the way. We have to return two lengths
+ * because the caller needs to know how much to advance
+ * 's' and also how many bytes to output via con_write_page.
+ */
+static int fill_con_write_page(const char *s, unsigned int n,
+ unsigned long *page_bytes)
+{
+ const char *orig_s = s;
+ char *p = con_write_page;
+ int left = PAGE_SIZE;
+
+ while (n--) {
+ if (*s == '\n') {
+ if (left < 2)
+ break;
+ *p++ = '\r';
+ left--;
+ } else if (left < 1)
+ break;
+ *p++ = *s++;
+ left--;
+ }
+ *page_bytes = p - con_write_page;
+ return s - orig_s;
+}
+
+static void sunhv_console_write_paged(struct console *con, const char *s, unsigned n)
+{
+ struct uart_port *port = sunhv_port;
+ unsigned long flags;
+ int locked = 1;
+
+ local_irq_save(flags);
+ if (port->sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&port->lock);
+ } else
+ spin_lock(&port->lock);
+
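+	/* Encode up to a page at a time (fill_con_write_page adds the \r's),
+	 * then retry sun4v_con_write until the whole chunk is accepted or
+	 * the retry limit expires. */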
+ while (n > 0) {
+ unsigned long ra = __pa(con_write_page);
+ unsigned long page_bytes;
+ unsigned int cpy = fill_con_write_page(s, n,
+ &page_bytes);
+
+ n -= cpy;
+ s += cpy;
+ while (page_bytes > 0) {
+ unsigned long written;
+ int limit = 1000000;
+
+ while (limit--) {
+ unsigned long stat;
+
+ stat = sun4v_con_write(ra, page_bytes,
+ &written);
+ if (stat == HV_EOK)
+ break;
+ udelay(1);
+ }
+ if (limit < 0)
+ break;
+ page_bytes -= written;
+ ra += written;
+ }
+ }
+
+ if (locked)
+ spin_unlock(&port->lock);
+ local_irq_restore(flags);
+}
+
+static inline void sunhv_console_putchar(struct uart_port *port, char c)
+{
+ int limit = 1000000;
+
+ while (limit-- > 0) {
+ long status = sun4v_con_putchar(c);
+ if (status == HV_EOK)
+ break;
+ udelay(1);
+ }
+}
+
+static void sunhv_console_write_bychar(struct console *con, const char *s, unsigned n)
+{
+ struct uart_port *port = sunhv_port;
+ unsigned long flags;
+ int i, locked = 1;
+
+ local_irq_save(flags);
+ if (port->sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&port->lock);
+ } else
+ spin_lock(&port->lock);
+
+ for (i = 0; i < n; i++) {
+ if (*s == '\n')
+ sunhv_console_putchar(port, '\r');
+ sunhv_console_putchar(port, *s++);
+ }
+
+ if (locked)
+ spin_unlock(&port->lock);
+ local_irq_restore(flags);
+}
+
+static struct console sunhv_console = {
+ .name = "ttyHV",
+ .write = sunhv_console_write_bychar,
+ .device = uart_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sunhv_reg,
+};
+
+static int __devinit hv_probe(struct platform_device *op)
+{
+ struct uart_port *port;
+ unsigned long minor;
+ int err;
+
+ if (op->archdata.irqs[0] == 0xffffffff)
+ return -ENODEV;
+
+ port = kzalloc(sizeof(struct uart_port), GFP_KERNEL);
+ if (unlikely(!port))
+ return -ENOMEM;
+
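+	/* When the core hypervisor API group registers with a minor version
+	 * of at least 1, the page-based con_read/con_write calls are usable,
+	 * so the code below switches to the buffered console ops. */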
+ minor = 1;
+ if (sun4v_hvapi_register(HV_GRP_CORE, 1, &minor) == 0 &&
+ minor >= 1) {
+ err = -ENOMEM;
+ con_write_page = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!con_write_page)
+ goto out_free_port;
+
+ con_read_page = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!con_read_page)
+ goto out_free_con_write_page;
+
+ sunhv_console.write = sunhv_console_write_paged;
+ sunhv_ops = &bywrite_ops;
+ }
+
+ sunhv_port = port;
+
+ port->line = 0;
+ port->ops = &sunhv_pops;
+ port->type = PORT_SUNHV;
+ port->uartclk = ( 29491200 / 16 ); /* arbitrary */
+
+ port->membase = (unsigned char __iomem *) __pa(port);
+
+ port->irq = op->archdata.irqs[0];
+
+ port->dev = &op->dev;
+
+ err = sunserial_register_minors(&sunhv_reg, 1);
+ if (err)
+ goto out_free_con_read_page;
+
+ sunserial_console_match(&sunhv_console, op->dev.of_node,
+ &sunhv_reg, port->line, false);
+
+ err = uart_add_one_port(&sunhv_reg, port);
+ if (err)
+ goto out_unregister_driver;
+
+ err = request_irq(port->irq, sunhv_interrupt, 0, "hvcons", port);
+ if (err)
+ goto out_remove_port;
+
+ dev_set_drvdata(&op->dev, port);
+
+ return 0;
+
+out_remove_port:
+ uart_remove_one_port(&sunhv_reg, port);
+
+out_unregister_driver:
+ sunserial_unregister_minors(&sunhv_reg, 1);
+
+out_free_con_read_page:
+ kfree(con_read_page);
+
+out_free_con_write_page:
+ kfree(con_write_page);
+
+out_free_port:
+ kfree(port);
+ sunhv_port = NULL;
+ return err;
+}
+
+static int __devexit hv_remove(struct platform_device *dev)
+{
+ struct uart_port *port = dev_get_drvdata(&dev->dev);
+
+ free_irq(port->irq, port);
+
+ uart_remove_one_port(&sunhv_reg, port);
+
+ sunserial_unregister_minors(&sunhv_reg, 1);
+
+ kfree(port);
+ sunhv_port = NULL;
+
+ dev_set_drvdata(&dev->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id hv_match[] = {
+ {
+ .name = "console",
+ .compatible = "qcn",
+ },
+ {
+ .name = "console",
+ .compatible = "SUNW,sun4v-console",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hv_match);
+
+static struct platform_driver hv_driver = {
+ .driver = {
+ .name = "hv",
+ .owner = THIS_MODULE,
+ .of_match_table = hv_match,
+ },
+ .probe = hv_probe,
+ .remove = __devexit_p(hv_remove),
+};
+
+static int __init sunhv_init(void)
+{
+ if (tlb_type != hypervisor)
+ return -ENODEV;
+
+ return platform_driver_register(&hv_driver);
+}
+
+static void __exit sunhv_exit(void)
+{
+ platform_driver_unregister(&hv_driver);
+}
+
+module_init(sunhv_init);
+module_exit(sunhv_exit);
+
+MODULE_AUTHOR("David S. Miller");
+MODULE_DESCRIPTION("SUN4V Hypervisor console driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
new file mode 100644
index 00000000..b5fa2a57
--- /dev/null
+++ b/drivers/tty/serial/sunsab.c
@@ -0,0 +1,1152 @@
+/* sunsab.c: ASYNC Driver for the SIEMENS SAB82532 DUSCC.
+ *
+ * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 2002, 2006 David S. Miller (davem@davemloft.net)
+ *
+ * Rewrote buffer handling to use CIRC (Circular Buffer) macros.
+ * Maxim Krasnyanskiy <maxk@qualcomm.com>
+ *
+ * Fixed to use tty_get_baud_rate, and to allow for arbitrary baud
+ * rates to be programmed into the UART. Also eliminated a lot of
+ * duplicated code in the console setup.
+ * Theodore Ts'o <tytso@mit.edu>, 2001-Oct-12
+ *
+ * Ported to new 2.5.x UART layer.
+ * David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/circ_buf.h>
+#include <linux/serial.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+
+#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+
+#include "suncore.h"
+#include "sunsab.h"
+
+struct uart_sunsab_port {
+ struct uart_port port; /* Generic UART port */
+ union sab82532_async_regs __iomem *regs; /* Chip registers */
+ unsigned long irqflags; /* IRQ state flags */
+ int dsr; /* Current DSR state */
+ unsigned int cec_timeout; /* Chip poll timeout... */
+ unsigned int tec_timeout; /* likewise */
+ unsigned char interrupt_mask0;/* ISR0 masking */
+ unsigned char interrupt_mask1;/* ISR1 masking */
+ unsigned char pvr_dtr_bit; /* Which PVR bit is DTR */
+ unsigned char pvr_dsr_bit; /* Which PVR bit is DSR */
+ unsigned int gis_shift;
+ int type; /* SAB82532 version */
+
+ /* Setting configuration bits while the transmitter is active
+ * can cause garbage characters to get emitted by the chip.
+ * Therefore, we cache such writes here and do the real register
+ * write the next time the transmitter becomes idle.
+ */
+ unsigned int cached_ebrg;
+ unsigned char cached_mode;
+ unsigned char cached_pvr;
+ unsigned char cached_dafo;
+};
+
+/*
+ * This assumes you have a 29.4912 MHz clock for your UART.
+ */
+#define SAB_BASE_BAUD ( 29491200 / 16 )
+
+static char *sab82532_version[16] = {
+ "V1.0", "V2.0", "V3.2", "V(0x03)",
+ "V(0x04)", "V(0x05)", "V(0x06)", "V(0x07)",
+ "V(0x08)", "V(0x09)", "V(0x0a)", "V(0x0b)",
+ "V(0x0c)", "V(0x0d)", "V(0x0e)", "V(0x0f)"
+};
+
+#define SAB82532_MAX_TEC_TIMEOUT 200000 /* 1 character time (at 50 baud) */
+#define SAB82532_MAX_CEC_TIMEOUT 50000 /* 2.5 TX CLKs (at 50 baud) */
+
+#define SAB82532_RECV_FIFO_SIZE 32 /* Standard async fifo sizes */
+#define SAB82532_XMIT_FIFO_SIZE 32
+
+static __inline__ void sunsab_tec_wait(struct uart_sunsab_port *up)
+{
+ int timeout = up->tec_timeout;
+
+ while ((readb(&up->regs->r.star) & SAB82532_STAR_TEC) && --timeout)
+ udelay(1);
+}
+
+static __inline__ void sunsab_cec_wait(struct uart_sunsab_port *up)
+{
+ int timeout = up->cec_timeout;
+
+ while ((readb(&up->regs->r.star) & SAB82532_STAR_CEC) && --timeout)
+ udelay(1);
+}
+
+static struct tty_struct *
+receive_chars(struct uart_sunsab_port *up,
+ union sab82532_irq_status *stat)
+{
+ struct tty_struct *tty = NULL;
+ unsigned char buf[32];
+ int saw_console_brk = 0;
+ int free_fifo = 0;
+ int count = 0;
+ int i;
+
+ if (up->port.state != NULL) /* Unopened serial console */
+ tty = up->port.state->port.tty;
+
+ /* Read number of BYTES (Character + Status) available. */
+ if (stat->sreg.isr0 & SAB82532_ISR0_RPF) {
+ count = SAB82532_RECV_FIFO_SIZE;
+ free_fifo++;
+ }
+
+ if (stat->sreg.isr0 & SAB82532_ISR0_TCD) {
+ count = readb(&up->regs->r.rbcl) & (SAB82532_RECV_FIFO_SIZE - 1);
+ free_fifo++;
+ }
+
+	/* Issue a FIFO read command in case we were idle. */
+ if (stat->sreg.isr0 & SAB82532_ISR0_TIME) {
+ sunsab_cec_wait(up);
+ writeb(SAB82532_CMDR_RFRD, &up->regs->w.cmdr);
+ return tty;
+ }
+
+ if (stat->sreg.isr0 & SAB82532_ISR0_RFO)
+ free_fifo++;
+
+ /* Read the FIFO. */
+ for (i = 0; i < count; i++)
+ buf[i] = readb(&up->regs->r.rfifo[i]);
+
+ /* Issue Receive Message Complete command. */
+ if (free_fifo) {
+ sunsab_cec_wait(up);
+ writeb(SAB82532_CMDR_RMC, &up->regs->w.cmdr);
+ }
+
+ /* Count may be zero for BRK, so we check for it here */
+ if ((stat->sreg.isr1 & SAB82532_ISR1_BRK) &&
+ (up->port.line == up->port.cons->index))
+ saw_console_brk = 1;
+
+ for (i = 0; i < count; i++) {
+ unsigned char ch = buf[i], flag;
+
+ if (tty == NULL) {
+ uart_handle_sysrq_char(&up->port, ch);
+ continue;
+ }
+
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+
+ if (unlikely(stat->sreg.isr0 & (SAB82532_ISR0_PERR |
+ SAB82532_ISR0_FERR |
+ SAB82532_ISR0_RFO)) ||
+ unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) {
+ /*
+ * For statistics only
+ */
+ if (stat->sreg.isr1 & SAB82532_ISR1_BRK) {
+ stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR |
+ SAB82532_ISR0_FERR);
+ up->port.icount.brk++;
+ /*
+ * We do the SysRQ and SAK checking
+ * here because otherwise the break
+ * may get masked by ignore_status_mask
+ * or read_status_mask.
+ */
+ if (uart_handle_break(&up->port))
+ continue;
+ } else if (stat->sreg.isr0 & SAB82532_ISR0_PERR)
+ up->port.icount.parity++;
+ else if (stat->sreg.isr0 & SAB82532_ISR0_FERR)
+ up->port.icount.frame++;
+ if (stat->sreg.isr0 & SAB82532_ISR0_RFO)
+ up->port.icount.overrun++;
+
+ /*
+			 * Mask off conditions which should be ignored.
+ */
+ stat->sreg.isr0 &= (up->port.read_status_mask & 0xff);
+ stat->sreg.isr1 &= ((up->port.read_status_mask >> 8) & 0xff);
+
+ if (stat->sreg.isr1 & SAB82532_ISR1_BRK) {
+ flag = TTY_BREAK;
+ } else if (stat->sreg.isr0 & SAB82532_ISR0_PERR)
+ flag = TTY_PARITY;
+ else if (stat->sreg.isr0 & SAB82532_ISR0_FERR)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(&up->port, ch))
+ continue;
+
+ if ((stat->sreg.isr0 & (up->port.ignore_status_mask & 0xff)) == 0 &&
+ (stat->sreg.isr1 & ((up->port.ignore_status_mask >> 8) & 0xff)) == 0)
+ tty_insert_flip_char(tty, ch, flag);
+ if (stat->sreg.isr0 & SAB82532_ISR0_RFO)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+
+ if (saw_console_brk)
+ sun_do_break();
+
+ return tty;
+}
+
+static void sunsab_stop_tx(struct uart_port *);
+static void sunsab_tx_idle(struct uart_sunsab_port *);
+
+static void transmit_chars(struct uart_sunsab_port *up,
+ union sab82532_irq_status *stat)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int i;
+
+ if (stat->sreg.isr1 & SAB82532_ISR1_ALLS) {
+ up->interrupt_mask1 |= SAB82532_IMR1_ALLS;
+ writeb(up->interrupt_mask1, &up->regs->w.imr1);
+ set_bit(SAB82532_ALLS, &up->irqflags);
+ }
+
+#if 0 /* bde@nwlink.com says this check causes problems */
+ if (!(stat->sreg.isr1 & SAB82532_ISR1_XPR))
+ return;
+#endif
+
+ if (!(readb(&up->regs->r.star) & SAB82532_STAR_XFW))
+ return;
+
+ set_bit(SAB82532_XPR, &up->irqflags);
+ sunsab_tx_idle(up);
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+ up->interrupt_mask1 |= SAB82532_IMR1_XPR;
+ writeb(up->interrupt_mask1, &up->regs->w.imr1);
+ return;
+ }
+
+ up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR);
+ writeb(up->interrupt_mask1, &up->regs->w.imr1);
+ clear_bit(SAB82532_ALLS, &up->irqflags);
+
+ /* Stuff 32 bytes into Transmit FIFO. */
+ clear_bit(SAB82532_XPR, &up->irqflags);
+ for (i = 0; i < up->port.fifosize; i++) {
+ writeb(xmit->buf[xmit->tail],
+ &up->regs->w.xfifo[i]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ }
+
+ /* Issue a Transmit Frame command. */
+ sunsab_cec_wait(up);
+ writeb(SAB82532_CMDR_XF, &up->regs->w.cmdr);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ if (uart_circ_empty(xmit))
+ sunsab_stop_tx(&up->port);
+}
+
+static void check_status(struct uart_sunsab_port *up,
+ union sab82532_irq_status *stat)
+{
+ if (stat->sreg.isr0 & SAB82532_ISR0_CDSC)
+ uart_handle_dcd_change(&up->port,
+ !(readb(&up->regs->r.vstr) & SAB82532_VSTR_CD));
+
+ if (stat->sreg.isr1 & SAB82532_ISR1_CSC)
+ uart_handle_cts_change(&up->port,
+ (readb(&up->regs->r.star) & SAB82532_STAR_CTS));
+
+ if ((readb(&up->regs->r.pvr) & up->pvr_dsr_bit) ^ up->dsr) {
+ up->dsr = (readb(&up->regs->r.pvr) & up->pvr_dsr_bit) ? 0 : 1;
+ up->port.icount.dsr++;
+ }
+
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+}
+
+static irqreturn_t sunsab_interrupt(int irq, void *dev_id)
+{
+ struct uart_sunsab_port *up = dev_id;
+ struct tty_struct *tty;
+ union sab82532_irq_status status;
+ unsigned long flags;
+ unsigned char gis;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ status.stat = 0;
+ gis = readb(&up->regs->r.gis) >> up->gis_shift;
+ if (gis & 1)
+ status.sreg.isr0 = readb(&up->regs->r.isr0);
+ if (gis & 2)
+ status.sreg.isr1 = readb(&up->regs->r.isr1);
+
+ tty = NULL;
+ if (status.stat) {
+ if ((status.sreg.isr0 & (SAB82532_ISR0_TCD | SAB82532_ISR0_TIME |
+ SAB82532_ISR0_RFO | SAB82532_ISR0_RPF)) ||
+ (status.sreg.isr1 & SAB82532_ISR1_BRK))
+ tty = receive_chars(up, &status);
+ if ((status.sreg.isr0 & SAB82532_ISR0_CDSC) ||
+ (status.sreg.isr1 & SAB82532_ISR1_CSC))
+ check_status(up, &status);
+ if (status.sreg.isr1 & (SAB82532_ISR1_ALLS | SAB82532_ISR1_XPR))
+ transmit_chars(up, &status);
+ }
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ if (tty)
+ tty_flip_buffer_push(tty);
+
+ return IRQ_HANDLED;
+}
+
+/* port->lock is not held. */
+static unsigned int sunsab_tx_empty(struct uart_port *port)
+{
+ struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ int ret;
+
+ /* Do not need a lock for a state test like this. */
+ if (test_bit(SAB82532_ALLS, &up->irqflags))
+ ret = TIOCSER_TEMT;
+ else
+ ret = 0;
+
+ return ret;
+}
+
+/* port->lock held by caller. */
+static void sunsab_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+
+ if (mctrl & TIOCM_RTS) {
+ up->cached_mode &= ~SAB82532_MODE_FRTS;
+ up->cached_mode |= SAB82532_MODE_RTS;
+ } else {
+ up->cached_mode |= (SAB82532_MODE_FRTS |
+ SAB82532_MODE_RTS);
+ }
+ if (mctrl & TIOCM_DTR) {
+ up->cached_pvr &= ~(up->pvr_dtr_bit);
+ } else {
+ up->cached_pvr |= up->pvr_dtr_bit;
+ }
+
+ set_bit(SAB82532_REGS_PENDING, &up->irqflags);
+ if (test_bit(SAB82532_XPR, &up->irqflags))
+ sunsab_tx_idle(up);
+}
+
+/* port->lock is held by caller and interrupts are disabled. */
+static unsigned int sunsab_get_mctrl(struct uart_port *port)
+{
+ struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ unsigned char val;
+ unsigned int result;
+
+ result = 0;
+
+ val = readb(&up->regs->r.pvr);
+ result |= (val & up->pvr_dsr_bit) ? 0 : TIOCM_DSR;
+
+ val = readb(&up->regs->r.vstr);
+ result |= (val & SAB82532_VSTR_CD) ? 0 : TIOCM_CAR;
+
+ val = readb(&up->regs->r.star);
+ result |= (val & SAB82532_STAR_CTS) ? TIOCM_CTS : 0;
+
+ return result;
+}
+
+/* port->lock held by caller. */
+static void sunsab_stop_tx(struct uart_port *port)
+{
+ struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+
+ up->interrupt_mask1 |= SAB82532_IMR1_XPR;
+ writeb(up->interrupt_mask1, &up->regs->w.imr1);
+}
+
+/* port->lock held by caller. */
+static void sunsab_tx_idle(struct uart_sunsab_port *up)
+{
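+	/* Flush any register writes that were deferred while the
+	 * transmitter was busy (see the cached_* fields in
+	 * struct uart_sunsab_port). */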
+ if (test_bit(SAB82532_REGS_PENDING, &up->irqflags)) {
+ u8 tmp;
+
+ clear_bit(SAB82532_REGS_PENDING, &up->irqflags);
+ writeb(up->cached_mode, &up->regs->rw.mode);
+ writeb(up->cached_pvr, &up->regs->rw.pvr);
+ writeb(up->cached_dafo, &up->regs->w.dafo);
+
+ writeb(up->cached_ebrg & 0xff, &up->regs->w.bgr);
+ tmp = readb(&up->regs->rw.ccr2);
+ tmp &= ~0xc0;
+ tmp |= (up->cached_ebrg >> 2) & 0xc0;
+ writeb(tmp, &up->regs->rw.ccr2);
+ }
+}
+
+/* port->lock held by caller. */
+static void sunsab_start_tx(struct uart_port *port)
+{
+ struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int i;
+
+ up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR);
+ writeb(up->interrupt_mask1, &up->regs->w.imr1);
+
+ if (!test_bit(SAB82532_XPR, &up->irqflags))
+ return;
+
+ clear_bit(SAB82532_ALLS, &up->irqflags);
+ clear_bit(SAB82532_XPR, &up->irqflags);
+
+ for (i = 0; i < up->port.fifosize; i++) {
+ writeb(xmit->buf[xmit->tail],
+ &up->regs->w.xfifo[i]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ }
+
+ /* Issue a Transmit Frame command. */
+ sunsab_cec_wait(up);
+ writeb(SAB82532_CMDR_XF, &up->regs->w.cmdr);
+}
+
+/* port->lock is not held. */
+static void sunsab_send_xchar(struct uart_port *port, char ch)
+{
+ struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ sunsab_tec_wait(up);
+ writeb(ch, &up->regs->w.tic);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+/* port->lock held by caller. */
+static void sunsab_stop_rx(struct uart_port *port)
+{
+ struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+
+ up->interrupt_mask0 |= SAB82532_IMR0_TCD;
+	writeb(up->interrupt_mask0, &up->regs->w.imr0);
+}
+
+/* port->lock held by caller. */
+static void sunsab_enable_ms(struct uart_port *port)
+{
+ /* For now we always receive these interrupts. */
+}
+
+/* port->lock is not held. */
+static void sunsab_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ unsigned long flags;
+ unsigned char val;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ val = up->cached_dafo;
+ if (break_state)
+ val |= SAB82532_DAFO_XBRK;
+ else
+ val &= ~SAB82532_DAFO_XBRK;
+ up->cached_dafo = val;
+
+ set_bit(SAB82532_REGS_PENDING, &up->irqflags);
+ if (test_bit(SAB82532_XPR, &up->irqflags))
+ sunsab_tx_idle(up);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+/* port->lock is not held. */
+static int sunsab_startup(struct uart_port *port)
+{
+ struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ unsigned long flags;
+ unsigned char tmp;
+ int err = request_irq(up->port.irq, sunsab_interrupt,
+ IRQF_SHARED, "sab", up);
+ if (err)
+ return err;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * Wait for any commands or immediate characters
+ */
+ sunsab_cec_wait(up);
+ sunsab_tec_wait(up);
+
+ /*
+ * Clear the FIFO buffers.
+ */
+ writeb(SAB82532_CMDR_RRES, &up->regs->w.cmdr);
+ sunsab_cec_wait(up);
+ writeb(SAB82532_CMDR_XRES, &up->regs->w.cmdr);
+
+ /*
+ * Clear the interrupt registers.
+ */
+ (void) readb(&up->regs->r.isr0);
+ (void) readb(&up->regs->r.isr1);
+
+ /*
+ * Now, initialize the UART
+ */
+ writeb(0, &up->regs->w.ccr0); /* power-down */
+ writeb(SAB82532_CCR0_MCE | SAB82532_CCR0_SC_NRZ |
+ SAB82532_CCR0_SM_ASYNC, &up->regs->w.ccr0);
+ writeb(SAB82532_CCR1_ODS | SAB82532_CCR1_BCR | 7, &up->regs->w.ccr1);
+ writeb(SAB82532_CCR2_BDF | SAB82532_CCR2_SSEL |
+ SAB82532_CCR2_TOE, &up->regs->w.ccr2);
+ writeb(0, &up->regs->w.ccr3);
+ writeb(SAB82532_CCR4_MCK4 | SAB82532_CCR4_EBRG, &up->regs->w.ccr4);
+ up->cached_mode = (SAB82532_MODE_RTS | SAB82532_MODE_FCTS |
+ SAB82532_MODE_RAC);
+ writeb(up->cached_mode, &up->regs->w.mode);
+ writeb(SAB82532_RFC_DPS|SAB82532_RFC_RFTH_32, &up->regs->w.rfc);
+
+ tmp = readb(&up->regs->rw.ccr0);
+ tmp |= SAB82532_CCR0_PU; /* power-up */
+ writeb(tmp, &up->regs->rw.ccr0);
+
+ /*
+ * Finally, enable interrupts
+ */
+ up->interrupt_mask0 = (SAB82532_IMR0_PERR | SAB82532_IMR0_FERR |
+ SAB82532_IMR0_PLLA);
+ writeb(up->interrupt_mask0, &up->regs->w.imr0);
+ up->interrupt_mask1 = (SAB82532_IMR1_BRKT | SAB82532_IMR1_ALLS |
+ SAB82532_IMR1_XOFF | SAB82532_IMR1_TIN |
+ SAB82532_IMR1_CSC | SAB82532_IMR1_XON |
+ SAB82532_IMR1_XPR);
+ writeb(up->interrupt_mask1, &up->regs->w.imr1);
+ set_bit(SAB82532_ALLS, &up->irqflags);
+ set_bit(SAB82532_XPR, &up->irqflags);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return 0;
+}
+
+/* port->lock is not held. */
+static void sunsab_shutdown(struct uart_port *port)
+{
+ struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /* Disable Interrupts */
+ up->interrupt_mask0 = 0xff;
+ writeb(up->interrupt_mask0, &up->regs->w.imr0);
+ up->interrupt_mask1 = 0xff;
+ writeb(up->interrupt_mask1, &up->regs->w.imr1);
+
+ /* Disable break condition */
+ up->cached_dafo = readb(&up->regs->rw.dafo);
+ up->cached_dafo &= ~SAB82532_DAFO_XBRK;
+ writeb(up->cached_dafo, &up->regs->rw.dafo);
+
+ /* Disable Receiver */
+ up->cached_mode &= ~SAB82532_MODE_RAC;
+ writeb(up->cached_mode, &up->regs->rw.mode);
+
+ /*
+ * XXX FIXME
+ *
+ * If the chip is powered down here the system hangs/crashes during
+ * reboot or shutdown. This needs to be investigated further,
+ * similar behaviour occurs in 2.4 when the driver is configured
+ * as a module only. One hint may be that data is sometimes
+ * transmitted at 9600 baud during shutdown (regardless of the
+ * speed the chip was configured for when the port was open).
+ */
+#if 0
+ /* Power Down */
+ tmp = readb(&up->regs->rw.ccr0);
+ tmp &= ~SAB82532_CCR0_PU;
+ writeb(tmp, &up->regs->rw.ccr0);
+#endif
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ free_irq(up->port.irq, up);
+}
+
+/*
+ * This is used to figure out the divisor speeds.
+ *
+ * The formula is: Baud = SAB_BASE_BAUD / ((N + 1) * (1 << M)),
+ *
+ * with 0 <= N < 64 and 0 <= M < 16
+ */
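+
+/*
+ * Worked example: at 9600 baud, n starts at 1843200 * 10 / 9600 = 1920,
+ * halves to 480 with m = 2, and rounds to 48, giving N = 47, M = 2 and
+ * 1843200 / ((47 + 1) * (1 << 2)) = 9600 exactly.
+ */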
+
+static void calc_ebrg(int baud, int *n_ret, int *m_ret)
+{
+ int n, m;
+
+ if (baud == 0) {
+ *n_ret = 0;
+ *m_ret = 0;
+ return;
+ }
+
+ /*
+ * We scale numbers by 10 so that we get better accuracy
+ * without having to use floating point. Here we increment m
+ * until n is within the valid range.
+ */
+ n = (SAB_BASE_BAUD * 10) / baud;
+ m = 0;
+ while (n >= 640) {
+ n = n / 2;
+ m++;
+ }
+ n = (n+5) / 10;
+ /*
+ * We try very hard to avoid speeds with M == 0 since they may
+	 * not work correctly for XTAL frequencies above 10 MHz.
+ */
+ if ((m == 0) && ((n & 1) == 0)) {
+ n = n / 2;
+ m++;
+ }
+ *n_ret = n - 1;
+ *m_ret = m;
+}
+
+/* Internal routine, port->lock is held and local interrupts are disabled. */
+static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cflag,
+ unsigned int iflag, unsigned int baud,
+ unsigned int quot)
+{
+ unsigned char dafo;
+ int bits, n, m;
+
+ /* Byte size and parity */
+ switch (cflag & CSIZE) {
+ case CS5: dafo = SAB82532_DAFO_CHL5; bits = 7; break;
+ case CS6: dafo = SAB82532_DAFO_CHL6; bits = 8; break;
+ case CS7: dafo = SAB82532_DAFO_CHL7; bits = 9; break;
+ case CS8: dafo = SAB82532_DAFO_CHL8; bits = 10; break;
+ /* Never happens, but GCC is too dumb to figure it out */
+ default: dafo = SAB82532_DAFO_CHL5; bits = 7; break;
+ }
+
+ if (cflag & CSTOPB) {
+ dafo |= SAB82532_DAFO_STOP;
+ bits++;
+ }
+
+ if (cflag & PARENB) {
+ dafo |= SAB82532_DAFO_PARE;
+ bits++;
+ }
+
+ if (cflag & PARODD) {
+ dafo |= SAB82532_DAFO_PAR_ODD;
+ } else {
+ dafo |= SAB82532_DAFO_PAR_EVEN;
+ }
+ up->cached_dafo = dafo;
+
+ calc_ebrg(baud, &n, &m);
+
+ up->cached_ebrg = n | (m << 6);
+
+ up->tec_timeout = (10 * 1000000) / baud;
+ up->cec_timeout = up->tec_timeout >> 2;
+
+ /* CTS flow control flags */
+ /* We encode read_status_mask and ignore_status_mask like so:
+ *
+ * ---------------------
+ * | ... | ISR1 | ISR0 |
+ * ---------------------
+ * .. 15 8 7 0
+ */
+
+ up->port.read_status_mask = (SAB82532_ISR0_TCD | SAB82532_ISR0_TIME |
+ SAB82532_ISR0_RFO | SAB82532_ISR0_RPF |
+ SAB82532_ISR0_CDSC);
+ up->port.read_status_mask |= (SAB82532_ISR1_CSC |
+ SAB82532_ISR1_ALLS |
+ SAB82532_ISR1_XPR) << 8;
+ if (iflag & INPCK)
+ up->port.read_status_mask |= (SAB82532_ISR0_PERR |
+ SAB82532_ISR0_FERR);
+ if (iflag & (BRKINT | PARMRK))
+ up->port.read_status_mask |= (SAB82532_ISR1_BRK << 8);
+
+ /*
+	 * Characters to ignore
+ */
+ up->port.ignore_status_mask = 0;
+ if (iflag & IGNPAR)
+ up->port.ignore_status_mask |= (SAB82532_ISR0_PERR |
+ SAB82532_ISR0_FERR);
+ if (iflag & IGNBRK) {
+ up->port.ignore_status_mask |= (SAB82532_ISR1_BRK << 8);
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (iflag & IGNPAR)
+ up->port.ignore_status_mask |= SAB82532_ISR0_RFO;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if ((cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= (SAB82532_ISR0_RPF |
+ SAB82532_ISR0_TCD);
+
+ uart_update_timeout(&up->port, cflag,
+ (up->port.uartclk / (16 * quot)));
+
+ /* Now schedule a register update when the chip's
+ * transmitter is idle.
+ */
+ up->cached_mode |= SAB82532_MODE_RAC;
+ set_bit(SAB82532_REGS_PENDING, &up->irqflags);
+ if (test_bit(SAB82532_XPR, &up->irqflags))
+ sunsab_tx_idle(up);
+}
+
+/* port->lock is not held. */
+static void sunsab_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
+ unsigned long flags;
+ unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ unsigned int quot = uart_get_divisor(port, baud);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ sunsab_convert_to_sab(up, termios->c_cflag, termios->c_iflag, baud, quot);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static const char *sunsab_type(struct uart_port *port)
+{
+ struct uart_sunsab_port *up = (void *)port;
+ static char buf[36];
+
+ sprintf(buf, "SAB82532 %s", sab82532_version[up->type]);
+ return buf;
+}
+
+static void sunsab_release_port(struct uart_port *port)
+{
+}
+
+static int sunsab_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void sunsab_config_port(struct uart_port *port, int flags)
+{
+}
+
+static int sunsab_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ return -EINVAL;
+}
+
+static struct uart_ops sunsab_pops = {
+ .tx_empty = sunsab_tx_empty,
+ .set_mctrl = sunsab_set_mctrl,
+ .get_mctrl = sunsab_get_mctrl,
+ .stop_tx = sunsab_stop_tx,
+ .start_tx = sunsab_start_tx,
+ .send_xchar = sunsab_send_xchar,
+ .stop_rx = sunsab_stop_rx,
+ .enable_ms = sunsab_enable_ms,
+ .break_ctl = sunsab_break_ctl,
+ .startup = sunsab_startup,
+ .shutdown = sunsab_shutdown,
+ .set_termios = sunsab_set_termios,
+ .type = sunsab_type,
+ .release_port = sunsab_release_port,
+ .request_port = sunsab_request_port,
+ .config_port = sunsab_config_port,
+ .verify_port = sunsab_verify_port,
+};
+
+static struct uart_driver sunsab_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "sunsab",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+};
+
+static struct uart_sunsab_port *sunsab_ports;
+
+#ifdef CONFIG_SERIAL_SUNSAB_CONSOLE
+
+static void sunsab_console_putchar(struct uart_port *port, int c)
+{
+ struct uart_sunsab_port *up = (struct uart_sunsab_port *)port;
+
+ sunsab_tec_wait(up);
+ writeb(c, &up->regs->w.tic);
+}
+
+static void sunsab_console_write(struct console *con, const char *s, unsigned n)
+{
+ struct uart_sunsab_port *up = &sunsab_ports[con->index];
+ unsigned long flags;
+ int locked = 1;
+
+ local_irq_save(flags);
+ if (up->port.sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&up->port.lock);
+ } else
+ spin_lock(&up->port.lock);
+
+ uart_console_write(&up->port, s, n, sunsab_console_putchar);
+ sunsab_tec_wait(up);
+
+ if (locked)
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
+}
+
+static int sunsab_console_setup(struct console *con, char *options)
+{
+ struct uart_sunsab_port *up = &sunsab_ports[con->index];
+ unsigned long flags;
+ unsigned int baud, quot;
+
+ /*
+ * The console framework calls us for each and every port
+ * registered. Defer the console setup until the requested
+ * port has been properly discovered. A bit of a hack,
+ * though...
+ */
+ if (up->port.type != PORT_SUNSAB)
+ return -1;
+
+ printk("Console: ttyS%d (SAB82532)\n",
+ (sunsab_reg.minor - 64) + con->index);
+
+ sunserial_console_termios(con, up->port.dev->of_node);
+
+ switch (con->cflag & CBAUD) {
+ case B150: baud = 150; break;
+ case B300: baud = 300; break;
+ case B600: baud = 600; break;
+ case B1200: baud = 1200; break;
+ case B2400: baud = 2400; break;
+ case B4800: baud = 4800; break;
+ default: case B9600: baud = 9600; break;
+ case B19200: baud = 19200; break;
+ case B38400: baud = 38400; break;
+ case B57600: baud = 57600; break;
+ case B115200: baud = 115200; break;
+ case B230400: baud = 230400; break;
+ case B460800: baud = 460800; break;
+	}
+
+ /*
+ * Temporary fix.
+ */
+ spin_lock_init(&up->port.lock);
+
+ /*
+ * Initialize the hardware
+ */
+ sunsab_startup(&up->port);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * Finally, enable interrupts
+ */
+ up->interrupt_mask0 = SAB82532_IMR0_PERR | SAB82532_IMR0_FERR |
+ SAB82532_IMR0_PLLA | SAB82532_IMR0_CDSC;
+ writeb(up->interrupt_mask0, &up->regs->w.imr0);
+ up->interrupt_mask1 = SAB82532_IMR1_BRKT | SAB82532_IMR1_ALLS |
+ SAB82532_IMR1_XOFF | SAB82532_IMR1_TIN |
+ SAB82532_IMR1_CSC | SAB82532_IMR1_XON |
+ SAB82532_IMR1_XPR;
+ writeb(up->interrupt_mask1, &up->regs->w.imr1);
+
+ quot = uart_get_divisor(&up->port, baud);
+ sunsab_convert_to_sab(up, con->cflag, 0, baud, quot);
+ sunsab_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return 0;
+}
+
+static struct console sunsab_console = {
+ .name = "ttyS",
+ .write = sunsab_console_write,
+ .device = uart_console_device,
+ .setup = sunsab_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sunsab_reg,
+};
+
+static inline struct console *SUNSAB_CONSOLE(void)
+{
+ return &sunsab_console;
+}
+#else
+#define SUNSAB_CONSOLE() (NULL)
+#define sunsab_console_init() do { } while (0)
+#endif
+
+static int __devinit sunsab_init_one(struct uart_sunsab_port *up,
+ struct platform_device *op,
+ unsigned long offset,
+ int line)
+{
+ up->port.line = line;
+ up->port.dev = &op->dev;
+
+ up->port.mapbase = op->resource[0].start + offset;
+ up->port.membase = of_ioremap(&op->resource[0], offset,
+ sizeof(union sab82532_async_regs),
+ "sab");
+ if (!up->port.membase)
+ return -ENOMEM;
+ up->regs = (union sab82532_async_regs __iomem *) up->port.membase;
+
+ up->port.irq = op->archdata.irqs[0];
+
+ up->port.fifosize = SAB82532_XMIT_FIFO_SIZE;
+ up->port.iotype = UPIO_MEM;
+
+ writeb(SAB82532_IPC_IC_ACT_LOW, &up->regs->w.ipc);
+
+ up->port.ops = &sunsab_pops;
+ up->port.type = PORT_SUNSAB;
+ up->port.uartclk = SAB_BASE_BAUD;
+
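+	/* The low nibble of VSTR identifies the chip version. */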
+ up->type = readb(&up->regs->r.vstr) & 0x0f;
+ writeb(~((1 << 1) | (1 << 2) | (1 << 4)), &up->regs->w.pcr);
+ writeb(0xff, &up->regs->w.pim);
+ if ((up->port.line & 0x1) == 0) {
+ up->pvr_dsr_bit = (1 << 0);
+ up->pvr_dtr_bit = (1 << 1);
+ up->gis_shift = 2;
+ } else {
+ up->pvr_dsr_bit = (1 << 3);
+ up->pvr_dtr_bit = (1 << 2);
+ up->gis_shift = 0;
+ }
+ up->cached_pvr = (1 << 1) | (1 << 2) | (1 << 4);
+ writeb(up->cached_pvr, &up->regs->w.pvr);
+ up->cached_mode = readb(&up->regs->rw.mode);
+ up->cached_mode |= SAB82532_MODE_FRTS;
+ writeb(up->cached_mode, &up->regs->rw.mode);
+ up->cached_mode |= SAB82532_MODE_RTS;
+ writeb(up->cached_mode, &up->regs->rw.mode);
+
+ up->tec_timeout = SAB82532_MAX_TEC_TIMEOUT;
+ up->cec_timeout = SAB82532_MAX_CEC_TIMEOUT;
+
+ return 0;
+}
+
+static int __devinit sab_probe(struct platform_device *op)
+{
+ static int inst;
+ struct uart_sunsab_port *up;
+ int err;
+
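+	/* Each SAB82532 chip provides two serial channels, so ports are handled in pairs. */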
+ up = &sunsab_ports[inst * 2];
+
+ err = sunsab_init_one(&up[0], op,
+ 0,
+ (inst * 2) + 0);
+ if (err)
+ goto out;
+
+ err = sunsab_init_one(&up[1], op,
+ sizeof(union sab82532_async_regs),
+ (inst * 2) + 1);
+ if (err)
+ goto out1;
+
+ sunserial_console_match(SUNSAB_CONSOLE(), op->dev.of_node,
+ &sunsab_reg, up[0].port.line,
+ false);
+
+ sunserial_console_match(SUNSAB_CONSOLE(), op->dev.of_node,
+ &sunsab_reg, up[1].port.line,
+ false);
+
+ err = uart_add_one_port(&sunsab_reg, &up[0].port);
+ if (err)
+ goto out2;
+
+ err = uart_add_one_port(&sunsab_reg, &up[1].port);
+ if (err)
+ goto out3;
+
+ dev_set_drvdata(&op->dev, &up[0]);
+
+ inst++;
+
+ return 0;
+
+out3:
+ uart_remove_one_port(&sunsab_reg, &up[0].port);
+out2:
+ of_iounmap(&op->resource[0],
+ up[1].port.membase,
+ sizeof(union sab82532_async_regs));
+out1:
+ of_iounmap(&op->resource[0],
+ up[0].port.membase,
+ sizeof(union sab82532_async_regs));
+out:
+ return err;
+}
+
+static int __devexit sab_remove(struct platform_device *op)
+{
+ struct uart_sunsab_port *up = dev_get_drvdata(&op->dev);
+
+ uart_remove_one_port(&sunsab_reg, &up[1].port);
+ uart_remove_one_port(&sunsab_reg, &up[0].port);
+ of_iounmap(&op->resource[0],
+ up[1].port.membase,
+ sizeof(union sab82532_async_regs));
+ of_iounmap(&op->resource[0],
+ up[0].port.membase,
+ sizeof(union sab82532_async_regs));
+
+ dev_set_drvdata(&op->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id sab_match[] = {
+ {
+ .name = "se",
+ },
+ {
+ .name = "serial",
+ .compatible = "sab82532",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sab_match);
+
+static struct platform_driver sab_driver = {
+ .driver = {
+ .name = "sab",
+ .owner = THIS_MODULE,
+ .of_match_table = sab_match,
+ },
+ .probe = sab_probe,
+ .remove = __devexit_p(sab_remove),
+};
+
+static int __init sunsab_init(void)
+{
+ struct device_node *dp;
+ int err;
+ int num_channels = 0;
+
+ for_each_node_by_name(dp, "se")
+ num_channels += 2;
+ for_each_node_by_name(dp, "serial") {
+ if (of_device_is_compatible(dp, "sab82532"))
+ num_channels += 2;
+ }
+
+ if (num_channels) {
+ sunsab_ports = kzalloc(sizeof(struct uart_sunsab_port) *
+ num_channels, GFP_KERNEL);
+ if (!sunsab_ports)
+ return -ENOMEM;
+
+ err = sunserial_register_minors(&sunsab_reg, num_channels);
+ if (err) {
+ kfree(sunsab_ports);
+ sunsab_ports = NULL;
+
+ return err;
+ }
+ }
+
+ return platform_driver_register(&sab_driver);
+}
+
+static void __exit sunsab_exit(void)
+{
+ platform_driver_unregister(&sab_driver);
+ if (sunsab_reg.nr) {
+ sunserial_unregister_minors(&sunsab_reg, sunsab_reg.nr);
+ }
+
+ kfree(sunsab_ports);
+ sunsab_ports = NULL;
+}
+
+module_init(sunsab_init);
+module_exit(sunsab_exit);
+
+MODULE_AUTHOR("Eddie C. Dost and David S. Miller");
+MODULE_DESCRIPTION("Sun SAB82532 serial port driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/sunsab.h b/drivers/tty/serial/sunsab.h
new file mode 100644
index 00000000..b78e1f7b
--- /dev/null
+++ b/drivers/tty/serial/sunsab.h
@@ -0,0 +1,322 @@
+/* sunsab.h: Register Definitions for the Siemens SAB82532 DUSCC
+ *
+ * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
+ */
+
+#ifndef _SUNSAB_H
+#define _SUNSAB_H
+
+struct sab82532_async_rd_regs {
+ u8 rfifo[0x20]; /* Receive FIFO */
+ u8 star; /* Status Register */
+ u8 __pad1;
+ u8 mode; /* Mode Register */
+ u8 timr; /* Timer Register */
+ u8 xon; /* XON Character */
+ u8 xoff; /* XOFF Character */
+ u8 tcr; /* Termination Character Register */
+ u8 dafo; /* Data Format */
+ u8 rfc; /* RFIFO Control Register */
+ u8 __pad2;
+ u8 rbcl; /* Receive Byte Count Low */
+ u8 rbch; /* Receive Byte Count High */
+ u8 ccr0; /* Channel Configuration Register 0 */
+ u8 ccr1; /* Channel Configuration Register 1 */
+ u8 ccr2; /* Channel Configuration Register 2 */
+ u8 ccr3; /* Channel Configuration Register 3 */
+ u8 __pad3[4];
+ u8 vstr; /* Version Status Register */
+ u8 __pad4[3];
+ u8 gis; /* Global Interrupt Status */
+ u8 ipc; /* Interrupt Port Configuration */
+ u8 isr0; /* Interrupt Status 0 */
+ u8 isr1; /* Interrupt Status 1 */
+ u8 pvr; /* Port Value Register */
+ u8 pis; /* Port Interrupt Status */
+ u8 pcr; /* Port Configuration Register */
+ u8 ccr4; /* Channel Configuration Register 4 */
+};
+
+struct sab82532_async_wr_regs {
+ u8 xfifo[0x20]; /* Transmit FIFO */
+ u8 cmdr; /* Command Register */
+ u8 __pad1;
+ u8 mode;
+ u8 timr;
+ u8 xon;
+ u8 xoff;
+ u8 tcr;
+ u8 dafo;
+ u8 rfc;
+ u8 __pad2;
+ u8 xbcl; /* Transmit Byte Count Low */
+ u8 xbch; /* Transmit Byte Count High */
+ u8 ccr0;
+ u8 ccr1;
+ u8 ccr2;
+ u8 ccr3;
+ u8 tsax; /* Time-Slot Assignment Reg. Transmit */
+ u8 tsar; /* Time-Slot Assignment Reg. Receive */
+ u8 xccr; /* Transmit Channel Capacity Register */
+ u8 rccr; /* Receive Channel Capacity Register */
+ u8 bgr; /* Baud Rate Generator Register */
+ u8 tic; /* Transmit Immediate Character */
+ u8 mxn; /* Mask XON Character */
+ u8 mxf; /* Mask XOFF Character */
+ u8 iva; /* Interrupt Vector Address */
+ u8 ipc;
+ u8 imr0; /* Interrupt Mask Register 0 */
+ u8 imr1; /* Interrupt Mask Register 1 */
+ u8 pvr;
+ u8 pim; /* Port Interrupt Mask */
+ u8 pcr;
+ u8 ccr4;
+};
+
+struct sab82532_async_rw_regs { /* Read/Write registers */
+ u8 __pad1[0x20];
+ u8 __pad2;
+ u8 __pad3;
+ u8 mode;
+ u8 timr;
+ u8 xon;
+ u8 xoff;
+ u8 tcr;
+ u8 dafo;
+ u8 rfc;
+ u8 __pad4;
+ u8 __pad5;
+ u8 __pad6;
+ u8 ccr0;
+ u8 ccr1;
+ u8 ccr2;
+ u8 ccr3;
+ u8 __pad7;
+ u8 __pad8;
+ u8 __pad9;
+ u8 __pad10;
+ u8 __pad11;
+ u8 __pad12;
+ u8 __pad13;
+ u8 __pad14;
+ u8 __pad15;
+ u8 ipc;
+ u8 __pad16;
+ u8 __pad17;
+ u8 pvr;
+ u8 __pad18;
+ u8 pcr;
+ u8 ccr4;
+};
+
+union sab82532_async_regs {
+ __volatile__ struct sab82532_async_rd_regs r;
+ __volatile__ struct sab82532_async_wr_regs w;
+ __volatile__ struct sab82532_async_rw_regs rw;
+};
+
+union sab82532_irq_status {
+ unsigned short stat;
+ struct {
+ unsigned char isr0;
+ unsigned char isr1;
+ } sreg;
+};
+
+/* irqflags bits */
+#define SAB82532_ALLS 0x00000001
+#define SAB82532_XPR 0x00000002
+#define SAB82532_REGS_PENDING 0x00000004
+
+/* RFIFO Status Byte */
+#define SAB82532_RSTAT_PE 0x80
+#define SAB82532_RSTAT_FE 0x40
+#define SAB82532_RSTAT_PARITY 0x01
+
+/* Status Register (STAR) */
+#define SAB82532_STAR_XDOV 0x80
+#define SAB82532_STAR_XFW 0x40
+#define SAB82532_STAR_RFNE 0x20
+#define SAB82532_STAR_FCS 0x10
+#define SAB82532_STAR_TEC 0x08
+#define SAB82532_STAR_CEC 0x04
+#define SAB82532_STAR_CTS 0x02
+
+/* Command Register (CMDR) */
+#define SAB82532_CMDR_RMC 0x80
+#define SAB82532_CMDR_RRES 0x40
+#define SAB82532_CMDR_RFRD 0x20
+#define SAB82532_CMDR_STI 0x10
+#define SAB82532_CMDR_XF 0x08
+#define SAB82532_CMDR_XRES 0x01
+
+/* Mode Register (MODE) */
+#define SAB82532_MODE_FRTS 0x40
+#define SAB82532_MODE_FCTS 0x20
+#define SAB82532_MODE_FLON 0x10
+#define SAB82532_MODE_RAC 0x08
+#define SAB82532_MODE_RTS 0x04
+#define SAB82532_MODE_TRS 0x02
+#define SAB82532_MODE_TLP 0x01
+
+/* Timer Register (TIMR) */
+#define SAB82532_TIMR_CNT_MASK 0xe0
+#define SAB82532_TIMR_VALUE_MASK 0x1f
+
+/* Data Format (DAFO) */
+#define SAB82532_DAFO_XBRK 0x40
+#define SAB82532_DAFO_STOP 0x20
+#define SAB82532_DAFO_PAR_SPACE 0x00
+#define SAB82532_DAFO_PAR_ODD 0x08
+#define SAB82532_DAFO_PAR_EVEN 0x10
+#define SAB82532_DAFO_PAR_MARK 0x18
+#define SAB82532_DAFO_PARE 0x04
+#define SAB82532_DAFO_CHL8 0x00
+#define SAB82532_DAFO_CHL7 0x01
+#define SAB82532_DAFO_CHL6 0x02
+#define SAB82532_DAFO_CHL5 0x03
+
+/* RFIFO Control Register (RFC) */
+#define SAB82532_RFC_DPS 0x40
+#define SAB82532_RFC_DXS 0x20
+#define SAB82532_RFC_RFDF 0x10
+#define SAB82532_RFC_RFTH_1 0x00
+#define SAB82532_RFC_RFTH_4 0x04
+#define SAB82532_RFC_RFTH_16 0x08
+#define SAB82532_RFC_RFTH_32 0x0c
+#define SAB82532_RFC_TCDE 0x01
+
+/* Received Byte Count High (RBCH) */
+#define SAB82532_RBCH_DMA 0x80
+#define SAB82532_RBCH_CAS 0x20
+
+/* Transmit Byte Count High (XBCH) */
+#define SAB82532_XBCH_DMA 0x80
+#define SAB82532_XBCH_CAS 0x20
+#define SAB82532_XBCH_XC 0x10
+
+/* Channel Configuration Register 0 (CCR0) */
+#define SAB82532_CCR0_PU 0x80
+#define SAB82532_CCR0_MCE 0x40
+#define SAB82532_CCR0_SC_NRZ 0x00
+#define SAB82532_CCR0_SC_NRZI 0x08
+#define SAB82532_CCR0_SC_FM0 0x10
+#define SAB82532_CCR0_SC_FM1 0x14
+#define SAB82532_CCR0_SC_MANCH 0x18
+#define SAB82532_CCR0_SM_HDLC 0x00
+#define SAB82532_CCR0_SM_SDLC_LOOP 0x01
+#define SAB82532_CCR0_SM_BISYNC 0x02
+#define SAB82532_CCR0_SM_ASYNC 0x03
+
+/* Channel Configuration Register 1 (CCR1) */
+#define SAB82532_CCR1_ODS 0x10
+#define SAB82532_CCR1_BCR 0x08
+#define SAB82532_CCR1_CM_MASK 0x07
+
+/* Channel Configuration Register 2 (CCR2) */
+#define SAB82532_CCR2_SOC1 0x80
+#define SAB82532_CCR2_SOC0 0x40
+#define SAB82532_CCR2_BR9 0x80
+#define SAB82532_CCR2_BR8 0x40
+#define SAB82532_CCR2_BDF 0x20
+#define SAB82532_CCR2_SSEL 0x10
+#define SAB82532_CCR2_XCS0 0x20
+#define SAB82532_CCR2_RCS0 0x10
+#define SAB82532_CCR2_TOE 0x08
+#define SAB82532_CCR2_RWX 0x04
+#define SAB82532_CCR2_DIV 0x01
+
+/* Channel Configuration Register 3 (CCR3) */
+#define SAB82532_CCR3_PSD 0x01
+
+/* Time Slot Assignment Register Transmit (TSAX) */
+#define SAB82532_TSAX_TSNX_MASK 0xfc
+#define SAB82532_TSAX_XCS2 0x02 /* see also CCR2 */
+#define SAB82532_TSAX_XCS1 0x01
+
+/* Time Slot Assignment Register Receive (TSAR) */
+#define SAB82532_TSAR_TSNR_MASK 0xfc
+#define SAB82532_TSAR_RCS2 0x02 /* see also CCR2 */
+#define SAB82532_TSAR_RCS1 0x01
+
+/* Version Status Register (VSTR) */
+#define SAB82532_VSTR_CD 0x80
+#define SAB82532_VSTR_DPLA 0x40
+#define SAB82532_VSTR_VN_MASK 0x0f
+#define SAB82532_VSTR_VN_1 0x00
+#define SAB82532_VSTR_VN_2 0x01
+#define SAB82532_VSTR_VN_3_2 0x02
+
+/* Global Interrupt Status Register (GIS) */
+#define SAB82532_GIS_PI 0x80
+#define SAB82532_GIS_ISA1 0x08
+#define SAB82532_GIS_ISA0 0x04
+#define SAB82532_GIS_ISB1 0x02
+#define SAB82532_GIS_ISB0 0x01
+
+/* Interrupt Vector Address (IVA) */
+#define SAB82532_IVA_MASK 0xf1
+
+/* Interrupt Port Configuration (IPC) */
+#define SAB82532_IPC_VIS 0x80
+#define SAB82532_IPC_SLA1 0x10
+#define SAB82532_IPC_SLA0 0x08
+#define SAB82532_IPC_CASM 0x04
+#define SAB82532_IPC_IC_OPEN_DRAIN 0x00
+#define SAB82532_IPC_IC_ACT_LOW 0x01
+#define SAB82532_IPC_IC_ACT_HIGH 0x03
+
+/* Interrupt Status Register 0 (ISR0) */
+#define SAB82532_ISR0_TCD 0x80
+#define SAB82532_ISR0_TIME 0x40
+#define SAB82532_ISR0_PERR 0x20
+#define SAB82532_ISR0_FERR 0x10
+#define SAB82532_ISR0_PLLA 0x08
+#define SAB82532_ISR0_CDSC 0x04
+#define SAB82532_ISR0_RFO 0x02
+#define SAB82532_ISR0_RPF 0x01
+
+/* Interrupt Status Register 1 (ISR1) */
+#define SAB82532_ISR1_BRK 0x80
+#define SAB82532_ISR1_BRKT 0x40
+#define SAB82532_ISR1_ALLS 0x20
+#define SAB82532_ISR1_XOFF 0x10
+#define SAB82532_ISR1_TIN 0x08
+#define SAB82532_ISR1_CSC 0x04
+#define SAB82532_ISR1_XON 0x02
+#define SAB82532_ISR1_XPR 0x01
+
+/* Interrupt Mask Register 0 (IMR0) */
+#define SAB82532_IMR0_TCD 0x80
+#define SAB82532_IMR0_TIME 0x40
+#define SAB82532_IMR0_PERR 0x20
+#define SAB82532_IMR0_FERR 0x10
+#define SAB82532_IMR0_PLLA 0x08
+#define SAB82532_IMR0_CDSC 0x04
+#define SAB82532_IMR0_RFO 0x02
+#define SAB82532_IMR0_RPF 0x01
+
+/* Interrupt Mask Register 1 (IMR1) */
+#define SAB82532_IMR1_BRK 0x80
+#define SAB82532_IMR1_BRKT 0x40
+#define SAB82532_IMR1_ALLS 0x20
+#define SAB82532_IMR1_XOFF 0x10
+#define SAB82532_IMR1_TIN 0x08
+#define SAB82532_IMR1_CSC 0x04
+#define SAB82532_IMR1_XON 0x02
+#define SAB82532_IMR1_XPR 0x01
+
+/* Port Interrupt Status Register (PIS) */
+#define SAB82532_PIS_SYNC_B 0x08
+#define SAB82532_PIS_DTR_B 0x04
+#define SAB82532_PIS_DTR_A 0x02
+#define SAB82532_PIS_SYNC_A 0x01
+
+/* Channel Configuration Register 4 (CCR4) */
+#define SAB82532_CCR4_MCK4 0x80
+#define SAB82532_CCR4_EBRG 0x40
+#define SAB82532_CCR4_TST1 0x20
+#define SAB82532_CCR4_ICD 0x10
+
+
+#endif /* !(_SUNSAB_H) */
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
new file mode 100644
index 00000000..92aa5455
--- /dev/null
+++ b/drivers/tty/serial/sunsu.c
@@ -0,0 +1,1608 @@
+/*
+ * su.c: Small serial driver for keyboard/mouse interface on sparc32/PCI
+ *
+ * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1998-1999 Pete Zaitcev (zaitcev@yahoo.com)
+ *
+ * This is mainly a variation of 8250.c, credits go to authors mentioned
+ * therein. In fact this driver should be merged into the generic 8250.c
+ * infrastructure perhaps using a 8250_sparc.c module.
+ *
+ * Fixed to use tty_get_baud_rate().
+ * Theodore Ts'o <tytso@mit.edu>, 2001-Oct-12
+ *
+ * Converted to new 2.5.x UART layer.
+ * David S. Miller (davem@davemloft.net), 2002-Jul-29
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/circ_buf.h>
+#include <linux/serial.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/slab.h>
+#ifdef CONFIG_SERIO
+#include <linux/serio.h>
+#endif
+#include <linux/serial_reg.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+
+#if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+
+#include "suncore.h"
+
+/* We are on a NS PC87303 clocked with 24.0 MHz, which results
+ * in a UART clock of 1.8462 MHz.
+ */
+#define SU_BASE_BAUD (1846200 / 16)
+
+enum su_type { SU_PORT_NONE, SU_PORT_MS, SU_PORT_KBD, SU_PORT_PORT };
+static char *su_typev[] = { "su(???)", "su(mouse)", "su(kbd)", "su(serial)" };
+
+/*
+ * Here we define the default xmit fifo size used for each type of UART.
+ */
+static const struct serial_uart_config uart_config[PORT_MAX_8250+1] = {
+ { "unknown", 1, 0 },
+ { "8250", 1, 0 },
+ { "16450", 1, 0 },
+ { "16550", 1, 0 },
+ { "16550A", 16, UART_CLEAR_FIFO | UART_USE_FIFO },
+ { "Cirrus", 1, 0 },
+ { "ST16650", 1, UART_CLEAR_FIFO | UART_STARTECH },
+ { "ST16650V2", 32, UART_CLEAR_FIFO | UART_USE_FIFO | UART_STARTECH },
+ { "TI16750", 64, UART_CLEAR_FIFO | UART_USE_FIFO },
+ { "Startech", 1, 0 },
+ { "16C950/954", 128, UART_CLEAR_FIFO | UART_USE_FIFO },
+ { "ST16654", 64, UART_CLEAR_FIFO | UART_USE_FIFO | UART_STARTECH },
+ { "XR16850", 128, UART_CLEAR_FIFO | UART_USE_FIFO | UART_STARTECH },
+ { "RSA", 2048, UART_CLEAR_FIFO | UART_USE_FIFO }
+};
+
+struct uart_sunsu_port {
+ struct uart_port port;
+ unsigned char acr;
+ unsigned char ier;
+ unsigned short rev;
+ unsigned char lcr;
+ unsigned int lsr_break_flag;
+ unsigned int cflag;
+
+ /* Probing information. */
+ enum su_type su_type;
+ unsigned int type_probed; /* XXX Stupid */
+ unsigned long reg_size;
+
+#ifdef CONFIG_SERIO
+ struct serio serio;
+ int serio_open;
+#endif
+};
+
+static unsigned int serial_in(struct uart_sunsu_port *up, int offset)
+{
+ offset <<= up->port.regshift;
+
+ switch (up->port.iotype) {
+ case UPIO_HUB6:
+ outb(up->port.hub6 - 1 + offset, up->port.iobase);
+ return inb(up->port.iobase + 1);
+
+ case UPIO_MEM:
+ return readb(up->port.membase + offset);
+
+ default:
+ return inb(up->port.iobase + offset);
+ }
+}
+
+static void serial_out(struct uart_sunsu_port *up, int offset, int value)
+{
+#ifndef CONFIG_SPARC64
+ /*
+ * MrCoffee has weird schematics: IRQ4 & P10(?) pins of SuperIO are
+	 * connected through a gate and then go to SlavIO. When IRQ4 goes
+	 * tristated, the gate outputs a logical one. Since we use
+	 * level-triggered interrupts, we get a lockup and watchdog reset.
+	 * We cannot mask the IRQ because the keyboard shares it with us
+	 * (word has it this was Bob Smelik's design).
+ * This problem is similar to what Alpha people suffer, see serial.c.
+ */
+ if (offset == UART_MCR)
+ value |= UART_MCR_OUT2;
+#endif
+ offset <<= up->port.regshift;
+
+ switch (up->port.iotype) {
+ case UPIO_HUB6:
+ outb(up->port.hub6 - 1 + offset, up->port.iobase);
+ outb(value, up->port.iobase + 1);
+ break;
+
+ case UPIO_MEM:
+ writeb(value, up->port.membase + offset);
+ break;
+
+ default:
+ outb(value, up->port.iobase + offset);
+ }
+}
+
+/*
+ * We used to support using pause I/O for certain machines. We
+ * haven't supported this for a while, but just in case it's badly
+ * needed for certain old 386 machines, I've left these #define's
+ * in....
+ */
+#define serial_inp(up, offset) serial_in(up, offset)
+#define serial_outp(up, offset, value) serial_out(up, offset, value)
+
+
+/*
+ * For the 16C950
+ */
+static void serial_icr_write(struct uart_sunsu_port *up, int offset, int value)
+{
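+	/* 16C950 indexed (ICR) registers are written indirectly: the index goes to SCR, the data to ICR. */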
+ serial_out(up, UART_SCR, offset);
+ serial_out(up, UART_ICR, value);
+}
+
+#if 0 /* Unused currently */
+static unsigned int serial_icr_read(struct uart_sunsu_port *up, int offset)
+{
+ unsigned int value;
+
+ serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
+ serial_out(up, UART_SCR, offset);
+ value = serial_in(up, UART_ICR);
+ serial_icr_write(up, UART_ACR, up->acr);
+
+ return value;
+}
+#endif
+
+#ifdef CONFIG_SERIAL_8250_RSA
+/*
+ * Attempts to turn on the RSA FIFO. Returns zero on failure.
+ * We set the port uart clock rate if we succeed.
+ */
+static int __enable_rsa(struct uart_sunsu_port *up)
+{
+ unsigned char mode;
+ int result;
+
+ mode = serial_inp(up, UART_RSA_MSR);
+ result = mode & UART_RSA_MSR_FIFO;
+
+ if (!result) {
+ serial_outp(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO);
+ mode = serial_inp(up, UART_RSA_MSR);
+ result = mode & UART_RSA_MSR_FIFO;
+ }
+
+ if (result)
+ up->port.uartclk = SERIAL_RSA_BAUD_BASE * 16;
+
+ return result;
+}
+
+static void enable_rsa(struct uart_sunsu_port *up)
+{
+ if (up->port.type == PORT_RSA) {
+ if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
+ spin_lock_irq(&up->port.lock);
+ __enable_rsa(up);
+ spin_unlock_irq(&up->port.lock);
+ }
+ if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
+ serial_outp(up, UART_RSA_FRR, 0);
+ }
+}
+
+/*
+ * Attempts to turn off the RSA FIFO.
+ * It is unknown why interrupts were disabled in here; note that this
+ * function takes the port spinlock itself, so it must not be called
+ * with that lock already held.
+ */
+static void disable_rsa(struct uart_sunsu_port *up)
+{
+ unsigned char mode;
+ int result;
+
+ if (up->port.type == PORT_RSA &&
+ up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
+ spin_lock_irq(&up->port.lock);
+
+ mode = serial_inp(up, UART_RSA_MSR);
+ result = !(mode & UART_RSA_MSR_FIFO);
+
+ if (!result) {
+ serial_outp(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO);
+ mode = serial_inp(up, UART_RSA_MSR);
+ result = !(mode & UART_RSA_MSR_FIFO);
+ }
+
+ if (result)
+ up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
+ spin_unlock_irq(&up->port.lock);
+ }
+}
+#endif /* CONFIG_SERIAL_8250_RSA */
+
+static inline void __stop_tx(struct uart_sunsu_port *p)
+{
+ if (p->ier & UART_IER_THRI) {
+ p->ier &= ~UART_IER_THRI;
+ serial_out(p, UART_IER, p->ier);
+ }
+}
+
+static void sunsu_stop_tx(struct uart_port *port)
+{
+ struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+
+ __stop_tx(up);
+
+ /*
+ * We really want to stop the transmitter from sending.
+ */
+ if (up->port.type == PORT_16C950) {
+ up->acr |= UART_ACR_TXDIS;
+ serial_icr_write(up, UART_ACR, up->acr);
+ }
+}
+
+static void sunsu_start_tx(struct uart_port *port)
+{
+ struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+
+ if (!(up->ier & UART_IER_THRI)) {
+ up->ier |= UART_IER_THRI;
+ serial_out(up, UART_IER, up->ier);
+ }
+
+ /*
+ * Re-enable the transmitter if we disabled it.
+ */
+ if (up->port.type == PORT_16C950 && up->acr & UART_ACR_TXDIS) {
+ up->acr &= ~UART_ACR_TXDIS;
+ serial_icr_write(up, UART_ACR, up->acr);
+ }
+}
+
+static void sunsu_stop_rx(struct uart_port *port)
+{
+ struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+
+ up->ier &= ~UART_IER_RLSI;
+ up->port.read_status_mask &= ~UART_LSR_DR;
+ serial_out(up, UART_IER, up->ier);
+}
+
+static void sunsu_enable_ms(struct uart_port *port)
+{
+ struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ up->ier |= UART_IER_MSI;
+ serial_out(up, UART_IER, up->ier);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static struct tty_struct *
+receive_chars(struct uart_sunsu_port *up, unsigned char *status)
+{
+ struct tty_struct *tty = up->port.state->port.tty;
+ unsigned char ch, flag;
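+	/* Drain at most 256 characters per call so a flood of input cannot monopolize the CPU. */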
+ int max_count = 256;
+ int saw_console_brk = 0;
+
+ do {
+ ch = serial_inp(up, UART_RX);
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+
+ if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
+ UART_LSR_FE | UART_LSR_OE))) {
+ /*
+ * For statistics only
+ */
+ if (*status & UART_LSR_BI) {
+ *status &= ~(UART_LSR_FE | UART_LSR_PE);
+ up->port.icount.brk++;
+ if (up->port.cons != NULL &&
+ up->port.line == up->port.cons->index)
+ saw_console_brk = 1;
+ /*
+ * We do the SysRQ and SAK checking
+ * here because otherwise the break
+ * may get masked by ignore_status_mask
+ * or read_status_mask.
+ */
+ if (uart_handle_break(&up->port))
+ goto ignore_char;
+ } else if (*status & UART_LSR_PE)
+ up->port.icount.parity++;
+ else if (*status & UART_LSR_FE)
+ up->port.icount.frame++;
+ if (*status & UART_LSR_OE)
+ up->port.icount.overrun++;
+
+ /*
+				 * Mask off conditions which should be ignored.
+ */
+ *status &= up->port.read_status_mask;
+
+ if (up->port.cons != NULL &&
+ up->port.line == up->port.cons->index) {
+ /* Recover the break flag from console xmit */
+ *status |= up->lsr_break_flag;
+ up->lsr_break_flag = 0;
+ }
+
+ if (*status & UART_LSR_BI) {
+ flag = TTY_BREAK;
+ } else if (*status & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (*status & UART_LSR_FE)
+ flag = TTY_FRAME;
+ }
+ if (uart_handle_sysrq_char(&up->port, ch))
+ goto ignore_char;
+ if ((*status & up->port.ignore_status_mask) == 0)
+ tty_insert_flip_char(tty, ch, flag);
+ if (*status & UART_LSR_OE)
+ /*
+ * Overrun is special, since it's reported
+ * immediately, and doesn't affect the current
+ * character.
+ */
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ ignore_char:
+ *status = serial_inp(up, UART_LSR);
+ } while ((*status & UART_LSR_DR) && (max_count-- > 0));
+
+ if (saw_console_brk)
+ sun_do_break();
+
+ return tty;
+}
+
+static void transmit_chars(struct uart_sunsu_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int count;
+
+ if (up->port.x_char) {
+ serial_outp(up, UART_TX, up->port.x_char);
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ return;
+ }
+ if (uart_tx_stopped(&up->port)) {
+ sunsu_stop_tx(&up->port);
+ return;
+ }
+ if (uart_circ_empty(xmit)) {
+ __stop_tx(up);
+ return;
+ }
+
+ count = up->port.fifosize;
+ do {
+ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ if (uart_circ_empty(xmit))
+ __stop_tx(up);
+}
+
+static void check_modem_status(struct uart_sunsu_port *up)
+{
+ int status;
+
+ status = serial_in(up, UART_MSR);
+
+ if ((status & UART_MSR_ANY_DELTA) == 0)
+ return;
+
+ if (status & UART_MSR_TERI)
+ up->port.icount.rng++;
+ if (status & UART_MSR_DDSR)
+ up->port.icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
+ if (status & UART_MSR_DCTS)
+ uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
+
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+}
+
+static irqreturn_t sunsu_serial_interrupt(int irq, void *dev_id)
+{
+ struct uart_sunsu_port *up = dev_id;
+ unsigned long flags;
+ unsigned char status;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ do {
+ struct tty_struct *tty;
+
+ status = serial_inp(up, UART_LSR);
+ tty = NULL;
+ if (status & UART_LSR_DR)
+ tty = receive_chars(up, &status);
+ check_modem_status(up);
+ if (status & UART_LSR_THRE)
+ transmit_chars(up);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ if (tty)
+ tty_flip_buffer_push(tty);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ } while (!(serial_in(up, UART_IIR) & UART_IIR_NO_INT));
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/* Separate interrupt handling path for keyboard/mouse ports. */
+
+static void
+sunsu_change_speed(struct uart_port *port, unsigned int cflag,
+ unsigned int iflag, unsigned int quot);
+
+static void sunsu_change_mouse_baud(struct uart_sunsu_port *up)
+{
+ unsigned int cur_cflag = up->cflag;
+ int quot, new_baud;
+
+ up->cflag &= ~CBAUD;
+ up->cflag |= suncore_mouse_baud_cflag_next(cur_cflag, &new_baud);
+
+ quot = up->port.uartclk / (16 * new_baud);
+
+ sunsu_change_speed(&up->port, up->cflag, 0, quot);
+}
+
+static void receive_kbd_ms_chars(struct uart_sunsu_port *up, int is_break)
+{
+ do {
+ unsigned char ch = serial_inp(up, UART_RX);
+
+ /* Stop-A is handled by drivers/char/keyboard.c now. */
+ if (up->su_type == SU_PORT_KBD) {
+#ifdef CONFIG_SERIO
+ serio_interrupt(&up->serio, ch, 0);
+#endif
+ } else if (up->su_type == SU_PORT_MS) {
+ int ret = suncore_mouse_baud_detection(ch, is_break);
+
+ switch (ret) {
+ case 2:
+ sunsu_change_mouse_baud(up);
+ /* fallthru */
+ case 1:
+ break;
+
+ case 0:
+#ifdef CONFIG_SERIO
+ serio_interrupt(&up->serio, ch, 0);
+#endif
+ break;
+ };
+ }
+ } while (serial_in(up, UART_LSR) & UART_LSR_DR);
+}
+
+static irqreturn_t sunsu_kbd_ms_interrupt(int irq, void *dev_id)
+{
+ struct uart_sunsu_port *up = dev_id;
+
+ if (!(serial_in(up, UART_IIR) & UART_IIR_NO_INT)) {
+ unsigned char status = serial_inp(up, UART_LSR);
+
+ if ((status & UART_LSR_DR) || (status & UART_LSR_BI))
+ receive_kbd_ms_chars(up, (status & UART_LSR_BI) != 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int sunsu_tx_empty(struct uart_port *port)
+{
+ struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return ret;
+}
+
+static unsigned int sunsu_get_mctrl(struct uart_port *port)
+{
+ struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ unsigned char status;
+ unsigned int ret;
+
+ status = serial_in(up, UART_MSR);
+
+ ret = 0;
+ if (status & UART_MSR_DCD)
+ ret |= TIOCM_CAR;
+ if (status & UART_MSR_RI)
+ ret |= TIOCM_RNG;
+ if (status & UART_MSR_DSR)
+ ret |= TIOCM_DSR;
+ if (status & UART_MSR_CTS)
+ ret |= TIOCM_CTS;
+ return ret;
+}
+
+static void sunsu_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ unsigned char mcr = 0;
+
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_OUT1)
+ mcr |= UART_MCR_OUT1;
+ if (mctrl & TIOCM_OUT2)
+ mcr |= UART_MCR_OUT2;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+ serial_out(up, UART_MCR, mcr);
+}
+
+static void sunsu_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+ else
+ up->lcr &= ~UART_LCR_SBC;
+ serial_out(up, UART_LCR, up->lcr);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static int sunsu_startup(struct uart_port *port)
+{
+ struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ unsigned long flags;
+ int retval;
+
+ if (up->port.type == PORT_16C950) {
+ /* Wake up and initialize UART */
+ up->acr = 0;
+ serial_outp(up, UART_LCR, 0xBF);
+ serial_outp(up, UART_EFR, UART_EFR_ECB);
+ serial_outp(up, UART_IER, 0);
+ serial_outp(up, UART_LCR, 0);
+ serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
+ serial_outp(up, UART_LCR, 0xBF);
+ serial_outp(up, UART_EFR, UART_EFR_ECB);
+ serial_outp(up, UART_LCR, 0);
+ }
+
+#ifdef CONFIG_SERIAL_8250_RSA
+ /*
+ * If this is an RSA port, see if we can kick it up to the
+ * higher speed clock.
+ */
+ enable_rsa(up);
+#endif
+
+ /*
+ * Clear the FIFO buffers and disable them.
+ * (they will be reenabled in set_termios())
+ */
+ if (uart_config[up->port.type].flags & UART_CLEAR_FIFO) {
+ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ serial_outp(up, UART_FCR, 0);
+ }
+
+ /*
+ * Clear the interrupt registers.
+ */
+ (void) serial_inp(up, UART_LSR);
+ (void) serial_inp(up, UART_RX);
+ (void) serial_inp(up, UART_IIR);
+ (void) serial_inp(up, UART_MSR);
+
+ /*
+ * At this point, there's no way the LSR could still be 0xff;
+ * if it is, then bail out, because there's likely no UART
+ * here.
+ */
+ if (!(up->port.flags & UPF_BUGGY_UART) &&
+ (serial_inp(up, UART_LSR) == 0xff)) {
+ printk("ttyS%d: LSR safety check engaged!\n", up->port.line);
+ return -ENODEV;
+ }
+
+ if (up->su_type != SU_PORT_PORT) {
+ retval = request_irq(up->port.irq, sunsu_kbd_ms_interrupt,
+ IRQF_SHARED, su_typev[up->su_type], up);
+ } else {
+ retval = request_irq(up->port.irq, sunsu_serial_interrupt,
+ IRQF_SHARED, su_typev[up->su_type], up);
+ }
+ if (retval) {
+ printk("su: Cannot register IRQ %d\n", up->port.irq);
+ return retval;
+ }
+
+ /*
+ * Now, initialize the UART
+ */
+ serial_outp(up, UART_LCR, UART_LCR_WLEN8);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ up->port.mctrl |= TIOCM_OUT2;
+
+ sunsu_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /*
+ * Finally, enable interrupts. Note: Modem status interrupts
+ * are set via set_termios(), which will be occurring imminently
+ * anyway, so we don't enable them here.
+ */
+ up->ier = UART_IER_RLSI | UART_IER_RDI;
+ serial_outp(up, UART_IER, up->ier);
+
+ if (up->port.flags & UPF_FOURPORT) {
+ unsigned int icp;
+ /*
+ * Enable interrupts on the AST Fourport board
+ */
+ icp = (up->port.iobase & 0xfe0) | 0x01f;
+ outb_p(0x80, icp);
+ (void) inb_p(icp);
+ }
+
+ /*
+ * And clear the interrupt registers again for luck.
+ */
+ (void) serial_inp(up, UART_LSR);
+ (void) serial_inp(up, UART_RX);
+ (void) serial_inp(up, UART_IIR);
+ (void) serial_inp(up, UART_MSR);
+
+ return 0;
+}
+
+static void sunsu_shutdown(struct uart_port *port)
+{
+ struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ unsigned long flags;
+
+ /*
+ * Disable interrupts from this port
+ */
+ up->ier = 0;
+ serial_outp(up, UART_IER, 0);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (up->port.flags & UPF_FOURPORT) {
+ /* reset interrupts on the AST Fourport board */
+ inb((up->port.iobase & 0xfe0) | 0x1f);
+ up->port.mctrl |= TIOCM_OUT1;
+ } else
+ up->port.mctrl &= ~TIOCM_OUT2;
+
+ sunsu_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ /*
+ * Disable break condition and FIFOs
+ */
+ serial_out(up, UART_LCR, serial_inp(up, UART_LCR) & ~UART_LCR_SBC);
+ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR |
+ UART_FCR_CLEAR_XMIT);
+ serial_outp(up, UART_FCR, 0);
+
+#ifdef CONFIG_SERIAL_8250_RSA
+ /*
+ * Reset the RSA board back to 115kbps compat mode.
+ */
+ disable_rsa(up);
+#endif
+
+ /*
+ * Read data port to reset things.
+ */
+ (void) serial_in(up, UART_RX);
+
+ free_irq(up->port.irq, up);
+}
+
+static void
+sunsu_change_speed(struct uart_port *port, unsigned int cflag,
+ unsigned int iflag, unsigned int quot)
+{
+ struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+ unsigned char cval, fcr = 0;
+ unsigned long flags;
+
+ switch (cflag & CSIZE) {
+ case CS5:
+ cval = 0x00;
+ break;
+ case CS6:
+ cval = 0x01;
+ break;
+ case CS7:
+ cval = 0x02;
+ break;
+ default:
+ case CS8:
+ cval = 0x03;
+ break;
+ }
+
+ if (cflag & CSTOPB)
+ cval |= 0x04;
+ if (cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+#ifdef CMSPAR
+ if (cflag & CMSPAR)
+ cval |= UART_LCR_SPAR;
+#endif
+
+ /*
+ * Work around a bug in the Oxford Semiconductor 952 rev B
+ * chip which causes it to seriously miscalculate baud rates
+ * when DLL is 0.
+ */
+ if ((quot & 0xff) == 0 && up->port.type == PORT_16C950 &&
+ up->rev == 0x5201)
+ quot ++;
+
+ if (uart_config[up->port.type].flags & UART_USE_FIFO) {
+ if ((up->port.uartclk / quot) < (2400 * 16))
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
+#ifdef CONFIG_SERIAL_8250_RSA
+ else if (up->port.type == PORT_RSA)
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_14;
+#endif
+ else
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_8;
+ }
+ if (up->port.type == PORT_16750)
+ fcr |= UART_FCR7_64BYTE;
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, cflag, (port->uartclk / (16 * quot)));
+
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (iflag & INPCK)
+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (iflag & (BRKINT | PARMRK))
+ up->port.read_status_mask |= UART_LSR_BI;
+
+ /*
+	 * Characters to ignore
+ */
+ up->port.ignore_status_mask = 0;
+ if (iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+ if (iflag & IGNBRK) {
+ up->port.ignore_status_mask |= UART_LSR_BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (iflag & IGNPAR)
+ up->port.ignore_status_mask |= UART_LSR_OE;
+ }
+
+ /*
+ * ignore all characters if CREAD is not set
+ */
+ if ((cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= UART_LSR_DR;
+
+ /*
+ * CTS flow control flag and modem status interrupts
+ */
+ up->ier &= ~UART_IER_MSI;
+ if (UART_ENABLE_MS(&up->port, cflag))
+ up->ier |= UART_IER_MSI;
+
+ serial_out(up, UART_IER, up->ier);
+
+ if (uart_config[up->port.type].flags & UART_STARTECH) {
+ serial_outp(up, UART_LCR, 0xBF);
+ serial_outp(up, UART_EFR, cflag & CRTSCTS ? UART_EFR_CTS :0);
+ }
+ serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
+ serial_outp(up, UART_DLL, quot & 0xff); /* LS of divisor */
+ serial_outp(up, UART_DLM, quot >> 8); /* MS of divisor */
+ if (up->port.type == PORT_16750)
+ serial_outp(up, UART_FCR, fcr); /* set fcr */
+ serial_outp(up, UART_LCR, cval); /* reset DLAB */
+ up->lcr = cval; /* Save LCR */
+ if (up->port.type != PORT_16750) {
+ if (fcr & UART_FCR_ENABLE_FIFO) {
+ /* emulated UARTs (Lucent Venus 167x) need two steps */
+ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ }
+ serial_outp(up, UART_FCR, fcr); /* set fcr */
+ }
+
+ up->cflag = cflag;
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static void
+sunsu_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int baud, quot;
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+ quot = uart_get_divisor(port, baud);
+
+ sunsu_change_speed(port, termios->c_cflag, termios->c_iflag, quot);
+}
+
+static void sunsu_release_port(struct uart_port *port)
+{
+}
+
+static int sunsu_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void sunsu_config_port(struct uart_port *port, int flags)
+{
+ struct uart_sunsu_port *up = (struct uart_sunsu_port *) port;
+
+ if (flags & UART_CONFIG_TYPE) {
+ /*
+ * We are supposed to call autoconfig here, but this requires
+ * splitting all the OBP probing crap from the UART probing.
+ * We'll do it when we kill sunsu.c altogether.
+ */
+ port->type = up->type_probed; /* XXX */
+ }
+}
+
+static int
+sunsu_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ return -EINVAL;
+}
+
+static const char *
+sunsu_type(struct uart_port *port)
+{
+ int type = port->type;
+
+ if (type >= ARRAY_SIZE(uart_config))
+ type = 0;
+ return uart_config[type].name;
+}
+
+static struct uart_ops sunsu_pops = {
+ .tx_empty = sunsu_tx_empty,
+ .set_mctrl = sunsu_set_mctrl,
+ .get_mctrl = sunsu_get_mctrl,
+ .stop_tx = sunsu_stop_tx,
+ .start_tx = sunsu_start_tx,
+ .stop_rx = sunsu_stop_rx,
+ .enable_ms = sunsu_enable_ms,
+ .break_ctl = sunsu_break_ctl,
+ .startup = sunsu_startup,
+ .shutdown = sunsu_shutdown,
+ .set_termios = sunsu_set_termios,
+ .type = sunsu_type,
+ .release_port = sunsu_release_port,
+ .request_port = sunsu_request_port,
+ .config_port = sunsu_config_port,
+ .verify_port = sunsu_verify_port,
+};
+
+#define UART_NR 4
+
+static struct uart_sunsu_port sunsu_ports[UART_NR];
+
+#ifdef CONFIG_SERIO
+
+static DEFINE_SPINLOCK(sunsu_serio_lock);
+
+static int sunsu_serio_write(struct serio *serio, unsigned char ch)
+{
+ struct uart_sunsu_port *up = serio->port_data;
+ unsigned long flags;
+ int lsr;
+
+ spin_lock_irqsave(&sunsu_serio_lock, flags);
+
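+	/* Busy-wait until the transmit holding register is empty. */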
+ do {
+ lsr = serial_in(up, UART_LSR);
+ } while (!(lsr & UART_LSR_THRE));
+
+ /* Send the character out. */
+ serial_out(up, UART_TX, ch);
+
+ spin_unlock_irqrestore(&sunsu_serio_lock, flags);
+
+ return 0;
+}
+
+static int sunsu_serio_open(struct serio *serio)
+{
+ struct uart_sunsu_port *up = serio->port_data;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&sunsu_serio_lock, flags);
+ if (!up->serio_open) {
+ up->serio_open = 1;
+ ret = 0;
+ } else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&sunsu_serio_lock, flags);
+
+ return ret;
+}
+
+static void sunsu_serio_close(struct serio *serio)
+{
+ struct uart_sunsu_port *up = serio->port_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sunsu_serio_lock, flags);
+ up->serio_open = 0;
+ spin_unlock_irqrestore(&sunsu_serio_lock, flags);
+}
+
+#endif /* CONFIG_SERIO */
+
+static void sunsu_autoconfig(struct uart_sunsu_port *up)
+{
+ unsigned char status1, status2, scratch, scratch2, scratch3;
+ unsigned char save_lcr, save_mcr;
+ unsigned long flags;
+
+ if (up->su_type == SU_PORT_NONE)
+ return;
+
+ up->type_probed = PORT_UNKNOWN;
+ up->port.iotype = UPIO_MEM;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ if (!(up->port.flags & UPF_BUGGY_UART)) {
+ /*
+ * Do a simple existence test first; if we fail this, there's
+ * no point trying anything else.
+ *
+		 * 0x80 is used as a nonsense port to guard against false
+		 * positives due to ISA bus float. The assumption is that
+		 * 0x80 is a non-existent port, which should be safe since
+ * include/asm/io.h also makes this assumption.
+ */
+ scratch = serial_inp(up, UART_IER);
+ serial_outp(up, UART_IER, 0);
+#ifdef __i386__
+ outb(0xff, 0x080);
+#endif
+ scratch2 = serial_inp(up, UART_IER);
+ serial_outp(up, UART_IER, 0x0f);
+#ifdef __i386__
+ outb(0, 0x080);
+#endif
+ scratch3 = serial_inp(up, UART_IER);
+ serial_outp(up, UART_IER, scratch);
+ if (scratch2 != 0 || scratch3 != 0x0F)
+ goto out; /* We failed; there's nothing here */
+ }
+
+ save_mcr = serial_in(up, UART_MCR);
+ save_lcr = serial_in(up, UART_LCR);
+
+ /*
+ * Check to see if a UART is really there. Certain broken
+ * internal modems based on the Rockwell chipset fail this
+ * test, because they apparently don't implement the loopback
+ * test mode. So this test is skipped on the COM 1 through
+ * COM 4 ports. This *should* be safe, since no board
+ * manufacturer would be stupid enough to design a board
+ * that conflicts with COM 1-4 --- we hope!
+ */
+ if (!(up->port.flags & UPF_SKIP_TEST)) {
+ serial_outp(up, UART_MCR, UART_MCR_LOOP | 0x0A);
+ status1 = serial_inp(up, UART_MSR) & 0xF0;
+ serial_outp(up, UART_MCR, save_mcr);
+ if (status1 != 0x90)
+ goto out; /* We failed loopback test */
+ }
+ serial_outp(up, UART_LCR, 0xBF); /* set up for StarTech test */
+ serial_outp(up, UART_EFR, 0); /* EFR is the same as FCR */
+ serial_outp(up, UART_LCR, 0);
+ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
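+	/* With the FIFO enabled, IIR bits 7:6 distinguish the UART type (see the switch below: 16450, unknown, 16550, 16550A). */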
+ scratch = serial_in(up, UART_IIR) >> 6;
+ switch (scratch) {
+ case 0:
+ up->port.type = PORT_16450;
+ break;
+ case 1:
+ up->port.type = PORT_UNKNOWN;
+ break;
+ case 2:
+ up->port.type = PORT_16550;
+ break;
+ case 3:
+ up->port.type = PORT_16550A;
+ break;
+ }
+ if (up->port.type == PORT_16550A) {
+ /* Check for Startech UART's */
+ serial_outp(up, UART_LCR, UART_LCR_DLAB);
+ if (serial_in(up, UART_EFR) == 0) {
+ up->port.type = PORT_16650;
+ } else {
+ serial_outp(up, UART_LCR, 0xBF);
+ if (serial_in(up, UART_EFR) == 0)
+ up->port.type = PORT_16650V2;
+ }
+ }
+ if (up->port.type == PORT_16550A) {
+ /* Check for TI 16750 */
+ serial_outp(up, UART_LCR, save_lcr | UART_LCR_DLAB);
+ serial_outp(up, UART_FCR,
+ UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
+ scratch = serial_in(up, UART_IIR) >> 5;
+ if (scratch == 7) {
+ /*
+ * If this is a 16750, and not a cheap UART
+ * clone, then it should only go into 64 byte
+ * mode if the UART_FCR7_64BYTE bit was set
+ * while UART_LCR_DLAB was latched.
+ */
+ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_outp(up, UART_LCR, 0);
+ serial_outp(up, UART_FCR,
+ UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
+ scratch = serial_in(up, UART_IIR) >> 5;
+ if (scratch == 6)
+ up->port.type = PORT_16750;
+ }
+ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ }
+ serial_outp(up, UART_LCR, save_lcr);
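+	/* A 16450 has a working scratch register; an original 8250 does not, so probe it to tell them apart. */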
+ if (up->port.type == PORT_16450) {
+ scratch = serial_in(up, UART_SCR);
+ serial_outp(up, UART_SCR, 0xa5);
+ status1 = serial_in(up, UART_SCR);
+ serial_outp(up, UART_SCR, 0x5a);
+ status2 = serial_in(up, UART_SCR);
+ serial_outp(up, UART_SCR, scratch);
+
+ if ((status1 != 0xa5) || (status2 != 0x5a))
+ up->port.type = PORT_8250;
+ }
+
+ up->port.fifosize = uart_config[up->port.type].dfl_xmit_fifo_size;
+
+ if (up->port.type == PORT_UNKNOWN)
+ goto out;
+ up->type_probed = up->port.type; /* XXX */
+
+ /*
+ * Reset the UART.
+ */
+#ifdef CONFIG_SERIAL_8250_RSA
+ if (up->port.type == PORT_RSA)
+ serial_outp(up, UART_RSA_FRR, 0);
+#endif
+ serial_outp(up, UART_MCR, save_mcr);
+ serial_outp(up, UART_FCR, (UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR |
+ UART_FCR_CLEAR_XMIT));
+ serial_outp(up, UART_FCR, 0);
+ (void)serial_in(up, UART_RX);
+ serial_outp(up, UART_IER, 0);
+
+out:
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static struct uart_driver sunsu_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "sunsu",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+};
+
+static int __devinit sunsu_kbd_ms_init(struct uart_sunsu_port *up)
+{
+ int quot, baud;
+#ifdef CONFIG_SERIO
+ struct serio *serio;
+#endif
+
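+	/* Sun keyboards talk at 1200 baud, Sun mice at 4800 baud. */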
+ if (up->su_type == SU_PORT_KBD) {
+ up->cflag = B1200 | CS8 | CLOCAL | CREAD;
+ baud = 1200;
+ } else {
+ up->cflag = B4800 | CS8 | CLOCAL | CREAD;
+ baud = 4800;
+ }
+ quot = up->port.uartclk / (16 * baud);
+
+ sunsu_autoconfig(up);
+ if (up->port.type == PORT_UNKNOWN)
+ return -ENODEV;
+
+ printk("%s: %s port at %llx, irq %u\n",
+ up->port.dev->of_node->full_name,
+ (up->su_type == SU_PORT_KBD) ? "Keyboard" : "Mouse",
+ (unsigned long long) up->port.mapbase,
+ up->port.irq);
+
+#ifdef CONFIG_SERIO
+ serio = &up->serio;
+ serio->port_data = up;
+
+ serio->id.type = SERIO_RS232;
+ if (up->su_type == SU_PORT_KBD) {
+ serio->id.proto = SERIO_SUNKBD;
+ strlcpy(serio->name, "sukbd", sizeof(serio->name));
+ } else {
+ serio->id.proto = SERIO_SUN;
+ serio->id.extra = 1;
+ strlcpy(serio->name, "sums", sizeof(serio->name));
+ }
+ strlcpy(serio->phys,
+ (!(up->port.line & 1) ? "su/serio0" : "su/serio1"),
+ sizeof(serio->phys));
+
+ serio->write = sunsu_serio_write;
+ serio->open = sunsu_serio_open;
+ serio->close = sunsu_serio_close;
+ serio->dev.parent = up->port.dev;
+
+ serio_register_port(serio);
+#endif
+
+ sunsu_change_speed(&up->port, up->cflag, 0, quot);
+
+ sunsu_startup(&up->port);
+ return 0;
+}
+
+/*
+ * ------------------------------------------------------------
+ * Serial console driver
+ * ------------------------------------------------------------
+ */
+
+#ifdef CONFIG_SERIAL_SUNSU_CONSOLE
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static __inline__ void wait_for_xmitr(struct uart_sunsu_port *up)
+{
+ unsigned int status, tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ do {
+ status = serial_in(up, UART_LSR);
+
+ if (status & UART_LSR_BI)
+ up->lsr_break_flag = UART_LSR_BI;
+
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ tmout = 1000000;
+ while (--tmout &&
+ ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
+ udelay(1);
+ }
+}
+
+static void sunsu_console_putchar(struct uart_port *port, int ch)
+{
+ struct uart_sunsu_port *up = (struct uart_sunsu_port *)port;
+
+ wait_for_xmitr(up);
+ serial_out(up, UART_TX, ch);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ */
+static void sunsu_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_sunsu_port *up = &sunsu_ports[co->index];
+ unsigned long flags;
+ unsigned int ier;
+ int locked = 1;
+
+ local_irq_save(flags);
+ if (up->port.sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&up->port.lock);
+ } else
+ spin_lock(&up->port.lock);
+
+ /*
+	 * First save the IER, then disable the interrupts
+ */
+ ier = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, 0);
+
+ uart_console_write(&up->port, s, count, sunsu_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up);
+ serial_out(up, UART_IER, ier);
+
+ if (locked)
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
+}
+
+/*
+ * Setup initial baud/bits/parity. We do two things here:
+ * - construct a cflag setting for the first su_open()
+ * - initialize the serial port
+ * Return non-zero if we didn't find a serial port.
+ */
+static int __init sunsu_console_setup(struct console *co, char *options)
+{
+ static struct ktermios dummy;
+ struct ktermios termios;
+ struct uart_port *port;
+
+ printk("Console: ttyS%d (SU)\n",
+ (sunsu_reg.minor - 64) + co->index);
+
+ /*
+	 * Check whether an invalid uart number has been specified, and
+	 * if so, fall back to the first port.
+ */
+ if (co->index >= UART_NR)
+ co->index = 0;
+ port = &sunsu_ports[co->index].port;
+
+ /*
+ * Temporary fix.
+ */
+ spin_lock_init(&port->lock);
+
+ /* Get firmware console settings. */
+ sunserial_console_termios(co, port->dev->of_node);
+
+ memset(&termios, 0, sizeof(struct ktermios));
+ termios.c_cflag = co->cflag;
+ port->mctrl |= TIOCM_DTR;
+ port->ops->set_termios(port, &termios, &dummy);
+
+ return 0;
+}
+
+static struct console sunsu_console = {
+ .name = "ttyS",
+ .write = sunsu_console_write,
+ .device = uart_console_device,
+ .setup = sunsu_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sunsu_reg,
+};
+
+/*
+ * Register console.
+ */
+
+static inline struct console *SUNSU_CONSOLE(void)
+{
+ return &sunsu_console;
+}
+#else
+#define SUNSU_CONSOLE() (NULL)
+#define sunsu_serial_console_init() do { } while (0)
+#endif
+
+static enum su_type __devinit su_get_type(struct device_node *dp)
+{
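+	/* The firmware "/aliases" node tells us whether this device node is the keyboard or mouse port. */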
+ struct device_node *ap = of_find_node_by_path("/aliases");
+
+ if (ap) {
+ const char *keyb = of_get_property(ap, "keyboard", NULL);
+ const char *ms = of_get_property(ap, "mouse", NULL);
+
+ if (keyb) {
+ if (dp == of_find_node_by_path(keyb))
+ return SU_PORT_KBD;
+ }
+ if (ms) {
+ if (dp == of_find_node_by_path(ms))
+ return SU_PORT_MS;
+ }
+ }
+
+ return SU_PORT_PORT;
+}
+
+static int __devinit su_probe(struct platform_device *op)
+{
+ static int inst;
+ struct device_node *dp = op->dev.of_node;
+ struct uart_sunsu_port *up;
+ struct resource *rp;
+ enum su_type type;
+ bool ignore_line;
+ int err;
+
+ type = su_get_type(dp);
+ if (type == SU_PORT_PORT) {
+ if (inst >= UART_NR)
+ return -EINVAL;
+ up = &sunsu_ports[inst];
+ } else {
+ up = kzalloc(sizeof(*up), GFP_KERNEL);
+ if (!up)
+ return -ENOMEM;
+ }
+
+ up->port.line = inst;
+
+ spin_lock_init(&up->port.lock);
+
+ up->su_type = type;
+
+ rp = &op->resource[0];
+ up->port.mapbase = rp->start;
+ up->reg_size = (rp->end - rp->start) + 1;
+ up->port.membase = of_ioremap(rp, 0, up->reg_size, "su");
+ if (!up->port.membase) {
+ if (type != SU_PORT_PORT)
+ kfree(up);
+ return -ENOMEM;
+ }
+
+ up->port.irq = op->archdata.irqs[0];
+
+ up->port.dev = &op->dev;
+
+ up->port.type = PORT_UNKNOWN;
+ up->port.uartclk = (SU_BASE_BAUD * 16);
+
+ err = 0;
+ if (up->su_type == SU_PORT_KBD || up->su_type == SU_PORT_MS) {
+ err = sunsu_kbd_ms_init(up);
+ if (err) {
+ of_iounmap(&op->resource[0],
+ up->port.membase, up->reg_size);
+ kfree(up);
+ return err;
+ }
+ dev_set_drvdata(&op->dev, up);
+
+ return 0;
+ }
+
+ up->port.flags |= UPF_BOOT_AUTOCONF;
+
+ sunsu_autoconfig(up);
+
+ err = -ENODEV;
+ if (up->port.type == PORT_UNKNOWN)
+ goto out_unmap;
+
+ up->port.ops = &sunsu_pops;
+
+ ignore_line = false;
+ if (!strcmp(dp->name, "rsc-console") ||
+ !strcmp(dp->name, "lom-console"))
+ ignore_line = true;
+
+ sunserial_console_match(SUNSU_CONSOLE(), dp,
+ &sunsu_reg, up->port.line,
+ ignore_line);
+ err = uart_add_one_port(&sunsu_reg, &up->port);
+ if (err)
+ goto out_unmap;
+
+ dev_set_drvdata(&op->dev, up);
+
+ inst++;
+
+ return 0;
+
+out_unmap:
+ of_iounmap(&op->resource[0], up->port.membase, up->reg_size);
+ return err;
+}
+
+static int __devexit su_remove(struct platform_device *op)
+{
+ struct uart_sunsu_port *up = dev_get_drvdata(&op->dev);
+ bool kbdms = false;
+
+ if (up->su_type == SU_PORT_MS ||
+ up->su_type == SU_PORT_KBD)
+ kbdms = true;
+
+ if (kbdms) {
+#ifdef CONFIG_SERIO
+ serio_unregister_port(&up->serio);
+#endif
+ } else if (up->port.type != PORT_UNKNOWN)
+ uart_remove_one_port(&sunsu_reg, &up->port);
+
+ if (up->port.membase)
+ of_iounmap(&op->resource[0], up->port.membase, up->reg_size);
+
+ if (kbdms)
+ kfree(up);
+
+ dev_set_drvdata(&op->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id su_match[] = {
+ {
+ .name = "su",
+ },
+ {
+ .name = "su_pnp",
+ },
+ {
+ .name = "serial",
+ .compatible = "su",
+ },
+ {
+ .type = "serial",
+ .compatible = "su",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, su_match);
+
+static struct platform_driver su_driver = {
+ .driver = {
+ .name = "su",
+ .owner = THIS_MODULE,
+ .of_match_table = su_match,
+ },
+ .probe = su_probe,
+ .remove = __devexit_p(su_remove),
+};
+
+static int __init sunsu_init(void)
+{
+ struct device_node *dp;
+ int err;
+ int num_uart = 0;
+
+ for_each_node_by_name(dp, "su") {
+ if (su_get_type(dp) == SU_PORT_PORT)
+ num_uart++;
+ }
+ for_each_node_by_name(dp, "su_pnp") {
+ if (su_get_type(dp) == SU_PORT_PORT)
+ num_uart++;
+ }
+ for_each_node_by_name(dp, "serial") {
+ if (of_device_is_compatible(dp, "su")) {
+ if (su_get_type(dp) == SU_PORT_PORT)
+ num_uart++;
+ }
+ }
+ for_each_node_by_type(dp, "serial") {
+ if (of_device_is_compatible(dp, "su")) {
+ if (su_get_type(dp) == SU_PORT_PORT)
+ num_uart++;
+ }
+ }
+
+ if (num_uart) {
+ err = sunserial_register_minors(&sunsu_reg, num_uart);
+ if (err)
+ return err;
+ }
+
+ err = platform_driver_register(&su_driver);
+ if (err && num_uart)
+ sunserial_unregister_minors(&sunsu_reg, num_uart);
+
+ return err;
+}
+
+static void __exit sunsu_exit(void)
+{
+ if (sunsu_reg.nr)
+ sunserial_unregister_minors(&sunsu_reg, sunsu_reg.nr);
+}
+
+module_init(sunsu_init);
+module_exit(sunsu_exit);
+
+MODULE_AUTHOR("Eddie C. Dost, Peter Zaitcev, and David S. Miller");
+MODULE_DESCRIPTION("Sun SU serial port driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
new file mode 100644
index 00000000..8e916e76
--- /dev/null
+++ b/drivers/tty/serial/sunzilog.c
@@ -0,0 +1,1655 @@
+/* sunzilog.c: Zilog serial driver for Sparc systems.
+ *
+ * Driver for Zilog serial chips found on Sun workstations and
+ * servers. This driver could actually be made more generic.
+ *
+ * This is based on the old drivers/sbus/char/zs.c code. A lot
+ * of code has been simply moved over directly from there but
+ * much has been rewritten. Credits therefore go out to Eddie
+ * C. Dost, Pete Zaitcev, Ted Ts'o and Alex Buell for their
+ * work there.
+ *
+ * Copyright (C) 2002, 2006, 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/circ_buf.h>
+#include <linux/serial.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/spinlock.h>
+#ifdef CONFIG_SERIO
+#include <linux/serio.h>
+#endif
+#include <linux/init.h>
+#include <linux/of_device.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+
+#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+
+#include "suncore.h"
+#include "sunzilog.h"
+
+/* On 32-bit sparcs we need to delay after register accesses
+ * to accommodate sun4 systems, but we do not need to flush writes.
+ * On 64-bit sparc we only need to flush single writes to ensure
+ * completion.
+ */
+#ifndef CONFIG_SPARC64
+#define ZSDELAY() udelay(5)
+#define ZSDELAY_LONG() udelay(20)
+#define ZS_WSYNC(channel) do { } while (0)
+#else
+#define ZSDELAY()
+#define ZSDELAY_LONG()
+#define ZS_WSYNC(__channel) \
+ readb(&((__channel)->control))
+#endif
+
+#define ZS_CLOCK 4915200 /* Zilog input clock rate. */
+#define ZS_CLOCK_DIVISOR 16 /* Divisor this driver uses. */
+
+/*
+ * We wrap our port structure around the generic uart_port.
+ */
+struct uart_sunzilog_port {
+ struct uart_port port;
+
+ /* IRQ servicing chain. */
+ struct uart_sunzilog_port *next;
+
+ /* Current values of Zilog write registers. */
+ unsigned char curregs[NUM_ZSREGS];
+
+ unsigned int flags;
+#define SUNZILOG_FLAG_CONS_KEYB 0x00000001
+#define SUNZILOG_FLAG_CONS_MOUSE 0x00000002
+#define SUNZILOG_FLAG_IS_CONS 0x00000004
+#define SUNZILOG_FLAG_IS_KGDB 0x00000008
+#define SUNZILOG_FLAG_MODEM_STATUS 0x00000010
+#define SUNZILOG_FLAG_IS_CHANNEL_A 0x00000020
+#define SUNZILOG_FLAG_REGS_HELD 0x00000040
+#define SUNZILOG_FLAG_TX_STOPPED 0x00000080
+#define SUNZILOG_FLAG_TX_ACTIVE 0x00000100
+#define SUNZILOG_FLAG_ESCC 0x00000200
+#define SUNZILOG_FLAG_ISR_HANDLER 0x00000400
+
+ unsigned int cflag;
+
+ unsigned char parity_mask;
+ unsigned char prev_status;
+
+#ifdef CONFIG_SERIO
+ struct serio serio;
+ int serio_open;
+#endif
+};
+
+static void sunzilog_putchar(struct uart_port *port, int ch);
+
+#define ZILOG_CHANNEL_FROM_PORT(PORT) ((struct zilog_channel __iomem *)((PORT)->membase))
+#define UART_ZILOG(PORT) ((struct uart_sunzilog_port *)(PORT))
+
+#define ZS_IS_KEYB(UP) ((UP)->flags & SUNZILOG_FLAG_CONS_KEYB)
+#define ZS_IS_MOUSE(UP) ((UP)->flags & SUNZILOG_FLAG_CONS_MOUSE)
+#define ZS_IS_CONS(UP) ((UP)->flags & SUNZILOG_FLAG_IS_CONS)
+#define ZS_IS_KGDB(UP) ((UP)->flags & SUNZILOG_FLAG_IS_KGDB)
+#define ZS_WANTS_MODEM_STATUS(UP) ((UP)->flags & SUNZILOG_FLAG_MODEM_STATUS)
+#define ZS_IS_CHANNEL_A(UP) ((UP)->flags & SUNZILOG_FLAG_IS_CHANNEL_A)
+#define ZS_REGS_HELD(UP) ((UP)->flags & SUNZILOG_FLAG_REGS_HELD)
+#define ZS_TX_STOPPED(UP) ((UP)->flags & SUNZILOG_FLAG_TX_STOPPED)
+#define ZS_TX_ACTIVE(UP) ((UP)->flags & SUNZILOG_FLAG_TX_ACTIVE)
+
+/* Reading and writing Zilog8530 registers. The delays are to make this
+ * driver work on the Sun4, which needs a settling delay after each chip
+ * register access; other machines handle this in hardware via auxiliary
+ * flip-flops which implement the settling time we do here in software.
+ *
+ * The port lock must be held and local IRQs must be disabled
+ * when {read,write}_zsreg is invoked.
+ */
+static unsigned char read_zsreg(struct zilog_channel __iomem *channel,
+ unsigned char reg)
+{
+ unsigned char retval;
+
+ writeb(reg, &channel->control);
+ ZSDELAY();
+ retval = readb(&channel->control);
+ ZSDELAY();
+
+ return retval;
+}
+
+static void write_zsreg(struct zilog_channel __iomem *channel,
+ unsigned char reg, unsigned char value)
+{
+ writeb(reg, &channel->control);
+ ZSDELAY();
+ writeb(value, &channel->control);
+ ZSDELAY();
+}
+
+static void sunzilog_clear_fifo(struct zilog_channel __iomem *channel)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ unsigned char regval;
+
+ regval = readb(&channel->control);
+ ZSDELAY();
+ if (regval & Rx_CH_AV)
+ break;
+
+ regval = read_zsreg(channel, R1);
+ readb(&channel->data);
+ ZSDELAY();
+
+ if (regval & (PAR_ERR | Rx_OVR | CRC_ERR)) {
+ writeb(ERR_RES, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+ }
+ }
+}
+
+/* This function must only be called when the TX is not busy. The UART
+ * port lock must be held and local interrupts disabled.
+ */
+static int __load_zsregs(struct zilog_channel __iomem *channel, unsigned char *regs)
+{
+ int i;
+ int escc;
+ unsigned char r15;
+
+ /* Let pending transmits finish. */
+ for (i = 0; i < 1000; i++) {
+ unsigned char stat = read_zsreg(channel, R1);
+ if (stat & ALL_SNT)
+ break;
+ udelay(100);
+ }
+
+ writeb(ERR_RES, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ sunzilog_clear_fifo(channel);
+
+ /* Disable all interrupts. */
+ write_zsreg(channel, R1,
+ regs[R1] & ~(RxINT_MASK | TxINT_ENAB | EXT_INT_ENAB));
+
+ /* Set parity, sync config, stop bits, and clock divisor. */
+ write_zsreg(channel, R4, regs[R4]);
+
+ /* Set misc. TX/RX control bits. */
+ write_zsreg(channel, R10, regs[R10]);
+
+ /* Set TX/RX controls sans the enable bits. */
+ write_zsreg(channel, R3, regs[R3] & ~RxENAB);
+ write_zsreg(channel, R5, regs[R5] & ~TxENAB);
+
+ /* Synchronous mode config. */
+ write_zsreg(channel, R6, regs[R6]);
+ write_zsreg(channel, R7, regs[R7]);
+
+ /* Don't mess with the interrupt vector (R2, unused by us) and
+ * master interrupt control (R9). We make sure this is set up
+ * properly at probe time and then never touch it again.
+ */
+
+ /* Disable baud generator. */
+ write_zsreg(channel, R14, regs[R14] & ~BRENAB);
+
+ /* Clock mode control. */
+ write_zsreg(channel, R11, regs[R11]);
+
+ /* Lower and upper byte of baud rate generator divisor. */
+ write_zsreg(channel, R12, regs[R12]);
+ write_zsreg(channel, R13, regs[R13]);
+
+ /* Now rewrite R14, with BRENAB (if set). */
+ write_zsreg(channel, R14, regs[R14]);
+
+ /* External status interrupt control. */
+ write_zsreg(channel, R15, (regs[R15] | WR7pEN) & ~FIFOEN);
+
+ /* ESCC Extension Register */
+ r15 = read_zsreg(channel, R15);
+ if (r15 & 0x01) {
+ write_zsreg(channel, R7, regs[R7p]);
+
+ /* External status interrupt and FIFO control. */
+ write_zsreg(channel, R15, regs[R15] & ~WR7pEN);
+ escc = 1;
+ } else {
+ /* Clear the FIFO bit in case it is an issue */
+ regs[R15] &= ~FIFOEN;
+ escc = 0;
+ }
+
+ /* Reset external status interrupts. */
+ write_zsreg(channel, R0, RES_EXT_INT); /* First Latch */
+ write_zsreg(channel, R0, RES_EXT_INT); /* Second Latch */
+
+ /* Rewrite R3/R5, this time without enables masked. */
+ write_zsreg(channel, R3, regs[R3]);
+ write_zsreg(channel, R5, regs[R5]);
+
+ /* Rewrite R1, this time without the IRQ enables masked. */
+ write_zsreg(channel, R1, regs[R1]);
+
+ return escc;
+}
+
+/* Reprogram the Zilog channel HW registers with the copies found in the
+ * software state struct. If the transmitter is busy, we defer this update
+ * until the next TX complete interrupt. Else, we do it right now.
+ *
+ * The UART port lock must be held and local interrupts disabled.
+ */
+static void sunzilog_maybe_update_regs(struct uart_sunzilog_port *up,
+ struct zilog_channel __iomem *channel)
+{
+ if (!ZS_REGS_HELD(up)) {
+ if (ZS_TX_ACTIVE(up)) {
+ up->flags |= SUNZILOG_FLAG_REGS_HELD;
+ } else {
+ __load_zsregs(channel, up->curregs);
+ }
+ }
+}
+
+static void sunzilog_change_mouse_baud(struct uart_sunzilog_port *up)
+{
+ unsigned int cur_cflag = up->cflag;
+ int brg, new_baud;
+
+ up->cflag &= ~CBAUD;
+ up->cflag |= suncore_mouse_baud_cflag_next(cur_cflag, &new_baud);
+
+ brg = BPS_TO_BRG(new_baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
+ up->curregs[R12] = (brg & 0xff);
+ up->curregs[R13] = (brg >> 8) & 0xff;
+ sunzilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(&up->port));
+}
+
+static void sunzilog_kbdms_receive_chars(struct uart_sunzilog_port *up,
+ unsigned char ch, int is_break)
+{
+ if (ZS_IS_KEYB(up)) {
+ /* Stop-A is handled by drivers/char/keyboard.c now. */
+#ifdef CONFIG_SERIO
+ if (up->serio_open)
+ serio_interrupt(&up->serio, ch, 0);
+#endif
+ } else if (ZS_IS_MOUSE(up)) {
+ int ret = suncore_mouse_baud_detection(ch, is_break);
+
+ switch (ret) {
+ case 2:
+ sunzilog_change_mouse_baud(up);
+ /* fallthru */
+ case 1:
+ break;
+
+ case 0:
+#ifdef CONFIG_SERIO
+ if (up->serio_open)
+ serio_interrupt(&up->serio, ch, 0);
+#endif
+ break;
+ }
+ }
+}
+
+static struct tty_struct *
+sunzilog_receive_chars(struct uart_sunzilog_port *up,
+ struct zilog_channel __iomem *channel)
+{
+ struct tty_struct *tty;
+ unsigned char ch, r1, flag;
+
+ tty = NULL;
+ if (up->port.state != NULL && /* Unopened serial console */
+ up->port.state->port.tty != NULL) /* Keyboard || mouse */
+ tty = up->port.state->port.tty;
+
+ for (;;) {
+
+ r1 = read_zsreg(channel, R1);
+ if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) {
+ writeb(ERR_RES, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+ }
+
+ ch = readb(&channel->control);
+ ZSDELAY();
+
+ /* This funny hack depends upon BRK_ABRT not interfering
+ * with the other bits we care about in R1.
+ */
+ if (ch & BRK_ABRT)
+ r1 |= BRK_ABRT;
+
+ if (!(ch & Rx_CH_AV))
+ break;
+
+ ch = readb(&channel->data);
+ ZSDELAY();
+
+ ch &= up->parity_mask;
+
+ if (unlikely(ZS_IS_KEYB(up)) || unlikely(ZS_IS_MOUSE(up))) {
+ sunzilog_kbdms_receive_chars(up, ch, 0);
+ continue;
+ }
+
+ if (tty == NULL) {
+ uart_handle_sysrq_char(&up->port, ch);
+ continue;
+ }
+
+ /* A real serial line, record the character and status. */
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+ if (r1 & (BRK_ABRT | PAR_ERR | Rx_OVR | CRC_ERR)) {
+ if (r1 & BRK_ABRT) {
+ r1 &= ~(PAR_ERR | CRC_ERR);
+ up->port.icount.brk++;
+ if (uart_handle_break(&up->port))
+ continue;
+ }
+ else if (r1 & PAR_ERR)
+ up->port.icount.parity++;
+ else if (r1 & CRC_ERR)
+ up->port.icount.frame++;
+ if (r1 & Rx_OVR)
+ up->port.icount.overrun++;
+ r1 &= up->port.read_status_mask;
+ if (r1 & BRK_ABRT)
+ flag = TTY_BREAK;
+ else if (r1 & PAR_ERR)
+ flag = TTY_PARITY;
+ else if (r1 & CRC_ERR)
+ flag = TTY_FRAME;
+ }
+ if (uart_handle_sysrq_char(&up->port, ch))
+ continue;
+
+ if (up->port.ignore_status_mask == 0xff ||
+ (r1 & up->port.ignore_status_mask) == 0) {
+ tty_insert_flip_char(tty, ch, flag);
+ }
+ if (r1 & Rx_OVR)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+
+ return tty;
+}
+
+static void sunzilog_status_handle(struct uart_sunzilog_port *up,
+ struct zilog_channel __iomem *channel)
+{
+ unsigned char status;
+
+ status = readb(&channel->control);
+ ZSDELAY();
+
+ writeb(RES_EXT_INT, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ if (status & BRK_ABRT) {
+ if (ZS_IS_MOUSE(up))
+ sunzilog_kbdms_receive_chars(up, 0, 1);
+ if (ZS_IS_CONS(up)) {
+ /* Wait for BREAK to deassert to avoid potentially
+ * confusing the PROM.
+ */
+ while (1) {
+ status = readb(&channel->control);
+ ZSDELAY();
+ if (!(status & BRK_ABRT))
+ break;
+ }
+ sun_do_break();
+ return;
+ }
+ }
+
+ if (ZS_WANTS_MODEM_STATUS(up)) {
+ if (status & SYNC)
+ up->port.icount.dsr++;
+
+ /* The Zilog just gives us an interrupt when DCD/CTS/etc. change,
+ * but it does not tell us which bit has changed; we have to keep
+ * track of that ourselves.
+ */
+ if ((status ^ up->prev_status) ^ DCD)
+ uart_handle_dcd_change(&up->port,
+ (status & DCD));
+ if ((status ^ up->prev_status) ^ CTS)
+ uart_handle_cts_change(&up->port,
+ (status & CTS));
+
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+ }
+
+ up->prev_status = status;
+}
+
+static void sunzilog_transmit_chars(struct uart_sunzilog_port *up,
+ struct zilog_channel __iomem *channel)
+{
+ struct circ_buf *xmit;
+
+ if (ZS_IS_CONS(up)) {
+ unsigned char status = readb(&channel->control);
+ ZSDELAY();
+
+ /* TX still busy? Just wait for the next TX done interrupt.
+ *
+ * This can happen because of how we do serial console writes. It would
+ * be nice to transmit console writes just like we normally would for
+ * a TTY line (i.e. buffered and TX interrupt driven). That is not
+ * easy because console writes cannot sleep. One solution might be
+ * to poll on enough port->xmit space becoming free. -DaveM
+ */
+ if (!(status & Tx_BUF_EMP))
+ return;
+ }
+
+ up->flags &= ~SUNZILOG_FLAG_TX_ACTIVE;
+
+ if (ZS_REGS_HELD(up)) {
+ __load_zsregs(channel, up->curregs);
+ up->flags &= ~SUNZILOG_FLAG_REGS_HELD;
+ }
+
+ if (ZS_TX_STOPPED(up)) {
+ up->flags &= ~SUNZILOG_FLAG_TX_STOPPED;
+ goto ack_tx_int;
+ }
+
+ if (up->port.x_char) {
+ up->flags |= SUNZILOG_FLAG_TX_ACTIVE;
+ writeb(up->port.x_char, &channel->data);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ return;
+ }
+
+ if (up->port.state == NULL)
+ goto ack_tx_int;
+ xmit = &up->port.state->xmit;
+ if (uart_circ_empty(xmit))
+ goto ack_tx_int;
+
+ if (uart_tx_stopped(&up->port))
+ goto ack_tx_int;
+
+ up->flags |= SUNZILOG_FLAG_TX_ACTIVE;
+ writeb(xmit->buf[xmit->tail], &channel->data);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ return;
+
+ack_tx_int:
+ writeb(RES_Tx_P, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+}
+
+static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
+{
+ struct uart_sunzilog_port *up = dev_id;
+
+ while (up) {
+ struct zilog_channel __iomem *channel
+ = ZILOG_CHANNEL_FROM_PORT(&up->port);
+ struct tty_struct *tty;
+ unsigned char r3;
+
+ spin_lock(&up->port.lock);
+ r3 = read_zsreg(channel, R3);
+
+ /* Channel A */
+ tty = NULL;
+ if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
+ writeb(RES_H_IUS, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ if (r3 & CHARxIP)
+ tty = sunzilog_receive_chars(up, channel);
+ if (r3 & CHAEXT)
+ sunzilog_status_handle(up, channel);
+ if (r3 & CHATxIP)
+ sunzilog_transmit_chars(up, channel);
+ }
+ spin_unlock(&up->port.lock);
+
+ if (tty)
+ tty_flip_buffer_push(tty);
+
+ /* Channel B */
+ up = up->next;
+ channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
+
+ spin_lock(&up->port.lock);
+ tty = NULL;
+ if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
+ writeb(RES_H_IUS, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ if (r3 & CHBRxIP)
+ tty = sunzilog_receive_chars(up, channel);
+ if (r3 & CHBEXT)
+ sunzilog_status_handle(up, channel);
+ if (r3 & CHBTxIP)
+ sunzilog_transmit_chars(up, channel);
+ }
+ spin_unlock(&up->port.lock);
+
+ if (tty)
+ tty_flip_buffer_push(tty);
+
+ up = up->next;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* A convenient way to quickly get R0 status. This helper does no locking
+ * of its own; callers handle the port lock as noted below.
+ */
+static __inline__ unsigned char sunzilog_read_channel_status(struct uart_port *port)
+{
+ struct zilog_channel __iomem *channel;
+ unsigned char status;
+
+ channel = ZILOG_CHANNEL_FROM_PORT(port);
+ status = readb(&channel->control);
+ ZSDELAY();
+
+ return status;
+}
+
+/* The port lock is not held. */
+static unsigned int sunzilog_tx_empty(struct uart_port *port)
+{
+ unsigned long flags;
+ unsigned char status;
+ unsigned int ret;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ status = sunzilog_read_channel_status(port);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (status & Tx_BUF_EMP)
+ ret = TIOCSER_TEMT;
+ else
+ ret = 0;
+
+ return ret;
+}
+
+/* The port lock is held and interrupts are disabled. */
+static unsigned int sunzilog_get_mctrl(struct uart_port *port)
+{
+ unsigned char status;
+ unsigned int ret;
+
+ status = sunzilog_read_channel_status(port);
+
+ ret = 0;
+ if (status & DCD)
+ ret |= TIOCM_CAR;
+ if (status & SYNC)
+ ret |= TIOCM_DSR;
+ if (status & CTS)
+ ret |= TIOCM_CTS;
+
+ return ret;
+}
+
+/* The port lock is held and interrupts are disabled. */
+static void sunzilog_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
+ unsigned char set_bits, clear_bits;
+
+ set_bits = clear_bits = 0;
+
+ if (mctrl & TIOCM_RTS)
+ set_bits |= RTS;
+ else
+ clear_bits |= RTS;
+ if (mctrl & TIOCM_DTR)
+ set_bits |= DTR;
+ else
+ clear_bits |= DTR;
+
+ /* NOTE: Not subject to 'transmitter active' rule. */
+ up->curregs[R5] |= set_bits;
+ up->curregs[R5] &= ~clear_bits;
+ write_zsreg(channel, R5, up->curregs[R5]);
+}
+
+/* The port lock is held and interrupts are disabled. */
+static void sunzilog_stop_tx(struct uart_port *port)
+{
+ struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+
+ up->flags |= SUNZILOG_FLAG_TX_STOPPED;
+}
+
+/* The port lock is held and interrupts are disabled. */
+static void sunzilog_start_tx(struct uart_port *port)
+{
+ struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
+ unsigned char status;
+
+ up->flags |= SUNZILOG_FLAG_TX_ACTIVE;
+ up->flags &= ~SUNZILOG_FLAG_TX_STOPPED;
+
+ status = readb(&channel->control);
+ ZSDELAY();
+
+ /* TX busy? Just wait for the TX done interrupt. */
+ if (!(status & Tx_BUF_EMP))
+ return;
+
+ /* Send the first character to jump-start the TX done
+ * IRQ sending engine.
+ */
+ if (port->x_char) {
+ writeb(port->x_char, &channel->data);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ port->icount.tx++;
+ port->x_char = 0;
+ } else {
+ struct circ_buf *xmit = &port->state->xmit;
+
+ writeb(xmit->buf[xmit->tail], &channel->data);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+ }
+}
+
+/* The port lock is held. */
+static void sunzilog_stop_rx(struct uart_port *port)
+{
+ struct uart_sunzilog_port *up = UART_ZILOG(port);
+ struct zilog_channel __iomem *channel;
+
+ if (ZS_IS_CONS(up))
+ return;
+
+ channel = ZILOG_CHANNEL_FROM_PORT(port);
+
+ /* Disable all RX interrupts. */
+ up->curregs[R1] &= ~RxINT_MASK;
+ sunzilog_maybe_update_regs(up, channel);
+}
+
+/* The port lock is held. */
+static void sunzilog_enable_ms(struct uart_port *port)
+{
+ struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
+ unsigned char new_reg;
+
+ new_reg = up->curregs[R15] | (DCDIE | SYNCIE | CTSIE);
+ if (new_reg != up->curregs[R15]) {
+ up->curregs[R15] = new_reg;
+
+ /* NOTE: Not subject to 'transmitter active' rule. */
+ write_zsreg(channel, R15, up->curregs[R15] & ~WR7pEN);
+ }
+}
+
+/* The port lock is not held. */
+static void sunzilog_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
+ unsigned char set_bits, clear_bits, new_reg;
+ unsigned long flags;
+
+ set_bits = clear_bits = 0;
+
+ if (break_state)
+ set_bits |= SND_BRK;
+ else
+ clear_bits |= SND_BRK;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
+ if (new_reg != up->curregs[R5]) {
+ up->curregs[R5] = new_reg;
+
+ /* NOTE: Not subject to 'transmitter active' rule. */
+ write_zsreg(channel, R5, up->curregs[R5]);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void __sunzilog_startup(struct uart_sunzilog_port *up)
+{
+ struct zilog_channel __iomem *channel;
+
+ channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
+ up->prev_status = readb(&channel->control);
+
+ /* Enable receiver and transmitter. */
+ up->curregs[R3] |= RxENAB;
+ up->curregs[R5] |= TxENAB;
+
+ up->curregs[R1] |= EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB;
+ sunzilog_maybe_update_regs(up, channel);
+}
+
+static int sunzilog_startup(struct uart_port *port)
+{
+ struct uart_sunzilog_port *up = UART_ZILOG(port);
+ unsigned long flags;
+
+ if (ZS_IS_CONS(up))
+ return 0;
+
+ spin_lock_irqsave(&port->lock, flags);
+ __sunzilog_startup(up);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return 0;
+}
+
+/*
+ * The test for ZS_IS_CONS is explained by the following e-mail:
+ *****
+ * From: Russell King <rmk@arm.linux.org.uk>
+ * Date: Sun, 8 Dec 2002 10:18:38 +0000
+ *
+ * On Sun, Dec 08, 2002 at 02:43:36AM -0500, Pete Zaitcev wrote:
+ * > I boot my 2.5 boxes using "console=ttyS0,9600" argument,
+ * > and I noticed that something is not right with reference
+ * > counting in this case. It seems that when the console
+ * > is open by kernel initially, this is not accounted
+ * > as an open, and uart_startup is not called.
+ *
+ * That is correct. We are unable to call uart_startup when the serial
+ * console is initialised because it may need to allocate memory (as
+ * request_irq does) and the memory allocators may not have been
+ * initialised.
+ *
+ * 1. initialise the port into a state where it can send characters in the
+ * console write method.
+ *
+ * 2. don't do the actual hardware shutdown in your shutdown() method (but
+ * do the normal software shutdown - ie, free irqs etc)
+ *****
+ */
+static void sunzilog_shutdown(struct uart_port *port)
+{
+ struct uart_sunzilog_port *up = UART_ZILOG(port);
+ struct zilog_channel __iomem *channel;
+ unsigned long flags;
+
+ if (ZS_IS_CONS(up))
+ return;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ channel = ZILOG_CHANNEL_FROM_PORT(port);
+
+ /* Disable receiver and transmitter. */
+ up->curregs[R3] &= ~RxENAB;
+ up->curregs[R5] &= ~TxENAB;
+
+ /* Disable all interrupts and BRK assertion. */
+ up->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
+ up->curregs[R5] &= ~SND_BRK;
+ sunzilog_maybe_update_regs(up, channel);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* Shared by TTY driver and serial console setup. The port lock is held
+ * and local interrupts are disabled.
+ */
+static void
+sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag,
+ unsigned int iflag, int brg)
+{
+
+ up->curregs[R10] = NRZ;
+ up->curregs[R11] = TCBR | RCBR;
+
+ /* Program BAUD and clock source. */
+ up->curregs[R4] &= ~XCLK_MASK;
+ up->curregs[R4] |= X16CLK;
+ up->curregs[R12] = brg & 0xff;
+ up->curregs[R13] = (brg >> 8) & 0xff;
+ up->curregs[R14] = BRSRC | BRENAB;
+
+ /* Character size, stop bits, and parity. */
+ up->curregs[R3] &= ~RxN_MASK;
+ up->curregs[R5] &= ~TxN_MASK;
+ switch (cflag & CSIZE) {
+ case CS5:
+ up->curregs[R3] |= Rx5;
+ up->curregs[R5] |= Tx5;
+ up->parity_mask = 0x1f;
+ break;
+ case CS6:
+ up->curregs[R3] |= Rx6;
+ up->curregs[R5] |= Tx6;
+ up->parity_mask = 0x3f;
+ break;
+ case CS7:
+ up->curregs[R3] |= Rx7;
+ up->curregs[R5] |= Tx7;
+ up->parity_mask = 0x7f;
+ break;
+ case CS8:
+ default:
+ up->curregs[R3] |= Rx8;
+ up->curregs[R5] |= Tx8;
+ up->parity_mask = 0xff;
+ break;
+ }
+ up->curregs[R4] &= ~0x0c;
+ if (cflag & CSTOPB)
+ up->curregs[R4] |= SB2;
+ else
+ up->curregs[R4] |= SB1;
+ if (cflag & PARENB)
+ up->curregs[R4] |= PAR_ENAB;
+ else
+ up->curregs[R4] &= ~PAR_ENAB;
+ if (!(cflag & PARODD))
+ up->curregs[R4] |= PAR_EVEN;
+ else
+ up->curregs[R4] &= ~PAR_EVEN;
+
+ up->port.read_status_mask = Rx_OVR;
+ if (iflag & INPCK)
+ up->port.read_status_mask |= CRC_ERR | PAR_ERR;
+ if (iflag & (BRKINT | PARMRK))
+ up->port.read_status_mask |= BRK_ABRT;
+
+ up->port.ignore_status_mask = 0;
+ if (iflag & IGNPAR)
+ up->port.ignore_status_mask |= CRC_ERR | PAR_ERR;
+ if (iflag & IGNBRK) {
+ up->port.ignore_status_mask |= BRK_ABRT;
+ if (iflag & IGNPAR)
+ up->port.ignore_status_mask |= Rx_OVR;
+ }
+
+ if ((cflag & CREAD) == 0)
+ up->port.ignore_status_mask = 0xff;
+}
+
+/* The port lock is not held. */
+static void
+sunzilog_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ unsigned long flags;
+ int baud, brg;
+
+ baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
+
+ sunzilog_convert_to_zs(up, termios->c_cflag, termios->c_iflag, brg);
+
+ if (UART_ENABLE_MS(&up->port, termios->c_cflag))
+ up->flags |= SUNZILOG_FLAG_MODEM_STATUS;
+ else
+ up->flags &= ~SUNZILOG_FLAG_MODEM_STATUS;
+
+ up->cflag = termios->c_cflag;
+
+ sunzilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port));
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static const char *sunzilog_type(struct uart_port *port)
+{
+ struct uart_sunzilog_port *up = UART_ZILOG(port);
+
+ return (up->flags & SUNZILOG_FLAG_ESCC) ? "zs (ESCC)" : "zs";
+}
+
+/* We do not request/release mappings of the registers here; this
+ * happens at early serial probe time.
+ */
+static void sunzilog_release_port(struct uart_port *port)
+{
+}
+
+static int sunzilog_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+/* These do not need to do anything interesting either. */
+static void sunzilog_config_port(struct uart_port *port, int flags)
+{
+}
+
+/* We do not support letting the user mess with the divisor, IRQ, etc. */
+static int sunzilog_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ return -EINVAL;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int sunzilog_get_poll_char(struct uart_port *port)
+{
+ unsigned char ch, r1;
+ struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct zilog_channel __iomem *channel
+ = ZILOG_CHANNEL_FROM_PORT(&up->port);
+
+ r1 = read_zsreg(channel, R1);
+ if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) {
+ writeb(ERR_RES, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+ }
+
+ ch = readb(&channel->control);
+ ZSDELAY();
+
+ /* This funny hack depends upon BRK_ABRT not interfering
+ * with the other bits we care about in R1.
+ */
+ if (ch & BRK_ABRT)
+ r1 |= BRK_ABRT;
+
+ if (!(ch & Rx_CH_AV))
+ return NO_POLL_CHAR;
+
+ ch = readb(&channel->data);
+ ZSDELAY();
+
+ ch &= up->parity_mask;
+ return ch;
+}
+
+static void sunzilog_put_poll_char(struct uart_port *port,
+ unsigned char ch)
+{
+ struct uart_sunzilog_port *up = (struct uart_sunzilog_port *)port;
+
+ sunzilog_putchar(&up->port, ch);
+}
+#endif /* CONFIG_CONSOLE_POLL */
+
+static struct uart_ops sunzilog_pops = {
+ .tx_empty = sunzilog_tx_empty,
+ .set_mctrl = sunzilog_set_mctrl,
+ .get_mctrl = sunzilog_get_mctrl,
+ .stop_tx = sunzilog_stop_tx,
+ .start_tx = sunzilog_start_tx,
+ .stop_rx = sunzilog_stop_rx,
+ .enable_ms = sunzilog_enable_ms,
+ .break_ctl = sunzilog_break_ctl,
+ .startup = sunzilog_startup,
+ .shutdown = sunzilog_shutdown,
+ .set_termios = sunzilog_set_termios,
+ .type = sunzilog_type,
+ .release_port = sunzilog_release_port,
+ .request_port = sunzilog_request_port,
+ .config_port = sunzilog_config_port,
+ .verify_port = sunzilog_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = sunzilog_get_poll_char,
+ .poll_put_char = sunzilog_put_poll_char,
+#endif
+};
+
+static int uart_chip_count;
+static struct uart_sunzilog_port *sunzilog_port_table;
+static struct zilog_layout __iomem **sunzilog_chip_regs;
+
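+/* Head of the per-channel chain walked by sunzilog_interrupt(); the
+ * ->next links are set up in sunzilog_alloc_tables().
+ */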
+static struct uart_sunzilog_port *sunzilog_irq_chain;
+
+static struct uart_driver sunzilog_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "sunzilog",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+};
+
+static int __init sunzilog_alloc_tables(int num_sunzilog)
+{
+ struct uart_sunzilog_port *up;
+ unsigned long size;
+ int num_channels = num_sunzilog * 2;
+ int i;
+
+ size = num_channels * sizeof(struct uart_sunzilog_port);
+ sunzilog_port_table = kzalloc(size, GFP_KERNEL);
+ if (!sunzilog_port_table)
+ return -ENOMEM;
+
+ for (i = 0; i < num_channels; i++) {
+ up = &sunzilog_port_table[i];
+
+ spin_lock_init(&up->port.lock);
+
+ if (i == 0)
+ sunzilog_irq_chain = up;
+
+ if (i < num_channels - 1)
+ up->next = up + 1;
+ else
+ up->next = NULL;
+ }
+
+ size = num_sunzilog * sizeof(struct zilog_layout __iomem *);
+ sunzilog_chip_regs = kzalloc(size, GFP_KERNEL);
+ if (!sunzilog_chip_regs) {
+ kfree(sunzilog_port_table);
+ sunzilog_irq_chain = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void sunzilog_free_tables(void)
+{
+ kfree(sunzilog_port_table);
+ sunzilog_irq_chain = NULL;
+ kfree(sunzilog_chip_regs);
+}
+
+#define ZS_PUT_CHAR_MAX_DELAY 2000 /* 2000 polls of 5 usec = 10 ms */
+
+static void sunzilog_putchar(struct uart_port *port, int ch)
+{
+ struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
+ int loops = ZS_PUT_CHAR_MAX_DELAY;
+
+ /* This is a timed polling loop, so do not replace the explicit
+ * udelay with ZSDELAY, as that is a NOP on some platforms. -DaveM
+ */
+ do {
+ unsigned char val = readb(&channel->control);
+ if (val & Tx_BUF_EMP) {
+ ZSDELAY();
+ break;
+ }
+ udelay(5);
+ } while (--loops);
+
+ writeb(ch, &channel->data);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+}
+
+#ifdef CONFIG_SERIO
+
+static DEFINE_SPINLOCK(sunzilog_serio_lock);
+
+static int sunzilog_serio_write(struct serio *serio, unsigned char ch)
+{
+ struct uart_sunzilog_port *up = serio->port_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sunzilog_serio_lock, flags);
+
+ sunzilog_putchar(&up->port, ch);
+
+ spin_unlock_irqrestore(&sunzilog_serio_lock, flags);
+
+ return 0;
+}
+
+static int sunzilog_serio_open(struct serio *serio)
+{
+ struct uart_sunzilog_port *up = serio->port_data;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&sunzilog_serio_lock, flags);
+ if (!up->serio_open) {
+ up->serio_open = 1;
+ ret = 0;
+ } else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&sunzilog_serio_lock, flags);
+
+ return ret;
+}
+
+static void sunzilog_serio_close(struct serio *serio)
+{
+ struct uart_sunzilog_port *up = serio->port_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sunzilog_serio_lock, flags);
+ up->serio_open = 0;
+ spin_unlock_irqrestore(&sunzilog_serio_lock, flags);
+}
+
+#endif /* CONFIG_SERIO */
+
+#ifdef CONFIG_SERIAL_SUNZILOG_CONSOLE
+static void
+sunzilog_console_write(struct console *con, const char *s, unsigned int count)
+{
+ struct uart_sunzilog_port *up = &sunzilog_port_table[con->index];
+ unsigned long flags;
+ int locked = 1;
+
+ local_irq_save(flags);
+ if (up->port.sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&up->port.lock);
+ } else
+ spin_lock(&up->port.lock);
+
+ uart_console_write(&up->port, s, count, sunzilog_putchar);
+ udelay(2);
+
+ if (locked)
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
+}
+
+static int __init sunzilog_console_setup(struct console *con, char *options)
+{
+ struct uart_sunzilog_port *up = &sunzilog_port_table[con->index];
+ unsigned long flags;
+ int baud, brg;
+
+ if (up->port.type != PORT_SUNZILOG)
+ return -1;
+
+ printk(KERN_INFO "Console: ttyS%d (SunZilog zs%d)\n",
+ (sunzilog_reg.minor - 64) + con->index, con->index);
+
+ /* Get firmware console settings. */
+ sunserial_console_termios(con, up->port.dev->of_node);
+
+ /* Firmware console speed is limited to 150-->38400 baud so
+ * this hackish cflag thing is OK.
+ */
+ switch (con->cflag & CBAUD) {
+ case B150: baud = 150; break;
+ case B300: baud = 300; break;
+ case B600: baud = 600; break;
+ case B1200: baud = 1200; break;
+ case B2400: baud = 2400; break;
+ case B4800: baud = 4800; break;
+ default: case B9600: baud = 9600; break;
+ case B19200: baud = 19200; break;
+ case B38400: baud = 38400; break;
+ }
+
+ brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ up->curregs[R15] |= BRKIE;
+ sunzilog_convert_to_zs(up, con->cflag, 0, brg);
+
+ sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
+ __sunzilog_startup(up);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return 0;
+}
+
+static struct console sunzilog_console_ops = {
+ .name = "ttyS",
+ .write = sunzilog_console_write,
+ .device = uart_console_device,
+ .setup = sunzilog_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sunzilog_reg,
+};
+
+static inline struct console *SUNZILOG_CONSOLE(void)
+{
+ return &sunzilog_console_ops;
+}
+
+#else
+#define SUNZILOG_CONSOLE() (NULL)
+#endif
+
+static void __devinit sunzilog_init_kbdms(struct uart_sunzilog_port *up)
+{
+ int baud, brg;
+
+ if (up->flags & SUNZILOG_FLAG_CONS_KEYB) {
+ up->cflag = B1200 | CS8 | CLOCAL | CREAD;
+ baud = 1200;
+ } else {
+ up->cflag = B4800 | CS8 | CLOCAL | CREAD;
+ baud = 4800;
+ }
+
+ up->curregs[R15] |= BRKIE;
+ brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
+ sunzilog_convert_to_zs(up, up->cflag, 0, brg);
+ sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
+ __sunzilog_startup(up);
+}
+
+#ifdef CONFIG_SERIO
+static void __devinit sunzilog_register_serio(struct uart_sunzilog_port *up)
+{
+ struct serio *serio = &up->serio;
+
+ serio->port_data = up;
+
+ serio->id.type = SERIO_RS232;
+ if (up->flags & SUNZILOG_FLAG_CONS_KEYB) {
+ serio->id.proto = SERIO_SUNKBD;
+ strlcpy(serio->name, "zskbd", sizeof(serio->name));
+ } else {
+ serio->id.proto = SERIO_SUN;
+ serio->id.extra = 1;
+ strlcpy(serio->name, "zsms", sizeof(serio->name));
+ }
+ strlcpy(serio->phys,
+ ((up->flags & SUNZILOG_FLAG_CONS_KEYB) ?
+ "zs/serio0" : "zs/serio1"),
+ sizeof(serio->phys));
+
+ serio->write = sunzilog_serio_write;
+ serio->open = sunzilog_serio_open;
+ serio->close = sunzilog_serio_close;
+ serio->dev.parent = up->port.dev;
+
+ serio_register_port(serio);
+}
+#endif
+
+static void __devinit sunzilog_init_hw(struct uart_sunzilog_port *up)
+{
+ struct zilog_channel __iomem *channel;
+ unsigned long flags;
+ int baud, brg;
+
+ channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (ZS_IS_CHANNEL_A(up)) {
+ write_zsreg(channel, R9, FHWRES);
+ ZSDELAY_LONG();
+ (void) read_zsreg(channel, R0);
+ }
+
+ if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
+ SUNZILOG_FLAG_CONS_MOUSE)) {
+ up->curregs[R1] = EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB;
+ up->curregs[R4] = PAR_EVEN | X16CLK | SB1;
+ up->curregs[R3] = RxENAB | Rx8;
+ up->curregs[R5] = TxENAB | Tx8;
+ up->curregs[R6] = 0x00; /* SDLC Address */
+ up->curregs[R7] = 0x7E; /* SDLC Flag */
+ up->curregs[R9] = NV;
+ up->curregs[R7p] = 0x00;
+ sunzilog_init_kbdms(up);
+ /* Only enable interrupts if an ISR handler is available */
+ if (up->flags & SUNZILOG_FLAG_ISR_HANDLER)
+ up->curregs[R9] |= MIE;
+ write_zsreg(channel, R9, up->curregs[R9]);
+ } else {
+ /* Normal serial TTY. */
+ up->parity_mask = 0xff;
+ up->curregs[R1] = EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB;
+ up->curregs[R4] = PAR_EVEN | X16CLK | SB1;
+ up->curregs[R3] = RxENAB | Rx8;
+ up->curregs[R5] = TxENAB | Tx8;
+ up->curregs[R6] = 0x00; /* SDLC Address */
+ up->curregs[R7] = 0x7E; /* SDLC Flag */
+ up->curregs[R9] = NV;
+ up->curregs[R10] = NRZ;
+ up->curregs[R11] = TCBR | RCBR;
+ baud = 9600;
+ brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
+ up->curregs[R12] = (brg & 0xff);
+ up->curregs[R13] = (brg >> 8) & 0xff;
+ up->curregs[R14] = BRSRC | BRENAB;
+ up->curregs[R15] = FIFOEN; /* Use FIFO if on ESCC */
+ up->curregs[R7p] = TxFIFO_LVL | RxFIFO_LVL;
+ if (__load_zsregs(channel, up->curregs)) {
+ up->flags |= SUNZILOG_FLAG_ESCC;
+ }
+ /* Only enable interrupts if an ISR handler is available */
+ if (up->flags & SUNZILOG_FLAG_ISR_HANDLER)
+ up->curregs[R9] |= MIE;
+ write_zsreg(channel, R9, up->curregs[R9]);
+ }
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+#ifdef CONFIG_SERIO
+ if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
+ SUNZILOG_FLAG_CONS_MOUSE))
+ sunzilog_register_serio(up);
+#endif
+}
+
+static int zilog_irq = -1;
+
+static int __devinit zs_probe(struct platform_device *op)
+{
+ static int kbm_inst, uart_inst;
+ int inst;
+ struct uart_sunzilog_port *up;
+ struct zilog_layout __iomem *rp;
+ int keyboard_mouse = 0;
+ int err;
+
+ if (of_find_property(op->dev.of_node, "keyboard", NULL))
+ keyboard_mouse = 1;
+
+ /* uarts must come before keyboards/mice */
+ if (keyboard_mouse)
+ inst = uart_chip_count + kbm_inst;
+ else
+ inst = uart_inst;
+
+ sunzilog_chip_regs[inst] = of_ioremap(&op->resource[0], 0,
+ sizeof(struct zilog_layout),
+ "zs");
+ if (!sunzilog_chip_regs[inst])
+ return -ENOMEM;
+
+ rp = sunzilog_chip_regs[inst];
+
+ if (zilog_irq == -1)
+ zilog_irq = op->archdata.irqs[0];
+
+ up = &sunzilog_port_table[inst * 2];
+
+ /* Channel A */
+ up[0].port.mapbase = op->resource[0].start + 0x00;
+ up[0].port.membase = (void __iomem *) &rp->channelA;
+ up[0].port.iotype = UPIO_MEM;
+ up[0].port.irq = op->archdata.irqs[0];
+ up[0].port.uartclk = ZS_CLOCK;
+ up[0].port.fifosize = 1;
+ up[0].port.ops = &sunzilog_pops;
+ up[0].port.type = PORT_SUNZILOG;
+ up[0].port.flags = 0;
+ up[0].port.line = (inst * 2) + 0;
+ up[0].port.dev = &op->dev;
+ up[0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A;
+ if (keyboard_mouse)
+ up[0].flags |= SUNZILOG_FLAG_CONS_KEYB;
+ sunzilog_init_hw(&up[0]);
+
+ /* Channel B */
+ up[1].port.mapbase = op->resource[0].start + 0x04;
+ up[1].port.membase = (void __iomem *) &rp->channelB;
+ up[1].port.iotype = UPIO_MEM;
+ up[1].port.irq = op->archdata.irqs[0];
+ up[1].port.uartclk = ZS_CLOCK;
+ up[1].port.fifosize = 1;
+ up[1].port.ops = &sunzilog_pops;
+ up[1].port.type = PORT_SUNZILOG;
+ up[1].port.flags = 0;
+ up[1].port.line = (inst * 2) + 1;
+ up[1].port.dev = &op->dev;
+ up[1].flags |= 0;
+ if (keyboard_mouse)
+ up[1].flags |= SUNZILOG_FLAG_CONS_MOUSE;
+ sunzilog_init_hw(&up[1]);
+
+ if (!keyboard_mouse) {
+ if (sunserial_console_match(SUNZILOG_CONSOLE(), op->dev.of_node,
+ &sunzilog_reg, up[0].port.line,
+ false))
+ up->flags |= SUNZILOG_FLAG_IS_CONS;
+ err = uart_add_one_port(&sunzilog_reg, &up[0].port);
+ if (err) {
+ of_iounmap(&op->resource[0],
+ rp, sizeof(struct zilog_layout));
+ return err;
+ }
+ if (sunserial_console_match(SUNZILOG_CONSOLE(), op->dev.of_node,
+ &sunzilog_reg, up[1].port.line,
+ false))
+ up->flags |= SUNZILOG_FLAG_IS_CONS;
+ err = uart_add_one_port(&sunzilog_reg, &up[1].port);
+ if (err) {
+ uart_remove_one_port(&sunzilog_reg, &up[0].port);
+ of_iounmap(&op->resource[0],
+ rp, sizeof(struct zilog_layout));
+ return err;
+ }
+ uart_inst++;
+ } else {
+ printk(KERN_INFO "%s: Keyboard at MMIO 0x%llx (irq = %d) "
+ "is a %s\n",
+ dev_name(&op->dev),
+ (unsigned long long) up[0].port.mapbase,
+ op->archdata.irqs[0], sunzilog_type(&up[0].port));
+ printk(KERN_INFO "%s: Mouse at MMIO 0x%llx (irq = %d) "
+ "is a %s\n",
+ dev_name(&op->dev),
+ (unsigned long long) up[1].port.mapbase,
+ op->archdata.irqs[0], sunzilog_type(&up[1].port));
+ kbm_inst++;
+ }
+
+ dev_set_drvdata(&op->dev, &up[0]);
+
+ return 0;
+}
+
+static void __devexit zs_remove_one(struct uart_sunzilog_port *up)
+{
+ if (ZS_IS_KEYB(up) || ZS_IS_MOUSE(up)) {
+#ifdef CONFIG_SERIO
+ serio_unregister_port(&up->serio);
+#endif
+ } else
+ uart_remove_one_port(&sunzilog_reg, &up->port);
+}
+
+static int __devexit zs_remove(struct platform_device *op)
+{
+ struct uart_sunzilog_port *up = dev_get_drvdata(&op->dev);
+ struct zilog_layout __iomem *regs;
+
+ zs_remove_one(&up[0]);
+ zs_remove_one(&up[1]);
+
+ regs = sunzilog_chip_regs[up[0].port.line / 2];
+ of_iounmap(&op->resource[0], regs, sizeof(struct zilog_layout));
+
+ dev_set_drvdata(&op->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id zs_match[] = {
+ {
+ .name = "zs",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, zs_match);
+
+static struct platform_driver zs_driver = {
+ .driver = {
+ .name = "zs",
+ .owner = THIS_MODULE,
+ .of_match_table = zs_match,
+ },
+ .probe = zs_probe,
+ .remove = __devexit_p(zs_remove),
+};
+
+static int __init sunzilog_init(void)
+{
+ struct device_node *dp;
+ int err;
+ int num_keybms = 0;
+ int num_sunzilog = 0;
+
+ for_each_node_by_name(dp, "zs") {
+ num_sunzilog++;
+ if (of_find_property(dp, "keyboard", NULL))
+ num_keybms++;
+ }
+
+ if (num_sunzilog) {
+ err = sunzilog_alloc_tables(num_sunzilog);
+ if (err)
+ goto out;
+
+ uart_chip_count = num_sunzilog - num_keybms;
+
+ err = sunserial_register_minors(&sunzilog_reg,
+ uart_chip_count * 2);
+ if (err)
+ goto out_free_tables;
+ }
+
+ err = platform_driver_register(&zs_driver);
+ if (err)
+ goto out_unregister_uart;
+
+ if (zilog_irq != -1) {
+ struct uart_sunzilog_port *up = sunzilog_irq_chain;
+ err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED,
+ "zs", sunzilog_irq_chain);
+ if (err)
+ goto out_unregister_driver;
+
+ /* Enable Interrupts */
+ while (up) {
+ struct zilog_channel __iomem *channel;
+
+ /* printk (KERN_INFO "Enable IRQ for ZILOG Hardware %p\n", up); */
+ channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
+ up->flags |= SUNZILOG_FLAG_ISR_HANDLER;
+ up->curregs[R9] |= MIE;
+ write_zsreg(channel, R9, up->curregs[R9]);
+ up = up->next;
+ }
+ }
+
+out:
+ return err;
+
+out_unregister_driver:
+ platform_driver_unregister(&zs_driver);
+
+out_unregister_uart:
+ if (num_sunzilog) {
+ sunserial_unregister_minors(&sunzilog_reg, num_sunzilog);
+ sunzilog_reg.cons = NULL;
+ }
+
+out_free_tables:
+ sunzilog_free_tables();
+ goto out;
+}
+
+static void __exit sunzilog_exit(void)
+{
+ platform_driver_unregister(&zs_driver);
+
+ if (zilog_irq != -1) {
+ struct uart_sunzilog_port *up = sunzilog_irq_chain;
+
+ /* Disable Interrupts */
+ while (up) {
+ struct zilog_channel __iomem *channel;
+
+ /* printk (KERN_INFO "Disable IRQ for ZILOG Hardware %p\n", up); */
+ channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
+ up->flags &= ~SUNZILOG_FLAG_ISR_HANDLER;
+ up->curregs[R9] &= ~MIE;
+ write_zsreg(channel, R9, up->curregs[R9]);
+ up = up->next;
+ }
+
+ free_irq(zilog_irq, sunzilog_irq_chain);
+ zilog_irq = -1;
+ }
+
+ if (sunzilog_reg.nr) {
+ sunserial_unregister_minors(&sunzilog_reg, sunzilog_reg.nr);
+ sunzilog_free_tables();
+ }
+}
+
+module_init(sunzilog_init);
+module_exit(sunzilog_exit);
+
+MODULE_AUTHOR("David S. Miller");
+MODULE_DESCRIPTION("Sun Zilog serial port driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/sunzilog.h b/drivers/tty/serial/sunzilog.h
new file mode 100644
index 00000000..5dec7b47
--- /dev/null
+++ b/drivers/tty/serial/sunzilog.h
@@ -0,0 +1,289 @@
+#ifndef _SUNZILOG_H
+#define _SUNZILOG_H
+
+struct zilog_channel {
+ volatile unsigned char control;
+ volatile unsigned char __pad1;
+ volatile unsigned char data;
+ volatile unsigned char __pad2;
+};
+
+struct zilog_layout {
+ struct zilog_channel channelB;
+ struct zilog_channel channelA;
+};
+
+#define NUM_ZSREGS 17
+#define R7p 16 /* Written as R7 while WR15 bit 0 (WR7pEN) is set; ESCC only */
+
+/* Conversion macros between BRG time constants and bits per second. */
+#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
+#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
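+/* For example, with the 4.9152 MHz Zilog clock divided by 16 as in
+ * sunzilog.c (freq = 307200), BPS_TO_BRG(9600, 307200) yields 14 and
+ * BRG_TO_BPS(14, 307200) gives back 9600.
+ */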
+
+/* The Zilog register set */
+
+#define FLAG 0x7e
+
+/* Write Register 0 */
+#define R0 0 /* Register selects */
+#define R1 1
+#define R2 2
+#define R3 3
+#define R4 4
+#define R5 5
+#define R6 6
+#define R7 7
+#define R8 8
+#define R9 9
+#define R10 10
+#define R11 11
+#define R12 12
+#define R13 13
+#define R14 14
+#define R15 15
+
+#define NULLCODE 0 /* Null Code */
+#define POINT_HIGH 0x8 /* Select upper half of registers */
+#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */
+#define SEND_ABORT 0x18 /* HDLC Abort */
+#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */
+#define RES_Tx_P 0x28 /* Reset TxINT Pending */
+#define ERR_RES 0x30 /* Error Reset */
+#define RES_H_IUS 0x38 /* Reset highest IUS */
+
+#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */
+#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */
+#define RES_EOM_L 0xC0 /* Reset EOM latch */
+
+/* Write Register 1 */
+
+#define EXT_INT_ENAB 0x1 /* Ext Int Enable */
+#define TxINT_ENAB 0x2 /* Tx Int Enable */
+#define PAR_SPEC 0x4 /* Parity is special condition */
+
+#define RxINT_DISAB 0 /* Rx Int Disable */
+#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */
+#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */
+#define INT_ERR_Rx 0x18 /* Int on error only */
+#define RxINT_MASK 0x18
+
+#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */
+#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */
+#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */
+
+/* Write Register #2 (Interrupt Vector) */
+
+/* Write Register 3 */
+
+#define RxENAB 0x1 /* Rx Enable */
+#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */
+#define ADD_SM 0x4 /* Address Search Mode (SDLC) */
+#define RxCRC_ENAB 0x8 /* Rx CRC Enable */
+#define ENT_HM 0x10 /* Enter Hunt Mode */
+#define AUTO_ENAB 0x20 /* Auto Enables */
+#define Rx5 0x0 /* Rx 5 Bits/Character */
+#define Rx7 0x40 /* Rx 7 Bits/Character */
+#define Rx6 0x80 /* Rx 6 Bits/Character */
+#define Rx8 0xc0 /* Rx 8 Bits/Character */
+#define RxN_MASK 0xc0
+
+/* Write Register 4 */
+
+#define PAR_ENAB 0x1 /* Parity Enable */
+#define PAR_EVEN 0x2 /* Parity Even/Odd* */
+
+#define SYNC_ENAB 0 /* Sync Modes Enable */
+#define SB1 0x4 /* 1 stop bit/char */
+#define SB15 0x8 /* 1.5 stop bits/char */
+#define SB2 0xc /* 2 stop bits/char */
+
+#define MONSYNC 0 /* 8 Bit Sync character */
+#define BISYNC 0x10 /* 16 bit sync character */
+#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */
+#define EXTSYNC 0x30 /* External Sync Mode */
+
+#define X1CLK 0x0 /* x1 clock mode */
+#define X16CLK 0x40 /* x16 clock mode */
+#define X32CLK 0x80 /* x32 clock mode */
+#define X64CLK 0xC0 /* x64 clock mode */
+#define XCLK_MASK 0xC0
+
+/* Write Register 5 */
+
+#define TxCRC_ENAB 0x1 /* Tx CRC Enable */
+#define RTS 0x2 /* RTS */
+#define SDLC_CRC 0x4 /* SDLC/CRC-16 */
+#define TxENAB 0x8 /* Tx Enable */
+#define SND_BRK 0x10 /* Send Break */
+#define Tx5 0x0 /* Tx 5 bits (or less)/character */
+#define Tx7 0x20 /* Tx 7 bits/character */
+#define Tx6 0x40 /* Tx 6 bits/character */
+#define Tx8 0x60 /* Tx 8 bits/character */
+#define TxN_MASK 0x60
+#define DTR 0x80 /* DTR */
+
+/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
+
+/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
+
+/* Write Register 7' (ESCC Only) */
+#define AUTO_TxFLAG 1 /* Automatic Tx SDLC Flag */
+#define AUTO_EOM_RST 2 /* Automatic EOM Reset */
+#define AUTOnRTS 4 /* Automatic /RTS pin deactivation */
+#define RxFIFO_LVL 8 /* Receive FIFO interrupt level */
+#define nDTRnREQ 0x10 /* /DTR/REQ timing */
+#define TxFIFO_LVL 0x20 /* Transmit FIFO interrupt level */
+#define EXT_RD_EN 0x40 /* Extended read register enable */
+
+/* Write Register 8 (transmit buffer) */
+
+/* Write Register 9 (Master interrupt control) */
+#define VIS 1 /* Vector Includes Status */
+#define NV 2 /* No Vector */
+#define DLC 4 /* Disable Lower Chain */
+#define MIE 8 /* Master Interrupt Enable */
+#define STATHI 0x10 /* Status high */
+#define SWIACK 0x20 /* Software Interrupt Ack (not on NMOS) */
+#define NORESET 0 /* No reset on write to R9 */
+#define CHRB 0x40 /* Reset channel B */
+#define CHRA 0x80 /* Reset channel A */
+#define FHWRES 0xc0 /* Force hardware reset */
+
+/* Write Register 10 (misc control bits) */
+#define BIT6 1 /* 6 bit/8bit sync */
+#define LOOPMODE 2 /* SDLC Loop mode */
+#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */
+#define MARKIDLE 8 /* Mark/flag on idle */
+#define GAOP 0x10 /* Go active on poll */
+#define NRZ 0 /* NRZ mode */
+#define NRZI 0x20 /* NRZI mode */
+#define FM1 0x40 /* FM1 (transition = 1) */
+#define FM0 0x60 /* FM0 (transition = 0) */
+#define CRCPS 0x80 /* CRC Preset I/O */
+
+/* Write Register 11 (Clock Mode control) */
+#define TRxCXT 0 /* TRxC = Xtal output */
+#define TRxCTC 1 /* TRxC = Transmit clock */
+#define TRxCBR 2 /* TRxC = BR Generator Output */
+#define TRxCDP 3 /* TRxC = DPLL output */
+#define TRxCOI 4 /* TRxC O/I */
+#define TCRTxCP 0 /* Transmit clock = RTxC pin */
+#define TCTRxCP 8 /* Transmit clock = TRxC pin */
+#define TCBR 0x10 /* Transmit clock = BR Generator output */
+#define TCDPLL 0x18 /* Transmit clock = DPLL output */
+#define RCRTxCP 0 /* Receive clock = RTxC pin */
+#define RCTRxCP 0x20 /* Receive clock = TRxC pin */
+#define RCBR 0x40 /* Receive clock = BR Generator output */
+#define RCDPLL 0x60 /* Receive clock = DPLL output */
+#define RTxCX 0x80 /* RTxC Xtal/No Xtal */
+
+/* Write Register 12 (lower byte of baud rate generator time constant) */
+
+/* Write Register 13 (upper byte of baud rate generator time constant) */
+
+/* Write Register 14 (Misc control bits) */
+#define BRENAB 1 /* Baud rate generator enable */
+#define BRSRC 2 /* Baud rate generator source */
+#define DTRREQ 4 /* DTR/Request function */
+#define AUTOECHO 8 /* Auto Echo */
+#define LOOPBAK 0x10 /* Local loopback */
+#define SEARCH 0x20 /* Enter search mode */
+#define RMC 0x40 /* Reset missing clock */
+#define DISDPLL 0x60 /* Disable DPLL */
+#define SSBR 0x80 /* Set DPLL source = BR generator */
+#define SSRTxC 0xa0 /* Set DPLL source = RTxC */
+#define SFMM 0xc0 /* Set FM mode */
+#define SNRZI 0xe0 /* Set NRZI mode */
+
+/* Write Register 15 (external/status interrupt control) */
+#define WR7pEN 1 /* WR7' Enable (ESCC only) */
+#define ZCIE 2 /* Zero count IE */
+#define FIFOEN 4 /* FIFO Enable (ESCC only) */
+#define DCDIE 8 /* DCD IE */
+#define SYNCIE 0x10 /* Sync/hunt IE */
+#define CTSIE 0x20 /* CTS IE */
+#define TxUIE 0x40 /* Tx Underrun/EOM IE */
+#define BRKIE 0x80 /* Break/Abort IE */
+
+
+/* Read Register 0 */
+#define Rx_CH_AV 0x1 /* Rx Character Available */
+#define ZCOUNT 0x2 /* Zero count */
+#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */
+#define DCD 0x8 /* DCD */
+#define SYNC 0x10 /* Sync/hunt */
+#define CTS 0x20 /* CTS */
+#define TxEOM 0x40 /* Tx underrun */
+#define BRK_ABRT 0x80 /* Break/Abort */
+
+/* Read Register 1 */
+#define ALL_SNT 0x1 /* All sent */
+/* Residue Data for 8 Rx bits/char programmed */
+#define RES3 0x8 /* 0/3 */
+#define RES4 0x4 /* 0/4 */
+#define RES5 0xc /* 0/5 */
+#define RES6 0x2 /* 0/6 */
+#define RES7 0xa /* 0/7 */
+#define RES8 0x6 /* 0/8 */
+#define RES18 0xe /* 1/8 */
+#define RES28 0x0 /* 2/8 */
+/* Special Rx Condition Interrupts */
+#define PAR_ERR 0x10 /* Parity error */
+#define Rx_OVR 0x20 /* Rx Overrun Error */
+#define CRC_ERR 0x40 /* CRC/Framing Error */
+#define END_FR 0x80 /* End of Frame (SDLC) */
+
+/* Read Register 2 (channel b only) - Interrupt vector */
+#define CHB_Tx_EMPTY 0x00
+#define CHB_EXT_STAT 0x02
+#define CHB_Rx_AVAIL 0x04
+#define CHB_SPECIAL 0x06
+#define CHA_Tx_EMPTY 0x08
+#define CHA_EXT_STAT 0x0a
+#define CHA_Rx_AVAIL 0x0c
+#define CHA_SPECIAL 0x0e
+#define STATUS_MASK 0x0e
+
+/* Read Register 3 (interrupt pending register) ch a only */
+#define CHBEXT 0x1 /* Channel B Ext/Stat IP */
+#define CHBTxIP 0x2 /* Channel B Tx IP */
+#define CHBRxIP 0x4 /* Channel B Rx IP */
+#define CHAEXT 0x8 /* Channel A Ext/Stat IP */
+#define CHATxIP 0x10 /* Channel A Tx IP */
+#define CHARxIP 0x20 /* Channel A Rx IP */
+
+/* Read Register 6 (LSB frame byte count [Not on NMOS]) */
+
+/* Read Register 7 (MSB frame byte count and FIFO status [Not on NMOS]) */
+
+/* Read Register 8 (receive data register) */
+
+/* Read Register 10 (misc status bits) */
+#define ONLOOP 2 /* On loop */
+#define LOOPSEND 0x10 /* Loop sending */
+#define CLK2MIS 0x40 /* Two clocks missing */
+#define CLK1MIS 0x80 /* One clock missing */
+
+/* Read Register 12 (lower byte of baud rate generator constant) */
+
+/* Read Register 13 (upper byte of baud rate generator constant) */
+
+/* Read Register 15 (value of WR 15) */
+
+/* Misc macros */
+#define ZS_CLEARERR(channel) do { sbus_writeb(ERR_RES, &channel->control); \
+ udelay(5); } while(0)
+
+#define ZS_CLEARSTAT(channel) do { sbus_writeb(RES_EXT_INT, &channel->control); \
+ udelay(5); } while(0)
+
+#define ZS_CLEARFIFO(channel) do { sbus_readb(&channel->data); \
+ udelay(2); \
+ sbus_readb(&channel->data); \
+ udelay(2); \
+ sbus_readb(&channel->data); \
+ udelay(2); } while(0)
+
+#endif /* _SUNZILOG_H */
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
new file mode 100644
index 00000000..1f36b7eb
--- /dev/null
+++ b/drivers/tty/serial/timbuart.c
@@ -0,0 +1,531 @@
+/*
+ * timbuart.c timberdale FPGA UART driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA UART
+ */
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/serial_core.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+#include "timbuart.h"
+
+struct timbuart_port {
+ struct uart_port port;
+ struct tasklet_struct tasklet;
+ int usedma;
+ u32 last_ier;
+ struct platform_device *dev;
+};
+
+static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800,
+ 921600, 1843200, 3250000};
+
+static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier);
+
+static irqreturn_t timbuart_handleinterrupt(int irq, void *devid);
+
+static void timbuart_stop_rx(struct uart_port *port)
+{
+ /* spin lock held by upper layer, disable all RX interrupts */
+ u32 ier = ioread32(port->membase + TIMBUART_IER) & ~RXFLAGS;
+ iowrite32(ier, port->membase + TIMBUART_IER);
+}
+
+static void timbuart_stop_tx(struct uart_port *port)
+{
+ /* spinlock held by upper layer, disable TX interrupt */
+ u32 ier = ioread32(port->membase + TIMBUART_IER) & ~TXBAE;
+ iowrite32(ier, port->membase + TIMBUART_IER);
+}
+
+static void timbuart_start_tx(struct uart_port *port)
+{
+ struct timbuart_port *uart =
+ container_of(port, struct timbuart_port, port);
+
+ /* do not transfer anything here -> fire off the tasklet */
+ tasklet_schedule(&uart->tasklet);
+}
+
+static unsigned int timbuart_tx_empty(struct uart_port *port)
+{
+ u32 isr = ioread32(port->membase + TIMBUART_ISR);
+
+ return (isr & TXBE) ? TIOCSER_TEMT : 0;
+}
+
+static void timbuart_flush_buffer(struct uart_port *port)
+{
+ if (!timbuart_tx_empty(port)) {
+ u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
+ TIMBUART_CTRL_FLSHTX;
+
+ iowrite8(ctl, port->membase + TIMBUART_CTRL);
+ iowrite32(TXBF, port->membase + TIMBUART_ISR);
+ }
+}
+
+static void timbuart_rx_chars(struct uart_port *port)
+{
+ struct tty_struct *tty = port->state->port.tty;
+
+ while (ioread32(port->membase + TIMBUART_ISR) & RXDP) {
+ u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
+ port->icount.rx++;
+ tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ }
+
+ spin_unlock(&port->lock);
+ tty_flip_buffer_push(port->state->port.tty);
+ spin_lock(&port->lock);
+
+ dev_dbg(port->dev, "%s - total read %d bytes\n",
+ __func__, port->icount.rx);
+}
+
+static void timbuart_tx_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+
+ while (!(ioread32(port->membase + TIMBUART_ISR) & TXBF) &&
+ !uart_circ_empty(xmit)) {
+ iowrite8(xmit->buf[xmit->tail],
+ port->membase + TIMBUART_TXFIFO);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+
+ dev_dbg(port->dev,
+ "%s - total written %d bytes, CTL: %x, RTS: %x, baud: %x\n",
+ __func__,
+ port->icount.tx,
+ ioread8(port->membase + TIMBUART_CTRL),
+ port->mctrl & TIOCM_RTS,
+ ioread8(port->membase + TIMBUART_BAUDRATE));
+}
+
+static void timbuart_handle_tx_port(struct uart_port *port, u32 isr, u32 *ier)
+{
+ struct timbuart_port *uart =
+ container_of(port, struct timbuart_port, port);
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+ return;
+
+ if (port->x_char)
+ return;
+
+ if (isr & TXFLAGS) {
+ timbuart_tx_chars(port);
+ /* clear all TX interrupts */
+ iowrite32(TXFLAGS, port->membase + TIMBUART_ISR);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+ } else
+ /* Re-enable any tx interrupt */
+ *ier |= uart->last_ier & TXFLAGS;
+
+ /* Keep the almost-empty interrupt enabled if there are still chars
+ * in the transmit buffer; the upper layer is woken later, when that
+ * interrupt arrives, to give the bytes some time to go out...
+ */
+ if (!uart_circ_empty(xmit))
+ *ier |= TXBAE;
+
+ dev_dbg(port->dev, "%s - leaving\n", __func__);
+}
+
+void timbuart_handle_rx_port(struct uart_port *port, u32 isr, u32 *ier)
+{
+ if (isr & RXFLAGS) {
+ /* Some RX status is set */
+ if (isr & RXBF) {
+ u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
+ TIMBUART_CTRL_FLSHRX;
+ iowrite8(ctl, port->membase + TIMBUART_CTRL);
+ port->icount.overrun++;
+ } else if (isr & (RXDP))
+ timbuart_rx_chars(port);
+
+ /* ack all RX interrupts */
+ iowrite32(RXFLAGS, port->membase + TIMBUART_ISR);
+ }
+
+ /* always have the RX interrupts enabled */
+ *ier |= RXBAF | RXBF | RXTT;
+
+ dev_dbg(port->dev, "%s - leaving\n", __func__);
+}
+
+void timbuart_tasklet(unsigned long arg)
+{
+ struct timbuart_port *uart = (struct timbuart_port *)arg;
+ u32 isr, ier = 0;
+
+ spin_lock(&uart->port.lock);
+
+ isr = ioread32(uart->port.membase + TIMBUART_ISR);
+ dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
+
+ if (!uart->usedma)
+ timbuart_handle_tx_port(&uart->port, isr, &ier);
+
+ timbuart_mctrl_check(&uart->port, isr, &ier);
+
+ if (!uart->usedma)
+ timbuart_handle_rx_port(&uart->port, isr, &ier);
+
+ iowrite32(ier, uart->port.membase + TIMBUART_IER);
+
+ spin_unlock(&uart->port.lock);
+ dev_dbg(uart->port.dev, "%s leaving\n", __func__);
+}
+
+static unsigned int timbuart_get_mctrl(struct uart_port *port)
+{
+ u8 cts = ioread8(port->membase + TIMBUART_CTRL);
+ dev_dbg(port->dev, "%s - cts %x\n", __func__, cts);
+
+ if (cts & TIMBUART_CTRL_CTS)
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+ else
+ return TIOCM_DSR | TIOCM_CAR;
+}
+
+static void timbuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ dev_dbg(port->dev, "%s - %x\n", __func__, mctrl);
+
+ if (mctrl & TIOCM_RTS)
+ iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
+ else
+ iowrite8(0, port->membase + TIMBUART_CTRL);
+}
+
+static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier)
+{
+ unsigned int cts;
+
+ if (isr & CTS_DELTA) {
+ /* ack */
+ iowrite32(CTS_DELTA, port->membase + TIMBUART_ISR);
+ cts = timbuart_get_mctrl(port);
+ uart_handle_cts_change(port, cts & TIOCM_CTS);
+ wake_up_interruptible(&port->state->port.delta_msr_wait);
+ }
+
+ *ier |= CTS_DELTA;
+}
+
+static void timbuart_enable_ms(struct uart_port *port)
+{
+ /* N/A */
+}
+
+static void timbuart_break_ctl(struct uart_port *port, int ctl)
+{
+ /* N/A */
+}
+
+static int timbuart_startup(struct uart_port *port)
+{
+ struct timbuart_port *uart =
+ container_of(port, struct timbuart_port, port);
+
+ dev_dbg(port->dev, "%s\n", __func__);
+
+ iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL);
+ iowrite32(0x1ff, port->membase + TIMBUART_ISR);
+ /* Enable all but TX interrupts */
+ iowrite32(RXBAF | RXBF | RXTT | CTS_DELTA,
+ port->membase + TIMBUART_IER);
+
+ return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED,
+ "timb-uart", uart);
+}
+
+static void timbuart_shutdown(struct uart_port *port)
+{
+ struct timbuart_port *uart =
+ container_of(port, struct timbuart_port, port);
+ dev_dbg(port->dev, "%s\n", __func__);
+ free_irq(port->irq, uart);
+ iowrite32(0, port->membase + TIMBUART_IER);
+}
+
+static int get_bindex(int baud)
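+/* Map a requested rate to the index of the smallest supported rate that
+ * is at least as large (e.g. 57600 -> index 3, 100000 -> index 4, i.e.
+ * 115200); returns -1 if the request exceeds the fastest table entry.
+ */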
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(baudrates); i++)
+ if (baud <= baudrates[i])
+ return i;
+
+ return -1;
+}
+
+static void timbuart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int baud;
+ short bindex;
+ unsigned long flags;
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
+ bindex = get_bindex(baud);
+ dev_dbg(port->dev, "%s - bindex %d\n", __func__, bindex);
+
+ if (bindex < 0)
+ bindex = 0;
+ baud = baudrates[bindex];
+
+ /* The serial layer calls into this once with old = NULL when setting
+ up initially */
+ if (old)
+ tty_termios_copy_hw(termios, old);
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ spin_lock_irqsave(&port->lock, flags);
+ iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *timbuart_type(struct uart_port *port)
+{
+ return port->type == PORT_UNKNOWN ? "timbuart" : NULL;
+}
+
+/* We do not request/release mappings of the registers here,
+ * currently it's done in the probe function.
+ */
+static void timbuart_release_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ int size =
+ resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
+
+ if (port->flags & UPF_IOREMAP) {
+ iounmap(port->membase);
+ port->membase = NULL;
+ }
+
+ release_mem_region(port->mapbase, size);
+}
+
+static int timbuart_request_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ int size =
+ resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
+
+ if (!request_mem_region(port->mapbase, size, "timb-uart"))
+ return -EBUSY;
+
+ if (port->flags & UPF_IOREMAP) {
+ port->membase = ioremap(port->mapbase, size);
+ if (port->membase == NULL) {
+ release_mem_region(port->mapbase, size);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static irqreturn_t timbuart_handleinterrupt(int irq, void *devid)
+{
+ struct timbuart_port *uart = (struct timbuart_port *)devid;
+
+ if (ioread8(uart->port.membase + TIMBUART_IPR)) {
+ uart->last_ier = ioread32(uart->port.membase + TIMBUART_IER);
+
+ /* disable interrupts, the tasklet enables them again */
+ iowrite32(0, uart->port.membase + TIMBUART_IER);
+
+ /* fire off bottom half */
+ tasklet_schedule(&uart->tasklet);
+
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void timbuart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_TIMBUART;
+ timbuart_request_port(port);
+ }
+}
+
+static int timbuart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ /* we don't want the core code to modify any port params */
+ return -EINVAL;
+}
+
+static struct uart_ops timbuart_ops = {
+ .tx_empty = timbuart_tx_empty,
+ .set_mctrl = timbuart_set_mctrl,
+ .get_mctrl = timbuart_get_mctrl,
+ .stop_tx = timbuart_stop_tx,
+ .start_tx = timbuart_start_tx,
+ .flush_buffer = timbuart_flush_buffer,
+ .stop_rx = timbuart_stop_rx,
+ .enable_ms = timbuart_enable_ms,
+ .break_ctl = timbuart_break_ctl,
+ .startup = timbuart_startup,
+ .shutdown = timbuart_shutdown,
+ .set_termios = timbuart_set_termios,
+ .type = timbuart_type,
+ .release_port = timbuart_release_port,
+ .request_port = timbuart_request_port,
+ .config_port = timbuart_config_port,
+ .verify_port = timbuart_verify_port
+};
+
+static struct uart_driver timbuart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "timberdale_uart",
+ .dev_name = "ttyTU",
+ .major = TIMBUART_MAJOR,
+ .minor = TIMBUART_MINOR,
+ .nr = 1
+};
+
+static int __devinit timbuart_probe(struct platform_device *dev)
+{
+ int err, irq;
+ struct timbuart_port *uart;
+ struct resource *iomem;
+
+ dev_dbg(&dev->dev, "%s\n", __func__);
+
+ uart = kzalloc(sizeof(*uart), GFP_KERNEL);
+ if (!uart) {
+ err = -EINVAL;
+ goto err_mem;
+ }
+
+ uart->usedma = 0;
+
+ uart->port.uartclk = 3250000 * 16;
+ uart->port.fifosize = TIMBUART_FIFO_SIZE;
+ uart->port.regshift = 2;
+ uart->port.iotype = UPIO_MEM;
+ uart->port.ops = &timbuart_ops;
+ uart->port.irq = 0;
+ uart->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
+ uart->port.line = 0;
+ uart->port.dev = &dev->dev;
+
+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ err = -ENOMEM;
+ goto err_register;
+ }
+ uart->port.mapbase = iomem->start;
+ uart->port.membase = NULL;
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ err = -EINVAL;
+ goto err_register;
+ }
+ uart->port.irq = irq;
+
+ tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
+
+ err = uart_register_driver(&timbuart_driver);
+ if (err)
+ goto err_register;
+
+ err = uart_add_one_port(&timbuart_driver, &uart->port);
+ if (err)
+ goto err_add_port;
+
+ platform_set_drvdata(dev, uart);
+
+ return 0;
+
+err_add_port:
+ uart_unregister_driver(&timbuart_driver);
+err_register:
+ kfree(uart);
+err_mem:
+ printk(KERN_ERR "timberdale: Failed to register Timberdale UART: %d\n",
+ err);
+
+ return err;
+}
+
+static int __devexit timbuart_remove(struct platform_device *dev)
+{
+ struct timbuart_port *uart = platform_get_drvdata(dev);
+
+ tasklet_kill(&uart->tasklet);
+ uart_remove_one_port(&timbuart_driver, &uart->port);
+ uart_unregister_driver(&timbuart_driver);
+ kfree(uart);
+
+ return 0;
+}
+
+static struct platform_driver timbuart_platform_driver = {
+ .driver = {
+ .name = "timb-uart",
+ .owner = THIS_MODULE,
+ },
+ .probe = timbuart_probe,
+ .remove = __devexit_p(timbuart_remove),
+};
+
+/*--------------------------------------------------------------------------*/
+
+static int __init timbuart_init(void)
+{
+ return platform_driver_register(&timbuart_platform_driver);
+}
+
+static void __exit timbuart_exit(void)
+{
+ platform_driver_unregister(&timbuart_platform_driver);
+}
+
+module_init(timbuart_init);
+module_exit(timbuart_exit);
+
+MODULE_DESCRIPTION("Timberdale UART driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:timb-uart");
+
diff --git a/drivers/tty/serial/timbuart.h b/drivers/tty/serial/timbuart.h
new file mode 100644
index 00000000..7e566766
--- /dev/null
+++ b/drivers/tty/serial/timbuart.h
@@ -0,0 +1,58 @@
+/*
+ * timbuart.h timberdale FPGA UART driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA UART
+ */
+
+#ifndef _TIMBUART_H
+#define _TIMBUART_H
+
+#define TIMBUART_FIFO_SIZE 2048
+
+#define TIMBUART_RXFIFO 0x08
+#define TIMBUART_TXFIFO 0x0c
+#define TIMBUART_IER 0x10
+#define TIMBUART_IPR 0x14
+#define TIMBUART_ISR 0x18
+#define TIMBUART_CTRL 0x1c
+#define TIMBUART_BAUDRATE 0x20
+
+#define TIMBUART_CTRL_RTS 0x01
+#define TIMBUART_CTRL_CTS 0x02
+#define TIMBUART_CTRL_FLSHTX 0x40
+#define TIMBUART_CTRL_FLSHRX 0x80
+
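+/* Interrupt flag bits, used with the IER and ISR interrupt registers */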
+#define TXBF 0x01
+#define TXBAE 0x02
+#define CTS_DELTA 0x04
+#define RXDP 0x08
+#define RXBAF 0x10
+#define RXBF 0x20
+#define RXTT 0x40
+#define RXBNAE 0x80
+#define TXBE 0x100
+
+#define RXFLAGS (RXDP | RXBAF | RXBF | RXTT | RXBNAE)
+#define TXFLAGS (TXBF | TXBAE)
+
+#define TIMBUART_MAJOR 204
+#define TIMBUART_MINOR 192
+
+#endif /* _TIMBUART_H */
+
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
new file mode 100644
index 00000000..8af1ed83
--- /dev/null
+++ b/drivers/tty/serial/uartlite.c
@@ -0,0 +1,654 @@
+/*
+ * uartlite.c: Serial driver for Xilinx uartlite serial controller
+ *
+ * Copyright (C) 2006 Peter Korsgaard <jacmet@sunsite.dk>
+ * Copyright (C) 2007 Secret Lab Technologies Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#define ULITE_NAME "ttyUL"
+#define ULITE_MAJOR 204
+#define ULITE_MINOR 187
+#define ULITE_NR_UARTS 4
+
+/* ---------------------------------------------------------------------
+ * Register definitions
+ *
+ * For register details see datasheet:
+ * http://www.xilinx.com/support/documentation/ip_documentation/opb_uartlite.pdf
+ */
+
+#define ULITE_RX 0x00
+#define ULITE_TX 0x04
+#define ULITE_STATUS 0x08
+#define ULITE_CONTROL 0x0c
+
+#define ULITE_REGION 16
+
+#define ULITE_STATUS_RXVALID 0x01
+#define ULITE_STATUS_RXFULL 0x02
+#define ULITE_STATUS_TXEMPTY 0x04
+#define ULITE_STATUS_TXFULL 0x08
+#define ULITE_STATUS_IE 0x10
+#define ULITE_STATUS_OVERRUN 0x20
+#define ULITE_STATUS_FRAME 0x40
+#define ULITE_STATUS_PARITY 0x80
+
+#define ULITE_CONTROL_RST_TX 0x01
+#define ULITE_CONTROL_RST_RX 0x02
+#define ULITE_CONTROL_IE 0x10
+
+
+static struct uart_port ulite_ports[ULITE_NR_UARTS];
+
+/* ---------------------------------------------------------------------
+ * Core UART driver operations
+ */
+
+static int ulite_receive(struct uart_port *port, int stat)
+{
+ struct tty_struct *tty = port->state->port.tty;
+ unsigned char ch = 0;
+ char flag = TTY_NORMAL;
+
+ if ((stat & (ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
+ | ULITE_STATUS_FRAME)) == 0)
+ return 0;
+
+ /* stats */
+ if (stat & ULITE_STATUS_RXVALID) {
+ port->icount.rx++;
+ ch = ioread32be(port->membase + ULITE_RX);
+
+ if (stat & ULITE_STATUS_PARITY)
+ port->icount.parity++;
+ }
+
+ if (stat & ULITE_STATUS_OVERRUN)
+ port->icount.overrun++;
+
+ if (stat & ULITE_STATUS_FRAME)
+ port->icount.frame++;
+
+
+ /* drop byte with parity error if IGNPAR specified */
+ if (stat & port->ignore_status_mask & ULITE_STATUS_PARITY)
+ stat &= ~ULITE_STATUS_RXVALID;
+
+ stat &= port->read_status_mask;
+
+ if (stat & ULITE_STATUS_PARITY)
+ flag = TTY_PARITY;
+
+
+ stat &= ~port->ignore_status_mask;
+
+ if (stat & ULITE_STATUS_RXVALID)
+ tty_insert_flip_char(tty, ch, flag);
+
+ if (stat & ULITE_STATUS_FRAME)
+ tty_insert_flip_char(tty, 0, TTY_FRAME);
+
+ if (stat & ULITE_STATUS_OVERRUN)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+
+ return 1;
+}
+
+static int ulite_transmit(struct uart_port *port, int stat)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (stat & ULITE_STATUS_TXFULL)
+ return 0;
+
+ if (port->x_char) {
+ iowrite32be(port->x_char, port->membase + ULITE_TX);
+ port->x_char = 0;
+ port->icount.tx++;
+ return 1;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+ return 0;
+
+ iowrite32be(xmit->buf[xmit->tail], port->membase + ULITE_TX);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
+ port->icount.tx++;
+
+ /* wake up */
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ return 1;
+}
+
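+/*
+ * Interrupt handler: service RX and TX until neither makes progress.
+ * n counts the passes; the final pass finds no work, so n > 1 means at
+ * least one productive pass, i.e. the interrupt was ours to handle.
+ */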
+static irqreturn_t ulite_isr(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ int busy, n = 0;
+
+ do {
+ int stat = ioread32be(port->membase + ULITE_STATUS);
+ busy = ulite_receive(port, stat);
+ busy |= ulite_transmit(port, stat);
+ n++;
+ } while (busy);
+
+ /* work done? */
+ if (n > 1) {
+ tty_flip_buffer_push(port->state->port.tty);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
+}
+
+static unsigned int ulite_tx_empty(struct uart_port *port)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(&port->lock, flags);
+ ret = ioread32be(port->membase + ULITE_STATUS);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int ulite_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+}
+
+static void ulite_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ /* N/A */
+}
+
+static void ulite_stop_tx(struct uart_port *port)
+{
+ /* N/A */
+}
+
+static void ulite_start_tx(struct uart_port *port)
+{
+ ulite_transmit(port, ioread32be(port->membase + ULITE_STATUS));
+}
+
+static void ulite_stop_rx(struct uart_port *port)
+{
+ /* don't forward any more data (like !CREAD) */
+ port->ignore_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY
+ | ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
+}
+
+static void ulite_enable_ms(struct uart_port *port)
+{
+ /* N/A */
+}
+
+static void ulite_break_ctl(struct uart_port *port, int ctl)
+{
+ /* N/A */
+}
+
+static int ulite_startup(struct uart_port *port)
+{
+ int ret;
+
+ ret = request_irq(port->irq, ulite_isr,
+ IRQF_SHARED | IRQF_SAMPLE_RANDOM, "uartlite", port);
+ if (ret)
+ return ret;
+
+ iowrite32be(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
+ port->membase + ULITE_CONTROL);
+ iowrite32be(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+
+ return 0;
+}
+
+static void ulite_shutdown(struct uart_port *port)
+{
+ iowrite32be(0, port->membase + ULITE_CONTROL);
+ ioread32be(port->membase + ULITE_CONTROL); /* dummy */
+ free_irq(port->irq, port);
+}
+
+static void ulite_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned int baud;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ port->read_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
+ | ULITE_STATUS_TXFULL;
+
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |=
+ ULITE_STATUS_PARITY | ULITE_STATUS_FRAME;
+
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= ULITE_STATUS_PARITY
+ | ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
+
+ /* ignore all characters if CREAD is not set */
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |=
+ ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY
+ | ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
+
+ /* update timeout */
+ baud = uart_get_baud_rate(port, termios, old, 0, 460800);
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *ulite_type(struct uart_port *port)
+{
+ return port->type == PORT_UARTLITE ? "uartlite" : NULL;
+}
+
+static void ulite_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase, ULITE_REGION);
+ iounmap(port->membase);
+ port->membase = NULL;
+}
+
+static int ulite_request_port(struct uart_port *port)
+{
+ pr_debug("ulite console: port=%p; port->mapbase=%llx\n",
+ port, (unsigned long long) port->mapbase);
+
+ if (!request_mem_region(port->mapbase, ULITE_REGION, "uartlite")) {
+ dev_err(port->dev, "Memory region busy\n");
+ return -EBUSY;
+ }
+
+ port->membase = ioremap(port->mapbase, ULITE_REGION);
+ if (!port->membase) {
+ dev_err(port->dev, "Unable to map registers\n");
+ release_mem_region(port->mapbase, ULITE_REGION);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void ulite_config_port(struct uart_port *port, int flags)
+{
+ if (!ulite_request_port(port))
+ port->type = PORT_UARTLITE;
+}
+
+static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ /* we don't want the core code to modify any port params */
+ return -EINVAL;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int ulite_get_poll_char(struct uart_port *port)
+{
+ if (!(ioread32be(port->membase + ULITE_STATUS)
+ & ULITE_STATUS_RXVALID))
+ return NO_POLL_CHAR;
+
+ return ioread32be(port->membase + ULITE_RX);
+}
+
+static void ulite_put_poll_char(struct uart_port *port, unsigned char ch)
+{
+ while (ioread32be(port->membase + ULITE_STATUS) & ULITE_STATUS_TXFULL)
+ cpu_relax();
+
+ /* write char to device */
+ iowrite32be(ch, port->membase + ULITE_TX);
+}
+#endif
+
+static struct uart_ops ulite_ops = {
+ .tx_empty = ulite_tx_empty,
+ .set_mctrl = ulite_set_mctrl,
+ .get_mctrl = ulite_get_mctrl,
+ .stop_tx = ulite_stop_tx,
+ .start_tx = ulite_start_tx,
+ .stop_rx = ulite_stop_rx,
+ .enable_ms = ulite_enable_ms,
+ .break_ctl = ulite_break_ctl,
+ .startup = ulite_startup,
+ .shutdown = ulite_shutdown,
+ .set_termios = ulite_set_termios,
+ .type = ulite_type,
+ .release_port = ulite_release_port,
+ .request_port = ulite_request_port,
+ .config_port = ulite_config_port,
+ .verify_port = ulite_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = ulite_get_poll_char,
+ .poll_put_char = ulite_put_poll_char,
+#endif
+};
+
+/* ---------------------------------------------------------------------
+ * Console driver operations
+ */
+
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+static void ulite_console_wait_tx(struct uart_port *port)
+{
+ int i;
+ u8 val;
+
+ /* Spin waiting for TX fifo to have space available */
+ for (i = 0; i < 100000; i++) {
+ val = ioread32be(port->membase + ULITE_STATUS);
+ if ((val & ULITE_STATUS_TXFULL) == 0)
+ break;
+ cpu_relax();
+ }
+}
+
+static void ulite_console_putchar(struct uart_port *port, int ch)
+{
+ ulite_console_wait_tx(port);
+ iowrite32be(ch, port->membase + ULITE_TX);
+}
+
+static void ulite_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port = &ulite_ports[co->index];
+ unsigned long flags;
+ unsigned int ier;
+ int locked = 1;
+
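+ /*
+ * If an oops is in progress the port lock may already be held on
+ * this CPU, so only try to take it; otherwise lock normally.
+ */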
+ if (oops_in_progress) {
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ } else
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* save and disable interrupt */
+ ier = ioread32be(port->membase + ULITE_STATUS) & ULITE_STATUS_IE;
+ iowrite32be(0, port->membase + ULITE_CONTROL);
+
+ uart_console_write(port, s, count, ulite_console_putchar);
+
+ ulite_console_wait_tx(port);
+
+ /* restore interrupt state */
+ if (ier)
+ iowrite32be(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+
+ if (locked)
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int __devinit ulite_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index < 0 || co->index >= ULITE_NR_UARTS)
+ return -EINVAL;
+
+ port = &ulite_ports[co->index];
+
+ /* Has the device been initialized yet? */
+ if (!port->mapbase) {
+ pr_debug("console on ttyUL%i not present\n", co->index);
+ return -ENODEV;
+ }
+
+ /* not initialized yet? */
+ if (!port->membase) {
+ if (ulite_request_port(port))
+ return -ENODEV;
+ }
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver ulite_uart_driver;
+
+static struct console ulite_console = {
+ .name = ULITE_NAME,
+ .write = ulite_console_write,
+ .device = uart_console_device,
+ .setup = ulite_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1, /* Specified on the cmdline (e.g. console=ttyUL0) */
+ .data = &ulite_uart_driver,
+};
+
+static int __init ulite_console_init(void)
+{
+ register_console(&ulite_console);
+ return 0;
+}
+
+console_initcall(ulite_console_init);
+
+#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */
+
+static struct uart_driver ulite_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "uartlite",
+ .dev_name = ULITE_NAME,
+ .major = ULITE_MAJOR,
+ .minor = ULITE_MINOR,
+ .nr = ULITE_NR_UARTS,
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+ .cons = &ulite_console,
+#endif
+};
+
+/* ---------------------------------------------------------------------
+ * Port assignment functions (mapping devices to uart_port structures)
+ */
+
+/** ulite_assign: register a uartlite device with the driver
+ *
+ * @dev: pointer to device structure
+ * @id: requested id number. Pass -1 for automatic port assignment
+ * @base: base address of uartlite registers
+ * @irq: irq number for uartlite
+ *
+ * Returns: 0 on success, <0 otherwise
+ */
+static int __devinit ulite_assign(struct device *dev, int id, u32 base, int irq)
+{
+ struct uart_port *port;
+ int rc;
+
+ /* if id = -1; then scan for a free id and use that */
+ if (id < 0) {
+ for (id = 0; id < ULITE_NR_UARTS; id++)
+ if (ulite_ports[id].mapbase == 0)
+ break;
+ }
+ if (id < 0 || id >= ULITE_NR_UARTS) {
+ dev_err(dev, "%s%i too large\n", ULITE_NAME, id);
+ return -EINVAL;
+ }
+
+ if ((ulite_ports[id].mapbase) && (ulite_ports[id].mapbase != base)) {
+ dev_err(dev, "cannot assign to %s%i; it is already in use\n",
+ ULITE_NAME, id);
+ return -EBUSY;
+ }
+
+ port = &ulite_ports[id];
+
+ spin_lock_init(&port->lock);
+ port->fifosize = 16;
+ port->regshift = 2;
+ port->iotype = UPIO_MEM;
+ port->iobase = 1; /* mark port in use */
+ port->mapbase = base;
+ port->membase = NULL;
+ port->ops = &ulite_ops;
+ port->irq = irq;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->dev = dev;
+ port->type = PORT_UNKNOWN;
+ port->line = id;
+
+ dev_set_drvdata(dev, port);
+
+ /* Register the port */
+ rc = uart_add_one_port(&ulite_uart_driver, port);
+ if (rc) {
+ dev_err(dev, "uart_add_one_port() failed; err=%i\n", rc);
+ port->mapbase = 0;
+ dev_set_drvdata(dev, NULL);
+ return rc;
+ }
+
+ return 0;
+}
+
+/** ulite_release: release a uartlite device from the driver
+ *
+ * @dev: pointer to device structure
+ */
+static int __devexit ulite_release(struct device *dev)
+{
+ struct uart_port *port = dev_get_drvdata(dev);
+ int rc = 0;
+
+ if (port) {
+ rc = uart_remove_one_port(&ulite_uart_driver, port);
+ dev_set_drvdata(dev, NULL);
+ port->mapbase = 0;
+ }
+
+ return rc;
+}
+
+/* ---------------------------------------------------------------------
+ * Platform bus binding
+ */
+
+#if defined(CONFIG_OF)
+/* Match table for of_platform binding */
+static struct of_device_id ulite_of_match[] __devinitdata = {
+ { .compatible = "xlnx,opb-uartlite-1.00.b", },
+ { .compatible = "xlnx,xps-uartlite-1.00.a", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ulite_of_match);
+#else /* CONFIG_OF */
+#define ulite_of_match NULL
+#endif /* CONFIG_OF */
+
+static int __devinit ulite_probe(struct platform_device *pdev)
+{
+ struct resource *res, *res2;
+ int id = pdev->id;
+#ifdef CONFIG_OF
+ const __be32 *prop;
+
+ prop = of_get_property(pdev->dev.of_node, "port-number", NULL);
+ if (prop)
+ id = be32_to_cpup(prop);
+#endif
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res2)
+ return -ENODEV;
+
+ return ulite_assign(&pdev->dev, id, res->start, res2->start);
+}
+
+static int __devexit ulite_remove(struct platform_device *pdev)
+{
+ return ulite_release(&pdev->dev);
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:uartlite");
+
+static struct platform_driver ulite_platform_driver = {
+ .probe = ulite_probe,
+ .remove = __devexit_p(ulite_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "uartlite",
+ .of_match_table = ulite_of_match,
+ },
+};
+
+/* ---------------------------------------------------------------------
+ * Module setup/teardown
+ */
+
+int __init ulite_init(void)
+{
+ int ret;
+
+ pr_debug("uartlite: calling uart_register_driver()\n");
+ ret = uart_register_driver(&ulite_uart_driver);
+ if (ret)
+ goto err_uart;
+
+ pr_debug("uartlite: calling platform_driver_register()\n");
+ ret = platform_driver_register(&ulite_platform_driver);
+ if (ret)
+ goto err_plat;
+
+ return 0;
+
+err_plat:
+ uart_unregister_driver(&ulite_uart_driver);
+err_uart:
+ printk(KERN_ERR "registering uartlite driver failed: err=%i", ret);
+ return ret;
+}
+
+void __exit ulite_exit(void)
+{
+ platform_driver_unregister(&ulite_platform_driver);
+ uart_unregister_driver(&ulite_uart_driver);
+}
+
+module_init(ulite_init);
+module_exit(ulite_exit);
+
+MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
+MODULE_DESCRIPTION("Xilinx uartlite serial driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
new file mode 100644
index 00000000..c327218c
--- /dev/null
+++ b/drivers/tty/serial/ucc_uart.c
@@ -0,0 +1,1539 @@
+/*
+ * Freescale QUICC Engine UART device driver
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * Copyright 2007 Freescale Semiconductor, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * This driver adds support for UART devices via Freescale's QUICC Engine
+ * found on some Freescale SOCs.
+ *
+ * If Soft-UART support is needed but not already present, then this driver
+ * will request and upload the "Soft-UART" microcode upon probe. The
+ * filename of the microcode should be fsl_qe_ucode_uart_X_YZ.bin, where "X"
+ * is the name of the SOC (e.g. 8323), and "YZ" is the revision of the SOC
+ * (e.g. "11" for 1.1).
+ */
+
+#include <linux/module.h>
+#include <linux/serial.h>
+#include <linux/slab.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/fs_uart_pd.h>
+#include <asm/ucc_slow.h>
+
+#include <linux/firmware.h>
+#include <asm/reg.h>
+
+/*
+ * The GUMR flag for Soft UART. This would normally be defined in qe.h,
+ * but Soft-UART is a hack and we want to keep everything related to it in
+ * this file.
+ */
+#define UCC_SLOW_GUMR_H_SUART 0x00004000 /* Soft-UART */
+
+/*
+ * soft_uart is 1 if we need to use Soft-UART mode
+ */
+static int soft_uart;
+/*
+ * firmware_loaded is 1 if the firmware has been loaded, 0 otherwise.
+ */
+static int firmware_loaded;
+
+/* Enable this macro to configure all serial ports in internal loopback
+ mode */
+/* #define LOOPBACK */
+
+/* The major and minor device numbers are defined in
+ * http://www.lanana.org/docs/device-list/devices-2.6+.txt. For the QE
+ * UART, we have major number 204 and minor numbers 46 - 49, which are the
+ * same as for the CPM2. This decision was made because no Freescale part
+ * has both a CPM and a QE.
+ */
+#define SERIAL_QE_MAJOR 204
+#define SERIAL_QE_MINOR 46
+
+/* Since we only have minor numbers 46 - 49, there is a hard limit of 4 ports */
+#define UCC_MAX_UART 4
+
+/* The number of buffer descriptors for receiving characters. */
+#define RX_NUM_FIFO 4
+
+/* The number of buffer descriptors for transmitting characters. */
+#define TX_NUM_FIFO 4
+
+/* The maximum size of the character buffer for a single RX BD. */
+#define RX_BUF_SIZE 32
+
+/* The maximum size of the character buffer for a single TX BD. */
+#define TX_BUF_SIZE 32
+
+/*
+ * The number of jiffies to wait after receiving a close command before the
+ * device is actually closed. This allows the last few characters to be
+ * sent over the wire.
+ */
+#define UCC_WAIT_CLOSING 100
+
+struct ucc_uart_pram {
+ struct ucc_slow_pram common;
+ u8 res1[8]; /* reserved */
+ __be16 maxidl; /* Maximum idle chars */
+ __be16 idlc; /* temp idle counter */
+ __be16 brkcr; /* Break count register */
+ __be16 parec; /* receive parity error counter */
+ __be16 frmec; /* receive framing error counter */
+ __be16 nosec; /* receive noise counter */
+ __be16 brkec; /* receive break condition counter */
+ __be16 brkln; /* last received break length */
+ __be16 uaddr[2]; /* UART address character 1 & 2 */
+ __be16 rtemp; /* Temp storage */
+ __be16 toseq; /* Transmit out of sequence char */
+ __be16 cchars[8]; /* control characters 1-8 */
+ __be16 rccm; /* receive control character mask */
+ __be16 rccr; /* receive control character register */
+ __be16 rlbc; /* receive last break character */
+ __be16 res2; /* reserved */
+ __be32 res3; /* reserved, should be cleared */
+ u8 res4; /* reserved, should be cleared */
+ u8 res5[3]; /* reserved, should be cleared */
+ __be32 res6; /* reserved, should be cleared */
+ __be32 res7; /* reserved, should be cleared */
+ __be32 res8; /* reserved, should be cleared */
+ __be32 res9; /* reserved, should be cleared */
+ __be32 res10; /* reserved, should be cleared */
+ __be32 res11; /* reserved, should be cleared */
+ __be32 res12; /* reserved, should be cleared */
+ __be32 res13; /* reserved, should be cleared */
+/* The rest is for Soft-UART only */
+ __be16 supsmr; /* 0x90, Shadow UPSMR */
+ __be16 res92; /* 0x92, reserved, initialize to 0 */
+ __be32 rx_state; /* 0x94, RX state, initialize to 0 */
+ __be32 rx_cnt; /* 0x98, RX count, initialize to 0 */
+ u8 rx_length; /* 0x9C, Char length, set to 1+CL+PEN+1+SL */
+ u8 rx_bitmark; /* 0x9D, reserved, initialize to 0 */
+ u8 rx_temp_dlst_qe; /* 0x9E, reserved, initialize to 0 */
+ u8 res14[0xBC - 0x9F]; /* reserved */
+ __be32 dump_ptr; /* 0xBC, Dump pointer */
+ __be32 rx_frame_rem; /* 0xC0, reserved, initialize to 0 */
+ u8 rx_frame_rem_size; /* 0xC4, reserved, initialize to 0 */
+ u8 tx_mode; /* 0xC5, mode, 0=AHDLC, 1=UART */
+ __be16 tx_state; /* 0xC6, TX state */
+ u8 res15[0xD0 - 0xC8]; /* reserved */
+ __be32 resD0; /* 0xD0, reserved, initialize to 0 */
+ u8 resD4; /* 0xD4, reserved, initialize to 0 */
+ __be16 resD5; /* 0xD5, reserved, initialize to 0 */
+} __attribute__ ((packed));
+
+/* SUPSMR definitions, for Soft-UART only */
+#define UCC_UART_SUPSMR_SL 0x8000
+#define UCC_UART_SUPSMR_RPM_MASK 0x6000
+#define UCC_UART_SUPSMR_RPM_ODD 0x0000
+#define UCC_UART_SUPSMR_RPM_LOW 0x2000
+#define UCC_UART_SUPSMR_RPM_EVEN 0x4000
+#define UCC_UART_SUPSMR_RPM_HIGH 0x6000
+#define UCC_UART_SUPSMR_PEN 0x1000
+#define UCC_UART_SUPSMR_TPM_MASK 0x0C00
+#define UCC_UART_SUPSMR_TPM_ODD 0x0000
+#define UCC_UART_SUPSMR_TPM_LOW 0x0400
+#define UCC_UART_SUPSMR_TPM_EVEN 0x0800
+#define UCC_UART_SUPSMR_TPM_HIGH 0x0C00
+#define UCC_UART_SUPSMR_FRZ 0x0100
+#define UCC_UART_SUPSMR_UM_MASK 0x00c0
+#define UCC_UART_SUPSMR_UM_NORMAL 0x0000
+#define UCC_UART_SUPSMR_UM_MAN_MULTI 0x0040
+#define UCC_UART_SUPSMR_UM_AUTO_MULTI 0x00c0
+#define UCC_UART_SUPSMR_CL_MASK 0x0030
+#define UCC_UART_SUPSMR_CL_8 0x0030
+#define UCC_UART_SUPSMR_CL_7 0x0020
+#define UCC_UART_SUPSMR_CL_6 0x0010
+#define UCC_UART_SUPSMR_CL_5 0x0000
+
+#define UCC_UART_TX_STATE_AHDLC 0x00
+#define UCC_UART_TX_STATE_UART 0x01
+#define UCC_UART_TX_STATE_X1 0x00
+#define UCC_UART_TX_STATE_X16 0x80
+
+#define UCC_UART_PRAM_ALIGNMENT 0x100
+
+#define UCC_UART_SIZE_OF_BD UCC_SLOW_SIZE_OF_BD
+#define NUM_CONTROL_CHARS 8
+
+/* Private per-port data structure */
+struct uart_qe_port {
+ struct uart_port port;
+ struct ucc_slow __iomem *uccp;
+ struct ucc_uart_pram __iomem *uccup;
+ struct ucc_slow_info us_info;
+ struct ucc_slow_private *us_private;
+ struct device_node *np;
+ unsigned int ucc_num; /* First ucc is 0, not 1 */
+
+ u16 rx_nrfifos;
+ u16 rx_fifosize;
+ u16 tx_nrfifos;
+ u16 tx_fifosize;
+ int wait_closing;
+ u32 flags;
+ struct qe_bd *rx_bd_base;
+ struct qe_bd *rx_cur;
+ struct qe_bd *tx_bd_base;
+ struct qe_bd *tx_cur;
+ unsigned char *tx_buf;
+ unsigned char *rx_buf;
+ void *bd_virt; /* virtual address of the BD buffers */
+ dma_addr_t bd_dma_addr; /* bus address of the BD buffers */
+ unsigned int bd_size; /* size of BD buffer space */
+};
+
+static struct uart_driver ucc_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "ucc_uart",
+ .dev_name = "ttyQE",
+ .major = SERIAL_QE_MAJOR,
+ .minor = SERIAL_QE_MINOR,
+ .nr = UCC_MAX_UART,
+};
+
+/*
+ * Virtual to physical address translation.
+ *
+ * Given the virtual address for a character buffer, this function returns
+ * the physical (DMA) equivalent.
+ */
+static inline dma_addr_t cpu2qe_addr(void *addr, struct uart_qe_port *qe_port)
+{
+ if (likely((addr >= qe_port->bd_virt)) &&
+ (addr < (qe_port->bd_virt + qe_port->bd_size)))
+ return qe_port->bd_dma_addr + (addr - qe_port->bd_virt);
+
+ /* something nasty happened */
+ printk(KERN_ERR "%s: addr=%p\n", __func__, addr);
+ BUG();
+ return 0;
+}
+
+/*
+ * Physical to virtual address translation.
+ *
+ * Given the physical (DMA) address for a character buffer, this function
+ * returns the virtual equivalent.
+ */
+static inline void *qe2cpu_addr(dma_addr_t addr, struct uart_qe_port *qe_port)
+{
+ /* sanity check */
+ if (likely((addr >= qe_port->bd_dma_addr) &&
+ (addr < (qe_port->bd_dma_addr + qe_port->bd_size))))
+ return qe_port->bd_virt + (addr - qe_port->bd_dma_addr);
+
+ /* something nasty happened */
+ printk(KERN_ERR "%s: addr=%x\n", __func__, addr);
+ BUG();
+ return NULL;
+}
+
+/*
+ * Return 1 if the QE is done transmitting all buffers for this port
+ *
+ * This function scans each BD in sequence. If we find a BD that has not yet
+ * been sent (its READY bit is still set), we return 0 to indicate the QE is still sending
+ * data. If we reach the last BD (WRAP=1), then we know we've scanned
+ * the entire list, and all BDs are done.
+ */
+static unsigned int qe_uart_tx_empty(struct uart_port *port)
+{
+ struct uart_qe_port *qe_port =
+ container_of(port, struct uart_qe_port, port);
+ struct qe_bd *bdp = qe_port->tx_bd_base;
+
+ while (1) {
+ if (in_be16(&bdp->status) & BD_SC_READY)
+ /* This BD is not done, so return "not done" */
+ return 0;
+
+ if (in_be16(&bdp->status) & BD_SC_WRAP)
+ /*
+ * This BD is done and it's the last one, so return
+ * "done"
+ */
+ return 1;
+
+ bdp++;
+ };
+}
+
+/*
+ * Set the modem control lines
+ *
+ * Although the QE can control the modem control lines (e.g. CTS), we
+ * don't need that support. This function must exist, however, otherwise
+ * the kernel will panic.
+ */
+void qe_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+/*
+ * Get the current modem control line status
+ *
+ * Although the QE can control the modem control lines (e.g. CTS), this
+ * driver currently doesn't support that, so we always return Carrier
+ * Detect, Data Set Ready, and Clear To Send.
+ */
+static unsigned int qe_uart_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+}
+
+/*
+ * Disable the transmit interrupt.
+ *
+ * Although this function is called "stop_tx", it does not actually stop
+ * transmission of data. Instead, it tells the QE to not generate an
+ * interrupt when the UCC is finished sending characters.
+ */
+static void qe_uart_stop_tx(struct uart_port *port)
+{
+ struct uart_qe_port *qe_port =
+ container_of(port, struct uart_qe_port, port);
+
+ clrbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX);
+}
+
+/*
+ * Transmit as many characters to the HW as possible.
+ *
+ * This function will attempt to stuff all the characters from the
+ * kernel's transmit buffer into TX BDs.
+ *
+ * A return value of non-zero indicates that it successfully stuffed all
+ * characters from the kernel buffer.
+ *
+ * A return value of zero indicates that there are still characters in the
+ * kernel's buffer that have not been transmitted, but there are no more BDs
+ * available. This function should be called again after a BD has been made
+ * available.
+ */
+static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
+{
+ struct qe_bd *bdp;
+ unsigned char *p;
+ unsigned int count;
+ struct uart_port *port = &qe_port->port;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ bdp = qe_port->rx_cur;
+
+ /* Handle xon/xoff */
+ if (port->x_char) {
+ /* Pick next descriptor and fill from buffer */
+ bdp = qe_port->tx_cur;
+
+ p = qe2cpu_addr(bdp->buf, qe_port);
+
+ *p++ = port->x_char;
+ out_be16(&bdp->length, 1);
+ setbits16(&bdp->status, BD_SC_READY);
+ /* Get next BD. */
+ if (in_be16(&bdp->status) & BD_SC_WRAP)
+ bdp = qe_port->tx_bd_base;
+ else
+ bdp++;
+ qe_port->tx_cur = bdp;
+
+ port->icount.tx++;
+ port->x_char = 0;
+ return 1;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ qe_uart_stop_tx(port);
+ return 0;
+ }
+
+ /* Pick next descriptor and fill from buffer */
+ bdp = qe_port->tx_cur;
+
+ while (!(in_be16(&bdp->status) & BD_SC_READY) &&
+ (xmit->tail != xmit->head)) {
+ count = 0;
+ p = qe2cpu_addr(bdp->buf, qe_port);
+ while (count < qe_port->tx_fifosize) {
+ *p++ = xmit->buf[xmit->tail];
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ count++;
+ if (xmit->head == xmit->tail)
+ break;
+ }
+
+ out_be16(&bdp->length, count);
+ setbits16(&bdp->status, BD_SC_READY);
+
+ /* Get next BD. */
+ if (in_be16(&bdp->status) & BD_SC_WRAP)
+ bdp = qe_port->tx_bd_base;
+ else
+ bdp++;
+ }
+ qe_port->tx_cur = bdp;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit)) {
+ /* The kernel buffer is empty, so turn off TX interrupts. We
+ don't need to be told when the QE is finished transmitting
+ the data. */
+ qe_uart_stop_tx(port);
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Start transmitting data
+ *
+ * This function will start transmitting any available data, if the port
+ * isn't already transmitting data.
+ */
+static void qe_uart_start_tx(struct uart_port *port)
+{
+ struct uart_qe_port *qe_port =
+ container_of(port, struct uart_qe_port, port);
+
+ /* If we currently are transmitting, then just return */
+ if (in_be16(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX)
+ return;
+
+ /* Otherwise, pump the port and start transmission */
+ if (qe_uart_tx_pump(qe_port))
+ setbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX);
+}
+
+/*
+ * Stop transmitting data
+ */
+static void qe_uart_stop_rx(struct uart_port *port)
+{
+ struct uart_qe_port *qe_port =
+ container_of(port, struct uart_qe_port, port);
+
+ clrbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX);
+}
+
+/*
+ * Enable status change interrupts
+ *
+ * We don't support status change interrupts, but we need to define this
+ * function otherwise the kernel will panic.
+ */
+static void qe_uart_enable_ms(struct uart_port *port)
+{
+}
+
+/* Start or stop sending break signal
+ *
+ * This function controls the sending of a break signal. If break_state=1,
+ * then we start sending a break signal. If break_state=0, then we stop
+ * sending the break signal.
+ */
+static void qe_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ struct uart_qe_port *qe_port =
+ container_of(port, struct uart_qe_port, port);
+
+ if (break_state)
+ ucc_slow_stop_tx(qe_port->us_private);
+ else
+ ucc_slow_restart_tx(qe_port->us_private);
+}
+
+/* ISR helper function for receiving characters.
+ *
+ * This function is called by the ISR to handle received characters.
+ */
+static void qe_uart_int_rx(struct uart_qe_port *qe_port)
+{
+ int i;
+ unsigned char ch, *cp;
+ struct uart_port *port = &qe_port->port;
+ struct tty_struct *tty = port->state->port.tty;
+ struct qe_bd *bdp;
+ u16 status;
+ unsigned int flg;
+
+ /* Just loop through the closed BDs and copy the characters into
+ * the buffer.
+ */
+ bdp = qe_port->rx_cur;
+ while (1) {
+ status = in_be16(&bdp->status);
+
+ /* If this one is empty, then we assume we've read them all */
+ if (status & BD_SC_EMPTY)
+ break;
+
+ /* get number of characters, and check space in RX buffer */
+ i = in_be16(&bdp->length);
+
+ /* If we don't have enough room in RX buffer for the entire BD,
+ * then we try again later, on the next RX interrupt.
+ */
+ if (tty_buffer_request_room(tty, i) < i) {
+ dev_dbg(port->dev, "ucc-uart: no room in RX buffer\n");
+ return;
+ }
+
+ /* get pointer */
+ cp = qe2cpu_addr(bdp->buf, qe_port);
+
+ /* loop through the buffer */
+ while (i-- > 0) {
+ ch = *cp++;
+ port->icount.rx++;
+ flg = TTY_NORMAL;
+
+ if (!i && status &
+ (BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV))
+ goto handle_error;
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+
+error_return:
+ tty_insert_flip_char(tty, ch, flg);
+
+ }
+
+ /* This BD is ready to be used again. Clear status. get next */
+ clrsetbits_be16(&bdp->status, BD_SC_BR | BD_SC_FR | BD_SC_PR |
+ BD_SC_OV | BD_SC_ID, BD_SC_EMPTY);
+ if (in_be16(&bdp->status) & BD_SC_WRAP)
+ bdp = qe_port->rx_bd_base;
+ else
+ bdp++;
+
+ }
+
+ /* Write back buffer pointer */
+ qe_port->rx_cur = bdp;
+
+ /* Activate BH processing */
+ tty_flip_buffer_push(tty);
+
+ return;
+
+ /* Error processing */
+
+handle_error:
+ /* Statistics */
+ if (status & BD_SC_BR)
+ port->icount.brk++;
+ if (status & BD_SC_PR)
+ port->icount.parity++;
+ if (status & BD_SC_FR)
+ port->icount.frame++;
+ if (status & BD_SC_OV)
+ port->icount.overrun++;
+
+ /* Mask out ignored conditions */
+ status &= port->read_status_mask;
+
+ /* Handle the remaining ones */
+ if (status & BD_SC_BR)
+ flg = TTY_BREAK;
+ else if (status & BD_SC_PR)
+ flg = TTY_PARITY;
+ else if (status & BD_SC_FR)
+ flg = TTY_FRAME;
+
+ /* Overrun does not affect the current character ! */
+ if (status & BD_SC_OV)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+#ifdef SUPPORT_SYSRQ
+ port->sysrq = 0;
+#endif
+ goto error_return;
+}
+
+/* Interrupt handler
+ *
+ * This interrupt handler is called after a BD is processed.
+ */
+static irqreturn_t qe_uart_int(int irq, void *data)
+{
+ struct uart_qe_port *qe_port = (struct uart_qe_port *) data;
+ struct ucc_slow __iomem *uccp = qe_port->uccp;
+ u16 events;
+
+ /* Clear the interrupts */
+ events = in_be16(&uccp->ucce);
+ out_be16(&uccp->ucce, events);
+
+ if (events & UCC_UART_UCCE_BRKE)
+ uart_handle_break(&qe_port->port);
+
+ if (events & UCC_UART_UCCE_RX)
+ qe_uart_int_rx(qe_port);
+
+ if (events & UCC_UART_UCCE_TX)
+ qe_uart_tx_pump(qe_port);
+
+ return events ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/* Initialize buffer descriptors
+ *
+ * This function initializes all of the RX and TX buffer descriptors.
+ */
+static void qe_uart_initbd(struct uart_qe_port *qe_port)
+{
+ int i;
+ void *bd_virt;
+ struct qe_bd *bdp;
+
+ /* Set the physical address of the host memory buffers in the buffer
+ * descriptors, and the virtual address for us to work with.
+ */
+ bd_virt = qe_port->bd_virt;
+ bdp = qe_port->rx_bd_base;
+ qe_port->rx_cur = qe_port->rx_bd_base;
+ for (i = 0; i < (qe_port->rx_nrfifos - 1); i++) {
+ out_be16(&bdp->status, BD_SC_EMPTY | BD_SC_INTRPT);
+ out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
+ out_be16(&bdp->length, 0);
+ bd_virt += qe_port->rx_fifosize;
+ bdp++;
+ }
+
+ /* The last RX BD gets the wrap bit so the ring loops back to the base */
+ out_be16(&bdp->status, BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT);
+ out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
+ out_be16(&bdp->length, 0);
+
+ /* Set the physical address of the host memory
+ * buffers in the buffer descriptors, and the
+ * virtual address for us to work with.
+ */
+ bd_virt = qe_port->bd_virt +
+ L1_CACHE_ALIGN(qe_port->rx_nrfifos * qe_port->rx_fifosize);
+ qe_port->tx_cur = qe_port->tx_bd_base;
+ bdp = qe_port->tx_bd_base;
+ for (i = 0; i < (qe_port->tx_nrfifos - 1); i++) {
+ out_be16(&bdp->status, BD_SC_INTRPT);
+ out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
+ out_be16(&bdp->length, 0);
+ bd_virt += qe_port->tx_fifosize;
+ bdp++;
+ }
+
+ /* Loopback requires the preamble bit to be set on the first TX BD */
+#ifdef LOOPBACK
+ setbits16(&qe_port->tx_cur->status, BD_SC_P);
+#endif
+
+ out_be16(&bdp->status, BD_SC_WRAP | BD_SC_INTRPT);
+ out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
+ out_be16(&bdp->length, 0);
+}
+
+/*
+ * Initialize a UCC for UART.
+ *
+ * This function configures a given UCC to be used as a UART device. Basic
+ * UCC initialization is handled in qe_uart_request_port(). This function
+ * does all the UART-specific stuff.
+ */
+static void qe_uart_init_ucc(struct uart_qe_port *qe_port)
+{
+ u32 cecr_subblock;
+ struct ucc_slow __iomem *uccp = qe_port->uccp;
+ struct ucc_uart_pram *uccup = qe_port->uccup;
+
+ unsigned int i;
+
+ /* First, disable TX and RX in the UCC */
+ ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX);
+
+ /* Program the UCC UART parameter RAM */
+ out_8(&uccup->common.rbmr, UCC_BMR_GBL | UCC_BMR_BO_BE);
+ out_8(&uccup->common.tbmr, UCC_BMR_GBL | UCC_BMR_BO_BE);
+ out_be16(&uccup->common.mrblr, qe_port->rx_fifosize);
+ out_be16(&uccup->maxidl, 0x10);
+ out_be16(&uccup->brkcr, 1);
+ out_be16(&uccup->parec, 0);
+ out_be16(&uccup->frmec, 0);
+ out_be16(&uccup->nosec, 0);
+ out_be16(&uccup->brkec, 0);
+ out_be16(&uccup->uaddr[0], 0);
+ out_be16(&uccup->uaddr[1], 0);
+ out_be16(&uccup->toseq, 0);
+ for (i = 0; i < 8; i++)
+ out_be16(&uccup->cchars[i], 0xC000);
+ out_be16(&uccup->rccm, 0xc0ff);
+
+ /* Configure the GUMR registers for UART */
+ if (soft_uart) {
+ /* Soft-UART requires a 1X multiplier for TX */
+ clrsetbits_be32(&uccp->gumr_l,
+ UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
+ UCC_SLOW_GUMR_L_RDCR_MASK,
+ UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_1 |
+ UCC_SLOW_GUMR_L_RDCR_16);
+
+ clrsetbits_be32(&uccp->gumr_h, UCC_SLOW_GUMR_H_RFW,
+ UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX);
+ } else {
+ clrsetbits_be32(&uccp->gumr_l,
+ UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
+ UCC_SLOW_GUMR_L_RDCR_MASK,
+ UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_16 |
+ UCC_SLOW_GUMR_L_RDCR_16);
+
+ clrsetbits_be32(&uccp->gumr_h,
+ UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX,
+ UCC_SLOW_GUMR_H_RFW);
+ }
+
+#ifdef LOOPBACK
+ clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK,
+ UCC_SLOW_GUMR_L_DIAG_LOOP);
+ clrsetbits_be32(&uccp->gumr_h,
+ UCC_SLOW_GUMR_H_CTSP | UCC_SLOW_GUMR_H_RSYN,
+ UCC_SLOW_GUMR_H_CDS);
+#endif
+
+ /* Disable rx interrupts and clear all pending events. */
+ out_be16(&uccp->uccm, 0);
+ out_be16(&uccp->ucce, 0xffff);
+ out_be16(&uccp->udsr, 0x7e7e);
+
+ /* Initialize UPSMR */
+ out_be16(&uccp->upsmr, 0);
+
+ if (soft_uart) {
+ out_be16(&uccup->supsmr, 0x30);
+ out_be16(&uccup->res92, 0);
+ out_be32(&uccup->rx_state, 0);
+ out_be32(&uccup->rx_cnt, 0);
+ out_8(&uccup->rx_bitmark, 0);
+ out_8(&uccup->rx_length, 10);
+ out_be32(&uccup->dump_ptr, 0x4000);
+ out_8(&uccup->rx_temp_dlst_qe, 0);
+ out_be32(&uccup->rx_frame_rem, 0);
+ out_8(&uccup->rx_frame_rem_size, 0);
+ /* Soft-UART requires TX to be 1X */
+ out_8(&uccup->tx_mode,
+ UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1);
+ out_be16(&uccup->tx_state, 0);
+ out_8(&uccup->resD4, 0);
+ out_be16(&uccup->resD5, 0);
+
+ /* Set UART mode.
+ * Enable receive and transmit.
+ */
+
+ /* From the microcode errata:
+ * 1.GUMR_L register, set mode=0010 (QMC).
+ * 2.Set GUMR_H[17] bit. (UART/AHDLC mode).
+ * 3.Set GUMR_H[19:20] (Transparent mode)
+ * 4.Clear GUMR_H[26] (RFW)
+ * ...
+ * 6.Receiver must use 16x over sampling
+ */
+ clrsetbits_be32(&uccp->gumr_l,
+ UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
+ UCC_SLOW_GUMR_L_RDCR_MASK,
+ UCC_SLOW_GUMR_L_MODE_QMC | UCC_SLOW_GUMR_L_TDCR_16 |
+ UCC_SLOW_GUMR_L_RDCR_16);
+
+ clrsetbits_be32(&uccp->gumr_h,
+ UCC_SLOW_GUMR_H_RFW | UCC_SLOW_GUMR_H_RSYN,
+ UCC_SLOW_GUMR_H_SUART | UCC_SLOW_GUMR_H_TRX |
+ UCC_SLOW_GUMR_H_TTX | UCC_SLOW_GUMR_H_TFL);
+
+#ifdef LOOPBACK
+ clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK,
+ UCC_SLOW_GUMR_L_DIAG_LOOP);
+ clrbits32(&uccp->gumr_h, UCC_SLOW_GUMR_H_CTSP |
+ UCC_SLOW_GUMR_H_CDS);
+#endif
+
+ cecr_subblock = ucc_slow_get_qe_cr_subblock(qe_port->ucc_num);
+ qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ } else {
+ cecr_subblock = ucc_slow_get_qe_cr_subblock(qe_port->ucc_num);
+ qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
+ QE_CR_PROTOCOL_UART, 0);
+ }
+}
+
+/*
+ * Initialize the port.
+ */
+static int qe_uart_startup(struct uart_port *port)
+{
+ struct uart_qe_port *qe_port =
+ container_of(port, struct uart_qe_port, port);
+ int ret;
+
+ /*
+ * If we're using Soft-UART mode, then we need to make sure the
+ * firmware has been uploaded first.
+ */
+ if (soft_uart && !firmware_loaded) {
+ dev_err(port->dev, "Soft-UART firmware not uploaded\n");
+ return -ENODEV;
+ }
+
+ qe_uart_initbd(qe_port);
+ qe_uart_init_ucc(qe_port);
+
+ /* Install interrupt handler. */
+ ret = request_irq(port->irq, qe_uart_int, IRQF_SHARED, "ucc-uart",
+ qe_port);
+ if (ret) {
+ dev_err(port->dev, "could not claim IRQ %u\n", port->irq);
+ return ret;
+ }
+
+ /* Startup rx-int */
+ setbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX);
+ ucc_slow_enable(qe_port->us_private, COMM_DIR_RX_AND_TX);
+
+ return 0;
+}
+
+/*
+ * Shutdown the port.
+ */
+static void qe_uart_shutdown(struct uart_port *port)
+{
+ struct uart_qe_port *qe_port =
+ container_of(port, struct uart_qe_port, port);
+ struct ucc_slow __iomem *uccp = qe_port->uccp;
+ unsigned int timeout = 20;
+
+ /* Disable RX and TX */
+
+ /* Wait for all the BDs marked sent */
+ while (!qe_uart_tx_empty(port)) {
+ if (!--timeout) {
+ dev_warn(port->dev, "shutdown timeout\n");
+ break;
+ }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(2);
+ }
+
+ if (qe_port->wait_closing) {
+ /* Wait a bit longer */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(qe_port->wait_closing);
+ }
+
+ /* Stop uarts */
+ ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX);
+ clrbits16(&uccp->uccm, UCC_UART_UCCE_TX | UCC_UART_UCCE_RX);
+
+ /* Shut them really down and reinit buffer descriptors */
+ ucc_slow_graceful_stop_tx(qe_port->us_private);
+ qe_uart_initbd(qe_port);
+
+ free_irq(port->irq, qe_port);
+}
+
+/*
+ * Set the serial port parameters.
+ */
+static void qe_uart_set_termios(struct uart_port *port,
+ struct ktermios *termios, struct ktermios *old)
+{
+ struct uart_qe_port *qe_port =
+ container_of(port, struct uart_qe_port, port);
+ struct ucc_slow __iomem *uccp = qe_port->uccp;
+ unsigned int baud;
+ unsigned long flags;
+ u16 upsmr = in_be16(&uccp->upsmr);
+ struct ucc_uart_pram __iomem *uccup = qe_port->uccup;
+ u16 supsmr = in_be16(&uccup->supsmr);
+ u8 char_length = 2; /* 1 + CL + PEN + 1 + SL */
+
+ /* Character length programmed into the mode register is the
+ * sum of: 1 start bit, number of data bits, 0 or 1 parity bit,
+ * 1 or 2 stop bits, minus 1.
+ * The 'char_length' variable counts this for us.
+ */
+
+ /* byte size */
+ upsmr &= UCC_UART_UPSMR_CL_MASK;
+ supsmr &= UCC_UART_SUPSMR_CL_MASK;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ upsmr |= UCC_UART_UPSMR_CL_5;
+ supsmr |= UCC_UART_SUPSMR_CL_5;
+ char_length += 5;
+ break;
+ case CS6:
+ upsmr |= UCC_UART_UPSMR_CL_6;
+ supsmr |= UCC_UART_SUPSMR_CL_6;
+ char_length += 6;
+ break;
+ case CS7:
+ upsmr |= UCC_UART_UPSMR_CL_7;
+ supsmr |= UCC_UART_SUPSMR_CL_7;
+ char_length += 7;
+ break;
+ default: /* case CS8 */
+ upsmr |= UCC_UART_UPSMR_CL_8;
+ supsmr |= UCC_UART_SUPSMR_CL_8;
+ char_length += 8;
+ break;
+ }
+
+ /* If CSTOPB is set, we want two stop bits */
+ if (termios->c_cflag & CSTOPB) {
+ upsmr |= UCC_UART_UPSMR_SL;
+ supsmr |= UCC_UART_SUPSMR_SL;
+ char_length++; /* + SL */
+ }
+
+ if (termios->c_cflag & PARENB) {
+ upsmr |= UCC_UART_UPSMR_PEN;
+ supsmr |= UCC_UART_SUPSMR_PEN;
+ char_length++; /* + PEN */
+
+ if (!(termios->c_cflag & PARODD)) {
+ upsmr &= ~(UCC_UART_UPSMR_RPM_MASK |
+ UCC_UART_UPSMR_TPM_MASK);
+ upsmr |= UCC_UART_UPSMR_RPM_EVEN |
+ UCC_UART_UPSMR_TPM_EVEN;
+ supsmr &= ~(UCC_UART_SUPSMR_RPM_MASK |
+ UCC_UART_SUPSMR_TPM_MASK);
+ supsmr |= UCC_UART_SUPSMR_RPM_EVEN |
+ UCC_UART_SUPSMR_TPM_EVEN;
+ }
+ }
+
+ /*
+ * Set up parity check flag
+ */
+ port->read_status_mask = BD_SC_EMPTY | BD_SC_OV;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= BD_SC_FR | BD_SC_PR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= BD_SC_BR;
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= BD_SC_PR | BD_SC_FR;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= BD_SC_BR;
+ /*
+ * If we're ignoring parity and break indicators, ignore
+ * overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= BD_SC_OV;
+ }
+ /*
+ * !!! ignore all characters if CREAD is not set
+ */
+ if ((termios->c_cflag & CREAD) == 0)
+ port->read_status_mask &= ~BD_SC_EMPTY;
+
+ baud = uart_get_baud_rate(port, termios, old, 0, 115200);
+
+ /* Do we really need a spinlock here? */
+ spin_lock_irqsave(&port->lock, flags);
+
+ out_be16(&uccp->upsmr, upsmr);
+ if (soft_uart) {
+ out_be16(&uccup->supsmr, supsmr);
+ out_8(&uccup->rx_length, char_length);
+
+ /* Soft-UART requires a 1X multiplier for TX */
+ qe_setbrg(qe_port->us_info.rx_clock, baud, 16);
+ qe_setbrg(qe_port->us_info.tx_clock, baud, 1);
+ } else {
+ qe_setbrg(qe_port->us_info.rx_clock, baud, 16);
+ qe_setbrg(qe_port->us_info.tx_clock, baud, 16);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/*
+ * Return a pointer to a string that describes what kind of port this is.
+ */
+static const char *qe_uart_type(struct uart_port *port)
+{
+ return "QE";
+}
+
+/*
+ * Allocate any memory and I/O resources required by the port.
+ */
+static int qe_uart_request_port(struct uart_port *port)
+{
+ int ret;
+ struct uart_qe_port *qe_port =
+ container_of(port, struct uart_qe_port, port);
+ struct ucc_slow_info *us_info = &qe_port->us_info;
+ struct ucc_slow_private *uccs;
+ unsigned int rx_size, tx_size;
+ void *bd_virt;
+ dma_addr_t bd_dma_addr = 0;
+
+ ret = ucc_slow_init(us_info, &uccs);
+ if (ret) {
+ dev_err(port->dev, "could not initialize UCC%u\n",
+ qe_port->ucc_num);
+ return ret;
+ }
+
+ qe_port->us_private = uccs;
+ qe_port->uccp = uccs->us_regs;
+ qe_port->uccup = (struct ucc_uart_pram *) uccs->us_pram;
+ qe_port->rx_bd_base = uccs->rx_bd;
+ qe_port->tx_bd_base = uccs->tx_bd;
+
+ /*
+ * Allocate the transmit and receive data buffers.
+ */
+
+ rx_size = L1_CACHE_ALIGN(qe_port->rx_nrfifos * qe_port->rx_fifosize);
+ tx_size = L1_CACHE_ALIGN(qe_port->tx_nrfifos * qe_port->tx_fifosize);
+
+ bd_virt = dma_alloc_coherent(port->dev, rx_size + tx_size, &bd_dma_addr,
+ GFP_KERNEL);
+ if (!bd_virt) {
+ dev_err(port->dev, "could not allocate buffer descriptors\n");
+ return -ENOMEM;
+ }
+
+ qe_port->bd_virt = bd_virt;
+ qe_port->bd_dma_addr = bd_dma_addr;
+ qe_port->bd_size = rx_size + tx_size;
+
+ qe_port->rx_buf = bd_virt;
+ qe_port->tx_buf = qe_port->rx_buf + rx_size;
+
+ return 0;
+}
+
+/*
+ * Configure the port.
+ *
+ * We say we're a CPM-type port because that's mostly true. Once the device
+ * is configured, this driver operates almost identically to the CPM serial
+ * driver.
+ */
+static void qe_uart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_CPM;
+ qe_uart_request_port(port);
+ }
+}
+
+/*
+ * Release any memory and I/O resources that were allocated in
+ * qe_uart_request_port().
+ */
+static void qe_uart_release_port(struct uart_port *port)
+{
+ struct uart_qe_port *qe_port =
+ container_of(port, struct uart_qe_port, port);
+ struct ucc_slow_private *uccs = qe_port->us_private;
+
+ dma_free_coherent(port->dev, qe_port->bd_size, qe_port->bd_virt,
+ qe_port->bd_dma_addr);
+
+ ucc_slow_free(uccs);
+}
+
+/*
+ * Verify that the data in serial_struct is suitable for this device.
+ */
+static int qe_uart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM)
+ return -EINVAL;
+
+ if (ser->irq < 0 || ser->irq >= nr_irqs)
+ return -EINVAL;
+
+ if (ser->baud_base < 9600)
+ return -EINVAL;
+
+ return 0;
+}
+/* UART operations
+ *
+ * Details on these functions can be found in Documentation/serial/driver
+ */
+static struct uart_ops qe_uart_pops = {
+ .tx_empty = qe_uart_tx_empty,
+ .set_mctrl = qe_uart_set_mctrl,
+ .get_mctrl = qe_uart_get_mctrl,
+ .stop_tx = qe_uart_stop_tx,
+ .start_tx = qe_uart_start_tx,
+ .stop_rx = qe_uart_stop_rx,
+ .enable_ms = qe_uart_enable_ms,
+ .break_ctl = qe_uart_break_ctl,
+ .startup = qe_uart_startup,
+ .shutdown = qe_uart_shutdown,
+ .set_termios = qe_uart_set_termios,
+ .type = qe_uart_type,
+ .release_port = qe_uart_release_port,
+ .request_port = qe_uart_request_port,
+ .config_port = qe_uart_config_port,
+ .verify_port = qe_uart_verify_port,
+};
+
+/*
+ * Obtain the SOC model number and revision level
+ *
+ * This function parses the device tree to obtain the SOC model. It then
+ * reads the SVR register to get the revision.
+ *
+ * The device tree stores the SOC model two different ways.
+ *
+ * The new way is:
+ *
+ * cpu@0 {
+ * compatible = "PowerPC,8323";
+ * device_type = "cpu";
+ * ...
+ *
+ *
+ * The old way is:
+ * PowerPC,8323@0 {
+ * device_type = "cpu";
+ * ...
+ *
+ * This code first checks the new way, and then the old way.
+ */
+static unsigned int soc_info(unsigned int *rev_h, unsigned int *rev_l)
+{
+ struct device_node *np;
+ const char *soc_string;
+ unsigned int svr;
+ unsigned int soc;
+
+ /* Find the CPU node */
+ np = of_find_node_by_type(NULL, "cpu");
+ if (!np)
+ return 0;
+ /* Find the compatible property */
+ soc_string = of_get_property(np, "compatible", NULL);
+ if (!soc_string)
+ /* No compatible property, so try the name. */
+ soc_string = np->name;
+
+ /* Extract the SOC number from the "PowerPC," string */
+ if ((sscanf(soc_string, "PowerPC,%u", &soc) != 1) || !soc)
+ return 0;
+
+ /* Get the revision from the SVR */
+ svr = mfspr(SPRN_SVR);
+ *rev_h = (svr >> 4) & 0xf;
+ *rev_l = svr & 0xf;
+
+ return soc;
+}
+
+/*
+ * request_firmware_nowait() callback function
+ *
+ * This function is called by the kernel when a firmware is made available,
+ * or if it times out waiting for the firmware.
+ */
+static void uart_firmware_cont(const struct firmware *fw, void *context)
+{
+ struct qe_firmware *firmware;
+ struct device *dev = context;
+ int ret;
+
+ if (!fw) {
+ dev_err(dev, "firmware not found\n");
+ return;
+ }
+
+ firmware = (struct qe_firmware *) fw->data;
+
+ if (firmware->header.length != fw->size) {
+ dev_err(dev, "invalid firmware\n");
+ goto out;
+ }
+
+ ret = qe_upload_firmware(firmware);
+ if (ret) {
+ dev_err(dev, "could not load firmware\n");
+ goto out;
+ }
+
+ firmware_loaded = 1;
+ out:
+ release_firmware(fw);
+}
+
+static int ucc_uart_probe(struct platform_device *ofdev)
+{
+ struct device_node *np = ofdev->dev.of_node;
+ const unsigned int *iprop; /* Integer OF properties */
+ const char *sprop; /* String OF properties */
+ struct uart_qe_port *qe_port = NULL;
+ struct resource res;
+ int ret;
+
+ /*
+ * Determine if we need Soft-UART mode
+ */
+ if (of_find_property(np, "soft-uart", NULL)) {
+ dev_dbg(&ofdev->dev, "using Soft-UART mode\n");
+ soft_uart = 1;
+ }
+
+ /*
+ * If we are using Soft-UART, determine if we need to upload the
+ * firmware, too.
+ */
+ if (soft_uart) {
+ struct qe_firmware_info *qe_fw_info;
+
+ qe_fw_info = qe_get_firmware_info();
+
+ /* Check if the firmware has been uploaded. */
+ if (qe_fw_info && strstr(qe_fw_info->id, "Soft-UART")) {
+ firmware_loaded = 1;
+ } else {
+ char filename[32];
+ unsigned int soc;
+ unsigned int rev_h;
+ unsigned int rev_l;
+
+ soc = soc_info(&rev_h, &rev_l);
+ if (!soc) {
+ dev_err(&ofdev->dev, "unknown CPU model\n");
+ return -ENXIO;
+ }
+ sprintf(filename, "fsl_qe_ucode_uart_%u_%u%u.bin",
+ soc, rev_h, rev_l);
+
+ dev_info(&ofdev->dev, "waiting for firmware %s\n",
+ filename);
+
+ /*
+ * We call request_firmware_nowait instead of
+ * request_firmware so that the driver can load and
+ * initialize the ports without holding up the rest of
+ * the kernel. If hotplug support is enabled in the
+ * kernel, then we use it.
+ */
+ ret = request_firmware_nowait(THIS_MODULE,
+ FW_ACTION_HOTPLUG, filename, &ofdev->dev,
+ GFP_KERNEL, &ofdev->dev, uart_firmware_cont);
+ if (ret) {
+ dev_err(&ofdev->dev,
+ "could not load firmware %s\n",
+ filename);
+ return ret;
+ }
+ }
+ }
+
+ qe_port = kzalloc(sizeof(struct uart_qe_port), GFP_KERNEL);
+ if (!qe_port) {
+ dev_err(&ofdev->dev, "can't allocate QE port structure\n");
+ return -ENOMEM;
+ }
+
+ /* Search for IRQ and mapbase */
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(&ofdev->dev, "missing 'reg' property in device tree\n");
+ goto out_free;
+ }
+ if (!res.start) {
+ dev_err(&ofdev->dev, "invalid 'reg' property in device tree\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+ qe_port->port.mapbase = res.start;
+
+ /* Get the UCC number (device ID) */
+ /* UCCs are numbered 1-7 */
+ iprop = of_get_property(np, "cell-index", NULL);
+ if (!iprop) {
+ iprop = of_get_property(np, "device-id", NULL);
+ if (!iprop) {
+ dev_err(&ofdev->dev, "UCC is unspecified in "
+ "device tree\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ if ((*iprop < 1) || (*iprop > UCC_MAX_NUM)) {
+ dev_err(&ofdev->dev, "no support for UCC%u\n", *iprop);
+ ret = -ENODEV;
+ goto out_free;
+ }
+ qe_port->ucc_num = *iprop - 1;
+
+ /*
+ * In the future, we should not require the BRG to be specified in the
+ * device tree. If no clock-source is specified, then just pick a BRG
+ * to use. This requires a new QE library function that manages BRG
+ * assignments.
+ */
+
+ sprop = of_get_property(np, "rx-clock-name", NULL);
+ if (!sprop) {
+ dev_err(&ofdev->dev, "missing rx-clock-name in device tree\n");
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ qe_port->us_info.rx_clock = qe_clock_source(sprop);
+ if ((qe_port->us_info.rx_clock < QE_BRG1) ||
+ (qe_port->us_info.rx_clock > QE_BRG16)) {
+ dev_err(&ofdev->dev, "rx-clock-name must be a BRG for UART\n");
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+#ifdef LOOPBACK
+ /* In internal loopback mode, TX and RX must use the same clock */
+ qe_port->us_info.tx_clock = qe_port->us_info.rx_clock;
+#else
+ sprop = of_get_property(np, "tx-clock-name", NULL);
+ if (!sprop) {
+ dev_err(&ofdev->dev, "missing tx-clock-name in device tree\n");
+ ret = -ENODEV;
+ goto out_free;
+ }
+ qe_port->us_info.tx_clock = qe_clock_source(sprop);
+#endif
+ if ((qe_port->us_info.tx_clock < QE_BRG1) ||
+ (qe_port->us_info.tx_clock > QE_BRG16)) {
+ dev_err(&ofdev->dev, "tx-clock-name must be a BRG for UART\n");
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ /* Get the port number, numbered 0-3 */
+ iprop = of_get_property(np, "port-number", NULL);
+ if (!iprop) {
+ dev_err(&ofdev->dev, "missing port-number in device tree\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+ qe_port->port.line = *iprop;
+ if (qe_port->port.line >= UCC_MAX_UART) {
+ dev_err(&ofdev->dev, "port-number must be 0-%u\n",
+ UCC_MAX_UART - 1);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ qe_port->port.irq = irq_of_parse_and_map(np, 0);
+ if (qe_port->port.irq == NO_IRQ) {
+ dev_err(&ofdev->dev, "could not map IRQ for UCC%u\n",
+ qe_port->ucc_num + 1);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ /*
+ * Newer device trees have an "fsl,qe" compatible property for the QE
+ * node, but we still need to support older device trees.
+ */
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!np) {
+ np = of_find_node_by_type(NULL, "qe");
+ if (!np) {
+ dev_err(&ofdev->dev, "could not find 'qe' node\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ iprop = of_get_property(np, "brg-frequency", NULL);
+ if (!iprop) {
+ dev_err(&ofdev->dev,
+ "missing brg-frequency in device tree\n");
+ ret = -EINVAL;
+ goto out_np;
+ }
+
+ if (*iprop)
+ qe_port->port.uartclk = *iprop;
+ else {
+ /*
+ * Older versions of U-Boot do not initialize the brg-frequency
+ * property, so in this case we assume the BRG frequency is
+ * half the QE bus frequency.
+ */
+ iprop = of_get_property(np, "bus-frequency", NULL);
+ if (!iprop) {
+ dev_err(&ofdev->dev,
+ "missing QE bus-frequency in device tree\n");
+ ret = -EINVAL;
+ goto out_np;
+ }
+ if (*iprop)
+ qe_port->port.uartclk = *iprop / 2;
+ else {
+ dev_err(&ofdev->dev,
+ "invalid QE bus-frequency in device tree\n");
+ ret = -EINVAL;
+ goto out_np;
+ }
+ }
+
+ spin_lock_init(&qe_port->port.lock);
+ qe_port->np = np;
+ qe_port->port.dev = &ofdev->dev;
+ qe_port->port.ops = &qe_uart_pops;
+ qe_port->port.iotype = UPIO_MEM;
+
+ qe_port->tx_nrfifos = TX_NUM_FIFO;
+ qe_port->tx_fifosize = TX_BUF_SIZE;
+ qe_port->rx_nrfifos = RX_NUM_FIFO;
+ qe_port->rx_fifosize = RX_BUF_SIZE;
+
+ qe_port->wait_closing = UCC_WAIT_CLOSING;
+ qe_port->port.fifosize = 512;
+ qe_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
+
+ qe_port->us_info.ucc_num = qe_port->ucc_num;
+ qe_port->us_info.regs = (phys_addr_t) res.start;
+ qe_port->us_info.irq = qe_port->port.irq;
+
+ qe_port->us_info.rx_bd_ring_len = qe_port->rx_nrfifos;
+ qe_port->us_info.tx_bd_ring_len = qe_port->tx_nrfifos;
+
+ /* Make sure ucc_slow_init() initializes both TX and RX */
+ qe_port->us_info.init_tx = 1;
+ qe_port->us_info.init_rx = 1;
+
+ /* Add the port to the uart sub-system. This will cause
+ * qe_uart_config_port() to be called, so the us_info structure must
+ * be initialized.
+ */
+ ret = uart_add_one_port(&ucc_uart_driver, &qe_port->port);
+ if (ret) {
+ dev_err(&ofdev->dev, "could not add /dev/ttyQE%u\n",
+ qe_port->port.line);
+ goto out_np;
+ }
+
+ dev_set_drvdata(&ofdev->dev, qe_port);
+
+ dev_info(&ofdev->dev, "UCC%u assigned to /dev/ttyQE%u\n",
+ qe_port->ucc_num + 1, qe_port->port.line);
+
+ /* Display the mknod command for this device */
+ dev_dbg(&ofdev->dev, "mknod command is 'mknod /dev/ttyQE%u c %u %u'\n",
+ qe_port->port.line, SERIAL_QE_MAJOR,
+ SERIAL_QE_MINOR + qe_port->port.line);
+
+ return 0;
+out_np:
+ of_node_put(np);
+out_free:
+ kfree(qe_port);
+ return ret;
+}
+
+static int ucc_uart_remove(struct platform_device *ofdev)
+{
+ struct uart_qe_port *qe_port = dev_get_drvdata(&ofdev->dev);
+
+ dev_info(&ofdev->dev, "removing /dev/ttyQE%u\n", qe_port->port.line);
+
+ uart_remove_one_port(&ucc_uart_driver, &qe_port->port);
+
+ dev_set_drvdata(&ofdev->dev, NULL);
+ kfree(qe_port);
+
+ return 0;
+}
+
+static struct of_device_id ucc_uart_match[] = {
+ {
+ .type = "serial",
+ .compatible = "ucc_uart",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ucc_uart_match);
+
+static struct platform_driver ucc_uart_of_driver = {
+ .driver = {
+ .name = "ucc_uart",
+ .owner = THIS_MODULE,
+ .of_match_table = ucc_uart_match,
+ },
+ .probe = ucc_uart_probe,
+ .remove = ucc_uart_remove,
+};
+
+static int __init ucc_uart_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Freescale QUICC Engine UART device driver\n");
+#ifdef LOOPBACK
+ printk(KERN_INFO "ucc-uart: Using loopback mode\n");
+#endif
+
+ ret = uart_register_driver(&ucc_uart_driver);
+ if (ret) {
+ printk(KERN_ERR "ucc-uart: could not register UART driver\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&ucc_uart_of_driver);
+ if (ret)
+ printk(KERN_ERR
+ "ucc-uart: could not register platform driver\n");
+
+ return ret;
+}
+
+static void __exit ucc_uart_exit(void)
+{
+ printk(KERN_INFO
+ "Freescale QUICC Engine UART device driver unloading\n");
+
+ platform_driver_unregister(&ucc_uart_of_driver);
+ uart_unregister_driver(&ucc_uart_driver);
+}
+
+module_init(ucc_uart_init);
+module_exit(ucc_uart_exit);
+
+MODULE_DESCRIPTION("Freescale QUICC Engine (QE) UART");
+MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_QE_MAJOR);
+
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
new file mode 100644
index 00000000..3beb6ab4
--- /dev/null
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -0,0 +1,978 @@
+/*
+ * Driver for NEC VR4100 series Serial Interface Unit.
+ *
+ * Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ * Based on drivers/serial/8250.c, by Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#if defined(CONFIG_SERIAL_VR41XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/console.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#include <asm/io.h>
+#include <asm/vr41xx/siu.h>
+#include <asm/vr41xx/vr41xx.h>
+
+#define SIU_BAUD_BASE 1152000
+#define SIU_MAJOR 204
+#define SIU_MINOR_BASE 82
+
+#define RX_MAX_COUNT 256
+#define TX_MAX_COUNT 15
+
+#define SIUIRSEL 0x08
+ #define TMICMODE 0x20
+ #define TMICTX 0x10
+ #define IRMSEL 0x0c
+ #define IRMSEL_HP 0x08
+ #define IRMSEL_TEMIC 0x04
+ #define IRMSEL_SHARP 0x00
+ #define IRUSESEL 0x02
+ #define SIRSEL 0x01
+
+static struct uart_port siu_uart_ports[SIU_PORTS_MAX] = {
+ [0 ... SIU_PORTS_MAX-1] = {
+ .lock = __SPIN_LOCK_UNLOCKED(siu_uart_ports->lock),
+ .irq = -1,
+ },
+};
+
+#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
+static uint8_t lsr_break_flag[SIU_PORTS_MAX];
+#endif
+
+#define siu_read(port, offset) readb((port)->membase + (offset))
+#define siu_write(port, offset, value) writeb((value), (port)->membase + (offset))
+
+void vr41xx_select_siu_interface(siu_interface_t interface)
+{
+ struct uart_port *port;
+ unsigned long flags;
+ uint8_t irsel;
+
+ port = &siu_uart_ports[0];
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ irsel = siu_read(port, SIUIRSEL);
+ if (interface == SIU_INTERFACE_IRDA)
+ irsel |= SIRSEL;
+ else
+ irsel &= ~SIRSEL;
+ siu_write(port, SIUIRSEL, irsel);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+EXPORT_SYMBOL_GPL(vr41xx_select_siu_interface);
+
+void vr41xx_use_irda(irda_use_t use)
+{
+ struct uart_port *port;
+ unsigned long flags;
+ uint8_t irsel;
+
+ port = &siu_uart_ports[0];
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ irsel = siu_read(port, SIUIRSEL);
+ if (use == FIR_USE_IRDA)
+ irsel |= IRUSESEL;
+ else
+ irsel &= ~IRUSESEL;
+ siu_write(port, SIUIRSEL, irsel);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+EXPORT_SYMBOL_GPL(vr41xx_use_irda);
+
+void vr41xx_select_irda_module(irda_module_t module, irda_speed_t speed)
+{
+ struct uart_port *port;
+ unsigned long flags;
+ uint8_t irsel;
+
+ port = &siu_uart_ports[0];
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ irsel = siu_read(port, SIUIRSEL);
+ irsel &= ~(IRMSEL | TMICTX | TMICMODE);
+ switch (module) {
+ case SHARP_IRDA:
+ irsel |= IRMSEL_SHARP;
+ break;
+ case TEMIC_IRDA:
+ irsel |= IRMSEL_TEMIC | TMICMODE;
+ if (speed == IRDA_TX_4MBPS)
+ irsel |= TMICTX;
+ break;
+ case HP_IRDA:
+ irsel |= IRMSEL_HP;
+ break;
+ default:
+ break;
+ }
+ siu_write(port, SIUIRSEL, irsel);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+EXPORT_SYMBOL_GPL(vr41xx_select_irda_module);
+
+static inline void siu_clear_fifo(struct uart_port *port)
+{
+ siu_write(port, UART_FCR, UART_FCR_ENABLE_FIFO);
+ siu_write(port, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR |
+ UART_FCR_CLEAR_XMIT);
+ siu_write(port, UART_FCR, 0);
+}
+
+static inline unsigned long siu_port_size(struct uart_port *port)
+{
+ switch (port->type) {
+ case PORT_VR41XX_SIU:
+ return 11UL;
+ case PORT_VR41XX_DSIU:
+ return 8UL;
+ }
+
+ return 0;
+}
+
+static inline unsigned int siu_check_type(struct uart_port *port)
+{
+ if (port->line == 0)
+ return PORT_VR41XX_SIU;
+ if (port->line == 1 && port->irq != -1)
+ return PORT_VR41XX_DSIU;
+
+ return PORT_UNKNOWN;
+}
+
+static inline const char *siu_type_name(struct uart_port *port)
+{
+ switch (port->type) {
+ case PORT_VR41XX_SIU:
+ return "SIU";
+ case PORT_VR41XX_DSIU:
+ return "DSIU";
+ }
+
+ return NULL;
+}
+
+static unsigned int siu_tx_empty(struct uart_port *port)
+{
+ uint8_t lsr;
+
+ lsr = siu_read(port, UART_LSR);
+ if (lsr & UART_LSR_TEMT)
+ return TIOCSER_TEMT;
+
+ return 0;
+}
+
+static void siu_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ uint8_t mcr = 0;
+
+ if (mctrl & TIOCM_DTR)
+ mcr |= UART_MCR_DTR;
+ if (mctrl & TIOCM_RTS)
+ mcr |= UART_MCR_RTS;
+ if (mctrl & TIOCM_OUT1)
+ mcr |= UART_MCR_OUT1;
+ if (mctrl & TIOCM_OUT2)
+ mcr |= UART_MCR_OUT2;
+ if (mctrl & TIOCM_LOOP)
+ mcr |= UART_MCR_LOOP;
+
+ siu_write(port, UART_MCR, mcr);
+}
+
+static unsigned int siu_get_mctrl(struct uart_port *port)
+{
+ uint8_t msr;
+ unsigned int mctrl = 0;
+
+ msr = siu_read(port, UART_MSR);
+ if (msr & UART_MSR_DCD)
+ mctrl |= TIOCM_CAR;
+ if (msr & UART_MSR_RI)
+ mctrl |= TIOCM_RNG;
+ if (msr & UART_MSR_DSR)
+ mctrl |= TIOCM_DSR;
+ if (msr & UART_MSR_CTS)
+ mctrl |= TIOCM_CTS;
+
+ return mctrl;
+}
+
+static void siu_stop_tx(struct uart_port *port)
+{
+ unsigned long flags;
+ uint8_t ier;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ ier = siu_read(port, UART_IER);
+ ier &= ~UART_IER_THRI;
+ siu_write(port, UART_IER, ier);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void siu_start_tx(struct uart_port *port)
+{
+ unsigned long flags;
+ uint8_t ier;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ ier = siu_read(port, UART_IER);
+ ier |= UART_IER_THRI;
+ siu_write(port, UART_IER, ier);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void siu_stop_rx(struct uart_port *port)
+{
+ unsigned long flags;
+ uint8_t ier;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ ier = siu_read(port, UART_IER);
+ ier &= ~UART_IER_RLSI;
+ siu_write(port, UART_IER, ier);
+
+ port->read_status_mask &= ~UART_LSR_DR;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void siu_enable_ms(struct uart_port *port)
+{
+ unsigned long flags;
+ uint8_t ier;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ ier = siu_read(port, UART_IER);
+ ier |= UART_IER_MSI;
+ siu_write(port, UART_IER, ier);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void siu_break_ctl(struct uart_port *port, int ctl)
+{
+ unsigned long flags;
+ uint8_t lcr;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ lcr = siu_read(port, UART_LCR);
+ if (ctl == -1)
+ lcr |= UART_LCR_SBC;
+ else
+ lcr &= ~UART_LCR_SBC;
+ siu_write(port, UART_LCR, lcr);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static inline void receive_chars(struct uart_port *port, uint8_t *status)
+{
+ struct tty_struct *tty;
+ uint8_t lsr, ch;
+ char flag;
+ int max_count = RX_MAX_COUNT;
+
+ tty = port->state->port.tty;
+ lsr = *status;
+
+ do {
+ ch = siu_read(port, UART_RX);
+ port->icount.rx++;
+ flag = TTY_NORMAL;
+
+#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
+ lsr |= lsr_break_flag[port->line];
+ lsr_break_flag[port->line] = 0;
+#endif
+ if (unlikely(lsr & (UART_LSR_BI | UART_LSR_FE |
+ UART_LSR_PE | UART_LSR_OE))) {
+ if (lsr & UART_LSR_BI) {
+ lsr &= ~(UART_LSR_FE | UART_LSR_PE);
+ port->icount.brk++;
+
+ if (uart_handle_break(port))
+ goto ignore_char;
+ }
+
+ if (lsr & UART_LSR_FE)
+ port->icount.frame++;
+ if (lsr & UART_LSR_PE)
+ port->icount.parity++;
+ if (lsr & UART_LSR_OE)
+ port->icount.overrun++;
+
+ lsr &= port->read_status_mask;
+ if (lsr & UART_LSR_BI)
+ flag = TTY_BREAK;
+ if (lsr & UART_LSR_FE)
+ flag = TTY_FRAME;
+ if (lsr & UART_LSR_PE)
+ flag = TTY_PARITY;
+ }
+
+ if (uart_handle_sysrq_char(port, ch))
+ goto ignore_char;
+
+ uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
+
+ ignore_char:
+ lsr = siu_read(port, UART_LSR);
+ } while ((lsr & UART_LSR_DR) && (max_count-- > 0));
+
+ tty_flip_buffer_push(tty);
+
+ *status = lsr;
+}
+
+static inline void check_modem_status(struct uart_port *port)
+{
+ uint8_t msr;
+
+ msr = siu_read(port, UART_MSR);
+ if ((msr & UART_MSR_ANY_DELTA) == 0)
+ return;
+ if (msr & UART_MSR_DDCD)
+ uart_handle_dcd_change(port, msr & UART_MSR_DCD);
+ if (msr & UART_MSR_TERI)
+ port->icount.rng++;
+ if (msr & UART_MSR_DDSR)
+ port->icount.dsr++;
+ if (msr & UART_MSR_DCTS)
+ uart_handle_cts_change(port, msr & UART_MSR_CTS);
+
+ wake_up_interruptible(&port->state->port.delta_msr_wait);
+}
+
+static inline void transmit_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit;
+ int max_count = TX_MAX_COUNT;
+
+ xmit = &port->state->xmit;
+
+ if (port->x_char) {
+ siu_write(port, UART_TX, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ siu_stop_tx(port);
+ return;
+ }
+
+ do {
+ siu_write(port, UART_TX, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (max_count-- > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ siu_stop_tx(port);
+}
+
+static irqreturn_t siu_interrupt(int irq, void *dev_id)
+{
+ struct uart_port *port;
+ uint8_t iir, lsr;
+
+ port = (struct uart_port *)dev_id;
+
+ iir = siu_read(port, UART_IIR);
+ if (iir & UART_IIR_NO_INT)
+ return IRQ_NONE;
+
+ lsr = siu_read(port, UART_LSR);
+ if (lsr & UART_LSR_DR)
+ receive_chars(port, &lsr);
+
+ check_modem_status(port);
+
+ if (lsr & UART_LSR_THRE)
+ transmit_chars(port);
+
+ return IRQ_HANDLED;
+}
+
+static int siu_startup(struct uart_port *port)
+{
+ int retval;
+
+ if (port->membase == NULL)
+ return -ENODEV;
+
+ siu_clear_fifo(port);
+
+ (void)siu_read(port, UART_LSR);
+ (void)siu_read(port, UART_RX);
+ (void)siu_read(port, UART_IIR);
+ (void)siu_read(port, UART_MSR);
+
+ if (siu_read(port, UART_LSR) == 0xff)
+ return -ENODEV;
+
+ retval = request_irq(port->irq, siu_interrupt, 0, siu_type_name(port), port);
+ if (retval)
+ return retval;
+
+ if (port->type == PORT_VR41XX_DSIU)
+ vr41xx_enable_dsiuint(DSIUINT_ALL);
+
+ siu_write(port, UART_LCR, UART_LCR_WLEN8);
+
+ spin_lock_irq(&port->lock);
+ siu_set_mctrl(port, port->mctrl);
+ spin_unlock_irq(&port->lock);
+
+ siu_write(port, UART_IER, UART_IER_RLSI | UART_IER_RDI);
+
+ (void)siu_read(port, UART_LSR);
+ (void)siu_read(port, UART_RX);
+ (void)siu_read(port, UART_IIR);
+ (void)siu_read(port, UART_MSR);
+
+ return 0;
+}
+
+static void siu_shutdown(struct uart_port *port)
+{
+ unsigned long flags;
+ uint8_t lcr;
+
+ siu_write(port, UART_IER, 0);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ port->mctrl &= ~TIOCM_OUT2;
+ siu_set_mctrl(port, port->mctrl);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ lcr = siu_read(port, UART_LCR);
+ lcr &= ~UART_LCR_SBC;
+ siu_write(port, UART_LCR, lcr);
+
+ siu_clear_fifo(port);
+
+ (void)siu_read(port, UART_RX);
+
+ if (port->type == PORT_VR41XX_DSIU)
+ vr41xx_disable_dsiuint(DSIUINT_ALL);
+
+ free_irq(port->irq, port);
+}
+
+static void siu_set_termios(struct uart_port *port, struct ktermios *new,
+ struct ktermios *old)
+{
+ tcflag_t c_cflag, c_iflag;
+ uint8_t lcr, fcr, ier;
+ unsigned int baud, quot;
+ unsigned long flags;
+
+ c_cflag = new->c_cflag;
+ switch (c_cflag & CSIZE) {
+ case CS5:
+ lcr = UART_LCR_WLEN5;
+ break;
+ case CS6:
+ lcr = UART_LCR_WLEN6;
+ break;
+ case CS7:
+ lcr = UART_LCR_WLEN7;
+ break;
+ default:
+ lcr = UART_LCR_WLEN8;
+ break;
+ }
+
+ if (c_cflag & CSTOPB)
+ lcr |= UART_LCR_STOP;
+ if (c_cflag & PARENB)
+ lcr |= UART_LCR_PARITY;
+ if ((c_cflag & PARODD) != PARODD)
+ lcr |= UART_LCR_EPAR;
+ if (c_cflag & CMSPAR)
+ lcr |= UART_LCR_SPAR;
+
+ baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16);
+ quot = uart_get_divisor(port, baud);
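+ /*
+ * With uartclk fixed at SIU_BAUD_BASE * 16, the divisor reduces to roughly
+ * SIU_BAUD_BASE / baud; e.g. 115200 baud yields quot = 10 and 9600 baud
+ * yields quot = 120.
+ */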
+
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ uart_update_timeout(port, c_cflag, baud);
+
+ c_iflag = new->c_iflag;
+
+ port->read_status_mask = UART_LSR_THRE | UART_LSR_OE | UART_LSR_DR;
+ if (c_iflag & INPCK)
+ port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= UART_LSR_BI;
+
+ port->ignore_status_mask = 0;
+ if (c_iflag & IGNPAR)
+ port->ignore_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (c_iflag & IGNBRK) {
+ port->ignore_status_mask |= UART_LSR_BI;
+ if (c_iflag & IGNPAR)
+ port->ignore_status_mask |= UART_LSR_OE;
+ }
+
+ if ((c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= UART_LSR_DR;
+
+ ier = siu_read(port, UART_IER);
+ ier &= ~UART_IER_MSI;
+ if (UART_ENABLE_MS(port, c_cflag))
+ ier |= UART_IER_MSI;
+ siu_write(port, UART_IER, ier);
+
+ siu_write(port, UART_LCR, lcr | UART_LCR_DLAB);
+
+ siu_write(port, UART_DLL, (uint8_t)quot);
+ siu_write(port, UART_DLM, (uint8_t)(quot >> 8));
+
+ siu_write(port, UART_LCR, lcr);
+
+ siu_write(port, UART_FCR, fcr);
+
+ siu_set_mctrl(port, port->mctrl);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void siu_pm(struct uart_port *port, unsigned int state, unsigned int oldstate)
+{
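+ /*
+ * Power states as passed in by the serial core: 0 means the port is
+ * running, so supply its clock; 3 means it is powered down, so mask
+ * the clock.
+ */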
+ switch (state) {
+ case 0:
+ switch (port->type) {
+ case PORT_VR41XX_SIU:
+ vr41xx_supply_clock(SIU_CLOCK);
+ break;
+ case PORT_VR41XX_DSIU:
+ vr41xx_supply_clock(DSIU_CLOCK);
+ break;
+ }
+ break;
+ case 3:
+ switch (port->type) {
+ case PORT_VR41XX_SIU:
+ vr41xx_mask_clock(SIU_CLOCK);
+ break;
+ case PORT_VR41XX_DSIU:
+ vr41xx_mask_clock(DSIU_CLOCK);
+ break;
+ }
+ break;
+ }
+}
+
+static const char *siu_type(struct uart_port *port)
+{
+ return siu_type_name(port);
+}
+
+static void siu_release_port(struct uart_port *port)
+{
+ unsigned long size;
+
+ if (port->flags & UPF_IOREMAP) {
+ iounmap(port->membase);
+ port->membase = NULL;
+ }
+
+ size = siu_port_size(port);
+ release_mem_region(port->mapbase, size);
+}
+
+static int siu_request_port(struct uart_port *port)
+{
+ unsigned long size;
+ struct resource *res;
+
+ size = siu_port_size(port);
+ res = request_mem_region(port->mapbase, size, siu_type_name(port));
+ if (res == NULL)
+ return -EBUSY;
+
+ if (port->flags & UPF_IOREMAP) {
+ port->membase = ioremap(port->mapbase, size);
+ if (port->membase == NULL) {
+ release_resource(res);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void siu_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = siu_check_type(port);
+ (void)siu_request_port(port);
+ }
+}
+
+static int siu_verify_port(struct uart_port *port, struct serial_struct *serial)
+{
+ if (port->type != PORT_VR41XX_SIU && port->type != PORT_VR41XX_DSIU)
+ return -EINVAL;
+ if (port->irq != serial->irq)
+ return -EINVAL;
+ if (port->iotype != serial->io_type)
+ return -EINVAL;
+ if (port->mapbase != (unsigned long)serial->iomem_base)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct uart_ops siu_uart_ops = {
+ .tx_empty = siu_tx_empty,
+ .set_mctrl = siu_set_mctrl,
+ .get_mctrl = siu_get_mctrl,
+ .stop_tx = siu_stop_tx,
+ .start_tx = siu_start_tx,
+ .stop_rx = siu_stop_rx,
+ .enable_ms = siu_enable_ms,
+ .break_ctl = siu_break_ctl,
+ .startup = siu_startup,
+ .shutdown = siu_shutdown,
+ .set_termios = siu_set_termios,
+ .pm = siu_pm,
+ .type = siu_type,
+ .release_port = siu_release_port,
+ .request_port = siu_request_port,
+ .config_port = siu_config_port,
+ .verify_port = siu_verify_port,
+};
+
+static int siu_init_ports(struct platform_device *pdev)
+{
+ struct uart_port *port;
+ struct resource *res;
+ int *type = pdev->dev.platform_data;
+ int i;
+
+ if (!type)
+ return 0;
+
+ port = siu_uart_ports;
+ for (i = 0; i < SIU_PORTS_MAX; i++) {
+ port->type = type[i];
+ if (port->type == PORT_UNKNOWN)
+ continue;
+ port->irq = platform_get_irq(pdev, i);
+ port->uartclk = SIU_BAUD_BASE * 16;
+ port->fifosize = 16;
+ port->regshift = 0;
+ port->iotype = UPIO_MEM;
+ port->flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
+ port->line = i;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ port->mapbase = res->start;
+ port++;
+ }
+
+ return i;
+}
+
+#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+static void wait_for_xmitr(struct uart_port *port)
+{
+ int timeout = 10000;
+ uint8_t lsr, msr;
+
+ do {
+ lsr = siu_read(port, UART_LSR);
+ if (lsr & UART_LSR_BI)
+ lsr_break_flag[port->line] = UART_LSR_BI;
+
+ if ((lsr & BOTH_EMPTY) == BOTH_EMPTY)
+ break;
+ } while (timeout-- > 0);
+
+ if (port->flags & UPF_CONS_FLOW) {
+ timeout = 1000000;
+
+ do {
+ msr = siu_read(port, UART_MSR);
+ if ((msr & UART_MSR_CTS) != 0)
+ break;
+ } while (timeout-- > 0);
+ }
+}
+
+static void siu_console_putchar(struct uart_port *port, int ch)
+{
+ wait_for_xmitr(port);
+ siu_write(port, UART_TX, ch);
+}
+
+static void siu_console_write(struct console *con, const char *s, unsigned count)
+{
+ struct uart_port *port;
+ uint8_t ier;
+
+ port = &siu_uart_ports[con->index];
+
+ ier = siu_read(port, UART_IER);
+ siu_write(port, UART_IER, 0);
+
+ uart_console_write(port, s, count, siu_console_putchar);
+
+ wait_for_xmitr(port);
+ siu_write(port, UART_IER, ier);
+}
+
+static int __init siu_console_setup(struct console *con, char *options)
+{
+ struct uart_port *port;
+ int baud = 9600;
+ int parity = 'n';
+ int bits = 8;
+ int flow = 'n';
+
+ if (con->index >= SIU_PORTS_MAX)
+ con->index = 0;
+
+ port = &siu_uart_ports[con->index];
+ if (port->membase == NULL) {
+ if (port->mapbase == 0)
+ return -ENODEV;
+ port->membase = ioremap(port->mapbase, siu_port_size(port));
+ }
+
+ if (port->type == PORT_VR41XX_SIU)
+ vr41xx_select_siu_interface(SIU_INTERFACE_RS232C);
+
+ if (options != NULL)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, con, baud, parity, bits, flow);
+}
+
+static struct uart_driver siu_uart_driver;
+
+static struct console siu_console = {
+ .name = "ttyVR",
+ .write = siu_console_write,
+ .device = uart_console_device,
+ .setup = siu_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &siu_uart_driver,
+};
+
+static int __devinit siu_console_init(void)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < SIU_PORTS_MAX; i++) {
+ port = &siu_uart_ports[i];
+ port->ops = &siu_uart_ops;
+ }
+
+ register_console(&siu_console);
+
+ return 0;
+}
+
+console_initcall(siu_console_init);
+
+void __init vr41xx_siu_early_setup(struct uart_port *port)
+{
+ if (port->type == PORT_UNKNOWN)
+ return;
+
+ siu_uart_ports[port->line].line = port->line;
+ siu_uart_ports[port->line].type = port->type;
+ siu_uart_ports[port->line].uartclk = SIU_BAUD_BASE * 16;
+ siu_uart_ports[port->line].mapbase = port->mapbase;
+ siu_uart_ports[port->line].ops = &siu_uart_ops;
+}
+
+#define SERIAL_VR41XX_CONSOLE &siu_console
+#else
+#define SERIAL_VR41XX_CONSOLE NULL
+#endif
+
+static struct uart_driver siu_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "SIU",
+ .dev_name = "ttyVR",
+ .major = SIU_MAJOR,
+ .minor = SIU_MINOR_BASE,
+ .cons = SERIAL_VR41XX_CONSOLE,
+};
+
+static int __devinit siu_probe(struct platform_device *dev)
+{
+ struct uart_port *port;
+ int num, i, retval;
+
+ num = siu_init_ports(dev);
+ if (num <= 0)
+ return -ENODEV;
+
+ siu_uart_driver.nr = num;
+ retval = uart_register_driver(&siu_uart_driver);
+ if (retval)
+ return retval;
+
+ for (i = 0; i < num; i++) {
+ port = &siu_uart_ports[i];
+ port->ops = &siu_uart_ops;
+ port->dev = &dev->dev;
+
+ retval = uart_add_one_port(&siu_uart_driver, port);
+ if (retval < 0) {
+ port->dev = NULL;
+ break;
+ }
+ }
+
+ if (i == 0 && retval < 0) {
+ uart_unregister_driver(&siu_uart_driver);
+ return retval;
+ }
+
+ return 0;
+}
+
+static int __devexit siu_remove(struct platform_device *dev)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < siu_uart_driver.nr; i++) {
+ port = &siu_uart_ports[i];
+ if (port->dev == &dev->dev) {
+ uart_remove_one_port(&siu_uart_driver, port);
+ port->dev = NULL;
+ }
+ }
+
+ uart_unregister_driver(&siu_uart_driver);
+
+ return 0;
+}
+
+static int siu_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < siu_uart_driver.nr; i++) {
+ port = &siu_uart_ports[i];
+ if ((port->type == PORT_VR41XX_SIU ||
+ port->type == PORT_VR41XX_DSIU) && port->dev == &dev->dev)
+ uart_suspend_port(&siu_uart_driver, port);
+
+ }
+
+ return 0;
+}
+
+static int siu_resume(struct platform_device *dev)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < siu_uart_driver.nr; i++) {
+ port = &siu_uart_ports[i];
+ if ((port->type == PORT_VR41XX_SIU ||
+ port->type == PORT_VR41XX_DSIU) && port->dev == &dev->dev)
+ uart_resume_port(&siu_uart_driver, port);
+ }
+
+ return 0;
+}
+
+static struct platform_driver siu_device_driver = {
+ .probe = siu_probe,
+ .remove = __devexit_p(siu_remove),
+ .suspend = siu_suspend,
+ .resume = siu_resume,
+ .driver = {
+ .name = "SIU",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init vr41xx_siu_init(void)
+{
+ return platform_driver_register(&siu_device_driver);
+}
+
+static void __exit vr41xx_siu_exit(void)
+{
+ platform_driver_unregister(&siu_device_driver);
+}
+
+module_init(vr41xx_siu_init);
+module_exit(vr41xx_siu_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:SIU");
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
new file mode 100644
index 00000000..37fc4e3d
--- /dev/null
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -0,0 +1,646 @@
+/*
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * Based on msm_serial.c, which is:
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if defined(CONFIG_SERIAL_VT8500_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+# define SUPPORT_SYSRQ
+#endif
+
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+/*
+ * UART Register offsets
+ */
+
+#define VT8500_URTDR 0x0000 /* Transmit data */
+#define VT8500_URRDR 0x0004 /* Receive data */
+#define VT8500_URDIV 0x0008 /* Clock/Baud rate divisor */
+#define VT8500_URLCR 0x000C /* Line control */
+#define VT8500_URICR 0x0010 /* IrDA control */
+#define VT8500_URIER 0x0014 /* Interrupt enable */
+#define VT8500_URISR 0x0018 /* Interrupt status */
+#define VT8500_URUSR 0x001c /* UART status */
+#define VT8500_URFCR 0x0020 /* FIFO control */
+#define VT8500_URFIDX 0x0024 /* FIFO index */
+#define VT8500_URBKR 0x0028 /* Break signal count */
+#define VT8500_URTOD 0x002c /* Time out divisor */
+#define VT8500_TXFIFO 0x1000 /* Transmit FIFO (16x8) */
+#define VT8500_RXFIFO 0x1020 /* Receive FIFO (16x10) */
+
+/*
+ * Interrupt enable and status bits
+ */
+
+#define TXDE (1 << 0) /* Tx Data empty */
+#define RXDF (1 << 1) /* Rx Data full */
+#define TXFAE (1 << 2) /* Tx FIFO almost empty */
+#define TXFE (1 << 3) /* Tx FIFO empty */
+#define RXFAF (1 << 4) /* Rx FIFO almost full */
+#define RXFF (1 << 5) /* Rx FIFO full */
+#define TXUDR (1 << 6) /* Tx underrun */
+#define RXOVER (1 << 7) /* Rx overrun */
+#define PER (1 << 8) /* Parity error */
+#define FER (1 << 9) /* Frame error */
+#define TCTS (1 << 10) /* Toggle of CTS */
+#define RXTOUT (1 << 11) /* Rx timeout */
+#define BKDONE (1 << 12) /* Break signal done */
+#define ERR (1 << 13) /* AHB error response */
+
+#define RX_FIFO_INTS (RXFAF | RXFF | RXOVER | PER | FER | RXTOUT)
+#define TX_FIFO_INTS (TXFAE | TXFE | TXUDR)
+
+struct vt8500_port {
+ struct uart_port uart;
+ char name[16];
+ struct clk *clk;
+ unsigned int ier;
+};
+
+static inline void vt8500_write(struct uart_port *port, unsigned int val,
+ unsigned int off)
+{
+ writel(val, port->membase + off);
+}
+
+static inline unsigned int vt8500_read(struct uart_port *port, unsigned int off)
+{
+ return readl(port->membase + off);
+}
+
+static void vt8500_stop_tx(struct uart_port *port)
+{
+ struct vt8500_port *vt8500_port = container_of(port,
+ struct vt8500_port,
+ uart);
+
+ vt8500_port->ier &= ~TX_FIFO_INTS;
+ vt8500_write(port, vt8500_port->ier, VT8500_URIER);
+}
+
+static void vt8500_stop_rx(struct uart_port *port)
+{
+ struct vt8500_port *vt8500_port = container_of(port,
+ struct vt8500_port,
+ uart);
+
+ vt8500_port->ier &= ~RX_FIFO_INTS;
+ vt8500_write(port, vt8500_port->ier, VT8500_URIER);
+}
+
+static void vt8500_enable_ms(struct uart_port *port)
+{
+ struct vt8500_port *vt8500_port = container_of(port,
+ struct vt8500_port,
+ uart);
+
+ vt8500_port->ier |= TCTS;
+ vt8500_write(port, vt8500_port->ier, VT8500_URIER);
+}
+
+static void handle_rx(struct uart_port *port)
+{
+ struct tty_struct *tty = tty_port_tty_get(&port->state->port);
+ if (!tty) {
+ /* Discard data: no tty available */
+ int count = (vt8500_read(port, VT8500_URFIDX) & 0x1f00) >> 8;
+ u16 ch;
+ while (count--)
+ ch = readw(port->membase + VT8500_RXFIFO);
+ return;
+ }
+
+ /*
+ * Handle overrun
+ */
+ if ((vt8500_read(port, VT8500_URISR) & RXOVER)) {
+ port->icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+
+ /* and now the main RX loop */
+ while (vt8500_read(port, VT8500_URFIDX) & 0x1f00) {
+ unsigned int c;
+ char flag = TTY_NORMAL;
+
+ c = readw(port->membase + VT8500_RXFIFO) & 0x3ff;
+
+ /* Mask conditions we're ignoring. */
+ c &= ~port->read_status_mask;
+
+ if (c & FER) {
+ port->icount.frame++;
+ flag = TTY_FRAME;
+ } else if (c & PER) {
+ port->icount.parity++;
+ flag = TTY_PARITY;
+ }
+ port->icount.rx++;
+
+ if (!uart_handle_sysrq_char(port, c))
+ tty_insert_flip_char(tty, c, flag);
+ }
+
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+}
+
+static void handle_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (port->x_char) {
+ writeb(port->x_char, port->membase + VT8500_TXFIFO);
+ port->icount.tx++;
+ port->x_char = 0;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ vt8500_stop_tx(port);
+ return;
+ }
+
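+ /*
+ * The low five bits of URFIDX track the TX FIFO fill level (the FIFO is
+ * 16 bytes deep, see VT8500_TXFIFO above), so keep feeding bytes while
+ * fewer than 16 are queued.
+ */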
+ while ((vt8500_read(port, VT8500_URFIDX) & 0x1f) < 16) {
+ if (uart_circ_empty(xmit))
+ break;
+
+ writeb(xmit->buf[xmit->tail], port->membase + VT8500_TXFIFO);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ vt8500_stop_tx(port);
+}
+
+static void vt8500_start_tx(struct uart_port *port)
+{
+ struct vt8500_port *vt8500_port = container_of(port,
+ struct vt8500_port,
+ uart);
+
+ vt8500_port->ier &= ~TX_FIFO_INTS;
+ vt8500_write(port, vt8500_port->ier, VT8500_URIER);
+ handle_tx(port);
+ vt8500_port->ier |= TX_FIFO_INTS;
+ vt8500_write(port, vt8500_port->ier, VT8500_URIER);
+}
+
+static void handle_delta_cts(struct uart_port *port)
+{
+ port->icount.cts++;
+ wake_up_interruptible(&port->state->port.delta_msr_wait);
+}
+
+static irqreturn_t vt8500_irq(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ unsigned long isr;
+
+ spin_lock(&port->lock);
+ isr = vt8500_read(port, VT8500_URISR);
+
+ /* Acknowledge active status bits */
+ vt8500_write(port, isr, VT8500_URISR);
+
+ if (isr & RX_FIFO_INTS)
+ handle_rx(port);
+ if (isr & TX_FIFO_INTS)
+ handle_tx(port);
+ if (isr & TCTS)
+ handle_delta_cts(port);
+
+ spin_unlock(&port->lock);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int vt8500_tx_empty(struct uart_port *port)
+{
+ return (vt8500_read(port, VT8500_URFIDX) & 0x1f) < 16 ?
+ TIOCSER_TEMT : 0;
+}
+
+static unsigned int vt8500_get_mctrl(struct uart_port *port)
+{
+ unsigned int usr;
+
+ usr = vt8500_read(port, VT8500_URUSR);
+ if (usr & (1 << 4))
+ return TIOCM_CTS;
+ else
+ return 0;
+}
+
+static void vt8500_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static void vt8500_break_ctl(struct uart_port *port, int break_ctl)
+{
+ if (break_ctl)
+ vt8500_write(port, vt8500_read(port, VT8500_URLCR) | (1 << 9),
+ VT8500_URLCR);
+}
+
+static int vt8500_set_baud_rate(struct uart_port *port, unsigned int baud)
+{
+ unsigned long div;
+ unsigned int loops = 1000;
+
+ div = vt8500_read(port, VT8500_URDIV) & ~(0x3ff);
+
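+ /*
+ * The low 10 bits of URDIV hold the divisor, derived from the fixed
+ * 921600-baud reference: e.g. 115200 baud -> 921600/115200 - 1 = 7,
+ * 9600 baud -> 95; out-of-range requests fall back to a divisor of 7.
+ */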
+ if (unlikely((baud < 900) || (baud > 921600)))
+ div |= 7;
+ else
+ div |= (921600 / baud) - 1;
+
+ while ((vt8500_read(port, VT8500_URUSR) & (1 << 5)) && --loops)
+ cpu_relax();
+ vt8500_write(port, div, VT8500_URDIV);
+
+ return baud;
+}
+
+static int vt8500_startup(struct uart_port *port)
+{
+ struct vt8500_port *vt8500_port =
+ container_of(port, struct vt8500_port, uart);
+ int ret;
+
+ snprintf(vt8500_port->name, sizeof(vt8500_port->name),
+ "vt8500_serial%d", port->line);
+
+ ret = request_irq(port->irq, vt8500_irq, IRQF_TRIGGER_HIGH,
+ vt8500_port->name, port);
+ if (unlikely(ret))
+ return ret;
+
+ vt8500_write(port, 0x03, VT8500_URLCR); /* enable TX & RX */
+
+ return 0;
+}
+
+static void vt8500_shutdown(struct uart_port *port)
+{
+ struct vt8500_port *vt8500_port =
+ container_of(port, struct vt8500_port, uart);
+
+ vt8500_port->ier = 0;
+
+ /* disable interrupts and FIFOs */
+ vt8500_write(&vt8500_port->uart, 0, VT8500_URIER);
+ vt8500_write(&vt8500_port->uart, 0x880, VT8500_URFCR);
+ free_irq(port->irq, port);
+}
+
+static void vt8500_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct vt8500_port *vt8500_port =
+ container_of(port, struct vt8500_port, uart);
+ unsigned long flags;
+ unsigned int baud, lcr;
+ unsigned int loops = 1000;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* calculate and set baud rate */
+ baud = uart_get_baud_rate(port, termios, old, 900, 921600);
+ baud = vt8500_set_baud_rate(port, baud);
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ /* calculate parity */
+ lcr = vt8500_read(&vt8500_port->uart, VT8500_URLCR);
+ lcr &= ~((1 << 5) | (1 << 4));
+ if (termios->c_cflag & PARENB) {
+ lcr |= (1 << 4);
+ termios->c_cflag &= ~CMSPAR;
+ if (termios->c_cflag & PARODD)
+ lcr |= (1 << 5);
+ }
+
+ /* calculate bits per char */
+ lcr &= ~(1 << 2);
+ switch (termios->c_cflag & CSIZE) {
+ case CS7:
+ break;
+ case CS8:
+ default:
+ lcr |= (1 << 2);
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
+ break;
+ }
+
+ /* calculate stop bits */
+ lcr &= ~(1 << 3);
+ if (termios->c_cflag & CSTOPB)
+ lcr |= (1 << 3);
+
+ /* set parity, bits per char, and stop bit */
+ vt8500_write(&vt8500_port->uart, lcr, VT8500_URLCR);
+
+ /* Configure status bits to ignore based on termio flags. */
+ port->read_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->read_status_mask = FER | PER;
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ /* Reset FIFOs */
+ vt8500_write(&vt8500_port->uart, 0x88c, VT8500_URFCR);
+ while ((vt8500_read(&vt8500_port->uart, VT8500_URFCR) & 0xc)
+ && --loops)
+ cpu_relax();
+
+ /* Every possible FIFO-related interrupt */
+ vt8500_port->ier = RX_FIFO_INTS | TX_FIFO_INTS;
+
+ /*
+ * CTS flow control
+ */
+ if (UART_ENABLE_MS(&vt8500_port->uart, termios->c_cflag))
+ vt8500_port->ier |= TCTS;
+
+ vt8500_write(&vt8500_port->uart, 0x881, VT8500_URFCR);
+ vt8500_write(&vt8500_port->uart, vt8500_port->ier, VT8500_URIER);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *vt8500_type(struct uart_port *port)
+{
+ struct vt8500_port *vt8500_port =
+ container_of(port, struct vt8500_port, uart);
+ return vt8500_port->name;
+}
+
+static void vt8500_release_port(struct uart_port *port)
+{
+}
+
+static int vt8500_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void vt8500_config_port(struct uart_port *port, int flags)
+{
+ port->type = PORT_VT8500;
+}
+
+static int vt8500_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_VT8500))
+ return -EINVAL;
+ if (unlikely(port->irq != ser->irq))
+ return -EINVAL;
+ return 0;
+}
+
+static struct vt8500_port *vt8500_uart_ports[4];
+static struct uart_driver vt8500_uart_driver;
+
+#ifdef CONFIG_SERIAL_VT8500_CONSOLE
+
+static inline void wait_for_xmitr(struct uart_port *port)
+{
+ unsigned int status, tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ do {
+ status = vt8500_read(port, VT8500_URFIDX);
+
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ } while (status & 0x10);
+}
+
+static void vt8500_console_putchar(struct uart_port *port, int c)
+{
+ wait_for_xmitr(port);
+ writeb(c, port->membase + VT8500_TXFIFO);
+}
+
+static void vt8500_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct vt8500_port *vt8500_port = vt8500_uart_ports[co->index];
+ unsigned long ier;
+
+ BUG_ON(co->index < 0 || co->index >= vt8500_uart_driver.nr);
+
+ ier = vt8500_read(&vt8500_port->uart, VT8500_URIER);
+ vt8500_write(&vt8500_port->uart, 0, VT8500_URIER); /* mask interrupts */
+
+ uart_console_write(&vt8500_port->uart, s, count,
+ vt8500_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and switch back to FIFO
+ */
+ wait_for_xmitr(&vt8500_port->uart);
+ vt8500_write(&vt8500_port->uart, ier, VT8500_URIER);
+}
+
+static int __init vt8500_console_setup(struct console *co, char *options)
+{
+ struct vt8500_port *vt8500_port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (unlikely(co->index >= vt8500_uart_driver.nr || co->index < 0))
+ return -ENXIO;
+
+ vt8500_port = vt8500_uart_ports[co->index];
+
+ if (!vt8500_port)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&vt8500_port->uart,
+ co, baud, parity, bits, flow);
+}
+
+static struct console vt8500_console = {
+ .name = "ttyWMT",
+ .write = vt8500_console_write,
+ .device = uart_console_device,
+ .setup = vt8500_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &vt8500_uart_driver,
+};
+
+#define VT8500_CONSOLE (&vt8500_console)
+
+#else
+#define VT8500_CONSOLE NULL
+#endif
+
+static struct uart_ops vt8500_uart_pops = {
+ .tx_empty = vt8500_tx_empty,
+ .set_mctrl = vt8500_set_mctrl,
+ .get_mctrl = vt8500_get_mctrl,
+ .stop_tx = vt8500_stop_tx,
+ .start_tx = vt8500_start_tx,
+ .stop_rx = vt8500_stop_rx,
+ .enable_ms = vt8500_enable_ms,
+ .break_ctl = vt8500_break_ctl,
+ .startup = vt8500_startup,
+ .shutdown = vt8500_shutdown,
+ .set_termios = vt8500_set_termios,
+ .type = vt8500_type,
+ .release_port = vt8500_release_port,
+ .request_port = vt8500_request_port,
+ .config_port = vt8500_config_port,
+ .verify_port = vt8500_verify_port,
+};
+
+static struct uart_driver vt8500_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "vt8500_serial",
+ .dev_name = "ttyWMT",
+ .nr = 6,
+ .cons = VT8500_CONSOLE,
+};
+
+static int __init vt8500_serial_probe(struct platform_device *pdev)
+{
+ struct vt8500_port *vt8500_port;
+ struct resource *mmres, *irqres;
+ int ret;
+
+ mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!mmres || !irqres)
+ return -ENODEV;
+
+ vt8500_port = kzalloc(sizeof(struct vt8500_port), GFP_KERNEL);
+ if (!vt8500_port)
+ return -ENOMEM;
+
+ vt8500_port->uart.type = PORT_VT8500;
+ vt8500_port->uart.iotype = UPIO_MEM;
+ vt8500_port->uart.mapbase = mmres->start;
+ vt8500_port->uart.irq = irqres->start;
+ vt8500_port->uart.fifosize = 16;
+ vt8500_port->uart.ops = &vt8500_uart_pops;
+ vt8500_port->uart.line = pdev->id;
+ vt8500_port->uart.dev = &pdev->dev;
+ vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
+ vt8500_port->uart.uartclk = 24000000;
+
+ snprintf(vt8500_port->name, sizeof(vt8500_port->name),
+ "VT8500 UART%d", pdev->id);
+
+ vt8500_port->uart.membase = ioremap(mmres->start,
+ mmres->end - mmres->start + 1);
+ if (!vt8500_port->uart.membase) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ vt8500_uart_ports[pdev->id] = vt8500_port;
+
+ uart_add_one_port(&vt8500_uart_driver, &vt8500_port->uart);
+
+ platform_set_drvdata(pdev, vt8500_port);
+
+ return 0;
+
+err:
+ kfree(vt8500_port);
+ return ret;
+}
+
+static int __devexit vt8500_serial_remove(struct platform_device *pdev)
+{
+ struct vt8500_port *vt8500_port = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ uart_remove_one_port(&vt8500_uart_driver, &vt8500_port->uart);
+ kfree(vt8500_port);
+
+ return 0;
+}
+
+static struct platform_driver vt8500_platform_driver = {
+ .probe = vt8500_serial_probe,
+ .remove = vt8500_serial_remove,
+ .driver = {
+ .name = "vt8500_serial",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init vt8500_serial_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&vt8500_uart_driver);
+ if (unlikely(ret))
+ return ret;
+
+ ret = platform_driver_register(&vt8500_platform_driver);
+
+ if (unlikely(ret))
+ uart_unregister_driver(&vt8500_uart_driver);
+
+ return ret;
+}
+
+static void __exit vt8500_serial_exit(void)
+{
+#ifdef CONFIG_SERIAL_VT8500_CONSOLE
+ unregister_console(&vt8500_console);
+#endif
+ platform_driver_unregister(&vt8500_platform_driver);
+ uart_unregister_driver(&vt8500_uart_driver);
+}
+
+module_init(vt8500_serial_init);
+module_exit(vt8500_serial_exit);
+
+MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
+MODULE_DESCRIPTION("Driver for vt8500 serial device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
new file mode 100644
index 00000000..19cc1e81
--- /dev/null
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -0,0 +1,1113 @@
+/*
+ * Xilinx PS UART driver
+ *
+ * 2011 (c) Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it
+ * and/or modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation;
+ * either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/console.h>
+#include <linux/serial.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#define XUARTPS_TTY_NAME "ttyPS"
+#define XUARTPS_NAME "xuartps"
+#define XUARTPS_MAJOR 0 /* use dynamic node allocation */
+#define XUARTPS_MINOR 0 /* works best with devtmpfs */
+#define XUARTPS_NR_PORTS 2
+#define XUARTPS_FIFO_SIZE 16 /* FIFO size */
+#define XUARTPS_REGISTER_SPACE 0xFFF
+
+#define xuartps_readl(offset) ioread32(port->membase + offset)
+#define xuartps_writel(val, offset) iowrite32(val, port->membase + offset)
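+/*
+ * Both accessors rely on a local "struct uart_port *port" variable being in
+ * scope at the call site.
+ */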
+
+/********************************Register Map********************************/
+/** UART
+ *
+ * Register offsets for the UART.
+ *
+ */
+#define XUARTPS_CR_OFFSET 0x00 /* Control Register [8:0] */
+#define XUARTPS_MR_OFFSET 0x04 /* Mode Register [10:0] */
+#define XUARTPS_IER_OFFSET 0x08 /* Interrupt Enable [10:0] */
+#define XUARTPS_IDR_OFFSET 0x0C /* Interrupt Disable [10:0] */
+#define XUARTPS_IMR_OFFSET 0x10 /* Interrupt Mask [10:0] */
+#define XUARTPS_ISR_OFFSET 0x14 /* Interrupt Status [10:0]*/
+#define XUARTPS_BAUDGEN_OFFSET 0x18 /* Baud Rate Generator [15:0] */
+#define XUARTPS_RXTOUT_OFFSET 0x1C /* RX Timeout [7:0] */
+#define XUARTPS_RXWM_OFFSET 0x20 /* RX FIFO Trigger Level [5:0] */
+#define XUARTPS_MODEMCR_OFFSET 0x24 /* Modem Control [5:0] */
+#define XUARTPS_MODEMSR_OFFSET 0x28 /* Modem Status [8:0] */
+#define XUARTPS_SR_OFFSET 0x2C /* Channel Status [11:0] */
+#define XUARTPS_FIFO_OFFSET 0x30 /* FIFO [15:0] or [7:0] */
+#define XUARTPS_BAUDDIV_OFFSET 0x34 /* Baud Rate Divider [7:0] */
+#define XUARTPS_FLOWDEL_OFFSET 0x38 /* Flow Delay [15:0] */
+#define XUARTPS_IRRX_PWIDTH_OFFSET 0x3C /* IR Minimum Received Pulse
+ Width [15:0] */
+#define XUARTPS_IRTX_PWIDTH_OFFSET 0x40 /* IR Transmitted pulse
+ Width [7:0] */
+#define XUARTPS_TXWM_OFFSET 0x44 /* TX FIFO Trigger Level [5:0] */
+
+/** Control Register
+ *
+ * The Control register (CR) controls the major functions of the device.
+ *
+ * Control Register Bit Definitions
+ */
+#define XUARTPS_CR_STOPBRK 0x00000100 /* Stop TX break */
+#define XUARTPS_CR_STARTBRK 0x00000080 /* Set TX break */
+#define XUARTPS_CR_TX_DIS 0x00000020 /* TX disabled. */
+#define XUARTPS_CR_TX_EN 0x00000010 /* TX enabled */
+#define XUARTPS_CR_RX_DIS 0x00000008 /* RX disabled. */
+#define XUARTPS_CR_RX_EN 0x00000004 /* RX enabled */
+#define XUARTPS_CR_TXRST 0x00000002 /* TX logic reset */
+#define XUARTPS_CR_RXRST 0x00000001 /* RX logic reset */
+#define XUARTPS_CR_RST_TO 0x00000040 /* Restart Timeout Counter */
+
+/** Mode Register
+ *
+ * The mode register (MR) defines the mode of transfer as well as the data
+ * format. If this register is modified during transmission or reception,
+ * data validity cannot be guaranteed.
+ *
+ * Mode Register Bit Definitions
+ *
+ */
+#define XUARTPS_MR_CLKSEL 0x00000001 /* Pre-scaler selection */
+#define XUARTPS_MR_CHMODE_L_LOOP 0x00000200 /* Local loop back mode */
+#define XUARTPS_MR_CHMODE_NORM 0x00000000 /* Normal mode */
+
+#define XUARTPS_MR_STOPMODE_2_BIT 0x00000080 /* 2 stop bits */
+#define XUARTPS_MR_STOPMODE_1_BIT 0x00000000 /* 1 stop bit */
+
+#define XUARTPS_MR_PARITY_NONE 0x00000020 /* No parity mode */
+#define XUARTPS_MR_PARITY_MARK 0x00000018 /* Mark parity mode */
+#define XUARTPS_MR_PARITY_SPACE 0x00000010 /* Space parity mode */
+#define XUARTPS_MR_PARITY_ODD 0x00000008 /* Odd parity mode */
+#define XUARTPS_MR_PARITY_EVEN 0x00000000 /* Even parity mode */
+
+#define XUARTPS_MR_CHARLEN_6_BIT 0x00000006 /* 6 bits data */
+#define XUARTPS_MR_CHARLEN_7_BIT 0x00000004 /* 7 bits data */
+#define XUARTPS_MR_CHARLEN_8_BIT 0x00000000 /* 8 bits data */
+
+/** Interrupt Registers
+ *
+ * Interrupt control logic uses the interrupt enable register (IER) and the
+ * interrupt disable register (IDR) to set the value of the bits in the
+ * interrupt mask register (IMR). The IMR determines whether to pass an
+ * interrupt to the interrupt status register (ISR).
+ * Writing a 1 to IER Enables an interrupt, writing a 1 to IDR disables an
+ * interrupt. IMR and ISR are read only, and IER and IDR are write only.
+ * Reading either IER or IDR returns 0x00.
+ *
+ * All four registers have the same bit definitions.
+ */
+#define XUARTPS_IXR_TOUT 0x00000100 /* RX Timeout error interrupt */
+#define XUARTPS_IXR_PARITY 0x00000080 /* Parity error interrupt */
+#define XUARTPS_IXR_FRAMING 0x00000040 /* Framing error interrupt */
+#define XUARTPS_IXR_OVERRUN 0x00000020 /* Overrun error interrupt */
+#define XUARTPS_IXR_TXFULL 0x00000010 /* TX FIFO Full interrupt */
+#define XUARTPS_IXR_TXEMPTY 0x00000008 /* TX FIFO empty interrupt */
+#define XUARTPS_ISR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt */
+#define XUARTPS_IXR_RXTRIG 0x00000001 /* RX FIFO trigger interrupt */
+#define XUARTPS_IXR_RXFULL 0x00000004 /* RX FIFO full interrupt. */
+#define XUARTPS_IXR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt. */
+#define XUARTPS_IXR_MASK 0x00001FFF /* Valid bit mask */
+
+/** Channel Status Register
+ *
+ * The channel status register (CSR) is provided to enable the control logic
+ * to monitor the status of bits in the channel interrupt status register,
+ * even if these are masked out by the interrupt mask register.
+ */
+#define XUARTPS_SR_RXEMPTY 0x00000002 /* RX FIFO empty */
+#define XUARTPS_SR_TXEMPTY 0x00000008 /* TX FIFO empty */
+#define XUARTPS_SR_TXFULL 0x00000010 /* TX FIFO full */
+#define XUARTPS_SR_RXTRIG 0x00000001 /* Rx Trigger */
+
+/**
+ * xuartps_isr - Interrupt handler
+ * @irq: Irq number
+ * @dev_id: Id of the port
+ *
+ * Returns IRQ_HANDLED
+ **/
+static irqreturn_t xuartps_isr(int irq, void *dev_id)
+{
+ struct uart_port *port = (struct uart_port *)dev_id;
+ struct tty_struct *tty;
+ unsigned long flags;
+ unsigned int isrstatus, numbytes;
+ unsigned int data;
+ char status = TTY_NORMAL;
+
+ /* Get the tty which could be NULL so don't assume it's valid */
+ tty = tty_port_tty_get(&port->state->port);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Read the interrupt status register to determine which
+ * interrupt(s) is/are active.
+ */
+ isrstatus = xuartps_readl(XUARTPS_ISR_OFFSET);
+
+ /* drop byte with parity error if IGNPAR specified */
+ if (isrstatus & port->ignore_status_mask & XUARTPS_IXR_PARITY)
+ isrstatus &= ~(XUARTPS_IXR_RXTRIG | XUARTPS_IXR_TOUT);
+
+ isrstatus &= port->read_status_mask;
+ isrstatus &= ~port->ignore_status_mask;
+
+ if ((isrstatus & XUARTPS_IXR_TOUT) ||
+ (isrstatus & XUARTPS_IXR_RXTRIG)) {
+ /* Receive Timeout Interrupt */
+ while ((xuartps_readl(XUARTPS_SR_OFFSET) &
+ XUARTPS_SR_RXEMPTY) != XUARTPS_SR_RXEMPTY) {
+ data = xuartps_readl(XUARTPS_FIFO_OFFSET);
+ port->icount.rx++;
+
+ if (isrstatus & XUARTPS_IXR_PARITY) {
+ port->icount.parity++;
+ status = TTY_PARITY;
+ } else if (isrstatus & XUARTPS_IXR_FRAMING) {
+ port->icount.frame++;
+ status = TTY_FRAME;
+ } else if (isrstatus & XUARTPS_IXR_OVERRUN)
+ port->icount.overrun++;
+
+ if (tty)
+ uart_insert_char(port, isrstatus,
+ XUARTPS_IXR_OVERRUN, data,
+ status);
+ }
+ spin_unlock(&port->lock);
+ if (tty)
+ tty_flip_buffer_push(tty);
+ spin_lock(&port->lock);
+ }
+
+ /* Dispatch an appropriate handler */
+ if ((isrstatus & XUARTPS_IXR_TXEMPTY) == XUARTPS_IXR_TXEMPTY) {
+ if (uart_circ_empty(&port->state->xmit)) {
+ xuartps_writel(XUARTPS_IXR_TXEMPTY,
+ XUARTPS_IDR_OFFSET);
+ } else {
+ numbytes = port->fifosize;
+ /* Break if no more data available in the UART buffer */
+ while (numbytes--) {
+ if (uart_circ_empty(&port->state->xmit))
+ break;
+ /* Get the data from the UART circular buffer
+ * and write it to the xuartps's TX_FIFO
+ * register.
+ */
+ xuartps_writel(
+ port->state->xmit.buf[port->state->xmit.
+ tail], XUARTPS_FIFO_OFFSET);
+
+ port->icount.tx++;
+
+ /* Adjust the tail of the UART buffer and wrap
+ * the buffer if it reaches limit.
+ */
+ port->state->xmit.tail =
+ (port->state->xmit.tail + 1) & \
+ (UART_XMIT_SIZE - 1);
+ }
+
+ if (uart_circ_chars_pending(
+ &port->state->xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+ }
+ }
+
+ xuartps_writel(isrstatus, XUARTPS_ISR_OFFSET);
+
+ /* be sure to release the lock and tty before leaving */
+ spin_unlock_irqrestore(&port->lock, flags);
+ tty_kref_put(tty);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * xuartps_set_baud_rate - Calculate and set the baud rate
+ * @port: Handle to the uart port structure
+ * @baud: Baud rate to set
+ *
+ * Returns baud rate, requested baud when possible, or actual baud when there
+ * was too much error
+ **/
+static unsigned int xuartps_set_baud_rate(struct uart_port *port,
+ unsigned int baud)
+{
+ unsigned int sel_clk;
+ unsigned int calc_baud = 0;
+ unsigned int brgr_val, brdiv_val;
+ unsigned int bauderror;
+
+ /* Formula to obtain baud rate is
+ * baud_tx/rx rate = sel_clk / (CD * (BDIV + 1))
+ * input_clk = (Uart User Defined Clock or Apb Clock)
+ * depends on UCLKEN in MR Reg
+ * sel_clk = input_clk or input_clk/8;
+ * depends on CLKS in MR reg
+ * CD and BDIV depends on values in
+ * baud rate generate register
+ * baud rate clock divisor register
+ */
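+ /* Worked example (hypothetical values, for illustration only):
+ * with sel_clk = 50 MHz and a requested baud of 115200, the loop
+ * below settles on BDIV = 4 and CD = 50000000 / (115200 * 5) = 86,
+ * giving an actual rate of 50000000 / (86 * 5) ~= 116279, i.e.
+ * under 1% error, so the requested 115200 is reported back.
+ */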
+ sel_clk = port->uartclk;
+ if (xuartps_readl(XUARTPS_MR_OFFSET) & XUARTPS_MR_CLKSEL)
+ sel_clk = sel_clk / 8;
+
+ /* Find the best values for baud generation */
+ for (brdiv_val = 4; brdiv_val < 255; brdiv_val++) {
+
+ brgr_val = sel_clk / (baud * (brdiv_val + 1));
+ if (brgr_val < 2 || brgr_val > 65535)
+ continue;
+
+ calc_baud = sel_clk / (brgr_val * (brdiv_val + 1));
+
+ if (baud > calc_baud)
+ bauderror = baud - calc_baud;
+ else
+ bauderror = calc_baud - baud;
+
+ /* use the values when percent error is acceptable */
+ if (((bauderror * 100) / baud) < 3) {
+ calc_baud = baud;
+ break;
+ }
+ }
+
+ /* Set the values for the new baud rate */
+ xuartps_writel(brgr_val, XUARTPS_BAUDGEN_OFFSET);
+ xuartps_writel(brdiv_val, XUARTPS_BAUDDIV_OFFSET);
+
+ return calc_baud;
+}
+
+/*----------------------Uart Operations---------------------------*/
+
+/**
+ * xuartps_start_tx - Start transmitting bytes
+ * @port: Handle to the uart port structure
+ *
+ **/
+static void xuartps_start_tx(struct uart_port *port)
+{
+ unsigned int status, numbytes = port->fifosize;
+
+ if (uart_circ_empty(&port->state->xmit) || uart_tx_stopped(port))
+ return;
+
+ status = xuartps_readl(XUARTPS_CR_OFFSET);
+ /* Set the TX enable bit and clear the TX disable bit to enable the
+ * transmitter.
+ */
+ xuartps_writel((status & ~XUARTPS_CR_TX_DIS) | XUARTPS_CR_TX_EN,
+ XUARTPS_CR_OFFSET);
+
+ while (numbytes-- && ((xuartps_readl(XUARTPS_SR_OFFSET)
+ & XUARTPS_SR_TXFULL)) != XUARTPS_SR_TXFULL) {
+
+ /* Break if no more data available in the UART buffer */
+ if (uart_circ_empty(&port->state->xmit))
+ break;
+
+ /* Get the data from the UART circular buffer and
+ * write it to the xuartps's TX_FIFO register.
+ */
+ xuartps_writel(
+ port->state->xmit.buf[port->state->xmit.tail],
+ XUARTPS_FIFO_OFFSET);
+ port->icount.tx++;
+
+ /* Advance the tail of the circular buffer, wrapping
+ * at UART_XMIT_SIZE (a power of two, hence the mask).
+ */
+ port->state->xmit.tail = (port->state->xmit.tail + 1) &
+ (UART_XMIT_SIZE - 1);
+ }
+
+ /* Enable the TX Empty interrupt */
+ xuartps_writel(XUARTPS_IXR_TXEMPTY, XUARTPS_IER_OFFSET);
+
+ if (uart_circ_chars_pending(&port->state->xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+/**
+ * xuartps_stop_tx - Stop TX
+ * @port: Handle to the uart port structure
+ *
+ **/
+static void xuartps_stop_tx(struct uart_port *port)
+{
+ unsigned int regval;
+
+ regval = xuartps_readl(XUARTPS_CR_OFFSET);
+ regval |= XUARTPS_CR_TX_DIS;
+ /* Disable the transmitter */
+ xuartps_writel(regval, XUARTPS_CR_OFFSET);
+}
+
+/**
+ * xuartps_stop_rx - Stop RX
+ * @port: Handle to the uart port structure
+ *
+ **/
+static void xuartps_stop_rx(struct uart_port *port)
+{
+ unsigned int regval;
+
+ regval = xuartps_readl(XUARTPS_CR_OFFSET);
+ regval |= XUARTPS_CR_RX_DIS;
+ /* Disable the receiver */
+ xuartps_writel(regval, XUARTPS_CR_OFFSET);
+}
+
+/**
+ * xuartps_tx_empty - Check whether TX is empty
+ * @port: Handle to the uart port structure
+ *
+ * Returns TIOCSER_TEMT if the TX FIFO is empty, 0 otherwise
+ **/
+static unsigned int xuartps_tx_empty(struct uart_port *port)
+{
+ unsigned int status;
+
+ status = xuartps_readl(XUARTPS_ISR_OFFSET) & XUARTPS_IXR_TXEMPTY;
+ return status ? TIOCSER_TEMT : 0;
+}
+
+/**
+ * xuartps_break_ctl - Start or stop transmission of break characters
+ * @port: Handle to the uart port structure
+ * @ctl: When -1, start transmitting a break; otherwise stop it
+ *
+ **/
+static void xuartps_break_ctl(struct uart_port *port, int ctl)
+{
+ unsigned int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ status = xuartps_readl(XUARTPS_CR_OFFSET);
+
+ if (ctl == -1)
+ xuartps_writel(XUARTPS_CR_STARTBRK | status,
+ XUARTPS_CR_OFFSET);
+ else {
+ if ((status & XUARTPS_CR_STOPBRK) == 0)
+ xuartps_writel(XUARTPS_CR_STOPBRK | status,
+ XUARTPS_CR_OFFSET);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/**
+ * xuartps_set_termios - termios operations, handling data length, parity,
+ * stop bits, flow control, baud rate
+ * @port: Handle to the uart port structure
+ * @termios: Handle to the input termios structure
+ * @old: Values of the previously saved termios structure
+ *
+ **/
+static void xuartps_set_termios(struct uart_port *port,
+ struct ktermios *termios, struct ktermios *old)
+{
+ unsigned int cval = 0;
+ unsigned int baud;
+ unsigned long flags;
+ unsigned int ctrl_reg, mode_reg;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Empty the receive FIFO first before making changes */
+ while ((xuartps_readl(XUARTPS_SR_OFFSET) &
+ XUARTPS_SR_RXEMPTY) != XUARTPS_SR_RXEMPTY) {
+ xuartps_readl(XUARTPS_FIFO_OFFSET);
+ }
+
+ /* Disable the TX and RX to set baud rate */
+ xuartps_writel(xuartps_readl(XUARTPS_CR_OFFSET) |
+ (XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS),
+ XUARTPS_CR_OFFSET);
+
+ /* Minimum baud rate is 6 bps; the maximum is 10 Mbps for a 100 MHz clock */
+ baud = uart_get_baud_rate(port, termios, old, 0, 10000000);
+ baud = xuartps_set_baud_rate(port, baud);
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ /*
+ * Update the per-port timeout.
+ */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ /* Set TX/RX Reset */
+ xuartps_writel(xuartps_readl(XUARTPS_CR_OFFSET) |
+ (XUARTPS_CR_TXRST | XUARTPS_CR_RXRST),
+ XUARTPS_CR_OFFSET);
+
+ ctrl_reg = xuartps_readl(XUARTPS_CR_OFFSET);
+
+ /* Clear the RX disable and TX disable bits and then set the TX enable
+ * bit and RX enable bit to enable the transmitter and receiver.
+ */
+ xuartps_writel(
+ (ctrl_reg & ~(XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS))
+ | (XUARTPS_CR_TX_EN | XUARTPS_CR_RX_EN),
+ XUARTPS_CR_OFFSET);
+
+ xuartps_writel(10, XUARTPS_RXTOUT_OFFSET);
+
+ port->read_status_mask = XUARTPS_IXR_TXEMPTY | XUARTPS_IXR_RXTRIG |
+ XUARTPS_IXR_OVERRUN | XUARTPS_IXR_TOUT;
+ port->ignore_status_mask = 0;
+
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= XUARTPS_IXR_PARITY |
+ XUARTPS_IXR_FRAMING;
+
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= XUARTPS_IXR_PARITY |
+ XUARTPS_IXR_FRAMING | XUARTPS_IXR_OVERRUN;
+
+ /* ignore all characters if CREAD is not set */
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= XUARTPS_IXR_RXTRIG |
+ XUARTPS_IXR_TOUT | XUARTPS_IXR_PARITY |
+ XUARTPS_IXR_FRAMING | XUARTPS_IXR_OVERRUN;
+
+ mode_reg = xuartps_readl(XUARTPS_MR_OFFSET);
+
+ /* Handling Data Size */
+ switch (termios->c_cflag & CSIZE) {
+ case CS6:
+ cval |= XUARTPS_MR_CHARLEN_6_BIT;
+ break;
+ case CS7:
+ cval |= XUARTPS_MR_CHARLEN_7_BIT;
+ break;
+ default:
+ case CS8:
+ cval |= XUARTPS_MR_CHARLEN_8_BIT;
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
+ break;
+ }
+
+ /* Handling Parity and Stop Bits length */
+ if (termios->c_cflag & CSTOPB)
+ cval |= XUARTPS_MR_STOPMODE_2_BIT; /* 2 STOP bits */
+ else
+ cval |= XUARTPS_MR_STOPMODE_1_BIT; /* 1 STOP bit */
+
+ if (termios->c_cflag & PARENB) {
+ /* Mark or Space parity */
+ if (termios->c_cflag & CMSPAR) {
+ if (termios->c_cflag & PARODD)
+ cval |= XUARTPS_MR_PARITY_MARK;
+ else
+ cval |= XUARTPS_MR_PARITY_SPACE;
+ } else if (termios->c_cflag & PARODD)
+ cval |= XUARTPS_MR_PARITY_ODD;
+ else
+ cval |= XUARTPS_MR_PARITY_EVEN;
+ } else
+ cval |= XUARTPS_MR_PARITY_NONE;
+ xuartps_writel(cval, XUARTPS_MR_OFFSET);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/**
+ * xuartps_startup - Called when an application opens a xuartps port
+ * @port: Handle to the uart port structure
+ *
+ * Returns 0 on success, negative error otherwise
+ **/
+static int xuartps_startup(struct uart_port *port)
+{
+ unsigned int retval = 0, status = 0;
+
+ retval = request_irq(port->irq, xuartps_isr, 0, XUARTPS_NAME,
+ (void *)port);
+ if (retval)
+ return retval;
+
+ /* Disable the TX and RX */
+ xuartps_writel(XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS,
+ XUARTPS_CR_OFFSET);
+
+ /* Issue a TX and RX reset through the Control Register. */
+ xuartps_writel(XUARTPS_CR_TXRST | XUARTPS_CR_RXRST,
+ XUARTPS_CR_OFFSET);
+
+ status = xuartps_readl(XUARTPS_CR_OFFSET);
+
+ /* Clear the RX disable and TX disable bits and then set the TX enable
+ * bit and RX enable bit to enable the transmitter and receiver.
+ */
+ xuartps_writel((status & ~(XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS))
+ | (XUARTPS_CR_TX_EN | XUARTPS_CR_RX_EN |
+ XUARTPS_CR_STOPBRK), XUARTPS_CR_OFFSET);
+
+ /* Set the Mode Register for normal mode, 8 data bits, 1 stop bit,
+ * no parity.
+ */
+ xuartps_writel(XUARTPS_MR_CHMODE_NORM | XUARTPS_MR_STOPMODE_1_BIT
+ | XUARTPS_MR_PARITY_NONE | XUARTPS_MR_CHARLEN_8_BIT,
+ XUARTPS_MR_OFFSET);
+
+ /* Set the RX FIFO trigger level to 14, assuming a FIFO depth of 16 */
+ xuartps_writel(14, XUARTPS_RXWM_OFFSET);
+
+ /* Enable the receive timeout with a value of 10 */
+ xuartps_writel(10, XUARTPS_RXTOUT_OFFSET);
+
+
+ /* Set the Interrupt Registers with desired interrupts */
+ xuartps_writel(XUARTPS_IXR_TXEMPTY | XUARTPS_IXR_PARITY |
+ XUARTPS_IXR_FRAMING | XUARTPS_IXR_OVERRUN |
+ XUARTPS_IXR_RXTRIG | XUARTPS_IXR_TOUT, XUARTPS_IER_OFFSET);
+ xuartps_writel(~(XUARTPS_IXR_TXEMPTY | XUARTPS_IXR_PARITY |
+ XUARTPS_IXR_FRAMING | XUARTPS_IXR_OVERRUN |
+ XUARTPS_IXR_RXTRIG | XUARTPS_IXR_TOUT), XUARTPS_IDR_OFFSET);
+
+ return retval;
+}
+
+/**
+ * xuartps_shutdown - Called when an application closes a xuartps port
+ * @port: Handle to the uart port structure
+ *
+ **/
+static void xuartps_shutdown(struct uart_port *port)
+{
+ int status;
+
+ /* Disable interrupts */
+ status = xuartps_readl(XUARTPS_IMR_OFFSET);
+ xuartps_writel(status, XUARTPS_IDR_OFFSET);
+
+ /* Disable the TX and RX */
+ xuartps_writel(XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS,
+ XUARTPS_CR_OFFSET);
+ free_irq(port->irq, port);
+}
+
+/**
+ * xuartps_type - Return the type string of the port
+ * @port: Handle to the uart port structure
+ *
+ * Returns the driver name if the port type matches, NULL otherwise
+ **/
+static const char *xuartps_type(struct uart_port *port)
+{
+ return port->type == PORT_XUARTPS ? XUARTPS_NAME : NULL;
+}
+
+/**
+ * xuartps_verify_port - Verify the port params
+ * @port: Handle to the uart port structure
+ * @ser: Handle to the structure whose members are compared
+ *
+ * Returns 0 on success, -EINVAL otherwise
+ **/
+static int xuartps_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_XUARTPS)
+ return -EINVAL;
+ if (port->irq != ser->irq)
+ return -EINVAL;
+ if (ser->io_type != UPIO_MEM)
+ return -EINVAL;
+ if (port->iobase != ser->port)
+ return -EINVAL;
+ if (ser->hub6 != 0)
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * xuartps_request_port - Claim the memory region attached to xuartps port,
+ * called when the driver adds a xuartps port via
+ * uart_add_one_port()
+ * @port: Handle to the uart port structure
+ *
+ * Returns 0 on success, -ENOMEM if the request fails
+ **/
+static int xuartps_request_port(struct uart_port *port)
+{
+ if (!request_mem_region(port->mapbase, XUARTPS_REGISTER_SPACE,
+ XUARTPS_NAME)) {
+ return -ENOMEM;
+ }
+
+ port->membase = ioremap(port->mapbase, XUARTPS_REGISTER_SPACE);
+ if (!port->membase) {
+ dev_err(port->dev, "Unable to map registers\n");
+ release_mem_region(port->mapbase, XUARTPS_REGISTER_SPACE);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * xuartps_release_port - Release the memory region attached to a xuartps
+ * port, called when the driver removes a xuartps
+ * port via uart_remove_one_port().
+ * @port: Handle to the uart port structure
+ *
+ **/
+static void xuartps_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase, XUARTPS_REGISTER_SPACE);
+ iounmap(port->membase);
+ port->membase = NULL;
+}
+
+/**
+ * xuartps_config_port - Configure xuartps, called when the driver adds a
+ * xuartps port
+ * @port: Handle to the uart port structure
+ * @flags: Configuration flags (e.g. UART_CONFIG_TYPE)
+ *
+ **/
+static void xuartps_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE && xuartps_request_port(port) == 0)
+ port->type = PORT_XUARTPS;
+}
+
+/**
+ * xuartps_get_mctrl - Get the modem control state
+ *
+ * @port: Handle to the uart port structure
+ *
+ * Returns the modem control state
+ *
+ **/
+static unsigned int xuartps_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+}
+
+static void xuartps_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ /* N/A */
+}
+
+static void xuartps_enable_ms(struct uart_port *port)
+{
+ /* N/A */
+}
+
+/** The UART operations structure
+ */
+static struct uart_ops xuartps_ops = {
+ .set_mctrl = xuartps_set_mctrl,
+ .get_mctrl = xuartps_get_mctrl,
+ .enable_ms = xuartps_enable_ms,
+
+ .start_tx = xuartps_start_tx, /* Start transmitting */
+ .stop_tx = xuartps_stop_tx, /* Stop transmission */
+ .stop_rx = xuartps_stop_rx, /* Stop reception */
+ .tx_empty = xuartps_tx_empty, /* Transmitter busy? */
+ .break_ctl = xuartps_break_ctl, /* Start/stop
+ * transmitting break
+ */
+ .set_termios = xuartps_set_termios, /* Set termios */
+ .startup = xuartps_startup, /* App opens xuartps */
+ .shutdown = xuartps_shutdown, /* App closes xuartps */
+ .type = xuartps_type, /* Set UART type */
+ .verify_port = xuartps_verify_port, /* Verification of port
+ * params
+ */
+ .request_port = xuartps_request_port, /* Claim resources
+ * associated with a
+ * xuartps port
+ */
+ .release_port = xuartps_release_port, /* Release resources
+ * associated with a
+ * xuartps port
+ */
+ .config_port = xuartps_config_port, /* Configure when driver
+ * adds a xuartps port
+ */
+};
+
+static struct uart_port xuartps_port[2];
+
+/**
+ * xuartps_get_port - Find an unused port in the static array and initialize
+ * it with default values
+ *
+ * Returns a pointer to a uart_port or NULL for failure
+ **/
+static struct uart_port *xuartps_get_port(void)
+{
+ struct uart_port *port;
+ int id;
+
+ /* Find the next unused port */
+ for (id = 0; id < XUARTPS_NR_PORTS; id++)
+ if (xuartps_port[id].mapbase == 0)
+ break;
+
+ if (id >= XUARTPS_NR_PORTS)
+ return NULL;
+
+ port = &xuartps_port[id];
+
+ /* At this point, we've got an empty uart_port struct, initialize it */
+ spin_lock_init(&port->lock);
+ port->membase = NULL;
+ port->iobase = 1; /* mark port in use */
+ port->irq = 0;
+ port->type = PORT_UNKNOWN;
+ port->iotype = UPIO_MEM32;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->ops = &xuartps_ops;
+ port->fifosize = XUARTPS_FIFO_SIZE;
+ port->line = id;
+ port->dev = NULL;
+ return port;
+}
+
+/*-----------------------Console driver operations--------------------------*/
+
+#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
+/**
+ * xuartps_console_wait_tx - Wait for the TX FIFO to drain
+ * @port: Handle to the uart port structure
+ *
+ **/
+static void xuartps_console_wait_tx(struct uart_port *port)
+{
+ while ((xuartps_readl(XUARTPS_SR_OFFSET) & XUARTPS_SR_TXEMPTY)
+ != XUARTPS_SR_TXEMPTY)
+ barrier();
+}
+
+/**
+ * xuartps_console_putchar - write the character to the FIFO buffer
+ * @port: Handle to the uart port structure
+ * @ch: Character to be written
+ *
+ **/
+static void xuartps_console_putchar(struct uart_port *port, int ch)
+{
+ xuartps_console_wait_tx(port);
+ xuartps_writel(ch, XUARTPS_FIFO_OFFSET);
+}
+
+/**
+ * xuartps_console_write - Write a string to the console
+ * @co: Console handle
+ * @s: Pointer to character array
+ * @count: Number of characters
+ **/
+static void xuartps_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port = &xuartps_port[co->index];
+ unsigned long flags;
+ unsigned int imr;
+ int locked = 1;
+
+ if (oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* save and disable interrupt */
+ imr = xuartps_readl(XUARTPS_IMR_OFFSET);
+ xuartps_writel(imr, XUARTPS_IDR_OFFSET);
+
+ uart_console_write(port, s, count, xuartps_console_putchar);
+ xuartps_console_wait_tx(port);
+
+ /* Restore the interrupt state. There may be a hardware bug here:
+ * based on the data sheet, rewriting the interrupt enable register
+ * should not be necessary.
+ */
+ xuartps_writel(~imr, XUARTPS_IDR_OFFSET);
+ xuartps_writel(imr, XUARTPS_IER_OFFSET);
+
+ if (locked)
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/**
+ * xuartps_console_setup - Initialize the uart to default config
+ * @co: Console handle
+ * @options: Initial settings of uart
+ *
+ * Returns 0 on success, negative error otherwise
+ **/
+static int __init xuartps_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port = &xuartps_port[co->index];
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index < 0 || co->index >= XUARTPS_NR_PORTS)
+ return -EINVAL;
+
+ if (!port->mapbase) {
+ pr_debug("console on ttyPS%i not present\n", co->index);
+ return -ENODEV;
+ }
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver xuartps_uart_driver;
+
+static struct console xuartps_console = {
+ .name = XUARTPS_TTY_NAME,
+ .write = xuartps_console_write,
+ .device = uart_console_device,
+ .setup = xuartps_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1, /* Specified on the cmdline (e.g. console=ttyPS0) */
+ .data = &xuartps_uart_driver,
+};
+
+/**
+ * xuartps_console_init - Initialization call
+ *
+ * Returns 0 on success, negative error otherwise
+ **/
+static int __init xuartps_console_init(void)
+{
+ register_console(&xuartps_console);
+ return 0;
+}
+
+console_initcall(xuartps_console_init);
+
+#endif /* CONFIG_SERIAL_XILINX_PS_UART_CONSOLE */
+
+/** Structure Definitions
+ */
+static struct uart_driver xuartps_uart_driver = {
+ .owner = THIS_MODULE, /* Owner */
+ .driver_name = XUARTPS_NAME, /* Driver name */
+ .dev_name = XUARTPS_TTY_NAME, /* Node name */
+ .major = XUARTPS_MAJOR, /* Major number */
+ .minor = XUARTPS_MINOR, /* Minor number */
+ .nr = XUARTPS_NR_PORTS, /* Number of UART ports */
+#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
+ .cons = &xuartps_console, /* Console */
+#endif
+};
+
+/* ---------------------------------------------------------------------
+ * Platform bus binding
+ */
+/**
+ * xuartps_probe - Platform driver probe
+ * @pdev: Pointer to the platform device structure
+ *
+ * Returns 0 on success, negative error otherwise
+ **/
+static int __devinit xuartps_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct uart_port *port;
+ struct resource *res, *res2;
+ int clk = 0;
+
+#ifdef CONFIG_OF
+ const unsigned int *prop;
+
+ prop = of_get_property(pdev->dev.of_node, "clock", NULL);
+ if (prop)
+ clk = be32_to_cpup(prop);
+#else
+ clk = *((unsigned int *)(pdev->dev.platform_data));
+#endif
+ if (!clk) {
+ dev_err(&pdev->dev, "no clock specified\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res2)
+ return -ENODEV;
+
+ /* Initialize the port structure */
+ port = xuartps_get_port();
+
+ if (!port) {
+ dev_err(&pdev->dev, "Cannot get uart_port structure\n");
+ return -ENODEV;
+ } else {
+ /* Register the port.
+ * This function also registers this device with the tty layer
+ * and triggers invocation of the config_port() entry point.
+ */
+ port->mapbase = res->start;
+ port->irq = res2->start;
+ port->dev = &pdev->dev;
+ port->uartclk = clk;
+ dev_set_drvdata(&pdev->dev, port);
+ rc = uart_add_one_port(&xuartps_uart_driver, port);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "uart_add_one_port() failed; err=%i\n", rc);
+ dev_set_drvdata(&pdev->dev, NULL);
+ return rc;
+ }
+ return 0;
+ }
+}
+
+/**
+ * xuartps_remove - called when the platform driver is unregistered
+ * @pdev: Pointer to the platform device structure
+ *
+ * Returns 0 on success, negative error otherwise
+ **/
+static int __devexit xuartps_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = dev_get_drvdata(&pdev->dev);
+ int rc = 0;
+
+ /* Remove the xuartps port from the serial core */
+ if (port) {
+ rc = uart_remove_one_port(&xuartps_uart_driver, port);
+ dev_set_drvdata(&pdev->dev, NULL);
+ port->mapbase = 0;
+ }
+ return rc;
+}
+
+/**
+ * xuartps_suspend - suspend event
+ * @pdev: Pointer to the platform device structure
+ * @state: State of the device
+ *
+ * Returns 0
+ **/
+static int xuartps_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ /* Call the serial core helper which handles the suspend. */
+ uart_suspend_port(&xuartps_uart_driver, &xuartps_port[pdev->id]);
+ return 0;
+}
+
+/**
+ * xuartps_resume - Resume after a previous suspend
+ * @pdev: Pointer to the platform device structure
+ *
+ * Returns 0
+ **/
+static int xuartps_resume(struct platform_device *pdev)
+{
+ uart_resume_port(&xuartps_uart_driver, &xuartps_port[pdev->id]);
+ return 0;
+}
+
+/* Match table for of_platform binding */
+
+#ifdef CONFIG_OF
+static struct of_device_id xuartps_of_match[] __devinitdata = {
+ { .compatible = "xlnx,xuartps", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xuartps_of_match);
+#else
+#define xuartps_of_match NULL
+#endif
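+/* For reference, a device tree node matched by the table above might look
+ * like the following sketch; the addresses and values are hypothetical and
+ * only the "compatible" string and "clock" property come from this driver:
+ *
+ * uart@e0000000 {
+ * compatible = "xlnx,xuartps";
+ * reg = <0xe0000000 0x1000>;
+ * interrupts = <...>;
+ * clock = <50000000>;
+ * };
+ */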
+
+static struct platform_driver xuartps_platform_driver = {
+ .probe = xuartps_probe, /* Probe method */
+ .remove = __devexit_p(xuartps_remove), /* Detach method */
+ .suspend = xuartps_suspend, /* Suspend */
+ .resume = xuartps_resume, /* Resume after a suspend */
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = XUARTPS_NAME, /* Driver name */
+ .of_match_table = xuartps_of_match,
+ },
+};
+
+/* ---------------------------------------------------------------------
+ * Module Init and Exit
+ */
+/**
+ * xuartps_init - Initial driver registration call
+ *
+ * Returns 0 on success, negative error otherwise
+ **/
+static int __init xuartps_init(void)
+{
+ int retval = 0;
+
+ /* Register the xuartps driver with the serial core */
+ retval = uart_register_driver(&xuartps_uart_driver);
+ if (retval)
+ return retval;
+
+ /* Register the platform driver */
+ retval = platform_driver_register(&xuartps_platform_driver);
+ if (retval)
+ uart_unregister_driver(&xuartps_uart_driver);
+
+ return retval;
+}
+
+/**
+ * xuartps_exit - Driver unregistration call
+ **/
+static void __exit xuartps_exit(void)
+{
+ /* The order of unregistration is important: unregistering the
+ * UART driver before the platform driver would crash the system.
+ */
+
+ /* Unregister the platform driver */
+ platform_driver_unregister(&xuartps_platform_driver);
+
+ /* Unregister the xuartps driver */
+ uart_unregister_driver(&xuartps_uart_driver);
+}
+
+module_init(xuartps_init);
+module_exit(xuartps_exit);
+
+MODULE_DESCRIPTION("Driver for PS UART");
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
new file mode 100644
index 00000000..1a7fd3e7
--- /dev/null
+++ b/drivers/tty/serial/zs.c
@@ -0,0 +1,1304 @@
+/*
+ * zs.c: Serial port driver for IOASIC DECstations.
+ *
+ * Derived from drivers/sbus/char/sunserial.c by Paul Mackerras.
+ * Derived from drivers/macintosh/macserial.c by Harald Koerfgen.
+ *
+ * DECstation changes
+ * Copyright (C) 1998-2000 Harald Koerfgen
+ * Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
+ *
+ * For the rest of the code the original Copyright applies:
+ * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au)
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ *
+ *
+ * Note: for IOASIC systems the wiring is as follows:
+ *
+ * mouse/keyboard:
+ * DIN-7 MJ-4 signal SCC
+ * 2 1 TxD <- A.TxD
+ * 3 4 RxD -> A.RxD
+ *
+ * EIA-232/EIA-423:
+ * DB-25 MMJ-6 signal SCC
+ * 2 2 TxD <- B.TxD
+ * 3 5 RxD -> B.RxD
+ * 4 RTS <- ~A.RTS
+ * 5 CTS -> ~B.CTS
+ * 6 6 DSR -> ~A.SYNC
+ * 8 CD -> ~B.DCD
+ * 12 DSRS(DCE) -> ~A.CTS (*)
+ * 15 TxC -> B.TxC
+ * 17 RxC -> B.RxC
+ * 20 1 DTR <- ~A.DTR
+ * 22 RI -> ~A.DCD
+ * 23 DSRS(DTE) <- ~B.RTS
+ *
+ * (*) EIA-232 defines the signal at this pin to be SCD, while DSRS(DCE)
+ * is shared with DSRS(DTE) at pin 23.
+ *
+ * As you can immediately notice the wiring of the RTS, DTR and DSR signals
+ * is a bit odd. This makes the handling of port B unnecessarily
+ * complicated and prevents the use of some automatic modes of operation.
+ */
+
+#if defined(CONFIG_SERIAL_ZS_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/bug.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irqflags.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/types.h>
+
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+#include <asm/dec/interrupts.h>
+#include <asm/dec/ioasic_addrs.h>
+#include <asm/dec/system.h>
+
+#include "zs.h"
+
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_DESCRIPTION("DECstation Z85C30 serial driver");
+MODULE_LICENSE("GPL");
+
+
+static char zs_name[] __initdata = "DECstation Z85C30 serial driver version ";
+static char zs_version[] __initdata = "0.10";
+
+/*
+ * It would be nice to dynamically allocate everything that
+ * depends on ZS_NUM_SCCS, so we could support any number of
+ * Z85C30s, but for now...
+ */
+#define ZS_NUM_SCCS 2 /* Max # of ZS chips supported. */
+#define ZS_NUM_CHAN 2 /* 2 channels per chip. */
+#define ZS_CHAN_A 0 /* Index of the channel A. */
+#define ZS_CHAN_B 1 /* Index of the channel B. */
+#define ZS_CHAN_IO_SIZE 8 /* IOMEM space size. */
+#define ZS_CHAN_IO_STRIDE 4 /* Register alignment. */
+#define ZS_CHAN_IO_OFFSET 1 /* The SCC resides on the high byte
+ of the 16-bit IOBUS. */
+#define ZS_CLOCK 7372800 /* Z85C30 PCLK input clock rate. */
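+/* With the default x16 clock mode set up in zs_probe_sccs(), the highest
+ * rate accepted by zs_set_termios() works out to
+ * ZS_CLOCK / 16 / 4 = 7372800 / 64 = 115200 bps.
+ */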
+
+#define to_zport(uport) container_of(uport, struct zs_port, port)
+
+struct zs_parms {
+ resource_size_t scc[ZS_NUM_SCCS];
+ int irq[ZS_NUM_SCCS];
+};
+
+static struct zs_scc zs_sccs[ZS_NUM_SCCS];
+
+static u8 zs_init_regs[ZS_NUM_REGS] __initdata = {
+ 0, /* write 0 */
+ PAR_SPEC, /* write 1 */
+ 0, /* write 2 */
+ 0, /* write 3 */
+ X16CLK | SB1, /* write 4 */
+ 0, /* write 5 */
+ 0, 0, 0, /* write 6, 7, 8 */
+ MIE | DLC | NV, /* write 9 */
+ NRZ, /* write 10 */
+ TCBR | RCBR, /* write 11 */
+ 0, 0, /* BRG time constant, write 12 + 13 */
+ BRSRC | BRENABL, /* write 14 */
+ 0, /* write 15 */
+};
+
+/*
+ * Debugging.
+ */
+#undef ZS_DEBUG_REGS
+
+
+/*
+ * Reading and writing Z85C30 registers.
+ */
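+/*
+ * Access is indirect: for any register other than 0 the register number is
+ * first written to the channel's control port, and the next access to the
+ * same port then reads or writes that register. Register 0 is reached
+ * directly, which is why read_zsreg() and write_zsreg() skip the pointer
+ * write when reg == 0.
+ */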
+static void recovery_delay(void)
+{
+ udelay(2);
+}
+
+static u8 read_zsreg(struct zs_port *zport, int reg)
+{
+ void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET;
+ u8 retval;
+
+ if (reg != 0) {
+ writeb(reg & 0xf, control);
+ fast_iob();
+ recovery_delay();
+ }
+ retval = readb(control);
+ recovery_delay();
+ return retval;
+}
+
+static void write_zsreg(struct zs_port *zport, int reg, u8 value)
+{
+ void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET;
+
+ if (reg != 0) {
+ writeb(reg & 0xf, control);
+ fast_iob(); recovery_delay();
+ }
+ writeb(value, control);
+ fast_iob();
+ recovery_delay();
+ return;
+}
+
+static u8 read_zsdata(struct zs_port *zport)
+{
+ void __iomem *data = zport->port.membase +
+ ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET;
+ u8 retval;
+
+ retval = readb(data);
+ recovery_delay();
+ return retval;
+}
+
+static void write_zsdata(struct zs_port *zport, u8 value)
+{
+ void __iomem *data = zport->port.membase +
+ ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET;
+
+ writeb(value, data);
+ fast_iob();
+ recovery_delay();
+ return;
+}
+
+#ifdef ZS_DEBUG_REGS
+void zs_dump(void)
+{
+ struct zs_port *zport;
+ int i, j;
+
+ for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) {
+ zport = &zs_sccs[i / ZS_NUM_CHAN].zport[i % ZS_NUM_CHAN];
+
+ if (!zport->scc)
+ continue;
+
+ for (j = 0; j < 16; j++)
+ printk("W%-2d = 0x%02x\t", j, zport->regs[j]);
+ printk("\n");
+ for (j = 0; j < 16; j++)
+ printk("R%-2d = 0x%02x\t", j, read_zsreg(zport, j));
+ printk("\n\n");
+ }
+}
+#endif
+
+
+static void zs_spin_lock_cond_irq(spinlock_t *lock, int irq)
+{
+ if (irq)
+ spin_lock_irq(lock);
+ else
+ spin_lock(lock);
+}
+
+static void zs_spin_unlock_cond_irq(spinlock_t *lock, int irq)
+{
+ if (irq)
+ spin_unlock_irq(lock);
+ else
+ spin_unlock(lock);
+}
+
+static int zs_receive_drain(struct zs_port *zport)
+{
+ int loops = 10000;
+
+ while ((read_zsreg(zport, R0) & Rx_CH_AV) && --loops)
+ read_zsdata(zport);
+ return loops;
+}
+
+static int zs_transmit_drain(struct zs_port *zport, int irq)
+{
+ struct zs_scc *scc = zport->scc;
+ int loops = 10000;
+
+ while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && --loops) {
+ zs_spin_unlock_cond_irq(&scc->zlock, irq);
+ udelay(2);
+ zs_spin_lock_cond_irq(&scc->zlock, irq);
+ }
+ return loops;
+}
+
+static int zs_line_drain(struct zs_port *zport, int irq)
+{
+ struct zs_scc *scc = zport->scc;
+ int loops = 10000;
+
+ while (!(read_zsreg(zport, R1) & ALL_SNT) && --loops) {
+ zs_spin_unlock_cond_irq(&scc->zlock, irq);
+ udelay(2);
+ zs_spin_lock_cond_irq(&scc->zlock, irq);
+ }
+ return loops;
+}
+
+
+static void load_zsregs(struct zs_port *zport, u8 *regs, int irq)
+{
+ /* Let the current transmission finish. */
+ zs_line_drain(zport, irq);
+ /* Load 'em up. */
+ write_zsreg(zport, R3, regs[3] & ~RxENABLE);
+ write_zsreg(zport, R5, regs[5] & ~TxENAB);
+ write_zsreg(zport, R4, regs[4]);
+ write_zsreg(zport, R9, regs[9]);
+ write_zsreg(zport, R1, regs[1]);
+ write_zsreg(zport, R2, regs[2]);
+ write_zsreg(zport, R10, regs[10]);
+ write_zsreg(zport, R14, regs[14] & ~BRENABL);
+ write_zsreg(zport, R11, regs[11]);
+ write_zsreg(zport, R12, regs[12]);
+ write_zsreg(zport, R13, regs[13]);
+ write_zsreg(zport, R14, regs[14]);
+ write_zsreg(zport, R15, regs[15]);
+ if (regs[3] & RxENABLE)
+ write_zsreg(zport, R3, regs[3]);
+ if (regs[5] & TxENAB)
+ write_zsreg(zport, R5, regs[5]);
+ return;
+}
+
+
+/*
+ * Status handling routines.
+ */
+
+/*
+ * zs_tx_empty() -- get the transmitter empty status
+ *
+ * Purpose: Let user call ioctl() to get info when the UART physically
+ * is emptied. On bus types like RS485, the transmitter must
+ * release the bus after transmitting. This must be done when
+ * the transmit shift register is empty, not be done when the
+ * transmit holding register is empty. This functionality
+ * allows an RS485 driver to be written in user space.
+ */
+static unsigned int zs_tx_empty(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ unsigned long flags;
+ u8 status;
+
+ spin_lock_irqsave(&scc->zlock, flags);
+ status = read_zsreg(zport, R1);
+ spin_unlock_irqrestore(&scc->zlock, flags);
+
+ return status & ALL_SNT ? TIOCSER_TEMT : 0;
+}
+
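+/*
+ * The mapping below follows the IOASIC wiring described at the top of this
+ * file: CTS and DCD are sensed on the B channel, while the A channel's DCD
+ * pin carries RI and its SYNC pin carries DSR.
+ */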
+static unsigned int zs_raw_get_ab_mctrl(struct zs_port *zport_a,
+ struct zs_port *zport_b)
+{
+ u8 status_a, status_b;
+ unsigned int mctrl;
+
+ status_a = read_zsreg(zport_a, R0);
+ status_b = read_zsreg(zport_b, R0);
+
+ mctrl = ((status_b & CTS) ? TIOCM_CTS : 0) |
+ ((status_b & DCD) ? TIOCM_CAR : 0) |
+ ((status_a & DCD) ? TIOCM_RNG : 0) |
+ ((status_a & SYNC_HUNT) ? TIOCM_DSR : 0);
+
+ return mctrl;
+}
+
+static unsigned int zs_raw_get_mctrl(struct zs_port *zport)
+{
+ struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A];
+
+ return zport != zport_a ? zs_raw_get_ab_mctrl(zport_a, zport) : 0;
+}
+
+static unsigned int zs_raw_xor_mctrl(struct zs_port *zport)
+{
+ struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A];
+ unsigned int mmask, mctrl, delta;
+ u8 mask_a, mask_b;
+
+ if (zport == zport_a)
+ return 0;
+
+ mask_a = zport_a->regs[15];
+ mask_b = zport->regs[15];
+
+ mmask = ((mask_b & CTSIE) ? TIOCM_CTS : 0) |
+ ((mask_b & DCDIE) ? TIOCM_CAR : 0) |
+ ((mask_a & DCDIE) ? TIOCM_RNG : 0) |
+ ((mask_a & SYNCIE) ? TIOCM_DSR : 0);
+
+ mctrl = zport->mctrl;
+ if (mmask) {
+ mctrl &= ~mmask;
+ mctrl |= zs_raw_get_ab_mctrl(zport_a, zport) & mmask;
+ }
+
+ delta = mctrl ^ zport->mctrl;
+ if (delta)
+ zport->mctrl = mctrl;
+
+ return delta;
+}
+
+static unsigned int zs_get_mctrl(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ unsigned int mctrl;
+
+ spin_lock(&scc->zlock);
+ mctrl = zs_raw_get_mctrl(zport);
+ spin_unlock(&scc->zlock);
+
+ return mctrl;
+}
+
+static void zs_set_mctrl(struct uart_port *uport, unsigned int mctrl)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+ u8 oldloop, newloop;
+
+ spin_lock(&scc->zlock);
+ if (zport != zport_a) {
+ if (mctrl & TIOCM_DTR)
+ zport_a->regs[5] |= DTR;
+ else
+ zport_a->regs[5] &= ~DTR;
+ if (mctrl & TIOCM_RTS)
+ zport_a->regs[5] |= RTS;
+ else
+ zport_a->regs[5] &= ~RTS;
+ write_zsreg(zport_a, R5, zport_a->regs[5]);
+ }
+
+ /* Rarely modified, so don't poke at hardware unless necessary. */
+ oldloop = zport->regs[14];
+ newloop = oldloop;
+ if (mctrl & TIOCM_LOOP)
+ newloop |= LOOPBAK;
+ else
+ newloop &= ~LOOPBAK;
+ if (newloop != oldloop) {
+ zport->regs[14] = newloop;
+ write_zsreg(zport, R14, zport->regs[14]);
+ }
+ spin_unlock(&scc->zlock);
+}
+
+static void zs_raw_stop_tx(struct zs_port *zport)
+{
+ write_zsreg(zport, R0, RES_Tx_P);
+ zport->tx_stopped = 1;
+}
+
+static void zs_stop_tx(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+
+ spin_lock(&scc->zlock);
+ zs_raw_stop_tx(zport);
+ spin_unlock(&scc->zlock);
+}
+
+static void zs_raw_transmit_chars(struct zs_port *);
+
+static void zs_start_tx(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+
+ spin_lock(&scc->zlock);
+ if (zport->tx_stopped) {
+ zs_transmit_drain(zport, 0);
+ zport->tx_stopped = 0;
+ zs_raw_transmit_chars(zport);
+ }
+ spin_unlock(&scc->zlock);
+}
+
+static void zs_stop_rx(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+
+ spin_lock(&scc->zlock);
+ zport->regs[15] &= ~BRKIE;
+ zport->regs[1] &= ~(RxINT_MASK | TxINT_ENAB);
+ zport->regs[1] |= RxINT_DISAB;
+
+ if (zport != zport_a) {
+ /* A-side DCD tracks RI and SYNC tracks DSR. */
+ zport_a->regs[15] &= ~(DCDIE | SYNCIE);
+ write_zsreg(zport_a, R15, zport_a->regs[15]);
+ if (!(zport_a->regs[15] & BRKIE)) {
+ zport_a->regs[1] &= ~EXT_INT_ENAB;
+ write_zsreg(zport_a, R1, zport_a->regs[1]);
+ }
+
+ /* This-side DCD tracks DCD and CTS tracks CTS. */
+ zport->regs[15] &= ~(DCDIE | CTSIE);
+ zport->regs[1] &= ~EXT_INT_ENAB;
+ } else {
+ /* DCD tracks RI and SYNC tracks DSR for the B side. */
+ if (!(zport->regs[15] & (DCDIE | SYNCIE)))
+ zport->regs[1] &= ~EXT_INT_ENAB;
+ }
+
+ write_zsreg(zport, R15, zport->regs[15]);
+ write_zsreg(zport, R1, zport->regs[1]);
+ spin_unlock(&scc->zlock);
+}
+
+static void zs_enable_ms(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+
+ if (zport == zport_a)
+ return;
+
+ spin_lock(&scc->zlock);
+
+ /* Clear Ext interrupts if not being handled already. */
+ if (!(zport_a->regs[1] & EXT_INT_ENAB))
+ write_zsreg(zport_a, R0, RES_EXT_INT);
+
+ /* A-side DCD tracks RI and SYNC tracks DSR. */
+ zport_a->regs[1] |= EXT_INT_ENAB;
+ zport_a->regs[15] |= DCDIE | SYNCIE;
+
+ /* This-side DCD tracks DCD and CTS tracks CTS. */
+ zport->regs[15] |= DCDIE | CTSIE;
+
+ zs_raw_xor_mctrl(zport);
+
+ write_zsreg(zport_a, R1, zport_a->regs[1]);
+ write_zsreg(zport_a, R15, zport_a->regs[15]);
+ write_zsreg(zport, R15, zport->regs[15]);
+ spin_unlock(&scc->zlock);
+}
+
+static void zs_break_ctl(struct uart_port *uport, int break_state)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->zlock, flags);
+ if (break_state == -1)
+ zport->regs[5] |= SND_BRK;
+ else
+ zport->regs[5] &= ~SND_BRK;
+ write_zsreg(zport, R5, zport->regs[5]);
+ spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+
+/*
+ * Interrupt handling routines.
+ */
+#define Rx_BRK 0x0100 /* BREAK event software flag. */
+#define Rx_SYS 0x0200 /* SysRq event software flag. */
+
+static void zs_receive_chars(struct zs_port *zport)
+{
+ struct uart_port *uport = &zport->port;
+ struct zs_scc *scc = zport->scc;
+ struct uart_icount *icount;
+ unsigned int avail, status, ch, flag;
+ int count;
+
+ for (count = 16; count; count--) {
+ spin_lock(&scc->zlock);
+ avail = read_zsreg(zport, R0) & Rx_CH_AV;
+ spin_unlock(&scc->zlock);
+ if (!avail)
+ break;
+
+ spin_lock(&scc->zlock);
+ status = read_zsreg(zport, R1) & (Rx_OVR | FRM_ERR | PAR_ERR);
+ ch = read_zsdata(zport);
+ spin_unlock(&scc->zlock);
+
+ flag = TTY_NORMAL;
+
+ icount = &uport->icount;
+ icount->rx++;
+
+ /* Handle the null char received when BREAK is removed. */
+ if (!ch)
+ status |= zport->tty_break;
+ if (unlikely(status &
+ (Rx_OVR | FRM_ERR | PAR_ERR | Rx_SYS | Rx_BRK))) {
+ zport->tty_break = 0;
+
+ /* Reset the error indication. */
+ if (status & (Rx_OVR | FRM_ERR | PAR_ERR)) {
+ spin_lock(&scc->zlock);
+ write_zsreg(zport, R0, ERR_RES);
+ spin_unlock(&scc->zlock);
+ }
+
+ if (status & (Rx_SYS | Rx_BRK)) {
+ icount->brk++;
+ /* SysRq discards the null char. */
+ if (status & Rx_SYS)
+ continue;
+ } else if (status & FRM_ERR)
+ icount->frame++;
+ else if (status & PAR_ERR)
+ icount->parity++;
+ if (status & Rx_OVR)
+ icount->overrun++;
+
+ status &= uport->read_status_mask;
+ if (status & Rx_BRK)
+ flag = TTY_BREAK;
+ else if (status & FRM_ERR)
+ flag = TTY_FRAME;
+ else if (status & PAR_ERR)
+ flag = TTY_PARITY;
+ }
+
+ if (uart_handle_sysrq_char(uport, ch))
+ continue;
+
+ uart_insert_char(uport, status, Rx_OVR, ch, flag);
+ }
+
+ tty_flip_buffer_push(uport->state->port.tty);
+}
+
+static void zs_raw_transmit_chars(struct zs_port *zport)
+{
+ struct circ_buf *xmit = &zport->port.state->xmit;
+
+ /* XON/XOFF chars. */
+ if (zport->port.x_char) {
+ write_zsdata(zport, zport->port.x_char);
+ zport->port.icount.tx++;
+ zport->port.x_char = 0;
+ return;
+ }
+
+ /* Nothing to do, or output stopped by software or hardware. */
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&zport->port)) {
+ zs_raw_stop_tx(zport);
+ return;
+ }
+
+ /* Send char. */
+ write_zsdata(zport, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ zport->port.icount.tx++;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&zport->port);
+
+ /* Are we done? */
+ if (uart_circ_empty(xmit))
+ zs_raw_stop_tx(zport);
+}
+
+static void zs_transmit_chars(struct zs_port *zport)
+{
+ struct zs_scc *scc = zport->scc;
+
+ spin_lock(&scc->zlock);
+ zs_raw_transmit_chars(zport);
+ spin_unlock(&scc->zlock);
+}
+
+static void zs_status_handle(struct zs_port *zport, struct zs_port *zport_a)
+{
+ struct uart_port *uport = &zport->port;
+ struct zs_scc *scc = zport->scc;
+ unsigned int delta;
+ u8 status, brk;
+
+ spin_lock(&scc->zlock);
+
+ /* Get status from Read Register 0. */
+ status = read_zsreg(zport, R0);
+
+ if (zport->regs[15] & BRKIE) {
+ brk = status & BRK_ABRT;
+ if (brk && !zport->brk) {
+ spin_unlock(&scc->zlock);
+ if (uart_handle_break(uport))
+ zport->tty_break = Rx_SYS;
+ else
+ zport->tty_break = Rx_BRK;
+ spin_lock(&scc->zlock);
+ }
+ zport->brk = brk;
+ }
+
+ if (zport != zport_a) {
+ delta = zs_raw_xor_mctrl(zport);
+ spin_unlock(&scc->zlock);
+
+ if (delta & TIOCM_CTS)
+ uart_handle_cts_change(uport,
+ zport->mctrl & TIOCM_CTS);
+ if (delta & TIOCM_CAR)
+ uart_handle_dcd_change(uport,
+ zport->mctrl & TIOCM_CAR);
+ if (delta & TIOCM_RNG)
+ uport->icount.rng++;
+ if (delta & TIOCM_DSR)
+ uport->icount.dsr++;
+
+ if (delta)
+ wake_up_interruptible(&uport->state->port.delta_msr_wait);
+
+ spin_lock(&scc->zlock);
+ }
+
+ /* Clear the status condition... */
+ write_zsreg(zport, R0, RES_EXT_INT);
+
+ spin_unlock(&scc->zlock);
+}
+
+/*
+ * This is the Z85C30 driver's generic interrupt routine.
+ */
+static irqreturn_t zs_interrupt(int irq, void *dev_id)
+{
+ struct zs_scc *scc = dev_id;
+ struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+ struct zs_port *zport_b = &scc->zport[ZS_CHAN_B];
+ irqreturn_t status = IRQ_NONE;
+ u8 zs_intreg;
+ int count;
+
+ /*
+ * NOTE: Read register 3 holds the irq status for both
+ * channels of a chip, but the value is only valid when
+ * read from channel A.
+ * Yes... broken hardware...
+ */
+ for (count = 16; count; count--) {
+ spin_lock(&scc->zlock);
+ zs_intreg = read_zsreg(zport_a, R3);
+ spin_unlock(&scc->zlock);
+ if (!zs_intreg)
+ break;
+
+ /*
+ * We do not like losing characters, so we prioritise
+ * interrupt sources a little bit differently than
+ * the SCC would, were it allowed to.
+ */
+ if (zs_intreg & CHBRxIP)
+ zs_receive_chars(zport_b);
+ if (zs_intreg & CHARxIP)
+ zs_receive_chars(zport_a);
+ if (zs_intreg & CHBEXT)
+ zs_status_handle(zport_b, zport_a);
+ if (zs_intreg & CHAEXT)
+ zs_status_handle(zport_a, zport_a);
+ if (zs_intreg & CHBTxIP)
+ zs_transmit_chars(zport_b);
+ if (zs_intreg & CHATxIP)
+ zs_transmit_chars(zport_a);
+
+ status = IRQ_HANDLED;
+ }
+
+ return status;
+}
+
+
+/*
+ * Finally, routines used to initialize the serial port.
+ */
+static int zs_startup(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ unsigned long flags;
+ int irq_guard;
+ int ret;
+
+ irq_guard = atomic_add_return(1, &scc->irq_guard);
+ if (irq_guard == 1) {
+ ret = request_irq(zport->port.irq, zs_interrupt,
+ IRQF_SHARED, "scc", scc);
+ if (ret) {
+ atomic_add(-1, &scc->irq_guard);
+ printk(KERN_ERR "zs: can't get irq %d\n",
+ zport->port.irq);
+ return ret;
+ }
+ }
+
+ spin_lock_irqsave(&scc->zlock, flags);
+
+ /* Clear the receive FIFO. */
+ zs_receive_drain(zport);
+
+ /* Clear the interrupt registers. */
+ write_zsreg(zport, R0, ERR_RES);
+ write_zsreg(zport, R0, RES_Tx_P);
+ /* But Ext only if not being handled already. */
+ if (!(zport->regs[1] & EXT_INT_ENAB))
+ write_zsreg(zport, R0, RES_EXT_INT);
+
+ /* Finally, enable sequencing and interrupts. */
+ zport->regs[1] &= ~RxINT_MASK;
+ zport->regs[1] |= RxINT_ALL | TxINT_ENAB | EXT_INT_ENAB;
+ zport->regs[3] |= RxENABLE;
+ zport->regs[15] |= BRKIE;
+ write_zsreg(zport, R1, zport->regs[1]);
+ write_zsreg(zport, R3, zport->regs[3]);
+ write_zsreg(zport, R5, zport->regs[5]);
+ write_zsreg(zport, R15, zport->regs[15]);
+
+ /* Record the current state of RR0. */
+ zport->mctrl = zs_raw_get_mctrl(zport);
+ zport->brk = read_zsreg(zport, R0) & BRK_ABRT;
+
+ zport->tx_stopped = 1;
+
+ spin_unlock_irqrestore(&scc->zlock, flags);
+
+ return 0;
+}
+
+static void zs_shutdown(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ unsigned long flags;
+ int irq_guard;
+
+ spin_lock_irqsave(&scc->zlock, flags);
+
+ zport->regs[3] &= ~RxENABLE;
+ write_zsreg(zport, R5, zport->regs[5]);
+ write_zsreg(zport, R3, zport->regs[3]);
+
+ spin_unlock_irqrestore(&scc->zlock, flags);
+
+ irq_guard = atomic_add_return(-1, &scc->irq_guard);
+ if (!irq_guard)
+ free_irq(zport->port.irq, scc);
+}
+
+
+static void zs_reset(struct zs_port *zport)
+{
+ struct zs_scc *scc = zport->scc;
+ int irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->zlock, flags);
+ irq = !irqs_disabled_flags(flags);
+ if (!scc->initialised) {
+ /* Reset the pointer first, just in case... */
+ read_zsreg(zport, R0);
+ /* And let the current transmission finish. */
+ zs_line_drain(zport, irq);
+ write_zsreg(zport, R9, FHWRES);
+ udelay(10);
+ write_zsreg(zport, R9, 0);
+ scc->initialised = 1;
+ }
+ load_zsregs(zport, zport->regs, irq);
+ spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
+ struct ktermios *old_termios)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+ int irq;
+ unsigned int baud, brg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->zlock, flags);
+ irq = !irqs_disabled_flags(flags);
+
+ /* Byte size. */
+ zport->regs[3] &= ~RxNBITS_MASK;
+ zport->regs[5] &= ~TxNBITS_MASK;
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ zport->regs[3] |= Rx5;
+ zport->regs[5] |= Tx5;
+ break;
+ case CS6:
+ zport->regs[3] |= Rx6;
+ zport->regs[5] |= Tx6;
+ break;
+ case CS7:
+ zport->regs[3] |= Rx7;
+ zport->regs[5] |= Tx7;
+ break;
+ case CS8:
+ default:
+ zport->regs[3] |= Rx8;
+ zport->regs[5] |= Tx8;
+ break;
+ }
+
+ /* Parity and stop bits. */
+ zport->regs[4] &= ~(XCLK_MASK | SB_MASK | PAR_ENA | PAR_EVEN);
+ if (termios->c_cflag & CSTOPB)
+ zport->regs[4] |= SB2;
+ else
+ zport->regs[4] |= SB1;
+ if (termios->c_cflag & PARENB)
+ zport->regs[4] |= PAR_ENA;
+ if (!(termios->c_cflag & PARODD))
+ zport->regs[4] |= PAR_EVEN;
+ switch (zport->clk_mode) {
+ case 64:
+ zport->regs[4] |= X64CLK;
+ break;
+ case 32:
+ zport->regs[4] |= X32CLK;
+ break;
+ case 16:
+ zport->regs[4] |= X16CLK;
+ break;
+ case 1:
+ zport->regs[4] |= X1CLK;
+ break;
+ default:
+ BUG();
+ }
+
+ baud = uart_get_baud_rate(uport, termios, old_termios, 0,
+ uport->uartclk / zport->clk_mode / 4);
+
+ brg = ZS_BPS_TO_BRG(baud, uport->uartclk / zport->clk_mode);
+ zport->regs[12] = brg & 0xff;
+ zport->regs[13] = (brg >> 8) & 0xff;
+
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
+ uport->read_status_mask = Rx_OVR;
+ if (termios->c_iflag & INPCK)
+ uport->read_status_mask |= FRM_ERR | PAR_ERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ uport->read_status_mask |= Rx_BRK;
+
+ uport->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ uport->ignore_status_mask |= FRM_ERR | PAR_ERR;
+ if (termios->c_iflag & IGNBRK) {
+ uport->ignore_status_mask |= Rx_BRK;
+ if (termios->c_iflag & IGNPAR)
+ uport->ignore_status_mask |= Rx_OVR;
+ }
+
+ if (termios->c_cflag & CREAD)
+ zport->regs[3] |= RxENABLE;
+ else
+ zport->regs[3] &= ~RxENABLE;
+
+ if (zport != zport_a) {
+ if (!(termios->c_cflag & CLOCAL)) {
+ zport->regs[15] |= DCDIE;
+ } else
+ zport->regs[15] &= ~DCDIE;
+ if (termios->c_cflag & CRTSCTS) {
+ zport->regs[15] |= CTSIE;
+ } else
+ zport->regs[15] &= ~CTSIE;
+ zs_raw_xor_mctrl(zport);
+ }
+
+ /* Load up the new values. */
+ load_zsregs(zport, zport->regs, irq);
+
+ spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+/*
+ * Hack alert!
+ * Required solely so that the initial PROM-based console
+ * works undisturbed in parallel with this one.
+ */
+static void zs_pm(struct uart_port *uport, unsigned int state,
+ unsigned int oldstate)
+{
+ struct zs_port *zport = to_zport(uport);
+
+ if (state < 3)
+ zport->regs[5] |= TxENAB;
+ else
+ zport->regs[5] &= ~TxENAB;
+ write_zsreg(zport, R5, zport->regs[5]);
+}
+
+
+static const char *zs_type(struct uart_port *uport)
+{
+ return "Z85C30 SCC";
+}
+
+static void zs_release_port(struct uart_port *uport)
+{
+ iounmap(uport->membase);
+ uport->membase = NULL;
+ release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE);
+}
+
+static int zs_map_port(struct uart_port *uport)
+{
+ if (!uport->membase)
+ uport->membase = ioremap_nocache(uport->mapbase,
+ ZS_CHAN_IO_SIZE);
+ if (!uport->membase) {
+ printk(KERN_ERR "zs: Cannot map MMIO\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int zs_request_port(struct uart_port *uport)
+{
+ int ret;
+
+ if (!request_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE, "scc")) {
+ printk(KERN_ERR "zs: Unable to reserve MMIO resource\n");
+ return -EBUSY;
+ }
+ ret = zs_map_port(uport);
+ if (ret) {
+ release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE);
+ return ret;
+ }
+ return 0;
+}
+
+static void zs_config_port(struct uart_port *uport, int flags)
+{
+ struct zs_port *zport = to_zport(uport);
+
+ if (flags & UART_CONFIG_TYPE) {
+ if (zs_request_port(uport))
+ return;
+
+ uport->type = PORT_ZS;
+
+ zs_reset(zport);
+ }
+}
+
+static int zs_verify_port(struct uart_port *uport, struct serial_struct *ser)
+{
+ struct zs_port *zport = to_zport(uport);
+ int ret = 0;
+
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_ZS)
+ ret = -EINVAL;
+ if (ser->irq != uport->irq)
+ ret = -EINVAL;
+ if (ser->baud_base != uport->uartclk / zport->clk_mode / 4)
+ ret = -EINVAL;
+ return ret;
+}
+
+
+static struct uart_ops zs_ops = {
+ .tx_empty = zs_tx_empty,
+ .set_mctrl = zs_set_mctrl,
+ .get_mctrl = zs_get_mctrl,
+ .stop_tx = zs_stop_tx,
+ .start_tx = zs_start_tx,
+ .stop_rx = zs_stop_rx,
+ .enable_ms = zs_enable_ms,
+ .break_ctl = zs_break_ctl,
+ .startup = zs_startup,
+ .shutdown = zs_shutdown,
+ .set_termios = zs_set_termios,
+ .pm = zs_pm,
+ .type = zs_type,
+ .release_port = zs_release_port,
+ .request_port = zs_request_port,
+ .config_port = zs_config_port,
+ .verify_port = zs_verify_port,
+};
+
+/*
+ * Initialize Z85C30 port structures.
+ */
+static int __init zs_probe_sccs(void)
+{
+ static int probed;
+ struct zs_parms zs_parms;
+ int chip, side, irq;
+ int n_chips = 0;
+ int i;
+
+ if (probed)
+ return 0;
+
+ irq = dec_interrupt[DEC_IRQ_SCC0];
+ if (irq >= 0) {
+ zs_parms.scc[n_chips] = IOASIC_SCC0;
+ zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC0];
+ n_chips++;
+ }
+ irq = dec_interrupt[DEC_IRQ_SCC1];
+ if (irq >= 0) {
+ zs_parms.scc[n_chips] = IOASIC_SCC1;
+ zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC1];
+ n_chips++;
+ }
+ if (!n_chips)
+ return -ENXIO;
+
+ probed = 1;
+
+ for (chip = 0; chip < n_chips; chip++) {
+ spin_lock_init(&zs_sccs[chip].zlock);
+ for (side = 0; side < ZS_NUM_CHAN; side++) {
+ struct zs_port *zport = &zs_sccs[chip].zport[side];
+ struct uart_port *uport = &zport->port;
+
+ zport->scc = &zs_sccs[chip];
+ zport->clk_mode = 16;
+
+ uport->irq = zs_parms.irq[chip];
+ uport->uartclk = ZS_CLOCK;
+ uport->fifosize = 1;
+ uport->iotype = UPIO_MEM;
+ uport->flags = UPF_BOOT_AUTOCONF;
+ uport->ops = &zs_ops;
+ uport->line = chip * ZS_NUM_CHAN + side;
+ uport->mapbase = dec_kn_slot_base +
+ zs_parms.scc[chip] +
+ (side ^ ZS_CHAN_B) * ZS_CHAN_IO_SIZE;
+
+ for (i = 0; i < ZS_NUM_REGS; i++)
+ zport->regs[i] = zs_init_regs[i];
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef CONFIG_SERIAL_ZS_CONSOLE
+static void zs_console_putchar(struct uart_port *uport, int ch)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ int irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->zlock, flags);
+ irq = !irqs_disabled_flags(flags);
+ if (zs_transmit_drain(zport, irq))
+ write_zsdata(zport, ch);
+ spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ */
+static void zs_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN;
+ struct zs_port *zport = &zs_sccs[chip].zport[side];
+ struct zs_scc *scc = zport->scc;
+ unsigned long flags;
+ u8 txint, txenb;
+ int irq;
+
+ /* Disable transmit interrupts and enable the transmitter. */
+ spin_lock_irqsave(&scc->zlock, flags);
+ txint = zport->regs[1];
+ txenb = zport->regs[5];
+ if (txint & TxINT_ENAB) {
+ zport->regs[1] = txint & ~TxINT_ENAB;
+ write_zsreg(zport, R1, zport->regs[1]);
+ }
+ if (!(txenb & TxENAB)) {
+ zport->regs[5] = txenb | TxENAB;
+ write_zsreg(zport, R5, zport->regs[5]);
+ }
+ spin_unlock_irqrestore(&scc->zlock, flags);
+
+ uart_console_write(&zport->port, s, count, zs_console_putchar);
+
+ /* Restore transmit interrupts and the transmitter enable. */
+ spin_lock_irqsave(&scc->zlock, flags);
+ irq = !irqs_disabled_flags(flags);
+ zs_line_drain(zport, irq);
+ if (!(txenb & TxENAB)) {
+ zport->regs[5] &= ~TxENAB;
+ write_zsreg(zport, R5, zport->regs[5]);
+ }
+ if (txint & TxINT_ENAB) {
+ zport->regs[1] |= TxINT_ENAB;
+ write_zsreg(zport, R1, zport->regs[1]);
+ }
+ spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+/*
+ * Setup serial console baud/bits/parity. We do two things here:
+ * - construct a cflag setting for the first uart_open()
+ * - initialise the serial port
+ * Return non-zero if we didn't find a serial port.
+ */
+static int __init zs_console_setup(struct console *co, char *options)
+{
+ int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN;
+ struct zs_port *zport = &zs_sccs[chip].zport[side];
+ struct uart_port *uport = &zport->port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+
+ ret = zs_map_port(uport);
+ if (ret)
+ return ret;
+
+ zs_reset(zport);
+ zs_pm(uport, 0, -1);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ return uart_set_options(uport, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver zs_reg;
+static struct console zs_console = {
+ .name = "ttyS",
+ .write = zs_console_write,
+ .device = uart_console_device,
+ .setup = zs_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &zs_reg,
+};
+
+/*
+ * Register console.
+ */
+static int __init zs_serial_console_init(void)
+{
+ int ret;
+
+ ret = zs_probe_sccs();
+ if (ret)
+ return ret;
+ register_console(&zs_console);
+
+ return 0;
+}
+
+console_initcall(zs_serial_console_init);
+
+#define SERIAL_ZS_CONSOLE &zs_console
+#else
+#define SERIAL_ZS_CONSOLE NULL
+#endif /* CONFIG_SERIAL_ZS_CONSOLE */
+
+static struct uart_driver zs_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "serial",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .nr = ZS_NUM_SCCS * ZS_NUM_CHAN,
+ .cons = SERIAL_ZS_CONSOLE,
+};
+
+/* zs_init() initializes the driver. */
+static int __init zs_init(void)
+{
+ int i, ret;
+
+ pr_info("%s%s\n", zs_name, zs_version);
+
+ /* Find out how many Z85C30 SCCs we have. */
+ ret = zs_probe_sccs();
+ if (ret)
+ return ret;
+
+ ret = uart_register_driver(&zs_reg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) {
+ struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN];
+ struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN];
+ struct uart_port *uport = &zport->port;
+
+ if (zport->scc)
+ uart_add_one_port(&zs_reg, uport);
+ }
+
+ return 0;
+}
+
+static void __exit zs_exit(void)
+{
+ int i;
+
+ for (i = ZS_NUM_SCCS * ZS_NUM_CHAN - 1; i >= 0; i--) {
+ struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN];
+ struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN];
+ struct uart_port *uport = &zport->port;
+
+ if (zport->scc)
+ uart_remove_one_port(&zs_reg, uport);
+ }
+
+ uart_unregister_driver(&zs_reg);
+}
+
+module_init(zs_init);
+module_exit(zs_exit);
diff --git a/drivers/tty/serial/zs.h b/drivers/tty/serial/zs.h
new file mode 100644
index 00000000..aa921b57
--- /dev/null
+++ b/drivers/tty/serial/zs.h
@@ -0,0 +1,284 @@
+/*
+ * zs.h: Definitions for the DECstation Z85C30 serial driver.
+ *
+ * Adapted from drivers/sbus/char/sunserial.h by Paul Mackerras.
+ * Adapted from drivers/macintosh/macserial.h by Harald Koerfgen.
+ *
+ * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au)
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 2004, 2005, 2007 Maciej W. Rozycki
+ */
+#ifndef _SERIAL_ZS_H
+#define _SERIAL_ZS_H
+
+#ifdef __KERNEL__
+
+#define ZS_NUM_REGS 16
+
+/*
+ * This is our internal structure for each serial port's state.
+ */
+struct zs_port {
+ struct zs_scc *scc; /* Containing SCC. */
+ struct uart_port port; /* Underlying UART. */
+
+ int clk_mode; /* May be 1, 16, 32, or 64. */
+
+ unsigned int tty_break; /* Set on BREAK condition. */
+ int tx_stopped; /* Output is suspended. */
+
+ unsigned int mctrl; /* State of modem lines. */
+ u8 brk; /* BREAK state from RR0. */
+
+ u8 regs[ZS_NUM_REGS]; /* Channel write registers. */
+};
+
+/*
+ * Per-SCC state for locking and the interrupt handler.
+ */
+struct zs_scc {
+ struct zs_port zport[2];
+ spinlock_t zlock;
+ atomic_t irq_guard;
+ int initialised;
+};
+
+#endif /* __KERNEL__ */
+
+/*
+ * Conversion routines between BRG time constants and bits per second.
+ */
+#define ZS_BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
+#define ZS_BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
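+/*
+ * Example: with the 7.3728 MHz PCLK divided by the x16 clock mode,
+ * freq = 460800, so ZS_BPS_TO_BRG(9600, 460800) = (470400 / 19200) - 2
+ * = 24 - 2 = 22 (integer arithmetic), and
+ * ZS_BRG_TO_BPS(22, 460800) = 460800 / 2 / 24 = 9600.
+ */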
+
+/*
+ * The Zilog register set.
+ */
+
+/* Write Register 0 (Command) */
+#define R0 0 /* Register selects */
+#define R1 1
+#define R2 2
+#define R3 3
+#define R4 4
+#define R5 5
+#define R6 6
+#define R7 7
+#define R8 8
+#define R9 9
+#define R10 10
+#define R11 11
+#define R12 12
+#define R13 13
+#define R14 14
+#define R15 15
+
+#define NULLCODE 0 /* Null Code */
+#define POINT_HIGH 0x8 /* Select upper half of registers */
+#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */
+#define SEND_ABORT 0x18 /* HDLC Abort */
+#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */
+#define RES_Tx_P 0x28 /* Reset TxINT Pending */
+#define ERR_RES 0x30 /* Error Reset */
+#define RES_H_IUS 0x38 /* Reset highest IUS */
+
+#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */
+#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */
+#define RES_EOM_L 0xC0 /* Reset EOM latch */
+
+/* Write Register 1 (Tx/Rx/Ext Int Enable and WAIT/DMA Commands) */
+#define EXT_INT_ENAB 0x1 /* Ext Int Enable */
+#define TxINT_ENAB 0x2 /* Tx Int Enable */
+#define PAR_SPEC 0x4 /* Parity is special condition */
+
+#define RxINT_DISAB 0 /* Rx Int Disable */
+#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */
+#define RxINT_ALL 0x10 /* Int on all Rx Characters or error */
+#define RxINT_ERR 0x18 /* Int on error only */
+#define RxINT_MASK 0x18
+
+#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */
+#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */
+#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */
+
+/* Write Register 2 (Interrupt Vector) */
+
+/* Write Register 3 (Receive Parameters and Control) */
+#define RxENABLE 0x1 /* Rx Enable */
+#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */
+#define ADD_SM 0x4 /* Address Search Mode (SDLC) */
+#define RxCRC_ENAB 0x8 /* Rx CRC Enable */
+#define ENT_HM 0x10 /* Enter Hunt Mode */
+#define AUTO_ENAB 0x20 /* Auto Enables */
+#define Rx5 0x0 /* Rx 5 Bits/Character */
+#define Rx7 0x40 /* Rx 7 Bits/Character */
+#define Rx6 0x80 /* Rx 6 Bits/Character */
+#define Rx8 0xc0 /* Rx 8 Bits/Character */
+#define RxNBITS_MASK 0xc0
+
+/* Write Register 4 (Transmit/Receive Miscellaneous Parameters and Modes) */
+#define PAR_ENA 0x1 /* Parity Enable */
+#define PAR_EVEN 0x2 /* Parity Even/Odd* */
+
+#define SYNC_ENAB 0 /* Sync Modes Enable */
+#define SB1 0x4 /* 1 stop bit/char */
+#define SB15 0x8 /* 1.5 stop bits/char */
+#define SB2 0xc /* 2 stop bits/char */
+#define SB_MASK 0xc
+
+#define MONSYNC 0 /* 8 Bit Sync character */
+#define BISYNC 0x10 /* 16 bit sync character */
+#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */
+#define EXTSYNC 0x30 /* External Sync Mode */
+
+#define X1CLK 0x0 /* x1 clock mode */
+#define X16CLK 0x40 /* x16 clock mode */
+#define X32CLK 0x80 /* x32 clock mode */
+#define X64CLK 0xc0 /* x64 clock mode */
+#define XCLK_MASK 0xc0
+
+/* Write Register 5 (Transmit Parameters and Controls) */
+#define TxCRC_ENAB 0x1 /* Tx CRC Enable */
+#define RTS 0x2 /* RTS */
+#define SDLC_CRC 0x4 /* SDLC/CRC-16 */
+#define TxENAB 0x8 /* Tx Enable */
+#define SND_BRK 0x10 /* Send Break */
+#define Tx5 0x0 /* Tx 5 bits (or less)/character */
+#define Tx7 0x20 /* Tx 7 bits/character */
+#define Tx6 0x40 /* Tx 6 bits/character */
+#define Tx8 0x60 /* Tx 8 bits/character */
+#define TxNBITS_MASK 0x60
+#define DTR 0x80 /* DTR */
+
+/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
+
+/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
+
+/* Write Register 8 (Transmit Buffer) */
+
+/* Write Register 9 (Master Interrupt Control) */
+#define VIS 1 /* Vector Includes Status */
+#define NV 2 /* No Vector */
+#define DLC 4 /* Disable Lower Chain */
+#define MIE 8 /* Master Interrupt Enable */
+#define STATHI 0x10 /* Status high */
+#define SOFTACK 0x20 /* Software Interrupt Acknowledge */
+#define NORESET 0 /* No reset on write to R9 */
+#define CHRB 0x40 /* Reset channel B */
+#define CHRA 0x80 /* Reset channel A */
+#define FHWRES 0xc0 /* Force hardware reset */
+
+/* Write Register 10 (Miscellaneous Transmitter/Receiver Control Bits) */
+#define BIT6 1 /* 6 bit/8bit sync */
+#define LOOPMODE 2 /* SDLC Loop mode */
+#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */
+#define MARKIDLE 8 /* Mark/flag on idle */
+#define GAOP 0x10 /* Go active on poll */
+#define NRZ 0 /* NRZ mode */
+#define NRZI 0x20 /* NRZI mode */
+#define FM1 0x40 /* FM1 (transition = 1) */
+#define FM0 0x60 /* FM0 (transition = 0) */
+#define CRCPS 0x80 /* CRC Preset I/O */
+
+/* Write Register 11 (Clock Mode Control) */
+#define TRxCXT 0 /* TRxC = Xtal output */
+#define TRxCTC 1 /* TRxC = Transmit clock */
+#define TRxCBR 2 /* TRxC = BR Generator Output */
+#define TRxCDP 3 /* TRxC = DPLL output */
+#define TRxCOI 4 /* TRxC O/I */
+#define TCRTxCP 0 /* Transmit clock = RTxC pin */
+#define TCTRxCP 8 /* Transmit clock = TRxC pin */
+#define TCBR 0x10 /* Transmit clock = BR Generator output */
+#define TCDPLL 0x18 /* Transmit clock = DPLL output */
+#define RCRTxCP 0 /* Receive clock = RTxC pin */
+#define RCTRxCP 0x20 /* Receive clock = TRxC pin */
+#define RCBR 0x40 /* Receive clock = BR Generator output */
+#define RCDPLL 0x60 /* Receive clock = DPLL output */
+#define RTxCX 0x80 /* RTxC Xtal/No Xtal */
+
+/* Write Register 12 (Lower Byte of Baud Rate Generator Time Constant) */
+
+/* Write Register 13 (Upper Byte of Baud Rate Generator Time Constant) */
+
+/* Write Register 14 (Miscellaneous Control Bits) */
+#define BRENABL 1 /* Baud rate generator enable */
+#define BRSRC 2 /* Baud rate generator source */
+#define DTRREQ 4 /* DTR/Request function */
+#define AUTOECHO 8 /* Auto Echo */
+#define LOOPBAK 0x10 /* Local loopback */
+#define SEARCH 0x20 /* Enter search mode */
+#define RMC 0x40 /* Reset missing clock */
+#define DISDPLL 0x60 /* Disable DPLL */
+#define SSBR 0x80 /* Set DPLL source = BR generator */
+#define SSRTxC 0xa0 /* Set DPLL source = RTxC */
+#define SFMM 0xc0 /* Set FM mode */
+#define SNRZI 0xe0 /* Set NRZI mode */
+
+/* Write Register 15 (External/Status Interrupt Control) */
+#define WR7P_EN 1 /* WR7 Prime SDLC Feature Enable */
+#define ZCIE 2 /* Zero count IE */
+#define DCDIE 8 /* DCD IE */
+#define SYNCIE 0x10 /* Sync/hunt IE */
+#define CTSIE 0x20 /* CTS IE */
+#define TxUIE 0x40 /* Tx Underrun/EOM IE */
+#define BRKIE 0x80 /* Break/Abort IE */
+
+
+/* Read Register 0 (Transmit/Receive Buffer Status and External Status) */
+#define Rx_CH_AV 0x1 /* Rx Character Available */
+#define ZCOUNT 0x2 /* Zero count */
+#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */
+#define DCD 0x8 /* DCD */
+#define SYNC_HUNT 0x10 /* Sync/hunt */
+#define CTS 0x20 /* CTS */
+#define TxEOM 0x40 /* Tx underrun */
+#define BRK_ABRT 0x80 /* Break/Abort */
+
+/* Read Register 1 (Special Receive Condition Status) */
+#define ALL_SNT 0x1 /* All sent */
+/* Residue Data for 8 Rx bits/char programmed */
+#define RES3 0x8 /* 0/3 */
+#define RES4 0x4 /* 0/4 */
+#define RES5 0xc /* 0/5 */
+#define RES6 0x2 /* 0/6 */
+#define RES7 0xa /* 0/7 */
+#define RES8 0x6 /* 0/8 */
+#define RES18 0xe /* 1/8 */
+#define RES28 0x0 /* 2/8 */
+/* Special Rx Condition Interrupts */
+#define PAR_ERR 0x10 /* Parity Error */
+#define Rx_OVR 0x20 /* Rx Overrun Error */
+#define FRM_ERR 0x40 /* CRC/Framing Error */
+#define END_FR 0x80 /* End of Frame (SDLC) */
+
+/* Read Register 2 (Interrupt Vector (WR2) -- channel A). */
+
+/* Read Register 2 (Modified Interrupt Vector -- channel B). */
+
+/* Read Register 3 (Interrupt Pending Bits -- channel A only). */
+#define CHBEXT 0x1 /* Channel B Ext/Stat IP */
+#define CHBTxIP 0x2 /* Channel B Tx IP */
+#define CHBRxIP 0x4 /* Channel B Rx IP */
+#define CHAEXT 0x8 /* Channel A Ext/Stat IP */
+#define CHATxIP 0x10 /* Channel A Tx IP */
+#define CHARxIP 0x20 /* Channel A Rx IP */
+
+/* Read Register 6 (SDLC FIFO Status and Byte Count LSB) */
+
+/* Read Register 7 (SDLC FIFO Status and Byte Count MSB) */
+
+/* Read Register 8 (Receive Data) */
+
+/* Read Register 10 (Miscellaneous Status Bits) */
+#define ONLOOP 2 /* On loop */
+#define LOOPSEND 0x10 /* Loop sending */
+#define CLK2MIS 0x40 /* Two clocks missing */
+#define CLK1MIS 0x80 /* One clock missing */
+
+/* Read Register 12 (Lower Byte of Baud Rate Generator Constant (WR12)) */
+
+/* Read Register 13 (Upper Byte of Baud Rate Generator Constant (WR13)) */
+
+/* Read Register 15 (External/Status Interrupt Control (WR15)) */
+
+#endif /* _SERIAL_ZS_H */
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
new file mode 100644
index 00000000..272e417a
--- /dev/null
+++ b/drivers/tty/synclink.c
@@ -0,0 +1,8117 @@
+/*
+ * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
+ *
+ * Device driver for Microgate SyncLink ISA and PCI
+ * high speed multiprotocol serial adapters.
+ *
+ * written by Paul Fulghum for Microgate Corporation
+ * paulkf@microgate.com
+ *
+ * Microgate and SyncLink are trademarks of Microgate Corporation
+ *
+ * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
+ *
+ * Original release 01/11/99
+ *
+ * This code is released under the GNU General Public License (GPL)
+ *
+ * This driver is primarily intended for use in synchronous
+ * HDLC mode. Asynchronous mode is also provided.
+ *
+ * When operating in synchronous mode, each call to mgsl_write()
+ * contains exactly one complete HDLC frame. Calling mgsl_put_char
+ * will start assembling an HDLC frame that will not be sent until
+ * mgsl_flush_chars or mgsl_write is called.
+ *
+ * Synchronous receive data is reported as complete frames. To accomplish
+ * this, the TTY flip buffer is bypassed (too small to hold largest
+ * frame and may fragment frames) and the line discipline
+ * receive entry point is called directly.
+ *
+ * This driver has been tested with a slightly modified ppp.c driver
+ * for synchronous PPP.
+ *
+ * 2000/02/16
+ * Added interface for syncppp.c driver (an alternate synchronous PPP
+ * implementation that also supports Cisco HDLC). Each device instance
+ * registers as a tty device AND a network device (if dosyncppp option
+ * is set for the device). The functionality is determined by which
+ * device interface is opened.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
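A standalone sketch of the one-frame-per-write() semantics described above for synchronous mode: a user-space sender hands the driver complete HDLC frames rather than a byte stream. The device node path and frame contents are assumptions made for the example, not values defined by this driver.

#include <stddef.h>
#include <fcntl.h>
#include <unistd.h>

static int send_one_frame(const char *dev, const unsigned char *frame, size_t len)
{
        int fd = open(dev, O_RDWR);     /* e.g. "/dev/ttySL0" -- path is assumed */
        ssize_t n;

        if (fd < 0)
                return -1;

        n = write(fd, frame, len);      /* one write() == one complete HDLC frame */
        close(fd);
        return n == (ssize_t)len ? 0 : -1;
}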
+
+#if defined(__i386__)
+# define BREAKPOINT() asm(" int $3");
+#else
+# define BREAKPOINT() { }
+#endif
+
+#define MAX_ISA_DEVICES 10
+#define MAX_PCI_DEVICES 10
+#define MAX_TOTAL_DEVICES 20
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/synclink.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <linux/bitops.h>
+#include <asm/types.h>
+#include <linux/termios.h>
+#include <linux/workqueue.h>
+#include <linux/hdlc.h>
+#include <linux/dma-mapping.h>
+
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
+#endif
+
+#define GET_USER(error,value,addr) error = get_user(value,addr)
+#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
+#define PUT_USER(error,value,addr) error = put_user(value,addr)
+#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
+
+#include <asm/uaccess.h>
+
+#define RCLRVALUE 0xffff
+
+static MGSL_PARAMS default_params = {
+ MGSL_MODE_HDLC, /* unsigned long mode */
+ 0, /* unsigned char loopback; */
+ HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
+ HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
+ 0, /* unsigned long clock_speed; */
+ 0xff, /* unsigned char addr_filter; */
+ HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
+ HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
+ HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
+ 9600, /* unsigned long data_rate; */
+ 8, /* unsigned char data_bits; */
+ 1, /* unsigned char stop_bits; */
+ ASYNC_PARITY_NONE /* unsigned char parity; */
+};
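A hedged user-space sketch of how parameters like these defaults are typically read back and adjusted through the MGSL_IOCGPARAMS/MGSL_IOCSPARAMS ioctls; the device path and the chosen line rate are assumptions for illustration only:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/synclink.h>

int configure_port(void)
{
        MGSL_PARAMS params;
        int fd = open("/dev/ttySL0", O_RDWR | O_NONBLOCK);      /* path is an assumption */

        if (fd < 0)
                return -1;

        if (ioctl(fd, MGSL_IOCGPARAMS, &params) < 0)    /* read current settings */
                goto fail;

        params.mode = MGSL_MODE_HDLC;           /* synchronous HDLC framing */
        params.data_rate = 64000;               /* example line rate, bps */
        params.crc_type = HDLC_CRC_16_CCITT;

        if (ioctl(fd, MGSL_IOCSPARAMS, &params) < 0)    /* apply new settings */
                goto fail;

        close(fd);
        return 0;
fail:
        close(fd);
        return -1;
}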
+
+#define SHARED_MEM_ADDRESS_SIZE 0x40000
+#define BUFFERLISTSIZE 4096
+#define DMABUFFERSIZE 4096
+#define MAXRXFRAMES 7
+
+typedef struct _DMABUFFERENTRY
+{
+ u32 phys_addr; /* 32-bit flat physical address of data buffer */
+ volatile u16 count; /* buffer size/data count */
+ volatile u16 status; /* Control/status field */
+ volatile u16 rcc; /* character count field */
+ u16 reserved; /* padding required by 16C32 */
+ u32 link; /* 32-bit flat link to next buffer entry */
+ char *virt_addr; /* virtual address of data buffer */
+ u32 phys_entry; /* physical address of this buffer entry */
+ dma_addr_t dma_addr;
+} DMABUFFERENTRY, *DMAPBUFFERENTRY;
+
+/* The queue of BH actions to be performed */
+
+#define BH_RECEIVE 1
+#define BH_TRANSMIT 2
+#define BH_STATUS 4
+
+#define IO_PIN_SHUTDOWN_LIMIT 100
+
+struct _input_signal_events {
+ int ri_up;
+ int ri_down;
+ int dsr_up;
+ int dsr_down;
+ int dcd_up;
+ int dcd_down;
+ int cts_up;
+ int cts_down;
+};
+
+/* transmit holding buffer definitions*/
+#define MAX_TX_HOLDING_BUFFERS 5
+struct tx_holding_buffer {
+ int buffer_size;
+ unsigned char * buffer;
+};
+
+
+/*
+ * Device instance data structure
+ */
+
+struct mgsl_struct {
+ int magic;
+ struct tty_port port;
+ int line;
+ int hw_version;
+
+ struct mgsl_icount icount;
+
+ int timeout;
+ int x_char; /* xon/xoff character */
+ u16 read_status_mask;
+ u16 ignore_status_mask;
+ unsigned char *xmit_buf;
+ int xmit_head;
+ int xmit_tail;
+ int xmit_cnt;
+
+ wait_queue_head_t status_event_wait_q;
+ wait_queue_head_t event_wait_q;
+ struct timer_list tx_timer; /* HDLC transmit timeout timer */
+ struct mgsl_struct *next_device; /* device list link */
+
+ spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
+ struct work_struct task; /* task structure for scheduling bh */
+
+ u32 EventMask; /* event trigger mask */
+ u32 RecordedEvents; /* pending events */
+
+ u32 max_frame_size; /* as set by device config */
+
+ u32 pending_bh;
+
+ bool bh_running; /* Protection from multiple */
+ int isr_overflow;
+ bool bh_requested;
+
+ int dcd_chkcount; /* check counts to prevent */
+ int cts_chkcount; /* too many IRQs if a signal */
+ int dsr_chkcount; /* is floating */
+ int ri_chkcount;
+
+ char *buffer_list; /* virtual address of Rx & Tx buffer lists */
+ u32 buffer_list_phys;
+ dma_addr_t buffer_list_dma_addr;
+
+ unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
+ DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
+ unsigned int current_rx_buffer;
+
+ int num_tx_dma_buffers; /* number of tx dma frames required */
+ int tx_dma_buffers_used;
+ unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
+ DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
+ int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
+ int current_tx_buffer; /* next tx dma buffer to be loaded */
+
+ unsigned char *intermediate_rxbuffer;
+
+ int num_tx_holding_buffers; /* number of tx holding buffers allocated */
+ int get_tx_holding_index; /* next tx holding buffer for adapter to load */
+ int put_tx_holding_index; /* next tx holding buffer to store user request */
+ int tx_holding_count; /* number of tx holding buffers waiting */
+ struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
+
+ bool rx_enabled;
+ bool rx_overflow;
+ bool rx_rcc_underrun;
+
+ bool tx_enabled;
+ bool tx_active;
+ u32 idle_mode;
+
+ u16 cmr_value;
+ u16 tcsr_value;
+
+ char device_name[25]; /* device instance name */
+
+ unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
+ unsigned char bus; /* expansion bus number (zero based) */
+ unsigned char function; /* PCI device number */
+
+ unsigned int io_base; /* base I/O address of adapter */
+ unsigned int io_addr_size; /* size of the I/O address range */
+ bool io_addr_requested; /* true if I/O address requested */
+
+ unsigned int irq_level; /* interrupt level */
+ unsigned long irq_flags;
+ bool irq_requested; /* true if IRQ requested */
+
+ unsigned int dma_level; /* DMA channel */
+ bool dma_requested; /* true if dma channel requested */
+
+ u16 mbre_bit;
+ u16 loopback_bits;
+ u16 usc_idle_mode;
+
+ MGSL_PARAMS params; /* communications parameters */
+
+ unsigned char serial_signals; /* current serial signal states */
+
+ bool irq_occurred; /* for diagnostics use */
+ unsigned int init_error; /* Initialization startup error (DIAGS) */
+ int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
+
+ u32 last_mem_alloc;
+ unsigned char* memory_base; /* shared memory address (PCI only) */
+ u32 phys_memory_base;
+ bool shared_mem_requested;
+
+ unsigned char* lcr_base; /* local config registers (PCI only) */
+ u32 phys_lcr_base;
+ u32 lcr_offset;
+ bool lcr_mem_requested;
+
+ u32 misc_ctrl_value;
+ char flag_buf[MAX_ASYNC_BUFFER_SIZE];
+ char char_buf[MAX_ASYNC_BUFFER_SIZE];
+ bool drop_rts_on_tx_done;
+
+ bool loopmode_insert_requested;
+ bool loopmode_send_done_requested;
+
+ struct _input_signal_events input_signal_events;
+
+ /* generic HDLC device parts */
+ int netcount;
+ spinlock_t netlock;
+
+#if SYNCLINK_GENERIC_HDLC
+ struct net_device *netdev;
+#endif
+};
+
+#define MGSL_MAGIC 0x5401
+
+/*
+ * The size of the serial xmit buffer is 1 page, or 4096 bytes
+ */
+#ifndef SERIAL_XMIT_SIZE
+#define SERIAL_XMIT_SIZE 4096
+#endif
+
+/*
+ * These macros define the offsets used in calculating the
+ * I/O address of the specified USC registers.
+ */
+
+
+#define DCPIN 2 /* Bit 1 of I/O address */
+#define SDPIN 4 /* Bit 2 of I/O address */
+
+#define DCAR 0 /* DMA command/address register */
+#define CCAR SDPIN /* channel command/address register */
+#define DATAREG DCPIN + SDPIN /* serial data register */
+#define MSBONLY 0x41
+#define LSBONLY 0x40
+
+/*
+ * These macros define the register address (ordinal number)
+ * used for writing address/value pairs to the USC.
+ */
+
+#define CMR 0x02 /* Channel mode Register */
+#define CCSR 0x04 /* Channel Command/status Register */
+#define CCR 0x06 /* Channel Control Register */
+#define PSR 0x08 /* Port status Register */
+#define PCR 0x0a /* Port Control Register */
+#define TMDR 0x0c /* Test mode Data Register */
+#define TMCR 0x0e /* Test mode Control Register */
+#define CMCR 0x10 /* Clock mode Control Register */
+#define HCR 0x12 /* Hardware Configuration Register */
+#define IVR 0x14 /* Interrupt Vector Register */
+#define IOCR 0x16 /* Input/Output Control Register */
+#define ICR 0x18 /* Interrupt Control Register */
+#define DCCR 0x1a /* Daisy Chain Control Register */
+#define MISR 0x1c /* Misc Interrupt status Register */
+#define SICR 0x1e /* status Interrupt Control Register */
+#define RDR 0x20 /* Receive Data Register */
+#define RMR 0x22 /* Receive mode Register */
+#define RCSR 0x24 /* Receive Command/status Register */
+#define RICR 0x26 /* Receive Interrupt Control Register */
+#define RSR 0x28 /* Receive Sync Register */
+#define RCLR 0x2a /* Receive count Limit Register */
+#define RCCR 0x2c /* Receive Character count Register */
+#define TC0R 0x2e /* Time Constant 0 Register */
+#define TDR 0x30 /* Transmit Data Register */
+#define TMR 0x32 /* Transmit mode Register */
+#define TCSR 0x34 /* Transmit Command/status Register */
+#define TICR 0x36 /* Transmit Interrupt Control Register */
+#define TSR 0x38 /* Transmit Sync Register */
+#define TCLR 0x3a /* Transmit count Limit Register */
+#define TCCR 0x3c /* Transmit Character count Register */
+#define TC1R 0x3e /* Time Constant 1 Register */
+
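A minimal sketch of what an address/value write helper built on these ordinals could look like, assuming the register ordinal goes out through the channel command/address register (CCAR) followed by the data word; the driver's actual usc_OutReg()/usc_InReg() are defined later in this file and may differ in detail:

/* Hedged sketch only, not the driver's implementation. */
#include <linux/io.h>

struct demo_usc {
        unsigned int io_base;           /* base I/O address of adapter */
};

static void demo_out_reg(struct demo_usc *usc, unsigned short reg, unsigned short val)
{
        outw(reg, usc->io_base + 4 /* CCAR */); /* select register ordinal */
        outw(val, usc->io_base + 4 /* CCAR */); /* write the value */
}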
+
+/*
+ * MACRO DEFINITIONS FOR DMA REGISTERS
+ */
+
+#define DCR 0x06 /* DMA Control Register (shared) */
+#define DACR 0x08 /* DMA Array count Register (shared) */
+#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
+#define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
+#define DICR 0x18 /* DMA Interrupt Control Register (shared) */
+#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
+#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
+
+#define TDMR 0x02 /* Transmit DMA mode Register */
+#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
+#define TBCR 0x2a /* Transmit Byte count Register */
+#define TARL 0x2c /* Transmit Address Register (low) */
+#define TARU 0x2e /* Transmit Address Register (high) */
+#define NTBCR 0x3a /* Next Transmit Byte count Register */
+#define NTARL 0x3c /* Next Transmit Address Register (low) */
+#define NTARU 0x3e /* Next Transmit Address Register (high) */
+
+#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
+#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
+#define RBCR 0xaa /* Receive Byte count Register */
+#define RARL 0xac /* Receive Address Register (low) */
+#define RARU 0xae /* Receive Address Register (high) */
+#define NRBCR 0xba /* Next Receive Byte count Register */
+#define NRARL 0xbc /* Next Receive Address Register (low) */
+#define NRARU 0xbe /* Next Receive Address Register (high) */
+
+
+/*
+ * MACRO DEFINITIONS FOR MODEM STATUS BITS
+ */
+
+#define MODEMSTATUS_DTR 0x80
+#define MODEMSTATUS_DSR 0x40
+#define MODEMSTATUS_RTS 0x20
+#define MODEMSTATUS_CTS 0x10
+#define MODEMSTATUS_RI 0x04
+#define MODEMSTATUS_DCD 0x01
+
+
+/*
+ * Channel Command/Address Register (CCAR) Command Codes
+ */
+
+#define RTCmd_Null 0x0000
+#define RTCmd_ResetHighestIus 0x1000
+#define RTCmd_TriggerChannelLoadDma 0x2000
+#define RTCmd_TriggerRxDma 0x2800
+#define RTCmd_TriggerTxDma 0x3000
+#define RTCmd_TriggerRxAndTxDma 0x3800
+#define RTCmd_PurgeRxFifo 0x4800
+#define RTCmd_PurgeTxFifo 0x5000
+#define RTCmd_PurgeRxAndTxFifo 0x5800
+#define RTCmd_LoadRcc 0x6800
+#define RTCmd_LoadTcc 0x7000
+#define RTCmd_LoadRccAndTcc 0x7800
+#define RTCmd_LoadTC0 0x8800
+#define RTCmd_LoadTC1 0x9000
+#define RTCmd_LoadTC0AndTC1 0x9800
+#define RTCmd_SerialDataLSBFirst 0xa000
+#define RTCmd_SerialDataMSBFirst 0xa800
+#define RTCmd_SelectBigEndian 0xb000
+#define RTCmd_SelectLittleEndian 0xb800
+
+
+/*
+ * DMA Command/Address Register (DCAR) Command Codes
+ */
+
+#define DmaCmd_Null 0x0000
+#define DmaCmd_ResetTxChannel 0x1000
+#define DmaCmd_ResetRxChannel 0x1200
+#define DmaCmd_StartTxChannel 0x2000
+#define DmaCmd_StartRxChannel 0x2200
+#define DmaCmd_ContinueTxChannel 0x3000
+#define DmaCmd_ContinueRxChannel 0x3200
+#define DmaCmd_PauseTxChannel 0x4000
+#define DmaCmd_PauseRxChannel 0x4200
+#define DmaCmd_AbortTxChannel 0x5000
+#define DmaCmd_AbortRxChannel 0x5200
+#define DmaCmd_InitTxChannel 0x7000
+#define DmaCmd_InitRxChannel 0x7200
+#define DmaCmd_ResetHighestDmaIus 0x8000
+#define DmaCmd_ResetAllChannels 0x9000
+#define DmaCmd_StartAllChannels 0xa000
+#define DmaCmd_ContinueAllChannels 0xb000
+#define DmaCmd_PauseAllChannels 0xc000
+#define DmaCmd_AbortAllChannels 0xd000
+#define DmaCmd_InitAllChannels 0xf000
+
+#define TCmd_Null 0x0000
+#define TCmd_ClearTxCRC 0x2000
+#define TCmd_SelectTicrTtsaData 0x4000
+#define TCmd_SelectTicrTxFifostatus 0x5000
+#define TCmd_SelectTicrIntLevel 0x6000
+#define TCmd_SelectTicrdma_level 0x7000
+#define TCmd_SendFrame 0x8000
+#define TCmd_SendAbort 0x9000
+#define TCmd_EnableDleInsertion 0xc000
+#define TCmd_DisableDleInsertion 0xd000
+#define TCmd_ClearEofEom 0xe000
+#define TCmd_SetEofEom 0xf000
+
+#define RCmd_Null 0x0000
+#define RCmd_ClearRxCRC 0x2000
+#define RCmd_EnterHuntmode 0x3000
+#define RCmd_SelectRicrRtsaData 0x4000
+#define RCmd_SelectRicrRxFifostatus 0x5000
+#define RCmd_SelectRicrIntLevel 0x6000
+#define RCmd_SelectRicrdma_level 0x7000
+
+/*
+ * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
+ */
+
+#define RECEIVE_STATUS BIT5
+#define RECEIVE_DATA BIT4
+#define TRANSMIT_STATUS BIT3
+#define TRANSMIT_DATA BIT2
+#define IO_PIN BIT1
+#define MISC BIT0
+
+
+/*
+ * Receive status Bits in Receive Command/status Register RCSR
+ */
+
+#define RXSTATUS_SHORT_FRAME BIT8
+#define RXSTATUS_CODE_VIOLATION BIT8
+#define RXSTATUS_EXITED_HUNT BIT7
+#define RXSTATUS_IDLE_RECEIVED BIT6
+#define RXSTATUS_BREAK_RECEIVED BIT5
+#define RXSTATUS_ABORT_RECEIVED BIT5
+#define RXSTATUS_RXBOUND BIT4
+#define RXSTATUS_CRC_ERROR BIT3
+#define RXSTATUS_FRAMING_ERROR BIT3
+#define RXSTATUS_ABORT BIT2
+#define RXSTATUS_PARITY_ERROR BIT2
+#define RXSTATUS_OVERRUN BIT1
+#define RXSTATUS_DATA_AVAILABLE BIT0
+#define RXSTATUS_ALL 0x01f6
+#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
+
+/*
+ * Values for setting transmit idle mode in
+ * Transmit Control/status Register (TCSR)
+ */
+#define IDLEMODE_FLAGS 0x0000
+#define IDLEMODE_ALT_ONE_ZERO 0x0100
+#define IDLEMODE_ZERO 0x0200
+#define IDLEMODE_ONE 0x0300
+#define IDLEMODE_ALT_MARK_SPACE 0x0500
+#define IDLEMODE_SPACE 0x0600
+#define IDLEMODE_MARK 0x0700
+#define IDLEMODE_MASK 0x0700
+
+/*
+ * IUSC revision identifiers
+ */
+#define IUSC_SL1660 0x4d44
+#define IUSC_PRE_SL1660 0x4553
+
+/*
+ * Transmit status Bits in Transmit Command/status Register (TCSR)
+ */
+
+#define TCSR_PRESERVE 0x0F00
+
+#define TCSR_UNDERWAIT BIT11
+#define TXSTATUS_PREAMBLE_SENT BIT7
+#define TXSTATUS_IDLE_SENT BIT6
+#define TXSTATUS_ABORT_SENT BIT5
+#define TXSTATUS_EOF_SENT BIT4
+#define TXSTATUS_EOM_SENT BIT4
+#define TXSTATUS_CRC_SENT BIT3
+#define TXSTATUS_ALL_SENT BIT2
+#define TXSTATUS_UNDERRUN BIT1
+#define TXSTATUS_FIFO_EMPTY BIT0
+#define TXSTATUS_ALL 0x00fa
+#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
+
+
+#define MISCSTATUS_RXC_LATCHED BIT15
+#define MISCSTATUS_RXC BIT14
+#define MISCSTATUS_TXC_LATCHED BIT13
+#define MISCSTATUS_TXC BIT12
+#define MISCSTATUS_RI_LATCHED BIT11
+#define MISCSTATUS_RI BIT10
+#define MISCSTATUS_DSR_LATCHED BIT9
+#define MISCSTATUS_DSR BIT8
+#define MISCSTATUS_DCD_LATCHED BIT7
+#define MISCSTATUS_DCD BIT6
+#define MISCSTATUS_CTS_LATCHED BIT5
+#define MISCSTATUS_CTS BIT4
+#define MISCSTATUS_RCC_UNDERRUN BIT3
+#define MISCSTATUS_DPLL_NO_SYNC BIT2
+#define MISCSTATUS_BRG1_ZERO BIT1
+#define MISCSTATUS_BRG0_ZERO BIT0
+
+#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
+#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
+
+#define SICR_RXC_ACTIVE BIT15
+#define SICR_RXC_INACTIVE BIT14
+#define SICR_RXC (BIT15+BIT14)
+#define SICR_TXC_ACTIVE BIT13
+#define SICR_TXC_INACTIVE BIT12
+#define SICR_TXC (BIT13+BIT12)
+#define SICR_RI_ACTIVE BIT11
+#define SICR_RI_INACTIVE BIT10
+#define SICR_RI (BIT11+BIT10)
+#define SICR_DSR_ACTIVE BIT9
+#define SICR_DSR_INACTIVE BIT8
+#define SICR_DSR (BIT9+BIT8)
+#define SICR_DCD_ACTIVE BIT7
+#define SICR_DCD_INACTIVE BIT6
+#define SICR_DCD (BIT7+BIT6)
+#define SICR_CTS_ACTIVE BIT5
+#define SICR_CTS_INACTIVE BIT4
+#define SICR_CTS (BIT5+BIT4)
+#define SICR_RCC_UNDERFLOW BIT3
+#define SICR_DPLL_NO_SYNC BIT2
+#define SICR_BRG1_ZERO BIT1
+#define SICR_BRG0_ZERO BIT0
+
+void usc_DisableMasterIrqBit( struct mgsl_struct *info );
+void usc_EnableMasterIrqBit( struct mgsl_struct *info );
+void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
+void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
+void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
+
+#define usc_EnableInterrupts( a, b ) \
+ usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
+
+#define usc_DisableInterrupts( a, b ) \
+ usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
+
+#define usc_EnableMasterIrqBit(a) \
+ usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
+
+#define usc_DisableMasterIrqBit(a) \
+ usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
+
+#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
+
+/*
+ * Transmit status Bits in Transmit Control status Register (TCSR)
+ * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
+ */
+
+#define TXSTATUS_PREAMBLE_SENT BIT7
+#define TXSTATUS_IDLE_SENT BIT6
+#define TXSTATUS_ABORT_SENT BIT5
+#define TXSTATUS_EOF BIT4
+#define TXSTATUS_CRC_SENT BIT3
+#define TXSTATUS_ALL_SENT BIT2
+#define TXSTATUS_UNDERRUN BIT1
+#define TXSTATUS_FIFO_EMPTY BIT0
+
+#define DICR_MASTER BIT15
+#define DICR_TRANSMIT BIT0
+#define DICR_RECEIVE BIT1
+
+#define usc_EnableDmaInterrupts(a,b) \
+ usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
+
+#define usc_DisableDmaInterrupts(a,b) \
+ usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
+
+#define usc_EnableStatusIrqs(a,b) \
+ usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
+
+#define usc_DisablestatusIrqs(a,b) \
+ usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
+
+/* Transmit status Bits in Transmit Control status Register (TCSR) */
+/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
+
+
+#define DISABLE_UNCONDITIONAL 0
+#define DISABLE_END_OF_FRAME 1
+#define ENABLE_UNCONDITIONAL 2
+#define ENABLE_AUTO_CTS 3
+#define ENABLE_AUTO_DCD 3
+#define usc_EnableTransmitter(a,b) \
+ usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
+#define usc_EnableReceiver(a,b) \
+ usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
+
+static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
+static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
+static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
+
+static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
+static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
+static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
+void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
+void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
+
+#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
+#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
+
+#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
+
+static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
+static void usc_start_receiver( struct mgsl_struct *info );
+static void usc_stop_receiver( struct mgsl_struct *info );
+
+static void usc_start_transmitter( struct mgsl_struct *info );
+static void usc_stop_transmitter( struct mgsl_struct *info );
+static void usc_set_txidle( struct mgsl_struct *info );
+static void usc_load_txfifo( struct mgsl_struct *info );
+
+static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
+static void usc_enable_loopback( struct mgsl_struct *info, int enable );
+
+static void usc_get_serial_signals( struct mgsl_struct *info );
+static void usc_set_serial_signals( struct mgsl_struct *info );
+
+static void usc_reset( struct mgsl_struct *info );
+
+static void usc_set_sync_mode( struct mgsl_struct *info );
+static void usc_set_sdlc_mode( struct mgsl_struct *info );
+static void usc_set_async_mode( struct mgsl_struct *info );
+static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
+
+static void usc_loopback_frame( struct mgsl_struct *info );
+
+static void mgsl_tx_timeout(unsigned long context);
+
+
+static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
+static void usc_loopmode_insert_request( struct mgsl_struct * info );
+static int usc_loopmode_active( struct mgsl_struct * info);
+static void usc_loopmode_send_done( struct mgsl_struct * info );
+
+static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
+
+#if SYNCLINK_GENERIC_HDLC
+#define dev_to_port(D) (dev_to_hdlc(D)->priv)
+static void hdlcdev_tx_done(struct mgsl_struct *info);
+static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
+static int hdlcdev_init(struct mgsl_struct *info);
+static void hdlcdev_exit(struct mgsl_struct *info);
+#endif
+
+/*
+ * Defines a BUS descriptor value for the PCI adapter
+ * local bus address ranges.
+ */
+
+#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
+(0x00400020 + \
+((WrHold) << 30) + \
+((WrDly) << 28) + \
+((RdDly) << 26) + \
+((Nwdd) << 20) + \
+((Nwad) << 15) + \
+((Nxda) << 13) + \
+((Nrdd) << 11) + \
+((Nrad) << 6) )
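For illustration, the macro simply packs its timing fields into fixed bit positions on top of a base value; the field values used below are arbitrary examples, not the ones the driver programs:

#include <stdio.h>

#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
(0x00400020 + \
((WrHold) << 30) + \
((WrDly) << 28) + \
((RdDly) << 26) + \
((Nwdd) << 20) + \
((Nwad) << 15) + \
((Nxda) << 13) + \
((Nrdd) << 11) + \
((Nrad) << 6) )

int main(void)
{
        /* Only WrHold set: base 0x00400020 plus bit 30 gives 0x40400020. */
        printf("0x%08x\n", BUS_DESCRIPTOR(1, 0, 0, 0, 0, 0, 0, 0));
        return 0;
}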
+
+static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
+
+/*
+ * Adapter diagnostic routines
+ */
+static bool mgsl_register_test( struct mgsl_struct *info );
+static bool mgsl_irq_test( struct mgsl_struct *info );
+static bool mgsl_dma_test( struct mgsl_struct *info );
+static bool mgsl_memory_test( struct mgsl_struct *info );
+static int mgsl_adapter_test( struct mgsl_struct *info );
+
+/*
+ * device and resource management routines
+ */
+static int mgsl_claim_resources(struct mgsl_struct *info);
+static void mgsl_release_resources(struct mgsl_struct *info);
+static void mgsl_add_device(struct mgsl_struct *info);
+static struct mgsl_struct* mgsl_allocate_device(void);
+
+/*
+ * DMA buffer manipulation functions.
+ */
+static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
+static bool mgsl_get_rx_frame( struct mgsl_struct *info );
+static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
+static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
+static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
+static int num_free_tx_dma_buffers(struct mgsl_struct *info);
+static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
+static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
+
+/*
+ * DMA and Shared Memory buffer allocation and formatting
+ */
+static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
+static void mgsl_free_dma_buffers(struct mgsl_struct *info);
+static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
+static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
+static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
+static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
+static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
+static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
+static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
+static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
+static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
+static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
+
+/*
+ * Bottom half interrupt handlers
+ */
+static void mgsl_bh_handler(struct work_struct *work);
+static void mgsl_bh_receive(struct mgsl_struct *info);
+static void mgsl_bh_transmit(struct mgsl_struct *info);
+static void mgsl_bh_status(struct mgsl_struct *info);
+
+/*
+ * Interrupt handler routines and dispatch table.
+ */
+static void mgsl_isr_null( struct mgsl_struct *info );
+static void mgsl_isr_transmit_data( struct mgsl_struct *info );
+static void mgsl_isr_receive_data( struct mgsl_struct *info );
+static void mgsl_isr_receive_status( struct mgsl_struct *info );
+static void mgsl_isr_transmit_status( struct mgsl_struct *info );
+static void mgsl_isr_io_pin( struct mgsl_struct *info );
+static void mgsl_isr_misc( struct mgsl_struct *info );
+static void mgsl_isr_receive_dma( struct mgsl_struct *info );
+static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
+
+typedef void (*isr_dispatch_func)(struct mgsl_struct *);
+
+static isr_dispatch_func UscIsrTable[7] =
+{
+ mgsl_isr_null,
+ mgsl_isr_misc,
+ mgsl_isr_io_pin,
+ mgsl_isr_transmit_data,
+ mgsl_isr_transmit_status,
+ mgsl_isr_receive_data,
+ mgsl_isr_receive_status
+};
+
+/*
+ * ioctl call handlers
+ */
+static int tiocmget(struct tty_struct *tty);
+static int tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear);
+static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
+ __user *user_icount);
+static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
+static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
+static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
+static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
+static int mgsl_txenable(struct mgsl_struct * info, int enable);
+static int mgsl_txabort(struct mgsl_struct * info);
+static int mgsl_rxenable(struct mgsl_struct * info, int enable);
+static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
+static int mgsl_loopmode_send_done( struct mgsl_struct * info );
+
+/* set non-zero on successful registration with PCI subsystem */
+static bool pci_registered;
+
+/*
+ * Global linked list of SyncLink devices
+ */
+static struct mgsl_struct *mgsl_device_list;
+static int mgsl_device_count;
+
+/*
+ * Set this param to non-zero to load eax with the
+ * .text section address and breakpoint on module load.
+ * This is useful with gdb and the add-symbol-file command.
+ */
+static int break_on_load;
+
+/*
+ * Driver major number; defaults to zero to get an auto-assigned
+ * major number. May be forced as a module parameter.
+ */
+static int ttymajor;
+
+/*
+ * Array of user specified options for ISA adapters.
+ */
+static int io[MAX_ISA_DEVICES];
+static int irq[MAX_ISA_DEVICES];
+static int dma[MAX_ISA_DEVICES];
+static int debug_level;
+static int maxframe[MAX_TOTAL_DEVICES];
+static int txdmabufs[MAX_TOTAL_DEVICES];
+static int txholdbufs[MAX_TOTAL_DEVICES];
+
+module_param(break_on_load, bool, 0);
+module_param(ttymajor, int, 0);
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(dma, int, NULL, 0);
+module_param(debug_level, int, 0);
+module_param_array(maxframe, int, NULL, 0);
+module_param_array(txdmabufs, int, NULL, 0);
+module_param_array(txholdbufs, int, NULL, 0);
+
+static char *driver_name = "SyncLink serial driver";
+static char *driver_version = "$Revision: 4.38 $";
+
+static int synclink_init_one (struct pci_dev *dev,
+ const struct pci_device_id *ent);
+static void synclink_remove_one (struct pci_dev *dev);
+
+static struct pci_device_id synclink_pci_tbl[] = {
+ { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, }, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
+
+MODULE_LICENSE("GPL");
+
+static struct pci_driver synclink_pci_driver = {
+ .name = "synclink",
+ .id_table = synclink_pci_tbl,
+ .probe = synclink_init_one,
+ .remove = __devexit_p(synclink_remove_one),
+};
+
+static struct tty_driver *serial_driver;
+
+/* number of characters left in xmit buffer before we ask for more */
+#define WAKEUP_CHARS 256
+
+
+static void mgsl_change_params(struct mgsl_struct *info);
+static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
+
+/*
+ * 1st function defined in .text section. Calling this function in
+ * init_module() followed by a breakpoint allows a remote debugger
+ * (gdb) to get the .text address for the add-symbol-file command.
+ * This allows remote debugging of dynamically loadable modules.
+ */
+static void* mgsl_get_text_ptr(void)
+{
+ return mgsl_get_text_ptr;
+}
+
+static inline int mgsl_paranoia_check(struct mgsl_struct *info,
+ char *name, const char *routine)
+{
+#ifdef MGSL_PARANOIA_CHECK
+ static const char *badmagic =
+ "Warning: bad magic number for mgsl struct (%s) in %s\n";
+ static const char *badinfo =
+ "Warning: null mgsl_struct for (%s) in %s\n";
+
+ if (!info) {
+ printk(badinfo, name, routine);
+ return 1;
+ }
+ if (info->magic != MGSL_MAGIC) {
+ printk(badmagic, name, routine);
+ return 1;
+ }
+#else
+ if (!info)
+ return 1;
+#endif
+ return 0;
+}
+
+/**
+ * line discipline callback wrappers
+ *
+ * The wrappers maintain line discipline references
+ * while calling into the line discipline.
+ *
+ * ldisc_receive_buf - pass receive data to line discipline
+ */
+
+static void ldisc_receive_buf(struct tty_struct *tty,
+ const __u8 *data, char *flags, int count)
+{
+ struct tty_ldisc *ld;
+ if (!tty)
+ return;
+ ld = tty_ldisc_ref(tty);
+ if (ld) {
+ if (ld->ops->receive_buf)
+ ld->ops->receive_buf(tty, data, flags, count);
+ tty_ldisc_deref(ld);
+ }
+}
+
+/* mgsl_stop() throttle (stop) transmitter
+ *
+ * Arguments: tty pointer to tty info structure
+ * Return Value: None
+ */
+static void mgsl_stop(struct tty_struct *tty)
+{
+ struct mgsl_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
+ return;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk("mgsl_stop(%s)\n",info->device_name);
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ if (info->tx_enabled)
+ usc_stop_transmitter(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+} /* end of mgsl_stop() */
+
+/* mgsl_start() release (start) transmitter
+ *
+ * Arguments: tty pointer to tty info structure
+ * Return Value: None
+ */
+static void mgsl_start(struct tty_struct *tty)
+{
+ struct mgsl_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
+ return;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk("mgsl_start(%s)\n",info->device_name);
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ if (!info->tx_enabled)
+ usc_start_transmitter(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+} /* end of mgsl_start() */
+
+/*
+ * Bottom half work queue access functions
+ */
+
+/* mgsl_bh_action() Return next bottom half action to perform.
+ * Return Value: BH action code or 0 if nothing to do.
+ */
+static int mgsl_bh_action(struct mgsl_struct *info)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+
+ if (info->pending_bh & BH_RECEIVE) {
+ info->pending_bh &= ~BH_RECEIVE;
+ rc = BH_RECEIVE;
+ } else if (info->pending_bh & BH_TRANSMIT) {
+ info->pending_bh &= ~BH_TRANSMIT;
+ rc = BH_TRANSMIT;
+ } else if (info->pending_bh & BH_STATUS) {
+ info->pending_bh &= ~BH_STATUS;
+ rc = BH_STATUS;
+ }
+
+ if (!rc) {
+ /* Mark BH routine as complete */
+ info->bh_running = false;
+ info->bh_requested = false;
+ }
+
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ return rc;
+}
+
+/*
+ * Perform bottom half processing of work items queued by ISR.
+ */
+static void mgsl_bh_handler(struct work_struct *work)
+{
+ struct mgsl_struct *info =
+ container_of(work, struct mgsl_struct, task);
+ int action;
+
+ if (!info)
+ return;
+
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
+ __FILE__,__LINE__,info->device_name);
+
+ info->bh_running = true;
+
+ while((action = mgsl_bh_action(info)) != 0) {
+
+ /* Process work item */
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
+ __FILE__,__LINE__,action);
+
+ switch (action) {
+
+ case BH_RECEIVE:
+ mgsl_bh_receive(info);
+ break;
+ case BH_TRANSMIT:
+ mgsl_bh_transmit(info);
+ break;
+ case BH_STATUS:
+ mgsl_bh_status(info);
+ break;
+ default:
+ /* unknown work item ID */
+ printk("Unknown work item ID=%08X!\n", action);
+ break;
+ }
+ }
+
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
+ __FILE__,__LINE__,info->device_name);
+}
+
+static void mgsl_bh_receive(struct mgsl_struct *info)
+{
+ bool (*get_rx_frame)(struct mgsl_struct *info) =
+ (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
+
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk( "%s(%d):mgsl_bh_receive(%s)\n",
+ __FILE__,__LINE__,info->device_name);
+
+ do
+ {
+ if (info->rx_rcc_underrun) {
+ unsigned long flags;
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_start_receiver(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ return;
+ }
+ } while(get_rx_frame(info));
+}
+
+static void mgsl_bh_transmit(struct mgsl_struct *info)
+{
+ struct tty_struct *tty = info->port.tty;
+ unsigned long flags;
+
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
+ __FILE__,__LINE__,info->device_name);
+
+ if (tty)
+ tty_wakeup(tty);
+
+ /* if transmitter idle and loopmode_send_done_requested
+ * then start echoing RxD to TxD
+ */
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ if ( !info->tx_active && info->loopmode_send_done_requested )
+ usc_loopmode_send_done( info );
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+}
+
+static void mgsl_bh_status(struct mgsl_struct *info)
+{
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk( "%s(%d):mgsl_bh_status() entry on %s\n",
+ __FILE__,__LINE__,info->device_name);
+
+ info->ri_chkcount = 0;
+ info->dsr_chkcount = 0;
+ info->dcd_chkcount = 0;
+ info->cts_chkcount = 0;
+}
+
+/* mgsl_isr_receive_status()
+ *
+ * Service a receive status interrupt. The type of status
+ * interrupt is indicated by the state of the RCSR.
+ * This is only used for HDLC mode.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void mgsl_isr_receive_status( struct mgsl_struct *info )
+{
+ u16 status = usc_InReg( info, RCSR );
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
+ __FILE__,__LINE__,status);
+
+ if ( (status & RXSTATUS_ABORT_RECEIVED) &&
+ info->loopmode_insert_requested &&
+ usc_loopmode_active(info) )
+ {
+ ++info->icount.rxabort;
+ info->loopmode_insert_requested = false;
+
+ /* clear CMR:13 to start echoing RxD to TxD */
+ info->cmr_value &= ~BIT13;
+ usc_OutReg(info, CMR, info->cmr_value);
+
+ /* disable received abort irq (no longer required) */
+ usc_OutReg(info, RICR,
+ (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
+ }
+
+ if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
+ if (status & RXSTATUS_EXITED_HUNT)
+ info->icount.exithunt++;
+ if (status & RXSTATUS_IDLE_RECEIVED)
+ info->icount.rxidle++;
+ wake_up_interruptible(&info->event_wait_q);
+ }
+
+ if (status & RXSTATUS_OVERRUN){
+ info->icount.rxover++;
+ usc_process_rxoverrun_sync( info );
+ }
+
+ usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
+ usc_UnlatchRxstatusBits( info, status );
+
+} /* end of mgsl_isr_receive_status() */
+
+/* mgsl_isr_transmit_status()
+ *
+ * Service a transmit status interrupt.
+ * HDLC mode: end of transmit frame.
+ * Async mode: all data is sent.
+ * Transmit status is indicated by bits in the TCSR.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void mgsl_isr_transmit_status( struct mgsl_struct *info )
+{
+ u16 status = usc_InReg( info, TCSR );
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
+ __FILE__,__LINE__,status);
+
+ usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
+ usc_UnlatchTxstatusBits( info, status );
+
+ if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
+ {
+ /* finished sending HDLC abort. This may leave */
+ /* the TxFifo with data from the aborted frame */
+ /* so purge the TxFifo. Also shutdown the DMA */
+ /* channel in case there is data remaining in */
+ /* the DMA buffer */
+ usc_DmaCmd( info, DmaCmd_ResetTxChannel );
+ usc_RTCmd( info, RTCmd_PurgeTxFifo );
+ }
+
+ if ( status & TXSTATUS_EOF_SENT )
+ info->icount.txok++;
+ else if ( status & TXSTATUS_UNDERRUN )
+ info->icount.txunder++;
+ else if ( status & TXSTATUS_ABORT_SENT )
+ info->icount.txabort++;
+ else
+ info->icount.txunder++;
+
+ info->tx_active = false;
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+ del_timer(&info->tx_timer);
+
+ if ( info->drop_rts_on_tx_done ) {
+ usc_get_serial_signals( info );
+ if ( info->serial_signals & SerialSignal_RTS ) {
+ info->serial_signals &= ~SerialSignal_RTS;
+ usc_set_serial_signals( info );
+ }
+ info->drop_rts_on_tx_done = false;
+ }
+
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount)
+ hdlcdev_tx_done(info);
+ else
+#endif
+ {
+ if (info->port.tty->stopped || info->port.tty->hw_stopped) {
+ usc_stop_transmitter(info);
+ return;
+ }
+ info->pending_bh |= BH_TRANSMIT;
+ }
+
+} /* end of mgsl_isr_transmit_status() */
+
+/* mgsl_isr_io_pin()
+ *
+ * Service an Input/Output pin interrupt. The type of
+ * interrupt is indicated by bits in the MISR
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void mgsl_isr_io_pin( struct mgsl_struct *info )
+{
+ struct mgsl_icount *icount;
+ u16 status = usc_InReg( info, MISR );
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
+ __FILE__,__LINE__,status);
+
+ usc_ClearIrqPendingBits( info, IO_PIN );
+ usc_UnlatchIostatusBits( info, status );
+
+ if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
+ MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
+ icount = &info->icount;
+ /* update input line counters */
+ if (status & MISCSTATUS_RI_LATCHED) {
+ if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
+ usc_DisablestatusIrqs(info,SICR_RI);
+ icount->rng++;
+ if ( status & MISCSTATUS_RI )
+ info->input_signal_events.ri_up++;
+ else
+ info->input_signal_events.ri_down++;
+ }
+ if (status & MISCSTATUS_DSR_LATCHED) {
+ if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
+ usc_DisablestatusIrqs(info,SICR_DSR);
+ icount->dsr++;
+ if ( status & MISCSTATUS_DSR )
+ info->input_signal_events.dsr_up++;
+ else
+ info->input_signal_events.dsr_down++;
+ }
+ if (status & MISCSTATUS_DCD_LATCHED) {
+ if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
+ usc_DisablestatusIrqs(info,SICR_DCD);
+ icount->dcd++;
+ if (status & MISCSTATUS_DCD) {
+ info->input_signal_events.dcd_up++;
+ } else
+ info->input_signal_events.dcd_down++;
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount) {
+ if (status & MISCSTATUS_DCD)
+ netif_carrier_on(info->netdev);
+ else
+ netif_carrier_off(info->netdev);
+ }
+#endif
+ }
+ if (status & MISCSTATUS_CTS_LATCHED)
+ {
+ if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
+ usc_DisablestatusIrqs(info,SICR_CTS);
+ icount->cts++;
+ if ( status & MISCSTATUS_CTS )
+ info->input_signal_events.cts_up++;
+ else
+ info->input_signal_events.cts_down++;
+ }
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+
+ if ( (info->port.flags & ASYNC_CHECK_CD) &&
+ (status & MISCSTATUS_DCD_LATCHED) ) {
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s CD now %s...", info->device_name,
+ (status & MISCSTATUS_DCD) ? "on" : "off");
+ if (status & MISCSTATUS_DCD)
+ wake_up_interruptible(&info->port.open_wait);
+ else {
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("doing serial hangup...");
+ if (info->port.tty)
+ tty_hangup(info->port.tty);
+ }
+ }
+
+ if ( (info->port.flags & ASYNC_CTS_FLOW) &&
+ (status & MISCSTATUS_CTS_LATCHED) ) {
+ if (info->port.tty->hw_stopped) {
+ if (status & MISCSTATUS_CTS) {
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("CTS tx start...");
+ if (info->port.tty)
+ info->port.tty->hw_stopped = 0;
+ usc_start_transmitter(info);
+ info->pending_bh |= BH_TRANSMIT;
+ return;
+ }
+ } else {
+ if (!(status & MISCSTATUS_CTS)) {
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("CTS tx stop...");
+ if (info->port.tty)
+ info->port.tty->hw_stopped = 1;
+ usc_stop_transmitter(info);
+ }
+ }
+ }
+ }
+
+ info->pending_bh |= BH_STATUS;
+
+ /* for diagnostics set IRQ flag */
+ if ( status & MISCSTATUS_TXC_LATCHED ){
+ usc_OutReg( info, SICR,
+ (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
+ usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
+ info->irq_occurred = true;
+ }
+
+} /* end of mgsl_isr_io_pin() */
+
+/* mgsl_isr_transmit_data()
+ *
+ * Service a transmit data interrupt (async mode only).
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void mgsl_isr_transmit_data( struct mgsl_struct *info )
+{
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
+ __FILE__,__LINE__,info->xmit_cnt);
+
+ usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
+
+ if (info->port.tty->stopped || info->port.tty->hw_stopped) {
+ usc_stop_transmitter(info);
+ return;
+ }
+
+ if ( info->xmit_cnt )
+ usc_load_txfifo( info );
+ else
+ info->tx_active = false;
+
+ if (info->xmit_cnt < WAKEUP_CHARS)
+ info->pending_bh |= BH_TRANSMIT;
+
+} /* end of mgsl_isr_transmit_data() */
+
+/* mgsl_isr_receive_data()
+ *
+ * Service a receive data interrupt. This occurs
+ * when operating in asynchronous interrupt transfer mode.
+ * The receive data FIFO is flushed to the receive data buffers.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void mgsl_isr_receive_data( struct mgsl_struct *info )
+{
+ int Fifocount;
+ u16 status;
+ int work = 0;
+ unsigned char DataByte;
+ struct tty_struct *tty = info->port.tty;
+ struct mgsl_icount *icount = &info->icount;
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):mgsl_isr_receive_data\n",
+ __FILE__,__LINE__);
+
+ usc_ClearIrqPendingBits( info, RECEIVE_DATA );
+
+ /* select FIFO status for RICR readback */
+ usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
+
+ /* clear the Wordstatus bit so that status readback */
+ /* only reflects the status of this byte */
+ usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
+
+ /* flush the receive FIFO */
+
+ while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
+ int flag;
+
+ /* read one byte from RxFIFO */
+ outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
+ info->io_base + CCAR );
+ DataByte = inb( info->io_base + CCAR );
+
+ /* get the status of the received byte */
+ status = usc_InReg(info, RCSR);
+ if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
+ RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
+ usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
+
+ icount->rx++;
+
+ flag = 0;
+ if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
+ RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
+ printk("rxerr=%04X\n",status);
+ /* update error statistics */
+ if ( status & RXSTATUS_BREAK_RECEIVED ) {
+ status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
+ icount->brk++;
+ } else if (status & RXSTATUS_PARITY_ERROR)
+ icount->parity++;
+ else if (status & RXSTATUS_FRAMING_ERROR)
+ icount->frame++;
+ else if (status & RXSTATUS_OVERRUN) {
+ /* must issue purge fifo cmd before */
+ /* 16C32 accepts more receive chars */
+ usc_RTCmd(info,RTCmd_PurgeRxFifo);
+ icount->overrun++;
+ }
+
+ /* discard char if tty control flags say so */
+ if (status & info->ignore_status_mask)
+ continue;
+
+ status &= info->read_status_mask;
+
+ if (status & RXSTATUS_BREAK_RECEIVED) {
+ flag = TTY_BREAK;
+ if (info->port.flags & ASYNC_SAK)
+ do_SAK(tty);
+ } else if (status & RXSTATUS_PARITY_ERROR)
+ flag = TTY_PARITY;
+ else if (status & RXSTATUS_FRAMING_ERROR)
+ flag = TTY_FRAME;
+ } /* end of if (error) */
+ tty_insert_flip_char(tty, DataByte, flag);
+ if (status & RXSTATUS_OVERRUN) {
+ /* Overrun is special, since it's
+ * reported immediately, and doesn't
+ * affect the current character
+ */
+ work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+ }
+
+ if ( debug_level >= DEBUG_LEVEL_ISR ) {
+ printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
+ __FILE__,__LINE__,icount->rx,icount->brk,
+ icount->parity,icount->frame,icount->overrun);
+ }
+
+ if(work)
+ tty_flip_buffer_push(tty);
+}
+
+/* mgsl_isr_misc()
+ *
+ * Service a miscellaneous interrupt source.
+ *
+ * Arguments: info pointer to device extension (instance data)
+ * Return Value: None
+ */
+static void mgsl_isr_misc( struct mgsl_struct *info )
+{
+ u16 status = usc_InReg( info, MISR );
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):mgsl_isr_misc status=%04X\n",
+ __FILE__,__LINE__,status);
+
+ if ((status & MISCSTATUS_RCC_UNDERRUN) &&
+ (info->params.mode == MGSL_MODE_HDLC)) {
+
+ /* turn off receiver and rx DMA */
+ usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
+ usc_DmaCmd(info, DmaCmd_ResetRxChannel);
+ usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
+ usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
+ usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
+
+ /* schedule BH handler to restart receiver */
+ info->pending_bh |= BH_RECEIVE;
+ info->rx_rcc_underrun = true;
+ }
+
+ usc_ClearIrqPendingBits( info, MISC );
+ usc_UnlatchMiscstatusBits( info, status );
+
+} /* end of mgsl_isr_misc() */
+
+/* mgsl_isr_null()
+ *
+ * Services undefined interrupt vectors from the
+ * USC. (hence this function SHOULD never be called)
+ *
+ * Arguments: info pointer to device extension (instance data)
+ * Return Value: None
+ */
+static void mgsl_isr_null( struct mgsl_struct *info )
+{
+
+} /* end of mgsl_isr_null() */
+
+/* mgsl_isr_receive_dma()
+ *
+ * Service a receive DMA channel interrupt.
+ * For this driver there are two sources of receive DMA interrupts
+ * as identified in the Receive DMA mode Register (RDMR):
+ *
+ * BIT3 EOA/EOL End of List, all receive buffers in receive
+ * buffer list have been filled (no more free buffers
+ * available). The DMA controller has shut down.
+ *
+ * BIT2 EOB End of Buffer. This interrupt occurs when a receive
+ * DMA buffer is terminated in response to completion
+ * of a good frame or a frame with errors. The status
+ * of the frame is stored in the buffer entry in the
+ * list of receive buffer entries.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void mgsl_isr_receive_dma( struct mgsl_struct *info )
+{
+ u16 status;
+
+ /* clear interrupt pending and IUS bit for Rx DMA IRQ */
+ usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
+
+ /* Read the receive DMA status to identify interrupt type. */
+ /* This also clears the status bits. */
+ status = usc_InDmaReg( info, RDMR );
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
+ __FILE__,__LINE__,info->device_name,status);
+
+ info->pending_bh |= BH_RECEIVE;
+
+ if ( status & BIT3 ) {
+ info->rx_overflow = true;
+ info->icount.buf_overrun++;
+ }
+
+} /* end of mgsl_isr_receive_dma() */
+
+/* mgsl_isr_transmit_dma()
+ *
+ * This function services a transmit DMA channel interrupt.
+ *
+ * For this driver there is one source of transmit DMA interrupts
+ * as identified in the Transmit DMA Mode Register (TDMR):
+ *
+ * BIT2 EOB End of Buffer. This interrupt occurs when a
+ * transmit DMA buffer has been emptied.
+ *
+ * The driver maintains enough transmit DMA buffers to hold at least
+ * one max frame size transmit frame. When operating in a buffered
+ * transmit mode, there may be enough transmit DMA buffers to hold at
+ * 	two or more max frame size frames.  On an EOB condition,
+ * 	check whether any transmit frames are queued and, if there is
+ * 	room, copy the next one into the transmit DMA buffers.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
+{
+ u16 status;
+
+ /* clear interrupt pending and IUS bit for Tx DMA IRQ */
+ usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
+
+ /* Read the transmit DMA status to identify interrupt type. */
+ /* This also clears the status bits. */
+
+ status = usc_InDmaReg( info, TDMR );
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
+ __FILE__,__LINE__,info->device_name,status);
+
+ if ( status & BIT2 ) {
+ --info->tx_dma_buffers_used;
+
+ /* if there are transmit frames queued,
+ * try to load the next one
+ */
+ if ( load_next_tx_holding_buffer(info) ) {
+ /* if call returns non-zero value, we have
+ * at least one free tx holding buffer
+ */
+ info->pending_bh |= BH_TRANSMIT;
+ }
+ }
+
+} /* end of mgsl_isr_transmit_dma() */
+
+/* mgsl_interrupt()
+ *
+ * Interrupt service routine entry point.
+ *
+ * Arguments:
+ *
+ * 	dummy		interrupt number (unused)
+ * 	dev_id		device ID supplied during interrupt registration
+ *
+ * Return Value: IRQ_HANDLED
+ */
+static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
+{
+ struct mgsl_struct *info = dev_id;
+ u16 UscVector;
+ u16 DmaVector;
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
+ __FILE__, __LINE__, info->irq_level);
+
+ spin_lock(&info->irq_spinlock);
+
+ for(;;) {
+ /* Read the interrupt vectors from hardware. */
+ UscVector = usc_InReg(info, IVR) >> 9;
+ DmaVector = usc_InDmaReg(info, DIVR);
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
+ __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
+
+ if ( !UscVector && !DmaVector )
+ break;
+
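+		/* UscVector (IVR shifted down by 9) indexes UscIsrTable to
+		 * select the serial interrupt handler; when only a DMA
+		 * interrupt is pending, DmaVector with BIT10 set and BIT9
+		 * clear selects the transmit DMA handler, otherwise the
+		 * receive DMA handler is called.
+		 */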
+ /* Dispatch interrupt vector */
+ if ( UscVector )
+ (*UscIsrTable[UscVector])(info);
+ else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
+ mgsl_isr_transmit_dma(info);
+ else
+ mgsl_isr_receive_dma(info);
+
+ if ( info->isr_overflow ) {
+ printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
+ __FILE__, __LINE__, info->device_name, info->irq_level);
+ usc_DisableMasterIrqBit(info);
+ usc_DisableDmaInterrupts(info,DICR_MASTER);
+ break;
+ }
+ }
+
+ /* Request bottom half processing if there's something
+ * for it to do and the bh is not already running
+ */
+
+ if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s queueing bh task.\n",
+ __FILE__,__LINE__,info->device_name);
+ schedule_work(&info->task);
+ info->bh_requested = true;
+ }
+
+ spin_unlock(&info->irq_spinlock);
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
+ __FILE__, __LINE__, info->irq_level);
+
+ return IRQ_HANDLED;
+} /* end of mgsl_interrupt() */
+
+/* startup()
+ *
+ * Initialize and start device.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: 0 if success, otherwise error code
+ */
+static int startup(struct mgsl_struct * info)
+{
+ int retval = 0;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
+
+ if (info->port.flags & ASYNC_INITIALIZED)
+ return 0;
+
+ if (!info->xmit_buf) {
+ /* allocate a page of memory for a transmit buffer */
+ info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
+ if (!info->xmit_buf) {
+ printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
+ __FILE__,__LINE__,info->device_name);
+ return -ENOMEM;
+ }
+ }
+
+ info->pending_bh = 0;
+
+ memset(&info->icount, 0, sizeof(info->icount));
+
+ setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
+
+ /* Allocate and claim adapter resources */
+ retval = mgsl_claim_resources(info);
+
+ /* perform existence check and diagnostics */
+ if ( !retval )
+ retval = mgsl_adapter_test(info);
+
+ if ( retval ) {
+ if (capable(CAP_SYS_ADMIN) && info->port.tty)
+ set_bit(TTY_IO_ERROR, &info->port.tty->flags);
+ mgsl_release_resources(info);
+ return retval;
+ }
+
+ /* program hardware for current parameters */
+ mgsl_change_params(info);
+
+ if (info->port.tty)
+ clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
+
+ info->port.flags |= ASYNC_INITIALIZED;
+
+ return 0;
+
+} /* end of startup() */
+
+/* shutdown()
+ *
+ * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void shutdown(struct mgsl_struct * info)
+{
+ unsigned long flags;
+
+ if (!(info->port.flags & ASYNC_INITIALIZED))
+ return;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_shutdown(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ /* clear status wait queue because status changes */
+ /* can't happen after shutting down the hardware */
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+
+ del_timer_sync(&info->tx_timer);
+
+ if (info->xmit_buf) {
+ free_page((unsigned long) info->xmit_buf);
+ info->xmit_buf = NULL;
+ }
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_DisableMasterIrqBit(info);
+ usc_stop_receiver(info);
+ usc_stop_transmitter(info);
+ usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
+ TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
+ usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
+
+ /* Disable DMAEN (Port 7, Bit 14) */
+ /* This disconnects the DMA request signal from the ISA bus */
+ /* on the ISA adapter. This has no effect for the PCI adapter */
+ usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
+
+ /* Disable INTEN (Port 6, Bit12) */
+ /* This disconnects the IRQ request signal to the ISA bus */
+ /* on the ISA adapter. This has no effect for the PCI adapter */
+ usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
+
+ if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
+ info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ usc_set_serial_signals(info);
+ }
+
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ mgsl_release_resources(info);
+
+ if (info->port.tty)
+ set_bit(TTY_IO_ERROR, &info->port.tty->flags);
+
+ info->port.flags &= ~ASYNC_INITIALIZED;
+
+} /* end of shutdown() */
+
+static void mgsl_program_hw(struct mgsl_struct *info)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+
+ usc_stop_receiver(info);
+ usc_stop_transmitter(info);
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+
+ if (info->params.mode == MGSL_MODE_HDLC ||
+ info->params.mode == MGSL_MODE_RAW ||
+ info->netcount)
+ usc_set_sync_mode(info);
+ else
+ usc_set_async_mode(info);
+
+ usc_set_serial_signals(info);
+
+ info->dcd_chkcount = 0;
+ info->cts_chkcount = 0;
+ info->ri_chkcount = 0;
+ info->dsr_chkcount = 0;
+
+ usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
+ usc_EnableInterrupts(info, IO_PIN);
+ usc_get_serial_signals(info);
+
+ if (info->netcount || info->port.tty->termios->c_cflag & CREAD)
+ usc_start_receiver(info);
+
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+}
+
+/* Reconfigure adapter based on new parameters
+ */
+static void mgsl_change_params(struct mgsl_struct *info)
+{
+ unsigned cflag;
+ int bits_per_char;
+
+ if (!info->port.tty || !info->port.tty->termios)
+ return;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_change_params(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ cflag = info->port.tty->termios->c_cflag;
+
+ /* if B0 rate (hangup) specified then negate DTR and RTS */
+ /* otherwise assert DTR and RTS */
+ if (cflag & CBAUD)
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+
+ /* byte size and parity */
+
+ switch (cflag & CSIZE) {
+ case CS5: info->params.data_bits = 5; break;
+ case CS6: info->params.data_bits = 6; break;
+ case CS7: info->params.data_bits = 7; break;
+ case CS8: info->params.data_bits = 8; break;
+ /* Never happens, but GCC is too dumb to figure it out */
+ default: info->params.data_bits = 7; break;
+ }
+
+ if (cflag & CSTOPB)
+ info->params.stop_bits = 2;
+ else
+ info->params.stop_bits = 1;
+
+ info->params.parity = ASYNC_PARITY_NONE;
+ if (cflag & PARENB) {
+ if (cflag & PARODD)
+ info->params.parity = ASYNC_PARITY_ODD;
+ else
+ info->params.parity = ASYNC_PARITY_EVEN;
+#ifdef CMSPAR
+ if (cflag & CMSPAR)
+ info->params.parity = ASYNC_PARITY_SPACE;
+#endif
+ }
+
+ /* calculate number of jiffies to transmit a full
+ * FIFO (32 bytes) at specified data rate
+ */
+ bits_per_char = info->params.data_bits +
+ info->params.stop_bits + 1;
+
+ /* if port data rate is set to 460800 or less then
+ * allow tty settings to override, otherwise keep the
+ * current data rate.
+ */
+ if (info->params.data_rate <= 460800)
+ info->params.data_rate = tty_get_baud_rate(info->port.tty);
+
+ if ( info->params.data_rate ) {
+ info->timeout = (32*HZ*bits_per_char) /
+ info->params.data_rate;
+ }
+ info->timeout += HZ/50; /* Add .02 seconds of slop */
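+	/* e.g. with 8 data bits and 1 stop bit, bits_per_char = 10, so at
+	 * 9600 bps the 32 byte FIFO takes 32*HZ*10/9600 = HZ/30 jiffies
+	 * (about 33ms), plus HZ/50 (20ms) of slop.
+	 */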
+
+ if (cflag & CRTSCTS)
+ info->port.flags |= ASYNC_CTS_FLOW;
+ else
+ info->port.flags &= ~ASYNC_CTS_FLOW;
+
+ if (cflag & CLOCAL)
+ info->port.flags &= ~ASYNC_CHECK_CD;
+ else
+ info->port.flags |= ASYNC_CHECK_CD;
+
+ /* process tty input control flags */
+
+ info->read_status_mask = RXSTATUS_OVERRUN;
+ if (I_INPCK(info->port.tty))
+ info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
+ if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
+ info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
+
+ if (I_IGNPAR(info->port.tty))
+ info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
+ if (I_IGNBRK(info->port.tty)) {
+ info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
+ /* If ignoring parity and break indicators, ignore
+ * overruns too. (For real raw support).
+ */
+ if (I_IGNPAR(info->port.tty))
+ info->ignore_status_mask |= RXSTATUS_OVERRUN;
+ }
+
+ mgsl_program_hw(info);
+
+} /* end of mgsl_change_params() */
+
+/* mgsl_put_char()
+ *
+ * Add a character to the transmit buffer.
+ *
+ * Arguments: tty pointer to tty information structure
+ * ch character to add to transmit buffer
+ *
+ * Return Value:	1 if the character was queued, otherwise 0
+ */
+static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ struct mgsl_struct *info = tty->driver_data;
+ unsigned long flags;
+ int ret = 0;
+
+ if (debug_level >= DEBUG_LEVEL_INFO) {
+ printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
+ __FILE__, __LINE__, ch, info->device_name);
+ }
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
+ return 0;
+
+ if (!info->xmit_buf)
+ return 0;
+
+ spin_lock_irqsave(&info->irq_spinlock, flags);
+
+ if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
+ if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
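+			/* SERIAL_XMIT_SIZE is a power of two, so masking the
+			 * head index with SERIAL_XMIT_SIZE-1 wraps it around
+			 * the ring buffer.
+			 */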
+ info->xmit_buf[info->xmit_head++] = ch;
+ info->xmit_head &= SERIAL_XMIT_SIZE-1;
+ info->xmit_cnt++;
+ ret = 1;
+ }
+ }
+ spin_unlock_irqrestore(&info->irq_spinlock, flags);
+ return ret;
+
+} /* end of mgsl_put_char() */
+
+/* mgsl_flush_chars()
+ *
+ * Enable transmitter so remaining characters in the
+ * transmit buffer are sent.
+ *
+ * Arguments: tty pointer to tty information structure
+ * Return Value: None
+ */
+static void mgsl_flush_chars(struct tty_struct *tty)
+{
+ struct mgsl_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
+ __FILE__,__LINE__,info->device_name,info->xmit_cnt);
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
+ return;
+
+ if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
+ !info->xmit_buf)
+ return;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+		printk( "%s(%d):mgsl_flush_chars() on %s starting transmitter\n",
+ __FILE__,__LINE__,info->device_name );
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+
+ if (!info->tx_active) {
+ if ( (info->params.mode == MGSL_MODE_HDLC ||
+ info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
+ /* operating in synchronous (frame oriented) mode */
+ /* copy data from circular xmit_buf to */
+ /* transmit DMA buffer. */
+ mgsl_load_tx_dma_buffer(info,
+ info->xmit_buf,info->xmit_cnt);
+ }
+ usc_start_transmitter(info);
+ }
+
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+} /* end of mgsl_flush_chars() */
+
+/* mgsl_write()
+ *
+ * Send a block of data
+ *
+ * Arguments:
+ *
+ * tty pointer to tty information structure
+ * buf pointer to buffer containing send data
+ * count size of send data in bytes
+ *
+ * Return Value: number of characters written
+ */
+static int mgsl_write(struct tty_struct * tty,
+ const unsigned char *buf, int count)
+{
+ int c, ret = 0;
+ struct mgsl_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):mgsl_write(%s) count=%d\n",
+ __FILE__,__LINE__,info->device_name,count);
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
+ goto cleanup;
+
+ if (!info->xmit_buf)
+ goto cleanup;
+
+ if ( info->params.mode == MGSL_MODE_HDLC ||
+ info->params.mode == MGSL_MODE_RAW ) {
+		/* operating in synchronous (frame oriented) mode */
+ if (info->tx_active) {
+
+ if ( info->params.mode == MGSL_MODE_HDLC ) {
+ ret = 0;
+ goto cleanup;
+ }
+ /* transmitter is actively sending data -
+ * if we have multiple transmit dma and
+ * holding buffers, attempt to queue this
+ * frame for transmission at a later time.
+ */
+ if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
+ /* no tx holding buffers available */
+ ret = 0;
+ goto cleanup;
+ }
+
+ /* queue transmit frame request */
+ ret = count;
+ save_tx_buffer_request(info,buf,count);
+
+ /* if we have sufficient tx dma buffers,
+ * load the next buffered tx request
+ */
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ load_next_tx_holding_buffer(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ goto cleanup;
+ }
+
+ /* if operating in HDLC LoopMode and the adapter */
+ /* has yet to be inserted into the loop, we can't */
+ /* transmit */
+
+ if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
+ !usc_loopmode_active(info) )
+ {
+ ret = 0;
+ goto cleanup;
+ }
+
+ if ( info->xmit_cnt ) {
+			/* Send data accumulated from send_char() calls */
+			/* as a frame and wait before accepting more data. */
+ ret = 0;
+
+ /* copy data from circular xmit_buf to */
+ /* transmit DMA buffer. */
+ mgsl_load_tx_dma_buffer(info,
+ info->xmit_buf,info->xmit_cnt);
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
+ __FILE__,__LINE__,info->device_name);
+ } else {
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
+ __FILE__,__LINE__,info->device_name);
+ ret = count;
+ info->xmit_cnt = count;
+ mgsl_load_tx_dma_buffer(info,buf,count);
+ }
+ } else {
+ while (1) {
+ spin_lock_irqsave(&info->irq_spinlock,flags);
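+			/* limit the copy to the smallest of: the bytes left
+			 * in this request, the free space in the ring buffer,
+			 * and the contiguous space before the buffer wraps
+			 */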
+ c = min_t(int, count,
+ min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - info->xmit_head));
+ if (c <= 0) {
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ break;
+ }
+ memcpy(info->xmit_buf + info->xmit_head, buf, c);
+ info->xmit_head = ((info->xmit_head + c) &
+ (SERIAL_XMIT_SIZE-1));
+ info->xmit_cnt += c;
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ buf += c;
+ count -= c;
+ ret += c;
+ }
+ }
+
+ if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ if (!info->tx_active)
+ usc_start_transmitter(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ }
+cleanup:
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):mgsl_write(%s) returning=%d\n",
+ __FILE__,__LINE__,info->device_name,ret);
+
+ return ret;
+
+} /* end of mgsl_write() */
+
+/* mgsl_write_room()
+ *
+ * Return the count of free bytes in transmit buffer
+ *
+ * Arguments: tty pointer to tty info structure
+ * Return Value:	count of free bytes in transmit buffer
+ */
+static int mgsl_write_room(struct tty_struct *tty)
+{
+ struct mgsl_struct *info = tty->driver_data;
+ int ret;
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
+ return 0;
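+	/* mgsl_put_char() never fills the last byte of the ring buffer,
+	 * so at most SERIAL_XMIT_SIZE-1 bytes are usable, hence the -1
+	 */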
+ ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
+ if (ret < 0)
+ ret = 0;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_write_room(%s)=%d\n",
+ __FILE__,__LINE__, info->device_name,ret );
+
+ if ( info->params.mode == MGSL_MODE_HDLC ||
+ info->params.mode == MGSL_MODE_RAW ) {
+ /* operating in synchronous (frame oriented) mode */
+ if ( info->tx_active )
+ return 0;
+ else
+ return HDLC_MAX_FRAME_SIZE;
+ }
+
+ return ret;
+
+} /* end of mgsl_write_room() */
+
+/* mgsl_chars_in_buffer()
+ *
+ * Return the count of bytes in transmit buffer
+ *
+ * Arguments: tty pointer to tty info structure
+ * Return Value:	count of bytes in transmit buffer
+ */
+static int mgsl_chars_in_buffer(struct tty_struct *tty)
+{
+ struct mgsl_struct *info = tty->driver_data;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
+ return 0;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
+ __FILE__,__LINE__, info->device_name,info->xmit_cnt );
+
+ if ( info->params.mode == MGSL_MODE_HDLC ||
+ info->params.mode == MGSL_MODE_RAW ) {
+ /* operating in synchronous (frame oriented) mode */
+ if ( info->tx_active )
+ return info->max_frame_size;
+ else
+ return 0;
+ }
+
+ return info->xmit_cnt;
+} /* end of mgsl_chars_in_buffer() */
+
+/* mgsl_flush_buffer()
+ *
+ * Discard all data in the send buffer
+ *
+ * Arguments: tty pointer to tty info structure
+ * Return Value: None
+ */
+static void mgsl_flush_buffer(struct tty_struct *tty)
+{
+ struct mgsl_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
+ return;
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+ del_timer(&info->tx_timer);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ tty_wakeup(tty);
+}
+
+/* mgsl_send_xchar()
+ *
+ * Send a high-priority XON/XOFF character
+ *
+ * Arguments: tty pointer to tty info structure
+ * ch character to send
+ * Return Value: None
+ */
+static void mgsl_send_xchar(struct tty_struct *tty, char ch)
+{
+ struct mgsl_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
+ __FILE__,__LINE__, info->device_name, ch );
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
+ return;
+
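+	/* latch the character for the transmit path; a value of zero only
+	 * clears any previously latched character and does not start the
+	 * transmitter
+	 */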
+ info->x_char = ch;
+ if (ch) {
+ /* Make sure transmit interrupts are on */
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ if (!info->tx_enabled)
+ usc_start_transmitter(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ }
+} /* end of mgsl_send_xchar() */
+
+/* mgsl_throttle()
+ *
+ * Signal remote device to throttle send data (our receive data)
+ *
+ * Arguments: tty pointer to tty info structure
+ * Return Value: None
+ */
+static void mgsl_throttle(struct tty_struct * tty)
+{
+ struct mgsl_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_throttle(%s) entry\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
+ return;
+
+ if (I_IXOFF(tty))
+ mgsl_send_xchar(tty, STOP_CHAR(tty));
+
+ if (tty->termios->c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ info->serial_signals &= ~SerialSignal_RTS;
+ usc_set_serial_signals(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ }
+} /* end of mgsl_throttle() */
+
+/* mgsl_unthrottle()
+ *
+ * Signal remote device to stop throttling send data (our receive data)
+ *
+ * Arguments: tty pointer to tty info structure
+ * Return Value: None
+ */
+static void mgsl_unthrottle(struct tty_struct * tty)
+{
+ struct mgsl_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_unthrottle(%s) entry\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
+ return;
+
+ if (I_IXOFF(tty)) {
+ if (info->x_char)
+ info->x_char = 0;
+ else
+ mgsl_send_xchar(tty, START_CHAR(tty));
+ }
+
+ if (tty->termios->c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ info->serial_signals |= SerialSignal_RTS;
+ usc_set_serial_signals(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ }
+
+} /* end of mgsl_unthrottle() */
+
+/* mgsl_get_stats()
+ *
+ * 	get the accumulated statistics and error counters
+ * 	(resets them instead when user_icount is NULL)
+ *
+ * Arguments: info pointer to device instance data
+ * user_icount pointer to buffer to hold returned stats
+ *
+ * Return Value: 0 if success, otherwise error code
+ */
+static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
+{
+ int err;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):mgsl_get_stats(%s)\n",
+ __FILE__,__LINE__, info->device_name);
+
+ if (!user_icount) {
+ memset(&info->icount, 0, sizeof(info->icount));
+ } else {
+ mutex_lock(&info->port.mutex);
+ COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
+ mutex_unlock(&info->port.mutex);
+ if (err)
+ return -EFAULT;
+ }
+
+ return 0;
+
+} /* end of mgsl_get_stats() */
+
+/* mgsl_get_params()
+ *
+ * get the current serial parameters information
+ *
+ * Arguments: info pointer to device instance data
+ * user_params pointer to buffer to hold returned params
+ *
+ * Return Value: 0 if success, otherwise error code
+ */
+static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
+{
+ int err;
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_get_params(%s)\n",
+ __FILE__,__LINE__, info->device_name);
+
+ mutex_lock(&info->port.mutex);
+ COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
+ mutex_unlock(&info->port.mutex);
+ if (err) {
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
+ __FILE__,__LINE__,info->device_name);
+ return -EFAULT;
+ }
+
+ return 0;
+
+} /* end of mgsl_get_params() */
+
+/* mgsl_set_params()
+ *
+ * set the serial parameters
+ *
+ * Arguments:
+ *
+ * info pointer to device instance data
+ * new_params user buffer containing new serial params
+ *
+ * Return Value: 0 if success, otherwise error code
+ */
+static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
+{
+ unsigned long flags;
+ MGSL_PARAMS tmp_params;
+ int err;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
+ info->device_name );
+ COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
+ if (err) {
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
+ __FILE__,__LINE__,info->device_name);
+ return -EFAULT;
+ }
+
+ mutex_lock(&info->port.mutex);
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ mgsl_change_params(info);
+ mutex_unlock(&info->port.mutex);
+
+ return 0;
+
+} /* end of mgsl_set_params() */
+
+/* mgsl_get_txidle()
+ *
+ * get the current transmit idle mode
+ *
+ * Arguments: info pointer to device instance data
+ * idle_mode pointer to buffer to hold returned idle mode
+ *
+ * Return Value: 0 if success, otherwise error code
+ */
+static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
+{
+ int err;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
+ __FILE__,__LINE__, info->device_name, info->idle_mode);
+
+ COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
+ if (err) {
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
+ __FILE__,__LINE__,info->device_name);
+ return -EFAULT;
+ }
+
+ return 0;
+
+} /* end of mgsl_get_txidle() */
+
+/* mgsl_set_txidle() service ioctl to set transmit idle mode
+ *
+ * Arguments: info pointer to device instance data
+ * idle_mode new idle mode
+ *
+ * Return Value: 0 if success, otherwise error code
+ */
+static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
+{
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
+ info->device_name, idle_mode );
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ info->idle_mode = idle_mode;
+ usc_set_txidle( info );
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ return 0;
+
+} /* end of mgsl_set_txidle() */
+
+/* mgsl_txenable()
+ *
+ * enable or disable the transmitter
+ *
+ * Arguments:
+ *
+ * info pointer to device instance data
+ * enable 1 = enable, 0 = disable
+ *
+ * Return Value: 0 if success, otherwise error code
+ */
+static int mgsl_txenable(struct mgsl_struct * info, int enable)
+{
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
+ info->device_name, enable);
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ if ( enable ) {
+ if ( !info->tx_enabled ) {
+
+ usc_start_transmitter(info);
+ /*--------------------------------------------------
+ * if HDLC/SDLC Loop mode, attempt to insert the
+ * station in the 'loop' by setting CMR:13. Upon
+ * receipt of the next GoAhead (RxAbort) sequence,
+ * the OnLoop indicator (CCSR:7) should go active
+ * to indicate that we are on the loop
+ *--------------------------------------------------*/
+ if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
+ usc_loopmode_insert_request( info );
+ }
+ } else {
+ if ( info->tx_enabled )
+ usc_stop_transmitter(info);
+ }
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ return 0;
+
+} /* end of mgsl_txenable() */
+
+/* mgsl_txabort() abort send HDLC frame
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: 0 if success, otherwise error code
+ */
+static int mgsl_txabort(struct mgsl_struct * info)
+{
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
+ info->device_name);
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
+ {
+ if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
+ usc_loopmode_cancel_transmit( info );
+ else
+ usc_TCmd(info,TCmd_SendAbort);
+ }
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ return 0;
+
+} /* end of mgsl_txabort() */
+
+/* mgsl_rxenable() enable or disable the receiver
+ *
+ * Arguments: info pointer to device instance data
+ * enable 1 = enable, 0 = disable
+ * Return Value: 0 if success, otherwise error code
+ */
+static int mgsl_rxenable(struct mgsl_struct * info, int enable)
+{
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
+ info->device_name, enable);
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ if ( enable ) {
+ if ( !info->rx_enabled )
+ usc_start_receiver(info);
+ } else {
+ if ( info->rx_enabled )
+ usc_stop_receiver(info);
+ }
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ return 0;
+
+} /* end of mgsl_rxenable() */
+
+/* mgsl_wait_event() wait for specified event to occur
+ *
+ * Arguments: info pointer to device instance data
+ * mask pointer to bitmask of events to wait for
+ * Return Value:	0 if successful, with the bit mask updated to
+ *			reflect the events that triggered,
+ *			otherwise error code
+ */
+static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
+{
+ unsigned long flags;
+ int s;
+ int rc=0;
+ struct mgsl_icount cprev, cnow;
+ int events;
+ int mask;
+ struct _input_signal_events oldsigs, newsigs;
+ DECLARE_WAITQUEUE(wait, current);
+
+ COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
+ if (rc) {
+ return -EFAULT;
+ }
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
+ info->device_name, mask);
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+
+ /* return immediately if state matches requested events */
+ usc_get_serial_signals(info);
+ s = info->serial_signals;
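+	/* map each signal's current state to its Active or Inactive event
+	 * bit, then keep only the events the caller asked for
+	 */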
+ events = mask &
+ ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
+ ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
+ ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
+ ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
+ if (events) {
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ goto exit;
+ }
+
+ /* save current irq counts */
+ cprev = info->icount;
+ oldsigs = info->input_signal_events;
+
+ /* enable hunt and idle irqs if needed */
+ if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
+ u16 oldreg = usc_InReg(info,RICR);
+ u16 newreg = oldreg +
+ (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
+ (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
+ if (oldreg != newreg)
+ usc_OutReg(info, RICR, newreg);
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&info->event_wait_q, &wait);
+
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+
+ for(;;) {
+ schedule();
+ if (signal_pending(current)) {
+ rc = -ERESTARTSYS;
+ break;
+ }
+
+ /* get current irq counts */
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ cnow = info->icount;
+ newsigs = info->input_signal_events;
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ /* if no change, wait aborted for some reason */
+ if (newsigs.dsr_up == oldsigs.dsr_up &&
+ newsigs.dsr_down == oldsigs.dsr_down &&
+ newsigs.dcd_up == oldsigs.dcd_up &&
+ newsigs.dcd_down == oldsigs.dcd_down &&
+ newsigs.cts_up == oldsigs.cts_up &&
+ newsigs.cts_down == oldsigs.cts_down &&
+ newsigs.ri_up == oldsigs.ri_up &&
+ newsigs.ri_down == oldsigs.ri_down &&
+ cnow.exithunt == cprev.exithunt &&
+ cnow.rxidle == cprev.rxidle) {
+ rc = -EIO;
+ break;
+ }
+
+ events = mask &
+ ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
+ (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
+ (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
+ (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
+ (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
+ (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
+ (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
+ (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
+ (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
+ (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
+ if (events)
+ break;
+
+ cprev = cnow;
+ oldsigs = newsigs;
+ }
+
+ remove_wait_queue(&info->event_wait_q, &wait);
+ set_current_state(TASK_RUNNING);
+
+ if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ if (!waitqueue_active(&info->event_wait_q)) {
+			/* disable exit hunt mode/idle received IRQs */
+ usc_OutReg(info, RICR, usc_InReg(info,RICR) &
+ ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
+ }
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ }
+exit:
+ if ( rc == 0 )
+ PUT_USER(rc, events, mask_ptr);
+
+ return rc;
+
+} /* end of mgsl_wait_event() */
+
+static int modem_input_wait(struct mgsl_struct *info,int arg)
+{
+ unsigned long flags;
+ int rc;
+ struct mgsl_icount cprev, cnow;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /* save current irq counts */
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ cprev = info->icount;
+ add_wait_queue(&info->status_event_wait_q, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ for(;;) {
+ schedule();
+ if (signal_pending(current)) {
+ rc = -ERESTARTSYS;
+ break;
+ }
+
+ /* get new irq counts */
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ cnow = info->icount;
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ /* if no change, wait aborted for some reason */
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
+ rc = -EIO;
+ break;
+ }
+
+ /* check for change in caller specified modem input */
+ if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
+ (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
+ (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
+ (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
+ rc = 0;
+ break;
+ }
+
+ cprev = cnow;
+ }
+ remove_wait_queue(&info->status_event_wait_q, &wait);
+ set_current_state(TASK_RUNNING);
+ return rc;
+}
+
+/* return the state of the serial control and status signals
+ */
+static int tiocmget(struct tty_struct *tty)
+{
+ struct mgsl_struct *info = tty->driver_data;
+ unsigned int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_get_serial_signals(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
+ ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
+ ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
+ ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
+ ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
+ ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s tiocmget() value=%08X\n",
+ __FILE__,__LINE__, info->device_name, result );
+ return result;
+}
+
+/* set modem control signals (DTR/RTS)
+ */
+static int tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct mgsl_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s tiocmset(%x,%x)\n",
+ __FILE__,__LINE__,info->device_name, set, clear);
+
+ if (set & TIOCM_RTS)
+ info->serial_signals |= SerialSignal_RTS;
+ if (set & TIOCM_DTR)
+ info->serial_signals |= SerialSignal_DTR;
+ if (clear & TIOCM_RTS)
+ info->serial_signals &= ~SerialSignal_RTS;
+ if (clear & TIOCM_DTR)
+ info->serial_signals &= ~SerialSignal_DTR;
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_set_serial_signals(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ return 0;
+}
+
+/* mgsl_break() Set or clear transmit break condition
+ *
+ * Arguments: tty pointer to tty instance data
+ * break_state -1=set break condition, 0=clear
+ * Return Value:	0 on success, otherwise error code
+ */
+static int mgsl_break(struct tty_struct *tty, int break_state)
+{
+ struct mgsl_struct * info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_break(%s,%d)\n",
+ __FILE__,__LINE__, info->device_name, break_state);
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
+ return -EINVAL;
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ if (break_state == -1)
+ usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
+ else
+ usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ return 0;
+
+} /* end of mgsl_break() */
+
+/*
+ * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
+ * Return: write counters to the user passed counter struct
+ * NB: both 1->0 and 0->1 transitions are counted except for
+ * RI where only 0->1 is counted.
+ */
+static int msgl_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+
+{
+ struct mgsl_struct * info = tty->driver_data;
+ struct mgsl_icount cnow; /* kernel counter temps */
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ cnow = info->icount;
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ icount->cts = cnow.cts;
+ icount->dsr = cnow.dsr;
+ icount->rng = cnow.rng;
+ icount->dcd = cnow.dcd;
+ icount->rx = cnow.rx;
+ icount->tx = cnow.tx;
+ icount->frame = cnow.frame;
+ icount->overrun = cnow.overrun;
+ icount->parity = cnow.parity;
+ icount->brk = cnow.brk;
+ icount->buf_overrun = cnow.buf_overrun;
+ return 0;
+}
+
+/* mgsl_ioctl() Service an IOCTL request
+ *
+ * Arguments:
+ *
+ * tty pointer to tty instance data
+ * cmd IOCTL command code
+ * arg command argument/context
+ *
+ * Return Value: 0 if success, otherwise error code
+ */
+static int mgsl_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct mgsl_struct * info = tty->driver_data;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
+ info->device_name, cmd );
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
+ return -ENODEV;
+
+ if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+ (cmd != TIOCMIWAIT)) {
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+ }
+
+ return mgsl_ioctl_common(info, cmd, arg);
+}
+
+static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ case MGSL_IOCGPARAMS:
+ return mgsl_get_params(info, argp);
+ case MGSL_IOCSPARAMS:
+ return mgsl_set_params(info, argp);
+ case MGSL_IOCGTXIDLE:
+ return mgsl_get_txidle(info, argp);
+ case MGSL_IOCSTXIDLE:
+ return mgsl_set_txidle(info,(int)arg);
+ case MGSL_IOCTXENABLE:
+ return mgsl_txenable(info,(int)arg);
+ case MGSL_IOCRXENABLE:
+ return mgsl_rxenable(info,(int)arg);
+ case MGSL_IOCTXABORT:
+ return mgsl_txabort(info);
+ case MGSL_IOCGSTATS:
+ return mgsl_get_stats(info, argp);
+ case MGSL_IOCWAITEVENT:
+ return mgsl_wait_event(info, argp);
+ case MGSL_IOCLOOPTXDONE:
+ return mgsl_loopmode_send_done(info);
+ /* Wait for modem input (DCD,RI,DSR,CTS) change
+ * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
+ */
+ case TIOCMIWAIT:
+ return modem_input_wait(info,(int)arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+/* mgsl_set_termios()
+ *
+ * Set new termios settings
+ *
+ * Arguments:
+ *
+ * tty pointer to tty structure
+ * 	old_termios	pointer to the previous termios settings
+ *
+ * Return Value: None
+ */
+static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ struct mgsl_struct *info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
+ tty->driver->name );
+
+ mgsl_change_params(info);
+
+ /* Handle transition to B0 status */
+ if (old_termios->c_cflag & CBAUD &&
+ !(tty->termios->c_cflag & CBAUD)) {
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_set_serial_signals(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ }
+
+ /* Handle transition away from B0 status */
+ if (!(old_termios->c_cflag & CBAUD) &&
+ tty->termios->c_cflag & CBAUD) {
+ info->serial_signals |= SerialSignal_DTR;
+ if (!(tty->termios->c_cflag & CRTSCTS) ||
+ !test_bit(TTY_THROTTLED, &tty->flags)) {
+ info->serial_signals |= SerialSignal_RTS;
+ }
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_set_serial_signals(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ }
+
+ /* Handle turning off CRTSCTS */
+ if (old_termios->c_cflag & CRTSCTS &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ mgsl_start(tty);
+ }
+
+} /* end of mgsl_set_termios() */
+
+/* mgsl_close()
+ *
+ * Called when port is closed. Wait for remaining data to be
+ * sent. Disable port and free resources.
+ *
+ * Arguments:
+ *
+ * tty pointer to open tty structure
+ * filp pointer to open file object
+ *
+ * Return Value: None
+ */
+static void mgsl_close(struct tty_struct *tty, struct file * filp)
+{
+ struct mgsl_struct * info = tty->driver_data;
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
+ return;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
+ __FILE__,__LINE__, info->device_name, info->port.count);
+
+ if (tty_port_close_start(&info->port, tty, filp) == 0)
+ goto cleanup;
+
+ mutex_lock(&info->port.mutex);
+ if (info->port.flags & ASYNC_INITIALIZED)
+ mgsl_wait_until_sent(tty, info->timeout);
+ mgsl_flush_buffer(tty);
+ tty_ldisc_flush(tty);
+ shutdown(info);
+ mutex_unlock(&info->port.mutex);
+
+ tty_port_close_end(&info->port, tty);
+ info->port.tty = NULL;
+cleanup:
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
+ tty->driver->name, info->port.count);
+
+} /* end of mgsl_close() */
+
+/* mgsl_wait_until_sent()
+ *
+ * Wait until the transmitter is empty.
+ *
+ * Arguments:
+ *
+ * tty pointer to tty info structure
+ * timeout time to wait for send completion
+ *
+ * Return Value: None
+ */
+static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct mgsl_struct * info = tty->driver_data;
+ unsigned long orig_jiffies, char_time;
+
+ if (!info )
+ return;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
+ return;
+
+ if (!(info->port.flags & ASYNC_INITIALIZED))
+ goto exit;
+
+ orig_jiffies = jiffies;
+
+ /* Set check interval to 1/5 of estimated time to
+ * send a character, and make it at least 1. The check
+ * interval should also be less than the timeout.
+ * Note: use tight timings here to satisfy the NIST-PCTS.
+ */
+
+ if ( info->params.data_rate ) {
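+		/* info->timeout covers a full 32 character FIFO, so dividing
+		 * by 32*5 gives roughly one fifth of the per-character time
+		 */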
+ char_time = info->timeout/(32 * 5);
+ if (!char_time)
+ char_time++;
+ } else
+ char_time = 1;
+
+ if (timeout)
+ char_time = min_t(unsigned long, char_time, timeout);
+
+ if ( info->params.mode == MGSL_MODE_HDLC ||
+ info->params.mode == MGSL_MODE_RAW ) {
+ while (info->tx_active) {
+ msleep_interruptible(jiffies_to_msecs(char_time));
+ if (signal_pending(current))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ break;
+ }
+ } else {
+ while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
+ info->tx_enabled) {
+ msleep_interruptible(jiffies_to_msecs(char_time));
+ if (signal_pending(current))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ break;
+ }
+ }
+
+exit:
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
+ __FILE__,__LINE__, info->device_name );
+
+} /* end of mgsl_wait_until_sent() */
+
+/* mgsl_hangup()
+ *
+ * Called by tty_hangup() when a hangup is signaled.
+ * 	This is the same as closing all open files for the port.
+ *
+ * Arguments: tty pointer to associated tty object
+ * Return Value: None
+ */
+static void mgsl_hangup(struct tty_struct *tty)
+{
+ struct mgsl_struct * info = tty->driver_data;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_hangup(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
+ return;
+
+ mgsl_flush_buffer(tty);
+ shutdown(info);
+
+ info->port.count = 0;
+ info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
+ info->port.tty = NULL;
+
+ wake_up_interruptible(&info->port.open_wait);
+
+} /* end of mgsl_hangup() */
+
+/*
+ * carrier_raised()
+ *
+ * Return true if carrier is raised
+ */
+
+static int carrier_raised(struct tty_port *port)
+{
+ unsigned long flags;
+ struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
+
+ spin_lock_irqsave(&info->irq_spinlock, flags);
+ usc_get_serial_signals(info);
+ spin_unlock_irqrestore(&info->irq_spinlock, flags);
+ return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
+}
+
+static void dtr_rts(struct tty_port *port, int on)
+{
+ struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ if (on)
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ usc_set_serial_signals(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+}
+
+
+/* block_til_ready()
+ *
+ * Block the current process until the specified port
+ * is ready to be opened.
+ *
+ * Arguments:
+ *
+ * tty pointer to tty info structure
+ * filp pointer to open file object
+ * info pointer to device instance data
+ *
+ * Return Value: 0 if success, otherwise error code
+ */
+static int block_til_ready(struct tty_struct *tty, struct file * filp,
+ struct mgsl_struct *info)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int retval;
+ bool do_clocal = false;
+ bool extra_count = false;
+ unsigned long flags;
+ int dcd;
+ struct tty_port *port = &info->port;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):block_til_ready on %s\n",
+ __FILE__,__LINE__, tty->driver->name );
+
+ if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
+ /* nonblock mode is set or port is not enabled */
+ port->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+ }
+
+ if (tty->termios->c_cflag & CLOCAL)
+ do_clocal = true;
+
+ /* Wait for carrier detect and the line to become
+ * free (i.e., not in use by the callout). While we are in
+ * this loop, port->count is dropped by one, so that
+ * mgsl_close() knows when to free things. We restore it upon
+ * exit, either normal or abnormal.
+ */
+
+ retval = 0;
+ add_wait_queue(&port->open_wait, &wait);
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):block_til_ready before block on %s count=%d\n",
+ __FILE__,__LINE__, tty->driver->name, port->count );
+
+ spin_lock_irqsave(&info->irq_spinlock, flags);
+ if (!tty_hung_up_p(filp)) {
+ extra_count = true;
+ port->count--;
+ }
+ spin_unlock_irqrestore(&info->irq_spinlock, flags);
+ port->blocked_open++;
+
+ while (1) {
+ if (tty->termios->c_cflag & CBAUD)
+ tty_port_raise_dtr_rts(port);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
+ retval = (port->flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS;
+ break;
+ }
+
+ dcd = tty_port_carrier_raised(&info->port);
+
+ if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd))
+ break;
+
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):block_til_ready blocking on %s count=%d\n",
+ __FILE__,__LINE__, tty->driver->name, port->count );
+
+ tty_unlock();
+ schedule();
+ tty_lock();
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&port->open_wait, &wait);
+
+ /* FIXME: Racy on hangup during close wait */
+ if (extra_count)
+ port->count++;
+ port->blocked_open--;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
+ __FILE__,__LINE__, tty->driver->name, port->count );
+
+ if (!retval)
+ port->flags |= ASYNC_NORMAL_ACTIVE;
+
+ return retval;
+
+} /* end of block_til_ready() */
+
+/* mgsl_open()
+ *
+ * Called when a port is opened. Init and enable port.
+ * Perform serial-specific initialization for the tty structure.
+ *
+ * Arguments: tty pointer to tty info structure
+ * filp associated file pointer
+ *
+ * Return Value: 0 if success, otherwise error code
+ */
+static int mgsl_open(struct tty_struct *tty, struct file * filp)
+{
+ struct mgsl_struct *info;
+ int retval, line;
+ unsigned long flags;
+
+ /* verify range of specified line number */
+ line = tty->index;
+ if ((line < 0) || (line >= mgsl_device_count)) {
+ printk("%s(%d):mgsl_open with invalid line #%d.\n",
+ __FILE__,__LINE__,line);
+ return -ENODEV;
+ }
+
+ /* find the info structure for the specified line */
+ info = mgsl_device_list;
+ while(info && info->line != line)
+ info = info->next_device;
+ if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
+ return -ENODEV;
+
+ tty->driver_data = info;
+ info->port.tty = tty;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
+ __FILE__,__LINE__,tty->driver->name, info->port.count);
+
+ /* If port is closing, signal caller to try again */
+ if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
+ if (info->port.flags & ASYNC_CLOSING)
+ interruptible_sleep_on(&info->port.close_wait);
+ retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS);
+ goto cleanup;
+ }
+
+ info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
+ spin_lock_irqsave(&info->netlock, flags);
+ if (info->netcount) {
+ retval = -EBUSY;
+ spin_unlock_irqrestore(&info->netlock, flags);
+ goto cleanup;
+ }
+ info->port.count++;
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ if (info->port.count == 1) {
+ /* 1st open on this device, init hardware */
+ retval = startup(info);
+ if (retval < 0)
+ goto cleanup;
+ }
+
+ retval = block_til_ready(tty, filp, info);
+ if (retval) {
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):block_til_ready(%s) returned %d\n",
+ __FILE__,__LINE__, info->device_name, retval);
+ goto cleanup;
+ }
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_open(%s) success\n",
+ __FILE__,__LINE__, info->device_name);
+ retval = 0;
+
+cleanup:
+ if (retval) {
+ if (tty->count == 1)
+ info->port.tty = NULL; /* tty layer will release tty struct */
+ if(info->port.count)
+ info->port.count--;
+ }
+
+ return retval;
+
+} /* end of mgsl_open() */
+
+/*
+ * /proc fs routines....
+ */
+
+static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
+{
+ char stat_buf[30];
+ unsigned long flags;
+
+ if (info->bus_type == MGSL_BUS_TYPE_PCI) {
+ seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
+ info->device_name, info->io_base, info->irq_level,
+ info->phys_memory_base, info->phys_lcr_base);
+ } else {
+ seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d",
+ info->device_name, info->io_base,
+ info->irq_level, info->dma_level);
+ }
+
+ /* output current serial signal states */
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_get_serial_signals(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ stat_buf[0] = 0;
+ stat_buf[1] = 0;
+ if (info->serial_signals & SerialSignal_RTS)
+ strcat(stat_buf, "|RTS");
+ if (info->serial_signals & SerialSignal_CTS)
+ strcat(stat_buf, "|CTS");
+ if (info->serial_signals & SerialSignal_DTR)
+ strcat(stat_buf, "|DTR");
+ if (info->serial_signals & SerialSignal_DSR)
+ strcat(stat_buf, "|DSR");
+ if (info->serial_signals & SerialSignal_DCD)
+ strcat(stat_buf, "|CD");
+ if (info->serial_signals & SerialSignal_RI)
+ strcat(stat_buf, "|RI");
+
+ if (info->params.mode == MGSL_MODE_HDLC ||
+ info->params.mode == MGSL_MODE_RAW ) {
+ seq_printf(m, " HDLC txok:%d rxok:%d",
+ info->icount.txok, info->icount.rxok);
+ if (info->icount.txunder)
+ seq_printf(m, " txunder:%d", info->icount.txunder);
+ if (info->icount.txabort)
+ seq_printf(m, " txabort:%d", info->icount.txabort);
+ if (info->icount.rxshort)
+ seq_printf(m, " rxshort:%d", info->icount.rxshort);
+ if (info->icount.rxlong)
+ seq_printf(m, " rxlong:%d", info->icount.rxlong);
+ if (info->icount.rxover)
+ seq_printf(m, " rxover:%d", info->icount.rxover);
+ if (info->icount.rxcrc)
+ seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
+ } else {
+ seq_printf(m, " ASYNC tx:%d rx:%d",
+ info->icount.tx, info->icount.rx);
+ if (info->icount.frame)
+ seq_printf(m, " fe:%d", info->icount.frame);
+ if (info->icount.parity)
+ seq_printf(m, " pe:%d", info->icount.parity);
+ if (info->icount.brk)
+ seq_printf(m, " brk:%d", info->icount.brk);
+ if (info->icount.overrun)
+ seq_printf(m, " oe:%d", info->icount.overrun);
+ }
+
+ /* Append serial signal status to end */
+ seq_printf(m, " %s\n", stat_buf+1);
+
+ seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
+ info->tx_active,info->bh_requested,info->bh_running,
+ info->pending_bh);
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ {
+ u16 Tcsr = usc_InReg( info, TCSR );
+ u16 Tdmr = usc_InDmaReg( info, TDMR );
+ u16 Ticr = usc_InReg( info, TICR );
+ u16 Rscr = usc_InReg( info, RCSR );
+ u16 Rdmr = usc_InDmaReg( info, RDMR );
+ u16 Ricr = usc_InReg( info, RICR );
+ u16 Icr = usc_InReg( info, ICR );
+ u16 Dccr = usc_InReg( info, DCCR );
+ u16 Tmr = usc_InReg( info, TMR );
+ u16 Tccr = usc_InReg( info, TCCR );
+ u16 Ccar = inw( info->io_base + CCAR );
+ seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
+ "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
+ Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
+ }
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+}
+
+/* Called to print information about devices */
+static int mgsl_proc_show(struct seq_file *m, void *v)
+{
+ struct mgsl_struct *info;
+
+ seq_printf(m, "synclink driver:%s\n", driver_version);
+
+ info = mgsl_device_list;
+ while( info ) {
+ line_info(m, info);
+ info = info->next_device;
+ }
+ return 0;
+}
+
+static int mgsl_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mgsl_proc_show, NULL);
+}
+
+static const struct file_operations mgsl_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = mgsl_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* mgsl_allocate_dma_buffers()
+ *
+ * Allocate and format DMA buffers (ISA adapter)
+ * or format shared memory buffers (PCI adapter).
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: 0 if success, otherwise error
+ */
+static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
+{
+ unsigned short BuffersPerFrame;
+
+ info->last_mem_alloc = 0;
+
+ /* Calculate the number of DMA buffers necessary to hold the */
+ /* largest allowable frame size. Note: If the max frame size is */
+ /* not an even multiple of the DMA buffer size then we need to */
+ /* round the buffer count per frame up one. */
+
+ BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
+ if ( info->max_frame_size % DMABUFFERSIZE )
+ BuffersPerFrame++;
+
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
+ /*
+ * The PCI adapter has 256KBytes of shared memory to use.
+ * This is 64 PAGE_SIZE buffers.
+ *
+ * The first page is used for padding at this time so the
+ * buffer list does not begin at offset 0 of the PCI
+ * adapter's shared memory.
+ *
+ * The 2nd page is used for the buffer list. A 4K buffer
+ * list can hold 128 DMA_BUFFER structures at 32 bytes
+ * each.
+ *
+ * This leaves 62 4K pages.
+ *
+ * The next N pages are used for transmit frame(s). We
+ * reserve enough 4K page blocks to hold the required
+ * number of transmit dma buffers (num_tx_dma_buffers),
+ * each of MaxFrameSize size.
+ *
+ * Of the remaining pages (62-N), determine how many can
+ * be used to receive full MaxFrameSize inbound frames
+ */
+ info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
+ info->rx_buffer_count = 62 - info->tx_buffer_count;
+ } else {
+ /* Calculate the number of PAGE_SIZE buffers needed for */
+ /* receive and transmit DMA buffers. */
+
+
+ /* Calculate the number of DMA buffers necessary to */
+ /* hold 7 max size receive frames and one max size transmit frame. */
+ /* The receive buffer count is bumped by one so we avoid an */
+ /* End of List condition if all receive buffers are used when */
+ /* using linked list DMA buffers. */
+
+ info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
+ info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
+
+ /*
+ * limit total TxBuffers & RxBuffers to 62 4K total
+ * (a la the PCI allocation)
+ */
+
+ if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
+ info->rx_buffer_count = 62 - info->tx_buffer_count;
+
+ }
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
+ __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
+
+ if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
+ mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
+ mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
+ mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
+ mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
+ printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
+ return -ENOMEM;
+ }
+
+ mgsl_reset_rx_dma_buffers( info );
+ mgsl_reset_tx_dma_buffers( info );
+
+ return 0;
+
+} /* end of mgsl_allocate_dma_buffers() */
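+
+/*
+ * Illustrative sizing example (not part of the driver logic), assuming
+ * DMABUFFERSIZE is one 4K page and the defaults set in
+ * mgsl_allocate_device() (max_frame_size = 4096, num_tx_dma_buffers = 1):
+ *
+ *   BuffersPerFrame = 4096 / 4096 = 1   (no remainder, no round up)
+ *   PCI: tx_buffer_count = 1 * 1 = 1,  rx_buffer_count = 62 - 1 = 61
+ *   ISA: tx_buffer_count = 1,  rx_buffer_count = (1 * MAXRXFRAMES) + 6
+ *
+ * On the PCI adapter, raising max_frame_size or num_tx_dma_buffers
+ * directly reduces the shared memory pages left for receive frames.
+ */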
+
+/*
+ * mgsl_alloc_buffer_list_memory()
+ *
+ * Allocate a common DMA buffer for use as the
+ * receive and transmit buffer lists.
+ *
+ * A buffer list is a set of buffer entries where each entry contains
+ * a pointer to an actual buffer and a pointer to the next buffer entry
+ * (plus some other info about the buffer).
+ *
+ * The buffer entries for a list are built to form a circular list so
+ * that when the entire list has been traversed you start back at the
+ * beginning.
+ *
+ * This function allocates memory for just the buffer entries.
+ * The links (pointer to next entry) are filled in with the physical
+ * address of the next entry so the adapter can navigate the list
+ * using bus master DMA. The pointers to the actual buffers are filled
+ * out later when the actual buffers are allocated.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: 0 if success, otherwise error
+ */
+static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
+{
+ unsigned int i;
+
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
+ /* PCI adapter uses shared memory. */
+ info->buffer_list = info->memory_base + info->last_mem_alloc;
+ info->buffer_list_phys = info->last_mem_alloc;
+ info->last_mem_alloc += BUFFERLISTSIZE;
+ } else {
+ /* ISA adapter uses system memory. */
+ /* The buffer lists are allocated as a common buffer that both */
+ /* the processor and adapter can access. This allows the driver to */
+ /* inspect portions of the buffer while other portions are being */
+ /* updated by the adapter using Bus Master DMA. */
+
+ info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
+ if (info->buffer_list == NULL)
+ return -ENOMEM;
+ info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
+ }
+
+ /* We got the memory for the buffer entry lists. */
+ /* Initialize the memory block to all zeros. */
+ memset( info->buffer_list, 0, BUFFERLISTSIZE );
+
+ /* Save virtual address pointers to the receive and */
+ /* transmit buffer lists. (Receive 1st). These pointers will */
+ /* be used by the processor to access the lists. */
+ info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
+ info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
+ info->tx_buffer_list += info->rx_buffer_count;
+
+ /*
+ * Build the links for the buffer entry lists such that
+ * two circular lists are built. (Transmit and Receive).
+ *
+ * Note: the links are physical addresses
+ * which are read by the adapter to determine the next
+ * buffer entry to use.
+ */
+
+ for ( i = 0; i < info->rx_buffer_count; i++ ) {
+ /* calculate and store physical address of this buffer entry */
+ info->rx_buffer_list[i].phys_entry =
+ info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
+
+ /* calculate and store physical address of */
+ /* next entry in circular list of entries */
+
+ info->rx_buffer_list[i].link = info->buffer_list_phys;
+
+ if ( i < info->rx_buffer_count - 1 )
+ info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
+ }
+
+ for ( i = 0; i < info->tx_buffer_count; i++ ) {
+ /* calculate and store physical address of this buffer entry */
+ info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
+ ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
+
+ /* calculate and store physical address of */
+ /* next entry in circular list of entries */
+
+ info->tx_buffer_list[i].link = info->buffer_list_phys +
+ info->rx_buffer_count * sizeof(DMABUFFERENTRY);
+
+ if ( i < info->tx_buffer_count - 1 )
+ info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
+ }
+
+ return 0;
+
+} /* end of mgsl_alloc_buffer_list_memory() */
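+
+/*
+ * Layout sketch (illustrative only): with rx_buffer_count = 2,
+ * tx_buffer_count = 1, and 32-byte buffer entries (as the shared memory
+ * comment in mgsl_allocate_dma_buffers() assumes), the links built above
+ * form two independent circular lists in the common block:
+ *
+ *   rx[0]: phys_entry = base + 0,   link = base + 32   (next rx entry)
+ *   rx[1]: phys_entry = base + 32,  link = base + 0    (wraps to rx[0])
+ *   tx[0]: phys_entry = base + 64,  link = base + 64   (wraps to itself)
+ *
+ * where base is buffer_list_phys, so the adapter can follow either list
+ * indefinitely using bus master DMA.
+ */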
+
+/* Free DMA buffers allocated for use as the
+ * receive and transmit buffer lists.
+ * Warning:
+ *
+ * The data transfer buffers associated with the buffer list
+ * MUST be freed before freeing the buffer list itself because
+ * the buffer list contains the information necessary to free
+ * the individual buffers!
+ */
+static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
+{
+ if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
+ dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
+
+ info->buffer_list = NULL;
+ info->rx_buffer_list = NULL;
+ info->tx_buffer_list = NULL;
+
+} /* end of mgsl_free_buffer_list_memory() */
+
+/*
+ * mgsl_alloc_frame_memory()
+ *
+ * Allocate the frame DMA buffers used by the specified buffer list.
+ * Each DMA buffer will be one memory page in size. This is necessary
+ * because memory can fragment enough that it may be impossible
+ * to allocate contiguous pages.
+ *
+ * Arguments:
+ *
+ * info pointer to device instance data
+ * BufferList pointer to list of buffer entries
+ * Buffercount count of buffer entries in buffer list
+ *
+ * Return Value: 0 if success, otherwise -ENOMEM
+ */
+static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
+{
+ int i;
+ u32 phys_addr;
+
+ /* Allocate page sized buffers for the receive buffer list */
+
+ for ( i = 0; i < Buffercount; i++ ) {
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
+ /* PCI adapter uses shared memory buffers. */
+ BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
+ phys_addr = info->last_mem_alloc;
+ info->last_mem_alloc += DMABUFFERSIZE;
+ } else {
+ /* ISA adapter uses system memory. */
+ BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
+ if (BufferList[i].virt_addr == NULL)
+ return -ENOMEM;
+ phys_addr = (u32)(BufferList[i].dma_addr);
+ }
+ BufferList[i].phys_addr = phys_addr;
+ }
+
+ return 0;
+
+} /* end of mgsl_alloc_frame_memory() */
+
+/*
+ * mgsl_free_frame_memory()
+ *
+ * Free the buffers associated with
+ * each buffer entry of a buffer list.
+ *
+ * Arguments:
+ *
+ * info pointer to device instance data
+ * BufferList pointer to list of buffer entries
+ * Buffercount count of buffer entries in buffer list
+ *
+ * Return Value: None
+ */
+static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
+{
+ int i;
+
+ if ( BufferList ) {
+ for ( i = 0 ; i < Buffercount ; i++ ) {
+ if ( BufferList[i].virt_addr ) {
+ if ( info->bus_type != MGSL_BUS_TYPE_PCI )
+ dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
+ BufferList[i].virt_addr = NULL;
+ }
+ }
+ }
+
+} /* end of mgsl_free_frame_memory() */
+
+/* mgsl_free_dma_buffers()
+ *
+ * Free DMA buffers
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void mgsl_free_dma_buffers( struct mgsl_struct *info )
+{
+ mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
+ mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
+ mgsl_free_buffer_list_memory( info );
+
+} /* end of mgsl_free_dma_buffers() */
+
+
+/*
+ * mgsl_alloc_intermediate_rxbuffer_memory()
+ *
+ * Allocate a buffer large enough to hold max_frame_size. This buffer
+ * is used to pass an assembled frame to the line discipline.
+ *
+ * Arguments:
+ *
+ * info pointer to device instance data
+ *
+ * Return Value: 0 if success, otherwise -ENOMEM
+ */
+static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
+{
+ info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
+ if ( info->intermediate_rxbuffer == NULL )
+ return -ENOMEM;
+
+ return 0;
+
+} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
+
+/*
+ * mgsl_free_intermediate_rxbuffer_memory()
+ *
+ * Free the intermediate receive buffer allocated by
+ * mgsl_alloc_intermediate_rxbuffer_memory().
+ *
+ * Arguments:
+ *
+ * info pointer to device instance data
+ *
+ * Return Value: None
+ */
+static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
+{
+ kfree(info->intermediate_rxbuffer);
+ info->intermediate_rxbuffer = NULL;
+
+} /* end of mgsl_free_intermediate_rxbuffer_memory() */
+
+/*
+ * mgsl_alloc_intermediate_txbuffer_memory()
+ *
+ * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
+ * This buffer is used to load transmit frames into the adapter's dma transfer
+ * buffers when there is sufficient space.
+ *
+ * Arguments:
+ *
+ * info pointer to device instance data
+ *
+ * Return Value: 0 if success, otherwise -ENOMEM
+ */
+static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
+{
+ int i;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk("%s %s(%d) allocating %d tx holding buffers\n",
+ info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
+
+ memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
+
+ for ( i=0; i<info->num_tx_holding_buffers; ++i) {
+ info->tx_holding_buffers[i].buffer =
+ kmalloc(info->max_frame_size, GFP_KERNEL);
+ if (info->tx_holding_buffers[i].buffer == NULL) {
+ for (--i; i >= 0; i--) {
+ kfree(info->tx_holding_buffers[i].buffer);
+ info->tx_holding_buffers[i].buffer = NULL;
+ }
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+
+} /* end of mgsl_alloc_intermediate_txbuffer_memory() */
+
+/*
+ * mgsl_free_intermediate_txbuffer_memory()
+ *
+ * Free the transmit holding buffers and reset the holding buffer
+ * indexes and count.
+ *
+ * Arguments:
+ *
+ * info pointer to device instance data
+ *
+ * Return Value: None
+ */
+static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
+{
+ int i;
+
+ for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
+ kfree(info->tx_holding_buffers[i].buffer);
+ info->tx_holding_buffers[i].buffer = NULL;
+ }
+
+ info->get_tx_holding_index = 0;
+ info->put_tx_holding_index = 0;
+ info->tx_holding_count = 0;
+
+} /* end of mgsl_free_intermediate_txbuffer_memory() */
+
+
+/*
+ * load_next_tx_holding_buffer()
+ *
+ * attempts to load the next buffered tx request into the
+ * tx dma buffers
+ *
+ * Arguments:
+ *
+ * info pointer to device instance data
+ *
+ * Return Value: true if next buffered tx request loaded
+ * into adapter's tx dma buffer,
+ * false otherwise
+ */
+static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
+{
+ bool ret = false;
+
+ if ( info->tx_holding_count ) {
+ /* determine if we have enough tx dma buffers
+ * to accommodate the next tx frame
+ */
+ struct tx_holding_buffer *ptx =
+ &info->tx_holding_buffers[info->get_tx_holding_index];
+ int num_free = num_free_tx_dma_buffers(info);
+ int num_needed = ptx->buffer_size / DMABUFFERSIZE;
+ if ( ptx->buffer_size % DMABUFFERSIZE )
+ ++num_needed;
+
+ if (num_needed <= num_free) {
+ info->xmit_cnt = ptx->buffer_size;
+ mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
+
+ --info->tx_holding_count;
+ if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
+ info->get_tx_holding_index=0;
+
+ /* restart transmit timer */
+ mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
+
+ ret = true;
+ }
+ }
+
+ return ret;
+}
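+
+/*
+ * Example (illustrative, assuming DMABUFFERSIZE is 4096): a 5000 byte
+ * held frame gives num_needed = 5000/4096 = 1 plus 1 for the 904 byte
+ * remainder, so it is loaded only once at least 2 transmit DMA buffers
+ * are free; otherwise it stays queued until tx buffers are released.
+ */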
+
+/*
+ * save_tx_buffer_request()
+ *
+ * attempt to store transmit frame request for later transmission
+ *
+ * Arguments:
+ *
+ * info pointer to device instance data
+ * Buffer pointer to buffer containing frame to load
+ * BufferSize size in bytes of frame in Buffer
+ *
+ * Return Value: 1 if able to store, 0 otherwise
+ */
+static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
+{
+ struct tx_holding_buffer *ptx;
+
+ if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
+ return 0; /* all buffers in use */
+ }
+
+ ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
+ ptx->buffer_size = BufferSize;
+ memcpy( ptx->buffer, Buffer, BufferSize);
+
+ ++info->tx_holding_count;
+ if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
+ info->put_tx_holding_index=0;
+
+ return 1;
+}
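+
+/*
+ * Together with load_next_tx_holding_buffer() above, the holding buffers
+ * form a simple ring: save_tx_buffer_request() copies a frame in at
+ * put_tx_holding_index, load_next_tx_holding_buffer() drains from
+ * get_tx_holding_index, and tx_holding_count tracks the entries in use
+ * (at most num_tx_holding_buffers).
+ */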
+
+static int mgsl_claim_resources(struct mgsl_struct *info)
+{
+ if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
+ printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->io_base);
+ return -ENODEV;
+ }
+ info->io_addr_requested = true;
+
+ if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
+ info->device_name, info ) < 0 ) {
+ printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
+ __FILE__,__LINE__,info->device_name, info->irq_level );
+ goto errout;
+ }
+ info->irq_requested = true;
+
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
+ if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
+ printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->phys_memory_base);
+ goto errout;
+ }
+ info->shared_mem_requested = true;
+ if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
+ printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
+ goto errout;
+ }
+ info->lcr_mem_requested = true;
+
+ info->memory_base = ioremap_nocache(info->phys_memory_base,
+ 0x40000);
+ if (!info->memory_base) {
+ printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->phys_memory_base );
+ goto errout;
+ }
+
+ if ( !mgsl_memory_test(info) ) {
+ printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->phys_memory_base );
+ goto errout;
+ }
+
+ info->lcr_base = ioremap_nocache(info->phys_lcr_base,
+ PAGE_SIZE);
+ if (!info->lcr_base) {
+ printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
+ goto errout;
+ }
+ info->lcr_base += info->lcr_offset;
+
+ } else {
+ /* claim DMA channel */
+
+ if (request_dma(info->dma_level,info->device_name) < 0){
+ printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
+ __FILE__,__LINE__,info->device_name, info->dma_level );
+ mgsl_release_resources( info );
+ return -ENODEV;
+ }
+ info->dma_requested = true;
+
+ /* ISA adapter uses bus master DMA */
+ set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
+ enable_dma(info->dma_level);
+ }
+
+ if ( mgsl_allocate_dma_buffers(info) < 0 ) {
+ printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
+ __FILE__,__LINE__,info->device_name, info->dma_level );
+ goto errout;
+ }
+
+ return 0;
+errout:
+ mgsl_release_resources(info);
+ return -ENODEV;
+
+} /* end of mgsl_claim_resources() */
+
+static void mgsl_release_resources(struct mgsl_struct *info)
+{
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):mgsl_release_resources(%s) entry\n",
+ __FILE__,__LINE__,info->device_name );
+
+ if ( info->irq_requested ) {
+ free_irq(info->irq_level, info);
+ info->irq_requested = false;
+ }
+ if ( info->dma_requested ) {
+ disable_dma(info->dma_level);
+ free_dma(info->dma_level);
+ info->dma_requested = false;
+ }
+ mgsl_free_dma_buffers(info);
+ mgsl_free_intermediate_rxbuffer_memory(info);
+ mgsl_free_intermediate_txbuffer_memory(info);
+
+ if ( info->io_addr_requested ) {
+ release_region(info->io_base,info->io_addr_size);
+ info->io_addr_requested = false;
+ }
+ if ( info->shared_mem_requested ) {
+ release_mem_region(info->phys_memory_base,0x40000);
+ info->shared_mem_requested = false;
+ }
+ if ( info->lcr_mem_requested ) {
+ release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
+ info->lcr_mem_requested = false;
+ }
+ if (info->memory_base){
+ iounmap(info->memory_base);
+ info->memory_base = NULL;
+ }
+ if (info->lcr_base){
+ iounmap(info->lcr_base - info->lcr_offset);
+ info->lcr_base = NULL;
+ }
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):mgsl_release_resources(%s) exit\n",
+ __FILE__,__LINE__,info->device_name );
+
+} /* end of mgsl_release_resources() */
+
+/* mgsl_add_device()
+ *
+ * Add the specified device instance data structure to the
+ * global linked list of devices and increment the device count.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void mgsl_add_device( struct mgsl_struct *info )
+{
+ info->next_device = NULL;
+ info->line = mgsl_device_count;
+ sprintf(info->device_name,"ttySL%d",info->line);
+
+ if (info->line < MAX_TOTAL_DEVICES) {
+ if (maxframe[info->line])
+ info->max_frame_size = maxframe[info->line];
+
+ if (txdmabufs[info->line]) {
+ info->num_tx_dma_buffers = txdmabufs[info->line];
+ if (info->num_tx_dma_buffers < 1)
+ info->num_tx_dma_buffers = 1;
+ }
+
+ if (txholdbufs[info->line]) {
+ info->num_tx_holding_buffers = txholdbufs[info->line];
+ if (info->num_tx_holding_buffers < 1)
+ info->num_tx_holding_buffers = 1;
+ else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
+ info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
+ }
+ }
+
+ mgsl_device_count++;
+
+ if ( !mgsl_device_list )
+ mgsl_device_list = info;
+ else {
+ struct mgsl_struct *current_dev = mgsl_device_list;
+ while( current_dev->next_device )
+ current_dev = current_dev->next_device;
+ current_dev->next_device = info;
+ }
+
+ if ( info->max_frame_size < 4096 )
+ info->max_frame_size = 4096;
+ else if ( info->max_frame_size > 65535 )
+ info->max_frame_size = 65535;
+
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
+ printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
+ info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
+ info->phys_memory_base, info->phys_lcr_base,
+ info->max_frame_size );
+ } else {
+ printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
+ info->device_name, info->io_base, info->irq_level, info->dma_level,
+ info->max_frame_size );
+ }
+
+#if SYNCLINK_GENERIC_HDLC
+ hdlcdev_init(info);
+#endif
+
+} /* end of mgsl_add_device() */
+
+static const struct tty_port_operations mgsl_port_ops = {
+ .carrier_raised = carrier_raised,
+ .dtr_rts = dtr_rts,
+};
+
+
+/* mgsl_allocate_device()
+ *
+ * Allocate and initialize a device instance structure
+ *
+ * Arguments: none
+ * Return Value: pointer to mgsl_struct if success, otherwise NULL
+ */
+static struct mgsl_struct* mgsl_allocate_device(void)
+{
+ struct mgsl_struct *info;
+
+ info = kzalloc(sizeof(struct mgsl_struct),
+ GFP_KERNEL);
+
+ if (!info) {
+ printk("Error can't allocate device instance data\n");
+ } else {
+ tty_port_init(&info->port);
+ info->port.ops = &mgsl_port_ops;
+ info->magic = MGSL_MAGIC;
+ INIT_WORK(&info->task, mgsl_bh_handler);
+ info->max_frame_size = 4096;
+ info->port.close_delay = 5*HZ/10;
+ info->port.closing_wait = 30*HZ;
+ init_waitqueue_head(&info->status_event_wait_q);
+ init_waitqueue_head(&info->event_wait_q);
+ spin_lock_init(&info->irq_spinlock);
+ spin_lock_init(&info->netlock);
+ memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
+ info->idle_mode = HDLC_TXIDLE_FLAGS;
+ info->num_tx_dma_buffers = 1;
+ info->num_tx_holding_buffers = 0;
+ }
+
+ return info;
+
+} /* end of mgsl_allocate_device()*/
+
+static const struct tty_operations mgsl_ops = {
+ .open = mgsl_open,
+ .close = mgsl_close,
+ .write = mgsl_write,
+ .put_char = mgsl_put_char,
+ .flush_chars = mgsl_flush_chars,
+ .write_room = mgsl_write_room,
+ .chars_in_buffer = mgsl_chars_in_buffer,
+ .flush_buffer = mgsl_flush_buffer,
+ .ioctl = mgsl_ioctl,
+ .throttle = mgsl_throttle,
+ .unthrottle = mgsl_unthrottle,
+ .send_xchar = mgsl_send_xchar,
+ .break_ctl = mgsl_break,
+ .wait_until_sent = mgsl_wait_until_sent,
+ .set_termios = mgsl_set_termios,
+ .stop = mgsl_stop,
+ .start = mgsl_start,
+ .hangup = mgsl_hangup,
+ .tiocmget = tiocmget,
+ .tiocmset = tiocmset,
+ .get_icount = msgl_get_icount,
+ .proc_fops = &mgsl_proc_fops,
+};
+
+/*
+ * perform tty device initialization
+ */
+static int mgsl_init_tty(void)
+{
+ int rc;
+
+ serial_driver = alloc_tty_driver(128);
+ if (!serial_driver)
+ return -ENOMEM;
+
+ serial_driver->owner = THIS_MODULE;
+ serial_driver->driver_name = "synclink";
+ serial_driver->name = "ttySL";
+ serial_driver->major = ttymajor;
+ serial_driver->minor_start = 64;
+ serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ serial_driver->init_termios = tty_std_termios;
+ serial_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ serial_driver->init_termios.c_ispeed = 9600;
+ serial_driver->init_termios.c_ospeed = 9600;
+ serial_driver->flags = TTY_DRIVER_REAL_RAW;
+ tty_set_operations(serial_driver, &mgsl_ops);
+ if ((rc = tty_register_driver(serial_driver)) < 0) {
+ printk("%s(%d):Couldn't register serial driver\n",
+ __FILE__,__LINE__);
+ put_tty_driver(serial_driver);
+ serial_driver = NULL;
+ return rc;
+ }
+
+ printk("%s %s, tty major#%d\n",
+ driver_name, driver_version,
+ serial_driver->major);
+ return 0;
+}
+
+/* enumerate user specified ISA adapters
+ */
+static void mgsl_enum_isa_devices(void)
+{
+ struct mgsl_struct *info;
+ int i;
+
+ /* Check for user specified ISA devices */
+
+ for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
+ io[i], irq[i], dma[i] );
+
+ info = mgsl_allocate_device();
+ if ( !info ) {
+ /* error allocating device instance data */
+ if ( debug_level >= DEBUG_LEVEL_ERROR )
+ printk( "can't allocate device instance data.\n");
+ continue;
+ }
+
+ /* Copy user configuration info to device instance data */
+ info->io_base = (unsigned int)io[i];
+ info->irq_level = (unsigned int)irq[i];
+ info->irq_level = irq_canonicalize(info->irq_level);
+ info->dma_level = (unsigned int)dma[i];
+ info->bus_type = MGSL_BUS_TYPE_ISA;
+ info->io_addr_size = 16;
+ info->irq_flags = 0;
+
+ mgsl_add_device( info );
+ }
+}
+
+static void synclink_cleanup(void)
+{
+ int rc;
+ struct mgsl_struct *info;
+ struct mgsl_struct *tmp;
+
+ printk("Unloading %s: %s\n", driver_name, driver_version);
+
+ if (serial_driver) {
+ if ((rc = tty_unregister_driver(serial_driver)))
+ printk("%s(%d) failed to unregister tty driver err=%d\n",
+ __FILE__,__LINE__,rc);
+ put_tty_driver(serial_driver);
+ }
+
+ info = mgsl_device_list;
+ while(info) {
+#if SYNCLINK_GENERIC_HDLC
+ hdlcdev_exit(info);
+#endif
+ mgsl_release_resources(info);
+ tmp = info;
+ info = info->next_device;
+ kfree(tmp);
+ }
+
+ if (pci_registered)
+ pci_unregister_driver(&synclink_pci_driver);
+}
+
+static int __init synclink_init(void)
+{
+ int rc;
+
+ if (break_on_load) {
+ mgsl_get_text_ptr();
+ BREAKPOINT();
+ }
+
+ printk("%s %s\n", driver_name, driver_version);
+
+ mgsl_enum_isa_devices();
+ if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
+ printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
+ else
+ pci_registered = true;
+
+ if ((rc = mgsl_init_tty()) < 0)
+ goto error;
+
+ return 0;
+
+error:
+ synclink_cleanup();
+ return rc;
+}
+
+static void __exit synclink_exit(void)
+{
+ synclink_cleanup();
+}
+
+module_init(synclink_init);
+module_exit(synclink_exit);
+
+/*
+ * usc_RTCmd()
+ *
+ * Issue a USC Receive/Transmit command to the
+ * Channel Command/Address Register (CCAR).
+ *
+ * Notes:
+ *
+ * The command is encoded in the most significant 5 bits <15..11>
+ * of the CCAR value. Bits <10..7> of the CCAR must be preserved
+ * and Bits <6..0> must be written as zeros.
+ *
+ * Arguments:
+ *
+ * info pointer to device information structure
+ * Cmd command mask (use symbolic macros)
+ *
+ * Return Value:
+ *
+ * None
+ */
+static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
+{
+ /* output command to CCAR in bits <15..11> */
+ /* preserve bits <10..7>, bits <6..0> must be zero */
+
+ outw( Cmd + info->loopback_bits, info->io_base + CCAR );
+
+ /* Read to flush write to CCAR */
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI )
+ inw( info->io_base + CCAR );
+
+} /* end of usc_RTCmd() */
+
+/*
+ * usc_DmaCmd()
+ *
+ * Issue a DMA command to the DMA Command/Address Register (DCAR).
+ *
+ * Arguments:
+ *
+ * info pointer to device information structure
+ * Cmd DMA command mask (usc_DmaCmd_XX Macros)
+ *
+ * Return Value:
+ *
+ * None
+ */
+static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
+{
+ /* write command mask to DCAR */
+ outw( Cmd + info->mbre_bit, info->io_base );
+
+ /* Read to flush write to DCAR */
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI )
+ inw( info->io_base );
+
+} /* end of usc_DmaCmd() */
+
+/*
+ * usc_OutDmaReg()
+ *
+ * Write a 16-bit value to a USC DMA register
+ *
+ * Arguments:
+ *
+ * info pointer to device info structure
+ * RegAddr register address (number) for write
+ * RegValue 16-bit value to write to register
+ *
+ * Return Value:
+ *
+ * None
+ *
+ */
+static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
+{
+ /* Note: The DCAR is located at the adapter base address */
+ /* Note: must preserve state of BIT8 in DCAR */
+
+ outw( RegAddr + info->mbre_bit, info->io_base );
+ outw( RegValue, info->io_base );
+
+ /* Read to flush write to DCAR */
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI )
+ inw( info->io_base );
+
+} /* end of usc_OutDmaReg() */
+
+/*
+ * usc_InDmaReg()
+ *
+ * Read a 16-bit value from a DMA register
+ *
+ * Arguments:
+ *
+ * info pointer to device info structure
+ * RegAddr register address (number) to read from
+ *
+ * Return Value:
+ *
+ * The 16-bit value read from register
+ *
+ */
+static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
+{
+ /* Note: The DCAR is located at the adapter base address */
+ /* Note: must preserve state of BIT8 in DCAR */
+
+ outw( RegAddr + info->mbre_bit, info->io_base );
+ return inw( info->io_base );
+
+} /* end of usc_InDmaReg() */
+
+/*
+ *
+ * usc_OutReg()
+ *
+ * Write a 16-bit value to a USC serial channel register
+ *
+ * Arguments:
+ *
+ * info pointer to device info structure
+ * RegAddr register address (number) to write to
+ * RegValue 16-bit value to write to register
+ *
+ * Return Value:
+ *
+ * None
+ *
+ */
+static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
+{
+ outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
+ outw( RegValue, info->io_base + CCAR );
+
+ /* Read to flush write to CCAR */
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI )
+ inw( info->io_base + CCAR );
+
+} /* end of usc_OutReg() */
+
+/*
+ * usc_InReg()
+ *
+ * Reads a 16-bit value from a USC serial channel register
+ *
+ * Arguments:
+ *
+ * info pointer to device extension
+ * RegAddr register address (number) to read from
+ *
+ * Return Value:
+ *
+ * 16-bit value read from register
+ */
+static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
+{
+ outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
+ return inw( info->io_base + CCAR );
+
+} /* end of usc_InReg() */
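+
+/*
+ * These register accessors are commonly paired for read-modify-write
+ * updates of individual bits, for example (taken from later in this
+ * driver):
+ *
+ *   usc_OutReg(info, SICR, (u16)(usc_InReg(info, SICR) | BIT3));
+ *
+ * Each access is a two-step address/data sequence on CCAR, so callers
+ * typically serialize register access with info->irq_spinlock (as
+ * line_info() does above).
+ */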
+
+/* usc_set_sdlc_mode()
+ *
+ * Set up the adapter for SDLC DMA communications.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: NONE
+ */
+static void usc_set_sdlc_mode( struct mgsl_struct *info )
+{
+ u16 RegValue;
+ bool PreSL1660;
+
+ /*
+ * determine if the IUSC on the adapter is pre-SL1660. If
+ * not, take advantage of the UnderWait feature of more
+ * modern chips. If an underrun occurs and this bit is set,
+ * the transmitter will idle the programmed idle pattern
+ * until the driver has time to service the underrun. Otherwise,
+ * the dma controller may get the cycles previously requested
+ * and begin transmitting queued tx data.
+ */
+ usc_OutReg(info,TMCR,0x1f);
+ RegValue=usc_InReg(info,TMDR);
+ PreSL1660 = (RegValue == IUSC_PRE_SL1660);
+
+ if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
+ {
+ /*
+ ** Channel Mode Register (CMR)
+ **
+ ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
+ ** <13> 0 0 = Transmit Disabled (initially)
+ ** <12> 0 1 = Consecutive Idles share common 0
+ ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
+ ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
+ ** <3..0> 0110 Receiver Mode = HDLC/SDLC
+ **
+ ** 1000 1110 0000 0110 = 0x8e06
+ */
+ RegValue = 0x8e06;
+
+ /*--------------------------------------------------
+ * ignore user options for UnderRun Actions and
+ * preambles
+ *--------------------------------------------------*/
+ }
+ else
+ {
+ /* Channel mode Register (CMR)
+ *
+ * <15..14> 00 Tx Sub modes, Underrun Action
+ * <13> 0 1 = Send Preamble before opening flag
+ * <12> 0 1 = Consecutive Idles share common 0
+ * <11..8> 0110 Transmitter mode = HDLC/SDLC
+ * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
+ * <3..0> 0110 Receiver mode = HDLC/SDLC
+ *
+ * 0000 0110 0000 0110 = 0x0606
+ */
+ if (info->params.mode == MGSL_MODE_RAW) {
+ RegValue = 0x0001; /* Set Receive mode = external sync */
+
+ usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
+ (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
+
+ /*
+ * TxSubMode:
+ * CMR <15> 0 Don't send CRC on Tx Underrun
+ * CMR <14> x undefined
+ * CMR <13> 0 Send preamble before opening sync
+ * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
+ *
+ * TxMode:
+ * CMR <11..8> 0100 MonoSync
+ *
+ * 0x00 0100 xxxx xxxx 04xx
+ */
+ RegValue |= 0x0400;
+ }
+ else {
+
+ RegValue = 0x0606;
+
+ if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
+ RegValue |= BIT14;
+ else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
+ RegValue |= BIT15;
+ else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
+ RegValue |= BIT15 + BIT14;
+ }
+
+ if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
+ RegValue |= BIT13;
+ }
+
+ if ( info->params.mode == MGSL_MODE_HDLC &&
+ (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
+ RegValue |= BIT12;
+
+ if ( info->params.addr_filter != 0xff )
+ {
+ /* set up receive address filtering */
+ usc_OutReg( info, RSR, info->params.addr_filter );
+ RegValue |= BIT4;
+ }
+
+ usc_OutReg( info, CMR, RegValue );
+ info->cmr_value = RegValue;
+
+ /* Receiver mode Register (RMR)
+ *
+ * <15..13> 000 encoding
+ * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
+ * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
+ * <9> 0 1 = Include Receive chars in CRC
+ * <8> 1 1 = Use Abort/PE bit as abort indicator
+ * <7..6> 00 Even parity
+ * <5> 0 parity disabled
+ * <4..2> 000 Receive Char Length = 8 bits
+ * <1..0> 00 Disable Receiver
+ *
+ * 0000 0101 0000 0000 = 0x0500
+ */
+
+ RegValue = 0x0500;
+
+ switch ( info->params.encoding ) {
+ case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
+ case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
+ case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
+ case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
+ case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
+ case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
+ case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
+ }
+
+ if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
+ RegValue |= BIT9;
+ else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
+ RegValue |= ( BIT12 | BIT10 | BIT9 );
+
+ usc_OutReg( info, RMR, RegValue );
+
+ /* Set the Receive count Limit Register (RCLR) to 0xffff. */
+ /* When an opening flag of an SDLC frame is recognized the */
+ /* Receive Character count (RCC) is loaded with the value in */
+ /* RCLR. The RCC is decremented for each received byte. The */
+ /* value of RCC is stored after the closing flag of the frame */
+ /* allowing the frame size to be computed. */
+
+ usc_OutReg( info, RCLR, RCLRVALUE );
+
+ usc_RCmd( info, RCmd_SelectRicrdma_level );
+
+ /* Receive Interrupt Control Register (RICR)
+ *
+ * <15..8> ? RxFIFO DMA Request Level
+ * <7> 0 Exited Hunt IA (Interrupt Arm)
+ * <6> 0 Idle Received IA
+ * <5> 0 Break/Abort IA
+ * <4> 0 Rx Bound IA
+ * <3> 1 Queued status reflects oldest 2 bytes in FIFO
+ * <2> 0 Abort/PE IA
+ * <1> 1 Rx Overrun IA
+ * <0> 0 Select TC0 value for readback
+ *
+ * 0000 0000 0000 1010 = 0x000a
+ */
+
+ /* Carry over the Exit Hunt and Idle Received bits */
+ /* in case they have been armed by usc_ArmEvents. */
+
+ RegValue = usc_InReg( info, RICR ) & 0xc0;
+
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI )
+ usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
+ else
+ usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
+
+ /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
+
+ usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
+ usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
+
+ /* Transmit mode Register (TMR)
+ *
+ * <15..13> 000 encoding
+ * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
+ * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
+ * <9> 0 1 = Tx CRC Enabled
+ * <8> 0 1 = Append CRC to end of transmit frame
+ * <7..6> 00 Transmit parity Even
+ * <5> 0 Transmit parity Disabled
+ * <4..2> 000 Tx Char Length = 8 bits
+ * <1..0> 00 Disable Transmitter
+ *
+ * 0000 0100 0000 0000 = 0x0400
+ */
+
+ RegValue = 0x0400;
+
+ switch ( info->params.encoding ) {
+ case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
+ case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
+ case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
+ case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
+ case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
+ case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
+ case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
+ }
+
+ if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
+ RegValue |= BIT9 + BIT8;
+ else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
+ RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
+
+ usc_OutReg( info, TMR, RegValue );
+
+ usc_set_txidle( info );
+
+
+ usc_TCmd( info, TCmd_SelectTicrdma_level );
+
+ /* Transmit Interrupt Control Register (TICR)
+ *
+ * <15..8> ? Transmit FIFO DMA Level
+ * <7> 0 Present IA (Interrupt Arm)
+ * <6> 0 Idle Sent IA
+ * <5> 1 Abort Sent IA
+ * <4> 1 EOF/EOM Sent IA
+ * <3> 0 CRC Sent IA
+ * <2> 1 1 = Wait for SW Trigger to Start Frame
+ * <1> 1 Tx Underrun IA
+ * <0> 0 TC0 constant on read back
+ *
+ * 0000 0000 0011 0110 = 0x0036
+ */
+
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI )
+ usc_OutReg( info, TICR, 0x0736 );
+ else
+ usc_OutReg( info, TICR, 0x1436 );
+
+ usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
+ usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
+
+ /*
+ ** Transmit Command/Status Register (TCSR)
+ **
+ ** <15..12> 0000 TCmd
+ ** <11> 0/1 UnderWait
+ ** <10..08> 000 TxIdle
+ ** <7> x PreSent
+ ** <6> x IdleSent
+ ** <5> x AbortSent
+ ** <4> x EOF/EOM Sent
+ ** <3> x CRC Sent
+ ** <2> x All Sent
+ ** <1> x TxUnder
+ ** <0> x TxEmpty
+ **
+ ** 0000 0000 0000 0000 = 0x0000
+ */
+ info->tcsr_value = 0;
+
+ if ( !PreSL1660 )
+ info->tcsr_value |= TCSR_UNDERWAIT;
+
+ usc_OutReg( info, TCSR, info->tcsr_value );
+
+ /* Clock mode Control Register (CMCR)
+ *
+ * <15..14> 00 counter 1 Source = Disabled
+ * <13..12> 00 counter 0 Source = Disabled
+ * <11..10> 11 BRG1 Input is TxC Pin
+ * <9..8> 11 BRG0 Input is TxC Pin
+ * <7..6> 01 DPLL Input is BRG1 Output
+ * <5..3> XXX TxCLK comes from Port 0
+ * <2..0> XXX RxCLK comes from Port 1
+ *
+ * 0000 1111 0111 0111 = 0x0f77
+ */
+
+ RegValue = 0x0f40;
+
+ if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
+ RegValue |= 0x0003; /* RxCLK from DPLL */
+ else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
+ RegValue |= 0x0004; /* RxCLK from BRG0 */
+ else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
+ RegValue |= 0x0006; /* RxCLK from TXC Input */
+ else
+ RegValue |= 0x0007; /* RxCLK from Port1 */
+
+ if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
+ RegValue |= 0x0018; /* TxCLK from DPLL */
+ else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
+ RegValue |= 0x0020; /* TxCLK from BRG0 */
+ else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
+ RegValue |= 0x0038; /* TxCLK from RxC Input */
+ else
+ RegValue |= 0x0030; /* TxCLK from Port0 */
+
+ usc_OutReg( info, CMCR, RegValue );
+
+
+ /* Hardware Configuration Register (HCR)
+ *
+ * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
+ * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div
+ * <12> 0 CVOK:0=report code violation in biphase
+ * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
+ * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
+ * <7..6> 00 reserved
+ * <5> 0 BRG1 mode:0=continuous,1=single cycle
+ * <4> X BRG1 Enable
+ * <3..2> 00 reserved
+ * <1> 0 BRG0 mode:0=continuous,1=single cycle
+ * <0> 0 BRG0 Enable
+ */
+
+ RegValue = 0x0000;
+
+ if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
+ u32 XtalSpeed;
+ u32 DpllDivisor;
+ u16 Tc;
+
+ /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
+ /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
+
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI )
+ XtalSpeed = 11059200;
+ else
+ XtalSpeed = 14745600;
+
+ if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
+ DpllDivisor = 16;
+ RegValue |= BIT10;
+ }
+ else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
+ DpllDivisor = 8;
+ RegValue |= BIT11;
+ }
+ else
+ DpllDivisor = 32;
+
+ /* Tc = (Xtal/Speed) - 1 */
+ /* If twice the remainder of (Xtal/Speed) is greater than Speed */
+ /* then rounding up gives a more precise time constant. Instead */
+ /* of rounding up and then subtracting 1 we just don't subtract */
+ /* the one in this case. */
+
+ /*--------------------------------------------------
+ * ejz: for DPLL mode, application should use the
+ * same clock speed as the partner system, even
+ * though clocking is derived from the input RxData.
+ * In case the user uses a 0 for the clock speed,
+ * default Tc to 0xffff (u16 -1) and don't try to divide by
+ * zero
+ *--------------------------------------------------*/
+ if ( info->params.clock_speed )
+ {
+ Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
+ if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
+ / info->params.clock_speed) )
+ Tc--;
+ }
+ else
+ Tc = -1;
+
+
+ /* Write 16-bit Time Constant for BRG1 */
+ usc_OutReg( info, TC1R, Tc );
+
+ RegValue |= BIT4; /* enable BRG1 */
+
+ switch ( info->params.encoding ) {
+ case HDLC_ENCODING_NRZ:
+ case HDLC_ENCODING_NRZB:
+ case HDLC_ENCODING_NRZI_MARK:
+ case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
+ case HDLC_ENCODING_BIPHASE_MARK:
+ case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
+ case HDLC_ENCODING_BIPHASE_LEVEL:
+ case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
+ }
+ }
+
+ usc_OutReg( info, HCR, RegValue );
+
+
+ /* Channel Control/status Register (CCSR)
+ *
+ * <15> X RCC FIFO Overflow status (RO)
+ * <14> X RCC FIFO Not Empty status (RO)
+ * <13> 0 1 = Clear RCC FIFO (WO)
+ * <12> X DPLL Sync (RW)
+ * <11> X DPLL 2 Missed Clocks status (RO)
+ * <10> X DPLL 1 Missed Clock status (RO)
+ * <9..8> 00 DPLL Resync on rising and falling edges (RW)
+ * <7> X SDLC Loop On status (RO)
+ * <6> X SDLC Loop Send status (RO)
+ * <5> 1 Bypass counters for TxClk and RxClk (RW)
+ * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
+ * <1..0> 00 reserved
+ *
+ * 0000 0000 0010 0000 = 0x0020
+ */
+
+ usc_OutReg( info, CCSR, 0x1020 );
+
+
+ if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
+ usc_OutReg( info, SICR,
+ (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
+ }
+
+
+ /* enable Master Interrupt Enable bit (MIE) */
+ usc_EnableMasterIrqBit( info );
+
+ usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
+ TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
+
+ /* arm RCC underflow interrupt */
+ usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
+ usc_EnableInterrupts(info, MISC);
+
+ info->mbre_bit = 0;
+ outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
+ usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
+ info->mbre_bit = BIT8;
+ outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
+
+ if (info->bus_type == MGSL_BUS_TYPE_ISA) {
+ /* Enable DMAEN (Port 7, Bit 14) */
+ /* This connects the DMA request signal to the ISA bus */
+ usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
+ }
+
+ /* DMA Control Register (DCR)
+ *
+ * <15..14> 10 Priority mode = Alternating Tx/Rx
+ * 01 Rx has priority
+ * 00 Tx has priority
+ *
+ * <13> 1 Enable Priority Preempt per DCR<15..14>
+ * (WARNING DCR<11..10> must be 00 when this is 1)
+ * 0 Choose activate channel per DCR<11..10>
+ *
+ * <12> 0 Little Endian for Array/List
+ * <11..10> 00 Both Channels can use each bus grant
+ * <9..6> 0000 reserved
+ * <5> 0 7 CLK - Minimum Bus Re-request Interval
+ * <4> 0 1 = drive D/C and S/D pins
+ * <3> 1 1 = Add one wait state to all DMA cycles.
+ * <2> 0 1 = Strobe /UAS on every transfer.
+ * <1..0> 11 Addr incrementing only affects LS24 bits
+ *
+ * 0110 0000 0000 1011 = 0x600b
+ */
+
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
+ /* PCI adapter does not need DMA wait state */
+ usc_OutDmaReg( info, DCR, 0xa00b );
+ }
+ else
+ usc_OutDmaReg( info, DCR, 0x800b );
+
+
+ /* Receive DMA mode Register (RDMR)
+ *
+ * <15..14> 11 DMA mode = Linked List Buffer mode
+ * <13> 1 RSBinA/L = store Rx status Block in Array/List entry
+ * <12> 1 Clear count of List Entry after fetching
+ * <11..10> 00 Address mode = Increment
+ * <9> 1 Terminate Buffer on RxBound
+ * <8> 0 Bus Width = 16bits
+ * <7..0> ? status Bits (write as 0s)
+ *
+ * 1111 0010 0000 0000 = 0xf200
+ */
+
+ usc_OutDmaReg( info, RDMR, 0xf200 );
+
+
+ /* Transmit DMA mode Register (TDMR)
+ *
+ * <15..14> 11 DMA mode = Linked List Buffer mode
+ * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
+ * <12> 1 Clear count of List Entry after fetching
+ * <11..10> 00 Address mode = Increment
+ * <9> 1 Terminate Buffer on end of frame
+ * <8> 0 Bus Width = 16bits
+ * <7..0> ? status Bits (Read Only so write as 0)
+ *
+ * 1111 0010 0000 0000 = 0xf200
+ */
+
+ usc_OutDmaReg( info, TDMR, 0xf200 );
+
+
+ /* DMA Interrupt Control Register (DICR)
+ *
+ * <15> 1 DMA Interrupt Enable
+ * <14> 0 1 = Disable IEO from USC
+ * <13> 0 1 = Don't provide vector during IntAck
+ * <12> 1 1 = Include status in Vector
+ * <10..2> 0 reserved, Must be 0s
+ * <1> 0 1 = Rx DMA Interrupt Enabled
+ * <0> 0 1 = Tx DMA Interrupt Enabled
+ *
+ * 1001 0000 0000 0000 = 0x9000
+ */
+
+ usc_OutDmaReg( info, DICR, 0x9000 );
+
+ usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
+ usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
+ usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
+
+ /* Channel Control Register (CCR)
+ *
+ * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
+ * <13> 0 Trigger Tx on SW Command Disabled
+ * <12> 0 Flag Preamble Disabled
+ * <11..10> 00 Preamble Length
+ * <9..8> 00 Preamble Pattern
+ * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
+ * <5> 0 Trigger Rx on SW Command Disabled
+ * <4..0> 0 reserved
+ *
+ * 1000 0000 1000 0000 = 0x8080
+ */
+
+ RegValue = 0x8080;
+
+ switch ( info->params.preamble_length ) {
+ case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
+ case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
+ case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
+ }
+
+ switch ( info->params.preamble ) {
+ case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
+ case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
+ case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
+ case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
+ }
+
+ usc_OutReg( info, CCR, RegValue );
+
+
+ /*
+ * Burst/Dwell Control Register
+ *
+ * <15..8> 0x20 Maximum number of transfers per bus grant
+ * <7..0> 0x00 Maximum number of clock cycles per bus grant
+ */
+
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
+ /* don't limit bus occupancy on PCI adapter */
+ usc_OutDmaReg( info, BDCR, 0x0000 );
+ }
+ else
+ usc_OutDmaReg( info, BDCR, 0x2000 );
+
+ usc_stop_transmitter(info);
+ usc_stop_receiver(info);
+
+} /* end of usc_set_sdlc_mode() */
+
+/* usc_enable_loopback()
+ *
+ * Set the 16C32 for internal loopback mode.
+ * The TxCLK and RxCLK signals are generated from the BRG0 and
+ * the TxD is looped back to the RxD internally.
+ *
+ * Arguments: info pointer to device instance data
+ * enable 1 = enable loopback, 0 = disable
+ * Return Value: None
+ */
+static void usc_enable_loopback(struct mgsl_struct *info, int enable)
+{
+ if (enable) {
+ /* blank external TXD output */
+ usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
+
+ /* Clock mode Control Register (CMCR)
+ *
+ * <15..14> 00 counter 1 Disabled
+ * <13..12> 00 counter 0 Disabled
+ * <11..10> 11 BRG1 Input is TxC Pin
+ * <9..8> 11 BRG0 Input is TxC Pin
+ * <7..6> 01 DPLL Input is BRG1 Output
+ * <5..3> 100 TxCLK comes from BRG0
+ * <2..0> 100 RxCLK comes from BRG0
+ *
+ * 0000 1111 0110 0100 = 0x0f64
+ */
+
+ usc_OutReg( info, CMCR, 0x0f64 );
+
+ /* Write 16-bit Time Constant for BRG0 */
+ /* use clock speed if available, otherwise use 8 for diagnostics */
+ if (info->params.clock_speed) {
+ if (info->bus_type == MGSL_BUS_TYPE_PCI)
+ usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
+ else
+ usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
+ } else
+ usc_OutReg(info, TC0R, (u16)8);
+
+ /* Hardware Configuration Register (HCR): clear Bit 1 (BRG0
+ mode = Continuous); set Bit 0 to enable BRG0. */
+ usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
+
+ /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
+ usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
+
+ /* set Internal Data loopback mode */
+ info->loopback_bits = 0x300;
+ outw( 0x0300, info->io_base + CCAR );
+ } else {
+ /* enable external TXD output */
+ usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
+
+ /* clear Internal Data loopback mode */
+ info->loopback_bits = 0;
+ outw( 0,info->io_base + CCAR );
+ }
+
+} /* end of usc_enable_loopback() */
+
+/* usc_enable_aux_clock()
+ *
+ * Enable the AUX clock output at the specified frequency.
+ *
+ * Arguments:
+ *
+ * info pointer to device extension
+ * data_rate data rate of clock in bits per second
+ * A data rate of 0 disables the AUX clock.
+ *
+ * Return Value: None
+ */
+static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
+{
+ u32 XtalSpeed;
+ u16 Tc;
+
+ if ( data_rate ) {
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI )
+ XtalSpeed = 11059200;
+ else
+ XtalSpeed = 14745600;
+
+
+ /* Tc = (Xtal/Speed) - 1 */
+ /* If twice the remainder of (Xtal/Speed) is greater than Speed */
+ /* then rounding up gives a more precise time constant. Instead */
+ /* of rounding up and then subtracting 1 we just don't subtract */
+ /* the one in this case. */
+
+
+ Tc = (u16)(XtalSpeed/data_rate);
+ if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
+ Tc--;
+
+ /* Write 16-bit Time Constant for BRG0 */
+ usc_OutReg( info, TC0R, Tc );
+
+ /*
+ * Hardware Configuration Register (HCR)
+ * Clear Bit 1, BRG0 mode = Continuous
+ * Set Bit 0 to enable BRG0.
+ */
+
+ usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
+
+ /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
+ usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
+ } else {
+ /* data rate == 0 so turn off BRG0 */
+ usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
+ }
+
+} /* end of usc_enable_aux_clock() */
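+
+/*
+ * Worked examples of the time constant math above (illustrative only),
+ * using the ISA crystal of 14745600 Hz:
+ *
+ *   data_rate = 9600:   14745600/9600  = 1536 exactly, remainder 0, so
+ *                       Tc is decremented to 1535 (Tc = Xtal/rate - 1).
+ *   data_rate = 250000: 14745600/250000 = 58 remainder 245600; twice the
+ *                       remainder exceeds the rate, so Tc stays at 58,
+ *                       i.e. the quotient is effectively rounded up first.
+ */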
+
+/*
+ *
+ * usc_process_rxoverrun_sync()
+ *
+ * This function processes a receive overrun by resetting the
+ * receive DMA buffers and issuing a Purge Rx FIFO command
+ * to allow the receiver to continue receiving.
+ *
+ * Arguments:
+ *
+ * info pointer to device extension
+ *
+ * Return Value: None
+ */
+static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
+{
+ int start_index;
+ int end_index;
+ int frame_start_index;
+ bool start_of_frame_found = false;
+ bool end_of_frame_found = false;
+ bool reprogram_dma = false;
+
+ DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
+ u32 phys_addr;
+
+ usc_DmaCmd( info, DmaCmd_PauseRxChannel );
+ usc_RCmd( info, RCmd_EnterHuntmode );
+ usc_RTCmd( info, RTCmd_PurgeRxFifo );
+
+ /* CurrentRxBuffer points to the 1st buffer of the next */
+ /* possibly available receive frame. */
+
+ frame_start_index = start_index = end_index = info->current_rx_buffer;
+
+ /* Search for an unfinished string of buffers. This means */
+ /* that a receive frame started (at least one buffer with */
+ /* count set to zero) but there is no terminating buffer */
+ /* (status set to non-zero). */
+
+ while( !buffer_list[end_index].count )
+ {
+ /* Count field has been reset to zero by 16C32. */
+ /* This buffer is currently in use. */
+
+ if ( !start_of_frame_found )
+ {
+ start_of_frame_found = true;
+ frame_start_index = end_index;
+ end_of_frame_found = false;
+ }
+
+ if ( buffer_list[end_index].status )
+ {
+ /* Status field has been set by 16C32. */
+ /* This is the last buffer of a received frame. */
+
+ /* We want to leave the buffers for this frame intact. */
+ /* Move on to next possible frame. */
+
+ start_of_frame_found = false;
+ end_of_frame_found = true;
+ }
+
+ /* advance to next buffer entry in linked list */
+ end_index++;
+ if ( end_index == info->rx_buffer_count )
+ end_index = 0;
+
+ if ( start_index == end_index )
+ {
+ /* The entire list has been searched with all Counts == 0 and */
+ /* all Status == 0. The receive buffers are */
+ /* completely screwed, reset all receive buffers! */
+ mgsl_reset_rx_dma_buffers( info );
+ frame_start_index = 0;
+ start_of_frame_found = false;
+ reprogram_dma = true;
+ break;
+ }
+ }
+
+ if ( start_of_frame_found && !end_of_frame_found )
+ {
+ /* There is an unfinished string of receive DMA buffers */
+ /* as a result of the receiver overrun. */
+
+ /* Reset the buffers for the unfinished frame */
+ /* and reprogram the receive DMA controller to start */
+ /* at the 1st buffer of unfinished frame. */
+
+ start_index = frame_start_index;
+
+ do
+ {
+ *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
+
+ /* Adjust index for wrap around. */
+ if ( start_index == info->rx_buffer_count )
+ start_index = 0;
+
+ } while( start_index != end_index );
+
+ reprogram_dma = true;
+ }
+
+ if ( reprogram_dma )
+ {
+ usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
+ usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
+ usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
+
+ usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
+
+ /* This empties the receive FIFO and loads the RCC with RCLR */
+ usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
+
+ /* program 16C32 with physical address of 1st DMA buffer entry */
+ phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
+ usc_OutDmaReg( info, NRARL, (u16)phys_addr );
+ usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
+
+ usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
+ usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
+ usc_EnableInterrupts( info, RECEIVE_STATUS );
+
+ /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
+ /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
+
+ usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
+ usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
+ usc_DmaCmd( info, DmaCmd_InitRxChannel );
+ if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
+ usc_EnableReceiver(info,ENABLE_AUTO_DCD);
+ else
+ usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
+ }
+ else
+ {
+ /* This empties the receive FIFO and loads the RCC with RCLR */
+ usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
+ usc_RTCmd( info, RTCmd_PurgeRxFifo );
+ }
+
+} /* end of usc_process_rxoverrun_sync() */
+
+/* usc_stop_receiver()
+ *
+ * Disable USC receiver
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void usc_stop_receiver( struct mgsl_struct *info )
+{
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):usc_stop_receiver(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ /* Disable receive DMA channel. */
+ /* This also disables receive DMA channel interrupts */
+ usc_DmaCmd( info, DmaCmd_ResetRxChannel );
+
+ usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
+ usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
+ usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
+
+ usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
+
+ /* This empties the receive FIFO and loads the RCC with RCLR */
+ usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
+ usc_RTCmd( info, RTCmd_PurgeRxFifo );
+
+ info->rx_enabled = false;
+ info->rx_overflow = false;
+ info->rx_rcc_underrun = false;
+
+} /* end of usc_stop_receiver() */
+
+/* usc_start_receiver()
+ *
+ * Enable the USC receiver
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void usc_start_receiver( struct mgsl_struct *info )
+{
+ u32 phys_addr;
+
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):usc_start_receiver(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ mgsl_reset_rx_dma_buffers( info );
+ usc_stop_receiver( info );
+
+ usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
+ usc_RTCmd( info, RTCmd_PurgeRxFifo );
+
+ if ( info->params.mode == MGSL_MODE_HDLC ||
+ info->params.mode == MGSL_MODE_RAW ) {
+ /* DMA mode Transfers */
+ /* Program the DMA controller. */
+ /* Enable the DMA controller end of buffer interrupt. */
+
+ /* program 16C32 with physical address of 1st DMA buffer entry */
+ phys_addr = info->rx_buffer_list[0].phys_entry;
+ usc_OutDmaReg( info, NRARL, (u16)phys_addr );
+ usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
+
+ usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
+ usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
+ usc_EnableInterrupts( info, RECEIVE_STATUS );
+
+ /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
+ /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
+
+ usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
+ usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
+ usc_DmaCmd( info, DmaCmd_InitRxChannel );
+ if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
+ usc_EnableReceiver(info,ENABLE_AUTO_DCD);
+ else
+ usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
+ } else {
+ usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
+ usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
+ usc_EnableInterrupts(info, RECEIVE_DATA);
+
+ usc_RTCmd( info, RTCmd_PurgeRxFifo );
+ usc_RCmd( info, RCmd_EnterHuntmode );
+
+ usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
+ }
+
+ usc_OutReg( info, CCSR, 0x1020 );
+
+ info->rx_enabled = true;
+
+} /* end of usc_start_receiver() */
+
+/* usc_start_transmitter()
+ *
+ * Enable the USC transmitter and send a transmit frame if
+ * one is loaded in the DMA buffers.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void usc_start_transmitter( struct mgsl_struct *info )
+{
+ u32 phys_addr;
+ unsigned int FrameSize;
+
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):usc_start_transmitter(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if ( info->xmit_cnt ) {
+
+ /* If auto RTS enabled and RTS is inactive, then assert */
+ /* RTS and set a flag indicating that the driver should */
+ /* negate RTS when the transmission completes. */
+
+ info->drop_rts_on_tx_done = false;
+
+ if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
+ usc_get_serial_signals( info );
+ if ( !(info->serial_signals & SerialSignal_RTS) ) {
+ info->serial_signals |= SerialSignal_RTS;
+ usc_set_serial_signals( info );
+ info->drop_rts_on_tx_done = true;
+ }
+ }
+
+
+ if ( info->params.mode == MGSL_MODE_ASYNC ) {
+ if ( !info->tx_active ) {
+ usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
+ usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
+ usc_EnableInterrupts(info, TRANSMIT_DATA);
+ usc_load_txfifo(info);
+ }
+ } else {
+ /* Disable transmit DMA controller while programming. */
+ usc_DmaCmd( info, DmaCmd_ResetTxChannel );
+
+ /* Transmit DMA buffer is loaded, so program USC */
+ /* to send the frame contained in the buffers. */
+
+ FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
+
+ /* if operating in Raw sync mode, reset the rcc component
+	 * of the tx dma buffer entry; otherwise, the serial controller
+ * will send a closing sync char after this count.
+ */
+ if ( info->params.mode == MGSL_MODE_RAW )
+ info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
+
+ /* Program the Transmit Character Length Register (TCLR) */
+ /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
+ usc_OutReg( info, TCLR, (u16)FrameSize );
+
+ usc_RTCmd( info, RTCmd_PurgeTxFifo );
+
+ /* Program the address of the 1st DMA Buffer Entry in linked list */
+ phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
+ usc_OutDmaReg( info, NTARL, (u16)phys_addr );
+ usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
+
+ usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
+ usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
+ usc_EnableInterrupts( info, TRANSMIT_STATUS );
+
+ if ( info->params.mode == MGSL_MODE_RAW &&
+ info->num_tx_dma_buffers > 1 ) {
+ /* When running external sync mode, attempt to 'stream' transmit */
+ /* by filling tx dma buffers as they become available. To do this */
+ /* we need to enable Tx DMA EOB Status interrupts : */
+ /* */
+ /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
+ /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
+
+ usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
+ usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
+ }
+
+ /* Initialize Transmit DMA Channel */
+ usc_DmaCmd( info, DmaCmd_InitTxChannel );
+
+ usc_TCmd( info, TCmd_SendFrame );
+
+ mod_timer(&info->tx_timer, jiffies +
+ msecs_to_jiffies(5000));
+ }
+ info->tx_active = true;
+ }
+
+ if ( !info->tx_enabled ) {
+ info->tx_enabled = true;
+ if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
+ usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
+ else
+ usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
+ }
+
+} /* end of usc_start_transmitter() */
+
+/* usc_stop_transmitter()
+ *
+ * Stops the transmitter and DMA
+ *
+ * Arguments:		info	pointer to device instance data
+ * Return Value: None
+ */
+static void usc_stop_transmitter( struct mgsl_struct *info )
+{
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):usc_stop_transmitter(%s)\n",
+ __FILE__,__LINE__, info->device_name );
+
+ del_timer(&info->tx_timer);
+
+ usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
+ usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
+ usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
+
+ usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
+ usc_DmaCmd( info, DmaCmd_ResetTxChannel );
+ usc_RTCmd( info, RTCmd_PurgeTxFifo );
+
+ info->tx_enabled = false;
+ info->tx_active = false;
+
+} /* end of usc_stop_transmitter() */
+
+/* usc_load_txfifo()
+ *
+ * Fill the transmit FIFO until the FIFO is full or
+ * there is no more data to load.
+ *
+ * Arguments: info pointer to device extension (instance data)
+ * Return Value: None
+ */
+static void usc_load_txfifo( struct mgsl_struct *info )
+{
+ int Fifocount;
+ u8 TwoBytes[2];
+
+ if ( !info->xmit_cnt && !info->x_char )
+ return;
+
+ /* Select transmit FIFO status readback in TICR */
+ usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
+
+ /* load the Transmit FIFO until FIFOs full or all data sent */
+
+ while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
+ /* there is more space in the transmit FIFO and */
+ /* there is more data in transmit buffer */
+
+ if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
+ /* write a 16-bit word from transmit buffer to 16C32 */
+
+ TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
+ info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
+ TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
+ info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
+
+ outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
+
+ info->xmit_cnt -= 2;
+ info->icount.tx += 2;
+ } else {
+ /* only 1 byte left to transmit or 1 FIFO slot left */
+
+ outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
+ info->io_base + CCAR );
+
+ if (info->x_char) {
+ /* transmit pending high priority char */
+ outw( info->x_char,info->io_base + CCAR );
+ info->x_char = 0;
+ } else {
+ outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
+ info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
+ info->xmit_cnt--;
+ }
+ info->icount.tx++;
+ }
+ }
+
+} /* end of usc_load_txfifo() */
+
+/* usc_reset()
+ *
+ * Reset the adapter to a known state and prepare it for further use.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void usc_reset( struct mgsl_struct *info )
+{
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
+ int i;
+ u32 readval;
+
+ /* Set BIT30 of Misc Control Register */
+ /* (Local Control Register 0x50) to force reset of USC. */
+
+ volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
+ u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
+
+ info->misc_ctrl_value |= BIT30;
+ *MiscCtrl = info->misc_ctrl_value;
+
+ /*
+ * Force at least 170ns delay before clearing
+	 * reset bit. Each read from LCR takes at least
+	 * 30ns, so 10 reads give at least 300ns to be safe.
+ */
+ for(i=0;i<10;i++)
+ readval = *MiscCtrl;
+
+ info->misc_ctrl_value &= ~BIT30;
+ *MiscCtrl = info->misc_ctrl_value;
+
+ *LCR0BRDR = BUS_DESCRIPTOR(
+ 1, // Write Strobe Hold (0-3)
+ 2, // Write Strobe Delay (0-3)
+ 2, // Read Strobe Delay (0-3)
+ 0, // NWDD (Write data-data) (0-3)
+ 4, // NWAD (Write Addr-data) (0-31)
+ 0, // NXDA (Read/Write Data-Addr) (0-3)
+ 0, // NRDD (Read Data-Data) (0-3)
+ 5 // NRAD (Read Addr-Data) (0-31)
+ );
+ } else {
+ /* do HW reset */
+ outb( 0,info->io_base + 8 );
+ }
+
+ info->mbre_bit = 0;
+ info->loopback_bits = 0;
+ info->usc_idle_mode = 0;
+
+ /*
+ * Program the Bus Configuration Register (BCR)
+ *
+ * <15> 0 Don't use separate address
+ * <14..6> 0 reserved
+ * <5..4> 00 IAckmode = Default, don't care
+ * <3> 1 Bus Request Totem Pole output
+ * <2> 1 Use 16 Bit data bus
+ * <1> 0 IRQ Totem Pole output
+ * <0> 0 Don't Shift Right Addr
+ *
+ * 0000 0000 0000 1100 = 0x000c
+ *
+ * By writing to io_base + SDPIN the Wait/Ack pin is
+ * programmed to work as a Wait pin.
+ */
+
+ outw( 0x000c,info->io_base + SDPIN );
+
+
+ outw( 0,info->io_base );
+ outw( 0,info->io_base + CCAR );
+
+ /* select little endian byte ordering */
+ usc_RTCmd( info, RTCmd_SelectLittleEndian );
+
+
+ /* Port Control Register (PCR)
+ *
+ * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
+ * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
+ * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
+ * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
+ * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
+ * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
+ * <3..2> 01 Port 1 is Input (Dedicated RxC)
+ * <1..0> 01 Port 0 is Input (Dedicated TxC)
+ *
+ * 1111 0000 1111 0101 = 0xf0f5
+ */
+
+ usc_OutReg( info, PCR, 0xf0f5 );
+
+
+ /*
+ * Input/Output Control Register
+ *
+ * <15..14> 00 CTS is active low input
+ * <13..12> 00 DCD is active low input
+ * <11..10> 00 TxREQ pin is input (DSR)
+ * <9..8> 00 RxREQ pin is input (RI)
+ * <7..6> 00 TxD is output (Transmit Data)
+ * <5..3> 000 TxC Pin in Input (14.7456MHz Clock)
+ * <2..0> 100 RxC is Output (drive with BRG0)
+ *
+ * 0000 0000 0000 0100 = 0x0004
+ */
+
+ usc_OutReg( info, IOCR, 0x0004 );
+
+} /* end of usc_reset() */
+
+/* usc_set_async_mode()
+ *
+ * Program adapter for asynchronous communications.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void usc_set_async_mode( struct mgsl_struct *info )
+{
+ u16 RegValue;
+
+ /* disable interrupts while programming USC */
+ usc_DisableMasterIrqBit( info );
+
+ outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
+ usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
+
+ usc_loopback_frame( info );
+
+ /* Channel mode Register (CMR)
+ *
+ * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
+ * <13..12> 00 00 = 16X Clock
+ * <11..8> 0000 Transmitter mode = Asynchronous
+ * <7..6> 00 reserved?
+ * <5..4> 00 Rx Sub modes, 00 = 16X Clock
+ * <3..0> 0000 Receiver mode = Asynchronous
+ *
+ * 0000 0000 0000 0000 = 0x0
+ */
+
+ RegValue = 0;
+ if ( info->params.stop_bits != 1 )
+ RegValue |= BIT14;
+ usc_OutReg( info, CMR, RegValue );
+
+
+ /* Receiver mode Register (RMR)
+ *
+ * <15..13> 000 encoding = None
+ * <12..08> 00000 reserved (Sync Only)
+ * <7..6> 00 Even parity
+ * <5> 0 parity disabled
+ * <4..2> 000 Receive Char Length = 8 bits
+ * <1..0> 00 Disable Receiver
+ *
+ * 0000 0000 0000 0000 = 0x0
+ */
+
+ RegValue = 0;
+
+ if ( info->params.data_bits != 8 )
+ RegValue |= BIT4+BIT3+BIT2;
+
+ if ( info->params.parity != ASYNC_PARITY_NONE ) {
+ RegValue |= BIT5;
+ if ( info->params.parity != ASYNC_PARITY_ODD )
+ RegValue |= BIT6;
+ }
+
+ usc_OutReg( info, RMR, RegValue );
+
+
+ /* Set IRQ trigger level */
+
+ usc_RCmd( info, RCmd_SelectRicrIntLevel );
+
+
+ /* Receive Interrupt Control Register (RICR)
+ *
+ * <15..8> ? RxFIFO IRQ Request Level
+ *
+ * Note: For async mode the receive FIFO level must be set
+ * to 0 to avoid the situation where the FIFO contains fewer bytes
+ * than the trigger level and no more data is expected.
+ *
+ * <7> 0 Exited Hunt IA (Interrupt Arm)
+ * <6> 0 Idle Received IA
+ * <5> 0 Break/Abort IA
+ * <4> 0 Rx Bound IA
+ * <3> 0 Queued status reflects oldest byte in FIFO
+ * <2> 0 Abort/PE IA
+ * <1> 0 Rx Overrun IA
+ * <0> 0 Select TC0 value for readback
+ *
+	 * 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
+ */
+
+ usc_OutReg( info, RICR, 0x0000 );
+
+ usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
+ usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
+
+
+ /* Transmit mode Register (TMR)
+ *
+ * <15..13> 000 encoding = None
+ * <12..08> 00000 reserved (Sync Only)
+ * <7..6> 00 Transmit parity Even
+ * <5> 0 Transmit parity Disabled
+ * <4..2> 000 Tx Char Length = 8 bits
+ * <1..0> 00 Disable Transmitter
+ *
+ * 0000 0000 0000 0000 = 0x0
+ */
+
+ RegValue = 0;
+
+ if ( info->params.data_bits != 8 )
+ RegValue |= BIT4+BIT3+BIT2;
+
+ if ( info->params.parity != ASYNC_PARITY_NONE ) {
+ RegValue |= BIT5;
+ if ( info->params.parity != ASYNC_PARITY_ODD )
+ RegValue |= BIT6;
+ }
+
+ usc_OutReg( info, TMR, RegValue );
+
+ usc_set_txidle( info );
+
+
+ /* Set IRQ trigger level */
+
+ usc_TCmd( info, TCmd_SelectTicrIntLevel );
+
+
+ /* Transmit Interrupt Control Register (TICR)
+ *
+ * <15..8> ? Transmit FIFO IRQ Level
+ * <7> 0 Present IA (Interrupt Arm)
+ * <6> 1 Idle Sent IA
+ * <5> 0 Abort Sent IA
+ * <4> 0 EOF/EOM Sent IA
+ * <3> 0 CRC Sent IA
+ * <2> 0 1 = Wait for SW Trigger to Start Frame
+ * <1> 0 Tx Underrun IA
+ * <0> 0 TC0 constant on read back
+ *
+	 * 0000 0000 0100 0000 = 0x0040 + (FIFO level in MSB)
+ */
+
+ usc_OutReg( info, TICR, 0x1f40 );
+
+ usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
+ usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
+
+ usc_enable_async_clock( info, info->params.data_rate );
+
+
+ /* Channel Control/status Register (CCSR)
+ *
+ * <15> X RCC FIFO Overflow status (RO)
+ * <14> X RCC FIFO Not Empty status (RO)
+ * <13> 0 1 = Clear RCC FIFO (WO)
+ * <12> X DPLL in Sync status (RO)
+ * <11> X DPLL 2 Missed Clocks status (RO)
+ * <10> X DPLL 1 Missed Clock status (RO)
+ * <9..8> 00 DPLL Resync on rising and falling edges (RW)
+ * <7> X SDLC Loop On status (RO)
+ * <6> X SDLC Loop Send status (RO)
+ * <5> 1 Bypass counters for TxClk and RxClk (RW)
+ * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
+ * <1..0> 00 reserved
+ *
+ * 0000 0000 0010 0000 = 0x0020
+ */
+
+ usc_OutReg( info, CCSR, 0x0020 );
+
+ usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
+ RECEIVE_DATA + RECEIVE_STATUS );
+
+ usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
+ RECEIVE_DATA + RECEIVE_STATUS );
+
+ usc_EnableMasterIrqBit( info );
+
+ if (info->bus_type == MGSL_BUS_TYPE_ISA) {
+ /* Enable INTEN (Port 6, Bit12) */
+ /* This connects the IRQ request signal to the ISA bus */
+ usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
+ }
+
+ if (info->params.loopback) {
+ info->loopback_bits = 0x300;
+ outw(0x0300, info->io_base + CCAR);
+ }
+
+} /* end of usc_set_async_mode() */
+
+/* usc_loopback_frame()
+ *
+ * Loop back a small (2 byte) dummy SDLC frame.
+ * Interrupts and DMA are NOT used. The purpose of this is to
+ * clear any 'stale' status info left over from running in async mode.
+ *
+ * The 16C32 shows the strange behaviour of marking the 1st
+ * received SDLC frame with a CRC error even when there is no
+ * CRC error. To get around this, a small dummy frame of 2 bytes
+ * is looped back when switching from async to sync mode.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void usc_loopback_frame( struct mgsl_struct *info )
+{
+ int i;
+ unsigned long oldmode = info->params.mode;
+
+ info->params.mode = MGSL_MODE_HDLC;
+
+ usc_DisableMasterIrqBit( info );
+
+ usc_set_sdlc_mode( info );
+ usc_enable_loopback( info, 1 );
+
+ /* Write 16-bit Time Constant for BRG0 */
+ usc_OutReg( info, TC0R, 0 );
+
+ /* Channel Control Register (CCR)
+ *
+ * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
+ * <13> 0 Trigger Tx on SW Command Disabled
+ * <12> 0 Flag Preamble Disabled
+ * <11..10> 00 Preamble Length = 8-Bits
+ * <9..8> 01 Preamble Pattern = flags
+ * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs)
+ * <5> 0 Trigger Rx on SW Command Disabled
+ * <4..0> 0 reserved
+ *
+ * 0000 0001 0000 0000 = 0x0100
+ */
+
+ usc_OutReg( info, CCR, 0x0100 );
+
+ /* SETUP RECEIVER */
+ usc_RTCmd( info, RTCmd_PurgeRxFifo );
+ usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
+
+ /* SETUP TRANSMITTER */
+ /* Program the Transmit Character Length Register (TCLR) */
+ /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
+ usc_OutReg( info, TCLR, 2 );
+ usc_RTCmd( info, RTCmd_PurgeTxFifo );
+
+ /* unlatch Tx status bits, and start transmit channel. */
+ usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
+ outw(0,info->io_base + DATAREG);
+
+ /* ENABLE TRANSMITTER */
+ usc_TCmd( info, TCmd_SendFrame );
+ usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
+
+ /* WAIT FOR RECEIVE COMPLETE */
+ for (i=0 ; i<1000 ; i++)
+ if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
+ break;
+
+ /* clear Internal Data loopback mode */
+ usc_enable_loopback(info, 0);
+
+ usc_EnableMasterIrqBit(info);
+
+ info->params.mode = oldmode;
+
+} /* end of usc_loopback_frame() */
+
+/* usc_set_sync_mode() Programs the USC for SDLC communications.
+ *
+ * Arguments: info pointer to adapter info structure
+ * Return Value: None
+ */
+static void usc_set_sync_mode( struct mgsl_struct *info )
+{
+ usc_loopback_frame( info );
+ usc_set_sdlc_mode( info );
+
+ if (info->bus_type == MGSL_BUS_TYPE_ISA) {
+ /* Enable INTEN (Port 6, Bit12) */
+ /* This connects the IRQ request signal to the ISA bus */
+ usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
+ }
+
+ usc_enable_aux_clock(info, info->params.clock_speed);
+
+ if (info->params.loopback)
+ usc_enable_loopback(info,1);
+
+} /* end of usc_set_sync_mode() */
+
+/* usc_set_txidle() Set the HDLC idle mode for the transmitter.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void usc_set_txidle( struct mgsl_struct *info )
+{
+ u16 usc_idle_mode = IDLEMODE_FLAGS;
+
+ /* Map API idle mode to USC register bits */
+
+ switch( info->idle_mode ){
+ case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
+ case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
+ case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
+ case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
+ case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
+ case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
+ case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
+ }
+
+ info->usc_idle_mode = usc_idle_mode;
+ //usc_OutReg(info, TCSR, usc_idle_mode);
+ info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
+ info->tcsr_value += usc_idle_mode;
+ usc_OutReg(info, TCSR, info->tcsr_value);
+
+ /*
+ * if SyncLink WAN adapter is running in external sync mode, the
+ * transmitter has been set to Monosync in order to try to mimic
+ * a true raw outbound bit stream. Monosync still sends an open/close
+ * sync char at the start/end of a frame. Try to match those sync
+ * patterns to the idle mode set here
+ */
+ if ( info->params.mode == MGSL_MODE_RAW ) {
+ unsigned char syncpat = 0;
+ switch( info->idle_mode ) {
+ case HDLC_TXIDLE_FLAGS:
+ syncpat = 0x7e;
+ break;
+ case HDLC_TXIDLE_ALT_ZEROS_ONES:
+ syncpat = 0x55;
+ break;
+ case HDLC_TXIDLE_ZEROS:
+ case HDLC_TXIDLE_SPACE:
+ syncpat = 0x00;
+ break;
+ case HDLC_TXIDLE_ONES:
+ case HDLC_TXIDLE_MARK:
+ syncpat = 0xff;
+ break;
+ case HDLC_TXIDLE_ALT_MARK_SPACE:
+ syncpat = 0xaa;
+ break;
+ }
+
+ usc_SetTransmitSyncChars(info,syncpat,syncpat);
+ }
+
+} /* end of usc_set_txidle() */
+
+/* usc_get_serial_signals()
+ *
+ * Query the adapter for the state of the V24 status (input) signals.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void usc_get_serial_signals( struct mgsl_struct *info )
+{
+ u16 status;
+
+ /* clear all serial signals except DTR and RTS */
+ info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
+
+ /* Read the Misc Interrupt status Register (MISR) to get */
+ /* the V24 status signals. */
+
+ status = usc_InReg( info, MISR );
+
+ /* set serial signal bits to reflect MISR */
+
+ if ( status & MISCSTATUS_CTS )
+ info->serial_signals |= SerialSignal_CTS;
+
+ if ( status & MISCSTATUS_DCD )
+ info->serial_signals |= SerialSignal_DCD;
+
+ if ( status & MISCSTATUS_RI )
+ info->serial_signals |= SerialSignal_RI;
+
+ if ( status & MISCSTATUS_DSR )
+ info->serial_signals |= SerialSignal_DSR;
+
+} /* end of usc_get_serial_signals() */
+
+/* usc_set_serial_signals()
+ *
+ * Set the state of DTR and RTS based on contents of
+ * serial_signals member of device extension.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void usc_set_serial_signals( struct mgsl_struct *info )
+{
+ u16 Control;
+ unsigned char V24Out = info->serial_signals;
+
+ /* get the current value of the Port Control Register (PCR) */
+
+ Control = usc_InReg( info, PCR );
+
+ if ( V24Out & SerialSignal_RTS )
+ Control &= ~(BIT6);
+ else
+ Control |= BIT6;
+
+ if ( V24Out & SerialSignal_DTR )
+ Control &= ~(BIT4);
+ else
+ Control |= BIT4;
+
+ usc_OutReg( info, PCR, Control );
+
+} /* end of usc_set_serial_signals() */
+
+/* usc_enable_async_clock()
+ *
+ * Enable the async clock at the specified frequency.
+ *
+ * Arguments: info pointer to device instance data
+ * data_rate data rate of clock in bps
+ * 0 disables the AUX clock.
+ * Return Value: None
+ */
+static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
+{
+ if ( data_rate ) {
+ /*
+ * Clock mode Control Register (CMCR)
+ *
+ * <15..14> 00 counter 1 Disabled
+ * <13..12> 00 counter 0 Disabled
+ * <11..10> 11 BRG1 Input is TxC Pin
+ * <9..8> 11 BRG0 Input is TxC Pin
+ * <7..6> 01 DPLL Input is BRG1 Output
+ * <5..3> 100 TxCLK comes from BRG0
+ * <2..0> 100 RxCLK comes from BRG0
+ *
+ * 0000 1111 0110 0100 = 0x0f64
+ */
+
+ usc_OutReg( info, CMCR, 0x0f64 );
+
+
+ /*
+ * Write 16-bit Time Constant for BRG0
+ * Time Constant = (ClkSpeed / data_rate) - 1
+ * ClkSpeed = 921600 (ISA), 691200 (PCI)
+ */
+
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI )
+ usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
+ else
+ usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
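+
+		/*
+		 * Worked example (illustrative): at 9600 bps the time constant is
+		 * (691200 / 9600) - 1 = 71 on a PCI adapter and
+		 * (921600 / 9600) - 1 = 95 on an ISA adapter.
+		 */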
+
+
+ /*
+ * Hardware Configuration Register (HCR)
+ * Clear Bit 1, BRG0 mode = Continuous
+ * Set Bit 0 to enable BRG0.
+ */
+
+ usc_OutReg( info, HCR,
+ (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
+
+
+ /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
+
+ usc_OutReg( info, IOCR,
+ (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
+ } else {
+ /* data rate == 0 so turn off BRG0 */
+ usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
+ }
+
+} /* end of usc_enable_async_clock() */
+
+/*
+ * Buffer Structures:
+ *
+ * Normal memory access uses virtual addresses that can make discontiguous
+ * physical memory pages appear to be contiguous in the virtual address
+ * space (the processor's memory mapping handles the conversions).
+ *
+ * DMA transfers require physically contiguous memory. This is because
+ * the DMA system controller and DMA bus masters deal with memory using
+ * only physical addresses.
+ *
+ * This causes a problem under Windows NT when large DMA buffers are
+ * needed. Fragmentation of the nonpaged pool prevents allocations of
+ * physically contiguous buffers larger than the PAGE_SIZE.
+ *
+ * However the 16C32 supports Bus Master Scatter/Gather DMA which
+ * allows DMA transfers to physically discontiguous buffers. Information
+ * about each data transfer buffer is contained in a memory structure
+ * called a 'buffer entry'. A list of buffer entries is maintained
+ * to track and control the use of the data transfer buffers.
+ *
+ * To support this strategy we will allocate sufficient PAGE_SIZE
+ * contiguous memory buffers to allow for the total required buffer
+ * space.
+ *
+ * The 16C32 accesses the list of buffer entries using Bus Master
+ * DMA. Control information is read from the buffer entries by the
+ * 16C32 to control data transfers. status information is written to
+ * the buffer entries by the 16C32 to indicate the status of completed
+ * transfers.
+ *
+ * The CPU writes control information to the buffer entries to control
+ * the 16C32 and reads status information from the buffer entries to
+ * determine information about received and transmitted frames.
+ *
+ * Because the CPU and 16C32 (adapter) both need simultaneous access
+ * to the buffer entries, the buffer entry memory is allocated with
+ * HalAllocateCommonBuffer(). This restricts the size of the buffer
+ * entry list to PAGE_SIZE.
+ *
+ * The actual data buffers on the other hand will only be accessed
+ * by the CPU or the adapter but not by both simultaneously. This allows
+ * Scatter/Gather packet based DMA procedures for using physically
+ * discontiguous pages.
+ */
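+
+/*
+ * For orientation only (the actual definition lives elsewhere in this
+ * driver): each DMA buffer entry referenced below carries roughly the
+ * following fields, which is all the code in this section relies on:
+ *
+ *	count      - bytes available (Rx) or loaded (Tx) in the data buffer
+ *	status     - completion status written back by the 16C32
+ *	rcc        - residual character count / frame size field
+ *	virt_addr  - CPU (virtual) address of the data buffer
+ *	phys_entry - physical address of the entry, programmed into the 16C32
+ */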
+
+/*
+ * mgsl_reset_tx_dma_buffers()
+ *
+ * Set the count for all transmit buffers to 0 to indicate the
+ * buffer is available for use and set the current buffer to the
+ * first buffer. This effectively makes all buffers free and
+ * discards any data in buffers.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
+{
+ unsigned int i;
+
+ for ( i = 0; i < info->tx_buffer_count; i++ ) {
+ *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
+ }
+
+ info->current_tx_buffer = 0;
+ info->start_tx_dma_buffer = 0;
+ info->tx_dma_buffers_used = 0;
+
+ info->get_tx_holding_index = 0;
+ info->put_tx_holding_index = 0;
+ info->tx_holding_count = 0;
+
+} /* end of mgsl_reset_tx_dma_buffers() */
+
+/*
+ * num_free_tx_dma_buffers()
+ *
+ * returns the number of free tx dma buffers available
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: number of free tx dma buffers
+ */
+static int num_free_tx_dma_buffers(struct mgsl_struct *info)
+{
+ return info->tx_buffer_count - info->tx_dma_buffers_used;
+}
+
+/*
+ * mgsl_reset_rx_dma_buffers()
+ *
+ * Set the count for all receive buffers to DMABUFFERSIZE
+ * and set the current buffer to the first buffer. This effectively
+ * makes all buffers free and discards any data in buffers.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: None
+ */
+static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
+{
+ unsigned int i;
+
+ for ( i = 0; i < info->rx_buffer_count; i++ ) {
+ *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
+// info->rx_buffer_list[i].count = DMABUFFERSIZE;
+// info->rx_buffer_list[i].status = 0;
+ }
+
+ info->current_rx_buffer = 0;
+
+} /* end of mgsl_reset_rx_dma_buffers() */
+
+/*
+ * mgsl_free_rx_frame_buffers()
+ *
+ * Free the receive buffers used by a received SDLC
+ * frame such that the buffers can be reused.
+ *
+ * Arguments:
+ *
+ * info pointer to device instance data
+ * StartIndex index of 1st receive buffer of frame
+ * EndIndex index of last receive buffer of frame
+ *
+ * Return Value: None
+ */
+static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
+{
+ bool Done = false;
+ DMABUFFERENTRY *pBufEntry;
+ unsigned int Index;
+
+ /* Starting with 1st buffer entry of the frame clear the status */
+ /* field and set the count field to DMA Buffer Size. */
+
+ Index = StartIndex;
+
+ while( !Done ) {
+ pBufEntry = &(info->rx_buffer_list[Index]);
+
+ if ( Index == EndIndex ) {
+ /* This is the last buffer of the frame! */
+ Done = true;
+ }
+
+ /* reset current buffer for reuse */
+// pBufEntry->status = 0;
+// pBufEntry->count = DMABUFFERSIZE;
+ *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
+
+ /* advance to next buffer entry in linked list */
+ Index++;
+ if ( Index == info->rx_buffer_count )
+ Index = 0;
+ }
+
+ /* set current buffer to next buffer after last buffer of frame */
+ info->current_rx_buffer = Index;
+
+} /* end of mgsl_free_rx_frame_buffers() */
+
+/* mgsl_get_rx_frame()
+ *
+ * This function attempts to return a received SDLC frame from the
+ * receive DMA buffers. Only frames received without errors are returned.
+ *
+ * Arguments: info pointer to device extension
+ * Return Value: true if frame returned, otherwise false
+ */
+static bool mgsl_get_rx_frame(struct mgsl_struct *info)
+{
+ unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
+ unsigned short status;
+ DMABUFFERENTRY *pBufEntry;
+ unsigned int framesize = 0;
+ bool ReturnCode = false;
+ unsigned long flags;
+ struct tty_struct *tty = info->port.tty;
+ bool return_frame = false;
+
+ /*
+ * current_rx_buffer points to the 1st buffer of the next available
+ * receive frame. To find the last buffer of the frame look for
+ * a non-zero status field in the buffer entries. (The status
+	 * field is set by the 16C32 after completing a receive frame.)
+ */
+
+ StartIndex = EndIndex = info->current_rx_buffer;
+
+ while( !info->rx_buffer_list[EndIndex].status ) {
+ /*
+ * If the count field of the buffer entry is non-zero then
+ * this buffer has not been used. (The 16C32 clears the count
+ * field when it starts using the buffer.) If an unused buffer
+ * is encountered then there are no frames available.
+ */
+
+ if ( info->rx_buffer_list[EndIndex].count )
+ goto Cleanup;
+
+ /* advance to next buffer entry in linked list */
+ EndIndex++;
+ if ( EndIndex == info->rx_buffer_count )
+ EndIndex = 0;
+
+ /* if entire list searched then no frame available */
+ if ( EndIndex == StartIndex ) {
+			/* If this occurs then something bad happened:
+ * all buffers have been 'used' but none mark
+ * the end of a frame. Reset buffers and receiver.
+ */
+
+ if ( info->rx_enabled ){
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_start_receiver(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ }
+ goto Cleanup;
+ }
+ }
+
+
+ /* check status of receive frame */
+
+ status = info->rx_buffer_list[EndIndex].status;
+
+ if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
+ RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
+ if ( status & RXSTATUS_SHORT_FRAME )
+ info->icount.rxshort++;
+ else if ( status & RXSTATUS_ABORT )
+ info->icount.rxabort++;
+ else if ( status & RXSTATUS_OVERRUN )
+ info->icount.rxover++;
+ else {
+ info->icount.rxcrc++;
+ if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
+ return_frame = true;
+ }
+ framesize = 0;
+#if SYNCLINK_GENERIC_HDLC
+ {
+ info->netdev->stats.rx_errors++;
+ info->netdev->stats.rx_frame_errors++;
+ }
+#endif
+ } else
+ return_frame = true;
+
+ if ( return_frame ) {
+ /* receive frame has no errors, get frame size.
+ * The frame size is the starting value of the RCC (which was
+ * set to 0xffff) minus the ending value of the RCC (decremented
+ * once for each receive character) minus 2 for the 16-bit CRC.
+ */
+
+ framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
+
+ /* adjust frame size for CRC if any */
+ if ( info->params.crc_type == HDLC_CRC_16_CCITT )
+ framesize -= 2;
+ else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
+ framesize -= 4;
+ }
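+
+		/*
+		 * Worked example (illustrative): with the RCC preloaded to 0xffff
+		 * and an ending RCC of 0xff5a, the frame occupied
+		 * 0xffff - 0xff5a = 165 bytes on the wire; with HDLC_CRC_16_CCITT
+		 * that leaves a framesize of 163 data bytes for the caller.
+		 */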
+
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
+ __FILE__,__LINE__,info->device_name,status,framesize);
+
+ if ( debug_level >= DEBUG_LEVEL_DATA )
+ mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
+ min_t(int, framesize, DMABUFFERSIZE),0);
+
+ if (framesize) {
+ if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
+ ((framesize+1) > info->max_frame_size) ) ||
+ (framesize > info->max_frame_size) )
+ info->icount.rxlong++;
+ else {
+ /* copy dma buffer(s) to contiguous intermediate buffer */
+ int copy_count = framesize;
+ int index = StartIndex;
+ unsigned char *ptmp = info->intermediate_rxbuffer;
+
+ if ( !(status & RXSTATUS_CRC_ERROR))
+ info->icount.rxok++;
+
+ while(copy_count) {
+ int partial_count;
+ if ( copy_count > DMABUFFERSIZE )
+ partial_count = DMABUFFERSIZE;
+ else
+ partial_count = copy_count;
+
+ pBufEntry = &(info->rx_buffer_list[index]);
+ memcpy( ptmp, pBufEntry->virt_addr, partial_count );
+ ptmp += partial_count;
+ copy_count -= partial_count;
+
+ if ( ++index == info->rx_buffer_count )
+ index = 0;
+ }
+
+ if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
+ ++framesize;
+ *ptmp = (status & RXSTATUS_CRC_ERROR ?
+ RX_CRC_ERROR :
+ RX_OK);
+
+ if ( debug_level >= DEBUG_LEVEL_DATA )
+ printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
+ __FILE__,__LINE__,info->device_name,
+ *ptmp);
+ }
+
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount)
+ hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
+ else
+#endif
+ ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
+ }
+ }
+ /* Free the buffers used by this frame. */
+ mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
+
+ ReturnCode = true;
+
+Cleanup:
+
+ if ( info->rx_enabled && info->rx_overflow ) {
+		/* The receiver needs to be restarted because of
+ * a receive overflow (buffer or FIFO). If the
+ * receive buffers are now empty, then restart receiver.
+ */
+
+ if ( !info->rx_buffer_list[EndIndex].status &&
+ info->rx_buffer_list[EndIndex].count ) {
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_start_receiver(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ }
+ }
+
+ return ReturnCode;
+
+} /* end of mgsl_get_rx_frame() */
+
+/* mgsl_get_raw_rx_frame()
+ *
+ * This function attempts to return a received frame from the
+ * receive DMA buffers when running in external loop mode. In this mode,
+ * we will return at most one DMABUFFERSIZE frame to the application.
+ * The USC receiver is triggering off of DCD going active to start a new
+ * frame, and DCD going inactive to terminate the frame (similar to
+ * processing a closing flag character).
+ *
+ * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
+ * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
+ * status field and the RCC field will indicate the length of the
+ * entire received frame. We take this RCC field and get the modulus
+ * of RCC and DMABUFFERSIZE to determine the number of bytes in the
+ * last Rx DMA buffer and return that last portion of the frame.
+ *
+ * Arguments: info pointer to device extension
+ * Return Value: true if frame returned, otherwise false
+ */
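+/*
+ * Illustrative example (assuming the 4K DMABUFFERSIZE noted below): if
+ * the RCC reports a total frame of 9000 bytes, the earlier buffers have
+ * already been returned as full 4096-byte chunks and the final buffer
+ * holds 9000 % 4096 = 808 bytes, which is what this routine returns.
+ */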
+static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
+{
+ unsigned int CurrentIndex, NextIndex;
+ unsigned short status;
+ DMABUFFERENTRY *pBufEntry;
+ unsigned int framesize = 0;
+ bool ReturnCode = false;
+ unsigned long flags;
+ struct tty_struct *tty = info->port.tty;
+
+ /*
+ * current_rx_buffer points to the 1st buffer of the next available
+ * receive frame. The status field is set by the 16C32 after
+ * completing a receive frame. If the status field of this buffer
+ * is zero, either the USC is still filling this buffer or this
+ * is one of a series of buffers making up a received frame.
+ *
+ * If the count field of this buffer is zero, the USC is either
+ * using this buffer or has used this buffer. Look at the count
+ * field of the next buffer. If that next buffer's count is
+ * non-zero, the USC is still actively using the current buffer.
+ * Otherwise, if the next buffer's count field is zero, the
+ * current buffer is complete and the USC is using the next
+ * buffer.
+ */
+ CurrentIndex = NextIndex = info->current_rx_buffer;
+ ++NextIndex;
+ if ( NextIndex == info->rx_buffer_count )
+ NextIndex = 0;
+
+ if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
+ (info->rx_buffer_list[CurrentIndex].count == 0 &&
+ info->rx_buffer_list[NextIndex].count == 0)) {
+ /*
+ * Either the status field of this dma buffer is non-zero
+ * (indicating the last buffer of a receive frame) or the next
+ * buffer is marked as in use -- implying this buffer is complete
+ * and an intermediate buffer for this received frame.
+ */
+
+ status = info->rx_buffer_list[CurrentIndex].status;
+
+ if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
+ RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
+ if ( status & RXSTATUS_SHORT_FRAME )
+ info->icount.rxshort++;
+ else if ( status & RXSTATUS_ABORT )
+ info->icount.rxabort++;
+ else if ( status & RXSTATUS_OVERRUN )
+ info->icount.rxover++;
+ else
+ info->icount.rxcrc++;
+ framesize = 0;
+ } else {
+ /*
+ * A receive frame is available, get frame size and status.
+ *
+ * The frame size is the starting value of the RCC (which was
+ * set to 0xffff) minus the ending value of the RCC (decremented
+ * once for each receive character) minus 2 or 4 for the 16-bit
+ * or 32-bit CRC.
+ *
+ * If the status field is zero, this is an intermediate buffer.
+			 * Its size is 4K.
+ *
+ * If the DMA Buffer Entry's Status field is non-zero, the
+ * receive operation completed normally (ie: DCD dropped). The
+ * RCC field is valid and holds the received frame size.
+ * It is possible that the RCC field will be zero on a DMA buffer
+ * entry with a non-zero status. This can occur if the total
+ * frame size (number of bytes between the time DCD goes active
+ * to the time DCD goes inactive) exceeds 65535 bytes. In this
+ * case the 16C32 has underrun on the RCC count and appears to
+ * stop updating this counter to let us know the actual received
+ * frame size. If this happens (non-zero status and zero RCC),
+ * simply return the entire RxDMA Buffer
+ */
+ if ( status ) {
+ /*
+ * In the event that the final RxDMA Buffer is
+ * terminated with a non-zero status and the RCC
+ * field is zero, we interpret this as the RCC
+ * having underflowed (received frame > 65535 bytes).
+ *
+ * Signal the event to the user by passing back
+ * a status of RxStatus_CrcError returning the full
+ * buffer and let the app figure out what data is
+ * actually valid
+ */
+ if ( info->rx_buffer_list[CurrentIndex].rcc )
+ framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
+ else
+ framesize = DMABUFFERSIZE;
+ }
+ else
+ framesize = DMABUFFERSIZE;
+ }
+
+ if ( framesize > DMABUFFERSIZE ) {
+ /*
+			 * if running in raw sync mode, the ISR handler for
+			 * End Of Buffer events terminates all buffers at 4K.
+			 * If the reported frame size is >4K, get the
+ * actual number of bytes of the frame in this buffer.
+ */
+ framesize = framesize % DMABUFFERSIZE;
+ }
+
+
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
+ __FILE__,__LINE__,info->device_name,status,framesize);
+
+ if ( debug_level >= DEBUG_LEVEL_DATA )
+ mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
+ min_t(int, framesize, DMABUFFERSIZE),0);
+
+ if (framesize) {
+ /* copy dma buffer(s) to contiguous intermediate buffer */
+ /* NOTE: we never copy more than DMABUFFERSIZE bytes */
+
+ pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
+ memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
+ info->icount.rxok++;
+
+ ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
+ }
+
+ /* Free the buffers used by this frame. */
+ mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
+
+ ReturnCode = true;
+ }
+
+
+ if ( info->rx_enabled && info->rx_overflow ) {
+		/* The receiver needs to be restarted because of
+ * a receive overflow (buffer or FIFO). If the
+ * receive buffers are now empty, then restart receiver.
+ */
+
+ if ( !info->rx_buffer_list[CurrentIndex].status &&
+ info->rx_buffer_list[CurrentIndex].count ) {
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_start_receiver(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ }
+ }
+
+ return ReturnCode;
+
+} /* end of mgsl_get_raw_rx_frame() */
+
+/* mgsl_load_tx_dma_buffer()
+ *
+ * Load the transmit DMA buffer with the specified data.
+ *
+ * Arguments:
+ *
+ * info pointer to device extension
+ * Buffer pointer to buffer containing frame to load
+ * BufferSize size in bytes of frame in Buffer
+ *
+ * Return Value: None
+ */
+static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
+ const char *Buffer, unsigned int BufferSize)
+{
+ unsigned short Copycount;
+ unsigned int i = 0;
+ DMABUFFERENTRY *pBufEntry;
+
+ if ( debug_level >= DEBUG_LEVEL_DATA )
+ mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
+
+ if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
+ /* set CMR:13 to start transmit when
+ * next GoAhead (abort) is received
+ */
+ info->cmr_value |= BIT13;
+ }
+
+ /* begin loading the frame in the next available tx dma
+ * buffer, remember it's starting location for setting
+ * up tx dma operation
+ */
+ i = info->current_tx_buffer;
+ info->start_tx_dma_buffer = i;
+
+ /* Setup the status and RCC (Frame Size) fields of the 1st */
+ /* buffer entry in the transmit DMA buffer list. */
+
+ info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
+ info->tx_buffer_list[i].rcc = BufferSize;
+ info->tx_buffer_list[i].count = BufferSize;
+
+ /* Copy frame data from 1st source buffer to the DMA buffers. */
+ /* The frame data may span multiple DMA buffers. */
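+	/* For example (illustrative, assuming 4096-byte DMA buffers): a    */
+	/* 10000-byte frame fills three entries with counts of 4096, 4096   */
+	/* and 1808 before the loop below terminates.                       */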
+
+ while( BufferSize ){
+ /* Get a pointer to next DMA buffer entry. */
+ pBufEntry = &info->tx_buffer_list[i++];
+
+ if ( i == info->tx_buffer_count )
+ i=0;
+
+ /* Calculate the number of bytes that can be copied from */
+ /* the source buffer to this DMA buffer. */
+ if ( BufferSize > DMABUFFERSIZE )
+ Copycount = DMABUFFERSIZE;
+ else
+ Copycount = BufferSize;
+
+ /* Actually copy data from source buffer to DMA buffer. */
+ /* Also set the data count for this individual DMA buffer. */
+ if ( info->bus_type == MGSL_BUS_TYPE_PCI )
+ mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
+ else
+ memcpy(pBufEntry->virt_addr, Buffer, Copycount);
+
+ pBufEntry->count = Copycount;
+
+ /* Advance source pointer and reduce remaining data count. */
+ Buffer += Copycount;
+ BufferSize -= Copycount;
+
+ ++info->tx_dma_buffers_used;
+ }
+
+ /* remember next available tx dma buffer */
+ info->current_tx_buffer = i;
+
+} /* end of mgsl_load_tx_dma_buffer() */
+
+/*
+ * mgsl_register_test()
+ *
+ * Performs a register test of the 16C32.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: true if test passed, otherwise false
+ */
+static bool mgsl_register_test( struct mgsl_struct *info )
+{
+ static unsigned short BitPatterns[] =
+ { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
+ static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
+ unsigned int i;
+ bool rc = true;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_reset(info);
+
+ /* Verify the reset state of some registers. */
+
+ if ( (usc_InReg( info, SICR ) != 0) ||
+ (usc_InReg( info, IVR ) != 0) ||
+ (usc_InDmaReg( info, DIVR ) != 0) ){
+ rc = false;
+ }
+
+ if ( rc ){
+ /* Write bit patterns to various registers but do it out of */
+ /* sync, then read back and verify values. */
+
+ for ( i = 0 ; i < Patterncount ; i++ ) {
+ usc_OutReg( info, TC0R, BitPatterns[i] );
+ usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
+ usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
+ usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
+ usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
+ usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
+
+ if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
+ (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
+ (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
+ (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
+ (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
+ (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
+ rc = false;
+ break;
+ }
+ }
+ }
+
+ usc_reset(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ return rc;
+
+} /* end of mgsl_register_test() */
+
+/* mgsl_irq_test() Perform interrupt test of the 16C32.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: true if test passed, otherwise false
+ */
+static bool mgsl_irq_test( struct mgsl_struct *info )
+{
+ unsigned long EndTime;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_reset(info);
+
+ /*
+ * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
+ * The ISR sets irq_occurred to true.
+ */
+
+ info->irq_occurred = false;
+
+ /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
+ /* Enable INTEN (Port 6, Bit12) */
+ /* This connects the IRQ request signal to the ISA bus */
+ /* on the ISA adapter. This has no effect for the PCI adapter */
+ usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
+
+ usc_EnableMasterIrqBit(info);
+ usc_EnableInterrupts(info, IO_PIN);
+ usc_ClearIrqPendingBits(info, IO_PIN);
+
+ usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
+ usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
+
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ EndTime=100;
+ while( EndTime-- && !info->irq_occurred ) {
+ msleep_interruptible(10);
+ }
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_reset(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ return info->irq_occurred;
+
+} /* end of mgsl_irq_test() */
+
+/* mgsl_dma_test()
+ *
+ * Perform a DMA test of the 16C32. A small frame is
+ * transmitted via DMA from a transmit buffer to a receive buffer
+ * using single buffer DMA mode.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: true if test passed, otherwise false
+ */
+static bool mgsl_dma_test( struct mgsl_struct *info )
+{
+ unsigned short FifoLevel;
+ unsigned long phys_addr;
+ unsigned int FrameSize;
+ unsigned int i;
+ char *TmpPtr;
+ bool rc = true;
+ unsigned short status=0;
+ unsigned long EndTime;
+ unsigned long flags;
+ MGSL_PARAMS tmp_params;
+
+ /* save current port options */
+ memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
+ /* load default port options */
+ memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
+
+#define TESTFRAMESIZE 40
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+
+ /* setup 16C32 for SDLC DMA transfer mode */
+
+ usc_reset(info);
+ usc_set_sdlc_mode(info);
+ usc_enable_loopback(info,1);
+
+ /* Reprogram the RDMR so that the 16C32 does NOT clear the count
+ * field of the buffer entry after fetching buffer address. This
+ * way we can detect a DMA failure for a DMA read (which should be
+	 * non-destructive to system memory) before we try to write to
+ * memory (where a failure could corrupt system memory).
+ */
+
+ /* Receive DMA mode Register (RDMR)
+ *
+ * <15..14> 11 DMA mode = Linked List Buffer mode
+ * <13> 1 RSBinA/L = store Rx status Block in List entry
+ * <12> 0 1 = Clear count of List Entry after fetching
+ * <11..10> 00 Address mode = Increment
+ * <9> 1 Terminate Buffer on RxBound
+ * <8> 0 Bus Width = 16bits
+ * <7..0> ? status Bits (write as 0s)
+ *
+ * 1110 0010 0000 0000 = 0xe200
+ */
+
+ usc_OutDmaReg( info, RDMR, 0xe200 );
+
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+
+ /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
+
+ FrameSize = TESTFRAMESIZE;
+
+ /* setup 1st transmit buffer entry: */
+ /* with frame size and transmit control word */
+
+ info->tx_buffer_list[0].count = FrameSize;
+ info->tx_buffer_list[0].rcc = FrameSize;
+ info->tx_buffer_list[0].status = 0x4000;
+
+ /* build a transmit frame in 1st transmit DMA buffer */
+
+ TmpPtr = info->tx_buffer_list[0].virt_addr;
+ for (i = 0; i < FrameSize; i++ )
+ *TmpPtr++ = i;
+
+ /* setup 1st receive buffer entry: */
+ /* clear status, set max receive buffer size */
+
+ info->rx_buffer_list[0].status = 0;
+ info->rx_buffer_list[0].count = FrameSize + 4;
+
+ /* zero out the 1st receive buffer */
+
+ memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
+
+ /* Set count field of next buffer entries to prevent */
+ /* 16C32 from using buffers after the 1st one. */
+
+ info->tx_buffer_list[1].count = 0;
+ info->rx_buffer_list[1].count = 0;
+
+
+ /***************************/
+ /* Program 16C32 receiver. */
+ /***************************/
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+
+ /* setup DMA transfers */
+ usc_RTCmd( info, RTCmd_PurgeRxFifo );
+
+ /* program 16C32 receiver with physical address of 1st DMA buffer entry */
+ phys_addr = info->rx_buffer_list[0].phys_entry;
+ usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
+ usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
+
+ /* Clear the Rx DMA status bits (read RDMR) and start channel */
+ usc_InDmaReg( info, RDMR );
+ usc_DmaCmd( info, DmaCmd_InitRxChannel );
+
+ /* Enable Receiver (RMR <1..0> = 10) */
+ usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
+
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+
+ /*************************************************************/
+ /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
+ /*************************************************************/
+
+ /* Wait 100ms for interrupt. */
+ EndTime = jiffies + msecs_to_jiffies(100);
+
+ for(;;) {
+ if (time_after(jiffies, EndTime)) {
+ rc = false;
+ break;
+ }
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ status = usc_InDmaReg( info, RDMR );
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ if ( !(status & BIT4) && (status & BIT5) ) {
+ /* INITG (BIT 4) is inactive (no entry read in progress) AND */
+ /* BUSY (BIT 5) is active (channel still active). */
+ /* This means the buffer entry read has completed. */
+ break;
+ }
+ }
+
+
+ /******************************/
+ /* Program 16C32 transmitter. */
+ /******************************/
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+
+ /* Program the Transmit Character Length Register (TCLR) */
+ /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
+
+ usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
+ usc_RTCmd( info, RTCmd_PurgeTxFifo );
+
+ /* Program the address of the 1st DMA Buffer Entry in linked list */
+
+ phys_addr = info->tx_buffer_list[0].phys_entry;
+ usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
+ usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
+
+ /* unlatch Tx status bits, and start transmit channel. */
+
+ usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
+ usc_DmaCmd( info, DmaCmd_InitTxChannel );
+
+ /* wait for DMA controller to fill transmit FIFO */
+
+ usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
+
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+
+ /**********************************/
+ /* WAIT FOR TRANSMIT FIFO TO FILL */
+ /**********************************/
+
+ /* Wait 100ms */
+ EndTime = jiffies + msecs_to_jiffies(100);
+
+ for(;;) {
+ if (time_after(jiffies, EndTime)) {
+ rc = false;
+ break;
+ }
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ FifoLevel = usc_InReg(info, TICR) >> 8;
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ if ( FifoLevel < 16 )
+ break;
+ else
+ if ( FrameSize < 32 ) {
+ /* This frame is smaller than the entire transmit FIFO */
+ /* so wait for the entire frame to be loaded. */
+ if ( FifoLevel <= (32 - FrameSize) )
+ break;
+ }
+ }
+
+
+ if ( rc )
+ {
+ /* Enable 16C32 transmitter. */
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+
+ /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
+ usc_TCmd( info, TCmd_SendFrame );
+ usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
+
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+
+ /******************************/
+ /* WAIT FOR TRANSMIT COMPLETE */
+ /******************************/
+
+ /* Wait 100ms */
+ EndTime = jiffies + msecs_to_jiffies(100);
+
+ /* While timer not expired wait for transmit complete */
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ status = usc_InReg( info, TCSR );
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
+ if (time_after(jiffies, EndTime)) {
+ rc = false;
+ break;
+ }
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ status = usc_InReg( info, TCSR );
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ }
+ }
+
+
+ if ( rc ){
+ /* CHECK FOR TRANSMIT ERRORS */
+ if ( status & (BIT5 + BIT1) )
+ rc = false;
+ }
+
+ if ( rc ) {
+ /* WAIT FOR RECEIVE COMPLETE */
+
+ /* Wait 100ms */
+ EndTime = jiffies + msecs_to_jiffies(100);
+
+ /* Wait for 16C32 to write receive status to buffer entry. */
+ status=info->rx_buffer_list[0].status;
+ while ( status == 0 ) {
+ if (time_after(jiffies, EndTime)) {
+ rc = false;
+ break;
+ }
+ status=info->rx_buffer_list[0].status;
+ }
+ }
+
+
+ if ( rc ) {
+ /* CHECK FOR RECEIVE ERRORS */
+ status = info->rx_buffer_list[0].status;
+
+ if ( status & (BIT8 + BIT3 + BIT1) ) {
+ /* receive error has occurred */
+ rc = false;
+ } else {
+ if ( memcmp( info->tx_buffer_list[0].virt_addr ,
+ info->rx_buffer_list[0].virt_addr, FrameSize ) ){
+ rc = false;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_reset( info );
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ /* restore current port options */
+ memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
+
+ return rc;
+
+} /* end of mgsl_dma_test() */
+
+/* mgsl_adapter_test()
+ *
+ * Perform the register, IRQ, and DMA tests for the 16C32.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: 0 if success, otherwise -ENODEV
+ */
+static int mgsl_adapter_test( struct mgsl_struct *info )
+{
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):Testing device %s\n",
+ __FILE__,__LINE__,info->device_name );
+
+ if ( !mgsl_register_test( info ) ) {
+ info->init_error = DiagStatus_AddressFailure;
+ printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
+ __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
+ return -ENODEV;
+ }
+
+ if ( !mgsl_irq_test( info ) ) {
+ info->init_error = DiagStatus_IrqFailure;
+ printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
+ __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
+ return -ENODEV;
+ }
+
+ if ( !mgsl_dma_test( info ) ) {
+ info->init_error = DiagStatus_DmaFailure;
+ printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
+ __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
+ return -ENODEV;
+ }
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):device %s passed diagnostics\n",
+ __FILE__,__LINE__,info->device_name );
+
+ return 0;
+
+} /* end of mgsl_adapter_test() */
+
+/* mgsl_memory_test()
+ *
+ * Test the shared memory on a PCI adapter.
+ *
+ * Arguments: info pointer to device instance data
+ * Return Value: true if test passed, otherwise false
+ */
+static bool mgsl_memory_test( struct mgsl_struct *info )
+{
+ static unsigned long BitPatterns[] =
+ { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
+ unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
+ unsigned long i;
+ unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
+ unsigned long * TestAddr;
+
+ if ( info->bus_type != MGSL_BUS_TYPE_PCI )
+ return true;
+
+ TestAddr = (unsigned long *)info->memory_base;
+
+ /* Test data lines with test pattern at one location. */
+
+ for ( i = 0 ; i < Patterncount ; i++ ) {
+ *TestAddr = BitPatterns[i];
+ if ( *TestAddr != BitPatterns[i] )
+ return false;
+ }
+
+ /* Test address lines with incrementing pattern over */
+ /* entire address range. */
+
+ for ( i = 0 ; i < TestLimit ; i++ ) {
+ *TestAddr = i * 4;
+ TestAddr++;
+ }
+
+ TestAddr = (unsigned long *)info->memory_base;
+
+ for ( i = 0 ; i < TestLimit ; i++ ) {
+ if ( *TestAddr != i * 4 )
+ return false;
+ TestAddr++;
+ }
+
+ memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
+
+ return true;
+
+} /* End Of mgsl_memory_test() */
+
+
+/* mgsl_load_pci_memory()
+ *
+ * Load a large block of data into the PCI shared memory.
+ * Use this instead of memcpy() or memmove() to move data
+ * into the PCI shared memory.
+ *
+ * Notes:
+ *
+ * This function prevents the PCI9050 interface chip from hogging
+ * the adapter local bus, which can starve the 16C32 by preventing
+ * 16C32 bus master cycles.
+ *
+ * The PCI9050 documentation says that the 9050 will always release
+ * control of the local bus after completing the current read
+ * or write operation.
+ *
+ * It appears that as long as the PCI9050 write FIFO is full, the
+ * PCI9050 treats all of the writes as a single burst transaction
+ * and will not release the bus. This causes DMA latency problems
+ * at high speeds when copying large data blocks to the shared
+ * memory.
+ *
+ * This function, in effect, breaks a large shared memory write
+ * into multiple transactions by interleaving a shared memory read
+ * which will flush the write FIFO and 'complete' the write
+ * transaction. This allows any pending DMA request to gain control
+ * of the local bus in a timely fashion.
+ *
+ * Arguments:
+ *
+ * TargetPtr pointer to target address in PCI shared memory
+ * SourcePtr pointer to source buffer for data
+ * count count in bytes of data to copy
+ *
+ * Return Value: None
+ */
+static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
+ unsigned short count )
+{
+ /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
+#define PCI_LOAD_INTERVAL 64
+
+ unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
+ unsigned short Index;
+ unsigned long Dummy;
+
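+	/* Illustrative example: for count = 200 the loop below copies three
+	 * full 64-byte chunks, reading back one long after each to flush the
+	 * PCI9050 write FIFO, and the trailing memcpy moves the remaining
+	 * 200 % 64 = 8 bytes.
+	 */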
+ for ( Index = 0 ; Index < Intervalcount ; Index++ )
+ {
+ memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
+ Dummy = *((volatile unsigned long *)TargetPtr);
+ TargetPtr += PCI_LOAD_INTERVAL;
+ SourcePtr += PCI_LOAD_INTERVAL;
+ }
+
+ memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
+
+} /* End Of mgsl_load_pci_memory() */
+
+static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
+{
+ int i;
+ int linecount;
+ if (xmit)
+ printk("%s tx data:\n",info->device_name);
+ else
+ printk("%s rx data:\n",info->device_name);
+
+ while(count) {
+ if (count > 16)
+ linecount = 16;
+ else
+ linecount = count;
+
+ for(i=0;i<linecount;i++)
+ printk("%02X ",(unsigned char)data[i]);
+ for(;i<17;i++)
+ printk(" ");
+ for(i=0;i<linecount;i++) {
+ if (data[i]>=040 && data[i]<=0176)
+ printk("%c",data[i]);
+ else
+ printk(".");
+ }
+ printk("\n");
+
+ data += linecount;
+ count -= linecount;
+ }
+} /* end of mgsl_trace_block() */
+
+/* mgsl_tx_timeout()
+ *
+ * Called when an HDLC frame transmission times out;
+ * update stats and do tx completion processing.
+ *
+ * Arguments: context pointer to device instance data
+ * Return Value: None
+ */
+static void mgsl_tx_timeout(unsigned long context)
+{
+ struct mgsl_struct *info = (struct mgsl_struct*)context;
+ unsigned long flags;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):mgsl_tx_timeout(%s)\n",
+ __FILE__,__LINE__,info->device_name);
+ if(info->tx_active &&
+ (info->params.mode == MGSL_MODE_HDLC ||
+ info->params.mode == MGSL_MODE_RAW) ) {
+ info->icount.txtimeout++;
+ }
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ info->tx_active = false;
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+
+ if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
+ usc_loopmode_cancel_transmit( info );
+
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount)
+ hdlcdev_tx_done(info);
+ else
+#endif
+ mgsl_bh_transmit(info);
+
+} /* end of mgsl_tx_timeout() */
+
+/* signal that there are no more frames to send, so that
+ * the line is 'released' by echoing RxD to TxD when the current
+ * transmission is complete (or immediately if no tx is in progress).
+ */
+static int mgsl_loopmode_send_done( struct mgsl_struct * info )
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
+ if (info->tx_active)
+ info->loopmode_send_done_requested = true;
+ else
+ usc_loopmode_send_done(info);
+ }
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ return 0;
+}
+
+/* release the line by echoing RxD to TxD
+ * upon completion of a transmit frame
+ */
+static void usc_loopmode_send_done( struct mgsl_struct * info )
+{
+ info->loopmode_send_done_requested = false;
+ /* clear CMR:13 to 0 to start echoing RxData to TxData */
+ info->cmr_value &= ~BIT13;
+ usc_OutReg(info, CMR, info->cmr_value);
+}
+
+/* abort a transmit in progress while in HDLC LoopMode
+ */
+static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
+{
+ /* reset tx dma channel and purge TxFifo */
+ usc_RTCmd( info, RTCmd_PurgeTxFifo );
+ usc_DmaCmd( info, DmaCmd_ResetTxChannel );
+ usc_loopmode_send_done( info );
+}
+
+/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
+ * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
+ * we must clear CMR:13 to begin repeating TxData to RxData
+ */
+static void usc_loopmode_insert_request( struct mgsl_struct * info )
+{
+ info->loopmode_insert_requested = true;
+
+ /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
+ * begin repeating TxData on RxData (complete insertion)
+ */
+ usc_OutReg( info, RICR,
+ (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
+
+ /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
+ info->cmr_value |= BIT13;
+ usc_OutReg(info, CMR, info->cmr_value);
+}
+
+/* return 1 if station is inserted into the loop, otherwise 0
+ */
+static int usc_loopmode_active( struct mgsl_struct * info)
+{
+ return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
+}
+
+#if SYNCLINK_GENERIC_HDLC
+
+/**
+ * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
+ * set encoding and frame check sequence (FCS) options
+ *
+ * dev pointer to network device structure
+ * encoding serial encoding setting
+ * parity FCS setting
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
+{
+ struct mgsl_struct *info = dev_to_port(dev);
+ unsigned char new_encoding;
+ unsigned short new_crctype;
+
+ /* return error if TTY interface open */
+ if (info->port.count)
+ return -EBUSY;
+
+ switch (encoding)
+ {
+ case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
+ case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
+ case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
+ case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
+ case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
+ default: return -EINVAL;
+ }
+
+ switch (parity)
+ {
+ case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
+ case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
+ case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
+ default: return -EINVAL;
+ }
+
+ info->params.encoding = new_encoding;
+ info->params.crc_type = new_crctype;
+
+ /* if network interface up, reprogram hardware */
+ if (info->netcount)
+ mgsl_program_hw(info);
+
+ return 0;
+}
+
+/**
+ * called by generic HDLC layer to send frame
+ *
+ * skb socket buffer containing HDLC frame
+ * dev pointer to network device structure
+ */
+static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mgsl_struct *info = dev_to_port(dev);
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
+
+ /* stop sending until this frame completes */
+ netif_stop_queue(dev);
+
+ /* copy data to device buffers */
+ info->xmit_cnt = skb->len;
+ mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
+
+ /* update network statistics */
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ /* done with socket buffer, so free it */
+ dev_kfree_skb(skb);
+
+ /* save start time for transmit timeout detection */
+ dev->trans_start = jiffies;
+
+ /* start hardware transmitter if necessary */
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ if (!info->tx_active)
+ usc_start_transmitter(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * called by network layer when interface enabled
+ * claim resources and initialize hardware
+ *
+ * dev pointer to network device structure
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_open(struct net_device *dev)
+{
+ struct mgsl_struct *info = dev_to_port(dev);
+ int rc;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
+
+ /* generic HDLC layer open processing */
+ if ((rc = hdlc_open(dev)))
+ return rc;
+
+ /* arbitrate between network and tty opens */
+ spin_lock_irqsave(&info->netlock, flags);
+ if (info->port.count != 0 || info->netcount != 0) {
+ printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
+ spin_unlock_irqrestore(&info->netlock, flags);
+ return -EBUSY;
+ }
+ info->netcount=1;
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ /* claim resources and init adapter */
+ if ((rc = startup(info)) != 0) {
+ spin_lock_irqsave(&info->netlock, flags);
+ info->netcount=0;
+ spin_unlock_irqrestore(&info->netlock, flags);
+ return rc;
+ }
+
+ /* assert DTR and RTS, apply hardware settings */
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ mgsl_program_hw(info);
+
+ /* enable network layer transmit */
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+
+ /* inform generic HDLC layer of current DCD status */
+ spin_lock_irqsave(&info->irq_spinlock, flags);
+ usc_get_serial_signals(info);
+ spin_unlock_irqrestore(&info->irq_spinlock, flags);
+ if (info->serial_signals & SerialSignal_DCD)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ return 0;
+}
+
+/**
+ * called by network layer when interface is disabled
+ * shutdown hardware and release resources
+ *
+ * dev pointer to network device structure
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_close(struct net_device *dev)
+{
+ struct mgsl_struct *info = dev_to_port(dev);
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
+
+ netif_stop_queue(dev);
+
+ /* shutdown adapter and release resources */
+ shutdown(info);
+
+ hdlc_close(dev);
+
+ spin_lock_irqsave(&info->netlock, flags);
+ info->netcount=0;
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ return 0;
+}
+
+/**
+ * called by network layer to process IOCTL call to network device
+ *
+ * dev pointer to network device structure
+ * ifr pointer to network interface request structure
+ * cmd IOCTL command code
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ struct mgsl_struct *info = dev_to_port(dev);
+ unsigned int flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
+
+ /* return error if TTY interface open */
+ if (info->port.count)
+ return -EBUSY;
+
+ if (cmd != SIOCWANDEV)
+ return hdlc_ioctl(dev, ifr, cmd);
+
+ switch(ifr->ifr_settings.type) {
+ case IF_GET_IFACE: /* return current sync_serial_settings */
+
+ ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+
+ flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
+
+ switch (flags){
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
+ case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
+ default: new_line.clock_type = CLOCK_DEFAULT;
+ }
+
+ new_line.clock_rate = info->params.clock_speed;
+ new_line.loopback = info->params.loopback ? 1:0;
+
+ if (copy_to_user(line, &new_line, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
+
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&new_line, line, size))
+ return -EFAULT;
+
+ switch (new_line.clock_type)
+ {
+ case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
+ case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
+ case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
+ case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
+ case CLOCK_DEFAULT: flags = info->params.flags &
+ (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
+ default: return -EINVAL;
+ }
+
+ if (new_line.loopback != 0 && new_line.loopback != 1)
+ return -EINVAL;
+
+ info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
+ info->params.flags |= flags;
+
+ info->params.loopback = new_line.loopback;
+
+ if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
+ info->params.clock_speed = new_line.clock_rate;
+ else
+ info->params.clock_speed = 0;
+
+ /* if network interface up, reprogram hardware */
+ if (info->netcount)
+ mgsl_program_hw(info);
+ return 0;
+
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+/**
+ * called by network layer when transmit timeout is detected
+ *
+ * dev pointer to network device structure
+ */
+static void hdlcdev_tx_timeout(struct net_device *dev)
+{
+ struct mgsl_struct *info = dev_to_port(dev);
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("hdlcdev_tx_timeout(%s)\n",dev->name);
+
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+
+ spin_lock_irqsave(&info->irq_spinlock,flags);
+ usc_stop_transmitter(info);
+ spin_unlock_irqrestore(&info->irq_spinlock,flags);
+
+ netif_wake_queue(dev);
+}
+
+/**
+ * called by device driver when transmit completes
+ * reenable network layer transmit if stopped
+ *
+ * info pointer to device instance information
+ */
+static void hdlcdev_tx_done(struct mgsl_struct *info)
+{
+ if (netif_queue_stopped(info->netdev))
+ netif_wake_queue(info->netdev);
+}
+
+/**
+ * called by device driver when frame received
+ * pass frame to network layer
+ *
+ * info pointer to device instance information
+ * buf		pointer to buffer containing frame data
+ * size count of data bytes in buf
+ */
+static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
+{
+ struct sk_buff *skb = dev_alloc_skb(size);
+ struct net_device *dev = info->netdev;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("hdlcdev_rx(%s)\n", dev->name);
+
+ if (skb == NULL) {
+ printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ return;
+ }
+
+ memcpy(skb_put(skb, size), buf, size);
+
+ skb->protocol = hdlc_type_trans(skb, dev);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += size;
+
+ netif_rx(skb);
+}
+
+static const struct net_device_ops hdlcdev_ops = {
+ .ndo_open = hdlcdev_open,
+ .ndo_stop = hdlcdev_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = hdlcdev_ioctl,
+ .ndo_tx_timeout = hdlcdev_tx_timeout,
+};
+
+/**
+ * called by device driver when adding device instance
+ * do generic HDLC initialization
+ *
+ * info pointer to device instance information
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_init(struct mgsl_struct *info)
+{
+ int rc;
+ struct net_device *dev;
+ hdlc_device *hdlc;
+
+ /* allocate and initialize network and HDLC layer objects */
+
+ if (!(dev = alloc_hdlcdev(info))) {
+ printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
+ return -ENOMEM;
+ }
+
+ /* for network layer reporting purposes only */
+ dev->base_addr = info->io_base;
+ dev->irq = info->irq_level;
+ dev->dma = info->dma_level;
+
+ /* network layer callbacks and settings */
+ dev->netdev_ops = &hdlcdev_ops;
+ dev->watchdog_timeo = 10 * HZ;
+ dev->tx_queue_len = 50;
+
+ /* generic HDLC layer callbacks and settings */
+ hdlc = dev_to_hdlc(dev);
+ hdlc->attach = hdlcdev_attach;
+ hdlc->xmit = hdlcdev_xmit;
+
+ /* register objects with HDLC layer */
+ if ((rc = register_hdlc_device(dev))) {
+ printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
+ free_netdev(dev);
+ return rc;
+ }
+
+ info->netdev = dev;
+ return 0;
+}
+
+/**
+ * called by device driver when removing device instance
+ * do generic HDLC cleanup
+ *
+ * info pointer to device instance information
+ */
+static void hdlcdev_exit(struct mgsl_struct *info)
+{
+ unregister_hdlc_device(info->netdev);
+ free_netdev(info->netdev);
+ info->netdev = NULL;
+}
+
+#endif /* SYNCLINK_GENERIC_HDLC */
+
+
+static int __devinit synclink_init_one (struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ struct mgsl_struct *info;
+
+ if (pci_enable_device(dev)) {
+ printk("error enabling pci device %p\n", dev);
+ return -EIO;
+ }
+
+ if (!(info = mgsl_allocate_device())) {
+ printk("can't allocate device instance data.\n");
+ return -EIO;
+ }
+
+ /* Copy user configuration info to device instance data */
+
+ info->io_base = pci_resource_start(dev, 2);
+ info->irq_level = dev->irq;
+ info->phys_memory_base = pci_resource_start(dev, 3);
+
+	/* Because ioremap only works on page boundaries we must map
+ * a larger area than is actually implemented for the LCR
+ * memory range. We map a full page starting at the page boundary.
+ */
+ info->phys_lcr_base = pci_resource_start(dev, 0);
+ info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
+ info->phys_lcr_base &= ~(PAGE_SIZE-1);
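+	/* lcr_offset preserves the register offset within the mapped page
+	 * so the LCR registers can still be located after remapping
+	 */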
+
+ info->bus_type = MGSL_BUS_TYPE_PCI;
+ info->io_addr_size = 8;
+ info->irq_flags = IRQF_SHARED;
+
+ if (dev->device == 0x0210) {
+ /* Version 1 PCI9030 based universal PCI adapter */
+ info->misc_ctrl_value = 0x007c4080;
+ info->hw_version = 1;
+ } else {
+ /* Version 0 PCI9050 based 5V PCI adapter
+ * A PCI9050 bug prevents reading LCR registers if
+ * LCR base address bit 7 is set. Maintain shadow
+ * value so we can write to LCR misc control reg.
+ */
+ info->misc_ctrl_value = 0x087e4546;
+ info->hw_version = 0;
+ }
+
+ mgsl_add_device(info);
+
+ return 0;
+}
+
+static void __devexit synclink_remove_one (struct pci_dev *dev)
+{
+}
+
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
new file mode 100644
index 00000000..18b48cd3
--- /dev/null
+++ b/drivers/tty/synclink_gt.c
@@ -0,0 +1,5161 @@
+/*
+ * Device driver for Microgate SyncLink GT serial adapters.
+ *
+ * written by Paul Fulghum for Microgate Corporation
+ * paulkf@microgate.com
+ *
+ * Microgate and SyncLink are trademarks of Microgate Corporation
+ *
+ * This code is released under the GNU General Public License (GPL)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * DEBUG OUTPUT DEFINITIONS
+ *
+ * uncomment lines below to enable specific types of debug output
+ *
+ * DBGINFO information - most verbose output
+ * DBGERR serious errors
+ * DBGBH bottom half service routine debugging
+ * DBGISR interrupt service routine debugging
+ * DBGDATA output receive and transmit data
+ * DBGTBUF output transmit DMA buffers and registers
+ * DBGRBUF output receive DMA buffers and registers
+ */
+
+#define DBGINFO(fmt) if (debug_level >= DEBUG_LEVEL_INFO) printk fmt
+#define DBGERR(fmt) if (debug_level >= DEBUG_LEVEL_ERROR) printk fmt
+#define DBGBH(fmt) if (debug_level >= DEBUG_LEVEL_BH) printk fmt
+#define DBGISR(fmt) if (debug_level >= DEBUG_LEVEL_ISR) printk fmt
+#define DBGDATA(info, buf, size, label) if (debug_level >= DEBUG_LEVEL_DATA) trace_block((info), (buf), (size), (label))
+/*#define DBGTBUF(info) dump_tbufs(info)*/
+/*#define DBGRBUF(info) dump_rbufs(info)*/
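+
+/* callers wrap arguments in an extra set of parentheses, for example
+ * DBGINFO(("%s open\n", info->device_name)), so that a full printk()
+ * argument list can pass through the single macro parameter
+ */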
+
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/ioctl.h>
+#include <linux/termios.h>
+#include <linux/bitops.h>
+#include <linux/workqueue.h>
+#include <linux/hdlc.h>
+#include <linux/synclink.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/types.h>
+#include <asm/uaccess.h>
+
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
+#endif
+
+/*
+ * module identification
+ */
+static char *driver_name = "SyncLink GT";
+static char *tty_driver_name = "synclink_gt";
+static char *tty_dev_prefix = "ttySLG";
+MODULE_LICENSE("GPL");
+#define MGSL_MAGIC 0x5401
+#define MAX_DEVICES 32
+
+static struct pci_device_id pci_table[] = {
+ {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT2_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT4_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_MICROGATE, SYNCLINK_AC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {0,}, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_table);
+
+static int init_one(struct pci_dev *dev,const struct pci_device_id *ent);
+static void remove_one(struct pci_dev *dev);
+static struct pci_driver pci_driver = {
+ .name = "synclink_gt",
+ .id_table = pci_table,
+ .probe = init_one,
+ .remove = __devexit_p(remove_one),
+};
+
+static bool pci_registered;
+
+/*
+ * module configuration and status
+ */
+static struct slgt_info *slgt_device_list;
+static int slgt_device_count;
+
+static int ttymajor;
+static int debug_level;
+static int maxframe[MAX_DEVICES];
+
+module_param(ttymajor, int, 0);
+module_param(debug_level, int, 0);
+module_param_array(maxframe, int, NULL, 0);
+
+MODULE_PARM_DESC(ttymajor, "TTY major device number override: 0=auto assigned");
+MODULE_PARM_DESC(debug_level, "Debug syslog output: 0=disabled, 1 to 5=increasing detail");
+MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)");
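+
+/* illustrative module load (values are examples only):
+ *   modprobe synclink_gt maxframe=4096,8192 debug_level=3
+ * maxframe takes one comma separated value per device instance
+ */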
+
+/*
+ * tty support and callbacks
+ */
+static struct tty_driver *serial_driver;
+
+static int open(struct tty_struct *tty, struct file * filp);
+static void close(struct tty_struct *tty, struct file * filp);
+static void hangup(struct tty_struct *tty);
+static void set_termios(struct tty_struct *tty, struct ktermios *old_termios);
+
+static int write(struct tty_struct *tty, const unsigned char *buf, int count);
+static int put_char(struct tty_struct *tty, unsigned char ch);
+static void send_xchar(struct tty_struct *tty, char ch);
+static void wait_until_sent(struct tty_struct *tty, int timeout);
+static int write_room(struct tty_struct *tty);
+static void flush_chars(struct tty_struct *tty);
+static void flush_buffer(struct tty_struct *tty);
+static void tx_hold(struct tty_struct *tty);
+static void tx_release(struct tty_struct *tty);
+
+static int ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
+static int chars_in_buffer(struct tty_struct *tty);
+static void throttle(struct tty_struct * tty);
+static void unthrottle(struct tty_struct * tty);
+static int set_break(struct tty_struct *tty, int break_state);
+
+/*
+ * generic HDLC support and callbacks
+ */
+#if SYNCLINK_GENERIC_HDLC
+#define dev_to_port(D) (dev_to_hdlc(D)->priv)
+static void hdlcdev_tx_done(struct slgt_info *info);
+static void hdlcdev_rx(struct slgt_info *info, char *buf, int size);
+static int hdlcdev_init(struct slgt_info *info);
+static void hdlcdev_exit(struct slgt_info *info);
+#endif
+
+
+/*
+ * device specific structures, macros and functions
+ */
+
+#define SLGT_MAX_PORTS 4
+#define SLGT_REG_SIZE 256
+
+/*
+ * conditional wait facility
+ */
+struct cond_wait {
+ struct cond_wait *next;
+ wait_queue_head_t q;
+ wait_queue_t wait;
+ unsigned int data;
+};
+static void init_cond_wait(struct cond_wait *w, unsigned int data);
+static void add_cond_wait(struct cond_wait **head, struct cond_wait *w);
+static void remove_cond_wait(struct cond_wait **head, struct cond_wait *w);
+static void flush_cond_wait(struct cond_wait **head);
+
+/*
+ * DMA buffer descriptor and access macros
+ */
+struct slgt_desc
+{
+ __le16 count;
+ __le16 status;
+ __le32 pbuf; /* physical address of data buffer */
+ __le32 next; /* physical address of next descriptor */
+
+	/* driver bookkeeping */
+ char *buf; /* virtual address of data buffer */
+ unsigned int pdesc; /* physical address of this descriptor */
+ dma_addr_t buf_dma_addr;
+ unsigned short buf_count;
+};
+
+#define set_desc_buffer(a,b) (a).pbuf = cpu_to_le32((unsigned int)(b))
+#define set_desc_next(a,b) (a).next = cpu_to_le32((unsigned int)(b))
+#define set_desc_count(a,b) (a).count = cpu_to_le16((unsigned short)(b))
+#define set_desc_eof(a,b) (a).status = cpu_to_le16((b) ? (le16_to_cpu((a).status) | BIT0) : (le16_to_cpu((a).status) & ~BIT0))
+#define set_desc_status(a, b) (a).status = cpu_to_le16((unsigned short)(b))
+#define desc_count(a) (le16_to_cpu((a).count))
+#define desc_status(a) (le16_to_cpu((a).status))
+#define desc_complete(a) (le16_to_cpu((a).status) & BIT15)
+#define desc_eof(a) (le16_to_cpu((a).status) & BIT2)
+#define desc_crc_error(a) (le16_to_cpu((a).status) & BIT1)
+#define desc_abort(a) (le16_to_cpu((a).status) & BIT0)
+#define desc_residue(a) ((le16_to_cpu((a).status) & 0x38) >> 3)
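+
+/* illustrative use of the accessors when scanning the receive ring:
+ *   if (desc_complete(info->rbufs[i]) && !desc_crc_error(info->rbufs[i]))
+ *           count = desc_count(info->rbufs[i]);
+ */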
+
+struct _input_signal_events {
+ int ri_up;
+ int ri_down;
+ int dsr_up;
+ int dsr_down;
+ int dcd_up;
+ int dcd_down;
+ int cts_up;
+ int cts_down;
+};
+
+/*
+ * device instance data structure
+ */
+struct slgt_info {
+ void *if_ptr; /* General purpose pointer (used by SPPP) */
+ struct tty_port port;
+
+ struct slgt_info *next_device; /* device list link */
+
+ int magic;
+
+ char device_name[25];
+ struct pci_dev *pdev;
+
+ int port_count; /* count of ports on adapter */
+ int adapter_num; /* adapter instance number */
+ int port_num; /* port instance number */
+
+ /* array of pointers to port contexts on this adapter */
+ struct slgt_info *port_array[SLGT_MAX_PORTS];
+
+ int line; /* tty line instance number */
+
+ struct mgsl_icount icount;
+
+ int timeout;
+ int x_char; /* xon/xoff character */
+ unsigned int read_status_mask;
+ unsigned int ignore_status_mask;
+
+ wait_queue_head_t status_event_wait_q;
+ wait_queue_head_t event_wait_q;
+ struct timer_list tx_timer;
+ struct timer_list rx_timer;
+
+ unsigned int gpio_present;
+ struct cond_wait *gpio_wait_q;
+
+ spinlock_t lock; /* spinlock for synchronizing with ISR */
+
+ struct work_struct task;
+ u32 pending_bh;
+ bool bh_requested;
+ bool bh_running;
+
+ int isr_overflow;
+ bool irq_requested; /* true if IRQ requested */
+ bool irq_occurred; /* for diagnostics use */
+
+ /* device configuration */
+
+ unsigned int bus_type;
+ unsigned int irq_level;
+ unsigned long irq_flags;
+
+ unsigned char __iomem * reg_addr; /* memory mapped registers address */
+ u32 phys_reg_addr;
+ bool reg_addr_requested;
+
+ MGSL_PARAMS params; /* communications parameters */
+ u32 idle_mode;
+ u32 max_frame_size; /* as set by device config */
+
+ unsigned int rbuf_fill_level;
+ unsigned int rx_pio;
+ unsigned int if_mode;
+ unsigned int base_clock;
+ unsigned int xsync;
+ unsigned int xctrl;
+
+ /* device status */
+
+ bool rx_enabled;
+ bool rx_restart;
+
+ bool tx_enabled;
+ bool tx_active;
+
+ unsigned char signals; /* serial signal states */
+ int init_error; /* initialization error */
+
+ unsigned char *tx_buf;
+ int tx_count;
+
+ char flag_buf[MAX_ASYNC_BUFFER_SIZE];
+ char char_buf[MAX_ASYNC_BUFFER_SIZE];
+ bool drop_rts_on_tx_done;
+ struct _input_signal_events input_signal_events;
+
+ int dcd_chkcount; /* check counts to prevent */
+ int cts_chkcount; /* too many IRQs if a signal */
+ int dsr_chkcount; /* is floating */
+ int ri_chkcount;
+
+ char *bufs; /* virtual address of DMA buffer lists */
+ dma_addr_t bufs_dma_addr; /* physical address of buffer descriptors */
+
+ unsigned int rbuf_count;
+ struct slgt_desc *rbufs;
+ unsigned int rbuf_current;
+ unsigned int rbuf_index;
+ unsigned int rbuf_fill_index;
+ unsigned short rbuf_fill_count;
+
+ unsigned int tbuf_count;
+ struct slgt_desc *tbufs;
+ unsigned int tbuf_current;
+ unsigned int tbuf_start;
+
+ unsigned char *tmp_rbuf;
+ unsigned int tmp_rbuf_count;
+
+ /* SPPP/Cisco HDLC device parts */
+
+ int netcount;
+ spinlock_t netlock;
+#if SYNCLINK_GENERIC_HDLC
+ struct net_device *netdev;
+#endif
+
+};
+
+static MGSL_PARAMS default_params = {
+ .mode = MGSL_MODE_HDLC,
+ .loopback = 0,
+ .flags = HDLC_FLAG_UNDERRUN_ABORT15,
+ .encoding = HDLC_ENCODING_NRZI_SPACE,
+ .clock_speed = 0,
+ .addr_filter = 0xff,
+ .crc_type = HDLC_CRC_16_CCITT,
+ .preamble_length = HDLC_PREAMBLE_LENGTH_8BITS,
+ .preamble = HDLC_PREAMBLE_PATTERN_NONE,
+ .data_rate = 9600,
+ .data_bits = 8,
+ .stop_bits = 1,
+ .parity = ASYNC_PARITY_NONE
+};
+
+
+#define BH_RECEIVE 1
+#define BH_TRANSMIT 2
+#define BH_STATUS 4
+#define IO_PIN_SHUTDOWN_LIMIT 100
+
+#define DMABUFSIZE 256
+#define DESC_LIST_SIZE 4096
+
+#define MASK_PARITY BIT1
+#define MASK_FRAMING BIT0
+#define MASK_BREAK BIT14
+#define MASK_OVERRUN BIT4
+
+#define GSR 0x00 /* global status */
+#define JCR 0x04 /* JTAG control */
+#define IODR 0x08 /* GPIO direction */
+#define IOER 0x0c /* GPIO interrupt enable */
+#define IOVR 0x10 /* GPIO value */
+#define IOSR 0x14 /* GPIO interrupt status */
+#define TDR 0x80 /* tx data */
+#define RDR 0x80 /* rx data */
+#define TCR 0x82 /* tx control */
+#define TIR 0x84 /* tx idle */
+#define TPR 0x85 /* tx preamble */
+#define RCR 0x86 /* rx control */
+#define VCR 0x88 /* V.24 control */
+#define CCR 0x89 /* clock control */
+#define BDR 0x8a /* baud divisor */
+#define SCR 0x8c /* serial control */
+#define SSR 0x8e /* serial status */
+#define RDCSR 0x90 /* rx DMA control/status */
+#define TDCSR 0x94 /* tx DMA control/status */
+#define RDDAR 0x98 /* rx DMA descriptor address */
+#define TDDAR 0x9c /* tx DMA descriptor address */
+#define XSR 0x40 /* extended sync pattern */
+#define XCR 0x44 /* extended control */
+
+#define RXIDLE BIT14
+#define RXBREAK BIT14
+#define IRQ_TXDATA BIT13
+#define IRQ_TXIDLE BIT12
+#define IRQ_TXUNDER BIT11 /* HDLC */
+#define IRQ_RXDATA BIT10
+#define IRQ_RXIDLE BIT9 /* HDLC */
+#define IRQ_RXBREAK BIT9 /* async */
+#define IRQ_RXOVER BIT8
+#define IRQ_DSR BIT7
+#define IRQ_CTS BIT6
+#define IRQ_DCD BIT5
+#define IRQ_RI BIT4
+#define IRQ_ALL 0x3ff0
+#define IRQ_MASTER BIT0
+
+#define slgt_irq_on(info, mask) \
+ wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) | (mask)))
+#define slgt_irq_off(info, mask) \
+ wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) & ~(mask)))
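+
+/* example (illustrative only): unmask receive data and idle interrupts
+ *   slgt_irq_on(info, IRQ_RXDATA + IRQ_RXIDLE);
+ */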
+
+static __u8 rd_reg8(struct slgt_info *info, unsigned int addr);
+static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value);
+static __u16 rd_reg16(struct slgt_info *info, unsigned int addr);
+static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value);
+static __u32 rd_reg32(struct slgt_info *info, unsigned int addr);
+static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value);
+
+static void msc_set_vcr(struct slgt_info *info);
+
+static int startup(struct slgt_info *info);
+static int block_til_ready(struct tty_struct *tty, struct file * filp,struct slgt_info *info);
+static void shutdown(struct slgt_info *info);
+static void program_hw(struct slgt_info *info);
+static void change_params(struct slgt_info *info);
+
+static int register_test(struct slgt_info *info);
+static int irq_test(struct slgt_info *info);
+static int loopback_test(struct slgt_info *info);
+static int adapter_test(struct slgt_info *info);
+
+static void reset_adapter(struct slgt_info *info);
+static void reset_port(struct slgt_info *info);
+static void async_mode(struct slgt_info *info);
+static void sync_mode(struct slgt_info *info);
+
+static void rx_stop(struct slgt_info *info);
+static void rx_start(struct slgt_info *info);
+static void reset_rbufs(struct slgt_info *info);
+static void free_rbufs(struct slgt_info *info, unsigned int first, unsigned int last);
+static void rdma_reset(struct slgt_info *info);
+static bool rx_get_frame(struct slgt_info *info);
+static bool rx_get_buf(struct slgt_info *info);
+
+static void tx_start(struct slgt_info *info);
+static void tx_stop(struct slgt_info *info);
+static void tx_set_idle(struct slgt_info *info);
+static unsigned int free_tbuf_count(struct slgt_info *info);
+static unsigned int tbuf_bytes(struct slgt_info *info);
+static void reset_tbufs(struct slgt_info *info);
+static void tdma_reset(struct slgt_info *info);
+static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
+
+static void get_signals(struct slgt_info *info);
+static void set_signals(struct slgt_info *info);
+static void enable_loopback(struct slgt_info *info);
+static void set_rate(struct slgt_info *info, u32 data_rate);
+
+static int bh_action(struct slgt_info *info);
+static void bh_handler(struct work_struct *work);
+static void bh_transmit(struct slgt_info *info);
+static void isr_serial(struct slgt_info *info);
+static void isr_rdma(struct slgt_info *info);
+static void isr_txeom(struct slgt_info *info, unsigned short status);
+static void isr_tdma(struct slgt_info *info);
+
+static int alloc_dma_bufs(struct slgt_info *info);
+static void free_dma_bufs(struct slgt_info *info);
+static int alloc_desc(struct slgt_info *info);
+static void free_desc(struct slgt_info *info);
+static int alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count);
+static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count);
+
+static int alloc_tmp_rbuf(struct slgt_info *info);
+static void free_tmp_rbuf(struct slgt_info *info);
+
+static void tx_timeout(unsigned long context);
+static void rx_timeout(unsigned long context);
+
+/*
+ * ioctl handlers
+ */
+static int get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount);
+static int get_params(struct slgt_info *info, MGSL_PARAMS __user *params);
+static int set_params(struct slgt_info *info, MGSL_PARAMS __user *params);
+static int get_txidle(struct slgt_info *info, int __user *idle_mode);
+static int set_txidle(struct slgt_info *info, int idle_mode);
+static int tx_enable(struct slgt_info *info, int enable);
+static int tx_abort(struct slgt_info *info);
+static int rx_enable(struct slgt_info *info, int enable);
+static int modem_input_wait(struct slgt_info *info,int arg);
+static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr);
+static int tiocmget(struct tty_struct *tty);
+static int tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear);
+static int set_break(struct tty_struct *tty, int break_state);
+static int get_interface(struct slgt_info *info, int __user *if_mode);
+static int set_interface(struct slgt_info *info, int if_mode);
+static int set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
+static int get_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
+static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
+static int get_xsync(struct slgt_info *info, int __user *if_mode);
+static int set_xsync(struct slgt_info *info, int if_mode);
+static int get_xctrl(struct slgt_info *info, int __user *if_mode);
+static int set_xctrl(struct slgt_info *info, int if_mode);
+
+/*
+ * driver functions
+ */
+static void add_device(struct slgt_info *info);
+static void device_init(int adapter_num, struct pci_dev *pdev);
+static int claim_resources(struct slgt_info *info);
+static void release_resources(struct slgt_info *info);
+
+/*
+ * DEBUG OUTPUT CODE
+ */
+#ifndef DBGINFO
+#define DBGINFO(fmt)
+#endif
+#ifndef DBGERR
+#define DBGERR(fmt)
+#endif
+#ifndef DBGBH
+#define DBGBH(fmt)
+#endif
+#ifndef DBGISR
+#define DBGISR(fmt)
+#endif
+
+#ifdef DBGDATA
+static void trace_block(struct slgt_info *info, const char *data, int count, const char *label)
+{
+ int i;
+ int linecount;
+ printk("%s %s data:\n",info->device_name, label);
+ while(count) {
+ linecount = (count > 16) ? 16 : count;
+ for(i=0; i < linecount; i++)
+ printk("%02X ",(unsigned char)data[i]);
+ for(;i<17;i++)
+ printk(" ");
+ for(i=0;i<linecount;i++) {
+ if (data[i]>=040 && data[i]<=0176)
+ printk("%c",data[i]);
+ else
+ printk(".");
+ }
+ printk("\n");
+ data += linecount;
+ count -= linecount;
+ }
+}
+#else
+#define DBGDATA(info, buf, size, label)
+#endif
+
+#ifdef DBGTBUF
+static void dump_tbufs(struct slgt_info *info)
+{
+ int i;
+ printk("tbuf_current=%d\n", info->tbuf_current);
+ for (i=0 ; i < info->tbuf_count ; i++) {
+ printk("%d: count=%04X status=%04X\n",
+ i, le16_to_cpu(info->tbufs[i].count), le16_to_cpu(info->tbufs[i].status));
+ }
+}
+#else
+#define DBGTBUF(info)
+#endif
+
+#ifdef DBGRBUF
+static void dump_rbufs(struct slgt_info *info)
+{
+ int i;
+ printk("rbuf_current=%d\n", info->rbuf_current);
+ for (i=0 ; i < info->rbuf_count ; i++) {
+ printk("%d: count=%04X status=%04X\n",
+ i, le16_to_cpu(info->rbufs[i].count), le16_to_cpu(info->rbufs[i].status));
+ }
+}
+#else
+#define DBGRBUF(info)
+#endif
+
+static inline int sanity_check(struct slgt_info *info, char *devname, const char *name)
+{
+#ifdef SANITY_CHECK
+ if (!info) {
+ printk("null struct slgt_info for (%s) in %s\n", devname, name);
+ return 1;
+ }
+ if (info->magic != MGSL_MAGIC) {
+ printk("bad magic number struct slgt_info (%s) in %s\n", devname, name);
+ return 1;
+ }
+#else
+ if (!info)
+ return 1;
+#endif
+ return 0;
+}
+
+/**
+ * line discipline callback wrappers
+ *
+ * The wrappers maintain line discipline references
+ * while calling into the line discipline.
+ *
+ * ldisc_receive_buf - pass receive data to line discipline
+ */
+static void ldisc_receive_buf(struct tty_struct *tty,
+ const __u8 *data, char *flags, int count)
+{
+ struct tty_ldisc *ld;
+ if (!tty)
+ return;
+ ld = tty_ldisc_ref(tty);
+ if (ld) {
+ if (ld->ops->receive_buf)
+ ld->ops->receive_buf(tty, data, flags, count);
+ tty_ldisc_deref(ld);
+ }
+}
+
+/* tty callbacks */
+
+static int open(struct tty_struct *tty, struct file *filp)
+{
+ struct slgt_info *info;
+ int retval, line;
+ unsigned long flags;
+
+ line = tty->index;
+ if ((line < 0) || (line >= slgt_device_count)) {
+ DBGERR(("%s: open with invalid line #%d.\n", driver_name, line));
+ return -ENODEV;
+ }
+
+ info = slgt_device_list;
+ while(info && info->line != line)
+ info = info->next_device;
+ if (sanity_check(info, tty->name, "open"))
+ return -ENODEV;
+ if (info->init_error) {
+ DBGERR(("%s init error=%d\n", info->device_name, info->init_error));
+ return -ENODEV;
+ }
+
+ tty->driver_data = info;
+ info->port.tty = tty;
+
+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
+
+ /* If port is closing, signal caller to try again */
+ if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
+ if (info->port.flags & ASYNC_CLOSING)
+ interruptible_sleep_on(&info->port.close_wait);
+ retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS);
+ goto cleanup;
+ }
+
+ mutex_lock(&info->port.mutex);
+ info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
+ spin_lock_irqsave(&info->netlock, flags);
+ if (info->netcount) {
+ retval = -EBUSY;
+ spin_unlock_irqrestore(&info->netlock, flags);
+ mutex_unlock(&info->port.mutex);
+ goto cleanup;
+ }
+ info->port.count++;
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ if (info->port.count == 1) {
+ /* 1st open on this device, init hardware */
+ retval = startup(info);
+ if (retval < 0) {
+ mutex_unlock(&info->port.mutex);
+ goto cleanup;
+ }
+ }
+ mutex_unlock(&info->port.mutex);
+ retval = block_til_ready(tty, filp, info);
+ if (retval) {
+ DBGINFO(("%s block_til_ready rc=%d\n", info->device_name, retval));
+ goto cleanup;
+ }
+
+ retval = 0;
+
+cleanup:
+ if (retval) {
+ if (tty->count == 1)
+ info->port.tty = NULL; /* tty layer will release tty struct */
+ if(info->port.count)
+ info->port.count--;
+ }
+
+ DBGINFO(("%s open rc=%d\n", info->device_name, retval));
+ return retval;
+}
+
+static void close(struct tty_struct *tty, struct file *filp)
+{
+ struct slgt_info *info = tty->driver_data;
+
+ if (sanity_check(info, tty->name, "close"))
+ return;
+ DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
+
+ if (tty_port_close_start(&info->port, tty, filp) == 0)
+ goto cleanup;
+
+ mutex_lock(&info->port.mutex);
+ if (info->port.flags & ASYNC_INITIALIZED)
+ wait_until_sent(tty, info->timeout);
+ flush_buffer(tty);
+ tty_ldisc_flush(tty);
+
+ shutdown(info);
+ mutex_unlock(&info->port.mutex);
+
+ tty_port_close_end(&info->port, tty);
+ info->port.tty = NULL;
+cleanup:
+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
+}
+
+static void hangup(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "hangup"))
+ return;
+ DBGINFO(("%s hangup\n", info->device_name));
+
+ flush_buffer(tty);
+
+ mutex_lock(&info->port.mutex);
+ shutdown(info);
+
+ spin_lock_irqsave(&info->port.lock, flags);
+ info->port.count = 0;
+ info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
+ info->port.tty = NULL;
+ spin_unlock_irqrestore(&info->port.lock, flags);
+ mutex_unlock(&info->port.mutex);
+
+ wake_up_interruptible(&info->port.open_wait);
+}
+
+static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ DBGINFO(("%s set_termios\n", tty->driver->name));
+
+ change_params(info);
+
+ /* Handle transition to B0 status */
+ if (old_termios->c_cflag & CBAUD &&
+ !(tty->termios->c_cflag & CBAUD)) {
+ info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ spin_lock_irqsave(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+
+ /* Handle transition away from B0 status */
+ if (!(old_termios->c_cflag & CBAUD) &&
+ tty->termios->c_cflag & CBAUD) {
+ info->signals |= SerialSignal_DTR;
+ if (!(tty->termios->c_cflag & CRTSCTS) ||
+ !test_bit(TTY_THROTTLED, &tty->flags)) {
+ info->signals |= SerialSignal_RTS;
+ }
+ spin_lock_irqsave(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+
+ /* Handle turning off CRTSCTS */
+ if (old_termios->c_cflag & CRTSCTS &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ tx_release(tty);
+ }
+}
+
+static void update_tx_timer(struct slgt_info *info)
+{
+ /*
+ * use worst case speed of 1200bps to calculate transmit timeout
+ * based on data in buffers (tbuf_bytes) and FIFO (128 bytes)
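+	 * (roughly 7 msec per buffered byte at 1200bps, plus ~1000 msec for the FIFO)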
+ */
+ if (info->params.mode == MGSL_MODE_HDLC) {
+ int timeout = (tbuf_bytes(info) * 7) + 1000;
+ mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(timeout));
+ }
+}
+
+static int write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ int ret = 0;
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "write"))
+ return -EIO;
+
+ DBGINFO(("%s write count=%d\n", info->device_name, count));
+
+ if (!info->tx_buf || (count > info->max_frame_size))
+ return -EIO;
+
+ if (!count || tty->stopped || tty->hw_stopped)
+ return 0;
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ if (info->tx_count) {
+ /* send accumulated data from send_char() */
+ if (!tx_load(info, info->tx_buf, info->tx_count))
+ goto cleanup;
+ info->tx_count = 0;
+ }
+
+ if (tx_load(info, buf, count))
+ ret = count;
+
+cleanup:
+ spin_unlock_irqrestore(&info->lock, flags);
+ DBGINFO(("%s write rc=%d\n", info->device_name, ret));
+ return ret;
+}
+
+static int put_char(struct tty_struct *tty, unsigned char ch)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+ int ret = 0;
+
+ if (sanity_check(info, tty->name, "put_char"))
+ return 0;
+ DBGINFO(("%s put_char(%d)\n", info->device_name, ch));
+ if (!info->tx_buf)
+ return 0;
+ spin_lock_irqsave(&info->lock,flags);
+ if (info->tx_count < info->max_frame_size) {
+ info->tx_buf[info->tx_count++] = ch;
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+ return ret;
+}
+
+static void send_xchar(struct tty_struct *tty, char ch)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "send_xchar"))
+ return;
+ DBGINFO(("%s send_xchar(%d)\n", info->device_name, ch));
+ info->x_char = ch;
+ if (ch) {
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_enabled)
+ tx_start(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+}
+
+static void wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long orig_jiffies, char_time;
+
+ if (!info )
+ return;
+ if (sanity_check(info, tty->name, "wait_until_sent"))
+ return;
+ DBGINFO(("%s wait_until_sent entry\n", info->device_name));
+ if (!(info->port.flags & ASYNC_INITIALIZED))
+ goto exit;
+
+ orig_jiffies = jiffies;
+
+ /* Set check interval to 1/5 of estimated time to
+ * send a character, and make it at least 1. The check
+ * interval should also be less than the timeout.
+ * Note: use tight timings here to satisfy the NIST-PCTS.
+ */
+
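+	/* timeout/(32 * 5): dividing by 32 approximates one character time
+	 * (assuming info->timeout is sized for a 32 character buffer) and
+	 * the further /5 gives the 1/5 check interval described above
+	 */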
+ if (info->params.data_rate) {
+ char_time = info->timeout/(32 * 5);
+ if (!char_time)
+ char_time++;
+ } else
+ char_time = 1;
+
+ if (timeout)
+ char_time = min_t(unsigned long, char_time, timeout);
+
+ while (info->tx_active) {
+ msleep_interruptible(jiffies_to_msecs(char_time));
+ if (signal_pending(current))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ break;
+ }
+exit:
+ DBGINFO(("%s wait_until_sent exit\n", info->device_name));
+}
+
+static int write_room(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ int ret;
+
+ if (sanity_check(info, tty->name, "write_room"))
+ return 0;
+ ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE;
+ DBGINFO(("%s write_room=%d\n", info->device_name, ret));
+ return ret;
+}
+
+static void flush_chars(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "flush_chars"))
+ return;
+ DBGINFO(("%s flush_chars entry tx_count=%d\n", info->device_name, info->tx_count));
+
+ if (info->tx_count <= 0 || tty->stopped ||
+ tty->hw_stopped || !info->tx_buf)
+ return;
+
+ DBGINFO(("%s flush_chars start transmit\n", info->device_name));
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
+ info->tx_count = 0;
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+static void flush_buffer(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "flush_buffer"))
+ return;
+ DBGINFO(("%s flush_buffer\n", info->device_name));
+
+ spin_lock_irqsave(&info->lock, flags);
+ info->tx_count = 0;
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ tty_wakeup(tty);
+}
+
+/*
+ * throttle (stop) transmitter
+ */
+static void tx_hold(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "tx_hold"))
+ return;
+ DBGINFO(("%s tx_hold\n", info->device_name));
+ spin_lock_irqsave(&info->lock,flags);
+ if (info->tx_enabled && info->params.mode == MGSL_MODE_ASYNC)
+ tx_stop(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+/*
+ * release (start) transmitter
+ */
+static void tx_release(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "tx_release"))
+ return;
+ DBGINFO(("%s tx_release\n", info->device_name));
+ spin_lock_irqsave(&info->lock, flags);
+ if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
+ info->tx_count = 0;
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+/*
+ * Service an IOCTL request
+ *
+ * Arguments
+ *
+ * tty pointer to tty instance data
+ * cmd IOCTL command code
+ * arg command argument/context
+ *
+ * Return 0 if success, otherwise error code
+ */
+static int ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct slgt_info *info = tty->driver_data;
+ void __user *argp = (void __user *)arg;
+ int ret;
+
+ if (sanity_check(info, tty->name, "ioctl"))
+ return -ENODEV;
+ DBGINFO(("%s ioctl() cmd=%08X\n", info->device_name, cmd));
+
+ if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+ (cmd != TIOCMIWAIT)) {
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case MGSL_IOCWAITEVENT:
+ return wait_mgsl_event(info, argp);
+ case TIOCMIWAIT:
+ return modem_input_wait(info,(int)arg);
+ case MGSL_IOCSGPIO:
+ return set_gpio(info, argp);
+ case MGSL_IOCGGPIO:
+ return get_gpio(info, argp);
+ case MGSL_IOCWAITGPIO:
+ return wait_gpio(info, argp);
+ case MGSL_IOCGXSYNC:
+ return get_xsync(info, argp);
+ case MGSL_IOCSXSYNC:
+ return set_xsync(info, (int)arg);
+ case MGSL_IOCGXCTRL:
+ return get_xctrl(info, argp);
+ case MGSL_IOCSXCTRL:
+ return set_xctrl(info, (int)arg);
+ }
+ mutex_lock(&info->port.mutex);
+ switch (cmd) {
+ case MGSL_IOCGPARAMS:
+ ret = get_params(info, argp);
+ break;
+ case MGSL_IOCSPARAMS:
+ ret = set_params(info, argp);
+ break;
+ case MGSL_IOCGTXIDLE:
+ ret = get_txidle(info, argp);
+ break;
+ case MGSL_IOCSTXIDLE:
+ ret = set_txidle(info, (int)arg);
+ break;
+ case MGSL_IOCTXENABLE:
+ ret = tx_enable(info, (int)arg);
+ break;
+ case MGSL_IOCRXENABLE:
+ ret = rx_enable(info, (int)arg);
+ break;
+ case MGSL_IOCTXABORT:
+ ret = tx_abort(info);
+ break;
+ case MGSL_IOCGSTATS:
+ ret = get_stats(info, argp);
+ break;
+ case MGSL_IOCGIF:
+ ret = get_interface(info, argp);
+ break;
+ case MGSL_IOCSIF:
+ ret = set_interface(info,(int)arg);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+ mutex_unlock(&info->port.mutex);
+ return ret;
+}
+
+static int get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+
+{
+ struct slgt_info *info = tty->driver_data;
+ struct mgsl_icount cnow; /* kernel counter temps */
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+ cnow = info->icount;
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ icount->cts = cnow.cts;
+ icount->dsr = cnow.dsr;
+ icount->rng = cnow.rng;
+ icount->dcd = cnow.dcd;
+ icount->rx = cnow.rx;
+ icount->tx = cnow.tx;
+ icount->frame = cnow.frame;
+ icount->overrun = cnow.overrun;
+ icount->parity = cnow.parity;
+ icount->brk = cnow.brk;
+ icount->buf_overrun = cnow.buf_overrun;
+
+ return 0;
+}
+
+/*
+ * support for 32 bit ioctl calls on 64 bit systems
+ */
+#ifdef CONFIG_COMPAT
+static long get_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *user_params)
+{
+ struct MGSL_PARAMS32 tmp_params;
+
+ DBGINFO(("%s get_params32\n", info->device_name));
+ memset(&tmp_params, 0, sizeof(tmp_params));
+ tmp_params.mode = (compat_ulong_t)info->params.mode;
+ tmp_params.loopback = info->params.loopback;
+ tmp_params.flags = info->params.flags;
+ tmp_params.encoding = info->params.encoding;
+ tmp_params.clock_speed = (compat_ulong_t)info->params.clock_speed;
+ tmp_params.addr_filter = info->params.addr_filter;
+ tmp_params.crc_type = info->params.crc_type;
+ tmp_params.preamble_length = info->params.preamble_length;
+ tmp_params.preamble = info->params.preamble;
+ tmp_params.data_rate = (compat_ulong_t)info->params.data_rate;
+ tmp_params.data_bits = info->params.data_bits;
+ tmp_params.stop_bits = info->params.stop_bits;
+ tmp_params.parity = info->params.parity;
+ if (copy_to_user(user_params, &tmp_params, sizeof(struct MGSL_PARAMS32)))
+ return -EFAULT;
+ return 0;
+}
+
+static long set_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *new_params)
+{
+ struct MGSL_PARAMS32 tmp_params;
+
+ DBGINFO(("%s set_params32\n", info->device_name));
+ if (copy_from_user(&tmp_params, new_params, sizeof(struct MGSL_PARAMS32)))
+ return -EFAULT;
+
+ spin_lock(&info->lock);
+ if (tmp_params.mode == MGSL_MODE_BASE_CLOCK) {
+ info->base_clock = tmp_params.clock_speed;
+ } else {
+ info->params.mode = tmp_params.mode;
+ info->params.loopback = tmp_params.loopback;
+ info->params.flags = tmp_params.flags;
+ info->params.encoding = tmp_params.encoding;
+ info->params.clock_speed = tmp_params.clock_speed;
+ info->params.addr_filter = tmp_params.addr_filter;
+ info->params.crc_type = tmp_params.crc_type;
+ info->params.preamble_length = tmp_params.preamble_length;
+ info->params.preamble = tmp_params.preamble;
+ info->params.data_rate = tmp_params.data_rate;
+ info->params.data_bits = tmp_params.data_bits;
+ info->params.stop_bits = tmp_params.stop_bits;
+ info->params.parity = tmp_params.parity;
+ }
+ spin_unlock(&info->lock);
+
+ program_hw(info);
+
+ return 0;
+}
+
+static long slgt_compat_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct slgt_info *info = tty->driver_data;
+ int rc = -ENOIOCTLCMD;
+
+ if (sanity_check(info, tty->name, "compat_ioctl"))
+ return -ENODEV;
+ DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd));
+
+ switch (cmd) {
+
+ case MGSL_IOCSPARAMS32:
+ rc = set_params32(info, compat_ptr(arg));
+ break;
+
+ case MGSL_IOCGPARAMS32:
+ rc = get_params32(info, compat_ptr(arg));
+ break;
+
+ case MGSL_IOCGPARAMS:
+ case MGSL_IOCSPARAMS:
+ case MGSL_IOCGTXIDLE:
+ case MGSL_IOCGSTATS:
+ case MGSL_IOCWAITEVENT:
+ case MGSL_IOCGIF:
+ case MGSL_IOCSGPIO:
+ case MGSL_IOCGGPIO:
+ case MGSL_IOCWAITGPIO:
+ case MGSL_IOCGXSYNC:
+ case MGSL_IOCGXCTRL:
+ case MGSL_IOCSTXIDLE:
+ case MGSL_IOCTXENABLE:
+ case MGSL_IOCRXENABLE:
+ case MGSL_IOCTXABORT:
+ case TIOCMIWAIT:
+ case MGSL_IOCSIF:
+ case MGSL_IOCSXSYNC:
+ case MGSL_IOCSXCTRL:
+ rc = ioctl(tty, cmd, arg);
+ break;
+ }
+
+ DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc));
+ return rc;
+}
+#else
+#define slgt_compat_ioctl NULL
+#endif /* ifdef CONFIG_COMPAT */
+
+/*
+ * proc fs support
+ */
+static inline void line_info(struct seq_file *m, struct slgt_info *info)
+{
+ char stat_buf[30];
+ unsigned long flags;
+
+ seq_printf(m, "%s: IO=%08X IRQ=%d MaxFrameSize=%u\n",
+ info->device_name, info->phys_reg_addr,
+ info->irq_level, info->max_frame_size);
+
+ /* output current serial signal states */
+ spin_lock_irqsave(&info->lock,flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ stat_buf[0] = 0;
+ stat_buf[1] = 0;
+ if (info->signals & SerialSignal_RTS)
+ strcat(stat_buf, "|RTS");
+ if (info->signals & SerialSignal_CTS)
+ strcat(stat_buf, "|CTS");
+ if (info->signals & SerialSignal_DTR)
+ strcat(stat_buf, "|DTR");
+ if (info->signals & SerialSignal_DSR)
+ strcat(stat_buf, "|DSR");
+ if (info->signals & SerialSignal_DCD)
+ strcat(stat_buf, "|CD");
+ if (info->signals & SerialSignal_RI)
+ strcat(stat_buf, "|RI");
+
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ seq_printf(m, "\tHDLC txok:%d rxok:%d",
+ info->icount.txok, info->icount.rxok);
+ if (info->icount.txunder)
+ seq_printf(m, " txunder:%d", info->icount.txunder);
+ if (info->icount.txabort)
+ seq_printf(m, " txabort:%d", info->icount.txabort);
+ if (info->icount.rxshort)
+ seq_printf(m, " rxshort:%d", info->icount.rxshort);
+ if (info->icount.rxlong)
+ seq_printf(m, " rxlong:%d", info->icount.rxlong);
+ if (info->icount.rxover)
+ seq_printf(m, " rxover:%d", info->icount.rxover);
+ if (info->icount.rxcrc)
+ seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
+ } else {
+ seq_printf(m, "\tASYNC tx:%d rx:%d",
+ info->icount.tx, info->icount.rx);
+ if (info->icount.frame)
+ seq_printf(m, " fe:%d", info->icount.frame);
+ if (info->icount.parity)
+ seq_printf(m, " pe:%d", info->icount.parity);
+ if (info->icount.brk)
+ seq_printf(m, " brk:%d", info->icount.brk);
+ if (info->icount.overrun)
+ seq_printf(m, " oe:%d", info->icount.overrun);
+ }
+
+ /* Append serial signal status to end */
+ seq_printf(m, " %s\n", stat_buf+1);
+
+ seq_printf(m, "\ttxactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
+ info->tx_active,info->bh_requested,info->bh_running,
+ info->pending_bh);
+}
+
+/* Called to print information about devices
+ */
+static int synclink_gt_proc_show(struct seq_file *m, void *v)
+{
+ struct slgt_info *info;
+
+ seq_puts(m, "synclink_gt driver\n");
+
+ info = slgt_device_list;
+ while( info ) {
+ line_info(m, info);
+ info = info->next_device;
+ }
+ return 0;
+}
+
+static int synclink_gt_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, synclink_gt_proc_show, NULL);
+}
+
+static const struct file_operations synclink_gt_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = synclink_gt_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * return count of bytes in transmit buffer
+ */
+static int chars_in_buffer(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ int count;
+ if (sanity_check(info, tty->name, "chars_in_buffer"))
+ return 0;
+ count = tbuf_bytes(info);
+ DBGINFO(("%s chars_in_buffer()=%d\n", info->device_name, count));
+ return count;
+}
+
+/*
+ * signal remote device to throttle send data (our receive data)
+ */
+static void throttle(struct tty_struct * tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "throttle"))
+ return;
+ DBGINFO(("%s throttle\n", info->device_name));
+ if (I_IXOFF(tty))
+ send_xchar(tty, STOP_CHAR(tty));
+ if (tty->termios->c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock,flags);
+ info->signals &= ~SerialSignal_RTS;
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+}
+
+/*
+ * signal remote device to stop throttling send data (our receive data)
+ */
+static void unthrottle(struct tty_struct * tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "unthrottle"))
+ return;
+ DBGINFO(("%s unthrottle\n", info->device_name));
+ if (I_IXOFF(tty)) {
+ if (info->x_char)
+ info->x_char = 0;
+ else
+ send_xchar(tty, START_CHAR(tty));
+ }
+ if (tty->termios->c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock,flags);
+ info->signals |= SerialSignal_RTS;
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+}
+
+/*
+ * set or clear transmit break condition
+ * break_state -1=set break condition, 0=clear
+ */
+static int set_break(struct tty_struct *tty, int break_state)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned short value;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "set_break"))
+ return -EINVAL;
+ DBGINFO(("%s set_break(%d)\n", info->device_name, break_state));
+
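+ /* TCR (tx control) bit 6: 1 = send break */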
+ spin_lock_irqsave(&info->lock,flags);
+ value = rd_reg16(info, TCR);
+ if (break_state == -1)
+ value |= BIT6;
+ else
+ value &= ~BIT6;
+ wr_reg16(info, TCR, value);
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+#if SYNCLINK_GENERIC_HDLC
+
+/**
+ * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
+ * set encoding and frame check sequence (FCS) options
+ *
+ * dev pointer to network device structure
+ * encoding serial encoding setting
+ * parity FCS setting
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
+{
+ struct slgt_info *info = dev_to_port(dev);
+ unsigned char new_encoding;
+ unsigned short new_crctype;
+
+ /* return error if TTY interface open */
+ if (info->port.count)
+ return -EBUSY;
+
+ DBGINFO(("%s hdlcdev_attach\n", info->device_name));
+
+ switch (encoding)
+ {
+ case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
+ case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
+ case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
+ case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
+ case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
+ default: return -EINVAL;
+ }
+
+ switch (parity)
+ {
+ case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
+ case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
+ case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
+ default: return -EINVAL;
+ }
+
+ info->params.encoding = new_encoding;
+ info->params.crc_type = new_crctype;
+
+ /* if network interface up, reprogram hardware */
+ if (info->netcount)
+ program_hw(info);
+
+ return 0;
+}
+
+/**
+ * called by generic HDLC layer to send frame
+ *
+ * skb socket buffer containing HDLC frame
+ * dev pointer to network device structure
+ */
+static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct slgt_info *info = dev_to_port(dev);
+ unsigned long flags;
+
+ DBGINFO(("%s hdlc_xmit\n", dev->name));
+
+ if (!skb->len)
+ return NETDEV_TX_OK;
+
+ /* stop sending until this frame completes */
+ netif_stop_queue(dev);
+
+ /* update network statistics */
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ /* save start time for transmit timeout detection */
+ dev->trans_start = jiffies;
+
+ spin_lock_irqsave(&info->lock, flags);
+ tx_load(info, skb->data, skb->len);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ /* done with socket buffer, so free it */
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * called by network layer when interface enabled
+ * claim resources and initialize hardware
+ *
+ * dev pointer to network device structure
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_open(struct net_device *dev)
+{
+ struct slgt_info *info = dev_to_port(dev);
+ int rc;
+ unsigned long flags;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EBUSY;
+
+ DBGINFO(("%s hdlcdev_open\n", dev->name));
+
+ /* generic HDLC layer open processing */
+ if ((rc = hdlc_open(dev)))
+ return rc;
+
+ /* arbitrate between network and tty opens */
+ spin_lock_irqsave(&info->netlock, flags);
+ if (info->port.count != 0 || info->netcount != 0) {
+ DBGINFO(("%s hdlc_open busy\n", dev->name));
+ spin_unlock_irqrestore(&info->netlock, flags);
+ return -EBUSY;
+ }
+ info->netcount=1;
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ /* claim resources and init adapter */
+ if ((rc = startup(info)) != 0) {
+ spin_lock_irqsave(&info->netlock, flags);
+ info->netcount=0;
+ spin_unlock_irqrestore(&info->netlock, flags);
+ return rc;
+ }
+
+ /* assert DTR and RTS, apply hardware settings */
+ info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ program_hw(info);
+
+ /* enable network layer transmit */
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+
+ /* inform generic HDLC layer of current DCD status */
+ spin_lock_irqsave(&info->lock, flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
+ if (info->signals & SerialSignal_DCD)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ return 0;
+}
+
+/**
+ * called by network layer when interface is disabled
+ * shutdown hardware and release resources
+ *
+ * dev pointer to network device structure
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_close(struct net_device *dev)
+{
+ struct slgt_info *info = dev_to_port(dev);
+ unsigned long flags;
+
+ DBGINFO(("%s hdlcdev_close\n", dev->name));
+
+ netif_stop_queue(dev);
+
+ /* shutdown adapter and release resources */
+ shutdown(info);
+
+ hdlc_close(dev);
+
+ spin_lock_irqsave(&info->netlock, flags);
+ info->netcount=0;
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ module_put(THIS_MODULE);
+ return 0;
+}
+
+/**
+ * called by network layer to process IOCTL call to network device
+ *
+ * dev pointer to network device structure
+ * ifr pointer to network interface request structure
+ * cmd IOCTL command code
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ struct slgt_info *info = dev_to_port(dev);
+ unsigned int flags;
+
+ DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
+
+ /* return error if TTY interface open */
+ if (info->port.count)
+ return -EBUSY;
+
+ if (cmd != SIOCWANDEV)
+ return hdlc_ioctl(dev, ifr, cmd);
+
+ memset(&new_line, 0, sizeof(new_line));
+
+ switch(ifr->ifr_settings.type) {
+ case IF_GET_IFACE: /* return current sync_serial_settings */
+
+ ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+
+ flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
+
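+ /* map the rx/tx clock source flags to a generic HDLC clock type */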
+ switch (flags){
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
+ case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
+ default: new_line.clock_type = CLOCK_DEFAULT;
+ }
+
+ new_line.clock_rate = info->params.clock_speed;
+ new_line.loopback = info->params.loopback ? 1:0;
+
+ if (copy_to_user(line, &new_line, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
+
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&new_line, line, size))
+ return -EFAULT;
+
+ switch (new_line.clock_type)
+ {
+ case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
+ case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
+ case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
+ case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
+ case CLOCK_DEFAULT: flags = info->params.flags &
+ (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
+ default: return -EINVAL;
+ }
+
+ if (new_line.loopback != 0 && new_line.loopback != 1)
+ return -EINVAL;
+
+ info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
+ info->params.flags |= flags;
+
+ info->params.loopback = new_line.loopback;
+
+ if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
+ info->params.clock_speed = new_line.clock_rate;
+ else
+ info->params.clock_speed = 0;
+
+ /* if network interface up, reprogram hardware */
+ if (info->netcount)
+ program_hw(info);
+ return 0;
+
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+/**
+ * called by network layer when transmit timeout is detected
+ *
+ * dev pointer to network device structure
+ */
+static void hdlcdev_tx_timeout(struct net_device *dev)
+{
+ struct slgt_info *info = dev_to_port(dev);
+ unsigned long flags;
+
+ DBGINFO(("%s hdlcdev_tx_timeout\n", dev->name));
+
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+
+ spin_lock_irqsave(&info->lock,flags);
+ tx_stop(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ netif_wake_queue(dev);
+}
+
+/**
+ * called by device driver when transmit completes
+ * reenable network layer transmit if stopped
+ *
+ * info pointer to device instance information
+ */
+static void hdlcdev_tx_done(struct slgt_info *info)
+{
+ if (netif_queue_stopped(info->netdev))
+ netif_wake_queue(info->netdev);
+}
+
+/**
+ * called by device driver when frame received
+ * pass frame to network layer
+ *
+ * info pointer to device instance information
+ * buf pointer to buffer containing frame data
+ * size count of data bytes in buf
+ */
+static void hdlcdev_rx(struct slgt_info *info, char *buf, int size)
+{
+ struct sk_buff *skb = dev_alloc_skb(size);
+ struct net_device *dev = info->netdev;
+
+ DBGINFO(("%s hdlcdev_rx\n", dev->name));
+
+ if (skb == NULL) {
+ DBGERR(("%s: can't alloc skb, drop packet\n", dev->name));
+ dev->stats.rx_dropped++;
+ return;
+ }
+
+ memcpy(skb_put(skb, size), buf, size);
+
+ skb->protocol = hdlc_type_trans(skb, dev);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += size;
+
+ netif_rx(skb);
+}
+
+static const struct net_device_ops hdlcdev_ops = {
+ .ndo_open = hdlcdev_open,
+ .ndo_stop = hdlcdev_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = hdlcdev_ioctl,
+ .ndo_tx_timeout = hdlcdev_tx_timeout,
+};
+
+/**
+ * called by device driver when adding device instance
+ * do generic HDLC initialization
+ *
+ * info pointer to device instance information
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_init(struct slgt_info *info)
+{
+ int rc;
+ struct net_device *dev;
+ hdlc_device *hdlc;
+
+ /* allocate and initialize network and HDLC layer objects */
+
+ if (!(dev = alloc_hdlcdev(info))) {
+ printk(KERN_ERR "%s hdlc device alloc failure\n", info->device_name);
+ return -ENOMEM;
+ }
+
+ /* for network layer reporting purposes only */
+ dev->mem_start = info->phys_reg_addr;
+ dev->mem_end = info->phys_reg_addr + SLGT_REG_SIZE - 1;
+ dev->irq = info->irq_level;
+
+ /* network layer callbacks and settings */
+ dev->netdev_ops = &hdlcdev_ops;
+ dev->watchdog_timeo = 10 * HZ;
+ dev->tx_queue_len = 50;
+
+ /* generic HDLC layer callbacks and settings */
+ hdlc = dev_to_hdlc(dev);
+ hdlc->attach = hdlcdev_attach;
+ hdlc->xmit = hdlcdev_xmit;
+
+ /* register objects with HDLC layer */
+ if ((rc = register_hdlc_device(dev))) {
+ printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
+ free_netdev(dev);
+ return rc;
+ }
+
+ info->netdev = dev;
+ return 0;
+}
+
+/**
+ * called by device driver when removing device instance
+ * do generic HDLC cleanup
+ *
+ * info pointer to device instance information
+ */
+static void hdlcdev_exit(struct slgt_info *info)
+{
+ unregister_hdlc_device(info->netdev);
+ free_netdev(info->netdev);
+ info->netdev = NULL;
+}
+
+#endif /* SYNCLINK_GENERIC_HDLC */
+
+/*
+ * get async data from rx DMA buffers
+ */
+static void rx_async(struct slgt_info *info)
+{
+ struct tty_struct *tty = info->port.tty;
+ struct mgsl_icount *icount = &info->icount;
+ unsigned int start, end;
+ unsigned char *p;
+ unsigned char status;
+ struct slgt_desc *bufs = info->rbufs;
+ int i, count;
+ int chars = 0;
+ int stat;
+ unsigned char ch;
+
+ start = end = info->rbuf_current;
+
+ while(desc_complete(bufs[end])) {
+ count = desc_count(bufs[end]) - info->rbuf_index;
+ p = bufs[end].buf + info->rbuf_index;
+
+ DBGISR(("%s rx_async count=%d\n", info->device_name, count));
+ DBGDATA(info, p, count, "rx");
+
+ for(i=0 ; i < count; i+=2, p+=2) {
+ ch = *p;
+ icount->rx++;
+
+ stat = 0;
+
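+ /* rx buffer holds data/status byte pairs: BIT1 = parity error, BIT0 = framing error */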
+ if ((status = *(p+1) & (BIT1 + BIT0))) {
+ if (status & BIT1)
+ icount->parity++;
+ else if (status & BIT0)
+ icount->frame++;
+ /* discard char if tty control flags say so */
+ if (status & info->ignore_status_mask)
+ continue;
+ if (status & BIT1)
+ stat = TTY_PARITY;
+ else if (status & BIT0)
+ stat = TTY_FRAME;
+ }
+ if (tty) {
+ tty_insert_flip_char(tty, ch, stat);
+ chars++;
+ }
+ }
+
+ if (i < count) {
+ /* receive buffer not completed */
+ info->rbuf_index += i;
+ mod_timer(&info->rx_timer, jiffies + 1);
+ break;
+ }
+
+ info->rbuf_index = 0;
+ free_rbufs(info, end, end);
+
+ if (++end == info->rbuf_count)
+ end = 0;
+
+ /* if entire list searched then no more receive data available */
+ if (end == start)
+ break;
+ }
+
+ if (tty && chars)
+ tty_flip_buffer_push(tty);
+}
+
+/*
+ * return next bottom half action to perform
+ */
+static int bh_action(struct slgt_info *info)
+{
+ unsigned long flags;
+ int rc;
+
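+ /* check and clear pending work in priority order: receive, transmit, then status */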
+ spin_lock_irqsave(&info->lock,flags);
+
+ if (info->pending_bh & BH_RECEIVE) {
+ info->pending_bh &= ~BH_RECEIVE;
+ rc = BH_RECEIVE;
+ } else if (info->pending_bh & BH_TRANSMIT) {
+ info->pending_bh &= ~BH_TRANSMIT;
+ rc = BH_TRANSMIT;
+ } else if (info->pending_bh & BH_STATUS) {
+ info->pending_bh &= ~BH_STATUS;
+ rc = BH_STATUS;
+ } else {
+ /* Mark BH routine as complete */
+ info->bh_running = false;
+ info->bh_requested = false;
+ rc = 0;
+ }
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ return rc;
+}
+
+/*
+ * perform bottom half processing
+ */
+static void bh_handler(struct work_struct *work)
+{
+ struct slgt_info *info = container_of(work, struct slgt_info, task);
+ int action;
+
+ if (!info)
+ return;
+ info->bh_running = true;
+
+ while((action = bh_action(info))) {
+ switch (action) {
+ case BH_RECEIVE:
+ DBGBH(("%s bh receive\n", info->device_name));
+ switch(info->params.mode) {
+ case MGSL_MODE_ASYNC:
+ rx_async(info);
+ break;
+ case MGSL_MODE_HDLC:
+ while(rx_get_frame(info));
+ break;
+ case MGSL_MODE_RAW:
+ case MGSL_MODE_MONOSYNC:
+ case MGSL_MODE_BISYNC:
+ case MGSL_MODE_XSYNC:
+ while(rx_get_buf(info));
+ break;
+ }
+ /* restart receiver if rx DMA buffers exhausted */
+ if (info->rx_restart)
+ rx_start(info);
+ break;
+ case BH_TRANSMIT:
+ bh_transmit(info);
+ break;
+ case BH_STATUS:
+ DBGBH(("%s bh status\n", info->device_name));
+ info->ri_chkcount = 0;
+ info->dsr_chkcount = 0;
+ info->dcd_chkcount = 0;
+ info->cts_chkcount = 0;
+ break;
+ default:
+ DBGBH(("%s unknown action\n", info->device_name));
+ break;
+ }
+ }
+ DBGBH(("%s bh_handler exit\n", info->device_name));
+}
+
+static void bh_transmit(struct slgt_info *info)
+{
+ struct tty_struct *tty = info->port.tty;
+
+ DBGBH(("%s bh_transmit\n", info->device_name));
+ if (tty)
+ tty_wakeup(tty);
+}
+
+static void dsr_change(struct slgt_info *info, unsigned short status)
+{
+ if (status & BIT3) {
+ info->signals |= SerialSignal_DSR;
+ info->input_signal_events.dsr_up++;
+ } else {
+ info->signals &= ~SerialSignal_DSR;
+ info->input_signal_events.dsr_down++;
+ }
+ DBGISR(("dsr_change %s signals=%04X\n", info->device_name, info->signals));
+ if ((info->dsr_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
+ slgt_irq_off(info, IRQ_DSR);
+ return;
+ }
+ info->icount.dsr++;
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+ info->pending_bh |= BH_STATUS;
+}
+
+static void cts_change(struct slgt_info *info, unsigned short status)
+{
+ if (status & BIT2) {
+ info->signals |= SerialSignal_CTS;
+ info->input_signal_events.cts_up++;
+ } else {
+ info->signals &= ~SerialSignal_CTS;
+ info->input_signal_events.cts_down++;
+ }
+ DBGISR(("cts_change %s signals=%04X\n", info->device_name, info->signals));
+ if ((info->cts_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
+ slgt_irq_off(info, IRQ_CTS);
+ return;
+ }
+ info->icount.cts++;
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+ info->pending_bh |= BH_STATUS;
+
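+ /* CTS hardware flow control: resume transmit when CTS asserts, stop it when CTS drops */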
+ if (info->port.flags & ASYNC_CTS_FLOW) {
+ if (info->port.tty) {
+ if (info->port.tty->hw_stopped) {
+ if (info->signals & SerialSignal_CTS) {
+ info->port.tty->hw_stopped = 0;
+ info->pending_bh |= BH_TRANSMIT;
+ return;
+ }
+ } else {
+ if (!(info->signals & SerialSignal_CTS))
+ info->port.tty->hw_stopped = 1;
+ }
+ }
+ }
+}
+
+static void dcd_change(struct slgt_info *info, unsigned short status)
+{
+ if (status & BIT1) {
+ info->signals |= SerialSignal_DCD;
+ info->input_signal_events.dcd_up++;
+ } else {
+ info->signals &= ~SerialSignal_DCD;
+ info->input_signal_events.dcd_down++;
+ }
+ DBGISR(("dcd_change %s signals=%04X\n", info->device_name, info->signals));
+ if ((info->dcd_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
+ slgt_irq_off(info, IRQ_DCD);
+ return;
+ }
+ info->icount.dcd++;
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount) {
+ if (info->signals & SerialSignal_DCD)
+ netif_carrier_on(info->netdev);
+ else
+ netif_carrier_off(info->netdev);
+ }
+#endif
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+ info->pending_bh |= BH_STATUS;
+
+ if (info->port.flags & ASYNC_CHECK_CD) {
+ if (info->signals & SerialSignal_DCD)
+ wake_up_interruptible(&info->port.open_wait);
+ else {
+ if (info->port.tty)
+ tty_hangup(info->port.tty);
+ }
+ }
+}
+
+static void ri_change(struct slgt_info *info, unsigned short status)
+{
+ if (status & BIT0) {
+ info->signals |= SerialSignal_RI;
+ info->input_signal_events.ri_up++;
+ } else {
+ info->signals &= ~SerialSignal_RI;
+ info->input_signal_events.ri_down++;
+ }
+ DBGISR(("ri_change %s signals=%04X\n", info->device_name, info->signals));
+ if ((info->ri_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
+ slgt_irq_off(info, IRQ_RI);
+ return;
+ }
+ info->icount.rng++;
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+ info->pending_bh |= BH_STATUS;
+}
+
+static void isr_rxdata(struct slgt_info *info)
+{
+ unsigned int count = info->rbuf_fill_count;
+ unsigned int i = info->rbuf_fill_index;
+ unsigned short reg;
+
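+ /* PIO receive: drain the receive data register into rx buffers while data is available */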
+ while (rd_reg16(info, SSR) & IRQ_RXDATA) {
+ reg = rd_reg16(info, RDR);
+ DBGISR(("isr_rxdata %s RDR=%04X\n", info->device_name, reg));
+ if (desc_complete(info->rbufs[i])) {
+ /* all buffers full */
+ rx_stop(info);
+ info->rx_restart = 1;
+ continue;
+ }
+ info->rbufs[i].buf[count++] = (unsigned char)reg;
+ /* async mode saves status byte to buffer for each data byte */
+ if (info->params.mode == MGSL_MODE_ASYNC)
+ info->rbufs[i].buf[count++] = (unsigned char)(reg >> 8);
+ if (count == info->rbuf_fill_level || (reg & BIT10)) {
+ /* buffer full or end of frame */
+ set_desc_count(info->rbufs[i], count);
+ set_desc_status(info->rbufs[i], BIT15 | (reg >> 8));
+ info->rbuf_fill_count = count = 0;
+ if (++i == info->rbuf_count)
+ i = 0;
+ info->pending_bh |= BH_RECEIVE;
+ }
+ }
+
+ info->rbuf_fill_index = i;
+ info->rbuf_fill_count = count;
+}
+
+static void isr_serial(struct slgt_info *info)
+{
+ unsigned short status = rd_reg16(info, SSR);
+
+ DBGISR(("%s isr_serial status=%04X\n", info->device_name, status));
+
+ wr_reg16(info, SSR, status); /* clear pending */
+
+ info->irq_occurred = true;
+
+ if (info->params.mode == MGSL_MODE_ASYNC) {
+ if (status & IRQ_TXIDLE) {
+ if (info->tx_active)
+ isr_txeom(info, status);
+ }
+ if (info->rx_pio && (status & IRQ_RXDATA))
+ isr_rxdata(info);
+ if ((status & IRQ_RXBREAK) && (status & RXBREAK)) {
+ info->icount.brk++;
+ /* process break detection if tty control allows */
+ if (info->port.tty) {
+ if (!(status & info->ignore_status_mask)) {
+ if (info->read_status_mask & MASK_BREAK) {
+ tty_insert_flip_char(info->port.tty, 0, TTY_BREAK);
+ if (info->port.flags & ASYNC_SAK)
+ do_SAK(info->port.tty);
+ }
+ }
+ }
+ }
+ } else {
+ if (status & (IRQ_TXIDLE + IRQ_TXUNDER))
+ isr_txeom(info, status);
+ if (info->rx_pio && (status & IRQ_RXDATA))
+ isr_rxdata(info);
+ if (status & IRQ_RXIDLE) {
+ if (status & RXIDLE)
+ info->icount.rxidle++;
+ else
+ info->icount.exithunt++;
+ wake_up_interruptible(&info->event_wait_q);
+ }
+
+ if (status & IRQ_RXOVER)
+ rx_start(info);
+ }
+
+ if (status & IRQ_DSR)
+ dsr_change(info, status);
+ if (status & IRQ_CTS)
+ cts_change(info, status);
+ if (status & IRQ_DCD)
+ dcd_change(info, status);
+ if (status & IRQ_RI)
+ ri_change(info, status);
+}
+
+static void isr_rdma(struct slgt_info *info)
+{
+ unsigned int status = rd_reg32(info, RDCSR);
+
+ DBGISR(("%s isr_rdma status=%08x\n", info->device_name, status));
+
+ /* RDCSR (rx DMA control/status)
+ *
+ * 31..07 reserved
+ * 06 save status byte to DMA buffer
+ * 05 error
+ * 04 eol (end of list)
+ * 03 eob (end of buffer)
+ * 02 IRQ enable
+ * 01 reset
+ * 00 enable
+ */
+ wr_reg32(info, RDCSR, status); /* clear pending */
+
+ if (status & (BIT5 + BIT4)) {
+ DBGISR(("%s isr_rdma rx_restart=1\n", info->device_name));
+ info->rx_restart = true;
+ }
+ info->pending_bh |= BH_RECEIVE;
+}
+
+static void isr_tdma(struct slgt_info *info)
+{
+ unsigned int status = rd_reg32(info, TDCSR);
+
+ DBGISR(("%s isr_tdma status=%08x\n", info->device_name, status));
+
+ /* TDCSR (tx DMA control/status)
+ *
+ * 31..06 reserved
+ * 05 error
+ * 04 eol (end of list)
+ * 03 eob (end of buffer)
+ * 02 IRQ enable
+ * 01 reset
+ * 00 enable
+ */
+ wr_reg32(info, TDCSR, status); /* clear pending */
+
+ if (status & (BIT5 + BIT4 + BIT3)) {
+ // another transmit buffer has completed
+ // run bottom half to get more send data from user
+ info->pending_bh |= BH_TRANSMIT;
+ }
+}
+
+/*
+ * return true if there are unsent tx DMA buffers, otherwise false
+ *
+ * if there are unsent buffers then info->tbuf_start
+ * is set to index of first unsent buffer
+ */
+static bool unsent_tbufs(struct slgt_info *info)
+{
+ unsigned int i = info->tbuf_current;
+ bool rc = false;
+
+ /*
+ * search backwards from last loaded buffer (precedes tbuf_current)
+ * for first unsent buffer (desc_count > 0)
+ */
+
+ do {
+ if (i)
+ i--;
+ else
+ i = info->tbuf_count - 1;
+ if (!desc_count(info->tbufs[i]))
+ break;
+ info->tbuf_start = i;
+ rc = true;
+ } while (i != info->tbuf_current);
+
+ return rc;
+}
+
+static void isr_txeom(struct slgt_info *info, unsigned short status)
+{
+ DBGISR(("%s txeom status=%04x\n", info->device_name, status));
+
+ slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
+ tdma_reset(info);
+ if (status & IRQ_TXUNDER) {
+ unsigned short val = rd_reg16(info, TCR);
+ wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
+ wr_reg16(info, TCR, val); /* clear reset bit */
+ }
+
+ if (info->tx_active) {
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ if (status & IRQ_TXUNDER)
+ info->icount.txunder++;
+ else if (status & IRQ_TXIDLE)
+ info->icount.txok++;
+ }
+
+ if (unsent_tbufs(info)) {
+ tx_start(info);
+ update_tx_timer(info);
+ return;
+ }
+ info->tx_active = false;
+
+ del_timer(&info->tx_timer);
+
+ if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
+ info->signals &= ~SerialSignal_RTS;
+ info->drop_rts_on_tx_done = false;
+ set_signals(info);
+ }
+
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount)
+ hdlcdev_tx_done(info);
+ else
+#endif
+ {
+ if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) {
+ tx_stop(info);
+ return;
+ }
+ info->pending_bh |= BH_TRANSMIT;
+ }
+ }
+}
+
+static void isr_gpio(struct slgt_info *info, unsigned int changed, unsigned int state)
+{
+ struct cond_wait *w, *prev;
+
+ /* wake processes waiting for specific transitions */
+ for (w = info->gpio_wait_q, prev = NULL ; w != NULL ; w = w->next) {
+ if (w->data & changed) {
+ w->data = state;
+ wake_up_interruptible(&w->q);
+ if (prev != NULL)
+ prev->next = w->next;
+ else
+ info->gpio_wait_q = w->next;
+ } else
+ prev = w;
+ }
+}
+
+/* interrupt service routine
+ *
+ * dummy interrupt number (not used)
+ * dev_id device ID supplied during interrupt registration
+ */
+static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
+{
+ struct slgt_info *info = dev_id;
+ unsigned int gsr;
+ unsigned int i;
+
+ DBGISR(("slgt_interrupt irq=%d entry\n", info->irq_level));
+
+ while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
+ DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
+ info->irq_occurred = true;
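+ /* GSR bit 8+i = serial IRQ for port i, bit 16+2i = rx DMA IRQ, bit 17+2i = tx DMA IRQ */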
+ for(i=0; i < info->port_count ; i++) {
+ if (info->port_array[i] == NULL)
+ continue;
+ spin_lock(&info->port_array[i]->lock);
+ if (gsr & (BIT8 << i))
+ isr_serial(info->port_array[i]);
+ if (gsr & (BIT16 << (i*2)))
+ isr_rdma(info->port_array[i]);
+ if (gsr & (BIT17 << (i*2)))
+ isr_tdma(info->port_array[i]);
+ spin_unlock(&info->port_array[i]->lock);
+ }
+ }
+
+ if (info->gpio_present) {
+ unsigned int state;
+ unsigned int changed;
+ spin_lock(&info->lock);
+ while ((changed = rd_reg32(info, IOSR)) != 0) {
+ DBGISR(("%s iosr=%08x\n", info->device_name, changed));
+ /* read latched state of GPIO signals */
+ state = rd_reg32(info, IOVR);
+ /* clear pending GPIO interrupt bits */
+ wr_reg32(info, IOSR, changed);
+ for (i=0 ; i < info->port_count ; i++) {
+ if (info->port_array[i] != NULL)
+ isr_gpio(info->port_array[i], changed, state);
+ }
+ }
+ spin_unlock(&info->lock);
+ }
+
+ for(i=0; i < info->port_count ; i++) {
+ struct slgt_info *port = info->port_array[i];
+ if (port == NULL)
+ continue;
+ spin_lock(&port->lock);
+ if ((port->port.count || port->netcount) &&
+ port->pending_bh && !port->bh_running &&
+ !port->bh_requested) {
+ DBGISR(("%s bh queued\n", port->device_name));
+ schedule_work(&port->task);
+ port->bh_requested = true;
+ }
+ spin_unlock(&port->lock);
+ }
+
+ DBGISR(("slgt_interrupt irq=%d exit\n", info->irq_level));
+ return IRQ_HANDLED;
+}
+
+static int startup(struct slgt_info *info)
+{
+ DBGINFO(("%s startup\n", info->device_name));
+
+ if (info->port.flags & ASYNC_INITIALIZED)
+ return 0;
+
+ if (!info->tx_buf) {
+ info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
+ if (!info->tx_buf) {
+ DBGERR(("%s can't allocate tx buffer\n", info->device_name));
+ return -ENOMEM;
+ }
+ }
+
+ info->pending_bh = 0;
+
+ memset(&info->icount, 0, sizeof(info->icount));
+
+ /* program hardware for current parameters */
+ change_params(info);
+
+ if (info->port.tty)
+ clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
+
+ info->port.flags |= ASYNC_INITIALIZED;
+
+ return 0;
+}
+
+/*
+ * called by close() and hangup() to shutdown hardware
+ */
+static void shutdown(struct slgt_info *info)
+{
+ unsigned long flags;
+
+ if (!(info->port.flags & ASYNC_INITIALIZED))
+ return;
+
+ DBGINFO(("%s shutdown\n", info->device_name));
+
+ /* clear status wait queue because status changes */
+ /* can't happen after shutting down the hardware */
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+
+ del_timer_sync(&info->tx_timer);
+ del_timer_sync(&info->rx_timer);
+
+ kfree(info->tx_buf);
+ info->tx_buf = NULL;
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ tx_stop(info);
+ rx_stop(info);
+
+ slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
+
+ if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
+ info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ set_signals(info);
+ }
+
+ flush_cond_wait(&info->gpio_wait_q);
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ if (info->port.tty)
+ set_bit(TTY_IO_ERROR, &info->port.tty->flags);
+
+ info->port.flags &= ~ASYNC_INITIALIZED;
+}
+
+static void program_hw(struct slgt_info *info)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ rx_stop(info);
+ tx_stop(info);
+
+ if (info->params.mode != MGSL_MODE_ASYNC ||
+ info->netcount)
+ sync_mode(info);
+ else
+ async_mode(info);
+
+ set_signals(info);
+
+ info->dcd_chkcount = 0;
+ info->cts_chkcount = 0;
+ info->ri_chkcount = 0;
+ info->dsr_chkcount = 0;
+
+ slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
+ get_signals(info);
+
+ if (info->netcount ||
+ (info->port.tty && info->port.tty->termios->c_cflag & CREAD))
+ rx_start(info);
+
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+/*
+ * reconfigure adapter based on new parameters
+ */
+static void change_params(struct slgt_info *info)
+{
+ unsigned cflag;
+ int bits_per_char;
+
+ if (!info->port.tty || !info->port.tty->termios)
+ return;
+ DBGINFO(("%s change_params\n", info->device_name));
+
+ cflag = info->port.tty->termios->c_cflag;
+
+ /* if B0 rate (hangup) specified then negate DTR and RTS */
+ /* otherwise assert DTR and RTS */
+ if (cflag & CBAUD)
+ info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+
+ /* byte size and parity */
+
+ switch (cflag & CSIZE) {
+ case CS5: info->params.data_bits = 5; break;
+ case CS6: info->params.data_bits = 6; break;
+ case CS7: info->params.data_bits = 7; break;
+ case CS8: info->params.data_bits = 8; break;
+ default: info->params.data_bits = 7; break;
+ }
+
+ info->params.stop_bits = (cflag & CSTOPB) ? 2 : 1;
+
+ if (cflag & PARENB)
+ info->params.parity = (cflag & PARODD) ? ASYNC_PARITY_ODD : ASYNC_PARITY_EVEN;
+ else
+ info->params.parity = ASYNC_PARITY_NONE;
+
+ /* calculate number of jiffies to transmit a full
+ * FIFO (32 bytes) at specified data rate
+ */
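+ /* data + stop bits plus one extra bit approximating the start bit (parity ignored) */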
+ bits_per_char = info->params.data_bits +
+ info->params.stop_bits + 1;
+
+ info->params.data_rate = tty_get_baud_rate(info->port.tty);
+
+ if (info->params.data_rate) {
+ info->timeout = (32*HZ*bits_per_char) /
+ info->params.data_rate;
+ }
+ info->timeout += HZ/50; /* Add .02 seconds of slop */
+
+ if (cflag & CRTSCTS)
+ info->port.flags |= ASYNC_CTS_FLOW;
+ else
+ info->port.flags &= ~ASYNC_CTS_FLOW;
+
+ if (cflag & CLOCAL)
+ info->port.flags &= ~ASYNC_CHECK_CD;
+ else
+ info->port.flags |= ASYNC_CHECK_CD;
+
+ /* process tty input control flags */
+
+ info->read_status_mask = IRQ_RXOVER;
+ if (I_INPCK(info->port.tty))
+ info->read_status_mask |= MASK_PARITY | MASK_FRAMING;
+ if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
+ info->read_status_mask |= MASK_BREAK;
+ if (I_IGNPAR(info->port.tty))
+ info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING;
+ if (I_IGNBRK(info->port.tty)) {
+ info->ignore_status_mask |= MASK_BREAK;
+ /* If ignoring parity and break indicators, ignore
+ * overruns too. (For real raw support).
+ */
+ if (I_IGNPAR(info->port.tty))
+ info->ignore_status_mask |= MASK_OVERRUN;
+ }
+
+ program_hw(info);
+}
+
+static int get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount)
+{
+ DBGINFO(("%s get_stats\n", info->device_name));
+ if (!user_icount) {
+ memset(&info->icount, 0, sizeof(info->icount));
+ } else {
+ if (copy_to_user(user_icount, &info->icount, sizeof(struct mgsl_icount)))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int get_params(struct slgt_info *info, MGSL_PARAMS __user *user_params)
+{
+ DBGINFO(("%s get_params\n", info->device_name));
+ if (copy_to_user(user_params, &info->params, sizeof(MGSL_PARAMS)))
+ return -EFAULT;
+ return 0;
+}
+
+static int set_params(struct slgt_info *info, MGSL_PARAMS __user *new_params)
+{
+ unsigned long flags;
+ MGSL_PARAMS tmp_params;
+
+ DBGINFO(("%s set_params\n", info->device_name));
+ if (copy_from_user(&tmp_params, new_params, sizeof(MGSL_PARAMS)))
+ return -EFAULT;
+
+ spin_lock_irqsave(&info->lock, flags);
+ if (tmp_params.mode == MGSL_MODE_BASE_CLOCK)
+ info->base_clock = tmp_params.clock_speed;
+ else
+ memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS));
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ program_hw(info);
+
+ return 0;
+}
+
+static int get_txidle(struct slgt_info *info, int __user *idle_mode)
+{
+ DBGINFO(("%s get_txidle=%d\n", info->device_name, info->idle_mode));
+ if (put_user(info->idle_mode, idle_mode))
+ return -EFAULT;
+ return 0;
+}
+
+static int set_txidle(struct slgt_info *info, int idle_mode)
+{
+ unsigned long flags;
+ DBGINFO(("%s set_txidle(%d)\n", info->device_name, idle_mode));
+ spin_lock_irqsave(&info->lock,flags);
+ info->idle_mode = idle_mode;
+ if (info->params.mode != MGSL_MODE_ASYNC)
+ tx_set_idle(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+static int tx_enable(struct slgt_info *info, int enable)
+{
+ unsigned long flags;
+ DBGINFO(("%s tx_enable(%d)\n", info->device_name, enable));
+ spin_lock_irqsave(&info->lock,flags);
+ if (enable) {
+ if (!info->tx_enabled)
+ tx_start(info);
+ } else {
+ if (info->tx_enabled)
+ tx_stop(info);
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+/*
+ * abort transmit HDLC frame
+ */
+static int tx_abort(struct slgt_info *info)
+{
+ unsigned long flags;
+ DBGINFO(("%s tx_abort\n", info->device_name));
+ spin_lock_irqsave(&info->lock,flags);
+ tdma_reset(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+static int rx_enable(struct slgt_info *info, int enable)
+{
+ unsigned long flags;
+ unsigned int rbuf_fill_level;
+ DBGINFO(("%s rx_enable(%08x)\n", info->device_name, enable));
+ spin_lock_irqsave(&info->lock,flags);
+ /*
+ * enable[31..16] = receive DMA buffer fill level
+ * 0 = noop (leave fill level unchanged)
+ * fill level must be multiple of 4 and <= buffer size
+ */
+ rbuf_fill_level = ((unsigned int)enable) >> 16;
+ if (rbuf_fill_level) {
+ if ((rbuf_fill_level > DMABUFSIZE) || (rbuf_fill_level % 4)) {
+ spin_unlock_irqrestore(&info->lock, flags);
+ return -EINVAL;
+ }
+ info->rbuf_fill_level = rbuf_fill_level;
+ if (rbuf_fill_level < 128)
+ info->rx_pio = 1; /* PIO mode */
+ else
+ info->rx_pio = 0; /* DMA mode */
+ rx_stop(info); /* restart receiver to use new fill level */
+ }
+
+ /*
+ * enable[1..0] = receiver enable command
+ * 0 = disable
+ * 1 = enable
+ * 2 = enable or force hunt mode if already enabled
+ */
+ enable &= 3;
+ if (enable) {
+ if (!info->rx_enabled)
+ rx_start(info);
+ else if (enable == 2) {
+ /* force hunt mode (write 1 to RCR[3]) */
+ wr_reg16(info, RCR, rd_reg16(info, RCR) | BIT3);
+ }
+ } else {
+ if (info->rx_enabled)
+ rx_stop(info);
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+/*
+ * wait for specified event to occur
+ */
+static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
+{
+ unsigned long flags;
+ int s;
+ int rc=0;
+ struct mgsl_icount cprev, cnow;
+ int events;
+ int mask;
+ struct _input_signal_events oldsigs, newsigs;
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (get_user(mask, mask_ptr))
+ return -EFAULT;
+
+ DBGINFO(("%s wait_mgsl_event(%d)\n", info->device_name, mask));
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ /* return immediately if state matches requested events */
+ get_signals(info);
+ s = info->signals;
+
+ events = mask &
+ ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
+ ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
+ ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
+ ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
+ if (events) {
+ spin_unlock_irqrestore(&info->lock,flags);
+ goto exit;
+ }
+
+ /* save current irq counts */
+ cprev = info->icount;
+ oldsigs = info->input_signal_events;
+
+ /* enable hunt and idle irqs if needed */
+ if (mask & (MgslEvent_ExitHuntMode+MgslEvent_IdleReceived)) {
+ unsigned short val = rd_reg16(info, SCR);
+ if (!(val & IRQ_RXIDLE))
+ wr_reg16(info, SCR, (unsigned short)(val | IRQ_RXIDLE));
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&info->event_wait_q, &wait);
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ for(;;) {
+ schedule();
+ if (signal_pending(current)) {
+ rc = -ERESTARTSYS;
+ break;
+ }
+
+ /* get current irq counts */
+ spin_lock_irqsave(&info->lock,flags);
+ cnow = info->icount;
+ newsigs = info->input_signal_events;
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ /* if no change, wait aborted for some reason */
+ if (newsigs.dsr_up == oldsigs.dsr_up &&
+ newsigs.dsr_down == oldsigs.dsr_down &&
+ newsigs.dcd_up == oldsigs.dcd_up &&
+ newsigs.dcd_down == oldsigs.dcd_down &&
+ newsigs.cts_up == oldsigs.cts_up &&
+ newsigs.cts_down == oldsigs.cts_down &&
+ newsigs.ri_up == oldsigs.ri_up &&
+ newsigs.ri_down == oldsigs.ri_down &&
+ cnow.exithunt == cprev.exithunt &&
+ cnow.rxidle == cprev.rxidle) {
+ rc = -EIO;
+ break;
+ }
+
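+ /* translate signal transitions and hunt/idle counter changes into event flags */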
+ events = mask &
+ ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
+ (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
+ (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
+ (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
+ (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
+ (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
+ (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
+ (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
+ (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
+ (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
+ if (events)
+ break;
+
+ cprev = cnow;
+ oldsigs = newsigs;
+ }
+
+ remove_wait_queue(&info->event_wait_q, &wait);
+ set_current_state(TASK_RUNNING);
+
+
+ if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
+ spin_lock_irqsave(&info->lock,flags);
+ if (!waitqueue_active(&info->event_wait_q)) {
+ /* disable exit hunt mode/idle rcvd IRQs */
+ wr_reg16(info, SCR,
+ (unsigned short)(rd_reg16(info, SCR) & ~IRQ_RXIDLE));
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+exit:
+ if (rc == 0)
+ rc = put_user(events, mask_ptr);
+ return rc;
+}
+
+static int get_interface(struct slgt_info *info, int __user *if_mode)
+{
+ DBGINFO(("%s get_interface=%x\n", info->device_name, info->if_mode));
+ if (put_user(info->if_mode, if_mode))
+ return -EFAULT;
+ return 0;
+}
+
+static int set_interface(struct slgt_info *info, int if_mode)
+{
+ unsigned long flags;
+ unsigned short val;
+
+ DBGINFO(("%s set_interface=%x)\n", info->device_name, if_mode));
+ spin_lock_irqsave(&info->lock,flags);
+ info->if_mode = if_mode;
+
+ msc_set_vcr(info);
+
+ /* TCR (tx control) 07 1=RTS driver control */
+ val = rd_reg16(info, TCR);
+ if (info->if_mode & MGSL_INTERFACE_RTS_EN)
+ val |= BIT7;
+ else
+ val &= ~BIT7;
+ wr_reg16(info, TCR, val);
+
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+static int get_xsync(struct slgt_info *info, int __user *xsync)
+{
+ DBGINFO(("%s get_xsync=%x\n", info->device_name, info->xsync));
+ if (put_user(info->xsync, xsync))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * set extended sync pattern (1 to 4 bytes) for extended sync mode
+ *
+ * sync pattern is contained in least significant bytes of value
+ * most significant byte of sync pattern is oldest (1st sent/detected)
+ */
+static int set_xsync(struct slgt_info *info, int xsync)
+{
+ unsigned long flags;
+
+ DBGINFO(("%s set_xsync=%x)\n", info->device_name, xsync));
+ spin_lock_irqsave(&info->lock, flags);
+ info->xsync = xsync;
+ wr_reg32(info, XSR, xsync);
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+}
+
+static int get_xctrl(struct slgt_info *info, int __user *xctrl)
+{
+ DBGINFO(("%s get_xctrl=%x\n", info->device_name, info->xctrl));
+ if (put_user(info->xctrl, xctrl))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * set extended control options
+ *
+ * xctrl[31:19] reserved, must be zero
+ * xctrl[18:17] extended sync pattern length in bytes
+ * 00 = 1 byte in xsr[7:0]
+ * 01 = 2 bytes in xsr[15:0]
+ * 10 = 3 bytes in xsr[23:0]
+ * 11 = 4 bytes in xsr[31:0]
+ * xctrl[16] 1 = enable terminal count, 0=disabled
+ * xctrl[15:0] receive terminal count for fixed length packets
+ * value is count minus one (0 = 1 byte packet)
+ * when terminal count is reached, receiver
+ * automatically returns to hunt mode and receive
+ * FIFO contents are flushed to DMA buffers with
+ * end of frame (EOF) status
+ */
+static int set_xctrl(struct slgt_info *info, int xctrl)
+{
+ unsigned long flags;
+
+ DBGINFO(("%s set_xctrl=%x)\n", info->device_name, xctrl));
+ spin_lock_irqsave(&info->lock, flags);
+ info->xctrl = xctrl;
+ wr_reg32(info, XCR, xctrl);
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+}
+
+/*
+ * set general purpose IO pin state and direction
+ *
+ * user_gpio fields:
+ * state each bit indicates a pin state
+ * smask set bit indicates pin state to set
+ * dir each bit indicates a pin direction (0=input, 1=output)
+ * dmask set bit indicates pin direction to set
+ */
+static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
+{
+ unsigned long flags;
+ struct gpio_desc gpio;
+ __u32 data;
+
+ if (!info->gpio_present)
+ return -EINVAL;
+ if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
+ return -EFAULT;
+ DBGINFO(("%s set_gpio state=%08x smask=%08x dir=%08x dmask=%08x\n",
+ info->device_name, gpio.state, gpio.smask,
+ gpio.dir, gpio.dmask));
+
+ spin_lock_irqsave(&info->port_array[0]->lock, flags);
+ if (gpio.dmask) {
+ data = rd_reg32(info, IODR);
+ data |= gpio.dmask & gpio.dir;
+ data &= ~(gpio.dmask & ~gpio.dir);
+ wr_reg32(info, IODR, data);
+ }
+ if (gpio.smask) {
+ data = rd_reg32(info, IOVR);
+ data |= gpio.smask & gpio.state;
+ data &= ~(gpio.smask & ~gpio.state);
+ wr_reg32(info, IOVR, data);
+ }
+ spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
+
+ return 0;
+}
+
+/*
+ * get general purpose IO pin state and direction
+ */
+static int get_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
+{
+ struct gpio_desc gpio;
+ if (!info->gpio_present)
+ return -EINVAL;
+ gpio.state = rd_reg32(info, IOVR);
+ gpio.smask = 0xffffffff;
+ gpio.dir = rd_reg32(info, IODR);
+ gpio.dmask = 0xffffffff;
+ if (copy_to_user(user_gpio, &gpio, sizeof(gpio)))
+ return -EFAULT;
+ DBGINFO(("%s get_gpio state=%08x dir=%08x\n",
+ info->device_name, gpio.state, gpio.dir));
+ return 0;
+}
+
+/*
+ * conditional wait facility
+ */
+static void init_cond_wait(struct cond_wait *w, unsigned int data)
+{
+ init_waitqueue_head(&w->q);
+ init_waitqueue_entry(&w->wait, current);
+ w->data = data;
+}
+
+static void add_cond_wait(struct cond_wait **head, struct cond_wait *w)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&w->q, &w->wait);
+ w->next = *head;
+ *head = w;
+}
+
+static void remove_cond_wait(struct cond_wait **head, struct cond_wait *cw)
+{
+ struct cond_wait *w, *prev;
+ remove_wait_queue(&cw->q, &cw->wait);
+ set_current_state(TASK_RUNNING);
+ for (w = *head, prev = NULL ; w != NULL ; prev = w, w = w->next) {
+ if (w == cw) {
+ if (prev != NULL)
+ prev->next = w->next;
+ else
+ *head = w->next;
+ break;
+ }
+ }
+}
+
+static void flush_cond_wait(struct cond_wait **head)
+{
+ while (*head != NULL) {
+ wake_up_interruptible(&(*head)->q);
+ *head = (*head)->next;
+ }
+}
+
+/*
+ * wait for general purpose I/O pin(s) to enter specified state
+ *
+ * user_gpio fields:
+ * state - bit indicates target pin state
+ * smask - set bit indicates watched pin
+ *
+ * The wait ends when at least one watched pin enters the specified
+ * state. When 0 (no error) is returned, user_gpio->state is set to the
+ * state of all GPIO pins when the wait ends.
+ *
+ * Note: Each pin may be a dedicated input, dedicated output, or
+ * configurable input/output. The number and configuration of pins
+ * varies with the specific adapter model. Only input pins (dedicated
+ * or configured) can be monitored with this function.
+ */
+static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
+{
+ unsigned long flags;
+ int rc = 0;
+ struct gpio_desc gpio;
+ struct cond_wait wait;
+ u32 state;
+
+ if (!info->gpio_present)
+ return -EINVAL;
+ if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
+ return -EFAULT;
+ DBGINFO(("%s wait_gpio() state=%08x smask=%08x\n",
+ info->device_name, gpio.state, gpio.smask));
+ /* ignore output pins identified by set IODR bit */
+ if ((gpio.smask &= ~rd_reg32(info, IODR)) == 0)
+ return -EINVAL;
+ init_cond_wait(&wait, gpio.smask);
+
+ spin_lock_irqsave(&info->port_array[0]->lock, flags);
+ /* enable interrupts for watched pins */
+ wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask);
+ /* get current pin states */
+ state = rd_reg32(info, IOVR);
+
+ if (gpio.smask & ~(state ^ gpio.state)) {
+ /* already in target state */
+ gpio.state = state;
+ } else {
+ /* wait for target state */
+ add_cond_wait(&info->gpio_wait_q, &wait);
+ spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
+ schedule();
+ if (signal_pending(current))
+ rc = -ERESTARTSYS;
+ else
+ gpio.state = wait.data;
+ spin_lock_irqsave(&info->port_array[0]->lock, flags);
+ remove_cond_wait(&info->gpio_wait_q, &wait);
+ }
+
+ /* disable all GPIO interrupts if no waiting processes */
+ if (info->gpio_wait_q == NULL)
+ wr_reg32(info, IOER, 0);
+ spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
+
+ if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio)))
+ rc = -EFAULT;
+ return rc;
+}
+
+static int modem_input_wait(struct slgt_info *info,int arg)
+{
+ unsigned long flags;
+ int rc;
+ struct mgsl_icount cprev, cnow;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /* save current irq counts */
+ spin_lock_irqsave(&info->lock,flags);
+ cprev = info->icount;
+ add_wait_queue(&info->status_event_wait_q, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ for(;;) {
+ schedule();
+ if (signal_pending(current)) {
+ rc = -ERESTARTSYS;
+ break;
+ }
+
+ /* get new irq counts */
+ spin_lock_irqsave(&info->lock,flags);
+ cnow = info->icount;
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ /* if no change, wait aborted for some reason */
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
+ rc = -EIO;
+ break;
+ }
+
+ /* check for change in caller specified modem input */
+ if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
+ (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
+ (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
+ (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
+ rc = 0;
+ break;
+ }
+
+ cprev = cnow;
+ }
+ remove_wait_queue(&info->status_event_wait_q, &wait);
+ set_current_state(TASK_RUNNING);
+ return rc;
+}
+
+/*
+ * return state of serial control and status signals
+ */
+static int tiocmget(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
+ ((info->signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
+ ((info->signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
+ ((info->signals & SerialSignal_RI) ? TIOCM_RNG:0) +
+ ((info->signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
+ ((info->signals & SerialSignal_CTS) ? TIOCM_CTS:0);
+
+ DBGINFO(("%s tiocmget value=%08X\n", info->device_name, result));
+ return result;
+}
+
+/*
+ * set modem control signals (DTR/RTS)
+ *
+ * set bit mask of signals to assert (TIOCM_RTS/TIOCM_DTR)
+ * clear bit mask of signals to negate
+ */
+static int tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ DBGINFO(("%s tiocmset(%x,%x)\n", info->device_name, set, clear));
+
+ if (set & TIOCM_RTS)
+ info->signals |= SerialSignal_RTS;
+ if (set & TIOCM_DTR)
+ info->signals |= SerialSignal_DTR;
+ if (clear & TIOCM_RTS)
+ info->signals &= ~SerialSignal_RTS;
+ if (clear & TIOCM_DTR)
+ info->signals &= ~SerialSignal_DTR;
+
+ spin_lock_irqsave(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+static int carrier_raised(struct tty_port *port)
+{
+ unsigned long flags;
+ struct slgt_info *info = container_of(port, struct slgt_info, port);
+
+ spin_lock_irqsave(&info->lock,flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ return (info->signals & SerialSignal_DCD) ? 1 : 0;
+}
+
+static void dtr_rts(struct tty_port *port, int on)
+{
+ unsigned long flags;
+ struct slgt_info *info = container_of(port, struct slgt_info, port);
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (on)
+ info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+
+/*
+ * block current process until the device is ready to open
+ */
+static int block_til_ready(struct tty_struct *tty, struct file *filp,
+ struct slgt_info *info)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int retval;
+ bool do_clocal = false;
+ bool extra_count = false;
+ unsigned long flags;
+ int cd;
+ struct tty_port *port = &info->port;
+
+ DBGINFO(("%s block_til_ready\n", tty->driver->name));
+
+ if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
+ /* nonblock mode is set or port is not enabled */
+ port->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+ }
+
+ if (tty->termios->c_cflag & CLOCAL)
+ do_clocal = true;
+
+ /* Wait for carrier detect and the line to become
+ * free (i.e., not in use by the callout). While we are in
+ * this loop, port->count is dropped by one, so that
+ * close() knows when to free things. We restore it upon
+ * exit, either normal or abnormal.
+ */
+
+ retval = 0;
+ add_wait_queue(&port->open_wait, &wait);
+
+ spin_lock_irqsave(&info->lock, flags);
+ if (!tty_hung_up_p(filp)) {
+ extra_count = true;
+ port->count--;
+ }
+ spin_unlock_irqrestore(&info->lock, flags);
+ port->blocked_open++;
+
+ while (1) {
+ if ((tty->termios->c_cflag & CBAUD))
+ tty_port_raise_dtr_rts(port);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
+ retval = (port->flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS;
+ break;
+ }
+
+ cd = tty_port_carrier_raised(port);
+
+ if (!(port->flags & ASYNC_CLOSING) && (do_clocal || cd ))
+ break;
+
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
+ tty_unlock();
+ schedule();
+ tty_lock();
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&port->open_wait, &wait);
+
+ if (extra_count)
+ port->count++;
+ port->blocked_open--;
+
+ if (!retval)
+ port->flags |= ASYNC_NORMAL_ACTIVE;
+
+ DBGINFO(("%s block_til_ready ready, rc=%d\n", tty->driver->name, retval));
+ return retval;
+}
+
+static int alloc_tmp_rbuf(struct slgt_info *info)
+{
+ info->tmp_rbuf = kmalloc(info->max_frame_size + 5, GFP_KERNEL);
+ if (info->tmp_rbuf == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static void free_tmp_rbuf(struct slgt_info *info)
+{
+ kfree(info->tmp_rbuf);
+ info->tmp_rbuf = NULL;
+}
+
+/*
+ * allocate DMA descriptor lists.
+ */
+static int alloc_desc(struct slgt_info *info)
+{
+ unsigned int i;
+ unsigned int pbufs;
+
+ /* allocate memory to hold descriptor lists */
+ info->bufs = pci_alloc_consistent(info->pdev, DESC_LIST_SIZE, &info->bufs_dma_addr);
+ if (info->bufs == NULL)
+ return -ENOMEM;
+
+ memset(info->bufs, 0, DESC_LIST_SIZE);
+
+ info->rbufs = (struct slgt_desc*)info->bufs;
+ info->tbufs = ((struct slgt_desc*)info->bufs) + info->rbuf_count;
+
+ pbufs = (unsigned int)info->bufs_dma_addr;
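+ /* bus address of the descriptor list, used to build physical link addresses below */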
+
+ /*
+ * Build circular lists of descriptors
+ */
+
+ for (i=0; i < info->rbuf_count; i++) {
+ /* physical address of this descriptor */
+ info->rbufs[i].pdesc = pbufs + (i * sizeof(struct slgt_desc));
+
+ /* physical address of next descriptor */
+ if (i == info->rbuf_count - 1)
+ info->rbufs[i].next = cpu_to_le32(pbufs);
+ else
+ info->rbufs[i].next = cpu_to_le32(pbufs + ((i+1) * sizeof(struct slgt_desc)));
+ set_desc_count(info->rbufs[i], DMABUFSIZE);
+ }
+
+ for (i=0; i < info->tbuf_count; i++) {
+ /* physical address of this descriptor */
+ info->tbufs[i].pdesc = pbufs + ((info->rbuf_count + i) * sizeof(struct slgt_desc));
+
+ /* physical address of next descriptor */
+ if (i == info->tbuf_count - 1)
+ info->tbufs[i].next = cpu_to_le32(pbufs + info->rbuf_count * sizeof(struct slgt_desc));
+ else
+ info->tbufs[i].next = cpu_to_le32(pbufs + ((info->rbuf_count + i + 1) * sizeof(struct slgt_desc)));
+ }
+
+ return 0;
+}
+
+static void free_desc(struct slgt_info *info)
+{
+ if (info->bufs != NULL) {
+ pci_free_consistent(info->pdev, DESC_LIST_SIZE, info->bufs, info->bufs_dma_addr);
+ info->bufs = NULL;
+ info->rbufs = NULL;
+ info->tbufs = NULL;
+ }
+}
+
+static int alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
+{
+ int i;
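+ /* allocate one coherent DMA data buffer per descriptor and record its bus address */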
+ for (i=0; i < count; i++) {
+ if ((bufs[i].buf = pci_alloc_consistent(info->pdev, DMABUFSIZE, &bufs[i].buf_dma_addr)) == NULL)
+ return -ENOMEM;
+ bufs[i].pbuf = cpu_to_le32((unsigned int)bufs[i].buf_dma_addr);
+ }
+ return 0;
+}
+
+static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
+{
+ int i;
+ for (i=0; i < count; i++) {
+ if (bufs[i].buf == NULL)
+ continue;
+ pci_free_consistent(info->pdev, DMABUFSIZE, bufs[i].buf, bufs[i].buf_dma_addr);
+ bufs[i].buf = NULL;
+ }
+}
+
+static int alloc_dma_bufs(struct slgt_info *info)
+{
+ info->rbuf_count = 32;
+ info->tbuf_count = 32;
+
+ if (alloc_desc(info) < 0 ||
+ alloc_bufs(info, info->rbufs, info->rbuf_count) < 0 ||
+ alloc_bufs(info, info->tbufs, info->tbuf_count) < 0 ||
+ alloc_tmp_rbuf(info) < 0) {
+ DBGERR(("%s DMA buffer alloc fail\n", info->device_name));
+ return -ENOMEM;
+ }
+ reset_rbufs(info);
+ return 0;
+}
+
+static void free_dma_bufs(struct slgt_info *info)
+{
+ if (info->bufs) {
+ free_bufs(info, info->rbufs, info->rbuf_count);
+ free_bufs(info, info->tbufs, info->tbuf_count);
+ free_desc(info);
+ }
+ free_tmp_rbuf(info);
+}
+
+static int claim_resources(struct slgt_info *info)
+{
+ if (request_mem_region(info->phys_reg_addr, SLGT_REG_SIZE, "synclink_gt") == NULL) {
+ DBGERR(("%s reg addr conflict, addr=%08X\n",
+ info->device_name, info->phys_reg_addr));
+ info->init_error = DiagStatus_AddressConflict;
+ goto errout;
+ }
+ else
+ info->reg_addr_requested = true;
+
+ info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE);
+ if (!info->reg_addr) {
+ DBGERR(("%s can't map device registers, addr=%08X\n",
+ info->device_name, info->phys_reg_addr));
+ info->init_error = DiagStatus_CantAssignPciResources;
+ goto errout;
+ }
+ return 0;
+
+errout:
+ release_resources(info);
+ return -ENODEV;
+}
+
+static void release_resources(struct slgt_info *info)
+{
+ if (info->irq_requested) {
+ free_irq(info->irq_level, info);
+ info->irq_requested = false;
+ }
+
+ if (info->reg_addr_requested) {
+ release_mem_region(info->phys_reg_addr, SLGT_REG_SIZE);
+ info->reg_addr_requested = false;
+ }
+
+ if (info->reg_addr) {
+ iounmap(info->reg_addr);
+ info->reg_addr = NULL;
+ }
+}
+
+/* Add the specified device instance data structure to the
+ * global linked list of devices and increment the device count.
+ */
+static void add_device(struct slgt_info *info)
+{
+ char *devstr;
+
+ info->next_device = NULL;
+ info->line = slgt_device_count;
+ sprintf(info->device_name, "%s%d", tty_dev_prefix, info->line);
+
+ if (info->line < MAX_DEVICES) {
+ if (maxframe[info->line])
+ info->max_frame_size = maxframe[info->line];
+ }
+
+ slgt_device_count++;
+
+ if (!slgt_device_list)
+ slgt_device_list = info;
+ else {
+ struct slgt_info *current_dev = slgt_device_list;
+ while(current_dev->next_device)
+ current_dev = current_dev->next_device;
+ current_dev->next_device = info;
+ }
+
+ if (info->max_frame_size < 4096)
+ info->max_frame_size = 4096;
+ else if (info->max_frame_size > 65535)
+ info->max_frame_size = 65535;
+
+ switch(info->pdev->device) {
+ case SYNCLINK_GT_DEVICE_ID:
+ devstr = "GT";
+ break;
+ case SYNCLINK_GT2_DEVICE_ID:
+ devstr = "GT2";
+ break;
+ case SYNCLINK_GT4_DEVICE_ID:
+ devstr = "GT4";
+ break;
+ case SYNCLINK_AC_DEVICE_ID:
+ devstr = "AC";
+ info->params.mode = MGSL_MODE_ASYNC;
+ break;
+ default:
+ devstr = "(unknown model)";
+ }
+ printk("SyncLink %s %s IO=%08x IRQ=%d MaxFrameSize=%u\n",
+ devstr, info->device_name, info->phys_reg_addr,
+ info->irq_level, info->max_frame_size);
+
+#if SYNCLINK_GENERIC_HDLC
+ hdlcdev_init(info);
+#endif
+}
+
+static const struct tty_port_operations slgt_port_ops = {
+ .carrier_raised = carrier_raised,
+ .dtr_rts = dtr_rts,
+};
+
+/*
+ * allocate device instance structure, return NULL on failure
+ */
+static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
+{
+ struct slgt_info *info;
+
+ info = kzalloc(sizeof(struct slgt_info), GFP_KERNEL);
+
+ if (!info) {
+ DBGERR(("%s device alloc failed adapter=%d port=%d\n",
+ driver_name, adapter_num, port_num));
+ } else {
+ tty_port_init(&info->port);
+ info->port.ops = &slgt_port_ops;
+ info->magic = MGSL_MAGIC;
+ INIT_WORK(&info->task, bh_handler);
+ info->max_frame_size = 4096;
+ info->base_clock = 14745600;
+ info->rbuf_fill_level = DMABUFSIZE;
+ info->port.close_delay = 5*HZ/10;
+ info->port.closing_wait = 30*HZ;
+ init_waitqueue_head(&info->status_event_wait_q);
+ init_waitqueue_head(&info->event_wait_q);
+ spin_lock_init(&info->netlock);
+ memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
+ info->idle_mode = HDLC_TXIDLE_FLAGS;
+ info->adapter_num = adapter_num;
+ info->port_num = port_num;
+
+ setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
+ setup_timer(&info->rx_timer, rx_timeout, (unsigned long)info);
+
+ /* Copy configuration info to device instance data */
+ info->pdev = pdev;
+ info->irq_level = pdev->irq;
+ info->phys_reg_addr = pci_resource_start(pdev,0);
+
+ info->bus_type = MGSL_BUS_TYPE_PCI;
+ info->irq_flags = IRQF_SHARED;
+
+ info->init_error = -1; /* assume error, set to 0 on successful init */
+ }
+
+ return info;
+}
+
+static void device_init(int adapter_num, struct pci_dev *pdev)
+{
+ struct slgt_info *port_array[SLGT_MAX_PORTS];
+ int i;
+ int port_count = 1;
+
+ if (pdev->device == SYNCLINK_GT2_DEVICE_ID)
+ port_count = 2;
+ else if (pdev->device == SYNCLINK_GT4_DEVICE_ID)
+ port_count = 4;
+
+ /* allocate device instances for all ports */
+ for (i=0; i < port_count; ++i) {
+ port_array[i] = alloc_dev(adapter_num, i, pdev);
+ if (port_array[i] == NULL) {
+ for (--i; i >= 0; --i)
+ kfree(port_array[i]);
+ return;
+ }
+ }
+
+ /* give copy of port_array to all ports and add to device list */
+ for (i=0; i < port_count; ++i) {
+ memcpy(port_array[i]->port_array, port_array, sizeof(port_array));
+ add_device(port_array[i]);
+ port_array[i]->port_count = port_count;
+ spin_lock_init(&port_array[i]->lock);
+ }
+
+ /* Allocate and claim adapter resources */
+ if (!claim_resources(port_array[0])) {
+
+ alloc_dma_bufs(port_array[0]);
+
+ /* copy resource information from first port to others */
+ for (i = 1; i < port_count; ++i) {
+ port_array[i]->irq_level = port_array[0]->irq_level;
+ port_array[i]->reg_addr = port_array[0]->reg_addr;
+ alloc_dma_bufs(port_array[i]);
+ }
+
+ if (request_irq(port_array[0]->irq_level,
+ slgt_interrupt,
+ port_array[0]->irq_flags,
+ port_array[0]->device_name,
+ port_array[0]) < 0) {
+ DBGERR(("%s request_irq failed IRQ=%d\n",
+ port_array[0]->device_name,
+ port_array[0]->irq_level));
+ } else {
+ port_array[0]->irq_requested = true;
+ adapter_test(port_array[0]);
+ for (i=1 ; i < port_count ; i++) {
+ port_array[i]->init_error = port_array[0]->init_error;
+ port_array[i]->gpio_present = port_array[0]->gpio_present;
+ }
+ }
+ }
+
+ for (i=0; i < port_count; ++i)
+ tty_register_device(serial_driver, port_array[i]->line, &(port_array[i]->pdev->dev));
+}
+
+static int __devinit init_one(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ if (pci_enable_device(dev)) {
+ printk("error enabling pci device %p\n", dev);
+ return -EIO;
+ }
+ pci_set_master(dev);
+ device_init(slgt_device_count, dev);
+ return 0;
+}
+
+static void __devexit remove_one(struct pci_dev *dev)
+{
+}
+
+static const struct tty_operations ops = {
+ .open = open,
+ .close = close,
+ .write = write,
+ .put_char = put_char,
+ .flush_chars = flush_chars,
+ .write_room = write_room,
+ .chars_in_buffer = chars_in_buffer,
+ .flush_buffer = flush_buffer,
+ .ioctl = ioctl,
+ .compat_ioctl = slgt_compat_ioctl,
+ .throttle = throttle,
+ .unthrottle = unthrottle,
+ .send_xchar = send_xchar,
+ .break_ctl = set_break,
+ .wait_until_sent = wait_until_sent,
+ .set_termios = set_termios,
+ .stop = tx_hold,
+ .start = tx_release,
+ .hangup = hangup,
+ .tiocmget = tiocmget,
+ .tiocmset = tiocmset,
+ .get_icount = get_icount,
+ .proc_fops = &synclink_gt_proc_fops,
+};
+
+static void slgt_cleanup(void)
+{
+ int rc;
+ struct slgt_info *info;
+ struct slgt_info *tmp;
+
+ printk(KERN_INFO "unload %s\n", driver_name);
+
+ if (serial_driver) {
+ for (info=slgt_device_list ; info != NULL ; info=info->next_device)
+ tty_unregister_device(serial_driver, info->line);
+ if ((rc = tty_unregister_driver(serial_driver)))
+ DBGERR(("tty_unregister_driver error=%d\n", rc));
+ put_tty_driver(serial_driver);
+ }
+
+ /* reset devices */
+ info = slgt_device_list;
+ while(info) {
+ reset_port(info);
+ info = info->next_device;
+ }
+
+ /* release devices */
+ info = slgt_device_list;
+ while(info) {
+#if SYNCLINK_GENERIC_HDLC
+ hdlcdev_exit(info);
+#endif
+ free_dma_bufs(info);
+ free_tmp_rbuf(info);
+ if (info->port_num == 0)
+ release_resources(info);
+ tmp = info;
+ info = info->next_device;
+ kfree(tmp);
+ }
+
+ if (pci_registered)
+ pci_unregister_driver(&pci_driver);
+}
+
+/*
+ * Driver initialization entry point.
+ */
+static int __init slgt_init(void)
+{
+ int rc;
+
+ printk(KERN_INFO "%s\n", driver_name);
+
+ serial_driver = alloc_tty_driver(MAX_DEVICES);
+ if (!serial_driver) {
+ printk("%s can't allocate tty driver\n", driver_name);
+ return -ENOMEM;
+ }
+
+ /* Initialize the tty_driver structure */
+
+ serial_driver->owner = THIS_MODULE;
+ serial_driver->driver_name = tty_driver_name;
+ serial_driver->name = tty_dev_prefix;
+ serial_driver->major = ttymajor;
+ serial_driver->minor_start = 64;
+ serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ serial_driver->init_termios = tty_std_termios;
+ serial_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ serial_driver->init_termios.c_ispeed = 9600;
+ serial_driver->init_termios.c_ospeed = 9600;
+ serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(serial_driver, &ops);
+ if ((rc = tty_register_driver(serial_driver)) < 0) {
+ DBGERR(("%s can't register serial driver\n", driver_name));
+ put_tty_driver(serial_driver);
+ serial_driver = NULL;
+ goto error;
+ }
+
+ printk(KERN_INFO "%s, tty major#%d\n",
+ driver_name, serial_driver->major);
+
+ slgt_device_count = 0;
+ if ((rc = pci_register_driver(&pci_driver)) < 0) {
+ printk("%s pci_register_driver error=%d\n", driver_name, rc);
+ goto error;
+ }
+ pci_registered = true;
+
+ if (!slgt_device_list)
+ printk("%s no devices found\n",driver_name);
+
+ return 0;
+
+error:
+ slgt_cleanup();
+ return rc;
+}
+
+static void __exit slgt_exit(void)
+{
+ slgt_cleanup();
+}
+
+module_init(slgt_init);
+module_exit(slgt_exit);
+
+/*
+ * register access routines
+ */
+
+#define CALC_REGADDR() \
+ unsigned long reg_addr = ((unsigned long)info->reg_addr) + addr; \
+ if (addr >= 0x80) \
+ reg_addr += (info->port_num) * 32; \
+ else if (addr >= 0x40) \
+ reg_addr += (info->port_num) * 16;
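+
+/* Worked example (illustrative offsets): registers below 0x40 are common
+ * to all ports, offsets 0x40..0x7f are banked per port in 16-byte blocks,
+ * and offsets 0x80 and above in 32-byte blocks, so an offset of 0x94
+ * accessed on port 2 resolves to reg_addr = base + 0x94 + (2 * 32).
+ */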
+
+static __u8 rd_reg8(struct slgt_info *info, unsigned int addr)
+{
+ CALC_REGADDR();
+ return readb((void __iomem *)reg_addr);
+}
+
+static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value)
+{
+ CALC_REGADDR();
+ writeb(value, (void __iomem *)reg_addr);
+}
+
+static __u16 rd_reg16(struct slgt_info *info, unsigned int addr)
+{
+ CALC_REGADDR();
+ return readw((void __iomem *)reg_addr);
+}
+
+static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value)
+{
+ CALC_REGADDR();
+ writew(value, (void __iomem *)reg_addr);
+}
+
+static __u32 rd_reg32(struct slgt_info *info, unsigned int addr)
+{
+ CALC_REGADDR();
+ return readl((void __iomem *)reg_addr);
+}
+
+static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value)
+{
+ CALC_REGADDR();
+ writel(value, (void __iomem *)reg_addr);
+}
+
+static void rdma_reset(struct slgt_info *info)
+{
+ unsigned int i;
+
+ /* set reset bit */
+ wr_reg32(info, RDCSR, BIT1);
+
+ /* wait for enable bit cleared */
+ for(i=0 ; i < 1000 ; i++)
+ if (!(rd_reg32(info, RDCSR) & BIT0))
+ break;
+}
+
+static void tdma_reset(struct slgt_info *info)
+{
+ unsigned int i;
+
+ /* set reset bit */
+ wr_reg32(info, TDCSR, BIT1);
+
+ /* wait for enable bit cleared */
+ for(i=0 ; i < 1000 ; i++)
+ if (!(rd_reg32(info, TDCSR) & BIT0))
+ break;
+}
+
+/*
+ * enable internal loopback
+ * TxCLK and RxCLK are generated from BRG
+ * and TxD is looped back to RxD internally.
+ */
+static void enable_loopback(struct slgt_info *info)
+{
+ /* SCR (serial control) BIT2=loopback enable */
+ wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT2));
+
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ /* CCR (clock control)
+ * 07..05 tx clock source (010 = BRG)
+ * 04..02 rx clock source (010 = BRG)
+ * 01 auxclk enable (0 = disable)
+ * 00 BRG enable (1 = enable)
+ *
+ * 0100 1001
+ */
+ wr_reg8(info, CCR, 0x49);
+
+ /* set speed if available, otherwise use default */
+ if (info->params.clock_speed)
+ set_rate(info, info->params.clock_speed);
+ else
+ set_rate(info, 3686400);
+ }
+}
+
+/*
+ * set baud rate generator to specified rate
+ */
+static void set_rate(struct slgt_info *info, u32 rate)
+{
+ unsigned int div;
+ unsigned int osc = info->base_clock;
+
+ /* div = osc/rate - 1
+ *
+ * Round div up if osc/rate is not integer to
+ * force to next slowest rate.
+ */
+
+ if (rate) {
+ div = osc/rate;
+ if (!(osc % rate) && div)
+ div--;
+ wr_reg16(info, BDR, (unsigned short)div);
+ }
+}
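+
+/* Worked example (illustrative): with the default 14745600Hz base clock
+ * and rate 9600, osc/rate = 1536 exactly, so BDR is programmed with 1535
+ * (divisor minus one). When osc/rate is not an integer the decrement is
+ * skipped, rounding the effective divisor up to the next slowest rate.
+ */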
+
+static void rx_stop(struct slgt_info *info)
+{
+ unsigned short val;
+
+ /* disable and reset receiver */
+ val = rd_reg16(info, RCR) & ~BIT1; /* clear enable bit */
+ wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
+ wr_reg16(info, RCR, val); /* clear reset bit */
+
+ slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA + IRQ_RXIDLE);
+
+ /* clear pending rx interrupts */
+ wr_reg16(info, SSR, IRQ_RXIDLE + IRQ_RXOVER);
+
+ rdma_reset(info);
+
+ info->rx_enabled = false;
+ info->rx_restart = false;
+}
+
+static void rx_start(struct slgt_info *info)
+{
+ unsigned short val;
+
+ slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA);
+
+ /* clear pending rx overrun IRQ */
+ wr_reg16(info, SSR, IRQ_RXOVER);
+
+ /* reset and disable receiver */
+ val = rd_reg16(info, RCR) & ~BIT1; /* clear enable bit */
+ wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
+ wr_reg16(info, RCR, val); /* clear reset bit */
+
+ rdma_reset(info);
+ reset_rbufs(info);
+
+ if (info->rx_pio) {
+ /* rx request when rx FIFO not empty */
+ wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) & ~BIT14));
+ slgt_irq_on(info, IRQ_RXDATA);
+ if (info->params.mode == MGSL_MODE_ASYNC) {
+ /* enable saving of rx status */
+ wr_reg32(info, RDCSR, BIT6);
+ }
+ } else {
+ /* rx request when rx FIFO half full */
+ wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT14));
+ /* set 1st descriptor address */
+ wr_reg32(info, RDDAR, info->rbufs[0].pdesc);
+
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ /* enable rx DMA and DMA interrupt */
+ wr_reg32(info, RDCSR, (BIT2 + BIT0));
+ } else {
+ /* enable saving of rx status, rx DMA and DMA interrupt */
+ wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0));
+ }
+ }
+
+ slgt_irq_on(info, IRQ_RXOVER);
+
+ /* enable receiver */
+ wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | BIT1));
+
+ info->rx_restart = false;
+ info->rx_enabled = true;
+}
+
+static void tx_start(struct slgt_info *info)
+{
+ if (!info->tx_enabled) {
+ wr_reg16(info, TCR,
+ (unsigned short)((rd_reg16(info, TCR) | BIT1) & ~BIT2));
+ info->tx_enabled = true;
+ }
+
+ if (desc_count(info->tbufs[info->tbuf_start])) {
+ info->drop_rts_on_tx_done = false;
+
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
+ get_signals(info);
+ if (!(info->signals & SerialSignal_RTS)) {
+ info->signals |= SerialSignal_RTS;
+ set_signals(info);
+ info->drop_rts_on_tx_done = true;
+ }
+ }
+
+ slgt_irq_off(info, IRQ_TXDATA);
+ slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE);
+ /* clear tx idle and underrun status bits */
+ wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
+ } else {
+ slgt_irq_off(info, IRQ_TXDATA);
+ slgt_irq_on(info, IRQ_TXIDLE);
+ /* clear tx idle status bit */
+ wr_reg16(info, SSR, IRQ_TXIDLE);
+ }
+ /* set 1st descriptor address and start DMA */
+ wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
+ wr_reg32(info, TDCSR, BIT2 + BIT0);
+ info->tx_active = true;
+ }
+}
+
+static void tx_stop(struct slgt_info *info)
+{
+ unsigned short val;
+
+ del_timer(&info->tx_timer);
+
+ tdma_reset(info);
+
+ /* reset and disable transmitter */
+ val = rd_reg16(info, TCR) & ~BIT1; /* clear enable bit */
+ wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
+
+ slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
+
+ /* clear tx idle and underrun status bit */
+ wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
+
+ reset_tbufs(info);
+
+ info->tx_enabled = false;
+ info->tx_active = false;
+}
+
+static void reset_port(struct slgt_info *info)
+{
+ if (!info->reg_addr)
+ return;
+
+ tx_stop(info);
+ rx_stop(info);
+
+ info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ set_signals(info);
+
+ slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
+}
+
+static void reset_adapter(struct slgt_info *info)
+{
+ int i;
+ for (i=0; i < info->port_count; ++i) {
+ if (info->port_array[i])
+ reset_port(info->port_array[i]);
+ }
+}
+
+static void async_mode(struct slgt_info *info)
+{
+ unsigned short val;
+
+ slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
+ tx_stop(info);
+ rx_stop(info);
+
+ /* TCR (tx control)
+ *
+ * 15..13 mode, 010=async
+ * 12..10 encoding, 000=NRZ
+ * 09 parity enable
+ * 08 1=odd parity, 0=even parity
+ * 07 1=RTS driver control
+ * 06 1=break enable
+ * 05..04 character length
+ * 00=5 bits
+ * 01=6 bits
+ * 10=7 bits
+ * 11=8 bits
+ * 03 0=1 stop bit, 1=2 stop bits
+ * 02 reset
+ * 01 enable
+ * 00 auto-CTS enable
+ */
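+ /* Worked example (illustrative): a plain 8N1 setup with no automatic
+ * flow control sets only the async mode field and character length,
+ * giving 0x4000 | BIT5 | BIT4 = 0x4030.
+ */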
+ val = 0x4000;
+
+ if (info->if_mode & MGSL_INTERFACE_RTS_EN)
+ val |= BIT7;
+
+ if (info->params.parity != ASYNC_PARITY_NONE) {
+ val |= BIT9;
+ if (info->params.parity == ASYNC_PARITY_ODD)
+ val |= BIT8;
+ }
+
+ switch (info->params.data_bits)
+ {
+ case 6: val |= BIT4; break;
+ case 7: val |= BIT5; break;
+ case 8: val |= BIT5 + BIT4; break;
+ }
+
+ if (info->params.stop_bits != 1)
+ val |= BIT3;
+
+ if (info->params.flags & HDLC_FLAG_AUTO_CTS)
+ val |= BIT0;
+
+ wr_reg16(info, TCR, val);
+
+ /* RCR (rx control)
+ *
+ * 15..13 mode, 010=async
+ * 12..10 encoding, 000=NRZ
+ * 09 parity enable
+ * 08 1=odd parity, 0=even parity
+ * 07..06 reserved, must be 0
+ * 05..04 character length
+ * 00=5 bits
+ * 01=6 bits
+ * 10=7 bits
+ * 11=8 bits
+ * 03 reserved, must be zero
+ * 02 reset
+ * 01 enable
+ * 00 auto-DCD enable
+ */
+ val = 0x4000;
+
+ if (info->params.parity != ASYNC_PARITY_NONE) {
+ val |= BIT9;
+ if (info->params.parity == ASYNC_PARITY_ODD)
+ val |= BIT8;
+ }
+
+ switch (info->params.data_bits)
+ {
+ case 6: val |= BIT4; break;
+ case 7: val |= BIT5; break;
+ case 8: val |= BIT5 + BIT4; break;
+ }
+
+ if (info->params.flags & HDLC_FLAG_AUTO_DCD)
+ val |= BIT0;
+
+ wr_reg16(info, RCR, val);
+
+ /* CCR (clock control)
+ *
+ * 07..05 011 = tx clock source is BRG/16
+ * 04..02 010 = rx clock source is BRG
+ * 01 0 = auxclk disabled
+ * 00 1 = BRG enabled
+ *
+ * 0110 1001
+ */
+ wr_reg8(info, CCR, 0x69);
+
+ msc_set_vcr(info);
+
+ /* SCR (serial control)
+ *
+ * 15 1=tx req on FIFO half empty
+ * 14 1=rx req on FIFO half full
+ * 13 tx data IRQ enable
+ * 12 tx idle IRQ enable
+ * 11 rx break on IRQ enable
+ * 10 rx data IRQ enable
+ * 09 rx break off IRQ enable
+ * 08 overrun IRQ enable
+ * 07 DSR IRQ enable
+ * 06 CTS IRQ enable
+ * 05 DCD IRQ enable
+ * 04 RI IRQ enable
+ * 03 0=16x sampling, 1=8x sampling
+ * 02 1=txd->rxd internal loopback enable
+ * 01 reserved, must be zero
+ * 00 1=master IRQ enable
+ */
+ val = BIT15 + BIT14 + BIT0;
+ /* JCR[8] : 1 = x8 async mode feature available */
+ if ((rd_reg32(info, JCR) & BIT8) && info->params.data_rate &&
+ ((info->base_clock < (info->params.data_rate * 16)) ||
+ (info->base_clock % (info->params.data_rate * 16)))) {
+ /* use 8x sampling */
+ val |= BIT3;
+ set_rate(info, info->params.data_rate * 8);
+ } else {
+ /* use 16x sampling */
+ set_rate(info, info->params.data_rate * 16);
+ }
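+ /* Example (illustrative): with a 14745600Hz base clock, 921600bps can
+ * use 16x sampling (921600 * 16 divides the clock exactly), while
+ * 1000000bps would need a 16MHz 16x clock and so falls back to 8x
+ * sampling when JCR[8] reports the feature.
+ */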
+ wr_reg16(info, SCR, val);
+
+ slgt_irq_on(info, IRQ_RXBREAK | IRQ_RXOVER);
+
+ if (info->params.loopback)
+ enable_loopback(info);
+}
+
+static void sync_mode(struct slgt_info *info)
+{
+ unsigned short val;
+
+ slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
+ tx_stop(info);
+ rx_stop(info);
+
+ /* TCR (tx control)
+ *
+ * 15..13 mode
+ * 000=HDLC/SDLC
+ * 001=raw bit synchronous
+ * 010=asynchronous/isochronous
+ * 011=monosync byte synchronous
+ * 100=bisync byte synchronous
+ * 101=xsync byte synchronous
+ * 12..10 encoding
+ * 09 CRC enable
+ * 08 CRC32
+ * 07 1=RTS driver control
+ * 06 preamble enable
+ * 05..04 preamble length
+ * 03 share open/close flag
+ * 02 reset
+ * 01 enable
+ * 00 auto-CTS enable
+ */
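+ /* Worked example (illustrative): HDLC mode with NRZI encoding, 16-bit
+ * CRC and no preamble gives BIT2 + BIT11 + BIT10 + BIT9 = 0x0e04; the
+ * reset bit (BIT2) is written here and the enable bit is set later by
+ * tx_start().
+ */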
+ val = BIT2;
+
+ switch(info->params.mode) {
+ case MGSL_MODE_XSYNC:
+ val |= BIT15 + BIT13;
+ break;
+ case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
+ case MGSL_MODE_BISYNC: val |= BIT15; break;
+ case MGSL_MODE_RAW: val |= BIT13; break;
+ }
+ if (info->if_mode & MGSL_INTERFACE_RTS_EN)
+ val |= BIT7;
+
+ switch(info->params.encoding)
+ {
+ case HDLC_ENCODING_NRZB: val |= BIT10; break;
+ case HDLC_ENCODING_NRZI_MARK: val |= BIT11; break;
+ case HDLC_ENCODING_NRZI: val |= BIT11 + BIT10; break;
+ case HDLC_ENCODING_BIPHASE_MARK: val |= BIT12; break;
+ case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break;
+ case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break;
+ case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
+ }
+
+ switch (info->params.crc_type & HDLC_CRC_MASK)
+ {
+ case HDLC_CRC_16_CCITT: val |= BIT9; break;
+ case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
+ }
+
+ if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE)
+ val |= BIT6;
+
+ switch (info->params.preamble_length)
+ {
+ case HDLC_PREAMBLE_LENGTH_16BITS: val |= BIT5; break;
+ case HDLC_PREAMBLE_LENGTH_32BITS: val |= BIT4; break;
+ case HDLC_PREAMBLE_LENGTH_64BITS: val |= BIT5 + BIT4; break;
+ }
+
+ if (info->params.flags & HDLC_FLAG_AUTO_CTS)
+ val |= BIT0;
+
+ wr_reg16(info, TCR, val);
+
+ /* TPR (transmit preamble) */
+
+ switch (info->params.preamble)
+ {
+ case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break;
+ case HDLC_PREAMBLE_PATTERN_ONES: val = 0xff; break;
+ case HDLC_PREAMBLE_PATTERN_ZEROS: val = 0x00; break;
+ case HDLC_PREAMBLE_PATTERN_10: val = 0x55; break;
+ case HDLC_PREAMBLE_PATTERN_01: val = 0xaa; break;
+ default: val = 0x7e; break;
+ }
+ wr_reg8(info, TPR, (unsigned char)val);
+
+ /* RCR (rx control)
+ *
+ * 15..13 mode
+ * 000=HDLC/SDLC
+ * 001=raw bit synchronous
+ * 010=asynchronous/isochronous
+ * 011=monosync byte synchronous
+ * 100=bisync byte synchronous
+ * 101=xsync byte synchronous
+ * 12..10 encoding
+ * 09 CRC enable
+ * 08 CRC32
+ * 07..03 reserved, must be 0
+ * 02 reset
+ * 01 enable
+ * 00 auto-DCD enable
+ */
+ val = 0;
+
+ switch(info->params.mode) {
+ case MGSL_MODE_XSYNC:
+ val |= BIT15 + BIT13;
+ break;
+ case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
+ case MGSL_MODE_BISYNC: val |= BIT15; break;
+ case MGSL_MODE_RAW: val |= BIT13; break;
+ }
+
+ switch(info->params.encoding)
+ {
+ case HDLC_ENCODING_NRZB: val |= BIT10; break;
+ case HDLC_ENCODING_NRZI_MARK: val |= BIT11; break;
+ case HDLC_ENCODING_NRZI: val |= BIT11 + BIT10; break;
+ case HDLC_ENCODING_BIPHASE_MARK: val |= BIT12; break;
+ case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break;
+ case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break;
+ case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
+ }
+
+ switch (info->params.crc_type & HDLC_CRC_MASK)
+ {
+ case HDLC_CRC_16_CCITT: val |= BIT9; break;
+ case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
+ }
+
+ if (info->params.flags & HDLC_FLAG_AUTO_DCD)
+ val |= BIT0;
+
+ wr_reg16(info, RCR, val);
+
+ /* CCR (clock control)
+ *
+ * 07..05 tx clock source
+ * 04..02 rx clock source
+ * 01 auxclk enable
+ * 00 BRG enable
+ */
+ val = 0;
+
+ if (info->params.flags & HDLC_FLAG_TXC_BRG)
+ {
+ // when RxC source is DPLL, BRG generates 16X DPLL
+ // reference clock, so take TxC from BRG/16 to get
+ // transmit clock at actual data rate
+ if (info->params.flags & HDLC_FLAG_RXC_DPLL)
+ val |= BIT6 + BIT5; /* 011, txclk = BRG/16 */
+ else
+ val |= BIT6; /* 010, txclk = BRG */
+ }
+ else if (info->params.flags & HDLC_FLAG_TXC_DPLL)
+ val |= BIT7; /* 100, txclk = DPLL Input */
+ else if (info->params.flags & HDLC_FLAG_TXC_RXCPIN)
+ val |= BIT5; /* 001, txclk = RXC Input */
+
+ if (info->params.flags & HDLC_FLAG_RXC_BRG)
+ val |= BIT3; /* 010, rxclk = BRG */
+ else if (info->params.flags & HDLC_FLAG_RXC_DPLL)
+ val |= BIT4; /* 100, rxclk = DPLL */
+ else if (info->params.flags & HDLC_FLAG_RXC_TXCPIN)
+ val |= BIT2; /* 001, rxclk = TXC Input */
+
+ if (info->params.clock_speed)
+ val |= BIT1 + BIT0;
+
+ wr_reg8(info, CCR, (unsigned char)val);
+
+ if (info->params.flags & (HDLC_FLAG_TXC_DPLL + HDLC_FLAG_RXC_DPLL))
+ {
+ // program DPLL mode
+ switch(info->params.encoding)
+ {
+ case HDLC_ENCODING_BIPHASE_MARK:
+ case HDLC_ENCODING_BIPHASE_SPACE:
+ val = BIT7; break;
+ case HDLC_ENCODING_BIPHASE_LEVEL:
+ case HDLC_ENCODING_DIFF_BIPHASE_LEVEL:
+ val = BIT7 + BIT6; break;
+ default: val = BIT6; // NRZ encodings
+ }
+ wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | val));
+
+ // DPLL requires a 16X reference clock from BRG
+ set_rate(info, info->params.clock_speed * 16);
+ }
+ else
+ set_rate(info, info->params.clock_speed);
+
+ tx_set_idle(info);
+
+ msc_set_vcr(info);
+
+ /* SCR (serial control)
+ *
+ * 15 1=tx req on FIFO half empty
+ * 14 1=rx req on FIFO half full
+ * 13 tx data IRQ enable
+ * 12 tx idle IRQ enable
+ * 11 underrun IRQ enable
+ * 10 rx data IRQ enable
+ * 09 rx idle IRQ enable
+ * 08 overrun IRQ enable
+ * 07 DSR IRQ enable
+ * 06 CTS IRQ enable
+ * 05 DCD IRQ enable
+ * 04 RI IRQ enable
+ * 03 reserved, must be zero
+ * 02 1=txd->rxd internal loopback enable
+ * 01 reserved, must be zero
+ * 00 1=master IRQ enable
+ */
+ wr_reg16(info, SCR, BIT15 + BIT14 + BIT0);
+
+ if (info->params.loopback)
+ enable_loopback(info);
+}
+
+/*
+ * set transmit idle mode
+ */
+static void tx_set_idle(struct slgt_info *info)
+{
+ unsigned char val;
+ unsigned short tcr;
+
+ /* if preamble enabled (tcr[6] == 1) then tx idle size = 8 bits
+ * else tcr[5:4] = tx idle size: 00 = 8 bits, 01 = 16 bits
+ */
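+ /* Illustrative example: a custom 16-bit idle pattern of 0x7ee7 is split
+ * so that bits 15..8 (0x7e) go to TPR below and bits 7..0 (0xe7) are
+ * written to TIR at the end of this function.
+ */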
+ tcr = rd_reg16(info, TCR);
+ if (info->idle_mode & HDLC_TXIDLE_CUSTOM_16) {
+ /* disable preamble, set idle size to 16 bits */
+ tcr = (tcr & ~(BIT6 + BIT5)) | BIT4;
+ /* MSB of 16 bit idle specified in tx preamble register (TPR) */
+ wr_reg8(info, TPR, (unsigned char)((info->idle_mode >> 8) & 0xff));
+ } else if (!(tcr & BIT6)) {
+ /* preamble is disabled, set idle size to 8 bits */
+ tcr &= ~(BIT5 + BIT4);
+ }
+ wr_reg16(info, TCR, tcr);
+
+ if (info->idle_mode & (HDLC_TXIDLE_CUSTOM_8 | HDLC_TXIDLE_CUSTOM_16)) {
+ /* LSB of custom tx idle specified in tx idle register */
+ val = (unsigned char)(info->idle_mode & 0xff);
+ } else {
+ /* standard 8 bit idle patterns */
+ switch(info->idle_mode)
+ {
+ case HDLC_TXIDLE_FLAGS: val = 0x7e; break;
+ case HDLC_TXIDLE_ALT_ZEROS_ONES:
+ case HDLC_TXIDLE_ALT_MARK_SPACE: val = 0xaa; break;
+ case HDLC_TXIDLE_ZEROS:
+ case HDLC_TXIDLE_SPACE: val = 0x00; break;
+ default: val = 0xff;
+ }
+ }
+
+ wr_reg8(info, TIR, val);
+}
+
+/*
+ * get state of V24 status (input) signals
+ */
+static void get_signals(struct slgt_info *info)
+{
+ unsigned short status = rd_reg16(info, SSR);
+
+ /* clear all serial signals except DTR and RTS */
+ info->signals &= SerialSignal_DTR + SerialSignal_RTS;
+
+ if (status & BIT3)
+ info->signals |= SerialSignal_DSR;
+ if (status & BIT2)
+ info->signals |= SerialSignal_CTS;
+ if (status & BIT1)
+ info->signals |= SerialSignal_DCD;
+ if (status & BIT0)
+ info->signals |= SerialSignal_RI;
+}
+
+/*
+ * set V.24 Control Register based on current configuration
+ */
+static void msc_set_vcr(struct slgt_info *info)
+{
+ unsigned char val = 0;
+
+ /* VCR (V.24 control)
+ *
+ * 07..04 serial IF select
+ * 03 DTR
+ * 02 RTS
+ * 01 LL
+ * 00 RL
+ */
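+ /* Worked example (illustrative): RS-232 interface mode with DTR and RTS
+ * asserted produces BIT5 + BIT3 + BIT2 = 0x2c.
+ */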
+
+ switch(info->if_mode & MGSL_INTERFACE_MASK)
+ {
+ case MGSL_INTERFACE_RS232:
+ val |= BIT5; /* 0010 */
+ break;
+ case MGSL_INTERFACE_V35:
+ val |= BIT7 + BIT6 + BIT5; /* 1110 */
+ break;
+ case MGSL_INTERFACE_RS422:
+ val |= BIT6; /* 0100 */
+ break;
+ }
+
+ if (info->if_mode & MGSL_INTERFACE_MSB_FIRST)
+ val |= BIT4;
+ if (info->signals & SerialSignal_DTR)
+ val |= BIT3;
+ if (info->signals & SerialSignal_RTS)
+ val |= BIT2;
+ if (info->if_mode & MGSL_INTERFACE_LL)
+ val |= BIT1;
+ if (info->if_mode & MGSL_INTERFACE_RL)
+ val |= BIT0;
+ wr_reg8(info, VCR, val);
+}
+
+/*
+ * set state of V24 control (output) signals
+ */
+static void set_signals(struct slgt_info *info)
+{
+ unsigned char val = rd_reg8(info, VCR);
+ if (info->signals & SerialSignal_DTR)
+ val |= BIT3;
+ else
+ val &= ~BIT3;
+ if (info->signals & SerialSignal_RTS)
+ val |= BIT2;
+ else
+ val &= ~BIT2;
+ wr_reg8(info, VCR, val);
+}
+
+/*
+ * free range of receive DMA buffers (i to last)
+ */
+static void free_rbufs(struct slgt_info *info, unsigned int i, unsigned int last)
+{
+ int done = 0;
+
+ while(!done) {
+ /* reset current buffer for reuse */
+ info->rbufs[i].status = 0;
+ set_desc_count(info->rbufs[i], info->rbuf_fill_level);
+ if (i == last)
+ done = 1;
+ if (++i == info->rbuf_count)
+ i = 0;
+ }
+ info->rbuf_current = i;
+}
+
+/*
+ * mark all receive DMA buffers as free
+ */
+static void reset_rbufs(struct slgt_info *info)
+{
+ free_rbufs(info, 0, info->rbuf_count - 1);
+ info->rbuf_fill_index = 0;
+ info->rbuf_fill_count = 0;
+}
+
+/*
+ * pass receive HDLC frame to upper layer
+ *
+ * return true if frame available, otherwise false
+ */
+static bool rx_get_frame(struct slgt_info *info)
+{
+ unsigned int start, end;
+ unsigned short status;
+ unsigned int framesize = 0;
+ unsigned long flags;
+ struct tty_struct *tty = info->port.tty;
+ unsigned char addr_field = 0xff;
+ unsigned int crc_size = 0;
+
+ switch (info->params.crc_type & HDLC_CRC_MASK) {
+ case HDLC_CRC_16_CCITT: crc_size = 2; break;
+ case HDLC_CRC_32_CCITT: crc_size = 4; break;
+ }
+
+check_again:
+
+ framesize = 0;
+ addr_field = 0xff;
+ start = end = info->rbuf_current;
+
+ for (;;) {
+ if (!desc_complete(info->rbufs[end]))
+ goto cleanup;
+
+ if (framesize == 0 && info->params.addr_filter != 0xff)
+ addr_field = info->rbufs[end].buf[0];
+
+ framesize += desc_count(info->rbufs[end]);
+
+ if (desc_eof(info->rbufs[end]))
+ break;
+
+ if (++end == info->rbuf_count)
+ end = 0;
+
+ if (end == info->rbuf_current) {
+ if (info->rx_enabled){
+ spin_lock_irqsave(&info->lock,flags);
+ rx_start(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+ goto cleanup;
+ }
+ }
+
+ /* status
+ *
+ * 15 buffer complete
+ * 14..06 reserved
+ * 05..04 residue
+ * 02 eof (end of frame)
+ * 01 CRC error
+ * 00 abort
+ */
+ status = desc_status(info->rbufs[end]);
+
+ /* ignore CRC bit if not using CRC (bit is undefined) */
+ if ((info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_NONE)
+ status &= ~BIT1;
+
+ if (framesize == 0 ||
+ (addr_field != 0xff && addr_field != info->params.addr_filter)) {
+ free_rbufs(info, start, end);
+ goto check_again;
+ }
+
+ if (framesize < (2 + crc_size) || status & BIT0) {
+ info->icount.rxshort++;
+ framesize = 0;
+ } else if (status & BIT1) {
+ info->icount.rxcrc++;
+ if (!(info->params.crc_type & HDLC_CRC_RETURN_EX))
+ framesize = 0;
+ }
+
+#if SYNCLINK_GENERIC_HDLC
+ if (framesize == 0) {
+ info->netdev->stats.rx_errors++;
+ info->netdev->stats.rx_frame_errors++;
+ }
+#endif
+
+ DBGBH(("%s rx frame status=%04X size=%d\n",
+ info->device_name, status, framesize));
+ DBGDATA(info, info->rbufs[start].buf, min_t(int, framesize, info->rbuf_fill_level), "rx");
+
+ if (framesize) {
+ if (!(info->params.crc_type & HDLC_CRC_RETURN_EX)) {
+ framesize -= crc_size;
+ crc_size = 0;
+ }
+
+ if (framesize > info->max_frame_size + crc_size)
+ info->icount.rxlong++;
+ else {
+ /* copy dma buffer(s) to contiguous temp buffer */
+ int copy_count = framesize;
+ int i = start;
+ unsigned char *p = info->tmp_rbuf;
+ info->tmp_rbuf_count = framesize;
+
+ info->icount.rxok++;
+
+ while(copy_count) {
+ int partial_count = min_t(int, copy_count, info->rbuf_fill_level);
+ memcpy(p, info->rbufs[i].buf, partial_count);
+ p += partial_count;
+ copy_count -= partial_count;
+ if (++i == info->rbuf_count)
+ i = 0;
+ }
+
+ if (info->params.crc_type & HDLC_CRC_RETURN_EX) {
+ *p = (status & BIT1) ? RX_CRC_ERROR : RX_OK;
+ framesize++;
+ }
+
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount)
+ hdlcdev_rx(info,info->tmp_rbuf, framesize);
+ else
+#endif
+ ldisc_receive_buf(tty, info->tmp_rbuf, info->flag_buf, framesize);
+ }
+ }
+ free_rbufs(info, start, end);
+ return true;
+
+cleanup:
+ return false;
+}
+
+/*
+ * pass receive buffer (RAW synchronous mode) to tty layer
+ * return true if buffer available, otherwise false
+ */
+static bool rx_get_buf(struct slgt_info *info)
+{
+ unsigned int i = info->rbuf_current;
+ unsigned int count;
+
+ if (!desc_complete(info->rbufs[i]))
+ return false;
+ count = desc_count(info->rbufs[i]);
+ switch(info->params.mode) {
+ case MGSL_MODE_MONOSYNC:
+ case MGSL_MODE_BISYNC:
+ case MGSL_MODE_XSYNC:
+ /* ignore residue in byte synchronous modes */
+ if (desc_residue(info->rbufs[i]))
+ count--;
+ break;
+ }
+ DBGDATA(info, info->rbufs[i].buf, count, "rx");
+ DBGINFO(("rx_get_buf size=%d\n", count));
+ if (count)
+ ldisc_receive_buf(info->port.tty, info->rbufs[i].buf,
+ info->flag_buf, count);
+ free_rbufs(info, i, i);
+ return true;
+}
+
+static void reset_tbufs(struct slgt_info *info)
+{
+ unsigned int i;
+ info->tbuf_current = 0;
+ for (i=0 ; i < info->tbuf_count ; i++) {
+ info->tbufs[i].status = 0;
+ info->tbufs[i].count = 0;
+ }
+}
+
+/*
+ * return number of free transmit DMA buffers
+ */
+static unsigned int free_tbuf_count(struct slgt_info *info)
+{
+ unsigned int count = 0;
+ unsigned int i = info->tbuf_current;
+
+ do
+ {
+ if (desc_count(info->tbufs[i]))
+ break; /* buffer in use */
+ ++count;
+ if (++i == info->tbuf_count)
+ i=0;
+ } while (i != info->tbuf_current);
+
+ /* if tx DMA active, last zero count buffer is in use */
+ if (count && (rd_reg32(info, TDCSR) & BIT0))
+ --count;
+
+ return count;
+}
+
+/*
+ * return number of bytes in unsent transmit DMA buffers
+ * and the serial controller tx FIFO
+ */
+static unsigned int tbuf_bytes(struct slgt_info *info)
+{
+ unsigned int total_count = 0;
+ unsigned int i = info->tbuf_current;
+ unsigned int reg_value;
+ unsigned int count;
+ unsigned int active_buf_count = 0;
+
+ /*
+ * Add descriptor counts for all tx DMA buffers.
+ * If count is zero (cleared by DMA controller after read),
+ * the buffer is complete or is actively being read from.
+ *
+ * Record buf_count of last buffer with zero count starting
+ * from current ring position. buf_count is mirror
+ * copy of count and is not cleared by serial controller.
+ * If DMA controller is active, that buffer is actively
+ * being read so add to total.
+ */
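+ /* Illustrative example: two queued buffers holding 256 and 60 bytes, a
+ * buffer the active DMA is reading (buf_count 128) and 5 bytes reported
+ * in the tx FIFO give 256 + 60 + 128 + 5 = 449, plus one more if the
+ * transmitter shift register is busy.
+ */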
+ do {
+ count = desc_count(info->tbufs[i]);
+ if (count)
+ total_count += count;
+ else if (!total_count)
+ active_buf_count = info->tbufs[i].buf_count;
+ if (++i == info->tbuf_count)
+ i = 0;
+ } while (i != info->tbuf_current);
+
+ /* read tx DMA status register */
+ reg_value = rd_reg32(info, TDCSR);
+
+ /* if tx DMA active, last zero count buffer is in use */
+ if (reg_value & BIT0)
+ total_count += active_buf_count;
+
+ /* add tx FIFO count = reg_value[15..8] */
+ total_count += (reg_value >> 8) & 0xff;
+
+ /* if transmitter active add one byte for shift register */
+ if (info->tx_active)
+ total_count++;
+
+ return total_count;
+}
+
+/*
+ * load data into transmit DMA buffer ring and start transmitter if needed
+ * return true if data accepted, otherwise false (buffers full)
+ */
+static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size)
+{
+ unsigned short count;
+ unsigned int i;
+ struct slgt_desc *d;
+
+ /* check required buffer space */
+ if (DIV_ROUND_UP(size, DMABUFSIZE) > free_tbuf_count(info))
+ return false;
+
+ DBGDATA(info, buf, size, "tx");
+
+ /*
+ * copy data to one or more DMA buffers in circular ring
+ * tbuf_start = first buffer for this data
+ * tbuf_current = next free buffer
+ *
+ * Copy all data before making data visible to DMA controller by
+ * setting descriptor count of the first buffer.
+ * This prevents an active DMA controller from reading the first DMA
+ * buffers of a frame and stopping before the final buffers are filled.
+ */
+
+ info->tbuf_start = i = info->tbuf_current;
+
+ while (size) {
+ d = &info->tbufs[i];
+
+ count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size);
+ memcpy(d->buf, buf, count);
+
+ size -= count;
+ buf += count;
+
+ /*
+ * set EOF bit for last buffer of HDLC frame or
+ * for every buffer in raw mode
+ */
+ if ((!size && info->params.mode == MGSL_MODE_HDLC) ||
+ info->params.mode == MGSL_MODE_RAW)
+ set_desc_eof(*d, 1);
+ else
+ set_desc_eof(*d, 0);
+
+ /* set descriptor count for all but first buffer */
+ if (i != info->tbuf_start)
+ set_desc_count(*d, count);
+ d->buf_count = count;
+
+ if (++i == info->tbuf_count)
+ i = 0;
+ }
+
+ info->tbuf_current = i;
+
+ /* set first buffer count to make new data visible to DMA controller */
+ d = &info->tbufs[info->tbuf_start];
+ set_desc_count(*d, d->buf_count);
+
+ /* start transmitter if needed and update transmit timeout */
+ if (!info->tx_active)
+ tx_start(info);
+ update_tx_timer(info);
+
+ return true;
+}
+
+static int register_test(struct slgt_info *info)
+{
+ static unsigned short patterns[] =
+ {0x0000, 0xffff, 0xaaaa, 0x5555, 0x6969, 0x9696};
+ static unsigned int count = ARRAY_SIZE(patterns);
+ unsigned int i;
+ int rc = 0;
+
+ for (i=0 ; i < count ; i++) {
+ wr_reg16(info, TIR, patterns[i]);
+ wr_reg16(info, BDR, patterns[(i+1)%count]);
+ if ((rd_reg16(info, TIR) != patterns[i]) ||
+ (rd_reg16(info, BDR) != patterns[(i+1)%count])) {
+ rc = -ENODEV;
+ break;
+ }
+ }
+ info->gpio_present = (rd_reg32(info, JCR) & BIT5) ? 1 : 0;
+ info->init_error = rc ? DiagStatus_AddressFailure : 0;
+ return rc;
+}
+
+static int irq_test(struct slgt_info *info)
+{
+ unsigned long timeout;
+ unsigned long flags;
+ struct tty_struct *oldtty = info->port.tty;
+ u32 speed = info->params.data_rate;
+
+ info->params.data_rate = 921600;
+ info->port.tty = NULL;
+
+ spin_lock_irqsave(&info->lock, flags);
+ async_mode(info);
+ slgt_irq_on(info, IRQ_TXIDLE);
+
+ /* enable transmitter */
+ wr_reg16(info, TCR,
+ (unsigned short)(rd_reg16(info, TCR) | BIT1));
+
+ /* write one byte and wait for tx idle */
+ wr_reg16(info, TDR, 0);
+
+ /* assume failure */
+ info->init_error = DiagStatus_IrqFailure;
+ info->irq_occurred = false;
+
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ timeout=100;
+ while(timeout-- && !info->irq_occurred)
+ msleep_interruptible(10);
+
+ spin_lock_irqsave(&info->lock,flags);
+ reset_port(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ info->params.data_rate = speed;
+ info->port.tty = oldtty;
+
+ info->init_error = info->irq_occurred ? 0 : DiagStatus_IrqFailure;
+ return info->irq_occurred ? 0 : -ENODEV;
+}
+
+static int loopback_test_rx(struct slgt_info *info)
+{
+ unsigned char *src, *dest;
+ int count;
+
+ if (desc_complete(info->rbufs[0])) {
+ count = desc_count(info->rbufs[0]);
+ src = info->rbufs[0].buf;
+ dest = info->tmp_rbuf;
+
+ for( ; count ; count-=2, src+=2) {
+ /* src=data byte (src+1)=status byte */
+ if (!(*(src+1) & (BIT9 + BIT8))) {
+ *dest = *src;
+ dest++;
+ info->tmp_rbuf_count++;
+ }
+ }
+ DBGDATA(info, info->tmp_rbuf, info->tmp_rbuf_count, "rx");
+ return 1;
+ }
+ return 0;
+}
+
+static int loopback_test(struct slgt_info *info)
+{
+#define TESTFRAMESIZE 20
+
+ unsigned long timeout;
+ u16 count = TESTFRAMESIZE;
+ unsigned char buf[TESTFRAMESIZE];
+ int rc = -ENODEV;
+ unsigned long flags;
+
+ struct tty_struct *oldtty = info->port.tty;
+ MGSL_PARAMS params;
+
+ memcpy(&params, &info->params, sizeof(params));
+
+ info->params.mode = MGSL_MODE_ASYNC;
+ info->params.data_rate = 921600;
+ info->params.loopback = 1;
+ info->port.tty = NULL;
+
+ /* build and send transmit frame */
+ for (count = 0; count < TESTFRAMESIZE; ++count)
+ buf[count] = (unsigned char)count;
+
+ info->tmp_rbuf_count = 0;
+ memset(info->tmp_rbuf, 0, TESTFRAMESIZE);
+
+ /* program hardware for async mode and enable receiver */
+ spin_lock_irqsave(&info->lock,flags);
+ async_mode(info);
+ rx_start(info);
+ tx_load(info, buf, count);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ /* wait for receive complete */
+ for (timeout = 100; timeout; --timeout) {
+ msleep_interruptible(10);
+ if (loopback_test_rx(info)) {
+ rc = 0;
+ break;
+ }
+ }
+
+ /* verify received frame length and contents */
+ if (!rc && (info->tmp_rbuf_count != count ||
+ memcmp(buf, info->tmp_rbuf, count))) {
+ rc = -ENODEV;
+ }
+
+ spin_lock_irqsave(&info->lock,flags);
+ reset_adapter(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ memcpy(&info->params, &params, sizeof(info->params));
+ info->port.tty = oldtty;
+
+ info->init_error = rc ? DiagStatus_DmaFailure : 0;
+ return rc;
+}
+
+static int adapter_test(struct slgt_info *info)
+{
+ DBGINFO(("testing %s\n", info->device_name));
+ if (register_test(info) < 0) {
+ printk("register test failure %s addr=%08X\n",
+ info->device_name, info->phys_reg_addr);
+ } else if (irq_test(info) < 0) {
+ printk("IRQ test failure %s IRQ=%d\n",
+ info->device_name, info->irq_level);
+ } else if (loopback_test(info) < 0) {
+ printk("loopback test failure %s\n", info->device_name);
+ }
+ return info->init_error;
+}
+
+/*
+ * transmit timeout handler
+ */
+static void tx_timeout(unsigned long context)
+{
+ struct slgt_info *info = (struct slgt_info*)context;
+ unsigned long flags;
+
+ DBGINFO(("%s tx_timeout\n", info->device_name));
+ if(info->tx_active && info->params.mode == MGSL_MODE_HDLC) {
+ info->icount.txtimeout++;
+ }
+ spin_lock_irqsave(&info->lock,flags);
+ tx_stop(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount)
+ hdlcdev_tx_done(info);
+ else
+#endif
+ bh_transmit(info);
+}
+
+/*
+ * receive buffer polling timer
+ */
+static void rx_timeout(unsigned long context)
+{
+ struct slgt_info *info = (struct slgt_info*)context;
+ unsigned long flags;
+
+ DBGINFO(("%s rx_timeout\n", info->device_name));
+ spin_lock_irqsave(&info->lock, flags);
+ info->pending_bh |= BH_RECEIVE;
+ spin_unlock_irqrestore(&info->lock, flags);
+ bh_handler(&info->task);
+}
+
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
new file mode 100644
index 00000000..c77831c7
--- /dev/null
+++ b/drivers/tty/synclinkmp.c
@@ -0,0 +1,5600 @@
+/*
+ * $Id: synclinkmp.c,v 4.38 2005/07/15 13:29:44 paulkf Exp $
+ *
+ * Device driver for Microgate SyncLink Multiport
+ * high speed multiprotocol serial adapter.
+ *
+ * written by Paul Fulghum for Microgate Corporation
+ * paulkf@microgate.com
+ *
+ * Microgate and SyncLink are trademarks of Microgate Corporation
+ *
+ * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
+ * This code is released under the GNU General Public License (GPL)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define VERSION(ver,rel,seq) (((ver)<<16) | ((rel)<<8) | (seq))
+#if defined(__i386__)
+# define BREAKPOINT() asm(" int $3");
+#else
+# define BREAKPOINT() { }
+#endif
+
+#define MAX_DEVICES 12
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/ioctl.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <linux/bitops.h>
+#include <asm/types.h>
+#include <linux/termios.h>
+#include <linux/workqueue.h>
+#include <linux/hdlc.h>
+#include <linux/synclink.h>
+
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINKMP_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
+#endif
+
+#define GET_USER(error,value,addr) error = get_user(value,addr)
+#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
+#define PUT_USER(error,value,addr) error = put_user(value,addr)
+#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
+
+#include <asm/uaccess.h>
+
+static MGSL_PARAMS default_params = {
+ MGSL_MODE_HDLC, /* unsigned long mode */
+ 0, /* unsigned char loopback; */
+ HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
+ HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
+ 0, /* unsigned long clock_speed; */
+ 0xff, /* unsigned char addr_filter; */
+ HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
+ HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
+ HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
+ 9600, /* unsigned long data_rate; */
+ 8, /* unsigned char data_bits; */
+ 1, /* unsigned char stop_bits; */
+ ASYNC_PARITY_NONE /* unsigned char parity; */
+};
+
+/* size in bytes of DMA data buffers */
+#define SCABUFSIZE 1024
+#define SCA_MEM_SIZE 0x40000
+#define SCA_BASE_SIZE 512
+#define SCA_REG_SIZE 16
+#define SCA_MAX_PORTS 4
+#define SCAMAXDESC 128
+
+#define BUFFERLISTSIZE 4096
+
+/* SCA-I style DMA buffer descriptor */
+typedef struct _SCADESC
+{
+ u16 next; /* lower 16 bits of next descriptor addr */
+ u16 buf_ptr; /* lower 16 bits of buffer addr */
+ u8 buf_base; /* upper 8 bits of buffer addr */
+ u8 pad1;
+ u16 length; /* length of buffer */
+ u8 status; /* status of buffer */
+ u8 pad2;
+} SCADESC, *PSCADESC;
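+
+/* Illustrative note: the descriptor splits a 24-bit bus address, so a data
+ * buffer at 0x123456 (example value) would be described with
+ * buf_base = 0x12 and buf_ptr = 0x3456.
+ */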
+
+typedef struct _SCADESC_EX
+{
+ /* device driver bookkeeping section */
+ char *virt_addr; /* virtual address of data buffer */
+ u16 phys_entry; /* lower 16-bits of physical address of this descriptor */
+} SCADESC_EX, *PSCADESC_EX;
+
+/* The queue of BH actions to be performed */
+
+#define BH_RECEIVE 1
+#define BH_TRANSMIT 2
+#define BH_STATUS 4
+
+#define IO_PIN_SHUTDOWN_LIMIT 100
+
+struct _input_signal_events {
+ int ri_up;
+ int ri_down;
+ int dsr_up;
+ int dsr_down;
+ int dcd_up;
+ int dcd_down;
+ int cts_up;
+ int cts_down;
+};
+
+/*
+ * Device instance data structure
+ */
+typedef struct _synclinkmp_info {
+ void *if_ptr; /* General purpose pointer (used by SPPP) */
+ int magic;
+ struct tty_port port;
+ int line;
+ unsigned short close_delay;
+ unsigned short closing_wait; /* time to wait before closing */
+
+ struct mgsl_icount icount;
+
+ int timeout;
+ int x_char; /* xon/xoff character */
+ u16 read_status_mask1; /* break detection (SR1 indications) */
+ u16 read_status_mask2; /* parity/framing/overrun (SR2 indications) */
+ unsigned char ignore_status_mask1; /* break detection (SR1 indications) */
+ unsigned char ignore_status_mask2; /* parity/framing/overrun (SR2 indications) */
+ unsigned char *tx_buf;
+ int tx_put;
+ int tx_get;
+ int tx_count;
+
+ wait_queue_head_t status_event_wait_q;
+ wait_queue_head_t event_wait_q;
+ struct timer_list tx_timer; /* HDLC transmit timeout timer */
+ struct _synclinkmp_info *next_device; /* device list link */
+ struct timer_list status_timer; /* input signal status check timer */
+
+ spinlock_t lock; /* spinlock for synchronizing with ISR */
+ struct work_struct task; /* task structure for scheduling bh */
+
+ u32 max_frame_size; /* as set by device config */
+
+ u32 pending_bh;
+
+ bool bh_running; /* Protection from multiple */
+ int isr_overflow;
+ bool bh_requested;
+
+ int dcd_chkcount; /* check counts to prevent */
+ int cts_chkcount; /* too many IRQs if a signal */
+ int dsr_chkcount; /* is floating */
+ int ri_chkcount;
+
+ char *buffer_list; /* virtual address of Rx & Tx buffer lists */
+ unsigned long buffer_list_phys;
+
+ unsigned int rx_buf_count; /* count of total allocated Rx buffers */
+ SCADESC *rx_buf_list; /* list of receive buffer entries */
+ SCADESC_EX rx_buf_list_ex[SCAMAXDESC]; /* list of receive buffer entries */
+ unsigned int current_rx_buf;
+
+ unsigned int tx_buf_count; /* count of total allocated Tx buffers */
+ SCADESC *tx_buf_list; /* list of transmit buffer entries */
+ SCADESC_EX tx_buf_list_ex[SCAMAXDESC]; /* list of transmit buffer entries */
+ unsigned int last_tx_buf;
+
+ unsigned char *tmp_rx_buf;
+ unsigned int tmp_rx_buf_count;
+
+ bool rx_enabled;
+ bool rx_overflow;
+
+ bool tx_enabled;
+ bool tx_active;
+ u32 idle_mode;
+
+ unsigned char ie0_value;
+ unsigned char ie1_value;
+ unsigned char ie2_value;
+ unsigned char ctrlreg_value;
+ unsigned char old_signals;
+
+ char device_name[25]; /* device instance name */
+
+ int port_count;
+ int adapter_num;
+ int port_num;
+
+ struct _synclinkmp_info *port_array[SCA_MAX_PORTS];
+
+ unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
+
+ unsigned int irq_level; /* interrupt level */
+ unsigned long irq_flags;
+ bool irq_requested; /* true if IRQ requested */
+
+ MGSL_PARAMS params; /* communications parameters */
+
+ unsigned char serial_signals; /* current serial signal states */
+
+ bool irq_occurred; /* for diagnostics use */
+ unsigned int init_error; /* Initialization startup error */
+
+ u32 last_mem_alloc;
+ unsigned char* memory_base; /* shared memory address (PCI only) */
+ u32 phys_memory_base;
+ int shared_mem_requested;
+
+ unsigned char* sca_base; /* HD64570 SCA Memory address */
+ u32 phys_sca_base;
+ u32 sca_offset;
+ bool sca_base_requested;
+
+ unsigned char* lcr_base; /* local config registers (PCI only) */
+ u32 phys_lcr_base;
+ u32 lcr_offset;
+ int lcr_mem_requested;
+
+ unsigned char* statctrl_base; /* status/control register memory */
+ u32 phys_statctrl_base;
+ u32 statctrl_offset;
+ bool sca_statctrl_requested;
+
+ u32 misc_ctrl_value;
+ char flag_buf[MAX_ASYNC_BUFFER_SIZE];
+ char char_buf[MAX_ASYNC_BUFFER_SIZE];
+ bool drop_rts_on_tx_done;
+
+ struct _input_signal_events input_signal_events;
+
+ /* SPPP/Cisco HDLC device parts */
+ int netcount;
+ spinlock_t netlock;
+
+#if SYNCLINK_GENERIC_HDLC
+ struct net_device *netdev;
+#endif
+
+} SLMP_INFO;
+
+#define MGSL_MAGIC 0x5401
+
+/*
+ * define serial signal status change macros
+ */
+#define MISCSTATUS_DCD_LATCHED (SerialSignal_DCD<<8) /* indicates change in DCD */
+#define MISCSTATUS_RI_LATCHED (SerialSignal_RI<<8) /* indicates change in RI */
+#define MISCSTATUS_CTS_LATCHED (SerialSignal_CTS<<8) /* indicates change in CTS */
+#define MISCSTATUS_DSR_LATCHED (SerialSignal_DSR<<8) /* change in DSR */
+
+/* Common Register macros */
+#define LPR 0x00
+#define PABR0 0x02
+#define PABR1 0x03
+#define WCRL 0x04
+#define WCRM 0x05
+#define WCRH 0x06
+#define DPCR 0x08
+#define DMER 0x09
+#define ISR0 0x10
+#define ISR1 0x11
+#define ISR2 0x12
+#define IER0 0x14
+#define IER1 0x15
+#define IER2 0x16
+#define ITCR 0x18
+#define INTVR 0x1a
+#define IMVR 0x1c
+
+/* MSCI Register macros */
+#define TRB 0x20
+#define TRBL 0x20
+#define TRBH 0x21
+#define SR0 0x22
+#define SR1 0x23
+#define SR2 0x24
+#define SR3 0x25
+#define FST 0x26
+#define IE0 0x28
+#define IE1 0x29
+#define IE2 0x2a
+#define FIE 0x2b
+#define CMD 0x2c
+#define MD0 0x2e
+#define MD1 0x2f
+#define MD2 0x30
+#define CTL 0x31
+#define SA0 0x32
+#define SA1 0x33
+#define IDL 0x34
+#define TMC 0x35
+#define RXS 0x36
+#define TXS 0x37
+#define TRC0 0x38
+#define TRC1 0x39
+#define RRC 0x3a
+#define CST0 0x3c
+#define CST1 0x3d
+
+/* Timer Register Macros */
+#define TCNT 0x60
+#define TCNTL 0x60
+#define TCNTH 0x61
+#define TCONR 0x62
+#define TCONRL 0x62
+#define TCONRH 0x63
+#define TMCS 0x64
+#define TEPR 0x65
+
+/* DMA Controller Register macros */
+#define DARL 0x80
+#define DARH 0x81
+#define DARB 0x82
+#define BAR 0x80
+#define BARL 0x80
+#define BARH 0x81
+#define BARB 0x82
+#define SAR 0x84
+#define SARL 0x84
+#define SARH 0x85
+#define SARB 0x86
+#define CPB 0x86
+#define CDA 0x88
+#define CDAL 0x88
+#define CDAH 0x89
+#define EDA 0x8a
+#define EDAL 0x8a
+#define EDAH 0x8b
+#define BFL 0x8c
+#define BFLL 0x8c
+#define BFLH 0x8d
+#define BCR 0x8e
+#define BCRL 0x8e
+#define BCRH 0x8f
+#define DSR 0x90
+#define DMR 0x91
+#define FCT 0x93
+#define DIR 0x94
+#define DCMD 0x95
+
+/* combine with timer or DMA register address */
+#define TIMER0 0x00
+#define TIMER1 0x08
+#define TIMER2 0x10
+#define TIMER3 0x18
+#define RXDMA 0x00
+#define TXDMA 0x20
+
+/* SCA Command Codes */
+#define NOOP 0x00
+#define TXRESET 0x01
+#define TXENABLE 0x02
+#define TXDISABLE 0x03
+#define TXCRCINIT 0x04
+#define TXCRCEXCL 0x05
+#define TXEOM 0x06
+#define TXABORT 0x07
+#define MPON 0x08
+#define TXBUFCLR 0x09
+#define RXRESET 0x11
+#define RXENABLE 0x12
+#define RXDISABLE 0x13
+#define RXCRCINIT 0x14
+#define RXREJECT 0x15
+#define SEARCHMP 0x16
+#define RXCRCEXCL 0x17
+#define RXCRCCALC 0x18
+#define CHRESET 0x21
+#define HUNT 0x31
+
+/* DMA command codes */
+#define SWABORT 0x01
+#define FEICLEAR 0x02
+
+/* IE0 */
+#define TXINTE BIT7
+#define RXINTE BIT6
+#define TXRDYE BIT1
+#define RXRDYE BIT0
+
+/* IE1 & SR1 */
+#define UDRN BIT7
+#define IDLE BIT6
+#define SYNCD BIT4
+#define FLGD BIT4
+#define CCTS BIT3
+#define CDCD BIT2
+#define BRKD BIT1
+#define ABTD BIT1
+#define GAPD BIT1
+#define BRKE BIT0
+#define IDLD BIT0
+
+/* IE2 & SR2 */
+#define EOM BIT7
+#define PMP BIT6
+#define SHRT BIT6
+#define PE BIT5
+#define ABT BIT5
+#define FRME BIT4
+#define RBIT BIT4
+#define OVRN BIT3
+#define CRCE BIT2
+
+
+/*
+ * Global linked list of SyncLink devices
+ */
+static SLMP_INFO *synclinkmp_device_list = NULL;
+static int synclinkmp_adapter_count = -1;
+static int synclinkmp_device_count = 0;
+
+/*
+ * Set this param to non-zero to load eax with the
+ * .text section address and breakpoint on module load.
+ * This is useful for use with gdb and add-symbol-file command.
+ */
+static int break_on_load = 0;
+
+/*
+ * Driver major number, defaults to zero to get auto
+ * assigned major number. May be forced as module parameter.
+ */
+static int ttymajor = 0;
+
+/*
+ * Array of user specified options for ISA adapters.
+ */
+static int debug_level = 0;
+static int maxframe[MAX_DEVICES] = {0,};
+
+module_param(break_on_load, bool, 0);
+module_param(ttymajor, int, 0);
+module_param(debug_level, int, 0);
+module_param_array(maxframe, int, NULL, 0);
+
+static char *driver_name = "SyncLink MultiPort driver";
+static char *driver_version = "$Revision: 4.38 $";
+
+static int synclinkmp_init_one(struct pci_dev *dev,const struct pci_device_id *ent);
+static void synclinkmp_remove_one(struct pci_dev *dev);
+
+static struct pci_device_id synclinkmp_pci_tbl[] = {
+ { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_SCA, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, }, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, synclinkmp_pci_tbl);
+
+MODULE_LICENSE("GPL");
+
+static struct pci_driver synclinkmp_pci_driver = {
+ .name = "synclinkmp",
+ .id_table = synclinkmp_pci_tbl,
+ .probe = synclinkmp_init_one,
+ .remove = __devexit_p(synclinkmp_remove_one),
+};
+
+
+static struct tty_driver *serial_driver;
+
+/* number of characters left in xmit buffer before we ask for more */
+#define WAKEUP_CHARS 256
+
+
+/* tty callbacks */
+
+static int open(struct tty_struct *tty, struct file * filp);
+static void close(struct tty_struct *tty, struct file * filp);
+static void hangup(struct tty_struct *tty);
+static void set_termios(struct tty_struct *tty, struct ktermios *old_termios);
+
+static int write(struct tty_struct *tty, const unsigned char *buf, int count);
+static int put_char(struct tty_struct *tty, unsigned char ch);
+static void send_xchar(struct tty_struct *tty, char ch);
+static void wait_until_sent(struct tty_struct *tty, int timeout);
+static int write_room(struct tty_struct *tty);
+static void flush_chars(struct tty_struct *tty);
+static void flush_buffer(struct tty_struct *tty);
+static void tx_hold(struct tty_struct *tty);
+static void tx_release(struct tty_struct *tty);
+
+static int ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
+static int chars_in_buffer(struct tty_struct *tty);
+static void throttle(struct tty_struct * tty);
+static void unthrottle(struct tty_struct * tty);
+static int set_break(struct tty_struct *tty, int break_state);
+
+#if SYNCLINK_GENERIC_HDLC
+#define dev_to_port(D) (dev_to_hdlc(D)->priv)
+static void hdlcdev_tx_done(SLMP_INFO *info);
+static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size);
+static int hdlcdev_init(SLMP_INFO *info);
+static void hdlcdev_exit(SLMP_INFO *info);
+#endif
+
+/* ioctl handlers */
+
+static int get_stats(SLMP_INFO *info, struct mgsl_icount __user *user_icount);
+static int get_params(SLMP_INFO *info, MGSL_PARAMS __user *params);
+static int set_params(SLMP_INFO *info, MGSL_PARAMS __user *params);
+static int get_txidle(SLMP_INFO *info, int __user *idle_mode);
+static int set_txidle(SLMP_INFO *info, int idle_mode);
+static int tx_enable(SLMP_INFO *info, int enable);
+static int tx_abort(SLMP_INFO *info);
+static int rx_enable(SLMP_INFO *info, int enable);
+static int modem_input_wait(SLMP_INFO *info,int arg);
+static int wait_mgsl_event(SLMP_INFO *info, int __user *mask_ptr);
+static int tiocmget(struct tty_struct *tty);
+static int tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear);
+static int set_break(struct tty_struct *tty, int break_state);
+
+static void add_device(SLMP_INFO *info);
+static void device_init(int adapter_num, struct pci_dev *pdev);
+static int claim_resources(SLMP_INFO *info);
+static void release_resources(SLMP_INFO *info);
+
+static int startup(SLMP_INFO *info);
+static int block_til_ready(struct tty_struct *tty, struct file * filp,SLMP_INFO *info);
+static int carrier_raised(struct tty_port *port);
+static void shutdown(SLMP_INFO *info);
+static void program_hw(SLMP_INFO *info);
+static void change_params(SLMP_INFO *info);
+
+static bool init_adapter(SLMP_INFO *info);
+static bool register_test(SLMP_INFO *info);
+static bool irq_test(SLMP_INFO *info);
+static bool loopback_test(SLMP_INFO *info);
+static int adapter_test(SLMP_INFO *info);
+static bool memory_test(SLMP_INFO *info);
+
+static void reset_adapter(SLMP_INFO *info);
+static void reset_port(SLMP_INFO *info);
+static void async_mode(SLMP_INFO *info);
+static void hdlc_mode(SLMP_INFO *info);
+
+static void rx_stop(SLMP_INFO *info);
+static void rx_start(SLMP_INFO *info);
+static void rx_reset_buffers(SLMP_INFO *info);
+static void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last);
+static bool rx_get_frame(SLMP_INFO *info);
+
+static void tx_start(SLMP_INFO *info);
+static void tx_stop(SLMP_INFO *info);
+static void tx_load_fifo(SLMP_INFO *info);
+static void tx_set_idle(SLMP_INFO *info);
+static void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count);
+
+static void get_signals(SLMP_INFO *info);
+static void set_signals(SLMP_INFO *info);
+static void enable_loopback(SLMP_INFO *info, int enable);
+static void set_rate(SLMP_INFO *info, u32 data_rate);
+
+static int bh_action(SLMP_INFO *info);
+static void bh_handler(struct work_struct *work);
+static void bh_receive(SLMP_INFO *info);
+static void bh_transmit(SLMP_INFO *info);
+static void bh_status(SLMP_INFO *info);
+static void isr_timer(SLMP_INFO *info);
+static void isr_rxint(SLMP_INFO *info);
+static void isr_rxrdy(SLMP_INFO *info);
+static void isr_txint(SLMP_INFO *info);
+static void isr_txrdy(SLMP_INFO *info);
+static void isr_rxdmaok(SLMP_INFO *info);
+static void isr_rxdmaerror(SLMP_INFO *info);
+static void isr_txdmaok(SLMP_INFO *info);
+static void isr_txdmaerror(SLMP_INFO *info);
+static void isr_io_pin(SLMP_INFO *info, u16 status);
+
+static int alloc_dma_bufs(SLMP_INFO *info);
+static void free_dma_bufs(SLMP_INFO *info);
+static int alloc_buf_list(SLMP_INFO *info);
+static int alloc_frame_bufs(SLMP_INFO *info, SCADESC *list, SCADESC_EX *list_ex,int count);
+static int alloc_tmp_rx_buf(SLMP_INFO *info);
+static void free_tmp_rx_buf(SLMP_INFO *info);
+
+static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count);
+static void trace_block(SLMP_INFO *info, const char* data, int count, int xmit);
+static void tx_timeout(unsigned long context);
+static void status_timeout(unsigned long context);
+
+static unsigned char read_reg(SLMP_INFO *info, unsigned char addr);
+static void write_reg(SLMP_INFO *info, unsigned char addr, unsigned char val);
+static u16 read_reg16(SLMP_INFO *info, unsigned char addr);
+static void write_reg16(SLMP_INFO *info, unsigned char addr, u16 val);
+static unsigned char read_status_reg(SLMP_INFO * info);
+static void write_control_reg(SLMP_INFO * info);
+
+
+static unsigned char rx_active_fifo_level = 16; // rx request FIFO activation level in bytes
+static unsigned char tx_active_fifo_level = 16; // tx request FIFO activation level in bytes
+static unsigned char tx_negate_fifo_level = 32; // tx request FIFO negation level in bytes
+
+static u32 misc_ctrl_value = 0x007e4040;
+static u32 lcr1_brdr_value = 0x00800028;
+
+static u32 read_ahead_count = 8;
+
+/* DPCR, DMA Priority Control
+ *
+ * 07..05 Not used, must be 0
+ * 04 BRC, bus release condition: 0=all transfers complete
+ * 1=release after 1 xfer on all channels
+ * 03 CCC, channel change condition: 0=every cycle
+ * 1=after each channel completes all xfers
+ * 02..00 PR<2..0>, priority 100=round robin
+ *
+ * 00000100 = 0x04
+ */
+static unsigned char dma_priority = 0x04;
+
+// Number of bytes that can be written to shared RAM
+// in a single write operation
+static u32 sca_pci_load_interval = 64;
+
+/*
+ * 1st function defined in .text section. Calling this function in
+ * init_module() followed by a breakpoint allows a remote debugger
+ * (gdb) to get the .text address for the add-symbol-file command.
+ * This allows remote debugging of dynamically loadable modules.
+ */
+static void* synclinkmp_get_text_ptr(void);
+static void* synclinkmp_get_text_ptr(void) {return synclinkmp_get_text_ptr;}
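+
+/*
+ * Illustrative gdb usage (sketch only; the object file name and address
+ * below are examples, not taken from a real session):
+ *
+ *   (gdb) break synclinkmp_get_text_ptr
+ *   ... load the module, let the breakpoint hit, note the return value ...
+ *   (gdb) add-symbol-file synclinkmp.o <returned .text address>
+ */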
+
+static inline int sanity_check(SLMP_INFO *info,
+ char *name, const char *routine)
+{
+#ifdef SANITY_CHECK
+ static const char *badmagic =
+ "Warning: bad magic number for synclinkmp_struct (%s) in %s\n";
+ static const char *badinfo =
+ "Warning: null synclinkmp_struct for (%s) in %s\n";
+
+ if (!info) {
+ printk(badinfo, name, routine);
+ return 1;
+ }
+ if (info->magic != MGSL_MAGIC) {
+ printk(badmagic, name, routine);
+ return 1;
+ }
+#else
+ if (!info)
+ return 1;
+#endif
+ return 0;
+}
+
+/**
+ * line discipline callback wrappers
+ *
+ * The wrappers maintain line discipline references
+ * while calling into the line discipline.
+ *
+ * ldisc_receive_buf - pass receive data to line discipline
+ */
+
+static void ldisc_receive_buf(struct tty_struct *tty,
+ const __u8 *data, char *flags, int count)
+{
+ struct tty_ldisc *ld;
+ if (!tty)
+ return;
+ ld = tty_ldisc_ref(tty);
+ if (ld) {
+ if (ld->ops->receive_buf)
+ ld->ops->receive_buf(tty, data, flags, count);
+ tty_ldisc_deref(ld);
+ }
+}
+
+/* tty callbacks */
+
+/* Called when a port is opened. Init and enable port.
+ */
+static int open(struct tty_struct *tty, struct file *filp)
+{
+ SLMP_INFO *info;
+ int retval, line;
+ unsigned long flags;
+
+ line = tty->index;
+ if ((line < 0) || (line >= synclinkmp_device_count)) {
+ printk("%s(%d): open with invalid line #%d.\n",
+ __FILE__,__LINE__,line);
+ return -ENODEV;
+ }
+
+ info = synclinkmp_device_list;
+ while(info && info->line != line)
+ info = info->next_device;
+ if (sanity_check(info, tty->name, "open"))
+ return -ENODEV;
+ if ( info->init_error ) {
+ printk("%s(%d):%s device is not allocated, init error=%d\n",
+ __FILE__,__LINE__,info->device_name,info->init_error);
+ return -ENODEV;
+ }
+
+ tty->driver_data = info;
+ info->port.tty = tty;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s open(), old ref count = %d\n",
+ __FILE__,__LINE__,tty->driver->name, info->port.count);
+
+ /* If port is closing, signal caller to try again */
+ if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
+ if (info->port.flags & ASYNC_CLOSING)
+ interruptible_sleep_on(&info->port.close_wait);
+ retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS);
+ goto cleanup;
+ }
+
+ info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
+ spin_lock_irqsave(&info->netlock, flags);
+ if (info->netcount) {
+ retval = -EBUSY;
+ spin_unlock_irqrestore(&info->netlock, flags);
+ goto cleanup;
+ }
+ info->port.count++;
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ if (info->port.count == 1) {
+ /* 1st open on this device, init hardware */
+ retval = startup(info);
+ if (retval < 0)
+ goto cleanup;
+ }
+
+ retval = block_til_ready(tty, filp, info);
+ if (retval) {
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s block_til_ready() returned %d\n",
+ __FILE__,__LINE__, info->device_name, retval);
+ goto cleanup;
+ }
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s open() success\n",
+ __FILE__,__LINE__, info->device_name);
+ retval = 0;
+
+cleanup:
+ if (retval) {
+ if (tty->count == 1)
+ info->port.tty = NULL; /* tty layer will release tty struct */
+ if(info->port.count)
+ info->port.count--;
+ }
+
+ return retval;
+}
+
+/* Called when port is closed. Wait for remaining data to be
+ * sent. Disable port and free resources.
+ */
+static void close(struct tty_struct *tty, struct file *filp)
+{
+ SLMP_INFO * info = tty->driver_data;
+
+ if (sanity_check(info, tty->name, "close"))
+ return;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s close() entry, count=%d\n",
+ __FILE__,__LINE__, info->device_name, info->port.count);
+
+ if (tty_port_close_start(&info->port, tty, filp) == 0)
+ goto cleanup;
+
+ mutex_lock(&info->port.mutex);
+ if (info->port.flags & ASYNC_INITIALIZED)
+ wait_until_sent(tty, info->timeout);
+
+ flush_buffer(tty);
+ tty_ldisc_flush(tty);
+ shutdown(info);
+ mutex_unlock(&info->port.mutex);
+
+ tty_port_close_end(&info->port, tty);
+ info->port.tty = NULL;
+cleanup:
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
+ tty->driver->name, info->port.count);
+}
+
+/* Called by tty_hangup() when a hangup is signaled.
+ * This is the same as closing all open descriptors for the port.
+ */
+static void hangup(struct tty_struct *tty)
+{
+ SLMP_INFO *info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s hangup()\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (sanity_check(info, tty->name, "hangup"))
+ return;
+
+ mutex_lock(&info->port.mutex);
+ flush_buffer(tty);
+ shutdown(info);
+
+ spin_lock_irqsave(&info->port.lock, flags);
+ info->port.count = 0;
+ info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
+ info->port.tty = NULL;
+ spin_unlock_irqrestore(&info->port.lock, flags);
+ mutex_unlock(&info->port.mutex);
+
+ wake_up_interruptible(&info->port.open_wait);
+}
+
+/* Set new termios settings
+ */
+static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ SLMP_INFO *info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s set_termios()\n", __FILE__,__LINE__,
+ tty->driver->name );
+
+ change_params(info);
+
+ /* Handle transition to B0 status */
+ if (old_termios->c_cflag & CBAUD &&
+ !(tty->termios->c_cflag & CBAUD)) {
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ spin_lock_irqsave(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+
+ /* Handle transition away from B0 status */
+ if (!(old_termios->c_cflag & CBAUD) &&
+ tty->termios->c_cflag & CBAUD) {
+ info->serial_signals |= SerialSignal_DTR;
+ if (!(tty->termios->c_cflag & CRTSCTS) ||
+ !test_bit(TTY_THROTTLED, &tty->flags)) {
+ info->serial_signals |= SerialSignal_RTS;
+ }
+ spin_lock_irqsave(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+
+ /* Handle turning off CRTSCTS */
+ if (old_termios->c_cflag & CRTSCTS &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ tx_release(tty);
+ }
+}
+
+/* Send a block of data
+ *
+ * Arguments:
+ *
+ * tty pointer to tty information structure
+ * buf pointer to buffer containing send data
+ * count size of send data in bytes
+ *
+ * Return Value: number of characters written
+ */
+static int write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ int c, ret = 0;
+ SLMP_INFO *info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s write() count=%d\n",
+ __FILE__,__LINE__,info->device_name,count);
+
+ if (sanity_check(info, tty->name, "write"))
+ goto cleanup;
+
+ if (!info->tx_buf)
+ goto cleanup;
+
+ if (info->params.mode == MGSL_MODE_HDLC) {
+ if (count > info->max_frame_size) {
+ ret = -EIO;
+ goto cleanup;
+ }
+ if (info->tx_active)
+ goto cleanup;
+ if (info->tx_count) {
+ /* send accumulated data from send_char() calls */
+ /* as frame and wait before accepting more data. */
+ tx_load_dma_buffer(info, info->tx_buf, info->tx_count);
+ goto start;
+ }
+ ret = info->tx_count = count;
+ tx_load_dma_buffer(info, buf, count);
+ goto start;
+ }
+
+ for (;;) {
+ c = min_t(int, count,
+ min(info->max_frame_size - info->tx_count - 1,
+ info->max_frame_size - info->tx_put));
+ if (c <= 0)
+ break;
+
+ memcpy(info->tx_buf + info->tx_put, buf, c);
+
+ spin_lock_irqsave(&info->lock,flags);
+ info->tx_put += c;
+ if (info->tx_put >= info->max_frame_size)
+ info->tx_put -= info->max_frame_size;
+ info->tx_count += c;
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ buf += c;
+ count -= c;
+ ret += c;
+ }
+
+ if (info->params.mode == MGSL_MODE_HDLC) {
+ if (count) {
+ ret = info->tx_count = 0;
+ goto cleanup;
+ }
+ tx_load_dma_buffer(info, info->tx_buf, info->tx_count);
+ }
+start:
+ if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_active)
+ tx_start(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+
+cleanup:
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk( "%s(%d):%s write() returning=%d\n",
+ __FILE__,__LINE__,info->device_name,ret);
+ return ret;
+}
+
+/* Add a character to the transmit buffer.
+ */
+static int put_char(struct tty_struct *tty, unsigned char ch)
+{
+ SLMP_INFO *info = tty->driver_data;
+ unsigned long flags;
+ int ret = 0;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO ) {
+ printk( "%s(%d):%s put_char(%d)\n",
+ __FILE__,__LINE__,info->device_name,ch);
+ }
+
+ if (sanity_check(info, tty->name, "put_char"))
+ return 0;
+
+ if (!info->tx_buf)
+ return 0;
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ if ( (info->params.mode != MGSL_MODE_HDLC) ||
+ !info->tx_active ) {
+
+ if (info->tx_count < info->max_frame_size - 1) {
+ info->tx_buf[info->tx_put++] = ch;
+ if (info->tx_put >= info->max_frame_size)
+ info->tx_put -= info->max_frame_size;
+ info->tx_count++;
+ ret = 1;
+ }
+ }
+
+ spin_unlock_irqrestore(&info->lock,flags);
+ return ret;
+}
+
+/* Send a high-priority XON/XOFF character
+ */
+static void send_xchar(struct tty_struct *tty, char ch)
+{
+ SLMP_INFO *info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s send_xchar(%d)\n",
+ __FILE__,__LINE__, info->device_name, ch );
+
+ if (sanity_check(info, tty->name, "send_xchar"))
+ return;
+
+ info->x_char = ch;
+ if (ch) {
+ /* Make sure transmit interrupts are on */
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_enabled)
+ tx_start(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+}
+
+/* Wait until the transmitter is empty.
+ */
+static void wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ SLMP_INFO * info = tty->driver_data;
+ unsigned long orig_jiffies, char_time;
+
+ if (!info )
+ return;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s wait_until_sent() entry\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (sanity_check(info, tty->name, "wait_until_sent"))
+ return;
+
+ if (!test_bit(ASYNCB_INITIALIZED, &info->port.flags))
+ goto exit;
+
+ orig_jiffies = jiffies;
+
+ /* Set check interval to 1/5 of estimated time to
+ * send a character, and make it at least 1. The check
+ * interval should also be less than the timeout.
+ * Note: use tight timings here to satisfy the NIST-PCTS.
+ */
+
+ if ( info->params.data_rate ) {
+ char_time = info->timeout/(32 * 5);
+ if (!char_time)
+ char_time++;
+ } else
+ char_time = 1;
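+ /* Example (illustrative): with info->timeout of only a few jiffies
+ * (roughly a full 32-byte FIFO at 9600 bps), timeout/(32*5) rounds
+ * down to 0, so char_time is bumped to the 1 jiffy minimum above.
+ */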
+
+ if (timeout)
+ char_time = min_t(unsigned long, char_time, timeout);
+
+ if ( info->params.mode == MGSL_MODE_HDLC ) {
+ while (info->tx_active) {
+ msleep_interruptible(jiffies_to_msecs(char_time));
+ if (signal_pending(current))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ break;
+ }
+ } else {
+ /*
+ * TODO: determine if there is something similar to USC16C32
+ * TXSTATUS_ALL_SENT status
+ */
+ while ( info->tx_active && info->tx_enabled) {
+ msleep_interruptible(jiffies_to_msecs(char_time));
+ if (signal_pending(current))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ break;
+ }
+ }
+
+exit:
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s wait_until_sent() exit\n",
+ __FILE__,__LINE__, info->device_name );
+}
+
+/* Return the count of free bytes in transmit buffer
+ */
+static int write_room(struct tty_struct *tty)
+{
+ SLMP_INFO *info = tty->driver_data;
+ int ret;
+
+ if (sanity_check(info, tty->name, "write_room"))
+ return 0;
+
+ if (info->params.mode == MGSL_MODE_HDLC) {
+ ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE;
+ } else {
+ ret = info->max_frame_size - info->tx_count - 1;
+ if (ret < 0)
+ ret = 0;
+ }
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s write_room()=%d\n",
+ __FILE__, __LINE__, info->device_name, ret);
+
+ return ret;
+}
+
+/* enable transmitter and send remaining buffered characters
+ */
+static void flush_chars(struct tty_struct *tty)
+{
+ SLMP_INFO *info = tty->driver_data;
+ unsigned long flags;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):%s flush_chars() entry tx_count=%d\n",
+ __FILE__,__LINE__,info->device_name,info->tx_count);
+
+ if (sanity_check(info, tty->name, "flush_chars"))
+ return;
+
+ if (info->tx_count <= 0 || tty->stopped || tty->hw_stopped ||
+ !info->tx_buf)
+ return;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):%s flush_chars() entry, starting transmitter\n",
+ __FILE__,__LINE__,info->device_name );
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ if (!info->tx_active) {
+ if ( (info->params.mode == MGSL_MODE_HDLC) &&
+ info->tx_count ) {
+ /* operating in synchronous (frame oriented) mode */
+ /* copy data from circular tx_buf to */
+ /* transmit DMA buffer. */
+ tx_load_dma_buffer(info,
+ info->tx_buf,info->tx_count);
+ }
+ tx_start(info);
+ }
+
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+/* Discard all data in the send buffer
+ */
+static void flush_buffer(struct tty_struct *tty)
+{
+ SLMP_INFO *info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s flush_buffer() entry\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (sanity_check(info, tty->name, "flush_buffer"))
+ return;
+
+ spin_lock_irqsave(&info->lock,flags);
+ info->tx_count = info->tx_put = info->tx_get = 0;
+ del_timer(&info->tx_timer);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ tty_wakeup(tty);
+}
+
+/* throttle (stop) transmitter
+ */
+static void tx_hold(struct tty_struct *tty)
+{
+ SLMP_INFO *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "tx_hold"))
+ return;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk("%s(%d):%s tx_hold()\n",
+ __FILE__,__LINE__,info->device_name);
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (info->tx_enabled)
+ tx_stop(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+/* release (start) transmitter
+ */
+static void tx_release(struct tty_struct *tty)
+{
+ SLMP_INFO *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "tx_release"))
+ return;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk("%s(%d):%s tx_release()\n",
+ __FILE__,__LINE__,info->device_name);
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_enabled)
+ tx_start(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+/* Service an IOCTL request
+ *
+ * Arguments:
+ *
+ * tty pointer to tty instance data
+ * cmd IOCTL command code
+ * arg command argument/context
+ *
+ * Return Value: 0 if success, otherwise error code
+ */
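+/*
+ * Typical user-space call sequence (illustrative sketch; "fd" is an
+ * already-open descriptor for this tty device):
+ *
+ *   MGSL_PARAMS params;
+ *   if (ioctl(fd, MGSL_IOCGPARAMS, &params) == 0) {
+ *       params.mode = MGSL_MODE_HDLC;
+ *       ioctl(fd, MGSL_IOCSPARAMS, &params);
+ *   }
+ */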
+static int ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ SLMP_INFO *info = tty->driver_data;
+ void __user *argp = (void __user *)arg;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s ioctl() cmd=%08X\n", __FILE__,__LINE__,
+ info->device_name, cmd );
+
+ if (sanity_check(info, tty->name, "ioctl"))
+ return -ENODEV;
+
+ if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+ (cmd != TIOCMIWAIT)) {
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case MGSL_IOCGPARAMS:
+ return get_params(info, argp);
+ case MGSL_IOCSPARAMS:
+ return set_params(info, argp);
+ case MGSL_IOCGTXIDLE:
+ return get_txidle(info, argp);
+ case MGSL_IOCSTXIDLE:
+ return set_txidle(info, (int)arg);
+ case MGSL_IOCTXENABLE:
+ return tx_enable(info, (int)arg);
+ case MGSL_IOCRXENABLE:
+ return rx_enable(info, (int)arg);
+ case MGSL_IOCTXABORT:
+ return tx_abort(info);
+ case MGSL_IOCGSTATS:
+ return get_stats(info, argp);
+ case MGSL_IOCWAITEVENT:
+ return wait_mgsl_event(info, argp);
+ case MGSL_IOCLOOPTXDONE:
+ return 0; // TODO: Not supported, need to document
+ /* Wait for modem input (DCD,RI,DSR,CTS) change
+ * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
+ */
+ case TIOCMIWAIT:
+ return modem_input_wait(info,(int)arg);
+
+ /*
+ * Input serial line interrupt counters (DCD,RI,DSR,CTS) are
+ * returned through the get_icount() handler below.
+ * NB: both 1->0 and 0->1 transitions are counted except for
+ * RI where only 0->1 is counted.
+ */
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static int get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+{
+ SLMP_INFO *info = tty->driver_data;
+ struct mgsl_icount cnow; /* kernel counter temps */
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+ cnow = info->icount;
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ icount->cts = cnow.cts;
+ icount->dsr = cnow.dsr;
+ icount->rng = cnow.rng;
+ icount->dcd = cnow.dcd;
+ icount->rx = cnow.rx;
+ icount->tx = cnow.tx;
+ icount->frame = cnow.frame;
+ icount->overrun = cnow.overrun;
+ icount->parity = cnow.parity;
+ icount->brk = cnow.brk;
+ icount->buf_overrun = cnow.buf_overrun;
+
+ return 0;
+}
+
+/*
+ * /proc fs routines....
+ */
+
+static inline void line_info(struct seq_file *m, SLMP_INFO *info)
+{
+ char stat_buf[30];
+ unsigned long flags;
+
+ seq_printf(m, "%s: SCABase=%08x Mem=%08X StatusControl=%08x LCR=%08X\n"
+ "\tIRQ=%d MaxFrameSize=%u\n",
+ info->device_name,
+ info->phys_sca_base,
+ info->phys_memory_base,
+ info->phys_statctrl_base,
+ info->phys_lcr_base,
+ info->irq_level,
+ info->max_frame_size );
+
+ /* output current serial signal states */
+ spin_lock_irqsave(&info->lock,flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ stat_buf[0] = 0;
+ stat_buf[1] = 0;
+ if (info->serial_signals & SerialSignal_RTS)
+ strcat(stat_buf, "|RTS");
+ if (info->serial_signals & SerialSignal_CTS)
+ strcat(stat_buf, "|CTS");
+ if (info->serial_signals & SerialSignal_DTR)
+ strcat(stat_buf, "|DTR");
+ if (info->serial_signals & SerialSignal_DSR)
+ strcat(stat_buf, "|DSR");
+ if (info->serial_signals & SerialSignal_DCD)
+ strcat(stat_buf, "|CD");
+ if (info->serial_signals & SerialSignal_RI)
+ strcat(stat_buf, "|RI");
+
+ if (info->params.mode == MGSL_MODE_HDLC) {
+ seq_printf(m, "\tHDLC txok:%d rxok:%d",
+ info->icount.txok, info->icount.rxok);
+ if (info->icount.txunder)
+ seq_printf(m, " txunder:%d", info->icount.txunder);
+ if (info->icount.txabort)
+ seq_printf(m, " txabort:%d", info->icount.txabort);
+ if (info->icount.rxshort)
+ seq_printf(m, " rxshort:%d", info->icount.rxshort);
+ if (info->icount.rxlong)
+ seq_printf(m, " rxlong:%d", info->icount.rxlong);
+ if (info->icount.rxover)
+ seq_printf(m, " rxover:%d", info->icount.rxover);
+ if (info->icount.rxcrc)
+ seq_printf(m, " rxlong:%d", info->icount.rxcrc);
+ } else {
+ seq_printf(m, "\tASYNC tx:%d rx:%d",
+ info->icount.tx, info->icount.rx);
+ if (info->icount.frame)
+ seq_printf(m, " fe:%d", info->icount.frame);
+ if (info->icount.parity)
+ seq_printf(m, " pe:%d", info->icount.parity);
+ if (info->icount.brk)
+ seq_printf(m, " brk:%d", info->icount.brk);
+ if (info->icount.overrun)
+ seq_printf(m, " oe:%d", info->icount.overrun);
+ }
+
+ /* Append serial signal status to end */
+ seq_printf(m, " %s\n", stat_buf+1);
+
+ seq_printf(m, "\ttxactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
+ info->tx_active,info->bh_requested,info->bh_running,
+ info->pending_bh);
+}
+
+/* Called to print information about devices
+ */
+static int synclinkmp_proc_show(struct seq_file *m, void *v)
+{
+ SLMP_INFO *info;
+
+ seq_printf(m, "synclinkmp driver:%s\n", driver_version);
+
+ info = synclinkmp_device_list;
+ while( info ) {
+ line_info(m, info);
+ info = info->next_device;
+ }
+ return 0;
+}
+
+static int synclinkmp_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, synclinkmp_proc_show, NULL);
+}
+
+static const struct file_operations synclinkmp_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = synclinkmp_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Return the count of bytes in transmit buffer
+ */
+static int chars_in_buffer(struct tty_struct *tty)
+{
+ SLMP_INFO *info = tty->driver_data;
+
+ if (sanity_check(info, tty->name, "chars_in_buffer"))
+ return 0;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s chars_in_buffer()=%d\n",
+ __FILE__, __LINE__, info->device_name, info->tx_count);
+
+ return info->tx_count;
+}
+
+/* Signal remote device to throttle send data (our receive data)
+ */
+static void throttle(struct tty_struct * tty)
+{
+ SLMP_INFO *info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s throttle() entry\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (sanity_check(info, tty->name, "throttle"))
+ return;
+
+ if (I_IXOFF(tty))
+ send_xchar(tty, STOP_CHAR(tty));
+
+ if (tty->termios->c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock,flags);
+ info->serial_signals &= ~SerialSignal_RTS;
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+}
+
+/* Signal remote device to stop throttling send data (our receive data)
+ */
+static void unthrottle(struct tty_struct * tty)
+{
+ SLMP_INFO *info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s unthrottle() entry\n",
+ __FILE__,__LINE__, info->device_name );
+
+ if (sanity_check(info, tty->name, "unthrottle"))
+ return;
+
+ if (I_IXOFF(tty)) {
+ if (info->x_char)
+ info->x_char = 0;
+ else
+ send_xchar(tty, START_CHAR(tty));
+ }
+
+ if (tty->termios->c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock,flags);
+ info->serial_signals |= SerialSignal_RTS;
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+}
+
+/* set or clear transmit break condition
+ * break_state -1=set break condition, 0=clear
+ */
+static int set_break(struct tty_struct *tty, int break_state)
+{
+ unsigned char RegValue;
+ SLMP_INFO * info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s set_break(%d)\n",
+ __FILE__,__LINE__, info->device_name, break_state);
+
+ if (sanity_check(info, tty->name, "set_break"))
+ return -EINVAL;
+
+ spin_lock_irqsave(&info->lock,flags);
+ RegValue = read_reg(info, CTL);
+ if (break_state == -1)
+ RegValue |= BIT3;
+ else
+ RegValue &= ~BIT3;
+ write_reg(info, CTL, RegValue);
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+#if SYNCLINK_GENERIC_HDLC
+
+/**
+ * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
+ * set encoding and frame check sequence (FCS) options
+ *
+ * dev pointer to network device structure
+ * encoding serial encoding setting
+ * parity FCS setting
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
+{
+ SLMP_INFO *info = dev_to_port(dev);
+ unsigned char new_encoding;
+ unsigned short new_crctype;
+
+ /* return error if TTY interface open */
+ if (info->port.count)
+ return -EBUSY;
+
+ switch (encoding)
+ {
+ case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
+ case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
+ case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
+ case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
+ case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
+ default: return -EINVAL;
+ }
+
+ switch (parity)
+ {
+ case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
+ case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
+ case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
+ default: return -EINVAL;
+ }
+
+ info->params.encoding = new_encoding;
+ info->params.crc_type = new_crctype;
+
+ /* if network interface up, reprogram hardware */
+ if (info->netcount)
+ program_hw(info);
+
+ return 0;
+}
+
+/**
+ * called by generic HDLC layer to send frame
+ *
+ * skb socket buffer containing HDLC frame
+ * dev pointer to network device structure
+ */
+static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ SLMP_INFO *info = dev_to_port(dev);
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
+
+ /* stop sending until this frame completes */
+ netif_stop_queue(dev);
+
+ /* copy data to device buffers */
+ info->tx_count = skb->len;
+ tx_load_dma_buffer(info, skb->data, skb->len);
+
+ /* update network statistics */
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ /* done with socket buffer, so free it */
+ dev_kfree_skb(skb);
+
+ /* save start time for transmit timeout detection */
+ dev->trans_start = jiffies;
+
+ /* start hardware transmitter if necessary */
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_active)
+ tx_start(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * called by network layer when interface enabled
+ * claim resources and initialize hardware
+ *
+ * dev pointer to network device structure
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_open(struct net_device *dev)
+{
+ SLMP_INFO *info = dev_to_port(dev);
+ int rc;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
+
+ /* generic HDLC layer open processing */
+ if ((rc = hdlc_open(dev)))
+ return rc;
+
+ /* arbitrate between network and tty opens */
+ spin_lock_irqsave(&info->netlock, flags);
+ if (info->port.count != 0 || info->netcount != 0) {
+ printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
+ spin_unlock_irqrestore(&info->netlock, flags);
+ return -EBUSY;
+ }
+ info->netcount=1;
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ /* claim resources and init adapter */
+ if ((rc = startup(info)) != 0) {
+ spin_lock_irqsave(&info->netlock, flags);
+ info->netcount=0;
+ spin_unlock_irqrestore(&info->netlock, flags);
+ return rc;
+ }
+
+ /* assert DTR and RTS, apply hardware settings */
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ program_hw(info);
+
+ /* enable network layer transmit */
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+
+ /* inform generic HDLC layer of current DCD status */
+ spin_lock_irqsave(&info->lock, flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
+ if (info->serial_signals & SerialSignal_DCD)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ return 0;
+}
+
+/**
+ * called by network layer when interface is disabled
+ * shutdown hardware and release resources
+ *
+ * dev pointer to network device structure
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_close(struct net_device *dev)
+{
+ SLMP_INFO *info = dev_to_port(dev);
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
+
+ netif_stop_queue(dev);
+
+ /* shutdown adapter and release resources */
+ shutdown(info);
+
+ hdlc_close(dev);
+
+ spin_lock_irqsave(&info->netlock, flags);
+ info->netcount=0;
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ return 0;
+}
+
+/**
+ * called by network layer to process IOCTL call to network device
+ *
+ * dev pointer to network device structure
+ * ifr pointer to network interface request structure
+ * cmd IOCTL command code
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ SLMP_INFO *info = dev_to_port(dev);
+ unsigned int flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
+
+ /* return error if TTY interface open */
+ if (info->port.count)
+ return -EBUSY;
+
+ if (cmd != SIOCWANDEV)
+ return hdlc_ioctl(dev, ifr, cmd);
+
+ switch(ifr->ifr_settings.type) {
+ case IF_GET_IFACE: /* return current sync_serial_settings */
+
+ ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+
+ flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
+
+ switch (flags){
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
+ case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
+ default: new_line.clock_type = CLOCK_DEFAULT;
+ }
+
+ new_line.clock_rate = info->params.clock_speed;
+ new_line.loopback = info->params.loopback ? 1:0;
+
+ if (copy_to_user(line, &new_line, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
+
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&new_line, line, size))
+ return -EFAULT;
+
+ switch (new_line.clock_type)
+ {
+ case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
+ case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
+ case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
+ case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
+ case CLOCK_DEFAULT: flags = info->params.flags &
+ (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
+ default: return -EINVAL;
+ }
+
+ if (new_line.loopback != 0 && new_line.loopback != 1)
+ return -EINVAL;
+
+ info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
+ info->params.flags |= flags;
+
+ info->params.loopback = new_line.loopback;
+
+ if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
+ info->params.clock_speed = new_line.clock_rate;
+ else
+ info->params.clock_speed = 0;
+
+ /* if network interface up, reprogram hardware */
+ if (info->netcount)
+ program_hw(info);
+ return 0;
+
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+/**
+ * called by network layer when transmit timeout is detected
+ *
+ * dev pointer to network device structure
+ */
+static void hdlcdev_tx_timeout(struct net_device *dev)
+{
+ SLMP_INFO *info = dev_to_port(dev);
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("hdlcdev_tx_timeout(%s)\n",dev->name);
+
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+
+ spin_lock_irqsave(&info->lock,flags);
+ tx_stop(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ netif_wake_queue(dev);
+}
+
+/**
+ * called by device driver when transmit completes
+ * reenable network layer transmit if stopped
+ *
+ * info pointer to device instance information
+ */
+static void hdlcdev_tx_done(SLMP_INFO *info)
+{
+ if (netif_queue_stopped(info->netdev))
+ netif_wake_queue(info->netdev);
+}
+
+/**
+ * called by device driver when frame received
+ * pass frame to network layer
+ *
+ * info pointer to device instance information
+ * buf pointer to buffer containing frame data
+ * size count of data bytes in buf
+ */
+static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size)
+{
+ struct sk_buff *skb = dev_alloc_skb(size);
+ struct net_device *dev = info->netdev;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("hdlcdev_rx(%s)\n",dev->name);
+
+ if (skb == NULL) {
+ printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ return;
+ }
+
+ memcpy(skb_put(skb, size), buf, size);
+
+ skb->protocol = hdlc_type_trans(skb, dev);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += size;
+
+ netif_rx(skb);
+}
+
+static const struct net_device_ops hdlcdev_ops = {
+ .ndo_open = hdlcdev_open,
+ .ndo_stop = hdlcdev_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = hdlcdev_ioctl,
+ .ndo_tx_timeout = hdlcdev_tx_timeout,
+};
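+
+/* Note: hdlc_start_xmit() is the generic HDLC transmit entry point; it
+ * normally hands frames to the hdlc->xmit callback (hdlcdev_xmit here)
+ * registered by hdlcdev_init() below.
+ */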
+
+/**
+ * called by device driver when adding device instance
+ * do generic HDLC initialization
+ *
+ * info pointer to device instance information
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_init(SLMP_INFO *info)
+{
+ int rc;
+ struct net_device *dev;
+ hdlc_device *hdlc;
+
+ /* allocate and initialize network and HDLC layer objects */
+
+ if (!(dev = alloc_hdlcdev(info))) {
+ printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
+ return -ENOMEM;
+ }
+
+ /* for network layer reporting purposes only */
+ dev->mem_start = info->phys_sca_base;
+ dev->mem_end = info->phys_sca_base + SCA_BASE_SIZE - 1;
+ dev->irq = info->irq_level;
+
+ /* network layer callbacks and settings */
+ dev->netdev_ops = &hdlcdev_ops;
+ dev->watchdog_timeo = 10 * HZ;
+ dev->tx_queue_len = 50;
+
+ /* generic HDLC layer callbacks and settings */
+ hdlc = dev_to_hdlc(dev);
+ hdlc->attach = hdlcdev_attach;
+ hdlc->xmit = hdlcdev_xmit;
+
+ /* register objects with HDLC layer */
+ if ((rc = register_hdlc_device(dev))) {
+ printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
+ free_netdev(dev);
+ return rc;
+ }
+
+ info->netdev = dev;
+ return 0;
+}
+
+/**
+ * called by device driver when removing device instance
+ * do generic HDLC cleanup
+ *
+ * info pointer to device instance information
+ */
+static void hdlcdev_exit(SLMP_INFO *info)
+{
+ unregister_hdlc_device(info->netdev);
+ free_netdev(info->netdev);
+ info->netdev = NULL;
+}
+
+#endif /* SYNCLINK_GENERIC_HDLC */
+
+
+/* Return next bottom half action to perform.
+ * Return Value: BH action code or 0 if nothing to do.
+ */
+static int bh_action(SLMP_INFO *info)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ if (info->pending_bh & BH_RECEIVE) {
+ info->pending_bh &= ~BH_RECEIVE;
+ rc = BH_RECEIVE;
+ } else if (info->pending_bh & BH_TRANSMIT) {
+ info->pending_bh &= ~BH_TRANSMIT;
+ rc = BH_TRANSMIT;
+ } else if (info->pending_bh & BH_STATUS) {
+ info->pending_bh &= ~BH_STATUS;
+ rc = BH_STATUS;
+ }
+
+ if (!rc) {
+ /* Mark BH routine as complete */
+ info->bh_running = false;
+ info->bh_requested = false;
+ }
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ return rc;
+}
+
+/* Perform bottom half processing of work items queued by ISR.
+ */
+static void bh_handler(struct work_struct *work)
+{
+ SLMP_INFO *info = container_of(work, SLMP_INFO, task);
+ int action;
+
+ if (!info)
+ return;
+
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk( "%s(%d):%s bh_handler() entry\n",
+ __FILE__,__LINE__,info->device_name);
+
+ info->bh_running = true;
+
+ while((action = bh_action(info)) != 0) {
+
+ /* Process work item */
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk( "%s(%d):%s bh_handler() work item action=%d\n",
+ __FILE__,__LINE__,info->device_name, action);
+
+ switch (action) {
+
+ case BH_RECEIVE:
+ bh_receive(info);
+ break;
+ case BH_TRANSMIT:
+ bh_transmit(info);
+ break;
+ case BH_STATUS:
+ bh_status(info);
+ break;
+ default:
+ /* unknown work item ID */
+ printk("%s(%d):%s Unknown work item ID=%08X!\n",
+ __FILE__,__LINE__,info->device_name,action);
+ break;
+ }
+ }
+
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk( "%s(%d):%s bh_handler() exit\n",
+ __FILE__,__LINE__,info->device_name);
+}
+
+static void bh_receive(SLMP_INFO *info)
+{
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk( "%s(%d):%s bh_receive()\n",
+ __FILE__,__LINE__,info->device_name);
+
+ while( rx_get_frame(info) );
+}
+
+static void bh_transmit(SLMP_INFO *info)
+{
+ struct tty_struct *tty = info->port.tty;
+
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk( "%s(%d):%s bh_transmit() entry\n",
+ __FILE__,__LINE__,info->device_name);
+
+ if (tty)
+ tty_wakeup(tty);
+}
+
+static void bh_status(SLMP_INFO *info)
+{
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk( "%s(%d):%s bh_status() entry\n",
+ __FILE__,__LINE__,info->device_name);
+
+ info->ri_chkcount = 0;
+ info->dsr_chkcount = 0;
+ info->dcd_chkcount = 0;
+ info->cts_chkcount = 0;
+}
+
+static void isr_timer(SLMP_INFO * info)
+{
+ unsigned char timer = (info->port_num & 1) ? TIMER2 : TIMER0;
+
+ /* IER2<7..4> = timer<3..0> interrupt enables (0=disabled) */
+ write_reg(info, IER2, 0);
+
+ /* TMCS, Timer Control/Status Register
+ *
+ * 07 CMF, Compare match flag (read only) 1=match
+ * 06 ECMI, CMF Interrupt Enable: 0=disabled
+ * 05 Reserved, must be 0
+ * 04 TME, Timer Enable
+ * 03..00 Reserved, must be 0
+ *
+ * 0000 0000
+ */
+ write_reg(info, (unsigned char)(timer + TMCS), 0);
+
+ info->irq_occurred = true;
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s isr_timer()\n",
+ __FILE__,__LINE__,info->device_name);
+}
+
+static void isr_rxint(SLMP_INFO * info)
+{
+ struct tty_struct *tty = info->port.tty;
+ struct mgsl_icount *icount = &info->icount;
+ unsigned char status = read_reg(info, SR1) & info->ie1_value & (FLGD + IDLD + CDCD + BRKD);
+ unsigned char status2 = read_reg(info, SR2) & info->ie2_value & OVRN;
+
+ /* clear status bits */
+ if (status)
+ write_reg(info, SR1, status);
+
+ if (status2)
+ write_reg(info, SR2, status2);
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s isr_rxint status=%02X %02x\n",
+ __FILE__,__LINE__,info->device_name,status,status2);
+
+ if (info->params.mode == MGSL_MODE_ASYNC) {
+ if (status & BRKD) {
+ icount->brk++;
+
+ /* process break detection if tty control
+ * is not set to ignore it
+ */
+ if ( tty ) {
+ if (!(status & info->ignore_status_mask1)) {
+ if (info->read_status_mask1 & BRKD) {
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
+ if (info->port.flags & ASYNC_SAK)
+ do_SAK(tty);
+ }
+ }
+ }
+ }
+ }
+ else {
+ if (status & (FLGD|IDLD)) {
+ if (status & FLGD)
+ info->icount.exithunt++;
+ else if (status & IDLD)
+ info->icount.rxidle++;
+ wake_up_interruptible(&info->event_wait_q);
+ }
+ }
+
+ if (status & CDCD) {
+ /* simulate a common modem status change interrupt
+ * for our handler
+ */
+ get_signals( info );
+ isr_io_pin(info,
+ MISCSTATUS_DCD_LATCHED|(info->serial_signals&SerialSignal_DCD));
+ }
+}
+
+/*
+ * handle async rx data interrupts
+ */
+static void isr_rxrdy(SLMP_INFO * info)
+{
+ u16 status;
+ unsigned char DataByte;
+ struct tty_struct *tty = info->port.tty;
+ struct mgsl_icount *icount = &info->icount;
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s isr_rxrdy\n",
+ __FILE__,__LINE__,info->device_name);
+
+ while((status = read_reg(info,CST0)) & BIT0)
+ {
+ int flag = 0;
+ bool over = false;
+ DataByte = read_reg(info,TRB);
+
+ icount->rx++;
+
+ if ( status & (PE + FRME + OVRN) ) {
+ printk("%s(%d):%s rxerr=%04X\n",
+ __FILE__,__LINE__,info->device_name,status);
+
+ /* update error statistics */
+ if (status & PE)
+ icount->parity++;
+ else if (status & FRME)
+ icount->frame++;
+ else if (status & OVRN)
+ icount->overrun++;
+
+ /* discard char if tty control flags say so */
+ if (status & info->ignore_status_mask2)
+ continue;
+
+ status &= info->read_status_mask2;
+
+ if ( tty ) {
+ if (status & PE)
+ flag = TTY_PARITY;
+ else if (status & FRME)
+ flag = TTY_FRAME;
+ if (status & OVRN) {
+ /* Overrun is special, since it's
+ * reported immediately, and doesn't
+ * affect the current character
+ */
+ over = true;
+ }
+ }
+ } /* end of if (error) */
+
+ if ( tty ) {
+ tty_insert_flip_char(tty, DataByte, flag);
+ if (over)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+ }
+
+ if ( debug_level >= DEBUG_LEVEL_ISR ) {
+ printk("%s(%d):%s rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
+ __FILE__,__LINE__,info->device_name,
+ icount->rx,icount->brk,icount->parity,
+ icount->frame,icount->overrun);
+ }
+
+ if ( tty )
+ tty_flip_buffer_push(tty);
+}
+
+static void isr_txeom(SLMP_INFO * info, unsigned char status)
+{
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s isr_txeom status=%02x\n",
+ __FILE__,__LINE__,info->device_name,status);
+
+ write_reg(info, TXDMA + DIR, 0x00); /* disable Tx DMA IRQs */
+ write_reg(info, TXDMA + DSR, 0xc0); /* clear IRQs and disable DMA */
+ write_reg(info, TXDMA + DCMD, SWABORT); /* reset/init DMA channel */
+
+ if (status & UDRN) {
+ write_reg(info, CMD, TXRESET);
+ write_reg(info, CMD, TXENABLE);
+ } else
+ write_reg(info, CMD, TXBUFCLR);
+
+ /* disable and clear tx interrupts */
+ info->ie0_value &= ~TXRDYE;
+ info->ie1_value &= ~(IDLE + UDRN);
+ write_reg16(info, IE0, (unsigned short)((info->ie1_value << 8) + info->ie0_value));
+ write_reg(info, SR1, (unsigned char)(UDRN + IDLE));
+
+ if ( info->tx_active ) {
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ if (status & UDRN)
+ info->icount.txunder++;
+ else if (status & IDLE)
+ info->icount.txok++;
+ }
+
+ info->tx_active = false;
+ info->tx_count = info->tx_put = info->tx_get = 0;
+
+ del_timer(&info->tx_timer);
+
+ if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done ) {
+ info->serial_signals &= ~SerialSignal_RTS;
+ info->drop_rts_on_tx_done = false;
+ set_signals(info);
+ }
+
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount)
+ hdlcdev_tx_done(info);
+ else
+#endif
+ {
+ if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) {
+ tx_stop(info);
+ return;
+ }
+ info->pending_bh |= BH_TRANSMIT;
+ }
+ }
+}
+
+
+/*
+ * handle tx status interrupts
+ */
+static void isr_txint(SLMP_INFO * info)
+{
+ unsigned char status = read_reg(info, SR1) & info->ie1_value & (UDRN + IDLE + CCTS);
+
+ /* clear status bits */
+ write_reg(info, SR1, status);
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s isr_txint status=%02x\n",
+ __FILE__,__LINE__,info->device_name,status);
+
+ if (status & (UDRN + IDLE))
+ isr_txeom(info, status);
+
+ if (status & CCTS) {
+ /* simulate a common modem status change interrupt
+ * for our handler
+ */
+ get_signals( info );
+ isr_io_pin(info,
+ MISCSTATUS_CTS_LATCHED|(info->serial_signals&SerialSignal_CTS));
+
+ }
+}
+
+/*
+ * handle async tx data interrupts
+ */
+static void isr_txrdy(SLMP_INFO * info)
+{
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s isr_txrdy() tx_count=%d\n",
+ __FILE__,__LINE__,info->device_name,info->tx_count);
+
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ /* disable TXRDY IRQ, enable IDLE IRQ */
+ info->ie0_value &= ~TXRDYE;
+ info->ie1_value |= IDLE;
+ write_reg16(info, IE0, (unsigned short)((info->ie1_value << 8) + info->ie0_value));
+ return;
+ }
+
+ if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) {
+ tx_stop(info);
+ return;
+ }
+
+ if ( info->tx_count )
+ tx_load_fifo( info );
+ else {
+ info->tx_active = false;
+ info->ie0_value &= ~TXRDYE;
+ write_reg(info, IE0, info->ie0_value);
+ }
+
+ if (info->tx_count < WAKEUP_CHARS)
+ info->pending_bh |= BH_TRANSMIT;
+}
+
+static void isr_rxdmaok(SLMP_INFO * info)
+{
+ /* BIT7 = EOT (end of transfer)
+ * BIT6 = EOM (end of message/frame)
+ */
+ unsigned char status = read_reg(info,RXDMA + DSR) & 0xc0;
+
+ /* clear IRQ (BIT0 must be 1 to prevent clearing DE bit) */
+ write_reg(info, RXDMA + DSR, (unsigned char)(status | 1));
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s isr_rxdmaok(), status=%02x\n",
+ __FILE__,__LINE__,info->device_name,status);
+
+ info->pending_bh |= BH_RECEIVE;
+}
+
+static void isr_rxdmaerror(SLMP_INFO * info)
+{
+ /* BIT5 = BOF (buffer overflow)
+ * BIT4 = COF (counter overflow)
+ */
+ unsigned char status = read_reg(info,RXDMA + DSR) & 0x30;
+
+ /* clear IRQ (BIT0 must be 1 to prevent clearing DE bit) */
+ write_reg(info, RXDMA + DSR, (unsigned char)(status | 1));
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s isr_rxdmaerror(), status=%02x\n",
+ __FILE__,__LINE__,info->device_name,status);
+
+ info->rx_overflow = true;
+ info->pending_bh |= BH_RECEIVE;
+}
+
+static void isr_txdmaok(SLMP_INFO * info)
+{
+ unsigned char status_reg1 = read_reg(info, SR1);
+
+ write_reg(info, TXDMA + DIR, 0x00); /* disable Tx DMA IRQs */
+ write_reg(info, TXDMA + DSR, 0xc0); /* clear IRQs and disable DMA */
+ write_reg(info, TXDMA + DCMD, SWABORT); /* reset/init DMA channel */
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s isr_txdmaok(), status=%02x\n",
+ __FILE__,__LINE__,info->device_name,status_reg1);
+
+ /* program TXRDY as FIFO empty flag, enable TXRDY IRQ */
+ write_reg16(info, TRC0, 0);
+ info->ie0_value |= TXRDYE;
+ write_reg(info, IE0, info->ie0_value);
+}
+
+static void isr_txdmaerror(SLMP_INFO * info)
+{
+ /* BIT5 = BOF (buffer overflow)
+ * BIT4 = COF (counter overflow)
+ */
+ unsigned char status = read_reg(info,TXDMA + DSR) & 0x30;
+
+ /* clear IRQ (BIT0 must be 1 to prevent clearing DE bit) */
+ write_reg(info, TXDMA + DSR, (unsigned char)(status | 1));
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s isr_txdmaerror(), status=%02x\n",
+ __FILE__,__LINE__,info->device_name,status);
+}
+
+/* handle input serial signal changes
+ */
+static void isr_io_pin( SLMP_INFO *info, u16 status )
+{
+ struct mgsl_icount *icount;
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):isr_io_pin status=%04X\n",
+ __FILE__,__LINE__,status);
+
+ if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
+ MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
+ icount = &info->icount;
+ /* update input line counters */
+ if (status & MISCSTATUS_RI_LATCHED) {
+ icount->rng++;
+ if ( status & SerialSignal_RI )
+ info->input_signal_events.ri_up++;
+ else
+ info->input_signal_events.ri_down++;
+ }
+ if (status & MISCSTATUS_DSR_LATCHED) {
+ icount->dsr++;
+ if ( status & SerialSignal_DSR )
+ info->input_signal_events.dsr_up++;
+ else
+ info->input_signal_events.dsr_down++;
+ }
+ if (status & MISCSTATUS_DCD_LATCHED) {
+ if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) {
+ info->ie1_value &= ~CDCD;
+ write_reg(info, IE1, info->ie1_value);
+ }
+ icount->dcd++;
+ if (status & SerialSignal_DCD) {
+ info->input_signal_events.dcd_up++;
+ } else
+ info->input_signal_events.dcd_down++;
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount) {
+ if (status & SerialSignal_DCD)
+ netif_carrier_on(info->netdev);
+ else
+ netif_carrier_off(info->netdev);
+ }
+#endif
+ }
+ if (status & MISCSTATUS_CTS_LATCHED)
+ {
+ if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) {
+ info->ie1_value &= ~CCTS;
+ write_reg(info, IE1, info->ie1_value);
+ }
+ icount->cts++;
+ if ( status & SerialSignal_CTS )
+ info->input_signal_events.cts_up++;
+ else
+ info->input_signal_events.cts_down++;
+ }
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+
+ if ( (info->port.flags & ASYNC_CHECK_CD) &&
+ (status & MISCSTATUS_DCD_LATCHED) ) {
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s CD now %s...", info->device_name,
+ (status & SerialSignal_DCD) ? "on" : "off");
+ if (status & SerialSignal_DCD)
+ wake_up_interruptible(&info->port.open_wait);
+ else {
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("doing serial hangup...");
+ if (info->port.tty)
+ tty_hangup(info->port.tty);
+ }
+ }
+
+ if ( (info->port.flags & ASYNC_CTS_FLOW) &&
+ (status & MISCSTATUS_CTS_LATCHED) ) {
+ if ( info->port.tty ) {
+ if (info->port.tty->hw_stopped) {
+ if (status & SerialSignal_CTS) {
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("CTS tx start...");
+ info->port.tty->hw_stopped = 0;
+ tx_start(info);
+ info->pending_bh |= BH_TRANSMIT;
+ return;
+ }
+ } else {
+ if (!(status & SerialSignal_CTS)) {
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("CTS tx stop...");
+ info->port.tty->hw_stopped = 1;
+ tx_stop(info);
+ }
+ }
+ }
+ }
+ }
+
+ info->pending_bh |= BH_STATUS;
+}
+
+/* Interrupt service routine entry point.
+ *
+ * Arguments:
+ * irq interrupt number that caused interrupt
+ * dev_id device ID supplied during interrupt registration
+ * regs interrupted processor context
+ */
+static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
+{
+ SLMP_INFO *info = dev_id;
+ unsigned char status, status0, status1=0;
+ unsigned char dmastatus, dmastatus0, dmastatus1=0;
+ unsigned char timerstatus0, timerstatus1=0;
+ unsigned char shift;
+ unsigned int i;
+ unsigned short tmp;
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk(KERN_DEBUG "%s(%d): synclinkmp_interrupt(%d)entry.\n",
+ __FILE__, __LINE__, info->irq_level);
+
+ spin_lock(&info->lock);
+
+ for(;;) {
+
+ /* get status for SCA0 (ports 0-1) */
+ tmp = read_reg16(info, ISR0); /* get ISR0 and ISR1 in one read */
+ status0 = (unsigned char)tmp;
+ dmastatus0 = (unsigned char)(tmp>>8);
+ timerstatus0 = read_reg(info, ISR2);
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk(KERN_DEBUG "%s(%d):%s status0=%02x, dmastatus0=%02x, timerstatus0=%02x\n",
+ __FILE__, __LINE__, info->device_name,
+ status0, dmastatus0, timerstatus0);
+
+ if (info->port_count == 4) {
+ /* get status for SCA1 (ports 2-3) */
+ tmp = read_reg16(info->port_array[2], ISR0);
+ status1 = (unsigned char)tmp;
+ dmastatus1 = (unsigned char)(tmp>>8);
+ timerstatus1 = read_reg(info->port_array[2], ISR2);
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s status1=%02x, dmastatus1=%02x, timerstatus1=%02x\n",
+ __FILE__,__LINE__,info->device_name,
+ status1,dmastatus1,timerstatus1);
+ }
+
+ if (!status0 && !dmastatus0 && !timerstatus0 &&
+ !status1 && !dmastatus1 && !timerstatus1)
+ break;
+
+ for(i=0; i < info->port_count ; i++) {
+ if (info->port_array[i] == NULL)
+ continue;
+ if (i < 2) {
+ status = status0;
+ dmastatus = dmastatus0;
+ } else {
+ status = status1;
+ dmastatus = dmastatus1;
+ }
+
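+ /* Each SCA status byte packs two ports: bits 3..0 carry the even
+ * port of the pair and bits 7..4 the odd port, hence the nibble shift.
+ */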
+ shift = i & 1 ? 4 :0;
+
+ if (status & BIT0 << shift)
+ isr_rxrdy(info->port_array[i]);
+ if (status & BIT1 << shift)
+ isr_txrdy(info->port_array[i]);
+ if (status & BIT2 << shift)
+ isr_rxint(info->port_array[i]);
+ if (status & BIT3 << shift)
+ isr_txint(info->port_array[i]);
+
+ if (dmastatus & BIT0 << shift)
+ isr_rxdmaerror(info->port_array[i]);
+ if (dmastatus & BIT1 << shift)
+ isr_rxdmaok(info->port_array[i]);
+ if (dmastatus & BIT2 << shift)
+ isr_txdmaerror(info->port_array[i]);
+ if (dmastatus & BIT3 << shift)
+ isr_txdmaok(info->port_array[i]);
+ }
+
+ if (timerstatus0 & (BIT5 | BIT4))
+ isr_timer(info->port_array[0]);
+ if (timerstatus0 & (BIT7 | BIT6))
+ isr_timer(info->port_array[1]);
+ if (timerstatus1 & (BIT5 | BIT4))
+ isr_timer(info->port_array[2]);
+ if (timerstatus1 & (BIT7 | BIT6))
+ isr_timer(info->port_array[3]);
+ }
+
+ for(i=0; i < info->port_count ; i++) {
+ SLMP_INFO * port = info->port_array[i];
+
+ /* Request bottom half processing if there's something
+ * for it to do and the bh is not already running.
+ *
+ * Note: startup adapter diags require interrupts.
+ * do not request bottom half processing if the
+ * device is not open in a normal mode.
+ */
+ if ( port && (port->port.count || port->netcount) &&
+ port->pending_bh && !port->bh_running &&
+ !port->bh_requested ) {
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk("%s(%d):%s queueing bh task.\n",
+ __FILE__,__LINE__,port->device_name);
+ schedule_work(&port->task);
+ port->bh_requested = true;
+ }
+ }
+
+ spin_unlock(&info->lock);
+
+ if ( debug_level >= DEBUG_LEVEL_ISR )
+ printk(KERN_DEBUG "%s(%d):synclinkmp_interrupt(%d)exit.\n",
+ __FILE__, __LINE__, info->irq_level);
+ return IRQ_HANDLED;
+}
+
+/* Initialize and start device.
+ */
+static int startup(SLMP_INFO * info)
+{
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk("%s(%d):%s tx_releaseup()\n",__FILE__,__LINE__,info->device_name);
+
+ if (info->port.flags & ASYNC_INITIALIZED)
+ return 0;
+
+ if (!info->tx_buf) {
+ info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
+ if (!info->tx_buf) {
+ printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
+ __FILE__,__LINE__,info->device_name);
+ return -ENOMEM;
+ }
+ }
+
+ info->pending_bh = 0;
+
+ memset(&info->icount, 0, sizeof(info->icount));
+
+ /* program hardware for current parameters */
+ reset_port(info);
+
+ change_params(info);
+
+ mod_timer(&info->status_timer, jiffies + msecs_to_jiffies(10));
+
+ if (info->port.tty)
+ clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
+
+ info->port.flags |= ASYNC_INITIALIZED;
+
+ return 0;
+}
+
+/* Called by close() and hangup() to shutdown hardware
+ */
+static void shutdown(SLMP_INFO * info)
+{
+ unsigned long flags;
+
+ if (!(info->port.flags & ASYNC_INITIALIZED))
+ return;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s synclinkmp_shutdown()\n",
+ __FILE__,__LINE__, info->device_name );
+
+ /* clear status wait queue because status changes */
+ /* can't happen after shutting down the hardware */
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+
+ del_timer(&info->tx_timer);
+ del_timer(&info->status_timer);
+
+ kfree(info->tx_buf);
+ info->tx_buf = NULL;
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ reset_port(info);
+
+ if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
+ info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ set_signals(info);
+ }
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ if (info->port.tty)
+ set_bit(TTY_IO_ERROR, &info->port.tty->flags);
+
+ info->port.flags &= ~ASYNC_INITIALIZED;
+}
+
+static void program_hw(SLMP_INFO *info)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ rx_stop(info);
+ tx_stop(info);
+
+ info->tx_count = info->tx_put = info->tx_get = 0;
+
+ if (info->params.mode == MGSL_MODE_HDLC || info->netcount)
+ hdlc_mode(info);
+ else
+ async_mode(info);
+
+ set_signals(info);
+
+ info->dcd_chkcount = 0;
+ info->cts_chkcount = 0;
+ info->ri_chkcount = 0;
+ info->dsr_chkcount = 0;
+
+ info->ie1_value |= (CDCD|CCTS);
+ write_reg(info, IE1, info->ie1_value);
+
+ get_signals(info);
+
+ if (info->netcount || (info->port.tty && info->port.tty->termios->c_cflag & CREAD) )
+ rx_start(info);
+
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+/* Reconfigure adapter based on new parameters
+ */
+static void change_params(SLMP_INFO *info)
+{
+ unsigned cflag;
+ int bits_per_char;
+
+ if (!info->port.tty || !info->port.tty->termios)
+ return;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s change_params()\n",
+ __FILE__,__LINE__, info->device_name );
+
+ cflag = info->port.tty->termios->c_cflag;
+
+ /* if B0 rate (hangup) specified then negate DTR and RTS */
+ /* otherwise assert DTR and RTS */
+ if (cflag & CBAUD)
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+
+ /* byte size and parity */
+
+ switch (cflag & CSIZE) {
+ case CS5: info->params.data_bits = 5; break;
+ case CS6: info->params.data_bits = 6; break;
+ case CS7: info->params.data_bits = 7; break;
+ case CS8: info->params.data_bits = 8; break;
+ /* Never happens, but GCC is too dumb to figure it out */
+ default: info->params.data_bits = 7; break;
+ }
+
+ if (cflag & CSTOPB)
+ info->params.stop_bits = 2;
+ else
+ info->params.stop_bits = 1;
+
+ info->params.parity = ASYNC_PARITY_NONE;
+ if (cflag & PARENB) {
+ if (cflag & PARODD)
+ info->params.parity = ASYNC_PARITY_ODD;
+ else
+ info->params.parity = ASYNC_PARITY_EVEN;
+#ifdef CMSPAR
+ if (cflag & CMSPAR)
+ info->params.parity = ASYNC_PARITY_SPACE;
+#endif
+ }
+
+ /* calculate number of jiffies to transmit a full
+ * FIFO (32 bytes) at specified data rate
+ */
+ bits_per_char = info->params.data_bits +
+ info->params.stop_bits + 1;
+
+ /* if port data rate is set to 460800 or less then
+ * allow tty settings to override, otherwise keep the
+ * current data rate.
+ */
+ if (info->params.data_rate <= 460800) {
+ info->params.data_rate = tty_get_baud_rate(info->port.tty);
+ }
+
+ if ( info->params.data_rate ) {
+ info->timeout = (32*HZ*bits_per_char) /
+ info->params.data_rate;
+ }
+ info->timeout += HZ/50; /* Add .02 seconds of slop */
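+ /* Rough example of the timeout math above (illustrative only): with
+  * 8 data bits, 1 stop bit and a 9600bps rate, bits_per_char is
+  * 8 + 1 + 1 = 10 (the extra bit presumably covering the start bit),
+  * so the FIFO drain time is 32*HZ*10/9600 = HZ/30, about 33ms, on
+  * top of the 20ms of slop.
+  */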
+
+ if (cflag & CRTSCTS)
+ info->port.flags |= ASYNC_CTS_FLOW;
+ else
+ info->port.flags &= ~ASYNC_CTS_FLOW;
+
+ if (cflag & CLOCAL)
+ info->port.flags &= ~ASYNC_CHECK_CD;
+ else
+ info->port.flags |= ASYNC_CHECK_CD;
+
+ /* process tty input control flags */
+
+ info->read_status_mask2 = OVRN;
+ if (I_INPCK(info->port.tty))
+ info->read_status_mask2 |= PE | FRME;
+ if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
+ info->read_status_mask1 |= BRKD;
+ if (I_IGNPAR(info->port.tty))
+ info->ignore_status_mask2 |= PE | FRME;
+ if (I_IGNBRK(info->port.tty)) {
+ info->ignore_status_mask1 |= BRKD;
+ /* If ignoring parity and break indicators, ignore
+ * overruns too. (For real raw support).
+ */
+ if (I_IGNPAR(info->port.tty))
+ info->ignore_status_mask2 |= OVRN;
+ }
+
+ program_hw(info);
+}
+
+static int get_stats(SLMP_INFO * info, struct mgsl_icount __user *user_icount)
+{
+ int err;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s get_params()\n",
+ __FILE__,__LINE__, info->device_name);
+
+ if (!user_icount) {
+ memset(&info->icount, 0, sizeof(info->icount));
+ } else {
+ mutex_lock(&info->port.mutex);
+ COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
+ mutex_unlock(&info->port.mutex);
+ if (err)
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int get_params(SLMP_INFO * info, MGSL_PARAMS __user *user_params)
+{
+ int err;
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s get_params()\n",
+ __FILE__,__LINE__, info->device_name);
+
+ mutex_lock(&info->port.mutex);
+ COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
+ mutex_unlock(&info->port.mutex);
+ if (err) {
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):%s get_params() user buffer copy failed\n",
+ __FILE__,__LINE__,info->device_name);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int set_params(SLMP_INFO * info, MGSL_PARAMS __user *new_params)
+{
+ unsigned long flags;
+ MGSL_PARAMS tmp_params;
+ int err;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s set_params\n",
+ __FILE__,__LINE__,info->device_name );
+ COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
+ if (err) {
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):%s set_params() user buffer copy failed\n",
+ __FILE__,__LINE__,info->device_name);
+ return -EFAULT;
+ }
+
+ mutex_lock(&info->port.mutex);
+ spin_lock_irqsave(&info->lock,flags);
+ memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ change_params(info);
+ mutex_unlock(&info->port.mutex);
+
+ return 0;
+}
+
+static int get_txidle(SLMP_INFO * info, int __user *idle_mode)
+{
+ int err;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s get_txidle()=%d\n",
+ __FILE__,__LINE__, info->device_name, info->idle_mode);
+
+ COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
+ if (err) {
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):%s get_txidle() user buffer copy failed\n",
+ __FILE__,__LINE__,info->device_name);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int set_txidle(SLMP_INFO * info, int idle_mode)
+{
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s set_txidle(%d)\n",
+ __FILE__,__LINE__,info->device_name, idle_mode );
+
+ spin_lock_irqsave(&info->lock,flags);
+ info->idle_mode = idle_mode;
+ tx_set_idle( info );
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+static int tx_enable(SLMP_INFO * info, int enable)
+{
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s tx_enable(%d)\n",
+ __FILE__,__LINE__,info->device_name, enable);
+
+ spin_lock_irqsave(&info->lock,flags);
+ if ( enable ) {
+ if ( !info->tx_enabled ) {
+ tx_start(info);
+ }
+ } else {
+ if ( info->tx_enabled )
+ tx_stop(info);
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+/* abort send HDLC frame
+ */
+static int tx_abort(SLMP_INFO * info)
+{
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s tx_abort()\n",
+ __FILE__,__LINE__,info->device_name);
+
+ spin_lock_irqsave(&info->lock,flags);
+ if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC ) {
+ info->ie1_value &= ~UDRN;
+ info->ie1_value |= IDLE;
+ write_reg(info, IE1, info->ie1_value); /* disable tx status interrupts */
+ write_reg(info, SR1, (unsigned char)(IDLE + UDRN)); /* clear pending */
+
+ write_reg(info, TXDMA + DSR, 0); /* disable DMA channel */
+ write_reg(info, TXDMA + DCMD, SWABORT); /* reset/init DMA channel */
+
+ write_reg(info, CMD, TXABORT);
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+static int rx_enable(SLMP_INFO * info, int enable)
+{
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s rx_enable(%d)\n",
+ __FILE__,__LINE__,info->device_name,enable);
+
+ spin_lock_irqsave(&info->lock,flags);
+ if ( enable ) {
+ if ( !info->rx_enabled )
+ rx_start(info);
+ } else {
+ if ( info->rx_enabled )
+ rx_stop(info);
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+/* wait for specified event to occur
+ */
+static int wait_mgsl_event(SLMP_INFO * info, int __user *mask_ptr)
+{
+ unsigned long flags;
+ int s;
+ int rc=0;
+ struct mgsl_icount cprev, cnow;
+ int events;
+ int mask;
+ struct _input_signal_events oldsigs, newsigs;
+ DECLARE_WAITQUEUE(wait, current);
+
+ COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
+ if (rc) {
+ return -EFAULT;
+ }
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s wait_mgsl_event(%d)\n",
+ __FILE__,__LINE__,info->device_name,mask);
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ /* return immediately if state matches requested events */
+ get_signals(info);
+ s = info->serial_signals;
+
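+ /* Build the set of already-satisfied events: each input signal
+  * contributes either its Active or Inactive event based on the
+  * current state, and the caller's mask selects the ones of interest.
+  */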
+ events = mask &
+ ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
+ ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
+ ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
+ ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
+ if (events) {
+ spin_unlock_irqrestore(&info->lock,flags);
+ goto exit;
+ }
+
+ /* save current irq counts */
+ cprev = info->icount;
+ oldsigs = info->input_signal_events;
+
+ /* enable hunt and idle irqs if needed */
+ if (mask & (MgslEvent_ExitHuntMode+MgslEvent_IdleReceived)) {
+ unsigned char oldval = info->ie1_value;
+ unsigned char newval = oldval +
+ (mask & MgslEvent_ExitHuntMode ? FLGD:0) +
+ (mask & MgslEvent_IdleReceived ? IDLD:0);
+ if ( oldval != newval ) {
+ info->ie1_value = newval;
+ write_reg(info, IE1, info->ie1_value);
+ }
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&info->event_wait_q, &wait);
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ for(;;) {
+ schedule();
+ if (signal_pending(current)) {
+ rc = -ERESTARTSYS;
+ break;
+ }
+
+ /* get current irq counts */
+ spin_lock_irqsave(&info->lock,flags);
+ cnow = info->icount;
+ newsigs = info->input_signal_events;
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ /* if no change, wait aborted for some reason */
+ if (newsigs.dsr_up == oldsigs.dsr_up &&
+ newsigs.dsr_down == oldsigs.dsr_down &&
+ newsigs.dcd_up == oldsigs.dcd_up &&
+ newsigs.dcd_down == oldsigs.dcd_down &&
+ newsigs.cts_up == oldsigs.cts_up &&
+ newsigs.cts_down == oldsigs.cts_down &&
+ newsigs.ri_up == oldsigs.ri_up &&
+ newsigs.ri_down == oldsigs.ri_down &&
+ cnow.exithunt == cprev.exithunt &&
+ cnow.rxidle == cprev.rxidle) {
+ rc = -EIO;
+ break;
+ }
+
+ events = mask &
+ ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
+ (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
+ (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
+ (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
+ (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
+ (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
+ (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
+ (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
+ (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
+ (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
+ if (events)
+ break;
+
+ cprev = cnow;
+ oldsigs = newsigs;
+ }
+
+ remove_wait_queue(&info->event_wait_q, &wait);
+ set_current_state(TASK_RUNNING);
+
+
+ if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
+ spin_lock_irqsave(&info->lock,flags);
+ if (!waitqueue_active(&info->event_wait_q)) {
+ /* disable exit hunt mode/idle rcvd IRQs */
+ info->ie1_value &= ~(FLGD|IDLD);
+ write_reg(info, IE1, info->ie1_value);
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+exit:
+ if ( rc == 0 )
+ PUT_USER(rc, events, mask_ptr);
+
+ return rc;
+}
+
+static int modem_input_wait(SLMP_INFO *info,int arg)
+{
+ unsigned long flags;
+ int rc;
+ struct mgsl_icount cprev, cnow;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /* save current irq counts */
+ spin_lock_irqsave(&info->lock,flags);
+ cprev = info->icount;
+ add_wait_queue(&info->status_event_wait_q, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ for(;;) {
+ schedule();
+ if (signal_pending(current)) {
+ rc = -ERESTARTSYS;
+ break;
+ }
+
+ /* get new irq counts */
+ spin_lock_irqsave(&info->lock,flags);
+ cnow = info->icount;
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ /* if no change, wait aborted for some reason */
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
+ rc = -EIO;
+ break;
+ }
+
+ /* check for change in caller specified modem input */
+ if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
+ (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
+ (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
+ (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
+ rc = 0;
+ break;
+ }
+
+ cprev = cnow;
+ }
+ remove_wait_queue(&info->status_event_wait_q, &wait);
+ set_current_state(TASK_RUNNING);
+ return rc;
+}
+
+/* return the state of the serial control and status signals
+ */
+static int tiocmget(struct tty_struct *tty)
+{
+ SLMP_INFO *info = tty->driver_data;
+ unsigned int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
+ ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
+ ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
+ ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
+ ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
+ ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s tiocmget() value=%08X\n",
+ __FILE__,__LINE__, info->device_name, result );
+ return result;
+}
+
+/* set modem control signals (DTR/RTS)
+ */
+static int tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ SLMP_INFO *info = tty->driver_data;
+ unsigned long flags;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s tiocmset(%x,%x)\n",
+ __FILE__,__LINE__,info->device_name, set, clear);
+
+ if (set & TIOCM_RTS)
+ info->serial_signals |= SerialSignal_RTS;
+ if (set & TIOCM_DTR)
+ info->serial_signals |= SerialSignal_DTR;
+ if (clear & TIOCM_RTS)
+ info->serial_signals &= ~SerialSignal_RTS;
+ if (clear & TIOCM_DTR)
+ info->serial_signals &= ~SerialSignal_DTR;
+
+ spin_lock_irqsave(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ return 0;
+}
+
+static int carrier_raised(struct tty_port *port)
+{
+ SLMP_INFO *info = container_of(port, SLMP_INFO, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
+}
+
+static void dtr_rts(struct tty_port *port, int on)
+{
+ SLMP_INFO *info = container_of(port, SLMP_INFO, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (on)
+ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+/* Block the current process until the specified port is ready to open.
+ */
+static int block_til_ready(struct tty_struct *tty, struct file *filp,
+ SLMP_INFO *info)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int retval;
+ bool do_clocal = false;
+ bool extra_count = false;
+ unsigned long flags;
+ int cd;
+ struct tty_port *port = &info->port;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s block_til_ready()\n",
+ __FILE__,__LINE__, tty->driver->name );
+
+ if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
+ /* nonblock mode is set or port is not enabled */
+ /* just verify that callout device is not active */
+ port->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+ }
+
+ if (tty->termios->c_cflag & CLOCAL)
+ do_clocal = true;
+
+ /* Wait for carrier detect and the line to become
+ * free (i.e., not in use by the callout). While we are in
+ * this loop, port->count is dropped by one, so that
+ * close() knows when to free things. We restore it upon
+ * exit, either normal or abnormal.
+ */
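+ /* The wait loop below exits when carrier is present (or CLOCAL is
+  * set) and the port is not closing, when the port is hung up or no
+  * longer initialized, or when a signal is pending.
+  */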
+
+ retval = 0;
+ add_wait_queue(&port->open_wait, &wait);
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s block_til_ready() before block, count=%d\n",
+ __FILE__,__LINE__, tty->driver->name, port->count );
+
+ spin_lock_irqsave(&info->lock, flags);
+ if (!tty_hung_up_p(filp)) {
+ extra_count = true;
+ port->count--;
+ }
+ spin_unlock_irqrestore(&info->lock, flags);
+ port->blocked_open++;
+
+ while (1) {
+ if (tty->termios->c_cflag & CBAUD)
+ tty_port_raise_dtr_rts(port);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
+ retval = (port->flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS;
+ break;
+ }
+
+ cd = tty_port_carrier_raised(port);
+
+ if (!(port->flags & ASYNC_CLOSING) && (do_clocal || cd))
+ break;
+
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s block_til_ready() count=%d\n",
+ __FILE__,__LINE__, tty->driver->name, port->count );
+
+ tty_unlock();
+ schedule();
+ tty_lock();
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&port->open_wait, &wait);
+
+ if (extra_count)
+ port->count++;
+ port->blocked_open--;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):%s block_til_ready() after, count=%d\n",
+ __FILE__,__LINE__, tty->driver->name, port->count );
+
+ if (!retval)
+ port->flags |= ASYNC_NORMAL_ACTIVE;
+
+ return retval;
+}
+
+static int alloc_dma_bufs(SLMP_INFO *info)
+{
+ unsigned short BuffersPerFrame;
+ unsigned short BufferCount;
+
+ // Force allocation to start at 64K boundary for each port.
+ // This is necessary because *all* buffer descriptors for a port
+ // *must* be in the same 64K block. All descriptors on a port
+ // share a common 'base' address (upper 8 bits of 24 bits) programmed
+ // into the CBP register.
+ info->port_array[0]->last_mem_alloc = (SCA_MEM_SIZE/4) * info->port_num;
+
+ /* Calculate the number of DMA buffers necessary to hold the */
+ /* largest allowable frame size. Note: If the max frame size is */
+ /* not an even multiple of the DMA buffer size then we need to */
+ /* round the buffer count per frame up one. */
+
+ BuffersPerFrame = (unsigned short)(info->max_frame_size/SCABUFSIZE);
+ if ( info->max_frame_size % SCABUFSIZE )
+ BuffersPerFrame++;
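+ /* Sizing example with hypothetical values (not the driver constants):
+  * if max_frame_size were 4096 and SCABUFSIZE 1024, BuffersPerFrame
+  * would be 4; a 4100 byte max frame would round up to 5 buffers.
+  */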
+
+ /* calculate total number of data buffers (SCABUFSIZE) possible
+  * in one port's memory (SCA_MEM_SIZE/4) after allocating memory
+ * for the descriptor list (BUFFERLISTSIZE).
+ */
+ BufferCount = (SCA_MEM_SIZE/4 - BUFFERLISTSIZE)/SCABUFSIZE;
+
+ /* limit number of buffers to maximum amount of descriptors */
+ if (BufferCount > BUFFERLISTSIZE/sizeof(SCADESC))
+ BufferCount = BUFFERLISTSIZE/sizeof(SCADESC);
+
+ /* use enough buffers to transmit one max size frame */
+ info->tx_buf_count = BuffersPerFrame + 1;
+
+ /* never use more than half the available buffers for transmit */
+ if (info->tx_buf_count > (BufferCount/2))
+ info->tx_buf_count = BufferCount/2;
+
+ if (info->tx_buf_count > SCAMAXDESC)
+ info->tx_buf_count = SCAMAXDESC;
+
+ /* use remaining buffers for receive */
+ info->rx_buf_count = BufferCount - info->tx_buf_count;
+
+ if (info->rx_buf_count > SCAMAXDESC)
+ info->rx_buf_count = SCAMAXDESC;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk("%s(%d):%s Allocating %d TX and %d RX DMA buffers.\n",
+ __FILE__,__LINE__, info->device_name,
+ info->tx_buf_count,info->rx_buf_count);
+
+ if ( alloc_buf_list( info ) < 0 ||
+ alloc_frame_bufs(info,
+ info->rx_buf_list,
+ info->rx_buf_list_ex,
+ info->rx_buf_count) < 0 ||
+ alloc_frame_bufs(info,
+ info->tx_buf_list,
+ info->tx_buf_list_ex,
+ info->tx_buf_count) < 0 ||
+ alloc_tmp_rx_buf(info) < 0 ) {
+ printk("%s(%d):%s Can't allocate DMA buffer memory\n",
+ __FILE__,__LINE__, info->device_name);
+ return -ENOMEM;
+ }
+
+ rx_reset_buffers( info );
+
+ return 0;
+}
+
+/* Allocate DMA buffers for the transmit and receive descriptor lists.
+ */
+static int alloc_buf_list(SLMP_INFO *info)
+{
+ unsigned int i;
+
+ /* build list in adapter shared memory */
+ info->buffer_list = info->memory_base + info->port_array[0]->last_mem_alloc;
+ info->buffer_list_phys = info->port_array[0]->last_mem_alloc;
+ info->port_array[0]->last_mem_alloc += BUFFERLISTSIZE;
+
+ memset(info->buffer_list, 0, BUFFERLISTSIZE);
+
+ /* Save virtual address pointers to the receive and */
+ /* transmit buffer lists. (Receive 1st). These pointers will */
+ /* be used by the processor to access the lists. */
+ info->rx_buf_list = (SCADESC *)info->buffer_list;
+
+ info->tx_buf_list = (SCADESC *)info->buffer_list;
+ info->tx_buf_list += info->rx_buf_count;
+
+ /* Build links for circular buffer entry lists (tx and rx)
+ *
+ * Note: links are physical addresses read by the SCA device
+ * to determine the next buffer entry to use.
+ */
+
+ for ( i = 0; i < info->rx_buf_count; i++ ) {
+ /* calculate and store physical address of this buffer entry */
+ info->rx_buf_list_ex[i].phys_entry =
+ info->buffer_list_phys + (i * sizeof(SCABUFSIZE));
+
+ /* calculate and store physical address of */
+ /* next entry in circular list of entries */
+ info->rx_buf_list[i].next = info->buffer_list_phys;
+ if ( i < info->rx_buf_count - 1 )
+ info->rx_buf_list[i].next += (i + 1) * sizeof(SCADESC);
+
+ info->rx_buf_list[i].length = SCABUFSIZE;
+ }
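+ /* Note: the last rx descriptor's next pointer is left at
+  * buffer_list_phys, i.e. it points back at descriptor 0, closing
+  * the receive descriptor ring.
+  */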
+
+ for ( i = 0; i < info->tx_buf_count; i++ ) {
+ /* calculate and store physical address of this buffer entry */
+ info->tx_buf_list_ex[i].phys_entry = info->buffer_list_phys +
+ ((info->rx_buf_count + i) * sizeof(SCADESC));
+
+ /* calculate and store physical address of */
+ /* next entry in circular list of entries */
+
+ info->tx_buf_list[i].next = info->buffer_list_phys +
+ info->rx_buf_count * sizeof(SCADESC);
+
+ if ( i < info->tx_buf_count - 1 )
+ info->tx_buf_list[i].next += (i + 1) * sizeof(SCADESC);
+ }
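+ /* Likewise the last tx descriptor's next pointer stays at the first
+  * tx descriptor (located just after the rx descriptors), closing the
+  * transmit descriptor ring.
+  */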
+
+ return 0;
+}
+
+/* Allocate the frame DMA buffers used by the specified buffer list.
+ */
+static int alloc_frame_bufs(SLMP_INFO *info, SCADESC *buf_list,SCADESC_EX *buf_list_ex,int count)
+{
+ int i;
+ unsigned long phys_addr;
+
+ for ( i = 0; i < count; i++ ) {
+ buf_list_ex[i].virt_addr = info->memory_base + info->port_array[0]->last_mem_alloc;
+ phys_addr = info->port_array[0]->last_mem_alloc;
+ info->port_array[0]->last_mem_alloc += SCABUFSIZE;
+
+ buf_list[i].buf_ptr = (unsigned short)phys_addr;
+ buf_list[i].buf_base = (unsigned char)(phys_addr >> 16);
+ }
+
+ return 0;
+}
+
+static void free_dma_bufs(SLMP_INFO *info)
+{
+ info->buffer_list = NULL;
+ info->rx_buf_list = NULL;
+ info->tx_buf_list = NULL;
+}
+
+/* allocate buffer large enough to hold max_frame_size.
+ * This buffer is used to pass an assembled frame to the line discipline.
+ */
+static int alloc_tmp_rx_buf(SLMP_INFO *info)
+{
+ info->tmp_rx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
+ if (info->tmp_rx_buf == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static void free_tmp_rx_buf(SLMP_INFO *info)
+{
+ kfree(info->tmp_rx_buf);
+ info->tmp_rx_buf = NULL;
+}
+
+static int claim_resources(SLMP_INFO *info)
+{
+ if (request_mem_region(info->phys_memory_base,SCA_MEM_SIZE,"synclinkmp") == NULL) {
+ printk( "%s(%d):%s mem addr conflict, Addr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->phys_memory_base);
+ info->init_error = DiagStatus_AddressConflict;
+ goto errout;
+ }
+ else
+ info->shared_mem_requested = true;
+
+ if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclinkmp") == NULL) {
+ printk( "%s(%d):%s lcr mem addr conflict, Addr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->phys_lcr_base);
+ info->init_error = DiagStatus_AddressConflict;
+ goto errout;
+ }
+ else
+ info->lcr_mem_requested = true;
+
+ if (request_mem_region(info->phys_sca_base + info->sca_offset,SCA_BASE_SIZE,"synclinkmp") == NULL) {
+ printk( "%s(%d):%s sca mem addr conflict, Addr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->phys_sca_base);
+ info->init_error = DiagStatus_AddressConflict;
+ goto errout;
+ }
+ else
+ info->sca_base_requested = true;
+
+ if (request_mem_region(info->phys_statctrl_base + info->statctrl_offset,SCA_REG_SIZE,"synclinkmp") == NULL) {
+ printk( "%s(%d):%s stat/ctrl mem addr conflict, Addr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->phys_statctrl_base);
+ info->init_error = DiagStatus_AddressConflict;
+ goto errout;
+ }
+ else
+ info->sca_statctrl_requested = true;
+
+ info->memory_base = ioremap_nocache(info->phys_memory_base,
+ SCA_MEM_SIZE);
+ if (!info->memory_base) {
+ printk( "%s(%d):%s Can't map shared memory, MemAddr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->phys_memory_base );
+ info->init_error = DiagStatus_CantAssignPciResources;
+ goto errout;
+ }
+
+ info->lcr_base = ioremap_nocache(info->phys_lcr_base, PAGE_SIZE);
+ if (!info->lcr_base) {
+ printk( "%s(%d):%s Can't map LCR memory, MemAddr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
+ info->init_error = DiagStatus_CantAssignPciResources;
+ goto errout;
+ }
+ info->lcr_base += info->lcr_offset;
+
+ info->sca_base = ioremap_nocache(info->phys_sca_base, PAGE_SIZE);
+ if (!info->sca_base) {
+ printk( "%s(%d):%s Can't map SCA memory, MemAddr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->phys_sca_base );
+ info->init_error = DiagStatus_CantAssignPciResources;
+ goto errout;
+ }
+ info->sca_base += info->sca_offset;
+
+ info->statctrl_base = ioremap_nocache(info->phys_statctrl_base,
+ PAGE_SIZE);
+ if (!info->statctrl_base) {
+ printk( "%s(%d):%s Can't map SCA Status/Control memory, MemAddr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->phys_statctrl_base );
+ info->init_error = DiagStatus_CantAssignPciResources;
+ goto errout;
+ }
+ info->statctrl_base += info->statctrl_offset;
+
+ if ( !memory_test(info) ) {
+ printk( "%s(%d):Shared Memory Test failed for device %s MemAddr=%08X\n",
+ __FILE__,__LINE__,info->device_name, info->phys_memory_base );
+ info->init_error = DiagStatus_MemoryError;
+ goto errout;
+ }
+
+ return 0;
+
+errout:
+ release_resources( info );
+ return -ENODEV;
+}
+
+static void release_resources(SLMP_INFO *info)
+{
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):%s release_resources() entry\n",
+ __FILE__,__LINE__,info->device_name );
+
+ if ( info->irq_requested ) {
+ free_irq(info->irq_level, info);
+ info->irq_requested = false;
+ }
+
+ if ( info->shared_mem_requested ) {
+ release_mem_region(info->phys_memory_base,SCA_MEM_SIZE);
+ info->shared_mem_requested = false;
+ }
+ if ( info->lcr_mem_requested ) {
+ release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
+ info->lcr_mem_requested = false;
+ }
+ if ( info->sca_base_requested ) {
+ release_mem_region(info->phys_sca_base + info->sca_offset,SCA_BASE_SIZE);
+ info->sca_base_requested = false;
+ }
+ if ( info->sca_statctrl_requested ) {
+ release_mem_region(info->phys_statctrl_base + info->statctrl_offset,SCA_REG_SIZE);
+ info->sca_statctrl_requested = false;
+ }
+
+ if (info->memory_base){
+ iounmap(info->memory_base);
+ info->memory_base = NULL;
+ }
+
+ if (info->sca_base) {
+ iounmap(info->sca_base - info->sca_offset);
+ info->sca_base=NULL;
+ }
+
+ if (info->statctrl_base) {
+ iounmap(info->statctrl_base - info->statctrl_offset);
+ info->statctrl_base=NULL;
+ }
+
+ if (info->lcr_base){
+ iounmap(info->lcr_base - info->lcr_offset);
+ info->lcr_base = NULL;
+ }
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):%s release_resources() exit\n",
+ __FILE__,__LINE__,info->device_name );
+}
+
+/* Add the specified device instance data structure to the
+ * global linked list of devices and increment the device count.
+ */
+static void add_device(SLMP_INFO *info)
+{
+ info->next_device = NULL;
+ info->line = synclinkmp_device_count;
+ sprintf(info->device_name,"ttySLM%dp%d",info->adapter_num,info->port_num);
+
+ if (info->line < MAX_DEVICES) {
+ if (maxframe[info->line])
+ info->max_frame_size = maxframe[info->line];
+ }
+
+ synclinkmp_device_count++;
+
+ if ( !synclinkmp_device_list )
+ synclinkmp_device_list = info;
+ else {
+ SLMP_INFO *current_dev = synclinkmp_device_list;
+ while( current_dev->next_device )
+ current_dev = current_dev->next_device;
+ current_dev->next_device = info;
+ }
+
+ if ( info->max_frame_size < 4096 )
+ info->max_frame_size = 4096;
+ else if ( info->max_frame_size > 65535 )
+ info->max_frame_size = 65535;
+
+ printk( "SyncLink MultiPort %s: "
+ "Mem=(%08x %08X %08x %08X) IRQ=%d MaxFrameSize=%u\n",
+ info->device_name,
+ info->phys_sca_base,
+ info->phys_memory_base,
+ info->phys_statctrl_base,
+ info->phys_lcr_base,
+ info->irq_level,
+ info->max_frame_size );
+
+#if SYNCLINK_GENERIC_HDLC
+ hdlcdev_init(info);
+#endif
+}
+
+static const struct tty_port_operations port_ops = {
+ .carrier_raised = carrier_raised,
+ .dtr_rts = dtr_rts,
+};
+
+/* Allocate and initialize a device instance structure
+ *
+ * Return Value: pointer to SLMP_INFO if success, otherwise NULL
+ */
+static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
+{
+ SLMP_INFO *info;
+
+ info = kzalloc(sizeof(SLMP_INFO),
+ GFP_KERNEL);
+
+ if (!info) {
+ printk("%s(%d) Error can't allocate device instance data for adapter %d, port %d\n",
+ __FILE__,__LINE__, adapter_num, port_num);
+ } else {
+ tty_port_init(&info->port);
+ info->port.ops = &port_ops;
+ info->magic = MGSL_MAGIC;
+ INIT_WORK(&info->task, bh_handler);
+ info->max_frame_size = 4096;
+ info->port.close_delay = 5*HZ/10;
+ info->port.closing_wait = 30*HZ;
+ init_waitqueue_head(&info->status_event_wait_q);
+ init_waitqueue_head(&info->event_wait_q);
+ spin_lock_init(&info->netlock);
+ memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
+ info->idle_mode = HDLC_TXIDLE_FLAGS;
+ info->adapter_num = adapter_num;
+ info->port_num = port_num;
+
+ /* Copy configuration info to device instance data */
+ info->irq_level = pdev->irq;
+ info->phys_lcr_base = pci_resource_start(pdev,0);
+ info->phys_sca_base = pci_resource_start(pdev,2);
+ info->phys_memory_base = pci_resource_start(pdev,3);
+ info->phys_statctrl_base = pci_resource_start(pdev,4);
+
+ /* Because ioremap only works on page boundaries we must map
+ * a larger area than is actually implemented for the LCR
+ * memory range. We map a full page starting at the page boundary.
+ */
+ info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
+ info->phys_lcr_base &= ~(PAGE_SIZE-1);
+
+ info->sca_offset = info->phys_sca_base & (PAGE_SIZE-1);
+ info->phys_sca_base &= ~(PAGE_SIZE-1);
+
+ info->statctrl_offset = info->phys_statctrl_base & (PAGE_SIZE-1);
+ info->phys_statctrl_base &= ~(PAGE_SIZE-1);
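+ /* Alignment example (hypothetical BAR value): a base of 0x0000f080
+  * with 4K pages yields an offset of 0x080 and a page-aligned base of
+  * 0x0000f000; claim_resources() adds the offset back after ioremap.
+  */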
+
+ info->bus_type = MGSL_BUS_TYPE_PCI;
+ info->irq_flags = IRQF_SHARED;
+
+ setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
+ setup_timer(&info->status_timer, status_timeout,
+ (unsigned long)info);
+
+ /* Store the PCI9050 misc control register value because a flaw
+ * in the PCI9050 prevents LCR registers from being read if
+ * BIOS assigns an LCR base address with bit 7 set.
+ *
+  * Only the misc control register is accessed, and only write
+  * access is needed, so keep an initial value in the device
+  * instance data and update bits there as the value is written
+  * to the actual misc control register.
+ */
+ info->misc_ctrl_value = 0x087e4546;
+
+ /* initial port state is unknown - if startup errors
+ * occur, init_error will be set to indicate the
+ * problem. Once the port is fully initialized,
+ * this value will be set to 0 to indicate the
+ * port is available.
+ */
+ info->init_error = -1;
+ }
+
+ return info;
+}
+
+static void device_init(int adapter_num, struct pci_dev *pdev)
+{
+ SLMP_INFO *port_array[SCA_MAX_PORTS];
+ int port;
+
+ /* allocate device instances for up to SCA_MAX_PORTS devices */
+ for ( port = 0; port < SCA_MAX_PORTS; ++port ) {
+ port_array[port] = alloc_dev(adapter_num,port,pdev);
+ if( port_array[port] == NULL ) {
+ for ( --port; port >= 0; --port )
+ kfree(port_array[port]);
+ return;
+ }
+ }
+
+ /* give copy of port_array to all ports and add to device list */
+ for ( port = 0; port < SCA_MAX_PORTS; ++port ) {
+ memcpy(port_array[port]->port_array,port_array,sizeof(port_array));
+ add_device( port_array[port] );
+ spin_lock_init(&port_array[port]->lock);
+ }
+
+ /* Allocate and claim adapter resources */
+ if ( !claim_resources(port_array[0]) ) {
+
+ alloc_dma_bufs(port_array[0]);
+
+ /* copy resource information from first port to others */
+ for ( port = 1; port < SCA_MAX_PORTS; ++port ) {
+ port_array[port]->lock = port_array[0]->lock;
+ port_array[port]->irq_level = port_array[0]->irq_level;
+ port_array[port]->memory_base = port_array[0]->memory_base;
+ port_array[port]->sca_base = port_array[0]->sca_base;
+ port_array[port]->statctrl_base = port_array[0]->statctrl_base;
+ port_array[port]->lcr_base = port_array[0]->lcr_base;
+ alloc_dma_bufs(port_array[port]);
+ }
+
+ if ( request_irq(port_array[0]->irq_level,
+ synclinkmp_interrupt,
+ port_array[0]->irq_flags,
+ port_array[0]->device_name,
+ port_array[0]) < 0 ) {
+ printk( "%s(%d):%s Can't request interrupt, IRQ=%d\n",
+ __FILE__,__LINE__,
+ port_array[0]->device_name,
+ port_array[0]->irq_level );
+ }
+ else {
+ port_array[0]->irq_requested = true;
+ adapter_test(port_array[0]);
+ }
+ }
+}
+
+static const struct tty_operations ops = {
+ .open = open,
+ .close = close,
+ .write = write,
+ .put_char = put_char,
+ .flush_chars = flush_chars,
+ .write_room = write_room,
+ .chars_in_buffer = chars_in_buffer,
+ .flush_buffer = flush_buffer,
+ .ioctl = ioctl,
+ .throttle = throttle,
+ .unthrottle = unthrottle,
+ .send_xchar = send_xchar,
+ .break_ctl = set_break,
+ .wait_until_sent = wait_until_sent,
+ .set_termios = set_termios,
+ .stop = tx_hold,
+ .start = tx_release,
+ .hangup = hangup,
+ .tiocmget = tiocmget,
+ .tiocmset = tiocmset,
+ .get_icount = get_icount,
+ .proc_fops = &synclinkmp_proc_fops,
+};
+
+
+static void synclinkmp_cleanup(void)
+{
+ int rc;
+ SLMP_INFO *info;
+ SLMP_INFO *tmp;
+
+ printk("Unloading %s %s\n", driver_name, driver_version);
+
+ if (serial_driver) {
+ if ((rc = tty_unregister_driver(serial_driver)))
+ printk("%s(%d) failed to unregister tty driver err=%d\n",
+ __FILE__,__LINE__,rc);
+ put_tty_driver(serial_driver);
+ }
+
+ /* reset devices */
+ info = synclinkmp_device_list;
+ while(info) {
+ reset_port(info);
+ info = info->next_device;
+ }
+
+ /* release devices */
+ info = synclinkmp_device_list;
+ while(info) {
+#if SYNCLINK_GENERIC_HDLC
+ hdlcdev_exit(info);
+#endif
+ free_dma_bufs(info);
+ free_tmp_rx_buf(info);
+ if ( info->port_num == 0 ) {
+ if (info->sca_base)
+ write_reg(info, LPR, 1); /* set low power mode */
+ release_resources(info);
+ }
+ tmp = info;
+ info = info->next_device;
+ kfree(tmp);
+ }
+
+ pci_unregister_driver(&synclinkmp_pci_driver);
+}
+
+/* Driver initialization entry point.
+ */
+
+static int __init synclinkmp_init(void)
+{
+ int rc;
+
+ if (break_on_load) {
+ synclinkmp_get_text_ptr();
+ BREAKPOINT();
+ }
+
+ printk("%s %s\n", driver_name, driver_version);
+
+ if ((rc = pci_register_driver(&synclinkmp_pci_driver)) < 0) {
+ printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
+ return rc;
+ }
+
+ serial_driver = alloc_tty_driver(128);
+ if (!serial_driver) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ /* Initialize the tty_driver structure */
+
+ serial_driver->owner = THIS_MODULE;
+ serial_driver->driver_name = "synclinkmp";
+ serial_driver->name = "ttySLM";
+ serial_driver->major = ttymajor;
+ serial_driver->minor_start = 64;
+ serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ serial_driver->init_termios = tty_std_termios;
+ serial_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ serial_driver->init_termios.c_ispeed = 9600;
+ serial_driver->init_termios.c_ospeed = 9600;
+ serial_driver->flags = TTY_DRIVER_REAL_RAW;
+ tty_set_operations(serial_driver, &ops);
+ if ((rc = tty_register_driver(serial_driver)) < 0) {
+ printk("%s(%d):Couldn't register serial driver\n",
+ __FILE__,__LINE__);
+ put_tty_driver(serial_driver);
+ serial_driver = NULL;
+ goto error;
+ }
+
+ printk("%s %s, tty major#%d\n",
+ driver_name, driver_version,
+ serial_driver->major);
+
+ return 0;
+
+error:
+ synclinkmp_cleanup();
+ return rc;
+}
+
+static void __exit synclinkmp_exit(void)
+{
+ synclinkmp_cleanup();
+}
+
+module_init(synclinkmp_init);
+module_exit(synclinkmp_exit);
+
+/* Set the port for internal loopback mode.
+ * The TxCLK and RxCLK signals are generated from the BRG and
+ * the TxD is looped back to the RxD internally.
+ */
+static void enable_loopback(SLMP_INFO *info, int enable)
+{
+ if (enable) {
+ /* MD2 (Mode Register 2)
+ * 01..00 CNCT<1..0> Channel Connection 11=Local Loopback
+ */
+ write_reg(info, MD2, (unsigned char)(read_reg(info, MD2) | (BIT1 + BIT0)));
+
+ /* degate external TxC clock source */
+ info->port_array[0]->ctrlreg_value |= (BIT0 << (info->port_num * 2));
+ write_control_reg(info);
+
+ /* RXS/TXS (Rx/Tx clock source)
+ * 07 Reserved, must be 0
+ * 06..04 Clock Source, 100=BRG
+ * 03..00 Clock Divisor, 0000=1
+ */
+ write_reg(info, RXS, 0x40);
+ write_reg(info, TXS, 0x40);
+
+ } else {
+ /* MD2 (Mode Register 2)
+ * 01..00 CNCT<1..0> Channel connection, 0=normal
+ */
+ write_reg(info, MD2, (unsigned char)(read_reg(info, MD2) & ~(BIT1 + BIT0)));
+
+ /* RXS/TXS (Rx/Tx clock source)
+ * 07 Reserved, must be 0
+ * 06..04 Clock Source, 000=RxC/TxC Pin
+ * 03..00 Clock Divisor, 0000=1
+ */
+ write_reg(info, RXS, 0x00);
+ write_reg(info, TXS, 0x00);
+ }
+
+ /* set the configured clock speed if available, otherwise default to 3.6864Mbps */
+ if (info->params.clock_speed)
+ set_rate(info, info->params.clock_speed);
+ else
+ set_rate(info, 3686400);
+}
+
+/* Set the baud rate register to the desired speed
+ *
+ * data_rate data rate of clock in bits per second
+ * A data rate of 0 disables the AUX clock.
+ */
+static void set_rate( SLMP_INFO *info, u32 data_rate )
+{
+ u32 TMCValue;
+ unsigned char BRValue;
+ u32 Divisor=0;
+
+ /* fBRG = fCLK/(TMC * 2^BR)
+ */
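+ /* Worked example (sketch): for data_rate = 9600 with the 14.7456MHz
+  * base clock, Divisor = 14745600/9600 = 1536; TMC is halved once to
+  * 768 with BR=1 for duty cycle, then halved in the loop to 192 with
+  * BR=3, giving fBRG = 14745600/(192 * 2^3) = 9600.
+  */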
+ if (data_rate != 0) {
+ Divisor = 14745600/data_rate;
+ if (!Divisor)
+ Divisor = 1;
+
+ TMCValue = Divisor;
+
+ BRValue = 0;
+ if (TMCValue != 1 && TMCValue != 2) {
+ /* BRValue of 0 provides 50/50 duty cycle *only* when
+ * TMCValue is 1 or 2. BRValue of 1 to 9 always provides
+ * 50/50 duty cycle.
+ */
+ BRValue = 1;
+ TMCValue >>= 1;
+ }
+
+ /* while TMCValue is too big for TMC register, divide
+ * by 2 and increment BR exponent.
+ */
+ for(; TMCValue > 256 && BRValue < 10; BRValue++)
+ TMCValue >>= 1;
+
+ write_reg(info, TXS,
+ (unsigned char)((read_reg(info, TXS) & 0xf0) | BRValue));
+ write_reg(info, RXS,
+ (unsigned char)((read_reg(info, RXS) & 0xf0) | BRValue));
+ write_reg(info, TMC, (unsigned char)TMCValue);
+ }
+ else {
+ write_reg(info, TXS,0);
+ write_reg(info, RXS,0);
+ write_reg(info, TMC, 0);
+ }
+}
+
+/* Disable receiver
+ */
+static void rx_stop(SLMP_INFO *info)
+{
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):%s rx_stop()\n",
+ __FILE__,__LINE__, info->device_name );
+
+ write_reg(info, CMD, RXRESET);
+
+ info->ie0_value &= ~RXRDYE;
+ write_reg(info, IE0, info->ie0_value); /* disable Rx data interrupts */
+
+ write_reg(info, RXDMA + DSR, 0); /* disable Rx DMA */
+ write_reg(info, RXDMA + DCMD, SWABORT); /* reset/init Rx DMA */
+ write_reg(info, RXDMA + DIR, 0); /* disable Rx DMA interrupts */
+
+ info->rx_enabled = false;
+ info->rx_overflow = false;
+}
+
+/* enable the receiver
+ */
+static void rx_start(SLMP_INFO *info)
+{
+ int i;
+
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):%s rx_start()\n",
+ __FILE__,__LINE__, info->device_name );
+
+ write_reg(info, CMD, RXRESET);
+
+ if ( info->params.mode == MGSL_MODE_HDLC ) {
+ /* HDLC, disable IRQ on rxdata */
+ info->ie0_value &= ~RXRDYE;
+ write_reg(info, IE0, info->ie0_value);
+
+ /* Reset all Rx DMA buffers and program rx dma */
+ write_reg(info, RXDMA + DSR, 0); /* disable Rx DMA */
+ write_reg(info, RXDMA + DCMD, SWABORT); /* reset/init Rx DMA */
+
+ for (i = 0; i < info->rx_buf_count; i++) {
+ info->rx_buf_list[i].status = 0xff;
+
+ // throttle to 4 shared memory writes at a time to prevent
+ // hogging local bus (keep latency time for DMA requests low).
+ if (!(i % 4))
+ read_status_reg(info);
+ }
+ info->current_rx_buf = 0;
+
+ /* set current/1st descriptor address */
+ write_reg16(info, RXDMA + CDA,
+ info->rx_buf_list_ex[0].phys_entry);
+
+ /* set new last rx descriptor address */
+ write_reg16(info, RXDMA + EDA,
+ info->rx_buf_list_ex[info->rx_buf_count - 1].phys_entry);
+
+ /* set buffer length (shared by all rx dma data buffers) */
+ write_reg16(info, RXDMA + BFL, SCABUFSIZE);
+
+ write_reg(info, RXDMA + DIR, 0x60); /* enable Rx DMA interrupts (EOM/BOF) */
+ write_reg(info, RXDMA + DSR, 0xf2); /* clear Rx DMA IRQs, enable Rx DMA */
+ } else {
+ /* async, enable IRQ on rxdata */
+ info->ie0_value |= RXRDYE;
+ write_reg(info, IE0, info->ie0_value);
+ }
+
+ write_reg(info, CMD, RXENABLE);
+
+ info->rx_overflow = false;
+ info->rx_enabled = true;
+}
+
+/* Enable the transmitter and send a transmit frame if
+ * one is loaded in the DMA buffers.
+ */
+static void tx_start(SLMP_INFO *info)
+{
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):%s tx_start() tx_count=%d\n",
+ __FILE__,__LINE__, info->device_name,info->tx_count );
+
+ if (!info->tx_enabled ) {
+ write_reg(info, CMD, TXRESET);
+ write_reg(info, CMD, TXENABLE);
+ info->tx_enabled = true;
+ }
+
+ if ( info->tx_count ) {
+
+ /* If auto RTS enabled and RTS is inactive, then assert */
+ /* RTS and set a flag indicating that the driver should */
+ /* negate RTS when the transmission completes. */
+
+ info->drop_rts_on_tx_done = false;
+
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+
+ if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
+ get_signals( info );
+ if ( !(info->serial_signals & SerialSignal_RTS) ) {
+ info->serial_signals |= SerialSignal_RTS;
+ set_signals( info );
+ info->drop_rts_on_tx_done = true;
+ }
+ }
+
+ write_reg16(info, TRC0,
+ (unsigned short)(((tx_negate_fifo_level-1)<<8) + tx_active_fifo_level));
+
+ write_reg(info, TXDMA + DSR, 0); /* disable DMA channel */
+ write_reg(info, TXDMA + DCMD, SWABORT); /* reset/init DMA channel */
+
+ /* set TX CDA (current descriptor address) */
+ write_reg16(info, TXDMA + CDA,
+ info->tx_buf_list_ex[0].phys_entry);
+
+ /* set TX EDA (last descriptor address) */
+ write_reg16(info, TXDMA + EDA,
+ info->tx_buf_list_ex[info->last_tx_buf].phys_entry);
+
+ /* enable underrun IRQ */
+ info->ie1_value &= ~IDLE;
+ info->ie1_value |= UDRN;
+ write_reg(info, IE1, info->ie1_value);
+ write_reg(info, SR1, (unsigned char)(IDLE + UDRN));
+
+ write_reg(info, TXDMA + DIR, 0x40); /* enable Tx DMA interrupts (EOM) */
+ write_reg(info, TXDMA + DSR, 0xf2); /* clear Tx DMA IRQs, enable Tx DMA */
+
+ mod_timer(&info->tx_timer, jiffies +
+ msecs_to_jiffies(5000));
+ }
+ else {
+ tx_load_fifo(info);
+ /* async, enable IRQ on txdata */
+ info->ie0_value |= TXRDYE;
+ write_reg(info, IE0, info->ie0_value);
+ }
+
+ info->tx_active = true;
+ }
+}
+
+/* stop the transmitter and DMA
+ */
+static void tx_stop( SLMP_INFO *info )
+{
+ if (debug_level >= DEBUG_LEVEL_ISR)
+ printk("%s(%d):%s tx_stop()\n",
+ __FILE__,__LINE__, info->device_name );
+
+ del_timer(&info->tx_timer);
+
+ write_reg(info, TXDMA + DSR, 0); /* disable DMA channel */
+ write_reg(info, TXDMA + DCMD, SWABORT); /* reset/init DMA channel */
+
+ write_reg(info, CMD, TXRESET);
+
+ info->ie1_value &= ~(UDRN + IDLE);
+ write_reg(info, IE1, info->ie1_value); /* disable tx status interrupts */
+ write_reg(info, SR1, (unsigned char)(IDLE + UDRN)); /* clear pending */
+
+ info->ie0_value &= ~TXRDYE;
+ write_reg(info, IE0, info->ie0_value); /* disable tx data interrupts */
+
+ info->tx_enabled = false;
+ info->tx_active = false;
+}
+
+/* Fill the transmit FIFO until the FIFO is full or
+ * there is no more data to load.
+ */
+static void tx_load_fifo(SLMP_INFO *info)
+{
+ u8 TwoBytes[2];
+
+ /* do nothing if no tx data is available and no XON/XOFF char is pending */
+
+ if ( !info->tx_count && !info->x_char )
+ return;
+
+ /* load the Transmit FIFO until FIFOs full or all data sent */
+
+ while( info->tx_count && (read_reg(info,SR0) & BIT1) ) {
+
+ /* there is more space in the transmit FIFO and */
+ /* there is more data in transmit buffer */
+
+ if ( (info->tx_count > 1) && !info->x_char ) {
+ /* write 16-bits */
+ TwoBytes[0] = info->tx_buf[info->tx_get++];
+ if (info->tx_get >= info->max_frame_size)
+ info->tx_get -= info->max_frame_size;
+ TwoBytes[1] = info->tx_buf[info->tx_get++];
+ if (info->tx_get >= info->max_frame_size)
+ info->tx_get -= info->max_frame_size;
+
+ write_reg16(info, TRB, *((u16 *)TwoBytes));
+
+ info->tx_count -= 2;
+ info->icount.tx += 2;
+ } else {
+ /* only 1 byte left to transmit or 1 FIFO slot left */
+
+ if (info->x_char) {
+ /* transmit pending high priority char */
+ write_reg(info, TRB, info->x_char);
+ info->x_char = 0;
+ } else {
+ write_reg(info, TRB, info->tx_buf[info->tx_get++]);
+ if (info->tx_get >= info->max_frame_size)
+ info->tx_get -= info->max_frame_size;
+ info->tx_count--;
+ }
+ info->icount.tx++;
+ }
+ }
+}
+
+/* Reset a port to a known state
+ */
+static void reset_port(SLMP_INFO *info)
+{
+ if (info->sca_base) {
+
+ tx_stop(info);
+ rx_stop(info);
+
+ info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ set_signals(info);
+
+ /* disable all port interrupts */
+ info->ie0_value = 0;
+ info->ie1_value = 0;
+ info->ie2_value = 0;
+ write_reg(info, IE0, info->ie0_value);
+ write_reg(info, IE1, info->ie1_value);
+ write_reg(info, IE2, info->ie2_value);
+
+ write_reg(info, CMD, CHRESET);
+ }
+}
+
+/* Reset all the ports to a known state.
+ */
+static void reset_adapter(SLMP_INFO *info)
+{
+ int i;
+
+ for ( i=0; i < SCA_MAX_PORTS; ++i) {
+ if (info->port_array[i])
+ reset_port(info->port_array[i]);
+ }
+}
+
+/* Program port for asynchronous communications.
+ */
+static void async_mode(SLMP_INFO *info)
+{
+
+ unsigned char RegValue;
+
+ tx_stop(info);
+ rx_stop(info);
+
+ /* MD0, Mode Register 0
+ *
+ * 07..05 PRCTL<2..0>, Protocol Mode, 000=async
+ * 04 AUTO, Auto-enable (RTS/CTS/DCD)
+ * 03 Reserved, must be 0
+ * 02 CRCCC, CRC Calculation, 0=disabled
+ * 01..00 STOP<1..0> Stop bits (00=1,10=2)
+ *
+ * 0000 0000
+ */
+ RegValue = 0x00;
+ if (info->params.stop_bits != 1)
+ RegValue |= BIT1;
+ write_reg(info, MD0, RegValue);
+
+ /* MD1, Mode Register 1
+ *
+ * 07..06 BRATE<1..0>, bit rate, 00=1/1 01=1/16 10=1/32 11=1/64
+ * 05..04 TXCHR<1..0>, tx char size, 00=8 bits,01=7,10=6,11=5
+ * 03..02 RXCHR<1..0>, rx char size
+ * 01..00 PMPM<1..0>, Parity mode, 00=none 10=even 11=odd
+ *
+ * 0100 0000
+ */
+ RegValue = 0x40;
+ switch (info->params.data_bits) {
+ case 7: RegValue |= BIT4 + BIT2; break;
+ case 6: RegValue |= BIT5 + BIT3; break;
+ case 5: RegValue |= BIT5 + BIT4 + BIT3 + BIT2; break;
+ }
+ if (info->params.parity != ASYNC_PARITY_NONE) {
+ RegValue |= BIT1;
+ if (info->params.parity == ASYNC_PARITY_ODD)
+ RegValue |= BIT0;
+ }
+ write_reg(info, MD1, RegValue);
+
+ /* MD2, Mode Register 2
+ *
+ * 07..02 Reserved, must be 0
+ * 01..00 CNCT<1..0> Channel connection, 00=normal 11=local loopback
+ *
+ * 0000 0000
+ */
+ RegValue = 0x00;
+ if (info->params.loopback)
+ RegValue |= (BIT1 + BIT0);
+ write_reg(info, MD2, RegValue);
+
+ /* RXS, Receive clock source
+ *
+ * 07 Reserved, must be 0
+ * 06..04 RXCS<2..0>, clock source, 000=RxC Pin, 100=BRG, 110=DPLL
+ * 03..00 RXBR<3..0>, rate divisor, 0000=1
+ */
+ RegValue=BIT6;
+ write_reg(info, RXS, RegValue);
+
+ /* TXS, Transmit clock source
+ *
+ * 07 Reserved, must be 0
+ * 06..04 RXCS<2..0>, clock source, 000=TxC Pin, 100=BRG, 110=Receive Clock
+ * 03..00 RXBR<3..0>, rate divisor, 0000=1
+ */
+ RegValue=BIT6;
+ write_reg(info, TXS, RegValue);
+
+ /* Control Register
+ *
+ * 6,4,2,0 CLKSEL<3..0>, 0 = TcCLK in, 1 = Auxclk out
+ */
+ info->port_array[0]->ctrlreg_value |= (BIT0 << (info->port_num * 2));
+ write_control_reg(info);
+
+ tx_set_idle(info);
+
+ /* RRC Receive Ready Control 0
+ *
+ * 07..05 Reserved, must be 0
+ * 04..00 RRC<4..0> Rx FIFO trigger active 0x00 = 1 byte
+ */
+ write_reg(info, RRC, 0x00);
+
+ /* TRC0 Transmit Ready Control 0
+ *
+ * 07..05 Reserved, must be 0
+ * 04..00 TRC<4..0> Tx FIFO trigger active 0x10 = 16 bytes
+ */
+ write_reg(info, TRC0, 0x10);
+
+ /* TRC1 Transmit Ready Control 1
+ *
+ * 07..05 Reserved, must be 0
+ * 04..00 TRC<4..0> Tx FIFO trigger inactive 0x1e = 31 bytes (full-1)
+ */
+ write_reg(info, TRC1, 0x1e);
+
+ /* CTL, MSCI control register
+ *
+ * 07..06 Reserved, set to 0
+ * 05 UDRNC, underrun control, 0=abort 1=CRC+flag (HDLC/BSC)
+ * 04 IDLC, idle control, 0=mark 1=idle register
+  * 03 BRK, break, 0=off 1=on (async)
+ * 02 SYNCLD, sync char load enable (BSC) 1=enabled
+ * 01 GOP, go active on poll (LOOP mode) 1=enabled
+ * 00 RTS, RTS output control, 0=active 1=inactive
+ *
+ * 0001 0001
+ */
+ RegValue = 0x10;
+ if (!(info->serial_signals & SerialSignal_RTS))
+ RegValue |= 0x01;
+ write_reg(info, CTL, RegValue);
+
+ /* enable status interrupts */
+ info->ie0_value |= TXINTE + RXINTE;
+ write_reg(info, IE0, info->ie0_value);
+
+ /* enable break detect interrupt */
+ info->ie1_value = BRKD;
+ write_reg(info, IE1, info->ie1_value);
+
+ /* enable rx overrun interrupt */
+ info->ie2_value = OVRN;
+ write_reg(info, IE2, info->ie2_value);
+
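+ /* MD1 above selects the 1/16 bit rate prescaler (BRATE=01), so the
+  * BRG below is programmed to 16 times the requested data rate.
+  */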
+ set_rate( info, info->params.data_rate * 16 );
+}
+
+/* Program the SCA for HDLC communications.
+ */
+static void hdlc_mode(SLMP_INFO *info)
+{
+ unsigned char RegValue;
+ u32 DpllDivisor;
+
+ // Can't use DPLL because SCA outputs recovered clock on RxC when
+ // DPLL mode selected. This causes output contention with RxC receiver.
+ // Use of DPLL would require external hardware to disable RxC receiver
+ // when DPLL mode selected.
+ info->params.flags &= ~(HDLC_FLAG_TXC_DPLL + HDLC_FLAG_RXC_DPLL);
+
+ /* disable DMA interrupts */
+ write_reg(info, TXDMA + DIR, 0);
+ write_reg(info, RXDMA + DIR, 0);
+
+ /* MD0, Mode Register 0
+ *
+ * 07..05 PRCTL<2..0>, Protocol Mode, 100=HDLC
+ * 04 AUTO, Auto-enable (RTS/CTS/DCD)
+ * 03 Reserved, must be 0
+ * 02 CRCCC, CRC Calculation, 1=enabled
+ * 01 CRC1, CRC selection, 0=CRC-16,1=CRC-CCITT-16
+ * 00 CRC0, CRC initial value, 1 = all 1s
+ *
+ * 1000 0001
+ */
+ RegValue = 0x81;
+ if (info->params.flags & HDLC_FLAG_AUTO_CTS)
+ RegValue |= BIT4;
+ if (info->params.flags & HDLC_FLAG_AUTO_DCD)
+ RegValue |= BIT4;
+ if (info->params.crc_type == HDLC_CRC_16_CCITT)
+ RegValue |= BIT2 + BIT1;
+ write_reg(info, MD0, RegValue);
+
+ /* MD1, Mode Register 1
+ *
+ * 07..06 ADDRS<1..0>, Address detect, 00=no addr check
+ * 05..04 TXCHR<1..0>, tx char size, 00=8 bits
+ * 03..02 RXCHR<1..0>, rx char size, 00=8 bits
+ * 01..00 PMPM<1..0>, Parity mode, 00=no parity
+ *
+ * 0000 0000
+ */
+ RegValue = 0x00;
+ write_reg(info, MD1, RegValue);
+
+ /* MD2, Mode Register 2
+ *
+ * 07 NRZFM, 0=NRZ, 1=FM
+ * 06..05 CODE<1..0> Encoding, 00=NRZ
+ * 04..03 DRATE<1..0> DPLL Divisor, 00=8
+ * 02 Reserved, must be 0
+ * 01..00 CNCT<1..0> Channel connection, 0=normal
+ *
+ * 0000 0000
+ */
+ RegValue = 0x00;
+ switch(info->params.encoding) {
+ case HDLC_ENCODING_NRZI: RegValue |= BIT5; break;
+ case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT7 + BIT5; break; /* aka FM1 */
+ case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT7 + BIT6; break; /* aka FM0 */
+ case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT7; break; /* aka Manchester */
+#if 0
+ case HDLC_ENCODING_NRZB: /* not supported */
+ case HDLC_ENCODING_NRZI_MARK: /* not supported */
+ case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: /* not supported */
+#endif
+ }
+ if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
+ DpllDivisor = 16;
+ RegValue |= BIT3;
+ } else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
+ DpllDivisor = 8;
+ } else {
+ DpllDivisor = 32;
+ RegValue |= BIT4;
+ }
+ write_reg(info, MD2, RegValue);
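+ /* Note: when the DPLL recovers the receive clock, the BRG must run
+  * at DpllDivisor times the nominal clock_speed; see the set_rate()
+  * call below.
+  */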
+
+
+ /* RXS, Receive clock source
+ *
+ * 07 Reserved, must be 0
+ * 06..04 RXCS<2..0>, clock source, 000=RxC Pin, 100=BRG, 110=DPLL
+ * 03..00 RXBR<3..0>, rate divisor, 0000=1
+ */
+ RegValue=0;
+ if (info->params.flags & HDLC_FLAG_RXC_BRG)
+ RegValue |= BIT6;
+ if (info->params.flags & HDLC_FLAG_RXC_DPLL)
+ RegValue |= BIT6 + BIT5;
+ write_reg(info, RXS, RegValue);
+
+ /* TXS, Transmit clock source
+ *
+ * 07 Reserved, must be 0
+ * 06..04 RXCS<2..0>, clock source, 000=TxC Pin, 100=BRG, 110=Receive Clock
+ * 03..00 RXBR<3..0>, rate divisor, 0000=1
+ */
+ RegValue=0;
+ if (info->params.flags & HDLC_FLAG_TXC_BRG)
+ RegValue |= BIT6;
+ if (info->params.flags & HDLC_FLAG_TXC_DPLL)
+ RegValue |= BIT6 + BIT5;
+ write_reg(info, TXS, RegValue);
+
+ if (info->params.flags & HDLC_FLAG_RXC_DPLL)
+ set_rate(info, info->params.clock_speed * DpllDivisor);
+ else
+ set_rate(info, info->params.clock_speed);
+
+ /* GPDATA (General Purpose I/O Data Register)
+ *
+ * 6,4,2,0 CLKSEL<3..0>, 0 = TcCLK in, 1 = Auxclk out
+ */
+ if (info->params.flags & HDLC_FLAG_TXC_BRG)
+ info->port_array[0]->ctrlreg_value |= (BIT0 << (info->port_num * 2));
+ else
+ info->port_array[0]->ctrlreg_value &= ~(BIT0 << (info->port_num * 2));
+ write_control_reg(info);
+
+ /* RRC Receive Ready Control 0
+ *
+ * 07..05 Reserved, must be 0
+ * 04..00 RRC<4..0> Rx FIFO trigger active
+ */
+ write_reg(info, RRC, rx_active_fifo_level);
+
+ /* TRC0 Transmit Ready Control 0
+ *
+ * 07..05 Reserved, must be 0
+ * 04..00 TRC<4..0> Tx FIFO trigger active
+ */
+ write_reg(info, TRC0, tx_active_fifo_level);
+
+ /* TRC1 Transmit Ready Control 1
+ *
+ * 07..05 Reserved, must be 0
+ * 04..00 TRC<4..0> Tx FIFO trigger inactive 0x1f = 32 bytes (full)
+ */
+ write_reg(info, TRC1, (unsigned char)(tx_negate_fifo_level - 1));
+
+ /* DMR, DMA Mode Register
+ *
+ * 07..05 Reserved, must be 0
+ * 04 TMOD, Transfer Mode: 1=chained-block
+ * 03 Reserved, must be 0
+ * 02 NF, Number of Frames: 1=multi-frame
+ * 01 CNTE, Frame End IRQ Counter enable: 0=disabled
+ * 00 Reserved, must be 0
+ *
+ * 0001 0100
+ */
+ write_reg(info, TXDMA + DMR, 0x14);
+ write_reg(info, RXDMA + DMR, 0x14);
+
+ /* Set chain pointer base (upper 8 bits of 24 bit addr) */
+ write_reg(info, RXDMA + CPB,
+ (unsigned char)(info->buffer_list_phys >> 16));
+
+ /* Set chain pointer base (upper 8 bits of 24 bit addr) */
+ write_reg(info, TXDMA + CPB,
+ (unsigned char)(info->buffer_list_phys >> 16));
+
+ /* enable status interrupts. other code enables/disables
+ * the individual sources for these two interrupt classes.
+ */
+ info->ie0_value |= TXINTE + RXINTE;
+ write_reg(info, IE0, info->ie0_value);
+
+ /* CTL, MSCI control register
+ *
+ * 07..06 Reserved, set to 0
+ * 05 UDRNC, underrun control, 0=abort 1=CRC+flag (HDLC/BSC)
+ * 04 IDLC, idle control, 0=mark 1=idle register
+  * 03 BRK, break, 0=off 1=on (async)
+ * 02 SYNCLD, sync char load enable (BSC) 1=enabled
+ * 01 GOP, go active on poll (LOOP mode) 1=enabled
+ * 00 RTS, RTS output control, 0=active 1=inactive
+ *
+ * 0001 0001
+ */
+ RegValue = 0x10;
+ if (!(info->serial_signals & SerialSignal_RTS))
+ RegValue |= 0x01;
+ write_reg(info, CTL, RegValue);
+
+ /* preamble not supported ! */
+
+ tx_set_idle(info);
+ tx_stop(info);
+ rx_stop(info);
+
+ set_rate(info, info->params.clock_speed);
+
+ if (info->params.loopback)
+ enable_loopback(info,1);
+}
+
+/* Set the transmit HDLC idle mode
+ */
+static void tx_set_idle(SLMP_INFO *info)
+{
+ unsigned char RegValue = 0xff;
+
+ /* Map API idle mode to SCA register bits */
+ switch(info->idle_mode) {
+ case HDLC_TXIDLE_FLAGS: RegValue = 0x7e; break;
+ case HDLC_TXIDLE_ALT_ZEROS_ONES: RegValue = 0xaa; break;
+ case HDLC_TXIDLE_ZEROS: RegValue = 0x00; break;
+ case HDLC_TXIDLE_ONES: RegValue = 0xff; break;
+ case HDLC_TXIDLE_ALT_MARK_SPACE: RegValue = 0xaa; break;
+ case HDLC_TXIDLE_SPACE: RegValue = 0x00; break;
+ case HDLC_TXIDLE_MARK: RegValue = 0xff; break;
+ }
+
+ write_reg(info, IDL, RegValue);
+}
+
+/* Query the adapter for the state of the V24 status (input) signals.
+ */
+static void get_signals(SLMP_INFO *info)
+{
+ u16 status = read_reg(info, SR3);
+ u16 gpstatus = read_status_reg(info);
+ u16 testbit;
+
+ /* clear all serial signals except DTR and RTS */
+ info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
+
+ /* set serial signal bits to reflect SR3 and GPDATA */
+
+ if (!(status & BIT3))
+ info->serial_signals |= SerialSignal_CTS;
+
+ if ( !(status & BIT2))
+ info->serial_signals |= SerialSignal_DCD;
+
+ testbit = BIT1 << (info->port_num * 2); // Port 0..3 RI is GPDATA<1,3,5,7>
+ if (!(gpstatus & testbit))
+ info->serial_signals |= SerialSignal_RI;
+
+ testbit = BIT0 << (info->port_num * 2); // Port 0..3 DSR is GPDATA<0,2,4,6>
+ if (!(gpstatus & testbit))
+ info->serial_signals |= SerialSignal_DSR;
+}
+
+/* Set the state of DTR and RTS based on contents of
+ * serial_signals member of device context.
+ */
+static void set_signals(SLMP_INFO *info)
+{
+ unsigned char RegValue;
+ u16 EnableBit;
+
+ RegValue = read_reg(info, CTL);
+ if (info->serial_signals & SerialSignal_RTS)
+ RegValue &= ~BIT0;
+ else
+ RegValue |= BIT0;
+ write_reg(info, CTL, RegValue);
+
+ // Port 0..3 DTR is ctrl reg <1,3,5,7>
+ EnableBit = BIT1 << (info->port_num*2);
+ if (info->serial_signals & SerialSignal_DTR)
+ info->port_array[0]->ctrlreg_value &= ~EnableBit;
+ else
+ info->port_array[0]->ctrlreg_value |= EnableBit;
+ write_control_reg(info);
+}
+
+/*******************/
+/* DMA Buffer Code */
+/*******************/
+
+/* Set the count for all receive buffers to SCABUFSIZE
+ * and set the current buffer to the first buffer. This effectively
+ * makes all buffers free and discards any data in buffers.
+ */
+static void rx_reset_buffers(SLMP_INFO *info)
+{
+ rx_free_frame_buffers(info, 0, info->rx_buf_count - 1);
+}
+
+/* Free the buffers used by a received frame
+ *
+ * info pointer to device instance data
+ * first index of 1st receive buffer of frame
+ * last index of last receive buffer of frame
+ */
+static void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last)
+{
+ bool done = false;
+
+ while(!done) {
+ /* reset current buffer for reuse */
+ info->rx_buf_list[first].status = 0xff;
+
+ if (first == last) {
+ done = true;
+ /* set new last rx descriptor address */
+ write_reg16(info, RXDMA + EDA, info->rx_buf_list_ex[first].phys_entry);
+ }
+
+ first++;
+ if (first == info->rx_buf_count)
+ first = 0;
+ }
+
+ /* set current buffer to next buffer after last buffer of frame */
+ info->current_rx_buf = first;
+}
+
+/* Return a received frame from the receive DMA buffers.
+ * Only frames received without errors are returned.
+ *
+ * Return Value: true if frame returned, otherwise false
+ */
+static bool rx_get_frame(SLMP_INFO *info)
+{
+ unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
+ unsigned short status;
+ unsigned int framesize = 0;
+ bool ReturnCode = false;
+ unsigned long flags;
+ struct tty_struct *tty = info->port.tty;
+ unsigned char addr_field = 0xff;
+ SCADESC *desc;
+ SCADESC_EX *desc_ex;
+
+CheckAgain:
+ /* assume no frame returned, set zero length */
+ framesize = 0;
+ addr_field = 0xff;
+
+ /*
+ * current_rx_buf points to the 1st buffer of the next available
+	 * receive frame. To find the last buffer of the frame, look for
+	 * a non-zero status field in the buffer entries. (The status
+	 * field is set by the 16C32 after completing a receive frame.)
+ */
+ StartIndex = EndIndex = info->current_rx_buf;
+
+ for ( ;; ) {
+ desc = &info->rx_buf_list[EndIndex];
+ desc_ex = &info->rx_buf_list_ex[EndIndex];
+
+ if (desc->status == 0xff)
+ goto Cleanup; /* current desc still in use, no frames available */
+
+ if (framesize == 0 && info->params.addr_filter != 0xff)
+ addr_field = desc_ex->virt_addr[0];
+
+ framesize += desc->length;
+
+ /* Status != 0 means last buffer of frame */
+ if (desc->status)
+ break;
+
+ EndIndex++;
+ if (EndIndex == info->rx_buf_count)
+ EndIndex = 0;
+
+ if (EndIndex == info->current_rx_buf) {
+ /* all buffers have been 'used' but none mark */
+ /* the end of a frame. Reset buffers and receiver. */
+ if ( info->rx_enabled ){
+ spin_lock_irqsave(&info->lock,flags);
+ rx_start(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+ goto Cleanup;
+ }
+
+ }
+
+ /* check status of receive frame */
+
+	/* frame status is the byte stored after the frame data
+ *
+ * 7 EOM (end of msg), 1 = last buffer of frame
+ * 6 Short Frame, 1 = short frame
+ * 5 Abort, 1 = frame aborted
+ * 4 Residue, 1 = last byte is partial
+ * 3 Overrun, 1 = overrun occurred during frame reception
+ * 2 CRC, 1 = CRC error detected
+ *
+ */
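+	/* Illustrative note (not in the original source): with BIT6=0x40,
+	 * BIT5=0x20, BIT3=0x08 and BIT2=0x04, the error mask tested further
+	 * down (BIT6+BIT5+BIT3+BIT2) works out to 0x6c.
+	 */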
+ status = desc->status;
+
+ /* ignore CRC bit if not using CRC (bit is undefined) */
+	/* Note: CRC is not saved to the data buffer */
+ if (info->params.crc_type == HDLC_CRC_NONE)
+ status &= ~BIT2;
+
+ if (framesize == 0 ||
+ (addr_field != 0xff && addr_field != info->params.addr_filter)) {
+		/* discard 0 byte frames; this seems to occur sometimes
+		 * when the remote is idling flags.
+ */
+ rx_free_frame_buffers(info, StartIndex, EndIndex);
+ goto CheckAgain;
+ }
+
+ if (framesize < 2)
+ status |= BIT6;
+
+ if (status & (BIT6+BIT5+BIT3+BIT2)) {
+ /* received frame has errors,
+ * update counts and mark frame size as 0
+ */
+ if (status & BIT6)
+ info->icount.rxshort++;
+ else if (status & BIT5)
+ info->icount.rxabort++;
+ else if (status & BIT3)
+ info->icount.rxover++;
+ else
+ info->icount.rxcrc++;
+
+ framesize = 0;
+#if SYNCLINK_GENERIC_HDLC
+ {
+ info->netdev->stats.rx_errors++;
+ info->netdev->stats.rx_frame_errors++;
+ }
+#endif
+ }
+
+ if ( debug_level >= DEBUG_LEVEL_BH )
+ printk("%s(%d):%s rx_get_frame() status=%04X size=%d\n",
+ __FILE__,__LINE__,info->device_name,status,framesize);
+
+ if ( debug_level >= DEBUG_LEVEL_DATA )
+ trace_block(info,info->rx_buf_list_ex[StartIndex].virt_addr,
+ min_t(int, framesize,SCABUFSIZE),0);
+
+ if (framesize) {
+ if (framesize > info->max_frame_size)
+ info->icount.rxlong++;
+ else {
+ /* copy dma buffer(s) to contiguous intermediate buffer */
+ int copy_count = framesize;
+ int index = StartIndex;
+ unsigned char *ptmp = info->tmp_rx_buf;
+ info->tmp_rx_buf_count = framesize;
+
+ info->icount.rxok++;
+
+ while(copy_count) {
+ int partial_count = min(copy_count,SCABUFSIZE);
+ memcpy( ptmp,
+ info->rx_buf_list_ex[index].virt_addr,
+ partial_count );
+ ptmp += partial_count;
+ copy_count -= partial_count;
+
+ if ( ++index == info->rx_buf_count )
+ index = 0;
+ }
+
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount)
+ hdlcdev_rx(info,info->tmp_rx_buf,framesize);
+ else
+#endif
+ ldisc_receive_buf(tty,info->tmp_rx_buf,
+ info->flag_buf, framesize);
+ }
+ }
+ /* Free the buffers used by this frame. */
+ rx_free_frame_buffers( info, StartIndex, EndIndex );
+
+ ReturnCode = true;
+
+Cleanup:
+ if ( info->rx_enabled && info->rx_overflow ) {
+		/* Receiver is enabled, but needs to be restarted due to
+ * rx buffer overflow. If buffers are empty, restart receiver.
+ */
+ if (info->rx_buf_list[EndIndex].status == 0xff) {
+ spin_lock_irqsave(&info->lock,flags);
+ rx_start(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+ }
+
+ return ReturnCode;
+}
+
+/* load the transmit DMA buffer with data
+ */
+static void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count)
+{
+ unsigned short copy_count;
+ unsigned int i = 0;
+ SCADESC *desc;
+ SCADESC_EX *desc_ex;
+
+ if ( debug_level >= DEBUG_LEVEL_DATA )
+ trace_block(info,buf, min_t(int, count,SCABUFSIZE), 1);
+
+ /* Copy source buffer to one or more DMA buffers, starting with
+ * the first transmit dma buffer.
+ */
+ for(i=0;;)
+ {
+ copy_count = min_t(unsigned short,count,SCABUFSIZE);
+
+ desc = &info->tx_buf_list[i];
+ desc_ex = &info->tx_buf_list_ex[i];
+
+ load_pci_memory(info, desc_ex->virt_addr,buf,copy_count);
+
+ desc->length = copy_count;
+ desc->status = 0;
+
+ buf += copy_count;
+ count -= copy_count;
+
+ if (!count)
+ break;
+
+ i++;
+ if (i >= info->tx_buf_count)
+ i = 0;
+ }
+
+ info->tx_buf_list[i].status = 0x81; /* set EOM and EOT status */
+ info->last_tx_buf = ++i;
+}
+
+static bool register_test(SLMP_INFO *info)
+{
+ static unsigned char testval[] = {0x00, 0xff, 0xaa, 0x55, 0x69, 0x96};
+ static unsigned int count = ARRAY_SIZE(testval);
+ unsigned int i;
+ bool rc = true;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+ reset_port(info);
+
+ /* assume failure */
+ info->init_error = DiagStatus_AddressFailure;
+
+ /* Write bit patterns to various registers but do it out of */
+ /* sync, then read back and verify values. */
+
+ for (i = 0 ; i < count ; i++) {
+ write_reg(info, TMC, testval[i]);
+ write_reg(info, IDL, testval[(i+1)%count]);
+ write_reg(info, SA0, testval[(i+2)%count]);
+ write_reg(info, SA1, testval[(i+3)%count]);
+
+ if ( (read_reg(info, TMC) != testval[i]) ||
+ (read_reg(info, IDL) != testval[(i+1)%count]) ||
+ (read_reg(info, SA0) != testval[(i+2)%count]) ||
+ (read_reg(info, SA1) != testval[(i+3)%count]) )
+ {
+ rc = false;
+ break;
+ }
+ }
+
+ reset_port(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ return rc;
+}
+
+static bool irq_test(SLMP_INFO *info)
+{
+ unsigned long timeout;
+ unsigned long flags;
+
+ unsigned char timer = (info->port_num & 1) ? TIMER2 : TIMER0;
+
+ spin_lock_irqsave(&info->lock,flags);
+ reset_port(info);
+
+ /* assume failure */
+ info->init_error = DiagStatus_IrqFailure;
+ info->irq_occurred = false;
+
+ /* setup timer0 on SCA0 to interrupt */
+
+ /* IER2<7..4> = timer<3..0> interrupt enables (1=enabled) */
+ write_reg(info, IER2, (unsigned char)((info->port_num & 1) ? BIT6 : BIT4));
+
+ write_reg(info, (unsigned char)(timer + TEPR), 0); /* timer expand prescale */
+ write_reg16(info, (unsigned char)(timer + TCONR), 1); /* timer constant */
+
+
+ /* TMCS, Timer Control/Status Register
+ *
+ * 07 CMF, Compare match flag (read only) 1=match
+ * 06 ECMI, CMF Interrupt Enable: 1=enabled
+ * 05 Reserved, must be 0
+ * 04 TME, Timer Enable
+ * 03..00 Reserved, must be 0
+ *
+ * 0101 0000
+ */
+ write_reg(info, (unsigned char)(timer + TMCS), 0x50);
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ timeout=100;
+ while( timeout-- && !info->irq_occurred ) {
+ msleep_interruptible(10);
+ }
+
+ spin_lock_irqsave(&info->lock,flags);
+ reset_port(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ return info->irq_occurred;
+}
+
+/* initialize individual SCA device (2 ports)
+ */
+static bool sca_init(SLMP_INFO *info)
+{
+ /* set wait controller to single mem partition (low), no wait states */
+ write_reg(info, PABR0, 0); /* wait controller addr boundary 0 */
+ write_reg(info, PABR1, 0); /* wait controller addr boundary 1 */
+ write_reg(info, WCRL, 0); /* wait controller low range */
+ write_reg(info, WCRM, 0); /* wait controller mid range */
+ write_reg(info, WCRH, 0); /* wait controller high range */
+
+ /* DPCR, DMA Priority Control
+ *
+ * 07..05 Not used, must be 0
+ * 04 BRC, bus release condition: 0=all transfers complete
+ * 03 CCC, channel change condition: 0=every cycle
+ * 02..00 PR<2..0>, priority 100=round robin
+ *
+ * 00000100 = 0x04
+ */
+ write_reg(info, DPCR, dma_priority);
+
+ /* DMA Master Enable, BIT7: 1=enable all channels */
+ write_reg(info, DMER, 0x80);
+
+ /* enable all interrupt classes */
+ write_reg(info, IER0, 0xff); /* TxRDY,RxRDY,TxINT,RxINT (ports 0-1) */
+ write_reg(info, IER1, 0xff); /* DMIB,DMIA (channels 0-3) */
+ write_reg(info, IER2, 0xf0); /* TIRQ (timers 0-3) */
+
+ /* ITCR, interrupt control register
+ * 07 IPC, interrupt priority, 0=MSCI->DMA
+ * 06..05 IAK<1..0>, Acknowledge cycle, 00=non-ack cycle
+ * 04 VOS, Vector Output, 0=unmodified vector
+ * 03..00 Reserved, must be 0
+ */
+ write_reg(info, ITCR, 0);
+
+ return true;
+}
+
+/* initialize adapter hardware
+ */
+static bool init_adapter(SLMP_INFO *info)
+{
+ int i;
+
+ /* Set BIT30 of Local Control Reg 0x50 to reset SCA */
+ volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
+ u32 readval;
+
+ info->misc_ctrl_value |= BIT30;
+ *MiscCtrl = info->misc_ctrl_value;
+
+ /*
+ * Force at least 170ns delay before clearing
+ * reset bit. Each read from LCR takes at least
+ * 30ns so 10 times for 300ns to be safe.
+ */
+ for(i=0;i<10;i++)
+ readval = *MiscCtrl;
+
+ info->misc_ctrl_value &= ~BIT30;
+ *MiscCtrl = info->misc_ctrl_value;
+
+ /* init control reg (all DTRs off, all clksel=input) */
+ info->ctrlreg_value = 0xaa;
+ write_control_reg(info);
+
+ {
+ volatile u32 *LCR1BRDR = (u32 *)(info->lcr_base + 0x2c);
+ lcr1_brdr_value &= ~(BIT5 + BIT4 + BIT3);
+
+ switch(read_ahead_count)
+ {
+ case 16:
+ lcr1_brdr_value |= BIT5 + BIT4 + BIT3;
+ break;
+ case 8:
+ lcr1_brdr_value |= BIT5 + BIT4;
+ break;
+ case 4:
+ lcr1_brdr_value |= BIT5 + BIT3;
+ break;
+ case 0:
+ lcr1_brdr_value |= BIT5;
+ break;
+ }
+
+ *LCR1BRDR = lcr1_brdr_value;
+ *MiscCtrl = misc_ctrl_value;
+ }
+
+ sca_init(info->port_array[0]);
+ sca_init(info->port_array[2]);
+
+ return true;
+}
+
+/* Loopback an HDLC frame to test the hardware
+ * interrupt and DMA functions.
+ */
+static bool loopback_test(SLMP_INFO *info)
+{
+#define TESTFRAMESIZE 20
+
+ unsigned long timeout;
+ u16 count = TESTFRAMESIZE;
+ unsigned char buf[TESTFRAMESIZE];
+ bool rc = false;
+ unsigned long flags;
+
+ struct tty_struct *oldtty = info->port.tty;
+ u32 speed = info->params.clock_speed;
+
+ info->params.clock_speed = 3686400;
+ info->port.tty = NULL;
+
+ /* assume failure */
+ info->init_error = DiagStatus_DmaFailure;
+
+ /* build and send transmit frame */
+ for (count = 0; count < TESTFRAMESIZE;++count)
+ buf[count] = (unsigned char)count;
+
+ memset(info->tmp_rx_buf,0,TESTFRAMESIZE);
+
+	/* program hardware for HDLC and enable receiver */
+ spin_lock_irqsave(&info->lock,flags);
+ hdlc_mode(info);
+ enable_loopback(info,1);
+ rx_start(info);
+ info->tx_count = count;
+ tx_load_dma_buffer(info,buf,count);
+ tx_start(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ /* wait for receive complete */
+ /* Set a timeout for waiting for interrupt. */
+ for ( timeout = 100; timeout; --timeout ) {
+ msleep_interruptible(10);
+
+ if (rx_get_frame(info)) {
+ rc = true;
+ break;
+ }
+ }
+
+ /* verify received frame length and contents */
+ if (rc &&
+ ( info->tmp_rx_buf_count != count ||
+ memcmp(buf, info->tmp_rx_buf,count))) {
+ rc = false;
+ }
+
+ spin_lock_irqsave(&info->lock,flags);
+ reset_adapter(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ info->params.clock_speed = speed;
+ info->port.tty = oldtty;
+
+ return rc;
+}
+
+/* Perform diagnostics on hardware
+ */
+static int adapter_test( SLMP_INFO *info )
+{
+ unsigned long flags;
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):Testing device %s\n",
+ __FILE__,__LINE__,info->device_name );
+
+ spin_lock_irqsave(&info->lock,flags);
+ init_adapter(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ info->port_array[0]->port_count = 0;
+
+ if ( register_test(info->port_array[0]) &&
+ register_test(info->port_array[1])) {
+
+ info->port_array[0]->port_count = 2;
+
+ if ( register_test(info->port_array[2]) &&
+ register_test(info->port_array[3]) )
+ info->port_array[0]->port_count += 2;
+ }
+ else {
+ printk( "%s(%d):Register test failure for device %s Addr=%08lX\n",
+ __FILE__,__LINE__,info->device_name, (unsigned long)(info->phys_sca_base));
+ return -ENODEV;
+ }
+
+ if ( !irq_test(info->port_array[0]) ||
+ !irq_test(info->port_array[1]) ||
+ (info->port_count == 4 && !irq_test(info->port_array[2])) ||
+ (info->port_count == 4 && !irq_test(info->port_array[3]))) {
+ printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
+ __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
+ return -ENODEV;
+ }
+
+ if (!loopback_test(info->port_array[0]) ||
+ !loopback_test(info->port_array[1]) ||
+ (info->port_count == 4 && !loopback_test(info->port_array[2])) ||
+ (info->port_count == 4 && !loopback_test(info->port_array[3]))) {
+ printk( "%s(%d):DMA test failure for device %s\n",
+ __FILE__,__LINE__,info->device_name);
+ return -ENODEV;
+ }
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):device %s passed diagnostics\n",
+ __FILE__,__LINE__,info->device_name );
+
+ info->port_array[0]->init_error = 0;
+ info->port_array[1]->init_error = 0;
+ if ( info->port_count > 2 ) {
+ info->port_array[2]->init_error = 0;
+ info->port_array[3]->init_error = 0;
+ }
+
+ return 0;
+}
+
+/* Test the shared memory on a PCI adapter.
+ */
+static bool memory_test(SLMP_INFO *info)
+{
+ static unsigned long testval[] = { 0x0, 0x55555555, 0xaaaaaaaa,
+ 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
+ unsigned long count = ARRAY_SIZE(testval);
+ unsigned long i;
+ unsigned long limit = SCA_MEM_SIZE/sizeof(unsigned long);
+ unsigned long * addr = (unsigned long *)info->memory_base;
+
+ /* Test data lines with test pattern at one location. */
+
+ for ( i = 0 ; i < count ; i++ ) {
+ *addr = testval[i];
+ if ( *addr != testval[i] )
+ return false;
+ }
+
+ /* Test address lines with incrementing pattern over */
+ /* entire address range. */
+
+ for ( i = 0 ; i < limit ; i++ ) {
+ *addr = i * 4;
+ addr++;
+ }
+
+ addr = (unsigned long *)info->memory_base;
+
+ for ( i = 0 ; i < limit ; i++ ) {
+ if ( *addr != i * 4 )
+ return false;
+ addr++;
+ }
+
+ memset( info->memory_base, 0, SCA_MEM_SIZE );
+ return true;
+}
+
+/* Load data into PCI adapter shared memory.
+ *
+ * The PCI9050 releases control of the local bus
+ * after completing the current read or write operation.
+ *
+ * While the PCI9050 write FIFO is not empty, the
+ * PCI9050 treats all of the writes as a single transaction
+ * and does not release the bus. This causes DMA latency problems
+ * at high speeds when copying large data blocks to the shared memory.
+ *
+ * This function breaks a write into multiple transactions by
+ * interleaving a read which flushes the write FIFO and 'completes'
+ * the write transaction. This allows any pending DMA request to gain control
+ * of the local bus in a timely fashion.
+ */
+static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count)
+{
+ /* A load interval of 16 allows for 4 32-bit writes at */
+ /* 136ns each for a maximum latency of 542ns on the local bus.*/
+
+ unsigned short interval = count / sca_pci_load_interval;
+ unsigned short i;
+
+ for ( i = 0 ; i < interval ; i++ )
+ {
+ memcpy(dest, src, sca_pci_load_interval);
+ read_status_reg(info);
+ dest += sca_pci_load_interval;
+ src += sca_pci_load_interval;
+ }
+
+ memcpy(dest, src, count % sca_pci_load_interval);
+}
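+
+/* Worked example (illustrative, hypothetical values): with
+ * sca_pci_load_interval == 16, a 100 byte load is issued as six 16 byte
+ * bursts (interval = 100/16 = 6), each followed by a flushing read of the
+ * status register, plus a final 4 byte tail copy (100 % 16 = 4).
+ */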
+
+static void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
+{
+ int i;
+ int linecount;
+ if (xmit)
+ printk("%s tx data:\n",info->device_name);
+ else
+ printk("%s rx data:\n",info->device_name);
+
+ while(count) {
+ if (count > 16)
+ linecount = 16;
+ else
+ linecount = count;
+
+ for(i=0;i<linecount;i++)
+ printk("%02X ",(unsigned char)data[i]);
+ for(;i<17;i++)
+ printk(" ");
+ for(i=0;i<linecount;i++) {
+ if (data[i]>=040 && data[i]<=0176)
+ printk("%c",data[i]);
+ else
+ printk(".");
+ }
+ printk("\n");
+
+ data += linecount;
+ count -= linecount;
+ }
+} /* end of trace_block() */
+
+/* called when HDLC frame times out
+ * update stats and do tx completion processing
+ */
+static void tx_timeout(unsigned long context)
+{
+ SLMP_INFO *info = (SLMP_INFO*)context;
+ unsigned long flags;
+
+ if ( debug_level >= DEBUG_LEVEL_INFO )
+ printk( "%s(%d):%s tx_timeout()\n",
+ __FILE__,__LINE__,info->device_name);
+ if(info->tx_active && info->params.mode == MGSL_MODE_HDLC) {
+ info->icount.txtimeout++;
+ }
+ spin_lock_irqsave(&info->lock,flags);
+ info->tx_active = false;
+ info->tx_count = info->tx_put = info->tx_get = 0;
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+#if SYNCLINK_GENERIC_HDLC
+ if (info->netcount)
+ hdlcdev_tx_done(info);
+ else
+#endif
+ bh_transmit(info);
+}
+
+/* called to periodically check the DSR/RI modem signal input status
+ */
+static void status_timeout(unsigned long context)
+{
+ u16 status = 0;
+ SLMP_INFO *info = (SLMP_INFO*)context;
+ unsigned long flags;
+ unsigned char delta;
+
+
+ spin_lock_irqsave(&info->lock,flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ /* check for DSR/RI state change */
+
+ delta = info->old_signals ^ info->serial_signals;
+ info->old_signals = info->serial_signals;
+
+ if (delta & SerialSignal_DSR)
+ status |= MISCSTATUS_DSR_LATCHED|(info->serial_signals&SerialSignal_DSR);
+
+ if (delta & SerialSignal_RI)
+ status |= MISCSTATUS_RI_LATCHED|(info->serial_signals&SerialSignal_RI);
+
+ if (delta & SerialSignal_DCD)
+ status |= MISCSTATUS_DCD_LATCHED|(info->serial_signals&SerialSignal_DCD);
+
+ if (delta & SerialSignal_CTS)
+ status |= MISCSTATUS_CTS_LATCHED|(info->serial_signals&SerialSignal_CTS);
+
+ if (status)
+ isr_io_pin(info,status);
+
+ mod_timer(&info->status_timer, jiffies + msecs_to_jiffies(10));
+}
+
+
+/* Register Access Routines -
+ * All registers are memory mapped
+ */
+#define CALC_REGADDR() \
+ unsigned char * RegAddr = (unsigned char*)(info->sca_base + Addr); \
+ if (info->port_num > 1) \
+ RegAddr += 256; /* port 0-1 SCA0, 2-3 SCA1 */ \
+ if ( info->port_num & 1) { \
+ if (Addr > 0x7f) \
+ RegAddr += 0x40; /* DMA access */ \
+ else if (Addr > 0x1f && Addr < 0x60) \
+ RegAddr += 0x20; /* MSCI access */ \
+ }
+
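+/* Worked examples (illustrative, not part of the original driver): for
+ * port 0, MSCI register 0x24 maps to sca_base + 0x24; for port 1 (odd port,
+ * same SCA) it maps to sca_base + 0x24 + 0x20; for port 2 (second SCA) to
+ * sca_base + 0x24 + 256; and for port 3 to sca_base + 0x24 + 256 + 0x20.
+ * DMA registers (Addr > 0x7f) on odd ports get a 0x40 offset instead.
+ */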
+
+static unsigned char read_reg(SLMP_INFO * info, unsigned char Addr)
+{
+ CALC_REGADDR();
+ return *RegAddr;
+}
+static void write_reg(SLMP_INFO * info, unsigned char Addr, unsigned char Value)
+{
+ CALC_REGADDR();
+ *RegAddr = Value;
+}
+
+static u16 read_reg16(SLMP_INFO * info, unsigned char Addr)
+{
+ CALC_REGADDR();
+ return *((u16 *)RegAddr);
+}
+
+static void write_reg16(SLMP_INFO * info, unsigned char Addr, u16 Value)
+{
+ CALC_REGADDR();
+ *((u16 *)RegAddr) = Value;
+}
+
+static unsigned char read_status_reg(SLMP_INFO * info)
+{
+ unsigned char *RegAddr = (unsigned char *)info->statctrl_base;
+ return *RegAddr;
+}
+
+static void write_control_reg(SLMP_INFO * info)
+{
+ unsigned char *RegAddr = (unsigned char *)info->statctrl_base;
+ *RegAddr = info->port_array[0]->ctrlreg_value;
+}
+
+
+static int __devinit synclinkmp_init_one (struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ if (pci_enable_device(dev)) {
+ printk("error enabling pci device %p\n", dev);
+ return -EIO;
+ }
+ device_init( ++synclinkmp_adapter_count, dev );
+ return 0;
+}
+
+static void __devexit synclinkmp_remove_one (struct pci_dev *dev)
+{
+}
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
new file mode 100644
index 00000000..43db715f
--- /dev/null
+++ b/drivers/tty/sysrq.c
@@ -0,0 +1,905 @@
+/*
+ * Linux Magic System Request Key Hacks
+ *
+ * (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ * based on ideas by Pavel Machek <pavel@atrey.karlin.mff.cuni.cz>
+ *
+ * (c) 2000 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
+ * overhauled to use key registration
+ * based upon discussions in irc://irc.openprojects.net/#kernelnewbies
+ *
+ * Copyright (c) 2010 Dmitry Torokhov
+ * Input handler conversion
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/reboot.h>
+#include <linux/sysrq.h>
+#include <linux/kbd_kern.h>
+#include <linux/proc_fs.h>
+#include <linux/nmi.h>
+#include <linux/quotaops.h>
+#include <linux/perf_event.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/suspend.h>
+#include <linux/writeback.h>
+#include <linux/buffer_head.h> /* for fsync_bdev() */
+#include <linux/swap.h>
+#include <linux/spinlock.h>
+#include <linux/vt_kern.h>
+#include <linux/workqueue.h>
+#include <linux/hrtimer.h>
+#include <linux/oom.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+
+#include <asm/ptrace.h>
+#include <asm/irq_regs.h>
+
+/* Whether we react to sysrq keys or just ignore them */
+static int __read_mostly sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
+static bool __read_mostly sysrq_always_enabled;
+
+static bool sysrq_on(void)
+{
+ return sysrq_enabled || sysrq_always_enabled;
+}
+
+/*
+ * A value of 1 means 'all', other nonzero values are an op mask:
+ */
+static bool sysrq_on_mask(int mask)
+{
+ return sysrq_always_enabled ||
+ sysrq_enabled == 1 ||
+ (sysrq_enabled & mask);
+}
+
+static int __init sysrq_always_enabled_setup(char *str)
+{
+ sysrq_always_enabled = true;
+ pr_info("sysrq always enabled.\n");
+
+ return 1;
+}
+
+__setup("sysrq_always_enabled", sysrq_always_enabled_setup);
+
+
+static void sysrq_handle_loglevel(int key)
+{
+ int i;
+
+ i = key - '0';
+ console_loglevel = 7;
+ printk("Loglevel set to %d\n", i);
+ console_loglevel = i;
+}
+static struct sysrq_key_op sysrq_loglevel_op = {
+ .handler = sysrq_handle_loglevel,
+ .help_msg = "loglevel(0-9)",
+ .action_msg = "Changing Loglevel",
+ .enable_mask = SYSRQ_ENABLE_LOG,
+};
+
+#ifdef CONFIG_VT
+static void sysrq_handle_SAK(int key)
+{
+ struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
+ schedule_work(SAK_work);
+}
+static struct sysrq_key_op sysrq_SAK_op = {
+ .handler = sysrq_handle_SAK,
+ .help_msg = "saK",
+ .action_msg = "SAK",
+ .enable_mask = SYSRQ_ENABLE_KEYBOARD,
+};
+#else
+#define sysrq_SAK_op (*(struct sysrq_key_op *)NULL)
+#endif
+
+#ifdef CONFIG_VT
+static void sysrq_handle_unraw(int key)
+{
+ struct kbd_struct *kbd = &kbd_table[fg_console];
+
+ if (kbd)
+ kbd->kbdmode = default_utf8 ? VC_UNICODE : VC_XLATE;
+}
+static struct sysrq_key_op sysrq_unraw_op = {
+ .handler = sysrq_handle_unraw,
+ .help_msg = "unRaw",
+ .action_msg = "Keyboard mode set to system default",
+ .enable_mask = SYSRQ_ENABLE_KEYBOARD,
+};
+#else
+#define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
+#endif /* CONFIG_VT */
+
+static void sysrq_handle_crash(int key)
+{
+ char *killer = NULL;
+
+ panic_on_oops = 1; /* force panic */
+ wmb();
+ *killer = 1;
+}
+static struct sysrq_key_op sysrq_crash_op = {
+ .handler = sysrq_handle_crash,
+ .help_msg = "Crash",
+ .action_msg = "Trigger a crash",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+static void sysrq_handle_reboot(int key)
+{
+ lockdep_off();
+ local_irq_enable();
+ emergency_restart();
+}
+static struct sysrq_key_op sysrq_reboot_op = {
+ .handler = sysrq_handle_reboot,
+ .help_msg = "reBoot",
+ .action_msg = "Resetting",
+ .enable_mask = SYSRQ_ENABLE_BOOT,
+};
+
+static void sysrq_handle_sync(int key)
+{
+ emergency_sync();
+}
+static struct sysrq_key_op sysrq_sync_op = {
+ .handler = sysrq_handle_sync,
+ .help_msg = "Sync",
+ .action_msg = "Emergency Sync",
+ .enable_mask = SYSRQ_ENABLE_SYNC,
+};
+
+static void sysrq_handle_show_timers(int key)
+{
+ sysrq_timer_list_show();
+}
+
+static struct sysrq_key_op sysrq_show_timers_op = {
+ .handler = sysrq_handle_show_timers,
+ .help_msg = "show-all-timers(Q)",
+ .action_msg = "Show clockevent devices & pending hrtimers (no others)",
+};
+
+static void sysrq_handle_mountro(int key)
+{
+ emergency_remount();
+}
+static struct sysrq_key_op sysrq_mountro_op = {
+ .handler = sysrq_handle_mountro,
+ .help_msg = "Unmount",
+ .action_msg = "Emergency Remount R/O",
+ .enable_mask = SYSRQ_ENABLE_REMOUNT,
+};
+
+#ifdef CONFIG_LOCKDEP
+static void sysrq_handle_showlocks(int key)
+{
+ debug_show_all_locks();
+}
+
+static struct sysrq_key_op sysrq_showlocks_op = {
+ .handler = sysrq_handle_showlocks,
+ .help_msg = "show-all-locks(D)",
+ .action_msg = "Show Locks Held",
+};
+#else
+#define sysrq_showlocks_op (*(struct sysrq_key_op *)NULL)
+#endif
+
+#ifdef CONFIG_SMP
+static DEFINE_SPINLOCK(show_lock);
+
+static void showacpu(void *dummy)
+{
+ unsigned long flags;
+
+ /* Idle CPUs have no interesting backtrace. */
+ if (idle_cpu(smp_processor_id()))
+ return;
+
+ spin_lock_irqsave(&show_lock, flags);
+ printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+ show_stack(NULL, NULL);
+ spin_unlock_irqrestore(&show_lock, flags);
+}
+
+static void sysrq_showregs_othercpus(struct work_struct *dummy)
+{
+ smp_call_function(showacpu, NULL, 0);
+}
+
+static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
+
+static void sysrq_handle_showallcpus(int key)
+{
+ /*
+ * Fall back to the workqueue based printing if the
+ * backtrace printing did not succeed or the
+ * architecture has no support for it:
+ */
+ if (!trigger_all_cpu_backtrace()) {
+ struct pt_regs *regs = get_irq_regs();
+
+ if (regs) {
+ printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+ show_regs(regs);
+ }
+ schedule_work(&sysrq_showallcpus);
+ }
+}
+
+static struct sysrq_key_op sysrq_showallcpus_op = {
+ .handler = sysrq_handle_showallcpus,
+ .help_msg = "show-backtrace-all-active-cpus(L)",
+ .action_msg = "Show backtrace of all active CPUs",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+#endif
+
+static void sysrq_handle_showregs(int key)
+{
+ struct pt_regs *regs = get_irq_regs();
+ if (regs)
+ show_regs(regs);
+ perf_event_print_debug();
+}
+static struct sysrq_key_op sysrq_showregs_op = {
+ .handler = sysrq_handle_showregs,
+ .help_msg = "show-registers(P)",
+ .action_msg = "Show Regs",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+static void sysrq_handle_showstate(int key)
+{
+ show_state();
+}
+static struct sysrq_key_op sysrq_showstate_op = {
+ .handler = sysrq_handle_showstate,
+ .help_msg = "show-task-states(T)",
+ .action_msg = "Show State",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+static void sysrq_handle_showstate_blocked(int key)
+{
+ show_state_filter(TASK_UNINTERRUPTIBLE);
+}
+static struct sysrq_key_op sysrq_showstate_blocked_op = {
+ .handler = sysrq_handle_showstate_blocked,
+ .help_msg = "show-blocked-tasks(W)",
+ .action_msg = "Show Blocked State",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+#ifdef CONFIG_TRACING
+#include <linux/ftrace.h>
+
+static void sysrq_ftrace_dump(int key)
+{
+ ftrace_dump(DUMP_ALL);
+}
+static struct sysrq_key_op sysrq_ftrace_dump_op = {
+ .handler = sysrq_ftrace_dump,
+ .help_msg = "dump-ftrace-buffer(Z)",
+ .action_msg = "Dump ftrace buffer",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+#else
+#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
+#endif
+
+static void sysrq_handle_showmem(int key)
+{
+ show_mem(0);
+}
+static struct sysrq_key_op sysrq_showmem_op = {
+ .handler = sysrq_handle_showmem,
+ .help_msg = "show-memory-usage(M)",
+ .action_msg = "Show Memory",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+/*
+ * Signal sysrq helper function. Sends a signal to all user processes.
+ */
+static void send_sig_all(int sig)
+{
+ struct task_struct *p;
+
+ for_each_process(p) {
+ if (p->mm && !is_global_init(p))
+ /* Not swapper, init nor kernel thread */
+ force_sig(sig, p);
+ }
+}
+
+static void sysrq_handle_term(int key)
+{
+ send_sig_all(SIGTERM);
+ console_loglevel = 8;
+}
+static struct sysrq_key_op sysrq_term_op = {
+ .handler = sysrq_handle_term,
+ .help_msg = "terminate-all-tasks(E)",
+ .action_msg = "Terminate All Tasks",
+ .enable_mask = SYSRQ_ENABLE_SIGNAL,
+};
+
+static void moom_callback(struct work_struct *ignored)
+{
+ out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0, NULL);
+}
+
+static DECLARE_WORK(moom_work, moom_callback);
+
+static void sysrq_handle_moom(int key)
+{
+ schedule_work(&moom_work);
+}
+static struct sysrq_key_op sysrq_moom_op = {
+ .handler = sysrq_handle_moom,
+ .help_msg = "memory-full-oom-kill(F)",
+ .action_msg = "Manual OOM execution",
+ .enable_mask = SYSRQ_ENABLE_SIGNAL,
+};
+
+#ifdef CONFIG_BLOCK
+static void sysrq_handle_thaw(int key)
+{
+ emergency_thaw_all();
+}
+static struct sysrq_key_op sysrq_thaw_op = {
+ .handler = sysrq_handle_thaw,
+ .help_msg = "thaw-filesystems(J)",
+ .action_msg = "Emergency Thaw of all frozen filesystems",
+ .enable_mask = SYSRQ_ENABLE_SIGNAL,
+};
+#endif
+
+static void sysrq_handle_kill(int key)
+{
+ send_sig_all(SIGKILL);
+ console_loglevel = 8;
+}
+static struct sysrq_key_op sysrq_kill_op = {
+ .handler = sysrq_handle_kill,
+ .help_msg = "kill-all-tasks(I)",
+ .action_msg = "Kill All Tasks",
+ .enable_mask = SYSRQ_ENABLE_SIGNAL,
+};
+
+static void sysrq_handle_unrt(int key)
+{
+ normalize_rt_tasks();
+}
+static struct sysrq_key_op sysrq_unrt_op = {
+ .handler = sysrq_handle_unrt,
+ .help_msg = "nice-all-RT-tasks(N)",
+ .action_msg = "Nice All RT Tasks",
+ .enable_mask = SYSRQ_ENABLE_RTNICE,
+};
+
+/* Key Operations table and lock */
+static DEFINE_SPINLOCK(sysrq_key_table_lock);
+
+static struct sysrq_key_op *sysrq_key_table[36] = {
+ &sysrq_loglevel_op, /* 0 */
+ &sysrq_loglevel_op, /* 1 */
+ &sysrq_loglevel_op, /* 2 */
+ &sysrq_loglevel_op, /* 3 */
+ &sysrq_loglevel_op, /* 4 */
+ &sysrq_loglevel_op, /* 5 */
+ &sysrq_loglevel_op, /* 6 */
+ &sysrq_loglevel_op, /* 7 */
+ &sysrq_loglevel_op, /* 8 */
+ &sysrq_loglevel_op, /* 9 */
+
+ /*
+ * a: Don't use for system provided sysrqs, it is handled specially on
+ * sparc and will never arrive.
+ */
+ NULL, /* a */
+ &sysrq_reboot_op, /* b */
+ &sysrq_crash_op, /* c & ibm_emac driver debug */
+ &sysrq_showlocks_op, /* d */
+ &sysrq_term_op, /* e */
+ &sysrq_moom_op, /* f */
+ /* g: May be registered for the kernel debugger */
+ NULL, /* g */
+ NULL, /* h - reserved for help */
+ &sysrq_kill_op, /* i */
+#ifdef CONFIG_BLOCK
+ &sysrq_thaw_op, /* j */
+#else
+ NULL, /* j */
+#endif
+ &sysrq_SAK_op, /* k */
+#ifdef CONFIG_SMP
+ &sysrq_showallcpus_op, /* l */
+#else
+ NULL, /* l */
+#endif
+ &sysrq_showmem_op, /* m */
+ &sysrq_unrt_op, /* n */
+ /* o: This will often be registered as 'Off' at init time */
+ NULL, /* o */
+ &sysrq_showregs_op, /* p */
+ &sysrq_show_timers_op, /* q */
+ &sysrq_unraw_op, /* r */
+ &sysrq_sync_op, /* s */
+ &sysrq_showstate_op, /* t */
+ &sysrq_mountro_op, /* u */
+ /* v: May be registered for frame buffer console restore */
+ NULL, /* v */
+ &sysrq_showstate_blocked_op, /* w */
+ /* x: May be registered on ppc/powerpc for xmon */
+ NULL, /* x */
+ /* y: May be registered on sparc64 for global register dump */
+ NULL, /* y */
+ &sysrq_ftrace_dump_op, /* z */
+};
+
+/* key2index calculation, -1 on invalid index */
+static int sysrq_key_table_key2index(int key)
+{
+ int retval;
+
+ if ((key >= '0') && (key <= '9'))
+ retval = key - '0';
+ else if ((key >= 'a') && (key <= 'z'))
+ retval = key + 10 - 'a';
+ else
+ retval = -1;
+ return retval;
+}
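+
+/*
+ * Illustrative sanity checks (hypothetical helper, not part of the original
+ * file): '0'..'9' map to indices 0..9 and 'a'..'z' to 10..35, matching the
+ * 36-entry table above; anything else yields -1 and falls through to the
+ * help text in __handle_sysrq().
+ */
+static void __maybe_unused sysrq_key2index_example(void)
+{
+	BUILD_BUG_ON(ARRAY_SIZE(sysrq_key_table) != 36);
+	WARN_ON(sysrq_key_table_key2index('0') != 0);
+	WARN_ON(sysrq_key_table_key2index('z') != 35);
+	WARN_ON(sysrq_key_table_key2index('?') != -1);
+}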
+
+/*
+ * get and put functions for the table, exposed to modules.
+ */
+struct sysrq_key_op *__sysrq_get_key_op(int key)
+{
+ struct sysrq_key_op *op_p = NULL;
+ int i;
+
+ i = sysrq_key_table_key2index(key);
+ if (i != -1)
+ op_p = sysrq_key_table[i];
+
+ return op_p;
+}
+
+static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
+{
+ int i = sysrq_key_table_key2index(key);
+
+ if (i != -1)
+ sysrq_key_table[i] = op_p;
+}
+
+void __handle_sysrq(int key, bool check_mask)
+{
+ struct sysrq_key_op *op_p;
+ int orig_log_level;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+ /*
+ * Raise the apparent loglevel to maximum so that the sysrq header
+ * is shown to provide the user with positive feedback. We do not
+ * simply emit this at KERN_EMERG as that would change message
+ * routing in the consumers of /proc/kmsg.
+ */
+ orig_log_level = console_loglevel;
+ console_loglevel = 7;
+ printk(KERN_INFO "SysRq : ");
+
+ op_p = __sysrq_get_key_op(key);
+ if (op_p) {
+ /*
+ * Should we check for enabled operations (/proc/sysrq-trigger
+ * should not) and is the invoked operation enabled?
+ */
+ if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
+ printk("%s\n", op_p->action_msg);
+ console_loglevel = orig_log_level;
+ op_p->handler(key);
+ } else {
+ printk("This sysrq operation is disabled.\n");
+ }
+ } else {
+ printk("HELP : ");
+ /* Only print the help msg once per handler */
+ for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) {
+ if (sysrq_key_table[i]) {
+ int j;
+
+ for (j = 0; sysrq_key_table[i] !=
+ sysrq_key_table[j]; j++)
+ ;
+ if (j != i)
+ continue;
+ printk("%s ", sysrq_key_table[i]->help_msg);
+ }
+ }
+ printk("\n");
+ console_loglevel = orig_log_level;
+ }
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+}
+
+void handle_sysrq(int key)
+{
+ if (sysrq_on())
+ __handle_sysrq(key, true);
+}
+EXPORT_SYMBOL(handle_sysrq);
+
+#ifdef CONFIG_INPUT
+
+/* Simple translation table for the SysRq keys */
+static const unsigned char sysrq_xlate[KEY_CNT] =
+ "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */
+ "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */
+ "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */
+ "bnm,./\000*\000 \000\201\202\203\204\205" /* 0x30 - 0x3f */
+ "\206\207\210\211\212\000\000789-456+1" /* 0x40 - 0x4f */
+ "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */
+ "\r\000/"; /* 0x60 - 0x6f */
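+
+/* Example (illustrative): the keycode for the 'B' key (KEY_B == 48 == 0x30)
+ * indexes the fourth group above and translates to 'b', which
+ * __handle_sysrq() then routes to sysrq_reboot_op.
+ */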
+
+struct sysrq_state {
+ struct input_handle handle;
+ struct work_struct reinject_work;
+ unsigned long key_down[BITS_TO_LONGS(KEY_CNT)];
+ unsigned int alt;
+ unsigned int alt_use;
+ bool active;
+ bool need_reinject;
+ bool reinjecting;
+};
+
+static void sysrq_reinject_alt_sysrq(struct work_struct *work)
+{
+ struct sysrq_state *sysrq =
+ container_of(work, struct sysrq_state, reinject_work);
+ struct input_handle *handle = &sysrq->handle;
+ unsigned int alt_code = sysrq->alt_use;
+
+ if (sysrq->need_reinject) {
+ /* we do not want the assignment to be reordered */
+ sysrq->reinjecting = true;
+ mb();
+
+ /* Simulate press and release of Alt + SysRq */
+ input_inject_event(handle, EV_KEY, alt_code, 1);
+ input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1);
+ input_inject_event(handle, EV_SYN, SYN_REPORT, 1);
+
+ input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0);
+ input_inject_event(handle, EV_KEY, alt_code, 0);
+ input_inject_event(handle, EV_SYN, SYN_REPORT, 1);
+
+ mb();
+ sysrq->reinjecting = false;
+ }
+}
+
+static bool sysrq_filter(struct input_handle *handle,
+ unsigned int type, unsigned int code, int value)
+{
+ struct sysrq_state *sysrq = handle->private;
+ bool was_active = sysrq->active;
+ bool suppress;
+
+ /*
+ * Do not filter anything if we are in the process of re-injecting
+ * Alt+SysRq combination.
+ */
+ if (sysrq->reinjecting)
+ return false;
+
+ switch (type) {
+
+ case EV_SYN:
+ suppress = false;
+ break;
+
+ case EV_KEY:
+ switch (code) {
+
+ case KEY_LEFTALT:
+ case KEY_RIGHTALT:
+ if (!value) {
+ /* One of ALTs is being released */
+ if (sysrq->active && code == sysrq->alt_use)
+ sysrq->active = false;
+
+ sysrq->alt = KEY_RESERVED;
+
+ } else if (value != 2) {
+ sysrq->alt = code;
+ sysrq->need_reinject = false;
+ }
+ break;
+
+ case KEY_SYSRQ:
+ if (value == 1 && sysrq->alt != KEY_RESERVED) {
+ sysrq->active = true;
+ sysrq->alt_use = sysrq->alt;
+ /*
+ * If nothing else will be pressed we'll need
+				 * to re-inject the Alt-SysRq keystroke.
+ */
+ sysrq->need_reinject = true;
+ }
+
+ /*
+ * Pretend that sysrq was never pressed at all. This
+ * is needed to properly handle KGDB which will try
+			 * to release all keys after exiting the debugger. If
+			 * we do not clear the key bit, KGDB will end up sending
+			 * release events for Alt and SysRq, potentially
+			 * triggering the print screen function.
+ */
+ if (sysrq->active)
+ clear_bit(KEY_SYSRQ, handle->dev->key);
+
+ break;
+
+ default:
+ if (sysrq->active && value && value != 2) {
+ sysrq->need_reinject = false;
+ __handle_sysrq(sysrq_xlate[code], true);
+ }
+ break;
+ }
+
+ suppress = sysrq->active;
+
+ if (!sysrq->active) {
+ /*
+ * If we are not suppressing key presses keep track of
+ * keyboard state so we can release keys that have been
+ * pressed before entering SysRq mode.
+ */
+ if (value)
+ set_bit(code, sysrq->key_down);
+ else
+ clear_bit(code, sysrq->key_down);
+
+ if (was_active)
+ schedule_work(&sysrq->reinject_work);
+
+ } else if (value == 0 &&
+ test_and_clear_bit(code, sysrq->key_down)) {
+ /*
+			 * Pass on release events for keys that were pressed before
+ * entering SysRq mode.
+ */
+ suppress = false;
+ }
+ break;
+
+ default:
+ suppress = sysrq->active;
+ break;
+ }
+
+ return suppress;
+}
+
+static int sysrq_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ struct sysrq_state *sysrq;
+ int error;
+
+ sysrq = kzalloc(sizeof(struct sysrq_state), GFP_KERNEL);
+ if (!sysrq)
+ return -ENOMEM;
+
+ INIT_WORK(&sysrq->reinject_work, sysrq_reinject_alt_sysrq);
+
+ sysrq->handle.dev = dev;
+ sysrq->handle.handler = handler;
+ sysrq->handle.name = "sysrq";
+ sysrq->handle.private = sysrq;
+
+ error = input_register_handle(&sysrq->handle);
+ if (error) {
+ pr_err("Failed to register input sysrq handler, error %d\n",
+ error);
+ goto err_free;
+ }
+
+ error = input_open_device(&sysrq->handle);
+ if (error) {
+ pr_err("Failed to open input device, error %d\n", error);
+ goto err_unregister;
+ }
+
+ return 0;
+
+ err_unregister:
+ input_unregister_handle(&sysrq->handle);
+ err_free:
+ kfree(sysrq);
+ return error;
+}
+
+static void sysrq_disconnect(struct input_handle *handle)
+{
+ struct sysrq_state *sysrq = handle->private;
+
+ input_close_device(handle);
+ cancel_work_sync(&sysrq->reinject_work);
+ input_unregister_handle(handle);
+ kfree(sysrq);
+}
+
+/*
+ * We are matching on KEY_LEFTALT instead of KEY_SYSRQ because not all
+ * keyboards have the SysRq key predefined and the user may add it to the
+ * keymap later, but we expect all such keyboards to have a left Alt key.
+ */
+static const struct input_device_id sysrq_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ .keybit = { BIT_MASK(KEY_LEFTALT) },
+ },
+ { },
+};
+
+static struct input_handler sysrq_handler = {
+ .filter = sysrq_filter,
+ .connect = sysrq_connect,
+ .disconnect = sysrq_disconnect,
+ .name = "sysrq",
+ .id_table = sysrq_ids,
+};
+
+static bool sysrq_handler_registered;
+
+static inline void sysrq_register_handler(void)
+{
+ int error;
+
+ error = input_register_handler(&sysrq_handler);
+ if (error)
+ pr_err("Failed to register input handler, error %d", error);
+ else
+ sysrq_handler_registered = true;
+}
+
+static inline void sysrq_unregister_handler(void)
+{
+ if (sysrq_handler_registered) {
+ input_unregister_handler(&sysrq_handler);
+ sysrq_handler_registered = false;
+ }
+}
+
+#else
+
+static inline void sysrq_register_handler(void)
+{
+}
+
+static inline void sysrq_unregister_handler(void)
+{
+}
+
+#endif /* CONFIG_INPUT */
+
+int sysrq_toggle_support(int enable_mask)
+{
+ bool was_enabled = sysrq_on();
+
+ sysrq_enabled = enable_mask;
+
+ if (was_enabled != sysrq_on()) {
+ if (sysrq_on())
+ sysrq_register_handler();
+ else
+ sysrq_unregister_handler();
+ }
+
+ return 0;
+}
+
+static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p,
+ struct sysrq_key_op *remove_op_p)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+ if (__sysrq_get_key_op(key) == remove_op_p) {
+ __sysrq_put_key_op(key, insert_op_p);
+ retval = 0;
+ } else {
+ retval = -1;
+ }
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+ return retval;
+}
+
+int register_sysrq_key(int key, struct sysrq_key_op *op_p)
+{
+ return __sysrq_swap_key_ops(key, op_p, NULL);
+}
+EXPORT_SYMBOL(register_sysrq_key);
+
+int unregister_sysrq_key(int key, struct sysrq_key_op *op_p)
+{
+ return __sysrq_swap_key_ops(key, NULL, op_p);
+}
+EXPORT_SYMBOL(unregister_sysrq_key);
+
+#ifdef CONFIG_PROC_FS
+/*
+ * writing 'C' to /proc/sysrq-trigger is like sysrq-C
+ */
+static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ if (count) {
+ char c;
+
+ if (get_user(c, buf))
+ return -EFAULT;
+ __handle_sysrq(c, false);
+ }
+
+ return count;
+}
+
+static const struct file_operations proc_sysrq_trigger_operations = {
+ .write = write_sysrq_trigger,
+ .llseek = noop_llseek,
+};
+
+static void sysrq_init_procfs(void)
+{
+ if (!proc_create("sysrq-trigger", S_IWUSR, NULL,
+ &proc_sysrq_trigger_operations))
+ pr_err("Failed to register proc interface\n");
+}
+
+#else
+
+static inline void sysrq_init_procfs(void)
+{
+}
+
+#endif /* CONFIG_PROC_FS */
+
+static int __init sysrq_init(void)
+{
+ sysrq_init_procfs();
+
+ if (sysrq_on())
+ sysrq_register_handler();
+
+ return 0;
+}
+module_init(sysrq_init);
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
new file mode 100644
index 00000000..7c586692
--- /dev/null
+++ b/drivers/tty/tty_audit.c
@@ -0,0 +1,360 @@
+/*
+ * Creating audit events from TTY input.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All rights reserved. This copyrighted
+ * material is made available to anyone wishing to use, modify, copy, or
+ * redistribute it subject to the terms and conditions of the GNU General
+ * Public License v.2.
+ *
+ * Authors: Miloslav Trmac <mitr@redhat.com>
+ */
+
+#include <linux/audit.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+
+struct tty_audit_buf {
+ atomic_t count;
+ struct mutex mutex; /* Protects all data below */
+ int major, minor; /* The TTY which the data is from */
+ unsigned icanon:1;
+ size_t valid;
+ unsigned char *data; /* Allocated size N_TTY_BUF_SIZE */
+};
+
+static struct tty_audit_buf *tty_audit_buf_alloc(int major, int minor,
+ int icanon)
+{
+ struct tty_audit_buf *buf;
+
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ goto err;
+ buf->data = kmalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
+ if (!buf->data)
+ goto err_buf;
+ atomic_set(&buf->count, 1);
+ mutex_init(&buf->mutex);
+ buf->major = major;
+ buf->minor = minor;
+ buf->icanon = icanon;
+ buf->valid = 0;
+ return buf;
+
+err_buf:
+ kfree(buf);
+err:
+ return NULL;
+}
+
+static void tty_audit_buf_free(struct tty_audit_buf *buf)
+{
+ WARN_ON(buf->valid != 0);
+ kfree(buf->data);
+ kfree(buf);
+}
+
+static void tty_audit_buf_put(struct tty_audit_buf *buf)
+{
+ if (atomic_dec_and_test(&buf->count))
+ tty_audit_buf_free(buf);
+}
+
+static void tty_audit_log(const char *description, struct task_struct *tsk,
+ uid_t loginuid, unsigned sessionid, int major,
+ int minor, unsigned char *data, size_t size)
+{
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY);
+ if (ab) {
+ char name[sizeof(tsk->comm)];
+ uid_t uid = task_uid(tsk);
+
+ audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u "
+ "major=%d minor=%d comm=", description,
+ tsk->pid, uid, loginuid, sessionid,
+ major, minor);
+ get_task_comm(name, tsk);
+ audit_log_untrustedstring(ab, name);
+ audit_log_format(ab, " data=");
+ audit_log_n_hex(ab, data, size);
+ audit_log_end(ab);
+ }
+}
+
+/**
+ * tty_audit_buf_push - Push buffered data out
+ *
+ * Generate an audit message from the contents of @buf, which is owned by
+ * @tsk with @loginuid. @buf->mutex must be locked.
+ */
+static void tty_audit_buf_push(struct task_struct *tsk, uid_t loginuid,
+ unsigned int sessionid,
+ struct tty_audit_buf *buf)
+{
+ if (buf->valid == 0)
+ return;
+ if (audit_enabled == 0) {
+ buf->valid = 0;
+ return;
+ }
+ tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor,
+ buf->data, buf->valid);
+ buf->valid = 0;
+}
+
+/**
+ * tty_audit_buf_push_current - Push buffered data out
+ *
+ * Generate an audit message from the contents of @buf, which is owned by
+ * the current task. @buf->mutex must be locked.
+ */
+static void tty_audit_buf_push_current(struct tty_audit_buf *buf)
+{
+ uid_t auid = audit_get_loginuid(current);
+ unsigned int sessionid = audit_get_sessionid(current);
+ tty_audit_buf_push(current, auid, sessionid, buf);
+}
+
+/**
+ * tty_audit_exit - Handle a task exit
+ *
+ * Make sure all buffered data is written out and deallocate the buffer.
+ * Only needs to be called if current->signal->tty_audit_buf != %NULL.
+ */
+void tty_audit_exit(void)
+{
+ struct tty_audit_buf *buf;
+
+ spin_lock_irq(&current->sighand->siglock);
+ buf = current->signal->tty_audit_buf;
+ current->signal->tty_audit_buf = NULL;
+ spin_unlock_irq(&current->sighand->siglock);
+ if (!buf)
+ return;
+
+ mutex_lock(&buf->mutex);
+ tty_audit_buf_push_current(buf);
+ mutex_unlock(&buf->mutex);
+
+ tty_audit_buf_put(buf);
+}
+
+/**
+ * tty_audit_fork - Copy TTY audit state for a new task
+ *
+ * Set up TTY audit state in @sig from current. @sig needs no locking.
+ */
+void tty_audit_fork(struct signal_struct *sig)
+{
+ spin_lock_irq(&current->sighand->siglock);
+ sig->audit_tty = current->signal->audit_tty;
+ spin_unlock_irq(&current->sighand->siglock);
+}
+
+/**
+ * tty_audit_tiocsti - Log TIOCSTI
+ */
+void tty_audit_tiocsti(struct tty_struct *tty, char ch)
+{
+ struct tty_audit_buf *buf;
+ int major, minor, should_audit;
+
+ spin_lock_irq(&current->sighand->siglock);
+ should_audit = current->signal->audit_tty;
+ buf = current->signal->tty_audit_buf;
+ if (buf)
+ atomic_inc(&buf->count);
+ spin_unlock_irq(&current->sighand->siglock);
+
+ major = tty->driver->major;
+ minor = tty->driver->minor_start + tty->index;
+ if (buf) {
+ mutex_lock(&buf->mutex);
+ if (buf->major == major && buf->minor == minor)
+ tty_audit_buf_push_current(buf);
+ mutex_unlock(&buf->mutex);
+ tty_audit_buf_put(buf);
+ }
+
+ if (should_audit && audit_enabled) {
+ uid_t auid;
+ unsigned int sessionid;
+
+ auid = audit_get_loginuid(current);
+ sessionid = audit_get_sessionid(current);
+ tty_audit_log("ioctl=TIOCSTI", current, auid, sessionid, major,
+ minor, &ch, 1);
+ }
+}
+
+/**
+ * tty_audit_push_task - Flush task's pending audit data
+ * @tsk: task pointer
+ * @loginuid: sender login uid
+ * @sessionid: sender session id
+ *
+ * Called with a ref on @tsk held. Try to lock sighand and get a
+ * reference to the tty audit buffer if available.
+ * Flush the buffer or return an appropriate error code.
+ */
+int tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid)
+{
+ struct tty_audit_buf *buf = ERR_PTR(-EPERM);
+ unsigned long flags;
+
+ if (!lock_task_sighand(tsk, &flags))
+ return -ESRCH;
+
+ if (tsk->signal->audit_tty) {
+ buf = tsk->signal->tty_audit_buf;
+ if (buf)
+ atomic_inc(&buf->count);
+ }
+ unlock_task_sighand(tsk, &flags);
+
+ /*
+	 * Return 0 when signal->audit_tty is set
+ * but tsk->signal->tty_audit_buf == NULL.
+ */
+ if (!buf || IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ mutex_lock(&buf->mutex);
+ tty_audit_buf_push(tsk, loginuid, sessionid, buf);
+ mutex_unlock(&buf->mutex);
+
+ tty_audit_buf_put(buf);
+ return 0;
+}
+
+/**
+ * tty_audit_buf_get - Get an audit buffer.
+ *
+ * Get an audit buffer for @tty, allocate it if necessary. Return %NULL
+ * if TTY auditing is disabled or out of memory. Otherwise, return a new
+ * reference to the buffer.
+ */
+static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty)
+{
+ struct tty_audit_buf *buf, *buf2;
+
+ buf = NULL;
+ buf2 = NULL;
+ spin_lock_irq(&current->sighand->siglock);
+ if (likely(!current->signal->audit_tty))
+ goto out;
+ buf = current->signal->tty_audit_buf;
+ if (buf) {
+ atomic_inc(&buf->count);
+ goto out;
+ }
+ spin_unlock_irq(&current->sighand->siglock);
+
+ buf2 = tty_audit_buf_alloc(tty->driver->major,
+ tty->driver->minor_start + tty->index,
+ tty->icanon);
+ if (buf2 == NULL) {
+ audit_log_lost("out of memory in TTY auditing");
+ return NULL;
+ }
+
+ spin_lock_irq(&current->sighand->siglock);
+ if (!current->signal->audit_tty)
+ goto out;
+ buf = current->signal->tty_audit_buf;
+ if (!buf) {
+ current->signal->tty_audit_buf = buf2;
+ buf = buf2;
+ buf2 = NULL;
+ }
+ atomic_inc(&buf->count);
+ /* Fall through */
+ out:
+ spin_unlock_irq(&current->sighand->siglock);
+ if (buf2)
+ tty_audit_buf_free(buf2);
+ return buf;
+}
+
+/**
+ * tty_audit_add_data - Add data for TTY auditing.
+ *
+ * Audit @data of @size from @tty, if necessary.
+ */
+void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
+ size_t size)
+{
+ struct tty_audit_buf *buf;
+ int major, minor;
+
+ if (unlikely(size == 0))
+ return;
+
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY
+ && tty->driver->subtype == PTY_TYPE_MASTER)
+ return;
+
+ buf = tty_audit_buf_get(tty);
+ if (!buf)
+ return;
+
+ mutex_lock(&buf->mutex);
+ major = tty->driver->major;
+ minor = tty->driver->minor_start + tty->index;
+ if (buf->major != major || buf->minor != minor
+ || buf->icanon != tty->icanon) {
+ tty_audit_buf_push_current(buf);
+ buf->major = major;
+ buf->minor = minor;
+ buf->icanon = tty->icanon;
+ }
+ do {
+ size_t run;
+
+ run = N_TTY_BUF_SIZE - buf->valid;
+ if (run > size)
+ run = size;
+ memcpy(buf->data + buf->valid, data, run);
+ buf->valid += run;
+ data += run;
+ size -= run;
+ if (buf->valid == N_TTY_BUF_SIZE)
+ tty_audit_buf_push_current(buf);
+ } while (size != 0);
+ mutex_unlock(&buf->mutex);
+ tty_audit_buf_put(buf);
+}
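+
+/* Worked example (illustrative, assuming N_TTY_BUF_SIZE is 4096): if
+ * buf->valid is already 4000, a 200 byte addition is split into a 96 byte
+ * run that fills the buffer and pushes it out, followed by a 104 byte run
+ * into the now-empty buffer.
+ */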
+
+/**
+ * tty_audit_push - Push buffered data out
+ *
+ * Make sure no audit data is pending for @tty on the current process.
+ */
+void tty_audit_push(struct tty_struct *tty)
+{
+ struct tty_audit_buf *buf;
+
+ spin_lock_irq(&current->sighand->siglock);
+ if (likely(!current->signal->audit_tty)) {
+ spin_unlock_irq(&current->sighand->siglock);
+ return;
+ }
+ buf = current->signal->tty_audit_buf;
+ if (buf)
+ atomic_inc(&buf->count);
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (buf) {
+ int major, minor;
+
+ major = tty->driver->major;
+ minor = tty->driver->minor_start + tty->index;
+ mutex_lock(&buf->mutex);
+ if (buf->major == major && buf->minor == minor)
+ tty_audit_buf_push_current(buf);
+ mutex_unlock(&buf->mutex);
+ tty_audit_buf_put(buf);
+ }
+}
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
new file mode 100644
index 00000000..6c9b7cd6
--- /dev/null
+++ b/drivers/tty/tty_buffer.c
@@ -0,0 +1,522 @@
+/*
+ * Tty buffer allocation management
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+
+/**
+ * tty_buffer_free_all - free buffers used by a tty
+ * @tty: tty to free from
+ *
+ * Remove all the buffers pending on a tty whether queued with data
+ * or in the free ring. Must be called when the tty is no longer in use
+ *
+ * Locking: none
+ */
+
+void tty_buffer_free_all(struct tty_struct *tty)
+{
+ struct tty_buffer *thead;
+ while ((thead = tty->buf.head) != NULL) {
+ tty->buf.head = thead->next;
+ kfree(thead);
+ }
+ while ((thead = tty->buf.free) != NULL) {
+ tty->buf.free = thead->next;
+ kfree(thead);
+ }
+ tty->buf.tail = NULL;
+ tty->buf.memory_used = 0;
+}
+
+/**
+ * tty_buffer_alloc - allocate a tty buffer
+ * @tty: tty device
+ * @size: desired size (characters)
+ *
+ * Allocate a new tty buffer to hold the desired number of characters.
+ * Return NULL if out of memory or the allocation would exceed the
+ * per-device queue limit.
+ *
+ * Locking: Caller must hold tty->buf.lock
+ */
+
+static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size)
+{
+ struct tty_buffer *p;
+
+ if (tty->buf.memory_used + size > 65536)
+ return NULL;
+ p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
+ if (p == NULL)
+ return NULL;
+ p->used = 0;
+ p->size = size;
+ p->next = NULL;
+ p->commit = 0;
+ p->read = 0;
+ p->char_buf_ptr = (char *)(p->data);
+ p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
+ tty->buf.memory_used += size;
+ return p;
+}
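+
+/* Layout sketch (illustrative, not part of the original file): the single
+ * kmalloc() above places 2*size bytes of payload directly after the header,
+ * the first half for characters and the second half for per-character flags:
+ *
+ *   [ struct tty_buffer | size char bytes | size flag bytes ]
+ *                         ^char_buf_ptr     ^flag_buf_ptr
+ */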
+
+/**
+ * tty_buffer_free - free a tty buffer
+ * @tty: tty owning the buffer
+ * @b: the buffer to free
+ *
+ * Free a tty buffer, or add it to the free list according to our
+ * internal strategy
+ *
+ * Locking: Caller must hold tty->buf.lock
+ */
+
+static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
+{
+ /* Dumb strategy for now - should keep some stats */
+ tty->buf.memory_used -= b->size;
+ WARN_ON(tty->buf.memory_used < 0);
+
+ if (b->size >= 512)
+ kfree(b);
+ else {
+ b->next = tty->buf.free;
+ tty->buf.free = b;
+ }
+}
+
+/**
+ * __tty_buffer_flush - flush full tty buffers
+ * @tty: tty to flush
+ *
+ * flush all the buffers containing receive data. Caller must
+ * hold the buffer lock and must have ensured no parallel flush to
+ * ldisc is running.
+ *
+ * Locking: Caller must hold tty->buf.lock
+ */
+
+static void __tty_buffer_flush(struct tty_struct *tty)
+{
+ struct tty_buffer *thead;
+
+ while ((thead = tty->buf.head) != NULL) {
+ tty->buf.head = thead->next;
+ tty_buffer_free(tty, thead);
+ }
+ tty->buf.tail = NULL;
+}
+
+/**
+ * tty_buffer_flush - flush full tty buffers
+ * @tty: tty to flush
+ *
+ * flush all the buffers containing receive data. If the buffer is
+ * being processed by flush_to_ldisc then we defer the processing
+ * to that function
+ *
+ * Locking: none
+ */
+
+void tty_buffer_flush(struct tty_struct *tty)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&tty->buf.lock, flags);
+
+ /* If the data is being pushed to the tty layer then we can't
+ process it here. Instead set a flag and the flush_to_ldisc
+ path will process the flush request before it exits */
+ if (test_bit(TTY_FLUSHING, &tty->flags)) {
+ set_bit(TTY_FLUSHPENDING, &tty->flags);
+ spin_unlock_irqrestore(&tty->buf.lock, flags);
+ wait_event(tty->read_wait,
+ test_bit(TTY_FLUSHPENDING, &tty->flags) == 0);
+ return;
+ } else
+ __tty_buffer_flush(tty);
+ spin_unlock_irqrestore(&tty->buf.lock, flags);
+}
+
+/**
+ * tty_buffer_find - find a free tty buffer
+ * @tty: tty owning the buffer
+ * @size: characters wanted
+ *
+ * Locate an existing suitable tty buffer or if we are lacking one then
+ * allocate a new one. We round our buffers off in 256 character chunks
+ * to get better allocation behaviour.
+ *
+ * Locking: Caller must hold tty->buf.lock
+ */
+
+static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
+{
+ struct tty_buffer **tbh = &tty->buf.free;
+ while ((*tbh) != NULL) {
+ struct tty_buffer *t = *tbh;
+ if (t->size >= size) {
+ *tbh = t->next;
+ t->next = NULL;
+ t->used = 0;
+ t->commit = 0;
+ t->read = 0;
+ tty->buf.memory_used += t->size;
+ return t;
+ }
+ tbh = &((*tbh)->next);
+ }
+ /* Round the buffer size out */
+ size = (size + 0xFF) & ~0xFF;
+ return tty_buffer_alloc(tty, size);
+ /* Should possibly check if this fails for the largest buffer we
+ have queued and recycle that ? */
+}
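+
+/* Worked example (illustrative): (size + 0xFF) & ~0xFF rounds up to the next
+ * multiple of 256, so a request for 100 characters allocates a 256 character
+ * buffer and a request for 300 characters allocates 512.
+ */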
+
+/**
+ * tty_buffer_request_room - grow tty buffer if needed
+ * @tty: tty structure
+ * @size: size desired
+ *
+ * Make at least size bytes of linear space available for the tty
+ * buffer. If we fail return the size we managed to find.
+ *
+ * Locking: Takes tty->buf.lock
+ */
+int tty_buffer_request_room(struct tty_struct *tty, size_t size)
+{
+ struct tty_buffer *b, *n;
+ int left;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tty->buf.lock, flags);
+
+ /* OPTIMISATION: We could keep a per tty "zero" sized buffer to
+	   remove this conditional if it's worth it. This would be invisible
+ to the callers */
+ if ((b = tty->buf.tail) != NULL)
+ left = b->size - b->used;
+ else
+ left = 0;
+
+ if (left < size) {
+ /* This is the slow path - looking for new buffers to use */
+ if ((n = tty_buffer_find(tty, size)) != NULL) {
+ if (b != NULL) {
+ b->next = n;
+ b->commit = b->used;
+ } else
+ tty->buf.head = n;
+ tty->buf.tail = n;
+ } else
+ size = left;
+ }
+
+ spin_unlock_irqrestore(&tty->buf.lock, flags);
+ return size;
+}
+EXPORT_SYMBOL_GPL(tty_buffer_request_room);
+
+/**
+ * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
+ * @tty: tty structure
+ * @chars: characters
+ * @flag: flag value for each character
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. All the characters
+ * passed are marked with the supplied flag. Returns the number added.
+ *
+ * Locking: Called functions may take tty->buf.lock
+ */
+
+int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
+ const unsigned char *chars, char flag, size_t size)
+{
+ int copied = 0;
+ do {
+ int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
+ int space = tty_buffer_request_room(tty, goal);
+ struct tty_buffer *tb = tty->buf.tail;
+ /* If there is no space then tb may be NULL */
+ if (unlikely(space == 0))
+ break;
+ memcpy(tb->char_buf_ptr + tb->used, chars, space);
+ memset(tb->flag_buf_ptr + tb->used, flag, space);
+ tb->used += space;
+ copied += space;
+ chars += space;
+ /* There is a small chance that we need to split the data over
+ several buffers. If this is the case we must loop */
+ } while (unlikely(size > copied));
+ return copied;
+}
+EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
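+
+/*
+ * Illustrative sketch (hypothetical receive path): most callers use the
+ * tty_insert_flip_string() wrapper from <linux/tty_flip.h>, which marks
+ * every byte TTY_NORMAL, then push the data to the line discipline:
+ *
+ *      int n = tty_insert_flip_string(tty, rx_buf, len);
+ *      if (n < len)
+ *              note_rx_overrun();      (hypothetical overrun accounting)
+ *      tty_flip_buffer_push(tty);
+ */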
+
+/**
+ * tty_insert_flip_string_flags - Add characters to the tty buffer
+ * @tty: tty structure
+ * @chars: characters
+ * @flags: flag bytes
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. For each character
+ * the flags array indicates the status of the character. Returns the
+ * number added.
+ *
+ * Locking: Called functions may take tty->buf.lock
+ */
+
+int tty_insert_flip_string_flags(struct tty_struct *tty,
+ const unsigned char *chars, const char *flags, size_t size)
+{
+ int copied = 0;
+ do {
+ int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
+ int space = tty_buffer_request_room(tty, goal);
+ struct tty_buffer *tb = tty->buf.tail;
+ /* If there is no space then tb may be NULL */
+ if (unlikely(space == 0))
+ break;
+ memcpy(tb->char_buf_ptr + tb->used, chars, space);
+ memcpy(tb->flag_buf_ptr + tb->used, flags, space);
+ tb->used += space;
+ copied += space;
+ chars += space;
+ flags += space;
+ /* There is a small chance that we need to split the data over
+ several buffers. If this is the case we must loop */
+ } while (unlikely(size > copied));
+ return copied;
+}
+EXPORT_SYMBOL(tty_insert_flip_string_flags);
+
+/**
+ * tty_schedule_flip - push characters to ldisc
+ * @tty: tty to push from
+ *
+ * Takes any pending buffers and transfers their ownership to the
+ * ldisc side of the queue. It then schedules those characters for
+ * processing by the line discipline.
+ *
+ * Locking: Takes tty->buf.lock
+ */
+
+void tty_schedule_flip(struct tty_struct *tty)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&tty->buf.lock, flags);
+ if (tty->buf.tail != NULL)
+ tty->buf.tail->commit = tty->buf.tail->used;
+ spin_unlock_irqrestore(&tty->buf.lock, flags);
+ schedule_work(&tty->buf.work);
+}
+EXPORT_SYMBOL(tty_schedule_flip);
+
+/**
+ * tty_prepare_flip_string - make room for characters
+ * @tty: tty
+ * @chars: return pointer for character write area
+ * @size: desired size
+ *
+ * Prepare a block of space in the buffer for data. Returns the length
+ * available and buffer pointer to the space which is now allocated and
+ * accounted for as ready for normal characters. This is used for drivers
+ * that need their own block copy routines into the buffer. There is no
+ * guarantee the buffer is a DMA target!
+ *
+ * Locking: May call functions taking tty->buf.lock
+ */
+
+int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars,
+ size_t size)
+{
+ int space = tty_buffer_request_room(tty, size);
+ if (likely(space)) {
+ struct tty_buffer *tb = tty->buf.tail;
+ *chars = tb->char_buf_ptr + tb->used;
+ memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
+ tb->used += space;
+ }
+ return space;
+}
+EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
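+
+/*
+ * Illustrative sketch (hypothetical block-copy driver): reserve space,
+ * copy straight into it, then push; the returned length may be smaller
+ * than requested:
+ *
+ *      unsigned char *p;
+ *      int len = tty_prepare_flip_string(tty, &p, bytes_ready);
+ *      if (len > 0) {
+ *              copy_from_hw_fifo(p, len);      (hypothetical copy routine)
+ *              tty_flip_buffer_push(tty);
+ *      }
+ */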
+
+/**
+ * tty_prepare_flip_string_flags - make room for characters
+ * @tty: tty
+ * @chars: return pointer for character write area
+ * @flags: return pointer for status flag write area
+ * @size: desired size
+ *
+ * Prepare a block of space in the buffer for data. Returns the length
+ * available and buffer pointer to the space which is now allocated and
+ * accounted for as ready for characters. This is used for drivers
+ * that need their own block copy routines into the buffer. There is no
+ * guarantee the buffer is a DMA target!
+ *
+ * Locking: May call functions taking tty->buf.lock
+ */
+
+int tty_prepare_flip_string_flags(struct tty_struct *tty,
+ unsigned char **chars, char **flags, size_t size)
+{
+ int space = tty_buffer_request_room(tty, size);
+ if (likely(space)) {
+ struct tty_buffer *tb = tty->buf.tail;
+ *chars = tb->char_buf_ptr + tb->used;
+ *flags = tb->flag_buf_ptr + tb->used;
+ tb->used += space;
+ }
+ return space;
+}
+EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
+
+
+
+/**
+ * flush_to_ldisc
+ * @work: tty structure passed from work queue.
+ *
+ * This routine is called out of the software interrupt to flush data
+ * from the buffer chain to the line discipline.
+ *
+ * Locking: holds tty->buf.lock to guard buffer list. Drops the lock
+ * while invoking the line discipline receive_buf method. The
+ * receive_buf method is single threaded for each tty instance.
+ */
+
+static void flush_to_ldisc(struct work_struct *work)
+{
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, buf.work);
+ unsigned long flags;
+ struct tty_ldisc *disc;
+
+ disc = tty_ldisc_ref(tty);
+ if (disc == NULL) /* !TTY_LDISC */
+ return;
+
+ spin_lock_irqsave(&tty->buf.lock, flags);
+
+ if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
+ struct tty_buffer *head;
+ while ((head = tty->buf.head) != NULL) {
+ int count;
+ char *char_buf;
+ unsigned char *flag_buf;
+
+ count = head->commit - head->read;
+ if (!count) {
+ if (head->next == NULL)
+ break;
+ tty->buf.head = head->next;
+ tty_buffer_free(tty, head);
+ continue;
+ }
+ /* Ldisc or user is trying to flush the buffers
+ we are feeding to the ldisc, stop feeding the
+ line discipline as we want to empty the queue */
+ if (test_bit(TTY_FLUSHPENDING, &tty->flags))
+ break;
+ if (!tty->receive_room)
+ break;
+ if (count > tty->receive_room)
+ count = tty->receive_room;
+ char_buf = head->char_buf_ptr + head->read;
+ flag_buf = head->flag_buf_ptr + head->read;
+ head->read += count;
+ spin_unlock_irqrestore(&tty->buf.lock, flags);
+ disc->ops->receive_buf(tty, char_buf,
+ flag_buf, count);
+ spin_lock_irqsave(&tty->buf.lock, flags);
+ }
+ clear_bit(TTY_FLUSHING, &tty->flags);
+ }
+
+ /* We may have a deferred request to flush the input buffer,
+ if so pull the chain under the lock and empty the queue */
+ if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
+ __tty_buffer_flush(tty);
+ clear_bit(TTY_FLUSHPENDING, &tty->flags);
+ wake_up(&tty->read_wait);
+ }
+ spin_unlock_irqrestore(&tty->buf.lock, flags);
+
+ tty_ldisc_deref(disc);
+}
+
+/**
+ * tty_flush_to_ldisc
+ * @tty: tty to push
+ *
+ * Push the terminal flip buffers to the line discipline.
+ *
+ * Must not be called from IRQ context.
+ */
+void tty_flush_to_ldisc(struct tty_struct *tty)
+{
+ flush_work(&tty->buf.work);
+}
+
+/**
+ * tty_flip_buffer_push - push terminal flip buffers to the line discipline
+ * @tty: tty to push
+ *
+ * Queue a push of the terminal flip buffers to the line discipline. This
+ * function must not be called from IRQ context if tty->low_latency is set.
+ *
+ * In the event of the queue being busy for flipping the work will be
+ * held off and retried later.
+ *
+ * Locking: tty buffer lock. Driver locks in low latency mode.
+ */
+
+void tty_flip_buffer_push(struct tty_struct *tty)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&tty->buf.lock, flags);
+ if (tty->buf.tail != NULL)
+ tty->buf.tail->commit = tty->buf.tail->used;
+ spin_unlock_irqrestore(&tty->buf.lock, flags);
+
+ if (tty->low_latency)
+ flush_to_ldisc(&tty->buf.work);
+ else
+ schedule_work(&tty->buf.work);
+}
+EXPORT_SYMBOL(tty_flip_buffer_push);
+
+/**
+ * tty_buffer_init - prepare a tty buffer structure
+ * @tty: tty to initialise
+ *
+ * Set up the initial state of the buffer management for a tty device.
+ * Must be called before the other tty buffer functions are used.
+ *
+ * Locking: none
+ */
+
+void tty_buffer_init(struct tty_struct *tty)
+{
+ spin_lock_init(&tty->buf.lock);
+ tty->buf.head = NULL;
+ tty->buf.tail = NULL;
+ tty->buf.free = NULL;
+ tty->buf.memory_used = 0;
+ INIT_WORK(&tty->buf.work, flush_to_ldisc);
+}
+
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
new file mode 100644
index 00000000..b44aef07
--- /dev/null
+++ b/drivers/tty/tty_io.c
@@ -0,0 +1,3357 @@
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * 'tty_io.c' gives an orthogonal feeling to tty's, be they consoles
+ * or rs-channels. It also implements echoing, cooked mode etc.
+ *
+ * Kill-line thanks to John T Kohl, who also corrected VMIN = VTIME = 0.
+ *
+ * Modified by Theodore Ts'o, 9/14/92, to dynamically allocate the
+ * tty_struct and tty_queue structures. Previously there was an array
+ * of 256 tty_struct's which was statically allocated, and the
+ * tty_queue structures were allocated at boot time. Both are now
+ * dynamically allocated only when the tty is open.
+ *
+ * Also restructured routines so that there is more of a separation
+ * between the high-level tty routines (tty_io.c and tty_ioctl.c) and
+ * the low-level tty routines (serial.c, pty.c, console.c). This
+ * makes for cleaner and more compact code. -TYT, 9/17/92
+ *
+ * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines
+ * which can be dynamically activated and de-activated by the line
+ * discipline handling modules (like SLIP).
+ *
+ * NOTE: pay no attention to the line discipline code (yet); its
+ * interface is still subject to change in this version...
+ * -- TYT, 1/31/92
+ *
+ * Added functionality to the OPOST tty handling. No delays, but all
+ * other bits should be there.
+ * -- Nick Holloway <alfie@dcs.warwick.ac.uk>, 27th May 1993.
+ *
+ * Rewrote canonical mode and added more termios flags.
+ * -- julian@uhunix.uhcc.hawaii.edu (J. Cowley), 13Jan94
+ *
+ * Reorganized FASYNC support so mouse code can share it.
+ * -- ctm@ardi.com, 9Sep95
+ *
+ * New TIOCLINUX variants added.
+ * -- mj@k332.feld.cvut.cz, 19-Nov-95
+ *
+ * Restrict vt switching via ioctl()
+ * -- grif@cs.ucr.edu, 5-Dec-95
+ *
+ * Move console and virtual terminal code to more appropriate files,
+ * implement CONFIG_VT and generalize console device interface.
+ * -- Marko Kohtala <Marko.Kohtala@hut.fi>, March 97
+ *
+ * Rewrote tty_init_dev and tty_release_dev to eliminate races.
+ * -- Bill Hawes <whawes@star.net>, June 97
+ *
+ * Added devfs support.
+ * -- C. Scott Ananian <cananian@alumni.princeton.edu>, 13-Jan-1998
+ *
+ * Added support for a Unix98-style ptmx device.
+ * -- C. Scott Ananian <cananian@alumni.princeton.edu>, 14-Jan-1998
+ *
+ * Reduced memory usage for older ARM systems
+ * -- Russell King <rmk@arm.linux.org.uk>
+ *
+ * Move do_SAK() into process context. Less stack use in devfs functions.
+ * alloc_tty_struct() always uses kmalloc()
+ * -- Andrew Morton <andrewm@uow.edu.eu> 17Mar01
+ */
+
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/devpts_fs.h>
+#include <linux/file.h>
+#include <linux/fdtable.h>
+#include <linux/console.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/kd.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/serial.h>
+
+#include <linux/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/kbd_kern.h>
+#include <linux/vt_kern.h>
+#include <linux/selection.h>
+
+#include <linux/kmod.h>
+#include <linux/nsproxy.h>
+
+#undef TTY_DEBUG_HANGUP
+
+#define TTY_PARANOIA_CHECK 1
+#define CHECK_TTY_COUNT 1
+
+struct ktermios tty_std_termios = { /* for the benefit of tty drivers */
+ .c_iflag = ICRNL | IXON,
+ .c_oflag = OPOST | ONLCR,
+ .c_cflag = B38400 | CS8 | CREAD | HUPCL,
+ .c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK |
+ ECHOCTL | ECHOKE | IEXTEN,
+ .c_cc = INIT_C_CC,
+ .c_ispeed = 38400,
+ .c_ospeed = 38400
+};
+
+EXPORT_SYMBOL(tty_std_termios);
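+
+/*
+ * Illustrative sketch (hypothetical driver setup): tty drivers normally
+ * start from tty_std_termios when filling in their default termios and
+ * override only the fields they care about:
+ *
+ *      driver->init_termios = tty_std_termios;
+ *      driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ */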
+
+/* This list gets poked at by procfs and various bits of boot up code. This
+ could do with some rationalisation such as pulling the tty proc function
+ into this file */
+
+LIST_HEAD(tty_drivers); /* linked list of tty drivers */
+
+/* Mutex to protect creating and releasing a tty. This is shared with
+ vt.c for deeply disgusting hack reasons */
+DEFINE_MUTEX(tty_mutex);
+EXPORT_SYMBOL(tty_mutex);
+
+/* Spinlock to protect the tty->tty_files list */
+DEFINE_SPINLOCK(tty_files_lock);
+
+static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
+ssize_t redirected_tty_write(struct file *, const char __user *,
+ size_t, loff_t *);
+static unsigned int tty_poll(struct file *, poll_table *);
+static int tty_open(struct inode *, struct file *);
+long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+#else
+#define tty_compat_ioctl NULL
+#endif
+static int __tty_fasync(int fd, struct file *filp, int on);
+static int tty_fasync(int fd, struct file *filp, int on);
+static void release_tty(struct tty_struct *tty, int idx);
+static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
+static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
+
+/**
+ * alloc_tty_struct - allocate a tty object
+ *
+ * Return a new empty tty structure. The data fields have not
+ * been initialized in any way, other than being zeroed.
+ *
+ * Locking: none
+ */
+
+struct tty_struct *alloc_tty_struct(void)
+{
+ return kzalloc(sizeof(struct tty_struct), GFP_KERNEL);
+}
+
+/**
+ * free_tty_struct - free a disused tty
+ * @tty: tty struct to free
+ *
+ * Free the write buffers, tty queue and tty memory itself.
+ *
+ * Locking: none. Must be called after tty is definitely unused
+ */
+
+void free_tty_struct(struct tty_struct *tty)
+{
+ if (tty->dev)
+ put_device(tty->dev);
+ kfree(tty->write_buf);
+ tty_buffer_free_all(tty);
+ kfree(tty);
+}
+
+static inline struct tty_struct *file_tty(struct file *file)
+{
+ return ((struct tty_file_private *)file->private_data)->tty;
+}
+
+int tty_alloc_file(struct file *file)
+{
+ struct tty_file_private *priv;
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ file->private_data = priv;
+
+ return 0;
+}
+
+/* Associate a new file with the tty structure */
+void tty_add_file(struct tty_struct *tty, struct file *file)
+{
+ struct tty_file_private *priv = file->private_data;
+
+ priv->tty = tty;
+ priv->file = file;
+
+ spin_lock(&tty_files_lock);
+ list_add(&priv->list, &tty->tty_files);
+ spin_unlock(&tty_files_lock);
+}
+
+/**
+ * tty_free_file - free file->private_data
+ *
+ * This should be used only on the failure path, when tty_add_file() has
+ * not yet been called.
+ */
+void tty_free_file(struct file *file)
+{
+ struct tty_file_private *priv = file->private_data;
+
+ file->private_data = NULL;
+ kfree(priv);
+}
+
+/* Delete file from its tty */
+void tty_del_file(struct file *file)
+{
+ struct tty_file_private *priv = file->private_data;
+
+ spin_lock(&tty_files_lock);
+ list_del(&priv->list);
+ spin_unlock(&tty_files_lock);
+ tty_free_file(file);
+}
+
+
+#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
+
+/**
+ * tty_name - return tty naming
+ * @tty: tty structure
+ * @buf: buffer for output
+ *
+ * Convert a tty structure into a name. The name reflects the kernel
+ * naming policy and, if udev is in use, may not reflect the user space name.
+ *
+ * Locking: none
+ */
+
+char *tty_name(struct tty_struct *tty, char *buf)
+{
+ if (!tty) /* Hmm. NULL pointer. That's fun. */
+ strcpy(buf, "NULL tty");
+ else
+ strcpy(buf, tty->name);
+ return buf;
+}
+
+EXPORT_SYMBOL(tty_name);
+
+int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
+ const char *routine)
+{
+#ifdef TTY_PARANOIA_CHECK
+ if (!tty) {
+ printk(KERN_WARNING
+ "null TTY for (%d:%d) in %s\n",
+ imajor(inode), iminor(inode), routine);
+ return 1;
+ }
+ if (tty->magic != TTY_MAGIC) {
+ printk(KERN_WARNING
+ "bad magic number for tty struct (%d:%d) in %s\n",
+ imajor(inode), iminor(inode), routine);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+static int check_tty_count(struct tty_struct *tty, const char *routine)
+{
+#ifdef CHECK_TTY_COUNT
+ struct list_head *p;
+ int count = 0;
+
+ spin_lock(&tty_files_lock);
+ list_for_each(p, &tty->tty_files) {
+ count++;
+ }
+ spin_unlock(&tty_files_lock);
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_SLAVE &&
+ tty->link && tty->link->count)
+ count++;
+ if (tty->count != count) {
+ printk(KERN_WARNING "Warning: dev (%s) tty->count(%d) "
+ "!= #fd's(%d) in %s\n",
+ tty->name, tty->count, count, routine);
+ return count;
+ }
+#endif
+ return 0;
+}
+
+/**
+ * get_tty_driver - find device of a tty
+ * @device: device identifier
+ * @index: returns the index of the tty
+ *
+ * This routine returns a tty driver structure, given a device number
+ * and also passes back the index number.
+ *
+ * Locking: caller must hold tty_mutex
+ */
+
+static struct tty_driver *get_tty_driver(dev_t device, int *index)
+{
+ struct tty_driver *p;
+
+ list_for_each_entry(p, &tty_drivers, tty_drivers) {
+ dev_t base = MKDEV(p->major, p->minor_start);
+ if (device < base || device >= base + p->num)
+ continue;
+ *index = device - base;
+ return tty_driver_kref_get(p);
+ }
+ return NULL;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+
+/**
+ * tty_find_polling_driver - find device of a polled tty
+ * @name: name string to match
+ * @line: pointer to resulting tty line nr
+ *
+ * This routine returns a tty driver structure, given a name
+ * and the condition that the tty driver is capable of polled
+ * operation.
+ */
+struct tty_driver *tty_find_polling_driver(char *name, int *line)
+{
+ struct tty_driver *p, *res = NULL;
+ int tty_line = 0;
+ int len;
+ char *str, *stp;
+
+ for (str = name; *str; str++)
+ if ((*str >= '0' && *str <= '9') || *str == ',')
+ break;
+ if (!*str)
+ return NULL;
+
+ len = str - name;
+ tty_line = simple_strtoul(str, &str, 10);
+
+ mutex_lock(&tty_mutex);
+ /* Search through the tty devices to look for a match */
+ list_for_each_entry(p, &tty_drivers, tty_drivers) {
+ if (strncmp(name, p->name, len) != 0)
+ continue;
+ stp = str;
+ if (*stp == ',')
+ stp++;
+ if (*stp == '\0')
+ stp = NULL;
+
+ if (tty_line >= 0 && tty_line < p->num && p->ops &&
+ p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
+ res = tty_driver_kref_get(p);
+ *line = tty_line;
+ break;
+ }
+ }
+ mutex_unlock(&tty_mutex);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(tty_find_polling_driver);
+#endif
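+
+/*
+ * Illustrative sketch (hypothetical polling client, CONFIG_CONSOLE_POLL):
+ * a debugger-style user looks up a driver by name plus an optional options
+ * string, then uses the poll_* driver operations on the returned line:
+ *
+ *      char target[] = "ttyS0,115200";
+ *      int line;
+ *      struct tty_driver *drv = tty_find_polling_driver(target, &line);
+ *      if (drv)
+ *              c = drv->ops->poll_get_char(drv, line);
+ */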
+
+/**
+ * tty_check_change - check for POSIX terminal changes
+ * @tty: tty to check
+ *
+ * If we try to write to, or set the state of, a terminal and we're
+ * not in the foreground, send a SIGTTOU. If the signal is blocked or
+ * ignored, go ahead and perform the operation. (POSIX 7.2)
+ *
+ * Locking: ctrl_lock
+ */
+
+int tty_check_change(struct tty_struct *tty)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (current->signal->tty != tty)
+ return 0;
+
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+
+ if (!tty->pgrp) {
+ printk(KERN_WARNING "tty_check_change: tty->pgrp == NULL!\n");
+ goto out_unlock;
+ }
+ if (task_pgrp(current) == tty->pgrp)
+ goto out_unlock;
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ if (is_ignored(SIGTTOU))
+ goto out;
+ if (is_current_pgrp_orphaned()) {
+ ret = -EIO;
+ goto out;
+ }
+ kill_pgrp(task_pgrp(current), SIGTTOU, 1);
+ set_thread_flag(TIF_SIGPENDING);
+ ret = -ERESTARTSYS;
+out:
+ return ret;
+out_unlock:
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ return ret;
+}
+
+EXPORT_SYMBOL(tty_check_change);
+
+static ssize_t hung_up_tty_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return -EIO;
+}
+
+/* No kernel lock held - none needed ;) */
+static unsigned int hung_up_tty_poll(struct file *filp, poll_table *wait)
+{
+ return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
+}
+
+static long hung_up_tty_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
+}
+
+static long hung_up_tty_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
+}
+
+static const struct file_operations tty_fops = {
+ .llseek = no_llseek,
+ .read = tty_read,
+ .write = tty_write,
+ .poll = tty_poll,
+ .unlocked_ioctl = tty_ioctl,
+ .compat_ioctl = tty_compat_ioctl,
+ .open = tty_open,
+ .release = tty_release,
+ .fasync = tty_fasync,
+};
+
+static const struct file_operations console_fops = {
+ .llseek = no_llseek,
+ .read = tty_read,
+ .write = redirected_tty_write,
+ .poll = tty_poll,
+ .unlocked_ioctl = tty_ioctl,
+ .compat_ioctl = tty_compat_ioctl,
+ .open = tty_open,
+ .release = tty_release,
+ .fasync = tty_fasync,
+};
+
+static const struct file_operations hung_up_tty_fops = {
+ .llseek = no_llseek,
+ .read = hung_up_tty_read,
+ .write = hung_up_tty_write,
+ .poll = hung_up_tty_poll,
+ .unlocked_ioctl = hung_up_tty_ioctl,
+ .compat_ioctl = hung_up_tty_compat_ioctl,
+ .release = tty_release,
+};
+
+static DEFINE_SPINLOCK(redirect_lock);
+static struct file *redirect;
+
+/**
+ * tty_wakeup - request more data
+ * @tty: terminal
+ *
+ * Internal and external helper for wakeups of tty. This function
+ * informs the line discipline if present that the driver is ready
+ * to receive more output data.
+ */
+
+void tty_wakeup(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld;
+
+ if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) {
+ ld = tty_ldisc_ref(tty);
+ if (ld) {
+ if (ld->ops->write_wakeup)
+ ld->ops->write_wakeup(tty);
+ tty_ldisc_deref(ld);
+ }
+ }
+ wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
+}
+
+EXPORT_SYMBOL_GPL(tty_wakeup);
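+
+/*
+ * Illustrative sketch (hypothetical driver): a transmit-done interrupt
+ * handler calls tty_wakeup() once the hardware can accept more data, so
+ * that a line discipline with TTY_DO_WRITE_WAKEUP set gets its
+ * write_wakeup() callback and blocked writers are woken:
+ *
+ *      if (tx_fifo_has_room(port))     (hypothetical hardware test)
+ *              tty_wakeup(tty);
+ */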
+
+/**
+ * __tty_hangup - actual handler for hangup events
+ * @work: tty device
+ *
+ * This can be called by the "eventd" kernel thread. That is process
+ * synchronous but doesn't hold any locks, so we need to make sure we
+ * have the appropriate locks for what we're doing.
+ *
+ * The hangup event clears any pending redirections onto the hung up
+ * device. It ensures future writes will error and it does the needed
+ * line discipline hangup and signal delivery. The tty object itself
+ * remains intact.
+ *
+ * Locking:
+ * BTM
+ * redirect lock for undoing redirection
+ * file list lock for manipulating list of ttys
+ * tty_ldisc_lock from called functions
+ * termios_mutex resetting termios data
+ * tasklist_lock to walk task list for hangup event
+ * ->siglock to protect ->signal/->sighand
+ */
+void __tty_hangup(struct tty_struct *tty)
+{
+ struct file *cons_filp = NULL;
+ struct file *filp, *f = NULL;
+ struct task_struct *p;
+ struct tty_file_private *priv;
+ int closecount = 0, n;
+ unsigned long flags;
+ int refs = 0;
+
+ if (!tty)
+ return;
+
+
+ spin_lock(&redirect_lock);
+ if (redirect && file_tty(redirect) == tty) {
+ f = redirect;
+ redirect = NULL;
+ }
+ spin_unlock(&redirect_lock);
+
+ tty_lock();
+
+ /* some functions below drop BTM, so we need this bit */
+ set_bit(TTY_HUPPING, &tty->flags);
+
+ /* inuse_filps is protected by the single tty lock,
+ this really needs to change if we want to flush the
+ workqueue with the lock held */
+ check_tty_count(tty, "tty_hangup");
+
+ spin_lock(&tty_files_lock);
+ /* This breaks for file handles being sent over AF_UNIX sockets ? */
+ list_for_each_entry(priv, &tty->tty_files, list) {
+ filp = priv->file;
+ if (filp->f_op->write == redirected_tty_write)
+ cons_filp = filp;
+ if (filp->f_op->write != tty_write)
+ continue;
+ closecount++;
+ __tty_fasync(-1, filp, 0); /* can't block */
+ filp->f_op = &hung_up_tty_fops;
+ }
+ spin_unlock(&tty_files_lock);
+
+ /*
+ * it drops BTM and thus races with reopen
+ * we protect the race by TTY_HUPPING
+ */
+ tty_ldisc_hangup(tty);
+
+ read_lock(&tasklist_lock);
+ if (tty->session) {
+ do_each_pid_task(tty->session, PIDTYPE_SID, p) {
+ spin_lock_irq(&p->sighand->siglock);
+ if (p->signal->tty == tty) {
+ p->signal->tty = NULL;
+ /* We defer the dereferences outside of
+ the tasklist lock */
+ refs++;
+ }
+ if (!p->signal->leader) {
+ spin_unlock_irq(&p->sighand->siglock);
+ continue;
+ }
+ __group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
+ __group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p);
+ put_pid(p->signal->tty_old_pgrp); /* A noop */
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ if (tty->pgrp)
+ p->signal->tty_old_pgrp = get_pid(tty->pgrp);
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ spin_unlock_irq(&p->sighand->siglock);
+ } while_each_pid_task(tty->session, PIDTYPE_SID, p);
+ }
+ read_unlock(&tasklist_lock);
+
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ clear_bit(TTY_THROTTLED, &tty->flags);
+ clear_bit(TTY_PUSH, &tty->flags);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ put_pid(tty->session);
+ put_pid(tty->pgrp);
+ tty->session = NULL;
+ tty->pgrp = NULL;
+ tty->ctrl_status = 0;
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
+ /* Account for the p->signal references we killed */
+ while (refs--)
+ tty_kref_put(tty);
+
+ /*
+ * If one of the devices matches a console pointer, we
+ * cannot just call hangup() because that will cause
+ * tty->count and state->count to go out of sync.
+ * So we just call close() the right number of times.
+ */
+ if (cons_filp) {
+ if (tty->ops->close)
+ for (n = 0; n < closecount; n++)
+ tty->ops->close(tty, cons_filp);
+ } else if (tty->ops->hangup)
+ (tty->ops->hangup)(tty);
+ /*
+ * We don't want to have driver/ldisc interactions beyond
+ * the ones we did here. The driver layer expects no
+ * calls after ->hangup() from the ldisc side. However we
+ * can't yet guarantee all that.
+ */
+ set_bit(TTY_HUPPED, &tty->flags);
+ clear_bit(TTY_HUPPING, &tty->flags);
+ tty_ldisc_enable(tty);
+
+ tty_unlock();
+
+ if (f)
+ fput(f);
+}
+
+static void do_tty_hangup(struct work_struct *work)
+{
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, hangup_work);
+
+ __tty_hangup(tty);
+}
+
+/**
+ * tty_hangup - trigger a hangup event
+ * @tty: tty to hangup
+ *
+ * A carrier loss (virtual or otherwise) has occurred on this line;
+ * schedule a hangup sequence to run after this event.
+ */
+
+void tty_hangup(struct tty_struct *tty)
+{
+#ifdef TTY_DEBUG_HANGUP
+ char buf[64];
+ printk(KERN_DEBUG "%s hangup...\n", tty_name(tty, buf));
+#endif
+ schedule_work(&tty->hangup_work);
+}
+
+EXPORT_SYMBOL(tty_hangup);
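+
+/*
+ * Illustrative sketch (hypothetical modem-status handler): drivers
+ * normally report a carrier drop by scheduling a hangup, unless CLOCAL
+ * asked them to ignore the modem control lines:
+ *
+ *      if (carrier_lost && !C_CLOCAL(tty))     (carrier_lost: hypothetical)
+ *              tty_hangup(tty);
+ */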
+
+/**
+ * tty_vhangup - process vhangup
+ * @tty: tty to hangup
+ *
+ * The user has asked via system call for the terminal to be hung up.
+ * We do this synchronously so that when the syscall returns the process
+ * is complete. That guarantee is necessary for security reasons.
+ */
+
+void tty_vhangup(struct tty_struct *tty)
+{
+#ifdef TTY_DEBUG_HANGUP
+ char buf[64];
+
+ printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
+#endif
+ __tty_hangup(tty);
+}
+
+EXPORT_SYMBOL(tty_vhangup);
+
+
+/**
+ * tty_vhangup_self - process vhangup for own ctty
+ *
+ * Perform a vhangup on the current controlling tty
+ */
+
+void tty_vhangup_self(void)
+{
+ struct tty_struct *tty;
+
+ tty = get_current_tty();
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+}
+
+/**
+ * tty_hung_up_p - was tty hung up
+ * @filp: file pointer of tty
+ *
+ * Return true if the tty has been subject to a vhangup or a carrier
+ * loss
+ */
+
+int tty_hung_up_p(struct file *filp)
+{
+ return (filp->f_op == &hung_up_tty_fops);
+}
+
+EXPORT_SYMBOL(tty_hung_up_p);
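+
+/*
+ * Illustrative sketch: line discipline read/write paths typically bail
+ * out once the file has been switched to the hung-up operations, e.g.
+ *
+ *      if (tty_hung_up_p(file))
+ *              break;          (or fail the call, as the ldisc prefers)
+ */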
+
+static void session_clear_tty(struct pid *session)
+{
+ struct task_struct *p;
+ do_each_pid_task(session, PIDTYPE_SID, p) {
+ proc_clear_tty(p);
+ } while_each_pid_task(session, PIDTYPE_SID, p);
+}
+
+/**
+ * disassociate_ctty - disconnect controlling tty
+ * @on_exit: true if exiting so need to "hang up" the session
+ *
+ * This function is typically called only by the session leader, when
+ * it wants to disassociate itself from its controlling tty.
+ *
+ * It performs the following functions:
+ * (1) Sends a SIGHUP and SIGCONT to the foreground process group
+ * (2) Clears the tty from being controlling the session
+ * (3) Clears the controlling tty for all processes in the
+ * session group.
+ *
+ * The argument on_exit is set to 1 if called when a process is
+ * exiting; it is 0 if called by the ioctl TIOCNOTTY.
+ *
+ * Locking:
+ * BTM is taken for hysterical raisins, and held when
+ * called from no_tty().
+ * tty_mutex is taken to protect tty
+ * ->siglock is taken to protect ->signal/->sighand
+ * tasklist_lock is taken to walk process list for sessions
+ * ->siglock is taken to protect ->signal/->sighand
+ */
+
+void disassociate_ctty(int on_exit)
+{
+ struct tty_struct *tty;
+ struct pid *tty_pgrp = NULL;
+
+ if (!current->signal->leader)
+ return;
+
+ tty = get_current_tty();
+ if (tty) {
+ tty_pgrp = get_pid(tty->pgrp);
+ if (on_exit) {
+ if (tty->driver->type != TTY_DRIVER_TYPE_PTY)
+ tty_vhangup(tty);
+ }
+ tty_kref_put(tty);
+ } else if (on_exit) {
+ struct pid *old_pgrp;
+ spin_lock_irq(&current->sighand->siglock);
+ old_pgrp = current->signal->tty_old_pgrp;
+ current->signal->tty_old_pgrp = NULL;
+ spin_unlock_irq(&current->sighand->siglock);
+ if (old_pgrp) {
+ kill_pgrp(old_pgrp, SIGHUP, on_exit);
+ kill_pgrp(old_pgrp, SIGCONT, on_exit);
+ put_pid(old_pgrp);
+ }
+ return;
+ }
+ if (tty_pgrp) {
+ kill_pgrp(tty_pgrp, SIGHUP, on_exit);
+ if (!on_exit)
+ kill_pgrp(tty_pgrp, SIGCONT, on_exit);
+ put_pid(tty_pgrp);
+ }
+
+ spin_lock_irq(&current->sighand->siglock);
+ put_pid(current->signal->tty_old_pgrp);
+ current->signal->tty_old_pgrp = NULL;
+ spin_unlock_irq(&current->sighand->siglock);
+
+ tty = get_current_tty();
+ if (tty) {
+ unsigned long flags;
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ put_pid(tty->session);
+ put_pid(tty->pgrp);
+ tty->session = NULL;
+ tty->pgrp = NULL;
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ tty_kref_put(tty);
+ } else {
+#ifdef TTY_DEBUG_HANGUP
+ printk(KERN_DEBUG "error attempted to write to tty [0x%p]"
+ " = NULL", tty);
+#endif
+ }
+
+ /* Now clear signal->tty under the lock */
+ read_lock(&tasklist_lock);
+ session_clear_tty(task_session(current));
+ read_unlock(&tasklist_lock);
+}
+
+/**
+ * no_tty - Ensure the current process does not have a controlling tty
+ */
+void no_tty(void)
+{
+ struct task_struct *tsk = current;
+ tty_lock();
+ disassociate_ctty(0);
+ tty_unlock();
+ proc_clear_tty(tsk);
+}
+
+
+/**
+ * stop_tty - propagate flow control
+ * @tty: tty to stop
+ *
+ * Perform flow control to the driver. For PTY/TTY pairs we
+ * must also propagate the TIOCPKT status. May be called
+ * on an already stopped device and will not re-call the driver
+ * method.
+ *
+ * This functionality is used by both the line disciplines for
+ * halting incoming flow and by the driver. It may therefore be
+ * called from any context, may be under the tty atomic_write_lock
+ * but not always.
+ *
+ * Locking:
+ * Uses the tty control lock internally
+ */
+
+void stop_tty(struct tty_struct *tty)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ if (tty->stopped) {
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ return;
+ }
+ tty->stopped = 1;
+ if (tty->link && tty->link->packet) {
+ tty->ctrl_status &= ~TIOCPKT_START;
+ tty->ctrl_status |= TIOCPKT_STOP;
+ wake_up_interruptible_poll(&tty->link->read_wait, POLLIN);
+ }
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ if (tty->ops->stop)
+ (tty->ops->stop)(tty);
+}
+
+EXPORT_SYMBOL(stop_tty);
+
+/**
+ * start_tty - propagate flow control
+ * @tty: tty to start
+ *
+ * Start a tty that has been stopped if at all possible. Perform
+ * any necessary wakeups and propagate the TIOCPKT status. If the
+ * tty was previously stopped and is now being started then the
+ * driver start method is invoked and the line discipline woken.
+ *
+ * Locking:
+ * ctrl_lock
+ */
+
+void start_tty(struct tty_struct *tty)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ if (!tty->stopped || tty->flow_stopped) {
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ return;
+ }
+ tty->stopped = 0;
+ if (tty->link && tty->link->packet) {
+ tty->ctrl_status &= ~TIOCPKT_STOP;
+ tty->ctrl_status |= TIOCPKT_START;
+ wake_up_interruptible_poll(&tty->link->read_wait, POLLIN);
+ }
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ if (tty->ops->start)
+ (tty->ops->start)(tty);
+ /* If we have a running line discipline it may need kicking */
+ tty_wakeup(tty);
+}
+
+EXPORT_SYMBOL(start_tty);
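+
+/*
+ * Illustrative sketch: stop_tty()/start_tty() implement software flow
+ * control. For example, a line discipline honouring IXON might react to
+ * the termios STOP/START characters with:
+ *
+ *      if (c == STOP_CHAR(tty))
+ *              stop_tty(tty);
+ *      else if (c == START_CHAR(tty))
+ *              start_tty(tty);
+ */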
+
+/**
+ * tty_read - read method for tty device files
+ * @file: pointer to tty file
+ * @buf: user buffer
+ * @count: size of user buffer
+ * @ppos: unused
+ *
+ * Perform the read system call function on this terminal device. Checks
+ * for hung up devices before calling the line discipline method.
+ *
+ * Locking:
+ * Locks the line discipline internally while needed. Multiple
+ * read calls may be outstanding in parallel.
+ */
+
+static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ int i;
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct tty_struct *tty = file_tty(file);
+ struct tty_ldisc *ld;
+
+ if (tty_paranoia_check(tty, inode, "tty_read"))
+ return -EIO;
+ if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
+ return -EIO;
+
+ /* We want to wait for the line discipline to sort out in this
+ situation */
+ ld = tty_ldisc_ref_wait(tty);
+ if (ld->ops->read)
+ i = (ld->ops->read)(tty, file, buf, count);
+ else
+ i = -EIO;
+ tty_ldisc_deref(ld);
+ if (i > 0)
+ inode->i_atime = current_fs_time(inode->i_sb);
+ return i;
+}
+
+void tty_write_unlock(struct tty_struct *tty)
+ __releases(&tty->atomic_write_lock)
+{
+ mutex_unlock(&tty->atomic_write_lock);
+ wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
+}
+
+int tty_write_lock(struct tty_struct *tty, int ndelay)
+ __acquires(&tty->atomic_write_lock)
+{
+ if (!mutex_trylock(&tty->atomic_write_lock)) {
+ if (ndelay)
+ return -EAGAIN;
+ if (mutex_lock_interruptible(&tty->atomic_write_lock))
+ return -ERESTARTSYS;
+ }
+ return 0;
+}
+
+/*
+ * Split writes up in sane blocksizes to avoid
+ * denial-of-service type attacks
+ */
+static inline ssize_t do_tty_write(
+ ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
+ struct tty_struct *tty,
+ struct file *file,
+ const char __user *buf,
+ size_t count)
+{
+ ssize_t ret, written = 0;
+ unsigned int chunk;
+
+ ret = tty_write_lock(tty, file->f_flags & O_NDELAY);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We chunk up writes into a temporary buffer. This
+ * simplifies low-level drivers immensely, since they
+ * don't have locking issues and user mode accesses.
+ *
+ * But if TTY_NO_WRITE_SPLIT is set, we should use a
+ * big chunk-size..
+ *
+ * The default chunk-size is 2kB, because the NTTY
+ * layer has problems with bigger chunks. It will
+ * claim to be able to handle more characters than
+ * it actually does.
+ *
+ * FIXME: This can probably go away now except that 64K chunks
+ * are too likely to fail unless switched to vmalloc...
+ */
+ chunk = 2048;
+ if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
+ chunk = 65536;
+ if (count < chunk)
+ chunk = count;
+
+ /* write_buf/write_cnt is protected by the atomic_write_lock mutex */
+ if (tty->write_cnt < chunk) {
+ unsigned char *buf_chunk;
+
+ if (chunk < 1024)
+ chunk = 1024;
+
+ buf_chunk = kmalloc(chunk, GFP_KERNEL);
+ if (!buf_chunk) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(tty->write_buf);
+ tty->write_cnt = chunk;
+ tty->write_buf = buf_chunk;
+ }
+
+ /* Do the write .. */
+ for (;;) {
+ size_t size = count;
+ if (size > chunk)
+ size = chunk;
+ ret = -EFAULT;
+ if (copy_from_user(tty->write_buf, buf, size))
+ break;
+ ret = write(tty, file, tty->write_buf, size);
+ if (ret <= 0)
+ break;
+ written += ret;
+ buf += ret;
+ count -= ret;
+ if (!count)
+ break;
+ ret = -ERESTARTSYS;
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
+ if (written) {
+ struct inode *inode = file->f_path.dentry->d_inode;
+ inode->i_mtime = current_fs_time(inode->i_sb);
+ ret = written;
+ }
+out:
+ tty_write_unlock(tty);
+ return ret;
+}
+
+/**
+ * tty_write_message - write a message to a certain tty, not just the console.
+ * @tty: the destination tty_struct
+ * @msg: the message to write
+ *
+ * This is used for messages that need to be redirected to a specific tty.
+ * We don't put it into the syslog queue right now; maybe in the future,
+ * if really needed.
+ *
+ * We must still hold the BTM and test the CLOSING flag for the moment.
+ */
+
+void tty_write_message(struct tty_struct *tty, char *msg)
+{
+ if (tty) {
+ mutex_lock(&tty->atomic_write_lock);
+ tty_lock();
+ if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) {
+ tty_unlock();
+ tty->ops->write(tty, msg, strlen(msg));
+ } else
+ tty_unlock();
+ tty_write_unlock(tty);
+ }
+ return;
+}
+
+
+/**
+ * tty_write - write method for tty device file
+ * @file: tty file pointer
+ * @buf: user data to write
+ * @count: bytes to write
+ * @ppos: unused
+ *
+ * Write data to a tty device via the line discipline.
+ *
+ * Locking:
+ * Locks the line discipline as required
+ * Writes to the tty driver are serialized by the atomic_write_lock
+ * and are then processed in chunks to the device. The line discipline
+ * write method will not be invoked in parallel for each device.
+ */
+
+static ssize_t tty_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct tty_struct *tty = file_tty(file);
+ struct tty_ldisc *ld;
+ ssize_t ret;
+
+ if (tty_paranoia_check(tty, inode, "tty_write"))
+ return -EIO;
+ if (!tty || !tty->ops->write ||
+ (test_bit(TTY_IO_ERROR, &tty->flags)))
+ return -EIO;
+ /* Short term debug to catch buggy drivers */
+ if (tty->ops->write_room == NULL)
+ printk(KERN_ERR "tty driver %s lacks a write_room method.\n",
+ tty->driver->name);
+ ld = tty_ldisc_ref_wait(tty);
+ if (!ld->ops->write)
+ ret = -EIO;
+ else
+ ret = do_tty_write(ld->ops->write, tty, file, buf, count);
+ tty_ldisc_deref(ld);
+ return ret;
+}
+
+ssize_t redirected_tty_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct file *p = NULL;
+
+ spin_lock(&redirect_lock);
+ if (redirect) {
+ get_file(redirect);
+ p = redirect;
+ }
+ spin_unlock(&redirect_lock);
+
+ if (p) {
+ ssize_t res;
+ res = vfs_write(p, buf, count, &p->f_pos);
+ fput(p);
+ return res;
+ }
+ return tty_write(file, buf, count, ppos);
+}
+
+static char ptychar[] = "pqrstuvwxyzabcde";
+
+/**
+ * pty_line_name - generate name for a pty
+ * @driver: the tty driver in use
+ * @index: the minor number
+ * @p: output buffer of at least 6 bytes
+ *
+ * Generate a name from a driver reference and write it to the output
+ * buffer.
+ *
+ * Locking: None
+ */
+static void pty_line_name(struct tty_driver *driver, int index, char *p)
+{
+ int i = index + driver->name_base;
+ /* ->name is initialized to "ttyp", but "tty" is expected */
+ sprintf(p, "%s%c%x",
+ driver->subtype == PTY_TYPE_SLAVE ? "tty" : driver->name,
+ ptychar[i >> 4 & 0xf], i & 0xf);
+}
+
+/**
+ * tty_line_name - generate name for a tty
+ * @driver: the tty driver in use
+ * @index: the minor number
+ * @p: output buffer of at least 7 bytes
+ *
+ * Generate a name from a driver reference and write it to the output
+ * buffer.
+ *
+ * Locking: None
+ */
+static void tty_line_name(struct tty_driver *driver, int index, char *p)
+{
+ sprintf(p, "%s%d", driver->name, index + driver->name_base);
+}
+
+/**
+ * tty_driver_lookup_tty() - find an existing tty, if any
+ * @driver: the driver for the tty
+ * @idx: the minor number
+ *
+ * Return the tty, if found or ERR_PTR() otherwise.
+ *
+ * Locking: tty_mutex must be held. If tty is found, the mutex must
+ * be held until the 'fast-open' is also done. Will change once we
+ * have refcounting in the driver and per driver locking
+ */
+static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
+ struct inode *inode, int idx)
+{
+ struct tty_struct *tty;
+
+ if (driver->ops->lookup)
+ return driver->ops->lookup(driver, inode, idx);
+
+ tty = driver->ttys[idx];
+ return tty;
+}
+
+/**
+ * tty_init_termios - helper for termios setup
+ * @tty: the tty to set up
+ *
+ * Initialise the termios structures for this tty. This runs under
+ * the tty_mutex currently so we can be relaxed about ordering.
+ */
+
+int tty_init_termios(struct tty_struct *tty)
+{
+ struct ktermios *tp;
+ int idx = tty->index;
+
+ tp = tty->driver->termios[idx];
+ if (tp == NULL) {
+ tp = kzalloc(sizeof(struct ktermios[2]), GFP_KERNEL);
+ if (tp == NULL)
+ return -ENOMEM;
+ memcpy(tp, &tty->driver->init_termios,
+ sizeof(struct ktermios));
+ tty->driver->termios[idx] = tp;
+ }
+ tty->termios = tp;
+ tty->termios_locked = tp + 1;
+
+ /* Compatibility until drivers always set this */
+ tty->termios->c_ispeed = tty_termios_input_baud_rate(tty->termios);
+ tty->termios->c_ospeed = tty_termios_baud_rate(tty->termios);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tty_init_termios);
+
+/**
+ * tty_driver_install_tty() - install a tty entry in the driver
+ * @driver: the driver for the tty
+ * @tty: the tty
+ *
+ * Install a tty object into the driver tables. The tty->index field
+ * will be set by the time this is called. This method is responsible
+ * for ensuring any needed additional structures are allocated and
+ * configured.
+ *
+ * Locking: tty_mutex for now
+ */
+static int tty_driver_install_tty(struct tty_driver *driver,
+ struct tty_struct *tty)
+{
+ int idx = tty->index;
+ int ret;
+
+ if (driver->ops->install) {
+ ret = driver->ops->install(driver, tty);
+ return ret;
+ }
+
+ if (tty_init_termios(tty) == 0) {
+ tty_driver_kref_get(driver);
+ tty->count++;
+ driver->ttys[idx] = tty;
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * tty_driver_remove_tty() - remove a tty from the driver tables
+ * @driver: the driver for the tty
+ * @idx: the minor number
+ *
+ * Remove a tty object from the driver tables. The tty->index field
+ * will be set by the time this is called.
+ *
+ * Locking: tty_mutex for now
+ */
+void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
+{
+ if (driver->ops->remove)
+ driver->ops->remove(driver, tty);
+ else
+ driver->ttys[tty->index] = NULL;
+}
+
+/*
+ * tty_reopen() - fast re-open of an open tty
+ * @tty - the tty to open
+ *
+ * Return 0 on success, -errno on error.
+ *
+ * Locking: tty_mutex must be held from the time the tty was found
+ * till this open completes.
+ */
+static int tty_reopen(struct tty_struct *tty)
+{
+ struct tty_driver *driver = tty->driver;
+
+ if (test_bit(TTY_CLOSING, &tty->flags) ||
+ test_bit(TTY_HUPPING, &tty->flags) ||
+ test_bit(TTY_LDISC_CHANGING, &tty->flags))
+ return -EIO;
+
+ if (driver->type == TTY_DRIVER_TYPE_PTY &&
+ driver->subtype == PTY_TYPE_MASTER) {
+ /*
+ * special case for PTY masters: only one open permitted,
+ * and the slave side open count is incremented as well.
+ */
+ if (tty->count)
+ return -EIO;
+
+ tty->link->count++;
+ }
+ tty->count++;
+ tty->driver = driver; /* N.B. why do this every time?? */
+
+ mutex_lock(&tty->ldisc_mutex);
+ WARN_ON(!test_bit(TTY_LDISC, &tty->flags));
+ mutex_unlock(&tty->ldisc_mutex);
+
+ return 0;
+}
+
+/**
+ * tty_init_dev - initialise a tty device
+ * @driver: tty driver we are opening a device on
+ * @idx: device index
+ * @ret_tty: returned tty structure
+ * @first_ok: ok to open a new device (used by ptmx)
+ *
+ * Prepare a tty device. This may not be a "new" clean device but
+ * could also be an active device. The pty drivers require special
+ * handling because of this.
+ *
+ * Locking:
+ * The function is called under the tty_mutex, which
+ * protects us from the tty struct or driver itself going away.
+ *
+ * On exit the tty device has the line discipline attached and
+ * a reference count of 1. If a pair was created for pty/tty use
+ * and the other was a pty master then it too has a reference count of 1.
+ *
+ * WSH 06/09/97: Rewritten to remove races and properly clean up after a
+ * failed open. The new code protects the open with a mutex, so it's
+ * really quite straightforward. The mutex locking can probably be
+ * relaxed for the (most common) case of reopening a tty.
+ */
+
+struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx,
+ int first_ok)
+{
+ struct tty_struct *tty;
+ int retval;
+
+ /* Check if pty master is being opened multiple times */
+ if (driver->subtype == PTY_TYPE_MASTER &&
+ (driver->flags & TTY_DRIVER_DEVPTS_MEM) && !first_ok) {
+ return ERR_PTR(-EIO);
+ }
+
+ /*
+ * First time open is complex, especially for PTY devices.
+ * This code guarantees that either everything succeeds and the
+ * TTY is ready for operation, or else the table slots are vacated
+ * and the allocated memory released. (Except that the termios
+ * and locked termios may be retained.)
+ */
+
+ if (!try_module_get(driver->owner))
+ return ERR_PTR(-ENODEV);
+
+ tty = alloc_tty_struct();
+ if (!tty) {
+ retval = -ENOMEM;
+ goto err_module_put;
+ }
+ initialize_tty_struct(tty, driver, idx);
+
+ retval = tty_driver_install_tty(driver, tty);
+ if (retval < 0)
+ goto err_deinit_tty;
+
+ /*
+ * Structures all installed ... call the ldisc open routines.
+ * If we fail here just call release_tty to clean up. No need
+ * to decrement the use counts, as release_tty doesn't care.
+ */
+ retval = tty_ldisc_setup(tty, tty->link);
+ if (retval)
+ goto err_release_tty;
+ return tty;
+
+err_deinit_tty:
+ deinitialize_tty_struct(tty);
+ free_tty_struct(tty);
+err_module_put:
+ module_put(driver->owner);
+ return ERR_PTR(retval);
+
+ /* call the tty release_tty routine to clean out this slot */
+err_release_tty:
+ if (printk_ratelimit())
+ printk(KERN_INFO "tty_init_dev: ldisc open failed, "
+ "clearing slot %d\n", idx);
+ release_tty(tty, idx);
+ return ERR_PTR(retval);
+}
+
+void tty_free_termios(struct tty_struct *tty)
+{
+ struct ktermios *tp;
+ int idx = tty->index;
+ /* Kill this flag and push into drivers for locking etc */
+ if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
+ /* FIXME: Locking on ->termios array */
+ tp = tty->termios;
+ tty->driver->termios[idx] = NULL;
+ kfree(tp);
+ }
+}
+EXPORT_SYMBOL(tty_free_termios);
+
+void tty_shutdown(struct tty_struct *tty)
+{
+ tty_driver_remove_tty(tty->driver, tty);
+ tty_free_termios(tty);
+}
+EXPORT_SYMBOL(tty_shutdown);
+
+/**
+ * release_one_tty - release tty structure memory
+ * @work: deferred-release work item of the tty we are obliterating
+ *
+ * Releases memory associated with a tty structure, and clears out the
+ * driver table slots. This function is called when a device is no longer
+ * in use. It also gets called when setup of a device fails.
+ *
+ * Locking:
+ * tty_mutex - sometimes only
+ * takes the file list lock internally when working on the list
+ * of ttys that the driver keeps.
+ *
+ * This method gets called from a work queue so that the driver private
+ * cleanup ops can sleep (needed for USB at least)
+ */
+static void release_one_tty(struct work_struct *work)
+{
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, hangup_work);
+ struct tty_driver *driver = tty->driver;
+
+ if (tty->ops->cleanup)
+ tty->ops->cleanup(tty);
+
+ tty->magic = 0;
+ tty_driver_kref_put(driver);
+ module_put(driver->owner);
+
+ spin_lock(&tty_files_lock);
+ list_del_init(&tty->tty_files);
+ spin_unlock(&tty_files_lock);
+
+ put_pid(tty->pgrp);
+ put_pid(tty->session);
+ free_tty_struct(tty);
+}
+
+static void queue_release_one_tty(struct kref *kref)
+{
+ struct tty_struct *tty = container_of(kref, struct tty_struct, kref);
+
+ if (tty->ops->shutdown)
+ tty->ops->shutdown(tty);
+ else
+ tty_shutdown(tty);
+
+ /* The hangup queue is now free so we can reuse it rather than
+ waste a chunk of memory for each port */
+ INIT_WORK(&tty->hangup_work, release_one_tty);
+ schedule_work(&tty->hangup_work);
+}
+
+/**
+ * tty_kref_put - release a tty kref
+ * @tty: tty device
+ *
+ * Release a reference to a tty device and if need be let the kref
+ * layer destruct the object for us
+ */
+
+void tty_kref_put(struct tty_struct *tty)
+{
+ if (tty)
+ kref_put(&tty->kref, queue_release_one_tty);
+}
+EXPORT_SYMBOL(tty_kref_put);
+
+/**
+ * release_tty - release tty structure memory
+ *
+ * Release both @tty and a possible linked partner (think pty pair),
+ * and decrement the refcount of the backing module.
+ *
+ * Locking:
+ * tty_mutex - sometimes only
+ * takes the file list lock internally when working on the list
+ * of ttys that the driver keeps.
+ * FIXME: should we require tty_mutex is held here ??
+ *
+ */
+static void release_tty(struct tty_struct *tty, int idx)
+{
+ /* This should always be true but check for the moment */
+ WARN_ON(tty->index != idx);
+
+ if (tty->link)
+ tty_kref_put(tty->link);
+ tty_kref_put(tty);
+}
+
+/**
+ * tty_release - vfs callback for close
+ * @inode: inode of tty
+ * @filp: file pointer for handle to tty
+ *
+ * Called when the last reference to each file handle on this tty is
+ * dropped. There may however be several such file handles per tty.
+ *
+ * Locking:
+ * Takes bkl. See tty_release_dev
+ *
+ * Even releasing the tty structures is a tricky business.. We have
+ * to be very careful that the structures are all released at the
+ * same time, as interrupts might otherwise get the wrong pointers.
+ *
+ * WSH 09/09/97: rewritten to avoid some nasty race conditions that could
+ * lead to double frees or releasing memory still in use.
+ */
+
+int tty_release(struct inode *inode, struct file *filp)
+{
+ struct tty_struct *tty = file_tty(filp);
+ struct tty_struct *o_tty;
+ int pty_master, tty_closing, o_tty_closing, do_sleep;
+ int devpts;
+ int idx;
+ char buf[64];
+
+ if (tty_paranoia_check(tty, inode, "tty_release_dev"))
+ return 0;
+
+ tty_lock();
+ check_tty_count(tty, "tty_release_dev");
+
+ __tty_fasync(-1, filp, 0);
+
+ idx = tty->index;
+ pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER);
+ devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
+ o_tty = tty->link;
+
+#ifdef TTY_PARANOIA_CHECK
+ if (idx < 0 || idx >= tty->driver->num) {
+ printk(KERN_DEBUG "tty_release_dev: bad idx when trying to "
+ "free (%s)\n", tty->name);
+ tty_unlock();
+ return 0;
+ }
+ if (!devpts) {
+ if (tty != tty->driver->ttys[idx]) {
+ tty_unlock();
+ printk(KERN_DEBUG "tty_release_dev: driver.table[%d] not tty "
+ "for (%s)\n", idx, tty->name);
+ return 0;
+ }
+ if (tty->termios != tty->driver->termios[idx]) {
+ tty_unlock();
+ printk(KERN_DEBUG "tty_release_dev: driver.termios[%d] not termios "
+ "for (%s)\n",
+ idx, tty->name);
+ return 0;
+ }
+ }
+#endif
+
+#ifdef TTY_DEBUG_HANGUP
+ printk(KERN_DEBUG "tty_release_dev of %s (tty count=%d)...",
+ tty_name(tty, buf), tty->count);
+#endif
+
+#ifdef TTY_PARANOIA_CHECK
+ if (tty->driver->other &&
+ !(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
+ if (o_tty != tty->driver->other->ttys[idx]) {
+ tty_unlock();
+ printk(KERN_DEBUG "tty_release_dev: other->table[%d] "
+ "not o_tty for (%s)\n",
+ idx, tty->name);
+ return 0 ;
+ }
+ if (o_tty->termios != tty->driver->other->termios[idx]) {
+ tty_unlock();
+ printk(KERN_DEBUG "tty_release_dev: other->termios[%d] "
+ "not o_termios for (%s)\n",
+ idx, tty->name);
+ return 0;
+ }
+ if (o_tty->link != tty) {
+ tty_unlock();
+ printk(KERN_DEBUG "tty_release_dev: bad pty pointers\n");
+ return 0;
+ }
+ }
+#endif
+ if (tty->ops->close)
+ tty->ops->close(tty, filp);
+
+ tty_unlock();
+ /*
+ * Sanity check: if tty->count is going to zero, there shouldn't be
+ * any waiters on tty->read_wait or tty->write_wait. We test the
+ * wait queues and kick everyone out _before_ actually starting to
+ * close. This ensures that we won't block while releasing the tty
+ * structure.
+ *
+ * The test for the o_tty closing is necessary, since the master and
+ * slave sides may close in any order. If the slave side closes out
+ * first, its count will be one, since the master side holds an open.
+ * Thus this test wouldn't be triggered at the time the slave closes,
+ * so we do it now.
+ *
+ * Note that it's possible for the tty to be opened again while we're
+ * flushing out waiters. By recalculating the closing flags before
+ * each iteration we avoid any problems.
+ */
+ while (1) {
+ /* Guard against races with tty->count changes elsewhere and
+ opens on /dev/tty */
+
+ mutex_lock(&tty_mutex);
+ tty_lock();
+ tty_closing = tty->count <= 1;
+ o_tty_closing = o_tty &&
+ (o_tty->count <= (pty_master ? 1 : 0));
+ do_sleep = 0;
+
+ if (tty_closing) {
+ if (waitqueue_active(&tty->read_wait)) {
+ wake_up_poll(&tty->read_wait, POLLIN);
+ do_sleep++;
+ }
+ if (waitqueue_active(&tty->write_wait)) {
+ wake_up_poll(&tty->write_wait, POLLOUT);
+ do_sleep++;
+ }
+ }
+ if (o_tty_closing) {
+ if (waitqueue_active(&o_tty->read_wait)) {
+ wake_up_poll(&o_tty->read_wait, POLLIN);
+ do_sleep++;
+ }
+ if (waitqueue_active(&o_tty->write_wait)) {
+ wake_up_poll(&o_tty->write_wait, POLLOUT);
+ do_sleep++;
+ }
+ }
+ if (!do_sleep)
+ break;
+
+ printk(KERN_WARNING "tty_release_dev: %s: read/write wait queue "
+ "active!\n", tty_name(tty, buf));
+ tty_unlock();
+ mutex_unlock(&tty_mutex);
+ schedule();
+ }
+
+ /*
+ * The closing flags are now consistent with the open counts on
+ * both sides, and we've completed the last operation that could
+ * block, so it's safe to proceed with closing.
+ */
+ if (pty_master) {
+ if (--o_tty->count < 0) {
+ printk(KERN_WARNING "tty_release_dev: bad pty slave count "
+ "(%d) for %s\n",
+ o_tty->count, tty_name(o_tty, buf));
+ o_tty->count = 0;
+ }
+ }
+ if (--tty->count < 0) {
+ printk(KERN_WARNING "tty_release_dev: bad tty->count (%d) for %s\n",
+ tty->count, tty_name(tty, buf));
+ tty->count = 0;
+ }
+
+ /*
+ * We've decremented tty->count, so we need to remove this file
+ * descriptor off the tty->tty_files list; this serves two
+ * purposes:
+ * - check_tty_count sees the correct number of file descriptors
+ * associated with this tty.
+ * - do_tty_hangup no longer sees this file descriptor as
+ * something that needs to be handled for hangups.
+ */
+ tty_del_file(filp);
+
+ /*
+ * Perform some housekeeping before deciding whether to return.
+ *
+ * Set the TTY_CLOSING flag if this was the last open. In the
+ * case of a pty we may have to wait around for the other side
+ * to close, and TTY_CLOSING makes sure we can't be reopened.
+ */
+ if (tty_closing)
+ set_bit(TTY_CLOSING, &tty->flags);
+ if (o_tty_closing)
+ set_bit(TTY_CLOSING, &o_tty->flags);
+
+ /*
+ * If _either_ side is closing, make sure there aren't any
+ * processes that still think tty or o_tty is their controlling
+ * tty.
+ */
+ if (tty_closing || o_tty_closing) {
+ read_lock(&tasklist_lock);
+ session_clear_tty(tty->session);
+ if (o_tty)
+ session_clear_tty(o_tty->session);
+ read_unlock(&tasklist_lock);
+ }
+
+ mutex_unlock(&tty_mutex);
+
+ /* check whether both sides are closing ... */
+ if (!tty_closing || (o_tty && !o_tty_closing)) {
+ tty_unlock();
+ return 0;
+ }
+
+#ifdef TTY_DEBUG_HANGUP
+ printk(KERN_DEBUG "freeing tty structure...");
+#endif
+ /*
+ * Ask the line discipline code to release its structures
+ */
+ tty_ldisc_release(tty, o_tty);
+ /*
+ * The release_tty function takes care of the details of clearing
+ * the slots and preserving the termios structure.
+ */
+ release_tty(tty, idx);
+
+ /* Make this pty number available for reallocation */
+ if (devpts)
+ devpts_kill_index(inode, idx);
+ tty_unlock();
+ return 0;
+}
+
+/**
+ * tty_open - open a tty device
+ * @inode: inode of device file
+ * @filp: file pointer to tty
+ *
+ * tty_open and tty_release keep up the tty count that contains the
+ * number of opens done on a tty. We cannot use the inode-count, as
+ * different inodes might point to the same tty.
+ *
+ * Open-counting is needed for pty masters, as well as for keeping
+ * track of serial lines: DTR is dropped when the last close happens.
+ * (This is not done solely through tty->count, now. - Ted 1/27/92)
+ *
+ * The termios state of a pty is reset on first open so that
+ * settings don't persist across reuse.
+ *
+ * Locking: tty_mutex protects tty, get_tty_driver and tty_init_dev work.
+ * tty->count should protect the rest.
+ * ->siglock protects ->signal/->sighand
+ */
+
+static int tty_open(struct inode *inode, struct file *filp)
+{
+ struct tty_struct *tty = NULL;
+ int noctty, retval;
+ struct tty_driver *driver;
+ int index;
+ dev_t device = inode->i_rdev;
+ unsigned saved_flags = filp->f_flags;
+
+ nonseekable_open(inode, filp);
+
+retry_open:
+ retval = tty_alloc_file(filp);
+ if (retval)
+ return -ENOMEM;
+
+ noctty = filp->f_flags & O_NOCTTY;
+ index = -1;
+ retval = 0;
+
+ mutex_lock(&tty_mutex);
+ tty_lock();
+
+ if (device == MKDEV(TTYAUX_MAJOR, 0)) {
+ tty = get_current_tty();
+ if (!tty) {
+ tty_unlock();
+ mutex_unlock(&tty_mutex);
+ tty_free_file(filp);
+ return -ENXIO;
+ }
+ driver = tty_driver_kref_get(tty->driver);
+ index = tty->index;
+ filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
+ /* noctty = 1; */
+ /* FIXME: Should we take a driver reference ? */
+ tty_kref_put(tty);
+ goto got_driver;
+ }
+#ifdef CONFIG_VT
+ if (device == MKDEV(TTY_MAJOR, 0)) {
+ extern struct tty_driver *console_driver;
+ driver = tty_driver_kref_get(console_driver);
+ index = fg_console;
+ noctty = 1;
+ goto got_driver;
+ }
+#endif
+ if (device == MKDEV(TTYAUX_MAJOR, 1)) {
+ struct tty_driver *console_driver = console_device(&index);
+ if (console_driver) {
+ driver = tty_driver_kref_get(console_driver);
+ if (driver) {
+ /* Don't let /dev/console block */
+ filp->f_flags |= O_NONBLOCK;
+ noctty = 1;
+ goto got_driver;
+ }
+ }
+ tty_unlock();
+ mutex_unlock(&tty_mutex);
+ tty_free_file(filp);
+ return -ENODEV;
+ }
+
+ driver = get_tty_driver(device, &index);
+ if (!driver) {
+ tty_unlock();
+ mutex_unlock(&tty_mutex);
+ tty_free_file(filp);
+ return -ENODEV;
+ }
+got_driver:
+ if (!tty) {
+ /* check whether we're reopening an existing tty */
+ tty = tty_driver_lookup_tty(driver, inode, index);
+
+ if (IS_ERR(tty)) {
+ tty_unlock();
+ mutex_unlock(&tty_mutex);
+ tty_driver_kref_put(driver);
+ tty_free_file(filp);
+ return PTR_ERR(tty);
+ }
+ }
+
+ if (tty) {
+ retval = tty_reopen(tty);
+ if (retval)
+ tty = ERR_PTR(retval);
+ } else
+ tty = tty_init_dev(driver, index, 0);
+
+ mutex_unlock(&tty_mutex);
+ tty_driver_kref_put(driver);
+ if (IS_ERR(tty)) {
+ tty_unlock();
+ tty_free_file(filp);
+ return PTR_ERR(tty);
+ }
+
+ tty_add_file(tty, filp);
+
+ check_tty_count(tty, "tty_open");
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER)
+ noctty = 1;
+#ifdef TTY_DEBUG_HANGUP
+ printk(KERN_DEBUG "opening %s...", tty->name);
+#endif
+ if (tty->ops->open)
+ retval = tty->ops->open(tty, filp);
+ else
+ retval = -ENODEV;
+ filp->f_flags = saved_flags;
+
+ if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) &&
+ !capable(CAP_SYS_ADMIN))
+ retval = -EBUSY;
+
+ if (retval) {
+#ifdef TTY_DEBUG_HANGUP
+ printk(KERN_DEBUG "error %d in opening %s...", retval,
+ tty->name);
+#endif
+ tty_unlock(); /* need to call tty_release without BTM */
+ tty_release(inode, filp);
+ if (retval != -ERESTARTSYS)
+ return retval;
+
+ if (signal_pending(current))
+ return retval;
+
+ schedule();
+ /*
+ * Need to reset f_op in case a hangup happened.
+ */
+ tty_lock();
+ if (filp->f_op == &hung_up_tty_fops)
+ filp->f_op = &tty_fops;
+ tty_unlock();
+ goto retry_open;
+ }
+ tty_unlock();
+
+
+ mutex_lock(&tty_mutex);
+ tty_lock();
+ spin_lock_irq(&current->sighand->siglock);
+ if (!noctty &&
+ current->signal->leader &&
+ !current->signal->tty &&
+ tty->session == NULL)
+ __proc_set_tty(current, tty);
+ spin_unlock_irq(&current->sighand->siglock);
+ tty_unlock();
+ mutex_unlock(&tty_mutex);
+ return 0;
+}
+
+
+
+/**
+ * tty_poll - check tty status
+ * @filp: file being polled
+ * @wait: poll wait structures to update
+ *
+ * Call the line discipline polling method to obtain the poll
+ * status of the device.
+ *
+ * Locking: locks the called line discipline, but the ldisc poll method
+ *	may be re-entered freely by other callers.
+ */
+
+static unsigned int tty_poll(struct file *filp, poll_table *wait)
+{
+ struct tty_struct *tty = file_tty(filp);
+ struct tty_ldisc *ld;
+ int ret = 0;
+
+ if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll"))
+ return 0;
+
+ ld = tty_ldisc_ref_wait(tty);
+ if (ld->ops->poll)
+ ret = (ld->ops->poll)(tty, filp, wait);
+ tty_ldisc_deref(ld);
+ return ret;
+}
+
+static int __tty_fasync(int fd, struct file *filp, int on)
+{
+ struct tty_struct *tty = file_tty(filp);
+ unsigned long flags;
+ int retval = 0;
+
+ if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
+ goto out;
+
+ retval = fasync_helper(fd, filp, on, &tty->fasync);
+ if (retval <= 0)
+ goto out;
+
+ if (on) {
+ enum pid_type type;
+ struct pid *pid;
+ if (!waitqueue_active(&tty->read_wait))
+ tty->minimum_to_wake = 1;
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ if (tty->pgrp) {
+ pid = tty->pgrp;
+ type = PIDTYPE_PGID;
+ } else {
+ pid = task_pid(current);
+ type = PIDTYPE_PID;
+ }
+ get_pid(pid);
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ retval = __f_setown(filp, pid, type, 0);
+ put_pid(pid);
+ if (retval)
+ goto out;
+ } else {
+ if (!tty->fasync && !waitqueue_active(&tty->read_wait))
+ tty->minimum_to_wake = N_TTY_BUF_SIZE;
+ }
+ retval = 0;
+out:
+ return retval;
+}
+
+static int tty_fasync(int fd, struct file *filp, int on)
+{
+ int retval;
+ tty_lock();
+ retval = __tty_fasync(fd, filp, on);
+ tty_unlock();
+ return retval;
+}
+
+/**
+ * tiocsti - fake input character
+ * @tty: tty to fake input into
+ * @p: pointer to character
+ *
+ * Fake input to a tty device. Does the necessary locking and
+ * input management.
+ *
+ * FIXME: does not honour flow control ??
+ *
+ * Locking:
+ * Called functions take tty_ldisc_lock
+ * current->signal->tty check is safe without locks
+ *
+ * FIXME: may race normal receive processing
+ */
+
+static int tiocsti(struct tty_struct *tty, char __user *p)
+{
+ char ch, mbz = 0;
+ struct tty_ldisc *ld;
+
+ if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ch, p))
+ return -EFAULT;
+ tty_audit_tiocsti(tty, ch);
+ ld = tty_ldisc_ref_wait(tty);
+ ld->ops->receive_buf(tty, &ch, &mbz, 1);
+ tty_ldisc_deref(ld);
+ return 0;
+}
+
+/**
+ * tiocgwinsz - implement window query ioctl
+ * @tty: tty
+ * @arg: user buffer for result
+ *
+ * Copies the kernel idea of the window size into the user buffer.
+ *
+ * Locking: tty->termios_mutex is taken to ensure the winsize data
+ * is consistent.
+ */
+
+static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
+{
+ int err;
+
+ mutex_lock(&tty->termios_mutex);
+ err = copy_to_user(arg, &tty->winsize, sizeof(*arg));
+ mutex_unlock(&tty->termios_mutex);
+
+ return err ? -EFAULT: 0;
+}
+
+/**
+ * tty_do_resize - resize event
+ * @tty: tty being resized
+ * @ws: new window size to apply
+ *
+ * Update the termios variables and send the necessary signals to
+ * perform a terminal resize correctly
+ */
+
+int tty_do_resize(struct tty_struct *tty, struct winsize *ws)
+{
+ struct pid *pgrp;
+ unsigned long flags;
+
+ /* Lock the tty */
+ mutex_lock(&tty->termios_mutex);
+ if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
+ goto done;
+ /* Get the PID values and reference them so we can
+ avoid holding the tty ctrl lock while sending signals */
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ pgrp = get_pid(tty->pgrp);
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
+ if (pgrp)
+ kill_pgrp(pgrp, SIGWINCH, 1);
+ put_pid(pgrp);
+
+ tty->winsize = *ws;
+done:
+ mutex_unlock(&tty->termios_mutex);
+ return 0;
+}
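+
+/*
+ * Illustrative sketch (hypothetical driver code, guarded by #if 0 so it is
+ * never compiled): a driver that needs to validate sizes would do so in its
+ * ->resize() method and then let tty_do_resize() store the winsize and
+ * deliver SIGWINCH.
+ */
+#if 0
+static int example_resize(struct tty_struct *tty, struct winsize *ws)
+{
+	/* reject sizes the imaginary hardware cannot display */
+	if (ws->ws_col > 132 || ws->ws_row > 60)
+		return -EINVAL;
+	return tty_do_resize(tty, ws);
+}
+#endif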
+
+/**
+ * tiocswinsz - implement window size set ioctl
+ * @tty: tty side of tty
+ * @arg: user buffer for result
+ *
+ * Copies the user idea of the window size to the kernel. Traditionally
+ * this is just advisory information but for the Linux console it
+ * actually has driver level meaning and triggers a VC resize.
+ *
+ * Locking:
+ * Driver dependent. The default do_resize method takes the
+ * tty termios mutex and ctrl_lock. The console takes its own lock
+ * then calls into the default method.
+ */
+
+static int tiocswinsz(struct tty_struct *tty, struct winsize __user *arg)
+{
+ struct winsize tmp_ws;
+ if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
+ return -EFAULT;
+
+ if (tty->ops->resize)
+ return tty->ops->resize(tty, &tmp_ws);
+ else
+ return tty_do_resize(tty, &tmp_ws);
+}
+
+/**
+ * tioccons - allow admin to move logical console
+ * @file: the file to become console
+ *
+ * Allow the administrator to move the redirected console device
+ *
+ * Locking: uses redirect_lock to guard the redirect information
+ */
+
+static int tioccons(struct file *file)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (file->f_op->write == redirected_tty_write) {
+ struct file *f;
+ spin_lock(&redirect_lock);
+ f = redirect;
+ redirect = NULL;
+ spin_unlock(&redirect_lock);
+ if (f)
+ fput(f);
+ return 0;
+ }
+ spin_lock(&redirect_lock);
+ if (redirect) {
+ spin_unlock(&redirect_lock);
+ return -EBUSY;
+ }
+ get_file(file);
+ redirect = file;
+ spin_unlock(&redirect_lock);
+ return 0;
+}
+
+/**
+ * fionbio - non blocking ioctl
+ * @file: file to set blocking value
+ * @p: user parameter
+ *
+ * Historical tty interfaces had a blocking control ioctl before
+ * the generic functionality existed. This piece of history is preserved
+ * in the expected tty API of POSIX operating systems.
+ *
+ * Locking: none, the open file handle ensures it won't go away.
+ */
+
+static int fionbio(struct file *file, int __user *p)
+{
+ int nonblock;
+
+ if (get_user(nonblock, p))
+ return -EFAULT;
+
+ spin_lock(&file->f_lock);
+ if (nonblock)
+ file->f_flags |= O_NONBLOCK;
+ else
+ file->f_flags &= ~O_NONBLOCK;
+ spin_unlock(&file->f_lock);
+ return 0;
+}
+
+/**
+ * tiocsctty - set controlling tty
+ * @tty: tty structure
+ * @arg: user argument
+ *
+ * This ioctl is used to manage job control. It permits a session
+ * leader to set this tty as the controlling tty for the session.
+ *
+ * Locking:
+ *		Takes tty_mutex to protect the tty instance
+ *		Takes tasklist_lock internally to walk sessions
+ *		Takes ->siglock when updating signal->tty
+ */
+
+static int tiocsctty(struct tty_struct *tty, int arg)
+{
+ int ret = 0;
+ if (current->signal->leader && (task_session(current) == tty->session))
+ return ret;
+
+ mutex_lock(&tty_mutex);
+ /*
+ * The process must be a session leader and
+ * not have a controlling tty already.
+ */
+ if (!current->signal->leader || current->signal->tty) {
+ ret = -EPERM;
+ goto unlock;
+ }
+
+ if (tty->session) {
+ /*
+ * This tty is already the controlling
+ * tty for another session group!
+ */
+ if (arg == 1 && capable(CAP_SYS_ADMIN)) {
+ /*
+ * Steal it away
+ */
+ read_lock(&tasklist_lock);
+ session_clear_tty(tty->session);
+ read_unlock(&tasklist_lock);
+ } else {
+ ret = -EPERM;
+ goto unlock;
+ }
+ }
+ proc_set_tty(current, tty);
+unlock:
+ mutex_unlock(&tty_mutex);
+ return ret;
+}
+
+/**
+ * tty_get_pgrp - return a ref counted pgrp pid
+ * @tty: tty to read
+ *
+ * Returns a refcounted instance of the pid struct for the process
+ * group controlling the tty.
+ */
+
+struct pid *tty_get_pgrp(struct tty_struct *tty)
+{
+ unsigned long flags;
+ struct pid *pgrp;
+
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ pgrp = get_pid(tty->pgrp);
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
+ return pgrp;
+}
+EXPORT_SYMBOL_GPL(tty_get_pgrp);
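+
+/*
+ * Illustrative sketch (hypothetical, never compiled): code that wants to
+ * signal the foreground process group should take a reference with
+ * tty_get_pgrp() rather than signalling while holding the ctrl_lock.
+ */
+#if 0
+static void example_notify_fg(struct tty_struct *tty)
+{
+	struct pid *pgrp = tty_get_pgrp(tty);	/* counted reference or NULL */
+
+	if (pgrp)
+		kill_pgrp(pgrp, SIGWINCH, 1);
+	put_pid(pgrp);				/* put_pid(NULL) is a no-op */
+}
+#endif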
+
+/**
+ * tiocgpgrp - get process group
+ * @tty: tty passed by user
+ * @real_tty: tty side of the tty passed by the user if a pty else the tty
+ * @p: returned pid
+ *
+ * Obtain the process group of the tty. If there is no process group
+ * return an error.
+ *
+ * Locking: none. Reference to current->signal->tty is safe.
+ */
+
+static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+ struct pid *pid;
+ int ret;
+ /*
+ * (tty == real_tty) is a cheap way of
+ * testing if the tty is NOT a master pty.
+ */
+ if (tty == real_tty && current->signal->tty != real_tty)
+ return -ENOTTY;
+ pid = tty_get_pgrp(real_tty);
+ ret = put_user(pid_vnr(pid), p);
+ put_pid(pid);
+ return ret;
+}
+
+/**
+ * tiocspgrp - attempt to set process group
+ * @tty: tty passed by user
+ * @real_tty: tty side device matching tty passed by user
+ * @p: pid pointer
+ *
+ *	Set the process group of the tty to the process group passed. Only
+ * permitted where the tty session is our session.
+ *
+ * Locking: RCU, ctrl lock
+ */
+
+static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+ struct pid *pgrp;
+ pid_t pgrp_nr;
+ int retval = tty_check_change(real_tty);
+ unsigned long flags;
+
+ if (retval == -EIO)
+ return -ENOTTY;
+ if (retval)
+ return retval;
+ if (!current->signal->tty ||
+ (current->signal->tty != real_tty) ||
+ (real_tty->session != task_session(current)))
+ return -ENOTTY;
+ if (get_user(pgrp_nr, p))
+ return -EFAULT;
+ if (pgrp_nr < 0)
+ return -EINVAL;
+ rcu_read_lock();
+ pgrp = find_vpid(pgrp_nr);
+ retval = -ESRCH;
+ if (!pgrp)
+ goto out_unlock;
+ retval = -EPERM;
+ if (session_of_pgrp(pgrp) != task_session(current))
+ goto out_unlock;
+ retval = 0;
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ put_pid(real_tty->pgrp);
+ real_tty->pgrp = get_pid(pgrp);
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+out_unlock:
+ rcu_read_unlock();
+ return retval;
+}
+
+/**
+ * tiocgsid - get session id
+ * @tty: tty passed by user
+ * @real_tty: tty side of the tty passed by the user if a pty else the tty
+ * @p: pointer to returned session id
+ *
+ * Obtain the session id of the tty. If there is no session
+ * return an error.
+ *
+ * Locking: none. Reference to current->signal->tty is safe.
+ */
+
+static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+ /*
+ * (tty == real_tty) is a cheap way of
+ * testing if the tty is NOT a master pty.
+ */
+ if (tty == real_tty && current->signal->tty != real_tty)
+ return -ENOTTY;
+ if (!real_tty->session)
+ return -ENOTTY;
+ return put_user(pid_vnr(real_tty->session), p);
+}
+
+/**
+ * tiocsetd - set line discipline
+ * @tty: tty device
+ * @p: pointer to user data
+ *
+ * Set the line discipline according to user request.
+ *
+ * Locking: see tty_set_ldisc, this function is just a helper
+ */
+
+static int tiocsetd(struct tty_struct *tty, int __user *p)
+{
+ int ldisc;
+ int ret;
+
+ if (get_user(ldisc, p))
+ return -EFAULT;
+
+ ret = tty_set_ldisc(tty, ldisc);
+
+ return ret;
+}
+
+/**
+ *	send_break	-	perform a timed break
+ *	@tty: device to break on
+ *	@duration: break duration in milliseconds
+ *
+ * Perform a timed break on hardware that lacks its own driver level
+ * timed break functionality.
+ *
+ * Locking:
+ * atomic_write_lock serializes
+ *
+ */
+
+static int send_break(struct tty_struct *tty, unsigned int duration)
+{
+ int retval;
+
+ if (tty->ops->break_ctl == NULL)
+ return 0;
+
+ if (tty->driver->flags & TTY_DRIVER_HARDWARE_BREAK)
+ retval = tty->ops->break_ctl(tty, duration);
+ else {
+ /* Do the work ourselves */
+ if (tty_write_lock(tty, 0) < 0)
+ return -EINTR;
+ retval = tty->ops->break_ctl(tty, -1);
+ if (retval)
+ goto out;
+ if (!signal_pending(current))
+ msleep_interruptible(duration);
+ retval = tty->ops->break_ctl(tty, 0);
+out:
+ tty_write_unlock(tty);
+ if (signal_pending(current))
+ retval = -EINTR;
+ }
+ return retval;
+}
+
+/**
+ * tty_tiocmget - get modem status
+ * @tty: tty device
+ * @file: user file pointer
+ * @p: pointer to result
+ *
+ * Obtain the modem status bits from the tty driver if the feature
+ * is supported. Return -EINVAL if it is not available.
+ *
+ * Locking: none (up to the driver)
+ */
+
+static int tty_tiocmget(struct tty_struct *tty, int __user *p)
+{
+ int retval = -EINVAL;
+
+ if (tty->ops->tiocmget) {
+ retval = tty->ops->tiocmget(tty);
+
+ if (retval >= 0)
+ retval = put_user(retval, p);
+ }
+ return retval;
+}
+
+/**
+ * tty_tiocmset - set modem status
+ * @tty: tty device
+ * @cmd: command - clear bits, set bits or set all
+ * @p: pointer to desired bits
+ *
+ * Set the modem status bits from the tty driver if the feature
+ * is supported. Return -EINVAL if it is not available.
+ *
+ * Locking: none (up to the driver)
+ */
+
+static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
+ unsigned __user *p)
+{
+ int retval;
+ unsigned int set, clear, val;
+
+ if (tty->ops->tiocmset == NULL)
+ return -EINVAL;
+
+ retval = get_user(val, p);
+ if (retval)
+ return retval;
+ set = clear = 0;
+ switch (cmd) {
+ case TIOCMBIS:
+ set = val;
+ break;
+ case TIOCMBIC:
+ clear = val;
+ break;
+ case TIOCMSET:
+ set = val;
+ clear = ~val;
+ break;
+ }
+ set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
+ clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
+ return tty->ops->tiocmset(tty, set, clear);
+}
+
+static int tty_tiocgicount(struct tty_struct *tty, void __user *arg)
+{
+ int retval = -EINVAL;
+ struct serial_icounter_struct icount;
+ memset(&icount, 0, sizeof(icount));
+ if (tty->ops->get_icount)
+ retval = tty->ops->get_icount(tty, &icount);
+ if (retval != 0)
+ return retval;
+ if (copy_to_user(arg, &icount, sizeof(icount)))
+ return -EFAULT;
+ return 0;
+}
+
+struct tty_struct *tty_pair_get_tty(struct tty_struct *tty)
+{
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER)
+ tty = tty->link;
+ return tty;
+}
+EXPORT_SYMBOL(tty_pair_get_tty);
+
+struct tty_struct *tty_pair_get_pty(struct tty_struct *tty)
+{
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER)
+ return tty;
+ return tty->link;
+}
+EXPORT_SYMBOL(tty_pair_get_pty);
+
+/*
+ * Split this up, as gcc can choke on it otherwise..
+ */
+long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct tty_struct *tty = file_tty(file);
+ struct tty_struct *real_tty;
+ void __user *p = (void __user *)arg;
+ int retval;
+ struct tty_ldisc *ld;
+ struct inode *inode = file->f_dentry->d_inode;
+
+ if (tty_paranoia_check(tty, inode, "tty_ioctl"))
+ return -EINVAL;
+
+ real_tty = tty_pair_get_tty(tty);
+
+ /*
+ * Factor out some common prep work
+ */
+ switch (cmd) {
+ case TIOCSETD:
+ case TIOCSBRK:
+ case TIOCCBRK:
+ case TCSBRK:
+ case TCSBRKP:
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ if (cmd != TIOCCBRK) {
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ break;
+ }
+
+ /*
+ * Now do the stuff.
+ */
+ switch (cmd) {
+ case TIOCSTI:
+ return tiocsti(tty, p);
+ case TIOCGWINSZ:
+ return tiocgwinsz(real_tty, p);
+ case TIOCSWINSZ:
+ return tiocswinsz(real_tty, p);
+ case TIOCCONS:
+ return real_tty != tty ? -EINVAL : tioccons(file);
+ case FIONBIO:
+ return fionbio(file, p);
+ case TIOCEXCL:
+ set_bit(TTY_EXCLUSIVE, &tty->flags);
+ return 0;
+ case TIOCNXCL:
+ clear_bit(TTY_EXCLUSIVE, &tty->flags);
+ return 0;
+ case TIOCNOTTY:
+ if (current->signal->tty != tty)
+ return -ENOTTY;
+ no_tty();
+ return 0;
+ case TIOCSCTTY:
+ return tiocsctty(tty, arg);
+ case TIOCGPGRP:
+ return tiocgpgrp(tty, real_tty, p);
+ case TIOCSPGRP:
+ return tiocspgrp(tty, real_tty, p);
+ case TIOCGSID:
+ return tiocgsid(tty, real_tty, p);
+ case TIOCGETD:
+ return put_user(tty->ldisc->ops->num, (int __user *)p);
+ case TIOCSETD:
+ return tiocsetd(tty, p);
+ case TIOCVHANGUP:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ tty_vhangup(tty);
+ return 0;
+ case TIOCGDEV:
+ {
+ unsigned int ret = new_encode_dev(tty_devnum(real_tty));
+ return put_user(ret, (unsigned int __user *)p);
+ }
+ /*
+ * Break handling
+ */
+ case TIOCSBRK: /* Turn break on, unconditionally */
+ if (tty->ops->break_ctl)
+ return tty->ops->break_ctl(tty, -1);
+ return 0;
+ case TIOCCBRK: /* Turn break off, unconditionally */
+ if (tty->ops->break_ctl)
+ return tty->ops->break_ctl(tty, 0);
+ return 0;
+ case TCSBRK: /* SVID version: non-zero arg --> no break */
+ /* non-zero arg means wait for all output data
+ * to be sent (performed above) but don't send break.
+ * This is used by the tcdrain() termios function.
+ */
+ if (!arg)
+ return send_break(tty, 250);
+ return 0;
+ case TCSBRKP: /* support for POSIX tcsendbreak() */
+ return send_break(tty, arg ? arg*100 : 250);
+
+ case TIOCMGET:
+ return tty_tiocmget(tty, p);
+ case TIOCMSET:
+ case TIOCMBIC:
+ case TIOCMBIS:
+ return tty_tiocmset(tty, cmd, p);
+ case TIOCGICOUNT:
+ retval = tty_tiocgicount(tty, p);
+ /* For the moment allow fall through to the old method */
+ if (retval != -EINVAL)
+ return retval;
+ break;
+ case TCFLSH:
+ switch (arg) {
+ case TCIFLUSH:
+ case TCIOFLUSH:
+ /* flush tty buffer and allow ldisc to process ioctl */
+ tty_buffer_flush(tty);
+ break;
+ }
+ break;
+ }
+ if (tty->ops->ioctl) {
+ retval = (tty->ops->ioctl)(tty, cmd, arg);
+ if (retval != -ENOIOCTLCMD)
+ return retval;
+ }
+ ld = tty_ldisc_ref_wait(tty);
+ retval = -EINVAL;
+ if (ld->ops->ioctl) {
+ retval = ld->ops->ioctl(tty, file, cmd, arg);
+ if (retval == -ENOIOCTLCMD)
+ retval = -EINVAL;
+ }
+ tty_ldisc_deref(ld);
+ return retval;
+}
+
+#ifdef CONFIG_COMPAT
+static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct tty_struct *tty = file_tty(file);
+ struct tty_ldisc *ld;
+ int retval = -ENOIOCTLCMD;
+
+ if (tty_paranoia_check(tty, inode, "tty_ioctl"))
+ return -EINVAL;
+
+ if (tty->ops->compat_ioctl) {
+ retval = (tty->ops->compat_ioctl)(tty, cmd, arg);
+ if (retval != -ENOIOCTLCMD)
+ return retval;
+ }
+
+ ld = tty_ldisc_ref_wait(tty);
+ if (ld->ops->compat_ioctl)
+ retval = ld->ops->compat_ioctl(tty, file, cmd, arg);
+ tty_ldisc_deref(ld);
+
+ return retval;
+}
+#endif
+
+/*
+ * This implements the "Secure Attention Key" --- the idea is to
+ * prevent trojan horses by killing all processes associated with this
+ * tty when the user hits the "Secure Attention Key". Required for
+ * super-paranoid applications --- see the Orange Book for more details.
+ *
+ * This code could be nicer; ideally it should send a HUP, wait a few
+ * seconds, then send a INT, and then a KILL signal. But you then
+ * have to coordinate with the init process, since all processes associated
+ * with the current tty must be dead before the new getty is allowed
+ * to spawn.
+ *
+ * Now, if only it were correct ;-/ The current code has a nasty hole -
+ * it doesn't catch files in flight. We may send the descriptor to ourselves
+ * via AF_UNIX socket, close it and later fetch from socket. FIXME.
+ *
+ * Nasty bug: do_SAK is being called in interrupt context. This can
+ * deadlock. We punt it up to process context. AKPM - 16Mar2001
+ */
+void __do_SAK(struct tty_struct *tty)
+{
+#ifdef TTY_SOFT_SAK
+ tty_hangup(tty);
+#else
+ struct task_struct *g, *p;
+ struct pid *session;
+ int i;
+ struct file *filp;
+ struct fdtable *fdt;
+
+ if (!tty)
+ return;
+ session = tty->session;
+
+ tty_ldisc_flush(tty);
+
+ tty_driver_flush_buffer(tty);
+
+ read_lock(&tasklist_lock);
+ /* Kill the entire session */
+ do_each_pid_task(session, PIDTYPE_SID, p) {
+ printk(KERN_NOTICE "SAK: killed process %d"
+ " (%s): task_session(p)==tty->session\n",
+ task_pid_nr(p), p->comm);
+ send_sig(SIGKILL, p, 1);
+ } while_each_pid_task(session, PIDTYPE_SID, p);
+ /* Now kill any processes that happen to have the
+ * tty open.
+ */
+ do_each_thread(g, p) {
+ if (p->signal->tty == tty) {
+ printk(KERN_NOTICE "SAK: killed process %d"
+ " (%s): task_session(p)==tty->session\n",
+ task_pid_nr(p), p->comm);
+ send_sig(SIGKILL, p, 1);
+ continue;
+ }
+ task_lock(p);
+ if (p->files) {
+ /*
+ * We don't take a ref to the file, so we must
+ * hold ->file_lock instead.
+ */
+ spin_lock(&p->files->file_lock);
+ fdt = files_fdtable(p->files);
+ for (i = 0; i < fdt->max_fds; i++) {
+ filp = fcheck_files(p->files, i);
+ if (!filp)
+ continue;
+ if (filp->f_op->read == tty_read &&
+ file_tty(filp) == tty) {
+ printk(KERN_NOTICE "SAK: killed process %d"
+ " (%s): fd#%d opened to the tty\n",
+ task_pid_nr(p), p->comm, i);
+ force_sig(SIGKILL, p);
+ break;
+ }
+ }
+ spin_unlock(&p->files->file_lock);
+ }
+ task_unlock(p);
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+#endif
+}
+
+static void do_SAK_work(struct work_struct *work)
+{
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, SAK_work);
+ __do_SAK(tty);
+}
+
+/*
+ * The tq handling here is a little racy - tty->SAK_work may already be queued.
+ * Fortunately we don't need to worry, because if ->SAK_work is already queued,
+ * the values which we write to it will be identical to the values which it
+ * already has. --akpm
+ */
+void do_SAK(struct tty_struct *tty)
+{
+ if (!tty)
+ return;
+ schedule_work(&tty->SAK_work);
+}
+
+EXPORT_SYMBOL(do_SAK);
+
+static int dev_match_devt(struct device *dev, void *data)
+{
+ dev_t *devt = data;
+ return dev->devt == *devt;
+}
+
+/* Must put_device() after it's unused! */
+static struct device *tty_get_device(struct tty_struct *tty)
+{
+ dev_t devt = tty_devnum(tty);
+ return class_find_device(tty_class, NULL, &devt, dev_match_devt);
+}
+
+
+/**
+ * initialize_tty_struct
+ * @tty: tty to initialize
+ * @driver: tty driver owning this tty
+ * @idx: line index within the driver
+ *
+ * This subroutine initializes a tty structure that has been newly
+ * allocated.
+ *
+ * Locking: none - tty in question must not be exposed at this point
+ */
+
+void initialize_tty_struct(struct tty_struct *tty,
+ struct tty_driver *driver, int idx)
+{
+ memset(tty, 0, sizeof(struct tty_struct));
+ kref_init(&tty->kref);
+ tty->magic = TTY_MAGIC;
+ tty_ldisc_init(tty);
+ tty->session = NULL;
+ tty->pgrp = NULL;
+ tty->overrun_time = jiffies;
+ tty->buf.head = tty->buf.tail = NULL;
+ tty_buffer_init(tty);
+ mutex_init(&tty->termios_mutex);
+ mutex_init(&tty->ldisc_mutex);
+ init_waitqueue_head(&tty->write_wait);
+ init_waitqueue_head(&tty->read_wait);
+ INIT_WORK(&tty->hangup_work, do_tty_hangup);
+ mutex_init(&tty->atomic_read_lock);
+ mutex_init(&tty->atomic_write_lock);
+ mutex_init(&tty->output_lock);
+ mutex_init(&tty->echo_lock);
+ spin_lock_init(&tty->read_lock);
+ spin_lock_init(&tty->ctrl_lock);
+ INIT_LIST_HEAD(&tty->tty_files);
+ INIT_WORK(&tty->SAK_work, do_SAK_work);
+
+ tty->driver = driver;
+ tty->ops = driver->ops;
+ tty->index = idx;
+ tty_line_name(driver, idx, tty->name);
+ tty->dev = tty_get_device(tty);
+}
+
+/**
+ * deinitialize_tty_struct
+ * @tty: tty to deinitialize
+ *
+ *	This subroutine deinitializes a newly initialized tty structure
+ *	on which tty_release cannot yet be called.
+ *
+ * Locking: none - tty in question must not be exposed at this point
+ */
+void deinitialize_tty_struct(struct tty_struct *tty)
+{
+ tty_ldisc_deinit(tty);
+}
+
+/**
+ * tty_put_char - write one character to a tty
+ * @tty: tty
+ * @ch: character
+ *
+ * Write one byte to the tty using the provided put_char method
+ * if present. Returns the number of characters successfully output.
+ *
+ * Note: the specific put_char operation in the driver layer may go
+ *	away soon. Don't call it directly, use this method instead.
+ */
+
+int tty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ if (tty->ops->put_char)
+ return tty->ops->put_char(tty, ch);
+ return tty->ops->write(tty, &ch, 1);
+}
+EXPORT_SYMBOL_GPL(tty_put_char);
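+
+/*
+ * Illustrative sketch (hypothetical, never compiled): emitting a CR/LF pair
+ * one byte at a time, stopping if the driver reports the byte was not queued.
+ */
+#if 0
+static void example_crlf(struct tty_struct *tty)
+{
+	if (tty_put_char(tty, '\r'))
+		tty_put_char(tty, '\n');
+}
+#endif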
+
+struct class *tty_class;
+
+/**
+ * tty_register_device - register a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
+ * @device: a struct device that is associated with this tty device.
+ * This field is optional, if there is no known struct device
+ * for this tty device it can be set to NULL safely.
+ *
+ * Returns a pointer to the struct device for this tty device
+ * (or ERR_PTR(-EFOO) on error).
+ *
+ * This call is required to be made to register an individual tty device
+ * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If
+ * that bit is not set, this function should not be called by a tty
+ * driver.
+ *
+ * Locking: ??
+ */
+
+struct device *tty_register_device(struct tty_driver *driver, unsigned index,
+ struct device *device)
+{
+ char name[64];
+ dev_t dev = MKDEV(driver->major, driver->minor_start) + index;
+
+ if (index >= driver->num) {
+		printk(KERN_ERR "Attempt to register invalid tty line number "
+		       "(%d).\n", index);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (driver->type == TTY_DRIVER_TYPE_PTY)
+ pty_line_name(driver, index, name);
+ else
+ tty_line_name(driver, index, name);
+
+ return device_create(tty_class, device, dev, NULL, name);
+}
+EXPORT_SYMBOL(tty_register_device);
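+
+/*
+ * Illustrative sketch (hypothetical, never compiled): a hotplug style driver
+ * that sets TTY_DRIVER_DYNAMIC_DEV registers each line as the hardware is
+ * discovered.
+ */
+#if 0
+static int example_add_port(struct tty_driver *drv, unsigned index,
+			    struct device *parent)
+{
+	struct device *d = tty_register_device(drv, index, parent);
+
+	return IS_ERR(d) ? PTR_ERR(d) : 0;
+}
+#endif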
+
+/**
+ * tty_unregister_device - unregister a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
+ *
+ * If a tty device is registered with a call to tty_register_device() then
+ * this function must be called when the tty device is gone.
+ *
+ * Locking: ??
+ */
+
+void tty_unregister_device(struct tty_driver *driver, unsigned index)
+{
+ device_destroy(tty_class,
+ MKDEV(driver->major, driver->minor_start) + index);
+}
+EXPORT_SYMBOL(tty_unregister_device);
+
+struct tty_driver *alloc_tty_driver(int lines)
+{
+ struct tty_driver *driver;
+
+ driver = kzalloc(sizeof(struct tty_driver), GFP_KERNEL);
+ if (driver) {
+ kref_init(&driver->kref);
+ driver->magic = TTY_DRIVER_MAGIC;
+ driver->num = lines;
+ /* later we'll move allocation of tables here */
+ }
+ return driver;
+}
+EXPORT_SYMBOL(alloc_tty_driver);
+
+static void destruct_tty_driver(struct kref *kref)
+{
+ struct tty_driver *driver = container_of(kref, struct tty_driver, kref);
+ int i;
+ struct ktermios *tp;
+ void *p;
+
+ if (driver->flags & TTY_DRIVER_INSTALLED) {
+ /*
+ * Free the termios and termios_locked structures because
+ * we don't want to get memory leaks when modular tty
+ * drivers are removed from the kernel.
+ */
+ for (i = 0; i < driver->num; i++) {
+ tp = driver->termios[i];
+ if (tp) {
+ driver->termios[i] = NULL;
+ kfree(tp);
+ }
+ if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV))
+ tty_unregister_device(driver, i);
+ }
+ p = driver->ttys;
+ proc_tty_unregister_driver(driver);
+ driver->ttys = NULL;
+ driver->termios = NULL;
+ kfree(p);
+ cdev_del(&driver->cdev);
+ }
+ kfree(driver);
+}
+
+void tty_driver_kref_put(struct tty_driver *driver)
+{
+ kref_put(&driver->kref, destruct_tty_driver);
+}
+EXPORT_SYMBOL(tty_driver_kref_put);
+
+void tty_set_operations(struct tty_driver *driver,
+ const struct tty_operations *op)
+{
+ driver->ops = op;
+};
+EXPORT_SYMBOL(tty_set_operations);
+
+void put_tty_driver(struct tty_driver *d)
+{
+ tty_driver_kref_put(d);
+}
+EXPORT_SYMBOL(put_tty_driver);
+
+/*
+ * Called by a tty driver to register itself.
+ */
+int tty_register_driver(struct tty_driver *driver)
+{
+ int error;
+ int i;
+ dev_t dev;
+ void **p = NULL;
+ struct device *d;
+
+ if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM) && driver->num) {
+ p = kzalloc(driver->num * 2 * sizeof(void *), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ }
+
+ if (!driver->major) {
+ error = alloc_chrdev_region(&dev, driver->minor_start,
+ driver->num, driver->name);
+ if (!error) {
+ driver->major = MAJOR(dev);
+ driver->minor_start = MINOR(dev);
+ }
+ } else {
+ dev = MKDEV(driver->major, driver->minor_start);
+ error = register_chrdev_region(dev, driver->num, driver->name);
+ }
+ if (error < 0) {
+ kfree(p);
+ return error;
+ }
+
+ if (p) {
+ driver->ttys = (struct tty_struct **)p;
+ driver->termios = (struct ktermios **)(p + driver->num);
+ } else {
+ driver->ttys = NULL;
+ driver->termios = NULL;
+ }
+
+ cdev_init(&driver->cdev, &tty_fops);
+ driver->cdev.owner = driver->owner;
+ error = cdev_add(&driver->cdev, dev, driver->num);
+ if (error) {
+ unregister_chrdev_region(dev, driver->num);
+ driver->ttys = NULL;
+ driver->termios = NULL;
+ kfree(p);
+ return error;
+ }
+
+ mutex_lock(&tty_mutex);
+ list_add(&driver->tty_drivers, &tty_drivers);
+ mutex_unlock(&tty_mutex);
+
+ if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) {
+ for (i = 0; i < driver->num; i++) {
+ d = tty_register_device(driver, i, NULL);
+ if (IS_ERR(d)) {
+ error = PTR_ERR(d);
+ goto err;
+ }
+ }
+ }
+ proc_tty_register_driver(driver);
+ driver->flags |= TTY_DRIVER_INSTALLED;
+ return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ tty_unregister_device(driver, i);
+
+ mutex_lock(&tty_mutex);
+ list_del(&driver->tty_drivers);
+ mutex_unlock(&tty_mutex);
+
+ unregister_chrdev_region(dev, driver->num);
+ driver->ttys = NULL;
+ driver->termios = NULL;
+ kfree(p);
+ return error;
+}
+
+EXPORT_SYMBOL(tty_register_driver);
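+
+/*
+ * Illustrative sketch (hypothetical, never compiled): the usual registration
+ * sequence for a small serial-style driver. The "example" names and the
+ * choice of four lines are invented for the sketch.
+ */
+#if 0
+static const struct tty_operations example_ops;	/* real methods omitted */
+static struct tty_driver *example_driver;
+
+static int __init example_init(void)
+{
+	int ret;
+
+	example_driver = alloc_tty_driver(4);
+	if (!example_driver)
+		return -ENOMEM;
+
+	example_driver->owner = THIS_MODULE;
+	example_driver->driver_name = "example";
+	example_driver->name = "ttyEX";
+	example_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	example_driver->subtype = SERIAL_TYPE_NORMAL;
+	example_driver->init_termios = tty_std_termios;
+	example_driver->flags = TTY_DRIVER_REAL_RAW;
+	tty_set_operations(example_driver, &example_ops);
+
+	ret = tty_register_driver(example_driver);
+	if (ret)
+		put_tty_driver(example_driver);
+	return ret;
+}
+#endif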
+
+/*
+ * Called by a tty driver to unregister itself.
+ */
+int tty_unregister_driver(struct tty_driver *driver)
+{
+#if 0
+ /* FIXME */
+ if (driver->refcount)
+ return -EBUSY;
+#endif
+ unregister_chrdev_region(MKDEV(driver->major, driver->minor_start),
+ driver->num);
+ mutex_lock(&tty_mutex);
+ list_del(&driver->tty_drivers);
+ mutex_unlock(&tty_mutex);
+ return 0;
+}
+
+EXPORT_SYMBOL(tty_unregister_driver);
+
+dev_t tty_devnum(struct tty_struct *tty)
+{
+ return MKDEV(tty->driver->major, tty->driver->minor_start) + tty->index;
+}
+EXPORT_SYMBOL(tty_devnum);
+
+void proc_clear_tty(struct task_struct *p)
+{
+ unsigned long flags;
+ struct tty_struct *tty;
+ spin_lock_irqsave(&p->sighand->siglock, flags);
+ tty = p->signal->tty;
+ p->signal->tty = NULL;
+ spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ tty_kref_put(tty);
+}
+
+/* Called under the sighand lock */
+
+static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty)
+{
+ if (tty) {
+ unsigned long flags;
+ /* We should not have a session or pgrp to put here but.... */
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ put_pid(tty->session);
+ put_pid(tty->pgrp);
+ tty->pgrp = get_pid(task_pgrp(tsk));
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ tty->session = get_pid(task_session(tsk));
+ if (tsk->signal->tty) {
+ printk(KERN_DEBUG "tty not NULL!!\n");
+ tty_kref_put(tsk->signal->tty);
+ }
+ }
+ put_pid(tsk->signal->tty_old_pgrp);
+ tsk->signal->tty = tty_kref_get(tty);
+ tsk->signal->tty_old_pgrp = NULL;
+}
+
+static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty)
+{
+ spin_lock_irq(&tsk->sighand->siglock);
+ __proc_set_tty(tsk, tty);
+ spin_unlock_irq(&tsk->sighand->siglock);
+}
+
+struct tty_struct *get_current_tty(void)
+{
+ struct tty_struct *tty;
+ unsigned long flags;
+
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ tty = tty_kref_get(current->signal->tty);
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ return tty;
+}
+EXPORT_SYMBOL_GPL(get_current_tty);
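+
+/*
+ * Illustrative sketch (hypothetical, never compiled): callers must drop the
+ * reference that get_current_tty() returns.
+ */
+#if 0
+static int example_has_ctty(void)
+{
+	struct tty_struct *tty = get_current_tty();
+	int ret = (tty != NULL);
+
+	tty_kref_put(tty);	/* safe on NULL */
+	return ret;
+}
+#endif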
+
+void tty_default_fops(struct file_operations *fops)
+{
+ *fops = tty_fops;
+}
+
+/*
+ * Initialize the console device. This is called *early*, so
+ * we can't necessarily depend on lots of kernel help here.
+ * Just do some early initializations, and do the complex setup
+ * later.
+ */
+void __init console_init(void)
+{
+ initcall_t *call;
+
+ /* Setup the default TTY line discipline. */
+ tty_ldisc_begin();
+
+ /*
+ * set up the console device so that later boot sequences can
+ * inform about problems etc..
+ */
+ call = __con_initcall_start;
+ while (call < __con_initcall_end) {
+ (*call)();
+ call++;
+ }
+}
+
+static char *tty_devnode(struct device *dev, mode_t *mode)
+{
+ if (!mode)
+ return NULL;
+ if (dev->devt == MKDEV(TTYAUX_MAJOR, 0) ||
+ dev->devt == MKDEV(TTYAUX_MAJOR, 2))
+ *mode = 0666;
+ return NULL;
+}
+
+static int __init tty_class_init(void)
+{
+ tty_class = class_create(THIS_MODULE, "tty");
+ if (IS_ERR(tty_class))
+ return PTR_ERR(tty_class);
+ tty_class->devnode = tty_devnode;
+ return 0;
+}
+
+postcore_initcall(tty_class_init);
+
+/* 3/2004 jmc: why do these devices exist? */
+static struct cdev tty_cdev, console_cdev;
+
+static ssize_t show_cons_active(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct console *cs[16];
+ int i = 0;
+ struct console *c;
+ ssize_t count = 0;
+
+ console_lock();
+ for_each_console(c) {
+ if (!c->device)
+ continue;
+ if (!c->write)
+ continue;
+ if ((c->flags & CON_ENABLED) == 0)
+ continue;
+ cs[i++] = c;
+ if (i >= ARRAY_SIZE(cs))
+ break;
+ }
+ while (i--)
+ count += sprintf(buf + count, "%s%d%c",
+ cs[i]->name, cs[i]->index, i ? ' ':'\n');
+ console_unlock();
+
+ return count;
+}
+static DEVICE_ATTR(active, S_IRUGO, show_cons_active, NULL);
+
+static struct device *consdev;
+
+void console_sysfs_notify(void)
+{
+ if (consdev)
+ sysfs_notify(&consdev->kobj, NULL, "active");
+}
+
+/*
+ * Ok, now we can initialize the rest of the tty devices and can count
+ * on memory allocations, interrupts etc..
+ */
+int __init tty_init(void)
+{
+ cdev_init(&tty_cdev, &tty_fops);
+ if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
+ register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
+ panic("Couldn't register /dev/tty driver\n");
+ device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), NULL, "tty");
+
+ cdev_init(&console_cdev, &console_fops);
+ if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
+ register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
+ panic("Couldn't register /dev/console driver\n");
+ consdev = device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 1), NULL,
+ "console");
+ if (IS_ERR(consdev))
+ consdev = NULL;
+ else
+ WARN_ON(device_create_file(consdev, &dev_attr_active) < 0);
+
+#ifdef CONFIG_VT
+ vty_init(&console_fops);
+#endif
+ return 0;
+}
+
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
new file mode 100644
index 00000000..53f2442c
--- /dev/null
+++ b/drivers/tty/tty_ioctl.c
@@ -0,0 +1,1181 @@
+/*
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines
+ * which can be dynamically activated and de-activated by the line
+ * discipline handling modules (like SLIP).
+ */
+
+#include <linux/types.h>
+#include <linux/termios.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/tty.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#undef TTY_DEBUG_WAIT_UNTIL_SENT
+
+#undef DEBUG
+
+/*
+ * Internal flag options for termios setting behavior
+ */
+#define TERMIOS_FLUSH 1
+#define TERMIOS_WAIT 2
+#define TERMIOS_TERMIO 4
+#define TERMIOS_OLD 8
+
+
+/**
+ * tty_chars_in_buffer - characters pending
+ * @tty: terminal
+ *
+ * Return the number of bytes of data in the device private
+ * output queue. If no private method is supplied there is assumed
+ * to be no queue on the device.
+ */
+
+int tty_chars_in_buffer(struct tty_struct *tty)
+{
+ if (tty->ops->chars_in_buffer)
+ return tty->ops->chars_in_buffer(tty);
+ else
+ return 0;
+}
+EXPORT_SYMBOL(tty_chars_in_buffer);
+
+/**
+ * tty_write_room - write queue space
+ * @tty: terminal
+ *
+ * Return the number of bytes that can be queued to this device
+ * at the present time. The result should be treated as a guarantee
+ * and the driver cannot offer a value it later shrinks by more than
+ * the number of bytes written. If no method is provided 2K is always
+ * returned and data may be lost as there will be no flow control.
+ */
+
+int tty_write_room(struct tty_struct *tty)
+{
+ if (tty->ops->write_room)
+ return tty->ops->write_room(tty);
+ return 2048;
+}
+EXPORT_SYMBOL(tty_write_room);
+
+/**
+ * tty_driver_flush_buffer - discard internal buffer
+ * @tty: terminal
+ *
+ * Discard the internal output buffer for this device. If no method
+ * is provided then either the buffer cannot be hardware flushed or
+ * there is no buffer driver side.
+ */
+void tty_driver_flush_buffer(struct tty_struct *tty)
+{
+ if (tty->ops->flush_buffer)
+ tty->ops->flush_buffer(tty);
+}
+EXPORT_SYMBOL(tty_driver_flush_buffer);
+
+/**
+ * tty_throttle - flow control
+ * @tty: terminal
+ *
+ * Indicate that a tty should stop transmitting data down the stack.
+ * Takes the termios mutex to protect against parallel throttle/unthrottle
+ * and also to ensure the driver can consistently reference its own
+ * termios data at this point when implementing software flow control.
+ */
+
+void tty_throttle(struct tty_struct *tty)
+{
+ mutex_lock(&tty->termios_mutex);
+ /* check TTY_THROTTLED first so it indicates our state */
+ if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) &&
+ tty->ops->throttle)
+ tty->ops->throttle(tty);
+ mutex_unlock(&tty->termios_mutex);
+}
+EXPORT_SYMBOL(tty_throttle);
+
+/**
+ * tty_unthrottle - flow control
+ * @tty: terminal
+ *
+ * Indicate that a tty may continue transmitting data down the stack.
+ * Takes the termios mutex to protect against parallel throttle/unthrottle
+ * and also to ensure the driver can consistently reference its own
+ * termios data at this point when implementing software flow control.
+ *
+ * Drivers should however remember that the stack can issue a throttle,
+ * then change flow control method, then unthrottle.
+ */
+
+void tty_unthrottle(struct tty_struct *tty)
+{
+ mutex_lock(&tty->termios_mutex);
+ if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) &&
+ tty->ops->unthrottle)
+ tty->ops->unthrottle(tty);
+ mutex_unlock(&tty->termios_mutex);
+}
+EXPORT_SYMBOL(tty_unthrottle);
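+
+/*
+ * Illustrative sketch (hypothetical, never compiled): a line discipline style
+ * receive path that throttles the sender when its buffer is nearly full and
+ * unthrottles once it has drained. The buffer accounting is invented.
+ */
+#if 0
+static void example_flow_control(struct tty_struct *tty, int used, int size)
+{
+	if (used >= size - 128)
+		tty_throttle(tty);
+	else if (used < size / 2)
+		tty_unthrottle(tty);
+}
+#endif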
+
+/**
+ * tty_wait_until_sent - wait for I/O to finish
+ * @tty: tty we are waiting for
+ * @timeout: how long we will wait
+ *
+ * Wait for characters pending in a tty driver to hit the wire, or
+ * for a timeout to occur (eg due to flow control)
+ *
+ * Locking: none
+ */
+
+void tty_wait_until_sent(struct tty_struct *tty, long timeout)
+{
+#ifdef TTY_DEBUG_WAIT_UNTIL_SENT
+ char buf[64];
+
+ printk(KERN_DEBUG "%s wait until sent...\n", tty_name(tty, buf));
+#endif
+ if (!timeout)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (wait_event_interruptible_timeout(tty->write_wait,
+ !tty_chars_in_buffer(tty), timeout) >= 0) {
+ if (tty->ops->wait_until_sent)
+ tty->ops->wait_until_sent(tty, timeout);
+ }
+}
+EXPORT_SYMBOL(tty_wait_until_sent);
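+
+/*
+ * Illustrative sketch (hypothetical, never compiled): a driver close path
+ * that drains pending output, but for no more than 30 seconds, before
+ * dropping the modem lines.
+ */
+#if 0
+static void example_close_drain(struct tty_struct *tty)
+{
+	tty_wait_until_sent(tty, 30 * HZ);
+	/* ... now safe to drop DTR/RTS on the imaginary port ... */
+}
+#endif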
+
+
+/*
+ * Termios Helper Methods
+ */
+
+static void unset_locked_termios(struct ktermios *termios,
+ struct ktermios *old,
+ struct ktermios *locked)
+{
+ int i;
+
+#define NOSET_MASK(x, y, z) (x = ((x) & ~(z)) | ((y) & (z)))
+
+ if (!locked) {
+ printk(KERN_WARNING "Warning?!? termios_locked is NULL.\n");
+ return;
+ }
+
+ NOSET_MASK(termios->c_iflag, old->c_iflag, locked->c_iflag);
+ NOSET_MASK(termios->c_oflag, old->c_oflag, locked->c_oflag);
+ NOSET_MASK(termios->c_cflag, old->c_cflag, locked->c_cflag);
+ NOSET_MASK(termios->c_lflag, old->c_lflag, locked->c_lflag);
+ termios->c_line = locked->c_line ? old->c_line : termios->c_line;
+ for (i = 0; i < NCCS; i++)
+ termios->c_cc[i] = locked->c_cc[i] ?
+ old->c_cc[i] : termios->c_cc[i];
+ /* FIXME: What should we do for i/ospeed */
+}
+
+/*
+ * Routine which returns the baud rate of the tty
+ *
+ * Note that the baud_table needs to be kept in sync with the
+ * include/asm/termbits.h file.
+ */
+static const speed_t baud_table[] = {
+ 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
+ 9600, 19200, 38400, 57600, 115200, 230400, 460800,
+#ifdef __sparc__
+ 76800, 153600, 307200, 614400, 921600
+#else
+ 500000, 576000, 921600, 1000000, 1152000, 1500000, 2000000,
+ 2500000, 3000000, 3500000, 4000000
+#endif
+};
+
+#ifndef __sparc__
+static const tcflag_t baud_bits[] = {
+ B0, B50, B75, B110, B134, B150, B200, B300, B600,
+ B1200, B1800, B2400, B4800, B9600, B19200, B38400,
+ B57600, B115200, B230400, B460800, B500000, B576000,
+ B921600, B1000000, B1152000, B1500000, B2000000, B2500000,
+ B3000000, B3500000, B4000000
+};
+#else
+static const tcflag_t baud_bits[] = {
+ B0, B50, B75, B110, B134, B150, B200, B300, B600,
+ B1200, B1800, B2400, B4800, B9600, B19200, B38400,
+ B57600, B115200, B230400, B460800, B76800, B153600,
+ B307200, B614400, B921600
+};
+#endif
+
+static int n_baud_table = ARRAY_SIZE(baud_table);
+
+/**
+ * tty_termios_baud_rate
+ * @termios: termios structure
+ *
+ * Convert termios baud rate data into a speed. This should be called
+ * with the termios lock held if this termios is a terminal termios
+ * structure. May change the termios data. Device drivers can call this
+ * function but should use ->c_[io]speed directly as they are updated.
+ *
+ * Locking: none
+ */
+
+speed_t tty_termios_baud_rate(struct ktermios *termios)
+{
+ unsigned int cbaud;
+
+ cbaud = termios->c_cflag & CBAUD;
+
+#ifdef BOTHER
+ /* Magic token for arbitrary speed via c_ispeed/c_ospeed */
+ if (cbaud == BOTHER)
+ return termios->c_ospeed;
+#endif
+ if (cbaud & CBAUDEX) {
+ cbaud &= ~CBAUDEX;
+
+ if (cbaud < 1 || cbaud + 15 > n_baud_table)
+ termios->c_cflag &= ~CBAUDEX;
+ else
+ cbaud += 15;
+ }
+ return baud_table[cbaud];
+}
+EXPORT_SYMBOL(tty_termios_baud_rate);
+
+/**
+ * tty_termios_input_baud_rate
+ * @termios: termios structure
+ *
+ * Convert termios baud rate data into a speed. This should be called
+ * with the termios lock held if this termios is a terminal termios
+ * structure. May change the termios data. Device drivers can call this
+ * function but should use ->c_[io]speed directly as they are updated.
+ *
+ * Locking: none
+ */
+
+speed_t tty_termios_input_baud_rate(struct ktermios *termios)
+{
+#ifdef IBSHIFT
+ unsigned int cbaud = (termios->c_cflag >> IBSHIFT) & CBAUD;
+
+ if (cbaud == B0)
+ return tty_termios_baud_rate(termios);
+
+ /* Magic token for arbitrary speed via c_ispeed*/
+ if (cbaud == BOTHER)
+ return termios->c_ispeed;
+
+ if (cbaud & CBAUDEX) {
+ cbaud &= ~CBAUDEX;
+
+ if (cbaud < 1 || cbaud + 15 > n_baud_table)
+ termios->c_cflag &= ~(CBAUDEX << IBSHIFT);
+ else
+ cbaud += 15;
+ }
+ return baud_table[cbaud];
+#else
+ return tty_termios_baud_rate(termios);
+#endif
+}
+EXPORT_SYMBOL(tty_termios_input_baud_rate);
+
+/**
+ * tty_termios_encode_baud_rate
+ * @termios: ktermios structure holding user requested state
+ * @ispeed: input speed
+ * @ospeed: output speed
+ *
+ * Encode the speeds set into the passed termios structure. This is
+ * used as a library helper for drivers so that they can report back
+ * the actual speed selected when it differs from the speed requested
+ *
+ * For maximal back compatibility with legacy SYS5/POSIX *nix behaviour
+ * we need to carefully set the bits when the user does not get the
+ *	desired speed. We allow small margins and preserve as much of the
+ *	input intent as possible to keep compatibility.
+ *
+ * Locking: Caller should hold termios lock. This is already held
+ * when calling this function from the driver termios handler.
+ *
+ * The ifdefs deal with platforms whose owners have yet to update them
+ * and will all go away once this is done.
+ */
+
+void tty_termios_encode_baud_rate(struct ktermios *termios,
+ speed_t ibaud, speed_t obaud)
+{
+ int i = 0;
+ int ifound = -1, ofound = -1;
+ int iclose = ibaud/50, oclose = obaud/50;
+ int ibinput = 0;
+
+ if (obaud == 0) /* CD dropped */
+ ibaud = 0; /* Clear ibaud to be sure */
+
+ termios->c_ispeed = ibaud;
+ termios->c_ospeed = obaud;
+
+#ifdef BOTHER
+ /* If the user asked for a precise weird speed give a precise weird
+	   answer. If they asked for a Bfoo speed they may have problems
+ digesting non-exact replies so fuzz a bit */
+
+ if ((termios->c_cflag & CBAUD) == BOTHER)
+ oclose = 0;
+ if (((termios->c_cflag >> IBSHIFT) & CBAUD) == BOTHER)
+ iclose = 0;
+ if ((termios->c_cflag >> IBSHIFT) & CBAUD)
+ ibinput = 1; /* An input speed was specified */
+#endif
+ termios->c_cflag &= ~CBAUD;
+
+ /*
+ * Our goal is to find a close match to the standard baud rate
+ * returned. Walk the baud rate table and if we get a very close
+ * match then report back the speed as a POSIX Bxxxx value by
+ * preference
+ */
+
+ do {
+ if (obaud - oclose <= baud_table[i] &&
+ obaud + oclose >= baud_table[i]) {
+ termios->c_cflag |= baud_bits[i];
+ ofound = i;
+ }
+ if (ibaud - iclose <= baud_table[i] &&
+ ibaud + iclose >= baud_table[i]) {
+ /* For the case input == output don't set IBAUD bits
+ if the user didn't do so */
+ if (ofound == i && !ibinput)
+ ifound = i;
+#ifdef IBSHIFT
+ else {
+ ifound = i;
+ termios->c_cflag |= (baud_bits[i] << IBSHIFT);
+ }
+#endif
+ }
+ } while (++i < n_baud_table);
+
+ /*
+ * If we found no match then use BOTHER if provided or warn
+ * the user their platform maintainer needs to wake up if not.
+ */
+#ifdef BOTHER
+ if (ofound == -1)
+ termios->c_cflag |= BOTHER;
+ /* Set exact input bits only if the input and output differ or the
+ user already did */
+ if (ifound == -1 && (ibaud != obaud || ibinput))
+ termios->c_cflag |= (BOTHER << IBSHIFT);
+#else
+ if (ifound == -1 || ofound == -1) {
+ printk_once(KERN_WARNING "tty: Unable to return correct "
+ "speed data as your architecture needs updating.\n");
+ }
+#endif
+}
+EXPORT_SYMBOL_GPL(tty_termios_encode_baud_rate);
+
+/**
+ * tty_encode_baud_rate - set baud rate of the tty
+ *	@tty: terminal on which to set the rates
+ *	@ibaud: input baud rate
+ *	@obaud: output baud rate
+ *
+ * Update the current termios data for the tty with the new speed
+ * settings. The caller must hold the termios_mutex for the tty in
+ * question.
+ */
+
+void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud, speed_t obaud)
+{
+ tty_termios_encode_baud_rate(tty->termios, ibaud, obaud);
+}
+EXPORT_SYMBOL_GPL(tty_encode_baud_rate);
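+
+/*
+ * Illustrative sketch (hypothetical, never compiled): a driver that cannot
+ * run faster than 115200 clamps the request and reports the speed actually
+ * used back into the termios. Assumes the caller already holds the
+ * termios_mutex, as is the case inside a set_termios method.
+ */
+#if 0
+static void example_fixup_speed(struct tty_struct *tty)
+{
+	speed_t baud = tty_termios_baud_rate(tty->termios);
+
+	if (baud > 115200)
+		baud = 115200;
+	tty_encode_baud_rate(tty, baud, baud);
+}
+#endif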
+
+/**
+ * tty_get_baud_rate - get tty bit rates
+ * @tty: tty to query
+ *
+ * Returns the baud rate as an integer for this terminal. The
+ * termios lock must be held by the caller and the terminal bit
+ * flags may be updated.
+ *
+ * Locking: none
+ */
+
+speed_t tty_get_baud_rate(struct tty_struct *tty)
+{
+ speed_t baud = tty_termios_baud_rate(tty->termios);
+
+ if (baud == 38400 && tty->alt_speed) {
+ if (!tty->warned) {
+ printk(KERN_WARNING "Use of setserial/setrocket to "
+ "set SPD_* flags is deprecated\n");
+ tty->warned = 1;
+ }
+ baud = tty->alt_speed;
+ }
+
+ return baud;
+}
+EXPORT_SYMBOL(tty_get_baud_rate);
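+
+/*
+ * Illustrative sketch (hypothetical, never compiled): a set_termios helper
+ * for an imaginary 1.8432MHz UART computing its divisor from the requested
+ * rate. A baud of 0 (B0) means "hang up" and programs no divisor.
+ */
+#if 0
+static void example_set_speed(struct tty_struct *tty)
+{
+	speed_t baud = tty_get_baud_rate(tty);
+	unsigned int quot = baud ? 115200 / baud : 0;
+
+	/* ... write quot to the imaginary divisor latch ... */
+}
+#endif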
+
+/**
+ * tty_termios_copy_hw - copy hardware settings
+ * @new: New termios
+ * @old: Old termios
+ *
+ * Propagate the hardware specific terminal setting bits from
+ * the old termios structure to the new one. This is used in cases
+ * where the hardware does not support reconfiguration or as a helper
+ * in some cases where only minimal reconfiguration is supported
+ */
+
+void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old)
+{
+ /* The bits a dumb device handles in software. Smart devices need
+ to always provide a set_termios method */
+ new->c_cflag &= HUPCL | CREAD | CLOCAL;
+ new->c_cflag |= old->c_cflag & ~(HUPCL | CREAD | CLOCAL);
+ new->c_ispeed = old->c_ispeed;
+ new->c_ospeed = old->c_ospeed;
+}
+EXPORT_SYMBOL(tty_termios_copy_hw);
+
+/**
+ * tty_termios_hw_change - check for setting change
+ * @a: termios
+ * @b: termios to compare
+ *
+ * Check if any of the bits that affect a dumb device have changed
+ * between the two termios structures, or a speed change is needed.
+ */
+
+int tty_termios_hw_change(struct ktermios *a, struct ktermios *b)
+{
+ if (a->c_ispeed != b->c_ispeed || a->c_ospeed != b->c_ospeed)
+ return 1;
+ if ((a->c_cflag ^ b->c_cflag) & ~(HUPCL | CREAD | CLOCAL))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(tty_termios_hw_change);
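+
+/*
+ * Illustrative sketch (hypothetical, never compiled): a set_termios method
+ * for a device with no reconfigurable hardware bails out early when nothing
+ * relevant changed and otherwise copies the old hardware bits back so the
+ * termios reflects what the device is really doing.
+ */
+#if 0
+static void example_dumb_set_termios(struct tty_struct *tty,
+				     struct ktermios *old)
+{
+	if (!tty_termios_hw_change(tty->termios, old))
+		return;
+	tty_termios_copy_hw(tty->termios, old);
+}
+#endif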
+
+/**
+ * tty_set_termios - update termios values
+ * @tty: tty to update
+ * @new_termios: desired new value
+ *
+ * Perform updates to the termios values set on this terminal. There
+ * is a bit of layering violation here with n_tty in terms of the
+ * internal knowledge of this function.
+ *
+ * Locking: termios_mutex
+ */
+
+int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
+{
+ struct ktermios old_termios;
+ struct tty_ldisc *ld;
+ unsigned long flags;
+
+ /*
+ * Perform the actual termios internal changes under lock.
+ */
+
+
+ /* FIXME: we need to decide on some locking/ordering semantics
+ for the set_termios notification eventually */
+ mutex_lock(&tty->termios_mutex);
+ old_termios = *tty->termios;
+ *tty->termios = *new_termios;
+ unset_locked_termios(tty->termios, &old_termios, tty->termios_locked);
+
+ /* See if packet mode change of state. */
+ if (tty->link && tty->link->packet) {
+ int extproc = (old_termios.c_lflag & EXTPROC) |
+ (tty->termios->c_lflag & EXTPROC);
+ int old_flow = ((old_termios.c_iflag & IXON) &&
+ (old_termios.c_cc[VSTOP] == '\023') &&
+ (old_termios.c_cc[VSTART] == '\021'));
+ int new_flow = (I_IXON(tty) &&
+ STOP_CHAR(tty) == '\023' &&
+ START_CHAR(tty) == '\021');
+ if ((old_flow != new_flow) || extproc) {
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ if (old_flow != new_flow) {
+ tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP);
+ if (new_flow)
+ tty->ctrl_status |= TIOCPKT_DOSTOP;
+ else
+ tty->ctrl_status |= TIOCPKT_NOSTOP;
+ }
+ if (extproc)
+ tty->ctrl_status |= TIOCPKT_IOCTL;
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ wake_up_interruptible(&tty->link->read_wait);
+ }
+ }
+
+ if (tty->ops->set_termios)
+ (*tty->ops->set_termios)(tty, &old_termios);
+ else
+ tty_termios_copy_hw(tty->termios, &old_termios);
+
+ ld = tty_ldisc_ref(tty);
+ if (ld != NULL) {
+ if (ld->ops->set_termios)
+ (ld->ops->set_termios)(tty, &old_termios);
+ tty_ldisc_deref(ld);
+ }
+ mutex_unlock(&tty->termios_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tty_set_termios);
+
+/**
+ * set_termios - set termios values for a tty
+ * @tty: terminal device
+ * @arg: user data
+ * @opt: option information
+ *
+ * Helper function to prepare termios data and run necessary other
+ * functions before using tty_set_termios to do the actual changes.
+ *
+ * Locking:
+ * Called functions take ldisc and termios_mutex locks
+ */
+
+static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
+{
+ struct ktermios tmp_termios;
+ struct tty_ldisc *ld;
+ int retval = tty_check_change(tty);
+
+ if (retval)
+ return retval;
+
+ mutex_lock(&tty->termios_mutex);
+ memcpy(&tmp_termios, tty->termios, sizeof(struct ktermios));
+ mutex_unlock(&tty->termios_mutex);
+
+ if (opt & TERMIOS_TERMIO) {
+ if (user_termio_to_kernel_termios(&tmp_termios,
+ (struct termio __user *)arg))
+ return -EFAULT;
+#ifdef TCGETS2
+ } else if (opt & TERMIOS_OLD) {
+ if (user_termios_to_kernel_termios_1(&tmp_termios,
+ (struct termios __user *)arg))
+ return -EFAULT;
+ } else {
+ if (user_termios_to_kernel_termios(&tmp_termios,
+ (struct termios2 __user *)arg))
+ return -EFAULT;
+ }
+#else
+ } else if (user_termios_to_kernel_termios(&tmp_termios,
+ (struct termios __user *)arg))
+ return -EFAULT;
+#endif
+
+ /* If old style Bfoo values are used then load c_ispeed/c_ospeed
+	 * with the real speed so it is unconditionally usable */
+ tmp_termios.c_ispeed = tty_termios_input_baud_rate(&tmp_termios);
+ tmp_termios.c_ospeed = tty_termios_baud_rate(&tmp_termios);
+
+ ld = tty_ldisc_ref(tty);
+
+ if (ld != NULL) {
+ if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer)
+ ld->ops->flush_buffer(tty);
+ tty_ldisc_deref(ld);
+ }
+
+ if (opt & TERMIOS_WAIT) {
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+ return -EINTR;
+ }
+
+ tty_set_termios(tty, &tmp_termios);
+
+ /* FIXME: Arguably if tmp_termios == tty->termios AND the
+ actual requested termios was not tmp_termios then we may
+ want to return an error as no user requested change has
+ succeeded */
+ return 0;
+}
+
+static void copy_termios(struct tty_struct *tty, struct ktermios *kterm)
+{
+ mutex_lock(&tty->termios_mutex);
+ memcpy(kterm, tty->termios, sizeof(struct ktermios));
+ mutex_unlock(&tty->termios_mutex);
+}
+
+static void copy_termios_locked(struct tty_struct *tty, struct ktermios *kterm)
+{
+ mutex_lock(&tty->termios_mutex);
+ memcpy(kterm, tty->termios_locked, sizeof(struct ktermios));
+ mutex_unlock(&tty->termios_mutex);
+}
+
+static int get_termio(struct tty_struct *tty, struct termio __user *termio)
+{
+ struct ktermios kterm;
+ copy_termios(tty, &kterm);
+ if (kernel_termios_to_user_termio(termio, &kterm))
+ return -EFAULT;
+ return 0;
+}
+
+
+#ifdef TCGETX
+
+/**
+ * set_termiox - set termiox fields if possible
+ * @tty: terminal
+ * @arg: termiox structure from user
+ * @opt: option flags for ioctl type
+ *
+ * Implement the device calling points for the SYS5 termiox ioctl
+ * interface in Linux
+ */
+
+static int set_termiox(struct tty_struct *tty, void __user *arg, int opt)
+{
+ struct termiox tnew;
+ struct tty_ldisc *ld;
+
+ if (tty->termiox == NULL)
+ return -EINVAL;
+ if (copy_from_user(&tnew, arg, sizeof(struct termiox)))
+ return -EFAULT;
+
+ ld = tty_ldisc_ref(tty);
+ if (ld != NULL) {
+ if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer)
+ ld->ops->flush_buffer(tty);
+ tty_ldisc_deref(ld);
+ }
+ if (opt & TERMIOS_WAIT) {
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+ return -EINTR;
+ }
+
+ mutex_lock(&tty->termios_mutex);
+ if (tty->ops->set_termiox)
+ tty->ops->set_termiox(tty, &tnew);
+ mutex_unlock(&tty->termios_mutex);
+ return 0;
+}
+
+#endif
+
+
+#ifdef TIOCGETP
+/*
+ * These are deprecated, but there is limited support..
+ *
+ * The "sg_flags" translation is a joke..
+ */
+static int get_sgflags(struct tty_struct *tty)
+{
+ int flags = 0;
+
+ if (!(tty->termios->c_lflag & ICANON)) {
+ if (tty->termios->c_lflag & ISIG)
+ flags |= 0x02; /* cbreak */
+ else
+ flags |= 0x20; /* raw */
+ }
+ if (tty->termios->c_lflag & ECHO)
+ flags |= 0x08; /* echo */
+ if (tty->termios->c_oflag & OPOST)
+ if (tty->termios->c_oflag & ONLCR)
+ flags |= 0x10; /* crmod */
+ return flags;
+}
+
+static int get_sgttyb(struct tty_struct *tty, struct sgttyb __user *sgttyb)
+{
+ struct sgttyb tmp;
+
+ mutex_lock(&tty->termios_mutex);
+ tmp.sg_ispeed = tty->termios->c_ispeed;
+ tmp.sg_ospeed = tty->termios->c_ospeed;
+ tmp.sg_erase = tty->termios->c_cc[VERASE];
+ tmp.sg_kill = tty->termios->c_cc[VKILL];
+ tmp.sg_flags = get_sgflags(tty);
+ mutex_unlock(&tty->termios_mutex);
+
+ return copy_to_user(sgttyb, &tmp, sizeof(tmp)) ? -EFAULT : 0;
+}
+
+static void set_sgflags(struct ktermios *termios, int flags)
+{
+ termios->c_iflag = ICRNL | IXON;
+ termios->c_oflag = 0;
+ termios->c_lflag = ISIG | ICANON;
+ if (flags & 0x02) { /* cbreak */
+ termios->c_iflag = 0;
+ termios->c_lflag &= ~ICANON;
+ }
+ if (flags & 0x08) { /* echo */
+ termios->c_lflag |= ECHO | ECHOE | ECHOK |
+ ECHOCTL | ECHOKE | IEXTEN;
+ }
+ if (flags & 0x10) { /* crmod */
+ termios->c_oflag |= OPOST | ONLCR;
+ }
+ if (flags & 0x20) { /* raw */
+ termios->c_iflag = 0;
+ termios->c_lflag &= ~(ISIG | ICANON);
+ }
+ if (!(termios->c_lflag & ICANON)) {
+ termios->c_cc[VMIN] = 1;
+ termios->c_cc[VTIME] = 0;
+ }
+}
+
+/**
+ * set_sgttyb - set legacy terminal values
+ * @tty: tty structure
+ * @sgttyb: pointer to old style terminal structure
+ *
+ * Updates a terminal from the legacy BSD style terminal information
+ * structure.
+ *
+ * Locking: termios_mutex
+ */
+
+static int set_sgttyb(struct tty_struct *tty, struct sgttyb __user *sgttyb)
+{
+ int retval;
+ struct sgttyb tmp;
+ struct ktermios termios;
+
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+
+ if (copy_from_user(&tmp, sgttyb, sizeof(tmp)))
+ return -EFAULT;
+
+ mutex_lock(&tty->termios_mutex);
+ termios = *tty->termios;
+ termios.c_cc[VERASE] = tmp.sg_erase;
+ termios.c_cc[VKILL] = tmp.sg_kill;
+ set_sgflags(&termios, tmp.sg_flags);
+ /* Try and encode into Bfoo format */
+#ifdef BOTHER
+ tty_termios_encode_baud_rate(&termios, termios.c_ispeed,
+ termios.c_ospeed);
+#endif
+ mutex_unlock(&tty->termios_mutex);
+ tty_set_termios(tty, &termios);
+ return 0;
+}
+#endif
+
+#ifdef TIOCGETC
+static int get_tchars(struct tty_struct *tty, struct tchars __user *tchars)
+{
+ struct tchars tmp;
+
+ mutex_lock(&tty->termios_mutex);
+ tmp.t_intrc = tty->termios->c_cc[VINTR];
+ tmp.t_quitc = tty->termios->c_cc[VQUIT];
+ tmp.t_startc = tty->termios->c_cc[VSTART];
+ tmp.t_stopc = tty->termios->c_cc[VSTOP];
+ tmp.t_eofc = tty->termios->c_cc[VEOF];
+ tmp.t_brkc = tty->termios->c_cc[VEOL2]; /* what is brkc anyway? */
+ mutex_unlock(&tty->termios_mutex);
+ return copy_to_user(tchars, &tmp, sizeof(tmp)) ? -EFAULT : 0;
+}
+
+static int set_tchars(struct tty_struct *tty, struct tchars __user *tchars)
+{
+ struct tchars tmp;
+
+ if (copy_from_user(&tmp, tchars, sizeof(tmp)))
+ return -EFAULT;
+ mutex_lock(&tty->termios_mutex);
+ tty->termios->c_cc[VINTR] = tmp.t_intrc;
+ tty->termios->c_cc[VQUIT] = tmp.t_quitc;
+ tty->termios->c_cc[VSTART] = tmp.t_startc;
+ tty->termios->c_cc[VSTOP] = tmp.t_stopc;
+ tty->termios->c_cc[VEOF] = tmp.t_eofc;
+ tty->termios->c_cc[VEOL2] = tmp.t_brkc; /* what is brkc anyway? */
+ mutex_unlock(&tty->termios_mutex);
+ return 0;
+}
+#endif
+
+#ifdef TIOCGLTC
+static int get_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
+{
+ struct ltchars tmp;
+
+ mutex_lock(&tty->termios_mutex);
+ tmp.t_suspc = tty->termios->c_cc[VSUSP];
+ /* what is dsuspc anyway? */
+ tmp.t_dsuspc = tty->termios->c_cc[VSUSP];
+ tmp.t_rprntc = tty->termios->c_cc[VREPRINT];
+ /* what is flushc anyway? */
+ tmp.t_flushc = tty->termios->c_cc[VEOL2];
+ tmp.t_werasc = tty->termios->c_cc[VWERASE];
+ tmp.t_lnextc = tty->termios->c_cc[VLNEXT];
+ mutex_unlock(&tty->termios_mutex);
+ return copy_to_user(ltchars, &tmp, sizeof(tmp)) ? -EFAULT : 0;
+}
+
+static int set_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
+{
+ struct ltchars tmp;
+
+ if (copy_from_user(&tmp, ltchars, sizeof(tmp)))
+ return -EFAULT;
+
+ mutex_lock(&tty->termios_mutex);
+ tty->termios->c_cc[VSUSP] = tmp.t_suspc;
+ /* what is dsuspc anyway? */
+ tty->termios->c_cc[VEOL2] = tmp.t_dsuspc;
+ tty->termios->c_cc[VREPRINT] = tmp.t_rprntc;
+ /* what is flushc anyway? */
+ tty->termios->c_cc[VEOL2] = tmp.t_flushc;
+ tty->termios->c_cc[VWERASE] = tmp.t_werasc;
+ tty->termios->c_cc[VLNEXT] = tmp.t_lnextc;
+ mutex_unlock(&tty->termios_mutex);
+ return 0;
+}
+#endif
+
+/**
+ *	send_prio_char		-	send priority character
+ *	@tty: terminal to send the character on
+ *	@ch: character to send
+ *
+ * Send a high priority character to the tty even if stopped
+ *
+ * Locking: none for xchar method, write ordering for write method.
+ */
+
+static int send_prio_char(struct tty_struct *tty, char ch)
+{
+ int was_stopped = tty->stopped;
+
+ if (tty->ops->send_xchar) {
+ tty->ops->send_xchar(tty, ch);
+ return 0;
+ }
+
+ if (tty_write_lock(tty, 0) < 0)
+ return -ERESTARTSYS;
+
+ if (was_stopped)
+ start_tty(tty);
+ tty->ops->write(tty, &ch, 1);
+ if (was_stopped)
+ stop_tty(tty);
+ tty_write_unlock(tty);
+ return 0;
+}
+
+/**
+ * tty_change_softcar - carrier change ioctl helper
+ * @tty: tty to update
+ * @arg: enable/disable CLOCAL
+ *
+ * Perform a change to the CLOCAL state and call into the driver
+ * layer to make it visible. All done with the termios mutex
+ */
+
+static int tty_change_softcar(struct tty_struct *tty, int arg)
+{
+ int ret = 0;
+ int bit = arg ? CLOCAL : 0;
+ struct ktermios old;
+
+ mutex_lock(&tty->termios_mutex);
+ old = *tty->termios;
+ tty->termios->c_cflag &= ~CLOCAL;
+ tty->termios->c_cflag |= bit;
+ if (tty->ops->set_termios)
+ tty->ops->set_termios(tty, &old);
+ if ((tty->termios->c_cflag & CLOCAL) != bit)
+ ret = -EINVAL;
+ mutex_unlock(&tty->termios_mutex);
+ return ret;
+}
+
+/**
+ * tty_mode_ioctl - mode related ioctls
+ * @tty: tty for the ioctl
+ * @file: file pointer for the tty
+ * @cmd: command
+ * @arg: ioctl argument
+ *
+ * Perform non line discipline specific mode control ioctls. This
+ * is designed to be called by line disciplines to ensure they provide
+ * consistent mode setting.
+ */
+
+int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct tty_struct *real_tty;
+ void __user *p = (void __user *)arg;
+ int ret = 0;
+ struct ktermios kterm;
+
+ BUG_ON(file == NULL);
+
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER)
+ real_tty = tty->link;
+ else
+ real_tty = tty;
+
+ switch (cmd) {
+#ifdef TIOCGETP
+ case TIOCGETP:
+ return get_sgttyb(real_tty, (struct sgttyb __user *) arg);
+ case TIOCSETP:
+ case TIOCSETN:
+ return set_sgttyb(real_tty, (struct sgttyb __user *) arg);
+#endif
+#ifdef TIOCGETC
+ case TIOCGETC:
+ return get_tchars(real_tty, p);
+ case TIOCSETC:
+ return set_tchars(real_tty, p);
+#endif
+#ifdef TIOCGLTC
+ case TIOCGLTC:
+ return get_ltchars(real_tty, p);
+ case TIOCSLTC:
+ return set_ltchars(real_tty, p);
+#endif
+ case TCSETSF:
+ return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT | TERMIOS_OLD);
+ case TCSETSW:
+ return set_termios(real_tty, p, TERMIOS_WAIT | TERMIOS_OLD);
+ case TCSETS:
+ return set_termios(real_tty, p, TERMIOS_OLD);
+#ifndef TCGETS2
+ case TCGETS:
+ copy_termios(real_tty, &kterm);
+ if (kernel_termios_to_user_termios((struct termios __user *)arg, &kterm))
+ ret = -EFAULT;
+ return ret;
+#else
+ case TCGETS:
+ copy_termios(real_tty, &kterm);
+ if (kernel_termios_to_user_termios_1((struct termios __user *)arg, &kterm))
+ ret = -EFAULT;
+ return ret;
+ case TCGETS2:
+ copy_termios(real_tty, &kterm);
+ if (kernel_termios_to_user_termios((struct termios2 __user *)arg, &kterm))
+ ret = -EFAULT;
+ return ret;
+ case TCSETSF2:
+ return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT);
+ case TCSETSW2:
+ return set_termios(real_tty, p, TERMIOS_WAIT);
+ case TCSETS2:
+ return set_termios(real_tty, p, 0);
+#endif
+ case TCGETA:
+ return get_termio(real_tty, p);
+ case TCSETAF:
+ return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT | TERMIOS_TERMIO);
+ case TCSETAW:
+ return set_termios(real_tty, p, TERMIOS_WAIT | TERMIOS_TERMIO);
+ case TCSETA:
+ return set_termios(real_tty, p, TERMIOS_TERMIO);
+#ifndef TCGETS2
+ case TIOCGLCKTRMIOS:
+ copy_termios_locked(real_tty, &kterm);
+ if (kernel_termios_to_user_termios((struct termios __user *)arg, &kterm))
+ ret = -EFAULT;
+ return ret;
+ case TIOCSLCKTRMIOS:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ copy_termios_locked(real_tty, &kterm);
+ if (user_termios_to_kernel_termios(&kterm,
+ (struct termios __user *) arg))
+ return -EFAULT;
+ mutex_lock(&real_tty->termios_mutex);
+ memcpy(real_tty->termios_locked, &kterm, sizeof(struct ktermios));
+ mutex_unlock(&real_tty->termios_mutex);
+ return 0;
+#else
+ case TIOCGLCKTRMIOS:
+ copy_termios_locked(real_tty, &kterm);
+ if (kernel_termios_to_user_termios_1((struct termios __user *)arg, &kterm))
+ ret = -EFAULT;
+ return ret;
+ case TIOCSLCKTRMIOS:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ copy_termios_locked(real_tty, &kterm);
+ if (user_termios_to_kernel_termios_1(&kterm,
+ (struct termios __user *) arg))
+ return -EFAULT;
+ mutex_lock(&real_tty->termios_mutex);
+ memcpy(real_tty->termios_locked, &kterm, sizeof(struct ktermios));
+ mutex_unlock(&real_tty->termios_mutex);
+ return ret;
+#endif
+#ifdef TCGETX
+ case TCGETX: {
+ struct termiox ktermx;
+ if (real_tty->termiox == NULL)
+ return -EINVAL;
+ mutex_lock(&real_tty->termios_mutex);
+ memcpy(&ktermx, real_tty->termiox, sizeof(struct termiox));
+ mutex_unlock(&real_tty->termios_mutex);
+ if (copy_to_user(p, &ktermx, sizeof(struct termiox)))
+ ret = -EFAULT;
+ return ret;
+ }
+ case TCSETX:
+ return set_termiox(real_tty, p, 0);
+ case TCSETXW:
+ return set_termiox(real_tty, p, TERMIOS_WAIT);
+ case TCSETXF:
+ return set_termiox(real_tty, p, TERMIOS_FLUSH);
+#endif
+ case TIOCGSOFTCAR:
+ copy_termios(real_tty, &kterm);
+ ret = put_user((kterm.c_cflag & CLOCAL) ? 1 : 0,
+ (int __user *)arg);
+ return ret;
+ case TIOCSSOFTCAR:
+ if (get_user(arg, (unsigned int __user *) arg))
+ return -EFAULT;
+ return tty_change_softcar(real_tty, arg);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+EXPORT_SYMBOL_GPL(tty_mode_ioctl);
+
+int tty_perform_flush(struct tty_struct *tty, unsigned long arg)
+{
+ struct tty_ldisc *ld;
+ int retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+
+ ld = tty_ldisc_ref_wait(tty);
+ switch (arg) {
+ case TCIFLUSH:
+ if (ld && ld->ops->flush_buffer)
+ ld->ops->flush_buffer(tty);
+ break;
+ case TCIOFLUSH:
+ if (ld && ld->ops->flush_buffer)
+ ld->ops->flush_buffer(tty);
+ /* fall through */
+ case TCOFLUSH:
+ tty_driver_flush_buffer(tty);
+ break;
+ default:
+ tty_ldisc_deref(ld);
+ return -EINVAL;
+ }
+ tty_ldisc_deref(ld);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tty_perform_flush);
+
+int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ unsigned long flags;
+ int retval;
+
+ switch (cmd) {
+ case TCXONC:
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ switch (arg) {
+ case TCOOFF:
+ if (!tty->flow_stopped) {
+ tty->flow_stopped = 1;
+ stop_tty(tty);
+ }
+ break;
+ case TCOON:
+ if (tty->flow_stopped) {
+ tty->flow_stopped = 0;
+ start_tty(tty);
+ }
+ break;
+ case TCIOFF:
+ if (STOP_CHAR(tty) != __DISABLED_CHAR)
+ return send_prio_char(tty, STOP_CHAR(tty));
+ break;
+ case TCION:
+ if (START_CHAR(tty) != __DISABLED_CHAR)
+ return send_prio_char(tty, START_CHAR(tty));
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+ case TCFLSH:
+ return tty_perform_flush(tty, arg);
+ case TIOCPKT:
+ {
+ int pktmode;
+
+ if (tty->driver->type != TTY_DRIVER_TYPE_PTY ||
+ tty->driver->subtype != PTY_TYPE_MASTER)
+ return -ENOTTY;
+ if (get_user(pktmode, (int __user *) arg))
+ return -EFAULT;
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ if (pktmode) {
+ if (!tty->packet) {
+ tty->packet = 1;
+ tty->link->ctrl_status = 0;
+ }
+ } else
+ tty->packet = 0;
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ return 0;
+ }
+ default:
+ /* Try the mode commands */
+ return tty_mode_ioctl(tty, file, cmd, arg);
+ }
+}
+EXPORT_SYMBOL(n_tty_ioctl_helper);
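For reference, a minimal sketch (not part of this patch) of how a line discipline's ioctl handler might fall back to n_tty_ioctl_helper() for the shared commands; the example_* names are hypothetical:

static int example_ldisc_ioctl(struct tty_struct *tty, struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TIOCOUTQ:
		/* discipline-specific handling would go here */
		return put_user(0, (int __user *)arg);
	default:
		/* TCXONC, TCFLSH, TIOCPKT and the termios ioctls */
		return n_tty_ioctl_helper(tty, file, cmd, arg);
	}
}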
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
new file mode 100644
index 00000000..a76c808a
--- /dev/null
+++ b/drivers/tty/tty_ldisc.c
@@ -0,0 +1,994 @@
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/devpts_fs.h>
+#include <linux/file.h>
+#include <linux/console.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/kd.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+
+#include <linux/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/kbd_kern.h>
+#include <linux/vt_kern.h>
+#include <linux/selection.h>
+
+#include <linux/kmod.h>
+#include <linux/nsproxy.h>
+#include <linux/ratelimit.h>
+
+/*
+ * This guards the refcounted line discipline lists. The lock
+ * must be taken with irqs off because there are hangup path
+ * callers who will do ldisc lookups and cannot sleep.
+ */
+
+static DEFINE_SPINLOCK(tty_ldisc_lock);
+static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
+static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle);
+/* Line disc dispatch table */
+static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
+
+static inline struct tty_ldisc *get_ldisc(struct tty_ldisc *ld)
+{
+ if (ld)
+ atomic_inc(&ld->users);
+ return ld;
+}
+
+static void put_ldisc(struct tty_ldisc *ld)
+{
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(!ld))
+ return;
+
+ /*
+ * If this is the last user, free the ldisc, and
+ * release the ldisc ops.
+ *
+ * We really want an "atomic_dec_and_lock_irqsave()",
+ * but we don't have it, so this does it by hand.
+ */
+ local_irq_save(flags);
+ if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
+ struct tty_ldisc_ops *ldo = ld->ops;
+
+ ldo->refcount--;
+ module_put(ldo->owner);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ kfree(ld);
+ return;
+ }
+ local_irq_restore(flags);
+ wake_up(&tty_ldisc_idle);
+}
+
+/**
+ * tty_register_ldisc - install a line discipline
+ * @disc: ldisc number
+ * @new_ldisc: pointer to the ldisc object
+ *
+ * Installs a new line discipline into the kernel. The discipline
+ * is set up as unreferenced and then made available to the kernel
+ * from this point onwards.
+ *
+ * Locking:
+ * takes tty_ldisc_lock to guard against ldisc races
+ */
+
+int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (disc < N_TTY || disc >= NR_LDISCS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ tty_ldiscs[disc] = new_ldisc;
+ new_ldisc->num = disc;
+ new_ldisc->refcount = 0;
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(tty_register_ldisc);
+
+/**
+ * tty_unregister_ldisc - unload a line discipline
+ * @disc: ldisc number
+ *
+ * Remove a line discipline from the kernel providing it is not
+ * currently in use.
+ *
+ * Locking:
+ * takes tty_ldisc_lock to guard against ldisc races
+ */
+
+int tty_unregister_ldisc(int disc)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (disc < N_TTY || disc >= NR_LDISCS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ if (tty_ldiscs[disc]->refcount)
+ ret = -EBUSY;
+ else
+ tty_ldiscs[disc] = NULL;
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(tty_unregister_ldisc);
+
+static struct tty_ldisc_ops *get_ldops(int disc)
+{
+ unsigned long flags;
+ struct tty_ldisc_ops *ldops, *ret;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ ret = ERR_PTR(-EINVAL);
+ ldops = tty_ldiscs[disc];
+ if (ldops) {
+ ret = ERR_PTR(-EAGAIN);
+ if (try_module_get(ldops->owner)) {
+ ldops->refcount++;
+ ret = ldops;
+ }
+ }
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ return ret;
+}
+
+static void put_ldops(struct tty_ldisc_ops *ldops)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ ldops->refcount--;
+ module_put(ldops->owner);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+}
+
+/**
+ * tty_ldisc_get - take a reference to an ldisc
+ * @disc: ldisc number
+ *
+ * Takes a reference to a line discipline. Deals with refcounts and
+ * module locking counts. Returns NULL if the discipline is not available.
+ * Returns a pointer to the discipline and bumps the ref count if it is
+ * available
+ *
+ * Locking:
+ * takes tty_ldisc_lock to guard against ldisc races
+ */
+
+static struct tty_ldisc *tty_ldisc_get(int disc)
+{
+ struct tty_ldisc *ld;
+ struct tty_ldisc_ops *ldops;
+
+ if (disc < N_TTY || disc >= NR_LDISCS)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Get the ldisc ops - we may need to request them to be loaded
+ * dynamically and try again.
+ */
+ ldops = get_ldops(disc);
+ if (IS_ERR(ldops)) {
+ request_module("tty-ldisc-%d", disc);
+ ldops = get_ldops(disc);
+ if (IS_ERR(ldops))
+ return ERR_CAST(ldops);
+ }
+
+ ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
+ if (ld == NULL) {
+ put_ldops(ldops);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ld->ops = ldops;
+ atomic_set(&ld->users, 1);
+ return ld;
+}
+
+static void *tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos)
+{
+ return (*pos < NR_LDISCS) ? pos : NULL;
+}
+
+static void *tty_ldiscs_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return (*pos < NR_LDISCS) ? pos : NULL;
+}
+
+static void tty_ldiscs_seq_stop(struct seq_file *m, void *v)
+{
+}
+
+static int tty_ldiscs_seq_show(struct seq_file *m, void *v)
+{
+ int i = *(loff_t *)v;
+ struct tty_ldisc_ops *ldops;
+
+ ldops = get_ldops(i);
+ if (IS_ERR(ldops))
+ return 0;
+ seq_printf(m, "%-10s %2d\n", ldops->name ? ldops->name : "???", i);
+ put_ldops(ldops);
+ return 0;
+}
+
+static const struct seq_operations tty_ldiscs_seq_ops = {
+ .start = tty_ldiscs_seq_start,
+ .next = tty_ldiscs_seq_next,
+ .stop = tty_ldiscs_seq_stop,
+ .show = tty_ldiscs_seq_show,
+};
+
+static int proc_tty_ldiscs_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &tty_ldiscs_seq_ops);
+}
+
+const struct file_operations tty_ldiscs_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = proc_tty_ldiscs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/**
+ * tty_ldisc_assign - set ldisc on a tty
+ * @tty: tty to assign
+ * @ld: line discipline
+ *
+ * Install an instance of a line discipline into a tty structure. The
+ * ldisc must have a reference count above zero to ensure it remains.
+ * The tty instance refcount starts at zero.
+ *
+ * Locking:
+ * Caller must hold references
+ */
+
+static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
+{
+ tty->ldisc = ld;
+}
+
+/**
+ * tty_ldisc_try - internal helper
+ * @tty: the tty
+ *
+ *	Make a single attempt to grab and bump the refcount on
+ *	the tty ldisc. Return the referenced ldisc on success or NULL
+ *	on failure. This is used to implement both the waiting and
+ *	non waiting versions of tty_ldisc_ref
+ *
+ * Locking: takes tty_ldisc_lock
+ */
+
+static struct tty_ldisc *tty_ldisc_try(struct tty_struct *tty)
+{
+ unsigned long flags;
+ struct tty_ldisc *ld;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ ld = NULL;
+ if (test_bit(TTY_LDISC, &tty->flags))
+ ld = get_ldisc(tty->ldisc);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ return ld;
+}
+
+/**
+ * tty_ldisc_ref_wait - wait for the tty ldisc
+ * @tty: tty device
+ *
+ * Dereference the line discipline for the terminal and take a
+ * reference to it. If the line discipline is in flux then
+ * wait patiently until it changes.
+ *
+ * Note: Must not be called from an IRQ/timer context. The caller
+ * must also be careful not to hold other locks that will deadlock
+ * against a discipline change, such as an existing ldisc reference
+ * (which we check for)
+ *
+ *	Locking: called functions take tty_ldisc_lock
+ */
+
+struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld;
+
+ /* wait_event is a macro */
+ wait_event(tty_ldisc_wait, (ld = tty_ldisc_try(tty)) != NULL);
+ return ld;
+}
+EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
+
+/**
+ * tty_ldisc_ref - get the tty ldisc
+ * @tty: tty device
+ *
+ * Dereference the line discipline for the terminal and take a
+ * reference to it. If the line discipline is in flux then
+ * return NULL. Can be called from IRQ and timer functions.
+ *
+ * Locking: called functions take tty_ldisc_lock
+ */
+
+struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
+{
+ return tty_ldisc_try(tty);
+}
+EXPORT_SYMBOL_GPL(tty_ldisc_ref);
+
+/**
+ * tty_ldisc_deref - free a tty ldisc reference
+ * @ld: reference to free up
+ *
+ * Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May
+ * be called in IRQ context.
+ *
+ * Locking: takes tty_ldisc_lock
+ */
+
+void tty_ldisc_deref(struct tty_ldisc *ld)
+{
+ put_ldisc(ld);
+}
+EXPORT_SYMBOL_GPL(tty_ldisc_deref);
+
+static inline void tty_ldisc_put(struct tty_ldisc *ld)
+{
+ put_ldisc(ld);
+}
+
+/**
+ * tty_ldisc_enable - allow ldisc use
+ * @tty: terminal to activate ldisc on
+ *
+ * Set the TTY_LDISC flag when the line discipline can be called
+ * again. Do necessary wakeups for existing sleepers. Clear the LDISC
+ * changing flag to indicate any ldisc change is now over.
+ *
+ * Note: nobody should set the TTY_LDISC bit except via this function.
+ * Clearing directly is allowed.
+ */
+
+void tty_ldisc_enable(struct tty_struct *tty)
+{
+ set_bit(TTY_LDISC, &tty->flags);
+ clear_bit(TTY_LDISC_CHANGING, &tty->flags);
+ wake_up(&tty_ldisc_wait);
+}
+
+/**
+ * tty_ldisc_flush - flush line discipline queue
+ * @tty: tty
+ *
+ * Flush the line discipline queue (if any) for this tty. If there
+ * is no line discipline active this is a no-op.
+ */
+
+void tty_ldisc_flush(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld = tty_ldisc_ref(tty);
+ if (ld) {
+ if (ld->ops->flush_buffer)
+ ld->ops->flush_buffer(tty);
+ tty_ldisc_deref(ld);
+ }
+ tty_buffer_flush(tty);
+}
+EXPORT_SYMBOL_GPL(tty_ldisc_flush);
+
+/**
+ * tty_set_termios_ldisc - set ldisc field
+ * @tty: tty structure
+ * @num: line discipline number
+ *
+ * This is probably overkill for real world processors but
+ * they are not on hot paths so a little discipline won't do
+ * any harm.
+ *
+ * Locking: takes termios_mutex
+ */
+
+static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
+{
+ mutex_lock(&tty->termios_mutex);
+ tty->termios->c_line = num;
+ mutex_unlock(&tty->termios_mutex);
+}
+
+/**
+ * tty_ldisc_open - open a line discipline
+ * @tty: tty we are opening the ldisc on
+ * @ld: discipline to open
+ *
+ * A helper opening method. Also a convenient debugging and check
+ * point.
+ *
+ * Locking: always called with BTM already held.
+ */
+
+static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
+{
+ WARN_ON(test_and_set_bit(TTY_LDISC_OPEN, &tty->flags));
+ if (ld->ops->open) {
+ int ret;
+ /* BTM here locks versus a hangup event */
+ WARN_ON(!tty_locked());
+ ret = ld->ops->open(tty);
+ if (ret)
+ clear_bit(TTY_LDISC_OPEN, &tty->flags);
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * tty_ldisc_close - close a line discipline
+ *	@tty: tty we are closing the ldisc on
+ * @ld: discipline to close
+ *
+ * A helper close method. Also a convenient debugging and check
+ * point.
+ */
+
+static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
+{
+ WARN_ON(!test_bit(TTY_LDISC_OPEN, &tty->flags));
+ clear_bit(TTY_LDISC_OPEN, &tty->flags);
+ if (ld->ops->close)
+ ld->ops->close(tty);
+}
+
+/**
+ * tty_ldisc_restore - helper for tty ldisc change
+ * @tty: tty to recover
+ * @old: previous ldisc
+ *
+ * Restore the previous line discipline or N_TTY when a line discipline
+ * change fails due to an open error
+ */
+
+static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
+{
+ char buf[64];
+ struct tty_ldisc *new_ldisc;
+ int r;
+
+ /* There is an outstanding reference here so this is safe */
+ old = tty_ldisc_get(old->ops->num);
+ WARN_ON(IS_ERR(old));
+ tty_ldisc_assign(tty, old);
+ tty_set_termios_ldisc(tty, old->ops->num);
+ if (tty_ldisc_open(tty, old) < 0) {
+ tty_ldisc_put(old);
+ /* This driver is always present */
+ new_ldisc = tty_ldisc_get(N_TTY);
+ if (IS_ERR(new_ldisc))
+ panic("n_tty: get");
+ tty_ldisc_assign(tty, new_ldisc);
+ tty_set_termios_ldisc(tty, N_TTY);
+ r = tty_ldisc_open(tty, new_ldisc);
+ if (r < 0)
+ panic("Couldn't open N_TTY ldisc for "
+ "%s --- error %d.",
+ tty_name(tty, buf), r);
+ }
+}
+
+/**
+ * tty_ldisc_halt - shut down the line discipline
+ * @tty: tty device
+ *
+ * Shut down the line discipline and work queue for this tty device.
+ * The TTY_LDISC flag being cleared ensures no further references can
+ * be obtained while the delayed work queue halt ensures that no more
+ * data is fed to the ldisc.
+ *
+ * You need to do a 'flush_scheduled_work()' (outside the ldisc_mutex)
+ * in order to make sure any currently executing ldisc work is also
+ * flushed.
+ */
+
+static int tty_ldisc_halt(struct tty_struct *tty)
+{
+ clear_bit(TTY_LDISC, &tty->flags);
+ return cancel_work_sync(&tty->buf.work);
+}
+
+/**
+ * tty_ldisc_flush_works - flush all works of a tty
+ * @tty: tty device to flush works for
+ *
+ * Sync flush all works belonging to @tty.
+ */
+static void tty_ldisc_flush_works(struct tty_struct *tty)
+{
+ flush_work_sync(&tty->hangup_work);
+ flush_work_sync(&tty->SAK_work);
+ flush_work_sync(&tty->buf.work);
+}
+
+/**
+ * tty_ldisc_wait_idle - wait for the ldisc to become idle
+ * @tty: tty to wait for
+ * @timeout: for how long to wait at most
+ *
+ * Wait for the line discipline to become idle. The discipline must
+ * have been halted for this to guarantee it remains idle.
+ */
+static int tty_ldisc_wait_idle(struct tty_struct *tty, long timeout)
+{
+ long ret;
+ ret = wait_event_timeout(tty_ldisc_idle,
+ atomic_read(&tty->ldisc->users) == 1, timeout);
+ if (ret < 0)
+ return ret;
+ return ret > 0 ? 0 : -EBUSY;
+}
+
+/**
+ * tty_set_ldisc - set line discipline
+ * @tty: the terminal to set
+ * @ldisc: the line discipline
+ *
+ * Set the discipline of a tty line. Must be called from a process
+ * context. The ldisc change logic has to protect itself against any
+ * overlapping ldisc change (including on the other end of pty pairs),
+ * the close of one side of a tty/pty pair, and eventually hangup.
+ *
+ * Locking: takes tty_ldisc_lock, termios_mutex
+ */
+
+int tty_set_ldisc(struct tty_struct *tty, int ldisc)
+{
+ int retval;
+ struct tty_ldisc *o_ldisc, *new_ldisc;
+ int work, o_work = 0;
+ struct tty_struct *o_tty;
+
+ new_ldisc = tty_ldisc_get(ldisc);
+ if (IS_ERR(new_ldisc))
+ return PTR_ERR(new_ldisc);
+
+ tty_lock();
+ /*
+ * We need to look at the tty locking here for pty/tty pairs
+ * when both sides try to change in parallel.
+ */
+
+ o_tty = tty->link; /* o_tty is the pty side or NULL */
+
+
+ /*
+ * Check the no-op case
+ */
+
+ if (tty->ldisc->ops->num == ldisc) {
+ tty_unlock();
+ tty_ldisc_put(new_ldisc);
+ return 0;
+ }
+
+ tty_unlock();
+ /*
+ * Problem: What do we do if this blocks ?
+ * We could deadlock here
+ */
+
+ tty_wait_until_sent(tty, 0);
+
+ tty_lock();
+ mutex_lock(&tty->ldisc_mutex);
+
+ /*
+ * We could be midstream of another ldisc change which has
+ * dropped the lock during processing. If so we need to wait.
+ */
+
+ while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
+ mutex_unlock(&tty->ldisc_mutex);
+ tty_unlock();
+ wait_event(tty_ldisc_wait,
+ test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
+ tty_lock();
+ mutex_lock(&tty->ldisc_mutex);
+ }
+
+ set_bit(TTY_LDISC_CHANGING, &tty->flags);
+
+ /*
+ * No more input please, we are switching. The new ldisc
+ * will update this value in the ldisc open function
+ */
+
+ tty->receive_room = 0;
+
+ o_ldisc = tty->ldisc;
+
+ tty_unlock();
+ /*
+ * Make sure we don't change while someone holds a
+ * reference to the line discipline. The TTY_LDISC bit
+ * prevents anyone taking a reference once it is clear.
+ * We need the lock to avoid racing reference takers.
+ *
+ * We must clear the TTY_LDISC bit here to avoid a livelock
+ * with a userspace app continually trying to use the tty in
+ * parallel to the change and re-referencing the tty.
+ */
+
+ work = tty_ldisc_halt(tty);
+ if (o_tty)
+ o_work = tty_ldisc_halt(o_tty);
+
+ /*
+ * Wait for ->hangup_work and ->buf.work handlers to terminate.
+ * We must drop the mutex here in case a hangup is also in process.
+ */
+
+ mutex_unlock(&tty->ldisc_mutex);
+
+ tty_ldisc_flush_works(tty);
+
+ retval = tty_ldisc_wait_idle(tty, 5 * HZ);
+
+ tty_lock();
+ mutex_lock(&tty->ldisc_mutex);
+
+ /* handle wait idle failure locked */
+ if (retval) {
+ tty_ldisc_put(new_ldisc);
+ goto enable;
+ }
+
+ if (test_bit(TTY_HUPPED, &tty->flags)) {
+ /* We were raced by the hangup method. It will have stomped
+ the ldisc data and closed the ldisc down */
+ clear_bit(TTY_LDISC_CHANGING, &tty->flags);
+ mutex_unlock(&tty->ldisc_mutex);
+ tty_ldisc_put(new_ldisc);
+ tty_unlock();
+ return -EIO;
+ }
+
+ /* Shutdown the current discipline. */
+ tty_ldisc_close(tty, o_ldisc);
+
+ /* Now set up the new line discipline. */
+ tty_ldisc_assign(tty, new_ldisc);
+ tty_set_termios_ldisc(tty, ldisc);
+
+ retval = tty_ldisc_open(tty, new_ldisc);
+ if (retval < 0) {
+ /* Back to the old one or N_TTY if we can't */
+ tty_ldisc_put(new_ldisc);
+ tty_ldisc_restore(tty, o_ldisc);
+ }
+
+	/* At this point we hold a reference to the new ldisc and a
+	   reference to the old ldisc. If we ended up flipping back
+	   to the existing ldisc we have two references to it */
+
+ if (tty->ldisc->ops->num != o_ldisc->ops->num && tty->ops->set_ldisc)
+ tty->ops->set_ldisc(tty);
+
+ tty_ldisc_put(o_ldisc);
+
+enable:
+ /*
+ * Allow ldisc referencing to occur again
+ */
+
+ tty_ldisc_enable(tty);
+ if (o_tty)
+ tty_ldisc_enable(o_tty);
+
+ /* Restart the work queue in case no characters kick it off. Safe if
+ already running */
+ if (work)
+ schedule_work(&tty->buf.work);
+ if (o_work)
+ schedule_work(&o_tty->buf.work);
+ mutex_unlock(&tty->ldisc_mutex);
+ tty_unlock();
+ return retval;
+}
+
+/**
+ * tty_reset_termios - reset terminal state
+ * @tty: tty to reset
+ *
+ * Restore a terminal to the driver default state.
+ */
+
+static void tty_reset_termios(struct tty_struct *tty)
+{
+ mutex_lock(&tty->termios_mutex);
+ *tty->termios = tty->driver->init_termios;
+ tty->termios->c_ispeed = tty_termios_input_baud_rate(tty->termios);
+ tty->termios->c_ospeed = tty_termios_baud_rate(tty->termios);
+ mutex_unlock(&tty->termios_mutex);
+}
+
+
+/**
+ * tty_ldisc_reinit - reinitialise the tty ldisc
+ * @tty: tty to reinit
+ * @ldisc: line discipline to reinitialize
+ *
+ * Switch the tty to a line discipline and leave the ldisc
+ * state closed
+ */
+
+static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
+{
+ struct tty_ldisc *ld = tty_ldisc_get(ldisc);
+
+ if (IS_ERR(ld))
+ return -1;
+
+ tty_ldisc_close(tty, tty->ldisc);
+ tty_ldisc_put(tty->ldisc);
+ tty->ldisc = NULL;
+ /*
+ * Switch the line discipline back
+ */
+ tty_ldisc_assign(tty, ld);
+ tty_set_termios_ldisc(tty, ldisc);
+
+ return 0;
+}
+
+/**
+ * tty_ldisc_hangup - hangup ldisc reset
+ * @tty: tty being hung up
+ *
+ * Some tty devices reset their termios when they receive a hangup
+ * event. In that situation we must also switch back to N_TTY properly
+ * before we reset the termios data.
+ *
+ * Locking: We can take the ldisc mutex as the rest of the code is
+ * careful to allow for this.
+ *
+ * In the pty pair case this occurs in the close() path of the
+ * tty itself so we must be careful about locking rules.
+ */
+
+void tty_ldisc_hangup(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld;
+ int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
+ int err = 0;
+
+ /*
+	 * FIXME! What are the locking issues here? This may be overdoing
+ * things... This question is especially important now that we've
+ * removed the irqlock.
+ */
+ ld = tty_ldisc_ref(tty);
+ if (ld != NULL) {
+ /* We may have no line discipline at this point */
+ if (ld->ops->flush_buffer)
+ ld->ops->flush_buffer(tty);
+ tty_driver_flush_buffer(tty);
+ if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
+ ld->ops->write_wakeup)
+ ld->ops->write_wakeup(tty);
+ if (ld->ops->hangup)
+ ld->ops->hangup(tty);
+ tty_ldisc_deref(ld);
+ }
+ /*
+ * FIXME: Once we trust the LDISC code better we can wait here for
+ * ldisc completion and fix the driver call race
+ */
+ wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
+ wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+ /*
+ * Shutdown the current line discipline, and reset it to
+ * N_TTY if need be.
+ *
+ * Avoid racing set_ldisc or tty_ldisc_release
+ */
+ mutex_lock(&tty->ldisc_mutex);
+
+ /*
+ * this is like tty_ldisc_halt, but we need to give up
+ * the BTM before calling cancel_work_sync, which may
+ * need to wait for another function taking the BTM
+ */
+ clear_bit(TTY_LDISC, &tty->flags);
+ tty_unlock();
+ cancel_work_sync(&tty->buf.work);
+ mutex_unlock(&tty->ldisc_mutex);
+retry:
+ tty_lock();
+ mutex_lock(&tty->ldisc_mutex);
+
+ /* At this point we have a closed ldisc and we want to
+ reopen it. We could defer this to the next open but
+ it means auditing a lot of other paths so this is
+ a FIXME */
+ if (tty->ldisc) { /* Not yet closed */
+ if (atomic_read(&tty->ldisc->users) != 1) {
+ char cur_n[TASK_COMM_LEN], tty_n[64];
+ long timeout = 3 * HZ;
+ tty_unlock();
+
+ while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ printk_ratelimited(KERN_WARNING
+ "%s: waiting (%s) for %s took too long, but we keep waiting...\n",
+ __func__, get_task_comm(cur_n, current),
+ tty_name(tty, tty_n));
+ }
+ mutex_unlock(&tty->ldisc_mutex);
+ goto retry;
+ }
+
+ if (reset == 0) {
+
+ if (!tty_ldisc_reinit(tty, tty->termios->c_line))
+ err = tty_ldisc_open(tty, tty->ldisc);
+ else
+ err = 1;
+ }
+ /* If the re-open fails or we reset then go to N_TTY. The
+ N_TTY open cannot fail */
+ if (reset || err) {
+ BUG_ON(tty_ldisc_reinit(tty, N_TTY));
+ WARN_ON(tty_ldisc_open(tty, tty->ldisc));
+ }
+ tty_ldisc_enable(tty);
+ }
+ mutex_unlock(&tty->ldisc_mutex);
+ if (reset)
+ tty_reset_termios(tty);
+}
+
+/**
+ * tty_ldisc_setup - open line discipline
+ *	@tty: tty being set up
+ * @o_tty: pair tty for pty/tty pairs
+ *
+ * Called during the initial open of a tty/pty pair in order to set up the
+ * line disciplines and bind them to the tty. This has no locking issues
+ * as the device isn't yet active.
+ */
+
+int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
+{
+ struct tty_ldisc *ld = tty->ldisc;
+ int retval;
+
+ retval = tty_ldisc_open(tty, ld);
+ if (retval)
+ return retval;
+
+ if (o_tty) {
+ retval = tty_ldisc_open(o_tty, o_tty->ldisc);
+ if (retval) {
+ tty_ldisc_close(tty, ld);
+ return retval;
+ }
+ tty_ldisc_enable(o_tty);
+ }
+ tty_ldisc_enable(tty);
+ return 0;
+}
+/**
+ * tty_ldisc_release - release line discipline
+ * @tty: tty being shut down
+ * @o_tty: pair tty for pty/tty pairs
+ *
+ * Called during the final close of a tty/pty pair in order to shut down
+ *	the line discipline layer. On exit the ldisc assigned is N_TTY and the
+ * ldisc has not been opened.
+ */
+
+void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
+{
+ /*
+ * Prevent flush_to_ldisc() from rescheduling the work for later. Then
+ * kill any delayed work. As this is the final close it does not
+ * race with the set_ldisc code path.
+ */
+
+ tty_unlock();
+ tty_ldisc_halt(tty);
+ tty_ldisc_flush_works(tty);
+ tty_lock();
+
+ mutex_lock(&tty->ldisc_mutex);
+ /*
+ * Now kill off the ldisc
+ */
+ tty_ldisc_close(tty, tty->ldisc);
+ tty_ldisc_put(tty->ldisc);
+ /* Force an oops if we mess this up */
+ tty->ldisc = NULL;
+
+ /* Ensure the next open requests the N_TTY ldisc */
+ tty_set_termios_ldisc(tty, N_TTY);
+ mutex_unlock(&tty->ldisc_mutex);
+
+ /* This will need doing differently if we need to lock */
+ if (o_tty)
+ tty_ldisc_release(o_tty, NULL);
+
+ /* And the memory resources remaining (buffers, termios) will be
+ disposed of when the kref hits zero */
+}
+
+/**
+ * tty_ldisc_init - ldisc setup for new tty
+ * @tty: tty being allocated
+ *
+ * Set up the line discipline objects for a newly allocated tty. Note that
+ * the tty structure is not completely set up when this call is made.
+ */
+
+void tty_ldisc_init(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld = tty_ldisc_get(N_TTY);
+ if (IS_ERR(ld))
+ panic("n_tty: init_tty");
+ tty_ldisc_assign(tty, ld);
+}
+
+/**
+ *	tty_ldisc_deinit	-	ldisc cleanup for new tty
+ * @tty: tty that was allocated recently
+ *
+ *	The tty structure must not be completely set up (tty_ldisc_setup) when
+ * this call is made.
+ */
+void tty_ldisc_deinit(struct tty_struct *tty)
+{
+ put_ldisc(tty->ldisc);
+ tty_ldisc_assign(tty, NULL);
+}
+
+void tty_ldisc_begin(void)
+{
+ /* Setup the default TTY line discipline. */
+ (void) tty_register_ldisc(N_TTY, &tty_ldisc_N_TTY);
+}
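A minimal sketch of the registration side of this API, as a hypothetical module claiming one ldisc slot; the example_* names and the reuse of the N_MOUSE slot are illustrative only:

#include <linux/module.h>
#include <linux/tty.h>

static int example_ldisc_open(struct tty_struct *tty)
{
	return 0;
}

static void example_ldisc_close(struct tty_struct *tty)
{
}

static struct tty_ldisc_ops example_ldisc_ops = {
	.magic	= TTY_LDISC_MAGIC,
	.name	= "example",
	.owner	= THIS_MODULE,
	.open	= example_ldisc_open,
	.close	= example_ldisc_close,
};

static int __init example_ldisc_init(void)
{
	return tty_register_ldisc(N_MOUSE, &example_ldisc_ops);
}

static void __exit example_ldisc_exit(void)
{
	if (tty_unregister_ldisc(N_MOUSE) != 0)
		pr_err("example: line discipline still in use\n");
}

module_init(example_ldisc_init);
module_exit(example_ldisc_exit);
MODULE_LICENSE("GPL");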
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
new file mode 100644
index 00000000..3b2bb771
--- /dev/null
+++ b/drivers/tty/tty_mutex.c
@@ -0,0 +1,44 @@
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/semaphore.h>
+#include <linux/sched.h>
+
+/*
+ * The 'big tty mutex'
+ *
+ * This mutex is taken and released by tty_lock() and tty_unlock(),
+ * replacing the older big kernel lock.
+ * It can no longer be taken recursively, and does not get
+ * released implicitly while sleeping.
+ *
+ * Don't use in new code.
+ */
+static DEFINE_MUTEX(big_tty_mutex);
+struct task_struct *__big_tty_mutex_owner;
+EXPORT_SYMBOL_GPL(__big_tty_mutex_owner);
+
+/*
+ * Getting the big tty mutex.
+ */
+void __lockfunc tty_lock(void)
+{
+ struct task_struct *task = current;
+
+ WARN_ON(__big_tty_mutex_owner == task);
+
+ mutex_lock(&big_tty_mutex);
+ __big_tty_mutex_owner = task;
+}
+EXPORT_SYMBOL(tty_lock);
+
+void __lockfunc tty_unlock(void)
+{
+ struct task_struct *task = current;
+
+ WARN_ON(__big_tty_mutex_owner != task);
+ __big_tty_mutex_owner = NULL;
+
+ mutex_unlock(&big_tty_mutex);
+}
+EXPORT_SYMBOL(tty_unlock);
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
new file mode 100644
index 00000000..a4aaca0e
--- /dev/null
+++ b/drivers/tty/tty_port.c
@@ -0,0 +1,448 @@
+/*
+ * Tty port functions
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+
+void tty_port_init(struct tty_port *port)
+{
+ memset(port, 0, sizeof(*port));
+ init_waitqueue_head(&port->open_wait);
+ init_waitqueue_head(&port->close_wait);
+ init_waitqueue_head(&port->delta_msr_wait);
+ mutex_init(&port->mutex);
+ mutex_init(&port->buf_mutex);
+ spin_lock_init(&port->lock);
+ port->close_delay = (50 * HZ) / 100;
+ port->closing_wait = (3000 * HZ) / 100;
+ kref_init(&port->kref);
+}
+EXPORT_SYMBOL(tty_port_init);
+
+int tty_port_alloc_xmit_buf(struct tty_port *port)
+{
+ /* We may sleep in get_zeroed_page() */
+ mutex_lock(&port->buf_mutex);
+ if (port->xmit_buf == NULL)
+ port->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
+ mutex_unlock(&port->buf_mutex);
+ if (port->xmit_buf == NULL)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL(tty_port_alloc_xmit_buf);
+
+void tty_port_free_xmit_buf(struct tty_port *port)
+{
+ mutex_lock(&port->buf_mutex);
+ if (port->xmit_buf != NULL) {
+ free_page((unsigned long)port->xmit_buf);
+ port->xmit_buf = NULL;
+ }
+ mutex_unlock(&port->buf_mutex);
+}
+EXPORT_SYMBOL(tty_port_free_xmit_buf);
+
+static void tty_port_destructor(struct kref *kref)
+{
+ struct tty_port *port = container_of(kref, struct tty_port, kref);
+ if (port->xmit_buf)
+ free_page((unsigned long)port->xmit_buf);
+ if (port->ops->destruct)
+ port->ops->destruct(port);
+ else
+ kfree(port);
+}
+
+void tty_port_put(struct tty_port *port)
+{
+ if (port)
+ kref_put(&port->kref, tty_port_destructor);
+}
+EXPORT_SYMBOL(tty_port_put);
+
+/**
+ * tty_port_tty_get - get a tty reference
+ * @port: tty port
+ *
+ * Return a refcount protected tty instance or NULL if the port is not
+ * associated with a tty (eg due to close or hangup)
+ */
+
+struct tty_struct *tty_port_tty_get(struct tty_port *port)
+{
+ unsigned long flags;
+ struct tty_struct *tty;
+
+ spin_lock_irqsave(&port->lock, flags);
+ tty = tty_kref_get(port->tty);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return tty;
+}
+EXPORT_SYMBOL(tty_port_tty_get);
+
+/**
+ * tty_port_tty_set - set the tty of a port
+ * @port: tty port
+ * @tty: the tty
+ *
+ * Associate the port and tty pair. Manages any internal refcounts.
+ * Pass NULL to deassociate a port
+ */
+
+void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (port->tty)
+ tty_kref_put(port->tty);
+ port->tty = tty_kref_get(tty);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+EXPORT_SYMBOL(tty_port_tty_set);
+
+static void tty_port_shutdown(struct tty_port *port)
+{
+ mutex_lock(&port->mutex);
+ if (port->ops->shutdown && !port->console &&
+ test_and_clear_bit(ASYNCB_INITIALIZED, &port->flags))
+ port->ops->shutdown(port);
+ mutex_unlock(&port->mutex);
+}
+
+/**
+ * tty_port_hangup - hangup helper
+ * @port: tty port
+ *
+ * Perform port level tty hangup flag and count changes. Drop the tty
+ * reference.
+ */
+
+void tty_port_hangup(struct tty_port *port)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->count = 0;
+ port->flags &= ~ASYNC_NORMAL_ACTIVE;
+ if (port->tty) {
+ set_bit(TTY_IO_ERROR, &port->tty->flags);
+ tty_kref_put(port->tty);
+ }
+ port->tty = NULL;
+ spin_unlock_irqrestore(&port->lock, flags);
+ wake_up_interruptible(&port->open_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
+ tty_port_shutdown(port);
+}
+EXPORT_SYMBOL(tty_port_hangup);
+
+/**
+ * tty_port_carrier_raised - carrier raised check
+ * @port: tty port
+ *
+ * Wrapper for the carrier detect logic. For the moment this is used
+ * to hide some internal details. This will eventually become entirely
+ * internal to the tty port.
+ */
+
+int tty_port_carrier_raised(struct tty_port *port)
+{
+ if (port->ops->carrier_raised == NULL)
+ return 1;
+ return port->ops->carrier_raised(port);
+}
+EXPORT_SYMBOL(tty_port_carrier_raised);
+
+/**
+ * tty_port_raise_dtr_rts - Raise DTR/RTS
+ * @port: tty port
+ *
+ * Wrapper for the DTR/RTS raise logic. For the moment this is used
+ * to hide some internal details. This will eventually become entirely
+ * internal to the tty port.
+ */
+
+void tty_port_raise_dtr_rts(struct tty_port *port)
+{
+ if (port->ops->dtr_rts)
+ port->ops->dtr_rts(port, 1);
+}
+EXPORT_SYMBOL(tty_port_raise_dtr_rts);
+
+/**
+ * tty_port_lower_dtr_rts - Lower DTR/RTS
+ * @port: tty port
+ *
+ *	Wrapper for the DTR/RTS lower logic. For the moment this is used
+ * to hide some internal details. This will eventually become entirely
+ * internal to the tty port.
+ */
+
+void tty_port_lower_dtr_rts(struct tty_port *port)
+{
+ if (port->ops->dtr_rts)
+ port->ops->dtr_rts(port, 0);
+}
+EXPORT_SYMBOL(tty_port_lower_dtr_rts);
+
+/**
+ * tty_port_block_til_ready - Waiting logic for tty open
+ * @port: the tty port being opened
+ * @tty: the tty device being bound
+ * @filp: the file pointer of the opener
+ *
+ * Implement the core POSIX/SuS tty behaviour when opening a tty device.
+ * Handles:
+ * - hangup (both before and during)
+ * - non blocking open
+ * - rts/dtr/dcd
+ * - signals
+ * - port flags and counts
+ *
+ * The passed tty_port must implement the carrier_raised method if it can
+ * do carrier detect and the dtr_rts method if it supports software
+ * management of these lines. Note that the dtr/rts raise is done each
+ * iteration as a hangup may have previously dropped them while we wait.
+ */
+
+int tty_port_block_til_ready(struct tty_port *port,
+ struct tty_struct *tty, struct file *filp)
+{
+ int do_clocal = 0, retval;
+ unsigned long flags;
+ DEFINE_WAIT(wait);
+
+ /* block if port is in the process of being closed */
+ if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
+ wait_event_interruptible_tty(port->close_wait,
+ !(port->flags & ASYNC_CLOSING));
+ if (port->flags & ASYNC_HUP_NOTIFY)
+ return -EAGAIN;
+ else
+ return -ERESTARTSYS;
+ }
+
+ /* if non-blocking mode is set we can pass directly to open unless
+ the port has just hung up or is in another error state */
+ if (tty->flags & (1 << TTY_IO_ERROR)) {
+ port->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+ }
+ if (filp->f_flags & O_NONBLOCK) {
+ /* Indicate we are open */
+ if (tty->termios->c_cflag & CBAUD)
+ tty_port_raise_dtr_rts(port);
+ port->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+ }
+
+ if (C_CLOCAL(tty))
+ do_clocal = 1;
+
+ /* Block waiting until we can proceed. We may need to wait for the
+ carrier, but we must also wait for any close that is in progress
+ before the next open may complete */
+
+ retval = 0;
+
+ /* The port lock protects the port counts */
+ spin_lock_irqsave(&port->lock, flags);
+ if (!tty_hung_up_p(filp))
+ port->count--;
+ port->blocked_open++;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ while (1) {
+ /* Indicate we are open */
+ if (tty->termios->c_cflag & CBAUD)
+ tty_port_raise_dtr_rts(port);
+
+ prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE);
+ /* Check for a hangup or uninitialised port.
+ Return accordingly */
+ if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) {
+ if (port->flags & ASYNC_HUP_NOTIFY)
+ retval = -EAGAIN;
+ else
+ retval = -ERESTARTSYS;
+ break;
+ }
+ /*
+ * Probe the carrier. For devices with no carrier detect
+ * tty_port_carrier_raised will always return true.
+ * Never ask drivers if CLOCAL is set, this causes troubles
+ * on some hardware.
+ */
+ if (!(port->flags & ASYNC_CLOSING) &&
+ (do_clocal || tty_port_carrier_raised(port)))
+ break;
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+ tty_unlock();
+ schedule();
+ tty_lock();
+ }
+ finish_wait(&port->open_wait, &wait);
+
+ /* Update counts. A parallel hangup will have set count to zero and
+ we must not mess that up further */
+ spin_lock_irqsave(&port->lock, flags);
+ if (!tty_hung_up_p(filp))
+ port->count++;
+ port->blocked_open--;
+ if (retval == 0)
+ port->flags |= ASYNC_NORMAL_ACTIVE;
+ spin_unlock_irqrestore(&port->lock, flags);
+ return retval;
+}
+EXPORT_SYMBOL(tty_port_block_til_ready);
+
+int tty_port_close_start(struct tty_port *port,
+ struct tty_struct *tty, struct file *filp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (tty_hung_up_p(filp)) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ return 0;
+ }
+
+ if (tty->count == 1 && port->count != 1) {
+ printk(KERN_WARNING
+ "tty_port_close_start: tty->count = 1 port count = %d.\n",
+ port->count);
+ port->count = 1;
+ }
+ if (--port->count < 0) {
+ printk(KERN_WARNING "tty_port_close_start: count = %d\n",
+ port->count);
+ port->count = 0;
+ }
+
+ if (port->count) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ if (port->ops->drop)
+ port->ops->drop(port);
+ return 0;
+ }
+ set_bit(ASYNCB_CLOSING, &port->flags);
+ tty->closing = 1;
+ spin_unlock_irqrestore(&port->lock, flags);
+ /* Don't block on a stalled port, just pull the chain */
+ if (tty->flow_stopped)
+ tty_driver_flush_buffer(tty);
+ if (test_bit(ASYNCB_INITIALIZED, &port->flags) &&
+ port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
+ tty_wait_until_sent(tty, port->closing_wait);
+ if (port->drain_delay) {
+ unsigned int bps = tty_get_baud_rate(tty);
+ long timeout;
+
+ if (bps > 1200)
+ timeout = max_t(long,
+ (HZ * 10 * port->drain_delay) / bps, HZ / 10);
+ else
+ timeout = 2 * HZ;
+ schedule_timeout_interruptible(timeout);
+ }
+ /* Flush the ldisc buffering */
+ tty_ldisc_flush(tty);
+
+ /* Drop DTR/RTS if HUPCL is set. This causes any attached modem to
+ hang up the line */
+ if (tty->termios->c_cflag & HUPCL)
+ tty_port_lower_dtr_rts(port);
+
+ /* Don't call port->drop for the last reference. Callers will want
+ to drop the last active reference in ->shutdown() or the tty
+ shutdown path */
+ return 1;
+}
+EXPORT_SYMBOL(tty_port_close_start);
+
+void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ tty->closing = 0;
+
+ if (port->blocked_open) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ if (port->close_delay) {
+ msleep_interruptible(
+ jiffies_to_msecs(port->close_delay));
+ }
+ spin_lock_irqsave(&port->lock, flags);
+ wake_up_interruptible(&port->open_wait);
+ }
+ port->flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING);
+ wake_up_interruptible(&port->close_wait);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+EXPORT_SYMBOL(tty_port_close_end);
+
+void tty_port_close(struct tty_port *port, struct tty_struct *tty,
+ struct file *filp)
+{
+ if (tty_port_close_start(port, tty, filp) == 0)
+ return;
+ tty_port_shutdown(port);
+ set_bit(TTY_IO_ERROR, &tty->flags);
+ tty_port_close_end(port, tty);
+ tty_port_tty_set(port, NULL);
+}
+EXPORT_SYMBOL(tty_port_close);
+
+int tty_port_open(struct tty_port *port, struct tty_struct *tty,
+ struct file *filp)
+{
+ spin_lock_irq(&port->lock);
+ if (!tty_hung_up_p(filp))
+ ++port->count;
+ spin_unlock_irq(&port->lock);
+ tty_port_tty_set(port, tty);
+
+ /*
+ * Do the device-specific open only if the hardware isn't
+ * already initialized. Serialize open and shutdown using the
+ * port mutex.
+ */
+
+ mutex_lock(&port->mutex);
+
+ if (!test_bit(ASYNCB_INITIALIZED, &port->flags)) {
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+ if (port->ops->activate) {
+ int retval = port->ops->activate(port, tty);
+ if (retval) {
+ mutex_unlock(&port->mutex);
+ return retval;
+ }
+ }
+ set_bit(ASYNCB_INITIALIZED, &port->flags);
+ }
+ mutex_unlock(&port->mutex);
+ return tty_port_block_til_ready(port, tty, filp);
+}
+
+EXPORT_SYMBOL(tty_port_open);
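A minimal sketch of how a driver might route its tty open/close/hangup paths through these tty_port helpers; the example_* names are hypothetical and the hardware work is elided:

#include <linux/tty.h>

static int example_activate(struct tty_port *port, struct tty_struct *tty)
{
	/* bring the hardware up: clocks, interrupts, FIFOs, ... */
	return 0;
}

static void example_shutdown(struct tty_port *port)
{
	/* mirror image of activate(): quiesce and power down */
}

static const struct tty_port_operations example_port_ops = {
	.activate = example_activate,
	.shutdown = example_shutdown,
};

static struct tty_port example_port;

static int example_open(struct tty_struct *tty, struct file *filp)
{
	/* blocks in tty_port_block_til_ready() until carrier/close rules allow */
	return tty_port_open(&example_port, tty, filp);
}

static void example_close(struct tty_struct *tty, struct file *filp)
{
	tty_port_close(&example_port, tty, filp);
}

static void example_hangup(struct tty_struct *tty)
{
	tty_port_hangup(&example_port);
}

/* during probe:
 *	tty_port_init(&example_port);
 *	example_port.ops = &example_port_ops;
 */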
diff --git a/drivers/tty/vt/.gitignore b/drivers/tty/vt/.gitignore
new file mode 100644
index 00000000..83683a2d
--- /dev/null
+++ b/drivers/tty/vt/.gitignore
@@ -0,0 +1,2 @@
+consolemap_deftbl.c
+defkeymap.c
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
new file mode 100644
index 00000000..14a51c99
--- /dev/null
+++ b/drivers/tty/vt/Makefile
@@ -0,0 +1,34 @@
+#
+# This file contains the font map for the default (hardware) font
+#
+FONTMAPFILE = cp437.uni
+
+obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o \
+ selection.o keyboard.o
+obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
+obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
+
+# Files generated that shall be removed upon make clean
+clean-files := consolemap_deftbl.c defkeymap.c
+
+quiet_cmd_conmk = CONMK $@
+ cmd_conmk = scripts/conmakehash $< > $@
+
+$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE)
+ $(call cmd,conmk)
+
+$(obj)/defkeymap.o: $(obj)/defkeymap.c
+
+# Uncomment if you're changing the keymap and have an appropriate
+# loadkeys version for the map. By default, we'll use the shipped
+# versions.
+# GENERATE_KEYMAP := 1
+
+ifdef GENERATE_KEYMAP
+
+$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
+ loadkeys --mktable $< > $@.tmp
+ sed -e 's/^static *//' $@.tmp > $@
+ rm $@.tmp
+
+endif
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
new file mode 100644
index 00000000..f3438083
--- /dev/null
+++ b/drivers/tty/vt/consolemap.c
@@ -0,0 +1,780 @@
+/*
+ * consolemap.c
+ *
+ * Mapping from internal code (such as Latin-1 or Unicode or IBM PC code)
+ * to font positions.
+ *
+ * aeb, 950210
+ *
+ * Support for multiple unimaps by Jakub Jelinek <jj@ultra.linux.cz>, July 1998
+ *
+ * Fix bug in inverse translation. Stanislav Voronyi <stas@cnti.uanet.kharkov.ua>, Dec 1998
+ */
+
+#include <linux/module.h>
+#include <linux/kd.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <asm/uaccess.h>
+#include <linux/consolemap.h>
+#include <linux/vt_kern.h>
+
+static unsigned short translations[][256] = {
+ /* 8-bit Latin-1 mapped to Unicode -- trivial mapping */
+ {
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
+ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
+ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
+ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
+ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
+ 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f,
+ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
+ 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f,
+ 0x00a0, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7,
+ 0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00af,
+ 0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x00b5, 0x00b6, 0x00b7,
+ 0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
+ 0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
+ 0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
+ 0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7,
+ 0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
+ 0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7,
+ 0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
+ 0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
+ 0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff
+ },
+ /* VT100 graphics mapped to Unicode */
+ {
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
+ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x2192, 0x2190, 0x2191, 0x2193, 0x002f,
+ 0x2588, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x00a0,
+ 0x25c6, 0x2592, 0x2409, 0x240c, 0x240d, 0x240a, 0x00b0, 0x00b1,
+ 0x2591, 0x240b, 0x2518, 0x2510, 0x250c, 0x2514, 0x253c, 0x23ba,
+ 0x23bb, 0x2500, 0x23bc, 0x23bd, 0x251c, 0x2524, 0x2534, 0x252c,
+ 0x2502, 0x2264, 0x2265, 0x03c0, 0x2260, 0x00a3, 0x00b7, 0x007f,
+ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
+ 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f,
+ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
+ 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f,
+ 0x00a0, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7,
+ 0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00af,
+ 0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x00b5, 0x00b6, 0x00b7,
+ 0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
+ 0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
+ 0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
+ 0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7,
+ 0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
+ 0x00e0, 0x00e1, 0x00e2, 0x00e3, 0x00e4, 0x00e5, 0x00e6, 0x00e7,
+ 0x00e8, 0x00e9, 0x00ea, 0x00eb, 0x00ec, 0x00ed, 0x00ee, 0x00ef,
+ 0x00f0, 0x00f1, 0x00f2, 0x00f3, 0x00f4, 0x00f5, 0x00f6, 0x00f7,
+ 0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00fd, 0x00fe, 0x00ff
+ },
+ /* IBM Codepage 437 mapped to Unicode */
+ {
+ 0x0000, 0x263a, 0x263b, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022,
+ 0x25d8, 0x25cb, 0x25d9, 0x2642, 0x2640, 0x266a, 0x266b, 0x263c,
+ 0x25b6, 0x25c0, 0x2195, 0x203c, 0x00b6, 0x00a7, 0x25ac, 0x21a8,
+ 0x2191, 0x2193, 0x2192, 0x2190, 0x221f, 0x2194, 0x25b2, 0x25bc,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
+ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
+ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x2302,
+ 0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00e4, 0x00e0, 0x00e5, 0x00e7,
+ 0x00ea, 0x00eb, 0x00e8, 0x00ef, 0x00ee, 0x00ec, 0x00c4, 0x00c5,
+ 0x00c9, 0x00e6, 0x00c6, 0x00f4, 0x00f6, 0x00f2, 0x00fb, 0x00f9,
+ 0x00ff, 0x00d6, 0x00dc, 0x00a2, 0x00a3, 0x00a5, 0x20a7, 0x0192,
+ 0x00e1, 0x00ed, 0x00f3, 0x00fa, 0x00f1, 0x00d1, 0x00aa, 0x00ba,
+ 0x00bf, 0x2310, 0x00ac, 0x00bd, 0x00bc, 0x00a1, 0x00ab, 0x00bb,
+ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556,
+ 0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510,
+ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f,
+ 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567,
+ 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b,
+ 0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580,
+ 0x03b1, 0x00df, 0x0393, 0x03c0, 0x03a3, 0x03c3, 0x00b5, 0x03c4,
+ 0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229,
+ 0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248,
+ 0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0
+ },
+ /* User mapping -- default to codes for direct font mapping */
+ {
+ 0xf000, 0xf001, 0xf002, 0xf003, 0xf004, 0xf005, 0xf006, 0xf007,
+ 0xf008, 0xf009, 0xf00a, 0xf00b, 0xf00c, 0xf00d, 0xf00e, 0xf00f,
+ 0xf010, 0xf011, 0xf012, 0xf013, 0xf014, 0xf015, 0xf016, 0xf017,
+ 0xf018, 0xf019, 0xf01a, 0xf01b, 0xf01c, 0xf01d, 0xf01e, 0xf01f,
+ 0xf020, 0xf021, 0xf022, 0xf023, 0xf024, 0xf025, 0xf026, 0xf027,
+ 0xf028, 0xf029, 0xf02a, 0xf02b, 0xf02c, 0xf02d, 0xf02e, 0xf02f,
+ 0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
+ 0xf038, 0xf039, 0xf03a, 0xf03b, 0xf03c, 0xf03d, 0xf03e, 0xf03f,
+ 0xf040, 0xf041, 0xf042, 0xf043, 0xf044, 0xf045, 0xf046, 0xf047,
+ 0xf048, 0xf049, 0xf04a, 0xf04b, 0xf04c, 0xf04d, 0xf04e, 0xf04f,
+ 0xf050, 0xf051, 0xf052, 0xf053, 0xf054, 0xf055, 0xf056, 0xf057,
+ 0xf058, 0xf059, 0xf05a, 0xf05b, 0xf05c, 0xf05d, 0xf05e, 0xf05f,
+ 0xf060, 0xf061, 0xf062, 0xf063, 0xf064, 0xf065, 0xf066, 0xf067,
+ 0xf068, 0xf069, 0xf06a, 0xf06b, 0xf06c, 0xf06d, 0xf06e, 0xf06f,
+ 0xf070, 0xf071, 0xf072, 0xf073, 0xf074, 0xf075, 0xf076, 0xf077,
+ 0xf078, 0xf079, 0xf07a, 0xf07b, 0xf07c, 0xf07d, 0xf07e, 0xf07f,
+ 0xf080, 0xf081, 0xf082, 0xf083, 0xf084, 0xf085, 0xf086, 0xf087,
+ 0xf088, 0xf089, 0xf08a, 0xf08b, 0xf08c, 0xf08d, 0xf08e, 0xf08f,
+ 0xf090, 0xf091, 0xf092, 0xf093, 0xf094, 0xf095, 0xf096, 0xf097,
+ 0xf098, 0xf099, 0xf09a, 0xf09b, 0xf09c, 0xf09d, 0xf09e, 0xf09f,
+ 0xf0a0, 0xf0a1, 0xf0a2, 0xf0a3, 0xf0a4, 0xf0a5, 0xf0a6, 0xf0a7,
+ 0xf0a8, 0xf0a9, 0xf0aa, 0xf0ab, 0xf0ac, 0xf0ad, 0xf0ae, 0xf0af,
+ 0xf0b0, 0xf0b1, 0xf0b2, 0xf0b3, 0xf0b4, 0xf0b5, 0xf0b6, 0xf0b7,
+ 0xf0b8, 0xf0b9, 0xf0ba, 0xf0bb, 0xf0bc, 0xf0bd, 0xf0be, 0xf0bf,
+ 0xf0c0, 0xf0c1, 0xf0c2, 0xf0c3, 0xf0c4, 0xf0c5, 0xf0c6, 0xf0c7,
+ 0xf0c8, 0xf0c9, 0xf0ca, 0xf0cb, 0xf0cc, 0xf0cd, 0xf0ce, 0xf0cf,
+ 0xf0d0, 0xf0d1, 0xf0d2, 0xf0d3, 0xf0d4, 0xf0d5, 0xf0d6, 0xf0d7,
+ 0xf0d8, 0xf0d9, 0xf0da, 0xf0db, 0xf0dc, 0xf0dd, 0xf0de, 0xf0df,
+ 0xf0e0, 0xf0e1, 0xf0e2, 0xf0e3, 0xf0e4, 0xf0e5, 0xf0e6, 0xf0e7,
+ 0xf0e8, 0xf0e9, 0xf0ea, 0xf0eb, 0xf0ec, 0xf0ed, 0xf0ee, 0xf0ef,
+ 0xf0f0, 0xf0f1, 0xf0f2, 0xf0f3, 0xf0f4, 0xf0f5, 0xf0f6, 0xf0f7,
+ 0xf0f8, 0xf0f9, 0xf0fa, 0xf0fb, 0xf0fc, 0xf0fd, 0xf0fe, 0xf0ff
+ }
+};
+
+/* The standard kernel character-to-font mappings are not invertible
+ -- this is just a best effort. */
+
+#define MAX_GLYPH 512 /* Max possible glyph value */
+
+static int inv_translate[MAX_NR_CONSOLES];
+
+struct uni_pagedir {
+ u16 **uni_pgdir[32];
+ unsigned long refcount;
+ unsigned long sum;
+ unsigned char *inverse_translations[4];
+ u16 *inverse_trans_unicode;
+ int readonly;
+};
+
+static struct uni_pagedir *dflt;
+
+static void set_inverse_transl(struct vc_data *conp, struct uni_pagedir *p, int i)
+{
+ int j, glyph;
+ unsigned short *t = translations[i];
+ unsigned char *q;
+
+ if (!p) return;
+ q = p->inverse_translations[i];
+
+ if (!q) {
+ q = p->inverse_translations[i] = (unsigned char *)
+ kmalloc(MAX_GLYPH, GFP_KERNEL);
+ if (!q) return;
+ }
+ memset(q, 0, MAX_GLYPH);
+
+ for (j = 0; j < E_TABSZ; j++) {
+ glyph = conv_uni_to_pc(conp, t[j]);
+ if (glyph >= 0 && glyph < MAX_GLYPH && q[glyph] < 32) {
+ /* prefer '-' above SHY etc. */
+ q[glyph] = j;
+ }
+ }
+}
+
+static void set_inverse_trans_unicode(struct vc_data *conp,
+ struct uni_pagedir *p)
+{
+ int i, j, k, glyph;
+ u16 **p1, *p2;
+ u16 *q;
+
+ if (!p) return;
+ q = p->inverse_trans_unicode;
+ if (!q) {
+ q = p->inverse_trans_unicode =
+ kmalloc(MAX_GLYPH * sizeof(u16), GFP_KERNEL);
+ if (!q)
+ return;
+ }
+ memset(q, 0, MAX_GLYPH * sizeof(u16));
+
+ for (i = 0; i < 32; i++) {
+ p1 = p->uni_pgdir[i];
+ if (!p1)
+ continue;
+ for (j = 0; j < 32; j++) {
+ p2 = p1[j];
+ if (!p2)
+ continue;
+ for (k = 0; k < 64; k++) {
+ glyph = p2[k];
+ if (glyph >= 0 && glyph < MAX_GLYPH
+ && q[glyph] < 32)
+ q[glyph] = (i << 11) + (j << 6) + k;
+ }
+ }
+ }
+}
+
+unsigned short *set_translate(int m, struct vc_data *vc)
+{
+ inv_translate[vc->vc_num] = m;
+ return translations[m];
+}
+
+/*
+ * Inverse translation is impossible for several reasons:
+ * 1. The font<->character maps are not 1-1.
+ * 2. The text may have been written while a different translation map
+ * was active.
+ * Still, it is now possible to a certain extent to cut and paste non-ASCII.
+ */
+u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode)
+{
+ struct uni_pagedir *p;
+ int m;
+ if (glyph < 0 || glyph >= MAX_GLYPH)
+ return 0;
+ else if (!(p = (struct uni_pagedir *)*conp->vc_uni_pagedir_loc))
+ return glyph;
+ else if (use_unicode) {
+ if (!p->inverse_trans_unicode)
+ return glyph;
+ else
+ return p->inverse_trans_unicode[glyph];
+ } else {
+ m = inv_translate[conp->vc_num];
+ if (!p->inverse_translations[m])
+ return glyph;
+ else
+ return p->inverse_translations[m][glyph];
+ }
+}
+EXPORT_SYMBOL_GPL(inverse_translate);
+
+static void update_user_maps(void)
+{
+ int i;
+ struct uni_pagedir *p, *q = NULL;
+
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ if (!vc_cons_allocated(i))
+ continue;
+ p = (struct uni_pagedir *)*vc_cons[i].d->vc_uni_pagedir_loc;
+ if (p && p != q) {
+ set_inverse_transl(vc_cons[i].d, p, USER_MAP);
+ set_inverse_trans_unicode(vc_cons[i].d, p);
+ q = p;
+ }
+ }
+}
+
+/*
+ * Load customizable translation table
+ * arg points to a 256 byte translation table.
+ *
+ * The "old" variants are for translation directly to font (using the
+ * 0xf000-0xf0ff "transparent" Unicodes) whereas the "new" variants set
+ * Unicodes explicitly.
+ */
+int con_set_trans_old(unsigned char __user * arg)
+{
+ int i;
+ unsigned short *p = translations[USER_MAP];
+
+ if (!access_ok(VERIFY_READ, arg, E_TABSZ))
+ return -EFAULT;
+
+ for (i=0; i<E_TABSZ ; i++) {
+ unsigned char uc;
+ __get_user(uc, arg+i);
+ p[i] = UNI_DIRECT_BASE | uc;
+ }
+
+ update_user_maps();
+ return 0;
+}
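+
+/*
+ * Illustrative note (annotation, not part of the original source): with
+ * the "old" interface above, a byte value such as 0x41 is stored as
+ * UNI_DIRECT_BASE | 0x41 == 0xf041, i.e. it names font position 0x41
+ * directly, whereas con_set_trans_new() below stores the Unicode value
+ * (e.g. U+0041) itself.
+ */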
+
+int con_get_trans_old(unsigned char __user * arg)
+{
+ int i, ch;
+ unsigned short *p = translations[USER_MAP];
+
+ if (!access_ok(VERIFY_WRITE, arg, E_TABSZ))
+ return -EFAULT;
+
+ for (i=0; i<E_TABSZ ; i++)
+ {
+ ch = conv_uni_to_pc(vc_cons[fg_console].d, p[i]);
+ __put_user((ch & ~0xff) ? 0 : ch, arg+i);
+ }
+ return 0;
+}
+
+int con_set_trans_new(ushort __user * arg)
+{
+ int i;
+ unsigned short *p = translations[USER_MAP];
+
+ if (!access_ok(VERIFY_READ, arg, E_TABSZ*sizeof(unsigned short)))
+ return -EFAULT;
+
+ for (i=0; i<E_TABSZ ; i++) {
+ unsigned short us;
+ __get_user(us, arg+i);
+ p[i] = us;
+ }
+
+ update_user_maps();
+ return 0;
+}
+
+int con_get_trans_new(ushort __user * arg)
+{
+ int i;
+ unsigned short *p = translations[USER_MAP];
+
+ if (!access_ok(VERIFY_WRITE, arg, E_TABSZ*sizeof(unsigned short)))
+ return -EFAULT;
+
+ for (i=0; i<E_TABSZ ; i++)
+ __put_user(p[i], arg+i);
+
+ return 0;
+}
+
+/*
+ * Unicode -> current font conversion
+ *
+ * A font has at most 512 chars, usually 256.
+ * But one font position may represent several Unicode chars.
+ * A hashtable is somewhat of a pain to deal with, so use a
+ * "paged table" instead. Simulation has shown the memory cost of
+ * this 3-level paged table scheme to be comparable to a hash table.
+ */
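+
+/*
+ * Sketch of the lookup (annotation, not part of the original source);
+ * the index names below are only illustrative, but the bit split is the
+ * one used by con_insert_unipair() and conv_uni_to_pc() further down:
+ *
+ *	dir  = ucs >> 11;		5 bits: one of 32 directory slots
+ *	row  = (ucs >> 6) & 0x1f;	5 bits: one of 32 rows per slot
+ *	cell = ucs & 0x3f;		6 bits: one of 64 entries per row
+ *
+ * so a glyph lookup is p->uni_pgdir[dir][row][cell], with a NULL at
+ * either of the first two levels meaning "no glyphs in that range".
+ */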
+
+extern u8 dfont_unicount[]; /* Defined in console_defmap.c */
+extern u16 dfont_unitable[];
+
+static void con_release_unimap(struct uni_pagedir *p)
+{
+ u16 **p1;
+ int i, j;
+
+ if (p == dflt) dflt = NULL;
+ for (i = 0; i < 32; i++) {
+ if ((p1 = p->uni_pgdir[i]) != NULL) {
+ for (j = 0; j < 32; j++)
+ kfree(p1[j]);
+ kfree(p1);
+ }
+ p->uni_pgdir[i] = NULL;
+ }
+ for (i = 0; i < 4; i++) {
+ kfree(p->inverse_translations[i]);
+ p->inverse_translations[i] = NULL;
+ }
+ if (p->inverse_trans_unicode) {
+ kfree(p->inverse_trans_unicode);
+ p->inverse_trans_unicode = NULL;
+ }
+}
+
+void con_free_unimap(struct vc_data *vc)
+{
+ struct uni_pagedir *p;
+
+ p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
+ if (!p)
+ return;
+ *vc->vc_uni_pagedir_loc = 0;
+ if (--p->refcount)
+ return;
+ con_release_unimap(p);
+ kfree(p);
+}
+
+static int con_unify_unimap(struct vc_data *conp, struct uni_pagedir *p)
+{
+ int i, j, k;
+ struct uni_pagedir *q;
+
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ if (!vc_cons_allocated(i))
+ continue;
+ q = (struct uni_pagedir *)*vc_cons[i].d->vc_uni_pagedir_loc;
+ if (!q || q == p || q->sum != p->sum)
+ continue;
+ for (j = 0; j < 32; j++) {
+ u16 **p1, **q1;
+ p1 = p->uni_pgdir[j]; q1 = q->uni_pgdir[j];
+ if (!p1 && !q1)
+ continue;
+ if (!p1 || !q1)
+ break;
+ for (k = 0; k < 32; k++) {
+ if (!p1[k] && !q1[k])
+ continue;
+ if (!p1[k] || !q1[k])
+ break;
+ if (memcmp(p1[k], q1[k], 64*sizeof(u16)))
+ break;
+ }
+ if (k < 32)
+ break;
+ }
+ if (j == 32) {
+ q->refcount++;
+ *conp->vc_uni_pagedir_loc = (unsigned long)q;
+ con_release_unimap(p);
+ kfree(p);
+ return 1;
+ }
+ }
+ return 0;
+}
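+
+/*
+ * Note (annotation, not part of the original source): con_unify_unimap()
+ * uses p->sum -- accumulated in con_insert_unipair() as
+ * (fontpos << 20) + unicode per entry -- as a cheap hash: only tables
+ * with matching sums are compared entry by entry, and on a full match
+ * the console is repointed at the existing table, its refcount is
+ * bumped, and the duplicate table is freed.
+ */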
+
+static int
+con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
+{
+ int i, n;
+ u16 **p1, *p2;
+
+ if (!(p1 = p->uni_pgdir[n = unicode >> 11])) {
+ p1 = p->uni_pgdir[n] = kmalloc(32*sizeof(u16 *), GFP_KERNEL);
+ if (!p1) return -ENOMEM;
+ for (i = 0; i < 32; i++)
+ p1[i] = NULL;
+ }
+
+ if (!(p2 = p1[n = (unicode >> 6) & 0x1f])) {
+ p2 = p1[n] = kmalloc(64*sizeof(u16), GFP_KERNEL);
+ if (!p2) return -ENOMEM;
+ memset(p2, 0xff, 64*sizeof(u16)); /* No glyphs for the characters (yet) */
+ }
+
+ p2[unicode & 0x3f] = fontpos;
+
+ p->sum += (fontpos << 20) + unicode;
+
+ return 0;
+}
+
+/* ui is a leftover from using a hashtable, but might be used again */
+int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
+{
+ struct uni_pagedir *p, *q;
+
+ p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
+ if (p && p->readonly) return -EIO;
+ if (!p || --p->refcount) {
+ q = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!q) {
+ if (p) p->refcount++;
+ return -ENOMEM;
+ }
+ q->refcount=1;
+ *vc->vc_uni_pagedir_loc = (unsigned long)q;
+ } else {
+ if (p == dflt) dflt = NULL;
+ p->refcount++;
+ p->sum = 0;
+ con_release_unimap(p);
+ }
+ return 0;
+}
+
+int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
+{
+ int err = 0, err1, i;
+ struct uni_pagedir *p, *q;
+
+	/* Save original vc_uni_pagedir_loc in case we allocate a new one */
+ p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
+ if (p->readonly) return -EIO;
+
+ if (!ct) return 0;
+
+ if (p->refcount > 1) {
+ int j, k;
+ u16 **p1, *p2, l;
+
+ err1 = con_clear_unimap(vc, NULL);
+ if (err1) return err1;
+
+ /*
+		 * Since refcount was > 1, con_clear_unimap() allocated a new
+		 * uni_pagedir for this vc, so p != q.
+ */
+ q = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
+
+ /*
+ * uni_pgdir is a 32*32*64 table with rows allocated
+ * when its first entry is added. The unicode value must
+ * still be incremented for empty rows. We are copying
+ * entries from "p" (old) to "q" (new).
+ */
+ l = 0; /* unicode value */
+ for (i = 0; i < 32; i++)
+ if ((p1 = p->uni_pgdir[i]))
+ for (j = 0; j < 32; j++)
+ if ((p2 = p1[j])) {
+ for (k = 0; k < 64; k++, l++)
+ if (p2[k] != 0xffff) {
+ /*
+ * Found one, copy entry for unicode
+ * l with fontpos value p2[k].
+ */
+ err1 = con_insert_unipair(q, l, p2[k]);
+ if (err1) {
+ p->refcount++;
+ *vc->vc_uni_pagedir_loc = (unsigned long)p;
+ con_release_unimap(q);
+ kfree(q);
+ return err1;
+ }
+ }
+ } else {
+ /* Account for row of 64 empty entries */
+ l += 64;
+ }
+ else
+ /* Account for empty table */
+ l += 32 * 64;
+
+ /*
+ * Finished copying font table, set vc_uni_pagedir to new table
+ */
+ p = q;
+ } else if (p == dflt) {
+ dflt = NULL;
+ }
+
+ /*
+ * Insert user specified unicode pairs into new table.
+ */
+ while (ct--) {
+ unsigned short unicode, fontpos;
+ __get_user(unicode, &list->unicode);
+ __get_user(fontpos, &list->fontpos);
+ if ((err1 = con_insert_unipair(p, unicode,fontpos)) != 0)
+ err = err1;
+ list++;
+ }
+
+ /*
+ * Merge with fontmaps of any other virtual consoles.
+ */
+ if (con_unify_unimap(vc, p))
+ return err;
+
+ for (i = 0; i <= 3; i++)
+ set_inverse_transl(vc, p, i); /* Update inverse translations */
+ set_inverse_trans_unicode(vc, p);
+
+ return err;
+}
+
+/* Loads the unimap for the hardware font, as defined in uni_hash.tbl.
+ The representation used was the most compact I could come up
+ with. This routine is executed at sys_setup time, and when the
+ PIO_FONTRESET ioctl is called. */
+
+int con_set_default_unimap(struct vc_data *vc)
+{
+ int i, j, err = 0, err1;
+ u16 *q;
+ struct uni_pagedir *p;
+
+ if (dflt) {
+ p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
+ if (p == dflt)
+ return 0;
+ dflt->refcount++;
+ *vc->vc_uni_pagedir_loc = (unsigned long)dflt;
+ if (p && --p->refcount) {
+ con_release_unimap(p);
+ kfree(p);
+ }
+ return 0;
+ }
+
+ /* The default font is always 256 characters */
+
+ err = con_clear_unimap(vc, NULL);
+ if (err) return err;
+
+ p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
+ q = dfont_unitable;
+
+ for (i = 0; i < 256; i++)
+ for (j = dfont_unicount[i]; j; j--) {
+ err1 = con_insert_unipair(p, *(q++), i);
+ if (err1)
+ err = err1;
+ }
+
+ if (con_unify_unimap(vc, p)) {
+ dflt = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
+ return err;
+ }
+
+ for (i = 0; i <= 3; i++)
+ set_inverse_transl(vc, p, i); /* Update all inverse translations */
+ set_inverse_trans_unicode(vc, p);
+ dflt = p;
+ return err;
+}
+EXPORT_SYMBOL(con_set_default_unimap);
+
+int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc)
+{
+ struct uni_pagedir *q;
+
+ if (!*src_vc->vc_uni_pagedir_loc)
+ return -EINVAL;
+ if (*dst_vc->vc_uni_pagedir_loc == *src_vc->vc_uni_pagedir_loc)
+ return 0;
+ con_free_unimap(dst_vc);
+ q = (struct uni_pagedir *)*src_vc->vc_uni_pagedir_loc;
+ q->refcount++;
+ *dst_vc->vc_uni_pagedir_loc = (long)q;
+ return 0;
+}
+
+int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list)
+{
+ int i, j, k, ect;
+ u16 **p1, *p2;
+ struct uni_pagedir *p;
+
+ ect = 0;
+ if (*vc->vc_uni_pagedir_loc) {
+ p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
+ for (i = 0; i < 32; i++)
+ if ((p1 = p->uni_pgdir[i]))
+ for (j = 0; j < 32; j++)
+ if ((p2 = *(p1++)))
+ for (k = 0; k < 64; k++) {
+ if (*p2 < MAX_GLYPH && ect++ < ct) {
+ __put_user((u_short)((i<<11)+(j<<6)+k),
+ &list->unicode);
+ __put_user((u_short) *p2,
+ &list->fontpos);
+ list++;
+ }
+ p2++;
+ }
+ }
+ __put_user(ect, uct);
+ return ((ect <= ct) ? 0 : -ENOMEM);
+}
+
+void con_protect_unimap(struct vc_data *vc, int rdonly)
+{
+ struct uni_pagedir *p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
+
+ if (p)
+ p->readonly = rdonly;
+}
+
+/*
+ * Always use USER_MAP. These functions are used by the keyboard,
+ * which shouldn't be affected by G0/G1 switching, etc.
+ * If the user map still contains default values, i.e. the
+ * direct-to-font mapping, then assume the user is using Latin-1.
+ */
+/* may be called during an interrupt */
+u32 conv_8bit_to_uni(unsigned char c)
+{
+ unsigned short uni = translations[USER_MAP][c];
+ return uni == (0xf000 | c) ? c : uni;
+}
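+
+/*
+ * Example (annotation, not part of the original source): with the
+ * default direct-to-font user map, translations[USER_MAP][0xe9] is
+ * 0xf0e9, so conv_8bit_to_uni(0xe9) falls through to returning 0x00e9
+ * (e acute), i.e. the byte is treated as Latin-1 as described above.
+ */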
+
+int conv_uni_to_8bit(u32 uni)
+{
+ int c;
+ for (c = 0; c < 0x100; c++)
+ if (translations[USER_MAP][c] == uni ||
+ (translations[USER_MAP][c] == (c | 0xf000) && uni == c))
+ return c;
+ return -1;
+}
+
+int
+conv_uni_to_pc(struct vc_data *conp, long ucs)
+{
+ int h;
+ u16 **p1, *p2;
+ struct uni_pagedir *p;
+
+ /* Only 16-bit codes supported at this time */
+ if (ucs > 0xffff)
+ return -4; /* Not found */
+ else if (ucs < 0x20)
+ return -1; /* Not a printable character */
+ else if (ucs == 0xfeff || (ucs >= 0x200b && ucs <= 0x200f))
+ return -2; /* Zero-width space */
+ /*
+ * UNI_DIRECT_BASE indicates the start of the region in the User Zone
+ * which always has a 1:1 mapping to the currently loaded font. The
+ * UNI_DIRECT_MASK indicates the bit span of the region.
+ */
+ else if ((ucs & ~UNI_DIRECT_MASK) == UNI_DIRECT_BASE)
+ return ucs & UNI_DIRECT_MASK;
+
+ if (!*conp->vc_uni_pagedir_loc)
+ return -3;
+
+ p = (struct uni_pagedir *)*conp->vc_uni_pagedir_loc;
+ if ((p1 = p->uni_pgdir[ucs >> 11]) &&
+ (p2 = p1[(ucs >> 6) & 0x1f]) &&
+ (h = p2[ucs & 0x3f]) < MAX_GLYPH)
+ return h;
+
+ return -4; /* not found */
+}
+
+/*
+ * This is called at sys_setup time, after memory and the console are
+ * initialized. It must be possible to call kmalloc(..., GFP_KERNEL)
+ * from this function, hence the call from sys_setup.
+ */
+void __init
+console_map_init(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_NR_CONSOLES; i++)
+ if (vc_cons_allocated(i) && !*vc_cons[i].d->vc_uni_pagedir_loc)
+ con_set_default_unimap(vc_cons[i].d);
+}
+
+EXPORT_SYMBOL(con_copy_unimap);
diff --git a/drivers/tty/vt/cp437.uni b/drivers/tty/vt/cp437.uni
new file mode 100644
index 00000000..bc616348
--- /dev/null
+++ b/drivers/tty/vt/cp437.uni
@@ -0,0 +1,291 @@
+#
+# Unicode table for IBM Codepage 437. Note that there are many more
+# substitutions that could be conceived (for example, thick-line
+# graphs probably should be replaced with double-line ones, accented
+# Latin characters should be replaced with their nonaccented versions,
+# and some upper case Greek characters could be replaced by Latin), however,
+# I have limited myself to the Unicodes used by the kernel ISO 8859-1,
+# DEC VT, and IBM CP 437 tables.
+#
+# --------------------------------
+#
+# Basic IBM dingbats, some of which will never have a purpose clear
+# to mankind
+#
+0x00 U+0000
+0x01 U+263a
+0x02 U+263b
+0x03 U+2665
+0x04 U+2666 U+25c6
+0x05 U+2663
+0x06 U+2660
+0x07 U+2022
+0x08 U+25d8
+0x09 U+25cb
+0x0a U+25d9
+0x0b U+2642
+0x0c U+2640
+0x0d U+266a
+0x0e U+266b
+0x0f U+263c U+00a4
+0x10 U+25b6 U+25ba
+0x11 U+25c0 U+25c4
+0x12 U+2195
+0x13 U+203c
+0x14 U+00b6
+0x15 U+00a7
+0x16 U+25ac
+0x17 U+21a8
+0x18 U+2191
+0x19 U+2193
+0x1a U+2192
+0x1b U+2190
+0x1c U+221f
+0x1d U+2194
+0x1e U+25b2
+0x1f U+25bc
+#
+# The ASCII range is identity-mapped, but some of the characters also
+# have to act as substitutes, especially the upper-case characters.
+#
+0x20 U+0020
+0x21 U+0021
+0x22 U+0022 U+00a8
+0x23 U+0023
+0x24 U+0024
+0x25 U+0025
+0x26 U+0026
+0x27 U+0027 U+00b4
+0x28 U+0028
+0x29 U+0029
+0x2a U+002a
+0x2b U+002b
+0x2c U+002c U+00b8
+0x2d U+002d U+00ad
+0x2e U+002e
+0x2f U+002f
+0x30 U+0030
+0x31 U+0031
+0x32 U+0032
+0x33 U+0033
+0x34 U+0034
+0x35 U+0035
+0x36 U+0036
+0x37 U+0037
+0x38 U+0038
+0x39 U+0039
+0x3a U+003a
+0x3b U+003b
+0x3c U+003c
+0x3d U+003d
+0x3e U+003e
+0x3f U+003f
+0x40 U+0040
+0x41 U+0041 U+00c0 U+00c1 U+00c2 U+00c3
+0x42 U+0042
+0x43 U+0043 U+00a9
+0x44 U+0044 U+00d0
+0x45 U+0045 U+00c8 U+00ca U+00cb
+0x46 U+0046
+0x47 U+0047
+0x48 U+0048
+0x49 U+0049 U+00cc U+00cd U+00ce U+00cf
+0x4a U+004a
+0x4b U+004b U+212a
+0x4c U+004c
+0x4d U+004d
+0x4e U+004e
+0x4f U+004f U+00d2 U+00d3 U+00d4 U+00d5
+0x50 U+0050
+0x51 U+0051
+0x52 U+0052 U+00ae
+0x53 U+0053
+0x54 U+0054
+0x55 U+0055 U+00d9 U+00da U+00db
+0x56 U+0056
+0x57 U+0057
+0x58 U+0058
+0x59 U+0059 U+00dd
+0x5a U+005a
+0x5b U+005b
+0x5c U+005c
+0x5d U+005d
+0x5e U+005e
+0x5f U+005f U+23bd U+f804
+0x60 U+0060
+0x61 U+0061 U+00e3
+0x62 U+0062
+0x63 U+0063
+0x64 U+0064
+0x65 U+0065
+0x66 U+0066
+0x67 U+0067
+0x68 U+0068
+0x69 U+0069
+0x6a U+006a
+0x6b U+006b
+0x6c U+006c
+0x6d U+006d
+0x6e U+006e
+0x6f U+006f U+00f5
+0x70 U+0070
+0x71 U+0071
+0x72 U+0072
+0x73 U+0073
+0x74 U+0074
+0x75 U+0075
+0x76 U+0076
+0x77 U+0077
+0x78 U+0078 U+00d7
+0x79 U+0079 U+00fd
+0x7a U+007a
+0x7b U+007b
+0x7c U+007c U+00a6
+0x7d U+007d
+0x7e U+007e
+#
+# Okay, what on Earth is this one supposed to be used for?
+#
+0x7f U+2302
+#
+# Non-English characters, mostly lower case letters...
+#
+0x80 U+00c7
+0x81 U+00fc
+0x82 U+00e9
+0x83 U+00e2
+0x84 U+00e4
+0x85 U+00e0
+0x86 U+00e5
+0x87 U+00e7
+0x88 U+00ea
+0x89 U+00eb
+0x8a U+00e8
+0x8b U+00ef
+0x8c U+00ee
+0x8d U+00ec
+0x8e U+00c4
+0x8f U+00c5 U+212b
+0x90 U+00c9
+0x91 U+00e6
+0x92 U+00c6
+0x93 U+00f4
+0x94 U+00f6
+0x95 U+00f2
+0x96 U+00fb
+0x97 U+00f9
+0x98 U+00ff
+0x99 U+00d6
+0x9a U+00dc
+0x9b U+00a2
+0x9c U+00a3
+0x9d U+00a5
+0x9e U+20a7
+0x9f U+0192
+0xa0 U+00e1
+0xa1 U+00ed
+0xa2 U+00f3
+0xa3 U+00fa
+0xa4 U+00f1
+0xa5 U+00d1
+0xa6 U+00aa
+0xa7 U+00ba
+0xa8 U+00bf
+0xa9 U+2310
+0xaa U+00ac
+0xab U+00bd
+0xac U+00bc
+0xad U+00a1
+0xae U+00ab
+0xaf U+00bb
+#
+# Block graphics
+#
+0xb0 U+2591
+0xb1 U+2592
+0xb2 U+2593
+0xb3 U+2502
+0xb4 U+2524
+0xb5 U+2561
+0xb6 U+2562
+0xb7 U+2556
+0xb8 U+2555
+0xb9 U+2563
+0xba U+2551
+0xbb U+2557
+0xbc U+255d
+0xbd U+255c
+0xbe U+255b
+0xbf U+2510
+0xc0 U+2514
+0xc1 U+2534
+0xc2 U+252c
+0xc3 U+251c
+0xc4 U+2500
+0xc5 U+253c
+0xc6 U+255e
+0xc7 U+255f
+0xc8 U+255a
+0xc9 U+2554
+0xca U+2569
+0xcb U+2566
+0xcc U+2560
+0xcd U+2550
+0xce U+256c
+0xcf U+2567
+0xd0 U+2568
+0xd1 U+2564
+0xd2 U+2565
+0xd3 U+2559
+0xd4 U+2558
+0xd5 U+2552
+0xd6 U+2553
+0xd7 U+256b
+0xd8 U+256a
+0xd9 U+2518
+0xda U+250c
+0xdb U+2588
+0xdc U+2584
+0xdd U+258c
+0xde U+2590
+0xdf U+2580
+#
+# Greek letters and mathematical symbols
+#
+0xe0 U+03b1
+0xe1 U+03b2 U+00df
+0xe2 U+0393
+0xe3 U+03c0
+0xe4 U+03a3
+0xe5 U+03c3
+0xe6 U+00b5 U+03bc
+0xe7 U+03c4
+0xe8 U+03a6 U+00d8
+0xe9 U+0398
+0xea U+03a9 U+2126
+0xeb U+03b4 U+00f0
+0xec U+221e
+0xed U+03c6 U+00f8
+0xee U+03b5 U+2208
+0xef U+2229
+0xf0 U+2261
+0xf1 U+00b1
+0xf2 U+2265
+0xf3 U+2264
+0xf4 U+2320
+0xf5 U+2321
+0xf6 U+00f7
+0xf7 U+2248
+0xf8 U+00b0
+0xf9 U+2219
+0xfa U+00b7
+0xfb U+221a
+0xfc U+207f
+0xfd U+00b2
+#
+# Square bullet, non-spacing blank
+# Mapping U+fffd to the square bullet means it is the substitution
+# character
+#
+0xfe U+25a0 U+fffd
+0xff U+00a0
diff --git a/drivers/tty/vt/defkeymap.c_shipped b/drivers/tty/vt/defkeymap.c_shipped
new file mode 100644
index 00000000..d2208dfe
--- /dev/null
+++ b/drivers/tty/vt/defkeymap.c_shipped
@@ -0,0 +1,262 @@
+/* Do not edit this file! It was automatically generated by */
+/* loadkeys --mktable defkeymap.map > defkeymap.c */
+
+#include <linux/types.h>
+#include <linux/keyboard.h>
+#include <linux/kd.h>
+
+u_short plain_map[NR_KEYS] = {
+ 0xf200, 0xf01b, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036,
+ 0xf037, 0xf038, 0xf039, 0xf030, 0xf02d, 0xf03d, 0xf07f, 0xf009,
+ 0xfb71, 0xfb77, 0xfb65, 0xfb72, 0xfb74, 0xfb79, 0xfb75, 0xfb69,
+ 0xfb6f, 0xfb70, 0xf05b, 0xf05d, 0xf201, 0xf702, 0xfb61, 0xfb73,
+ 0xfb64, 0xfb66, 0xfb67, 0xfb68, 0xfb6a, 0xfb6b, 0xfb6c, 0xf03b,
+ 0xf027, 0xf060, 0xf700, 0xf05c, 0xfb7a, 0xfb78, 0xfb63, 0xfb76,
+ 0xfb62, 0xfb6e, 0xfb6d, 0xf02c, 0xf02e, 0xf02f, 0xf700, 0xf30c,
+ 0xf703, 0xf020, 0xf207, 0xf100, 0xf101, 0xf102, 0xf103, 0xf104,
+ 0xf105, 0xf106, 0xf107, 0xf108, 0xf109, 0xf208, 0xf209, 0xf307,
+ 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301,
+ 0xf302, 0xf303, 0xf300, 0xf310, 0xf206, 0xf200, 0xf03c, 0xf10a,
+ 0xf10b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf30e, 0xf702, 0xf30d, 0xf01c, 0xf701, 0xf205, 0xf114, 0xf603,
+ 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116,
+ 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+u_short shift_map[NR_KEYS] = {
+ 0xf200, 0xf01b, 0xf021, 0xf040, 0xf023, 0xf024, 0xf025, 0xf05e,
+ 0xf026, 0xf02a, 0xf028, 0xf029, 0xf05f, 0xf02b, 0xf07f, 0xf009,
+ 0xfb51, 0xfb57, 0xfb45, 0xfb52, 0xfb54, 0xfb59, 0xfb55, 0xfb49,
+ 0xfb4f, 0xfb50, 0xf07b, 0xf07d, 0xf201, 0xf702, 0xfb41, 0xfb53,
+ 0xfb44, 0xfb46, 0xfb47, 0xfb48, 0xfb4a, 0xfb4b, 0xfb4c, 0xf03a,
+ 0xf022, 0xf07e, 0xf700, 0xf07c, 0xfb5a, 0xfb58, 0xfb43, 0xfb56,
+ 0xfb42, 0xfb4e, 0xfb4d, 0xf03c, 0xf03e, 0xf03f, 0xf700, 0xf30c,
+ 0xf703, 0xf020, 0xf207, 0xf10a, 0xf10b, 0xf10c, 0xf10d, 0xf10e,
+ 0xf10f, 0xf110, 0xf111, 0xf112, 0xf113, 0xf213, 0xf203, 0xf307,
+ 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301,
+ 0xf302, 0xf303, 0xf300, 0xf310, 0xf206, 0xf200, 0xf03e, 0xf10a,
+ 0xf10b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf30e, 0xf702, 0xf30d, 0xf200, 0xf701, 0xf205, 0xf114, 0xf603,
+ 0xf20b, 0xf601, 0xf602, 0xf117, 0xf600, 0xf20a, 0xf115, 0xf116,
+ 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+u_short altgr_map[NR_KEYS] = {
+ 0xf200, 0xf200, 0xf200, 0xf040, 0xf200, 0xf024, 0xf200, 0xf200,
+ 0xf07b, 0xf05b, 0xf05d, 0xf07d, 0xf05c, 0xf200, 0xf200, 0xf200,
+ 0xfb71, 0xfb77, 0xf918, 0xfb72, 0xfb74, 0xfb79, 0xfb75, 0xfb69,
+ 0xfb6f, 0xfb70, 0xf200, 0xf07e, 0xf201, 0xf702, 0xf914, 0xfb73,
+ 0xf917, 0xf919, 0xfb67, 0xfb68, 0xfb6a, 0xfb6b, 0xfb6c, 0xf200,
+ 0xf200, 0xf200, 0xf700, 0xf200, 0xfb7a, 0xfb78, 0xf916, 0xfb76,
+ 0xf915, 0xfb6e, 0xfb6d, 0xf200, 0xf200, 0xf200, 0xf700, 0xf30c,
+ 0xf703, 0xf200, 0xf207, 0xf50c, 0xf50d, 0xf50e, 0xf50f, 0xf510,
+ 0xf511, 0xf512, 0xf513, 0xf514, 0xf515, 0xf208, 0xf202, 0xf911,
+ 0xf912, 0xf913, 0xf30b, 0xf90e, 0xf90f, 0xf910, 0xf30a, 0xf90b,
+ 0xf90c, 0xf90d, 0xf90a, 0xf310, 0xf206, 0xf200, 0xf07c, 0xf516,
+ 0xf517, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf30e, 0xf702, 0xf30d, 0xf200, 0xf701, 0xf205, 0xf114, 0xf603,
+ 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116,
+ 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+u_short ctrl_map[NR_KEYS] = {
+ 0xf200, 0xf200, 0xf200, 0xf000, 0xf01b, 0xf01c, 0xf01d, 0xf01e,
+ 0xf01f, 0xf07f, 0xf200, 0xf200, 0xf01f, 0xf200, 0xf008, 0xf200,
+ 0xf011, 0xf017, 0xf005, 0xf012, 0xf014, 0xf019, 0xf015, 0xf009,
+ 0xf00f, 0xf010, 0xf01b, 0xf01d, 0xf201, 0xf702, 0xf001, 0xf013,
+ 0xf004, 0xf006, 0xf007, 0xf008, 0xf00a, 0xf00b, 0xf00c, 0xf200,
+ 0xf007, 0xf000, 0xf700, 0xf01c, 0xf01a, 0xf018, 0xf003, 0xf016,
+ 0xf002, 0xf00e, 0xf00d, 0xf200, 0xf20e, 0xf07f, 0xf700, 0xf30c,
+ 0xf703, 0xf000, 0xf207, 0xf100, 0xf101, 0xf102, 0xf103, 0xf104,
+ 0xf105, 0xf106, 0xf107, 0xf108, 0xf109, 0xf208, 0xf204, 0xf307,
+ 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301,
+ 0xf302, 0xf303, 0xf300, 0xf310, 0xf206, 0xf200, 0xf200, 0xf10a,
+ 0xf10b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf30e, 0xf702, 0xf30d, 0xf01c, 0xf701, 0xf205, 0xf114, 0xf603,
+ 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116,
+ 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+u_short shift_ctrl_map[NR_KEYS] = {
+ 0xf200, 0xf200, 0xf200, 0xf000, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf01f, 0xf200, 0xf200, 0xf200,
+ 0xf011, 0xf017, 0xf005, 0xf012, 0xf014, 0xf019, 0xf015, 0xf009,
+ 0xf00f, 0xf010, 0xf200, 0xf200, 0xf201, 0xf702, 0xf001, 0xf013,
+ 0xf004, 0xf006, 0xf007, 0xf008, 0xf00a, 0xf00b, 0xf00c, 0xf200,
+ 0xf200, 0xf200, 0xf700, 0xf200, 0xf01a, 0xf018, 0xf003, 0xf016,
+ 0xf002, 0xf00e, 0xf00d, 0xf200, 0xf200, 0xf200, 0xf700, 0xf30c,
+ 0xf703, 0xf200, 0xf207, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf208, 0xf200, 0xf307,
+ 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301,
+ 0xf302, 0xf303, 0xf300, 0xf310, 0xf206, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf30e, 0xf702, 0xf30d, 0xf200, 0xf701, 0xf205, 0xf114, 0xf603,
+ 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116,
+ 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+u_short alt_map[NR_KEYS] = {
+ 0xf200, 0xf81b, 0xf831, 0xf832, 0xf833, 0xf834, 0xf835, 0xf836,
+ 0xf837, 0xf838, 0xf839, 0xf830, 0xf82d, 0xf83d, 0xf87f, 0xf809,
+ 0xf871, 0xf877, 0xf865, 0xf872, 0xf874, 0xf879, 0xf875, 0xf869,
+ 0xf86f, 0xf870, 0xf85b, 0xf85d, 0xf80d, 0xf702, 0xf861, 0xf873,
+ 0xf864, 0xf866, 0xf867, 0xf868, 0xf86a, 0xf86b, 0xf86c, 0xf83b,
+ 0xf827, 0xf860, 0xf700, 0xf85c, 0xf87a, 0xf878, 0xf863, 0xf876,
+ 0xf862, 0xf86e, 0xf86d, 0xf82c, 0xf82e, 0xf82f, 0xf700, 0xf30c,
+ 0xf703, 0xf820, 0xf207, 0xf500, 0xf501, 0xf502, 0xf503, 0xf504,
+ 0xf505, 0xf506, 0xf507, 0xf508, 0xf509, 0xf208, 0xf209, 0xf907,
+ 0xf908, 0xf909, 0xf30b, 0xf904, 0xf905, 0xf906, 0xf30a, 0xf901,
+ 0xf902, 0xf903, 0xf900, 0xf310, 0xf206, 0xf200, 0xf83c, 0xf50a,
+ 0xf50b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf30e, 0xf702, 0xf30d, 0xf01c, 0xf701, 0xf205, 0xf114, 0xf603,
+ 0xf118, 0xf210, 0xf211, 0xf117, 0xf600, 0xf119, 0xf115, 0xf116,
+ 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+u_short ctrl_alt_map[NR_KEYS] = {
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf811, 0xf817, 0xf805, 0xf812, 0xf814, 0xf819, 0xf815, 0xf809,
+ 0xf80f, 0xf810, 0xf200, 0xf200, 0xf201, 0xf702, 0xf801, 0xf813,
+ 0xf804, 0xf806, 0xf807, 0xf808, 0xf80a, 0xf80b, 0xf80c, 0xf200,
+ 0xf200, 0xf200, 0xf700, 0xf200, 0xf81a, 0xf818, 0xf803, 0xf816,
+ 0xf802, 0xf80e, 0xf80d, 0xf200, 0xf200, 0xf200, 0xf700, 0xf30c,
+ 0xf703, 0xf200, 0xf207, 0xf500, 0xf501, 0xf502, 0xf503, 0xf504,
+ 0xf505, 0xf506, 0xf507, 0xf508, 0xf509, 0xf208, 0xf200, 0xf307,
+ 0xf308, 0xf309, 0xf30b, 0xf304, 0xf305, 0xf306, 0xf30a, 0xf301,
+ 0xf302, 0xf303, 0xf300, 0xf20c, 0xf206, 0xf200, 0xf200, 0xf50a,
+ 0xf50b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+ 0xf30e, 0xf702, 0xf30d, 0xf200, 0xf701, 0xf205, 0xf114, 0xf603,
+ 0xf118, 0xf601, 0xf602, 0xf117, 0xf600, 0xf119, 0xf115, 0xf20c,
+ 0xf11a, 0xf10c, 0xf10d, 0xf11b, 0xf11c, 0xf110, 0xf311, 0xf11d,
+ 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
+};
+
+ushort *key_maps[MAX_NR_KEYMAPS] = {
+ plain_map, shift_map, altgr_map, NULL,
+ ctrl_map, shift_ctrl_map, NULL, NULL,
+ alt_map, NULL, NULL, NULL,
+ ctrl_alt_map, NULL
+};
+
+unsigned int keymap_count = 7;
+
+/*
+ * Philosophy: most people do not define more strings, but those who do
+ * often want quite a lot of string space. So, we statically allocate
+ * the default and allocate dynamically in chunks of 512 bytes.
+ */
+
+char func_buf[] = {
+ '\033', '[', '[', 'A', 0,
+ '\033', '[', '[', 'B', 0,
+ '\033', '[', '[', 'C', 0,
+ '\033', '[', '[', 'D', 0,
+ '\033', '[', '[', 'E', 0,
+ '\033', '[', '1', '7', '~', 0,
+ '\033', '[', '1', '8', '~', 0,
+ '\033', '[', '1', '9', '~', 0,
+ '\033', '[', '2', '0', '~', 0,
+ '\033', '[', '2', '1', '~', 0,
+ '\033', '[', '2', '3', '~', 0,
+ '\033', '[', '2', '4', '~', 0,
+ '\033', '[', '2', '5', '~', 0,
+ '\033', '[', '2', '6', '~', 0,
+ '\033', '[', '2', '8', '~', 0,
+ '\033', '[', '2', '9', '~', 0,
+ '\033', '[', '3', '1', '~', 0,
+ '\033', '[', '3', '2', '~', 0,
+ '\033', '[', '3', '3', '~', 0,
+ '\033', '[', '3', '4', '~', 0,
+ '\033', '[', '1', '~', 0,
+ '\033', '[', '2', '~', 0,
+ '\033', '[', '3', '~', 0,
+ '\033', '[', '4', '~', 0,
+ '\033', '[', '5', '~', 0,
+ '\033', '[', '6', '~', 0,
+ '\033', '[', 'M', 0,
+ '\033', '[', 'P', 0,
+};
+
+char *funcbufptr = func_buf;
+int funcbufsize = sizeof(func_buf);
+int funcbufleft = 0; /* space left */
+
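+/*
+ * Offsets below index into func_buf above (annotation, not part of the
+ * generated file): each entry points at the start of one NUL-terminated
+ * escape sequence, e.g. func_buf + 5 is "\033[[B" (the F2 string); the
+ * stride is 5 bytes for the four-character sequences and 6 for the
+ * five-character ones, each including its terminating NUL.
+ */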
+char *func_table[MAX_NR_FUNC] = {
+ func_buf + 0,
+ func_buf + 5,
+ func_buf + 10,
+ func_buf + 15,
+ func_buf + 20,
+ func_buf + 25,
+ func_buf + 31,
+ func_buf + 37,
+ func_buf + 43,
+ func_buf + 49,
+ func_buf + 55,
+ func_buf + 61,
+ func_buf + 67,
+ func_buf + 73,
+ func_buf + 79,
+ func_buf + 85,
+ func_buf + 91,
+ func_buf + 97,
+ func_buf + 103,
+ func_buf + 109,
+ func_buf + 115,
+ func_buf + 120,
+ func_buf + 125,
+ func_buf + 130,
+ func_buf + 135,
+ func_buf + 140,
+ func_buf + 145,
+ NULL,
+ NULL,
+ func_buf + 149,
+ NULL,
+};
+
+struct kbdiacruc accent_table[MAX_DIACR] = {
+ {'`', 'A', 0300}, {'`', 'a', 0340},
+ {'\'', 'A', 0301}, {'\'', 'a', 0341},
+ {'^', 'A', 0302}, {'^', 'a', 0342},
+ {'~', 'A', 0303}, {'~', 'a', 0343},
+ {'"', 'A', 0304}, {'"', 'a', 0344},
+ {'O', 'A', 0305}, {'o', 'a', 0345},
+ {'0', 'A', 0305}, {'0', 'a', 0345},
+ {'A', 'A', 0305}, {'a', 'a', 0345},
+ {'A', 'E', 0306}, {'a', 'e', 0346},
+ {',', 'C', 0307}, {',', 'c', 0347},
+ {'`', 'E', 0310}, {'`', 'e', 0350},
+ {'\'', 'E', 0311}, {'\'', 'e', 0351},
+ {'^', 'E', 0312}, {'^', 'e', 0352},
+ {'"', 'E', 0313}, {'"', 'e', 0353},
+ {'`', 'I', 0314}, {'`', 'i', 0354},
+ {'\'', 'I', 0315}, {'\'', 'i', 0355},
+ {'^', 'I', 0316}, {'^', 'i', 0356},
+ {'"', 'I', 0317}, {'"', 'i', 0357},
+ {'-', 'D', 0320}, {'-', 'd', 0360},
+ {'~', 'N', 0321}, {'~', 'n', 0361},
+ {'`', 'O', 0322}, {'`', 'o', 0362},
+ {'\'', 'O', 0323}, {'\'', 'o', 0363},
+ {'^', 'O', 0324}, {'^', 'o', 0364},
+ {'~', 'O', 0325}, {'~', 'o', 0365},
+ {'"', 'O', 0326}, {'"', 'o', 0366},
+ {'/', 'O', 0330}, {'/', 'o', 0370},
+ {'`', 'U', 0331}, {'`', 'u', 0371},
+ {'\'', 'U', 0332}, {'\'', 'u', 0372},
+ {'^', 'U', 0333}, {'^', 'u', 0373},
+ {'"', 'U', 0334}, {'"', 'u', 0374},
+ {'\'', 'Y', 0335}, {'\'', 'y', 0375},
+ {'T', 'H', 0336}, {'t', 'h', 0376},
+ {'s', 's', 0337}, {'"', 'y', 0377},
+ {'s', 'z', 0337}, {'i', 'j', 0377},
+};
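+
+/*
+ * Example (annotation, not part of the generated file): each entry pairs
+ * a dead/diacritical character with a base character and the resulting
+ * character code in octal (Latin-1 and Unicode coincide here), so
+ * {'`', 'A', 0300} composes grave + A into 0300 octal == 0xc0 == U+00C0.
+ */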
+
+unsigned int accent_table_size = 68;
diff --git a/drivers/tty/vt/defkeymap.map b/drivers/tty/vt/defkeymap.map
new file mode 100644
index 00000000..50b30cac
--- /dev/null
+++ b/drivers/tty/vt/defkeymap.map
@@ -0,0 +1,357 @@
+# Default kernel keymap. This uses 7 modifier combinations.
+keymaps 0-2,4-5,8,12
+# Change the above line into
+# keymaps 0-2,4-6,8,12
+# in case you want the entries
+# altgr control keycode 83 = Boot
+# altgr control keycode 111 = Boot
+# below.
+#
+# In fact AltGr is used very little, and one more keymap can
+# be saved by mapping AltGr to Alt (and adapting a few entries):
+# keycode 100 = Alt
+#
+keycode 1 = Escape Escape
+ alt keycode 1 = Meta_Escape
+keycode 2 = one exclam
+ alt keycode 2 = Meta_one
+keycode 3 = two at at
+ control keycode 3 = nul
+ shift control keycode 3 = nul
+ alt keycode 3 = Meta_two
+keycode 4 = three numbersign
+ control keycode 4 = Escape
+ alt keycode 4 = Meta_three
+keycode 5 = four dollar dollar
+ control keycode 5 = Control_backslash
+ alt keycode 5 = Meta_four
+keycode 6 = five percent
+ control keycode 6 = Control_bracketright
+ alt keycode 6 = Meta_five
+keycode 7 = six asciicircum
+ control keycode 7 = Control_asciicircum
+ alt keycode 7 = Meta_six
+keycode 8 = seven ampersand braceleft
+ control keycode 8 = Control_underscore
+ alt keycode 8 = Meta_seven
+keycode 9 = eight asterisk bracketleft
+ control keycode 9 = Delete
+ alt keycode 9 = Meta_eight
+keycode 10 = nine parenleft bracketright
+ alt keycode 10 = Meta_nine
+keycode 11 = zero parenright braceright
+ alt keycode 11 = Meta_zero
+keycode 12 = minus underscore backslash
+ control keycode 12 = Control_underscore
+ shift control keycode 12 = Control_underscore
+ alt keycode 12 = Meta_minus
+keycode 13 = equal plus
+ alt keycode 13 = Meta_equal
+keycode 14 = Delete Delete
+ control keycode 14 = BackSpace
+ alt keycode 14 = Meta_Delete
+keycode 15 = Tab Tab
+ alt keycode 15 = Meta_Tab
+keycode 16 = q
+keycode 17 = w
+keycode 18 = e
+ altgr keycode 18 = Hex_E
+keycode 19 = r
+keycode 20 = t
+keycode 21 = y
+keycode 22 = u
+keycode 23 = i
+keycode 24 = o
+keycode 25 = p
+keycode 26 = bracketleft braceleft
+ control keycode 26 = Escape
+ alt keycode 26 = Meta_bracketleft
+keycode 27 = bracketright braceright asciitilde
+ control keycode 27 = Control_bracketright
+ alt keycode 27 = Meta_bracketright
+keycode 28 = Return
+ alt keycode 28 = Meta_Control_m
+keycode 29 = Control
+keycode 30 = a
+ altgr keycode 30 = Hex_A
+keycode 31 = s
+keycode 32 = d
+ altgr keycode 32 = Hex_D
+keycode 33 = f
+ altgr keycode 33 = Hex_F
+keycode 34 = g
+keycode 35 = h
+keycode 36 = j
+keycode 37 = k
+keycode 38 = l
+keycode 39 = semicolon colon
+ alt keycode 39 = Meta_semicolon
+keycode 40 = apostrophe quotedbl
+ control keycode 40 = Control_g
+ alt keycode 40 = Meta_apostrophe
+keycode 41 = grave asciitilde
+ control keycode 41 = nul
+ alt keycode 41 = Meta_grave
+keycode 42 = Shift
+keycode 43 = backslash bar
+ control keycode 43 = Control_backslash
+ alt keycode 43 = Meta_backslash
+keycode 44 = z
+keycode 45 = x
+keycode 46 = c
+ altgr keycode 46 = Hex_C
+keycode 47 = v
+keycode 48 = b
+ altgr keycode 48 = Hex_B
+keycode 49 = n
+keycode 50 = m
+keycode 51 = comma less
+ alt keycode 51 = Meta_comma
+keycode 52 = period greater
+ control keycode 52 = Compose
+ alt keycode 52 = Meta_period
+keycode 53 = slash question
+ control keycode 53 = Delete
+ alt keycode 53 = Meta_slash
+keycode 54 = Shift
+keycode 55 = KP_Multiply
+keycode 56 = Alt
+keycode 57 = space space
+ control keycode 57 = nul
+ alt keycode 57 = Meta_space
+keycode 58 = Caps_Lock
+keycode 59 = F1 F11 Console_13
+ control keycode 59 = F1
+ alt keycode 59 = Console_1
+ control alt keycode 59 = Console_1
+keycode 60 = F2 F12 Console_14
+ control keycode 60 = F2
+ alt keycode 60 = Console_2
+ control alt keycode 60 = Console_2
+keycode 61 = F3 F13 Console_15
+ control keycode 61 = F3
+ alt keycode 61 = Console_3
+ control alt keycode 61 = Console_3
+keycode 62 = F4 F14 Console_16
+ control keycode 62 = F4
+ alt keycode 62 = Console_4
+ control alt keycode 62 = Console_4
+keycode 63 = F5 F15 Console_17
+ control keycode 63 = F5
+ alt keycode 63 = Console_5
+ control alt keycode 63 = Console_5
+keycode 64 = F6 F16 Console_18
+ control keycode 64 = F6
+ alt keycode 64 = Console_6
+ control alt keycode 64 = Console_6
+keycode 65 = F7 F17 Console_19
+ control keycode 65 = F7
+ alt keycode 65 = Console_7
+ control alt keycode 65 = Console_7
+keycode 66 = F8 F18 Console_20
+ control keycode 66 = F8
+ alt keycode 66 = Console_8
+ control alt keycode 66 = Console_8
+keycode 67 = F9 F19 Console_21
+ control keycode 67 = F9
+ alt keycode 67 = Console_9
+ control alt keycode 67 = Console_9
+keycode 68 = F10 F20 Console_22
+ control keycode 68 = F10
+ alt keycode 68 = Console_10
+ control alt keycode 68 = Console_10
+keycode 69 = Num_Lock
+ shift keycode 69 = Bare_Num_Lock
+keycode 70 = Scroll_Lock Show_Memory Show_Registers
+ control keycode 70 = Show_State
+ alt keycode 70 = Scroll_Lock
+keycode 71 = KP_7
+ alt keycode 71 = Ascii_7
+ altgr keycode 71 = Hex_7
+keycode 72 = KP_8
+ alt keycode 72 = Ascii_8
+ altgr keycode 72 = Hex_8
+keycode 73 = KP_9
+ alt keycode 73 = Ascii_9
+ altgr keycode 73 = Hex_9
+keycode 74 = KP_Subtract
+keycode 75 = KP_4
+ alt keycode 75 = Ascii_4
+ altgr keycode 75 = Hex_4
+keycode 76 = KP_5
+ alt keycode 76 = Ascii_5
+ altgr keycode 76 = Hex_5
+keycode 77 = KP_6
+ alt keycode 77 = Ascii_6
+ altgr keycode 77 = Hex_6
+keycode 78 = KP_Add
+keycode 79 = KP_1
+ alt keycode 79 = Ascii_1
+ altgr keycode 79 = Hex_1
+keycode 80 = KP_2
+ alt keycode 80 = Ascii_2
+ altgr keycode 80 = Hex_2
+keycode 81 = KP_3
+ alt keycode 81 = Ascii_3
+ altgr keycode 81 = Hex_3
+keycode 82 = KP_0
+ alt keycode 82 = Ascii_0
+ altgr keycode 82 = Hex_0
+keycode 83 = KP_Period
+# altgr control keycode 83 = Boot
+ control alt keycode 83 = Boot
+keycode 84 = Last_Console
+keycode 85 =
+keycode 86 = less greater bar
+ alt keycode 86 = Meta_less
+keycode 87 = F11 F11 Console_23
+ control keycode 87 = F11
+ alt keycode 87 = Console_11
+ control alt keycode 87 = Console_11
+keycode 88 = F12 F12 Console_24
+ control keycode 88 = F12
+ alt keycode 88 = Console_12
+ control alt keycode 88 = Console_12
+keycode 89 =
+keycode 90 =
+keycode 91 =
+keycode 92 =
+keycode 93 =
+keycode 94 =
+keycode 95 =
+keycode 96 = KP_Enter
+keycode 97 = Control
+keycode 98 = KP_Divide
+keycode 99 = Control_backslash
+ control keycode 99 = Control_backslash
+ alt keycode 99 = Control_backslash
+keycode 100 = AltGr
+keycode 101 = Break
+keycode 102 = Find
+keycode 103 = Up
+keycode 104 = Prior
+ shift keycode 104 = Scroll_Backward
+keycode 105 = Left
+ alt keycode 105 = Decr_Console
+keycode 106 = Right
+ alt keycode 106 = Incr_Console
+keycode 107 = Select
+keycode 108 = Down
+keycode 109 = Next
+ shift keycode 109 = Scroll_Forward
+keycode 110 = Insert
+keycode 111 = Remove
+# altgr control keycode 111 = Boot
+ control alt keycode 111 = Boot
+keycode 112 = Macro
+keycode 113 = F13
+keycode 114 = F14
+keycode 115 = Help
+keycode 116 = Do
+keycode 117 = F17
+keycode 118 = KP_MinPlus
+keycode 119 = Pause
+keycode 120 =
+keycode 121 =
+keycode 122 =
+keycode 123 =
+keycode 124 =
+keycode 125 =
+keycode 126 =
+keycode 127 =
+string F1 = "\033[[A"
+string F2 = "\033[[B"
+string F3 = "\033[[C"
+string F4 = "\033[[D"
+string F5 = "\033[[E"
+string F6 = "\033[17~"
+string F7 = "\033[18~"
+string F8 = "\033[19~"
+string F9 = "\033[20~"
+string F10 = "\033[21~"
+string F11 = "\033[23~"
+string F12 = "\033[24~"
+string F13 = "\033[25~"
+string F14 = "\033[26~"
+string F15 = "\033[28~"
+string F16 = "\033[29~"
+string F17 = "\033[31~"
+string F18 = "\033[32~"
+string F19 = "\033[33~"
+string F20 = "\033[34~"
+string Find = "\033[1~"
+string Insert = "\033[2~"
+string Remove = "\033[3~"
+string Select = "\033[4~"
+string Prior = "\033[5~"
+string Next = "\033[6~"
+string Macro = "\033[M"
+string Pause = "\033[P"
+compose '`' 'A' to 'À'
+compose '`' 'a' to 'à'
+compose '\'' 'A' to 'Á'
+compose '\'' 'a' to 'á'
+compose '^' 'A' to 'Â'
+compose '^' 'a' to 'â'
+compose '~' 'A' to 'Ã'
+compose '~' 'a' to 'ã'
+compose '"' 'A' to 'Ä'
+compose '"' 'a' to 'ä'
+compose 'O' 'A' to 'Å'
+compose 'o' 'a' to 'å'
+compose '0' 'A' to 'Å'
+compose '0' 'a' to 'å'
+compose 'A' 'A' to 'Å'
+compose 'a' 'a' to 'å'
+compose 'A' 'E' to 'Æ'
+compose 'a' 'e' to 'æ'
+compose ',' 'C' to 'Ç'
+compose ',' 'c' to 'ç'
+compose '`' 'E' to 'È'
+compose '`' 'e' to 'è'
+compose '\'' 'E' to 'É'
+compose '\'' 'e' to 'é'
+compose '^' 'E' to 'Ê'
+compose '^' 'e' to 'ê'
+compose '"' 'E' to 'Ë'
+compose '"' 'e' to 'ë'
+compose '`' 'I' to 'Ì'
+compose '`' 'i' to 'ì'
+compose '\'' 'I' to 'Í'
+compose '\'' 'i' to 'í'
+compose '^' 'I' to 'Î'
+compose '^' 'i' to 'î'
+compose '"' 'I' to 'Ï'
+compose '"' 'i' to 'ï'
+compose '-' 'D' to 'Ð'
+compose '-' 'd' to 'ð'
+compose '~' 'N' to 'Ñ'
+compose '~' 'n' to 'ñ'
+compose '`' 'O' to 'Ò'
+compose '`' 'o' to 'ò'
+compose '\'' 'O' to 'Ó'
+compose '\'' 'o' to 'ó'
+compose '^' 'O' to 'Ô'
+compose '^' 'o' to 'ô'
+compose '~' 'O' to 'Õ'
+compose '~' 'o' to 'õ'
+compose '"' 'O' to 'Ö'
+compose '"' 'o' to 'ö'
+compose '/' 'O' to 'Ø'
+compose '/' 'o' to 'ø'
+compose '`' 'U' to 'Ù'
+compose '`' 'u' to 'ù'
+compose '\'' 'U' to 'Ú'
+compose '\'' 'u' to 'ú'
+compose '^' 'U' to 'Û'
+compose '^' 'u' to 'û'
+compose '"' 'U' to 'Ü'
+compose '"' 'u' to 'ü'
+compose '\'' 'Y' to 'Ý'
+compose '\'' 'y' to 'ý'
+compose 'T' 'H' to 'Þ'
+compose 't' 'h' to 'þ'
+compose 's' 's' to 'ß'
+compose '"' 'y' to 'ÿ'
+compose 's' 'z' to 'ß'
+compose 'i' 'j' to 'ÿ'
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
new file mode 100644
index 00000000..3761ccf0
--- /dev/null
+++ b/drivers/tty/vt/keyboard.c
@@ -0,0 +1,1453 @@
+/*
+ * Written for linux by Johan Myreen as a translation from
+ * the assembly version by Linus (with diacriticals added)
+ *
+ * Some additional features added by Christoph Niemann (ChN), March 1993
+ *
+ * Loadable keymaps by Risto Kankkunen, May 1993
+ *
+ * Diacriticals redone & other small changes, aeb@cwi.nl, June 1993
+ * Added decr/incr_console, dynamic keymaps, Unicode support,
+ * dynamic function/string keys, led setting, Sept 1994
+ * `Sticky' modifier keys, 951006.
+ *
+ * 11-11-96: SAK should now work in the raw mode (Martin Mares)
+ *
+ * Modified to provide 'generic' keyboard support by Hamish Macdonald
+ * Merge with the m68k keyboard driver and split-off of the PC low-level
+ * parts by Geert Uytterhoeven, May 1997
+ *
+ * 27-05-97: Added support for the Magic SysRq Key (Martin Mares)
+ * 30-07-98: Dead keys redone, aeb@cwi.nl.
+ * 21-08-02: Converted to input API, major cleanup. (Vojtech Pavlik)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/consolemap.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include <linux/kbd_kern.h>
+#include <linux/kbd_diacr.h>
+#include <linux/vt_kern.h>
+#include <linux/input.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
+#include <linux/jiffies.h>
+
+extern void ctrl_alt_del(void);
+
+/*
+ * Exported functions/variables
+ */
+
+#define KBD_DEFMODE ((1 << VC_REPEAT) | (1 << VC_META))
+
+/*
+ * Some laptops take the 789uiojklm,. keys as number pad when NumLock is on.
+ * This seems a good reason to start with NumLock off. On HIL keyboards
+ * of PARISC machines, however, there is no NumLock key and everyone expects the keypad
+ * to be used for numbers.
+ */
+
+#if defined(CONFIG_PARISC) && (defined(CONFIG_KEYBOARD_HIL) || defined(CONFIG_KEYBOARD_HIL_OLD))
+#define KBD_DEFLEDS (1 << VC_NUMLOCK)
+#else
+#define KBD_DEFLEDS 0
+#endif
+
+#define KBD_DEFLOCK 0
+
+void compute_shiftstate(void);
+
+/*
+ * Handler Tables.
+ */
+
+#define K_HANDLERS\
+ k_self, k_fn, k_spec, k_pad,\
+ k_dead, k_cons, k_cur, k_shift,\
+ k_meta, k_ascii, k_lock, k_lowercase,\
+ k_slock, k_dead2, k_brl, k_ignore
+
+typedef void (k_handler_fn)(struct vc_data *vc, unsigned char value,
+ char up_flag);
+static k_handler_fn K_HANDLERS;
+static k_handler_fn *k_handler[16] = { K_HANDLERS };
+
+#define FN_HANDLERS\
+ fn_null, fn_enter, fn_show_ptregs, fn_show_mem,\
+ fn_show_state, fn_send_intr, fn_lastcons, fn_caps_toggle,\
+ fn_num, fn_hold, fn_scroll_forw, fn_scroll_back,\
+ fn_boot_it, fn_caps_on, fn_compose, fn_SAK,\
+ fn_dec_console, fn_inc_console, fn_spawn_con, fn_bare_num
+
+typedef void (fn_handler_fn)(struct vc_data *vc);
+static fn_handler_fn FN_HANDLERS;
+static fn_handler_fn *fn_handler[] = { FN_HANDLERS };
+
+/*
+ * Variables exported for vt_ioctl.c
+ */
+
+/* maximum values each key_handler can handle */
+const int max_vals[] = {
+ 255, ARRAY_SIZE(func_table) - 1, ARRAY_SIZE(fn_handler) - 1, NR_PAD - 1,
+ NR_DEAD - 1, 255, 3, NR_SHIFT - 1, 255, NR_ASCII - 1, NR_LOCK - 1,
+ 255, NR_LOCK - 1, 255, NR_BRL - 1
+};
+
+const int NR_TYPES = ARRAY_SIZE(max_vals);
+
+struct kbd_struct kbd_table[MAX_NR_CONSOLES];
+EXPORT_SYMBOL_GPL(kbd_table);
+static struct kbd_struct *kbd = kbd_table;
+
+struct vt_spawn_console vt_spawn_con = {
+ .lock = __SPIN_LOCK_UNLOCKED(vt_spawn_con.lock),
+ .pid = NULL,
+ .sig = 0,
+};
+
+/*
+ * Variables exported for vt.c
+ */
+
+int shift_state = 0;
+
+/*
+ * Internal Data.
+ */
+
+static struct input_handler kbd_handler;
+static DEFINE_SPINLOCK(kbd_event_lock);
+static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */
+static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
+static bool dead_key_next;
+static int npadch = -1; /* -1 or number assembled on pad */
+static unsigned int diacr;
+static char rep; /* flag telling character repeat */
+
+static unsigned char ledstate = 0xff; /* undefined */
+static unsigned char ledioctl;
+
+static struct ledptr {
+ unsigned int *addr;
+ unsigned int mask;
+ unsigned char valid:1;
+} ledptrs[3];
+
+/*
+ * Notifier list for console keyboard events
+ */
+static ATOMIC_NOTIFIER_HEAD(keyboard_notifier_list);
+
+int register_keyboard_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&keyboard_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(register_keyboard_notifier);
+
+int unregister_keyboard_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&keyboard_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_keyboard_notifier);
+
+/*
+ * Translation of scancodes to keycodes. We set them on only the first
+ * keyboard in the list that accepts the scancode and keycode.
+ * Explanation for not choosing the first attached keyboard anymore:
+ * USB keyboards for example have two event devices: one for all "normal"
+ * keys and one for extra function keys (like "volume up", "make coffee",
+ * etc.). So this means that scancodes for the extra function keys won't
+ * be valid for the first event device, but will be for the second.
+ */
+
+struct getset_keycode_data {
+ struct input_keymap_entry ke;
+ int error;
+};
+
+static int getkeycode_helper(struct input_handle *handle, void *data)
+{
+ struct getset_keycode_data *d = data;
+
+ d->error = input_get_keycode(handle->dev, &d->ke);
+
+ return d->error == 0; /* stop as soon as we successfully get one */
+}
+
+int getkeycode(unsigned int scancode)
+{
+ struct getset_keycode_data d = {
+ .ke = {
+ .flags = 0,
+ .len = sizeof(scancode),
+ .keycode = 0,
+ },
+ .error = -ENODEV,
+ };
+
+ memcpy(d.ke.scancode, &scancode, sizeof(scancode));
+
+ input_handler_for_each_handle(&kbd_handler, &d, getkeycode_helper);
+
+ return d.error ?: d.ke.keycode;
+}
+
+static int setkeycode_helper(struct input_handle *handle, void *data)
+{
+ struct getset_keycode_data *d = data;
+
+ d->error = input_set_keycode(handle->dev, &d->ke);
+
+ return d->error == 0; /* stop as soon as we successfully set one */
+}
+
+int setkeycode(unsigned int scancode, unsigned int keycode)
+{
+ struct getset_keycode_data d = {
+ .ke = {
+ .flags = 0,
+ .len = sizeof(scancode),
+ .keycode = keycode,
+ },
+ .error = -ENODEV,
+ };
+
+ memcpy(d.ke.scancode, &scancode, sizeof(scancode));
+
+ input_handler_for_each_handle(&kbd_handler, &d, setkeycode_helper);
+
+ return d.error;
+}
+
+/*
+ * Making beeps and bells. Note that we prefer beeps to bells, but when
+ * shutting the sound off we do both.
+ */
+
+static int kd_sound_helper(struct input_handle *handle, void *data)
+{
+ unsigned int *hz = data;
+ struct input_dev *dev = handle->dev;
+
+ if (test_bit(EV_SND, dev->evbit)) {
+ if (test_bit(SND_TONE, dev->sndbit)) {
+ input_inject_event(handle, EV_SND, SND_TONE, *hz);
+ if (*hz)
+ return 0;
+ }
+ if (test_bit(SND_BELL, dev->sndbit))
+ input_inject_event(handle, EV_SND, SND_BELL, *hz ? 1 : 0);
+ }
+
+ return 0;
+}
+
+static void kd_nosound(unsigned long ignored)
+{
+ static unsigned int zero;
+
+ input_handler_for_each_handle(&kbd_handler, &zero, kd_sound_helper);
+}
+
+static DEFINE_TIMER(kd_mksound_timer, kd_nosound, 0, 0);
+
+void kd_mksound(unsigned int hz, unsigned int ticks)
+{
+ del_timer_sync(&kd_mksound_timer);
+
+ input_handler_for_each_handle(&kbd_handler, &hz, kd_sound_helper);
+
+ if (hz && ticks)
+ mod_timer(&kd_mksound_timer, jiffies + ticks);
+}
+EXPORT_SYMBOL(kd_mksound);
+
+/*
+ * Setting the keyboard rate.
+ */
+
+static int kbd_rate_helper(struct input_handle *handle, void *data)
+{
+ struct input_dev *dev = handle->dev;
+ struct kbd_repeat *rep = data;
+
+ if (test_bit(EV_REP, dev->evbit)) {
+
+ if (rep[0].delay > 0)
+ input_inject_event(handle,
+ EV_REP, REP_DELAY, rep[0].delay);
+ if (rep[0].period > 0)
+ input_inject_event(handle,
+ EV_REP, REP_PERIOD, rep[0].period);
+
+ rep[1].delay = dev->rep[REP_DELAY];
+ rep[1].period = dev->rep[REP_PERIOD];
+ }
+
+ return 0;
+}
+
+int kbd_rate(struct kbd_repeat *rep)
+{
+ struct kbd_repeat data[2] = { *rep };
+
+ input_handler_for_each_handle(&kbd_handler, data, kbd_rate_helper);
+ *rep = data[1]; /* Copy currently used settings */
+
+ return 0;
+}
+
+/*
+ * Helper Functions.
+ */
+static void put_queue(struct vc_data *vc, int ch)
+{
+ struct tty_struct *tty = vc->port.tty;
+
+ if (tty) {
+ tty_insert_flip_char(tty, ch, 0);
+ con_schedule_flip(tty);
+ }
+}
+
+static void puts_queue(struct vc_data *vc, char *cp)
+{
+ struct tty_struct *tty = vc->port.tty;
+
+ if (!tty)
+ return;
+
+ while (*cp) {
+ tty_insert_flip_char(tty, *cp, 0);
+ cp++;
+ }
+ con_schedule_flip(tty);
+}
+
+static void applkey(struct vc_data *vc, int key, char mode)
+{
+ static char buf[] = { 0x1b, 'O', 0x00, 0x00 };
+
+ buf[1] = (mode ? 'O' : '[');
+ buf[2] = key;
+ puts_queue(vc, buf);
+}
+
+/*
+ * Many other routines do put_queue, but I think either
+ * they produce ASCII, or they produce some user-assigned
+ * string, and in both cases we might assume that it is
+ * in utf-8 already.
+ */
+static void to_utf8(struct vc_data *vc, uint c)
+{
+ if (c < 0x80)
+ /* 0******* */
+ put_queue(vc, c);
+ else if (c < 0x800) {
+ /* 110***** 10****** */
+ put_queue(vc, 0xc0 | (c >> 6));
+ put_queue(vc, 0x80 | (c & 0x3f));
+ } else if (c < 0x10000) {
+ if (c >= 0xD800 && c < 0xE000)
+ return;
+ if (c == 0xFFFF)
+ return;
+ /* 1110**** 10****** 10****** */
+ put_queue(vc, 0xe0 | (c >> 12));
+ put_queue(vc, 0x80 | ((c >> 6) & 0x3f));
+ put_queue(vc, 0x80 | (c & 0x3f));
+ } else if (c < 0x110000) {
+ /* 11110*** 10****** 10****** 10****** */
+ put_queue(vc, 0xf0 | (c >> 18));
+ put_queue(vc, 0x80 | ((c >> 12) & 0x3f));
+ put_queue(vc, 0x80 | ((c >> 6) & 0x3f));
+ put_queue(vc, 0x80 | (c & 0x3f));
+ }
+}
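+
+/*
+ * Example (annotation, not part of the original source): to_utf8(vc,
+ * 0x20ac) queues the three bytes 0xe2 0x82 0xac, the UTF-8 encoding of
+ * the euro sign; surrogates (0xd800-0xdfff) and 0xffff are dropped.
+ */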
+
+/*
+ * Called after returning from RAW mode or when changing consoles - recompute
+ * shift_down[] and shift_state from key_down[]. May be called when the keymap
+ * is undefined, so that a shift key release is still seen.
+ */
+void compute_shiftstate(void)
+{
+ unsigned int i, j, k, sym, val;
+
+ shift_state = 0;
+ memset(shift_down, 0, sizeof(shift_down));
+
+ for (i = 0; i < ARRAY_SIZE(key_down); i++) {
+
+ if (!key_down[i])
+ continue;
+
+ k = i * BITS_PER_LONG;
+
+ for (j = 0; j < BITS_PER_LONG; j++, k++) {
+
+ if (!test_bit(k, key_down))
+ continue;
+
+ sym = U(key_maps[0][k]);
+ if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
+ continue;
+
+ val = KVAL(sym);
+ if (val == KVAL(K_CAPSSHIFT))
+ val = KVAL(K_SHIFT);
+
+ shift_down[val]++;
+ shift_state |= (1 << val);
+ }
+ }
+}
+
+/*
+ * We have a combining character DIACR here, followed by the character CH.
+ * If the combination occurs in the table, return the corresponding value.
+ * Otherwise, if CH is a space or equals DIACR, return DIACR.
+ * Otherwise, conclude that DIACR was not combining after all,
+ * queue it and return CH.
+ */
+static unsigned int handle_diacr(struct vc_data *vc, unsigned int ch)
+{
+ unsigned int d = diacr;
+ unsigned int i;
+
+ diacr = 0;
+
+ if ((d & ~0xff) == BRL_UC_ROW) {
+ if ((ch & ~0xff) == BRL_UC_ROW)
+ return d | ch;
+ } else {
+ for (i = 0; i < accent_table_size; i++)
+ if (accent_table[i].diacr == d && accent_table[i].base == ch)
+ return accent_table[i].result;
+ }
+
+ if (ch == ' ' || ch == (BRL_UC_ROW|0) || ch == d)
+ return d;
+
+ if (kbd->kbdmode == VC_UNICODE)
+ to_utf8(vc, d);
+ else {
+ int c = conv_uni_to_8bit(d);
+ if (c != -1)
+ put_queue(vc, c);
+ }
+
+ return ch;
+}
+
+/*
+ * Special function handlers
+ */
+static void fn_enter(struct vc_data *vc)
+{
+ if (diacr) {
+ if (kbd->kbdmode == VC_UNICODE)
+ to_utf8(vc, diacr);
+ else {
+ int c = conv_uni_to_8bit(diacr);
+ if (c != -1)
+ put_queue(vc, c);
+ }
+ diacr = 0;
+ }
+
+ put_queue(vc, 13);
+ if (vc_kbd_mode(kbd, VC_CRLF))
+ put_queue(vc, 10);
+}
+
+static void fn_caps_toggle(struct vc_data *vc)
+{
+ if (rep)
+ return;
+
+ chg_vc_kbd_led(kbd, VC_CAPSLOCK);
+}
+
+static void fn_caps_on(struct vc_data *vc)
+{
+ if (rep)
+ return;
+
+ set_vc_kbd_led(kbd, VC_CAPSLOCK);
+}
+
+static void fn_show_ptregs(struct vc_data *vc)
+{
+ struct pt_regs *regs = get_irq_regs();
+
+ if (regs)
+ show_regs(regs);
+}
+
+static void fn_hold(struct vc_data *vc)
+{
+ struct tty_struct *tty = vc->port.tty;
+
+ if (rep || !tty)
+ return;
+
+ /*
+ * Note: SCROLLOCK will be set (cleared) by stop_tty (start_tty);
+ * these routines are also activated by ^S/^Q.
+ * (And SCROLLOCK can also be set by the ioctl KDSKBLED.)
+ */
+ if (tty->stopped)
+ start_tty(tty);
+ else
+ stop_tty(tty);
+}
+
+static void fn_num(struct vc_data *vc)
+{
+ if (vc_kbd_mode(kbd, VC_APPLIC))
+ applkey(vc, 'P', 1);
+ else
+ fn_bare_num(vc);
+}
+
+/*
+ * Bind this to Shift-NumLock if you work in application keypad mode
+ * but want to be able to change the NumLock flag.
+ * Bind this to NumLock if you prefer that the NumLock key always
+ * changes the NumLock flag.
+ */
+static void fn_bare_num(struct vc_data *vc)
+{
+ if (!rep)
+ chg_vc_kbd_led(kbd, VC_NUMLOCK);
+}
+
+static void fn_lastcons(struct vc_data *vc)
+{
+ /* switch to the last used console, ChN */
+ set_console(last_console);
+}
+
+static void fn_dec_console(struct vc_data *vc)
+{
+ int i, cur = fg_console;
+
+ /* Currently switching? Queue this next switch relative to that. */
+ if (want_console != -1)
+ cur = want_console;
+
+ for (i = cur - 1; i != cur; i--) {
+ if (i == -1)
+ i = MAX_NR_CONSOLES - 1;
+ if (vc_cons_allocated(i))
+ break;
+ }
+ set_console(i);
+}
+
+static void fn_inc_console(struct vc_data *vc)
+{
+ int i, cur = fg_console;
+
+ /* Currently switching? Queue this next switch relative to that. */
+ if (want_console != -1)
+ cur = want_console;
+
+ for (i = cur+1; i != cur; i++) {
+ if (i == MAX_NR_CONSOLES)
+ i = 0;
+ if (vc_cons_allocated(i))
+ break;
+ }
+ set_console(i);
+}
+
+static void fn_send_intr(struct vc_data *vc)
+{
+ struct tty_struct *tty = vc->port.tty;
+
+ if (!tty)
+ return;
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
+ con_schedule_flip(tty);
+}
+
+static void fn_scroll_forw(struct vc_data *vc)
+{
+ scrollfront(vc, 0);
+}
+
+static void fn_scroll_back(struct vc_data *vc)
+{
+ scrollback(vc, 0);
+}
+
+static void fn_show_mem(struct vc_data *vc)
+{
+ show_mem(0);
+}
+
+static void fn_show_state(struct vc_data *vc)
+{
+ show_state();
+}
+
+static void fn_boot_it(struct vc_data *vc)
+{
+ ctrl_alt_del();
+}
+
+static void fn_compose(struct vc_data *vc)
+{
+ dead_key_next = true;
+}
+
+static void fn_spawn_con(struct vc_data *vc)
+{
+ spin_lock(&vt_spawn_con.lock);
+ if (vt_spawn_con.pid)
+ if (kill_pid(vt_spawn_con.pid, vt_spawn_con.sig, 1)) {
+ put_pid(vt_spawn_con.pid);
+ vt_spawn_con.pid = NULL;
+ }
+ spin_unlock(&vt_spawn_con.lock);
+}
+
+static void fn_SAK(struct vc_data *vc)
+{
+ struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
+ schedule_work(SAK_work);
+}
+
+static void fn_null(struct vc_data *vc)
+{
+ compute_shiftstate();
+}
+
+/*
+ * Special key handlers
+ */
+static void k_ignore(struct vc_data *vc, unsigned char value, char up_flag)
+{
+}
+
+static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ if (up_flag)
+ return;
+ if (value >= ARRAY_SIZE(fn_handler))
+ return;
+ if ((kbd->kbdmode == VC_RAW ||
+ kbd->kbdmode == VC_MEDIUMRAW ||
+ kbd->kbdmode == VC_OFF) &&
+ value != KVAL(K_SAK))
+ return; /* SAK is allowed even in raw mode */
+ fn_handler[value](vc);
+}
+
+static void k_lowercase(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ pr_err("k_lowercase was called - impossible\n");
+}
+
+static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag)
+{
+ if (up_flag)
+ return; /* no action, if this is a key release */
+
+ if (diacr)
+ value = handle_diacr(vc, value);
+
+ if (dead_key_next) {
+ dead_key_next = false;
+ diacr = value;
+ return;
+ }
+ if (kbd->kbdmode == VC_UNICODE)
+ to_utf8(vc, value);
+ else {
+ int c = conv_uni_to_8bit(value);
+ if (c != -1)
+ put_queue(vc, c);
+ }
+}
+
+/*
+ * Handle dead key. Note that we now may have several
+ * dead keys modifying the same character. Very useful
+ * for Vietnamese.
+ */
+static void k_deadunicode(struct vc_data *vc, unsigned int value, char up_flag)
+{
+ if (up_flag)
+ return;
+
+ diacr = (diacr ? handle_diacr(vc, value) : value);
+}
+
+static void k_self(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ k_unicode(vc, conv_8bit_to_uni(value), up_flag);
+}
+
+static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ k_deadunicode(vc, value, up_flag);
+}
+
+/*
+ * Obsolete - for backwards compatibility only
+ */
+static void k_dead(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ static const unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' };
+
+ k_deadunicode(vc, ret_diacr[value], up_flag);
+}
+
+static void k_cons(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ if (up_flag)
+ return;
+
+ set_console(value);
+}
+
+static void k_fn(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ if (up_flag)
+ return;
+
+ if ((unsigned)value < ARRAY_SIZE(func_table)) {
+ if (func_table[value])
+ puts_queue(vc, func_table[value]);
+ } else
+ pr_err("k_fn called with value=%d\n", value);
+}
+
+static void k_cur(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ static const char cur_chars[] = "BDCA";
+
+ if (up_flag)
+ return;
+
+ applkey(vc, cur_chars[value], vc_kbd_mode(kbd, VC_CKMODE));
+}
+
+static void k_pad(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ static const char pad_chars[] = "0123456789+-*/\015,.?()#";
+ static const char app_map[] = "pqrstuvwxylSRQMnnmPQS";
+
+ if (up_flag)
+ return; /* no action, if this is a key release */
+
+ /* kludge... shift forces cursor/number keys */
+ if (vc_kbd_mode(kbd, VC_APPLIC) && !shift_down[KG_SHIFT]) {
+ applkey(vc, app_map[value], 1);
+ return;
+ }
+
+ if (!vc_kbd_led(kbd, VC_NUMLOCK)) {
+
+ switch (value) {
+ case KVAL(K_PCOMMA):
+ case KVAL(K_PDOT):
+ k_fn(vc, KVAL(K_REMOVE), 0);
+ return;
+ case KVAL(K_P0):
+ k_fn(vc, KVAL(K_INSERT), 0);
+ return;
+ case KVAL(K_P1):
+ k_fn(vc, KVAL(K_SELECT), 0);
+ return;
+ case KVAL(K_P2):
+ k_cur(vc, KVAL(K_DOWN), 0);
+ return;
+ case KVAL(K_P3):
+ k_fn(vc, KVAL(K_PGDN), 0);
+ return;
+ case KVAL(K_P4):
+ k_cur(vc, KVAL(K_LEFT), 0);
+ return;
+ case KVAL(K_P6):
+ k_cur(vc, KVAL(K_RIGHT), 0);
+ return;
+ case KVAL(K_P7):
+ k_fn(vc, KVAL(K_FIND), 0);
+ return;
+ case KVAL(K_P8):
+ k_cur(vc, KVAL(K_UP), 0);
+ return;
+ case KVAL(K_P9):
+ k_fn(vc, KVAL(K_PGUP), 0);
+ return;
+ case KVAL(K_P5):
+ applkey(vc, 'G', vc_kbd_mode(kbd, VC_APPLIC));
+ return;
+ }
+ }
+
+ put_queue(vc, pad_chars[value]);
+ if (value == KVAL(K_PENTER) && vc_kbd_mode(kbd, VC_CRLF))
+ put_queue(vc, 10);
+}
+
+static void k_shift(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ int old_state = shift_state;
+
+ if (rep)
+ return;
+ /*
+ * Mimic typewriter:
+ * a CapsShift key acts like Shift but undoes CapsLock
+ */
+ if (value == KVAL(K_CAPSSHIFT)) {
+ value = KVAL(K_SHIFT);
+ if (!up_flag)
+ clr_vc_kbd_led(kbd, VC_CAPSLOCK);
+ }
+
+ if (up_flag) {
+ /*
+ * handle the case that two shift or control
+ * keys are depressed simultaneously
+ */
+ if (shift_down[value])
+ shift_down[value]--;
+ } else
+ shift_down[value]++;
+
+ if (shift_down[value])
+ shift_state |= (1 << value);
+ else
+ shift_state &= ~(1 << value);
+
+ /* kludge */
+ if (up_flag && shift_state != old_state && npadch != -1) {
+ if (kbd->kbdmode == VC_UNICODE)
+ to_utf8(vc, npadch);
+ else
+ put_queue(vc, npadch & 0xff);
+ npadch = -1;
+ }
+}
+
+static void k_meta(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ if (up_flag)
+ return;
+
+ if (vc_kbd_mode(kbd, VC_META)) {
+ put_queue(vc, '\033');
+ put_queue(vc, value);
+ } else
+ put_queue(vc, value | 0x80);
+}
+
+static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ int base;
+
+ if (up_flag)
+ return;
+
+ if (value < 10) {
+ /* decimal input of code, while Alt depressed */
+ base = 10;
+ } else {
+ /* hexadecimal input of code, while AltGr depressed */
+ value -= 10;
+ base = 16;
+ }
+
+ if (npadch == -1)
+ npadch = value;
+ else
+ npadch = npadch * base + value;
+}
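+
+/*
+ * Illustrative example (not part of the original source): holding Alt and
+ * typing 1, 6, 4 on the keypad calls k_ascii() with values 1, 6 and 4, so
+ * npadch becomes 1, then 16, then 164. The accumulated code is flushed in
+ * k_shift() above when the modifier is released: put_queue(vc, 164) in
+ * 8-bit mode, or to_utf8(vc, 164) when the console is in Unicode mode.
+ */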
+
+static void k_lock(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ if (up_flag || rep)
+ return;
+
+ chg_vc_kbd_lock(kbd, value);
+}
+
+static void k_slock(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ k_shift(vc, value, up_flag);
+ if (up_flag || rep)
+ return;
+
+ chg_vc_kbd_slock(kbd, value);
+ /* try to make Alt, oops, AltGr and such work */
+ if (!key_maps[kbd->lockstate ^ kbd->slockstate]) {
+ kbd->slockstate = 0;
+ chg_vc_kbd_slock(kbd, value);
+ }
+}
+
+/* by default, 300ms interval for combination release */
+static unsigned brl_timeout = 300;
+MODULE_PARM_DESC(brl_timeout, "Braille keys release delay in ms (0 for commit on first key release)");
+module_param(brl_timeout, uint, 0644);
+
+static unsigned brl_nbchords = 1;
+MODULE_PARM_DESC(brl_nbchords, "Number of chords that produce a braille pattern (0 for dead chords)");
+module_param(brl_nbchords, uint, 0644);
+
+static void k_brlcommit(struct vc_data *vc, unsigned int pattern, char up_flag)
+{
+ static unsigned long chords;
+ static unsigned committed;
+
+ if (!brl_nbchords)
+ k_deadunicode(vc, BRL_UC_ROW | pattern, up_flag);
+ else {
+ committed |= pattern;
+ chords++;
+ if (chords == brl_nbchords) {
+ k_unicode(vc, BRL_UC_ROW | committed, up_flag);
+ chords = 0;
+ committed = 0;
+ }
+ }
+}
+
+static void k_brl(struct vc_data *vc, unsigned char value, char up_flag)
+{
+ static unsigned pressed, committing;
+ static unsigned long releasestart;
+
+ if (kbd->kbdmode != VC_UNICODE) {
+ if (!up_flag)
+ pr_warning("keyboard mode must be unicode for braille patterns\n");
+ return;
+ }
+
+ if (!value) {
+ k_unicode(vc, BRL_UC_ROW, up_flag);
+ return;
+ }
+
+ if (value > 8)
+ return;
+
+ if (!up_flag) {
+ pressed |= 1 << (value - 1);
+ if (!brl_timeout)
+ committing = pressed;
+ } else if (brl_timeout) {
+ if (!committing ||
+ time_after(jiffies,
+ releasestart + msecs_to_jiffies(brl_timeout))) {
+ committing = pressed;
+ releasestart = jiffies;
+ }
+ pressed &= ~(1 << (value - 1));
+ if (!pressed && committing) {
+ k_brlcommit(vc, committing, 0);
+ committing = 0;
+ }
+ } else {
+ if (committing) {
+ k_brlcommit(vc, committing, 0);
+ committing = 0;
+ }
+ pressed &= ~(1 << (value - 1));
+ }
+}
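+
+/*
+ * Illustrative example (not part of the original source): with the default
+ * brl_nbchords=1, pressing braille dots 1 and 2 together sets bits 0 and 1
+ * in "pressed"; once both keys are released the chord is committed and
+ * k_unicode() emits BRL_UC_ROW | 0x03, i.e. U+2803 (braille pattern
+ * dots-12), through to_utf8().
+ */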
+
+/*
+ * The LEDs display either (i) the status of NumLock, CapsLock, ScrollLock,
+ * or (ii) whatever pattern of lights people want to show using KDSETLED,
+ * or (iii) specified bits of specified words in kernel memory.
+ */
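+/*
+ * Note (illustrative, based on kbd_update_leds_helper() below): in the
+ * ledstate byte, bit 0 drives Scroll Lock, bit 1 Num Lock and bit 2
+ * Caps Lock, so e.g. getledstate() == 0x02 means only Num Lock is lit.
+ */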
+unsigned char getledstate(void)
+{
+ return ledstate;
+}
+
+void setledstate(struct kbd_struct *kbd, unsigned int led)
+{
+ if (!(led & ~7)) {
+ ledioctl = led;
+ kbd->ledmode = LED_SHOW_IOCTL;
+ } else
+ kbd->ledmode = LED_SHOW_FLAGS;
+
+ set_leds();
+}
+
+static inline unsigned char getleds(void)
+{
+ struct kbd_struct *kbd = kbd_table + fg_console;
+ unsigned char leds;
+ int i;
+
+ if (kbd->ledmode == LED_SHOW_IOCTL)
+ return ledioctl;
+
+ leds = kbd->ledflagstate;
+
+ if (kbd->ledmode == LED_SHOW_MEM) {
+ for (i = 0; i < 3; i++)
+ if (ledptrs[i].valid) {
+ if (*ledptrs[i].addr & ledptrs[i].mask)
+ leds |= (1 << i);
+ else
+ leds &= ~(1 << i);
+ }
+ }
+ return leds;
+}
+
+static int kbd_update_leds_helper(struct input_handle *handle, void *data)
+{
+ unsigned char leds = *(unsigned char *)data;
+
+ if (test_bit(EV_LED, handle->dev->evbit)) {
+ input_inject_event(handle, EV_LED, LED_SCROLLL, !!(leds & 0x01));
+ input_inject_event(handle, EV_LED, LED_NUML, !!(leds & 0x02));
+ input_inject_event(handle, EV_LED, LED_CAPSL, !!(leds & 0x04));
+ input_inject_event(handle, EV_SYN, SYN_REPORT, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * This is the tasklet that updates LED state on all keyboards
+ * attached to the box. The reason we use a tasklet is that we
+ * need to handle the case where the keyboard handler is not yet
+ * registered but we are already getting updates from the VT layer
+ * to update LED state.
+ */
+static void kbd_bh(unsigned long dummy)
+{
+ unsigned char leds = getleds();
+
+ if (leds != ledstate) {
+ input_handler_for_each_handle(&kbd_handler, &leds,
+ kbd_update_leds_helper);
+ ledstate = leds;
+ }
+}
+
+DECLARE_TASKLET_DISABLED(keyboard_tasklet, kbd_bh, 0);
+
+#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_ALPHA) ||\
+ defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) ||\
+ defined(CONFIG_PARISC) || defined(CONFIG_SUPERH) ||\
+ (defined(CONFIG_ARM) && defined(CONFIG_KEYBOARD_ATKBD) && !defined(CONFIG_ARCH_RPC)) ||\
+ defined(CONFIG_AVR32)
+
+#define HW_RAW(dev) (test_bit(EV_MSC, dev->evbit) && test_bit(MSC_RAW, dev->mscbit) &&\
+ ((dev)->id.bustype == BUS_I8042) && ((dev)->id.vendor == 0x0001) && ((dev)->id.product == 0x0001))
+
+static const unsigned short x86_keycodes[256] =
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84,118, 86, 87, 88,115,120,119,121,112,123, 92,
+ 284,285,309, 0,312, 91,327,328,329,331,333,335,336,337,338,339,
+ 367,288,302,304,350, 89,334,326,267,126,268,269,125,347,348,349,
+ 360,261,262,263,268,376,100,101,321,316,373,286,289,102,351,355,
+ 103,104,105,275,287,279,258,106,274,107,294,364,358,363,362,361,
+ 291,108,381,281,290,272,292,305,280, 99,112,257,306,359,113,114,
+ 264,117,271,374,379,265,266, 93, 94, 95, 85,259,375,260, 90,116,
+ 377,109,111,277,278,282,283,295,296,297,299,300,301,293,303,307,
+ 308,310,313,314,315,317,318,319,320,357,322,323,324,325,276,330,
+ 332,340,365,342,343,344,345,346,356,270,341,368,369,370,371,372 };
+
+#ifdef CONFIG_SPARC
+static int sparc_l1_a_state;
+extern void sun_do_break(void);
+#endif
+
+static int emulate_raw(struct vc_data *vc, unsigned int keycode,
+ unsigned char up_flag)
+{
+ int code;
+
+ switch (keycode) {
+
+ case KEY_PAUSE:
+ put_queue(vc, 0xe1);
+ put_queue(vc, 0x1d | up_flag);
+ put_queue(vc, 0x45 | up_flag);
+ break;
+
+ case KEY_HANGEUL:
+ if (!up_flag)
+ put_queue(vc, 0xf2);
+ break;
+
+ case KEY_HANJA:
+ if (!up_flag)
+ put_queue(vc, 0xf1);
+ break;
+
+ case KEY_SYSRQ:
+ /*
+ * Real AT keyboards (that's what we're trying
+ * to emulate here) emit 0xe0 0x2a 0xe0 0x37 when
+ * pressing PrtSc/SysRq alone, but simply 0x54
+ * when pressing Alt+PrtSc/SysRq.
+ */
+ if (test_bit(KEY_LEFTALT, key_down) ||
+ test_bit(KEY_RIGHTALT, key_down)) {
+ put_queue(vc, 0x54 | up_flag);
+ } else {
+ put_queue(vc, 0xe0);
+ put_queue(vc, 0x2a | up_flag);
+ put_queue(vc, 0xe0);
+ put_queue(vc, 0x37 | up_flag);
+ }
+ break;
+
+ default:
+ if (keycode > 255)
+ return -1;
+
+ code = x86_keycodes[keycode];
+ if (!code)
+ return -1;
+
+ if (code & 0x100)
+ put_queue(vc, 0xe0);
+ put_queue(vc, (code & 0x7f) | up_flag);
+
+ break;
+ }
+
+ return 0;
+}
+
+#else
+
+#define HW_RAW(dev) 0
+
+static int emulate_raw(struct vc_data *vc, unsigned int keycode, unsigned char up_flag)
+{
+ if (keycode > 127)
+ return -1;
+
+ put_queue(vc, keycode | up_flag);
+ return 0;
+}
+#endif
+
+static void kbd_rawcode(unsigned char data)
+{
+ struct vc_data *vc = vc_cons[fg_console].d;
+
+ kbd = kbd_table + vc->vc_num;
+ if (kbd->kbdmode == VC_RAW)
+ put_queue(vc, data);
+}
+
+static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
+{
+ struct vc_data *vc = vc_cons[fg_console].d;
+ unsigned short keysym, *key_map;
+ unsigned char type;
+ bool raw_mode;
+ struct tty_struct *tty;
+ int shift_final;
+ struct keyboard_notifier_param param = { .vc = vc, .value = keycode, .down = down };
+ int rc;
+
+ tty = vc->port.tty;
+
+ if (tty && (!tty->driver_data)) {
+ /* No driver data? Strange. Okay we fix it then. */
+ tty->driver_data = vc;
+ }
+
+ kbd = kbd_table + vc->vc_num;
+
+#ifdef CONFIG_SPARC
+ if (keycode == KEY_STOP)
+ sparc_l1_a_state = down;
+#endif
+
+ rep = (down == 2);
+
+ raw_mode = (kbd->kbdmode == VC_RAW);
+ if (raw_mode && !hw_raw)
+ if (emulate_raw(vc, keycode, !down << 7))
+ if (keycode < BTN_MISC && printk_ratelimit())
+ pr_warning("can't emulate rawmode for keycode %d\n",
+ keycode);
+
+#ifdef CONFIG_SPARC
+ if (keycode == KEY_A && sparc_l1_a_state) {
+ sparc_l1_a_state = false;
+ sun_do_break();
+ }
+#endif
+
+ if (kbd->kbdmode == VC_MEDIUMRAW) {
+ /*
+ * This is extended medium raw mode, with keys above 127
+ * encoded as 0, high 7 bits, low 7 bits, with the 0 bearing
+ * the 'up' flag if needed. 0 is reserved, so this shouldn't
+ * interfere with anything else. The two bytes after 0 will
+ * always have the up flag set so they do not interfere with older
+ * applications. This allows for 16384 different keycodes,
+ * which should be enough.
+ */
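+ /*
+ * Worked example (illustrative): a press of keycode 300 is sent as
+ * the three bytes 0x00, 0x82 (300 >> 7, flagged), 0xac (300 & 0x7f,
+ * flagged); the matching release is 0x80, 0x82, 0xac.
+ */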
+ if (keycode < 128) {
+ put_queue(vc, keycode | (!down << 7));
+ } else {
+ put_queue(vc, !down << 7);
+ put_queue(vc, (keycode >> 7) | 0x80);
+ put_queue(vc, keycode | 0x80);
+ }
+ raw_mode = true;
+ }
+
+ if (down)
+ set_bit(keycode, key_down);
+ else
+ clear_bit(keycode, key_down);
+
+ if (rep &&
+ (!vc_kbd_mode(kbd, VC_REPEAT) ||
+ (tty && !L_ECHO(tty) && tty_chars_in_buffer(tty)))) {
+ /*
+ * Don't repeat a key if the input buffers are not empty and the
+ * characters aren't echoed locally. This makes key repeat
+ * usable with slow applications and under heavy loads.
+ */
+ return;
+ }
+
+ param.shift = shift_final = (shift_state | kbd->slockstate) ^ kbd->lockstate;
+ param.ledstate = kbd->ledflagstate;
+ key_map = key_maps[shift_final];
+
+ rc = atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_KEYCODE, &param);
+ if (rc == NOTIFY_STOP || !key_map) {
+ atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_UNBOUND_KEYCODE, &param);
+ compute_shiftstate();
+ kbd->slockstate = 0;
+ return;
+ }
+
+ if (keycode < NR_KEYS)
+ keysym = key_map[keycode];
+ else if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
+ keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1));
+ else
+ return;
+
+ type = KTYP(keysym);
+
+ if (type < 0xf0) {
+ param.value = keysym;
+ rc = atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_UNICODE, &param);
+ if (rc != NOTIFY_STOP)
+ if (down && !raw_mode)
+ to_utf8(vc, keysym);
+ return;
+ }
+
+ type -= 0xf0;
+
+ if (type == KT_LETTER) {
+ type = KT_LATIN;
+ if (vc_kbd_led(kbd, VC_CAPSLOCK)) {
+ key_map = key_maps[shift_final ^ (1 << KG_SHIFT)];
+ if (key_map)
+ keysym = key_map[keycode];
+ }
+ }
+
+ param.value = keysym;
+ rc = atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_KEYSYM, &param);
+ if (rc == NOTIFY_STOP)
+ return;
+
+ if ((raw_mode || kbd->kbdmode == VC_OFF) && type != KT_SPEC && type != KT_SHIFT)
+ return;
+
+ (*k_handler[type])(vc, keysym & 0xff, !down);
+
+ param.ledstate = kbd->ledflagstate;
+ atomic_notifier_call_chain(&keyboard_notifier_list, KBD_POST_KEYSYM, &param);
+
+ if (type != KT_SLOCK)
+ kbd->slockstate = 0;
+}
+
+static void kbd_event(struct input_handle *handle, unsigned int event_type,
+ unsigned int event_code, int value)
+{
+ /* We are called with interrupts disabled, just take the lock */
+ spin_lock(&kbd_event_lock);
+
+ if (event_type == EV_MSC && event_code == MSC_RAW && HW_RAW(handle->dev))
+ kbd_rawcode(value);
+ if (event_type == EV_KEY)
+ kbd_keycode(event_code, value, HW_RAW(handle->dev));
+
+ spin_unlock(&kbd_event_lock);
+
+ tasklet_schedule(&keyboard_tasklet);
+ do_poke_blanked_console = 1;
+ schedule_console_callback();
+}
+
+static bool kbd_match(struct input_handler *handler, struct input_dev *dev)
+{
+ int i;
+
+ if (test_bit(EV_SND, dev->evbit))
+ return true;
+
+ if (test_bit(EV_KEY, dev->evbit)) {
+ for (i = KEY_RESERVED; i < BTN_MISC; i++)
+ if (test_bit(i, dev->keybit))
+ return true;
+ for (i = KEY_BRL_DOT1; i <= KEY_BRL_DOT10; i++)
+ if (test_bit(i, dev->keybit))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * When a keyboard (or other input device) is found, the kbd_connect
+ * function is called. The function then looks at the device, and if it
+ * likes it, it can open it and get events from it. In this (kbd_connect)
+ * function, we should decide which VT to bind that keyboard to initially.
+ */
+static int kbd_connect(struct input_handler *handler, struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ struct input_handle *handle;
+ int error;
+
+ handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = "kbd";
+
+ error = input_register_handle(handle);
+ if (error)
+ goto err_free_handle;
+
+ error = input_open_device(handle);
+ if (error)
+ goto err_unregister_handle;
+
+ return 0;
+
+ err_unregister_handle:
+ input_unregister_handle(handle);
+ err_free_handle:
+ kfree(handle);
+ return error;
+}
+
+static void kbd_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+/*
+ * Start keyboard handler on the new keyboard by refreshing LED state to
+ * match the rest of the system.
+ */
+static void kbd_start(struct input_handle *handle)
+{
+ tasklet_disable(&keyboard_tasklet);
+
+ if (ledstate != 0xff)
+ kbd_update_leds_helper(handle, &ledstate);
+
+ tasklet_enable(&keyboard_tasklet);
+}
+
+static const struct input_device_id kbd_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ },
+
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_SND) },
+ },
+
+ { }, /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(input, kbd_ids);
+
+static struct input_handler kbd_handler = {
+ .event = kbd_event,
+ .match = kbd_match,
+ .connect = kbd_connect,
+ .disconnect = kbd_disconnect,
+ .start = kbd_start,
+ .name = "kbd",
+ .id_table = kbd_ids,
+};
+
+int __init kbd_init(void)
+{
+ int i;
+ int error;
+
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ kbd_table[i].ledflagstate = KBD_DEFLEDS;
+ kbd_table[i].default_ledflagstate = KBD_DEFLEDS;
+ kbd_table[i].ledmode = LED_SHOW_FLAGS;
+ kbd_table[i].lockstate = KBD_DEFLOCK;
+ kbd_table[i].slockstate = 0;
+ kbd_table[i].modeflags = KBD_DEFMODE;
+ kbd_table[i].kbdmode = default_utf8 ? VC_UNICODE : VC_XLATE;
+ }
+
+ error = input_register_handler(&kbd_handler);
+ if (error)
+ return error;
+
+ tasklet_enable(&keyboard_tasklet);
+ tasklet_schedule(&keyboard_tasklet);
+
+ return 0;
+}
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
new file mode 100644
index 00000000..fb864e7f
--- /dev/null
+++ b/drivers/tty/vt/selection.c
@@ -0,0 +1,345 @@
+/*
+ * This module exports the functions:
+ *
+ * 'int set_selection(struct tiocl_selection __user *, struct tty_struct *)'
+ * 'void clear_selection(void)'
+ * 'int paste_selection(struct tty_struct *)'
+ * 'int sel_loadlut(char __user *)'
+ *
+ * Now that /dev/vcs exists, most of this can disappear again.
+ */
+
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/uaccess.h>
+
+#include <linux/kbd_kern.h>
+#include <linux/vt_kern.h>
+#include <linux/consolemap.h>
+#include <linux/selection.h>
+#include <linux/tiocl.h>
+#include <linux/console.h>
+
+/* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
+#define isspace(c) ((c) == ' ')
+
+extern void poke_blanked_console(void);
+
+/* Variables for selection control. */
+/* Use a dynamic buffer, instead of static (Dec 1994) */
+struct vc_data *sel_cons; /* must not be deallocated */
+static int use_unicode;
+static volatile int sel_start = -1; /* cleared by clear_selection */
+static int sel_end;
+static int sel_buffer_lth;
+static char *sel_buffer;
+
+/* clear_selection, highlight and highlight_pointer can be called
+ from interrupt (via scrollback/front) */
+
+/* set reverse video on characters s-e of console with selection. */
+static inline void highlight(const int s, const int e)
+{
+ invert_screen(sel_cons, s, e-s+2, 1);
+}
+
+/* use complementary color to show the pointer */
+static inline void highlight_pointer(const int where)
+{
+ complement_pos(sel_cons, where);
+}
+
+static u16
+sel_pos(int n)
+{
+ return inverse_translate(sel_cons, screen_glyph(sel_cons, n),
+ use_unicode);
+}
+
+/* remove the current selection highlight, if any,
+ from the console holding the selection. */
+void
+clear_selection(void) {
+ highlight_pointer(-1); /* hide the pointer */
+ if (sel_start != -1) {
+ highlight(sel_start, sel_end);
+ sel_start = -1;
+ }
+}
+
+/*
+ * User settable table: what characters are to be considered alphabetic?
+ * 256 bits
+ */
+static u32 inwordLut[8]={
+ 0x00000000, /* control chars */
+ 0x03FF0000, /* digits */
+ 0x87FFFFFE, /* uppercase and '_' */
+ 0x07FFFFFE, /* lowercase */
+ 0x00000000,
+ 0x00000000,
+ 0xFF7FFFFF, /* latin-1 accented letters, not multiplication sign */
+ 0xFF7FFFFF /* latin-1 accented letters, not division sign */
+};
+
+static inline int inword(const u16 c) {
+ return c > 0xff || (( inwordLut[c>>5] >> (c & 0x1F) ) & 1);
+}
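+
+/*
+ * Illustrative example (not part of the original source): for c = 'A'
+ * (0x41) the lookup tests bit 1 of inwordLut[2] (0x87FFFFFE), which is
+ * set, so 'A' is part of a word; for c = ' ' (0x20) it tests bit 0 of
+ * inwordLut[1] (0x03FF0000), which is clear, so a space ends the word.
+ */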
+
+/* set inwordLut contents. Invoked by ioctl(). */
+int sel_loadlut(char __user *p)
+{
+ return copy_from_user(inwordLut, (u32 __user *)(p+4), 32) ? -EFAULT : 0;
+}
+
+/* does screen address p correspond to character at LH/RH edge of screen? */
+static inline int atedge(const int p, int size_row)
+{
+ return (!(p % size_row) || !((p + 2) % size_row));
+}
+
+/* constrain v such that v <= u */
+static inline unsigned short limit(const unsigned short v, const unsigned short u)
+{
+ return (v > u) ? u : v;
+}
+
+/* stores the char in UTF8 and returns the number of bytes used (1-3) */
+static int store_utf8(u16 c, char *p)
+{
+ if (c < 0x80) {
+ /* 0******* */
+ p[0] = c;
+ return 1;
+ } else if (c < 0x800) {
+ /* 110***** 10****** */
+ p[0] = 0xc0 | (c >> 6);
+ p[1] = 0x80 | (c & 0x3f);
+ return 2;
+ } else {
+ /* 1110**** 10****** 10****** */
+ p[0] = 0xe0 | (c >> 12);
+ p[1] = 0x80 | ((c >> 6) & 0x3f);
+ p[2] = 0x80 | (c & 0x3f);
+ return 3;
+ }
+}
+
+/* set the current selection. Invoked by ioctl() or by kernel code. */
+int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty)
+{
+ struct vc_data *vc = vc_cons[fg_console].d;
+ int sel_mode, new_sel_start, new_sel_end, spc;
+ char *bp, *obp;
+ int i, ps, pe, multiplier;
+ u16 c;
+ struct kbd_struct *kbd = kbd_table + fg_console;
+
+ poke_blanked_console();
+
+ { unsigned short xs, ys, xe, ye;
+
+ if (!access_ok(VERIFY_READ, sel, sizeof(*sel)))
+ return -EFAULT;
+ __get_user(xs, &sel->xs);
+ __get_user(ys, &sel->ys);
+ __get_user(xe, &sel->xe);
+ __get_user(ye, &sel->ye);
+ __get_user(sel_mode, &sel->sel_mode);
+ xs--; ys--; xe--; ye--;
+ xs = limit(xs, vc->vc_cols - 1);
+ ys = limit(ys, vc->vc_rows - 1);
+ xe = limit(xe, vc->vc_cols - 1);
+ ye = limit(ye, vc->vc_rows - 1);
+ ps = ys * vc->vc_size_row + (xs << 1);
+ pe = ye * vc->vc_size_row + (xe << 1);
+
+ if (sel_mode == TIOCL_SELCLEAR) {
+ /* useful for screendump without selection highlights */
+ clear_selection();
+ return 0;
+ }
+
+ if (mouse_reporting() && (sel_mode & TIOCL_SELMOUSEREPORT)) {
+ mouse_report(tty, sel_mode & TIOCL_SELBUTTONMASK, xs, ys);
+ return 0;
+ }
+ }
+
+ if (ps > pe) /* make sel_start <= sel_end */
+ {
+ int tmp = ps;
+ ps = pe;
+ pe = tmp;
+ }
+
+ if (sel_cons != vc_cons[fg_console].d) {
+ clear_selection();
+ sel_cons = vc_cons[fg_console].d;
+ }
+ use_unicode = kbd && kbd->kbdmode == VC_UNICODE;
+
+ switch (sel_mode)
+ {
+ case TIOCL_SELCHAR: /* character-by-character selection */
+ new_sel_start = ps;
+ new_sel_end = pe;
+ break;
+ case TIOCL_SELWORD: /* word-by-word selection */
+ spc = isspace(sel_pos(ps));
+ for (new_sel_start = ps; ; ps -= 2)
+ {
+ if ((spc && !isspace(sel_pos(ps))) ||
+ (!spc && !inword(sel_pos(ps))))
+ break;
+ new_sel_start = ps;
+ if (!(ps % vc->vc_size_row))
+ break;
+ }
+ spc = isspace(sel_pos(pe));
+ for (new_sel_end = pe; ; pe += 2)
+ {
+ if ((spc && !isspace(sel_pos(pe))) ||
+ (!spc && !inword(sel_pos(pe))))
+ break;
+ new_sel_end = pe;
+ if (!((pe + 2) % vc->vc_size_row))
+ break;
+ }
+ break;
+ case TIOCL_SELLINE: /* line-by-line selection */
+ new_sel_start = ps - ps % vc->vc_size_row;
+ new_sel_end = pe + vc->vc_size_row
+ - pe % vc->vc_size_row - 2;
+ break;
+ case TIOCL_SELPOINTER:
+ highlight_pointer(pe);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ /* remove the pointer */
+ highlight_pointer(-1);
+
+ /* select to end of line if on trailing space */
+ if (new_sel_end > new_sel_start &&
+ !atedge(new_sel_end, vc->vc_size_row) &&
+ isspace(sel_pos(new_sel_end))) {
+ for (pe = new_sel_end + 2; ; pe += 2)
+ if (!isspace(sel_pos(pe)) ||
+ atedge(pe, vc->vc_size_row))
+ break;
+ if (isspace(sel_pos(pe)))
+ new_sel_end = pe;
+ }
+ if (sel_start == -1) /* no current selection */
+ highlight(new_sel_start, new_sel_end);
+ else if (new_sel_start == sel_start)
+ {
+ if (new_sel_end == sel_end) /* no action required */
+ return 0;
+ else if (new_sel_end > sel_end) /* extend to right */
+ highlight(sel_end + 2, new_sel_end);
+ else /* contract from right */
+ highlight(new_sel_end + 2, sel_end);
+ }
+ else if (new_sel_end == sel_end)
+ {
+ if (new_sel_start < sel_start) /* extend to left */
+ highlight(new_sel_start, sel_start - 2);
+ else /* contract from left */
+ highlight(sel_start, new_sel_start - 2);
+ }
+ else /* some other case; start selection from scratch */
+ {
+ clear_selection();
+ highlight(new_sel_start, new_sel_end);
+ }
+ sel_start = new_sel_start;
+ sel_end = new_sel_end;
+
+ /* Allocate a new buffer before freeing the old one ... */
+ multiplier = use_unicode ? 3 : 1; /* chars can take up to 3 bytes */
+ bp = kmalloc(((sel_end-sel_start)/2+1)*multiplier, GFP_KERNEL);
+ if (!bp) {
+ printk(KERN_WARNING "selection: kmalloc() failed\n");
+ clear_selection();
+ return -ENOMEM;
+ }
+ kfree(sel_buffer);
+ sel_buffer = bp;
+
+ obp = bp;
+ for (i = sel_start; i <= sel_end; i += 2) {
+ c = sel_pos(i);
+ if (use_unicode)
+ bp += store_utf8(c, bp);
+ else
+ *bp++ = c;
+ if (!isspace(c))
+ obp = bp;
+ if (! ((i + 2) % vc->vc_size_row)) {
+ /* strip trailing blanks from line and add newline,
+ unless non-space at end of line. */
+ if (obp != bp) {
+ bp = obp;
+ *bp++ = '\r';
+ }
+ obp = bp;
+ }
+ }
+ sel_buffer_lth = bp - sel_buffer;
+ return 0;
+}
+
+/* Insert the contents of the selection buffer into the
+ * queue of the tty associated with the current console.
+ * Invoked by ioctl().
+ */
+int paste_selection(struct tty_struct *tty)
+{
+ struct vc_data *vc = tty->driver_data;
+ int pasted = 0;
+ unsigned int count;
+ struct tty_ldisc *ld;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /* always called with BTM from vt_ioctl */
+ WARN_ON(!tty_locked());
+
+ console_lock();
+ poke_blanked_console();
+ console_unlock();
+
+ ld = tty_ldisc_ref(tty);
+ if (!ld) {
+ tty_unlock();
+ ld = tty_ldisc_ref_wait(tty);
+ tty_lock();
+ }
+
+ add_wait_queue(&vc->paste_wait, &wait);
+ while (sel_buffer && sel_buffer_lth > pasted) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (test_bit(TTY_THROTTLED, &tty->flags)) {
+ schedule();
+ continue;
+ }
+ count = sel_buffer_lth - pasted;
+ count = min(count, tty->receive_room);
+ tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
+ NULL, count);
+ pasted += count;
+ }
+ remove_wait_queue(&vc->paste_wait, &wait);
+ __set_current_state(TASK_RUNNING);
+
+ tty_ldisc_deref(ld);
+ return 0;
+}
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
new file mode 100644
index 00000000..66825c9f
--- /dev/null
+++ b/drivers/tty/vt/vc_screen.c
@@ -0,0 +1,665 @@
+/*
+ * Provide access to virtual console memory.
+ * /dev/vcs0: the screen as it is being viewed right now (possibly scrolled)
+ * /dev/vcsN: the screen of /dev/ttyN (1 <= N <= 63)
+ * [minor: N]
+ *
+ * /dev/vcsaN: idem, but including attributes, and prefixed with
+ * the 4 bytes lines,columns,x,y (as screendump used to give).
+ * Attribute/character pair is in native endianness.
+ * [minor: N+128]
+ *
+ * This replaces screendump and part of selection, so that the system
+ * administrator can control access using file system permissions.
+ *
+ * aeb@cwi.nl - after Frieda's funeral - 950211
+ *
+ * machek@k332.feld.cvut.cz - modified not to send characters to wrong console
+ * - fixed some fatal off-by-one bugs (0-- no longer == -1 -> looping and looping and looping...)
+ * - making it shorter - scr_readw are macros which expand in PRETTY long code
+ */
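+
+/*
+ * Illustrative examples (not part of the original source): /dev/vcs2 is
+ * minor 2 and maps to the text of tty2, while /dev/vcsa2 is minor 130
+ * (2 + 128) and prepends the 4-byte rows/cols/x/y header before the
+ * character/attribute pairs. Minor 0 (/dev/vcs, /dev/vcsa) always follows
+ * the currently visible console.
+ */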
+
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/vt_kern.h>
+#include <linux/selection.h>
+#include <linux/kbd_kern.h>
+#include <linux/console.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+#undef attr
+#undef org
+#undef addr
+#define HEADER_SIZE 4
+
+#define CON_BUF_SIZE (CONFIG_BASE_SMALL ? 256 : PAGE_SIZE)
+
+struct vcs_poll_data {
+ struct notifier_block notifier;
+ unsigned int cons_num;
+ bool seen_last_update;
+ wait_queue_head_t waitq;
+ struct fasync_struct *fasync;
+};
+
+static int
+vcs_notifier(struct notifier_block *nb, unsigned long code, void *_param)
+{
+ struct vt_notifier_param *param = _param;
+ struct vc_data *vc = param->vc;
+ struct vcs_poll_data *poll =
+ container_of(nb, struct vcs_poll_data, notifier);
+ int currcons = poll->cons_num;
+
+ if (code != VT_UPDATE)
+ return NOTIFY_DONE;
+
+ if (currcons == 0)
+ currcons = fg_console;
+ else
+ currcons--;
+ if (currcons != vc->vc_num)
+ return NOTIFY_DONE;
+
+ poll->seen_last_update = false;
+ wake_up_interruptible(&poll->waitq);
+ kill_fasync(&poll->fasync, SIGIO, POLL_IN);
+ return NOTIFY_OK;
+}
+
+static void
+vcs_poll_data_free(struct vcs_poll_data *poll)
+{
+ unregister_vt_notifier(&poll->notifier);
+ kfree(poll);
+}
+
+static struct vcs_poll_data *
+vcs_poll_data_get(struct file *file)
+{
+ struct vcs_poll_data *poll = file->private_data;
+
+ if (poll)
+ return poll;
+
+ poll = kzalloc(sizeof(*poll), GFP_KERNEL);
+ if (!poll)
+ return NULL;
+ poll->cons_num = iminor(file->f_path.dentry->d_inode) & 127;
+ init_waitqueue_head(&poll->waitq);
+ poll->notifier.notifier_call = vcs_notifier;
+ if (register_vt_notifier(&poll->notifier) != 0) {
+ kfree(poll);
+ return NULL;
+ }
+
+ /*
+ * This code may be called either through ->poll() or ->fasync().
+ * If we have two threads using the same file descriptor, they could
+ * both enter this function, both notice that the structure hasn't
+ * been allocated yet and go ahead and allocate it in parallel, but
+ * only one of them may survive and be shared; otherwise we'd leak
+ * memory with a dangling notifier callback.
+ */
+ spin_lock(&file->f_lock);
+ if (!file->private_data) {
+ file->private_data = poll;
+ } else {
+ /* someone else raced ahead of us */
+ vcs_poll_data_free(poll);
+ poll = file->private_data;
+ }
+ spin_unlock(&file->f_lock);
+
+ return poll;
+}
+
+/*
+ * Returns VC for inode.
+ * Must be called with console_lock.
+ */
+static struct vc_data*
+vcs_vc(struct inode *inode, int *viewed)
+{
+ unsigned int currcons = iminor(inode) & 127;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ if (currcons == 0) {
+ currcons = fg_console;
+ if (viewed)
+ *viewed = 1;
+ } else {
+ currcons--;
+ if (viewed)
+ *viewed = 0;
+ }
+ return vc_cons[currcons].d;
+}
+
+/*
+ * Returns size for VC carried by inode.
+ * Must be called with console_lock.
+ */
+static int
+vcs_size(struct inode *inode)
+{
+ int size;
+ int minor = iminor(inode);
+ struct vc_data *vc;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ vc = vcs_vc(inode, NULL);
+ if (!vc)
+ return -ENXIO;
+
+ size = vc->vc_rows * vc->vc_cols;
+
+ if (minor & 128)
+ size = 2*size + HEADER_SIZE;
+ return size;
+}
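+
+/*
+ * Illustrative example (not part of the original source): for an 80x25
+ * console, /dev/vcsN reports 2000 bytes (one byte per character cell),
+ * while /dev/vcsaN reports 2 * 2000 + HEADER_SIZE = 4004 bytes.
+ */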
+
+static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
+{
+ int size;
+
+ console_lock();
+ size = vcs_size(file->f_path.dentry->d_inode);
+ console_unlock();
+ if (size < 0)
+ return size;
+ switch (orig) {
+ default:
+ return -EINVAL;
+ case 2:
+ offset += size;
+ break;
+ case 1:
+ offset += file->f_pos;
+ case 0:
+ break;
+ }
+ if (offset < 0 || offset > size) {
+ return -EINVAL;
+ }
+ file->f_pos = offset;
+ return file->f_pos;
+}
+
+
+static ssize_t
+vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ unsigned int currcons = iminor(inode);
+ struct vc_data *vc;
+ struct vcs_poll_data *poll;
+ long pos;
+ long attr, read;
+ int col, maxcol, viewed;
+ unsigned short *org = NULL;
+ ssize_t ret;
+ char *con_buf;
+
+ con_buf = (char *) __get_free_page(GFP_KERNEL);
+ if (!con_buf)
+ return -ENOMEM;
+
+ pos = *ppos;
+
+ /* Select the proper current console and verify
+ * sanity of the situation under the console lock.
+ */
+ console_lock();
+
+ attr = (currcons & 128);
+ ret = -ENXIO;
+ vc = vcs_vc(inode, &viewed);
+ if (!vc)
+ goto unlock_out;
+
+ ret = -EINVAL;
+ if (pos < 0)
+ goto unlock_out;
+ poll = file->private_data;
+ if (count && poll)
+ poll->seen_last_update = true;
+ read = 0;
+ ret = 0;
+ while (count) {
+ char *con_buf0, *con_buf_start;
+ long this_round, size;
+ ssize_t orig_count;
+ long p = pos;
+
+ /* Check whether we are above size each round,
+ * as copy_to_user at the end of this loop
+ * could sleep.
+ */
+ size = vcs_size(inode);
+ if (size < 0) {
+ if (read)
+ break;
+ ret = size;
+ goto unlock_out;
+ }
+ if (pos >= size)
+ break;
+ if (count > size - pos)
+ count = size - pos;
+
+ this_round = count;
+ if (this_round > CON_BUF_SIZE)
+ this_round = CON_BUF_SIZE;
+
+ /* Perform the whole read into the local con_buf.
+ * Then we can drop the console spinlock and safely
+ * attempt to move it to userspace.
+ */
+
+ con_buf_start = con_buf0 = con_buf;
+ orig_count = this_round;
+ maxcol = vc->vc_cols;
+ if (!attr) {
+ org = screen_pos(vc, p, viewed);
+ col = p % maxcol;
+ p += maxcol - col;
+ while (this_round-- > 0) {
+ *con_buf0++ = (vcs_scr_readw(vc, org++) & 0xff);
+ if (++col == maxcol) {
+ org = screen_pos(vc, p, viewed);
+ col = 0;
+ p += maxcol;
+ }
+ }
+ } else {
+ if (p < HEADER_SIZE) {
+ size_t tmp_count;
+
+ con_buf0[0] = (char)vc->vc_rows;
+ con_buf0[1] = (char)vc->vc_cols;
+ getconsxy(vc, con_buf0 + 2);
+
+ con_buf_start += p;
+ this_round += p;
+ if (this_round > CON_BUF_SIZE) {
+ this_round = CON_BUF_SIZE;
+ orig_count = this_round - p;
+ }
+
+ tmp_count = HEADER_SIZE;
+ if (tmp_count > this_round)
+ tmp_count = this_round;
+
+ /* Advance state pointers and move on. */
+ this_round -= tmp_count;
+ p = HEADER_SIZE;
+ con_buf0 = con_buf + HEADER_SIZE;
+ /* If this_round >= 0, then p is even... */
+ } else if (p & 1) {
+ /* Skip first byte for output if start address is odd.
+ * Update region sizes up/down depending on free
+ * space in buffer.
+ */
+ con_buf_start++;
+ if (this_round < CON_BUF_SIZE)
+ this_round++;
+ else
+ orig_count--;
+ }
+ if (this_round > 0) {
+ unsigned short *tmp_buf = (unsigned short *)con_buf0;
+
+ p -= HEADER_SIZE;
+ p /= 2;
+ col = p % maxcol;
+
+ org = screen_pos(vc, p, viewed);
+ p += maxcol - col;
+
+ /* Buffer has even length, so we can always copy
+ * character + attribute. We do not copy last byte
+ * to userspace if this_round is odd.
+ */
+ this_round = (this_round + 1) >> 1;
+
+ while (this_round) {
+ *tmp_buf++ = vcs_scr_readw(vc, org++);
+ this_round --;
+ if (++col == maxcol) {
+ org = screen_pos(vc, p, viewed);
+ col = 0;
+ p += maxcol;
+ }
+ }
+ }
+ }
+
+ /* Finally, release the console semaphore while we push
+ * all the data to userspace from our temporary buffer.
+ *
+ * AKPM: Even though it's a semaphore, we should drop it because
+ * the pagefault handling code may want to call printk().
+ */
+
+ console_unlock();
+ ret = copy_to_user(buf, con_buf_start, orig_count);
+ console_lock();
+
+ if (ret) {
+ read += (orig_count - ret);
+ ret = -EFAULT;
+ break;
+ }
+ buf += orig_count;
+ pos += orig_count;
+ read += orig_count;
+ count -= orig_count;
+ }
+ *ppos += read;
+ if (read)
+ ret = read;
+unlock_out:
+ console_unlock();
+ free_page((unsigned long) con_buf);
+ return ret;
+}
+
+static ssize_t
+vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ unsigned int currcons = iminor(inode);
+ struct vc_data *vc;
+ long pos;
+ long attr, size, written;
+ char *con_buf0;
+ int col, maxcol, viewed;
+ u16 *org0 = NULL, *org = NULL;
+ size_t ret;
+ char *con_buf;
+
+ con_buf = (char *) __get_free_page(GFP_KERNEL);
+ if (!con_buf)
+ return -ENOMEM;
+
+ pos = *ppos;
+
+ /* Select the proper current console and verify
+ * sanity of the situation under the console lock.
+ */
+ console_lock();
+
+ attr = (currcons & 128);
+ ret = -ENXIO;
+ vc = vcs_vc(inode, &viewed);
+ if (!vc)
+ goto unlock_out;
+
+ size = vcs_size(inode);
+ ret = -EINVAL;
+ if (pos < 0 || pos > size)
+ goto unlock_out;
+ if (count > size - pos)
+ count = size - pos;
+ written = 0;
+ while (count) {
+ long this_round = count;
+ size_t orig_count;
+ long p;
+
+ if (this_round > CON_BUF_SIZE)
+ this_round = CON_BUF_SIZE;
+
+ /* Temporarily drop the console lock so that we can read
+ * in the write data from userspace safely.
+ */
+ console_unlock();
+ ret = copy_from_user(con_buf, buf, this_round);
+ console_lock();
+
+ if (ret) {
+ this_round -= ret;
+ if (!this_round) {
+ /* Abort loop if no data were copied. Otherwise
+ * fail with -EFAULT.
+ */
+ if (written)
+ break;
+ ret = -EFAULT;
+ goto unlock_out;
+ }
+ }
+
+ /* The vcs_size might have changed while we slept to grab
+ * the user buffer, so recheck.
+ * Return data written up to now on failure.
+ */
+ size = vcs_size(inode);
+ if (size < 0) {
+ if (written)
+ break;
+ ret = size;
+ goto unlock_out;
+ }
+ if (pos >= size)
+ break;
+ if (this_round > size - pos)
+ this_round = size - pos;
+
+ /* OK, now actually push the write to the console
+ * under the lock using the local kernel buffer.
+ */
+
+ con_buf0 = con_buf;
+ orig_count = this_round;
+ maxcol = vc->vc_cols;
+ p = pos;
+ if (!attr) {
+ org0 = org = screen_pos(vc, p, viewed);
+ col = p % maxcol;
+ p += maxcol - col;
+
+ while (this_round > 0) {
+ unsigned char c = *con_buf0++;
+
+ this_round--;
+ vcs_scr_writew(vc,
+ (vcs_scr_readw(vc, org) & 0xff00) | c, org);
+ org++;
+ if (++col == maxcol) {
+ org = screen_pos(vc, p, viewed);
+ col = 0;
+ p += maxcol;
+ }
+ }
+ } else {
+ if (p < HEADER_SIZE) {
+ char header[HEADER_SIZE];
+
+ getconsxy(vc, header + 2);
+ while (p < HEADER_SIZE && this_round > 0) {
+ this_round--;
+ header[p++] = *con_buf0++;
+ }
+ if (!viewed)
+ putconsxy(vc, header + 2);
+ }
+ p -= HEADER_SIZE;
+ col = (p/2) % maxcol;
+ if (this_round > 0) {
+ org0 = org = screen_pos(vc, p/2, viewed);
+ if ((p & 1) && this_round > 0) {
+ char c;
+
+ this_round--;
+ c = *con_buf0++;
+#ifdef __BIG_ENDIAN
+ vcs_scr_writew(vc, c |
+ (vcs_scr_readw(vc, org) & 0xff00), org);
+#else
+ vcs_scr_writew(vc, (c << 8) |
+ (vcs_scr_readw(vc, org) & 0xff), org);
+#endif
+ org++;
+ p++;
+ if (++col == maxcol) {
+ org = screen_pos(vc, p/2, viewed);
+ col = 0;
+ }
+ }
+ p /= 2;
+ p += maxcol - col;
+ }
+ while (this_round > 1) {
+ unsigned short w;
+
+ w = get_unaligned(((unsigned short *)con_buf0));
+ vcs_scr_writew(vc, w, org++);
+ con_buf0 += 2;
+ this_round -= 2;
+ if (++col == maxcol) {
+ org = screen_pos(vc, p, viewed);
+ col = 0;
+ p += maxcol;
+ }
+ }
+ if (this_round > 0) {
+ unsigned char c;
+
+ c = *con_buf0++;
+#ifdef __BIG_ENDIAN
+ vcs_scr_writew(vc, (vcs_scr_readw(vc, org) & 0xff) | (c << 8), org);
+#else
+ vcs_scr_writew(vc, (vcs_scr_readw(vc, org) & 0xff00) | c, org);
+#endif
+ }
+ }
+ count -= orig_count;
+ written += orig_count;
+ buf += orig_count;
+ pos += orig_count;
+ if (org0)
+ update_region(vc, (unsigned long)(org0), org - org0);
+ }
+ *ppos += written;
+ ret = written;
+ if (written)
+ vcs_scr_updated(vc);
+
+unlock_out:
+ console_unlock();
+ free_page((unsigned long) con_buf);
+ return ret;
+}
+
+static unsigned int
+vcs_poll(struct file *file, poll_table *wait)
+{
+ struct vcs_poll_data *poll = vcs_poll_data_get(file);
+ int ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
+
+ if (poll) {
+ poll_wait(file, &poll->waitq, wait);
+ if (poll->seen_last_update)
+ ret = DEFAULT_POLLMASK;
+ }
+ return ret;
+}
+
+static int
+vcs_fasync(int fd, struct file *file, int on)
+{
+ struct vcs_poll_data *poll = file->private_data;
+
+ if (!poll) {
+ /* don't allocate anything if all we want is to disable fasync */
+ if (!on)
+ return 0;
+ poll = vcs_poll_data_get(file);
+ if (!poll)
+ return -ENOMEM;
+ }
+
+ return fasync_helper(fd, file, on, &poll->fasync);
+}
+
+static int
+vcs_open(struct inode *inode, struct file *filp)
+{
+ unsigned int currcons = iminor(inode) & 127;
+ int ret = 0;
+
+ tty_lock();
+ if(currcons && !vc_cons_allocated(currcons-1))
+ ret = -ENXIO;
+ tty_unlock();
+ return ret;
+}
+
+static int vcs_release(struct inode *inode, struct file *file)
+{
+ struct vcs_poll_data *poll = file->private_data;
+
+ if (poll)
+ vcs_poll_data_free(poll);
+ return 0;
+}
+
+static const struct file_operations vcs_fops = {
+ .llseek = vcs_lseek,
+ .read = vcs_read,
+ .write = vcs_write,
+ .poll = vcs_poll,
+ .fasync = vcs_fasync,
+ .open = vcs_open,
+ .release = vcs_release,
+};
+
+static struct class *vc_class;
+
+void vcs_make_sysfs(int index)
+{
+ device_create(vc_class, NULL, MKDEV(VCS_MAJOR, index + 1), NULL,
+ "vcs%u", index + 1);
+ device_create(vc_class, NULL, MKDEV(VCS_MAJOR, index + 129), NULL,
+ "vcsa%u", index + 1);
+}
+
+void vcs_remove_sysfs(int index)
+{
+ device_destroy(vc_class, MKDEV(VCS_MAJOR, index + 1));
+ device_destroy(vc_class, MKDEV(VCS_MAJOR, index + 129));
+}
+
+int __init vcs_init(void)
+{
+ unsigned int i;
+
+ if (register_chrdev(VCS_MAJOR, "vcs", &vcs_fops))
+ panic("unable to get major %d for vcs device", VCS_MAJOR);
+ vc_class = class_create(THIS_MODULE, "vc");
+
+ device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 0), NULL, "vcs");
+ device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 128), NULL, "vcsa");
+ for (i = 0; i < MIN_NR_CONSOLES; i++)
+ vcs_make_sysfs(i);
+ return 0;
+}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
new file mode 100644
index 00000000..b3915b7a
--- /dev/null
+++ b/drivers/tty/vt/vt.c
@@ -0,0 +1,4226 @@
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * Hopefully this will be a rather complete VT102 implementation.
+ *
+ * Beeping thanks to John T Kohl.
+ *
+ * Virtual Consoles, Screen Blanking, Screen Dumping, Color, Graphics
+ * Chars, and VT100 enhancements by Peter MacDonald.
+ *
+ * Copy and paste function by Andrew Haylett,
+ * some enhancements by Alessandro Rubini.
+ *
+ * Code to check for different video-cards mostly by Galen Hunt,
+ * <g-hunt@ee.utah.edu>
+ *
+ * Rudimentary ISO 10646/Unicode/UTF-8 character set support by
+ * Markus Kuhn, <mskuhn@immd4.informatik.uni-erlangen.de>.
+ *
+ * Dynamic allocation of consoles, aeb@cwi.nl, May 1994
+ * Resizing of consoles, aeb, 940926
+ *
+ * Code for xterm like mouse click reporting by Peter Orbaek 20-Jul-94
+ * <poe@daimi.aau.dk>
+ *
+ * User-defined bell sound, new setterm control sequences and printk
+ * redirection by Martin Mares <mj@k332.feld.cvut.cz> 19-Nov-95
+ *
+ * APM screenblank bug fixed Takashi Manabe <manabe@roy.dsl.tutics.tut.jp>
+ *
+ * Merge with the abstract console driver by Geert Uytterhoeven
+ * <geert@linux-m68k.org>, Jan 1997.
+ *
+ * Original m68k console driver modifications by
+ *
+ * - Arno Griffioen <arno@usn.nl>
+ * - David Carter <carter@cs.bris.ac.uk>
+ *
+ * The abstract console driver provides a generic interface for a text
+ * console. It supports VGA text mode, frame buffer based graphical consoles
+ * and special graphics processors that are only accessible through some
+ * registers (e.g. a TMS340x0 GSP).
+ *
+ * The interface to the hardware is specified using a special structure
+ * (struct consw) which contains function pointers to console operations
+ * (see <linux/console.h> for more information).
+ *
+ * Support for changeable cursor shape
+ * by Pavel Machek <pavel@atrey.karlin.mff.cuni.cz>, August 1997
+ *
+ * Ported to i386 and con_scrolldelta fixed
+ * by Emmanuel Marty <core@ggi-project.org>, April 1998
+ *
+ * Resurrected character buffers in videoram plus lots of other trickery
+ * by Martin Mares <mj@atrey.karlin.mff.cuni.cz>, July 1998
+ *
+ * Removed old-style timers, introduced console_timer, made timer
+ * deletion SMP-safe. 17Jun00, Andrew Morton
+ *
+ * Removed console_lock, enabled interrupts across all console operations
+ * 13 March 2001, Andrew Morton
+ *
+ * Fixed UTF-8 mode so alternate charset modes always work according
+ * to control sequences interpreted in do_con_trol function
+ * preserving backward VT100 semigraphics compatibility,
+ * malformed UTF sequences represented as sequences of replacement glyphs,
+ * original codes or '?' as a last resort if replacement glyph is undefined
+ * by Adam Tla/lka <atlka@pg.gda.pl>, Aug 2006
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kd.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+#include <linux/mm.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/vt_kern.h>
+#include <linux/selection.h>
+#include <linux/tiocl.h>
+#include <linux/kbd_kern.h>
+#include <linux/consolemap.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/pm.h>
+#include <linux/font.h>
+#include <linux/bitops.h>
+#include <linux/notifier.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <asm/system.h>
+#include <linux/uaccess.h>
+#include <linux/kdb.h>
+#include <linux/ctype.h>
+
+#define MAX_NR_CON_DRIVER 16
+
+#define CON_DRIVER_FLAG_MODULE 1
+#define CON_DRIVER_FLAG_INIT 2
+#define CON_DRIVER_FLAG_ATTR 4
+
+struct con_driver {
+ const struct consw *con;
+ const char *desc;
+ struct device *dev;
+ int node;
+ int first;
+ int last;
+ int flag;
+};
+
+static struct con_driver registered_con_driver[MAX_NR_CON_DRIVER];
+const struct consw *conswitchp;
+
+/* A bitmap for codes <32. A bit of 1 indicates that the code
+ * corresponding to that bit number invokes some special action
+ * (such as cursor movement) and should not be displayed as a
+ * glyph unless the disp_ctrl mode is explicitly enabled.
+ */
+#define CTRL_ACTION 0x0d00ff81
+#define CTRL_ALWAYS 0x0800f501 /* Cannot be overridden by disp_ctrl */
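+
+/*
+ * Illustrative decoding (not part of the original source): CTRL_ACTION
+ * (0x0d00ff81) has bits 0, 7, 8-15, 24, 26 and 27 set, i.e. NUL, BEL,
+ * BS, HT, LF, VT, FF, CR, SO, SI, CAN, SUB and ESC act as controls;
+ * the remaining codes below 32 may be shown as glyphs.
+ */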
+
+/*
+ * Here are the default bell parameters: 750 Hz, 1/8th of a second
+ */
+#define DEFAULT_BELL_PITCH 750
+#define DEFAULT_BELL_DURATION (HZ/8)
+
+struct vc vc_cons [MAX_NR_CONSOLES];
+
+#ifndef VT_SINGLE_DRIVER
+static const struct consw *con_driver_map[MAX_NR_CONSOLES];
+#endif
+
+static int con_open(struct tty_struct *, struct file *);
+static void vc_init(struct vc_data *vc, unsigned int rows,
+ unsigned int cols, int do_clear);
+static void gotoxy(struct vc_data *vc, int new_x, int new_y);
+static void save_cur(struct vc_data *vc);
+static void reset_terminal(struct vc_data *vc, int do_clear);
+static void con_flush_chars(struct tty_struct *tty);
+static int set_vesa_blanking(char __user *p);
+static void set_cursor(struct vc_data *vc);
+static void hide_cursor(struct vc_data *vc);
+static void console_callback(struct work_struct *ignored);
+static void blank_screen_t(unsigned long dummy);
+static void set_palette(struct vc_data *vc);
+
+static int printable; /* Is console ready for printing? */
+int default_utf8 = true;
+module_param(default_utf8, int, S_IRUGO | S_IWUSR);
+int global_cursor_default = -1;
+module_param(global_cursor_default, int, S_IRUGO | S_IWUSR);
+
+static int cur_default = CUR_DEFAULT;
+module_param(cur_default, int, S_IRUGO | S_IWUSR);
+
+/*
+ * ignore_poke: don't unblank the screen when things are typed. This is
+ * mainly for the privacy of braille terminal users.
+ */
+static int ignore_poke;
+
+int do_poke_blanked_console;
+int console_blanked;
+
+static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
+static int vesa_off_interval;
+static int blankinterval = 10*60;
+core_param(consoleblank, blankinterval, int, 0444);
+
+static DECLARE_WORK(console_work, console_callback);
+
+/*
+ * fg_console is the current virtual console,
+ * last_console is the last used one,
+ * want_console is the console we want to switch to,
+ * saved_* variants are for save/restore around kernel debugger enter/leave
+ */
+int fg_console;
+int last_console;
+int want_console = -1;
+static int saved_fg_console;
+static int saved_last_console;
+static int saved_want_console;
+static int saved_vc_mode;
+static int saved_console_blanked;
+
+/*
+ * For each existing display, we have a pointer to console currently visible
+ * on that display, allowing consoles other than fg_console to be refreshed
+ * appropriately. Unless the low-level driver supplies its own display_fg
+ * variable, we use this one for the "master display".
+ */
+static struct vc_data *master_display_fg;
+
+/*
+ * Unfortunately, we need to delay tty echo when we're currently writing to the
+ * console since the code is (and always was) not re-entrant, so we schedule
+ * all flip requests to process context with schedule-task() and run it from
+ * console_callback().
+ */
+
+/*
+ * For the same reason, we defer scrollback to the console callback.
+ */
+static int scrollback_delta;
+
+/*
+ * Hook so that the power management routines can (un)blank
+ * the console on our behalf.
+ */
+int (*console_blank_hook)(int);
+
+static DEFINE_TIMER(console_timer, blank_screen_t, 0, 0);
+static int blank_state;
+static int blank_timer_expired;
+enum {
+ blank_off = 0,
+ blank_normal_wait,
+ blank_vesa_wait,
+};
+
+/*
+ * /sys/class/tty/tty0/
+ *
+ * the attribute 'active' contains the name of the current vc
+ * console and it supports poll() to detect vc switches
+ */
+static struct device *tty0dev;
+
+/*
+ * Notifier list for console events.
+ */
+static ATOMIC_NOTIFIER_HEAD(vt_notifier_list);
+
+int register_vt_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&vt_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(register_vt_notifier);
+
+int unregister_vt_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&vt_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_vt_notifier);
+
+static void notify_write(struct vc_data *vc, unsigned int unicode)
+{
+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
+ atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
+}
+
+static void notify_update(struct vc_data *vc)
+{
+ struct vt_notifier_param param = { .vc = vc };
+ atomic_notifier_call_chain(&vt_notifier_list, VT_UPDATE, &param);
+}
+/*
+ * Low-Level Functions
+ */
+
+#define IS_FG(vc) ((vc)->vc_num == fg_console)
+
+#ifdef VT_BUF_VRAM_ONLY
+#define DO_UPDATE(vc) 0
+#else
+#define DO_UPDATE(vc) (CON_IS_VISIBLE(vc) && !console_blanked)
+#endif
+
+static inline unsigned short *screenpos(struct vc_data *vc, int offset, int viewed)
+{
+ unsigned short *p;
+
+ if (!viewed)
+ p = (unsigned short *)(vc->vc_origin + offset);
+ else if (!vc->vc_sw->con_screen_pos)
+ p = (unsigned short *)(vc->vc_visible_origin + offset);
+ else
+ p = vc->vc_sw->con_screen_pos(vc, offset);
+ return p;
+}
+
+/* Called from the keyboard irq path.. */
+static inline void scrolldelta(int lines)
+{
+ /* FIXME */
+ /* scrolldelta needs some kind of consistency lock, but the BKL was
+ and still is not protecting versus the scheduled back end */
+ scrollback_delta += lines;
+ schedule_console_callback();
+}
+
+void schedule_console_callback(void)
+{
+ schedule_work(&console_work);
+}
+
+static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
+{
+ unsigned short *d, *s;
+
+ if (t+nr >= b)
+ nr = b - t - 1;
+ if (b > vc->vc_rows || t >= b || nr < 1)
+ return;
+ if (CON_IS_VISIBLE(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_UP, nr))
+ return;
+ d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
+ s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr));
+ scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
+ scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_video_erase_char,
+ vc->vc_size_row * nr);
+}
+
+static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
+{
+ unsigned short *s;
+ unsigned int step;
+
+ if (t+nr >= b)
+ nr = b - t - 1;
+ if (b > vc->vc_rows || t >= b || nr < 1)
+ return;
+ if (CON_IS_VISIBLE(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_DOWN, nr))
+ return;
+ s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
+ step = vc->vc_cols * nr;
+ scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row);
+ scr_memsetw(s, vc->vc_video_erase_char, 2 * step);
+}
+
+static void do_update_region(struct vc_data *vc, unsigned long start, int count)
+{
+#ifndef VT_BUF_VRAM_ONLY
+ unsigned int xx, yy, offset;
+ u16 *p;
+
+ p = (u16 *) start;
+ if (!vc->vc_sw->con_getxy) {
+ offset = (start - vc->vc_origin) / 2;
+ xx = offset % vc->vc_cols;
+ yy = offset / vc->vc_cols;
+ } else {
+ int nxx, nyy;
+ start = vc->vc_sw->con_getxy(vc, start, &nxx, &nyy);
+ xx = nxx; yy = nyy;
+ }
+ for(;;) {
+ u16 attrib = scr_readw(p) & 0xff00;
+ int startx = xx;
+ u16 *q = p;
+ while (xx < vc->vc_cols && count) {
+ if (attrib != (scr_readw(p) & 0xff00)) {
+ if (p > q)
+ vc->vc_sw->con_putcs(vc, q, p-q, yy, startx);
+ startx = xx;
+ q = p;
+ attrib = scr_readw(p) & 0xff00;
+ }
+ p++;
+ xx++;
+ count--;
+ }
+ if (p > q)
+ vc->vc_sw->con_putcs(vc, q, p-q, yy, startx);
+ if (!count)
+ break;
+ xx = 0;
+ yy++;
+ if (vc->vc_sw->con_getxy) {
+ p = (u16 *)start;
+ start = vc->vc_sw->con_getxy(vc, start, NULL, NULL);
+ }
+ }
+#endif
+}
+
+void update_region(struct vc_data *vc, unsigned long start, int count)
+{
+ WARN_CONSOLE_UNLOCKED();
+
+ if (DO_UPDATE(vc)) {
+ hide_cursor(vc);
+ do_update_region(vc, start, count);
+ set_cursor(vc);
+ }
+}
+
+/* Structure of attributes is hardware-dependent */
+
+static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink,
+ u8 _underline, u8 _reverse, u8 _italic)
+{
+ if (vc->vc_sw->con_build_attr)
+ return vc->vc_sw->con_build_attr(vc, _color, _intensity,
+ _blink, _underline, _reverse, _italic);
+
+#ifndef VT_BUF_VRAM_ONLY
+/*
+ * ++roman: I completely changed the attribute format for monochrome
+ * mode (!can_do_color). The formerly used MDA (monochrome display
+ * adapter) format didn't allow the combination of certain effects.
+ * Now the attribute is just a bit vector:
+ * Bit 0..1: intensity (0..2)
+ * Bit 2 : underline
+ * Bit 3 : reverse
+ * Bit 7 : blink
+ */
+ {
+ u8 a = _color;
+ if (!vc->vc_can_do_color)
+ return _intensity |
+ (_italic ? 2 : 0) |
+ (_underline ? 4 : 0) |
+ (_reverse ? 8 : 0) |
+ (_blink ? 0x80 : 0);
+ if (_italic)
+ a = (a & 0xF0) | vc->vc_itcolor;
+ else if (_underline)
+ a = (a & 0xf0) | vc->vc_ulcolor;
+ else if (_intensity == 0)
+ a = (a & 0xf0) | vc->vc_halfcolor;
+ if (_reverse)
+ a = ((a) & 0x88) | ((((a) >> 4) | ((a) << 4)) & 0x77);
+ if (_blink)
+ a ^= 0x80;
+ if (_intensity == 2)
+ a ^= 0x08;
+ if (vc->vc_hi_font_mask == 0x100)
+ a <<= 1;
+ return a;
+ }
+#else
+ return 0;
+#endif
+}
+
+static void update_attr(struct vc_data *vc)
+{
+ vc->vc_attr = build_attr(vc, vc->vc_color, vc->vc_intensity,
+ vc->vc_blink, vc->vc_underline,
+ vc->vc_reverse ^ vc->vc_decscnm, vc->vc_italic);
+ vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm, 0) << 8) | ' ';
+}
+
+/* Note: inverting the screen twice should revert to the original state */
+void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
+{
+ unsigned short *p;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ count /= 2;
+ p = screenpos(vc, offset, viewed);
+ if (vc->vc_sw->con_invert_region)
+ vc->vc_sw->con_invert_region(vc, p, count);
+#ifndef VT_BUF_VRAM_ONLY
+ else {
+ u16 *q = p;
+ int cnt = count;
+ u16 a;
+
+ if (!vc->vc_can_do_color) {
+ while (cnt--) {
+ a = scr_readw(q);
+ a ^= 0x0800;
+ scr_writew(a, q);
+ q++;
+ }
+ } else if (vc->vc_hi_font_mask == 0x100) {
+ while (cnt--) {
+ a = scr_readw(q);
+ a = ((a) & 0x11ff) | (((a) & 0xe000) >> 4) | (((a) & 0x0e00) << 4);
+ scr_writew(a, q);
+ q++;
+ }
+ } else {
+ while (cnt--) {
+ a = scr_readw(q);
+ a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) | (((a) & 0x0700) << 4);
+ scr_writew(a, q);
+ q++;
+ }
+ }
+ }
+#endif
+ if (DO_UPDATE(vc))
+ do_update_region(vc, (unsigned long) p, count);
+}
+
+/* used by selection: complement pointer position */
+void complement_pos(struct vc_data *vc, int offset)
+{
+ static int old_offset = -1;
+ static unsigned short old;
+ static unsigned short oldx, oldy;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ if (old_offset != -1 && old_offset >= 0 &&
+ old_offset < vc->vc_screenbuf_size) {
+ scr_writew(old, screenpos(vc, old_offset, 1));
+ if (DO_UPDATE(vc))
+ vc->vc_sw->con_putc(vc, old, oldy, oldx);
+ }
+
+ old_offset = offset;
+
+ if (offset != -1 && offset >= 0 &&
+ offset < vc->vc_screenbuf_size) {
+ unsigned short new;
+ unsigned short *p;
+ p = screenpos(vc, offset, 1);
+ old = scr_readw(p);
+ new = old ^ vc->vc_complement_mask;
+ scr_writew(new, p);
+ if (DO_UPDATE(vc)) {
+ oldx = (offset >> 1) % vc->vc_cols;
+ oldy = (offset >> 1) / vc->vc_cols;
+ vc->vc_sw->con_putc(vc, new, oldy, oldx);
+ }
+ }
+
+}
+
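+/*
+ * Insert nr blank cells at the cursor: shift the rest of the line right
+ * in the screen buffer, fill the gap with the erase character, and
+ * mirror the change on a visible console via con_bmove()/con_putc().
+ */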
+static void insert_char(struct vc_data *vc, unsigned int nr)
+{
+ unsigned short *p, *q = (unsigned short *)vc->vc_pos;
+
+ p = q + vc->vc_cols - nr - vc->vc_x;
+ while (--p >= q)
+ scr_writew(scr_readw(p), p + nr);
+ scr_memsetw(q, vc->vc_video_erase_char, nr * 2);
+ vc->vc_need_wrap = 0;
+ if (DO_UPDATE(vc)) {
+ unsigned short oldattr = vc->vc_attr;
+ vc->vc_sw->con_bmove(vc, vc->vc_y, vc->vc_x, vc->vc_y, vc->vc_x + nr, 1,
+ vc->vc_cols - vc->vc_x - nr);
+ vc->vc_attr = vc->vc_video_erase_char >> 8;
+ while (nr--)
+ vc->vc_sw->con_putc(vc, vc->vc_video_erase_char, vc->vc_y, vc->vc_x + nr);
+ vc->vc_attr = oldattr;
+ }
+}
+
+static void delete_char(struct vc_data *vc, unsigned int nr)
+{
+ unsigned int i = vc->vc_x;
+ unsigned short *p = (unsigned short *)vc->vc_pos;
+
+ while (++i <= vc->vc_cols - nr) {
+ scr_writew(scr_readw(p+nr), p);
+ p++;
+ }
+ scr_memsetw(p, vc->vc_video_erase_char, nr * 2);
+ vc->vc_need_wrap = 0;
+ if (DO_UPDATE(vc)) {
+ unsigned short oldattr = vc->vc_attr;
+ vc->vc_sw->con_bmove(vc, vc->vc_y, vc->vc_x + nr, vc->vc_y, vc->vc_x, 1,
+ vc->vc_cols - vc->vc_x - nr);
+ vc->vc_attr = vc->vc_video_erase_char >> 8;
+ while (nr--)
+ vc->vc_sw->con_putc(vc, vc->vc_video_erase_char, vc->vc_y,
+ vc->vc_cols - 1 - nr);
+ vc->vc_attr = oldattr;
+ }
+}
+
+static int softcursor_original;
+
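+/*
+ * Draw the software cursor by rewriting the attribute of the cell under
+ * the cursor.  vc_cursor_type packs the CSI ? c parameters: bit 4
+ * enables the software cursor, bits 16..23 are ORed into the attribute,
+ * bits 8..15 are XORed, and bits 5/6 ask for the background to differ
+ * from the original background / from the foreground.
+ */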
+static void add_softcursor(struct vc_data *vc)
+{
+ int i = scr_readw((u16 *) vc->vc_pos);
+ u32 type = vc->vc_cursor_type;
+
+ if (! (type & 0x10)) return;
+ if (softcursor_original != -1) return;
+ softcursor_original = i;
+ i |= ((type >> 8) & 0xff00 );
+ i ^= ((type) & 0xff00 );
+ if ((type & 0x20) && ((softcursor_original & 0x7000) == (i & 0x7000))) i ^= 0x7000;
+ if ((type & 0x40) && ((i & 0x700) == ((i & 0x7000) >> 4))) i ^= 0x0700;
+ scr_writew(i, (u16 *) vc->vc_pos);
+ if (DO_UPDATE(vc))
+ vc->vc_sw->con_putc(vc, i, vc->vc_y, vc->vc_x);
+}
+
+static void hide_softcursor(struct vc_data *vc)
+{
+ if (softcursor_original != -1) {
+ scr_writew(softcursor_original, (u16 *)vc->vc_pos);
+ if (DO_UPDATE(vc))
+ vc->vc_sw->con_putc(vc, softcursor_original,
+ vc->vc_y, vc->vc_x);
+ softcursor_original = -1;
+ }
+}
+
+static void hide_cursor(struct vc_data *vc)
+{
+ if (vc == sel_cons)
+ clear_selection();
+ vc->vc_sw->con_cursor(vc, CM_ERASE);
+ hide_softcursor(vc);
+}
+
+static void set_cursor(struct vc_data *vc)
+{
+ if (!IS_FG(vc) || console_blanked ||
+ vc->vc_mode == KD_GRAPHICS)
+ return;
+ if (vc->vc_deccm) {
+ if (vc == sel_cons)
+ clear_selection();
+ add_softcursor(vc);
+ if ((vc->vc_cursor_type & 0x0f) != 1)
+ vc->vc_sw->con_cursor(vc, CM_DRAW);
+ } else
+ hide_cursor(vc);
+}
+
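+/*
+ * Let a visible console's driver place vc_origin (typically into its
+ * own video memory) via con_set_origin(); fall back to the software
+ * screen buffer otherwise, then recompute the derived pointers.
+ */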
+static void set_origin(struct vc_data *vc)
+{
+ WARN_CONSOLE_UNLOCKED();
+
+ if (!CON_IS_VISIBLE(vc) ||
+ !vc->vc_sw->con_set_origin ||
+ !vc->vc_sw->con_set_origin(vc))
+ vc->vc_origin = (unsigned long)vc->vc_screenbuf;
+ vc->vc_visible_origin = vc->vc_origin;
+ vc->vc_scr_end = vc->vc_origin + vc->vc_screenbuf_size;
+ vc->vc_pos = vc->vc_origin + vc->vc_size_row * vc->vc_y + 2 * vc->vc_x;
+}
+
+static inline void save_screen(struct vc_data *vc)
+{
+ WARN_CONSOLE_UNLOCKED();
+
+ if (vc->vc_sw->con_save_screen)
+ vc->vc_sw->con_save_screen(vc);
+}
+
+/*
+ * Redrawing of screen
+ */
+
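+/*
+ * Keep the character (and high-font) bits of every cell but replace
+ * the attribute bits with those of the current erase character.
+ */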
+static void clear_buffer_attributes(struct vc_data *vc)
+{
+ unsigned short *p = (unsigned short *)vc->vc_origin;
+ int count = vc->vc_screenbuf_size / 2;
+ int mask = vc->vc_hi_font_mask | 0xff;
+
+ for (; count > 0; count--, p++) {
+ scr_writew((scr_readw(p)&mask) | (vc->vc_video_erase_char & ~mask), p);
+ }
+}
+
+void redraw_screen(struct vc_data *vc, int is_switch)
+{
+ int redraw = 0;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ if (!vc) {
+ /* strange ... */
+ /* printk("redraw_screen: tty %d not allocated ??\n", new_console+1); */
+ return;
+ }
+
+ if (is_switch) {
+ struct vc_data *old_vc = vc_cons[fg_console].d;
+ if (old_vc == vc)
+ return;
+ if (!CON_IS_VISIBLE(vc))
+ redraw = 1;
+ *vc->vc_display_fg = vc;
+ fg_console = vc->vc_num;
+ hide_cursor(old_vc);
+ if (!CON_IS_VISIBLE(old_vc)) {
+ save_screen(old_vc);
+ set_origin(old_vc);
+ }
+ if (tty0dev)
+ sysfs_notify(&tty0dev->kobj, NULL, "active");
+ } else {
+ hide_cursor(vc);
+ redraw = 1;
+ }
+
+ if (redraw) {
+ int update;
+ int old_was_color = vc->vc_can_do_color;
+
+ set_origin(vc);
+ update = vc->vc_sw->con_switch(vc);
+ set_palette(vc);
+ /*
+ * If console changed from mono<->color, the best we can do
+ * is to clear the buffer attributes. As it currently stands,
+ * rebuilding new attributes from the old buffer is not doable
+ * without overly complex code.
+ */
+ if (old_was_color != vc->vc_can_do_color) {
+ update_attr(vc);
+ clear_buffer_attributes(vc);
+ }
+
+ /* Forcibly update if we're panicking */
+ if ((update && vc->vc_mode != KD_GRAPHICS) ||
+ vt_force_oops_output(vc))
+ do_update_region(vc, vc->vc_origin, vc->vc_screenbuf_size / 2);
+ }
+ set_cursor(vc);
+ if (is_switch) {
+ set_leds();
+ compute_shiftstate();
+ notify_update(vc);
+ }
+}
+
+/*
+ * Allocation, freeing and resizing of VTs.
+ */
+
+int vc_cons_allocated(unsigned int i)
+{
+ return (i < MAX_NR_CONSOLES && vc_cons[i].d);
+}
+
+static void visual_init(struct vc_data *vc, int num, int init)
+{
+ /* ++Geert: vc->vc_sw->con_init determines console size */
+ if (vc->vc_sw)
+ module_put(vc->vc_sw->owner);
+ vc->vc_sw = conswitchp;
+#ifndef VT_SINGLE_DRIVER
+ if (con_driver_map[num])
+ vc->vc_sw = con_driver_map[num];
+#endif
+ __module_get(vc->vc_sw->owner);
+ vc->vc_num = num;
+ vc->vc_display_fg = &master_display_fg;
+ vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
+ vc->vc_uni_pagedir = 0;
+ vc->vc_hi_font_mask = 0;
+ vc->vc_complement_mask = 0;
+ vc->vc_can_do_color = 0;
+ vc->vc_panic_force_write = false;
+ vc->vc_sw->con_init(vc, init);
+ if (!vc->vc_complement_mask)
+ vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
+ vc->vc_s_complement_mask = vc->vc_complement_mask;
+ vc->vc_size_row = vc->vc_cols << 1;
+ vc->vc_screenbuf_size = vc->vc_rows * vc->vc_size_row;
+}
+
+int vc_allocate(unsigned int currcons) /* return 0 on success */
+{
+ WARN_CONSOLE_UNLOCKED();
+
+ if (currcons >= MAX_NR_CONSOLES)
+ return -ENXIO;
+ if (!vc_cons[currcons].d) {
+ struct vc_data *vc;
+ struct vt_notifier_param param;
+
+ /* prevent users from taking too much memory */
+ if (currcons >= MAX_NR_USER_CONSOLES && !capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ /* due to the granularity of kmalloc, we waste some memory here */
+ /* the alloc is done in two steps, to optimize the common situation
+ of a 25x80 console (structsize=216, screenbuf_size=4000) */
+ /* although the numbers above have long been out of date, the
+ point still stands and the comment keeps its value, if only as
+ a historical artifact. --mj, July 1998 */
+ param.vc = vc = kzalloc(sizeof(struct vc_data), GFP_KERNEL);
+ if (!vc)
+ return -ENOMEM;
+ vc_cons[currcons].d = vc;
+ tty_port_init(&vc->port);
+ INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
+ visual_init(vc, currcons, 1);
+ if (!*vc->vc_uni_pagedir_loc)
+ con_set_default_unimap(vc);
+ vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
+ if (!vc->vc_screenbuf) {
+ kfree(vc);
+ vc_cons[currcons].d = NULL;
+ return -ENOMEM;
+ }
+
+ /* If no drivers have overridden us and the user didn't pass a
+ boot option, default to displaying the cursor */
+ if (global_cursor_default == -1)
+ global_cursor_default = 1;
+
+ vc_init(vc, vc->vc_rows, vc->vc_cols, 1);
+ vcs_make_sysfs(currcons);
+ atomic_notifier_call_chain(&vt_notifier_list, VT_ALLOCATE, &param);
+ }
+ return 0;
+}
+
+static inline int resize_screen(struct vc_data *vc, int width, int height,
+ int user)
+{
+ /* Resize the resolution of the display adapter */
+ int err = 0;
+
+ if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_resize)
+ err = vc->vc_sw->con_resize(vc, width, height, user);
+
+ return err;
+}
+
+/*
+ * Change # of rows and columns (0 means unchanged/the size of fg_console)
+ * [this is to be used together with some user program
+ * like resize that changes the hardware videomode]
+ */
+#define VC_RESIZE_MAXCOL (32767)
+#define VC_RESIZE_MAXROW (32767)
+
+/**
+ * vc_do_resize - resizing method for the tty
+ * @tty: tty being resized
+ * @vc: virtual console private data
+ * @cols: columns
+ * @lines: lines
+ *
+ * Resize a virtual console, clipping according to the actual constraints.
+ * If the caller passes a tty structure then update the termios winsize
+ * information and perform any necessary signal handling.
+ *
+ * Caller must hold the console semaphore. Takes the termios mutex and
+ * ctrl_lock of the tty IFF a tty is passed.
+ */
+
+static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
+ unsigned int cols, unsigned int lines)
+{
+ unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0;
+ unsigned long end;
+ unsigned int old_rows, old_row_size;
+ unsigned int new_cols, new_rows, new_row_size, new_screen_size;
+ unsigned int user;
+ unsigned short *newscreen;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ if (!vc)
+ return -ENXIO;
+
+ user = vc->vc_resize_user;
+ vc->vc_resize_user = 0;
+
+ if (cols > VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW)
+ return -EINVAL;
+
+ new_cols = (cols ? cols : vc->vc_cols);
+ new_rows = (lines ? lines : vc->vc_rows);
+ new_row_size = new_cols << 1;
+ new_screen_size = new_row_size * new_rows;
+
+ if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
+ return 0;
+
+ newscreen = kmalloc(new_screen_size, GFP_USER);
+ if (!newscreen)
+ return -ENOMEM;
+
+ old_rows = vc->vc_rows;
+ old_row_size = vc->vc_size_row;
+
+ err = resize_screen(vc, new_cols, new_rows, user);
+ if (err) {
+ kfree(newscreen);
+ return err;
+ }
+
+ vc->vc_rows = new_rows;
+ vc->vc_cols = new_cols;
+ vc->vc_size_row = new_row_size;
+ vc->vc_screenbuf_size = new_screen_size;
+
+ rlth = min(old_row_size, new_row_size);
+ rrem = new_row_size - rlth;
+ old_origin = vc->vc_origin;
+ new_origin = (long) newscreen;
+ new_scr_end = new_origin + new_screen_size;
+
+ if (vc->vc_y > new_rows) {
+ if (old_rows - vc->vc_y < new_rows) {
+ /*
+ * Cursor near the bottom, copy contents from the
+ * bottom of buffer
+ */
+ old_origin += (old_rows - new_rows) * old_row_size;
+ } else {
+ /*
+ * Cursor is in no man's land, copy 1/2 screenful
+ * from the top and bottom of cursor position
+ */
+ old_origin += (vc->vc_y - new_rows/2) * old_row_size;
+ }
+ }
+
+ end = old_origin + old_row_size * min(old_rows, new_rows);
+
+ update_attr(vc);
+
+ while (old_origin < end) {
+ scr_memcpyw((unsigned short *) new_origin,
+ (unsigned short *) old_origin, rlth);
+ if (rrem)
+ scr_memsetw((void *)(new_origin + rlth),
+ vc->vc_video_erase_char, rrem);
+ old_origin += old_row_size;
+ new_origin += new_row_size;
+ }
+ if (new_scr_end > new_origin)
+ scr_memsetw((void *)new_origin, vc->vc_video_erase_char,
+ new_scr_end - new_origin);
+ kfree(vc->vc_screenbuf);
+ vc->vc_screenbuf = newscreen;
+ vc->vc_screenbuf_size = new_screen_size;
+ set_origin(vc);
+
+ /* do part of a reset_terminal() */
+ vc->vc_top = 0;
+ vc->vc_bottom = vc->vc_rows;
+ gotoxy(vc, vc->vc_x, vc->vc_y);
+ save_cur(vc);
+
+ if (tty) {
+ /* Rewrite the requested winsize data with the actual
+ resulting sizes */
+ struct winsize ws;
+ memset(&ws, 0, sizeof(ws));
+ ws.ws_row = vc->vc_rows;
+ ws.ws_col = vc->vc_cols;
+ ws.ws_ypixel = vc->vc_scan_lines;
+ tty_do_resize(tty, &ws);
+ }
+
+ if (CON_IS_VISIBLE(vc))
+ update_screen(vc);
+ vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
+ return err;
+}
+
+/**
+ * vc_resize - resize a VT
+ * @vc: virtual console
+ * @cols: columns
+ * @rows: rows
+ *
+ * Resize a virtual console as seen from the console end of things. We
+ * use the common vc_do_resize() method to update the structures. The
+ * caller must hold the console sem to protect console internals and
+ * vc->port.tty.
+ */
+
+int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows)
+{
+ return vc_do_resize(vc->port.tty, vc, cols, rows);
+}
+
+/**
+ * vt_resize - resize a VT
+ * @tty: tty to resize
+ * @ws: winsize attributes
+ *
+ * Resize a virtual terminal. This is called by the tty layer as we
+ * register our own handler for resizing. The shared helper,
+ * vc_do_resize(), does all the actual work.
+ *
+ * Takes the console sem; the called methods then take the tty
+ * termios_mutex and the tty ctrl_lock, in that order.
+ */
+static int vt_resize(struct tty_struct *tty, struct winsize *ws)
+{
+ struct vc_data *vc = tty->driver_data;
+ int ret;
+
+ console_lock();
+ ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row);
+ console_unlock();
+ return ret;
+}
+
+void vc_deallocate(unsigned int currcons)
+{
+ WARN_CONSOLE_UNLOCKED();
+
+ if (vc_cons_allocated(currcons)) {
+ struct vc_data *vc = vc_cons[currcons].d;
+ struct vt_notifier_param param = { .vc = vc };
+
+ atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param);
+ vcs_remove_sysfs(currcons);
+ vc->vc_sw->con_deinit(vc);
+ put_pid(vc->vt_pid);
+ module_put(vc->vc_sw->owner);
+ kfree(vc->vc_screenbuf);
+ if (currcons >= MIN_NR_CONSOLES)
+ kfree(vc);
+ vc_cons[currcons].d = NULL;
+ }
+}
+
+/*
+ * VT102 emulator
+ */
+
+#define set_kbd(vc, x) set_vc_kbd_mode(kbd_table + (vc)->vc_num, (x))
+#define clr_kbd(vc, x) clr_vc_kbd_mode(kbd_table + (vc)->vc_num, (x))
+#define is_kbd(vc, x) vc_kbd_mode(kbd_table + (vc)->vc_num, (x))
+
+#define decarm VC_REPEAT
+#define decckm VC_CKMODE
+#define kbdapplic VC_APPLIC
+#define lnm VC_CRLF
+
+/*
+ * this is what the terminal answers to a ESC-Z or csi0c query.
+ */
+#define VT100ID "\033[?1;2c"
+#define VT102ID "\033[?6c"
+
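+/*
+ * Map ANSI colour numbers (red = 1, blue = 4) onto the VGA attribute
+ * ordering (blue = 1, red = 4); entries 8-15 are the bright variants.
+ */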
+unsigned char color_table[] = { 0, 4, 2, 6, 1, 5, 3, 7,
+ 8,12,10,14, 9,13,11,15 };
+
+/* the default colour table, for VGA+ colour systems */
+int default_red[] = {0x00,0xaa,0x00,0xaa,0x00,0xaa,0x00,0xaa,
+ 0x55,0xff,0x55,0xff,0x55,0xff,0x55,0xff};
+int default_grn[] = {0x00,0x00,0xaa,0x55,0x00,0x00,0xaa,0xaa,
+ 0x55,0x55,0xff,0xff,0x55,0x55,0xff,0xff};
+int default_blu[] = {0x00,0x00,0x00,0x00,0xaa,0xaa,0xaa,0xaa,
+ 0x55,0x55,0x55,0x55,0xff,0xff,0xff,0xff};
+
+module_param_array(default_red, int, NULL, S_IRUGO | S_IWUSR);
+module_param_array(default_grn, int, NULL, S_IRUGO | S_IWUSR);
+module_param_array(default_blu, int, NULL, S_IRUGO | S_IWUSR);
+
+/*
+ * gotoxy() must verify all boundaries, because the arguments
+ * might also be negative. If the given position is out of
+ * bounds, the cursor is placed at the nearest margin.
+ */
+static void gotoxy(struct vc_data *vc, int new_x, int new_y)
+{
+ int min_y, max_y;
+
+ if (new_x < 0)
+ vc->vc_x = 0;
+ else {
+ if (new_x >= vc->vc_cols)
+ vc->vc_x = vc->vc_cols - 1;
+ else
+ vc->vc_x = new_x;
+ }
+
+ if (vc->vc_decom) {
+ min_y = vc->vc_top;
+ max_y = vc->vc_bottom;
+ } else {
+ min_y = 0;
+ max_y = vc->vc_rows;
+ }
+ if (new_y < min_y)
+ vc->vc_y = min_y;
+ else if (new_y >= max_y)
+ vc->vc_y = max_y - 1;
+ else
+ vc->vc_y = new_y;
+ vc->vc_pos = vc->vc_origin + vc->vc_y * vc->vc_size_row + (vc->vc_x<<1);
+ vc->vc_need_wrap = 0;
+}
+
+/* for absolute user moves, when decom is set */
+static void gotoxay(struct vc_data *vc, int new_x, int new_y)
+{
+ gotoxy(vc, new_x, vc->vc_decom ? (vc->vc_top + new_y) : new_y);
+}
+
+void scrollback(struct vc_data *vc, int lines)
+{
+ if (!lines)
+ lines = vc->vc_rows / 2;
+ scrolldelta(-lines);
+}
+
+void scrollfront(struct vc_data *vc, int lines)
+{
+ if (!lines)
+ lines = vc->vc_rows / 2;
+ scrolldelta(lines);
+}
+
+static void lf(struct vc_data *vc)
+{
+ /* don't scroll if above bottom of scrolling region, or
+ * if below scrolling region
+ */
+ if (vc->vc_y + 1 == vc->vc_bottom)
+ scrup(vc, vc->vc_top, vc->vc_bottom, 1);
+ else if (vc->vc_y < vc->vc_rows - 1) {
+ vc->vc_y++;
+ vc->vc_pos += vc->vc_size_row;
+ }
+ vc->vc_need_wrap = 0;
+ notify_write(vc, '\n');
+}
+
+static void ri(struct vc_data *vc)
+{
+ /* don't scroll if below top of scrolling region, or
+ * if above scrolling region
+ */
+ if (vc->vc_y == vc->vc_top)
+ scrdown(vc, vc->vc_top, vc->vc_bottom, 1);
+ else if (vc->vc_y > 0) {
+ vc->vc_y--;
+ vc->vc_pos -= vc->vc_size_row;
+ }
+ vc->vc_need_wrap = 0;
+}
+
+static inline void cr(struct vc_data *vc)
+{
+ vc->vc_pos -= vc->vc_x << 1;
+ vc->vc_need_wrap = vc->vc_x = 0;
+ notify_write(vc, '\r');
+}
+
+static inline void bs(struct vc_data *vc)
+{
+ if (vc->vc_x) {
+ vc->vc_pos -= 2;
+ vc->vc_x--;
+ vc->vc_need_wrap = 0;
+ notify_write(vc, '\b');
+ }
+}
+
+static inline void del(struct vc_data *vc)
+{
+ /* ignored */
+}
+
+static void csi_J(struct vc_data *vc, int vpar)
+{
+ unsigned int count;
+ unsigned short * start;
+
+ switch (vpar) {
+ case 0: /* erase from cursor to end of display */
+ count = (vc->vc_scr_end - vc->vc_pos) >> 1;
+ start = (unsigned short *)vc->vc_pos;
+ if (DO_UPDATE(vc)) {
+ /* do in two stages */
+ vc->vc_sw->con_clear(vc, vc->vc_y, vc->vc_x, 1,
+ vc->vc_cols - vc->vc_x);
+ vc->vc_sw->con_clear(vc, vc->vc_y + 1, 0,
+ vc->vc_rows - vc->vc_y - 1,
+ vc->vc_cols);
+ }
+ break;
+ case 1: /* erase from start to cursor */
+ count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1;
+ start = (unsigned short *)vc->vc_origin;
+ if (DO_UPDATE(vc)) {
+ /* do in two stages */
+ vc->vc_sw->con_clear(vc, 0, 0, vc->vc_y,
+ vc->vc_cols);
+ vc->vc_sw->con_clear(vc, vc->vc_y, 0, 1,
+ vc->vc_x + 1);
+ }
+ break;
+ case 3: /* erase scroll-back buffer (and whole display) */
+ scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
+ vc->vc_screenbuf_size >> 1);
+ set_origin(vc);
+ if (CON_IS_VISIBLE(vc))
+ update_screen(vc);
+ /* fall through */
+ case 2: /* erase whole display */
+ count = vc->vc_cols * vc->vc_rows;
+ start = (unsigned short *)vc->vc_origin;
+ if (DO_UPDATE(vc))
+ vc->vc_sw->con_clear(vc, 0, 0,
+ vc->vc_rows,
+ vc->vc_cols);
+ break;
+ default:
+ return;
+ }
+ scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
+ vc->vc_need_wrap = 0;
+}
+
+static void csi_K(struct vc_data *vc, int vpar)
+{
+ unsigned int count;
+ unsigned short * start;
+
+ switch (vpar) {
+ case 0: /* erase from cursor to end of line */
+ count = vc->vc_cols - vc->vc_x;
+ start = (unsigned short *)vc->vc_pos;
+ if (DO_UPDATE(vc))
+ vc->vc_sw->con_clear(vc, vc->vc_y, vc->vc_x, 1,
+ vc->vc_cols - vc->vc_x);
+ break;
+ case 1: /* erase from start of line to cursor */
+ start = (unsigned short *)(vc->vc_pos - (vc->vc_x << 1));
+ count = vc->vc_x + 1;
+ if (DO_UPDATE(vc))
+ vc->vc_sw->con_clear(vc, vc->vc_y, 0, 1,
+ vc->vc_x + 1);
+ break;
+ case 2: /* erase whole line */
+ start = (unsigned short *)(vc->vc_pos - (vc->vc_x << 1));
+ count = vc->vc_cols;
+ if (DO_UPDATE(vc))
+ vc->vc_sw->con_clear(vc, vc->vc_y, 0, 1,
+ vc->vc_cols);
+ break;
+ default:
+ return;
+ }
+ scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
+ vc->vc_need_wrap = 0;
+}
+
+static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar positions */
+{ /* not vt100? */
+ int count;
+
+ if (!vpar)
+ vpar++;
+ count = (vpar > vc->vc_cols - vc->vc_x) ? (vc->vc_cols - vc->vc_x) : vpar;
+
+ scr_memsetw((unsigned short *)vc->vc_pos, vc->vc_video_erase_char, 2 * count);
+ if (DO_UPDATE(vc))
+ vc->vc_sw->con_clear(vc, vc->vc_y, vc->vc_x, 1, count);
+ vc->vc_need_wrap = 0;
+}
+
+static void default_attr(struct vc_data *vc)
+{
+ vc->vc_intensity = 1;
+ vc->vc_italic = 0;
+ vc->vc_underline = 0;
+ vc->vc_reverse = 0;
+ vc->vc_blink = 0;
+ vc->vc_color = vc->vc_def_color;
+}
+
+/* console_lock is held */
+static void csi_m(struct vc_data *vc)
+{
+ int i;
+
+ for (i = 0; i <= vc->vc_npar; i++)
+ switch (vc->vc_par[i]) {
+ case 0: /* all attributes off */
+ default_attr(vc);
+ break;
+ case 1:
+ vc->vc_intensity = 2;
+ break;
+ case 2:
+ vc->vc_intensity = 0;
+ break;
+ case 3:
+ vc->vc_italic = 1;
+ break;
+ case 4:
+ vc->vc_underline = 1;
+ break;
+ case 5:
+ vc->vc_blink = 1;
+ break;
+ case 7:
+ vc->vc_reverse = 1;
+ break;
+ case 10: /* ANSI X3.64-1979 (SCO-ish?)
+ * Select primary font, don't display
+ * control chars if defined, don't set
+ * bit 8 on output.
+ */
+ vc->vc_translate = set_translate(vc->vc_charset == 0
+ ? vc->vc_G0_charset
+ : vc->vc_G1_charset, vc);
+ vc->vc_disp_ctrl = 0;
+ vc->vc_toggle_meta = 0;
+ break;
+ case 11: /* ANSI X3.64-1979 (SCO-ish?)
+ * Select first alternate font, lets
+ * chars < 32 be displayed as ROM chars.
+ */
+ vc->vc_translate = set_translate(IBMPC_MAP, vc);
+ vc->vc_disp_ctrl = 1;
+ vc->vc_toggle_meta = 0;
+ break;
+ case 12: /* ANSI X3.64-1979 (SCO-ish?)
+ * Select second alternate font, toggle
+ * high bit before displaying as ROM char.
+ */
+ vc->vc_translate = set_translate(IBMPC_MAP, vc);
+ vc->vc_disp_ctrl = 1;
+ vc->vc_toggle_meta = 1;
+ break;
+ case 21:
+ case 22:
+ vc->vc_intensity = 1;
+ break;
+ case 23:
+ vc->vc_italic = 0;
+ break;
+ case 24:
+ vc->vc_underline = 0;
+ break;
+ case 25:
+ vc->vc_blink = 0;
+ break;
+ case 27:
+ vc->vc_reverse = 0;
+ break;
+ case 38: /* ANSI X3.64-1979 (SCO-ish?)
+ * Enables underscore, white foreground
+ * with white underscore (Linux - use
+ * default foreground).
+ */
+ vc->vc_color = (vc->vc_def_color & 0x0f) | (vc->vc_color & 0xf0);
+ vc->vc_underline = 1;
+ break;
+ case 39: /* ANSI X3.64-1979 (SCO-ish?)
+ * Disable underline option.
+ * Reset colour to default? It did this
+ * before...
+ */
+ vc->vc_color = (vc->vc_def_color & 0x0f) | (vc->vc_color & 0xf0);
+ vc->vc_underline = 0;
+ break;
+ case 49:
+ vc->vc_color = (vc->vc_def_color & 0xf0) | (vc->vc_color & 0x0f);
+ break;
+ default:
+ if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37)
+ vc->vc_color = color_table[vc->vc_par[i] - 30]
+ | (vc->vc_color & 0xf0);
+ else if (vc->vc_par[i] >= 40 && vc->vc_par[i] <= 47)
+ vc->vc_color = (color_table[vc->vc_par[i] - 40] << 4)
+ | (vc->vc_color & 0x0f);
+ break;
+ }
+ update_attr(vc);
+}
+
+static void respond_string(const char *p, struct tty_struct *tty)
+{
+ while (*p) {
+ tty_insert_flip_char(tty, *p, 0);
+ p++;
+ }
+ con_schedule_flip(tty);
+}
+
+static void cursor_report(struct vc_data *vc, struct tty_struct *tty)
+{
+ char buf[40];
+
+ sprintf(buf, "\033[%d;%dR", vc->vc_y + (vc->vc_decom ? vc->vc_top + 1 : 1), vc->vc_x + 1);
+ respond_string(buf, tty);
+}
+
+static inline void status_report(struct tty_struct *tty)
+{
+ respond_string("\033[0n", tty); /* Terminal ok */
+}
+
+static inline void respond_ID(struct tty_struct * tty)
+{
+ respond_string(VT102ID, tty);
+}
+
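+/*
+ * xterm-compatible mouse report: ESC [ M followed by the button state
+ * and the one-based x/y position, each offset by 0x20 into the
+ * printable range.
+ */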
+void mouse_report(struct tty_struct *tty, int butt, int mrx, int mry)
+{
+ char buf[8];
+
+ sprintf(buf, "\033[M%c%c%c", (char)(' ' + butt), (char)('!' + mrx),
+ (char)('!' + mry));
+ respond_string(buf, tty);
+}
+
+/* invoked via ioctl(TIOCLINUX) and through set_selection */
+int mouse_reporting(void)
+{
+ return vc_cons[fg_console].d->vc_report_mouse;
+}
+
+/* console_lock is held */
+static void set_mode(struct vc_data *vc, int on_off)
+{
+ int i;
+
+ for (i = 0; i <= vc->vc_npar; i++)
+ if (vc->vc_ques) {
+ switch(vc->vc_par[i]) { /* DEC private modes set/reset */
+ case 1: /* Cursor keys send ^[Ox/^[[x */
+ if (on_off)
+ set_kbd(vc, decckm);
+ else
+ clr_kbd(vc, decckm);
+ break;
+ case 3: /* 80/132 mode switch unimplemented */
+ vc->vc_deccolm = on_off;
+#if 0
+ vc_resize(deccolm ? 132 : 80, vc->vc_rows);
+ /* this alone does not suffice; some user mode
+ utility has to change the hardware regs */
+#endif
+ break;
+ case 5: /* Inverted screen on/off */
+ if (vc->vc_decscnm != on_off) {
+ vc->vc_decscnm = on_off;
+ invert_screen(vc, 0, vc->vc_screenbuf_size, 0);
+ update_attr(vc);
+ }
+ break;
+ case 6: /* Origin relative/absolute */
+ vc->vc_decom = on_off;
+ gotoxay(vc, 0, 0);
+ break;
+ case 7: /* Autowrap on/off */
+ vc->vc_decawm = on_off;
+ break;
+ case 8: /* Autorepeat on/off */
+ if (on_off)
+ set_kbd(vc, decarm);
+ else
+ clr_kbd(vc, decarm);
+ break;
+ case 9:
+ vc->vc_report_mouse = on_off ? 1 : 0;
+ break;
+ case 25: /* Cursor on/off */
+ vc->vc_deccm = on_off;
+ break;
+ case 1000:
+ vc->vc_report_mouse = on_off ? 2 : 0;
+ break;
+ }
+ } else {
+ switch(vc->vc_par[i]) { /* ANSI modes set/reset */
+ case 3: /* Monitor (display ctrls) */
+ vc->vc_disp_ctrl = on_off;
+ break;
+ case 4: /* Insert Mode on/off */
+ vc->vc_decim = on_off;
+ break;
+ case 20: /* Lf, Enter == CrLf/Lf */
+ if (on_off)
+ set_kbd(vc, lnm);
+ else
+ clr_kbd(vc, lnm);
+ break;
+ }
+ }
+}
+
+/* console_lock is held */
+static void setterm_command(struct vc_data *vc)
+{
+ switch(vc->vc_par[0]) {
+ case 1: /* set color for underline mode */
+ if (vc->vc_can_do_color &&
+ vc->vc_par[1] < 16) {
+ vc->vc_ulcolor = color_table[vc->vc_par[1]];
+ if (vc->vc_underline)
+ update_attr(vc);
+ }
+ break;
+ case 2: /* set color for half intensity mode */
+ if (vc->vc_can_do_color &&
+ vc->vc_par[1] < 16) {
+ vc->vc_halfcolor = color_table[vc->vc_par[1]];
+ if (vc->vc_intensity == 0)
+ update_attr(vc);
+ }
+ break;
+ case 8: /* store colors as defaults */
+ vc->vc_def_color = vc->vc_attr;
+ if (vc->vc_hi_font_mask == 0x100)
+ vc->vc_def_color >>= 1;
+ default_attr(vc);
+ update_attr(vc);
+ break;
+ case 9: /* set blanking interval */
+ blankinterval = ((vc->vc_par[1] < 60) ? vc->vc_par[1] : 60) * 60;
+ poke_blanked_console();
+ break;
+ case 10: /* set bell frequency in Hz */
+ if (vc->vc_npar >= 1)
+ vc->vc_bell_pitch = vc->vc_par[1];
+ else
+ vc->vc_bell_pitch = DEFAULT_BELL_PITCH;
+ break;
+ case 11: /* set bell duration in msec */
+ if (vc->vc_npar >= 1)
+ vc->vc_bell_duration = (vc->vc_par[1] < 2000) ?
+ vc->vc_par[1] * HZ / 1000 : 0;
+ else
+ vc->vc_bell_duration = DEFAULT_BELL_DURATION;
+ break;
+ case 12: /* bring specified console to the front */
+ if (vc->vc_par[1] >= 1 && vc_cons_allocated(vc->vc_par[1] - 1))
+ set_console(vc->vc_par[1] - 1);
+ break;
+ case 13: /* unblank the screen */
+ poke_blanked_console();
+ break;
+ case 14: /* set vesa powerdown interval */
+ vesa_off_interval = ((vc->vc_par[1] < 60) ? vc->vc_par[1] : 60) * 60 * HZ;
+ break;
+ case 15: /* activate the previous console */
+ set_console(last_console);
+ break;
+ }
+}
+
+/* console_lock is held */
+static void csi_at(struct vc_data *vc, unsigned int nr)
+{
+ if (nr > vc->vc_cols - vc->vc_x)
+ nr = vc->vc_cols - vc->vc_x;
+ else if (!nr)
+ nr = 1;
+ insert_char(vc, nr);
+}
+
+/* console_lock is held */
+static void csi_L(struct vc_data *vc, unsigned int nr)
+{
+ if (nr > vc->vc_rows - vc->vc_y)
+ nr = vc->vc_rows - vc->vc_y;
+ else if (!nr)
+ nr = 1;
+ scrdown(vc, vc->vc_y, vc->vc_bottom, nr);
+ vc->vc_need_wrap = 0;
+}
+
+/* console_lock is held */
+static void csi_P(struct vc_data *vc, unsigned int nr)
+{
+ if (nr > vc->vc_cols - vc->vc_x)
+ nr = vc->vc_cols - vc->vc_x;
+ else if (!nr)
+ nr = 1;
+ delete_char(vc, nr);
+}
+
+/* console_lock is held */
+static void csi_M(struct vc_data *vc, unsigned int nr)
+{
+ if (nr > vc->vc_rows - vc->vc_y)
+ nr = vc->vc_rows - vc->vc_y;
+ else if (!nr)
+ nr=1;
+ scrup(vc, vc->vc_y, vc->vc_bottom, nr);
+ vc->vc_need_wrap = 0;
+}
+
+/* console_lock is held (except via vc_init->reset_terminal) */
+static void save_cur(struct vc_data *vc)
+{
+ vc->vc_saved_x = vc->vc_x;
+ vc->vc_saved_y = vc->vc_y;
+ vc->vc_s_intensity = vc->vc_intensity;
+ vc->vc_s_italic = vc->vc_italic;
+ vc->vc_s_underline = vc->vc_underline;
+ vc->vc_s_blink = vc->vc_blink;
+ vc->vc_s_reverse = vc->vc_reverse;
+ vc->vc_s_charset = vc->vc_charset;
+ vc->vc_s_color = vc->vc_color;
+ vc->vc_saved_G0 = vc->vc_G0_charset;
+ vc->vc_saved_G1 = vc->vc_G1_charset;
+}
+
+/* console_lock is held */
+static void restore_cur(struct vc_data *vc)
+{
+ gotoxy(vc, vc->vc_saved_x, vc->vc_saved_y);
+ vc->vc_intensity = vc->vc_s_intensity;
+ vc->vc_italic = vc->vc_s_italic;
+ vc->vc_underline = vc->vc_s_underline;
+ vc->vc_blink = vc->vc_s_blink;
+ vc->vc_reverse = vc->vc_s_reverse;
+ vc->vc_charset = vc->vc_s_charset;
+ vc->vc_color = vc->vc_s_color;
+ vc->vc_G0_charset = vc->vc_saved_G0;
+ vc->vc_G1_charset = vc->vc_saved_G1;
+ vc->vc_translate = set_translate(vc->vc_charset ? vc->vc_G1_charset : vc->vc_G0_charset, vc);
+ update_attr(vc);
+ vc->vc_need_wrap = 0;
+}
+
+enum { ESnormal, ESesc, ESsquare, ESgetpars, ESgotpars, ESfunckey,
+ EShash, ESsetG0, ESsetG1, ESpercent, ESignore, ESnonstd,
+ ESpalette };
+
+/* console_lock is held (except via vc_init()) */
+static void reset_terminal(struct vc_data *vc, int do_clear)
+{
+ vc->vc_top = 0;
+ vc->vc_bottom = vc->vc_rows;
+ vc->vc_state = ESnormal;
+ vc->vc_ques = 0;
+ vc->vc_translate = set_translate(LAT1_MAP, vc);
+ vc->vc_G0_charset = LAT1_MAP;
+ vc->vc_G1_charset = GRAF_MAP;
+ vc->vc_charset = 0;
+ vc->vc_need_wrap = 0;
+ vc->vc_report_mouse = 0;
+ vc->vc_utf = default_utf8;
+ vc->vc_utf_count = 0;
+
+ vc->vc_disp_ctrl = 0;
+ vc->vc_toggle_meta = 0;
+
+ vc->vc_decscnm = 0;
+ vc->vc_decom = 0;
+ vc->vc_decawm = 1;
+ vc->vc_deccm = global_cursor_default;
+ vc->vc_decim = 0;
+
+ set_kbd(vc, decarm);
+ clr_kbd(vc, decckm);
+ clr_kbd(vc, kbdapplic);
+ clr_kbd(vc, lnm);
+ kbd_table[vc->vc_num].lockstate = 0;
+ kbd_table[vc->vc_num].slockstate = 0;
+ kbd_table[vc->vc_num].ledmode = LED_SHOW_FLAGS;
+ kbd_table[vc->vc_num].ledflagstate = kbd_table[vc->vc_num].default_ledflagstate;
+ /* do not do set_leds here because this causes an endless tasklet loop
+ when the keyboard hasn't been initialized yet */
+
+ vc->vc_cursor_type = cur_default;
+ vc->vc_complement_mask = vc->vc_s_complement_mask;
+
+ default_attr(vc);
+ update_attr(vc);
+
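+ /* each vc_tab_stop word is a 32-column bitmap: set a stop every
+ eight columns, but not at column 0 */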
+ vc->vc_tab_stop[0] = 0x01010100;
+ vc->vc_tab_stop[1] =
+ vc->vc_tab_stop[2] =
+ vc->vc_tab_stop[3] =
+ vc->vc_tab_stop[4] =
+ vc->vc_tab_stop[5] =
+ vc->vc_tab_stop[6] =
+ vc->vc_tab_stop[7] = 0x01010101;
+
+ vc->vc_bell_pitch = DEFAULT_BELL_PITCH;
+ vc->vc_bell_duration = DEFAULT_BELL_DURATION;
+
+ gotoxy(vc, 0, 0);
+ save_cur(vc);
+ if (do_clear)
+ csi_J(vc, 2);
+}
+
+/* console_lock is held */
+static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
+{
+ /*
+ * Control characters can be used in the _middle_
+ * of an escape sequence.
+ */
+ switch (c) {
+ case 0:
+ return;
+ case 7:
+ if (vc->vc_bell_duration)
+ kd_mksound(vc->vc_bell_pitch, vc->vc_bell_duration);
+ return;
+ case 8:
+ bs(vc);
+ return;
+ case 9:
+ vc->vc_pos -= (vc->vc_x << 1);
+ while (vc->vc_x < vc->vc_cols - 1) {
+ vc->vc_x++;
+ if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31)))
+ break;
+ }
+ vc->vc_pos += (vc->vc_x << 1);
+ notify_write(vc, '\t');
+ return;
+ case 10: case 11: case 12:
+ lf(vc);
+ if (!is_kbd(vc, lnm))
+ return;
+ case 13:
+ cr(vc);
+ return;
+ case 14:
+ vc->vc_charset = 1;
+ vc->vc_translate = set_translate(vc->vc_G1_charset, vc);
+ vc->vc_disp_ctrl = 1;
+ return;
+ case 15:
+ vc->vc_charset = 0;
+ vc->vc_translate = set_translate(vc->vc_G0_charset, vc);
+ vc->vc_disp_ctrl = 0;
+ return;
+ case 24: case 26:
+ vc->vc_state = ESnormal;
+ return;
+ case 27:
+ vc->vc_state = ESesc;
+ return;
+ case 127:
+ del(vc);
+ return;
+ case 128+27:
+ vc->vc_state = ESsquare;
+ return;
+ }
+ switch(vc->vc_state) {
+ case ESesc:
+ vc->vc_state = ESnormal;
+ switch (c) {
+ case '[':
+ vc->vc_state = ESsquare;
+ return;
+ case ']':
+ vc->vc_state = ESnonstd;
+ return;
+ case '%':
+ vc->vc_state = ESpercent;
+ return;
+ case 'E':
+ cr(vc);
+ lf(vc);
+ return;
+ case 'M':
+ ri(vc);
+ return;
+ case 'D':
+ lf(vc);
+ return;
+ case 'H':
+ vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31));
+ return;
+ case 'Z':
+ respond_ID(tty);
+ return;
+ case '7':
+ save_cur(vc);
+ return;
+ case '8':
+ restore_cur(vc);
+ return;
+ case '(':
+ vc->vc_state = ESsetG0;
+ return;
+ case ')':
+ vc->vc_state = ESsetG1;
+ return;
+ case '#':
+ vc->vc_state = EShash;
+ return;
+ case 'c':
+ reset_terminal(vc, 1);
+ return;
+ case '>': /* Numeric keypad */
+ clr_kbd(vc, kbdapplic);
+ return;
+ case '=': /* Appl. keypad */
+ set_kbd(vc, kbdapplic);
+ return;
+ }
+ return;
+ case ESnonstd:
+ if (c=='P') { /* palette escape sequence */
+ for (vc->vc_npar = 0; vc->vc_npar < NPAR; vc->vc_npar++)
+ vc->vc_par[vc->vc_npar] = 0;
+ vc->vc_npar = 0;
+ vc->vc_state = ESpalette;
+ return;
+ } else if (c=='R') { /* reset palette */
+ reset_palette(vc);
+ vc->vc_state = ESnormal;
+ } else
+ vc->vc_state = ESnormal;
+ return;
+ case ESpalette:
+ if (isxdigit(c)) {
+ vc->vc_par[vc->vc_npar++] = hex_to_bin(c);
+ if (vc->vc_npar == 7) {
+ int i = vc->vc_par[0] * 3, j = 1;
+ vc->vc_palette[i] = 16 * vc->vc_par[j++];
+ vc->vc_palette[i++] += vc->vc_par[j++];
+ vc->vc_palette[i] = 16 * vc->vc_par[j++];
+ vc->vc_palette[i++] += vc->vc_par[j++];
+ vc->vc_palette[i] = 16 * vc->vc_par[j++];
+ vc->vc_palette[i] += vc->vc_par[j];
+ set_palette(vc);
+ vc->vc_state = ESnormal;
+ }
+ } else
+ vc->vc_state = ESnormal;
+ return;
+ case ESsquare:
+ for (vc->vc_npar = 0; vc->vc_npar < NPAR; vc->vc_npar++)
+ vc->vc_par[vc->vc_npar] = 0;
+ vc->vc_npar = 0;
+ vc->vc_state = ESgetpars;
+ if (c == '[') { /* Function key */
+ vc->vc_state=ESfunckey;
+ return;
+ }
+ vc->vc_ques = (c == '?');
+ if (vc->vc_ques)
+ return;
+ case ESgetpars:
+ if (c == ';' && vc->vc_npar < NPAR - 1) {
+ vc->vc_npar++;
+ return;
+ } else if (c>='0' && c<='9') {
+ vc->vc_par[vc->vc_npar] *= 10;
+ vc->vc_par[vc->vc_npar] += c - '0';
+ return;
+ } else
+ vc->vc_state = ESgotpars;
+ case ESgotpars:
+ vc->vc_state = ESnormal;
+ switch(c) {
+ case 'h':
+ set_mode(vc, 1);
+ return;
+ case 'l':
+ set_mode(vc, 0);
+ return;
+ case 'c':
+ if (vc->vc_ques) {
+ if (vc->vc_par[0])
+ vc->vc_cursor_type = vc->vc_par[0] | (vc->vc_par[1] << 8) | (vc->vc_par[2] << 16);
+ else
+ vc->vc_cursor_type = cur_default;
+ return;
+ }
+ break;
+ case 'm':
+ if (vc->vc_ques) {
+ clear_selection();
+ if (vc->vc_par[0])
+ vc->vc_complement_mask = vc->vc_par[0] << 8 | vc->vc_par[1];
+ else
+ vc->vc_complement_mask = vc->vc_s_complement_mask;
+ return;
+ }
+ break;
+ case 'n':
+ if (!vc->vc_ques) {
+ if (vc->vc_par[0] == 5)
+ status_report(tty);
+ else if (vc->vc_par[0] == 6)
+ cursor_report(vc, tty);
+ }
+ return;
+ }
+ if (vc->vc_ques) {
+ vc->vc_ques = 0;
+ return;
+ }
+ switch(c) {
+ case 'G': case '`':
+ if (vc->vc_par[0])
+ vc->vc_par[0]--;
+ gotoxy(vc, vc->vc_par[0], vc->vc_y);
+ return;
+ case 'A':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, vc->vc_x, vc->vc_y - vc->vc_par[0]);
+ return;
+ case 'B': case 'e':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, vc->vc_x, vc->vc_y + vc->vc_par[0]);
+ return;
+ case 'C': case 'a':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, vc->vc_x + vc->vc_par[0], vc->vc_y);
+ return;
+ case 'D':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, vc->vc_x - vc->vc_par[0], vc->vc_y);
+ return;
+ case 'E':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, 0, vc->vc_y + vc->vc_par[0]);
+ return;
+ case 'F':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, 0, vc->vc_y - vc->vc_par[0]);
+ return;
+ case 'd':
+ if (vc->vc_par[0])
+ vc->vc_par[0]--;
+ gotoxay(vc, vc->vc_x ,vc->vc_par[0]);
+ return;
+ case 'H': case 'f':
+ if (vc->vc_par[0])
+ vc->vc_par[0]--;
+ if (vc->vc_par[1])
+ vc->vc_par[1]--;
+ gotoxay(vc, vc->vc_par[1], vc->vc_par[0]);
+ return;
+ case 'J':
+ csi_J(vc, vc->vc_par[0]);
+ return;
+ case 'K':
+ csi_K(vc, vc->vc_par[0]);
+ return;
+ case 'L':
+ csi_L(vc, vc->vc_par[0]);
+ return;
+ case 'M':
+ csi_M(vc, vc->vc_par[0]);
+ return;
+ case 'P':
+ csi_P(vc, vc->vc_par[0]);
+ return;
+ case 'c':
+ if (!vc->vc_par[0])
+ respond_ID(tty);
+ return;
+ case 'g':
+ if (!vc->vc_par[0])
+ vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31));
+ else if (vc->vc_par[0] == 3) {
+ vc->vc_tab_stop[0] =
+ vc->vc_tab_stop[1] =
+ vc->vc_tab_stop[2] =
+ vc->vc_tab_stop[3] =
+ vc->vc_tab_stop[4] =
+ vc->vc_tab_stop[5] =
+ vc->vc_tab_stop[6] =
+ vc->vc_tab_stop[7] = 0;
+ }
+ return;
+ case 'm':
+ csi_m(vc);
+ return;
+ case 'q': /* DECLL - but only 3 leds */
+ /* map 0,1,2,3 to 0,1,2,4 */
+ if (vc->vc_par[0] < 4)
+ setledstate(kbd_table + vc->vc_num,
+ (vc->vc_par[0] < 3) ? vc->vc_par[0] : 4);
+ return;
+ case 'r':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ if (!vc->vc_par[1])
+ vc->vc_par[1] = vc->vc_rows;
+ /* Minimum allowed region is 2 lines */
+ if (vc->vc_par[0] < vc->vc_par[1] &&
+ vc->vc_par[1] <= vc->vc_rows) {
+ vc->vc_top = vc->vc_par[0] - 1;
+ vc->vc_bottom = vc->vc_par[1];
+ gotoxay(vc, 0, 0);
+ }
+ return;
+ case 's':
+ save_cur(vc);
+ return;
+ case 'u':
+ restore_cur(vc);
+ return;
+ case 'X':
+ csi_X(vc, vc->vc_par[0]);
+ return;
+ case '@':
+ csi_at(vc, vc->vc_par[0]);
+ return;
+ case ']': /* setterm functions */
+ setterm_command(vc);
+ return;
+ }
+ return;
+ case ESpercent:
+ vc->vc_state = ESnormal;
+ switch (c) {
+ case '@': /* defined in ISO 2022 */
+ vc->vc_utf = 0;
+ return;
+ case 'G': /* prelim official escape code */
+ case '8': /* retained for compatibility */
+ vc->vc_utf = 1;
+ return;
+ }
+ return;
+ case ESfunckey:
+ vc->vc_state = ESnormal;
+ return;
+ case EShash:
+ vc->vc_state = ESnormal;
+ if (c == '8') {
+ /* DEC screen alignment test. kludge :-) */
+ vc->vc_video_erase_char =
+ (vc->vc_video_erase_char & 0xff00) | 'E';
+ csi_J(vc, 2);
+ vc->vc_video_erase_char =
+ (vc->vc_video_erase_char & 0xff00) | ' ';
+ do_update_region(vc, vc->vc_origin, vc->vc_screenbuf_size / 2);
+ }
+ return;
+ case ESsetG0:
+ if (c == '0')
+ vc->vc_G0_charset = GRAF_MAP;
+ else if (c == 'B')
+ vc->vc_G0_charset = LAT1_MAP;
+ else if (c == 'U')
+ vc->vc_G0_charset = IBMPC_MAP;
+ else if (c == 'K')
+ vc->vc_G0_charset = USER_MAP;
+ if (vc->vc_charset == 0)
+ vc->vc_translate = set_translate(vc->vc_G0_charset, vc);
+ vc->vc_state = ESnormal;
+ return;
+ case ESsetG1:
+ if (c == '0')
+ vc->vc_G1_charset = GRAF_MAP;
+ else if (c == 'B')
+ vc->vc_G1_charset = LAT1_MAP;
+ else if (c == 'U')
+ vc->vc_G1_charset = IBMPC_MAP;
+ else if (c == 'K')
+ vc->vc_G1_charset = USER_MAP;
+ if (vc->vc_charset == 1)
+ vc->vc_translate = set_translate(vc->vc_G1_charset, vc);
+ vc->vc_state = ESnormal;
+ return;
+ default:
+ vc->vc_state = ESnormal;
+ }
+}
+
+/* is_double_width() is based on the wcwidth() implementation by
+ * Markus Kuhn -- 2007-05-26 (Unicode 5.0)
+ * Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
+ */
+struct interval {
+ uint32_t first;
+ uint32_t last;
+};
+
+static int bisearch(uint32_t ucs, const struct interval *table, int max)
+{
+ int min = 0;
+ int mid;
+
+ if (ucs < table[0].first || ucs > table[max].last)
+ return 0;
+ while (max >= min) {
+ mid = (min + max) / 2;
+ if (ucs > table[mid].last)
+ min = mid + 1;
+ else if (ucs < table[mid].first)
+ max = mid - 1;
+ else
+ return 1;
+ }
+ return 0;
+}
+
+static int is_double_width(uint32_t ucs)
+{
+ static const struct interval double_width[] = {
+ { 0x1100, 0x115F }, { 0x2329, 0x232A }, { 0x2E80, 0x303E },
+ { 0x3040, 0xA4CF }, { 0xAC00, 0xD7A3 }, { 0xF900, 0xFAFF },
+ { 0xFE10, 0xFE19 }, { 0xFE30, 0xFE6F }, { 0xFF00, 0xFF60 },
+ { 0xFFE0, 0xFFE6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD }
+ };
+ return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
+}
+
+/* acquires console_lock */
+static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+#ifdef VT_BUF_VRAM_ONLY
+#define FLUSH do { } while(0);
+#else
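+/*
+ * Flush the run of cells batched between draw_from and draw_to to the
+ * driver with a single con_putcs() call at row vc_y, column draw_x.
+ */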
+#define FLUSH if (draw_x >= 0) { \
+ vc->vc_sw->con_putcs(vc, (u16 *)draw_from, (u16 *)draw_to - (u16 *)draw_from, vc->vc_y, draw_x); \
+ draw_x = -1; \
+ }
+#endif
+
+ int c, tc, ok, n = 0, draw_x = -1;
+ unsigned int currcons;
+ unsigned long draw_from = 0, draw_to = 0;
+ struct vc_data *vc;
+ unsigned char vc_attr;
+ struct vt_notifier_param param;
+ uint8_t rescan;
+ uint8_t inverse;
+ uint8_t width;
+ u16 himask, charmask;
+
+ if (in_interrupt())
+ return count;
+
+ might_sleep();
+
+ console_lock();
+ vc = tty->driver_data;
+ if (vc == NULL) {
+ printk(KERN_ERR "vt: argh, driver_data is NULL !\n");
+ console_unlock();
+ return 0;
+ }
+
+ currcons = vc->vc_num;
+ if (!vc_cons_allocated(currcons)) {
+ /* could this happen? */
+ pr_warn_once("con_write: tty %d not allocated\n", currcons+1);
+ console_unlock();
+ return 0;
+ }
+
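+ /* with a 512-glyph font, vc_hi_font_mask is 0x100: glyph codes run
+ up to 0x1ff and bit 8 is carried in the attribute word */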
+ himask = vc->vc_hi_font_mask;
+ charmask = himask ? 0x1ff : 0xff;
+
+ /* undraw cursor first */
+ if (IS_FG(vc))
+ hide_cursor(vc);
+
+ param.vc = vc;
+
+ while (!tty->stopped && count) {
+ int orig = *buf;
+ c = orig;
+ buf++;
+ n++;
+ count--;
+ rescan = 0;
+ inverse = 0;
+ width = 1;
+
+ /* Do no translation at all in control states */
+ if (vc->vc_state != ESnormal) {
+ tc = c;
+ } else if (vc->vc_utf && !vc->vc_disp_ctrl) {
+ /* Combine UTF-8 into Unicode in vc_utf_char.
+ * vc_utf_count is the number of continuation bytes still
+ * expected to arrive.
+ * vc_npar is the number of continuation bytes arrived so
+ * far
+ */
+rescan_last_byte:
+ if ((c & 0xc0) == 0x80) {
+ /* Continuation byte received */
+ static const uint32_t utf8_length_changes[] = { 0x0000007f, 0x000007ff, 0x0000ffff, 0x001fffff, 0x03ffffff, 0x7fffffff };
+ if (vc->vc_utf_count) {
+ vc->vc_utf_char = (vc->vc_utf_char << 6) | (c & 0x3f);
+ vc->vc_npar++;
+ if (--vc->vc_utf_count) {
+ /* Still need some bytes */
+ continue;
+ }
+ /* Got a whole character */
+ c = vc->vc_utf_char;
+ /* Reject overlong sequences */
+ if (c <= utf8_length_changes[vc->vc_npar - 1] ||
+ c > utf8_length_changes[vc->vc_npar])
+ c = 0xfffd;
+ } else {
+ /* Unexpected continuation byte */
+ vc->vc_utf_count = 0;
+ c = 0xfffd;
+ }
+ } else {
+ /* Single ASCII byte or first byte of a sequence received */
+ if (vc->vc_utf_count) {
+ /* Continuation byte expected */
+ rescan = 1;
+ vc->vc_utf_count = 0;
+ c = 0xfffd;
+ } else if (c > 0x7f) {
+ /* First byte of a multibyte sequence received */
+ vc->vc_npar = 0;
+ if ((c & 0xe0) == 0xc0) {
+ vc->vc_utf_count = 1;
+ vc->vc_utf_char = (c & 0x1f);
+ } else if ((c & 0xf0) == 0xe0) {
+ vc->vc_utf_count = 2;
+ vc->vc_utf_char = (c & 0x0f);
+ } else if ((c & 0xf8) == 0xf0) {
+ vc->vc_utf_count = 3;
+ vc->vc_utf_char = (c & 0x07);
+ } else if ((c & 0xfc) == 0xf8) {
+ vc->vc_utf_count = 4;
+ vc->vc_utf_char = (c & 0x03);
+ } else if ((c & 0xfe) == 0xfc) {
+ vc->vc_utf_count = 5;
+ vc->vc_utf_char = (c & 0x01);
+ } else {
+ /* 254 and 255 are invalid */
+ c = 0xfffd;
+ }
+ if (vc->vc_utf_count) {
+ /* Still need some bytes */
+ continue;
+ }
+ }
+ /* Nothing to do if an ASCII byte was received */
+ }
+ /* End of UTF-8 decoding. */
+ /* c is the received character, or U+FFFD for invalid sequences. */
+ /* Replace invalid Unicode code points with U+FFFD too */
+ if ((c >= 0xd800 && c <= 0xdfff) || c == 0xfffe || c == 0xffff)
+ c = 0xfffd;
+ tc = c;
+ } else { /* no utf or alternate charset mode */
+ tc = vc_translate(vc, c);
+ }
+
+ param.c = tc;
+ if (atomic_notifier_call_chain(&vt_notifier_list, VT_PREWRITE,
+ &param) == NOTIFY_STOP)
+ continue;
+
+ /* If the original code was a control character we
+ * only allow a glyph to be displayed if the code is
+ * not normally used (such as for cursor movement) or
+ * if the disp_ctrl mode has been explicitly enabled.
+ * Certain characters (as given by the CTRL_ALWAYS
+ * bitmap) are always displayed as control characters,
+ * as the console would be pretty useless without
+ * them; to display an arbitrary font position use the
+ * direct-to-font zone in UTF-8 mode.
+ */
+ ok = tc && (c >= 32 ||
+ !(vc->vc_disp_ctrl ? (CTRL_ALWAYS >> c) & 1 :
+ vc->vc_utf || ((CTRL_ACTION >> c) & 1)))
+ && (c != 127 || vc->vc_disp_ctrl)
+ && (c != 128+27);
+
+ if (vc->vc_state == ESnormal && ok) {
+ if (vc->vc_utf && !vc->vc_disp_ctrl) {
+ if (is_double_width(c))
+ width = 2;
+ }
+ /* Now try to find out how to display it */
+ tc = conv_uni_to_pc(vc, tc);
+ if (tc & ~charmask) {
+ if (tc == -1 || tc == -2) {
+ continue; /* nothing to display */
+ }
+ /* Glyph not found */
+ if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) {
+ /* In legacy mode use the glyph we get by a 1:1 mapping.
+ This would make absolutely no sense with Unicode in mind,
+ but do this for ASCII characters since a font may lack
+ Unicode mapping info and we don't want to end up
+ with nothing but question marks. */
+ tc = c;
+ } else {
+ /* Display U+FFFD. If it's not found, display an inverse question mark. */
+ tc = conv_uni_to_pc(vc, 0xfffd);
+ if (tc < 0) {
+ inverse = 1;
+ tc = conv_uni_to_pc(vc, '?');
+ if (tc < 0) tc = '?';
+ }
+ }
+ }
+
+ if (!inverse) {
+ vc_attr = vc->vc_attr;
+ } else {
+ /* invert vc_attr */
+ if (!vc->vc_can_do_color) {
+ vc_attr = (vc->vc_attr) ^ 0x08;
+ } else if (vc->vc_hi_font_mask == 0x100) {
+ vc_attr = ((vc->vc_attr) & 0x11) | (((vc->vc_attr) & 0xe0) >> 4) | (((vc->vc_attr) & 0x0e) << 4);
+ } else {
+ vc_attr = ((vc->vc_attr) & 0x88) | (((vc->vc_attr) & 0x70) >> 4) | (((vc->vc_attr) & 0x07) << 4);
+ }
+ FLUSH
+ }
+
+ while (1) {
+ if (vc->vc_need_wrap || vc->vc_decim)
+ FLUSH
+ if (vc->vc_need_wrap) {
+ cr(vc);
+ lf(vc);
+ }
+ if (vc->vc_decim)
+ insert_char(vc, 1);
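+ /* store glyph + attribute; for 512-glyph fonts, glyph bit 0x100
+ is moved into the himask bit of the attribute half of the cell */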
+ scr_writew(himask ?
+ ((vc_attr << 8) & ~himask) + ((tc & 0x100) ? himask : 0) + (tc & 0xff) :
+ (vc_attr << 8) + tc,
+ (u16 *) vc->vc_pos);
+ if (DO_UPDATE(vc) && draw_x < 0) {
+ draw_x = vc->vc_x;
+ draw_from = vc->vc_pos;
+ }
+ if (vc->vc_x == vc->vc_cols - 1) {
+ vc->vc_need_wrap = vc->vc_decawm;
+ draw_to = vc->vc_pos + 2;
+ } else {
+ vc->vc_x++;
+ draw_to = (vc->vc_pos += 2);
+ }
+
+ if (!--width) break;
+
+ tc = conv_uni_to_pc(vc, ' '); /* A space is printed in the second column */
+ if (tc < 0) tc = ' ';
+ }
+ notify_write(vc, c);
+
+ if (inverse) {
+ FLUSH
+ }
+
+ if (rescan) {
+ rescan = 0;
+ inverse = 0;
+ width = 1;
+ c = orig;
+ goto rescan_last_byte;
+ }
+ continue;
+ }
+ FLUSH
+ do_con_trol(tty, vc, orig);
+ }
+ FLUSH
+ console_conditional_schedule();
+ console_unlock();
+ notify_update(vc);
+ return n;
+#undef FLUSH
+}
+
+/*
+ * This is the console switching callback.
+ *
+ * Doing console switching in a process context allows
+ * us to do the switches asynchronously (needed when we want
+ * to switch due to a keyboard interrupt). Synchronization
+ * with other console code and prevention of re-entrancy is
+ * ensured with console_lock.
+ */
+static void console_callback(struct work_struct *ignored)
+{
+ console_lock();
+
+ if (want_console >= 0) {
+ if (want_console != fg_console &&
+ vc_cons_allocated(want_console)) {
+ hide_cursor(vc_cons[fg_console].d);
+ change_console(vc_cons[want_console].d);
+ /* we only switch if the console has already been
+ allocated - a new console is not created from an
+ interrupt routine */
+ }
+ want_console = -1;
+ }
+ if (do_poke_blanked_console) { /* do not unblank for a LED change */
+ do_poke_blanked_console = 0;
+ poke_blanked_console();
+ }
+ if (scrollback_delta) {
+ struct vc_data *vc = vc_cons[fg_console].d;
+ clear_selection();
+ if (vc->vc_mode == KD_TEXT)
+ vc->vc_sw->con_scrolldelta(vc, scrollback_delta);
+ scrollback_delta = 0;
+ }
+ if (blank_timer_expired) {
+ do_blank_screen(0);
+ blank_timer_expired = 0;
+ }
+ notify_update(vc_cons[fg_console].d);
+
+ console_unlock();
+}
+
+int set_console(int nr)
+{
+ struct vc_data *vc = vc_cons[fg_console].d;
+
+ if (!vc_cons_allocated(nr) || vt_dont_switch ||
+ (vc->vt_mode.mode == VT_AUTO && vc->vc_mode == KD_GRAPHICS)) {
+
+ /*
+ * Console switch will fail in console_callback() or
+ * change_console() so there is no point scheduling
+ * the callback
+ *
+ * Existing set_console() users don't check the return
+ * value so this shouldn't break anything
+ */
+ return -EINVAL;
+ }
+
+ want_console = nr;
+ schedule_console_callback();
+
+ return 0;
+}
+
+struct tty_driver *console_driver;
+
+#ifdef CONFIG_VT_CONSOLE
+
+/**
+ * vt_kmsg_redirect() - Sets/gets the kernel message console
+ * @new: The new virtual terminal number or -1 if the console should stay
+ * unchanged
+ *
+ * By default, the kernel messages are always printed on the current virtual
+ * console. However, the user may modify that default with the
+ * TIOCL_SETKMSGREDIRECT ioctl call.
+ *
+ * This function sets the kernel message console to be @new. It returns the old
+ * virtual console number. The virtual terminal number 0 (both as parameter and
+ * return value) means no redirection (i.e. always printed on the currently
+ * active console).
+ *
+ * The parameter -1 means that only the current console is returned, but the
+ * value is not modified. You may use the macro vt_get_kmsg_redirect() in that
+ * case to make the code more understandable.
+ *
+ * When the kernel is compiled without CONFIG_VT_CONSOLE, this function ignores
+ * the parameter and always returns 0.
+ */
+int vt_kmsg_redirect(int new)
+{
+ static int kmsg_con;
+
+ if (new != -1)
+ return xchg(&kmsg_con, new);
+ else
+ return kmsg_con;
+}
+
+/*
+ * Console on virtual terminal
+ *
+ * The console must be locked when we get here.
+ */
+
+static void vt_console_print(struct console *co, const char *b, unsigned count)
+{
+ struct vc_data *vc = vc_cons[fg_console].d;
+ unsigned char c;
+ static DEFINE_SPINLOCK(printing_lock);
+ const ushort *start;
+ ushort cnt = 0;
+ ushort myx;
+ int kmsg_console;
+
+ /* console busy or not yet initialized */
+ if (!printable)
+ return;
+ if (!spin_trylock(&printing_lock))
+ return;
+
+ kmsg_console = vt_get_kmsg_redirect();
+ if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
+ vc = vc_cons[kmsg_console - 1].d;
+
+ /* read the x position only after vc has been redirected above
+ (otherwise we would read the foreground console's x). */
+ myx = vc->vc_x;
+
+ if (!vc_cons_allocated(fg_console)) {
+ /* impossible */
+ /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
+ goto quit;
+ }
+
+ if (vc->vc_mode != KD_TEXT && !vt_force_oops_output(vc))
+ goto quit;
+
+ /* undraw cursor first */
+ if (IS_FG(vc))
+ hide_cursor(vc);
+
+ start = (ushort *)vc->vc_pos;
+
+ /* Contrived loop structure to emulate the original need_wrap
+ * behaviour; problems arise when need_wrap is set on a '\n'. */
+ while (count--) {
+ c = *b++;
+ if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
+ if (cnt > 0) {
+ if (CON_IS_VISIBLE(vc))
+ vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
+ vc->vc_x += cnt;
+ if (vc->vc_need_wrap)
+ vc->vc_x--;
+ cnt = 0;
+ }
+ if (c == 8) { /* backspace */
+ bs(vc);
+ start = (ushort *)vc->vc_pos;
+ myx = vc->vc_x;
+ continue;
+ }
+ if (c != 13)
+ lf(vc);
+ cr(vc);
+ start = (ushort *)vc->vc_pos;
+ myx = vc->vc_x;
+ if (c == 10 || c == 13)
+ continue;
+ }
+ scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
+ notify_write(vc, c);
+ cnt++;
+ if (myx == vc->vc_cols - 1) {
+ vc->vc_need_wrap = 1;
+ continue;
+ }
+ vc->vc_pos += 2;
+ myx++;
+ }
+ if (cnt > 0) {
+ if (CON_IS_VISIBLE(vc))
+ vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
+ vc->vc_x += cnt;
+ if (vc->vc_x == vc->vc_cols) {
+ vc->vc_x--;
+ vc->vc_need_wrap = 1;
+ }
+ }
+ set_cursor(vc);
+ notify_update(vc);
+
+quit:
+ spin_unlock(&printing_lock);
+}
+
+static struct tty_driver *vt_console_device(struct console *c, int *index)
+{
+ *index = c->index ? c->index-1 : fg_console;
+ return console_driver;
+}
+
+static struct console vt_console_driver = {
+ .name = "tty",
+ .write = vt_console_print,
+ .device = vt_console_device,
+ .unblank = unblank_screen,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+#endif
+
+/*
+ * Handling of Linux-specific VC ioctls
+ */
+
+/*
+ * Generally a bit racy with respect to console_lock().
+ *
+ * There are some functions which don't need it.
+ *
+ * There are some functions which can sleep for arbitrary periods
+ * (paste_selection) but we don't need the lock there anyway.
+ *
+ * set_selection has locking, and definitely needs it
+ */
+
+int tioclinux(struct tty_struct *tty, unsigned long arg)
+{
+ char type, data;
+ char __user *p = (char __user *)arg;
+ int lines;
+ int ret;
+
+ if (current->signal->tty != tty && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(type, p))
+ return -EFAULT;
+ ret = 0;
+
+ switch (type)
+ {
+ case TIOCL_SETSEL:
+ console_lock();
+ ret = set_selection((struct tiocl_selection __user *)(p+1), tty);
+ console_unlock();
+ break;
+ case TIOCL_PASTESEL:
+ ret = paste_selection(tty);
+ break;
+ case TIOCL_UNBLANKSCREEN:
+ console_lock();
+ unblank_screen();
+ console_unlock();
+ break;
+ case TIOCL_SELLOADLUT:
+ ret = sel_loadlut(p);
+ break;
+ case TIOCL_GETSHIFTSTATE:
+
+ /*
+ * Make it possible to react to Shift+Mousebutton.
+ * Note that 'shift_state' is an undocumented
+ * kernel-internal variable; programs not closely
+ * related to the kernel should not use this.
+ */
+ data = shift_state;
+ ret = __put_user(data, p);
+ break;
+ case TIOCL_GETMOUSEREPORTING:
+ data = mouse_reporting();
+ ret = __put_user(data, p);
+ break;
+ case TIOCL_SETVESABLANK:
+ ret = set_vesa_blanking(p);
+ break;
+ case TIOCL_GETKMSGREDIRECT:
+ data = vt_get_kmsg_redirect();
+ ret = __put_user(data, p);
+ break;
+ case TIOCL_SETKMSGREDIRECT:
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ } else {
+ if (get_user(data, p+1))
+ ret = -EFAULT;
+ else
+ vt_kmsg_redirect(data);
+ }
+ break;
+ case TIOCL_GETFGCONSOLE:
+ ret = fg_console;
+ break;
+ case TIOCL_SCROLLCONSOLE:
+ if (get_user(lines, (s32 __user *)(p+4))) {
+ ret = -EFAULT;
+ } else {
+ scrollfront(vc_cons[fg_console].d, lines);
+ ret = 0;
+ }
+ break;
+ case TIOCL_BLANKSCREEN: /* until explicitly unblanked, not only poked */
+ console_lock();
+ ignore_poke = 1;
+ do_blank_screen(0);
+ console_unlock();
+ break;
+ case TIOCL_BLANKEDSCREEN:
+ ret = console_blanked;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * /dev/ttyN handling
+ */
+
+static int con_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ int retval;
+
+ retval = do_con_write(tty, buf, count);
+ con_flush_chars(tty);
+
+ return retval;
+}
+
+static int con_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ if (in_interrupt())
+ return 0; /* n_r3964 calls put_char() from interrupt context */
+ return do_con_write(tty, &ch, 1);
+}
+
+static int con_write_room(struct tty_struct *tty)
+{
+ if (tty->stopped)
+ return 0;
+ return 32768; /* No limit, really; we're not buffering */
+}
+
+static int con_chars_in_buffer(struct tty_struct *tty)
+{
+ return 0; /* we're not buffering */
+}
+
+/*
+ * con_throttle and con_unthrottle are only used for
+ * paste_selection(), which has to stuff in a large number of
+ * characters...
+ */
+static void con_throttle(struct tty_struct *tty)
+{
+}
+
+static void con_unthrottle(struct tty_struct *tty)
+{
+ struct vc_data *vc = tty->driver_data;
+
+ wake_up_interruptible(&vc->paste_wait);
+}
+
+/*
+ * Turn the Scroll-Lock LED on when the tty is stopped
+ */
+static void con_stop(struct tty_struct *tty)
+{
+ int console_num;
+ if (!tty)
+ return;
+ console_num = tty->index;
+ if (!vc_cons_allocated(console_num))
+ return;
+ set_vc_kbd_led(kbd_table + console_num, VC_SCROLLOCK);
+ set_leds();
+}
+
+/*
+ * Turn the Scroll-Lock LED off when the console is started
+ */
+static void con_start(struct tty_struct *tty)
+{
+ int console_num;
+ if (!tty)
+ return;
+ console_num = tty->index;
+ if (!vc_cons_allocated(console_num))
+ return;
+ clr_vc_kbd_led(kbd_table + console_num, VC_SCROLLOCK);
+ set_leds();
+}
+
+static void con_flush_chars(struct tty_struct *tty)
+{
+ struct vc_data *vc;
+
+ if (in_interrupt()) /* from flush_to_ldisc */
+ return;
+
+ /* if we race with con_close(), vt may be null */
+ console_lock();
+ vc = tty->driver_data;
+ if (vc)
+ set_cursor(vc);
+ console_unlock();
+}
+
+/*
+ * Allocate the console screen memory.
+ */
+static int con_open(struct tty_struct *tty, struct file *filp)
+{
+ unsigned int currcons = tty->index;
+ int ret = 0;
+
+ console_lock();
+ if (tty->driver_data == NULL) {
+ ret = vc_allocate(currcons);
+ if (ret == 0) {
+ struct vc_data *vc = vc_cons[currcons].d;
+
+ /* Still being freed */
+ if (vc->port.tty) {
+ console_unlock();
+ return -ERESTARTSYS;
+ }
+ tty->driver_data = vc;
+ vc->port.tty = tty;
+
+ if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
+ tty->winsize.ws_row = vc_cons[currcons].d->vc_rows;
+ tty->winsize.ws_col = vc_cons[currcons].d->vc_cols;
+ }
+ if (vc->vc_utf)
+ tty->termios->c_iflag |= IUTF8;
+ else
+ tty->termios->c_iflag &= ~IUTF8;
+ console_unlock();
+ return ret;
+ }
+ }
+ console_unlock();
+ return ret;
+}
+
+static void con_close(struct tty_struct *tty, struct file *filp)
+{
+ /* Nothing to do - we defer to shutdown */
+}
+
+static void con_shutdown(struct tty_struct *tty)
+{
+ struct vc_data *vc = tty->driver_data;
+ BUG_ON(vc == NULL);
+ console_lock();
+ vc->port.tty = NULL;
+ console_unlock();
+ tty_shutdown(tty);
+}
+
+static int default_italic_color = 2; // green (ASCII)
+static int default_underline_color = 3; // cyan (ASCII)
+module_param_named(italic, default_italic_color, int, S_IRUGO | S_IWUSR);
+module_param_named(underline, default_underline_color, int, S_IRUGO | S_IWUSR);
+
+static void vc_init(struct vc_data *vc, unsigned int rows,
+ unsigned int cols, int do_clear)
+{
+ int j, k ;
+
+ vc->vc_cols = cols;
+ vc->vc_rows = rows;
+ vc->vc_size_row = cols << 1;
+ vc->vc_screenbuf_size = vc->vc_rows * vc->vc_size_row;
+
+ set_origin(vc);
+ vc->vc_pos = vc->vc_origin;
+ reset_vc(vc);
+ for (j=k=0; j<16; j++) {
+ vc->vc_palette[k++] = default_red[j] ;
+ vc->vc_palette[k++] = default_grn[j] ;
+ vc->vc_palette[k++] = default_blu[j] ;
+ }
+ vc->vc_def_color = 0x07; /* white */
+ vc->vc_ulcolor = default_underline_color;
+ vc->vc_itcolor = default_italic_color;
+ vc->vc_halfcolor = 0x08; /* grey */
+ init_waitqueue_head(&vc->paste_wait);
+ reset_terminal(vc, do_clear);
+}
+
+/*
+ * This routine initializes console interrupts, and does nothing
+ * else. If you want the screen to clear, call tty_write with
+ * the appropriate escape-sequence.
+ */
+
+static int __init con_init(void)
+{
+ const char *display_desc = NULL;
+ struct vc_data *vc;
+ unsigned int currcons = 0, i;
+
+ console_lock();
+
+ if (conswitchp)
+ display_desc = conswitchp->con_startup();
+ if (!display_desc) {
+ fg_console = 0;
+ console_unlock();
+ return 0;
+ }
+
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ struct con_driver *con_driver = &registered_con_driver[i];
+
+ if (con_driver->con == NULL) {
+ con_driver->con = conswitchp;
+ con_driver->desc = display_desc;
+ con_driver->flag = CON_DRIVER_FLAG_INIT;
+ con_driver->first = 0;
+ con_driver->last = MAX_NR_CONSOLES - 1;
+ break;
+ }
+ }
+
+ for (i = 0; i < MAX_NR_CONSOLES; i++)
+ con_driver_map[i] = conswitchp;
+
+ if (blankinterval) {
+ blank_state = blank_normal_wait;
+ mod_timer(&console_timer, jiffies + (blankinterval * HZ));
+ }
+
+ for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) {
+ vc_cons[currcons].d = vc = kzalloc(sizeof(struct vc_data), GFP_NOWAIT);
+ INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
+ tty_port_init(&vc->port);
+ visual_init(vc, currcons, 1);
+ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
+ vc_init(vc, vc->vc_rows, vc->vc_cols,
+ currcons || !vc->vc_sw->con_save_screen);
+ }
+ currcons = fg_console = 0;
+ master_display_fg = vc = vc_cons[currcons].d;
+ set_origin(vc);
+ save_screen(vc);
+ gotoxy(vc, vc->vc_x, vc->vc_y);
+ csi_J(vc, 0);
+ update_screen(vc);
+ pr_info("Console: %s %s %dx%d",
+ vc->vc_can_do_color ? "colour" : "mono",
+ display_desc, vc->vc_cols, vc->vc_rows);
+ printable = 1;
+ printk("\n");
+
+ console_unlock();
+
+#ifdef CONFIG_VT_CONSOLE
+ register_console(&vt_console_driver);
+#endif
+ return 0;
+}
+console_initcall(con_init);
+
+static const struct tty_operations con_ops = {
+ .open = con_open,
+ .close = con_close,
+ .write = con_write,
+ .write_room = con_write_room,
+ .put_char = con_put_char,
+ .flush_chars = con_flush_chars,
+ .chars_in_buffer = con_chars_in_buffer,
+ .ioctl = vt_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = vt_compat_ioctl,
+#endif
+ .stop = con_stop,
+ .start = con_start,
+ .throttle = con_throttle,
+ .unthrottle = con_unthrottle,
+ .resize = vt_resize,
+ .shutdown = con_shutdown
+};
+
+static struct cdev vc0_cdev;
+
+static ssize_t show_tty_active(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "tty%d\n", fg_console + 1);
+}
+static DEVICE_ATTR(active, S_IRUGO, show_tty_active, NULL);
+
+int __init vty_init(const struct file_operations *console_fops)
+{
+ cdev_init(&vc0_cdev, console_fops);
+ if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
+ register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
+ panic("Couldn't register /dev/tty0 driver\n");
+ tty0dev = device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
+ if (IS_ERR(tty0dev))
+ tty0dev = NULL;
+ else
+ WARN_ON(device_create_file(tty0dev, &dev_attr_active) < 0);
+
+ vcs_init();
+
+ console_driver = alloc_tty_driver(MAX_NR_CONSOLES);
+ if (!console_driver)
+ panic("Couldn't allocate console driver\n");
+ console_driver->owner = THIS_MODULE;
+ console_driver->name = "tty";
+ console_driver->name_base = 1;
+ console_driver->major = TTY_MAJOR;
+ console_driver->minor_start = 1;
+ console_driver->type = TTY_DRIVER_TYPE_CONSOLE;
+ console_driver->init_termios = tty_std_termios;
+ if (default_utf8)
+ console_driver->init_termios.c_iflag |= IUTF8;
+ console_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
+ tty_set_operations(console_driver, &con_ops);
+ if (tty_register_driver(console_driver))
+ panic("Couldn't register console driver\n");
+ kbd_init();
+ console_map_init();
+#ifdef CONFIG_MDA_CONSOLE
+ mda_console_init();
+#endif
+ return 0;
+}
+
+#ifndef VT_SINGLE_DRIVER
+
+static struct class *vtconsole_class;
+
+static int bind_con_driver(const struct consw *csw, int first, int last,
+ int deflt)
+{
+ struct module *owner = csw->owner;
+ const char *desc = NULL;
+ struct con_driver *con_driver;
+ int i, j = -1, k = -1, retval = -ENODEV;
+
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+ console_lock();
+
+ /* check if driver is registered */
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ con_driver = &registered_con_driver[i];
+
+ if (con_driver->con == csw) {
+ desc = con_driver->desc;
+ retval = 0;
+ break;
+ }
+ }
+
+ if (retval)
+ goto err;
+
+ if (!(con_driver->flag & CON_DRIVER_FLAG_INIT)) {
+ csw->con_startup();
+ con_driver->flag |= CON_DRIVER_FLAG_INIT;
+ }
+
+ if (deflt) {
+ if (conswitchp)
+ module_put(conswitchp->owner);
+
+ __module_get(owner);
+ conswitchp = csw;
+ }
+
+ first = max(first, con_driver->first);
+ last = min(last, con_driver->last);
+
+ for (i = first; i <= last; i++) {
+ int old_was_color;
+ struct vc_data *vc = vc_cons[i].d;
+
+ if (con_driver_map[i])
+ module_put(con_driver_map[i]->owner);
+ __module_get(owner);
+ con_driver_map[i] = csw;
+
+ if (!vc || !vc->vc_sw)
+ continue;
+
+ j = i;
+
+ if (CON_IS_VISIBLE(vc)) {
+ k = i;
+ save_screen(vc);
+ }
+
+ old_was_color = vc->vc_can_do_color;
+ vc->vc_sw->con_deinit(vc);
+ vc->vc_origin = (unsigned long)vc->vc_screenbuf;
+ visual_init(vc, i, 0);
+ set_origin(vc);
+ update_attr(vc);
+
+ /* If the console changed between mono <-> color, then
+ * the attributes in the screenbuf will be wrong. The
+ * following resets all attributes to something sane.
+ */
+ if (old_was_color != vc->vc_can_do_color)
+ clear_buffer_attributes(vc);
+ }
+
+ pr_info("Console: switching ");
+ if (!deflt)
+ printk("consoles %d-%d ", first+1, last+1);
+ if (j >= 0) {
+ struct vc_data *vc = vc_cons[j].d;
+
+ printk("to %s %s %dx%d\n",
+ vc->vc_can_do_color ? "colour" : "mono",
+ desc, vc->vc_cols, vc->vc_rows);
+
+ if (k >= 0) {
+ vc = vc_cons[k].d;
+ update_screen(vc);
+ }
+ } else
+ printk("to %s\n", desc);
+
+ retval = 0;
+err:
+ console_unlock();
+ module_put(owner);
+ return retval;
+};
+
+#ifdef CONFIG_VT_HW_CONSOLE_BINDING
+static int con_is_graphics(const struct consw *csw, int first, int last)
+{
+ int i, retval = 0;
+
+ for (i = first; i <= last; i++) {
+ struct vc_data *vc = vc_cons[i].d;
+
+ if (vc && vc->vc_mode == KD_GRAPHICS) {
+ retval = 1;
+ break;
+ }
+ }
+
+ return retval;
+}
+
+/**
+ * unbind_con_driver - unbind a console driver
+ * @csw: pointer to console driver to unregister
+ * @first: first in range of consoles that @csw should be unbound from
+ * @last: last in range of consoles that @csw should be unbound from
+ * @deflt: should next bound console driver be default after @csw is unbound?
+ *
+ * To unbind a driver from all possible consoles, pass 0 as @first and
+ * %MAX_NR_CONSOLES as @last.
+ *
+ * @deflt controls whether the console that ends up replacing @csw should be
+ * the default console.
+ *
+ * RETURNS:
+ * -ENODEV if @csw isn't a registered console driver or can't be unregistered
+ * or 0 on success.
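+ *
+ * Illustrative sketch only (hypothetical "mycon" consw previously bound
+ * with take_over_console()):
+ *
+ *	if (unbind_con_driver(&mycon, 0, MAX_NR_CONSOLES - 1, 1))
+ *		pr_warn("mycon: could not unbind\n");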
+ */
+int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+{
+ struct module *owner = csw->owner;
+ const struct consw *defcsw = NULL;
+ struct con_driver *con_driver = NULL, *con_back = NULL;
+ int i, retval = -ENODEV;
+
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+ console_lock();
+
+ /* check if driver is registered and if it is unbindable */
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ con_driver = &registered_con_driver[i];
+
+ if (con_driver->con == csw &&
+ con_driver->flag & CON_DRIVER_FLAG_MODULE) {
+ retval = 0;
+ break;
+ }
+ }
+
+ if (retval) {
+ console_unlock();
+ goto err;
+ }
+
+ retval = -ENODEV;
+
+ /* check if backup driver exists */
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ con_back = &registered_con_driver[i];
+
+ if (con_back->con &&
+ !(con_back->flag & CON_DRIVER_FLAG_MODULE)) {
+ defcsw = con_back->con;
+ retval = 0;
+ break;
+ }
+ }
+
+ if (retval) {
+ console_unlock();
+ goto err;
+ }
+
+ if (!con_is_bound(csw)) {
+ console_unlock();
+ goto err;
+ }
+
+ first = max(first, con_driver->first);
+ last = min(last, con_driver->last);
+
+ for (i = first; i <= last; i++) {
+ if (con_driver_map[i] == csw) {
+ module_put(csw->owner);
+ con_driver_map[i] = NULL;
+ }
+ }
+
+ if (!con_is_bound(defcsw)) {
+ const struct consw *defconsw = conswitchp;
+
+ defcsw->con_startup();
+ con_back->flag |= CON_DRIVER_FLAG_INIT;
+ /*
+ * vgacon may change the default driver to point
+ * to dummycon, we restore it here...
+ */
+ conswitchp = defconsw;
+ }
+
+ if (!con_is_bound(csw))
+ con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
+
+ console_unlock();
+ /* ignore return value, binding should not fail */
+ bind_con_driver(defcsw, first, last, deflt);
+err:
+ module_put(owner);
+ return retval;
+
+}
+EXPORT_SYMBOL(unbind_con_driver);
+
+static int vt_bind(struct con_driver *con)
+{
+ const struct consw *defcsw = NULL, *csw = NULL;
+ int i, more = 1, first = -1, last = -1, deflt = 0;
+
+ if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) ||
+ con_is_graphics(con->con, con->first, con->last))
+ goto err;
+
+ csw = con->con;
+
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ struct con_driver *con = &registered_con_driver[i];
+
+ if (con->con && !(con->flag & CON_DRIVER_FLAG_MODULE)) {
+ defcsw = con->con;
+ break;
+ }
+ }
+
+ if (!defcsw)
+ goto err;
+
+ while (more) {
+ more = 0;
+
+ for (i = con->first; i <= con->last; i++) {
+ if (con_driver_map[i] == defcsw) {
+ if (first == -1)
+ first = i;
+ last = i;
+ more = 1;
+ } else if (first != -1)
+ break;
+ }
+
+ if (first == 0 && last == MAX_NR_CONSOLES -1)
+ deflt = 1;
+
+ if (first != -1)
+ bind_con_driver(csw, first, last, deflt);
+
+ first = -1;
+ last = -1;
+ deflt = 0;
+ }
+
+err:
+ return 0;
+}
+
+static int vt_unbind(struct con_driver *con)
+{
+ const struct consw *csw = NULL;
+ int i, more = 1, first = -1, last = -1, deflt = 0;
+
+ if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) ||
+ con_is_graphics(con->con, con->first, con->last))
+ goto err;
+
+ csw = con->con;
+
+ while (more) {
+ more = 0;
+
+ for (i = con->first; i <= con->last; i++) {
+ if (con_driver_map[i] == csw) {
+ if (first == -1)
+ first = i;
+ last = i;
+ more = 1;
+ } else if (first != -1)
+ break;
+ }
+
+ if (first == 0 && last == MAX_NR_CONSOLES -1)
+ deflt = 1;
+
+ if (first != -1)
+ unbind_con_driver(csw, first, last, deflt);
+
+ first = -1;
+ last = -1;
+ deflt = 0;
+ }
+
+err:
+ return 0;
+}
+#else
+static inline int vt_bind(struct con_driver *con)
+{
+ return 0;
+}
+static inline int vt_unbind(struct con_driver *con)
+{
+ return 0;
+}
+#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
+
+static ssize_t store_bind(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct con_driver *con = dev_get_drvdata(dev);
+ int bind = simple_strtoul(buf, NULL, 0);
+
+ if (bind)
+ vt_bind(con);
+ else
+ vt_unbind(con);
+
+ return count;
+}
+
+static ssize_t show_bind(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct con_driver *con = dev_get_drvdata(dev);
+ int bind = con_is_bound(con->con);
+
+ return snprintf(buf, PAGE_SIZE, "%i\n", bind);
+}
+
+static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct con_driver *con = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s %s\n",
+ (con->flag & CON_DRIVER_FLAG_MODULE) ? "(M)" : "(S)",
+ con->desc);
+
+}
+
+static struct device_attribute device_attrs[] = {
+ __ATTR(bind, S_IRUGO|S_IWUSR, show_bind, store_bind),
+ __ATTR(name, S_IRUGO, show_name, NULL),
+};
+
+static int vtconsole_init_device(struct con_driver *con)
+{
+ int i;
+ int error = 0;
+
+ con->flag |= CON_DRIVER_FLAG_ATTR;
+ dev_set_drvdata(con->dev, con);
+ for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+ error = device_create_file(con->dev, &device_attrs[i]);
+ if (error)
+ break;
+ }
+
+ if (error) {
+ while (--i >= 0)
+ device_remove_file(con->dev, &device_attrs[i]);
+ con->flag &= ~CON_DRIVER_FLAG_ATTR;
+ }
+
+ return error;
+}
+
+static void vtconsole_deinit_device(struct con_driver *con)
+{
+ int i;
+
+ if (con->flag & CON_DRIVER_FLAG_ATTR) {
+ for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
+ device_remove_file(con->dev, &device_attrs[i]);
+ con->flag &= ~CON_DRIVER_FLAG_ATTR;
+ }
+}
+
+/**
+ * con_is_bound - checks if driver is bound to the console
+ * @csw: console driver
+ *
+ * RETURNS: zero if unbound, nonzero if bound
+ *
+ * Drivers can call this and, if it returns zero, they should release
+ * all resources allocated by con_startup()
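+ *
+ * A typical use, as an illustrative sketch only ("mycon" and
+ * mycon_free_resources() are hypothetical):
+ *
+ *	if (!con_is_bound(&mycon))
+ *		mycon_free_resources();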
+ */
+int con_is_bound(const struct consw *csw)
+{
+ int i, bound = 0;
+
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ if (con_driver_map[i] == csw) {
+ bound = 1;
+ break;
+ }
+ }
+
+ return bound;
+}
+EXPORT_SYMBOL(con_is_bound);
+
+/**
+ * con_debug_enter - prepare the console for the kernel debugger
+ * @vc: virtual console
+ *
+ * Called when the console is taken over by the kernel debugger, this
+ * function needs to save the current console state, then put the console
+ * into a state suitable for the kernel debugger.
+ *
+ * RETURNS:
+ * Zero on success, nonzero if a failure occurred when trying to prepare
+ * the console for the debugger.
+ */
+int con_debug_enter(struct vc_data *vc)
+{
+ int ret = 0;
+
+ saved_fg_console = fg_console;
+ saved_last_console = last_console;
+ saved_want_console = want_console;
+ saved_vc_mode = vc->vc_mode;
+ saved_console_blanked = console_blanked;
+ vc->vc_mode = KD_TEXT;
+ console_blanked = 0;
+ if (vc->vc_sw->con_debug_enter)
+ ret = vc->vc_sw->con_debug_enter(vc);
+#ifdef CONFIG_KGDB_KDB
+ /* Set the initial LINES variable if it is not already set */
+ if (vc->vc_rows < 999) {
+ int linecount;
+ char lns[4];
+ const char *setargs[3] = {
+ "set",
+ "LINES",
+ lns,
+ };
+ if (kdbgetintenv(setargs[0], &linecount)) {
+ snprintf(lns, 4, "%i", vc->vc_rows);
+ kdb_set(2, setargs);
+ }
+ }
+#endif /* CONFIG_KGDB_KDB */
+ return ret;
+}
+EXPORT_SYMBOL_GPL(con_debug_enter);
+
+/**
+ * con_debug_leave - restore console state
+ *
+ * Restore the console state to what it was before the kernel debugger
+ * was invoked.
+ *
+ * RETURNS:
+ * Zero on success, nonzero if a failure occurred when trying to restore
+ * the console.
+ */
+int con_debug_leave(void)
+{
+ struct vc_data *vc;
+ int ret = 0;
+
+ fg_console = saved_fg_console;
+ last_console = saved_last_console;
+ want_console = saved_want_console;
+ console_blanked = saved_console_blanked;
+ vc_cons[fg_console].d->vc_mode = saved_vc_mode;
+
+ vc = vc_cons[fg_console].d;
+ if (vc->vc_sw->con_debug_leave)
+ ret = vc->vc_sw->con_debug_leave(vc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(con_debug_leave);
+
+/**
+ * register_con_driver - register console driver to console layer
+ * @csw: console driver
+ * @first: the first console to take over, minimum value is 0
+ * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
+ *
+ * DESCRIPTION: This function registers a console driver which can later
+ * bind to a range of consoles specified by @first and @last. It will
+ * also initialize the console driver by calling con_startup().
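+ *
+ * Illustrative sketch for a hypothetical modular driver "mycon" (most
+ * drivers instead call take_over_console(), which wraps this and the bind):
+ *
+ *	int err = register_con_driver(&mycon, 0, MAX_NR_CONSOLES - 1);
+ *	if (err)
+ *		return err;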
+ */
+int register_con_driver(const struct consw *csw, int first, int last)
+{
+ struct module *owner = csw->owner;
+ struct con_driver *con_driver;
+ const char *desc;
+ int i, retval = 0;
+
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+ console_lock();
+
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ con_driver = &registered_con_driver[i];
+
+ /* already registered */
+ if (con_driver->con == csw)
+ retval = -EBUSY;
+ }
+
+ if (retval)
+ goto err;
+
+ desc = csw->con_startup();
+
+ if (!desc)
+ goto err;
+
+ retval = -EINVAL;
+
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ con_driver = &registered_con_driver[i];
+
+ if (con_driver->con == NULL) {
+ con_driver->con = csw;
+ con_driver->desc = desc;
+ con_driver->node = i;
+ con_driver->flag = CON_DRIVER_FLAG_MODULE |
+ CON_DRIVER_FLAG_INIT;
+ con_driver->first = first;
+ con_driver->last = last;
+ retval = 0;
+ break;
+ }
+ }
+
+ if (retval)
+ goto err;
+
+ con_driver->dev = device_create(vtconsole_class, NULL,
+ MKDEV(0, con_driver->node),
+ NULL, "vtcon%i",
+ con_driver->node);
+
+ if (IS_ERR(con_driver->dev)) {
+ printk(KERN_WARNING "Unable to create device for %s; "
+ "errno = %ld\n", con_driver->desc,
+ PTR_ERR(con_driver->dev));
+ con_driver->dev = NULL;
+ } else {
+ vtconsole_init_device(con_driver);
+ }
+
+err:
+ console_unlock();
+ module_put(owner);
+ return retval;
+}
+EXPORT_SYMBOL(register_con_driver);
+
+/**
+ * unregister_con_driver - unregister console driver from console layer
+ * @csw: console driver
+ *
+ * DESCRIPTION: All drivers that register with the console layer must
+ * call this function upon exit, or if the console driver is in a state
+ * where it won't be able to handle console services, such as the
+ * framebuffer console without loaded framebuffer drivers.
+ *
+ * The driver must unbind first prior to unregistration.
+ */
+int unregister_con_driver(const struct consw *csw)
+{
+ int i, retval = -ENODEV;
+
+ console_lock();
+
+ /* cannot unregister a bound driver */
+ if (con_is_bound(csw))
+ goto err;
+
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ struct con_driver *con_driver = &registered_con_driver[i];
+
+ if (con_driver->con == csw &&
+ con_driver->flag & CON_DRIVER_FLAG_MODULE) {
+ vtconsole_deinit_device(con_driver);
+ device_destroy(vtconsole_class,
+ MKDEV(0, con_driver->node));
+ con_driver->con = NULL;
+ con_driver->desc = NULL;
+ con_driver->dev = NULL;
+ con_driver->node = 0;
+ con_driver->flag = 0;
+ con_driver->first = 0;
+ con_driver->last = 0;
+ retval = 0;
+ break;
+ }
+ }
+err:
+ console_unlock();
+ return retval;
+}
+EXPORT_SYMBOL(unregister_con_driver);
+
+/*
+ * If we support more console drivers, this function is used
+ * when a driver wants to take over some existing consoles
+ * and become default driver for newly opened ones.
+ *
+ * take_over_console is basically register_con_driver() followed by bind_con_driver()
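+ *
+ * Illustrative sketch (hypothetical "mycon" driver taking over every
+ * console and becoming the default for newly opened ones):
+ *
+ *	int err = take_over_console(&mycon, 0, MAX_NR_CONSOLES - 1, 1);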
+ */
+int take_over_console(const struct consw *csw, int first, int last, int deflt)
+{
+ int err;
+
+ err = register_con_driver(csw, first, last);
+ /* if we get a busy error we still want to bind the console driver
+ * and return success, as we may have unbound the console driver
+ * but not unregistered it.
+ */
+ if (err == -EBUSY)
+ err = 0;
+ if (!err)
+ bind_con_driver(csw, first, last, deflt);
+
+ return err;
+}
+
+/*
+ * give_up_console is a wrapper to unregister_con_driver. It will only
+ * work if driver is fully unbound.
+ */
+void give_up_console(const struct consw *csw)
+{
+ unregister_con_driver(csw);
+}
+
+static int __init vtconsole_class_init(void)
+{
+ int i;
+
+ vtconsole_class = class_create(THIS_MODULE, "vtconsole");
+ if (IS_ERR(vtconsole_class)) {
+ printk(KERN_WARNING "Unable to create vt console class; "
+ "errno = %ld\n", PTR_ERR(vtconsole_class));
+ vtconsole_class = NULL;
+ }
+
+ /* Add system drivers to sysfs */
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ struct con_driver *con = &registered_con_driver[i];
+
+ if (con->con && !con->dev) {
+ con->dev = device_create(vtconsole_class, NULL,
+ MKDEV(0, con->node),
+ NULL, "vtcon%i",
+ con->node);
+
+ if (IS_ERR(con->dev)) {
+ printk(KERN_WARNING "Unable to create "
+ "device for %s; errno = %ld\n",
+ con->desc, PTR_ERR(con->dev));
+ con->dev = NULL;
+ } else {
+ vtconsole_init_device(con);
+ }
+ }
+ }
+
+ return 0;
+}
+postcore_initcall(vtconsole_class_init);
+
+#endif
+
+/*
+ * Screen blanking
+ */
+
+static int set_vesa_blanking(char __user *p)
+{
+ unsigned int mode;
+
+ if (get_user(mode, p + 1))
+ return -EFAULT;
+
+ vesa_blank_mode = (mode < 4) ? mode : 0;
+ return 0;
+}
+
+void do_blank_screen(int entering_gfx)
+{
+ struct vc_data *vc = vc_cons[fg_console].d;
+ int i;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ if (console_blanked) {
+ if (blank_state == blank_vesa_wait) {
+ blank_state = blank_off;
+ vc->vc_sw->con_blank(vc, vesa_blank_mode + 1, 0);
+ }
+ return;
+ }
+
+ /* entering graphics mode? */
+ if (entering_gfx) {
+ hide_cursor(vc);
+ save_screen(vc);
+ vc->vc_sw->con_blank(vc, -1, 1);
+ console_blanked = fg_console + 1;
+ blank_state = blank_off;
+ set_origin(vc);
+ return;
+ }
+
+ if (blank_state != blank_normal_wait)
+ return;
+ blank_state = blank_off;
+
+ /* don't blank graphics */
+ if (vc->vc_mode != KD_TEXT) {
+ console_blanked = fg_console + 1;
+ return;
+ }
+
+ hide_cursor(vc);
+ del_timer_sync(&console_timer);
+ blank_timer_expired = 0;
+
+ save_screen(vc);
+ /* In case we need to reset origin, blanking hook returns 1 */
+ i = vc->vc_sw->con_blank(vc, vesa_off_interval ? 1 : (vesa_blank_mode + 1), 0);
+ console_blanked = fg_console + 1;
+ if (i)
+ set_origin(vc);
+
+ if (console_blank_hook && console_blank_hook(1))
+ return;
+
+ if (vesa_off_interval && vesa_blank_mode) {
+ blank_state = blank_vesa_wait;
+ mod_timer(&console_timer, jiffies + vesa_off_interval);
+ }
+ vt_event_post(VT_EVENT_BLANK, vc->vc_num, vc->vc_num);
+}
+EXPORT_SYMBOL(do_blank_screen);
+
+/*
+ * Called by timer as well as from vt_console_driver
+ */
+void do_unblank_screen(int leaving_gfx)
+{
+ struct vc_data *vc;
+
+ /* This should now always be called from a "sane" (read: can schedule)
+ * context for the sake of the low level drivers, except in the special
+ * case of oops_in_progress
+ */
+ if (!oops_in_progress)
+ might_sleep();
+
+ WARN_CONSOLE_UNLOCKED();
+
+ ignore_poke = 0;
+ if (!console_blanked)
+ return;
+ if (!vc_cons_allocated(fg_console)) {
+ /* impossible */
+ pr_warning("unblank_screen: tty %d not allocated ??\n",
+ fg_console+1);
+ return;
+ }
+ vc = vc_cons[fg_console].d;
+ /* Try to unblank in oops case too */
+ if (vc->vc_mode != KD_TEXT && !vt_force_oops_output(vc))
+ return; /* but leave console_blanked != 0 */
+
+ if (blankinterval) {
+ mod_timer(&console_timer, jiffies + (blankinterval * HZ));
+ blank_state = blank_normal_wait;
+ }
+
+ console_blanked = 0;
+ if (vc->vc_sw->con_blank(vc, 0, leaving_gfx) || vt_force_oops_output(vc))
+ /* Low-level driver cannot restore -> do it ourselves */
+ update_screen(vc);
+ if (console_blank_hook)
+ console_blank_hook(0);
+ set_palette(vc);
+ set_cursor(vc);
+ vt_event_post(VT_EVENT_UNBLANK, vc->vc_num, vc->vc_num);
+}
+EXPORT_SYMBOL(do_unblank_screen);
+
+/*
+ * This is called by the outside world to cause a forced unblank, mostly for
+ * oopses. Currently, I just call do_unblank_screen(0), but we could eventually
+ * call it with 1 as an argument and so force a mode restore... that may kill
+ * X or at least garbage the screen but would also make the Oops visible...
+ */
+void unblank_screen(void)
+{
+ do_unblank_screen(0);
+}
+
+/*
+ * We defer the timer blanking to a work queue so it can take the console mutex
+ * (console operations can still happen at irq time, but only from printk,
+ * which has the console mutex). Not perfect yet, but better than no locking.
+ */
+static void blank_screen_t(unsigned long dummy)
+{
+ if (unlikely(!keventd_up())) {
+ mod_timer(&console_timer, jiffies + (blankinterval * HZ));
+ return;
+ }
+ blank_timer_expired = 1;
+ schedule_work(&console_work);
+}
+
+void poke_blanked_console(void)
+{
+ WARN_CONSOLE_UNLOCKED();
+
+ /* Add this so we quickly catch whoever might call us in an unsafe
+ * context. Nowadays, unblank_screen() isn't to be called in
+ * atomic contexts and is allowed to schedule (with the special case
+ * of oops_in_progress, but that isn't of any concern for this
+ * function). --BenH.
+ */
+ might_sleep();
+
+ /* This isn't perfectly race free, but a race here would be mostly harmless:
+ * at worst, we'll do a spurious blank, and that is unlikely anyway
+ */
+ del_timer(&console_timer);
+ blank_timer_expired = 0;
+
+ if (ignore_poke || !vc_cons[fg_console].d || vc_cons[fg_console].d->vc_mode == KD_GRAPHICS)
+ return;
+ if (console_blanked)
+ unblank_screen();
+ else if (blankinterval) {
+ mod_timer(&console_timer, jiffies + (blankinterval * HZ));
+ blank_state = blank_normal_wait;
+ }
+}
+
+/*
+ * Palettes
+ */
+
+static void set_palette(struct vc_data *vc)
+{
+ WARN_CONSOLE_UNLOCKED();
+
+ if (vc->vc_mode != KD_GRAPHICS)
+ vc->vc_sw->con_set_palette(vc, color_table);
+}
+
+static int set_get_cmap(unsigned char __user *arg, int set)
+{
+ int i, j, k;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ for (i = 0; i < 16; i++)
+ if (set) {
+ get_user(default_red[i], arg++);
+ get_user(default_grn[i], arg++);
+ get_user(default_blu[i], arg++);
+ } else {
+ put_user(default_red[i], arg++);
+ put_user(default_grn[i], arg++);
+ put_user(default_blu[i], arg++);
+ }
+ if (set) {
+ for (i = 0; i < MAX_NR_CONSOLES; i++)
+ if (vc_cons_allocated(i)) {
+ for (j = k = 0; j < 16; j++) {
+ vc_cons[i].d->vc_palette[k++] = default_red[j];
+ vc_cons[i].d->vc_palette[k++] = default_grn[j];
+ vc_cons[i].d->vc_palette[k++] = default_blu[j];
+ }
+ set_palette(vc_cons[i].d);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Load palette into the DAC registers. arg points to a colour
+ * map, 3 bytes per colour, 16 colours, range from 0 to 255.
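+ *
+ * Illustrative userspace sketch of that layout (these helpers are reached
+ * via the GIO_CMAP/PIO_CMAP ioctls on a vt; fd is assumed to be an open,
+ * sufficiently privileged console descriptor): halve every palette entry.
+ *
+ *	unsigned char cmap[3 * 16];
+ *	int i;
+ *
+ *	ioctl(fd, GIO_CMAP, cmap);
+ *	for (i = 0; i < 3 * 16; i++)
+ *		cmap[i] /= 2;
+ *	ioctl(fd, PIO_CMAP, cmap);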
+ */
+
+int con_set_cmap(unsigned char __user *arg)
+{
+ int rc;
+
+ console_lock();
+ rc = set_get_cmap (arg,1);
+ console_unlock();
+
+ return rc;
+}
+
+int con_get_cmap(unsigned char __user *arg)
+{
+ int rc;
+
+ console_lock();
+ rc = set_get_cmap (arg,0);
+ console_unlock();
+
+ return rc;
+}
+
+void reset_palette(struct vc_data *vc)
+{
+ int j, k;
+ for (j=k=0; j<16; j++) {
+ vc->vc_palette[k++] = default_red[j];
+ vc->vc_palette[k++] = default_grn[j];
+ vc->vc_palette[k++] = default_blu[j];
+ }
+ set_palette(vc);
+}
+
+/*
+ * Font switching
+ *
+ * Currently we only support fonts up to 32 pixels wide, at a maximum height
+ * of 32 pixels. Userspace fontdata is stored with 32 bytes (shorts/ints,
+ * depending on width) reserved for each character, which is rather wasteful,
+ * but this is done in order to maintain compatibility with the EGA/VGA fonts.
+ * It is up to the actual low-level console driver to convert the data into
+ * its favourite format (maybe we should add a `fontoffset' field to the
+ * `display' structure so we won't have to convert the fontdata all the time).
+ * /Jes
+ */
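+
+/*
+ * Illustrative arithmetic for the fixed layout described above: each glyph
+ * occupies 32 bytes per 8 pixels of width, whatever its height, so
+ *
+ *	bytes_per_glyph = (width + 7) / 8 * 32;
+ *	buffer_size     = bytes_per_glyph * charcount;	(must be <= max_font_size)
+ *
+ * e.g. a 512-character 8x16 font is transferred as 512 * 32 = 16384 bytes
+ * even though only 16 bytes per glyph carry pixel data.
+ */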
+
+#define max_font_size 65536
+
+static int con_font_get(struct vc_data *vc, struct console_font_op *op)
+{
+ struct console_font font;
+ int rc = -EINVAL;
+ int c;
+
+ if (vc->vc_mode != KD_TEXT)
+ return -EINVAL;
+
+ if (op->data) {
+ font.data = kmalloc(max_font_size, GFP_KERNEL);
+ if (!font.data)
+ return -ENOMEM;
+ } else
+ font.data = NULL;
+
+ console_lock();
+ if (vc->vc_sw->con_font_get)
+ rc = vc->vc_sw->con_font_get(vc, &font);
+ else
+ rc = -ENOSYS;
+ console_unlock();
+
+ if (rc)
+ goto out;
+
+ c = (font.width+7)/8 * 32 * font.charcount;
+
+ if (op->data && font.charcount > op->charcount)
+ rc = -ENOSPC;
+ if (!(op->flags & KD_FONT_FLAG_OLD)) {
+ if (font.width > op->width || font.height > op->height)
+ rc = -ENOSPC;
+ } else {
+ if (font.width != 8)
+ rc = -EIO;
+ else if ((op->height && font.height > op->height) ||
+ font.height > 32)
+ rc = -ENOSPC;
+ }
+ if (rc)
+ goto out;
+
+ op->height = font.height;
+ op->width = font.width;
+ op->charcount = font.charcount;
+
+ if (op->data && copy_to_user(op->data, font.data, c))
+ rc = -EFAULT;
+
+out:
+ kfree(font.data);
+ return rc;
+}
+
+static int con_font_set(struct vc_data *vc, struct console_font_op *op)
+{
+ struct console_font font;
+ int rc = -EINVAL;
+ int size;
+
+ if (vc->vc_mode != KD_TEXT)
+ return -EINVAL;
+ if (!op->data)
+ return -EINVAL;
+ if (op->charcount > 512)
+ return -EINVAL;
+ if (!op->height) { /* Need to guess font height [compat] */
+ int h, i;
+ u8 __user *charmap = op->data;
+ u8 tmp;
+
+ /* If from KDFONTOP ioctl, don't allow things which can be done in userland,
+ so that we can get rid of this soon */
+ if (!(op->flags & KD_FONT_FLAG_OLD))
+ return -EINVAL;
+ for (h = 32; h > 0; h--)
+ for (i = 0; i < op->charcount; i++) {
+ if (get_user(tmp, &charmap[32*i+h-1]))
+ return -EFAULT;
+ if (tmp)
+ goto nonzero;
+ }
+ return -EINVAL;
+ nonzero:
+ op->height = h;
+ }
+ if (op->width <= 0 || op->width > 32 || op->height > 32)
+ return -EINVAL;
+ size = (op->width+7)/8 * 32 * op->charcount;
+ if (size > max_font_size)
+ return -ENOSPC;
+ font.charcount = op->charcount;
+ font.height = op->height;
+ font.width = op->width;
+ font.data = memdup_user(op->data, size);
+ if (IS_ERR(font.data))
+ return PTR_ERR(font.data);
+ console_lock();
+ if (vc->vc_sw->con_font_set)
+ rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
+ else
+ rc = -ENOSYS;
+ console_unlock();
+ kfree(font.data);
+ return rc;
+}
+
+static int con_font_default(struct vc_data *vc, struct console_font_op *op)
+{
+ struct console_font font = {.width = op->width, .height = op->height};
+ char name[MAX_FONT_NAME];
+ char *s = name;
+ int rc;
+
+ if (vc->vc_mode != KD_TEXT)
+ return -EINVAL;
+
+ if (!op->data)
+ s = NULL;
+ else if (strncpy_from_user(name, op->data, MAX_FONT_NAME - 1) < 0)
+ return -EFAULT;
+ else
+ name[MAX_FONT_NAME - 1] = 0;
+
+ console_lock();
+ if (vc->vc_sw->con_font_default)
+ rc = vc->vc_sw->con_font_default(vc, &font, s);
+ else
+ rc = -ENOSYS;
+ console_unlock();
+ if (!rc) {
+ op->width = font.width;
+ op->height = font.height;
+ }
+ return rc;
+}
+
+static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
+{
+ int con = op->height;
+ int rc;
+
+ if (vc->vc_mode != KD_TEXT)
+ return -EINVAL;
+
+ console_lock();
+ if (!vc->vc_sw->con_font_copy)
+ rc = -ENOSYS;
+ else if (con < 0 || !vc_cons_allocated(con))
+ rc = -ENOTTY;
+ else if (con == vc->vc_num) /* nothing to do */
+ rc = 0;
+ else
+ rc = vc->vc_sw->con_font_copy(vc, con);
+ console_unlock();
+ return rc;
+}
+
+int con_font_op(struct vc_data *vc, struct console_font_op *op)
+{
+ switch (op->op) {
+ case KD_FONT_OP_SET:
+ return con_font_set(vc, op);
+ case KD_FONT_OP_GET:
+ return con_font_get(vc, op);
+ case KD_FONT_OP_SET_DEFAULT:
+ return con_font_default(vc, op);
+ case KD_FONT_OP_COPY:
+ return con_font_copy(vc, op);
+ }
+ return -ENOSYS;
+}
+
+/*
+ * Interface exported to selection and vcs.
+ */
+
+/* used by selection */
+u16 screen_glyph(struct vc_data *vc, int offset)
+{
+ u16 w = scr_readw(screenpos(vc, offset, 1));
+ u16 c = w & 0xff;
+
+ if (w & vc->vc_hi_font_mask)
+ c |= 0x100;
+ return c;
+}
+EXPORT_SYMBOL_GPL(screen_glyph);
+
+/* used by vcs - note the word offset */
+unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed)
+{
+ return screenpos(vc, 2 * w_offset, viewed);
+}
+
+void getconsxy(struct vc_data *vc, unsigned char *p)
+{
+ p[0] = vc->vc_x;
+ p[1] = vc->vc_y;
+}
+
+void putconsxy(struct vc_data *vc, unsigned char *p)
+{
+ hide_cursor(vc);
+ gotoxy(vc, p[0], p[1]);
+ set_cursor(vc);
+}
+
+u16 vcs_scr_readw(struct vc_data *vc, const u16 *org)
+{
+ if ((unsigned long)org == vc->vc_pos && softcursor_original != -1)
+ return softcursor_original;
+ return scr_readw(org);
+}
+
+void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org)
+{
+ scr_writew(val, org);
+ if ((unsigned long)org == vc->vc_pos) {
+ softcursor_original = -1;
+ add_softcursor(vc);
+ }
+}
+
+void vcs_scr_updated(struct vc_data *vc)
+{
+ notify_update(vc);
+}
+
+/*
+ * Visible symbols for modules
+ */
+
+EXPORT_SYMBOL(color_table);
+EXPORT_SYMBOL(default_red);
+EXPORT_SYMBOL(default_grn);
+EXPORT_SYMBOL(default_blu);
+EXPORT_SYMBOL(update_region);
+EXPORT_SYMBOL(redraw_screen);
+EXPORT_SYMBOL(vc_resize);
+EXPORT_SYMBOL(fg_console);
+EXPORT_SYMBOL(console_blank_hook);
+EXPORT_SYMBOL(console_blanked);
+EXPORT_SYMBOL(vc_cons);
+EXPORT_SYMBOL(global_cursor_default);
+#ifndef VT_SINGLE_DRIVER
+EXPORT_SYMBOL(take_over_console);
+EXPORT_SYMBOL(give_up_console);
+#endif
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
new file mode 100644
index 00000000..65447c5f
--- /dev/null
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -0,0 +1,1799 @@
+/*
+ * Copyright (C) 1992 obz under the linux copyright
+ *
+ * Dynamic diacritical handling - aeb@cwi.nl - Dec 1993
+ * Dynamic keymap and string allocation - aeb@cwi.nl - May 1994
+ * Restrict VT switching via ioctl() - grif@cs.ucr.edu - Dec 1995
+ * Some code moved for less code duplication - Andi Kleen - Mar 1997
+ * Check put/get_user, cleanups - acme@conectiva.com.br - Jun 2001
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/compat.h>
+#include <linux/module.h>
+#include <linux/kd.h>
+#include <linux/vt.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/console.h>
+#include <linux/consolemap.h>
+#include <linux/signal.h>
+#include <linux/timex.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include <linux/kbd_kern.h>
+#include <linux/vt_kern.h>
+#include <linux/kbd_diacr.h>
+#include <linux/selection.h>
+
+char vt_dont_switch;
+extern struct tty_driver *console_driver;
+
+#define VT_IS_IN_USE(i) (console_driver->ttys[i] && console_driver->ttys[i]->count)
+#define VT_BUSY(i) (VT_IS_IN_USE(i) || i == fg_console || vc_cons[i].d == sel_cons)
+
+/*
+ * Console (vt and kd) routines, as defined by USL SVR4 manual, and by
+ * experimentation and study of X386 SYSV handling.
+ *
+ * One point of difference: SYSV vts are /dev/vtX, where X >= 0, and
+ * /dev/console is a separate ttyp. Under Linux, /dev/tty0 is /dev/console,
+ * and the vcs start at /dev/ttyX, X >= 1. We maintain that here, so we will
+ * always treat our set of vts as numbered 1..MAX_NR_CONSOLES (corresponding to
+ * ttys 0..MAX_NR_CONSOLES-1). Explicitly naming VT 0 is illegal, but using
+ * /dev/tty0 (fg_console) as a target is legal, since an implicit aliasing
+ * to the current console is done by the main ioctl code.
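+ *
+ * Illustrative userspace sketch of this numbering (fd is assumed to be an
+ * open descriptor on /dev/tty0 with sufficient permissions): switch to
+ * VT 2 and wait until the switch has completed.
+ *
+ *	ioctl(fd, VT_ACTIVATE, 2);
+ *	ioctl(fd, VT_WAITACTIVE, 2);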
+ */
+
+#ifdef CONFIG_X86
+#include <linux/syscalls.h>
+#endif
+
+static void complete_change_console(struct vc_data *vc);
+
+/*
+ * User space VT_EVENT handlers
+ */
+
+struct vt_event_wait {
+ struct list_head list;
+ struct vt_event event;
+ int done;
+};
+
+static LIST_HEAD(vt_events);
+static DEFINE_SPINLOCK(vt_event_lock);
+static DECLARE_WAIT_QUEUE_HEAD(vt_event_waitqueue);
+
+/**
+ * vt_event_post
+ * @event: the event that occurred
+ * @old: old console
+ * @new: new console
+ *
+ * Post a VT event to interested VT handlers
+ */
+
+void vt_event_post(unsigned int event, unsigned int old, unsigned int new)
+{
+ struct list_head *pos, *head;
+ unsigned long flags;
+ int wake = 0;
+
+ spin_lock_irqsave(&vt_event_lock, flags);
+ head = &vt_events;
+
+ list_for_each(pos, head) {
+ struct vt_event_wait *ve = list_entry(pos,
+ struct vt_event_wait, list);
+ if (!(ve->event.event & event))
+ continue;
+ ve->event.event = event;
+ /* kernel view is consoles 0..n-1, user space view is
+ console 1..n with 0 meaning current, so we must bias */
+ ve->event.oldev = old + 1;
+ ve->event.newev = new + 1;
+ wake = 1;
+ ve->done = 1;
+ }
+ spin_unlock_irqrestore(&vt_event_lock, flags);
+ if (wake)
+ wake_up_interruptible(&vt_event_waitqueue);
+}
+
+/**
+ * vt_event_wait - wait for an event
+ * @vw: our event
+ *
+ * Waits for an event to occur which completes our vt_event_wait
+ * structure. On return the structure has vw->done set to 1 for success
+ * or 0 if some event such as a signal ended the wait.
+ */
+
+static void vt_event_wait(struct vt_event_wait *vw)
+{
+ unsigned long flags;
+ /* Prepare the event */
+ INIT_LIST_HEAD(&vw->list);
+ vw->done = 0;
+ /* Queue our event */
+ spin_lock_irqsave(&vt_event_lock, flags);
+ list_add(&vw->list, &vt_events);
+ spin_unlock_irqrestore(&vt_event_lock, flags);
+ /* Wait for it to pass */
+ wait_event_interruptible_tty(vt_event_waitqueue, vw->done);
+ /* Dequeue it */
+ spin_lock_irqsave(&vt_event_lock, flags);
+ list_del(&vw->list);
+ spin_unlock_irqrestore(&vt_event_lock, flags);
+}
+
+/**
+ * vt_event_wait_ioctl - event ioctl handler
+ * @arg: argument to ioctl
+ *
+ * Implement the VT_WAITEVENT ioctl using the VT event interface
+ */
+
+static int vt_event_wait_ioctl(struct vt_event __user *event)
+{
+ struct vt_event_wait vw;
+
+ if (copy_from_user(&vw.event, event, sizeof(struct vt_event)))
+ return -EFAULT;
+ /* Highest supported event for now */
+ if (vw.event.event & ~VT_MAX_EVENT)
+ return -EINVAL;
+
+ vt_event_wait(&vw);
+ /* If it occurred report it */
+ if (vw.done) {
+ if (copy_to_user(event, &vw.event, sizeof(struct vt_event)))
+ return -EFAULT;
+ return 0;
+ }
+ return -EINTR;
+}
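+
+/*
+ * Illustrative userspace sketch of the interface above (not part of this
+ * file): block until the next console switch and report the new console.
+ *
+ *	struct vt_event ev = { .event = VT_EVENT_SWITCH };
+ *
+ *	if (ioctl(fd, VT_WAITEVENT, &ev) == 0)
+ *		printf("switched to VT %u\n", ev.newev);
+ */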
+
+/**
+ * vt_waitactive - active console wait
+ * @n: new console
+ *
+ * Helper for event waits. Used to implement the legacy
+ * event waiting ioctls in terms of events.
+ */
+
+int vt_waitactive(int n)
+{
+ struct vt_event_wait vw;
+ do {
+ if (n == fg_console + 1)
+ break;
+ vw.event.event = VT_EVENT_SWITCH;
+ vt_event_wait(&vw);
+ if (vw.done == 0)
+ return -EINTR;
+ } while (vw.event.newev != n);
+ return 0;
+}
+
+/*
+ * these are the valid i/o ports we're allowed to change. they map all the
+ * video ports
+ */
+#define GPFIRST 0x3b4
+#define GPLAST 0x3df
+#define GPNUM (GPLAST - GPFIRST + 1)
+
+#define i (tmp.kb_index)
+#define s (tmp.kb_table)
+#define v (tmp.kb_value)
+static inline int
+do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_struct *kbd)
+{
+ struct kbentry tmp;
+ ushort *key_map, val, ov;
+
+ if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
+ return -EFAULT;
+
+ if (!capable(CAP_SYS_TTY_CONFIG))
+ perm = 0;
+
+ switch (cmd) {
+ case KDGKBENT:
+ key_map = key_maps[s];
+ if (key_map) {
+ val = U(key_map[i]);
+ if (kbd->kbdmode != VC_UNICODE && KTYP(val) >= NR_TYPES)
+ val = K_HOLE;
+ } else
+ val = (i ? K_HOLE : K_NOSUCHMAP);
+ return put_user(val, &user_kbe->kb_value);
+ case KDSKBENT:
+ if (!perm)
+ return -EPERM;
+ if (!i && v == K_NOSUCHMAP) {
+ /* deallocate map */
+ key_map = key_maps[s];
+ if (s && key_map) {
+ key_maps[s] = NULL;
+ if (key_map[0] == U(K_ALLOCATED)) {
+ kfree(key_map);
+ keymap_count--;
+ }
+ }
+ break;
+ }
+
+ if (KTYP(v) < NR_TYPES) {
+ if (KVAL(v) > max_vals[KTYP(v)])
+ return -EINVAL;
+ } else
+ if (kbd->kbdmode != VC_UNICODE)
+ return -EINVAL;
+
+ /* ++Geert: non-PC keyboards may generate keycode zero */
+#if !defined(__mc68000__) && !defined(__powerpc__)
+ /* assignment to entry 0 only tests validity of args */
+ if (!i)
+ break;
+#endif
+
+ if (!(key_map = key_maps[s])) {
+ int j;
+
+ if (keymap_count >= MAX_NR_OF_USER_KEYMAPS &&
+ !capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ key_map = kmalloc(sizeof(plain_map),
+ GFP_KERNEL);
+ if (!key_map)
+ return -ENOMEM;
+ key_maps[s] = key_map;
+ key_map[0] = U(K_ALLOCATED);
+ for (j = 1; j < NR_KEYS; j++)
+ key_map[j] = U(K_HOLE);
+ keymap_count++;
+ }
+ ov = U(key_map[i]);
+ if (v == ov)
+ break; /* nothing to do */
+ /*
+ * Attention Key.
+ */
+ if (((ov == K_SAK) || (v == K_SAK)) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ key_map[i] = U(v);
+ if (!s && (KTYP(ov) == KT_SHIFT || KTYP(v) == KT_SHIFT))
+ compute_shiftstate();
+ break;
+ }
+ return 0;
+}
+#undef i
+#undef s
+#undef v
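+
+/*
+ * Illustrative userspace sketch of KDGKBENT as handled above (fd is an
+ * open console descriptor; keycode 16 is just an arbitrary example): read
+ * the plain-map action bound to that keycode.
+ *
+ *	struct kbentry ke = { .kb_table = 0, .kb_index = 16 };
+ *
+ *	if (ioctl(fd, KDGKBENT, &ke) == 0)
+ *		printf("action 0x%x\n", ke.kb_value);
+ */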
+
+static inline int
+do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc, int perm)
+{
+ struct kbkeycode tmp;
+ int kc = 0;
+
+ if (copy_from_user(&tmp, user_kbkc, sizeof(struct kbkeycode)))
+ return -EFAULT;
+ switch (cmd) {
+ case KDGETKEYCODE:
+ kc = getkeycode(tmp.scancode);
+ if (kc >= 0)
+ kc = put_user(kc, &user_kbkc->keycode);
+ break;
+ case KDSETKEYCODE:
+ if (!perm)
+ return -EPERM;
+ kc = setkeycode(tmp.scancode, tmp.keycode);
+ break;
+ }
+ return kc;
+}
+
+static inline int
+do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
+{
+ struct kbsentry *kbs;
+ char *p;
+ u_char *q;
+ u_char __user *up;
+ int sz;
+ int delta;
+ char *first_free, *fj, *fnw;
+ int i, j, k;
+ int ret;
+
+ if (!capable(CAP_SYS_TTY_CONFIG))
+ perm = 0;
+
+ kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
+ if (!kbs) {
+ ret = -ENOMEM;
+ goto reterr;
+ }
+
+ /* we mostly copy too much here (512 bytes), but who cares ;) */
+ if (copy_from_user(kbs, user_kdgkb, sizeof(struct kbsentry))) {
+ ret = -EFAULT;
+ goto reterr;
+ }
+ kbs->kb_string[sizeof(kbs->kb_string)-1] = '\0';
+ i = kbs->kb_func;
+
+ switch (cmd) {
+ case KDGKBSENT:
+ sz = sizeof(kbs->kb_string) - 1; /* sz should have been
+ a struct member */
+ up = user_kdgkb->kb_string;
+ p = func_table[i];
+ if(p)
+ for ( ; *p && sz; p++, sz--)
+ if (put_user(*p, up++)) {
+ ret = -EFAULT;
+ goto reterr;
+ }
+ if (put_user('\0', up)) {
+ ret = -EFAULT;
+ goto reterr;
+ }
+ kfree(kbs);
+ return ((p && *p) ? -EOVERFLOW : 0);
+ case KDSKBSENT:
+ if (!perm) {
+ ret = -EPERM;
+ goto reterr;
+ }
+
+ q = func_table[i];
+ first_free = funcbufptr + (funcbufsize - funcbufleft);
+ for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
+ ;
+ if (j < MAX_NR_FUNC)
+ fj = func_table[j];
+ else
+ fj = first_free;
+
+ delta = (q ? -strlen(q) : 1) + strlen(kbs->kb_string);
+ if (delta <= funcbufleft) { /* it fits in current buf */
+ if (j < MAX_NR_FUNC) {
+ memmove(fj + delta, fj, first_free - fj);
+ for (k = j; k < MAX_NR_FUNC; k++)
+ if (func_table[k])
+ func_table[k] += delta;
+ }
+ if (!q)
+ func_table[i] = fj;
+ funcbufleft -= delta;
+ } else { /* allocate a larger buffer */
+ sz = 256;
+ while (sz < funcbufsize - funcbufleft + delta)
+ sz <<= 1;
+ fnw = kmalloc(sz, GFP_KERNEL);
+ if(!fnw) {
+ ret = -ENOMEM;
+ goto reterr;
+ }
+
+ if (!q)
+ func_table[i] = fj;
+ if (fj > funcbufptr)
+ memmove(fnw, funcbufptr, fj - funcbufptr);
+ for (k = 0; k < j; k++)
+ if (func_table[k])
+ func_table[k] = fnw + (func_table[k] - funcbufptr);
+
+ if (first_free > fj) {
+ memmove(fnw + (fj - funcbufptr) + delta, fj, first_free - fj);
+ for (k = j; k < MAX_NR_FUNC; k++)
+ if (func_table[k])
+ func_table[k] = fnw + (func_table[k] - funcbufptr) + delta;
+ }
+ if (funcbufptr != func_buf)
+ kfree(funcbufptr);
+ funcbufptr = fnw;
+ funcbufleft = funcbufleft - delta + sz - funcbufsize;
+ funcbufsize = sz;
+ }
+ strcpy(func_table[i], kbs->kb_string);
+ break;
+ }
+ ret = 0;
+reterr:
+ kfree(kbs);
+ return ret;
+}
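+
+/*
+ * Illustrative userspace sketch of KDSKBSENT as handled above (requires
+ * CAP_SYS_TTY_CONFIG; function string 0 corresponds to F1 in the default
+ * keymap): rebind F1 to emit "hello".
+ *
+ *	struct kbsentry s = { .kb_func = 0 };
+ *
+ *	strcpy((char *)s.kb_string, "hello");
+ *	ioctl(fd, KDSKBSENT, &s);
+ */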
+
+static inline int
+do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struct console_font_op *op)
+{
+ struct consolefontdesc cfdarg;
+ int i;
+
+ if (copy_from_user(&cfdarg, user_cfd, sizeof(struct consolefontdesc)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case PIO_FONTX:
+ if (!perm)
+ return -EPERM;
+ op->op = KD_FONT_OP_SET;
+ op->flags = KD_FONT_FLAG_OLD;
+ op->width = 8;
+ op->height = cfdarg.charheight;
+ op->charcount = cfdarg.charcount;
+ op->data = cfdarg.chardata;
+ return con_font_op(vc_cons[fg_console].d, op);
+ case GIO_FONTX: {
+ op->op = KD_FONT_OP_GET;
+ op->flags = KD_FONT_FLAG_OLD;
+ op->width = 8;
+ op->height = cfdarg.charheight;
+ op->charcount = cfdarg.charcount;
+ op->data = cfdarg.chardata;
+ i = con_font_op(vc_cons[fg_console].d, op);
+ if (i)
+ return i;
+ cfdarg.charheight = op->height;
+ cfdarg.charcount = op->charcount;
+ if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc)))
+ return -EFAULT;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static inline int
+do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_data *vc)
+{
+ struct unimapdesc tmp;
+
+ if (copy_from_user(&tmp, user_ud, sizeof tmp))
+ return -EFAULT;
+ if (tmp.entries)
+ if (!access_ok(VERIFY_WRITE, tmp.entries,
+ tmp.entry_ct*sizeof(struct unipair)))
+ return -EFAULT;
+ switch (cmd) {
+ case PIO_UNIMAP:
+ if (!perm)
+ return -EPERM;
+ return con_set_unimap(vc, tmp.entry_ct, tmp.entries);
+ case GIO_UNIMAP:
+ if (!perm && fg_console != vc->vc_num)
+ return -EPERM;
+ return con_get_unimap(vc, tmp.entry_ct, &(user_ud->entry_ct), tmp.entries);
+ }
+ return 0;
+}
+
+
+
+/*
+ * We handle the console-specific ioctl's here. We allow the
+ * capability to modify any console, not just the fg_console.
+ */
+int vt_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vc_data *vc = tty->driver_data;
+ struct console_font_op op; /* used in multiple places here */
+ struct kbd_struct * kbd;
+ unsigned int console;
+ unsigned char ucval;
+ unsigned int uival;
+ void __user *up = (void __user *)arg;
+ int i, perm;
+ int ret = 0;
+
+ console = vc->vc_num;
+
+ tty_lock();
+
+ if (!vc_cons_allocated(console)) { /* impossible? */
+ ret = -ENOIOCTLCMD;
+ goto out;
+ }
+
+
+ /*
+ * To have permissions to do most of the vt ioctls, we either have
+ * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
+ */
+ perm = 0;
+ if (current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG))
+ perm = 1;
+
+ kbd = kbd_table + console;
+ switch (cmd) {
+ case TIOCLINUX:
+ ret = tioclinux(tty, arg);
+ break;
+ case KIOCSOUND:
+ if (!perm)
+ goto eperm;
+ /*
+ * The use of PIT_TICK_RATE is historic, it used to be
+ * the platform-dependent CLOCK_TICK_RATE between 2.6.12
+ * and 2.6.36, which was a minor but unfortunate ABI
+ * change.
+ */
+ if (arg)
+ arg = PIT_TICK_RATE / arg;
+ kd_mksound(arg, 0);
+ break;
+
+ case KDMKTONE:
+ if (!perm)
+ goto eperm;
+ {
+ unsigned int ticks, count;
+
+ /*
+ * Generate the tone for the appropriate number of ticks.
+ * If the time is zero, turn off sound ourselves.
+ */
+ ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
+ count = ticks ? (arg & 0xffff) : 0;
+ if (count)
+ count = PIT_TICK_RATE / count;
+ kd_mksound(count, ticks);
+ break;
+ }
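+
+ /*
+ * Illustrative encoding of the KDMKTONE argument unpacked above, as seen
+ * from userspace (fd is an open console descriptor): the high word is the
+ * duration in milliseconds, the low word is the PIT counter value, i.e.
+ * roughly 1193180 divided by the desired frequency in Hz.  A 440 Hz beep
+ * lasting 250 ms would be
+ *
+ *	ioctl(fd, KDMKTONE, (250 << 16) | (1193180 / 440));
+ */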
+
+ case KDGKBTYPE:
+ /*
+ * this is naive.
+ */
+ ucval = KB_101;
+ goto setchar;
+
+ /*
+ * These cannot be implemented on any machine that implements
+ * ioperm() in user level (such as Alpha PCs) or not at all.
+ *
+ * XXX: you should never use these, just call ioperm directly..
+ */
+#ifdef CONFIG_X86
+ case KDADDIO:
+ case KDDELIO:
+ /*
+ * KDADDIO and KDDELIO may be able to add ports beyond what
+ * we reject here, but to be safe...
+ */
+ if (arg < GPFIRST || arg > GPLAST) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = sys_ioperm(arg, 1, (cmd == KDADDIO)) ? -ENXIO : 0;
+ break;
+
+ case KDENABIO:
+ case KDDISABIO:
+ ret = sys_ioperm(GPFIRST, GPNUM,
+ (cmd == KDENABIO)) ? -ENXIO : 0;
+ break;
+#endif
+
+ /* Linux m68k/i386 interface for setting the keyboard delay/repeat rate */
+
+ case KDKBDREP:
+ {
+ struct kbd_repeat kbrep;
+
+ if (!capable(CAP_SYS_TTY_CONFIG))
+ goto eperm;
+
+ if (copy_from_user(&kbrep, up, sizeof(struct kbd_repeat))) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = kbd_rate(&kbrep);
+ if (ret)
+ break;
+ if (copy_to_user(up, &kbrep, sizeof(struct kbd_repeat)))
+ ret = -EFAULT;
+ break;
+ }
+
+ case KDSETMODE:
+ /*
+ * currently, setting the mode from KD_TEXT to KD_GRAPHICS
+ * doesn't do a whole lot. I'm not sure if it should do any
+ * restoration of modes or what...
+ *
+ * XXX It should at least call into the driver, fbdev's definitely
+ * need to restore their engine state. --BenH
+ */
+ if (!perm)
+ goto eperm;
+ switch (arg) {
+ case KD_GRAPHICS:
+ break;
+ case KD_TEXT0:
+ case KD_TEXT1:
+ arg = KD_TEXT;
+ case KD_TEXT:
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ if (vc->vc_mode == (unsigned char) arg)
+ break;
+ vc->vc_mode = (unsigned char) arg;
+ if (console != fg_console)
+ break;
+ /*
+ * explicitly blank/unblank the screen if switching modes
+ */
+ console_lock();
+ if (arg == KD_TEXT)
+ do_unblank_screen(1);
+ else
+ do_blank_screen(1);
+ console_unlock();
+ break;
+
+ case KDGETMODE:
+ uival = vc->vc_mode;
+ goto setint;
+
+ case KDMAPDISP:
+ case KDUNMAPDISP:
+ /*
+ * these work like a combination of mmap and KDENABIO.
+ * this could be easily finished.
+ */
+ ret = -EINVAL;
+ break;
+
+ case KDSKBMODE:
+ if (!perm)
+ goto eperm;
+ switch(arg) {
+ case K_RAW:
+ kbd->kbdmode = VC_RAW;
+ break;
+ case K_MEDIUMRAW:
+ kbd->kbdmode = VC_MEDIUMRAW;
+ break;
+ case K_XLATE:
+ kbd->kbdmode = VC_XLATE;
+ compute_shiftstate();
+ break;
+ case K_UNICODE:
+ kbd->kbdmode = VC_UNICODE;
+ compute_shiftstate();
+ break;
+ case K_OFF:
+ kbd->kbdmode = VC_OFF;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ tty_ldisc_flush(tty);
+ break;
+
+ case KDGKBMODE:
+ switch (kbd->kbdmode) {
+ case VC_RAW:
+ uival = K_RAW;
+ break;
+ case VC_MEDIUMRAW:
+ uival = K_MEDIUMRAW;
+ break;
+ case VC_UNICODE:
+ uival = K_UNICODE;
+ break;
+ case VC_OFF:
+ uival = K_OFF;
+ break;
+ default:
+ uival = K_XLATE;
+ break;
+ }
+ goto setint;
+
+ /* this could be folded into KDSKBMODE, but for compatibility
+ reasons it is not so easy to fold KDGKBMETA into KDGKBMODE */
+ case KDSKBMETA:
+ switch(arg) {
+ case K_METABIT:
+ clr_vc_kbd_mode(kbd, VC_META);
+ break;
+ case K_ESCPREFIX:
+ set_vc_kbd_mode(kbd, VC_META);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+
+ case KDGKBMETA:
+ uival = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
+ setint:
+ ret = put_user(uival, (int __user *)arg);
+ break;
+
+ case KDGETKEYCODE:
+ case KDSETKEYCODE:
+ if(!capable(CAP_SYS_TTY_CONFIG))
+ perm = 0;
+ ret = do_kbkeycode_ioctl(cmd, up, perm);
+ break;
+
+ case KDGKBENT:
+ case KDSKBENT:
+ ret = do_kdsk_ioctl(cmd, up, perm, kbd);
+ break;
+
+ case KDGKBSENT:
+ case KDSKBSENT:
+ ret = do_kdgkb_ioctl(cmd, up, perm);
+ break;
+
+ case KDGKBDIACR:
+ {
+ struct kbdiacrs __user *a = up;
+ struct kbdiacr diacr;
+ int i;
+
+ if (put_user(accent_table_size, &a->kb_cnt)) {
+ ret = -EFAULT;
+ break;
+ }
+ for (i = 0; i < accent_table_size; i++) {
+ diacr.diacr = conv_uni_to_8bit(accent_table[i].diacr);
+ diacr.base = conv_uni_to_8bit(accent_table[i].base);
+ diacr.result = conv_uni_to_8bit(accent_table[i].result);
+ if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr))) {
+ ret = -EFAULT;
+ break;
+ }
+ }
+ break;
+ }
+ case KDGKBDIACRUC:
+ {
+ struct kbdiacrsuc __user *a = up;
+
+ if (put_user(accent_table_size, &a->kb_cnt))
+ ret = -EFAULT;
+ else if (copy_to_user(a->kbdiacruc, accent_table,
+ accent_table_size*sizeof(struct kbdiacruc)))
+ ret = -EFAULT;
+ break;
+ }
+
+ case KDSKBDIACR:
+ {
+ struct kbdiacrs __user *a = up;
+ struct kbdiacr diacr;
+ unsigned int ct;
+ int i;
+
+ if (!perm)
+ goto eperm;
+ if (get_user(ct,&a->kb_cnt)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (ct >= MAX_DIACR) {
+ ret = -EINVAL;
+ break;
+ }
+ accent_table_size = ct;
+ for (i = 0; i < ct; i++) {
+ if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr))) {
+ ret = -EFAULT;
+ break;
+ }
+ accent_table[i].diacr = conv_8bit_to_uni(diacr.diacr);
+ accent_table[i].base = conv_8bit_to_uni(diacr.base);
+ accent_table[i].result = conv_8bit_to_uni(diacr.result);
+ }
+ break;
+ }
+
+ case KDSKBDIACRUC:
+ {
+ struct kbdiacrsuc __user *a = up;
+ unsigned int ct;
+
+ if (!perm)
+ goto eperm;
+ if (get_user(ct,&a->kb_cnt)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (ct >= MAX_DIACR) {
+ ret = -EINVAL;
+ break;
+ }
+ accent_table_size = ct;
+ if (copy_from_user(accent_table, a->kbdiacruc, ct*sizeof(struct kbdiacruc)))
+ ret = -EFAULT;
+ break;
+ }
+
+ /* the ioctls below read/set the flags usually shown in the leds */
+ /* don't use them - they will go away without warning */
+ case KDGKBLED:
+ ucval = kbd->ledflagstate | (kbd->default_ledflagstate << 4);
+ goto setchar;
+
+ case KDSKBLED:
+ if (!perm)
+ goto eperm;
+ if (arg & ~0x77) {
+ ret = -EINVAL;
+ break;
+ }
+ kbd->ledflagstate = (arg & 7);
+ kbd->default_ledflagstate = ((arg >> 4) & 7);
+ set_leds();
+ break;
+
+ /* the ioctls below only set the lights, not the functions */
+ /* for those, see KDGKBLED and KDSKBLED above */
+ case KDGETLED:
+ ucval = getledstate();
+ setchar:
+ ret = put_user(ucval, (char __user *)arg);
+ break;
+
+ case KDSETLED:
+ if (!perm)
+ goto eperm;
+ setledstate(kbd, arg);
+ break;
+
+ /*
+ * A process can indicate its willingness to accept signals
+ * generated by pressing an appropriate key combination.
+ * Thus, one can have a daemon that e.g. spawns a new console
+ * upon a keypress and then changes to it.
+ * See also the kbrequest field of inittab(5).
+ */
+ case KDSIGACCEPT:
+ {
+ if (!perm || !capable(CAP_KILL))
+ goto eperm;
+ if (!valid_signal(arg) || arg < 1 || arg == SIGKILL)
+ ret = -EINVAL;
+ else {
+ spin_lock_irq(&vt_spawn_con.lock);
+ put_pid(vt_spawn_con.pid);
+ vt_spawn_con.pid = get_pid(task_pid(current));
+ vt_spawn_con.sig = arg;
+ spin_unlock_irq(&vt_spawn_con.lock);
+ }
+ break;
+ }
+
+ case VT_SETMODE:
+ {
+ struct vt_mode tmp;
+
+ if (!perm)
+ goto eperm;
+ if (copy_from_user(&tmp, up, sizeof(struct vt_mode))) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) {
+ ret = -EINVAL;
+ goto out;
+ }
+ console_lock();
+ vc->vt_mode = tmp;
+ /* the frsig is ignored, so we set it to 0 */
+ vc->vt_mode.frsig = 0;
+ put_pid(vc->vt_pid);
+ vc->vt_pid = get_pid(task_pid(current));
+ /* no switch is required -- saw@shade.msu.ru */
+ vc->vt_newvt = -1;
+ console_unlock();
+ break;
+ }
+
+ case VT_GETMODE:
+ {
+ struct vt_mode tmp;
+ int rc;
+
+ console_lock();
+ memcpy(&tmp, &vc->vt_mode, sizeof(struct vt_mode));
+ console_unlock();
+
+ rc = copy_to_user(up, &tmp, sizeof(struct vt_mode));
+ if (rc)
+ ret = -EFAULT;
+ break;
+ }
+
+ /*
+ * Returns global vt state. Note that VT 0 is always open, since
+ * it's an alias for the current VT, and people can't use it here.
+ * We cannot return state for more than 16 VTs, since v_state is short.
+ */
+ case VT_GETSTATE:
+ {
+ struct vt_stat __user *vtstat = up;
+ unsigned short state, mask;
+
+ if (put_user(fg_console + 1, &vtstat->v_active))
+ ret = -EFAULT;
+ else {
+ state = 1; /* /dev/tty0 is always open */
+ for (i = 0, mask = 2; i < MAX_NR_CONSOLES && mask;
+ ++i, mask <<= 1)
+ if (VT_IS_IN_USE(i))
+ state |= mask;
+ ret = put_user(state, &vtstat->v_state);
+ }
+ break;
+ }
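A small userspace sketch of reading that state word, assuming /dev/tty0 is accessible: bit n of v_state corresponds to VT n, and only the first 16 consoles are reported, as noted above.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vt.h>

int main(void)
{
	struct vt_stat st;
	int fd = open("/dev/tty0", O_RDONLY);

	if (fd < 0 || ioctl(fd, VT_GETSTATE, &st) < 0)
		return 1;
	printf("active VT: %d\n", st.v_active);
	for (int i = 1; i < 16; i++)		/* bit 0 is /dev/tty0 itself */
		if (st.v_state & (1 << i))
			printf("VT %d is in use\n", i);
	close(fd);
	return 0;
}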
+
+ /*
+ * Returns the first available (non-opened) console.
+ */
+ case VT_OPENQRY:
+ for (i = 0; i < MAX_NR_CONSOLES; ++i)
+ if (!VT_IS_IN_USE(i))
+ break;
+ uival = i < MAX_NR_CONSOLES ? (i+1) : -1;
+ goto setint;
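The usual userspace pattern built on this query is sketched below: ask VT_OPENQRY for the first unused VT, then open the matching /dev/ttyN. Paths and error handling are assumptions for the example.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vt.h>

int open_free_vt(void)
{
	char path[32];
	int n, fd0 = open("/dev/tty0", O_RDWR);

	if (fd0 < 0)
		return -1;
	if (ioctl(fd0, VT_OPENQRY, &n) < 0 || n < 1) {	/* n == -1 means no free console */
		close(fd0);
		return -1;
	}
	close(fd0);
	snprintf(path, sizeof(path), "/dev/tty%d", n);
	return open(path, O_RDWR);
}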
+
+ /*
+ * ioctl(fd, VT_ACTIVATE, num) will cause us to switch to vt # num,
+ * with num >= 1 (switches to vt 0, our console, are not allowed, just
+ * to preserve sanity).
+ */
+ case VT_ACTIVATE:
+ if (!perm)
+ goto eperm;
+ if (arg == 0 || arg > MAX_NR_CONSOLES)
+ ret = -ENXIO;
+ else {
+ arg--;
+ console_lock();
+ ret = vc_allocate(arg);
+ console_unlock();
+ if (ret)
+ break;
+ set_console(arg);
+ }
+ break;
+
+ case VT_SETACTIVATE:
+ {
+ struct vt_setactivate vsa;
+
+ if (!perm)
+ goto eperm;
+
+ if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg,
+ sizeof(struct vt_setactivate))) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
+ ret = -ENXIO;
+ else {
+ vsa.console--;
+ console_lock();
+ ret = vc_allocate(vsa.console);
+ if (ret == 0) {
+ struct vc_data *nvc;
+ /* This is safe providing we don't drop the
+ console sem between vc_allocate and
+ finishing referencing nvc */
+ nvc = vc_cons[vsa.console].d;
+ nvc->vt_mode = vsa.mode;
+ nvc->vt_mode.frsig = 0;
+ put_pid(nvc->vt_pid);
+ nvc->vt_pid = get_pid(task_pid(current));
+ }
+ console_unlock();
+ if (ret)
+ break;
+ /* Commence switch and lock */
+ set_console(vsa.console);
+ }
+ break;
+ }
+
+ /*
+ * wait until the specified VT has been activated
+ */
+ case VT_WAITACTIVE:
+ if (!perm)
+ goto eperm;
+ if (arg == 0 || arg > MAX_NR_CONSOLES)
+ ret = -ENXIO;
+ else
+ ret = vt_waitactive(arg);
+ break;
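A synchronous switch from userspace pairs the two ioctls above: VT_ACTIVATE requests the change and VT_WAITACTIVE blocks until it has completed. A hedged sketch, assuming access to /dev/tty0:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vt.h>

int switch_to_vt(int num)	/* num >= 1, as the VT_ACTIVATE comment requires */
{
	int fd = open("/dev/tty0", O_RDWR);
	int ret = -1;

	if (fd < 0)
		return -1;
	if (ioctl(fd, VT_ACTIVATE, num) == 0)
		ret = ioctl(fd, VT_WAITACTIVE, num);	/* blocks until the switch completes */
	close(fd);
	return ret;
}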
+
+ /*
+ * If the current vt is under process control, the kernel will not switch
+ * away from it immediately; it postpones the operation until the
+ * controlling process calls this ioctl, allowing the switch to complete.
+ *
+ * According to the X sources this is the behavior:
+ * 0: pending switch-from not OK
+ * 1: pending switch-from OK
+ * 2: completed switch-to OK
+ */
+ case VT_RELDISP:
+ if (!perm)
+ goto eperm;
+
+ if (vc->vt_mode.mode != VT_PROCESS) {
+ ret = -EINVAL;
+ break;
+ }
+ /*
+ * Switching-from response
+ */
+ console_lock();
+ if (vc->vt_newvt >= 0) {
+ if (arg == 0)
+ /*
+ * Switch disallowed, so forget we were trying
+ * to do it.
+ */
+ vc->vt_newvt = -1;
+
+ else {
+ /*
+ * The current vt has been released, so
+ * complete the switch.
+ */
+ int newvt;
+ newvt = vc->vt_newvt;
+ vc->vt_newvt = -1;
+ ret = vc_allocate(newvt);
+ if (ret) {
+ console_unlock();
+ break;
+ }
+ /*
+ * When we actually do the console switch,
+ * make sure we are atomic with respect to
+ * other console switches..
+ */
+ complete_change_console(vc_cons[newvt].d);
+ }
+ } else {
+ /*
+ * Switched-to response
+ */
+ /*
+ * If it's just an ACK, ignore it
+ */
+ if (arg != VT_ACKACQ)
+ ret = -EINVAL;
+ }
+ console_unlock();
+ break;
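The userspace half of that VT_PROCESS handshake looks roughly like the sketch below: a display server puts its VT under process control with VT_SETMODE, then answers the kernel's release/acquire signals with VT_RELDISP. The signal choices are assumptions; only VT_ACKACQ and the 0/1 release codes come from the handler above.

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vt.h>

static int vt_fd;

static void release_vt(int sig)		/* kernel asks us to let go of the VT */
{
	(void)sig;
	ioctl(vt_fd, VT_RELDISP, 1);	/* 1: pending switch-from OK */
}

static void acquire_vt(int sig)		/* we have been switched back to */
{
	(void)sig;
	ioctl(vt_fd, VT_RELDISP, VT_ACKACQ);	/* 2: completed switch-to OK */
}

int take_vt_control(int fd)
{
	struct vt_mode mode = {
		.mode = VT_PROCESS,
		.relsig = SIGUSR1,	/* assumed: sent before switching away */
		.acqsig = SIGUSR2,	/* assumed: sent after switching back */
	};

	vt_fd = fd;
	signal(SIGUSR1, release_vt);
	signal(SIGUSR2, acquire_vt);
	return ioctl(fd, VT_SETMODE, &mode);
}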
+
+ /*
+ * Disallocate memory associated with a VT (but leave VT1)
+ */
+ case VT_DISALLOCATE:
+ if (arg > MAX_NR_CONSOLES) {
+ ret = -ENXIO;
+ break;
+ }
+ if (arg == 0) {
+ /* deallocate all unused consoles, but leave 0 */
+ console_lock();
+ for (i = 1; i < MAX_NR_CONSOLES; i++)
+ if (!VT_BUSY(i))
+ vc_deallocate(i);
+ console_unlock();
+ } else {
+ /* deallocate a single console, if possible */
+ arg--;
+ if (VT_BUSY(arg))
+ ret = -EBUSY;
+ else if (arg) { /* leave 0 */
+ console_lock();
+ vc_deallocate(arg);
+ console_unlock();
+ }
+ }
+ break;
+
+ case VT_RESIZE:
+ {
+ struct vt_sizes __user *vtsizes = up;
+ struct vc_data *vc;
+
+ ushort ll, cc;
+ if (!perm)
+ goto eperm;
+ if (get_user(ll, &vtsizes->v_rows) ||
+ get_user(cc, &vtsizes->v_cols))
+ ret = -EFAULT;
+ else {
+ console_lock();
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ vc = vc_cons[i].d;
+
+ if (vc) {
+ vc->vc_resize_user = 1;
+ vc_resize(vc_cons[i].d, cc, ll);
+ }
+ }
+ console_unlock();
+ }
+ break;
+ }
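As the loop above shows, VT_RESIZE applies one row/column geometry to every allocated console at once. A hedged userspace sketch, with the device path as an assumption; v_scrollsize is left zero because the handler never reads it.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vt.h>

int resize_all_consoles(unsigned short rows, unsigned short cols)
{
	struct vt_sizes sz;
	int fd = open("/dev/tty0", O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	memset(&sz, 0, sizeof(sz));
	sz.v_rows = rows;
	sz.v_cols = cols;
	ret = ioctl(fd, VT_RESIZE, &sz);
	close(fd);
	return ret;
}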
+
+ case VT_RESIZEX:
+ {
+ struct vt_consize __user *vtconsize = up;
+ ushort ll, cc, vlin, clin, vcol, ccol;
+ if (!perm)
+ goto eperm;
+ if (!access_ok(VERIFY_READ, vtconsize,
+ sizeof(struct vt_consize))) {
+ ret = -EFAULT;
+ break;
+ }
+ /* FIXME: Should check the copies properly */
+ __get_user(ll, &vtconsize->v_rows);
+ __get_user(cc, &vtconsize->v_cols);
+ __get_user(vlin, &vtconsize->v_vlin);
+ __get_user(clin, &vtconsize->v_clin);
+ __get_user(vcol, &vtconsize->v_vcol);
+ __get_user(ccol, &vtconsize->v_ccol);
+ vlin = vlin ? vlin : vc->vc_scan_lines;
+ if (clin) {
+ if (ll) {
+ if (ll != vlin/clin) {
+ /* Parameters don't add up */
+ ret = -EINVAL;
+ break;
+ }
+ } else
+ ll = vlin/clin;
+ }
+ if (vcol && ccol) {
+ if (cc) {
+ if (cc != vcol/ccol) {
+ ret = -EINVAL;
+ break;
+ }
+ } else
+ cc = vcol/ccol;
+ }
+
+ if (clin > 32) {
+ ret = -EINVAL;
+ break;
+ }
+
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ if (!vc_cons[i].d)
+ continue;
+ console_lock();
+ if (vlin)
+ vc_cons[i].d->vc_scan_lines = vlin;
+ if (clin)
+ vc_cons[i].d->vc_font.height = clin;
+ vc_cons[i].d->vc_resize_user = 1;
+ vc_resize(vc_cons[i].d, cc, ll);
+ console_unlock();
+ }
+ break;
+ }
+
+ case PIO_FONT: {
+ if (!perm)
+ goto eperm;
+ op.op = KD_FONT_OP_SET;
+ op.flags = KD_FONT_FLAG_OLD | KD_FONT_FLAG_DONT_RECALC; /* Compatibility */
+ op.width = 8;
+ op.height = 0;
+ op.charcount = 256;
+ op.data = up;
+ ret = con_font_op(vc_cons[fg_console].d, &op);
+ break;
+ }
+
+ case GIO_FONT: {
+ op.op = KD_FONT_OP_GET;
+ op.flags = KD_FONT_FLAG_OLD;
+ op.width = 8;
+ op.height = 32;
+ op.charcount = 256;
+ op.data = up;
+ ret = con_font_op(vc_cons[fg_console].d, &op);
+ break;
+ }
+
+ case PIO_CMAP:
+ if (!perm)
+ ret = -EPERM;
+ else
+ ret = con_set_cmap(up);
+ break;
+
+ case GIO_CMAP:
+ ret = con_get_cmap(up);
+ break;
+
+ case PIO_FONTX:
+ case GIO_FONTX:
+ ret = do_fontx_ioctl(cmd, up, perm, &op);
+ break;
+
+ case PIO_FONTRESET:
+ {
+ if (!perm)
+ goto eperm;
+
+#ifdef BROKEN_GRAPHICS_PROGRAMS
+ /* With BROKEN_GRAPHICS_PROGRAMS defined, the default
+ font is not saved. */
+ ret = -ENOSYS;
+ break;
+#else
+ {
+ op.op = KD_FONT_OP_SET_DEFAULT;
+ op.data = NULL;
+ ret = con_font_op(vc_cons[fg_console].d, &op);
+ if (ret)
+ break;
+ con_set_default_unimap(vc_cons[fg_console].d);
+ break;
+ }
+#endif
+ }
+
+ case KDFONTOP: {
+ if (copy_from_user(&op, up, sizeof(op))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (!perm && op.op != KD_FONT_OP_GET)
+ goto eperm;
+ ret = con_font_op(vc, &op);
+ if (ret)
+ break;
+ if (copy_to_user(up, &op, sizeof(op)))
+ ret = -EFAULT;
+ break;
+ }
+
+ case PIO_SCRNMAP:
+ if (!perm)
+ ret = -EPERM;
+ else
+ ret = con_set_trans_old(up);
+ break;
+
+ case GIO_SCRNMAP:
+ ret = con_get_trans_old(up);
+ break;
+
+ case PIO_UNISCRNMAP:
+ if (!perm)
+ ret = -EPERM;
+ else
+ ret = con_set_trans_new(up);
+ break;
+
+ case GIO_UNISCRNMAP:
+ ret = con_get_trans_new(up);
+ break;
+
+ case PIO_UNIMAPCLR:
+ {
+ struct unimapinit ui;
+ if (!perm)
+ goto eperm;
+ ret = copy_from_user(&ui, up, sizeof(struct unimapinit));
+ if (ret)
+ ret = -EFAULT;
+ else
+ con_clear_unimap(vc, &ui);
+ break;
+ }
+
+ case PIO_UNIMAP:
+ case GIO_UNIMAP:
+ ret = do_unimap_ioctl(cmd, up, perm, vc);
+ break;
+
+ case VT_LOCKSWITCH:
+ if (!capable(CAP_SYS_TTY_CONFIG))
+ goto eperm;
+ vt_dont_switch = 1;
+ break;
+ case VT_UNLOCKSWITCH:
+ if (!capable(CAP_SYS_TTY_CONFIG))
+ goto eperm;
+ vt_dont_switch = 0;
+ break;
+ case VT_GETHIFONTMASK:
+ ret = put_user(vc->vc_hi_font_mask,
+ (unsigned short __user *)arg);
+ break;
+ case VT_WAITEVENT:
+ ret = vt_event_wait_ioctl((struct vt_event __user *)arg);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+out:
+ tty_unlock();
+ return ret;
+eperm:
+ ret = -EPERM;
+ goto out;
+}
+
+void reset_vc(struct vc_data *vc)
+{
+ vc->vc_mode = KD_TEXT;
+ kbd_table[vc->vc_num].kbdmode = default_utf8 ? VC_UNICODE : VC_XLATE;
+ vc->vt_mode.mode = VT_AUTO;
+ vc->vt_mode.waitv = 0;
+ vc->vt_mode.relsig = 0;
+ vc->vt_mode.acqsig = 0;
+ vc->vt_mode.frsig = 0;
+ put_pid(vc->vt_pid);
+ vc->vt_pid = NULL;
+ vc->vt_newvt = -1;
+ if (!in_interrupt()) /* Via keyboard.c:SAK() - akpm */
+ reset_palette(vc);
+}
+
+void vc_SAK(struct work_struct *work)
+{
+ struct vc *vc_con =
+ container_of(work, struct vc, SAK_work);
+ struct vc_data *vc;
+ struct tty_struct *tty;
+
+ console_lock();
+ vc = vc_con->d;
+ if (vc) {
+ tty = vc->port.tty;
+ /*
+ * SAK should also work in all raw modes and reset
+ * them properly.
+ */
+ if (tty)
+ __do_SAK(tty);
+ reset_vc(vc);
+ }
+ console_unlock();
+}
+
+#ifdef CONFIG_COMPAT
+
+struct compat_consolefontdesc {
+ unsigned short charcount; /* characters in font (256 or 512) */
+ unsigned short charheight; /* scan lines per character (1-32) */
+ compat_caddr_t chardata; /* font data in expanded form */
+};
+
+static inline int
+compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
+ int perm, struct console_font_op *op)
+{
+ struct compat_consolefontdesc cfdarg;
+ int i;
+
+ if (copy_from_user(&cfdarg, user_cfd, sizeof(struct compat_consolefontdesc)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case PIO_FONTX:
+ if (!perm)
+ return -EPERM;
+ op->op = KD_FONT_OP_SET;
+ op->flags = KD_FONT_FLAG_OLD;
+ op->width = 8;
+ op->height = cfdarg.charheight;
+ op->charcount = cfdarg.charcount;
+ op->data = compat_ptr(cfdarg.chardata);
+ return con_font_op(vc_cons[fg_console].d, op);
+ case GIO_FONTX:
+ op->op = KD_FONT_OP_GET;
+ op->flags = KD_FONT_FLAG_OLD;
+ op->width = 8;
+ op->height = cfdarg.charheight;
+ op->charcount = cfdarg.charcount;
+ op->data = compat_ptr(cfdarg.chardata);
+ i = con_font_op(vc_cons[fg_console].d, op);
+ if (i)
+ return i;
+ cfdarg.charheight = op->height;
+ cfdarg.charcount = op->charcount;
+ if (copy_to_user(user_cfd, &cfdarg, sizeof(struct compat_consolefontdesc)))
+ return -EFAULT;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+struct compat_console_font_op {
+ compat_uint_t op; /* operation code KD_FONT_OP_* */
+ compat_uint_t flags; /* KD_FONT_FLAG_* */
+ compat_uint_t width, height; /* font size */
+ compat_uint_t charcount;
+ compat_caddr_t data; /* font data with height fixed to 32 */
+};
+
+static inline int
+compat_kdfontop_ioctl(struct compat_console_font_op __user *fontop,
+ int perm, struct console_font_op *op, struct vc_data *vc)
+{
+ int i;
+
+ if (copy_from_user(op, fontop, sizeof(struct compat_console_font_op)))
+ return -EFAULT;
+ if (!perm && op->op != KD_FONT_OP_GET)
+ return -EPERM;
+ op->data = compat_ptr(((struct compat_console_font_op *)op)->data);
+ i = con_font_op(vc, op);
+ if (i)
+ return i;
+ ((struct compat_console_font_op *)op)->data = (unsigned long)op->data;
+ if (copy_to_user(fontop, op, sizeof(struct compat_console_font_op)))
+ return -EFAULT;
+ return 0;
+}
+
+struct compat_unimapdesc {
+ unsigned short entry_ct;
+ compat_caddr_t entries;
+};
+
+static inline int
+compat_unimap_ioctl(unsigned int cmd, struct compat_unimapdesc __user *user_ud,
+ int perm, struct vc_data *vc)
+{
+ struct compat_unimapdesc tmp;
+ struct unipair __user *tmp_entries;
+
+ if (copy_from_user(&tmp, user_ud, sizeof tmp))
+ return -EFAULT;
+ tmp_entries = compat_ptr(tmp.entries);
+ if (tmp_entries)
+ if (!access_ok(VERIFY_WRITE, tmp_entries,
+ tmp.entry_ct*sizeof(struct unipair)))
+ return -EFAULT;
+ switch (cmd) {
+ case PIO_UNIMAP:
+ if (!perm)
+ return -EPERM;
+ return con_set_unimap(vc, tmp.entry_ct, tmp_entries);
+ case GIO_UNIMAP:
+ if (!perm && fg_console != vc->vc_num)
+ return -EPERM;
+ return con_get_unimap(vc, tmp.entry_ct, &(user_ud->entry_ct), tmp_entries);
+ }
+ return 0;
+}
+
+long vt_compat_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vc_data *vc = tty->driver_data;
+ struct console_font_op op; /* used in multiple places here */
+ unsigned int console;
+ void __user *up = (void __user *)arg;
+ int perm;
+ int ret = 0;
+
+ console = vc->vc_num;
+
+ tty_lock();
+
+ if (!vc_cons_allocated(console)) { /* impossible? */
+ ret = -ENOIOCTLCMD;
+ goto out;
+ }
+
+ /*
+ * To have permissions to do most of the vt ioctls, we either have
+ * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
+ */
+ perm = 0;
+ if (current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG))
+ perm = 1;
+
+ switch (cmd) {
+ /*
+ * these need special handlers for incompatible data structures
+ */
+ case PIO_FONTX:
+ case GIO_FONTX:
+ ret = compat_fontx_ioctl(cmd, up, perm, &op);
+ break;
+
+ case KDFONTOP:
+ ret = compat_kdfontop_ioctl(up, perm, &op, vc);
+ break;
+
+ case PIO_UNIMAP:
+ case GIO_UNIMAP:
+ ret = compat_unimap_ioctl(cmd, up, perm, vc);
+ break;
+
+ /*
+ * all these treat 'arg' as an integer
+ */
+ case KIOCSOUND:
+ case KDMKTONE:
+#ifdef CONFIG_X86
+ case KDADDIO:
+ case KDDELIO:
+#endif
+ case KDSETMODE:
+ case KDMAPDISP:
+ case KDUNMAPDISP:
+ case KDSKBMODE:
+ case KDSKBMETA:
+ case KDSKBLED:
+ case KDSETLED:
+ case KDSIGACCEPT:
+ case VT_ACTIVATE:
+ case VT_WAITACTIVE:
+ case VT_RELDISP:
+ case VT_DISALLOCATE:
+ case VT_RESIZE:
+ case VT_RESIZEX:
+ goto fallback;
+
+ /*
+ * the rest has a compatible data structure behind arg,
+ * but we have to convert it to a proper 64 bit pointer.
+ */
+ default:
+ arg = (unsigned long)compat_ptr(arg);
+ goto fallback;
+ }
+out:
+ tty_unlock();
+ return ret;
+
+fallback:
+ tty_unlock();
+ return vt_ioctl(tty, cmd, arg);
+}
+
+
+#endif /* CONFIG_COMPAT */
+
+
+/*
+ * Performs the back end of a vt switch. Called under the console
+ * semaphore.
+ */
+static void complete_change_console(struct vc_data *vc)
+{
+ unsigned char old_vc_mode;
+ int old = fg_console;
+
+ last_console = fg_console;
+
+ /*
+ * If we're switching, we could be going from KD_GRAPHICS to
+ * KD_TEXT mode or vice versa, which means we need to blank or
+ * unblank the screen later.
+ */
+ old_vc_mode = vc_cons[fg_console].d->vc_mode;
+ switch_screen(vc);
+
+ /*
+ * This can't appear below a successful kill_pid(). If it did,
+ * then the *blank_screen operation could occur while X, having
+ * received acqsig, is waking up on another processor. This
+ * condition can lead to overlapping accesses to the VGA range
+ * and the framebuffer (causing system lockups).
+ *
+ * To account for this we duplicate this code below only if the
+ * controlling process is gone and we've called reset_vc.
+ */
+ if (old_vc_mode != vc->vc_mode) {
+ if (vc->vc_mode == KD_TEXT)
+ do_unblank_screen(1);
+ else
+ do_blank_screen(1);
+ }
+
+ /*
+ * If this new console is under process control, send it a signal
+ * telling it that it has acquired the console. Also check if it has died and
+ * clean up (similar to logic employed in change_console())
+ */
+ if (vc->vt_mode.mode == VT_PROCESS) {
+ /*
+ * Send the signal as privileged - kill_pid() will
+ * tell us if the process has gone or something else
+ * is awry
+ */
+ if (kill_pid(vc->vt_pid, vc->vt_mode.acqsig, 1) != 0) {
+ /*
+ * The controlling process has died, so we revert to
+ * normal operation. In this case, we'll also change back
+ * to KD_TEXT mode. I'm not sure if this is strictly correct
+ * but it saves the agony when the X server dies and the screen
+ * remains blanked due to KD_GRAPHICS! It would be nice to do
+ * this outside of VT_PROCESS but there is no single process
+ * to account for and tracking tty count may be undesirable.
+ */
+ reset_vc(vc);
+
+ if (old_vc_mode != vc->vc_mode) {
+ if (vc->vc_mode == KD_TEXT)
+ do_unblank_screen(1);
+ else
+ do_blank_screen(1);
+ }
+ }
+ }
+
+ /*
+ * Wake anyone waiting for their VT to activate
+ */
+ vt_event_post(VT_EVENT_SWITCH, old, vc->vc_num);
+ return;
+}
+
+/*
+ * Performs the front-end of a vt switch
+ */
+void change_console(struct vc_data *new_vc)
+{
+ struct vc_data *vc;
+
+ if (!new_vc || new_vc->vc_num == fg_console || vt_dont_switch)
+ return;
+
+ /*
+ * If this vt is in process mode, then we need to handshake with
+ * that process before switching. Essentially, we store where that
+ * vt wants to switch to and wait for it to tell us when it's done
+ * (via VT_RELDISP ioctl).
+ *
+ * We also check to see if the controlling process still exists.
+ * If it doesn't, we reset this vt to auto mode and continue.
+ * This is a cheap way to track process control. The worst thing
+ * that can happen is: we send a signal to a process, it dies, and
+ * the switch gets "lost" waiting for a response; hopefully, the
+ * user will try again, we'll detect the process is gone (unless
+ * the user waits just the right amount of time :-) and revert the
+ * vt to auto control.
+ */
+ vc = vc_cons[fg_console].d;
+ if (vc->vt_mode.mode == VT_PROCESS) {
+ /*
+ * Send the signal as privileged - kill_pid() will
+ * tell us if the process has gone or something else
+ * is awry.
+ *
+ * We need to set vt_newvt *before* sending the signal or we
+ * have a race.
+ */
+ vc->vt_newvt = new_vc->vc_num;
+ if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
+ /*
+ * It worked. Mark the vt to switch to and
+ * return. The process needs to send us a
+ * VT_RELDISP ioctl to complete the switch.
+ */
+ return;
+ }
+
+ /*
+ * The controlling process has died, so we revert to
+ * normal operation. In this case, we'll also change back
+ * to KD_TEXT mode. I'm not sure if this is strictly correct
+ * but it saves the agony when the X server dies and the screen
+ * remains blanked due to KD_GRAPHICS! It would be nice to do
+ * this outside of VT_PROCESS but there is no single process
+ * to account for and tracking tty count may be undesirable.
+ */
+ reset_vc(vc);
+
+ /*
+ * Fall through to normal (VT_AUTO) handling of the switch...
+ */
+ }
+
+ /*
+ * Ignore all switches in KD_GRAPHICS+VT_AUTO mode
+ */
+ if (vc->vc_mode == KD_GRAPHICS)
+ return;
+
+ complete_change_console(new_vc);
+}
+
+/* Perform a kernel triggered VT switch for suspend/resume */
+
+static int disable_vt_switch;
+
+int vt_move_to_console(unsigned int vt, int alloc)
+{
+ int prev;
+
+ console_lock();
+ /* Graphics mode - up to X */
+ if (disable_vt_switch) {
+ console_unlock();
+ return 0;
+ }
+ prev = fg_console;
+
+ if (alloc && vc_allocate(vt)) {
+ /* We couldn't allocate a free VC; don't disturb
+ * the current screen, just give up. */
+ console_unlock();
+ return -ENOSPC;
+ }
+
+ if (set_console(vt)) {
+ /*
+ * We're unable to switch to the SUSPEND_CONSOLE.
+ * Let the calling function know so it can decide
+ * what to do.
+ */
+ console_unlock();
+ return -EIO;
+ }
+ console_unlock();
+ tty_lock();
+ if (vt_waitactive(vt + 1)) {
+ pr_debug("Suspend: Can't switch VCs.");
+ tty_unlock();
+ return -EINTR;
+ }
+ tty_unlock();
+ return prev;
+}
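A hedged kernel-side sketch of how suspend code might use this helper; the hook names and console index are assumptions, and the index passed and returned is the same 0-based one fg_console uses.

#include <linux/errno.h>
#include <linux/vt.h>

static int saved_console = -1;

static int my_suspend_prepare(void)		/* hypothetical pm hook */
{
	/* park the display on the last console, allocating it if needed */
	saved_console = vt_move_to_console(MAX_NR_CONSOLES - 1, 1);
	return saved_console < 0 ? -EIO : 0;
}

static void my_resume_finish(void)		/* hypothetical pm hook */
{
	if (saved_console >= 0)
		vt_move_to_console(saved_console, 0);	/* return to where the user was */
}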
+
+/*
+ * Normally during a suspend, we allocate a new console and switch to it.
+ * When we resume, we switch back to the original console. This switch
+ * can be slow, so on systems where the framebuffer can handle restoration
+ * of video registers anyway, there's little point in doing the console
+ * switch. This function allows you to disable it by passing it '0'.
+ */
+void pm_set_vt_switch(int do_switch)
+{
+ console_lock();
+ disable_vt_switch = !do_switch;
+ console_unlock();
+}
+EXPORT_SYMBOL(pm_set_vt_switch);
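A hedged sketch of the opt-out described in the comment above, for a hypothetical video driver that restores its own registers on resume; the function name is an assumption, only pm_set_vt_switch() comes from the code above.

#include <linux/vt.h>

static void myfb_disable_suspend_vt_switch(void)	/* hypothetical driver hook */
{
	/* This hardware is reprogrammed fully on resume, so the
	 * suspend-time console switch buys nothing; skip it. */
	pm_set_vt_switch(0);
}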